summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorbubulle <bubulle@alioth.debian.org>2011-06-07 20:08:36 +0000
committerbubulle <bubulle@alioth.debian.org>2011-06-07 20:08:36 +0000
commit6fe9013ae23927a67fa6b6033e2711cef99b3533 (patch)
tree5018bffeace42912accb0d67ddd3893fd15b61d1 /lib
parent4d16e8d5702fb98dda73c5c0f3404d662ae62df6 (diff)
downloadsamba-6fe9013ae23927a67fa6b6033e2711cef99b3533.tar.gz
Load samba-3.6.0rc2 into branches/samba/upstream.
git-svn-id: svn://svn.debian.org/svn/pkg-samba/branches/samba/upstream@3807 fc4039ab-9d04-0410-8cac-899223bdd6b0
Diffstat (limited to 'lib')
-rw-r--r--lib/README3
-rw-r--r--lib/addns/addns.h30
-rw-r--r--lib/addns/dns.h540
-rw-r--r--lib/addns/dnserr.h87
-rw-r--r--lib/addns/dnsgss.c334
-rw-r--r--lib/addns/dnsmarshall.c530
-rw-r--r--lib/addns/dnsrecord.c422
-rw-r--r--lib/addns/dnssock.c375
-rw-r--r--lib/addns/dnsutils.c151
-rw-r--r--lib/addns/error.c59
-rw-r--r--lib/addns/wscript_build7
-rw-r--r--lib/async_req/async_sock.c129
-rw-r--r--lib/async_req/async_sock.h21
-rw-r--r--lib/async_req/config.mk4
-rw-r--r--lib/async_req/wscript_build9
-rw-r--r--lib/compression/lzxpress.c17
-rw-r--r--lib/compression/testsuite.c51
-rw-r--r--lib/compression/wscript_build6
-rw-r--r--lib/crypto/aes.c22
-rw-r--r--lib/crypto/aes.h4
-rw-r--r--lib/crypto/arcfour.c2
-rw-r--r--lib/crypto/arcfour.h2
-rw-r--r--lib/crypto/config.mk18
-rw-r--r--lib/crypto/crc32.c2
-rw-r--r--lib/crypto/hmacmd5.c2
-rw-r--r--lib/crypto/hmacmd5test.c3
-rw-r--r--lib/crypto/hmacsha256.c2
-rw-r--r--lib/crypto/md4.c2
-rw-r--r--lib/crypto/md4test.c5
-rw-r--r--lib/crypto/md5.c12
-rw-r--r--lib/crypto/md5test.c3
-rw-r--r--lib/crypto/sha256.c2
-rw-r--r--lib/crypto/wscript_build14
-rw-r--r--lib/dnspython/.gitignore7
-rw-r--r--lib/dnspython/ChangeLog1123
-rw-r--r--lib/dnspython/LICENSE14
-rw-r--r--lib/dnspython/MANIFEST.in3
-rw-r--r--lib/dnspython/Makefile56
-rw-r--r--lib/dnspython/README402
-rw-r--r--lib/dnspython/TODO17
-rw-r--r--lib/dnspython/dns/__init__.py53
-rw-r--r--lib/dnspython/dns/dnssec.py372
-rw-r--r--lib/dnspython/dns/e164.py79
-rw-r--r--lib/dnspython/dns/edns.py142
-rw-r--r--lib/dnspython/dns/entropy.py123
-rw-r--r--lib/dnspython/dns/exception.py40
-rw-r--r--lib/dnspython/dns/flags.py106
-rw-r--r--lib/dnspython/dns/hash.py67
-rw-r--r--lib/dnspython/dns/inet.py108
-rw-r--r--lib/dnspython/dns/ipv4.py36
-rw-r--r--lib/dnspython/dns/ipv6.py163
-rw-r--r--lib/dnspython/dns/message.py1087
-rw-r--r--lib/dnspython/dns/name.py700
-rw-r--r--lib/dnspython/dns/namedict.py59
-rw-r--r--lib/dnspython/dns/node.py172
-rw-r--r--lib/dnspython/dns/opcode.py104
-rw-r--r--lib/dnspython/dns/query.py492
-rw-r--r--lib/dnspython/dns/rcode.py119
-rw-r--r--lib/dnspython/dns/rdata.py460
-rw-r--r--lib/dnspython/dns/rdataclass.py114
-rw-r--r--lib/dnspython/dns/rdataset.py329
-rw-r--r--lib/dnspython/dns/rdatatype.py232
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/AFSDB.py51
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/CERT.py131
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/CNAME.py24
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/DLV.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/DNAME.py21
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/DNSKEY.py25
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/DS.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/GPOS.py156
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/HINFO.py83
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/HIP.py140
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/ISDN.py96
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/KEY.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/LOC.py334
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/MX.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/NS.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/NSEC.py141
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/NSEC3.py182
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/NSEC3PARAM.py88
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/NXT.py99
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/PTR.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/RP.py86
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/RRSIG.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/RT.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/SIG.py26
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/SOA.py127
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/SPF.py22
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/SSHFP.py77
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/TXT.py20
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/X25.py62
-rw-r--r--lib/dnspython/dns/rdtypes/ANY/__init__.py48
-rw-r--r--lib/dnspython/dns/rdtypes/IN/A.py57
-rw-r--r--lib/dnspython/dns/rdtypes/IN/AAAA.py58
-rw-r--r--lib/dnspython/dns/rdtypes/IN/APL.py170
-rw-r--r--lib/dnspython/dns/rdtypes/IN/DHCID.py60
-rw-r--r--lib/dnspython/dns/rdtypes/IN/IPSECKEY.py159
-rw-r--r--lib/dnspython/dns/rdtypes/IN/KX.py20
-rw-r--r--lib/dnspython/dns/rdtypes/IN/NAPTR.py132
-rw-r--r--lib/dnspython/dns/rdtypes/IN/NSAP.py59
-rw-r--r--lib/dnspython/dns/rdtypes/IN/NSAP_PTR.py20
-rw-r--r--lib/dnspython/dns/rdtypes/IN/PX.py97
-rw-r--r--lib/dnspython/dns/rdtypes/IN/SRV.py89
-rw-r--r--lib/dnspython/dns/rdtypes/IN/WKS.py113
-rw-r--r--lib/dnspython/dns/rdtypes/IN/__init__.py30
-rw-r--r--lib/dnspython/dns/rdtypes/__init__.py25
-rw-r--r--lib/dnspython/dns/rdtypes/dsbase.py92
-rw-r--r--lib/dnspython/dns/rdtypes/keybase.py149
-rw-r--r--lib/dnspython/dns/rdtypes/mxbase.py105
-rw-r--r--lib/dnspython/dns/rdtypes/nsbase.py82
-rw-r--r--lib/dnspython/dns/rdtypes/sigbase.py168
-rw-r--r--lib/dnspython/dns/rdtypes/txtbase.py87
-rw-r--r--lib/dnspython/dns/renderer.py324
-rw-r--r--lib/dnspython/dns/resolver.py773
-rw-r--r--lib/dnspython/dns/reversename.py75
-rw-r--r--lib/dnspython/dns/rrset.py175
-rw-r--r--lib/dnspython/dns/set.py263
-rw-r--r--lib/dnspython/dns/tokenizer.py547
-rw-r--r--lib/dnspython/dns/tsig.py223
-rw-r--r--lib/dnspython/dns/tsigkeyring.py44
-rw-r--r--lib/dnspython/dns/ttl.py64
-rw-r--r--lib/dnspython/dns/update.py245
-rw-r--r--lib/dnspython/dns/version.py34
-rw-r--r--lib/dnspython/dns/zone.py855
-rwxr-xr-xlib/dnspython/examples/ddns.py51
-rwxr-xr-xlib/dnspython/examples/e164.py6
-rwxr-xr-xlib/dnspython/examples/mx.py7
-rwxr-xr-xlib/dnspython/examples/name.py13
-rwxr-xr-xlib/dnspython/examples/reverse.py40
-rwxr-xr-xlib/dnspython/examples/reverse_name.py6
-rwxr-xr-xlib/dnspython/examples/xfr.py10
-rwxr-xr-xlib/dnspython/examples/zonediff.py270
-rwxr-xr-xlib/dnspython/setup.py63
-rw-r--r--lib/dnspython/tests/Makefile26
-rw-r--r--lib/dnspython/tests/bugs.py44
-rw-r--r--lib/dnspython/tests/dnssec.py146
-rw-r--r--lib/dnspython/tests/example225
-rw-r--r--lib/dnspython/tests/example1.good121
-rw-r--r--lib/dnspython/tests/example2.good121
-rw-r--r--lib/dnspython/tests/flags.py59
-rw-r--r--lib/dnspython/tests/message.py179
-rw-r--r--lib/dnspython/tests/name.py697
-rw-r--r--lib/dnspython/tests/namedict.py102
-rw-r--r--lib/dnspython/tests/ntoaaton.py156
-rw-r--r--lib/dnspython/tests/rdtypeandclass.py123
-rw-r--r--lib/dnspython/tests/resolver.py127
-rw-r--r--lib/dnspython/tests/rrset.py54
-rw-r--r--lib/dnspython/tests/set.py208
-rw-r--r--lib/dnspython/tests/tokenizer.py190
-rw-r--r--lib/dnspython/tests/update.py114
-rw-r--r--lib/dnspython/tests/zone.py389
-rw-r--r--lib/dnspython/util/COPYRIGHT14
-rw-r--r--lib/dnspython/util/copyrights117
-rw-r--r--lib/iniparser/AUTHORS5
-rw-r--r--lib/iniparser/INSTALL12
-rw-r--r--lib/iniparser/LICENSE21
-rw-r--r--lib/iniparser/Makefile63
-rw-r--r--lib/iniparser/README11
-rw-r--r--lib/iniparser/html/doxygen.css358
-rw-r--r--lib/iniparser/html/doxygen.pngbin0 -> 1281 bytes
-rw-r--r--lib/iniparser/html/globals_func.html54
-rw-r--r--lib/iniparser/html/index.html156
-rw-r--r--lib/iniparser/html/iniparser_8h.html629
-rw-r--r--lib/iniparser/html/iniparser_8main.html19
-rw-r--r--lib/iniparser/html/tab_b.gifbin0 -> 35 bytes
-rw-r--r--lib/iniparser/html/tab_l.gifbin0 -> 706 bytes
-rw-r--r--lib/iniparser/html/tab_r.gifbin0 -> 2585 bytes
-rw-r--r--lib/iniparser/html/tabs.css102
-rw-r--r--lib/iniparser/src/dictionary.c514
-rw-r--r--lib/iniparser/src/dictionary.h244
-rw-r--r--lib/iniparser/src/iniparser.c536
-rw-r--r--lib/iniparser/src/iniparser.h296
-rw-r--r--lib/iniparser/src/strlib.c211
-rw-r--r--lib/iniparser/src/strlib.h108
-rw-r--r--lib/iniparser/src/wscript_build7
-rw-r--r--lib/iniparser/test/Makefile24
-rw-r--r--lib/iniparser/test/iniexample.c117
-rw-r--r--lib/iniparser_build/config.m445
-rw-r--r--lib/iniparser_build/dictionary.c7
-rw-r--r--lib/iniparser_build/iniparser.c7
-rw-r--r--lib/iniparser_build/strlib.c7
-rw-r--r--lib/nss_wrapper/config.mk7
-rw-r--r--lib/nss_wrapper/nss_wrapper.c33
-rwxr-xr-x[-rw-r--r--]lib/nss_wrapper/nss_wrapper.pl45
-rw-r--r--lib/nss_wrapper/testsuite.c111
-rw-r--r--lib/nss_wrapper/wscript17
-rw-r--r--lib/nss_wrapper/wscript_build10
-rw-r--r--lib/popt/config.mk5
-rw-r--r--lib/popt/popt.h4
-rw-r--r--lib/popt/wscript18
-rw-r--r--lib/replace/Makefile63
-rw-r--r--lib/replace/README2
-rw-r--r--lib/replace/autoconf-2.60.m450
-rwxr-xr-xlib/replace/autogen-autotools.sh (renamed from lib/replace/autogen.sh)0
-rwxr-xr-xlib/replace/configure21
-rw-r--r--lib/replace/crypt.c6
-rw-r--r--lib/replace/getifaddrs.c2
-rw-r--r--lib/replace/getpass.c14
-rw-r--r--lib/replace/hdr_replace.h2
-rw-r--r--lib/replace/libreplace.m483
-rw-r--r--lib/replace/libreplace_cc.m41
-rw-r--r--lib/replace/libreplace_macros.m44
-rw-r--r--lib/replace/libreplace_network.m440
-rw-r--r--lib/replace/poll.c133
-rw-r--r--lib/replace/replace-test.h10
-rw-r--r--lib/replace/replace.c153
-rw-r--r--lib/replace/replace.h85
-rw-r--r--lib/replace/system/config.m416
-rw-r--r--lib/replace/system/filesys.h2
-rw-r--r--lib/replace/system/network.h8
-rw-r--r--lib/replace/system/passwd.h6
-rw-r--r--lib/replace/system/readline.h8
-rw-r--r--lib/replace/system/select.h36
-rw-r--r--lib/replace/system/time.h22
-rw-r--r--lib/replace/system/wait.h4
-rw-r--r--lib/replace/system/wscript_configure26
-rw-r--r--lib/replace/test/os2_delete.c4
-rw-r--r--lib/replace/test/snprintf.c29
-rw-r--r--lib/replace/test/testsuite.c42
-rw-r--r--lib/replace/wscript465
-rw-r--r--lib/smbconf/config.mk3
-rw-r--r--lib/smbconf/smbconf.c186
-rw-r--r--lib/smbconf/smbconf.h438
-rw-r--r--lib/smbconf/smbconf_private.h36
-rw-r--r--lib/smbconf/smbconf_txt.c212
-rw-r--r--lib/smbconf/smbconf_txt.h2
-rw-r--r--lib/smbconf/smbconf_util.c26
-rw-r--r--lib/smbconf/wscript_build7
-rw-r--r--lib/socket_wrapper/config.mk8
-rw-r--r--lib/socket_wrapper/socket_wrapper.c707
-rw-r--r--lib/socket_wrapper/socket_wrapper.h6
-rw-r--r--lib/socket_wrapper/testsuite.c2
-rw-r--r--lib/socket_wrapper/wscript15
-rw-r--r--lib/socket_wrapper/wscript_build9
-rw-r--r--lib/subunit/Apache-2.0202
-rw-r--r--lib/subunit/BSD26
-rw-r--r--lib/subunit/COPYING36
-rw-r--r--lib/subunit/INSTALL32
-rw-r--r--lib/subunit/MANIFEST.in21
-rw-r--r--lib/subunit/Makefile.am136
-rw-r--r--lib/subunit/NEWS224
-rw-r--r--lib/subunit/README212
-rw-r--r--lib/subunit/c++/README50
-rw-r--r--lib/subunit/c++/SubunitTestProgressListener.cpp63
-rw-r--r--lib/subunit/c++/SubunitTestProgressListener.h56
-rw-r--r--lib/subunit/c/README68
-rw-r--r--lib/subunit/c/include/subunit/child.h96
-rw-r--r--lib/subunit/c/lib/child.c104
-rw-r--r--lib/subunit/c/tests/test_child.c231
-rw-r--r--lib/subunit/c/wscript16
-rw-r--r--lib/subunit/configure.ac75
-rwxr-xr-xlib/subunit/filters/subunit-filter105
-rwxr-xr-xlib/subunit/filters/subunit-ls44
-rwxr-xr-xlib/subunit/filters/subunit-notify65
-rwxr-xr-xlib/subunit/filters/subunit-stats41
-rwxr-xr-xlib/subunit/filters/subunit-tags26
-rwxr-xr-xlib/subunit/filters/subunit2gtk259
-rwxr-xr-xlib/subunit/filters/subunit2junitxml65
-rwxr-xr-xlib/subunit/filters/subunit2pyunit48
-rwxr-xr-xlib/subunit/filters/tap2subunit26
-rw-r--r--lib/subunit/libcppunit_subunit.pc.in11
-rw-r--r--lib/subunit/libsubunit.pc.in11
-rwxr-xr-xlib/subunit/perl/Makefile.PL.in20
-rw-r--r--lib/subunit/perl/lib/Subunit.pm183
-rw-r--r--lib/subunit/perl/lib/Subunit/Diff.pm85
-rwxr-xr-xlib/subunit/perl/subunit-diff31
-rw-r--r--lib/subunit/python/iso8601/LICENSE20
-rw-r--r--lib/subunit/python/iso8601/README26
-rw-r--r--lib/subunit/python/iso8601/README.subunit5
-rw-r--r--lib/subunit/python/iso8601/setup.py58
-rw-r--r--lib/subunit/python/iso8601/test_iso8601.py111
-rw-r--r--lib/subunit/python/subunit/__init__.py1150
-rw-r--r--lib/subunit/python/subunit/chunked.py164
-rw-r--r--lib/subunit/python/subunit/details.py113
-rw-r--r--lib/subunit/python/subunit/iso8601.py123
-rw-r--r--lib/subunit/python/subunit/progress_model.py106
-rwxr-xr-xlib/subunit/python/subunit/run.py73
-rw-r--r--lib/subunit/python/subunit/test_results.py382
-rw-r--r--lib/subunit/python/subunit/tests/__init__.py44
-rwxr-xr-xlib/subunit/python/subunit/tests/sample-script.py7
-rw-r--r--lib/subunit/python/subunit/tests/test_chunked.py127
-rw-r--r--lib/subunit/python/subunit/tests/test_details.py111
-rw-r--r--lib/subunit/python/subunit/tests/test_progress_model.py118
-rw-r--r--lib/subunit/python/subunit/tests/test_subunit_filter.py136
-rw-r--r--lib/subunit/python/subunit/tests/test_subunit_stats.py83
-rw-r--r--lib/subunit/python/subunit/tests/test_subunit_tags.py68
-rw-r--r--lib/subunit/python/subunit/tests/test_tap2subunit.py443
-rw-r--r--lib/subunit/python/subunit/tests/test_test_protocol.py984
-rw-r--r--lib/subunit/python/subunit/tests/test_test_results.py199
-rwxr-xr-xlib/subunit/runtests.py138
-rwxr-xr-xlib/subunit/setup.py59
-rw-r--r--lib/subunit/shell/README62
-rw-r--r--lib/subunit/shell/share/subunit.sh56
-rwxr-xr-xlib/subunit/shell/tests/test_function_output.sh97
-rwxr-xr-xlib/subunit/shell/tests/test_source_library.sh108
-rwxr-xr-xlib/subunit/tap2subunit35
-rw-r--r--lib/talloc/ABI/talloc-2.0.2.sigs62
-rw-r--r--lib/talloc/ABI/talloc-2.0.3.sigs62
-rw-r--r--lib/talloc/ABI/talloc-2.0.4.sigs62
-rw-r--r--lib/talloc/ABI/talloc-2.0.5.sigs62
-rw-r--r--lib/talloc/Makefile66
-rw-r--r--lib/talloc/Makefile.in62
-rw-r--r--lib/talloc/aclocal.m41
-rwxr-xr-xlib/talloc/autogen.sh14
-rw-r--r--lib/talloc/build_macros.m414
-rwxr-xr-xlib/talloc/config.guess1561
-rw-r--r--lib/talloc/config.mk7
-rwxr-xr-xlib/talloc/config.sub1686
-rwxr-xr-xlib/talloc/configure21
-rw-r--r--lib/talloc/configure.ac49
-rw-r--r--lib/talloc/doc/mainpage.dox105
-rw-r--r--lib/talloc/doxy.config1538
-rwxr-xr-xlib/talloc/install-sh238
-rw-r--r--lib/talloc/pytalloc-util.pc.in11
-rw-r--r--lib/talloc/pytalloc.c144
-rw-r--r--lib/talloc/pytalloc.h11
-rw-r--r--lib/talloc/pytalloc_util.c118
-rw-r--r--lib/talloc/rules.mk18
-rwxr-xr-xlib/talloc/script/mksigs.pl16
-rw-r--r--lib/talloc/script/mksyms.awk16
-rwxr-xr-xlib/talloc/script/release-script.sh64
-rw-r--r--lib/talloc/talloc.3.xml21
-rw-r--r--lib/talloc/talloc.c661
-rw-r--r--lib/talloc/talloc.exports68
-rw-r--r--lib/talloc/talloc.h1644
-rw-r--r--lib/talloc/talloc.mk52
-rw-r--r--lib/talloc/talloc.pc.in2
-rw-r--r--lib/talloc/talloc.signatures62
-rw-r--r--lib/talloc/talloc_guide.txt28
-rw-r--r--lib/talloc/talloc_testsuite.h7
-rw-r--r--lib/talloc/testsuite.c200
-rw-r--r--lib/talloc/testsuite_main.c3
-rw-r--r--lib/talloc/wscript151
-rw-r--r--lib/tdb/ABI/tdb-1.2.1.sigs95
-rw-r--r--lib/tdb/ABI/tdb-1.2.2.sigs60
-rw-r--r--lib/tdb/ABI/tdb-1.2.3.sigs60
-rw-r--r--lib/tdb/ABI/tdb-1.2.4.sigs60
-rw-r--r--lib/tdb/ABI/tdb-1.2.5.sigs61
-rw-r--r--lib/tdb/ABI/tdb-1.2.6.sigs61
-rw-r--r--lib/tdb/ABI/tdb-1.2.7.sigs61
-rw-r--r--lib/tdb/ABI/tdb-1.2.8.sigs61
-rw-r--r--lib/tdb/ABI/tdb-1.2.9.sigs62
-rw-r--r--lib/tdb/Makefile66
-rw-r--r--lib/tdb/Makefile.in79
-rw-r--r--lib/tdb/aclocal.m41
-rwxr-xr-xlib/tdb/autogen.sh14
-rw-r--r--lib/tdb/build_macros.m414
-rw-r--r--lib/tdb/common/check.c75
-rw-r--r--lib/tdb/common/dump.c8
-rw-r--r--lib/tdb/common/error.c8
-rw-r--r--lib/tdb/common/freelist.c12
-rw-r--r--lib/tdb/common/freelistcheck.c2
-rw-r--r--lib/tdb/common/hash.c380
-rw-r--r--lib/tdb/common/io.c30
-rw-r--r--lib/tdb/common/lock.c677
-rw-r--r--lib/tdb/common/open.c203
-rw-r--r--lib/tdb/common/summary.c201
-rw-r--r--lib/tdb/common/tdb.c91
-rw-r--r--lib/tdb/common/tdb_private.h64
-rw-r--r--lib/tdb/common/transaction.c245
-rw-r--r--lib/tdb/common/traverse.c26
-rwxr-xr-xlib/tdb/config.guess1561
-rw-r--r--lib/tdb/config.mk57
-rwxr-xr-xlib/tdb/config.sub1686
-rwxr-xr-xlib/tdb/configure21
-rw-r--r--lib/tdb/configure.ac51
-rw-r--r--lib/tdb/docs/README19
-rw-r--r--lib/tdb/docs/mainpage.dox61
-rw-r--r--lib/tdb/doxy.config1700
-rw-r--r--lib/tdb/include/tdb.h731
-rwxr-xr-xlib/tdb/install-sh238
-rw-r--r--lib/tdb/libtdb.m48
-rw-r--r--lib/tdb/manpages/tdbbackup.8.xml4
-rw-r--r--lib/tdb/manpages/tdbdump.8.xml4
-rw-r--r--lib/tdb/manpages/tdbrestore.8.xml66
-rw-r--r--lib/tdb/manpages/tdbtool.8.xml4
-rw-r--r--lib/tdb/pytdb.c112
-rw-r--r--lib/tdb/python.mk6
-rw-r--r--lib/tdb/python/tdbdump.py2
-rw-r--r--lib/tdb/python/tests/simple.py59
-rwxr-xr-xlib/tdb/release-script.sh48
-rw-r--r--lib/tdb/rules.mk16
-rwxr-xr-xlib/tdb/script/release-script.sh67
-rw-r--r--lib/tdb/tdb.exports65
-rw-r--r--lib/tdb/tdb.mk106
-rw-r--r--lib/tdb/tdb.pc.in2
-rw-r--r--lib/tdb/tdb.signatures60
-rw-r--r--lib/tdb/tools/tdbbackup.c21
-rw-r--r--lib/tdb/tools/tdbrestore.c225
-rw-r--r--lib/tdb/tools/tdbtest.c31
-rw-r--r--lib/tdb/tools/tdbtool.c12
-rw-r--r--lib/tdb/tools/tdbtorture.c221
-rw-r--r--lib/tdb/wscript136
-rw-r--r--lib/tdr/config.mk9
-rw-r--r--lib/tdr/tdr.c19
-rw-r--r--lib/tdr/tdr.h45
-rw-r--r--lib/tdr/testsuite.c22
-rw-r--r--lib/tdr/wscript_build9
-rw-r--r--lib/testtools/.testr.conf4
-rw-r--r--lib/testtools/HACKING135
-rw-r--r--lib/testtools/LICENSE39
-rw-r--r--lib/testtools/MANIFEST.in8
-rw-r--r--lib/testtools/MANUAL349
-rw-r--r--lib/testtools/Makefile35
-rw-r--r--lib/testtools/NEWS468
-rw-r--r--lib/testtools/README72
-rwxr-xr-xlib/testtools/setup.py64
-rw-r--r--lib/testtools/testtools/__init__.py72
-rw-r--r--lib/testtools/testtools/_spinner.py316
-rw-r--r--lib/testtools/testtools/compat.py279
-rw-r--r--lib/testtools/testtools/content.py102
-rw-r--r--lib/testtools/testtools/content_type.py33
-rw-r--r--lib/testtools/testtools/deferredruntest.py336
-rw-r--r--lib/testtools/testtools/helpers.py64
-rw-r--r--lib/testtools/testtools/matchers.py530
-rw-r--r--lib/testtools/testtools/monkey.py97
-rwxr-xr-xlib/testtools/testtools/run.py332
-rw-r--r--lib/testtools/testtools/runtest.py200
-rw-r--r--lib/testtools/testtools/testcase.py677
-rw-r--r--lib/testtools/testtools/testresult/__init__.py19
-rw-r--r--lib/testtools/testtools/testresult/doubles.py111
-rw-r--r--lib/testtools/testtools/testresult/real.py620
-rw-r--r--lib/testtools/testtools/tests/__init__.py41
-rw-r--r--lib/testtools/testtools/tests/helpers.py72
-rw-r--r--lib/testtools/testtools/tests/test_compat.py257
-rw-r--r--lib/testtools/testtools/tests/test_content.py94
-rw-r--r--lib/testtools/testtools/tests/test_content_type.py46
-rw-r--r--lib/testtools/testtools/tests/test_deferredruntest.py738
-rw-r--r--lib/testtools/testtools/tests/test_fixturesupport.py77
-rw-r--r--lib/testtools/testtools/tests/test_helpers.py106
-rw-r--r--lib/testtools/testtools/tests/test_matchers.py451
-rw-r--r--lib/testtools/testtools/tests/test_monkey.py167
-rw-r--r--lib/testtools/testtools/tests/test_run.py76
-rw-r--r--lib/testtools/testtools/tests/test_runtest.py300
-rw-r--r--lib/testtools/testtools/tests/test_spinner.py332
-rw-r--r--lib/testtools/testtools/tests/test_testresult.py1374
-rw-r--r--lib/testtools/testtools/tests/test_testsuite.py53
-rw-r--r--lib/testtools/testtools/tests/test_testtools.py1135
-rw-r--r--lib/testtools/testtools/testsuite.py87
-rw-r--r--lib/testtools/testtools/utils.py13
-rw-r--r--lib/tevent/ABI/tevent-0.9.10.sigs73
-rw-r--r--lib/tevent/ABI/tevent-0.9.11.sigs73
-rw-r--r--lib/tevent/ABI/tevent-0.9.9.sigs73
-rw-r--r--lib/tevent/Makefile51
-rw-r--r--lib/tevent/Makefile.in79
-rwxr-xr-xlib/tevent/autogen.sh14
-rw-r--r--lib/tevent/bindings.py62
-rw-r--r--lib/tevent/build_macros.m415
-rwxr-xr-xlib/tevent/config.guess1561
-rwxr-xr-xlib/tevent/config.sub1686
-rwxr-xr-xlib/tevent/configure21
-rw-r--r--lib/tevent/configure.ac25
-rw-r--r--lib/tevent/doc/mainpage.dox42
-rw-r--r--lib/tevent/doc/tutorials.dox43
-rw-r--r--lib/tevent/doxy.config1538
-rwxr-xr-xlib/tevent/install-sh238
-rw-r--r--lib/tevent/libtalloc.m47
-rw-r--r--lib/tevent/libtevent.m41
-rw-r--r--lib/tevent/pkg.m4156
-rw-r--r--lib/tevent/pytevent.c762
-rw-r--r--lib/tevent/rules.mk18
-rw-r--r--lib/tevent/samba.m411
-rw-r--r--lib/tevent/testsuite.c2
-rw-r--r--lib/tevent/tevent.c24
-rw-r--r--lib/tevent/tevent.exports62
-rw-r--r--lib/tevent/tevent.h1120
-rw-r--r--lib/tevent/tevent.mk46
-rw-r--r--lib/tevent/tevent.pc.in2
-rw-r--r--lib/tevent/tevent.signatures57
-rw-r--r--lib/tevent/tevent_epoll.c5
-rw-r--r--lib/tevent/tevent_fd.c6
-rw-r--r--lib/tevent/tevent_internal.h3
-rw-r--r--lib/tevent/tevent_liboop.c4
-rw-r--r--lib/tevent/tevent_poll.c307
-rw-r--r--lib/tevent/tevent_req.c184
-rw-r--r--lib/tevent/tevent_select.c5
-rw-r--r--lib/tevent/tevent_signal.c2
-rw-r--r--lib/tevent/tevent_standard.c5
-rw-r--r--lib/tevent/tevent_timed.c4
-rw-r--r--lib/tevent/tevent_util.h163
-rw-r--r--lib/tevent/wscript109
-rw-r--r--lib/torture/config.mk17
-rw-r--r--lib/torture/subunit.c97
-rw-r--r--lib/torture/torture.c254
-rw-r--r--lib/torture/torture.h85
-rw-r--r--lib/torture/torture.pc.in2
-rw-r--r--lib/torture/wscript_build9
-rw-r--r--lib/tsocket/config.mk13
-rw-r--r--lib/tsocket/tsocket.c3
-rw-r--r--lib/tsocket/tsocket.h66
-rw-r--r--lib/tsocket/tsocket_bsd.c144
-rw-r--r--lib/tsocket/tsocket_guide.txt78
-rw-r--r--lib/tsocket/wscript_build9
-rw-r--r--lib/uid_wrapper/config.mk9
-rw-r--r--lib/uid_wrapper/uid_wrapper.c16
-rw-r--r--lib/uid_wrapper/uid_wrapper.h14
-rw-r--r--lib/uid_wrapper/wscript17
-rw-r--r--lib/uid_wrapper/wscript_build10
-rwxr-xr-xlib/update-external.sh23
-rw-r--r--lib/util/asn1.c232
-rw-r--r--lib/util/asn1.h10
-rw-r--r--lib/util/attr.h18
-rw-r--r--lib/util/become_daemon.c10
-rw-r--r--lib/util/binsearch.h16
-rw-r--r--lib/util/byteorder.h29
-rw-r--r--lib/util/capability.m417
-rw-r--r--lib/util/charset/charcnv.c252
-rw-r--r--lib/util/charset/charset.h53
-rw-r--r--lib/util/charset/codepoints.c424
-rw-r--r--lib/util/charset/config.m486
-rw-r--r--lib/util/charset/config.mk11
-rw-r--r--lib/util/charset/iconv.c92
-rw-r--r--lib/util/charset/tests/charset.c2
-rw-r--r--lib/util/charset/tests/iconv.c26
-rw-r--r--lib/util/charset/util_unistr.c140
-rw-r--r--lib/util/charset/wscript_build18
-rw-r--r--lib/util/charset/wscript_configure21
-rw-r--r--lib/util/config.mk90
-rw-r--r--lib/util/data_blob.c61
-rw-r--r--lib/util/data_blob.h13
-rw-r--r--lib/util/debug.c1040
-rw-r--r--lib/util/debug.h313
-rw-r--r--lib/util/debug_s3.c106
-rw-r--r--lib/util/debug_s3.h24
-rw-r--r--lib/util/dlinklist.h164
-rw-r--r--lib/util/fault.c28
-rw-r--r--lib/util/fault.m418
-rw-r--r--lib/util/fsusage.m4200
-rw-r--r--lib/util/genrand.c48
-rw-r--r--lib/util/idtree.c12
-rw-r--r--lib/util/memory.h2
-rw-r--r--lib/util/mutex.c56
-rw-r--r--lib/util/mutex.h75
-rw-r--r--lib/util/params.c6
-rw-r--r--lib/util/samba-util.pc.in11
-rw-r--r--lib/util/select.c158
-rw-r--r--lib/util/select.h31
-rw-r--r--lib/util/signal.m41
-rw-r--r--lib/util/system.c72
-rw-r--r--lib/util/talloc_stack.c15
-rw-r--r--lib/util/talloc_stack.h2
-rw-r--r--lib/util/tdb_wrap.c194
-rw-r--r--lib/util/tdb_wrap.h42
-rw-r--r--lib/util/tests/anonymous_shared.c70
-rw-r--r--lib/util/tests/asn1_tests.c282
-rw-r--r--lib/util/tests/data_blob.c5
-rw-r--r--lib/util/tests/dlinklist.c130
-rw-r--r--lib/util/tests/file.c5
-rw-r--r--lib/util/tests/genrand.c2
-rw-r--r--lib/util/tests/idtree.c2
-rw-r--r--lib/util/tests/parmlist.c8
-rw-r--r--lib/util/tests/str.c2
-rw-r--r--lib/util/tests/strlist.c79
-rw-r--r--lib/util/tests/time.c5
-rw-r--r--lib/util/tevent_ntstatus.c44
-rw-r--r--lib/util/tevent_ntstatus.h15
-rw-r--r--lib/util/tevent_unix.c2
-rw-r--r--lib/util/tevent_unix.h2
-rw-r--r--lib/util/tevent_werror.c81
-rw-r--r--lib/util/tevent_werror.h43
-rw-r--r--lib/util/time.c115
-rw-r--r--lib/util/time.h32
-rw-r--r--lib/util/time.m49
-rw-r--r--lib/util/tsort.h40
-rw-r--r--lib/util/unix_privs.c17
-rw-r--r--lib/util/util.c297
-rw-r--r--lib/util/util.h149
-rw-r--r--lib/util/util.m41
-rw-r--r--lib/util/util_file.c24
-rw-r--r--lib/util/util_id.c4
-rw-r--r--lib/util/util_ldb.c145
-rw-r--r--lib/util/util_ldb.h10
-rw-r--r--lib/util/util_net.c171
-rw-r--r--lib/util/util_net.h65
-rw-r--r--lib/util/util_pw.c66
-rw-r--r--lib/util/util_pw.h39
-rw-r--r--lib/util/util_runcmd.c312
-rw-r--r--lib/util/util_str.c13
-rw-r--r--lib/util/util_strlist.c23
-rw-r--r--lib/util/util_tdb.c6
-rw-r--r--lib/util/util_tdb.h5
-rwxr-xr-xlib/util/wscript_build103
-rw-r--r--lib/util/wscript_configure106
-rw-r--r--lib/util/xattr.m432
-rw-r--r--lib/wscript_build34
-rw-r--r--lib/zlib/contrib/ada/zlib-streams.ads6
-rw-r--r--lib/zlib/contrib/ada/zlib-thin.ads2
-rw-r--r--lib/zlib/contrib/minizip/miniunz.c2
-rw-r--r--lib/zlib/contrib/minizip/minizip.c4
-rw-r--r--lib/zlib/contrib/minizip/unzip.c8
-rw-r--r--lib/zlib/contrib/minizip/unzip.h6
-rw-r--r--lib/zlib/contrib/minizip/zip.c6
-rw-r--r--lib/zlib/contrib/puff/puff.c2
-rw-r--r--lib/zlib/deflate.h2
-rw-r--r--lib/zlib/examples/fitblk.c2
-rw-r--r--lib/zlib/examples/gun.c2
-rw-r--r--lib/zlib/gzio.c4
-rw-r--r--lib/zlib/wscript31
-rw-r--r--lib/zlib/zlib.h2
599 files changed, 65389 insertions, 16209 deletions
diff --git a/lib/README b/lib/README
index acae62c378..85b7952db8 100644
--- a/lib/README
+++ b/lib/README
@@ -1,4 +1,5 @@
compression - Various compression algorithms (MSZIP, lzxpress)
+dnspython - Python module for working with DNS.
nss_wrapper - Wrapper for the user and group NSS API allowing the use
of other data sources.
popt - Command-line option parsing library
@@ -6,6 +7,8 @@ replace - Provides replacements for standard (POSIX, C99) functions
not provided by the host platform.
socket_wrapper - Wrapper library allowing TCP/IP traffic to be redirected
over Unix domain sockets.
+subunit - Utilities and bindings for working with the Subunit test result
+ reporting protocol.
talloc - Hierarchical pool based memory allocator
tdb - Simple but fast key/value database library, supporting multiple writers
torture - Simple unit testing helper library
diff --git a/lib/addns/addns.h b/lib/addns/addns.h
new file mode 100644
index 0000000000..6ef9329df8
--- /dev/null
+++ b/lib/addns/addns.h
@@ -0,0 +1,30 @@
+/*
+ Public Interface file for Linux DNS client library implementation
+
+ Copyright (C) 2006 Krishna Ganugapati <krishnag@centeris.com>
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _ADDNS_H
+#define _ADDNS_H
+
+
+#endif /* _ADDNS_H */
+
diff --git a/lib/addns/dns.h b/lib/addns/dns.h
new file mode 100644
index 0000000000..29f1ed3e99
--- /dev/null
+++ b/lib/addns/dns.h
@@ -0,0 +1,540 @@
+/*
+ Linux DNS client library implementation
+
+ Copyright (C) 2006 Krishna Ganugapati <krishnag@centeris.com>
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _DNS_H
+#define _DNS_H
+
+#include "../replace/replace.h"
+#include "system/network.h"
+
+/* make sure we have included the correct config.h */
+#ifndef NO_CONFIG_H /* for some tests */
+#ifndef CONFIG_H_IS_FROM_SAMBA
+#error "make sure you have removed all config.h files from standalone builds!"
+#error "the included config.h isn't from samba!"
+#endif
+#endif /* NO_CONFIG_H */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <time.h>
+#include <string.h>
+#include <errno.h>
+#include <netdb.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <stdarg.h>
+
+#ifdef HAVE_UUID_UUID_H
+#include <uuid/uuid.h>
+#endif
+
+#ifdef HAVE_KRB5_H
+#include <krb5.h>
+#endif
+
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+
+#ifndef int16
+#define int16 int16_t
+#endif
+
+#ifndef uint16
+#define uint16 uint16_t
+#endif
+
+#ifndef int32
+#define int32 int32_t
+#endif
+
+#ifndef uint32
+#define uint32 uint32_t
+#endif
+#endif
+
+#ifdef HAVE_KRB5_H
+#include <krb5.h>
+#endif
+
+#if HAVE_GSSAPI_GSSAPI_H
+#include <gssapi/gssapi.h>
+#elif HAVE_GSSAPI_GSSAPI_GENERIC_H
+#include <gssapi/gssapi_generic.h>
+#elif HAVE_GSSAPI_H
+#include <gssapi.h>
+#endif
+
+#if defined(HAVE_GSSAPI_H) || defined(HAVE_GSSAPI_GSSAPI_H) || defined(HAVE_GSSAPI_GSSAPI_GENERIC_H)
+#define HAVE_GSSAPI_SUPPORT 1
+#endif
+
+#include <talloc.h>
+
+#if 0
+
+Disable these now we have checked all code paths and ensured
+NULL returns on zero request. JRA.
+
+void *_talloc_zero_zeronull(const void *ctx, size_t size, const char *name);
+void *_talloc_memdup_zeronull(const void *t, const void *p, size_t size, const char *name);
+void *_talloc_array_zeronull(const void *ctx, size_t el_size, unsigned count, const char *name);
+void *_talloc_zero_array_zeronull(const void *ctx, size_t el_size, unsigned count, const char *name);
+void *talloc_zeronull(const void *context, size_t size, const char *name);
+
+#define TALLOC(ctx, size) talloc_zeronull(ctx, size, __location__)
+#define TALLOC_P(ctx, type) (type *)talloc_zeronull(ctx, sizeof(type), #type)
+#define TALLOC_ARRAY(ctx, type, count) (type *)_talloc_array_zeronull(ctx, sizeof(type), count, #type)
+#define TALLOC_MEMDUP(ctx, ptr, size) _talloc_memdup_zeronull(ctx, ptr, size, __location__)
+#define TALLOC_ZERO(ctx, size) _talloc_zero_zeronull(ctx, size, __location__)
+#define TALLOC_ZERO_P(ctx, type) (type *)_talloc_zero_zeronull(ctx, sizeof(type), #type)
+#define TALLOC_ZERO_ARRAY(ctx, type, count) (type *)_talloc_zero_array_zeronull(ctx, sizeof(type), count, #type)
+#define TALLOC_SIZE(ctx, size) talloc_zeronull(ctx, size, __location__)
+#define TALLOC_ZERO_SIZE(ctx, size) _talloc_zero_zeronull(ctx, size, __location__)
+
+#else
+
+#define TALLOC(ctx, size) talloc_named_const(ctx, size, __location__)
+#define TALLOC_P(ctx, type) (type *)talloc_named_const(ctx, sizeof(type), #type)
+#define TALLOC_ARRAY(ctx, type, count) (type *)_talloc_array(ctx, sizeof(type), count, #type)
+#define TALLOC_MEMDUP(ctx, ptr, size) _talloc_memdup(ctx, ptr, size, __location__)
+#define TALLOC_ZERO(ctx, size) _talloc_zero(ctx, size, __location__)
+#define TALLOC_ZERO_P(ctx, type) (type *)_talloc_zero(ctx, sizeof(type), #type)
+#define TALLOC_ZERO_ARRAY(ctx, type, count) (type *)_talloc_zero_array(ctx, sizeof(type), count, #type)
+#define TALLOC_SIZE(ctx, size) talloc_named_const(ctx, size, __location__)
+#define TALLOC_ZERO_SIZE(ctx, size) _talloc_zero(ctx, size, __location__)
+
+#endif
+
+#define TALLOC_REALLOC(ctx, ptr, count) _talloc_realloc(ctx, ptr, count, __location__)
+#define TALLOC_REALLOC_ARRAY(ctx, ptr, type, count) (type *)_talloc_realloc_array(ctx, ptr, sizeof(type), count, #type)
+#define talloc_destroy(ctx) talloc_free(ctx)
+#ifndef TALLOC_FREE
+#define TALLOC_FREE(ctx) do { talloc_free(ctx); ctx=NULL; } while(0)
+#endif
+
+/*******************************************************************
+ Type definitions for int16, int32, uint16 and uint32. Needed
+ for Samba coding style
+*******************************************************************/
+
+#ifndef uint8
+# define uint8 unsigned char
+#endif
+
+#if !defined(int16) && !defined(HAVE_INT16_FROM_RPC_RPC_H)
+# if (SIZEOF_SHORT == 4)
+# define int16 __ERROR___CANNOT_DETERMINE_TYPE_FOR_INT16;
+# else /* SIZEOF_SHORT != 4 */
+# define int16 short
+# endif /* SIZEOF_SHORT != 4 */
+ /* needed to work around compile issue on HP-UX 11.x */
+# define _INT16 1
+#endif
+
+/*
+ * Note we duplicate the size tests in the unsigned
+ * case as int16 may be a typedef from rpc/rpc.h
+ */
+
+#if !defined(uint16) && !defined(HAVE_UINT16_FROM_RPC_RPC_H)
+# if (SIZEOF_SHORT == 4)
+# define uint16 __ERROR___CANNOT_DETERMINE_TYPE_FOR_INT16;
+# else /* SIZEOF_SHORT != 4 */
+# define uint16 unsigned short
+# endif /* SIZEOF_SHORT != 4 */
+#endif
+
+#if !defined(int32) && !defined(HAVE_INT32_FROM_RPC_RPC_H)
+# if (SIZEOF_INT == 4)
+# define int32 int
+# elif (SIZEOF_LONG == 4)
+# define int32 long
+# elif (SIZEOF_SHORT == 4)
+# define int32 short
+# else
+ /* uggh - no 32 bit type?? probably a CRAY. just hope this works ... */
+# define int32 int
+# endif
+# ifndef _INT32
+ /* needed to work around compile issue on HP-UX 11.x */
+# define _INT32 1
+# endif
+#endif
+
+/*
+ * Note we duplicate the size tests in the unsigned
+ * case as int32 may be a typedef from rpc/rpc.h
+ */
+
+#if !defined(uint32) && !defined(HAVE_UINT32_FROM_RPC_RPC_H)
+# if (SIZEOF_INT == 4)
+# define uint32 unsigned int
+# elif (SIZEOF_LONG == 4)
+# define uint32 unsigned long
+# elif (SIZEOF_SHORT == 4)
+# define uint32 unsigned short
+# else
+ /* uggh - no 32 bit type?? probably a CRAY. just hope this works ... */
+# define uint32 unsigned
+# endif
+#endif
+
+/*
+ * check for 8 byte long long
+ */
+
+#if !defined(uint64)
+# if (SIZEOF_LONG == 8)
+# define uint64 unsigned long
+# elif (SIZEOF_LONG_LONG == 8)
+# define uint64 unsigned long long
+# endif /* don't lie. If we don't have it, then don't use it */
+#endif
+
+/* needed on Sun boxes */
+#ifndef INADDR_NONE
+#define INADDR_NONE 0xFFFFFFFF
+#endif
+
+#include "dnserr.h"
+
+
+#define DNS_TCP 1
+#define DNS_UDP 2
+
+#define DNS_OPCODE_UPDATE 1
+
+/* DNS Class Types */
+
+#define DNS_CLASS_IN 1
+#define DNS_CLASS_ANY 255
+#define DNS_CLASS_NONE 254
+
+/* DNS RR Types */
+
+#define DNS_RR_A 1
+
+#define DNS_TCP_PORT 53
+#define DNS_UDP_PORT 53
+
+#define QTYPE_A 1
+#define QTYPE_NS 2
+#define QTYPE_MD 3
+#define QTYPE_CNAME 5
+#define QTYPE_SOA 6
+#define QTYPE_ANY 255
+#define QTYPE_TKEY 249
+#define QTYPE_TSIG 250
+
+/*
+MF 4 a mail forwarder (Obsolete - use MX)
+CNAME 5 the canonical name for an alias
+SOA 6 marks the start of a zone of authority
+MB 7 a mailbox domain name (EXPERIMENTAL)
+MG 8 a mail group member (EXPERIMENTAL)
+MR 9 a mail rename domain name (EXPERIMENTAL)
+NULL 10 a null RR (EXPERIMENTAL)
+WKS 11 a well known service description
+PTR 12 a domain name pointer
+HINFO 13 host information
+MINFO 14 mailbox or mail list information
+MX 15 mail exchange
+TXT 16 text strings
+*/
+
+#define QR_QUERY 0x0000
+#define QR_RESPONSE 0x0001
+
+#define OPCODE_QUERY 0x00
+#define OPCODE_IQUERY 0x01
+#define OPCODE_STATUS 0x02
+
+#define AA 1
+
+#define RECURSION_DESIRED 0x01
+
+#define RCODE_NOERROR 0
+#define RCODE_FORMATERROR 1
+#define RCODE_SERVER_FAILURE 2
+#define RCODE_NAME_ERROR 3
+#define RCODE_NOTIMPLEMENTED 4
+#define RCODE_REFUSED 5
+
+#define SENDBUFFER_SIZE 65536
+#define RECVBUFFER_SIZE 65536
+
+/*
+ * TKEY Modes from rfc2930
+ */
+
+#define DNS_TKEY_MODE_SERVER 1
+#define DNS_TKEY_MODE_DH 2
+#define DNS_TKEY_MODE_GSSAPI 3
+#define DNS_TKEY_MODE_RESOLVER 4
+#define DNS_TKEY_MODE_DELETE 5
+
+
+#define DNS_ONE_DAY_IN_SECS 86400
+#define DNS_TEN_HOURS_IN_SECS 36000
+
+#define SOCKET_ERROR -1
+#define INVALID_SOCKET -1
+
+#define DNS_NO_ERROR 0
+#define DNS_FORMAT_ERROR 1
+#define DNS_SERVER_FAILURE 2
+#define DNS_NAME_ERROR 3
+#define DNS_NOT_IMPLEMENTED 4
+#define DNS_REFUSED 5
+
+typedef long HANDLE;
+
+enum dns_ServerType { DNS_SRV_ANY, DNS_SRV_WIN2000, DNS_SRV_WIN2003 };
+
+struct dns_domain_label {
+ struct dns_domain_label *next;
+ char *label;
+ size_t len;
+};
+
+struct dns_domain_name {
+ struct dns_domain_label *pLabelList;
+};
+
+struct dns_question {
+ struct dns_domain_name *name;
+ uint16 q_type;
+ uint16 q_class;
+};
+
+/*
+ * Before changing the definition of dns_zone, look
+ * dns_marshall_update_request(), we rely on this being the same as
+ * dns_question right now.
+ */
+
+struct dns_zone {
+ struct dns_domain_name *name;
+ uint16 z_type;
+ uint16 z_class;
+};
+
+struct dns_rrec {
+ struct dns_domain_name *name;
+ uint16 type;
+ uint16 r_class;
+ uint32 ttl;
+ uint16 data_length;
+ uint8 *data;
+};
+
+struct dns_tkey_record {
+ struct dns_domain_name *algorithm;
+ time_t inception;
+ time_t expiration;
+ uint16 mode;
+ uint16 error;
+ uint16 key_length;
+ uint8 *key;
+};
+
+struct dns_request {
+ uint16 id;
+ uint16 flags;
+ uint16 num_questions;
+ uint16 num_answers;
+ uint16 num_auths;
+ uint16 num_additionals;
+ struct dns_question **questions;
+ struct dns_rrec **answers;
+ struct dns_rrec **auths;
+ struct dns_rrec **additionals;
+};
+
+/*
+ * Before changing the definition of dns_update_request, look
+ * dns_marshall_update_request(), we rely on this being the same as
+ * dns_request right now.
+ */
+
+struct dns_update_request {
+ uint16 id;
+ uint16 flags;
+ uint16 num_zones;
+ uint16 num_preqs;
+ uint16 num_updates;
+ uint16 num_additionals;
+ struct dns_zone **zones;
+ struct dns_rrec **preqs;
+ struct dns_rrec **updates;
+ struct dns_rrec **additionals;
+};
+
+struct dns_connection {
+ int32 hType;
+ int s;
+ struct sockaddr RecvAddr;
+};
+
+struct dns_buffer {
+ uint8 *data;
+ size_t size;
+ size_t offset;
+ DNS_ERROR error;
+};
+
+/* from dnsutils.c */
+
+DNS_ERROR dns_domain_name_from_string( TALLOC_CTX *mem_ctx,
+ const char *pszDomainName,
+ struct dns_domain_name **presult );
+char *dns_generate_keyname( TALLOC_CTX *mem_ctx );
+
+/* from dnsrecord.c */
+
+DNS_ERROR dns_create_query( TALLOC_CTX *mem_ctx, const char *name,
+ uint16 q_type, uint16 q_class,
+ struct dns_request **preq );
+DNS_ERROR dns_create_update( TALLOC_CTX *mem_ctx, const char *name,
+ struct dns_update_request **preq );
+DNS_ERROR dns_create_probe(TALLOC_CTX *mem_ctx, const char *zone,
+ const char *host, int num_ips,
+ const struct sockaddr_storage *sslist,
+ struct dns_update_request **preq);
+DNS_ERROR dns_create_rrec(TALLOC_CTX *mem_ctx, const char *name,
+ uint16 type, uint16 r_class, uint32 ttl,
+ uint16 data_length, uint8 *data,
+ struct dns_rrec **prec);
+DNS_ERROR dns_add_rrec(TALLOC_CTX *mem_ctx, struct dns_rrec *rec,
+ uint16 *num_records, struct dns_rrec ***records);
+DNS_ERROR dns_create_tkey_record(TALLOC_CTX *mem_ctx, const char *keyname,
+ const char *algorithm_name, time_t inception,
+ time_t expiration, uint16 mode, uint16 error,
+ uint16 key_length, const uint8 *key,
+ struct dns_rrec **prec);
+DNS_ERROR dns_create_name_in_use_record(TALLOC_CTX *mem_ctx,
+ const char *name,
+ const struct sockaddr_storage *ip,
+ struct dns_rrec **prec);
+DNS_ERROR dns_create_delete_record(TALLOC_CTX *mem_ctx, const char *name,
+ uint16 type, uint16 r_class,
+ struct dns_rrec **prec);
+DNS_ERROR dns_create_name_not_in_use_record(TALLOC_CTX *mem_ctx,
+ const char *name, uint32 type,
+ struct dns_rrec **prec);
+DNS_ERROR dns_create_a_record(TALLOC_CTX *mem_ctx, const char *host,
+ uint32 ttl, const struct sockaddr_storage *pss,
+ struct dns_rrec **prec);
+DNS_ERROR dns_unmarshall_tkey_record(TALLOC_CTX *mem_ctx, struct dns_rrec *rec,
+ struct dns_tkey_record **ptkey);
+DNS_ERROR dns_create_tsig_record(TALLOC_CTX *mem_ctx, const char *keyname,
+ const char *algorithm_name,
+ time_t time_signed, uint16 fudge,
+ uint16 mac_length, const uint8 *mac,
+ uint16 original_id, uint16 error,
+ struct dns_rrec **prec);
+DNS_ERROR dns_add_rrec(TALLOC_CTX *mem_ctx, struct dns_rrec *rec,
+ uint16 *num_records, struct dns_rrec ***records);
+DNS_ERROR dns_create_update_request(TALLOC_CTX *mem_ctx,
+ const char *domainname,
+ const char *hostname,
+ const struct sockaddr_storage *ip_addr,
+ size_t num_adds,
+ struct dns_update_request **preq);
+
+/* from dnssock.c */
+
+DNS_ERROR dns_open_connection( const char *nameserver, int32 dwType,
+ TALLOC_CTX *mem_ctx,
+ struct dns_connection **conn );
+DNS_ERROR dns_send(struct dns_connection *conn, const struct dns_buffer *buf);
+DNS_ERROR dns_receive(TALLOC_CTX *mem_ctx, struct dns_connection *conn,
+ struct dns_buffer **presult);
+DNS_ERROR dns_transaction(TALLOC_CTX *mem_ctx, struct dns_connection *conn,
+ const struct dns_request *req,
+ struct dns_request **resp);
+DNS_ERROR dns_update_transaction(TALLOC_CTX *mem_ctx,
+ struct dns_connection *conn,
+ struct dns_update_request *up_req,
+ struct dns_update_request **up_resp);
+
+/* from dnsmarshall.c */
+
+struct dns_buffer *dns_create_buffer(TALLOC_CTX *mem_ctx);
+void dns_marshall_buffer(struct dns_buffer *buf, const uint8 *data,
+ size_t len);
+void dns_marshall_uint16(struct dns_buffer *buf, uint16 val);
+void dns_marshall_uint32(struct dns_buffer *buf, uint32 val);
+void dns_unmarshall_buffer(struct dns_buffer *buf, uint8 *data,
+ size_t len);
+void dns_unmarshall_uint16(struct dns_buffer *buf, uint16 *val);
+void dns_unmarshall_uint32(struct dns_buffer *buf, uint32 *val);
+void dns_unmarshall_domain_name(TALLOC_CTX *mem_ctx,
+ struct dns_buffer *buf,
+ struct dns_domain_name **pname);
+void dns_marshall_domain_name(struct dns_buffer *buf,
+ const struct dns_domain_name *name);
+void dns_unmarshall_domain_name(TALLOC_CTX *mem_ctx,
+ struct dns_buffer *buf,
+ struct dns_domain_name **pname);
+DNS_ERROR dns_marshall_request(TALLOC_CTX *mem_ctx,
+ const struct dns_request *req,
+ struct dns_buffer **pbuf);
+DNS_ERROR dns_unmarshall_request(TALLOC_CTX *mem_ctx,
+ struct dns_buffer *buf,
+ struct dns_request **preq);
+DNS_ERROR dns_marshall_update_request(TALLOC_CTX *mem_ctx,
+ struct dns_update_request *update,
+ struct dns_buffer **pbuf);
+DNS_ERROR dns_unmarshall_update_request(TALLOC_CTX *mem_ctx,
+ struct dns_buffer *buf,
+ struct dns_update_request **pupreq);
+struct dns_request *dns_update2request(struct dns_update_request *update);
+struct dns_update_request *dns_request2update(struct dns_request *request);
+uint16 dns_response_code(uint16 flags);
+const char *dns_errstr(DNS_ERROR err);
+
+/* from dnsgss.c */
+
+#ifdef HAVE_GSSAPI_SUPPORT
+
+void display_status( const char *msg, OM_uint32 maj_stat, OM_uint32 min_stat );
+DNS_ERROR dns_negotiate_sec_ctx( const char *target_realm,
+ const char *servername,
+ const char *keyname,
+ gss_ctx_id_t *gss_ctx,
+ enum dns_ServerType srv_type );
+DNS_ERROR dns_sign_update(struct dns_update_request *req,
+ gss_ctx_id_t gss_ctx,
+ const char *keyname,
+ const char *algorithmname,
+ time_t time_signed, uint16 fudge);
+
+#endif /* HAVE_GSSAPI_SUPPORT */
+
+#endif /* _DNS_H */
diff --git a/lib/addns/dnserr.h b/lib/addns/dnserr.h
new file mode 100644
index 0000000000..8638a2d434
--- /dev/null
+++ b/lib/addns/dnserr.h
@@ -0,0 +1,87 @@
+/*
+ Error codes for Linux DNS client library implementation
+
+ Copyright (C) 2006 Krishna Ganugapati <krishnag@centeris.com>
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _DNSERR_H
+#define _DNSERR_H
+
+
+/* The Splint code analysis tool (http://www.splint.org.) doesn't
+ like immediate structures. */
+
+#ifdef _SPLINT_
+#undef HAVE_IMMEDIATE_STRUCTURES
+#endif
+
+/* Setup the DNS_ERROR typedef. Technique taken from nt_status.h */
+
+#if defined(HAVE_IMMEDIATE_STRUCTURES)
+typedef struct {uint32 v;} DNS_ERROR;
+#define ERROR_DNS(x) ((DNS_ERROR) { x })
+#define ERROR_DNS_V(x) ((x).v)
+#else
+typedef uint32 DNS_ERROR;
+#define ERROR_DNS(x) (x)
+#define ERROR_DNS_V(x) (x)
+#endif
+
+#define ERR_DNS_IS_OK(x) (ERROR_DNS_V(x) == 0)
+#define ERR_DNS_EQUAL(x,y) (ERROR_DNS_V(x) == ERROR_DNS_V(y))
+
+/*************************************************
+ * Define the error codes here
+ *************************************************/
+
+#define ERROR_DNS_SUCCESS ERROR_DNS(0)
+#define ERROR_DNS_RECORD_NOT_FOUND ERROR_DNS(1)
+#define ERROR_DNS_BAD_RESPONSE ERROR_DNS(2)
+#define ERROR_DNS_INVALID_PARAMETER ERROR_DNS(3)
+#define ERROR_DNS_NO_MEMORY ERROR_DNS(4)
+#define ERROR_DNS_INVALID_NAME_SERVER ERROR_DNS(5)
+#define ERROR_DNS_CONNECTION_FAILED ERROR_DNS(6)
+#define ERROR_DNS_GSS_ERROR ERROR_DNS(7)
+#define ERROR_DNS_INVALID_NAME ERROR_DNS(8)
+#define ERROR_DNS_INVALID_MESSAGE ERROR_DNS(9)
+#define ERROR_DNS_SOCKET_ERROR ERROR_DNS(10)
+#define ERROR_DNS_UPDATE_FAILED ERROR_DNS(11)
+
+/*
+ * About to be removed, transitional error
+ */
+#define ERROR_DNS_UNSUCCESSFUL ERROR_DNS(999)
+
+
+#define ERROR_BAD_RESPONSE 1
+#define ERROR_RECORD_NOT_FOUND 2
+#define ERROR_OUTOFMEMORY 8
+#if !defined(ERROR_INVALID_PARAMETER)
+#define ERROR_INVALID_PARAMETER 87
+#endif
+
+/*
+ * About to be removed, transitional error
+ */
+#define ERROR_UNSUCCESSFUL 999
+
+#endif /* _DNSERR_H */
+
diff --git a/lib/addns/dnsgss.c b/lib/addns/dnsgss.c
new file mode 100644
index 0000000000..c9037417da
--- /dev/null
+++ b/lib/addns/dnsgss.c
@@ -0,0 +1,334 @@
+/*
+ Public Interface file for Linux DNS client library implementation
+
+ Copyright (C) 2006 Krishna Ganugapati <krishnag@centeris.com>
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "dns.h"
+#include <ctype.h>
+
+
+#ifdef HAVE_GSSAPI_SUPPORT
+
+/*********************************************************************
+*********************************************************************/
+
+#ifndef HAVE_STRUPR
+static int strupr( char *szDomainName )
+{
+ if ( !szDomainName ) {
+ return ( 0 );
+ }
+ while ( *szDomainName != '\0' ) {
+ *szDomainName = toupper( *szDomainName );
+ szDomainName++;
+ }
+ return ( 0 );
+}
+#endif
+
+#if 0
+/*********************************************************************
+*********************************************************************/
+
+static void display_status_1( const char *m, OM_uint32 code, int type )
+{
+ OM_uint32 maj_stat, min_stat;
+ gss_buffer_desc msg;
+ OM_uint32 msg_ctx;
+
+ msg_ctx = 0;
+ while ( 1 ) {
+ maj_stat = gss_display_status( &min_stat, code,
+ type, GSS_C_NULL_OID,
+ &msg_ctx, &msg );
+ fprintf( stdout, "GSS-API error %s: %s\n", m,
+ ( char * ) msg.value );
+ ( void ) gss_release_buffer( &min_stat, &msg );
+
+ if ( !msg_ctx )
+ break;
+ }
+}
+
+/*********************************************************************
+*********************************************************************/
+
+void display_status( const char *msg, OM_uint32 maj_stat, OM_uint32 min_stat )
+{
+ display_status_1( msg, maj_stat, GSS_C_GSS_CODE );
+ display_status_1( msg, min_stat, GSS_C_MECH_CODE );
+}
+#endif
+
+static DNS_ERROR dns_negotiate_gss_ctx_int( TALLOC_CTX *mem_ctx,
+ struct dns_connection *conn,
+ const char *keyname,
+ const gss_name_t target_name,
+ gss_ctx_id_t *ctx,
+ enum dns_ServerType srv_type )
+{
+ struct gss_buffer_desc_struct input_desc, *input_ptr, output_desc;
+ OM_uint32 major, minor;
+ OM_uint32 ret_flags;
+ DNS_ERROR err;
+
+ gss_OID_desc krb5_oid_desc =
+ { 9, (char *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" };
+
+ *ctx = GSS_C_NO_CONTEXT;
+ input_ptr = NULL;
+
+ do {
+ major = gss_init_sec_context(
+ &minor, NULL, ctx, target_name, &krb5_oid_desc,
+ GSS_C_REPLAY_FLAG | GSS_C_MUTUAL_FLAG |
+ GSS_C_CONF_FLAG |
+ GSS_C_INTEG_FLAG,
+ 0, NULL, input_ptr, NULL, &output_desc,
+ &ret_flags, NULL );
+
+ if (input_ptr != NULL) {
+ TALLOC_FREE(input_desc.value);
+ }
+
+ if (output_desc.length != 0) {
+
+ struct dns_request *req;
+ struct dns_rrec *rec;
+ struct dns_buffer *buf;
+
+ time_t t = time(NULL);
+
+ err = dns_create_query(mem_ctx, keyname, QTYPE_TKEY,
+ DNS_CLASS_IN, &req);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ err = dns_create_tkey_record(
+ req, keyname, "gss.microsoft.com", t,
+ t + 86400, DNS_TKEY_MODE_GSSAPI, 0,
+ output_desc.length, (uint8 *)output_desc.value,
+ &rec );
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+			/* Windows 2000 DNS is broken and requires the
+			   TKEY payload in the Answer section instead
+			   of the Additional section like Windows 2003 */
+
+ if ( srv_type == DNS_SRV_WIN2000 ) {
+ err = dns_add_rrec(req, rec, &req->num_answers,
+ &req->answers);
+ } else {
+ err = dns_add_rrec(req, rec, &req->num_additionals,
+ &req->additionals);
+ }
+
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ err = dns_marshall_request(req, req, &buf);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ err = dns_send(conn, buf);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ TALLOC_FREE(req);
+ }
+
+ gss_release_buffer(&minor, &output_desc);
+
+ if ((major != GSS_S_COMPLETE) &&
+ (major != GSS_S_CONTINUE_NEEDED)) {
+ return ERROR_DNS_GSS_ERROR;
+ }
+
+ if (major == GSS_S_CONTINUE_NEEDED) {
+
+ struct dns_request *resp;
+ struct dns_buffer *buf;
+ struct dns_tkey_record *tkey;
+
+ err = dns_receive(mem_ctx, conn, &buf);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ err = dns_unmarshall_request(buf, buf, &resp);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ /*
+ * TODO: Compare id and keyname
+ */
+
+ if ((resp->num_additionals != 1) ||
+ (resp->num_answers == 0) ||
+ (resp->answers[0]->type != QTYPE_TKEY)) {
+ err = ERROR_DNS_INVALID_MESSAGE;
+ goto error;
+ }
+
+ err = dns_unmarshall_tkey_record(
+ mem_ctx, resp->answers[0], &tkey);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ input_desc.length = tkey->key_length;
+ input_desc.value = talloc_move(mem_ctx, &tkey->key);
+
+ input_ptr = &input_desc;
+
+ TALLOC_FREE(buf);
+ }
+
+ } while ( major == GSS_S_CONTINUE_NEEDED );
+
+ /* If we arrive here, we have a valid security context */
+
+ err = ERROR_DNS_SUCCESS;
+
+ error:
+
+ return err;
+}
+
+DNS_ERROR dns_negotiate_sec_ctx( const char *target_realm,
+ const char *servername,
+ const char *keyname,
+ gss_ctx_id_t *gss_ctx,
+ enum dns_ServerType srv_type )
+{
+ OM_uint32 major, minor;
+
+ char *upcaserealm, *targetname;
+ DNS_ERROR err;
+
+ gss_buffer_desc input_name;
+ struct dns_connection *conn;
+
+ gss_name_t targ_name;
+
+ gss_OID_desc nt_host_oid_desc =
+ {10, (char *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x01"};
+
+ TALLOC_CTX *mem_ctx;
+
+ if (!(mem_ctx = talloc_init("dns_negotiate_sec_ctx"))) {
+ return ERROR_DNS_NO_MEMORY;
+ }
+
+ err = dns_open_connection( servername, DNS_TCP, mem_ctx, &conn );
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ if (!(upcaserealm = talloc_strdup(mem_ctx, target_realm))) {
+ err = ERROR_DNS_NO_MEMORY;
+ goto error;
+ }
+
+ strupr(upcaserealm);
+
+ if (!(targetname = talloc_asprintf(mem_ctx, "dns/%s@%s",
+ servername, upcaserealm))) {
+ err = ERROR_DNS_NO_MEMORY;
+ goto error;
+ }
+
+ input_name.value = targetname;
+ input_name.length = strlen(targetname);
+
+ major = gss_import_name( &minor, &input_name,
+ &nt_host_oid_desc, &targ_name );
+
+ if (major) {
+ err = ERROR_DNS_GSS_ERROR;
+ goto error;
+ }
+
+ err = dns_negotiate_gss_ctx_int(mem_ctx, conn, keyname,
+ targ_name, gss_ctx, srv_type );
+
+ gss_release_name( &minor, &targ_name );
+
+ error:
+ TALLOC_FREE(mem_ctx);
+
+ return err;
+}
+
+DNS_ERROR dns_sign_update(struct dns_update_request *req,
+ gss_ctx_id_t gss_ctx,
+ const char *keyname,
+ const char *algorithmname,
+ time_t time_signed, uint16 fudge)
+{
+ struct dns_buffer *buf;
+ DNS_ERROR err;
+ struct dns_domain_name *key, *algorithm;
+ struct gss_buffer_desc_struct msg, mic;
+ OM_uint32 major, minor;
+ struct dns_rrec *rec;
+
+ err = dns_marshall_update_request(req, req, &buf);
+ if (!ERR_DNS_IS_OK(err)) return err;
+
+ err = dns_domain_name_from_string(buf, keyname, &key);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ err = dns_domain_name_from_string(buf, algorithmname, &algorithm);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ dns_marshall_domain_name(buf, key);
+ dns_marshall_uint16(buf, DNS_CLASS_ANY);
+ dns_marshall_uint32(buf, 0); /* TTL */
+ dns_marshall_domain_name(buf, algorithm);
+ dns_marshall_uint16(buf, 0); /* Time prefix for 48-bit time_t */
+ dns_marshall_uint32(buf, time_signed);
+ dns_marshall_uint16(buf, fudge);
+ dns_marshall_uint16(buf, 0); /* error */
+ dns_marshall_uint16(buf, 0); /* other len */
+
+ err = buf->error;
+ if (!ERR_DNS_IS_OK(buf->error)) goto error;
+
+ msg.value = (void *)buf->data;
+ msg.length = buf->offset;
+
+ major = gss_get_mic(&minor, gss_ctx, 0, &msg, &mic);
+ if (major != 0) {
+ err = ERROR_DNS_GSS_ERROR;
+ goto error;
+ }
+
+ if (mic.length > 0xffff) {
+ gss_release_buffer(&minor, &mic);
+ err = ERROR_DNS_GSS_ERROR;
+ goto error;
+ }
+
+ err = dns_create_tsig_record(buf, keyname, algorithmname, time_signed,
+ fudge, mic.length, (uint8 *)mic.value,
+ req->id, 0, &rec);
+ gss_release_buffer(&minor, &mic);
+ if (!ERR_DNS_IS_OK(err)) goto error;
+
+ err = dns_add_rrec(req, rec, &req->num_additionals, &req->additionals);
+
+ error:
+ TALLOC_FREE(buf);
+ return err;
+}
+
+#endif /* HAVE_GSSAPI_SUPPORT */
diff --git a/lib/addns/dnsmarshall.c b/lib/addns/dnsmarshall.c
new file mode 100644
index 0000000000..5530290c57
--- /dev/null
+++ b/lib/addns/dnsmarshall.c
@@ -0,0 +1,530 @@
+/*
+ Linux DNS client library implementation
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "dns.h"
+#include "assert.h"
+
+/*
+ * Allocate an empty marshalling buffer on mem_ctx.
+ * Returns NULL on allocation failure.
+ */
+struct dns_buffer *dns_create_buffer(TALLOC_CTX *mem_ctx)
+{
+	struct dns_buffer *buf = talloc(mem_ctx, struct dns_buffer);
+
+	if (buf == NULL) {
+		return NULL;
+	}
+
+	buf->offset = 0;
+	buf->error = ERROR_DNS_SUCCESS;
+
+	/*
+	 * Deliberately tiny initial allocation so the realloc path in
+	 * dns_marshall_buffer is exercised regularly.
+	 */
+	buf->size = 2;
+
+	buf->data = TALLOC_ARRAY(buf, uint8, buf->size);
+	if (buf->data == NULL) {
+		TALLOC_FREE(buf);
+		return NULL;
+	}
+
+	return buf;
+}
+
+/*
+ * Append len bytes to the marshalling buffer, growing it as needed.
+ * On failure buf->error is set and every subsequent marshall call
+ * becomes a no-op, so callers can batch calls and check once.
+ */
+void dns_marshall_buffer(struct dns_buffer *buf, const uint8 *data,
+			 size_t len)
+{
+	size_t needed;
+
+	if (!ERR_DNS_IS_OK(buf->error)) return;
+
+	needed = buf->offset + len;
+
+	if (needed < buf->offset) {
+		/* size_t wraparound */
+		buf->error = ERROR_DNS_INVALID_PARAMETER;
+		return;
+	}
+
+	if (needed > 0xffff) {
+		/* a DNS message can carry at most 64k */
+		buf->error = ERROR_DNS_INVALID_PARAMETER;
+		return;
+	}
+
+	if (needed > buf->size) {
+		/* round up to a multiple of 64 to limit realloc traffic */
+		size_t new_size = needed + (64 - (needed % 64));
+		uint8 *tmp;
+
+		tmp = TALLOC_REALLOC_ARRAY(buf, buf->data, uint8, new_size);
+		if (tmp == NULL) {
+			buf->error = ERROR_DNS_NO_MEMORY;
+			return;
+		}
+
+		buf->data = tmp;
+		buf->size = new_size;
+	}
+
+	memcpy(buf->data + buf->offset, data, len);
+	buf->offset = needed;
+}
+
+/* Append a 16-bit value in network byte order. */
+void dns_marshall_uint16(struct dns_buffer *buf, uint16 val)
+{
+	uint16 wire = htons(val);
+
+	dns_marshall_buffer(buf, (uint8 *)&wire, sizeof(wire));
+}
+
+/* Append a 32-bit value in network byte order. */
+void dns_marshall_uint32(struct dns_buffer *buf, uint32 val)
+{
+	uint32 wire = htonl(val);
+
+	dns_marshall_buffer(buf, (uint8 *)&wire, sizeof(wire));
+}
+
+/*
+ * Copy len bytes out of the buffer at the current offset.
+ * Sets buf->error on a short buffer; no-op if an error is pending.
+ */
+void dns_unmarshall_buffer(struct dns_buffer *buf, uint8 *data,
+			   size_t len)
+{
+	if (!(ERR_DNS_IS_OK(buf->error))) return;
+
+	/* the first test also catches len values so large that
+	 * offset + len would wrap */
+	if ((len > buf->size) || (buf->offset + len > buf->size)) {
+		buf->error = ERROR_DNS_INVALID_MESSAGE;
+		return;
+	}
+
+	memcpy(data, buf->data + buf->offset, len);
+	buf->offset += len;
+}
+
+void dns_unmarshall_uint16(struct dns_buffer *buf, uint16 *val)
+{
+ uint16 n_val;
+
+ dns_unmarshall_buffer(buf, (uint8 *)&n_val, sizeof(n_val));
+ if (!(ERR_DNS_IS_OK(buf->error))) return;
+
+ *val = ntohs(n_val);
+}
+
+void dns_unmarshall_uint32(struct dns_buffer *buf, uint32 *val)
+{
+ uint32 n_val;
+
+ dns_unmarshall_buffer(buf, (uint8 *)&n_val, sizeof(n_val));
+ if (!(ERR_DNS_IS_OK(buf->error))) return;
+
+ *val = ntohl(n_val);
+}
+
+/*
+ * Marshall a domain name in RFC 1035 wire format: for every label a
+ * length octet followed by the label bytes, terminated by a zero octet.
+ *
+ * TODO: Implement DNS compression
+ */
+void dns_marshall_domain_name(struct dns_buffer *buf,
+			      const struct dns_domain_name *name)
+{
+	struct dns_domain_label *l;
+	char terminator = '\0';
+
+	for (l = name->pLabelList; l != NULL; l = l->next) {
+		uint8 lablen = l->len;
+
+		dns_marshall_buffer(buf, (uint8 *)&lablen, sizeof(lablen));
+		if (!ERR_DNS_IS_OK(buf->error)) return;
+
+		dns_marshall_buffer(buf, (uint8 *)l->label, lablen);
+		if (!ERR_DNS_IS_OK(buf->error)) return;
+	}
+
+	/* root label ends the name */
+	dns_marshall_buffer(buf, (uint8 *)&terminator, 1);
+}
+
+/*
+ * Parse one label -- and, recursively, the rest of the name -- from
+ * 'buf' into a linked list of dns_domain_label structs on mem_ctx.
+ *
+ * Handles RFC 1035 message compression: a length octet with the top two
+ * bits set is the high half of a 14-bit pointer back into the message.
+ * 'level' counts recursion depth to bound hostile pointer loops.
+ * On failure buf->error is set and *plabel is left untouched.
+ */
+static void dns_unmarshall_label(TALLOC_CTX *mem_ctx,
+				 int level,
+				 struct dns_buffer *buf,
+				 struct dns_domain_label **plabel)
+{
+	struct dns_domain_label *label;
+	uint8 len;
+
+	if (!ERR_DNS_IS_OK(buf->error)) return;
+
+	if (level > 128) {
+		/*
+		 * Protect against recursion
+		 */
+		buf->error = ERROR_DNS_INVALID_MESSAGE;
+		return;
+	}
+
+	dns_unmarshall_buffer(buf, &len, sizeof(len));
+	if (!ERR_DNS_IS_OK(buf->error)) return;
+
+	if (len == 0) {
+		/* root label: end of the name */
+		*plabel = NULL;
+		return;
+	}
+
+	if ((len & 0xc0) == 0xc0) {
+		/*
+		 * We've got a compressed name. Build up a new "fake" buffer
+		 * and using the calculated offset.
+		 */
+		struct dns_buffer new_buf;
+		uint8 low;
+
+		dns_unmarshall_buffer(buf, &low, sizeof(low));
+		if (!ERR_DNS_IS_OK(buf->error)) return;
+
+		/* same underlying data, offset moved to the pointer
+		 * target; the original buf stays just past the pointer */
+		new_buf = *buf;
+		new_buf.offset = len & 0x3f;
+		new_buf.offset <<= 8;
+		new_buf.offset |= low;
+
+		dns_unmarshall_label(mem_ctx, level+1, &new_buf, plabel);
+		buf->error = new_buf.error;
+		return;
+	}
+
+	if ((len & 0xc0) != 0) {
+		/* 0x40/0x80 prefixes are reserved */
+		buf->error = ERROR_DNS_INVALID_NAME;
+		return;
+	}
+
+	if (!(label = talloc(mem_ctx, struct dns_domain_label))) {
+		buf->error = ERROR_DNS_NO_MEMORY;
+		return;
+	}
+
+	label->len = len;
+
+	/* len+1: keep the label NUL-terminated for C-string use */
+	if (!(label->label = TALLOC_ARRAY(label, char, len+1))) {
+		buf->error = ERROR_DNS_NO_MEMORY;
+		goto error;
+	}
+
+	dns_unmarshall_buffer(buf, (uint8 *)label->label, len);
+	if (!ERR_DNS_IS_OK(buf->error)) goto error;
+
+	/* chain up the remaining labels; children are talloc'ed off
+	 * this label so one free releases the whole list */
+	dns_unmarshall_label(label, level+1, buf, &label->next);
+	if (!ERR_DNS_IS_OK(buf->error)) goto error;
+
+	*plabel = label;
+	return;
+
+ error:
+	TALLOC_FREE(label);
+	return;
+}
+
+/*
+ * Parse a full domain name from 'buf' into a freshly allocated
+ * dns_domain_name on mem_ctx.  On failure buf->error is set, *pname is
+ * untouched and nothing stays allocated on mem_ctx.
+ */
+void dns_unmarshall_domain_name(TALLOC_CTX *mem_ctx,
+				struct dns_buffer *buf,
+				struct dns_domain_name **pname)
+{
+	struct dns_domain_name *name;
+
+	if (!ERR_DNS_IS_OK(buf->error)) return;
+
+	if (!(name = talloc(mem_ctx, struct dns_domain_name))) {
+		buf->error = ERROR_DNS_NO_MEMORY;
+		return;
+	}
+
+	dns_unmarshall_label(name, 0, buf, &name->pLabelList);
+
+	if (!ERR_DNS_IS_OK(buf->error)) {
+		/* fix: the original left the half-parsed name allocated
+		 * on mem_ctx here */
+		TALLOC_FREE(name);
+		return;
+	}
+
+	*pname = name;
+	return;
+}
+
+/* Marshall one question-section entry: QNAME, QTYPE, QCLASS. */
+static void dns_marshall_question(struct dns_buffer *buf,
+				  const struct dns_question *q)
+{
+	dns_marshall_domain_name(buf, q->name);
+	dns_marshall_uint16(buf, q->q_type);
+	dns_marshall_uint16(buf, q->q_class);
+}
+
+/*
+ * Parse one question-section entry.  On failure buf->error is set,
+ * *pq is untouched and nothing is left allocated on mem_ctx.
+ */
+static void dns_unmarshall_question(TALLOC_CTX *mem_ctx,
+				    struct dns_buffer *buf,
+				    struct dns_question **pq)
+{
+	struct dns_question *q;
+
+	if (!(ERR_DNS_IS_OK(buf->error))) return;
+
+	if (!(q = talloc(mem_ctx, struct dns_question))) {
+		buf->error = ERROR_DNS_NO_MEMORY;
+		return;
+	}
+
+	dns_unmarshall_domain_name(q, buf, &q->name);
+	dns_unmarshall_uint16(buf, &q->q_type);
+	dns_unmarshall_uint16(buf, &q->q_class);
+
+	if (!(ERR_DNS_IS_OK(buf->error))) {
+		/* fix: don't leak q onto mem_ctx on parse failure */
+		TALLOC_FREE(q);
+		return;
+	}
+
+	*pq = q;
+}
+
+/* Marshall a resource record: NAME, TYPE, CLASS, TTL, RDLENGTH, RDATA. */
+static void dns_marshall_rr(struct dns_buffer *buf,
+			    const struct dns_rrec *r)
+{
+	dns_marshall_domain_name(buf, r->name);
+	dns_marshall_uint16(buf, r->type);
+	dns_marshall_uint16(buf, r->r_class);
+	dns_marshall_uint32(buf, r->ttl);
+	dns_marshall_uint16(buf, r->data_length);
+	dns_marshall_buffer(buf, r->data, r->data_length);
+}
+
+/*
+ * Parse one resource record.  On failure buf->error is set, *pr is
+ * untouched and nothing is left allocated on mem_ctx.
+ */
+static void dns_unmarshall_rr(TALLOC_CTX *mem_ctx,
+			      struct dns_buffer *buf,
+			      struct dns_rrec **pr)
+{
+	struct dns_rrec *r;
+
+	if (!(ERR_DNS_IS_OK(buf->error))) return;
+
+	if (!(r = talloc(mem_ctx, struct dns_rrec))) {
+		buf->error = ERROR_DNS_NO_MEMORY;
+		return;
+	}
+
+	dns_unmarshall_domain_name(r, buf, &r->name);
+	dns_unmarshall_uint16(buf, &r->type);
+	dns_unmarshall_uint16(buf, &r->r_class);
+	dns_unmarshall_uint32(buf, &r->ttl);
+	dns_unmarshall_uint16(buf, &r->data_length);
+	r->data = NULL;
+
+	if (!(ERR_DNS_IS_OK(buf->error))) goto error;
+
+	if (r->data_length != 0) {
+		if (!(r->data = TALLOC_ARRAY(r, uint8, r->data_length))) {
+			buf->error = ERROR_DNS_NO_MEMORY;
+			goto error;
+		}
+		dns_unmarshall_buffer(buf, r->data, r->data_length);
+	}
+
+	if (!(ERR_DNS_IS_OK(buf->error))) goto error;
+
+	*pr = r;
+	return;
+
+ error:
+	/* fix: the original leaked the half-parsed record here */
+	TALLOC_FREE(r);
+}
+
+/*
+ * Marshall a complete DNS request (header plus all four sections) into
+ * a new buffer on mem_ctx.  On success *pbuf receives the buffer.
+ */
+DNS_ERROR dns_marshall_request(TALLOC_CTX *mem_ctx,
+			       const struct dns_request *req,
+			       struct dns_buffer **pbuf)
+{
+	struct dns_buffer *buf = dns_create_buffer(mem_ctx);
+	DNS_ERROR err;
+	uint16 i;
+
+	if (buf == NULL) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	/* header */
+	dns_marshall_uint16(buf, req->id);
+	dns_marshall_uint16(buf, req->flags);
+	dns_marshall_uint16(buf, req->num_questions);
+	dns_marshall_uint16(buf, req->num_answers);
+	dns_marshall_uint16(buf, req->num_auths);
+	dns_marshall_uint16(buf, req->num_additionals);
+
+	/* the four sections, in wire order */
+	for (i=0; i<req->num_questions; i++) {
+		dns_marshall_question(buf, req->questions[i]);
+	}
+	for (i=0; i<req->num_answers; i++) {
+		dns_marshall_rr(buf, req->answers[i]);
+	}
+	for (i=0; i<req->num_auths; i++) {
+		dns_marshall_rr(buf, req->auths[i]);
+	}
+	for (i=0; i<req->num_additionals; i++) {
+		dns_marshall_rr(buf, req->additionals[i]);
+	}
+
+	/* one deferred check covers every marshall call above */
+	err = buf->error;
+	if (!ERR_DNS_IS_OK(err)) {
+		TALLOC_FREE(buf);
+		return err;
+	}
+
+	*pbuf = buf;
+	return ERROR_DNS_SUCCESS;
+}
+
+/*
+ * Parse a complete DNS message from 'buf' into a dns_request on
+ * mem_ctx.  On success *preq receives the request; on failure the
+ * partial request is freed and an error code is returned.
+ *
+ * Fix vs. the original: the error label used to overwrite 'err' with
+ * buf->error, so an allocation failure after a clean header parse
+ * returned ERROR_DNS_SUCCESS while leaving *preq unset.  Each failure
+ * site now sets 'err' itself and the label only cleans up.
+ */
+DNS_ERROR dns_unmarshall_request(TALLOC_CTX *mem_ctx,
+				 struct dns_buffer *buf,
+				 struct dns_request **preq)
+{
+	struct dns_request *req;
+	uint16 i;
+	DNS_ERROR err;
+
+	if (!(req = TALLOC_ZERO_P(mem_ctx, struct dns_request))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	/* header: id, flags, and the four section counts */
+	dns_unmarshall_uint16(buf, &req->id);
+	dns_unmarshall_uint16(buf, &req->flags);
+	dns_unmarshall_uint16(buf, &req->num_questions);
+	dns_unmarshall_uint16(buf, &req->num_answers);
+	dns_unmarshall_uint16(buf, &req->num_auths);
+	dns_unmarshall_uint16(buf, &req->num_additionals);
+
+	if (!ERR_DNS_IS_OK(buf->error)) {
+		err = buf->error;
+		goto error;
+	}
+
+	if ((req->num_questions != 0) &&
+	    !(req->questions = TALLOC_ARRAY(req, struct dns_question *,
+					    req->num_questions))) {
+		err = ERROR_DNS_NO_MEMORY;
+		goto error;
+	}
+	if ((req->num_answers != 0) &&
+	    !(req->answers = TALLOC_ARRAY(req, struct dns_rrec *,
+					  req->num_answers))) {
+		err = ERROR_DNS_NO_MEMORY;
+		goto error;
+	}
+	if ((req->num_auths != 0) &&
+	    !(req->auths = TALLOC_ARRAY(req, struct dns_rrec *,
+					req->num_auths))) {
+		err = ERROR_DNS_NO_MEMORY;
+		goto error;
+	}
+	if ((req->num_additionals != 0) &&
+	    !(req->additionals = TALLOC_ARRAY(req, struct dns_rrec *,
+					      req->num_additionals))) {
+		err = ERROR_DNS_NO_MEMORY;
+		goto error;
+	}
+
+	/* section parsers are no-ops once buf->error is set, so the
+	 * loops can run unconditionally and be checked once below */
+	for (i=0; i<req->num_questions; i++) {
+		dns_unmarshall_question(req->questions, buf,
+					&req->questions[i]);
+	}
+	for (i=0; i<req->num_answers; i++) {
+		dns_unmarshall_rr(req->answers, buf,
+				  &req->answers[i]);
+	}
+	for (i=0; i<req->num_auths; i++) {
+		dns_unmarshall_rr(req->auths, buf,
+				  &req->auths[i]);
+	}
+	for (i=0; i<req->num_additionals; i++) {
+		dns_unmarshall_rr(req->additionals, buf,
+				  &req->additionals[i]);
+	}
+
+	if (!ERR_DNS_IS_OK(buf->error)) {
+		err = buf->error;
+		goto error;
+	}
+
+	*preq = req;
+	return ERROR_DNS_SUCCESS;
+
+ error:
+	TALLOC_FREE(req);
+	return err;
+}
+
+/*
+ * View a dns_update_request as a generic dns_request.  No copy is made;
+ * this is a pointer cast relying on the two structs sharing a layout
+ * (zones/preqs/updates map onto questions/answers/auths).
+ */
+struct dns_request *dns_update2request(struct dns_update_request *update)
+{
+	struct dns_request *req;
+
+	/*
+	 * This is a non-specified construct that happens to work on Linux/gcc
+	 * and I would expect it to work everywhere else. dns_request and
+	 * dns_update_request are essentially the same structures with
+	 * different names, so any difference would mean that the compiler
+	 * applied two different variations of padding given the same types in
+	 * the structures.
+	 */
+
+	req = (struct dns_request *)(void *)update;
+
+	/*
+	 * The assert statement here looks like we could do the equivalent
+	 * assignments to get portable, but it would mean that we have to
+	 * allocate the dns_question record for the dns_zone records. We
+	 * assume that if this assert works then the same holds true for
+	 * dns_zone<>dns_question as well.
+	 */
+
+#ifdef DEVELOPER
+	assert((req->id == update->id) && (req->flags == update->flags) &&
+	       (req->num_questions == update->num_zones) &&
+	       (req->num_answers == update->num_preqs) &&
+	       (req->num_auths == update->num_updates) &&
+	       (req->num_additionals == update->num_additionals) &&
+	       (req->questions ==
+		(struct dns_question **)(void *)update->zones) &&
+	       (req->answers == update->preqs) &&
+	       (req->auths == update->updates) &&
+	       (req->additionals == update->additionals));
+#endif
+
+	return req;
+}
+
+/*
+ * Inverse of dns_update2request; relies on the same layout equivalence
+ * between the two structs (see the discussion there).
+ */
+struct dns_update_request *dns_request2update(struct dns_request *request)
+{
+	struct dns_update_request *update =
+		(struct dns_update_request *)(void *)request;
+
+	return update;
+}
+
+/* An update request marshalls exactly like a plain request. */
+DNS_ERROR dns_marshall_update_request(TALLOC_CTX *mem_ctx,
+				      struct dns_update_request *update,
+				      struct dns_buffer **pbuf)
+{
+	struct dns_request *req = dns_update2request(update);
+
+	return dns_marshall_request(mem_ctx, req, pbuf);
+}
+
+/*
+ * Parse an update request.  dns_request and dns_update_request share
+ * their layout (see dns_update2request), so parsing into one serves
+ * for the other.
+ */
+DNS_ERROR dns_unmarshall_update_request(TALLOC_CTX *mem_ctx,
+					struct dns_buffer *buf,
+					struct dns_update_request **pupreq)
+{
+	struct dns_request **preq =
+		(struct dns_request **)(void *)pupreq;
+
+	return dns_unmarshall_request(mem_ctx, buf, preq);
+}
+
+/* Extract the RCODE from the low four bits of the flags word. */
+uint16 dns_response_code(uint16 flags)
+{
+	return (uint16)(flags & 0x000F);
+}
diff --git a/lib/addns/dnsrecord.c b/lib/addns/dnsrecord.c
new file mode 100644
index 0000000000..559c2644d4
--- /dev/null
+++ b/lib/addns/dnsrecord.c
@@ -0,0 +1,422 @@
+/*
+ Linux DNS client library implementation
+ Copyright (C) 2006 Krishna Ganugapati <krishnag@centeris.com>
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "dns.h"
+
+/*
+ * Build a single-question DNS query for 'name' with the given type and
+ * class.  The id is picked with random().  On success *preq receives
+ * the request allocated on mem_ctx.
+ */
+DNS_ERROR dns_create_query( TALLOC_CTX *mem_ctx, const char *name,
+			    uint16 q_type, uint16 q_class,
+			    struct dns_request **preq )
+{
+	struct dns_request *req;
+	struct dns_question *q;
+	DNS_ERROR err;
+
+	req = TALLOC_ZERO_P(mem_ctx, struct dns_request);
+	if (req == NULL) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	req->questions = TALLOC_ARRAY(req, struct dns_question *, 1);
+	if (req->questions == NULL) {
+		TALLOC_FREE(req);
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	req->questions[0] = talloc(req->questions, struct dns_question);
+	if (req->questions[0] == NULL) {
+		TALLOC_FREE(req);
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	req->id = random();
+	req->num_questions = 1;
+	q = req->questions[0];
+
+	err = dns_domain_name_from_string(q, name, &q->name);
+	if (!ERR_DNS_IS_OK(err)) {
+		TALLOC_FREE(req);
+		return err;
+	}
+
+	q->q_type = q_type;
+	q->q_class = q_class;
+
+	*preq = req;
+	return ERROR_DNS_SUCCESS;
+}
+
+/*
+ * Build an empty dynamic update request for the given zone.  The zone
+ * section names the SOA of the zone to be updated.
+ */
+DNS_ERROR dns_create_update( TALLOC_CTX *mem_ctx, const char *name,
+			     struct dns_update_request **preq )
+{
+	struct dns_update_request *req;
+	struct dns_zone *z;
+	DNS_ERROR err;
+
+	req = TALLOC_ZERO_P(mem_ctx, struct dns_update_request);
+	if (req == NULL) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	req->zones = TALLOC_ARRAY(req, struct dns_zone *, 1);
+	if (req->zones == NULL) {
+		TALLOC_FREE(req);
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	req->zones[0] = talloc(req->zones, struct dns_zone);
+	if (req->zones[0] == NULL) {
+		TALLOC_FREE(req);
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	req->id = random();
+	req->flags = 0x2800;		/* Dynamic update */
+
+	req->num_zones = 1;
+	z = req->zones[0];
+
+	err = dns_domain_name_from_string(z, name, &z->name);
+	if (!ERR_DNS_IS_OK(err)) {
+		TALLOC_FREE(req);
+		return err;
+	}
+
+	z->z_type = QTYPE_SOA;
+	z->z_class = DNS_CLASS_IN;
+
+	*preq = req;
+	return ERROR_DNS_SUCCESS;
+}
+
+/*
+ * Build a generic resource record.  On success the record takes
+ * ownership of the caller's 'data' blob via talloc_move.
+ */
+DNS_ERROR dns_create_rrec(TALLOC_CTX *mem_ctx, const char *name,
+			  uint16 type, uint16 r_class, uint32 ttl,
+			  uint16 data_length, uint8 *data,
+			  struct dns_rrec **prec)
+{
+	struct dns_rrec *rec = talloc(mem_ctx, struct dns_rrec);
+	DNS_ERROR err;
+
+	if (rec == NULL) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	err = dns_domain_name_from_string(rec, name, &rec->name);
+	if (!(ERR_DNS_IS_OK(err))) {
+		TALLOC_FREE(rec);
+		return err;
+	}
+
+	rec->type = type;
+	rec->r_class = r_class;
+	rec->ttl = ttl;
+	rec->data_length = data_length;
+	/* reparent the data blob under the record */
+	rec->data = talloc_move(rec, &data);
+
+	*prec = rec;
+	return ERROR_DNS_SUCCESS;
+}
+
+/*
+ * Build an A record mapping 'host' to the IPv4 address in 'pss'.
+ *
+ * NOTE(review): for a non-AF_INET address this returns
+ * ERROR_DNS_SUCCESS *without* writing to *prec.  Callers looping over
+ * an address list must skip non-IPv4 entries themselves or they will
+ * reuse whatever stale pointer *prec held before.
+ */
+DNS_ERROR dns_create_a_record(TALLOC_CTX *mem_ctx, const char *host,
+			      uint32 ttl, const struct sockaddr_storage *pss,
+			      struct dns_rrec **prec)
+{
+	uint8 *data;
+	DNS_ERROR err;
+	struct in_addr ip;
+
+	if (pss->ss_family != AF_INET) {
+		/* Silently ignore this. */
+		return ERROR_DNS_SUCCESS;
+	}
+
+	ip = ((struct sockaddr_in *)pss)->sin_addr;
+	/* RDATA of an A record is the raw 4-byte address */
+	if (!(data = (uint8 *)TALLOC_MEMDUP(mem_ctx, (const void *)&ip.s_addr,
+					    sizeof(ip.s_addr)))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	err = dns_create_rrec(mem_ctx, host, QTYPE_A, DNS_CLASS_IN, ttl,
+			      sizeof(ip.s_addr), data, prec);
+
+	/* on success dns_create_rrec took ownership of data; only free
+	 * it ourselves on failure */
+	if (!ERR_DNS_IS_OK(err)) {
+		TALLOC_FREE(data);
+	}
+
+	return err;
+}
+
+/*
+ * Prerequisite record asserting 'name' is in use: an A record when an
+ * address is supplied, otherwise an ANY/IN record with empty RDATA.
+ */
+DNS_ERROR dns_create_name_in_use_record(TALLOC_CTX *mem_ctx,
+					const char *name,
+					const struct sockaddr_storage *ss,
+					struct dns_rrec **prec)
+{
+	if (ss == NULL) {
+		return dns_create_rrec(mem_ctx, name, QTYPE_ANY,
+				       DNS_CLASS_IN, 0, 0, NULL, prec);
+	}
+
+	return dns_create_a_record(mem_ctx, name, 0, ss, prec);
+}
+
+/*
+ * Prerequisite record asserting no record of 'type' exists for 'name'
+ * (class NONE, zero TTL, empty RDATA).
+ */
+DNS_ERROR dns_create_name_not_in_use_record(TALLOC_CTX *mem_ctx,
+					    const char *name, uint32 type,
+					    struct dns_rrec **prec)
+{
+	return dns_create_rrec(mem_ctx, name, type, DNS_CLASS_NONE,
+			       0, 0, NULL, prec);
+}
+
+/* Update-section record deleting RRs: zero TTL and empty RDATA. */
+DNS_ERROR dns_create_delete_record(TALLOC_CTX *mem_ctx, const char *name,
+				   uint16 type, uint16 r_class,
+				   struct dns_rrec **prec)
+{
+	return dns_create_rrec(mem_ctx, name, type, r_class,
+			       0, 0, NULL, prec);
+}
+
+/*
+ * Build a TKEY record (RFC 2930) carrying the given key material.
+ *
+ * The RDATA (algorithm, inception, expiration, mode, error, key,
+ * other-size) is assembled in a scratch buffer; dns_create_rrec then
+ * takes ownership of buf->data via talloc_move, so freeing buf at the
+ * end leaves the record intact.
+ */
+DNS_ERROR dns_create_tkey_record(TALLOC_CTX *mem_ctx, const char *keyname,
+				 const char *algorithm_name, time_t inception,
+				 time_t expiration, uint16 mode, uint16 error,
+				 uint16 key_length, const uint8 *key,
+				 struct dns_rrec **prec)
+{
+	struct dns_buffer *buf;
+	struct dns_domain_name *algorithm;
+	DNS_ERROR err;
+
+	if (!(buf = dns_create_buffer(mem_ctx))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	err = dns_domain_name_from_string(buf, algorithm_name, &algorithm);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	/* TKEY RDATA layout */
+	dns_marshall_domain_name(buf, algorithm);
+	dns_marshall_uint32(buf, inception);
+	dns_marshall_uint32(buf, expiration);
+	dns_marshall_uint16(buf, mode);
+	dns_marshall_uint16(buf, error);
+	dns_marshall_uint16(buf, key_length);
+	dns_marshall_buffer(buf, key, key_length);
+	dns_marshall_uint16(buf, 0); /* Other Size */
+
+	/* marshall calls are no-ops after an error; check once */
+	if (!ERR_DNS_IS_OK(buf->error)) {
+		err = buf->error;
+		goto error;
+	}
+
+	err = dns_create_rrec(mem_ctx, keyname, QTYPE_TKEY, DNS_CLASS_ANY, 0,
+			      buf->offset, buf->data, prec);
+
+ error:
+	TALLOC_FREE(buf);
+	return err;
+}
+
+/*
+ * Parse the RDATA of a received TKEY record into a dns_tkey_record.
+ *
+ * A stack-local dns_buffer is pointed at rec->data (no copy, no talloc
+ * ownership), so only 'tkey' needs cleanup on failure.  The 32-bit wire
+ * timestamps are widened back to time_t at the end.
+ */
+DNS_ERROR dns_unmarshall_tkey_record(TALLOC_CTX *mem_ctx, struct dns_rrec *rec,
+				     struct dns_tkey_record **ptkey)
+{
+	struct dns_tkey_record *tkey;
+	struct dns_buffer buf;
+	uint32 tmp_inception, tmp_expiration;
+
+	if (!(tkey = talloc(mem_ctx, struct dns_tkey_record))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	/* window over the record's RDATA */
+	buf.data = rec->data;
+	buf.size = rec->data_length;
+	buf.offset = 0;
+	buf.error = ERROR_DNS_SUCCESS;
+
+	dns_unmarshall_domain_name(tkey, &buf, &tkey->algorithm);
+	dns_unmarshall_uint32(&buf, &tmp_inception);
+	dns_unmarshall_uint32(&buf, &tmp_expiration);
+	dns_unmarshall_uint16(&buf, &tkey->mode);
+	dns_unmarshall_uint16(&buf, &tkey->error);
+	dns_unmarshall_uint16(&buf, &tkey->key_length);
+
+	if (!ERR_DNS_IS_OK(buf.error)) goto error;
+
+	if (tkey->key_length) {
+		if (!(tkey->key = TALLOC_ARRAY(tkey, uint8, tkey->key_length))) {
+			buf.error = ERROR_DNS_NO_MEMORY;
+			goto error;
+		}
+	} else {
+		/* zero-length key: the unmarshall below is a no-op copy */
+		tkey->key = NULL;
+	}
+
+	dns_unmarshall_buffer(&buf, tkey->key, tkey->key_length);
+	if (!ERR_DNS_IS_OK(buf.error)) goto error;
+
+	tkey->inception = (time_t)tmp_inception;
+	tkey->expiration = (time_t)tmp_expiration;
+
+	*ptkey = tkey;
+	return ERROR_DNS_SUCCESS;
+
+ error:
+	TALLOC_FREE(tkey);
+	return buf.error;
+}
+
+/*
+ * Build a TSIG record (RFC 2845) around an already-computed MAC.
+ *
+ * The RDATA (algorithm, 48-bit time, fudge, MAC, original id, error,
+ * other-size) is assembled in a scratch buffer; dns_create_rrec then
+ * takes ownership of buf->data via talloc_move, so freeing buf at the
+ * end leaves the record intact.
+ */
+DNS_ERROR dns_create_tsig_record(TALLOC_CTX *mem_ctx, const char *keyname,
+				 const char *algorithm_name,
+				 time_t time_signed, uint16 fudge,
+				 uint16 mac_length, const uint8 *mac,
+				 uint16 original_id, uint16 error,
+				 struct dns_rrec **prec)
+{
+	struct dns_buffer *buf;
+	struct dns_domain_name *algorithm;
+	DNS_ERROR err;
+
+	if (!(buf = dns_create_buffer(mem_ctx))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	err = dns_domain_name_from_string(buf, algorithm_name, &algorithm);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	/* TSIG RDATA layout; the 48-bit signing time is a zero 16-bit
+	 * prefix plus the 32-bit time_signed */
+	dns_marshall_domain_name(buf, algorithm);
+	dns_marshall_uint16(buf, 0); /* time prefix */
+	dns_marshall_uint32(buf, time_signed);
+	dns_marshall_uint16(buf, fudge);
+	dns_marshall_uint16(buf, mac_length);
+	dns_marshall_buffer(buf, mac, mac_length);
+	dns_marshall_uint16(buf, original_id);
+	dns_marshall_uint16(buf, error);
+	dns_marshall_uint16(buf, 0); /* Other Size */
+
+	/* marshall calls are no-ops after an error; check once */
+	if (!ERR_DNS_IS_OK(buf->error)) {
+		err = buf->error;
+		goto error;
+	}
+
+	err = dns_create_rrec(mem_ctx, keyname, QTYPE_TSIG, DNS_CLASS_ANY, 0,
+			      buf->offset, buf->data, prec);
+
+ error:
+	TALLOC_FREE(buf);
+	return err;
+}
+
+/*
+ * Append 'rec' to a record array, growing the array by one.  On
+ * success the array takes ownership of rec; on failure nothing
+ * changes.
+ */
+DNS_ERROR dns_add_rrec(TALLOC_CTX *mem_ctx, struct dns_rrec *rec,
+		       uint16 *num_records, struct dns_rrec ***records)
+{
+	uint16 n = *num_records;
+	struct dns_rrec **tmp;
+
+	tmp = TALLOC_REALLOC_ARRAY(mem_ctx, *records, struct dns_rrec *,
+				   n + 1);
+	if (tmp == NULL) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	/* reparent the record under the array */
+	tmp[n] = talloc_move(tmp, &rec);
+
+	*records = tmp;
+	*num_records = n + 1;
+	return ERROR_DNS_SUCCESS;
+}
+
+/*
+ * Create a request that probes a server whether the list of IP addresses
+ * provides meets our expectations
+ */
+
+/*
+ * Create a request that probes a server whether the list of IP
+ * addresses provided meets our expectations: no CNAME for 'host', and
+ * an A-record prerequisite per IPv4 address in sslist.
+ *
+ * Fixes vs. the original:
+ *  - req is initialized to NULL, so the error path no longer calls
+ *    TALLOC_FREE on an uninitialized pointer when dns_create_update
+ *    fails.
+ *  - non-AF_INET addresses are skipped: dns_create_a_record returns
+ *    success for those *without* setting 'rec', which used to re-add
+ *    the previous record.
+ */
+DNS_ERROR dns_create_probe(TALLOC_CTX *mem_ctx, const char *zone,
+			   const char *host, int num_ips,
+			   const struct sockaddr_storage *sslist,
+			   struct dns_update_request **preq)
+{
+	struct dns_update_request *req = NULL;
+	struct dns_rrec *rec;
+	DNS_ERROR err;
+	uint16 i;
+
+	err = dns_create_update(mem_ctx, zone, &req);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	err = dns_create_name_not_in_use_record(req, host, QTYPE_CNAME, &rec);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	err = dns_add_rrec(req, rec, &req->num_preqs, &req->preqs);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	for (i=0; i<num_ips; i++) {
+		if (sslist[i].ss_family != AF_INET) {
+			/* dns_create_a_record would "succeed" without
+			 * producing a record; don't reuse the old one */
+			continue;
+		}
+
+		err = dns_create_name_in_use_record(req, host,
+						    &sslist[i], &rec);
+		if (!ERR_DNS_IS_OK(err)) goto error;
+
+		err = dns_add_rrec(req, rec, &req->num_preqs, &req->preqs);
+		if (!ERR_DNS_IS_OK(err)) goto error;
+	}
+
+	*preq = req;
+	return ERROR_DNS_SUCCESS;
+
+ error:
+	TALLOC_FREE(req);
+	return err;
+}
+
+/*
+ * Build a full dynamic update request for 'hostname': prerequisite "no
+ * CNAME exists" (same as WinXP), delete all existing A records, then
+ * add one A record per IPv4 address with a 3600s TTL.
+ *
+ * Fix vs. the original: non-AF_INET addresses are skipped explicitly.
+ * dns_create_a_record returns success for those *without* setting
+ * 'rec', which used to add the previous record a second time.
+ */
+DNS_ERROR dns_create_update_request(TALLOC_CTX *mem_ctx,
+				    const char *domainname,
+				    const char *hostname,
+				    const struct sockaddr_storage *ss_addrs,
+				    size_t num_addrs,
+				    struct dns_update_request **preq)
+{
+	struct dns_update_request *req;
+	struct dns_rrec *rec;
+	DNS_ERROR err;
+	size_t i;
+
+	err = dns_create_update(mem_ctx, domainname, &req);
+	if (!ERR_DNS_IS_OK(err)) return err;
+
+	/*
+	 * Use the same prereq as WinXP -- No CNAME records for this host.
+	 */
+
+	err = dns_create_rrec(req, hostname, QTYPE_CNAME, DNS_CLASS_NONE,
+			      0, 0, NULL, &rec);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	err = dns_add_rrec(req, rec, &req->num_preqs, &req->preqs);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	/*
+	 * Delete any existing A records
+	 */
+
+	err = dns_create_delete_record(req, hostname, QTYPE_A, DNS_CLASS_ANY,
+				       &rec);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	err = dns_add_rrec(req, rec, &req->num_updates, &req->updates);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	/*
+	 * .. and add our IPs
+	 */
+
+	for ( i=0; i<num_addrs; i++ ) {
+		if (ss_addrs[i].ss_family != AF_INET) {
+			/* dns_create_a_record would "succeed" without
+			 * producing a record; don't re-add the old one */
+			continue;
+		}
+
+		err = dns_create_a_record(req, hostname, 3600,
+					  &ss_addrs[i], &rec);
+		if (!ERR_DNS_IS_OK(err))
+			goto error;
+
+		err = dns_add_rrec(req, rec, &req->num_updates,
+				   &req->updates);
+		if (!ERR_DNS_IS_OK(err))
+			goto error;
+	}
+
+	*preq = req;
+	return ERROR_DNS_SUCCESS;
+
+ error:
+	TALLOC_FREE(req);
+	return err;
+}
diff --git a/lib/addns/dnssock.c b/lib/addns/dnssock.c
new file mode 100644
index 0000000000..42b4e2d40f
--- /dev/null
+++ b/lib/addns/dnssock.c
@@ -0,0 +1,375 @@
+/*
+ Linux DNS client library implementation
+
+ Copyright (C) 2006 Krishna Ganugapati <krishnag@centeris.com>
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "dns.h"
+#include <sys/time.h>
+#include <unistd.h>
+#include "system/select.h"
+
+/* talloc destructor: closing the socket follows the connection's life. */
+static int destroy_dns_connection(struct dns_connection *conn)
+{
+	int fd = conn->s;
+
+	return close(fd);
+}
+
+/********************************************************************
+********************************************************************/
+
+/*
+ * Open a TCP connection to 'nameserver' (dotted quad or resolvable
+ * host name) on the DNS port.  On success *result receives a
+ * connection whose talloc destructor closes the socket.
+ *
+ * Fix vs. the original: s_in is zeroed before use (sin_zero was left
+ * uninitialized), matching what dns_udp_open already does.
+ */
+static DNS_ERROR dns_tcp_open( const char *nameserver,
+			       TALLOC_CTX *mem_ctx,
+			       struct dns_connection **result )
+{
+	uint32_t ulAddress;
+	struct hostent *pHost;
+	struct sockaddr_in s_in;
+	struct dns_connection *conn;
+	int res;
+
+	if (!(conn = talloc(mem_ctx, struct dns_connection))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	if ( (ulAddress = inet_addr( nameserver )) == INADDR_NONE ) {
+		if ( (pHost = gethostbyname( nameserver )) == NULL ) {
+			TALLOC_FREE(conn);
+			return ERROR_DNS_INVALID_NAME_SERVER;
+		}
+		memcpy( &ulAddress, pHost->h_addr, pHost->h_length );
+	}
+
+	conn->s = socket( PF_INET, SOCK_STREAM, 0 );
+	if (conn->s == -1) {
+		TALLOC_FREE(conn);
+		return ERROR_DNS_CONNECTION_FAILED;
+	}
+
+	/* from here on, freeing conn also closes the socket */
+	talloc_set_destructor(conn, destroy_dns_connection);
+
+	ZERO_STRUCT(s_in);
+	s_in.sin_family = AF_INET;
+	s_in.sin_addr.s_addr = ulAddress;
+	s_in.sin_port = htons( DNS_TCP_PORT );
+
+	res = connect(conn->s, (struct sockaddr*)&s_in, sizeof( s_in ));
+	if (res == -1) {
+		TALLOC_FREE(conn);
+		return ERROR_DNS_CONNECTION_FAILED;
+	}
+
+	conn->hType = DNS_TCP;
+
+	*result = conn;
+	return ERROR_DNS_SUCCESS;
+}
+
+/********************************************************************
+********************************************************************/
+
+/*
+ * Create a UDP socket for talking to 'nameserver' and record the
+ * destination address for later sendto() calls.
+ *
+ * Fix vs. the original: the connection is allocated on the caller's
+ * mem_ctx instead of the NULL context, matching dns_tcp_open and
+ * keeping the caller in charge of the lifetime.
+ */
+static DNS_ERROR dns_udp_open( const char *nameserver,
+			       TALLOC_CTX *mem_ctx,
+			       struct dns_connection **result )
+{
+	unsigned long ulAddress;
+	struct hostent *pHost;
+	struct sockaddr_in RecvAddr;
+	struct dns_connection *conn;
+
+	if (!(conn = talloc(mem_ctx, struct dns_connection))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	if ( (ulAddress = inet_addr( nameserver )) == INADDR_NONE ) {
+		if ( (pHost = gethostbyname( nameserver )) == NULL ) {
+			TALLOC_FREE(conn);
+			return ERROR_DNS_INVALID_NAME_SERVER;
+		}
+		memcpy( &ulAddress, pHost->h_addr, pHost->h_length );
+	}
+
+	/* Create a socket for sending data */
+
+	conn->s = socket( AF_INET, SOCK_DGRAM, IPPROTO_UDP );
+	if (conn->s == -1) {
+		TALLOC_FREE(conn);
+		return ERROR_DNS_CONNECTION_FAILED;
+	}
+
+	/* from here on, freeing conn also closes the socket */
+	talloc_set_destructor(conn, destroy_dns_connection);
+
+	/* Remember the server's address for dns_send_udp(). */
+	ZERO_STRUCT(RecvAddr);
+	RecvAddr.sin_family = AF_INET;
+	RecvAddr.sin_port = htons( DNS_UDP_PORT );
+	RecvAddr.sin_addr.s_addr = ulAddress;
+
+	conn->hType = DNS_UDP;
+	memcpy( &conn->RecvAddr, &RecvAddr, sizeof( struct sockaddr_in ) );
+
+	*result = conn;
+	return ERROR_DNS_SUCCESS;
+}
+
+/********************************************************************
+********************************************************************/
+
+/* Open a nameserver connection using the requested transport. */
+DNS_ERROR dns_open_connection( const char *nameserver, int32 dwType,
+			       TALLOC_CTX *mem_ctx,
+			       struct dns_connection **conn )
+{
+	switch ( dwType ) {
+	case DNS_TCP:
+		return dns_tcp_open( nameserver, mem_ctx, conn );
+	case DNS_UDP:
+		return dns_udp_open( nameserver, mem_ctx, conn );
+	default:
+		return ERROR_DNS_INVALID_PARAMETER;
+	}
+}
+
+/*
+ * Write exactly 'len' bytes to fd, looping over short writes.
+ *
+ * Fix vs. the original: a write() interrupted by a signal (EINTR) is
+ * retried instead of aborting the whole send.
+ */
+static DNS_ERROR write_all(int fd, uint8 *data, size_t len)
+{
+	size_t total = 0;
+
+	while (total < len) {
+
+		ssize_t ret = write(fd, data + total, len - total);
+
+		if (ret == -1 && errno == EINTR) {
+			/* interrupted by a signal, not an error */
+			continue;
+		}
+
+		if (ret <= 0) {
+			/*
+			 * EOF or error
+			 */
+			return ERROR_DNS_SOCKET_ERROR;
+		}
+
+		total += ret;
+	}
+
+	return ERROR_DNS_SUCCESS;
+}
+
+/* Send a marshalled message over TCP: 2-byte length prefix, then data. */
+static DNS_ERROR dns_send_tcp(struct dns_connection *conn,
+			      const struct dns_buffer *buf)
+{
+	uint16 wire_len = htons(buf->offset);
+	DNS_ERROR err;
+
+	err = write_all(conn->s, (uint8 *)&wire_len, sizeof(wire_len));
+	if (!ERR_DNS_IS_OK(err)) {
+		return err;
+	}
+
+	return write_all(conn->s, buf->data, buf->offset);
+}
+
+/* Send a marshalled message as a single UDP datagram. */
+static DNS_ERROR dns_send_udp(struct dns_connection *conn,
+			      const struct dns_buffer *buf)
+{
+	ssize_t sent = sendto(conn->s, buf->data, buf->offset, 0,
+			      (struct sockaddr *)&conn->RecvAddr,
+			      sizeof(conn->RecvAddr));
+
+	/* a partial datagram is as bad as a failed one */
+	if (sent != buf->offset) {
+		return ERROR_DNS_SOCKET_ERROR;
+	}
+
+	return ERROR_DNS_SUCCESS;
+}
+
+/* Dispatch a marshalled message over the connection's transport. */
+DNS_ERROR dns_send(struct dns_connection *conn, const struct dns_buffer *buf)
+{
+	switch (conn->hType) {
+	case DNS_TCP:
+		return dns_send_tcp(conn, buf);
+	case DNS_UDP:
+		return dns_send_udp(conn, buf);
+	default:
+		return ERROR_DNS_INVALID_PARAMETER;
+	}
+}
+
+/*
+ * Read exactly 'len' bytes from fd, waiting up to 10 seconds per
+ * chunk via poll().
+ *
+ * Fix vs. the original: a poll() failure (-1) used to fall through to
+ * read(), which could then block indefinitely and defeat the timeout.
+ * EINTR retries the poll; any other poll error aborts.
+ */
+static DNS_ERROR read_all(int fd, uint8 *data, size_t len)
+{
+	size_t total = 0;
+
+	while (total < len) {
+		struct pollfd pfd;
+		ssize_t ret;
+		int fd_ready;
+
+		ZERO_STRUCT(pfd);
+		pfd.fd = fd;
+		pfd.events = POLLIN|POLLHUP;
+
+		fd_ready = poll(&pfd, 1, 10000);
+		if ( fd_ready == 0 ) {
+			/* read timeout */
+			return ERROR_DNS_SOCKET_ERROR;
+		}
+		if ( fd_ready == -1 ) {
+			if (errno == EINTR) {
+				continue;
+			}
+			return ERROR_DNS_SOCKET_ERROR;
+		}
+
+		ret = read(fd, data + total, len - total);
+		if (ret <= 0) {
+			/* EOF or error */
+			return ERROR_DNS_SOCKET_ERROR;
+		}
+
+		total += ret;
+	}
+
+	return ERROR_DNS_SUCCESS;
+}
+
+/*
+ * Receive one TCP DNS message: a 2-byte length prefix followed by that
+ * many payload bytes, returned as a dns_buffer on mem_ctx.
+ *
+ * Fix vs. the original: buf is freed when the length read fails
+ * (it used to leak onto mem_ctx on that path).
+ */
+static DNS_ERROR dns_receive_tcp(TALLOC_CTX *mem_ctx,
+				 struct dns_connection *conn,
+				 struct dns_buffer **presult)
+{
+	struct dns_buffer *buf;
+	DNS_ERROR err;
+	uint16 len;
+
+	if (!(buf = TALLOC_ZERO_P(mem_ctx, struct dns_buffer))) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	err = read_all(conn->s, (uint8 *)&len, sizeof(len));
+	if (!ERR_DNS_IS_OK(err)) {
+		TALLOC_FREE(buf);
+		return err;
+	}
+
+	buf->size = ntohs(len);
+
+	if (buf->size) {
+		if (!(buf->data = TALLOC_ARRAY(buf, uint8, buf->size))) {
+			TALLOC_FREE(buf);
+			return ERROR_DNS_NO_MEMORY;
+		}
+	} else {
+		buf->data = NULL;
+	}
+
+	err = read_all(conn->s, buf->data, buf->size);
+	if (!ERR_DNS_IS_OK(err)) {
+		TALLOC_FREE(buf);
+		return err;
+	}
+
+	*presult = buf;
+	return ERROR_DNS_SUCCESS;
+}
+
+/*
+ * Receive one UDP DNS datagram into a dns_buffer on mem_ctx.
+ * Plain (non-EDNS) UDP DNS messages are at most 512 bytes.
+ */
+static DNS_ERROR dns_receive_udp(TALLOC_CTX *mem_ctx,
+				 struct dns_connection *conn,
+				 struct dns_buffer **presult)
+{
+	struct dns_buffer *buf = TALLOC_ZERO_P(mem_ctx, struct dns_buffer);
+	ssize_t received;
+
+	if (buf == NULL) {
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	buf->data = TALLOC_ARRAY(buf, uint8, 512);
+	if (buf->data == NULL) {
+		TALLOC_FREE(buf);
+		return ERROR_DNS_NO_MEMORY;
+	}
+
+	received = recv(conn->s, (void *)buf->data, 512, 0);
+	if (received == -1) {
+		TALLOC_FREE(buf);
+		return ERROR_DNS_SOCKET_ERROR;
+	}
+
+	/* paranoia: recv must not report more than we asked for */
+	if (received > 512) {
+		TALLOC_FREE(buf);
+		return ERROR_DNS_BAD_RESPONSE;
+	}
+
+	buf->size = received;
+	buf->offset = 0;
+
+	*presult = buf;
+	return ERROR_DNS_SUCCESS;
+}
+
+/* Receive one message using the connection's transport. */
+DNS_ERROR dns_receive(TALLOC_CTX *mem_ctx, struct dns_connection *conn,
+		      struct dns_buffer **presult)
+{
+	switch (conn->hType) {
+	case DNS_TCP:
+		return dns_receive_tcp(mem_ctx, conn, presult);
+	case DNS_UDP:
+		return dns_receive_udp(mem_ctx, conn, presult);
+	default:
+		return ERROR_DNS_INVALID_PARAMETER;
+	}
+}
+
+/*
+ * Run one request/response round trip: marshall req, send it, receive
+ * the reply and unmarshall it into *resp on mem_ctx.
+ *
+ * 'buf' is reused for both directions: it is freed after the send and
+ * reassigned by dns_receive.  TALLOC_FREE NULLs the pointer, so the
+ * cleanup at 'error' is safe no matter where we jump from.
+ */
+DNS_ERROR dns_transaction(TALLOC_CTX *mem_ctx, struct dns_connection *conn,
+			  const struct dns_request *req,
+			  struct dns_request **resp)
+{
+	struct dns_buffer *buf = NULL;
+	DNS_ERROR err;
+
+	/* marshalled onto conn so it cannot outlive the connection */
+	err = dns_marshall_request(conn, req, &buf);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	err = dns_send(conn, buf);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+	TALLOC_FREE(buf);
+
+	err = dns_receive(mem_ctx, conn, &buf);
+	if (!ERR_DNS_IS_OK(err)) goto error;
+
+	err = dns_unmarshall_request(mem_ctx, buf, resp);
+
+ error:
+	TALLOC_FREE(buf);
+	return err;
+}
+
+/*
+ * Run a dynamic-update round trip.  Update requests share their layout
+ * with plain requests (see dns_update2request), so this delegates to
+ * dns_transaction and casts the response back.
+ */
+DNS_ERROR dns_update_transaction(TALLOC_CTX *mem_ctx,
+				 struct dns_connection *conn,
+				 struct dns_update_request *up_req,
+				 struct dns_update_request **up_resp)
+{
+	struct dns_request *resp;
+	DNS_ERROR err;
+
+	err = dns_transaction(mem_ctx, conn, dns_update2request(up_req),
+			      &resp);
+	if (!ERR_DNS_IS_OK(err)) {
+		return err;
+	}
+
+	*up_resp = dns_request2update(resp);
+	return ERROR_DNS_SUCCESS;
+}
diff --git a/lib/addns/dnsutils.c b/lib/addns/dnsutils.c
new file mode 100644
index 0000000000..37b862c7f0
--- /dev/null
+++ b/lib/addns/dnsutils.c
@@ -0,0 +1,151 @@
+/*
+ Linux DNS client library implementation
+
+ Copyright (C) 2006 Krishna Ganugapati <krishnag@centeris.com>
+ Copyright (C) 2006 Gerald Carter <jerry@samba.org>
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "dns.h"
+#include <ctype.h>
+
+static DNS_ERROR LabelList( TALLOC_CTX *mem_ctx,
+ const char *name,
+ struct dns_domain_label **presult )
+{
+ struct dns_domain_label *result;
+ const char *dot;
+
+ for (dot = name; *dot != '\0'; dot += 1) {
+ char c = *dot;
+
+ if (c == '.')
+ break;
+
+ if (c == '-') continue;
+ if ((c >= 'a') && (c <= 'z')) continue;
+ if ((c >= 'A') && (c <= 'Z')) continue;
+ if ((c >= '0') && (c <= '9')) continue;
+
+ return ERROR_DNS_INVALID_NAME;
+ }
+
+ if ((dot - name) > 63) {
+ /*
+ * DNS labels can only be 63 chars long
+ */
+ return ERROR_DNS_INVALID_NAME;
+ }
+
+ if (!(result = TALLOC_ZERO_P(mem_ctx, struct dns_domain_label))) {
+ return ERROR_DNS_NO_MEMORY;
+ }
+
+ if (*dot == '\0') {
+ /*
+ * No dot around, so this is the last component
+ */
+
+ if (!(result->label = talloc_strdup(result, name))) {
+ TALLOC_FREE(result);
+ return ERROR_DNS_NO_MEMORY;
+ }
+ result->len = strlen(result->label);
+ *presult = result;
+ return ERROR_DNS_SUCCESS;
+ }
+
+ if (dot[1] == '.') {
+ /*
+ * Two dots in a row, reject
+ */
+
+ TALLOC_FREE(result);
+ return ERROR_DNS_INVALID_NAME;
+ }
+
+ if (dot[1] != '\0') {
+ /*
+ * Something follows, get the rest
+ */
+
+ DNS_ERROR err = LabelList(result, dot+1, &result->next);
+
+ if (!ERR_DNS_IS_OK(err)) {
+ TALLOC_FREE(result);
+ return err;
+ }
+ }
+
+ result->len = (dot - name);
+
+ if (!(result->label = talloc_strndup(result, name, result->len))) {
+ TALLOC_FREE(result);
+ return ERROR_DNS_NO_MEMORY;
+ }
+
+ *presult = result;
+ return ERROR_DNS_SUCCESS;
+}
+
+DNS_ERROR dns_domain_name_from_string( TALLOC_CTX *mem_ctx,
+ const char *pszDomainName,
+ struct dns_domain_name **presult )
+{
+ struct dns_domain_name *result;
+ DNS_ERROR err;
+
+ if (!(result = talloc(mem_ctx, struct dns_domain_name))) {
+ return ERROR_DNS_NO_MEMORY;
+ }
+
+ err = LabelList( result, pszDomainName, &result->pLabelList );
+ if (!ERR_DNS_IS_OK(err)) {
+ TALLOC_FREE(result);
+ return err;
+ }
+
+ *presult = result;
+ return ERROR_DNS_SUCCESS;
+}
+
+/*********************************************************************
+*********************************************************************/
+
+char *dns_generate_keyname( TALLOC_CTX *mem_ctx )
+{
+ char *result = NULL;
+#if defined(WITH_DNS_UPDATES)
+
+ uuid_t uuid;
+
+ /*
+ * uuid_unparse gives 36 bytes plus '\0'
+ */
+ if (!(result = TALLOC_ARRAY(mem_ctx, char, 37))) {
+ return NULL;
+ }
+
+ uuid_generate( uuid );
+ uuid_unparse( uuid, result );
+
+#endif
+
+ return result;
+}
diff --git a/lib/addns/error.c b/lib/addns/error.c
new file mode 100644
index 0000000000..361388cea3
--- /dev/null
+++ b/lib/addns/error.c
@@ -0,0 +1,59 @@
+/*
+ Linux DNS client library implementation
+ Copyright (C) 2010 Guenther Deschner
+
+ ** NOTE! The following LGPL license applies to the libaddns
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "dns.h"
+#include "dnserr.h"
+
+typedef struct {
+ const char *dns_errstr;
+ DNS_ERROR dns_errcode;
+} dns_err_code_struct;
+
+static const dns_err_code_struct dns_errs[] =
+{
+ { "ERROR_DNS_SUCCESS", ERROR_DNS_SUCCESS },
+ { "ERROR_DNS_RECORD_NOT_FOUND", ERROR_DNS_RECORD_NOT_FOUND },
+ { "ERROR_DNS_BAD_RESPONSE", ERROR_DNS_BAD_RESPONSE },
+ { "ERROR_DNS_INVALID_PARAMETER", ERROR_DNS_INVALID_PARAMETER },
+ { "ERROR_DNS_NO_MEMORY", ERROR_DNS_NO_MEMORY },
+ { "ERROR_DNS_INVALID_NAME_SERVER", ERROR_DNS_INVALID_NAME_SERVER },
+ { "ERROR_DNS_CONNECTION_FAILED", ERROR_DNS_CONNECTION_FAILED },
+ { "ERROR_DNS_GSS_ERROR", ERROR_DNS_GSS_ERROR },
+ { "ERROR_DNS_INVALID_NAME", ERROR_DNS_INVALID_NAME },
+ { "ERROR_DNS_INVALID_MESSAGE", ERROR_DNS_INVALID_MESSAGE },
+ { "ERROR_DNS_SOCKET_ERROR", ERROR_DNS_SOCKET_ERROR },
+ { "ERROR_DNS_UPDATE_FAILED", ERROR_DNS_UPDATE_FAILED },
+ { NULL, ERROR_DNS_SUCCESS },
+};
+
+const char *dns_errstr(DNS_ERROR err)
+{
+ int i;
+
+ for (i=0; dns_errs[i].dns_errstr != NULL; i++) {
+ if (ERR_DNS_EQUAL(err, dns_errs[i].dns_errcode)) {
+ return dns_errs[i].dns_errstr;
+ }
+ }
+
+ return NULL;
+}
diff --git a/lib/addns/wscript_build b/lib/addns/wscript_build
new file mode 100644
index 0000000000..69a4716245
--- /dev/null
+++ b/lib/addns/wscript_build
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+bld.SAMBA_LIBRARY('addns',
+ source='dnsrecord.c dnsutils.c dnssock.c dnsgss.c dnsmarshall.c error.c',
+ public_deps='talloc krb5 k5crypto com_err gssapi gssapi_krb5 uuid',
+ private_library=True,
+ vars=locals())
diff --git a/lib/async_req/async_sock.c b/lib/async_req/async_sock.c
index 18adb42a0c..2c90b6dd17 100644
--- a/lib/async_req/async_sock.c
+++ b/lib/async_req/async_sock.c
@@ -36,28 +36,29 @@
#define TALLOC_FREE(ctx) do { talloc_free(ctx); ctx=NULL; } while(0)
#endif
-struct async_send_state {
+struct sendto_state {
int fd;
const void *buf;
size_t len;
int flags;
+ const struct sockaddr_storage *addr;
+ socklen_t addr_len;
ssize_t sent;
};
-static void async_send_handler(struct tevent_context *ev,
+static void sendto_handler(struct tevent_context *ev,
struct tevent_fd *fde,
uint16_t flags, void *private_data);
-struct tevent_req *async_send_send(TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- int fd, const void *buf, size_t len,
- int flags)
+struct tevent_req *sendto_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
+ int fd, const void *buf, size_t len, int flags,
+ const struct sockaddr_storage *addr)
{
struct tevent_req *result;
- struct async_send_state *state;
+ struct sendto_state *state;
struct tevent_fd *fde;
- result = tevent_req_create(mem_ctx, &state, struct async_send_state);
+ result = tevent_req_create(mem_ctx, &state, struct sendto_state);
if (result == NULL) {
return result;
}
@@ -65,8 +66,26 @@ struct tevent_req *async_send_send(TALLOC_CTX *mem_ctx,
state->buf = buf;
state->len = len;
state->flags = flags;
+ state->addr = addr;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ state->addr_len = sizeof(struct sockaddr_in);
+ break;
+#if defined(HAVE_IPV6)
+ case AF_INET6:
+ state->addr_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ case AF_UNIX:
+ state->addr_len = sizeof(struct sockaddr_un);
+ break;
+ default:
+ state->addr_len = sizeof(struct sockaddr_storage);
+ break;
+ }
- fde = tevent_add_fd(ev, state, fd, TEVENT_FD_WRITE, async_send_handler,
+ fde = tevent_add_fd(ev, state, fd, TEVENT_FD_WRITE, sendto_handler,
result);
if (fde == NULL) {
TALLOC_FREE(result);
@@ -75,16 +94,17 @@ struct tevent_req *async_send_send(TALLOC_CTX *mem_ctx,
return result;
}
-static void async_send_handler(struct tevent_context *ev,
+static void sendto_handler(struct tevent_context *ev,
struct tevent_fd *fde,
uint16_t flags, void *private_data)
{
struct tevent_req *req = talloc_get_type_abort(
private_data, struct tevent_req);
- struct async_send_state *state =
- tevent_req_data(req, struct async_send_state);
+ struct sendto_state *state =
+ tevent_req_data(req, struct sendto_state);
- state->sent = send(state->fd, state->buf, state->len, state->flags);
+ state->sent = sendto(state->fd, state->buf, state->len, state->flags,
+ (struct sockaddr *)state->addr, state->addr_len);
if ((state->sent == -1) && (errno == EINTR)) {
/* retry */
return;
@@ -96,10 +116,10 @@ static void async_send_handler(struct tevent_context *ev,
tevent_req_done(req);
}
-ssize_t async_send_recv(struct tevent_req *req, int *perrno)
+ssize_t sendto_recv(struct tevent_req *req, int *perrno)
{
- struct async_send_state *state =
- tevent_req_data(req, struct async_send_state);
+ struct sendto_state *state =
+ tevent_req_data(req, struct sendto_state);
if (tevent_req_is_unix_error(req, perrno)) {
return -1;
@@ -107,27 +127,31 @@ ssize_t async_send_recv(struct tevent_req *req, int *perrno)
return state->sent;
}
-struct async_recv_state {
+struct recvfrom_state {
int fd;
void *buf;
size_t len;
int flags;
+ struct sockaddr_storage *addr;
+ socklen_t *addr_len;
ssize_t received;
};
-static void async_recv_handler(struct tevent_context *ev,
+static void recvfrom_handler(struct tevent_context *ev,
struct tevent_fd *fde,
uint16_t flags, void *private_data);
-struct tevent_req *async_recv_send(TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- int fd, void *buf, size_t len, int flags)
+struct tevent_req *recvfrom_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ int fd, void *buf, size_t len, int flags,
+ struct sockaddr_storage *addr,
+ socklen_t *addr_len)
{
struct tevent_req *result;
- struct async_recv_state *state;
+ struct recvfrom_state *state;
struct tevent_fd *fde;
- result = tevent_req_create(mem_ctx, &state, struct async_recv_state);
+ result = tevent_req_create(mem_ctx, &state, struct recvfrom_state);
if (result == NULL) {
return result;
}
@@ -135,8 +159,10 @@ struct tevent_req *async_recv_send(TALLOC_CTX *mem_ctx,
state->buf = buf;
state->len = len;
state->flags = flags;
+ state->addr = addr;
+ state->addr_len = addr_len;
- fde = tevent_add_fd(ev, state, fd, TEVENT_FD_READ, async_recv_handler,
+ fde = tevent_add_fd(ev, state, fd, TEVENT_FD_READ, recvfrom_handler,
result);
if (fde == NULL) {
TALLOC_FREE(result);
@@ -145,17 +171,18 @@ struct tevent_req *async_recv_send(TALLOC_CTX *mem_ctx,
return result;
}
-static void async_recv_handler(struct tevent_context *ev,
+static void recvfrom_handler(struct tevent_context *ev,
struct tevent_fd *fde,
uint16_t flags, void *private_data)
{
struct tevent_req *req = talloc_get_type_abort(
private_data, struct tevent_req);
- struct async_recv_state *state =
- tevent_req_data(req, struct async_recv_state);
+ struct recvfrom_state *state =
+ tevent_req_data(req, struct recvfrom_state);
- state->received = recv(state->fd, state->buf, state->len,
- state->flags);
+ state->received = recvfrom(state->fd, state->buf, state->len,
+ state->flags, (struct sockaddr *)state->addr,
+ state->addr_len);
if ((state->received == -1) && (errno == EINTR)) {
/* retry */
return;
@@ -171,10 +198,10 @@ static void async_recv_handler(struct tevent_context *ev,
tevent_req_done(req);
}
-ssize_t async_recv_recv(struct tevent_req *req, int *perrno)
+ssize_t recvfrom_recv(struct tevent_req *req, int *perrno)
{
- struct async_recv_state *state =
- tevent_req_data(req, struct async_recv_state);
+ struct recvfrom_state *state =
+ tevent_req_data(req, struct recvfrom_state);
if (tevent_req_is_unix_error(req, perrno)) {
return -1;
@@ -358,6 +385,7 @@ struct writev_state {
int count;
size_t total_size;
uint16_t flags;
+ bool err_on_readability;
};
static void writev_trigger(struct tevent_req *req, void *private_data);
@@ -385,10 +413,8 @@ struct tevent_req *writev_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
if (state->iov == NULL) {
goto fail;
}
- state->flags = TEVENT_FD_WRITE;
- if (err_on_readability) {
- state->flags |= TEVENT_FD_READ;
- }
+ state->flags = TEVENT_FD_WRITE|TEVENT_FD_READ;
+ state->err_on_readability = err_on_readability;
if (queue == NULL) {
struct tevent_fd *fde;
@@ -434,8 +460,35 @@ static void writev_handler(struct tevent_context *ev, struct tevent_fd *fde,
to_write = 0;
if ((state->flags & TEVENT_FD_READ) && (flags & TEVENT_FD_READ)) {
- tevent_req_error(req, EPIPE);
- return;
+ int ret, value;
+
+ if (state->err_on_readability) {
+ /* Readable and the caller wants an error on read. */
+ tevent_req_error(req, EPIPE);
+ return;
+ }
+
+ /* Might be an error. Check if there are bytes to read */
+ ret = ioctl(state->fd, FIONREAD, &value);
+	/* FIXME: should we also treat ret == 0
+	   with value == 0 as an error here? */
+ if (ret == -1) {
+ /* There's an error. */
+ tevent_req_error(req, EPIPE);
+ return;
+ }
+	/* A request for TEVENT_FD_READ will keep succeeding until
+	   the pending bytes are read, so if there was an error we
+	   will see it once we actually read, in the read callback
+	   function. Until then, remove TEVENT_FD_READ from the set
+	   of flags we are waiting for. */
+ state->flags &= ~TEVENT_FD_READ;
+ TEVENT_FD_NOT_READABLE(fde);
+
+ /* If not writable, we're done. */
+ if (!(flags & TEVENT_FD_WRITE)) {
+ return;
+ }
}
for (i=0; i<state->count; i++) {
diff --git a/lib/async_req/async_sock.h b/lib/async_req/async_sock.h
index e7ddff8c92..8d98886e2b 100644
--- a/lib/async_req/async_sock.h
+++ b/lib/async_req/async_sock.h
@@ -27,16 +27,17 @@
#include <talloc.h>
#include <tevent.h>
-struct tevent_req *async_send_send(TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- int fd, const void *buf, size_t len,
- int flags);
-ssize_t async_send_recv(struct tevent_req *req, int *perrno);
-
-struct tevent_req *async_recv_send(TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- int fd, void *buf, size_t len, int flags);
-ssize_t async_recv_recv(struct tevent_req *req, int *perrno);
+struct tevent_req *sendto_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
+ int fd, const void *buf, size_t len, int flags,
+ const struct sockaddr_storage *addr);
+ssize_t sendto_recv(struct tevent_req *req, int *perrno);
+
+struct tevent_req *recvfrom_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ int fd, void *buf, size_t len, int flags,
+ struct sockaddr_storage *addr,
+ socklen_t *addr_len);
+ssize_t recvfrom_recv(struct tevent_req *req, int *perrno);
struct tevent_req *async_connect_send(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
diff --git a/lib/async_req/config.mk b/lib/async_req/config.mk
deleted file mode 100644
index 64e537c088..0000000000
--- a/lib/async_req/config.mk
+++ /dev/null
@@ -1,4 +0,0 @@
-[SUBSYSTEM::LIBASYNC_REQ]
-PUBLIC_DEPENDENCIES = LIBREPLACE_NETWORK LIBTALLOC LIBTEVENT
-
-LIBASYNC_REQ_OBJ_FILES = $(addprefix ../lib/async_req/, async_sock.o)
diff --git a/lib/async_req/wscript_build b/lib/async_req/wscript_build
new file mode 100644
index 0000000000..352861c958
--- /dev/null
+++ b/lib/async_req/wscript_build
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+
+bld.SAMBA_SUBSYSTEM('LIBASYNC_REQ',
+ source='async_sock.c',
+ public_deps='talloc tevent',
+ deps='UTIL_TEVENT'
+ )
+
diff --git a/lib/compression/lzxpress.c b/lib/compression/lzxpress.c
index 0abbfc4d3d..a4ded7e455 100644
--- a/lib/compression/lzxpress.c
+++ b/lib/compression/lzxpress.c
@@ -34,6 +34,7 @@
#include "replace.h"
#include "lzxpress.h"
+#include "../lib/util/byteorder.h"
#define __BUF_POS_CONST(buf,ofs)(((const uint8_t *)buf)+(ofs))
@@ -80,6 +81,7 @@ ssize_t lzxpress_compress(const uint8_t *uncompressed,
uncompressed_pos = 0;
indic = 0;
+ *(uint32_t *)compressed = 0;
compressed_pos = sizeof(uint32_t);
indic_pos = &compressed[0];
@@ -129,11 +131,11 @@ ssize_t lzxpress_compress(const uint8_t *uncompressed,
if (best_len < 10) {
/* Classical meta-data */
metadata = (uint16_t)(((best_offset - 1) << 3) | (best_len - 3));
- dest[metadata_size / sizeof(uint16_t)] = metadata;
+ SSVAL(dest, metadata_size / sizeof(uint16_t), metadata);
metadata_size += sizeof(uint16_t);
} else {
metadata = (uint16_t)(((best_offset - 1) << 3) | 7);
- dest[metadata_size / sizeof(uint16_t)] = metadata;
+ SSVAL(dest, metadata_size / sizeof(uint16_t), metadata);
metadata_size = sizeof(uint16_t);
if (best_len < (15 + 7 + 3)) {
@@ -155,7 +157,7 @@ ssize_t lzxpress_compress(const uint8_t *uncompressed,
compressed[nibble_index] |= (15 * 16);
}
- /* Additionnal best_len */
+ /* Additional best_len */
compressed[compressed_pos + metadata_size] = (best_len - (3 + 7 + 15)) & 0xFF;
metadata_size += sizeof(uint8_t);
} else {
@@ -167,7 +169,7 @@ ssize_t lzxpress_compress(const uint8_t *uncompressed,
compressed[nibble_index] |= 15 << 4;
}
- /* Additionnal best_len */
+ /* Additional best_len */
compressed[compressed_pos + metadata_size] = 255;
metadata_size += sizeof(uint8_t);
@@ -198,7 +200,7 @@ ssize_t lzxpress_compress(const uint8_t *uncompressed,
indic_bit++;
if ((indic_bit - 1) % 32 > (indic_bit % 32)) {
- *(uint32_t *)indic_pos = indic;
+ SIVAL(indic_pos, 0, indic);
indic = 0;
indic_pos = &compressed[compressed_pos];
compressed_pos += sizeof(uint32_t);
@@ -212,7 +214,7 @@ ssize_t lzxpress_compress(const uint8_t *uncompressed,
uncompressed_pos++;
compressed_pos++;
if (((indic_bit - 1) % 32) > (indic_bit % 32)){
- *(uint32_t *)indic_pos = indic;
+ SIVAL(indic_pos, 0, indic);
indic = 0;
indic_pos = &compressed[compressed_pos];
compressed_pos += sizeof(uint32_t);
@@ -223,7 +225,8 @@ ssize_t lzxpress_compress(const uint8_t *uncompressed,
for (; (indic_bit % 32) != 0; indic_bit++)
indic |= 0 << (32 - ((indic_bit % 32) + 1));
- *(uint32_t *)indic_pos = indic;
+ *(uint32_t *)&compressed[compressed_pos] = 0;
+ SIVAL(indic_pos, 0, indic);
compressed_pos += sizeof(uint32_t);
}
diff --git a/lib/compression/testsuite.c b/lib/compression/testsuite.c
index b9cebb2e8d..ff7d892e64 100644
--- a/lib/compression/testsuite.c
+++ b/lib/compression/testsuite.c
@@ -20,11 +20,58 @@
#include "includes.h"
#include "torture/torture.h"
-#include "../compression/mszip.h"
+#include "talloc.h"
+#include "mszip.h"
+#include "lzxpress.h"
+
+/*
+ test lzxpress
+ */
+static bool test_lzxpress(struct torture_context *test)
+{
+ TALLOC_CTX *tmp_ctx = talloc_new(test);
+ uint8_t *data;
+ const char *fixed_data = "this is a test. and this is a test too";
+ const uint8_t fixed_out[] = { 0x00, 0x20, 0x00, 0x04, 0x74, 0x68, 0x69, 0x73,
+ 0x20, 0x10, 0x00, 0x61, 0x20, 0x74, 0x65, 0x73,
+ 0x74, 0x2E, 0x20, 0x61, 0x6E, 0x64, 0x20, 0x9F,
+ 0x00, 0x04, 0x20, 0x74, 0x6F, 0x6F, 0x00, 0x00,
+ 0x00, 0x00 };
+ ssize_t c_size;
+ uint8_t *out, *out2;
+
+ data = talloc_size(tmp_ctx, 1023);
+ out = talloc_size(tmp_ctx, 2048);
+ memset(out, 0x42, talloc_get_size(out));
+
+ torture_comment(test, "lzxpress fixed compression\n");
+ c_size = lzxpress_compress((const uint8_t *)fixed_data,
+ strlen(fixed_data),
+ out,
+ talloc_get_size(out));
+
+ torture_assert_int_equal(test, c_size, sizeof(fixed_out), "fixed lzxpress_compress size");
+ torture_assert_mem_equal(test, out, fixed_out, c_size, "fixed lzxpress_compress data");
+
+ torture_comment(test, "lzxpress fixed decompression\n");
+ out2 = talloc_size(tmp_ctx, strlen(fixed_data));
+ c_size = lzxpress_decompress(out,
+ sizeof(fixed_out),
+ out2,
+ talloc_get_size(out2));
+
+ torture_assert_int_equal(test, c_size, strlen(fixed_data), "fixed lzxpress_decompress size");
+ torture_assert_mem_equal(test, out2, fixed_data, c_size, "fixed lzxpress_decompress data");
+
+ return true;
+}
+
struct torture_suite *torture_local_compression(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "COMPRESSION");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "compression");
+
+ torture_suite_add_simple_test(suite, "lzxpress", test_lzxpress);
return suite;
}
diff --git a/lib/compression/wscript_build b/lib/compression/wscript_build
new file mode 100644
index 0000000000..7ad533340d
--- /dev/null
+++ b/lib/compression/wscript_build
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+bld.SAMBA_SUBSYSTEM('LZXPRESS',
+ deps='replace',
+ source='lzxpress.c'
+ )
diff --git a/lib/crypto/aes.c b/lib/crypto/aes.c
index 7735e8ff37..a47a456593 100644
--- a/lib/crypto/aes.c
+++ b/lib/crypto/aes.c
@@ -112,3 +112,25 @@ AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
}
}
}
+
+void aes_cfb8_encrypt(const uint8_t *in, uint8_t *out,
+ size_t length, const AES_KEY *key,
+ uint8_t *iv, int forward)
+{
+ size_t i;
+
+ for (i=0; i < length; i++) {
+ uint8_t tiv[AES_BLOCK_SIZE*2];
+
+ memcpy(tiv, iv, AES_BLOCK_SIZE);
+ AES_encrypt(iv, iv, key);
+ if (!forward) {
+ tiv[AES_BLOCK_SIZE] = in[i];
+ }
+ out[i] = in[i] ^ iv[0];
+ if (forward) {
+ tiv[AES_BLOCK_SIZE] = out[i];
+ }
+ memcpy(iv, tiv+1, AES_BLOCK_SIZE);
+ }
+}
diff --git a/lib/crypto/aes.h b/lib/crypto/aes.h
index e74d345215..a2b6c077e6 100644
--- a/lib/crypto/aes.h
+++ b/lib/crypto/aes.h
@@ -72,6 +72,10 @@ void AES_cbc_encrypt(const unsigned char *, unsigned char *,
const unsigned long, const AES_KEY *,
unsigned char *, int);
+void aes_cfb8_encrypt(const uint8_t *in, uint8_t *out,
+ size_t length, const AES_KEY *key,
+ uint8_t *iv, int forward);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/crypto/arcfour.c b/lib/crypto/arcfour.c
index c57e05d0e9..1afd659be6 100644
--- a/lib/crypto/arcfour.c
+++ b/lib/crypto/arcfour.c
@@ -19,7 +19,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "includes.h"
+#include "replace.h"
#include "../lib/crypto/arcfour.h"
/* initialise the arcfour sbox with key */
diff --git a/lib/crypto/arcfour.h b/lib/crypto/arcfour.h
index 501b3f2fab..a9f80c474d 100644
--- a/lib/crypto/arcfour.h
+++ b/lib/crypto/arcfour.h
@@ -1,6 +1,8 @@
#ifndef ARCFOUR_HEADER_H
#define ARCFOUR_HEADER_H
+#include "../lib/util/data_blob.h"
+
struct arcfour_state {
uint8_t sbox[256];
uint8_t index_i;
diff --git a/lib/crypto/config.mk b/lib/crypto/config.mk
deleted file mode 100644
index c9ba779be2..0000000000
--- a/lib/crypto/config.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################
-# Start SUBSYSTEM LIBCRYPTO
-[SUBSYSTEM::LIBCRYPTO]
-# End SUBSYSTEM LIBCRYPTO
-##############################
-
-LIBCRYPTO_OBJ_FILES = $(addprefix $(libcryptosrcdir)/, \
- crc32.o md5.o hmacmd5.o md4.o \
- arcfour.o sha256.o hmacsha256.o \
- aes.o rijndael-alg-fst.o)
-
-[SUBSYSTEM::TORTURE_LIBCRYPTO]
-PRIVATE_DEPENDENCIES = LIBCRYPTO
-
-TORTURE_LIBCRYPTO_OBJ_FILES = $(addprefix $(libcryptosrcdir)/, \
- md4test.o md5test.o hmacmd5test.o)
-
-$(eval $(call proto_header_template,$(libcryptosrcdir)/test_proto.h,$(TORTURE_LIBCRYPTO_OBJ_FILES:.o=.c)))
diff --git a/lib/crypto/crc32.c b/lib/crypto/crc32.c
index e6cc529767..cca62a0c04 100644
--- a/lib/crypto/crc32.c
+++ b/lib/crypto/crc32.c
@@ -40,7 +40,7 @@
* CRC32 code derived from work by Gary S. Brown.
*/
-#include "includes.h"
+#include "replace.h"
#include "../lib/crypto/crc32.h"
static const uint32_t crc32_tab[] = {
diff --git a/lib/crypto/hmacmd5.c b/lib/crypto/hmacmd5.c
index 0c8d1ab598..cfbd428014 100644
--- a/lib/crypto/hmacmd5.c
+++ b/lib/crypto/hmacmd5.c
@@ -22,7 +22,7 @@
* for ntlmv2.
*/
-#include "includes.h"
+#include "replace.h"
#include "../lib/crypto/hmacmd5.h"
/***********************************************************************
diff --git a/lib/crypto/hmacmd5test.c b/lib/crypto/hmacmd5test.c
index 77f305a5d3..a6621a6e13 100644
--- a/lib/crypto/hmacmd5test.c
+++ b/lib/crypto/hmacmd5test.c
@@ -16,7 +16,8 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "includes.h"
+#include "replace.h"
+#include "../lib/util/util.h"
#include "../lib/crypto/crypto.h"
struct torture_context;
diff --git a/lib/crypto/hmacsha256.c b/lib/crypto/hmacsha256.c
index 53d4fe3883..1a31441297 100644
--- a/lib/crypto/hmacsha256.c
+++ b/lib/crypto/hmacsha256.c
@@ -26,7 +26,7 @@
taken direct from rfc2202 implementation and modified for suitable use
*/
-#include "includes.h"
+#include "replace.h"
#include "../lib/crypto/crypto.h"
/***********************************************************************
diff --git a/lib/crypto/md4.c b/lib/crypto/md4.c
index aea2c821c5..7eb6070cd4 100644
--- a/lib/crypto/md4.c
+++ b/lib/crypto/md4.c
@@ -17,7 +17,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "includes.h"
+#include "replace.h"
#include "../lib/crypto/md4.h"
/* NOTE: This code makes no attempt to be fast!
diff --git a/lib/crypto/md4test.c b/lib/crypto/md4test.c
index a6080cff82..7516e826ab 100644
--- a/lib/crypto/md4test.c
+++ b/lib/crypto/md4test.c
@@ -17,7 +17,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "includes.h"
+#include "replace.h"
+#include "../lib/util/util.h"
#include "../lib/crypto/crypto.h"
struct torture_context;
@@ -25,7 +26,7 @@ struct torture_context;
/*
This uses the test values from rfc1320
*/
-bool torture_local_crypto_md4(struct torture_context *torture)
+bool torture_local_crypto_md4(struct torture_context *torture)
{
bool ret = true;
uint32_t i;
diff --git a/lib/crypto/md5.c b/lib/crypto/md5.c
index 584c46ef2d..032474478e 100644
--- a/lib/crypto/md5.c
+++ b/lib/crypto/md5.c
@@ -18,7 +18,7 @@
/* This code slightly modified to fit into Samba by
abartlet@samba.org Jun 2001 */
-#include "includes.h"
+#include "replace.h"
#include "md5.h"
@@ -28,12 +28,12 @@ static void MD5Transform(uint32_t buf[4], uint32_t const in[16]);
/*
* Note: this code is harmless on little-endian machines.
*/
-static void byteReverse(uint8_t *buf, uint_t longs)
+static void byteReverse(uint8_t *buf, unsigned int longs)
{
uint32_t t;
do {
- t = (uint32_t) ((uint_t) buf[3] << 8 | buf[2]) << 16 |
- ((uint_t) buf[1] << 8 | buf[0]);
+ t = (uint32_t) ((unsigned int) buf[3] << 8 | buf[2]) << 16 |
+ ((unsigned int) buf[1] << 8 | buf[0]);
*(uint32_t *) buf = t;
buf += 4;
} while (--longs);
@@ -108,7 +108,7 @@ _PUBLIC_ void MD5Update(struct MD5Context *ctx, const uint8_t *buf, size_t len)
*/
_PUBLIC_ void MD5Final(uint8_t digest[16], struct MD5Context *ctx)
{
- uint_t count;
+ unsigned int count;
uint8_t *p;
/* Compute number of bytes mod 64 */
@@ -144,7 +144,7 @@ _PUBLIC_ void MD5Final(uint8_t digest[16], struct MD5Context *ctx)
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
byteReverse((uint8_t *) ctx->buf, 4);
memmove(digest, ctx->buf, 16);
- memset(ctx, 0, sizeof(ctx)); /* In case it's sensitive */
+ memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
}
/* The four core functions - F1 is optimized somewhat */
diff --git a/lib/crypto/md5test.c b/lib/crypto/md5test.c
index 7223af2114..0457d4d4ea 100644
--- a/lib/crypto/md5test.c
+++ b/lib/crypto/md5test.c
@@ -17,7 +17,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "includes.h"
+#include "replace.h"
+#include "../lib/util/util.h"
#include "../lib/crypto/crypto.h"
struct torture_context;
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 233abe23f8..42ab2363aa 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*/
-#include "includes.h"
+#include "replace.h"
#include "sha256.h"
#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
diff --git a/lib/crypto/wscript_build b/lib/crypto/wscript_build
new file mode 100644
index 0000000000..7bc4eb7dd0
--- /dev/null
+++ b/lib/crypto/wscript_build
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+bld.SAMBA_SUBSYSTEM('LIBCRYPTO',
+ source='crc32.c md5.c hmacmd5.c md4.c arcfour.c sha256.c hmacsha256.c aes.c rijndael-alg-fst.c',
+ deps='talloc'
+ )
+
+
+bld.SAMBA_SUBSYSTEM('TORTURE_LIBCRYPTO',
+ source='md4test.c md5test.c hmacmd5test.c',
+ autoproto='test_proto.h',
+ deps='LIBCRYPTO'
+ )
+
diff --git a/lib/dnspython/.gitignore b/lib/dnspython/.gitignore
new file mode 100644
index 0000000000..5592c971b0
--- /dev/null
+++ b/lib/dnspython/.gitignore
@@ -0,0 +1,7 @@
+build
+dist
+MANIFEST
+html
+html.zip
+html.tar.gz
+tests/*.out
diff --git a/lib/dnspython/ChangeLog b/lib/dnspython/ChangeLog
new file mode 100644
index 0000000000..0fff77f977
--- /dev/null
+++ b/lib/dnspython/ChangeLog
@@ -0,0 +1,1123 @@
+2010-12-17 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py (_WireReader._get_section): use "is" and not "=="
+ when testing what section an RR is in. Thanks to James Raftery
+ for reporting this bug.
+
+2010-12-10 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver.query): disallow metaqueries.
+
+ * dns/rdata.py (Rdata.__hash__): Added a __hash__ method for rdata.
+
+2010-11-23 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.9.2 released)
+
+2010-11-23 Bob Halley <halley@dnspython.org>
+
+ * dns/dnssec.py (_need_pycrypto): DSA and RSA are modules, not
+ functions, and I didn't notice because the test suite masked
+ the bug! *sigh*
+
+2010-11-22 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.9.1 released)
+
+2010-11-22 Bob Halley <halley@dnspython.org>
+
+ * dns/dnssec.py: the "from" style import used to get DSA from
+ PyCrypto trashed a DSA constant. Now a normal import is used
+ to avoid namespace contamination.
+
+2010-11-20 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.9.0 released)
+
+2010-11-07 Bob Halley <halley@dnspython.org>
+
+ * dns/dnssec.py: Added validate() to do basic DNSSEC validation
+ (requires PyCrypto). Thanks to Brian Wellington for the patch.
+
+ * dns/hash.py: Hash compatibility handling is now its own module.
+
+2010-10-31 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (zone_for_name): A query name resulting in a
+ CNAME or DNAME response to a node which had an SOA was incorrectly
+ treated as a zone origin. In these cases, we should just look
+ higher. Thanks to Gert Berger for reporting this problem.
+
+ * Added zonediff.py to examples. This program compares two zones
+ and shows the differences either in diff-like plain text, or
+ HTML. Thanks to Dennis Kaarsemaker for contributing this
+ useful program.
+
+2010-10-27 Bob Halley <halley@dnspython.org>
+
+ * Incorporate a patch to use poll() instead of select() by
+ default on platforms which support it. Thanks to
+ Peter Schüller and Spotify for the contribution.
+
+2010-10-17 Bob Halley <halley@dnspython.org>
+
+ * Python prior to 2.5.2 doesn't compute the correct values for
+ HMAC-SHA384 and HMAC-SHA512. We now detect attempts to use
+ them and raise NotImplemented if the Python version is too old.
+ Thanks to Kevin Chen for reporting the problem.
+
+ * Various routines that took the string forms of rdata types and
+ classes did not permit the strings to be Unicode strings.
+ Thanks to Ryan Workman for reporting the issue.
+
+ * dns/tsig.py: Added symbolic constants for the algorithm strings.
+ E.g. you can now say dns.tsig.HMAC_MD5 instead of
+ "HMAC-MD5.SIG-ALG.REG.INT". Thanks to Cillian Sharkey for
+ suggesting this improvement.
+
+ * dns/tsig.py (get_algorithm): fix hashlib compatibility; thanks to
+ Kevin Chen for the patch.
+
+ * dns/dnssec.py: Added key_id() and make_ds().
+
+ * dns/message.py: message.py needs to import dns.edns since it uses
+ it.
+
+2010-05-04 Bob Halley <halley@dnspython.org>
+
+ * dns/rrset.py (RRset.__init__): "covers" was not passed to the
+ superclass __init__(). Thanks to Shanmuga Rajan for reporting
+ the problem.
+
+2010-03-10 Bob Halley <halley@dnspython.org>
+
+ * The TSIG algorithm value was passed to use_tsig() incorrectly
+ in some cases. Thanks to 'ducciovigolo' for reporting the problem.
+
+2010-01-26 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.8.0 released)
+
+2010-01-13 Bob Halley <halley@dnspython.org>
+
+ * dns/dnssec.py: Added RSASHA256 and RSASHA512 codepoints; added
+ other missing codepoints to _algorithm_by_text.
+
+2010-01-12 Bob Halley <halley@dnspython.org>
+
+ * Escapes in masterfiles now work correctly. Previously they were
+ only working correctly when the text involved was part of a domain
+ name.
+
+ * dns/tokenizer.py: The tokenizer's get() method now returns Token
+ objects, not (type, text) tuples.
+
+2009-11-13 Bob Halley <halley@dnspython.org>
+
+ * Support has been added for hmac-sha1, hmac-sha224, hmac-sha256,
+ hmac-sha384 and hmac-sha512. Thanks to Kevin Chen for a
+ thoughtful, high quality patch.
+
+ * dns/update.py (Update::present): A zero TTL was not added if
+ present() was called with a single rdata, causing _add() to be
+ unhappy. Thanks to Eugene Kim for reporting the problem and
+ submitting a patch.
+
+ * dns/entropy.py: Use os.urandom() if present. Don't seed until
+ someone wants randomness.
+
+2009-09-16 Bob Halley <halley@dnspython.org>
+
+ * dns/entropy.py: The entropy module needs locking in order to be
+ used safely in a multithreaded environment. Thanks to Beda Kosata
+ for reporting the problem.
+
+2009-07-27 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py (xfr): The socket was not set to nonblocking mode.
+ Thanks to Erik Romijn for reporting this problem.
+
+2009-07-23 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/IN/SRV.py (SRV._cmp): SRV records were compared
+ incorrectly due to a cut-and-paste error. Thanks to Tommie
+ Gannert for reporting this bug.
+
+ * dns/e164.py (query): The resolver parameter was not used.
+ Thanks to Matías Bellone for reporting this bug.
+
+2009-06-23 Bob Halley <halley@dnspython.org>
+
+ * dns/entropy.py (EntropyPool.__init__): open /dev/random unbuffered;
+ there's no need to consume more randomness than we need. Thanks
+ to Brian Wellington for the patch.
+
+2009-06-19 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.7.1 released)
+
+2009-06-19 Bob Halley <halley@dnspython.org>
+
+ * DLV.py was omitted from the kit
+
+ * Negative prerequisites were not handled correctly in _get_section().
+
+2009-06-19 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.7.0 released)
+
+2009-06-19 Bob Halley <halley@dnspython.org>
+
+ * On Windows, the resolver set the domain incorrectly. Thanks
+ to Brandon Carpenter for reporting this bug.
+
+ * Added a to_digestable() method to rdata classes; it returns the
+ digestable form (i.e. DNSSEC canonical form) of the rdata. For
+ most rdata types this is the same as the uncompressed wire form. For
+ certain older DNS RR types, however, domain names in the rdata
+ are downcased.
+
+ * Added support for the HIP RR type.
+
+2009-06-18 Bob Halley <halley@dnspython.org>
+
+ * Added support for the DLV RR type.
+
+ * Added various DNSSEC related constants (e.g. algorithm identifiers,
+ flag values).
+
+ * dns/tsig.py: Added support for BADTRUNC result code.
+
+ * dns/query.py (udp): When checking that addresses are the same,
+ use the binary form of the address in the comparison. This
+ ensures that we don't treat addresses as different if they have
+ equivalent but differing textual representations. E.g. "1:00::1"
+ and "1::1" represent the same address but are not textually equal.
+ Thanks to Kim Davies for reporting this bug.
+
+ * The resolver's query() method now has an optional 'source' parameter,
+ allowing the source IP address to be specified. Thanks to
+ Alexander Lind for suggesting the change and sending a patch.
+
+ * Added NSEC3 and NSEC3PARAM support.
+
+2009-06-17 Bob Halley <halley@dnspython.org>
+
+ * Fixed NSEC.to_text(), which was only printing the last window.
+ Thanks to Brian Wellington for finding the problem and fixing it.
+
+2009-03-30 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py (xfr): Allow UDP IXFRs. Use "one_rr_per_rrset" mode when
+ doing IXFR.
+
+2009-03-30 Bob Halley <halley@dnspython.org>
+
+ * Add "one_rr_per_rrset" mode switch to methods which parse
+ messages from wire format (e.g. dns.message.from_wire(),
+ dns.query.udp(), dns.query.tcp()). If set, each RR read is
+ placed in its own RRset (instead of being coalesced).
+
+2009-03-30 Bob Halley <halley@dnspython.org>
+
+ * Added EDNS option support.
+
+2008-10-16 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/ANY/DS.py: The from_text() parser for DS RRs did not
+ allow multiple Base64 chunks. Thanks to Rakesh Banka for
+ finding this bug and submitting a patch.
+
+2008-10-08 Bob Halley <halley@dnspython.org>
+
+ * Add entropy module.
+
+ * When validating TSIGs, we need to use the absolute name.
+
+2008-06-03 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py (Message.set_rcode): The mask used preserved the
+ extended rcode, instead of everything else in ednsflags.
+
+ * dns/message.py (Message.use_edns): ednsflags was not kept
+ coherent with the specified edns version.
+
+2008-02-06 Bob Halley <halley@dnspython.org>
+
+ * dns/ipv6.py (inet_aton): We could raise an exception other than
+ dns.exception.SyntaxError in some cases.
+
+ * dns/tsig.py: Raise an exception when the peer has set a non-zero
+ TSIG error.
+
+2007-11-25 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.6.0 released)
+
+2007-11-25 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py (_wait_for): if select() raises an exception due to
+ EINTR, we should just select() again.
+
+2007-06-13 Bob Halley <halley@dnspython.org>
+
+ * dns/inet.py: Added is_multicast().
+
+ * dns/query.py (udp): If the queried address is a multicast address, then
+ don't check that the address of the response is the same as the address
+ queried.
+
+2007-05-24 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/IN/NAPTR.py: NAPTR comparisons didn't compare the
+ preference field due to a typo.
+
+2007-02-07 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py: Integrate code submitted by Paul Marks to
+ determine whether a Windows NIC is enabled. The way dnspython
+ used to do this does not work on Windows Vista.
+
+2006-12-10 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.5.0 released)
+
+2006-11-03 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/IN/DHCID.py: Added support for the DHCID RR type.
+
+2006-11-02 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py (udp): Messages from unexpected sources can now be
+ ignored by setting ignore_unexpected to True.
+
+2006-10-31 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py (udp): When raising UnexpectedSource, add more
+ detail about what went wrong to the exception.
+
+2006-09-22 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py (Message.use_edns): add reasonable defaults for
+ the ednsflags, payload, and request_payload parameters.
+
+ * dns/message.py (Message.want_dnssec): add a convenience method for
+ enabling/disabling the "DNSSEC desired" flag in requests.
+
+ * dns/message.py (make_query): add "use_edns" and "want_dnssec"
+ parameters.
+
+2006-08-17 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver.read_resolv_conf): If /etc/resolv.conf
+ doesn't exist, just use the default resolver configuration (i.e.
+ the same thing we would have used if resolv.conf had existed and
+ been empty).
+
+2006-07-26 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver._config_win32_fromkey): fix
+ cut-and-paste error where we passed the wrong variable to
+ self._config_win32_search(). Thanks to David Arnold for finding
+ the bug and submitting a patch.
+
+2006-07-20 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Answer): Add more support for the sequence
+ protocol, forwarding requests to the answer object's rrset.
+ E.g. "for a in answer" is equivalent to "for a in answer.rrset",
+ "answer[i]" is equivalent to "answer.rrset[i]", and
+ "answer[i:j]" is equivalent to "answer.rrset[i:j]".
+
+2006-07-19 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py (xfr): Add IXFR support.
+
+2006-06-22 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/IN/IPSECKEY.py: Added support for the IPSECKEY RR type.
+
+2006-06-21 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/ANY/SPF.py: Added support for the SPF RR type.
+
+2006-06-02 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.4.0 released)
+
+2006-04-25 Bob Halley <halley@dnspython.org>
+
+ * dns/rrset.py (RRset.to_rdataset): Added a convenience method
+ to convert an rrset into an rdataset.
+
+2006-03-27 Bob Halley <halley@dnspython.org>
+
+ * Added dns.e164.query(). This function can be used to look for
+ NAPTR RRs for a specified number in several domains, e.g.:
+
+ dns.e164.query('16505551212',
+ ['e164.dnspython.org.', 'e164.arpa.'])
+
+2006-03-26 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver.query): The resolver deleted from
+ a list while iterating it, which makes the iterator unhappy.
+
+2006-03-17 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver.query): The resolver needlessly
+ delayed responses for successful queries.
+
+2006-01-18 Bob Halley <halley@dnspython.org>
+
+ * dns/rdata.py: added a validate() method to the rdata class. If
+ you change an rdata by assigning to its fields, it is a good
+ idea to call validate() when you are done making changes.
+ For example, if 'r' is an MX record and then you execute:
+
+ r.preference = 100000 # invalid, because > 65535
+ r.validate()
+
+ The validation will fail and an exception will be raised.
+
+2006-01-11 Bob Halley <halley@dnspython.org>
+
+ * dns/ttl.py: TTLs are now bounds checked to be within the closed
+ interval [0, 2^31 - 1].
+
+ * The BIND 8 TTL syntax is now accepted in the SOA refresh, retry,
+ expire, and minimum fields, and in the original_ttl field of
+ SIG and RRSIG records.
+
+2006-01-04 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py: The windows registry irritatingly changes the
+ list element delimiter in between ' ' and ',' (and vice-versa)
+ in various versions of windows. We now cope by always looking
+ for either one (' ' first).
+
+2005-12-27 Bob Halley <halley@dnspython.org>
+
+ * dns/e164.py: Added routines to convert between E.164 numbers and
+ their ENUM domain name equivalents.
+
+ * dns/reversename.py: Added routines to convert between IPv4 and
+ IPv6 addresses and their DNS reverse-map equivalents.
+
+2005-12-18 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/ANY/LOC.py (_tuple_to_float): The sign was lost when
+ converting a tuple into a float, which broke conversions of
+ south latitudes and west longitudes.
+
+2005-11-17 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: The 'origin' parameter to from_text() and from_file()
+ is now optional. If not specified, dnspython will use the
+ first $ORIGIN in the text as the zone's origin.
+
+ * dns/zone.py: Sanity checks of the zone's origin node can now
+ be disabled.
+
+2005-11-12 Bob Halley <halley@dnspython.org>
+
+ * dns/name.py: Preliminary Unicode support has been added for
+ domain names. Running dns.name.from_text() on a Unicode string
+ will now encode each label using the IDN ACE encoding. The
+ to_unicode() method may be used to convert a dns.name.Name with
+ IDN ACE labels back into a Unicode string. This functionality
+ requires Python 2.3 or greater.
+
+2005-10-31 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.3.5 released)
+
+2005-10-12 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: Zone.iterate_rdatasets() and Zone.iterate_rdatas()
+ did not have a default rdtype of dns.rdatatype.ANY as their
+ docstrings said they did. They do now.
+
+2005-10-06 Bob Halley <halley@dnspython.org>
+
+ * dns/name.py: Added the parent() method, which returns the
+ parent of a name.
+
+2005-10-01 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py: Added zone_for_name() helper, which returns
+ the name of the zone which contains the specified name.
+
+ * dns/resolver.py: Added get_default_resolver(), which returns
+ the default resolver, initializing it if necessary.
+
+2005-09-29 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver._compute_timeout): If time goes
+ backwards a little bit, ignore it.
+
+2005-07-31 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.3.4 released)
+
+2005-07-31 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py (make_response): Trying to respond to a response
+ threw a NameError while trying to throw a FormErr since it used
+ the wrong name for the FormErr exception.
+
+ * dns/query.py (_connect): We needed to ignore EALREADY too.
+
+ * dns/query.py: Optional "source" and "source_port" parameters
+ have been added to udp(), tcp(), and xfr(). Thanks to Ralf
+ Weber for suggesting the change and providing a patch.
+
+2005-06-05 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py: The requirement that the "where" parameter be
+ an IPv4 or IPv6 address is now documented.
+
+2005-06-04 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py: The resolver now does exponential backoff
+ each time it runs through all of the nameservers.
+
+ * dns/resolver.py: rcodes which indicate a nameserver is likely
+ to be a "permanent failure" for a query cause the nameserver
+ to be removed from the mix for that query.
+
+2005-01-30 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.3.3 released)
+
+2004-10-25 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/ANY/TXT.py (TXT.from_text): The masterfile parser
+ incorrectly rejected TXT records where a value was not quoted.
+
+2004-10-11 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py: Added make_response(), which creates a skeletal
+ response for the specified query. Added opcode() and set_opcode()
+ convenience methods to the Message class. Added the request_payload
+ attribute to the Message class.
+
+2004-10-10 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py (from_xfr): dns.zone.from_xfr() in relativization
+ mode incorrectly set zone.origin to the empty name.
+
+2004-09-02 Bob Halley <halley@dnspython.org>
+
+ * dns/name.py (Name.to_wire): The 'file' parameter to
+ Name.to_wire() is now optional; if omitted, the wire form will
+ be returned as the value of the function.
+
+2004-08-14 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py (Message.find_rrset): find_rrset() now uses an
+ index, vastly improving the from_wire() performance of large
+ messages such as zone transfers.
+
+2004-08-07 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.3.2 released)
+
+2004-08-04 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py: sending queries to a nameserver via IPv6 now
+ works.
+
+ * dns/inet.py (af_for_address): Add af_for_address(), which looks
+ at a textual-form address and attempts to determine which address
+ family it is.
+
+ * dns/query.py: the default for the 'af' parameter of the udp(),
+ tcp(), and xfr() functions has been changed from AF_INET to None,
+ which causes dns.inet.af_for_address() to be used to determine the
+ address family. If dns.inet.af_for_address() can't figure it out,
+ we fall back to AF_INET and hope for the best.
+
+2004-07-31 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/ANY/NSEC.py (NSEC.from_text): The NSEC text format
+ does not allow specifying types by number, so we shouldn't either.
+
+ * dns/renderer.py: the renderer module didn't import random,
+ causing an exception to be raised if a query id wasn't provided
+ when a Renderer was created.
+
+ * dns/resolver.py (Resolver.query): the resolver wasn't catching
+ dns.exception.Timeout, so a timeout erroneously caused the whole
+ resolution to fail instead of just going on to the next server.
+
+2004-06-16 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/ANY/LOC.py (LOC.from_text): LOC milliseconds values
+ were converted incorrectly if the length of the milliseconds
+ string was less than 3.
+
+2004-06-06 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.3.1 released)
+
+2004-05-22 Bob Halley <halley@dnspython.org>
+
+ * dns/update.py (Update.delete): We erroneously specified a
+ "deleting" value of dns.rdatatype.NONE instead of
+ dns.rdataclass.NONE when the thing being deleted was either an
+ Rdataset instance or an Rdata instance.
+
+ * dns/rdtypes/ANY/SSHFP.py: Added support for the proposed SSHFP
+ RR type.
+
+2004-05-14 Bob Halley <halley@dnspython.org>
+
+ * dns/rdata.py (from_text): The masterfile reader did not
+ accept the unknown RR syntax when used with a known RR type.
+
+2004-05-08 Bob Halley <halley@dnspython.org>
+
+ * dns/name.py (from_text): dns.name.from_text() did not raise
+ an exception if a backslash escape ended prematurely.
+
+2004-04-09 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py (_MasterReader._rr_line): The masterfile reader
+ erroneously treated lines starting with leading whitespace but
+ not having any RR definition as an error. It now treats
+ them like a blank line (which is not an error).
+
+2004-04-01 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.3.0 released)
+
+2004-03-19 Bob Halley <halley@dnspython.org>
+
+ * Added support for new DNSSEC types RRSIG, NSEC, and DNSKEY.
+
+2004-01-16 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py (_connect): Windows returns EWOULDBLOCK instead
+ of EINPROGRESS when trying to connect a nonblocking socket.
+
+2003-11-13 Bob Halley <halley@dnspython.org>
+
+ * dns/rdtypes/ANY/LOC.py (LOC.to_wire): We encoded and decoded LOC
+ incorrectly, since we were interpreting the values of altitude,
+ size, hprec, and vprec in meters instead of centimeters.
+
+ * dns/rdtypes/IN/WKS.py (WKS.from_wire): The WKS protocol value is
+ encoded with just one octet, not two!
+
+2003-11-09 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Cache.maybe_clean): The cleaner deleted items
+ from the dictionary while iterating it, causing a RuntimeError
+ to be raised. Thanks to Mark R. Levinson for the bug report,
+ regression test, and fix.
+
+2003-11-07 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.2.0 released)
+
+2003-11-03 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py (_MasterReader.read): The saved_state now includes
+ the default TTL.
+
+2003-11-01 Bob Halley <halley@dnspython.org>
+
+ * dns/tokenizer.py (Tokenizer.get): The tokenizer didn't
+ handle escaped delimiters.
+
+2003-10-27 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver.read_resolv_conf): If no nameservers
+ are configured in /etc/resolv.conf, the default nameserver
+ list should be ['127.0.0.1'].
+
+2003-09-08 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver._config_win32_fromkey): We didn't
+ catch WindowsError, which can happen if a key is not defined
+ in the registry.
+
+2003-09-06 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.2.0b1 released)
+
+2003-09-05 Bob Halley <halley@dnspython.org>
+
+ * dns/query.py: Timeout support has been overhauled to provide
+ timeouts under Python 2.2 as well as 2.3, and to provide more
+ accurate expiration.
+
+2003-08-30 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: dns.exception.SyntaxError is raised for unknown
+ master file directives.
+
+2003-08-28 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: $INCLUDE processing is now enabled/disabled using
+ the allow_include parameter. The default is to process $INCLUDE
+ for from_file(), and to disallow $INCLUDE for from_text(). The
+ master reader now calls zone.check_origin_node() by default after
+ the zone has been read. find_rdataset() called get_node() instead
+ of find_node(), which resulted in an incorrect exception. The
+ relativization state of a zone is now remembered and applied
+ consistently when looking up names. from_xfr() now supports
+ relativization like the _MasterReader.
+
+2003-08-22 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: The _MasterReader now understands $INCLUDE.
+
+2003-08-12 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: The _MasterReader now specifies the file and line
+ number when a syntax error occurs. The BIND 8 TTL format is now
+ understood when loading a zone, though it will never be emitted.
+ The from_file() function didn't pass the zone_factory parameter
+ to from_text().
+
+2003-08-10 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.1.0 released)
+
+2003-08-07 Bob Halley <halley@dnspython.org>
+
+ * dns/update.py (Update._add): A typo meant that _add would
+ fail if the thing being added was an Rdata object (as
+ opposed to an Rdataset or the textual form of an Rdata).
+
+2003-08-05 Bob Halley <halley@dnspython.org>
+
+ * dns/set.py: the simple Set class has been moved to its
+ own module, and augmented to support more set operations.
+
+2003-08-04 Bob Halley <halley@dnspython.org>
+
+ * Node and all rdata types have been "slotted". This speeds
+ things up a little and reduces memory usage noticeably.
+
+2003-08-02 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.1.0c1 released)
+
+2003-08-02 Bob Halley <halley@dnspython.org>
+
+ * dns/rdataset.py: SimpleSets now support more set options.
+
+ * dns/message.py: Added the get_rrset() method. from_file() now
+ allows Unicode filenames and turns on universal newline support if
+ it opens the file itself.
+
+ * dns/node.py: Added the delete_rdataset() and replace_rdataset()
+ methods.
+
+ * dns/zone.py: Added the delete_node(), delete_rdataset(), and
+ replace_rdataset() methods. from_file() now allows Unicode
+ filenames and turns on universal newline support if it opens the
+ file itself. Added a to_file() method.
+
+2003-08-01 Bob Halley <halley@dnspython.org>
+
+ * dns/opcode.py: Opcode from/to text converters now understand
+ numeric opcodes. The to_text() method will return a numeric opcode
+ string if it doesn't know a text name for the opcode.
+
+ * dns/message.py: Added set_rcode(). Fixed code where ednsflags
+ wasn't treated as a long.
+
+ * dns/rcode.py: ednsflags wasn't treated as a long. Rcode from/to
+ text converters now understand numeric rcodes. The to_text()
+ method will return a numeric rcode string if it doesn't know
+ a text name for the rcode.
+
+ * examples/reverse.py: Added a new example program that builds a
+ reverse (address-to-name) mapping table from the name-to-address
+ mapping specified by A RRs in zone files.
+
+ * dns/node.py: Added get_rdataset() method.
+
+ * dns/zone.py: Added get_rdataset() and get_rrset() methods. Added
+ iterate_rdatas().
+
+2003-07-31 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: Added the iterate_rdatasets() method which returns
+ a generator which yields (name, rdataset) tuples for all the
+ rdatasets in the zone matching the specified rdatatype.
+
+2003-07-30 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.1.0b2 released)
+
+2003-07-30 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py: Added find_rrset() and find_rdataset() convenience
+ methods. They let you retrieve rdata with the specified name
+ and type in one call.
+
+ * dns/node.py: Nodes no longer have names; owner names are
+ associated with nodes in the Zone object's nodes dictionary.
+
+ * dns/zone.py: Zone objects now implement more of the standard
+ mapping interface. __iter__ has been changed to iterate the keys
+ rather than values to match the standard mapping interface's
+ behavior.
+
+2003-07-20 Bob Halley <halley@dnspython.org>
+
+ * dns/ipv6.py (inet_ntoa): Handle embedded IPv4 addresses.
+
+2003-07-19 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.1.0b1 released)
+
+2003-07-18 Bob Halley <halley@dnspython.org>
+
+ * dns/tsig.py: The TSIG validation of TCP streams where not
+ every message is signed now works correctly.
+
+ * dns/zone.py: Zones can now be compared for equality and
+ inequality. If the other object in the comparison is also
+ a zone, then "the right thing" happens; i.e. the zones are
+ equal iff.: they have the same rdclass, origin, and nodes.
+
+2003-07-17 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py (Message.use_tsig): The method now allows for
+ greater control over the various fields in the generated signature
+ (e.g. fudge).
+ (_WireReader._get_section): UnknownTSIGKey is now raised if an
+ unknown key is encountered, or if a signed message has no keyring.
+
+2003-07-16 Bob Halley <halley@dnspython.org>
+
+ * dns/tokenizer.py (Tokenizer._get_char): get_char and unget_char
+ have been renamed to _get_char and _unget_char since they are not
+ useful to clients of the tokenizer.
+
+2003-07-15 Bob Halley <halley@dnspython.org>
+
+ * dns/zone.py (_MasterReader._rr_line): owner names were being
+ unconditionally relativized; it makes much more sense for them
+ to be relativized according to the relativization setting of
+ the reader.
+
+2003-07-12 Bob Halley <halley@dnspython.org>
+
+ * dns/resolver.py (Resolver.read_resolv_conf): The resolv.conf
+ parser did not allow blank / whitespace-only lines, nor did it
+ allow comments. Both are now supported.
+
+2003-07-11 Bob Halley <halley@dnspython.org>
+
+ * dns/name.py (Name.to_digestable): to_digestable() now
+ requires an origin to be specified if the name is relative.
+ It will raise NeedAbsoluteNameOrOrigin if the name is
+ relative and there is either no origin or the origin is
+ itself relative.
+ (Name.split): returned the wrong answer if depth was 0 or depth
+ was the length of the name. split() now does bounds checking
+ on depth, and raises ValueError if depth < 0 or depth > the length
+ of the name.
+
+2003-07-10 Bob Halley <halley@dnspython.org>
+
+ * dns/ipv6.py (inet_ntoa): The routine now minimizes its output
+ strings. E.g. the IPv6 address
+ "0000:0000:0000:0000:0000:0000:0000:0001" is minimized to "::1".
+ We do not, however, make any effort to display embedded IPv4
+ addresses in the dot-quad notation.
+
+2003-07-09 Bob Halley <halley@dnspython.org>
+
+ * dns/inet.py: We now supply our own AF_INET and AF_INET6
+ constants since AF_INET6 may not always be available. If the
+ socket module has AF_INET6, we will use it. If not, we will
+ use our own value for the constant.
+
+ * dns/query.py: the functions now take an optional af argument
+ specifying the address family to use when creating the socket.
+
+ * dns/rdatatype.py (is_metatype): a typo caused the function
+ to return true only for type OPT.
+
+ * dns/message.py: message section list elements are now RRsets
+ instead of Nodes. This API change makes processing messages
+ easier for many applications.
+
+2003-07-07 Bob Halley <halley@dnspython.org>
+
+ * dns/rrset.py: added. An RRset is a named rdataset.
+
+ * dns/rdataset.py (Rdataset.__eq__): rdatasets may now be compared
+ for equality and inequality with other objects. Rdataset instance
+ variables are now slotted.
+
+ * dns/message.py: The wire format and text format readers are now
+ classes. Variables related to reader state have been moved out
+ of the message class.
+
+2003-07-06 Bob Halley <halley@dnspython.org>
+
+ * dns/name.py (from_text): '@' was not interpreted as the empty
+ name.
+
+ * dns/zone.py: the master file reader derelativized names in rdata
+ relative to the zone's origin, not relative to the current origin.
+ The reader now deals with relativization in two steps. The rdata
+ is read and derelativized using the current origin. The rdata's
+ relativity is then chosen using the zone origin and the relativize
+ boolean. Here's an example.
+
+ $ORIGIN foo.example.
+ $TTL 300
+ bar MX 0 blaz
+
+ If the zone origin is example., and relativization is on, then
+ this fragment will become:
+
+ bar.foo.example. 300 IN MX 0 blaz.foo.example.
+
+ after the first step (derelativization to current origin), and
+
+ bar.foo 300 IN MX 0 blaz.foo
+
+ after the second step (relativization to zone origin).
+
+ * dns/namedict.py: added.
+
+ * dns/zone.py: The master file reader has been made into its
+ own class. Reader-related instance variables have been moved
+ from the zone class into the reader class.
+
+ * dns/zone.py: Add node_factory class attribute. An application
+ can now subclass Zone and Node and have a zone whose nodes are of
+ the subclassed Node type. The from_text(), from_file(), and
+ from_xfr() algorithms now take an optional zone_factory argument.
+ This allows the algorithms to be used to create zones whose class
+ is a subclass of Zone.
+
+
+2003-07-04 Bob Halley <halley@dnspython.org>
+
+ * dns/renderer.py: added new wire format rendering module and
+ converted message.py to use it. Applications which want
+ fine-grained control over the conversion to wire format may call
+ the renderer directly, instead of having it called on their behalf
+ by the message code.
+
+2003-07-02 Bob Halley <halley@dnspython.org>
+
+ * dns/name.py (_validate_labels): The NameTooLong test was
+ incorrect.
+
+ * dns/message.py (Message.to_wire): dns.exception.TooBig is
+ now raised if the wire encoding exceeds the specified
+ maximum size.
+
+2003-07-01 Bob Halley <halley@dnspython.org>
+
+ * dns/message.py: EDNS encoding was broken. from_text()
+ didn't parse rcodes, flags, or eflags correctly. Comparing
+ messages with other types of objects didn't work.
+
+2003-06-30 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.0.0 released)
+
+2003-06-30 Bob Halley <halley@dnspython.org>
+
+ * dns/rdata.py: Rdatas now implement rich comparisons instead of
+ __cmp__.
+
+ * dns/name.py: Names now implement rich comparisons instead of
+ __cmp__.
+
+ * dns/inet.py (inet_ntop): Always use our code, since the code
+ in the socket module doesn't support AF_INET6 conversions if
+ IPv6 sockets are not available on the system.
+
+ * dns/resolver.py (Answer.__init__): A dangling CNAME chain was
+ not raising NoAnswer.
+
+ * Added a simple resolver Cache class.
+
+ * Added an expiration attribute to answer instances.
+
+2003-06-24 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.0.0b3 released)
+
+2003-06-24 Bob Halley <halley@dnspython.org>
+
+ * Renamed module "DNS" to "dns" to avoid conflicting with
+ PyDNS.
+
+2003-06-23 Bob Halley <halley@dnspython.org>
+
+ * The from_text() relativization controls now work the same way as
+ the to_text() controls.
+
+ * DNS/rdata.py: The parsing of generic rdata was broken.
+
+2003-06-21 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.0.0b2 released)
+
+2003-06-21 Bob Halley <halley@dnspython.org>
+
+ * The Python 2.2 socket.inet_aton() doesn't seem to like
+ '255.255.255.255'. We work around this.
+
+ * Fixed bugs in rdata to_wire() and from_wire() routines of a few
+ types. These bugs were discovered by running the tests/zone.py
+ Torture1 test.
+
+ * Added implementation of type APL.
+
+2003-06-20 Bob Halley <halley@dnspython.org>
+
+ * DNS/rdtypes/IN/AAAA.py: Use our own versions of inet_ntop and
+ inet_pton if the socket module doesn't provide them for us.
+
+ * The resolver now does a better job handling exceptions. In
+ particular, it no longer eats all exceptions; rather it handles
+ those exceptions it understands, and leaves the rest uncaught.
+
+ * Exceptions have been pulled into their own module. Almost all
+ exceptions raised by the code are now subclasses of
+ DNS.exception.DNSException. All form errors are subclasses of
+ DNS.exception.FormError (which is itself a subclass of
+ DNS.exception.DNSException).
+
+2003-06-19 Bob Halley <halley@dnspython.org>
+
+ * Added implementations of types DS, NXT, SIG, and WKS.
+
+ * __cmp__ for type A and AAAA could produce incorrect results.
+
+2003-06-18 Bob Halley <halley@dnspython.org>
+
+ * Started test suites for zone.py and tokenizer.py.
+
+ * Added implementation of type KEY.
+
+ * DNS/rdata.py(_base64ify): \n could be emitted erroneously.
+
+ * DNS/rdtypes/ANY/SOA.py (SOA.from_text): The SOA RNAME field could
+ be set to the value of MNAME in common cases.
+
+ * DNS/rdtypes/ANY/X25.py: __init__ was broken.
+
+ * DNS/zone.py (from_text): $TTL handling erroneously caused the
+ next line to be eaten.
+
+ * DNS/tokenizer.py (Tokenizer.get): parsing was broken for empty
+ quoted strings. Quoted strings didn't handle \ddd escapes. Such
+ escapes are appear not to comply with RFC 1035, but BIND allows
+ them and they seem useful, so we allow them too.
+
+ * DNS/rdtypes/ANY/ISDN.py (ISDN.from_text): parsing was
+ broken for ISDN RRs without subaddresses.
+
+ * DNS/zone.py (from_file): from_file() didn't work because
+ some required parameters were not passed to from_text().
+
+2003-06-17 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.0.0b1 released)
+
+2003-06-17 Bob Halley <halley@dnspython.org>
+
+ * Added implementation of type PX.
+
+2003-06-16 Bob Halley <halley@dnspython.org>
+
+ * Added implementation of types CERT, GPOS, LOC, NSAP, NSAP-PTR.
+
+ * DNS/rdatatype.py (_by_value): A cut-and-paste error had broken
+ NSAP and NSAP-PTR.
+
+2003-06-12 Bob Halley <halley@dnspython.org>
+
+ * Created a tests directory and started adding tests.
+
+ * Added "and its documentation" to the permission grant in the
+ license.
+
+2003-06-12 Bob Halley <halley@dnspython.org>
+
+ * DNS/name.py (Name.is_wild): is_wild() erroneously raised IndexError
+ if the name was empty.
+
+2003-06-10 Bob Halley <halley@dnspython.org>
+
+ * Added implementations of types AFSDB, X25, and ISDN.
+
+ * The documentation associated with the various rdata types has been
+ improved. In particular, instance variables are now described.
+
+2003-06-09 Bob Halley <halley@dnspython.org>
+
+ * Added implementations of types HINFO, RP, and RT.
+
+ * DNS/message.py (make_query): Document that make_query() sets
+ flags to DNS.flags.RD, and chooses a random query id.
+
+2003-06-05 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.0.0a2 released)
+
+2003-06-05 Bob Halley <halley@dnspython.org>
+
+ * DNS/node.py: removed __getitem__ and __setitem__, since
+ they are not used by the codebase and were not useful in
+ general either.
+
+ * DNS/message.py (from_file): from_file() now allows a
+ filename to be specified instead of a file object.
+
+ * DNS/rdataset.py: The is_compatible() method of the
+ DNS.rdataset.Rdataset class was deleted.
+
+2003-06-04 Bob Halley <halley@dnspython.org>
+
+ * DNS/name.py (class Name): Names are now immutable.
+
+ * DNS/name.py: the is_comparable() method has been removed, since
+ names are always comparable.
+
+ * DNS/resolver.py (Resolver.query): A query could run for up
+ to the lifetime + the timeout. This has been corrected and the
+ query will now only run up to the lifetime.
+
+2003-06-03 Bob Halley <halley@dnspython.org>
+
+ * DNS/resolver.py: removed the 'new' function since it is not the
+ style of the library to have such a function. Call
+ DNS.resolver.Resolver() to make a new resolver.
+
+2003-06-03 Bob Halley <halley@dnspython.org>
+
+ * DNS/resolver.py (Resolver._config_win32_fromkey): The DhcpServer
+ list is space separated, not comma separated.
+
+2003-06-03 Bob Halley <halley@dnspython.org>
+
+ * DNS/update.py: Added an update module to make generating updates
+ easier.
+
+2003-06-03 Bob Halley <halley@dnspython.org>
+
+ * Commas were missing in some of the __all__ entries in various
+ __init__.py files.
+
+2003-05-30 Bob Halley <halley@dnspython.org>
+
+ * (Version 1.0.0a1 released)
diff --git a/lib/dnspython/LICENSE b/lib/dnspython/LICENSE
new file mode 100644
index 0000000000..633c18c1e7
--- /dev/null
+++ b/lib/dnspython/LICENSE
@@ -0,0 +1,14 @@
+Copyright (C) 2001-2003 Nominum, Inc.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose with or without fee is hereby granted,
+provided that the above copyright notice and this permission notice
+appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/lib/dnspython/MANIFEST.in b/lib/dnspython/MANIFEST.in
new file mode 100644
index 0000000000..d58fb8ba04
--- /dev/null
+++ b/lib/dnspython/MANIFEST.in
@@ -0,0 +1,3 @@
+include LICENSE ChangeLog TODO
+recursive-include examples *.txt *.py
+recursive-include tests *.txt *.py Makefile *.good example
diff --git a/lib/dnspython/Makefile b/lib/dnspython/Makefile
new file mode 100644
index 0000000000..3dbfe95346
--- /dev/null
+++ b/lib/dnspython/Makefile
@@ -0,0 +1,56 @@
+# Copyright (C) 2003-2007, 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# $Id: Makefile,v 1.16 2004/03/19 00:17:27 halley Exp $
+
+PYTHON=python
+
+all:
+ ${PYTHON} ./setup.py build
+
+install:
+ ${PYTHON} ./setup.py install
+
+clean:
+ ${PYTHON} ./setup.py clean --all
+ find . -name '*.pyc' -exec rm {} \;
+ find . -name '*.pyo' -exec rm {} \;
+ rm -f TAGS
+
+distclean: clean docclean
+ rm -rf build dist
+ rm -f MANIFEST
+
+doc:
+ epydoc -n dnspython -u http://www.dnspython.org \
+ dns/*.py dns/rdtypes/*.py dns/rdtypes/ANY/*.py \
+ dns/rdtypes/IN/*.py
+
+dockits: doc
+ mv html dnspython-html
+ tar czf html.tar.gz dnspython-html
+ zip -r html.zip dnspython-html
+ mv dnspython-html html
+
+docclean:
+ rm -rf html.tar.gz html.zip html
+
+kits:
+ ${PYTHON} ./setup.py sdist --formats=gztar,zip
+# ${PYTHON} ./setup.py bdist_wininst
+# ${PYTHON} ./setup.py bdist_rpm
+
+tags:
+ find . -name '*.py' -print | etags -
diff --git a/lib/dnspython/README b/lib/dnspython/README
new file mode 100644
index 0000000000..d53dac61ab
--- /dev/null
+++ b/lib/dnspython/README
@@ -0,0 +1,402 @@
+dnspython
+
+INTRODUCTION
+
+dnspython is a DNS toolkit for Python. It supports almost all record
+types. It can be used for queries, zone transfers, and dynamic
+updates. It supports TSIG authenticated messages and EDNS0.
+
+dnspython provides both high and low level access to DNS. The high
+level classes perform queries for data of a given name, type, and
+class, and return an answer set. The low level classes allow direct
+manipulation of DNS zones, messages, names, and records.
+
+To see a few of the ways dnspython can be used, look in the examples/
+directory.
+
+dnspython originated at Nominum where it was developed to facilitate
+the testing of DNS software. Nominum has generously allowed it to be
+open sourced under a BSD-style license, and helps support its future
+development by continuing to employ the author :).
+
+
+ABOUT THIS RELEASE
+
+This is dnspython 1.9.2
+
+New since 1.9.1:
+
+ Nothing.
+
+Bugs fixed since 1.9.1:
+
+ The dns.dnssec module didn't work at all due to missing
+ imports that escaped detection in testing because the test
+ suite also did the imports. The third time is the charm!
+
+New since 1.9.0:
+
+ Nothing.
+
+Bugs fixed since 1.9.0:
+
+ The dns.dnssec module didn't work with DSA due to namespace
+ contamination from a "from"-style import.
+
+New since 1.8.0:
+
+ dnspython now uses poll() instead of select() when available.
+
+ Basic DNSSEC validation can be done using dns.dnsec.validate()
+ and dns.dnssec.validate_rrsig() if you have PyCrypto 2.3 or
+ later installed. Complete secure resolution is not yet
+ available.
+
+ Added key_id() to the DNSSEC module, which computes the DNSSEC
+ key id of a DNSKEY rdata.
+
+ Added make_ds() to the DNSSEC module, which returns the DS RR
+ for a given DNSKEY rdata.
+
+ dnspython now raises an exception if HMAC-SHA284 or
+ HMAC-SHA512 are used with a Python older than 2.5.2. (Older
+ Pythons do not compute the correct value.)
+
+ Symbolic constants are now available for TSIG algorithm names.
+
+Bugs fixed since 1.8.0
+
+ dns.resolver.zone_for_name() didn't handle a query response
+ with a CNAME or DNAME correctly in some cases.
+
+ When specifying rdata types and classes as text, Unicode
+ strings may now be used.
+
+ Hashlib compatibility issues have been fixed.
+
+ dns.message now imports dns.edns.
+
+ The TSIG algorithm value was passed incorrectly to use_tsig()
+ in some cases.
+
+New since 1.7.1:
+
+ Support for hmac-sha1, hmac-sha224, hmac-sha256, hmac-sha384
+ and hmac-sha512 has been contributed by Kevin Chen.
+
+ The tokenizer's tokens are now Token objects instead of (type,
+ value) tuples.
+
+Bugs fixed since 1.7.1:
+
+ Escapes in masterfiles now work correctly. Previously they
+ were only working correctly when the text involved was part of
+ a domain name.
+
+ When constructing a DDNS update, if the present() method was
+ used with a single rdata, a zero TTL was not added.
+
+ The entropy pool needed locking to be thread safe.
+
+ The entropy pool's reading of /dev/random could cause
+ dnspython to block.
+
+ The entropy pool did buffered reads, potentially consuming more
+ randomness than we needed.
+
+ The entropy pool did not seed with high quality randomness on
+ Windows.
+
+ SRV records were compared incorrectly.
+
+ In the e164 query function, the resolver parameter was not
+ used.
+
+New since 1.7.0:
+
+ Nothing
+
+Bugs fixed since 1.7.0:
+
+ The 1.7.0 kitting process inadventently omitted the code for the
+ DLV RR.
+
+ Negative DDNS prerequisites are now handled correctly.
+
+New since 1.6.0:
+
+ Rdatas now have a to_digestable() method, which returns the
+ DNSSEC canonical form of the rdata, suitable for use in
+ signature computations.
+
+ The NSEC3, NSEC3PARAM, DLV, and HIP RR types are now supported.
+
+ An entropy module has been added and is used to randomize query ids.
+
+ EDNS0 options are now supported.
+
+ UDP IXFR is now supported.
+
+ The wire format parser now has a 'one_rr_per_rrset' mode, which
+ suppresses the usual coalescing of all RRs of a given type into a
+ single RRset.
+
+ Various helpful DNSSEC-related constants are now defined.
+
+ The resolver's query() method now has an optional 'source' parameter,
+ allowing the source IP address to be specified.
+
+Bugs fixed since 1.6.0:
+
+ On Windows, the resolver set the domain incorrectly.
+
+ DS RR parsing only allowed one Base64 chunk.
+
+ TSIG validation didn't always use absolute names.
+
+ NSEC.to_text() only printed the last window.
+
+ We did not canonicalize IPv6 addresses before comparing them; we
+ would thus treat equivalent but different textual forms, e.g.
+ "1:00::1" and "1::1" as being non-equivalent.
+
+ If the peer set a TSIG error, we didn't raise an exception.
+
+ Some EDNS bugs in the message code have been fixed (see the ChangeLog
+ for details).
+
+New since 1.5.0:
+ Added dns.inet.is_multicast().
+
+Bugs fixed since 1.5.0:
+
+ If select() raises an exception due to EINTR, we should just
+ select() again.
+
+ If the queried address is a multicast address, then don't
+ check that the address of the response is the same as the
+ address queried.
+
+ NAPTR comparisons didn't compare the preference field due to a
+ typo.
+
+ Testing of whether a Windows NIC is enabled now works on Vista
+ thanks to code contributed by Paul Marks.
+
+New since 1.4.0:
+
+ Answer objects now support more of the python sequence
+ protocol, forwarding the requests to the answer rrset.
+ E.g. "for a in answer" is equivalent to "for a in
+ answer.rrset", "answer[i]" is equivalent to "answer.rrset[i]",
+ and "answer[i:j]" is equivalent to "answer.rrset[i:j]".
+
+ Making requests using EDNS, including indicating DNSSEC awareness,
+ is now easier. For example, you can now say:
+
+ q = dns.message.make_query('www.dnspython.org', 'MX',
+ want_dnssec=True)
+
+ dns.query.xfr() can now be used for IXFR.
+
+ Support has been added for the DHCID, IPSECKEY, and SPF RR types.
+
+ UDP messages from unexpected sources can now be ignored by
+ setting ignore_unexpected to True when calling dns.query.udp.
+
+Bugs fixed since 1.4.0:
+
+ If /etc/resolv.conf didn't exist, we raised an exception
+ instead of simply using the default resolver configuration.
+
+ In dns.resolver.Resolver._config_win32_fromkey(), we were
+ passing the wrong variable to self._config_win32_search().
+
+New since 1.3.5:
+
+ You can now convert E.164 numbers to/from their ENUM name
+ forms:
+
+ >>> import dns.e164
+ >>> n = dns.e164.from_e164("+1 555 1212")
+ >>> n
+ <DNS name 2.1.2.1.5.5.5.1.e164.arpa.>
+ >>> dns.e164.to_e164(n)
+ '+15551212'
+
+ You can now convert IPv4 and IPv6 address to/from their
+ corresponding DNS reverse map names:
+
+ >>> import dns.reversename
+ >>> n = dns.reversename.from_address("127.0.0.1")
+ >>> n
+ <DNS name 1.0.0.127.in-addr.arpa.>
+ >>> dns.reversename.to_address(n)
+ '127.0.0.1'
+
+ You can now convert between Unicode strings and their IDN ACE
+ form:
+
+ >>> n = dns.name.from_text(u'les-\u00e9l\u00e8ves.example.')
+ >>> n
+ <DNS name xn--les-lves-50ai.example.>
+ >>> n.to_unicode()
+ u'les-\xe9l\xe8ves.example.'
+
+ The origin parameter to dns.zone.from_text() and dns.zone.to_text()
+ is now optional. If not specified, the origin will be taken from
+ the first $ORIGIN statement in the master file.
+
+ Sanity checking of a zone can be disabled; this is useful when
+ working with files which are zone fragments.
+
+Bugs fixed since 1.3.5:
+
+ The correct delimiter was not used when retrieving the
+ list of nameservers from the registry in certain versions of
+ windows.
+
+ The floating-point version of latitude and longitude in LOC RRs
+ (float_latitude and float_longitude) had incorrect signs for
+ south latitudes and west longitudes.
+
+ BIND 8 TTL syntax is now accepted in all TTL-like places (i.e.
+ SOA fields refresh, retry, expire, and minimum; SIG/RRSIG
+ field original_ttl).
+
+ TTLs are now bounds checked when their text form is parsed,
+ and their values must be in the closed interval [0, 2^31 - 1].
+
+New since 1.3.4:
+
+ In the resolver, if time goes backward a little bit, ignore
+ it.
+
+ zone_for_name() has been added to the resolver module. It
+ returns the zone which is authoritative for the specified
+ name, which is handy for dynamic update. E.g.
+
+ import dns.resolver
+ print dns.resolver.zone_for_name('www.dnspython.org')
+
+ will output "dnspython.org." and
+
+ print dns.resolver.zone_for_name('a.b.c.d.e.f.example.')
+
+ will output ".".
+
+ The default resolver can be fetched with the
+ get_default_resolver() method.
+
+ You can now get the parent (immediate superdomain) of a name
+ by using the parent() method.
+
+ Zone.iterate_rdatasets() and Zone.iterate_rdatas() now have
+ a default rdtype of dns.rdatatype.ANY like the documentation
+ says.
+
+ A Dynamic DNS example, ddns.py, has been added.
+
+New since 1.3.3:
+
+ The source address and port may now be specified when calling
+ dns.query.{udp,tcp,xfr}.
+
+ The resolver now does exponential backoff each time it runs
+ through all of the nameservers.
+
+ Rcodes which indicate a nameserver is likely to be a
+ "permanent failure" for a query cause the nameserver to be removed
+ from the mix for that query.
+
+New since 1.3.2:
+
+ dns.message.Message.find_rrset() now uses an index, vastly
+ improving the from_wire() performance of large messages such
+ as zone transfers.
+
+ Added dns.message.make_response(), which creates a skeletal
+ response for the specified query.
+
+ Added opcode() and set_opcode() convenience methods to the
+ dns.message.Message class. Added the request_payload
+ attribute to the Message class.
+
+ The 'file' parameter of dns.name.Name.to_wire() is now
+ optional; if omitted, the wire form will be returned as the
+ value of the function.
+
+ dns.zone.from_xfr() in relativization mode incorrectly set
+ zone.origin to the empty name.
+
+ The masterfile parser incorrectly rejected TXT records where a
+ value was not quoted.
+
+New since 1.3.1:
+
+ The NSEC format doesn't allow specifying types by number, so
+ we shouldn't either. (Using the unknown type format is still
+ OK though.)
+
+ The resolver wasn't catching dns.exception.Timeout, so a timeout
+ erroneously caused the whole resolution to fail instead of just
+ going on to the next server.
+
+ The renderer module didn't import random, causing an exception
+ to be raised if a query id wasn't provided when a Renderer was
+ created.
+
+ The conversion of LOC milliseconds values from text to binary was
+ incorrect if the length of the milliseconds string was not 3.
+
+New since 1.3.0:
+
+ Added support for the SSHFP type.
+
+New since 1.2.0:
+
+ Added support for new DNSSEC types RRSIG, NSEC, and DNSKEY.
+
+This release fixes all known bugs.
+
+See the ChangeLog file for more detailed information on changes since
+the prior release.
+
+
+REQUIREMENTS
+
+Python 2.4 or later.
+
+
+INSTALLATION
+
+To build and install dnspython, type
+
+ python setup.py install
+
+
+HOME PAGE
+
+For the latest in releases, documentation, and information, visit the
+dnspython home page at
+
+ http://www.dnspython.org/
+
+
+
+DOCUMENTATION
+
+Documentation is sparse at the moment. Use pydoc, or read the HTML
+documentation at the dnspython home page, or download the HTML
+documentation.
+
+
+BUG REPORTS
+
+Bug reports may be sent to bugs@dnspython.org
+
+
+MAILING LISTS
+
+A number of mailing lists are available. Visit the dnspython home
+page to subscribe or unsubscribe.
diff --git a/lib/dnspython/TODO b/lib/dnspython/TODO
new file mode 100644
index 0000000000..59ce1be1ce
--- /dev/null
+++ b/lib/dnspython/TODO
@@ -0,0 +1,17 @@
+Tutorial documentation
+
+More examples
+
+It would be nice to have a tokenizer that used regular expressions
+because it would be faster.
+
+Teach the resolver about DNAME (right now it relies on the server adding
+synthesized CNAMEs)
+
+Add TKEY support.
+
+TSIG works, but needs cleaning up -- probably better encapsulation of
+TSIG state to make things much simpler and easier to use.
+
+Pickling support.
+
diff --git a/lib/dnspython/dns/__init__.py b/lib/dnspython/dns/__init__.py
new file mode 100644
index 0000000000..56e1e8a2ea
--- /dev/null
+++ b/lib/dnspython/dns/__init__.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2003-2007, 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython DNS toolkit"""
+
+__all__ = [
+ 'dnssec',
+ 'e164',
+ 'edns',
+ 'entropy',
+ 'exception',
+ 'flags',
+ 'hash',
+ 'inet',
+ 'ipv4',
+ 'ipv6',
+ 'message',
+ 'name',
+ 'namedict',
+ 'node',
+ 'opcode',
+ 'query',
+ 'rcode',
+ 'rdata',
+ 'rdataclass',
+ 'rdataset',
+ 'rdatatype',
+ 'renderer',
+ 'resolver',
+ 'reversename',
+ 'rrset',
+ 'set',
+ 'tokenizer',
+ 'tsig',
+ 'tsigkeyring',
+ 'ttl',
+ 'rdtypes',
+ 'update',
+ 'version',
+ 'zone',
+]
diff --git a/lib/dnspython/dns/dnssec.py b/lib/dnspython/dns/dnssec.py
new file mode 100644
index 0000000000..a595fd4478
--- /dev/null
+++ b/lib/dnspython/dns/dnssec.py
@@ -0,0 +1,372 @@
+# Copyright (C) 2003-2007, 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNSSEC-related functions and constants."""
+
+import cStringIO
+import struct
+import time
+
+import dns.exception
+import dns.hash
+import dns.name
+import dns.node
+import dns.rdataset
+import dns.rdata
+import dns.rdatatype
+import dns.rdataclass
+
+class UnsupportedAlgorithm(dns.exception.DNSException):
+ """Raised if an algorithm is not supported."""
+ pass
+
+class ValidationFailure(dns.exception.DNSException):
+ """The DNSSEC signature is invalid."""
+ pass
+
+RSAMD5 = 1
+DH = 2
+DSA = 3
+ECC = 4
+RSASHA1 = 5
+DSANSEC3SHA1 = 6
+RSASHA1NSEC3SHA1 = 7
+RSASHA256 = 8
+RSASHA512 = 10
+INDIRECT = 252
+PRIVATEDNS = 253
+PRIVATEOID = 254
+
+_algorithm_by_text = {
+ 'RSAMD5' : RSAMD5,
+ 'DH' : DH,
+ 'DSA' : DSA,
+ 'ECC' : ECC,
+ 'RSASHA1' : RSASHA1,
+ 'DSANSEC3SHA1' : DSANSEC3SHA1,
+ 'RSASHA1NSEC3SHA1' : RSASHA1NSEC3SHA1,
+ 'RSASHA256' : RSASHA256,
+ 'RSASHA512' : RSASHA512,
+ 'INDIRECT' : INDIRECT,
+ 'PRIVATEDNS' : PRIVATEDNS,
+ 'PRIVATEOID' : PRIVATEOID,
+ }
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be true inverse.
+
+_algorithm_by_value = dict([(y, x) for x, y in _algorithm_by_text.iteritems()])
+
+def algorithm_from_text(text):
+ """Convert text into a DNSSEC algorithm value
+ @rtype: int"""
+
+ value = _algorithm_by_text.get(text.upper())
+ if value is None:
+ value = int(text)
+ return value
+
+def algorithm_to_text(value):
+ """Convert a DNSSEC algorithm value to text
+ @rtype: string"""
+
+ text = _algorithm_by_value.get(value)
+ if text is None:
+ text = str(value)
+ return text
+
+def _to_rdata(record, origin):
+ s = cStringIO.StringIO()
+ record.to_wire(s, origin=origin)
+ return s.getvalue()
+
+def key_id(key, origin=None):
+ rdata = _to_rdata(key, origin)
+ if key.algorithm == RSAMD5:
+ return (ord(rdata[-3]) << 8) + ord(rdata[-2])
+ else:
+ total = 0
+ for i in range(len(rdata) / 2):
+ total += (ord(rdata[2 * i]) << 8) + ord(rdata[2 * i + 1])
+ if len(rdata) % 2 != 0:
+ total += ord(rdata[len(rdata) - 1]) << 8
+ total += ((total >> 16) & 0xffff);
+ return total & 0xffff
+
+def make_ds(name, key, algorithm, origin=None):
+ if algorithm.upper() == 'SHA1':
+ dsalg = 1
+ hash = dns.hash.get('SHA1')()
+ elif algorithm.upper() == 'SHA256':
+ dsalg = 2
+ hash = dns.hash.get('SHA256')()
+ else:
+ raise UnsupportedAlgorithm, 'unsupported algorithm "%s"' % algorithm
+
+ if isinstance(name, (str, unicode)):
+ name = dns.name.from_text(name, origin)
+ hash.update(name.canonicalize().to_wire())
+ hash.update(_to_rdata(key, origin))
+ digest = hash.digest()
+
+ dsrdata = struct.pack("!HBB", key_id(key), key.algorithm, dsalg) + digest
+ return dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.DS, dsrdata, 0,
+ len(dsrdata))
+
+def _find_key(keys, rrsig):
+ value = keys.get(rrsig.signer)
+ if value is None:
+ return None
+ if isinstance(value, dns.node.Node):
+ try:
+ rdataset = node.find_rdataset(dns.rdataclass.IN,
+ dns.rdatatype.DNSKEY)
+ except KeyError:
+ return None
+ else:
+ rdataset = value
+ for rdata in rdataset:
+ if rdata.algorithm == rrsig.algorithm and \
+ key_id(rdata) == rrsig.key_tag:
+ return rdata
+ return None
+
+def _is_rsa(algorithm):
+ return algorithm in (RSAMD5, RSASHA1,
+ RSASHA1NSEC3SHA1, RSASHA256,
+ RSASHA512)
+
+def _is_dsa(algorithm):
+ return algorithm in (DSA, DSANSEC3SHA1)
+
+def _is_md5(algorithm):
+ return algorithm == RSAMD5
+
+def _is_sha1(algorithm):
+ return algorithm in (DSA, RSASHA1,
+ DSANSEC3SHA1, RSASHA1NSEC3SHA1)
+
+def _is_sha256(algorithm):
+ return algorithm == RSASHA256
+
+def _is_sha512(algorithm):
+ return algorithm == RSASHA512
+
+def _make_hash(algorithm):
+ if _is_md5(algorithm):
+ return dns.hash.get('MD5')()
+ if _is_sha1(algorithm):
+ return dns.hash.get('SHA1')()
+ if _is_sha256(algorithm):
+ return dns.hash.get('SHA256')()
+ if _is_sha512(algorithm):
+ return dns.hash.get('SHA512')()
+ raise ValidationFailure, 'unknown hash for algorithm %u' % algorithm
+
+def _make_algorithm_id(algorithm):
+ if _is_md5(algorithm):
+ oid = [0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05]
+ elif _is_sha1(algorithm):
+ oid = [0x2b, 0x0e, 0x03, 0x02, 0x1a]
+ elif _is_sha256(algorithm):
+ oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
+ elif _is_sha512(algorithm):
+ oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
+ else:
+ raise ValidationFailure, 'unknown algorithm %u' % algorithm
+ olen = len(oid)
+ dlen = _make_hash(algorithm).digest_size
+ idbytes = [0x30] + [8 + olen + dlen] + \
+ [0x30, olen + 4] + [0x06, olen] + oid + \
+ [0x05, 0x00] + [0x04, dlen]
+ return ''.join(map(chr, idbytes))
+
+def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
+ """Validate an RRset against a single signature rdata
+
+ The owner name of the rrsig is assumed to be the same as the owner name
+ of the rrset.
+
+ @param rrset: The RRset to validate
+ @type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
+ tuple
+ @param rrsig: The signature rdata
+ @type rrsig: dns.rrset.Rdata
+ @param keys: The key dictionary.
+ @type keys: a dictionary keyed by dns.name.Name with node or rdataset values
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name or None
+ @param now: The time to use when validating the signatures. The default
+ is the current time.
+ @type now: int
+ """
+
+ if isinstance(origin, (str, unicode)):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ key = _find_key(keys, rrsig)
+ if not key:
+ raise ValidationFailure, 'unknown key'
+
+ # For convenience, allow the rrset to be specified as a (name, rdataset)
+ # tuple as well as a proper rrset
+ if isinstance(rrset, tuple):
+ rrname = rrset[0]
+ rdataset = rrset[1]
+ else:
+ rrname = rrset.name
+ rdataset = rrset
+
+ if now is None:
+ now = time.time()
+ if rrsig.expiration < now:
+ raise ValidationFailure, 'expired'
+ if rrsig.inception > now:
+ raise ValidationFailure, 'not yet valid'
+
+ hash = _make_hash(rrsig.algorithm)
+
+ if _is_rsa(rrsig.algorithm):
+ keyptr = key.key
+ (bytes,) = struct.unpack('!B', keyptr[0:1])
+ keyptr = keyptr[1:]
+ if bytes == 0:
+ (bytes,) = struct.unpack('!H', keyptr[0:2])
+ keyptr = keyptr[2:]
+ rsa_e = keyptr[0:bytes]
+ rsa_n = keyptr[bytes:]
+ keylen = len(rsa_n) * 8
+ pubkey = Crypto.PublicKey.RSA.construct(
+ (Crypto.Util.number.bytes_to_long(rsa_n),
+ Crypto.Util.number.bytes_to_long(rsa_e)))
+ sig = (Crypto.Util.number.bytes_to_long(rrsig.signature),)
+ elif _is_dsa(rrsig.algorithm):
+ keyptr = key.key
+ (t,) = struct.unpack('!B', keyptr[0:1])
+ keyptr = keyptr[1:]
+ octets = 64 + t * 8
+ dsa_q = keyptr[0:20]
+ keyptr = keyptr[20:]
+ dsa_p = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_g = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_y = keyptr[0:octets]
+ pubkey = Crypto.PublicKey.DSA.construct(
+ (Crypto.Util.number.bytes_to_long(dsa_y),
+ Crypto.Util.number.bytes_to_long(dsa_g),
+ Crypto.Util.number.bytes_to_long(dsa_p),
+ Crypto.Util.number.bytes_to_long(dsa_q)))
+ (dsa_r, dsa_s) = struct.unpack('!20s20s', rrsig.signature[1:])
+ sig = (Crypto.Util.number.bytes_to_long(dsa_r),
+ Crypto.Util.number.bytes_to_long(dsa_s))
+ else:
+ raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
+
+ hash.update(_to_rdata(rrsig, origin)[:18])
+ hash.update(rrsig.signer.to_digestable(origin))
+
+ if rrsig.labels < len(rrname) - 1:
+ suffix = rrname.split(rrsig.labels + 1)[1]
+ rrname = dns.name.from_text('*', suffix)
+ rrnamebuf = rrname.to_digestable(origin)
+ rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
+ rrsig.original_ttl)
+ rrlist = sorted(rdataset);
+ for rr in rrlist:
+ hash.update(rrnamebuf)
+ hash.update(rrfixed)
+ rrdata = rr.to_digestable(origin)
+ rrlen = struct.pack('!H', len(rrdata))
+ hash.update(rrlen)
+ hash.update(rrdata)
+
+ digest = hash.digest()
+
+ if _is_rsa(rrsig.algorithm):
+ # PKCS1 algorithm identifier goop
+ digest = _make_algorithm_id(rrsig.algorithm) + digest
+ padlen = keylen / 8 - len(digest) - 3
+ digest = chr(0) + chr(1) + chr(0xFF) * padlen + chr(0) + digest
+ elif _is_dsa(rrsig.algorithm):
+ pass
+ else:
+ # Raise here for code clarity; this won't actually ever happen
+ # since if the algorithm is really unknown we'd already have
+ # raised an exception above
+ raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
+
+ if not pubkey.verify(digest, sig):
+ raise ValidationFailure, 'verify failure'
+
+def _validate(rrset, rrsigset, keys, origin=None, now=None):
+ """Validate an RRset
+
+ @param rrset: The RRset to validate
+ @type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
+ tuple
+ @param rrsigset: The signature RRset
+ @type rrsigset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
+ tuple
+ @param keys: The key dictionary.
+ @type keys: a dictionary keyed by dns.name.Name with node or rdataset values
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name or None
+ @param now: The time to use when validating the signatures. The default
+ is the current time.
+ @type now: int
+ """
+
+ if isinstance(origin, (str, unicode)):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ if isinstance(rrset, tuple):
+ rrname = rrset[0]
+ else:
+ rrname = rrset.name
+
+ if isinstance(rrsigset, tuple):
+ rrsigname = rrsigset[0]
+ rrsigrdataset = rrsigset[1]
+ else:
+ rrsigname = rrsigset.name
+ rrsigrdataset = rrsigset
+
+ rrname = rrname.choose_relativity(origin)
+ rrsigname = rrname.choose_relativity(origin)
+ if rrname != rrsigname:
+ raise ValidationFailure, "owner names do not match"
+
+ for rrsig in rrsigrdataset:
+ try:
+ _validate_rrsig(rrset, rrsig, keys, origin, now)
+ return
+ except ValidationFailure, e:
+ pass
+ raise ValidationFailure, "no RRSIGs validated"
+
+def _need_pycrypto(*args, **kwargs):
+ raise NotImplementedError, "DNSSEC validation requires pycrypto"
+
+try:
+ import Crypto.PublicKey.RSA
+ import Crypto.PublicKey.DSA
+ import Crypto.Util.number
+ validate = _validate
+ validate_rrsig = _validate_rrsig
+except ImportError:
+ validate = _need_pycrypto
+ validate_rrsig = _need_pycrypto
diff --git a/lib/dnspython/dns/e164.py b/lib/dnspython/dns/e164.py
new file mode 100644
index 0000000000..d8f71ec799
--- /dev/null
+++ b/lib/dnspython/dns/e164.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2006, 2007, 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS E.164 helpers
+
+@var public_enum_domain: The DNS public ENUM domain, e164.arpa.
+@type public_enum_domain: dns.name.Name object
+"""
+
+import dns.exception
+import dns.name
+import dns.resolver
+
+public_enum_domain = dns.name.from_text('e164.arpa.')
+
+def from_e164(text, origin=public_enum_domain):
+ """Convert an E.164 number in textual form into a Name object whose
+ value is the ENUM domain name for that number.
+ @param text: an E.164 number in textual form.
+ @type text: str
+ @param origin: The domain in which the number should be constructed.
+ The default is e164.arpa.
+ @type origin: dns.name.Name object or None
+ @rtype: dns.name.Name object
+ """
+ parts = [d for d in text if d.isdigit()]
+ parts.reverse()
+ return dns.name.from_text('.'.join(parts), origin=origin)
+
+def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
+ """Convert an ENUM domain name into an E.164 number.
+ @param name: the ENUM domain name.
+ @type name: dns.name.Name object.
+ @param origin: A domain containing the ENUM domain name. The
+ name is relativized to this domain before being converted to text.
+ @type origin: dns.name.Name object or None
+ @param want_plus_prefix: if True, add a '+' to the beginning of the
+ returned number.
+ @rtype: str
+ """
+ if not origin is None:
+ name = name.relativize(origin)
+ dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
+ if len(dlabels) != len(name.labels):
+ raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
+ dlabels.reverse()
+ text = ''.join(dlabels)
+ if want_plus_prefix:
+ text = '+' + text
+ return text
+
+def query(number, domains, resolver=None):
+ """Look for NAPTR RRs for the specified number in the specified domains.
+
+ e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
+ """
+ if resolver is None:
+ resolver = dns.resolver.get_default_resolver()
+ for domain in domains:
+ if isinstance(domain, (str, unicode)):
+ domain = dns.name.from_text(domain)
+ qname = dns.e164.from_e164(number, domain)
+ try:
+ return resolver.query(qname, 'NAPTR')
+ except dns.resolver.NXDOMAIN:
+ pass
+ raise dns.resolver.NXDOMAIN
diff --git a/lib/dnspython/dns/edns.py b/lib/dnspython/dns/edns.py
new file mode 100644
index 0000000000..1731cedde4
--- /dev/null
+++ b/lib/dnspython/dns/edns.py
@@ -0,0 +1,142 @@
+# Copyright (C) 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""EDNS Options"""
+
+NSID = 3
+
+class Option(object):
+ """Base class for all EDNS option types.
+ """
+
+ def __init__(self, otype):
+ """Initialize an option.
+ @param rdtype: The rdata type
+ @type rdtype: int
+ """
+ self.otype = otype
+
+ def to_wire(self, file):
+ """Convert an option to wire format.
+ """
+ raise NotImplementedError
+
+ def from_wire(cls, otype, wire, current, olen):
+ """Build an EDNS option object from wire format
+
+ @param otype: The option type
+ @type otype: int
+ @param wire: The wire-format message
+ @type wire: string
+ @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param olen: The length of the wire-format option data
+ @type olen: int
+ @rtype: dns.edns.Option instance"""
+ raise NotImplementedError
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ """Compare an EDNS option with another option of the same type.
+ Return < 0 if self < other, 0 if self == other, and > 0 if self > other.
+ """
+ raise NotImplementedError
+
+ def __eq__(self, other):
+ if not isinstance(other, Option):
+ return False
+ if self.otype != other.otype:
+ return False
+ return self._cmp(other) == 0
+
+ def __ne__(self, other):
+ if not isinstance(other, Option):
+ return False
+ if self.otype != other.otype:
+ return False
+ return self._cmp(other) != 0
+
+ def __lt__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) <= 0
+
+ def __ge__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) >= 0
+
+ def __gt__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) > 0
+
+
+class GenericOption(Option):
+ """Generic EDNS option class
+
+ This class is used for EDNS option types for which we have no better
+ implementation.
+ """
+
+ def __init__(self, otype, data):
+ super(GenericOption, self).__init__(otype)
+ self.data = data
+
+ def to_wire(self, file):
+ file.write(self.data)
+
+ def from_wire(cls, otype, wire, current, olen):
+ return cls(otype, wire[current : current + olen])
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ return cmp(self.data, other.data)
+
+_type_to_class = {
+}
+
+def get_option_class(otype):
+ cls = _type_to_class.get(otype)
+ if cls is None:
+ cls = GenericOption
+ return cls
+
+def option_from_wire(otype, wire, current, olen):
+ """Build an EDNS option object from wire format
+
+ @param otype: The option type
+ @type otype: int
+ @param wire: The wire-format message
+ @type wire: string
+ @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param olen: The length of the wire-format option data
+ @type olen: int
+ @rtype: dns.edns.Option instance"""
+
+ cls = get_option_class(otype)
+ return cls.from_wire(otype, wire, current, olen)
diff --git a/lib/dnspython/dns/entropy.py b/lib/dnspython/dns/entropy.py
new file mode 100644
index 0000000000..fd9d4f8cdf
--- /dev/null
+++ b/lib/dnspython/dns/entropy.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2009 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os
+import time
+try:
+ import threading as _threading
+except ImportError:
+ import dummy_threading as _threading
+
+class EntropyPool(object):
+ def __init__(self, seed=None):
+ self.pool_index = 0
+ self.digest = None
+ self.next_byte = 0
+ self.lock = _threading.Lock()
+ try:
+ import hashlib
+ self.hash = hashlib.sha1()
+ self.hash_len = 20
+ except:
+ try:
+ import sha
+ self.hash = sha.new()
+ self.hash_len = 20
+ except:
+ import md5
+ self.hash = md5.new()
+ self.hash_len = 16
+ self.pool = '\0' * self.hash_len
+ if not seed is None:
+ self.stir(seed)
+ self.seeded = True
+ else:
+ self.seeded = False
+
+ def stir(self, entropy, already_locked=False):
+ if not already_locked:
+ self.lock.acquire()
+ try:
+ bytes = [ord(c) for c in self.pool]
+ for c in entropy:
+ if self.pool_index == self.hash_len:
+ self.pool_index = 0
+ b = ord(c) & 0xff
+ bytes[self.pool_index] ^= b
+ self.pool_index += 1
+ self.pool = ''.join([chr(c) for c in bytes])
+ finally:
+ if not already_locked:
+ self.lock.release()
+
+ def _maybe_seed(self):
+ if not self.seeded:
+ try:
+ seed = os.urandom(16)
+ except:
+ try:
+ r = file('/dev/urandom', 'r', 0)
+ try:
+ seed = r.read(16)
+ finally:
+ r.close()
+ except:
+ seed = str(time.time())
+ self.seeded = True
+ self.stir(seed, True)
+
+ def random_8(self):
+ self.lock.acquire()
+ self._maybe_seed()
+ try:
+ if self.digest is None or self.next_byte == self.hash_len:
+ self.hash.update(self.pool)
+ self.digest = self.hash.digest()
+ self.stir(self.digest, True)
+ self.next_byte = 0
+ value = ord(self.digest[self.next_byte])
+ self.next_byte += 1
+ finally:
+ self.lock.release()
+ return value
+
+ def random_16(self):
+ return self.random_8() * 256 + self.random_8()
+
+ def random_32(self):
+ return self.random_16() * 65536 + self.random_16()
+
+ def random_between(self, first, last):
+ size = last - first + 1
+ if size > 4294967296L:
+ raise ValueError('too big')
+ if size > 65536:
+ rand = self.random_32
+ max = 4294967295L
+ elif size > 256:
+ rand = self.random_16
+ max = 65535
+ else:
+ rand = self.random_8
+ max = 255
+ return (first + size * rand() // (max + 1))
+
+pool = EntropyPool()
+
+def random_16():
+ return pool.random_16()
+
+def between(first, last):
+ return pool.random_between(first, last)
diff --git a/lib/dnspython/dns/exception.py b/lib/dnspython/dns/exception.py
new file mode 100644
index 0000000000..c6d6570d98
--- /dev/null
+++ b/lib/dnspython/dns/exception.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNS Exceptions."""
+
+class DNSException(Exception):
+ """Abstract base class shared by all dnspython exceptions."""
+ pass
+
+class FormError(DNSException):
+ """DNS message is malformed."""
+ pass
+
+class SyntaxError(DNSException):
+ """Text input is malformed."""
+ pass
+
+class UnexpectedEnd(SyntaxError):
+ """Raised if text input ends unexpectedly."""
+ pass
+
+class TooBig(DNSException):
+ """The message is too big."""
+ pass
+
+class Timeout(DNSException):
+ """The operation timed out."""
+ pass
diff --git a/lib/dnspython/dns/flags.py b/lib/dnspython/dns/flags.py
new file mode 100644
index 0000000000..17afdbc2ec
--- /dev/null
+++ b/lib/dnspython/dns/flags.py
@@ -0,0 +1,106 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Message Flags."""
+
+# Standard DNS flags
+
+QR = 0x8000
+AA = 0x0400
+TC = 0x0200
+RD = 0x0100
+RA = 0x0080
+AD = 0x0020
+CD = 0x0010
+
+# EDNS flags
+
+DO = 0x8000
+
+_by_text = {
+ 'QR' : QR,
+ 'AA' : AA,
+ 'TC' : TC,
+ 'RD' : RD,
+ 'RA' : RA,
+ 'AD' : AD,
+ 'CD' : CD
+}
+
+_edns_by_text = {
+ 'DO' : DO
+}
+
+
+# We construct the inverse mappings programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mappings not to be true inverses.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+_edns_by_value = dict([(y, x) for x, y in _edns_by_text.iteritems()])
+
+def _order_flags(table):
+ order = list(table.iteritems())
+ order.sort()
+ order.reverse()
+ return order
+
+_flags_order = _order_flags(_by_value)
+
+_edns_flags_order = _order_flags(_edns_by_value)
+
+def _from_text(text, table):
+ flags = 0
+ tokens = text.split()
+ for t in tokens:
+ flags = flags | table[t.upper()]
+ return flags
+
+def _to_text(flags, table, order):
+ text_flags = []
+ for k, v in order:
+ if flags & k != 0:
+ text_flags.append(v)
+ return ' '.join(text_flags)
+
+def from_text(text):
+ """Convert a space-separated list of flag text values into a flags
+ value.
+ @rtype: int"""
+
+ return _from_text(text, _by_text)
+
+def to_text(flags):
+ """Convert a flags value into a space-separated list of flag text
+ values.
+ @rtype: string"""
+
+ return _to_text(flags, _by_value, _flags_order)
+
+
+def edns_from_text(text):
+ """Convert a space-separated list of EDNS flag text values into a EDNS
+ flags value.
+ @rtype: int"""
+
+ return _from_text(text, _edns_by_text)
+
+def edns_to_text(flags):
+ """Convert an EDNS flags value into a space-separated list of EDNS flag
+ text values.
+ @rtype: string"""
+
+ return _to_text(flags, _edns_by_value, _edns_flags_order)
diff --git a/lib/dnspython/dns/hash.py b/lib/dnspython/dns/hash.py
new file mode 100644
index 0000000000..7bd5ae5980
--- /dev/null
+++ b/lib/dnspython/dns/hash.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Hashing backwards compatibility wrapper"""
+
+import sys
+
+_hashes = None
+
+def _need_later_python(alg):
+ def func(*args, **kwargs):
+ raise NotImplementedError("TSIG algorithm " + alg +
+ " requires Python 2.5.2 or later")
+ return func
+
+def _setup():
+ global _hashes
+ _hashes = {}
+ try:
+ import hashlib
+ _hashes['MD5'] = hashlib.md5
+ _hashes['SHA1'] = hashlib.sha1
+ _hashes['SHA224'] = hashlib.sha224
+ _hashes['SHA256'] = hashlib.sha256
+ if sys.hexversion >= 0x02050200:
+ _hashes['SHA384'] = hashlib.sha384
+ _hashes['SHA512'] = hashlib.sha512
+ else:
+ _hashes['SHA384'] = _need_later_python('SHA384')
+ _hashes['SHA512'] = _need_later_python('SHA512')
+
+ if sys.hexversion < 0x02050000:
+ # hashlib doesn't conform to PEP 247: API for
+ # Cryptographic Hash Functions, which hmac before python
+ # 2.5 requires, so add the necessary items.
+ class HashlibWrapper:
+ def __init__(self, basehash):
+ self.basehash = basehash
+ self.digest_size = self.basehash().digest_size
+
+ def new(self, *args, **kwargs):
+ return self.basehash(*args, **kwargs)
+
+ for name in _hashes:
+ _hashes[name] = HashlibWrapper(_hashes[name])
+
+ except ImportError:
+ import md5, sha
+ _hashes['MD5'] = md5
+ _hashes['SHA1'] = sha
+
+def get(algorithm):
+ if _hashes is None:
+ _setup()
+ return _hashes[algorithm.upper()]
diff --git a/lib/dnspython/dns/inet.py b/lib/dnspython/dns/inet.py
new file mode 100644
index 0000000000..8a8f3e1ca8
--- /dev/null
+++ b/lib/dnspython/dns/inet.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Generic Internet address helper functions."""
+
+import socket
+
+import dns.ipv4
+import dns.ipv6
+
+
+# We assume that AF_INET is always defined.
+
+AF_INET = socket.AF_INET
+
+# AF_INET6 might not be defined in the socket module, but we need it.
+# We'll try to use the socket module's value, and if it doesn't work,
+# we'll use our own value.
+
+try:
+ AF_INET6 = socket.AF_INET6
+except AttributeError:
+ AF_INET6 = 9999
+
+def inet_pton(family, text):
+ """Convert the textual form of a network address into its binary form.
+
+ @param family: the address family
+ @type family: int
+ @param text: the textual address
+ @type text: string
+ @raises NotImplementedError: the address family specified is not
+ implemented.
+ @rtype: string
+ """
+
+ if family == AF_INET:
+ return dns.ipv4.inet_aton(text)
+ elif family == AF_INET6:
+ return dns.ipv6.inet_aton(text)
+ else:
+ raise NotImplementedError
+
+def inet_ntop(family, address):
+ """Convert the binary form of a network address into its textual form.
+
+ @param family: the address family
+ @type family: int
+ @param address: the binary address
+ @type address: string
+ @raises NotImplementedError: the address family specified is not
+ implemented.
+ @rtype: string
+ """
+ if family == AF_INET:
+ return dns.ipv4.inet_ntoa(address)
+ elif family == AF_INET6:
+ return dns.ipv6.inet_ntoa(address)
+ else:
+ raise NotImplementedError
+
+def af_for_address(text):
+ """Determine the address family of a textual-form network address.
+
+ @param text: the textual address
+ @type text: string
+ @raises ValueError: the address family cannot be determined from the input.
+ @rtype: int
+ """
+ try:
+ junk = dns.ipv4.inet_aton(text)
+ return AF_INET
+ except:
+ try:
+ junk = dns.ipv6.inet_aton(text)
+ return AF_INET6
+ except:
+ raise ValueError
+
+def is_multicast(text):
+ """Is the textual-form network address a multicast address?
+
+ @param text: the textual address
+ @raises ValueError: the address family cannot be determined from the input.
+ @rtype: bool
+ """
+ try:
+ first = ord(dns.ipv4.inet_aton(text)[0])
+ return (first >= 224 and first <= 239)
+ except:
+ try:
+ first = ord(dns.ipv6.inet_aton(text)[0])
+ return (first == 255)
+ except:
+ raise ValueError
+
diff --git a/lib/dnspython/dns/ipv4.py b/lib/dnspython/dns/ipv4.py
new file mode 100644
index 0000000000..1569da5475
--- /dev/null
+++ b/lib/dnspython/dns/ipv4.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv4 helper functions."""
+
+import socket
+import sys
+
+if sys.hexversion < 0x02030000 or sys.platform == 'win32':
+ #
+ # Some versions of Python 2.2 have an inet_aton which rejects
+ # the valid IP address '255.255.255.255'. It appears this
+ # problem is still present on the Win32 platform even in 2.3.
+ # We'll work around the problem.
+ #
+ def inet_aton(text):
+ if text == '255.255.255.255':
+ return '\xff' * 4
+ else:
+ return socket.inet_aton(text)
+else:
+ inet_aton = socket.inet_aton
+
+inet_ntoa = socket.inet_ntoa
diff --git a/lib/dnspython/dns/ipv6.py b/lib/dnspython/dns/ipv6.py
new file mode 100644
index 0000000000..33c6713796
--- /dev/null
+++ b/lib/dnspython/dns/ipv6.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv6 helper functions."""
+
+import re
+
+import dns.exception
+import dns.ipv4
+
+_leading_zero = re.compile(r'0+([0-9a-f]+)')
+
+def inet_ntoa(address):
+ """Convert a network format IPv6 address into text.
+
+ @param address: the binary address
+ @type address: string
+ @rtype: string
+ @raises ValueError: the address isn't 16 bytes long
+ """
+
+ if len(address) != 16:
+ raise ValueError("IPv6 addresses are 16 bytes long")
+ hex = address.encode('hex_codec')
+ chunks = []
+ i = 0
+ l = len(hex)
+ while i < l:
+ chunk = hex[i : i + 4]
+ # strip leading zeros. we do this with an re instead of
+ # with lstrip() because lstrip() didn't support chars until
+ # python 2.2.2
+ m = _leading_zero.match(chunk)
+ if not m is None:
+ chunk = m.group(1)
+ chunks.append(chunk)
+ i += 4
+ #
+ # Compress the longest subsequence of 0-value chunks to ::
+ #
+ best_start = 0
+ best_len = 0
+ start = -1
+ last_was_zero = False
+ for i in xrange(8):
+ if chunks[i] != '0':
+ if last_was_zero:
+ end = i
+ current_len = end - start
+ if current_len > best_len:
+ best_start = start
+ best_len = current_len
+ last_was_zero = False
+ elif not last_was_zero:
+ start = i
+ last_was_zero = True
+ if last_was_zero:
+ end = 8
+ current_len = end - start
+ if current_len > best_len:
+ best_start = start
+ best_len = current_len
+ if best_len > 0:
+ if best_start == 0 and \
+ (best_len == 6 or
+ best_len == 5 and chunks[5] == 'ffff'):
+ # We have an embedded IPv4 address
+ if best_len == 6:
+ prefix = '::'
+ else:
+ prefix = '::ffff:'
+ hex = prefix + dns.ipv4.inet_ntoa(address[12:])
+ else:
+ hex = ':'.join(chunks[:best_start]) + '::' + \
+ ':'.join(chunks[best_start + best_len:])
+ else:
+ hex = ':'.join(chunks)
+ return hex
+
+_v4_ending = re.compile(r'(.*):(\d+)\.(\d+)\.(\d+)\.(\d+)$')
+_colon_colon_start = re.compile(r'::.*')
+_colon_colon_end = re.compile(r'.*::$')
+
+def inet_aton(text):
+ """Convert a text format IPv6 address into network format.
+
+ @param text: the textual address
+ @type text: string
+ @rtype: string
+ @raises dns.exception.SyntaxError: the text was not properly formatted
+ """
+
+ #
+ # Our aim here is not something fast; we just want something that works.
+ #
+
+ if text == '::':
+ text = '0::'
+ #
+ # Get rid of the icky dot-quad syntax if we have it.
+ #
+ m = _v4_ending.match(text)
+ if not m is None:
+ text = "%s:%04x:%04x" % (m.group(1),
+ int(m.group(2)) * 256 + int(m.group(3)),
+ int(m.group(4)) * 256 + int(m.group(5)))
+ #
+ # Try to turn '::<whatever>' into ':<whatever>'; if no match try to
+ # turn '<whatever>::' into '<whatever>:'
+ #
+ m = _colon_colon_start.match(text)
+ if not m is None:
+ text = text[1:]
+ else:
+ m = _colon_colon_end.match(text)
+ if not m is None:
+ text = text[:-1]
+ #
+ # Now canonicalize into 8 chunks of 4 hex digits each
+ #
+ chunks = text.split(':')
+ l = len(chunks)
+ if l > 8:
+ raise dns.exception.SyntaxError
+ seen_empty = False
+ canonical = []
+ for c in chunks:
+ if c == '':
+ if seen_empty:
+ raise dns.exception.SyntaxError
+ seen_empty = True
+ for i in xrange(0, 8 - l + 1):
+ canonical.append('0000')
+ else:
+ lc = len(c)
+ if lc > 4:
+ raise dns.exception.SyntaxError
+ if lc != 4:
+ c = ('0' * (4 - lc)) + c
+ canonical.append(c)
+ if l < 8 and not seen_empty:
+ raise dns.exception.SyntaxError
+ text = ''.join(canonical)
+
+ #
+ # Finally we can go to binary.
+ #
+ try:
+ return text.decode('hex_codec')
+ except TypeError:
+ raise dns.exception.SyntaxError
diff --git a/lib/dnspython/dns/message.py b/lib/dnspython/dns/message.py
new file mode 100644
index 0000000000..a124a3e177
--- /dev/null
+++ b/lib/dnspython/dns/message.py
@@ -0,0 +1,1087 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Messages"""
+
+import cStringIO
+import random
+import struct
+import sys
+import time
+
+import dns.edns
+import dns.exception
+import dns.flags
+import dns.name
+import dns.opcode
+import dns.entropy
+import dns.rcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rrset
+import dns.renderer
+import dns.tsig
+
+# Exceptions raised while parsing or building DNS messages; all derive
+# (directly or indirectly) from dns.exception.DNSException.
+class ShortHeader(dns.exception.FormError):
+    """Raised if the DNS packet passed to from_wire() is too short."""
+    pass
+
+class TrailingJunk(dns.exception.FormError):
+    """Raised if the DNS packet passed to from_wire() has extra junk
+    at the end of it."""
+    pass
+
+class UnknownHeaderField(dns.exception.DNSException):
+    """Raised if a header field name is not recognized when converting from
+    text into a message."""
+    pass
+
+class BadEDNS(dns.exception.FormError):
+    """Raised if an OPT record occurs somewhere other than the start of
+    the additional data section."""
+    pass
+
+class BadTSIG(dns.exception.FormError):
+    """Raised if a TSIG record occurs somewhere other than the end of
+    the additional data section."""
+    pass
+
+class UnknownTSIGKey(dns.exception.DNSException):
+    """Raised if we got a TSIG but don't know the key."""
+    pass
+
+class Message(object):
+ """A DNS message.
+
+ @ivar id: The query id; the default is a randomly chosen id.
+ @type id: int
+ @ivar flags: The DNS flags of the message. @see: RFC 1035 for an
+ explanation of these flags.
+ @type flags: int
+ @ivar question: The question section.
+ @type question: list of dns.rrset.RRset objects
+ @ivar answer: The answer section.
+ @type answer: list of dns.rrset.RRset objects
+ @ivar authority: The authority section.
+ @type authority: list of dns.rrset.RRset objects
+ @ivar additional: The additional data section.
+ @type additional: list of dns.rrset.RRset objects
+ @ivar edns: The EDNS level to use. The default is -1, no Edns.
+ @type edns: int
+ @ivar ednsflags: The EDNS flags
+ @type ednsflags: long
+ @ivar payload: The EDNS payload size. The default is 0.
+ @type payload: int
+ @ivar options: The EDNS options
+ @type options: list of dns.edns.Option objects
+ @ivar request_payload: The associated request's EDNS payload size.
+ @type request_payload: int
+ @ivar keyring: The TSIG keyring to use. The default is None.
+ @type keyring: dict
+ @ivar keyname: The TSIG keyname to use. The default is None.
+ @type keyname: dns.name.Name object
+ @ivar keyalgorithm: The TSIG algorithm to use; defaults to
+ dns.tsig.default_algorithm. Constants for TSIG algorithms are defined
+ in dns.tsig, and the currently implemented algorithms are
+ HMAC_MD5, HMAC_SHA1, HMAC_SHA224, HMAC_SHA256, HMAC_SHA384, and
+ HMAC_SHA512.
+ @type keyalgorithm: string
+ @ivar request_mac: The TSIG MAC of the request message associated with
+ this message; used when validating TSIG signatures. @see: RFC 2845 for
+ more information on TSIG fields.
+ @type request_mac: string
+ @ivar fudge: TSIG time fudge; default is 300 seconds.
+ @type fudge: int
+ @ivar original_id: TSIG original id; defaults to the message's id
+ @type original_id: int
+ @ivar tsig_error: TSIG error code; default is 0.
+ @type tsig_error: int
+ @ivar other_data: TSIG other data.
+ @type other_data: string
+ @ivar mac: The TSIG MAC for this message.
+ @type mac: string
+ @ivar xfr: Is the message being used to contain the results of a DNS
+ zone transfer? The default is False.
+ @type xfr: bool
+ @ivar origin: The origin of the zone in messages which are used for
+ zone transfers or for DNS dynamic updates. The default is None.
+ @type origin: dns.name.Name object
+ @ivar tsig_ctx: The TSIG signature context associated with this
+ message. The default is None.
+ @type tsig_ctx: hmac.HMAC object
+ @ivar had_tsig: Did the message decoded from wire format have a TSIG
+ signature?
+ @type had_tsig: bool
+ @ivar multi: Is this message part of a multi-message sequence? The
+ default is false. This variable is used when validating TSIG signatures
+ on messages which are part of a zone transfer.
+ @type multi: bool
+ @ivar first: Is this message standalone, or the first of a multi
+ message sequence? This variable is used when validating TSIG signatures
+ on messages which are part of a zone transfer.
+ @type first: bool
+ @ivar index: An index of rrsets in the message. The index key is
+ (section, name, rdclass, rdtype, covers, deleting). Indexing can be
+ disabled by setting the index to None.
+ @type index: dict
+ """
+
+ def __init__(self, id=None):
+ if id is None:
+ self.id = dns.entropy.random_16()
+ else:
+ self.id = id
+ self.flags = 0
+ self.question = []
+ self.answer = []
+ self.authority = []
+ self.additional = []
+ self.edns = -1
+ self.ednsflags = 0
+ self.payload = 0
+ self.options = []
+ self.request_payload = 0
+ self.keyring = None
+ self.keyname = None
+ self.keyalgorithm = dns.tsig.default_algorithm
+ self.request_mac = ''
+ self.other_data = ''
+ self.tsig_error = 0
+ self.fudge = 300
+ self.original_id = self.id
+ self.mac = ''
+ self.xfr = False
+ self.origin = None
+ self.tsig_ctx = None
+ self.had_tsig = False
+ self.multi = False
+ self.first = True
+ self.index = {}
+
+ def __repr__(self):
+ return '<DNS message, ID ' + `self.id` + '>'
+
+ def __str__(self):
+ return self.to_text()
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ """Convert the message to text.
+
+ The I{origin}, I{relativize}, and any other keyword
+ arguments are passed to the rrset to_wire() method.
+
+ @rtype: string
+ """
+
+ s = cStringIO.StringIO()
+ print >> s, 'id %d' % self.id
+ print >> s, 'opcode %s' % \
+ dns.opcode.to_text(dns.opcode.from_flags(self.flags))
+ rc = dns.rcode.from_flags(self.flags, self.ednsflags)
+ print >> s, 'rcode %s' % dns.rcode.to_text(rc)
+ print >> s, 'flags %s' % dns.flags.to_text(self.flags)
+ if self.edns >= 0:
+ print >> s, 'edns %s' % self.edns
+ if self.ednsflags != 0:
+ print >> s, 'eflags %s' % \
+ dns.flags.edns_to_text(self.ednsflags)
+ print >> s, 'payload', self.payload
+ is_update = dns.opcode.is_update(self.flags)
+ if is_update:
+ print >> s, ';ZONE'
+ else:
+ print >> s, ';QUESTION'
+ for rrset in self.question:
+ print >> s, rrset.to_text(origin, relativize, **kw)
+ if is_update:
+ print >> s, ';PREREQ'
+ else:
+ print >> s, ';ANSWER'
+ for rrset in self.answer:
+ print >> s, rrset.to_text(origin, relativize, **kw)
+ if is_update:
+ print >> s, ';UPDATE'
+ else:
+ print >> s, ';AUTHORITY'
+ for rrset in self.authority:
+ print >> s, rrset.to_text(origin, relativize, **kw)
+ print >> s, ';ADDITIONAL'
+ for rrset in self.additional:
+ print >> s, rrset.to_text(origin, relativize, **kw)
+ #
+ # We strip off the final \n so the caller can print the result without
+ # doing weird things to get around eccentricities in Python print
+ # formatting
+ #
+ return s.getvalue()[:-1]
+
+ def __eq__(self, other):
+ """Two messages are equal if they have the same content in the
+ header, question, answer, and authority sections.
+ @rtype: bool"""
+ if not isinstance(other, Message):
+ return False
+ if self.id != other.id:
+ return False
+ if self.flags != other.flags:
+ return False
+ for n in self.question:
+ if n not in other.question:
+ return False
+ for n in other.question:
+ if n not in self.question:
+ return False
+ for n in self.answer:
+ if n not in other.answer:
+ return False
+ for n in other.answer:
+ if n not in self.answer:
+ return False
+ for n in self.authority:
+ if n not in other.authority:
+ return False
+ for n in other.authority:
+ if n not in self.authority:
+ return False
+ return True
+
+ def __ne__(self, other):
+ """Are two messages not equal?
+ @rtype: bool"""
+ return not self.__eq__(other)
+
+ def is_response(self, other):
+ """Is other a response to self?
+ @rtype: bool"""
+ if other.flags & dns.flags.QR == 0 or \
+ self.id != other.id or \
+ dns.opcode.from_flags(self.flags) != \
+ dns.opcode.from_flags(other.flags):
+ return False
+ if dns.rcode.from_flags(other.flags, other.ednsflags) != \
+ dns.rcode.NOERROR:
+ return True
+ if dns.opcode.is_update(self.flags):
+ return True
+ for n in self.question:
+ if n not in other.question:
+ return False
+ for n in other.question:
+ if n not in self.question:
+ return False
+ return True
+
+ def section_number(self, section):
+ if section is self.question:
+ return 0
+ elif section is self.answer:
+ return 1
+ elif section is self.authority:
+ return 2
+ elif section is self.additional:
+ return 3
+ else:
+ raise ValueError('unknown section')
+
+ def find_rrset(self, section, name, rdclass, rdtype,
+ covers=dns.rdatatype.NONE, deleting=None, create=False,
+ force_unique=False):
+ """Find the RRset with the given attributes in the specified section.
+
+ @param section: the section of the message to look in, e.g.
+ self.answer.
+ @type section: list of dns.rrset.RRset objects
+ @param name: the name of the RRset
+ @type name: dns.name.Name object
+ @param rdclass: the class of the RRset
+ @type rdclass: int
+ @param rdtype: the type of the RRset
+ @type rdtype: int
+ @param covers: the covers value of the RRset
+ @type covers: int
+ @param deleting: the deleting value of the RRset
+ @type deleting: int
+ @param create: If True, create the RRset if it is not found.
+ The created RRset is appended to I{section}.
+ @type create: bool
+ @param force_unique: If True and create is also True, create a
+ new RRset regardless of whether a matching RRset exists already.
+ @type force_unique: bool
+ @raises KeyError: the RRset was not found and create was False
+ @rtype: dns.rrset.RRset object"""
+
+ key = (self.section_number(section),
+ name, rdclass, rdtype, covers, deleting)
+ if not force_unique:
+ if not self.index is None:
+ rrset = self.index.get(key)
+ if not rrset is None:
+ return rrset
+ else:
+ for rrset in section:
+ if rrset.match(name, rdclass, rdtype, covers, deleting):
+ return rrset
+ if not create:
+ raise KeyError
+ rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
+ section.append(rrset)
+ if not self.index is None:
+ self.index[key] = rrset
+ return rrset
+
+ def get_rrset(self, section, name, rdclass, rdtype,
+ covers=dns.rdatatype.NONE, deleting=None, create=False,
+ force_unique=False):
+ """Get the RRset with the given attributes in the specified section.
+
+ If the RRset is not found, None is returned.
+
+ @param section: the section of the message to look in, e.g.
+ self.answer.
+ @type section: list of dns.rrset.RRset objects
+ @param name: the name of the RRset
+ @type name: dns.name.Name object
+ @param rdclass: the class of the RRset
+ @type rdclass: int
+ @param rdtype: the type of the RRset
+ @type rdtype: int
+ @param covers: the covers value of the RRset
+ @type covers: int
+ @param deleting: the deleting value of the RRset
+ @type deleting: int
+ @param create: If True, create the RRset if it is not found.
+ The created RRset is appended to I{section}.
+ @type create: bool
+ @param force_unique: If True and create is also True, create a
+ new RRset regardless of whether a matching RRset exists already.
+ @type force_unique: bool
+ @rtype: dns.rrset.RRset object or None"""
+
+ try:
+ rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
+ deleting, create, force_unique)
+ except KeyError:
+ rrset = None
+ return rrset
+
+ def to_wire(self, origin=None, max_size=0, **kw):
+ """Return a string containing the message in DNS compressed wire
+ format.
+
+ Additional keyword arguments are passed to the rrset to_wire()
+ method.
+
+ @param origin: The origin to be appended to any relative names.
+ @type origin: dns.name.Name object
+ @param max_size: The maximum size of the wire format output; default
+ is 0, which means 'the message's request payload, if nonzero, or
+ 65536'.
+ @type max_size: int
+ @raises dns.exception.TooBig: max_size was exceeded
+ @rtype: string
+ """
+
+ if max_size == 0:
+ if self.request_payload != 0:
+ max_size = self.request_payload
+ else:
+ max_size = 65535
+ if max_size < 512:
+ max_size = 512
+ elif max_size > 65535:
+ max_size = 65535
+ r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
+ for rrset in self.question:
+ r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
+ for rrset in self.answer:
+ r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
+ for rrset in self.authority:
+ r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
+ if self.edns >= 0:
+ r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
+ for rrset in self.additional:
+ r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
+ r.write_header()
+ if not self.keyname is None:
+ r.add_tsig(self.keyname, self.keyring[self.keyname],
+ self.fudge, self.original_id, self.tsig_error,
+ self.other_data, self.request_mac,
+ self.keyalgorithm)
+ self.mac = r.mac
+ return r.get_wire()
+
+ def use_tsig(self, keyring, keyname=None, fudge=300,
+ original_id=None, tsig_error=0, other_data='',
+ algorithm=dns.tsig.default_algorithm):
+ """When sending, a TSIG signature using the specified keyring
+ and keyname should be added.
+
+ @param keyring: The TSIG keyring to use; defaults to None.
+ @type keyring: dict
+ @param keyname: The name of the TSIG key to use; defaults to None.
+ The key must be defined in the keyring. If a keyring is specified
+ but a keyname is not, then the key used will be the first key in the
+ keyring. Note that the order of keys in a dictionary is not defined,
+ so applications should supply a keyname when a keyring is used, unless
+ they know the keyring contains only one key.
+ @type keyname: dns.name.Name or string
+ @param fudge: TSIG time fudge; default is 300 seconds.
+ @type fudge: int
+ @param original_id: TSIG original id; defaults to the message's id
+ @type original_id: int
+ @param tsig_error: TSIG error code; default is 0.
+ @type tsig_error: int
+ @param other_data: TSIG other data.
+ @type other_data: string
+ @param algorithm: The TSIG algorithm to use; defaults to
+ dns.tsig.default_algorithm
+ """
+
+ self.keyring = keyring
+ if keyname is None:
+ self.keyname = self.keyring.keys()[0]
+ else:
+ if isinstance(keyname, (str, unicode)):
+ keyname = dns.name.from_text(keyname)
+ self.keyname = keyname
+ self.keyalgorithm = algorithm
+ self.fudge = fudge
+ if original_id is None:
+ self.original_id = self.id
+ else:
+ self.original_id = original_id
+ self.tsig_error = tsig_error
+ self.other_data = other_data
+
+ def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, options=None):
+ """Configure EDNS behavior.
+ @param edns: The EDNS level to use. Specifying None, False, or -1
+ means 'do not use EDNS', and in this case the other parameters are
+ ignored. Specifying True is equivalent to specifying 0, i.e. 'use
+ EDNS0'.
+ @type edns: int or bool or None
+ @param ednsflags: EDNS flag values.
+ @type ednsflags: int
+ @param payload: The EDNS sender's payload field, which is the maximum
+ size of UDP datagram the sender can handle.
+ @type payload: int
+ @param request_payload: The EDNS payload size to use when sending
+ this message. If not specified, defaults to the value of payload.
+ @type request_payload: int or None
+ @param options: The EDNS options
+ @type options: None or list of dns.edns.Option objects
+ @see: RFC 2671
+ """
+ if edns is None or edns is False:
+ edns = -1
+ if edns is True:
+ edns = 0
+ if request_payload is None:
+ request_payload = payload
+ if edns < 0:
+ ednsflags = 0
+ payload = 0
+ request_payload = 0
+ options = []
+ else:
+ # make sure the EDNS version in ednsflags agrees with edns
+ ednsflags &= 0xFF00FFFFL
+ ednsflags |= (edns << 16)
+ if options is None:
+ options = []
+ self.edns = edns
+ self.ednsflags = ednsflags
+ self.payload = payload
+ self.options = options
+ self.request_payload = request_payload
+
+ def want_dnssec(self, wanted=True):
+ """Enable or disable 'DNSSEC desired' flag in requests.
+ @param wanted: Is DNSSEC desired? If True, EDNS is enabled if
+ required, and then the DO bit is set. If False, the DO bit is
+ cleared if EDNS is enabled.
+ @type wanted: bool
+ """
+ if wanted:
+ if self.edns < 0:
+ self.use_edns()
+ self.ednsflags |= dns.flags.DO
+ elif self.edns >= 0:
+ self.ednsflags &= ~dns.flags.DO
+
+ def rcode(self):
+ """Return the rcode.
+ @rtype: int
+ """
+ return dns.rcode.from_flags(self.flags, self.ednsflags)
+
+ def set_rcode(self, rcode):
+ """Set the rcode.
+ @param rcode: the rcode
+ @type rcode: int
+ """
+ (value, evalue) = dns.rcode.to_flags(rcode)
+ self.flags &= 0xFFF0
+ self.flags |= value
+ self.ednsflags &= 0x00FFFFFFL
+ self.ednsflags |= evalue
+ if self.ednsflags != 0 and self.edns < 0:
+ self.edns = 0
+
+ def opcode(self):
+ """Return the opcode.
+ @rtype: int
+ """
+ return dns.opcode.from_flags(self.flags)
+
+ def set_opcode(self, opcode):
+ """Set the opcode.
+ @param opcode: the opcode
+ @type opcode: int
+ """
+ self.flags &= 0x87FF
+ self.flags |= dns.opcode.to_flags(opcode)
+
+class _WireReader(object):
+ """Wire format reader.
+
+ @ivar wire: the wire-format message.
+ @type wire: string
+ @ivar message: The message object being built
+ @type message: dns.message.Message object
+ @ivar current: When building a message object from wire format, this
+ variable contains the offset from the beginning of wire of the next octet
+ to be read.
+ @type current: int
+ @ivar updating: Is the message a dynamic update?
+ @type updating: bool
+ @ivar one_rr_per_rrset: Put each RR into its own RRset?
+ @type one_rr_per_rrset: bool
+ @ivar zone_rdclass: The class of the zone in messages which are
+ DNS dynamic updates.
+ @type zone_rdclass: int
+ """
+
+ def __init__(self, wire, message, question_only=False,
+ one_rr_per_rrset=False):
+ self.wire = wire
+ self.message = message
+ self.current = 0
+ self.updating = False
+ self.zone_rdclass = dns.rdataclass.IN
+ self.question_only = question_only
+ self.one_rr_per_rrset = one_rr_per_rrset
+
+ def _get_question(self, qcount):
+ """Read the next I{qcount} records from the wire data and add them to
+ the question section.
+ @param qcount: the number of questions in the message
+ @type qcount: int"""
+
+ if self.updating and qcount > 1:
+ raise dns.exception.FormError
+
+ for i in xrange(0, qcount):
+ (qname, used) = dns.name.from_wire(self.wire, self.current)
+ if not self.message.origin is None:
+ qname = qname.relativize(self.message.origin)
+ self.current = self.current + used
+ (rdtype, rdclass) = \
+ struct.unpack('!HH',
+ self.wire[self.current:self.current + 4])
+ self.current = self.current + 4
+ self.message.find_rrset(self.message.question, qname,
+ rdclass, rdtype, create=True,
+ force_unique=True)
+ if self.updating:
+ self.zone_rdclass = rdclass
+
+ def _get_section(self, section, count):
+ """Read the next I{count} records from the wire data and add them to
+ the specified section.
+ @param section: the section of the message to which to add records
+ @type section: list of dns.rrset.RRset objects
+ @param count: the number of records to read
+ @type count: int"""
+
+ if self.updating or self.one_rr_per_rrset:
+ force_unique = True
+ else:
+ force_unique = False
+ seen_opt = False
+ for i in xrange(0, count):
+ rr_start = self.current
+ (name, used) = dns.name.from_wire(self.wire, self.current)
+ absolute_name = name
+ if not self.message.origin is None:
+ name = name.relativize(self.message.origin)
+ self.current = self.current + used
+ (rdtype, rdclass, ttl, rdlen) = \
+ struct.unpack('!HHIH',
+ self.wire[self.current:self.current + 10])
+ self.current = self.current + 10
+ if rdtype == dns.rdatatype.OPT:
+ if not section is self.message.additional or seen_opt:
+ raise BadEDNS
+ self.message.payload = rdclass
+ self.message.ednsflags = ttl
+ self.message.edns = (ttl & 0xff0000) >> 16
+ self.message.options = []
+ current = self.current
+ optslen = rdlen
+ while optslen > 0:
+ (otype, olen) = \
+ struct.unpack('!HH',
+ self.wire[current:current + 4])
+ current = current + 4
+ opt = dns.edns.option_from_wire(otype, self.wire, current, olen)
+ self.message.options.append(opt)
+ current = current + olen
+ optslen = optslen - 4 - olen
+ seen_opt = True
+ elif rdtype == dns.rdatatype.TSIG:
+ if not (section is self.message.additional and
+ i == (count - 1)):
+ raise BadTSIG
+ if self.message.keyring is None:
+ raise UnknownTSIGKey('got signed message without keyring')
+ secret = self.message.keyring.get(absolute_name)
+ if secret is None:
+ raise UnknownTSIGKey("key '%s' unknown" % name)
+ self.message.tsig_ctx = \
+ dns.tsig.validate(self.wire,
+ absolute_name,
+ secret,
+ int(time.time()),
+ self.message.request_mac,
+ rr_start,
+ self.current,
+ rdlen,
+ self.message.tsig_ctx,
+ self.message.multi,
+ self.message.first)
+ self.message.had_tsig = True
+ else:
+ if ttl < 0:
+ ttl = 0
+ if self.updating and \
+ (rdclass == dns.rdataclass.ANY or
+ rdclass == dns.rdataclass.NONE):
+ deleting = rdclass
+ rdclass = self.zone_rdclass
+ else:
+ deleting = None
+ if deleting == dns.rdataclass.ANY or \
+ (deleting == dns.rdataclass.NONE and \
+ section is self.message.answer):
+ covers = dns.rdatatype.NONE
+ rd = None
+ else:
+ rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
+ self.current, rdlen,
+ self.message.origin)
+ covers = rd.covers()
+ if self.message.xfr and rdtype == dns.rdatatype.SOA:
+ force_unique = True
+ rrset = self.message.find_rrset(section, name,
+ rdclass, rdtype, covers,
+ deleting, True, force_unique)
+ if not rd is None:
+ rrset.add(rd, ttl)
+ self.current = self.current + rdlen
+
+ def read(self):
+ """Read a wire format DNS message and build a dns.message.Message
+ object."""
+
+ l = len(self.wire)
+ if l < 12:
+ raise ShortHeader
+ (self.message.id, self.message.flags, qcount, ancount,
+ aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
+ self.current = 12
+ if dns.opcode.is_update(self.message.flags):
+ self.updating = True
+ self._get_question(qcount)
+ if self.question_only:
+ return
+ self._get_section(self.message.answer, ancount)
+ self._get_section(self.message.authority, aucount)
+ self._get_section(self.message.additional, adcount)
+ if self.current != l:
+ raise TrailingJunk
+ if self.message.multi and self.message.tsig_ctx and \
+ not self.message.had_tsig:
+ self.message.tsig_ctx.update(self.wire)
+
+
+def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None,
+              tsig_ctx = None, multi = False, first = True,
+              question_only = False, one_rr_per_rrset = False):
+    """Convert a DNS wire format message into a message
+    object.
+
+    @param wire: The wire-format message.
+    @type wire: string
+    @param keyring: The keyring to use if the message is signed.
+    @type keyring: dict
+    @param request_mac: If the message is a response to a TSIG-signed request,
+    I{request_mac} should be set to the MAC of that request.
+    @type request_mac: string
+    @param xfr: Is this message part of a zone transfer?
+    @type xfr: bool
+    @param origin: If the message is part of a zone transfer, I{origin}
+    should be the origin name of the zone.
+    @type origin: dns.name.Name object
+    @param tsig_ctx: The ongoing TSIG context, used when validating zone
+    transfers.
+    @type tsig_ctx: hmac.HMAC object
+    @param multi: Is this message part of a multiple message sequence?
+    @type multi: bool
+    @param first: Is this message standalone, or the first of a multi
+    message sequence?
+    @type first: bool
+    @param question_only: Read only up to the end of the question section?
+    @type question_only: bool
+    @param one_rr_per_rrset: Put each RR into its own RRset
+    @type one_rr_per_rrset: bool
+    @raises ShortHeader: The message is less than 12 octets long.
+    @raises TrailingJunk: There were octets in the message past the end
+    of the proper DNS message.
+    @raises BadEDNS: An OPT record was in the wrong section, or occurred more
+    than once.
+    @raises BadTSIG: A TSIG record was not the last record of the additional
+    data section.
+    @rtype: dns.message.Message object"""
+
+    # The id is only a placeholder; _WireReader.read() replaces it with
+    # the id parsed from the wire header.
+    m = Message(id=0)
+    m.keyring = keyring
+    m.request_mac = request_mac
+    m.xfr = xfr
+    m.origin = origin
+    m.tsig_ctx = tsig_ctx
+    m.multi = multi
+    m.first = first
+
+    reader = _WireReader(wire, m, question_only, one_rr_per_rrset)
+    reader.read()
+
+    return m
+
+
+class _TextReader(object):
+ """Text format reader.
+
+ @ivar tok: the tokenizer
+ @type tok: dns.tokenizer.Tokenizer object
+ @ivar message: The message object being built
+ @type message: dns.message.Message object
+ @ivar updating: Is the message a dynamic update?
+ @type updating: bool
+ @ivar zone_rdclass: The class of the zone in messages which are
+ DNS dynamic updates.
+ @type zone_rdclass: int
+ @ivar last_name: The most recently read name when building a message object
+ from text format.
+ @type last_name: dns.name.Name object
+ """
+
+ def __init__(self, text, message):
+ self.message = message
+ self.tok = dns.tokenizer.Tokenizer(text)
+ self.last_name = None
+ self.zone_rdclass = dns.rdataclass.IN
+ self.updating = False
+
+ def _header_line(self, section):
+ """Process one line from the text format header section."""
+
+ token = self.tok.get()
+ what = token.value
+ if what == 'id':
+ self.message.id = self.tok.get_int()
+ elif what == 'flags':
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.message.flags = self.message.flags | \
+ dns.flags.from_text(token.value)
+ if dns.opcode.is_update(self.message.flags):
+ self.updating = True
+ elif what == 'edns':
+ self.message.edns = self.tok.get_int()
+ self.message.ednsflags = self.message.ednsflags | \
+ (self.message.edns << 16)
+ elif what == 'eflags':
+ if self.message.edns < 0:
+ self.message.edns = 0
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.message.ednsflags = self.message.ednsflags | \
+ dns.flags.edns_from_text(token.value)
+ elif what == 'payload':
+ self.message.payload = self.tok.get_int()
+ if self.message.edns < 0:
+ self.message.edns = 0
+ elif what == 'opcode':
+ text = self.tok.get_string()
+ self.message.flags = self.message.flags | \
+ dns.opcode.to_flags(dns.opcode.from_text(text))
+ elif what == 'rcode':
+ text = self.tok.get_string()
+ self.message.set_rcode(dns.rcode.from_text(text))
+ else:
+ raise UnknownHeaderField
+ self.tok.get_eol()
+
+ def _question_line(self, section):
+ """Process one line from the text format question section."""
+
+ token = self.tok.get(want_leading = True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(token.value, None)
+ name = self.last_name
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ self.message.find_rrset(self.message.question, name,
+ rdclass, rdtype, create=True,
+ force_unique=True)
+ if self.updating:
+ self.zone_rdclass = rdclass
+ self.tok.get_eol()
+
+ def _rr_line(self, section):
+ """Process one line from the text format answer, authority, or
+ additional data sections.
+ """
+
+ deleting = None
+ # Name
+ token = self.tok.get(want_leading = True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(token.value, None)
+ name = self.last_name
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # TTL
+ try:
+ ttl = int(token.value, 0)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ ttl = 0
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
+ deleting = rdclass
+ rdclass = self.zone_rdclass
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_eol_or_eof():
+ self.tok.unget(token)
+ rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
+ covers = rd.covers()
+ else:
+ rd = None
+ covers = dns.rdatatype.NONE
+ rrset = self.message.find_rrset(section, name,
+ rdclass, rdtype, covers,
+ deleting, True, self.updating)
+ if not rd is None:
+ rrset.add(rd, ttl)
+
+ def read(self):
+ """Read a text format DNS message and build a dns.message.Message
+ object."""
+
+ line_method = self._header_line
+ section = None
+ while 1:
+ token = self.tok.get(True, True)
+ if token.is_eol_or_eof():
+ break
+ if token.is_comment():
+ u = token.value.upper()
+ if u == 'HEADER':
+ line_method = self._header_line
+ elif u == 'QUESTION' or u == 'ZONE':
+ line_method = self._question_line
+ section = self.message.question
+ elif u == 'ANSWER' or u == 'PREREQ':
+ line_method = self._rr_line
+ section = self.message.answer
+ elif u == 'AUTHORITY' or u == 'UPDATE':
+ line_method = self._rr_line
+ section = self.message.authority
+ elif u == 'ADDITIONAL':
+ line_method = self._rr_line
+ section = self.message.additional
+ self.tok.get_eol()
+ continue
+ self.tok.unget(token)
+ line_method(section)
+
+
+def from_text(text):
+ """Convert the text format message into a message object.
+
+ @param text: The text format message.
+ @type text: string
+ @raises UnknownHeaderField:
+ @raises dns.exception.SyntaxError:
+ @rtype: dns.message.Message object"""
+
+ # 'text' can also be a file, but we don't publish that fact
+ # since it's an implementation detail. The official file
+ # interface is from_file().
+
+ m = Message()
+
+ reader = _TextReader(text, m)
+ reader.read()
+
+ return m
+
+def from_file(f):
+    """Read the next text format message from the specified file.
+
+    @param f: file or string.  If I{f} is a string, it is treated
+    as the name of a file to open.
+    @raises UnknownHeaderField:
+    @raises dns.exception.SyntaxError:
+    @rtype: dns.message.Message object"""
+
+    if sys.hexversion >= 0x02030000:
+        # Python 2.3+: allow Unicode filenames, and use universal
+        # newline mode so foreign line endings parse cleanly.
+        str_type = basestring
+        opts = 'rU'
+    else:
+        str_type = str
+        opts = 'r'
+    if isinstance(f, str_type):
+        f = file(f, opts)
+        want_close = True
+    else:
+        want_close = False
+
+    try:
+        m = from_text(f)
+    finally:
+        # Only close files opened here; a caller-supplied file object
+        # remains the caller's responsibility.
+        if want_close:
+            f.close()
+    return m
+
+def make_query(qname, rdtype, rdclass = dns.rdataclass.IN, use_edns=None,
+               want_dnssec=False):
+    """Make a query message.
+
+    The query name, type, and class may all be specified either
+    as objects of the appropriate type, or as strings.
+
+    The query will have a randomly chosen query id, and its DNS flags
+    will be set to dns.flags.RD.
+
+    @param qname: The query name.
+    @type qname: dns.name.Name object or string
+    @param rdtype: The desired rdata type.
+    @type rdtype: int
+    @param rdclass: The desired rdata class; the default is class IN.
+    @type rdclass: int
+    @param use_edns: The EDNS level to use; the default is None (no EDNS).
+    See the description of dns.message.Message.use_edns() for the possible
+    values for use_edns and their meanings.
+    @type use_edns: int or bool or None
+    @param want_dnssec: Should the query indicate that DNSSEC is desired?
+    @type want_dnssec: bool
+    @rtype: dns.message.Message object"""
+
+    # Accept textual forms for all three query parameters.
+    if isinstance(qname, (str, unicode)):
+        qname = dns.name.from_text(qname)
+    if isinstance(rdtype, (str, unicode)):
+        rdtype = dns.rdatatype.from_text(rdtype)
+    if isinstance(rdclass, (str, unicode)):
+        rdclass = dns.rdataclass.from_text(rdclass)
+    m = Message()
+    m.flags |= dns.flags.RD
+    m.find_rrset(m.question, qname, rdclass, rdtype, create=True,
+                 force_unique=True)
+    # use_edns(None) leaves EDNS disabled; want_dnssec(True) enables
+    # EDNS implicitly if it is needed for the DO bit.
+    m.use_edns(use_edns)
+    m.want_dnssec(want_dnssec)
+    return m
+
+def make_response(query, recursion_available=False, our_payload=8192):
+    """Make a message which is a response for the specified query.
+    The message returned is really a response skeleton; it has all
+    of the infrastructure required of a response, but none of the
+    content.
+
+    The response's question section is a shallow copy of the query's
+    question section, so the query's question RRsets should not be
+    changed.
+
+    @param query: the query to respond to
+    @type query: dns.message.Message object
+    @param recursion_available: should RA be set in the response?
+    @type recursion_available: bool
+    @param our_payload: payload size to advertise in EDNS responses; default
+    is 8192.
+    @type our_payload: int
+    @raises dns.exception.FormError: the specified query message has its
+    QR flag set, i.e. it is itself a response and not a query
+    @rtype: dns.message.Message object"""
+
+    if query.flags & dns.flags.QR:
+        raise dns.exception.FormError('specified query message is not a query')
+    response = dns.message.Message(query.id)
+    # Set QR and echo back the query's RD bit.
+    response.flags = dns.flags.QR | (query.flags & dns.flags.RD)
+    if recursion_available:
+        response.flags |= dns.flags.RA
+    response.set_opcode(query.opcode())
+    response.question = list(query.question)
+    if query.edns >= 0:
+        response.use_edns(0, 0, our_payload, query.payload)
+    if not query.keyname is None:
+        # Sign the response with the same TSIG key, chaining it to the
+        # request's MAC (RFC 2845 response signing).
+        response.keyname = query.keyname
+        response.keyring = query.keyring
+        response.request_mac = query.mac
+    return response
diff --git a/lib/dnspython/dns/name.py b/lib/dnspython/dns/name.py
new file mode 100644
index 0000000000..f239c9b5de
--- /dev/null
+++ b/lib/dnspython/dns/name.py
@@ -0,0 +1,700 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Names.
+
+@var root: The DNS root name.
+@type root: dns.name.Name object
+@var empty: The empty DNS name.
+@type empty: dns.name.Name object
+"""
+
+import cStringIO
+import struct
+import sys
+
+if sys.hexversion >= 0x02030000:
+ import encodings.idna
+
+import dns.exception
+
+NAMERELN_NONE = 0
+NAMERELN_SUPERDOMAIN = 1
+NAMERELN_SUBDOMAIN = 2
+NAMERELN_EQUAL = 3
+NAMERELN_COMMONANCESTOR = 4
+
+class EmptyLabel(dns.exception.SyntaxError):
+    """Raised if a label is empty (zero octets long) anywhere other
+    than the final, root-label position of a name."""
+    pass
+
+class BadEscape(dns.exception.SyntaxError):
+    """Raised if an escaped code in a text format name is invalid,
+    e.g. a non-digit follows the first digit of a \\DDD escape, or the
+    text ends in the middle of an escape."""
+    pass
+
+class BadPointer(dns.exception.FormError):
+    """Raised if a compression pointer points forward instead of backward
+    in the message; requiring strictly-backward pointers prevents
+    decompression loops."""
+    pass
+
+class BadLabelType(dns.exception.FormError):
+    """Raised if the label type of a wire format name is unknown, i.e.
+    the count octet is neither a plain label length (< 64) nor a
+    compression pointer (>= 192)."""
+    pass
+
+class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
+    """Raised if an attempt is made to convert a non-absolute name to
+    wire when there is also a non-absolute (or missing) origin; all
+    names on the wire are absolute."""
+    pass
+
+class NameTooLong(dns.exception.FormError):
+    """Raised if a name is > 255 octets long (in uncompressed wire
+    form); see _validate_labels()."""
+    pass
+
+class LabelTooLong(dns.exception.SyntaxError):
+    """Raised if a single label is > 63 octets long; see
+    _validate_labels()."""
+    pass
+
+class AbsoluteConcatenation(dns.exception.DNSException):
+    """Raised if an attempt is made to append anything other than the
+    empty name to an absolute name; an absolute name already ends in
+    the root label."""
+    pass
+
+class NoParent(dns.exception.DNSException):
+    """Raised if an attempt is made to get the parent of the root name
+    or the empty name."""
+    pass
+
+# Characters which must be backslash-escaped when a label is rendered
+# in master-file (text) form; consulted by _escapify() below.
+_escaped = {
+    '"' : True,
+    '(' : True,
+    ')' : True,
+    '.' : True,
+    ';' : True,
+    '\\' : True,
+    '@' : True,
+    '$' : True
+    }
+
+def _escapify(label):
+    """Escape the characters in label which need it.
+    @param label: the label to escape
+    @type label: string
+    @returns: the escaped string
+    @rtype: string"""
+    text = ''
+    for c in label:
+        if c in _escaped:
+            # Special master-file characters get a single backslash.
+            text += '\\' + c
+        elif ord(c) > 0x20 and ord(c) < 0x7F:
+            # Printable ASCII passes through unchanged.
+            text += c
+        else:
+            # Anything else becomes a three-digit decimal escape, e.g. \032.
+            text += '\\%03d' % ord(c)
+    return text
+
+def _validate_labels(labels):
+    """Check for empty labels in the middle of a label sequence,
+    labels that are too long, and for too many labels.
+    @param labels: the labels to check
+    @type labels: tuple or list of strings
+    @raises NameTooLong: the name as a whole is too long
+    @raises LabelTooLong: an individual label is too long
+    @raises EmptyLabel: a label is empty (i.e. the root label) and appears
+    in a position other than the end of the label sequence"""
+
+    l = len(labels)
+    total = 0    # wire-format size: each label costs its length + 1 count octet
+    i = -1       # index of the first empty label seen, or -1 if none yet
+    j = 0        # index of the current label
+    for label in labels:
+        ll = len(label)
+        total += ll + 1
+        if ll > 63:
+            raise LabelTooLong
+        if i < 0 and label == '':
+            i = j
+        j += 1
+    if total > 255:
+        raise NameTooLong
+    # An empty label is only legal as the final (root) label.
+    if i >= 0 and i != l - 1:
+        raise EmptyLabel
+
+class Name(object):
+    """A DNS name.
+
+    The dns.name.Name class represents a DNS name as a tuple of labels.
+    Labels are stored least-significant first (leftmost label first);
+    an absolute name ends with the empty (root) label.
+    Instances of the class are immutable.
+
+    @ivar labels: The tuple of labels in the name. Each label is a string of
+    up to 63 octets."""
+
+    __slots__ = ['labels']
+
+    def __init__(self, labels):
+        """Initialize a domain name from a list of labels.
+        @param labels: the labels
+        @type labels: any iterable whose values are strings
+        @raises NameTooLong: the name as a whole is too long
+        @raises LabelTooLong: an individual label is too long
+        @raises EmptyLabel: an empty label appears other than at the end
+        """
+
+        # Our own __setattr__ forbids assignment (immutability), so go
+        # straight to object's implementation to set the labels tuple.
+        super(Name, self).__setattr__('labels', tuple(labels))
+        _validate_labels(self.labels)
+
+    def __setattr__(self, name, value):
+        # Names are immutable; reject all attribute assignment.
+        raise TypeError("object doesn't support attribute assignment")
+
+    def is_absolute(self):
+        """Is the most significant label of this name the root label?
+        @rtype: bool
+        """
+
+        return len(self.labels) > 0 and self.labels[-1] == ''
+
+    def is_wild(self):
+        """Is this name wild? (I.e. Is the least significant label '*'?)
+        @rtype: bool
+        """
+
+        return len(self.labels) > 0 and self.labels[0] == '*'
+
+    def __hash__(self):
+        """Return a case-insensitive hash of the name.
+        @rtype: int
+        """
+
+        # Rolling hash over the lowercased octets of all labels, reduced
+        # to a plain int at the end.
+        h = 0L
+        for label in self.labels:
+            for c in label:
+                h += ( h << 3 ) + ord(c.lower())
+        return int(h % sys.maxint)
+
+    def fullcompare(self, other):
+        """Compare two names, returning a 3-tuple (relation, order, nlabels).
+
+        I{relation} describes the relationship between the names,
+        and is one of: dns.name.NAMERELN_NONE,
+        dns.name.NAMERELN_SUPERDOMAIN, dns.name.NAMERELN_SUBDOMAIN,
+        dns.name.NAMERELN_EQUAL, or dns.name.NAMERELN_COMMONANCESTOR
+
+        I{order} is < 0 if self < other, > 0 if self > other, and ==
+        0 if self == other.  A relative name is always less than an
+        absolute name.  If both names have the same relativity, then
+        the DNSSEC order relation is used to order them.
+
+        I{nlabels} is the number of significant labels that the two names
+        have in common.
+        """
+
+        sabs = self.is_absolute()
+        oabs = other.is_absolute()
+        if sabs != oabs:
+            # Names of different relativity are unrelated; the absolute
+            # name sorts after the relative one.
+            if sabs:
+                return (NAMERELN_NONE, 1, 0)
+            else:
+                return (NAMERELN_NONE, -1, 0)
+        l1 = len(self.labels)
+        l2 = len(other.labels)
+        ldiff = l1 - l2
+        if ldiff < 0:
+            l = l1
+        else:
+            l = l2
+
+        order = 0
+        nlabels = 0
+        namereln = NAMERELN_NONE
+        # Walk from the most significant (rightmost) label toward the
+        # least significant, comparing case-insensitively, until the
+        # names diverge or the shorter one is exhausted.
+        while l > 0:
+            l -= 1
+            l1 -= 1
+            l2 -= 1
+            label1 = self.labels[l1].lower()
+            label2 = other.labels[l2].lower()
+            if label1 < label2:
+                order = -1
+                if nlabels > 0:
+                    namereln = NAMERELN_COMMONANCESTOR
+                return (namereln, order, nlabels)
+            elif label1 > label2:
+                order = 1
+                if nlabels > 0:
+                    namereln = NAMERELN_COMMONANCESTOR
+                return (namereln, order, nlabels)
+            nlabels += 1
+        # All compared labels matched; any remaining length difference
+        # decides both the order and the super/sub-domain relation.
+        order = ldiff
+        if ldiff < 0:
+            namereln = NAMERELN_SUPERDOMAIN
+        elif ldiff > 0:
+            namereln = NAMERELN_SUBDOMAIN
+        else:
+            namereln = NAMERELN_EQUAL
+        return (namereln, order, nlabels)
+
+    def is_subdomain(self, other):
+        """Is self a subdomain of other?
+
+        The notion of subdomain includes equality.
+        @rtype: bool
+        """
+
+        (nr, o, nl) = self.fullcompare(other)
+        if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
+            return True
+        return False
+
+    def is_superdomain(self, other):
+        """Is self a superdomain of other?
+
+        The notion of superdomain includes equality.
+        @rtype: bool
+        """
+
+        (nr, o, nl) = self.fullcompare(other)
+        if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
+            return True
+        return False
+
+    def canonicalize(self):
+        """Return a name which is equal to the current name, but is in
+        DNSSEC canonical form (all labels lowercased).
+        @rtype: dns.name.Name object
+        """
+
+        return Name([x.lower() for x in self.labels])
+
+    def __eq__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] == 0
+        else:
+            return False
+
+    def __ne__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] != 0
+        else:
+            return True
+
+    def __lt__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] < 0
+        else:
+            return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] <= 0
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] >= 0
+        else:
+            return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, Name):
+            return self.fullcompare(other)[1] > 0
+        else:
+            return NotImplemented
+
+    def __repr__(self):
+        return '<DNS name ' + self.__str__() + '>'
+
+    def __str__(self):
+        return self.to_text(False)
+
+    def to_text(self, omit_final_dot = False):
+        """Convert name to text format.
+        @param omit_final_dot: If True, don't emit the final dot (denoting the
+        root label) for absolute names. The default is False.
+        @rtype: string
+        """
+
+        # The empty name renders as '@' and the root name as '.',
+        # following master-file conventions.
+        if len(self.labels) == 0:
+            return '@'
+        if len(self.labels) == 1 and self.labels[0] == '':
+            return '.'
+        if omit_final_dot and self.is_absolute():
+            l = self.labels[:-1]
+        else:
+            l = self.labels
+        s = '.'.join(map(_escapify, l))
+        return s
+
+    def to_unicode(self, omit_final_dot = False):
+        """Convert name to Unicode text format.
+
+        IDN ACE labels are converted to Unicode.
+
+        @param omit_final_dot: If True, don't emit the final dot (denoting the
+        root label) for absolute names. The default is False.
+        @rtype: string
+        """
+
+        if len(self.labels) == 0:
+            return u'@'
+        if len(self.labels) == 1 and self.labels[0] == '':
+            return u'.'
+        if omit_final_dot and self.is_absolute():
+            l = self.labels[:-1]
+        else:
+            l = self.labels
+        s = u'.'.join([encodings.idna.ToUnicode(_escapify(x)) for x in l])
+        return s
+
+    def to_digestable(self, origin=None):
+        """Convert name to a format suitable for digesting in hashes.
+
+        The name is canonicalized and converted to uncompressed wire format.
+
+        @param origin: If the name is relative and origin is not None, then
+        origin will be appended to it.
+        @type origin: dns.name.Name object
+        @raises NeedAbsoluteNameOrOrigin: All names in wire format are
+        absolute. If self is a relative name, then an origin must be supplied;
+        if it is missing, then this exception is raised
+        @rtype: string
+        """
+
+        if not self.is_absolute():
+            if origin is None or not origin.is_absolute():
+                raise NeedAbsoluteNameOrOrigin
+            labels = list(self.labels)
+            labels.extend(list(origin.labels))
+        else:
+            labels = self.labels
+        # Each label is emitted as a length octet followed by its
+        # lowercased (DNSSEC canonical) octets.
+        dlabels = ["%s%s" % (chr(len(x)), x.lower()) for x in labels]
+        return ''.join(dlabels)
+
+    def to_wire(self, file = None, compress = None, origin = None):
+        """Convert name to wire format, possibly compressing it.
+
+        @param file: the file where the name is emitted (typically
+        a cStringIO file). If None, a string containing the wire name
+        will be returned.
+        @type file: file or None
+        @param compress: The compression table. If None (the default) names
+        will not be compressed.
+        @type compress: dict
+        @param origin: If the name is relative and origin is not None, then
+        origin will be appended to it.
+        @type origin: dns.name.Name object
+        @raises NeedAbsoluteNameOrOrigin: All names in wire format are
+        absolute. If self is a relative name, then an origin must be supplied;
+        if it is missing, then this exception is raised
+        """
+
+        if file is None:
+            file = cStringIO.StringIO()
+            want_return = True
+        else:
+            want_return = False
+
+        if not self.is_absolute():
+            if origin is None or not origin.is_absolute():
+                raise NeedAbsoluteNameOrOrigin
+            labels = list(self.labels)
+            labels.extend(list(origin.labels))
+        else:
+            labels = self.labels
+        i = 0
+        for label in labels:
+            # n is the suffix of the name starting at the current label;
+            # whole suffixes are the unit of compression.
+            n = Name(labels[i:])
+            i += 1
+            if not compress is None:
+                pos = compress.get(n)
+            else:
+                pos = None
+            if not pos is None:
+                # This suffix was emitted earlier: write a two-octet
+                # compression pointer (top two bits set) and stop.
+                value = 0xc000 + pos
+                s = struct.pack('!H', value)
+                file.write(s)
+                break
+            else:
+                # Record where this suffix starts so later names can
+                # point at it; offsets >= 0xc000 cannot be encoded in
+                # a pointer, so they are not recorded.
+                if not compress is None and len(n) > 1:
+                    pos = file.tell()
+                    if pos < 0xc000:
+                        compress[n] = pos
+                l = len(label)
+                file.write(chr(l))
+                if l > 0:
+                    file.write(label)
+        if want_return:
+            return file.getvalue()
+
+    def __len__(self):
+        """The length of the name (in labels).
+        @rtype: int
+        """
+
+        return len(self.labels)
+
+    def __getitem__(self, index):
+        return self.labels[index]
+
+    def __getslice__(self, start, stop):
+        return self.labels[start:stop]
+
+    def __add__(self, other):
+        return self.concatenate(other)
+
+    def __sub__(self, other):
+        return self.relativize(other)
+
+    def split(self, depth):
+        """Split a name into a prefix and suffix at depth.
+
+        @param depth: the number of labels in the suffix
+        @type depth: int
+        @raises ValueError: the depth was not >= 0 and <= the length of the
+        name.
+        @returns: the tuple (prefix, suffix)
+        @rtype: tuple
+        """
+
+        l = len(self.labels)
+        if depth == 0:
+            return (self, dns.name.empty)
+        elif depth == l:
+            return (dns.name.empty, self)
+        elif depth < 0 or depth > l:
+            raise ValueError('depth must be >= 0 and <= the length of the name')
+        return (Name(self[: -depth]), Name(self[-depth :]))
+
+    def concatenate(self, other):
+        """Return a new name which is the concatenation of self and other.
+        @rtype: dns.name.Name object
+        @raises AbsoluteConcatenation: self is absolute and other is
+        not the empty name
+        """
+
+        if self.is_absolute() and len(other) > 0:
+            raise AbsoluteConcatenation
+        labels = list(self.labels)
+        labels.extend(list(other.labels))
+        return Name(labels)
+
+    def relativize(self, origin):
+        """If self is a subdomain of origin, return a new name which is self
+        relative to origin. Otherwise return self.
+        @rtype: dns.name.Name object
+        """
+
+        if not origin is None and self.is_subdomain(origin):
+            # Strip origin's labels off the end of self.
+            return Name(self[: -len(origin)])
+        else:
+            return self
+
+    def derelativize(self, origin):
+        """If self is a relative name, return a new name which is the
+        concatenation of self and origin. Otherwise return self.
+        @rtype: dns.name.Name object
+        """
+
+        if not self.is_absolute():
+            return self.concatenate(origin)
+        else:
+            return self
+
+    def choose_relativity(self, origin=None, relativize=True):
+        """Return a name with the relativity desired by the caller. If
+        origin is None, then self is returned. Otherwise, if
+        relativize is true the name is relativized, and if relativize is
+        false the name is derelativized.
+        @rtype: dns.name.Name object
+        """
+
+        if origin:
+            if relativize:
+                return self.relativize(origin)
+            else:
+                return self.derelativize(origin)
+        else:
+            return self
+
+    def parent(self):
+        """Return the parent of the name (the name with the least
+        significant label removed).
+        @rtype: dns.name.Name object
+        @raises NoParent: the name is either the root name or the empty name,
+        and thus has no parent.
+        """
+        if self == root or self == empty:
+            raise NoParent
+        return Name(self.labels[1:])
+
+# Module-level singletons: the DNS root name ('.') and the empty name.
+root = Name([''])
+empty = Name([])
+
+def from_unicode(text, origin = root):
+    """Convert unicode text into a Name object.
+
+    Labels are encoded in IDN ACE form.
+
+    @param text: the unicode text to convert
+    @type text: unicode string
+    @param origin: appended to the result if the text is not absolute;
+    defaults to the root name
+    @type origin: dns.name.Name object or None
+    @raises EmptyLabel: a label of zero length was seen
+    @raises BadEscape: the text ended inside a backslash escape
+    @rtype: dns.name.Name object
+    """
+
+    if not isinstance(text, unicode):
+        raise ValueError("input to from_unicode() must be a unicode string")
+    if not (origin is None or isinstance(origin, Name)):
+        raise ValueError("origin must be a Name or None")
+    labels = []
+    label = u''
+    escaping = False    # are we inside a backslash escape?
+    edigits = 0         # digits consumed so far in a \DDD escape
+    total = 0           # accumulated value of a \DDD escape
+    # '@' by itself denotes the origin, i.e. the empty relative name.
+    if text == u'@':
+        text = u''
+    if text:
+        if text == u'.':
+            return Name(['']) # no Unicode "u" on this constant!
+        for c in text:
+            if escaping:
+                if edigits == 0:
+                    if c.isdigit():
+                        # Start of a \DDD decimal escape.
+                        total = int(c)
+                        edigits += 1
+                    else:
+                        # A \X escape: X is taken literally.
+                        label += c
+                        escaping = False
+                else:
+                    # Once a digit has been seen, all three must be digits.
+                    if not c.isdigit():
+                        raise BadEscape
+                    total *= 10
+                    total += int(c)
+                    edigits += 1
+                    if edigits == 3:
+                        escaping = False
+                        label += chr(total)
+            elif c == u'.' or c == u'\u3002' or \
+                 c == u'\uff0e' or c == u'\uff61':
+                # '.' and its ideographic/fullwidth/halfwidth equivalents
+                # all act as label separators.
+                if len(label) == 0:
+                    raise EmptyLabel
+                labels.append(encodings.idna.ToASCII(label))
+                label = u''
+            elif c == u'\\':
+                escaping = True
+                edigits = 0
+                total = 0
+            else:
+                label += c
+        if escaping:
+            raise BadEscape
+        if len(label) > 0:
+            labels.append(encodings.idna.ToASCII(label))
+        else:
+            labels.append('')
+    # If the text was not absolute (no trailing root label), append the
+    # origin's labels.
+    if (len(labels) == 0 or labels[-1] != '') and not origin is None:
+        labels.extend(list(origin.labels))
+    return Name(labels)
+
+def from_text(text, origin = root):
+    """Convert text into a Name object.
+
+    @param text: the text to convert
+    @type text: string (a unicode string is delegated to from_unicode())
+    @param origin: appended to the result if the text is not absolute;
+    defaults to the root name
+    @type origin: dns.name.Name object or None
+    @raises EmptyLabel: a label of zero length was seen
+    @raises BadEscape: the text ended inside a backslash escape
+    @rtype: dns.name.Name object
+    """
+
+    if not isinstance(text, str):
+        if isinstance(text, unicode) and sys.hexversion >= 0x02030000:
+            return from_unicode(text, origin)
+        else:
+            raise ValueError("input to from_text() must be a string")
+    if not (origin is None or isinstance(origin, Name)):
+        raise ValueError("origin must be a Name or None")
+    labels = []
+    label = ''
+    escaping = False    # are we inside a backslash escape?
+    edigits = 0         # digits consumed so far in a \DDD escape
+    total = 0           # accumulated value of a \DDD escape
+    # '@' by itself denotes the origin, i.e. the empty relative name.
+    if text == '@':
+        text = ''
+    if text:
+        if text == '.':
+            return Name([''])
+        for c in text:
+            if escaping:
+                if edigits == 0:
+                    if c.isdigit():
+                        # Start of a \DDD decimal escape.
+                        total = int(c)
+                        edigits += 1
+                    else:
+                        # A \X escape: X is taken literally.
+                        label += c
+                        escaping = False
+                else:
+                    # Once a digit has been seen, all three must be digits.
+                    if not c.isdigit():
+                        raise BadEscape
+                    total *= 10
+                    total += int(c)
+                    edigits += 1
+                    if edigits == 3:
+                        escaping = False
+                        label += chr(total)
+            elif c == '.':
+                if len(label) == 0:
+                    raise EmptyLabel
+                labels.append(label)
+                label = ''
+            elif c == '\\':
+                escaping = True
+                edigits = 0
+                total = 0
+            else:
+                label += c
+        if escaping:
+            raise BadEscape
+        if len(label) > 0:
+            labels.append(label)
+        else:
+            labels.append('')
+    # If the text was not absolute (no trailing root label), append the
+    # origin's labels.
+    if (len(labels) == 0 or labels[-1] != '') and not origin is None:
+        labels.extend(list(origin.labels))
+    return Name(labels)
+
+def from_wire(message, current):
+    """Convert possibly compressed wire format into a Name.
+    @param message: the entire DNS message
+    @type message: string
+    @param current: the offset of the beginning of the name from the start
+    of the message
+    @type current: int
+    @raises dns.name.BadPointer: a compression pointer did not point backwards
+    in the message
+    @raises dns.name.BadLabelType: an invalid label type was encountered.
+    @returns: a tuple consisting of the name that was read and the number
+    of bytes of the wire format message which were consumed reading it
+    @rtype: (dns.name.Name object, int) tuple
+    """
+
+    if not isinstance(message, str):
+        raise ValueError("input to from_wire() must be a byte string")
+    labels = []
+    # Each pointer must target an offset strictly before any previously
+    # followed pointer (and before the name itself); this guarantees the
+    # decompression loop terminates.
+    biggest_pointer = current
+    hops = 0            # number of compression pointers followed so far
+    count = ord(message[current])
+    current += 1
+    cused = 1           # bytes consumed at the original location only
+    while count != 0:
+        if count < 64:
+            # Ordinary label: 'count' octets of data follow.
+            labels.append(message[current : current + count])
+            current += count
+            if hops == 0:
+                cused += count
+        elif count >= 192:
+            # Compression pointer: top two bits set, remaining 14 bits
+            # (6 from this octet + the next octet) are the target offset.
+            current = (count & 0x3f) * 256 + ord(message[current])
+            if hops == 0:
+                cused += 1
+            if current >= biggest_pointer:
+                raise BadPointer
+            biggest_pointer = current
+            hops += 1
+        else:
+            # Label types 0x40-0xBF are not supported.
+            raise BadLabelType
+        count = ord(message[current])
+        current += 1
+        if hops == 0:
+            cused += 1
+    # The terminating zero count is the root label.
+    labels.append('')
+    return (Name(labels), cused)
diff --git a/lib/dnspython/dns/namedict.py b/lib/dnspython/dns/namedict.py
new file mode 100644
index 0000000000..54afb77188
--- /dev/null
+++ b/lib/dnspython/dns/namedict.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS name dictionary"""
+
+import dns.name
+
+class NameDict(dict):
+
+    """A dictionary whose keys are dns.name.Name objects.
+    @ivar max_depth: the maximum depth of the keys that have ever been
+    added to the dictionary.
+    @type max_depth: int
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(NameDict, self).__init__(*args, **kwargs)
+        self.max_depth = 0
+
+    def __setitem__(self, key, value):
+        if not isinstance(key, dns.name.Name):
+            raise ValueError('NameDict key must be a name')
+        # Track the deepest key ever stored; get_deepest_match() uses it
+        # to bound its search.
+        depth = len(key)
+        if depth > self.max_depth:
+            self.max_depth = depth
+        super(NameDict, self).__setitem__(key, value)
+
+    def get_deepest_match(self, name):
+        """Find the deepest match to I{name} in the dictionary.
+
+        The deepest match is the longest name in the dictionary which is
+        a superdomain of I{name}.
+
+        @param name: the name
+        @type name: dns.name.Name object
+        @rtype: (key, value) tuple
+        """
+
+        depth = len(name)
+        if depth > self.max_depth:
+            depth = self.max_depth
+        # Try progressively shorter suffixes of name, deepest first.
+        for i in xrange(-depth, 0):
+            n = dns.name.Name(name[i:])
+            if self.has_key(n):
+                return (n, self[n])
+        # Fall back to the empty-name entry.
+        # NOTE(review): this raises KeyError if dns.name.empty is not a
+        # key -- presumably callers always install a default entry under
+        # the empty name; confirm before relying on it.
+        v = self[dns.name.empty]
+        return (dns.name.empty, v)
diff --git a/lib/dnspython/dns/node.py b/lib/dnspython/dns/node.py
new file mode 100644
index 0000000000..785a245464
--- /dev/null
+++ b/lib/dnspython/dns/node.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS nodes. A node is a set of rdatasets."""
+
+import StringIO
+
+import dns.rdataset
+import dns.rdatatype
+import dns.renderer
+
+class Node(object):
+    """A DNS node.
+
+    A node is a set of rdatasets
+
+    @ivar rdatasets: the node's rdatasets
+    @type rdatasets: list of dns.rdataset.Rdataset objects"""
+
+    __slots__ = ['rdatasets']
+
+    def __init__(self):
+        """Initialize a DNS node with an empty list of rdatasets.
+        """
+
+        self.rdatasets = [];
+
+    def to_text(self, name, **kw):
+        """Convert a node to text format.
+
+        Each rdataset at the node is printed. Any keyword arguments
+        to this method are passed on to the rdataset's to_text() method.
+        @param name: the owner name of the rdatasets
+        @type name: dns.name.Name object
+        @rtype: string
+        """
+
+        s = StringIO.StringIO()
+        for rds in self.rdatasets:
+            print >> s, rds.to_text(name, **kw)
+        # Drop the trailing newline added by the final print.
+        return s.getvalue()[:-1]
+
+    def __repr__(self):
+        return '<DNS node ' + str(id(self)) + '>'
+
+    def __eq__(self, other):
+        """Two nodes are equal if they have the same rdatasets,
+        regardless of order.
+
+        @rtype: bool
+        """
+        #
+        # This is inefficient. Good thing we don't need to do it much.
+        #
+        for rd in self.rdatasets:
+            if rd not in other.rdatasets:
+                return False
+        for rd in other.rdatasets:
+            if rd not in self.rdatasets:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __len__(self):
+        return len(self.rdatasets)
+
+    def __iter__(self):
+        return iter(self.rdatasets)
+
+    def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+                      create=False):
+        """Find an rdataset matching the specified properties in the
+        current node.
+
+        @param rdclass: The class of the rdataset
+        @type rdclass: int
+        @param rdtype: The type of the rdataset
+        @type rdtype: int
+        @param covers: The covered type. Usually this value is
+        dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+        dns.rdatatype.RRSIG, then the covers value will be the rdata
+        type the SIG/RRSIG covers. The library treats the SIG and RRSIG
+        types as if they were a family of
+        types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
+        easier to work with than if RRSIGs covering different rdata
+        types were aggregated into a single RRSIG rdataset.
+        @type covers: int
+        @param create: If True, create the rdataset if it is not found.
+        @type create: bool
+        @raises KeyError: An rdataset of the desired type and class does
+        not exist and I{create} is not True.
+        @rtype: dns.rdataset.Rdataset object
+        """
+
+        for rds in self.rdatasets:
+            if rds.match(rdclass, rdtype, covers):
+                return rds
+        if not create:
+            raise KeyError
+        # Not found: create, store, and return a new empty rdataset.
+        rds = dns.rdataset.Rdataset(rdclass, rdtype)
+        self.rdatasets.append(rds)
+        return rds
+
+    def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+                     create=False):
+        """Get an rdataset matching the specified properties in the
+        current node.
+
+        None is returned if an rdataset of the specified type and
+        class does not exist and I{create} is not True.
+
+        @param rdclass: The class of the rdataset
+        @type rdclass: int
+        @param rdtype: The type of the rdataset
+        @type rdtype: int
+        @param covers: The covered type.
+        @type covers: int
+        @param create: If True, create the rdataset if it is not found.
+        @type create: bool
+        @rtype: dns.rdataset.Rdataset object or None
+        """
+
+        # Same as find_rdataset(), but maps KeyError to None.
+        try:
+            rds = self.find_rdataset(rdclass, rdtype, covers, create)
+        except KeyError:
+            rds = None
+        return rds
+
+    def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
+        """Delete the rdataset matching the specified properties in the
+        current node.
+
+        If a matching rdataset does not exist, it is not an error.
+
+        @param rdclass: The class of the rdataset
+        @type rdclass: int
+        @param rdtype: The type of the rdataset
+        @type rdtype: int
+        @param covers: The covered type.
+        @type covers: int
+        """
+
+        rds = self.get_rdataset(rdclass, rdtype, covers)
+        if not rds is None:
+            self.rdatasets.remove(rds)
+
+    def replace_rdataset(self, replacement):
+        """Replace an rdataset.
+
+        It is not an error if there is no rdataset matching I{replacement}.
+
+        Ownership of the I{replacement} object is transferred to the node;
+        in other words, this method does not store a copy of I{replacement}
+        at the node, it stores I{replacement} itself.
+        """
+
+        # Remove any existing rdataset with the same (class, type, covers),
+        # then adopt the replacement as-is.
+        self.delete_rdataset(replacement.rdclass, replacement.rdtype,
+                             replacement.covers)
+        self.rdatasets.append(replacement)
diff --git a/lib/dnspython/dns/opcode.py b/lib/dnspython/dns/opcode.py
new file mode 100644
index 0000000000..705bd09a43
--- /dev/null
+++ b/lib/dnspython/dns/opcode.py
@@ -0,0 +1,104 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Opcodes."""
+
+import dns.exception
+
+# DNS opcode values (note there is no constant for value 3).
+QUERY = 0
+IQUERY = 1
+STATUS = 2
+NOTIFY = 4
+UPDATE = 5
+
+# Mapping from textual mnemonic to opcode value.
+_by_text = {
+    'QUERY' : QUERY,
+    'IQUERY' : IQUERY,
+    'STATUS' : STATUS,
+    'NOTIFY' : NOTIFY,
+    'UPDATE' : UPDATE
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+
+class UnknownOpcode(dns.exception.DNSException):
+    """Raised if an opcode is unknown (not a known mnemonic and not a
+    number in the range 0..15)."""
+    pass
+
+def from_text(text):
+    """Convert text into an opcode.
+
+    Both mnemonics (case-insensitive) and decimal numbers in the
+    range 0..15 are accepted.
+
+    @param text: the textual opcode
+    @type text: string
+    @raises UnknownOpcode: the opcode is unknown
+    @rtype: int
+    """
+
+    # A decimal number in the 4-bit opcode range is accepted directly.
+    if text.isdigit():
+        value = int(text)
+        if value >= 0 and value <= 15:
+            return value
+    value = _by_text.get(text.upper())
+    if value is None:
+        raise UnknownOpcode
+    return value
+
+def from_flags(flags):
+    """Extract an opcode from DNS message flags.
+
+    @param flags: the DNS message flags
+    @type flags: int
+    @rtype: int
+    """
+
+    # The opcode occupies bits 11-14 of the flags word (mask 0x7800).
+    return (flags & 0x7800) >> 11
+
+def to_flags(value):
+    """Convert an opcode to a value suitable for ORing into DNS message
+    flags.
+    @param value: the opcode
+    @type value: int
+    @rtype: int
+    """
+
+    # Inverse of from_flags(): place the opcode in bits 11-14.
+    return (value << 11) & 0x7800
+
+def to_text(value):
+    """Convert an opcode to text.
+
+    Unknown opcodes are rendered as their decimal value.
+
+    @param value: the opcode
+    @type value: int
+    @rtype: string
+    """
+
+    text = _by_value.get(value)
+    if text is None:
+        text = str(value)
+    return text
+
+def is_update(flags):
+    """True if the opcode in flags is UPDATE.
+
+    @param flags: DNS flags
+    @type flags: int
+    @rtype: bool
+    """
+
+    if (from_flags(flags) == UPDATE):
+        return True
+    return False
diff --git a/lib/dnspython/dns/query.py b/lib/dnspython/dns/query.py
new file mode 100644
index 0000000000..9dc88a635c
--- /dev/null
+++ b/lib/dnspython/dns/query.py
@@ -0,0 +1,492 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Talk to a DNS server."""
+
+from __future__ import generators
+
+import errno
+import select
+import socket
+import struct
+import sys
+import time
+
+import dns.exception
+import dns.inet
+import dns.name
+import dns.message
+import dns.rdataclass
+import dns.rdatatype
+
+class UnexpectedSource(dns.exception.DNSException):
+ """Raised if a query response comes from an unexpected address or port."""
+ pass
+
+class BadResponse(dns.exception.FormError):
+ """Raised if a query response does not respond to the question asked."""
+ pass
+
+def _compute_expiration(timeout):
+ if timeout is None:
+ return None
+ else:
+ return time.time() + timeout
+
+def _poll_for(fd, readable, writable, error, timeout):
+ """
+ @param fd: File descriptor (int).
+ @param readable: Whether to wait for readability (bool).
+ @param writable: Whether to wait for writability (bool).
+    @param timeout: Maximum time to wait, in seconds (float); a falsy value means wait indefinitely.
+
+ @return True on success, False on timeout
+ """
+ event_mask = 0
+ if readable:
+ event_mask |= select.POLLIN
+ if writable:
+ event_mask |= select.POLLOUT
+ if error:
+ event_mask |= select.POLLERR
+
+ pollable = select.poll()
+ pollable.register(fd, event_mask)
+
+ if timeout:
+ event_list = pollable.poll(long(timeout * 1000))
+ else:
+ event_list = pollable.poll()
+
+ return bool(event_list)
+
+def _select_for(fd, readable, writable, error, timeout):
+ """
+ @param fd: File descriptor (int).
+ @param readable: Whether to wait for readability (bool).
+ @param writable: Whether to wait for writability (bool).
+    @param timeout: Maximum time to wait, in seconds (float); None means wait indefinitely.
+
+ @return True on success, False on timeout
+ """
+ rset, wset, xset = [], [], []
+
+ if readable:
+ rset = [fd]
+ if writable:
+ wset = [fd]
+ if error:
+ xset = [fd]
+
+ if timeout is None:
+ (rcount, wcount, xcount) = select.select(rset, wset, xset)
+ else:
+ (rcount, wcount, xcount) = select.select(rset, wset, xset, timeout)
+
+ return bool((rcount or wcount or xcount))
+
+def _wait_for(fd, readable, writable, error, expiration):
+ done = False
+ while not done:
+ if expiration is None:
+ timeout = None
+ else:
+ timeout = expiration - time.time()
+ if timeout <= 0.0:
+ raise dns.exception.Timeout
+ try:
+ if not _polling_backend(fd, readable, writable, error, timeout):
+ raise dns.exception.Timeout
+ except select.error, e:
+ if e.args[0] != errno.EINTR:
+ raise e
+ done = True
+
+def _set_polling_backend(fn):
+ """
+ Internal API. Do not use.
+ """
+ global _polling_backend
+
+ _polling_backend = fn
+
+if hasattr(select, 'poll'):
+ # Prefer poll() on platforms that support it because it has no
+ # limits on the maximum value of a file descriptor (plus it will
+ # be more efficient for high values).
+ _polling_backend = _poll_for
+else:
+ _polling_backend = _select_for
+
+def _wait_for_readable(s, expiration):
+ _wait_for(s, True, False, True, expiration)
+
+def _wait_for_writable(s, expiration):
+ _wait_for(s, False, True, True, expiration)
+
+def _addresses_equal(af, a1, a2):
+ # Convert the first value of the tuple, which is a textual format
+ # address into binary form, so that we are not confused by different
+ # textual representations of the same address
+ n1 = dns.inet.inet_pton(af, a1[0])
+ n2 = dns.inet.inet_pton(af, a2[0])
+ return n1 == n2 and a1[1:] == a2[1:]
+
+def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+ ignore_unexpected=False, one_rr_per_rrset=False):
+ """Return the response obtained after sending a query via UDP.
+
+ @param q: the query
+ @type q: dns.message.Message
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+ @param af: the address family to use. The default is None, which
+    causes the address family to use to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the IPv4 wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param ignore_unexpected: If True, ignore responses from unexpected
+ sources. The default is False.
+ @type ignore_unexpected: bool
+ @param one_rr_per_rrset: Put each RR into its own RRset
+ @type one_rr_per_rrset: bool
+ """
+
+ wire = q.to_wire()
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None:
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ destination = (where, port, 0, 0)
+ if source is not None:
+ source = (source, source_port, 0, 0)
+ s = socket.socket(af, socket.SOCK_DGRAM, 0)
+ try:
+ expiration = _compute_expiration(timeout)
+ s.setblocking(0)
+ if source is not None:
+ s.bind(source)
+ _wait_for_writable(s, expiration)
+ s.sendto(wire, destination)
+ while 1:
+ _wait_for_readable(s, expiration)
+ (wire, from_address) = s.recvfrom(65535)
+ if _addresses_equal(af, from_address, destination) or \
+ (dns.inet.is_multicast(where) and \
+ from_address[1:] == destination[1:]):
+ break
+ if not ignore_unexpected:
+ raise UnexpectedSource('got a response from '
+ '%s instead of %s' % (from_address,
+ destination))
+ finally:
+ s.close()
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset)
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+def _net_read(sock, count, expiration):
+ """Read the specified number of bytes from sock. Keep trying until we
+ either get the desired amount, or we hit EOF.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ s = ''
+ while count > 0:
+ _wait_for_readable(sock, expiration)
+ n = sock.recv(count)
+ if n == '':
+ raise EOFError
+ count = count - len(n)
+ s = s + n
+ return s
+
+def _net_write(sock, data, expiration):
+ """Write the specified data to the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ current = 0
+ l = len(data)
+ while current < l:
+ _wait_for_writable(sock, expiration)
+ current += sock.send(data[current:])
+
+def _connect(s, address):
+ try:
+ s.connect(address)
+ except socket.error:
+ (ty, v) = sys.exc_info()[:2]
+ if v[0] != errno.EINPROGRESS and \
+ v[0] != errno.EWOULDBLOCK and \
+ v[0] != errno.EALREADY:
+ raise v
+
+def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+ one_rr_per_rrset=False):
+ """Return the response obtained after sending a query via TCP.
+
+ @param q: the query
+ @type q: dns.message.Message object
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+ @param af: the address family to use. The default is None, which
+    causes the address family to use to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the IPv4 wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param one_rr_per_rrset: Put each RR into its own RRset
+ @type one_rr_per_rrset: bool
+ """
+
+ wire = q.to_wire()
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None:
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ destination = (where, port, 0, 0)
+ if source is not None:
+ source = (source, source_port, 0, 0)
+ s = socket.socket(af, socket.SOCK_STREAM, 0)
+ try:
+ expiration = _compute_expiration(timeout)
+ s.setblocking(0)
+ if source is not None:
+ s.bind(source)
+ _connect(s, destination)
+
+ l = len(wire)
+
+ # copying the wire into tcpmsg is inefficient, but lets us
+ # avoid writev() or doing a short write that would get pushed
+ # onto the net
+ tcpmsg = struct.pack("!H", l) + wire
+ _net_write(s, tcpmsg, expiration)
+ ldata = _net_read(s, 2, expiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = _net_read(s, l, expiration)
+ finally:
+ s.close()
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset)
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
+ timeout=None, port=53, keyring=None, keyname=None, relativize=True,
+ af=None, lifetime=None, source=None, source_port=0, serial=0,
+ use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
+ """Return a generator for the responses to a zone transfer.
+
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param zone: The name of the zone to transfer
+ @type zone: dns.name.Name object or string
+ @param rdtype: The type of zone transfer. The default is
+ dns.rdatatype.AXFR.
+ @type rdtype: int or string
+ @param rdclass: The class of the zone transfer. The default is
+ dns.rdatatype.IN.
+ @type rdclass: int or string
+ @param timeout: The number of seconds to wait for each response message.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+ @param keyring: The TSIG keyring to use
+ @type keyring: dict
+ @param keyname: The name of the TSIG key to use
+ @type keyname: dns.name.Name object or string
+ @param relativize: If True, all names in the zone will be relativized to
+ the zone origin. It is essential that the relativize setting matches
+ the one specified to dns.zone.from_xfr().
+ @type relativize: bool
+ @param af: the address family to use. The default is None, which
+    causes the address family to use to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @param lifetime: The total number of seconds to spend doing the transfer.
+ If None, the default, then there is no limit on the time the transfer may
+ take.
+ @type lifetime: float
+ @rtype: generator of dns.message.Message objects.
+ @param source: source address. The default is the IPv4 wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param serial: The SOA serial number to use as the base for an IXFR diff
+ sequence (only meaningful if rdtype == dns.rdatatype.IXFR).
+ @type serial: int
+ @param use_udp: Use UDP (only meaningful for IXFR)
+ @type use_udp: bool
+ @param keyalgorithm: The TSIG algorithm to use; defaults to
+ dns.tsig.default_algorithm
+ @type keyalgorithm: string
+ """
+
+ if isinstance(zone, (str, unicode)):
+ zone = dns.name.from_text(zone)
+ if isinstance(rdtype, (str, unicode)):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ q = dns.message.make_query(zone, rdtype, rdclass)
+ if rdtype == dns.rdatatype.IXFR:
+ rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
+ '. . %u 0 0 0 0' % serial)
+ q.authority.append(rrset)
+ if not keyring is None:
+ q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+ wire = q.to_wire()
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None:
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ destination = (where, port, 0, 0)
+ if source is not None:
+ source = (source, source_port, 0, 0)
+ if use_udp:
+ if rdtype != dns.rdatatype.IXFR:
+ raise ValueError('cannot do a UDP AXFR')
+ s = socket.socket(af, socket.SOCK_DGRAM, 0)
+ else:
+ s = socket.socket(af, socket.SOCK_STREAM, 0)
+ s.setblocking(0)
+ if source is not None:
+ s.bind(source)
+ expiration = _compute_expiration(lifetime)
+ _connect(s, destination)
+ l = len(wire)
+ if use_udp:
+ _wait_for_writable(s, expiration)
+ s.send(wire)
+ else:
+ tcpmsg = struct.pack("!H", l) + wire
+ _net_write(s, tcpmsg, expiration)
+ done = False
+ soa_rrset = None
+ soa_count = 0
+ if relativize:
+ origin = zone
+ oname = dns.name.empty
+ else:
+ origin = None
+ oname = zone
+ tsig_ctx = None
+ first = True
+ while not done:
+ mexpiration = _compute_expiration(timeout)
+ if mexpiration is None or mexpiration > expiration:
+ mexpiration = expiration
+ if use_udp:
+ _wait_for_readable(s, expiration)
+ (wire, from_address) = s.recvfrom(65535)
+ else:
+ ldata = _net_read(s, 2, mexpiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = _net_read(s, l, mexpiration)
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ xfr=True, origin=origin, tsig_ctx=tsig_ctx,
+ multi=True, first=first,
+ one_rr_per_rrset=(rdtype==dns.rdatatype.IXFR))
+ tsig_ctx = r.tsig_ctx
+ first = False
+ answer_index = 0
+ delete_mode = False
+ expecting_SOA = False
+ if soa_rrset is None:
+ if not r.answer or r.answer[0].name != oname:
+ raise dns.exception.FormError
+ rrset = r.answer[0]
+ if rrset.rdtype != dns.rdatatype.SOA:
+ raise dns.exception.FormError("first RRset is not an SOA")
+ answer_index = 1
+ soa_rrset = rrset.copy()
+ if rdtype == dns.rdatatype.IXFR:
+ if soa_rrset[0].serial == serial:
+ #
+ # We're already up-to-date.
+ #
+ done = True
+ else:
+ expecting_SOA = True
+ #
+ # Process SOAs in the answer section (other than the initial
+ # SOA in the first message).
+ #
+ for rrset in r.answer[answer_index:]:
+ if done:
+ raise dns.exception.FormError("answers after final SOA")
+ if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
+ if expecting_SOA:
+ if rrset[0].serial != serial:
+ raise dns.exception.FormError("IXFR base serial mismatch")
+ expecting_SOA = False
+ elif rdtype == dns.rdatatype.IXFR:
+ delete_mode = not delete_mode
+ if rrset == soa_rrset and not delete_mode:
+ done = True
+ elif expecting_SOA:
+ #
+ # We made an IXFR request and are expecting another
+ # SOA RR, but saw something else, so this must be an
+ # AXFR response.
+ #
+ rdtype = dns.rdatatype.AXFR
+ expecting_SOA = False
+ if done and q.keyring and not r.had_tsig:
+ raise dns.exception.FormError("missing TSIG")
+ yield r
+ s.close()
diff --git a/lib/dnspython/dns/rcode.py b/lib/dnspython/dns/rcode.py
new file mode 100644
index 0000000000..c055f2e7cd
--- /dev/null
+++ b/lib/dnspython/dns/rcode.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Result Codes."""
+
+import dns.exception
+
+NOERROR = 0
+FORMERR = 1
+SERVFAIL = 2
+NXDOMAIN = 3
+NOTIMP = 4
+REFUSED = 5
+YXDOMAIN = 6
+YXRRSET = 7
+NXRRSET = 8
+NOTAUTH = 9
+NOTZONE = 10
+BADVERS = 16
+
+_by_text = {
+ 'NOERROR' : NOERROR,
+ 'FORMERR' : FORMERR,
+ 'SERVFAIL' : SERVFAIL,
+ 'NXDOMAIN' : NXDOMAIN,
+ 'NOTIMP' : NOTIMP,
+ 'REFUSED' : REFUSED,
+ 'YXDOMAIN' : YXDOMAIN,
+ 'YXRRSET' : YXRRSET,
+ 'NXRRSET' : NXRRSET,
+ 'NOTAUTH' : NOTAUTH,
+ 'NOTZONE' : NOTZONE,
+ 'BADVERS' : BADVERS
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+
+class UnknownRcode(dns.exception.DNSException):
+ """Raised if an rcode is unknown."""
+ pass
+
+def from_text(text):
+ """Convert text into an rcode.
+
+    @param text: the textual rcode
+ @type text: string
+ @raises UnknownRcode: the rcode is unknown
+ @rtype: int
+ """
+
+ if text.isdigit():
+ v = int(text)
+ if v >= 0 and v <= 4095:
+ return v
+ v = _by_text.get(text.upper())
+ if v is None:
+ raise UnknownRcode
+ return v
+
+def from_flags(flags, ednsflags):
+ """Return the rcode value encoded by flags and ednsflags.
+
+ @param flags: the DNS flags
+ @type flags: int
+ @param ednsflags: the EDNS flags
+ @type ednsflags: int
+ @raises ValueError: rcode is < 0 or > 4095
+ @rtype: int
+ """
+
+ value = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0)
+ if value < 0 or value > 4095:
+ raise ValueError('rcode must be >= 0 and <= 4095')
+ return value
+
+def to_flags(value):
+ """Return a (flags, ednsflags) tuple which encodes the rcode.
+
+ @param value: the rcode
+ @type value: int
+ @raises ValueError: rcode is < 0 or > 4095
+ @rtype: (int, int) tuple
+ """
+
+ if value < 0 or value > 4095:
+ raise ValueError('rcode must be >= 0 and <= 4095')
+ v = value & 0xf
+ ev = long(value & 0xff0) << 20
+ return (v, ev)
+
+def to_text(value):
+ """Convert rcode into text.
+
+ @param value: the rcode
+ @type value: int
+ @rtype: string
+ """
+
+ text = _by_value.get(value)
+ if text is None:
+ text = str(value)
+ return text
diff --git a/lib/dnspython/dns/rdata.py b/lib/dnspython/dns/rdata.py
new file mode 100644
index 0000000000..399677e984
--- /dev/null
+++ b/lib/dnspython/dns/rdata.py
@@ -0,0 +1,460 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata.
+
+@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
+the module which implements that type.
+@type _rdata_modules: dict
+@var _module_prefix: The prefix to use when forming modules names. The
+default is 'dns.rdtypes'. Changing this value will break the library.
+@type _module_prefix: string
+@var _hex_chunk: At most this many octets that will be represented in each
+chunk of hexstring that _hexify() produces before whitespace occurs.
+@type _hex_chunk: int"""
+
+import cStringIO
+
+import dns.exception
+import dns.name
+import dns.rdataclass
+import dns.rdatatype
+import dns.tokenizer
+
+_hex_chunksize = 32
+
+def _hexify(data, chunksize=None):
+ """Convert a binary string into its hex encoding, broken up into chunks
+ of I{chunksize} characters separated by a space.
+
+ @param data: the binary string
+ @type data: string
+ @param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
+ @rtype: string
+ """
+
+ if chunksize is None:
+ chunksize = _hex_chunksize
+ hex = data.encode('hex_codec')
+ l = len(hex)
+ if l > chunksize:
+ chunks = []
+ i = 0
+ while i < l:
+ chunks.append(hex[i : i + chunksize])
+ i += chunksize
+ hex = ' '.join(chunks)
+ return hex
+
+_base64_chunksize = 32
+
+def _base64ify(data, chunksize=None):
+ """Convert a binary string into its base64 encoding, broken up into chunks
+ of I{chunksize} characters separated by a space.
+
+ @param data: the binary string
+ @type data: string
+ @param chunksize: the chunk size. Default is
+ L{dns.rdata._base64_chunksize}
+ @rtype: string
+ """
+
+ if chunksize is None:
+ chunksize = _base64_chunksize
+ b64 = data.encode('base64_codec')
+ b64 = b64.replace('\n', '')
+ l = len(b64)
+ if l > chunksize:
+ chunks = []
+ i = 0
+ while i < l:
+ chunks.append(b64[i : i + chunksize])
+ i += chunksize
+ b64 = ' '.join(chunks)
+ return b64
+
+__escaped = {
+ '"' : True,
+ '\\' : True,
+ }
+
+def _escapify(qstring):
+ """Escape the characters in a quoted string which need it.
+
+ @param qstring: the string
+ @type qstring: string
+ @returns: the escaped string
+ @rtype: string
+ """
+
+ text = ''
+ for c in qstring:
+ if c in __escaped:
+ text += '\\' + c
+ elif ord(c) >= 0x20 and ord(c) < 0x7F:
+ text += c
+ else:
+ text += '\\%03d' % ord(c)
+ return text
+
+def _truncate_bitmap(what):
+ """Determine the index of greatest byte that isn't all zeros, and
+ return the bitmap that contains all the bytes less than that index.
+
+ @param what: a string of octets representing a bitmap.
+ @type what: string
+ @rtype: string
+ """
+
+ for i in xrange(len(what) - 1, -1, -1):
+ if what[i] != '\x00':
+ break
+ return ''.join(what[0 : i + 1])
+
+class Rdata(object):
+ """Base class for all DNS rdata types.
+ """
+
+ __slots__ = ['rdclass', 'rdtype']
+
+ def __init__(self, rdclass, rdtype):
+ """Initialize an rdata.
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ """
+
+ self.rdclass = rdclass
+ self.rdtype = rdtype
+
+ def covers(self):
+ """DNS SIG/RRSIG rdatas apply to a specific type; this type is
+ returned by the covers() function. If the rdata type is not
+ SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
+ creating rdatasets, allowing the rdataset to contain only RRSIGs
+ of a particular type, e.g. RRSIG(NS).
+ @rtype: int
+ """
+
+ return dns.rdatatype.NONE
+
+ def extended_rdatatype(self):
+ """Return a 32-bit type value, the least significant 16 bits of
+ which are the ordinary DNS type, and the upper 16 bits of which are
+ the "covered" type, if any.
+ @rtype: int
+ """
+
+ return self.covers() << 16 | self.rdtype
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ """Convert an rdata to text format.
+ @rtype: string
+ """
+ raise NotImplementedError
+
+ def to_wire(self, file, compress = None, origin = None):
+ """Convert an rdata to wire format.
+ @rtype: string
+ """
+
+ raise NotImplementedError
+
+ def to_digestable(self, origin = None):
+ """Convert rdata to a format suitable for digesting in hashes. This
+ is also the DNSSEC canonical form."""
+ f = cStringIO.StringIO()
+ self.to_wire(f, None, origin)
+ return f.getvalue()
+
+ def validate(self):
+ """Check that the current contents of the rdata's fields are
+ valid. If you change an rdata by assigning to its fields,
+ it is a good idea to call validate() when you are done making
+ changes.
+ """
+ dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
+
+ def __repr__(self):
+ covers = self.covers()
+ if covers == dns.rdatatype.NONE:
+ ctext = ''
+ else:
+ ctext = '(' + dns.rdatatype.to_text(covers) + ')'
+ return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
+ dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
+ str(self) + '>'
+
+ def __str__(self):
+ return self.to_text()
+
+ def _cmp(self, other):
+ """Compare an rdata with another rdata of the same rdtype and
+ rdclass. Return < 0 if self < other in the DNSSEC ordering,
+ 0 if self == other, and > 0 if self > other.
+ """
+
+ raise NotImplementedError
+
+ def __eq__(self, other):
+ if not isinstance(other, Rdata):
+ return False
+ if self.rdclass != other.rdclass or \
+ self.rdtype != other.rdtype:
+ return False
+ return self._cmp(other) == 0
+
+ def __ne__(self, other):
+ if not isinstance(other, Rdata):
+ return True
+ if self.rdclass != other.rdclass or \
+ self.rdtype != other.rdtype:
+ return True
+ return self._cmp(other) != 0
+
+ def __lt__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or \
+ self.rdtype != other.rdtype:
+ return NotImplemented
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or \
+ self.rdtype != other.rdtype:
+ return NotImplemented
+ return self._cmp(other) <= 0
+
+ def __ge__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or \
+ self.rdtype != other.rdtype:
+ return NotImplemented
+ return self._cmp(other) >= 0
+
+ def __gt__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or \
+ self.rdtype != other.rdtype:
+ return NotImplemented
+ return self._cmp(other) > 0
+
+ def __hash__(self):
+ return hash(self.to_digestable(dns.name.root))
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ """Build an rdata object from text format.
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param tok: The tokenizer
+ @type tok: dns.tokenizer.Tokenizer
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @param relativize: should names be relativized?
+ @type relativize: bool
+ @rtype: dns.rdata.Rdata instance
+ """
+
+ raise NotImplementedError
+
+ from_text = classmethod(from_text)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ """Build an rdata object from wire format
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param wire: The wire-format message
+ @type wire: string
+    @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param rdlen: The length of the wire-format rdata
+ @type rdlen: int
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @rtype: dns.rdata.Rdata instance
+ """
+
+ raise NotImplementedError
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ """Convert any domain names in the rdata to the specified
+ relativization.
+ """
+
+ pass
+
+
+class GenericRdata(Rdata):
+ """Generate Rdata Class
+
+ This class is used for rdata types for which we have no better
+ implementation. It implements the DNS "unknown RRs" scheme.
+ """
+
+ __slots__ = ['data']
+
+ def __init__(self, rdclass, rdtype, data):
+ super(GenericRdata, self).__init__(rdclass, rdtype)
+ self.data = data
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return r'\# %d ' % len(self.data) + _hexify(self.data)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ token = tok.get()
+ if not token.is_identifier() or token.value != '\#':
+ raise dns.exception.SyntaxError(r'generic rdata does not start with \#')
+ length = tok.get_int()
+ chunks = []
+ while 1:
+ token = tok.get()
+ if token.is_eol_or_eof():
+ break
+ chunks.append(token.value)
+ hex = ''.join(chunks)
+ data = hex.decode('hex_codec')
+ if len(data) != length:
+ raise dns.exception.SyntaxError('generic rdata hex data has wrong length')
+ return cls(rdclass, rdtype, data)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ file.write(self.data)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ return cls(rdclass, rdtype, wire[current : current + rdlen])
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ return cmp(self.data, other.data)
+
+_rdata_modules = {}
+_module_prefix = 'dns.rdtypes'
+
+def get_rdata_class(rdclass, rdtype):
+
+ def import_module(name):
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+ mod = _rdata_modules.get((rdclass, rdtype))
+ rdclass_text = dns.rdataclass.to_text(rdclass)
+ rdtype_text = dns.rdatatype.to_text(rdtype)
+ rdtype_text = rdtype_text.replace('-', '_')
+ if not mod:
+ mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
+ if not mod:
+ try:
+ mod = import_module('.'.join([_module_prefix,
+ rdclass_text, rdtype_text]))
+ _rdata_modules[(rdclass, rdtype)] = mod
+ except ImportError:
+ try:
+ mod = import_module('.'.join([_module_prefix,
+ 'ANY', rdtype_text]))
+ _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
+ except ImportError:
+ mod = None
+ if mod:
+ cls = getattr(mod, rdtype_text)
+ else:
+ cls = GenericRdata
+ return cls
+
+def from_text(rdclass, rdtype, tok, origin = None, relativize = True):
+ """Build an rdata object from text format.
+
+ This function attempts to dynamically load a class which
+ implements the specified rdata class and type. If there is no
+ class-and-type-specific implementation, the GenericRdata class
+ is used.
+
+ Once a class is chosen, its from_text() class method is called
+ with the parameters to this function.
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param tok: The tokenizer
+ @type tok: dns.tokenizer.Tokenizer
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @param relativize: Should names be relativized?
+ @type relativize: bool
+ @rtype: dns.rdata.Rdata instance"""
+
+ if isinstance(tok, str):
+ tok = dns.tokenizer.Tokenizer(tok)
+ cls = get_rdata_class(rdclass, rdtype)
+ if cls != GenericRdata:
+ # peek at first token
+ token = tok.get()
+ tok.unget(token)
+ if token.is_identifier() and \
+ token.value == r'\#':
+ #
+ # Known type using the generic syntax. Extract the
+ # wire form from the generic syntax, and then run
+ # from_wire on it.
+ #
+ rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
+ relativize)
+ return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
+ origin)
+ return cls.from_text(rdclass, rdtype, tok, origin, relativize)
+
+def from_wire(rdclass, rdtype, wire, current, rdlen, origin = None):
+ """Build an rdata object from wire format
+
+ This function attempts to dynamically load a class which
+ implements the specified rdata class and type. If there is no
+ class-and-type-specific implementation, the GenericRdata class
+ is used.
+
+ Once a class is chosen, its from_wire() class method is called
+ with the parameters to this function.
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param wire: The wire-format message
+ @type wire: string
+    @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param rdlen: The length of the wire-format rdata
+ @type rdlen: int
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @rtype: dns.rdata.Rdata instance"""
+
+ cls = get_rdata_class(rdclass, rdtype)
+ return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
diff --git a/lib/dnspython/dns/rdataclass.py b/lib/dnspython/dns/rdataclass.py
new file mode 100644
index 0000000000..887fd1ad6b
--- /dev/null
+++ b/lib/dnspython/dns/rdataclass.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Classes.
+
+@var _by_text: The rdata class textual name to value mapping
+@type _by_text: dict
+@var _by_value: The rdata class value to textual name mapping
+@type _by_value: dict
+@var _metaclasses: If an rdataclass is a metaclass, there will be a mapping
+whose key is the rdataclass value and whose value is True in this dictionary.
+@type _metaclasses: dict"""
+
+import re
+
+import dns.exception
+
+RESERVED0 = 0
+IN = 1
+CH = 3
+HS = 4
+NONE = 254
+ANY = 255
+
+_by_text = {
+ 'RESERVED0' : RESERVED0,
+ 'IN' : IN,
+ 'CH' : CH,
+ 'HS' : HS,
+ 'NONE' : NONE,
+ 'ANY' : ANY
+ }
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+# Now that we've built the inverse map, we can add class aliases to
+# the _by_text mapping.
+
+_by_text.update({
+ 'INTERNET' : IN,
+ 'CHAOS' : CH,
+ 'HESIOD' : HS
+ })
+
+_metaclasses = {
+ NONE : True,
+ ANY : True
+ }
+
+_unknown_class_pattern = re.compile('CLASS([0-9]+)$', re.I);
+
class UnknownRdataclass(dns.exception.DNSException):
    """Raised when a class is unknown: the text is neither a known class
    mnemonic nor of the generic "CLASSnnn" form."""
    pass
+
def from_text(text):
    """Convert text into a DNS rdata class value.
    @param text: the text
    @type text: string
    @rtype: int
    @raises dns.rdataclass.UnknownRdataclass: the class is unknown
    @raises ValueError: the rdata class value is not >= 0 and <= 65535
    """

    value = _by_text.get(text.upper())
    if value is None:
        # Not a known mnemonic; accept the RFC 3597 generic "CLASSnnn" form.
        match = _unknown_class_pattern.match(text)
        if match is None:
            raise UnknownRdataclass
        value = int(match.group(1))
        if value < 0 or value > 65535:
            raise ValueError("class must be between >= 0 and <= 65535")
    return value
+
def to_text(value):
    """Convert a DNS rdata class to text.
    @param value: the rdata class value
    @type value: int
    @rtype: string
    @raises ValueError: the rdata class value is not >= 0 and <= 65535
    """

    if value < 0 or value > 65535:
        raise ValueError("class must be between >= 0 and <= 65535")
    text = _by_value.get(value)
    if text is None:
        # Unknown class: emit the RFC 3597 generic form.  str() replaces the
        # deprecated backtick-repr; identical output for ints.
        text = 'CLASS' + str(value)
    return text
+
def is_metaclass(rdclass):
    """True if the class is a metaclass.
    @param rdclass: the rdata class
    @type rdclass: int
    @rtype: bool"""

    # Membership test via 'in' instead of the deprecated dict.has_key().
    return rdclass in _metaclasses
diff --git a/lib/dnspython/dns/rdataset.py b/lib/dnspython/dns/rdataset.py
new file mode 100644
index 0000000000..f556d2288b
--- /dev/null
+++ b/lib/dnspython/dns/rdataset.py
@@ -0,0 +1,329 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
+
+import random
+import StringIO
+import struct
+
+import dns.exception
+import dns.rdatatype
+import dns.rdataclass
+import dns.rdata
+import dns.set
+
+# define SimpleSet here for backwards compatibility
+SimpleSet = dns.set.Set
+
class DifferingCovers(dns.exception.DNSException):
    """Raised if an attempt is made to add a SIG/RRSIG whose covered type
    is not the same as that of the other rdatas in the rdataset.
    @see: Rdataset.add"""
    pass
+
class IncompatibleTypes(dns.exception.DNSException):
    """Raised if an attempt is made to add rdata of an incompatible type,
    i.e. whose rdata class or type differs from the rdataset's.
    @see: Rdataset.add"""
    pass
+
class Rdataset(dns.set.Set):
    """A DNS rdataset.

    @ivar rdclass: The class of the rdataset
    @type rdclass: int
    @ivar rdtype: The type of the rdataset
    @type rdtype: int
    @ivar covers: The covered type.  Usually this value is
    dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
    dns.rdatatype.RRSIG, then the covers value will be the rdata
    type the SIG/RRSIG covers.  The library treats the SIG and RRSIG
    types as if they were a family of
    types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).  This makes RRSIGs much
    easier to work with than if RRSIGs covering different rdata
    types were aggregated into a single RRSIG rdataset.
    @type covers: int
    @ivar ttl: The DNS TTL (Time To Live) value
    @type ttl: int
    """

    __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']

    def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
        """Create a new rdataset of the specified class and type.

        @see: the description of the class instance variables for the
        meaning of I{rdclass} and I{rdtype}"""

        super(Rdataset, self).__init__()
        self.rdclass = rdclass
        self.rdtype = rdtype
        self.covers = covers
        self.ttl = 0

    def _clone(self):
        # Copy the rdataset-specific attributes on top of the base Set clone.
        obj = super(Rdataset, self)._clone()
        obj.rdclass = self.rdclass
        obj.rdtype = self.rdtype
        obj.covers = self.covers
        obj.ttl = self.ttl
        return obj

    def update_ttl(self, ttl):
        """Set the TTL of the rdataset to be the lesser of the set's current
        TTL or the specified TTL.  If the set contains no rdatas, set the TTL
        to the specified TTL.
        @param ttl: The TTL
        @type ttl: int"""

        if len(self) == 0 or ttl < self.ttl:
            self.ttl = ttl

    def add(self, rd, ttl=None):
        """Add the specified rdata to the rdataset.

        If the optional I{ttl} parameter is supplied, then
        self.update_ttl(ttl) will be called prior to adding the rdata.

        @param rd: The rdata
        @type rd: dns.rdata.Rdata object
        @param ttl: The TTL
        @type ttl: int
        @raises IncompatibleTypes: rd's class or type does not match the
        rdataset's
        @raises DifferingCovers: rd is a SIG/RRSIG covering a different type
        than the existing rdatas"""

        #
        # If we're adding a signature, do some special handling to
        # check that the signature covers the same type as the
        # other rdatas in this rdataset.  If this is the first rdata
        # in the set, initialize the covers field.
        #
        if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
            raise IncompatibleTypes
        if ttl is not None:
            self.update_ttl(ttl)
        if self.rdtype == dns.rdatatype.RRSIG or \
           self.rdtype == dns.rdatatype.SIG:
            covers = rd.covers()
            if len(self) == 0 and self.covers == dns.rdatatype.NONE:
                self.covers = covers
            elif self.covers != covers:
                raise DifferingCovers
        # Singleton types admit only one rdata; adding a new one replaces
        # whatever is already present.
        if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
            self.clear()
        super(Rdataset, self).add(rd)

    def union_update(self, other):
        self.update_ttl(other.ttl)
        super(Rdataset, self).union_update(other)

    def intersection_update(self, other):
        self.update_ttl(other.ttl)
        super(Rdataset, self).intersection_update(other)

    def update(self, other):
        """Add all rdatas in other to self.

        @param other: The rdataset from which to update
        @type other: dns.rdataset.Rdataset object"""

        self.update_ttl(other.ttl)
        super(Rdataset, self).update(other)

    def __repr__(self):
        if self.covers == 0:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'

    def __str__(self):
        return self.to_text()

    def __eq__(self, other):
        """Two rdatasets are equal if they have the same class, type, and
        covers, and contain the same rdata.
        @rtype: bool"""

        if not isinstance(other, Rdataset):
            return False
        if self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype or \
           self.covers != other.covers:
            return False
        return super(Rdataset, self).__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_text(self, name=None, origin=None, relativize=True,
                override_rdclass=None, **kw):
        """Convert the rdataset into DNS master file format.

        @see: L{dns.name.Name.choose_relativity} for more information
        on how I{origin} and I{relativize} determine the way names
        are emitted.

        Any additional keyword arguments are passed on to the rdata
        to_text() method.

        @param name: If name is not None, emit RRs with I{name} as
        the owner name.
        @type name: dns.name.Name object
        @param origin: The origin for relative names, or None.
        @type origin: dns.name.Name object
        @param relativize: True if names should be relativized
        @type relativize: bool"""

        if name is not None:
            name = name.choose_relativity(origin, relativize)
            ntext = str(name)
            pad = ' '
        else:
            ntext = ''
            pad = ''
        s = StringIO.StringIO()
        if override_rdclass is not None:
            rdclass = override_rdclass
        else:
            rdclass = self.rdclass
        if len(self) == 0:
            #
            # Empty rdatasets are used for the question section, and in
            # some dynamic updates, so we don't need to print out the TTL
            # (which is meaningless anyway).
            #
            print >> s, '%s%s%s %s' % (ntext, pad,
                                       dns.rdataclass.to_text(rdclass),
                                       dns.rdatatype.to_text(self.rdtype))
        else:
            for rd in self:
                print >> s, '%s%s%d %s %s %s' % \
                      (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
                       dns.rdatatype.to_text(self.rdtype),
                       rd.to_text(origin=origin, relativize=relativize, **kw))
        #
        # We strip off the final \n for the caller's convenience in printing
        #
        return s.getvalue()[:-1]

    def to_wire(self, name, file, compress=None, origin=None,
                override_rdclass=None, want_shuffle=True):
        """Convert the rdataset to wire format.

        @param name: The owner name of the RRset that will be emitted
        @type name: dns.name.Name object
        @param file: The file to which the wire format data will be appended
        @type file: file
        @param compress: The compression table to use; the default is None.
        @type compress: dict
        @param origin: The origin to be appended to any relative names when
        they are emitted.  The default is None.
        @param override_rdclass: if not None, emit this rdata class instead
        of the rdataset's own class (shuffling is then disabled).
        @type override_rdclass: int
        @param want_shuffle: emit the rdatas in random order
        @type want_shuffle: bool
        @returns: the number of records emitted
        @rtype: int
        """

        if override_rdclass is not None:
            rdclass = override_rdclass
            want_shuffle = False
        else:
            rdclass = self.rdclass
        # Always append at the end of the file.
        file.seek(0, 2)
        if len(self) == 0:
            # An empty rdataset (e.g. a question section entry) is emitted
            # as a bare header with zero TTL and zero rdlen.
            name.to_wire(file, compress, origin)
            stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
            file.write(stuff)
            return 1
        else:
            if want_shuffle:
                l = list(self)
                random.shuffle(l)
            else:
                l = self
            for rd in l:
                name.to_wire(file, compress, origin)
                # Write the header with a zero rdlen placeholder, emit the
                # rdata, then seek back and patch in the actual length.
                stuff = struct.pack("!HHIH", self.rdtype, rdclass,
                                    self.ttl, 0)
                file.write(stuff)
                start = file.tell()
                rd.to_wire(file, compress, origin)
                end = file.tell()
                assert end - start < 65536
                file.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                file.write(stuff)
                file.seek(0, 2)
            return len(self)

    def match(self, rdclass, rdtype, covers):
        """Returns True if this rdataset matches the specified class, type,
        and covers"""
        return self.rdclass == rdclass and \
               self.rdtype == rdtype and \
               self.covers == covers
+
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified list of rdatas in text format.

    @rtype: dns.rdataset.Rdataset object
    """

    # Accept textual class/type mnemonics as well as numeric values.
    if isinstance(rdclass, (str, unicode)):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, (str, unicode)):
        rdtype = dns.rdatatype.from_text(rdtype)
    rdataset = Rdataset(rdclass, rdtype)
    rdataset.update_ttl(ttl)
    for text in text_rdatas:
        rdataset.add(dns.rdata.from_text(rdataset.rdclass, rdataset.rdtype,
                                         text))
    return rdataset
+
def from_text(rdclass, rdtype, ttl, *text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified rdatas in text format.

    Varargs convenience wrapper around from_text_list().

    @rtype: dns.rdataset.Rdataset object
    """

    return from_text_list(rdclass, rdtype, ttl, text_rdatas)
+
def from_rdata_list(ttl, rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified list of rdata objects.

    @param ttl: The TTL
    @type ttl: int
    @param rdatas: The rdata objects; must be non-empty.  All must share the
    class and type of the first, or Rdataset.add will raise.
    @raises ValueError: rdatas is empty
    @rtype: dns.rdataset.Rdataset object
    """

    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    r = None
    for rd in rdatas:
        if r is None:
            # Derive the rdataset's class and type from the first rdata.
            # (The dead "first_time = False" leftover has been removed.)
            r = Rdataset(rd.rdclass, rd.rdtype)
            r.update_ttl(ttl)
        r.add(rd)
    return r
+
def from_rdata(ttl, *rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified rdata objects.

    Varargs convenience wrapper around from_rdata_list().

    @rtype: dns.rdataset.Rdataset object
    """

    return from_rdata_list(ttl, rdatas)
diff --git a/lib/dnspython/dns/rdatatype.py b/lib/dnspython/dns/rdatatype.py
new file mode 100644
index 0000000000..1a02b7d3cd
--- /dev/null
+++ b/lib/dnspython/dns/rdatatype.py
@@ -0,0 +1,232 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Types.
+
+@var _by_text: The rdata type textual name to value mapping
+@type _by_text: dict
+@var _by_value: The rdata type value to textual name mapping
+@type _by_value: dict
+@var _metatypes: If an rdatatype is a metatype, there will be a mapping
+whose key is the rdatatype value and whose value is True in this dictionary.
+@type _metatypes: dict
+@var _singletons: If an rdatatype is a singleton, there will be a mapping
+whose key is the rdatatype value and whose value is True in this dictionary.
+@type _singletons: dict"""
+
+import re
+
+import dns.exception
+
+NONE = 0
+A = 1
+NS = 2
+MD = 3
+MF = 4
+CNAME = 5
+SOA = 6
+MB = 7
+MG = 8
+MR = 9
+NULL = 10
+WKS = 11
+PTR = 12
+HINFO = 13
+MINFO = 14
+MX = 15
+TXT = 16
+RP = 17
+AFSDB = 18
+X25 = 19
+ISDN = 20
+RT = 21
+NSAP = 22
+NSAP_PTR = 23
+SIG = 24
+KEY = 25
+PX = 26
+GPOS = 27
+AAAA = 28
+LOC = 29
+NXT = 30
+SRV = 33
+NAPTR = 35
+KX = 36
+CERT = 37
+A6 = 38
+DNAME = 39
+OPT = 41
+APL = 42
+DS = 43
+SSHFP = 44
+IPSECKEY = 45
+RRSIG = 46
+NSEC = 47
+DNSKEY = 48
+DHCID = 49
+NSEC3 = 50
+NSEC3PARAM = 51
+HIP = 55
+SPF = 99
+UNSPEC = 103
+TKEY = 249
+TSIG = 250
+IXFR = 251
+AXFR = 252
+MAILB = 253
+MAILA = 254
+ANY = 255
+TA = 32768
+DLV = 32769
+
+_by_text = {
+ 'NONE' : NONE,
+ 'A' : A,
+ 'NS' : NS,
+ 'MD' : MD,
+ 'MF' : MF,
+ 'CNAME' : CNAME,
+ 'SOA' : SOA,
+ 'MB' : MB,
+ 'MG' : MG,
+ 'MR' : MR,
+ 'NULL' : NULL,
+ 'WKS' : WKS,
+ 'PTR' : PTR,
+ 'HINFO' : HINFO,
+ 'MINFO' : MINFO,
+ 'MX' : MX,
+ 'TXT' : TXT,
+ 'RP' : RP,
+ 'AFSDB' : AFSDB,
+ 'X25' : X25,
+ 'ISDN' : ISDN,
+ 'RT' : RT,
+ 'NSAP' : NSAP,
+ 'NSAP-PTR' : NSAP_PTR,
+ 'SIG' : SIG,
+ 'KEY' : KEY,
+ 'PX' : PX,
+ 'GPOS' : GPOS,
+ 'AAAA' : AAAA,
+ 'LOC' : LOC,
+ 'NXT' : NXT,
+ 'SRV' : SRV,
+ 'NAPTR' : NAPTR,
+ 'KX' : KX,
+ 'CERT' : CERT,
+ 'A6' : A6,
+ 'DNAME' : DNAME,
+ 'OPT' : OPT,
+ 'APL' : APL,
+ 'DS' : DS,
+ 'SSHFP' : SSHFP,
+ 'IPSECKEY' : IPSECKEY,
+ 'RRSIG' : RRSIG,
+ 'NSEC' : NSEC,
+ 'DNSKEY' : DNSKEY,
+ 'DHCID' : DHCID,
+ 'NSEC3' : NSEC3,
+ 'NSEC3PARAM' : NSEC3PARAM,
+ 'HIP' : HIP,
+ 'SPF' : SPF,
+ 'UNSPEC' : UNSPEC,
+ 'TKEY' : TKEY,
+ 'TSIG' : TSIG,
+ 'IXFR' : IXFR,
+ 'AXFR' : AXFR,
+ 'MAILB' : MAILB,
+ 'MAILA' : MAILA,
+ 'ANY' : ANY,
+ 'TA' : TA,
+ 'DLV' : DLV,
+ }
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
+
+
+_metatypes = {
+ OPT : True
+ }
+
+_singletons = {
+ SOA : True,
+ NXT : True,
+ DNAME : True,
+ NSEC : True,
+ # CNAME is technically a singleton, but we allow multiple CNAMEs.
+ }
+
+_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I);
+
class UnknownRdatatype(dns.exception.DNSException):
    """Raised if a type is unknown: the text is neither a known type
    mnemonic nor of the generic "TYPEnnn" form."""
    pass
+
def from_text(text):
    """Convert text into a DNS rdata type value.
    @param text: the text
    @type text: string
    @raises dns.rdatatype.UnknownRdatatype: the type is unknown
    @raises ValueError: the rdata type value is not >= 0 and <= 65535
    @rtype: int"""

    value = _by_text.get(text.upper())
    if value is None:
        # Not a known mnemonic; accept the RFC 3597 generic "TYPEnnn" form.
        match = _unknown_type_pattern.match(text)
        if match is None:
            raise UnknownRdatatype
        value = int(match.group(1))
        if value < 0 or value > 65535:
            raise ValueError("type must be between >= 0 and <= 65535")
    return value
+
def to_text(value):
    """Convert a DNS rdata type to text.
    @param value: the rdata type value
    @type value: int
    @raises ValueError: the rdata type value is not >= 0 and <= 65535
    @rtype: string"""

    if value < 0 or value > 65535:
        raise ValueError("type must be between >= 0 and <= 65535")
    text = _by_value.get(value)
    if text is None:
        # Unknown type: emit the RFC 3597 generic form.  str() replaces the
        # deprecated backtick-repr; identical output for ints.
        text = 'TYPE' + str(value)
    return text
+
def is_metatype(rdtype):
    """True if the type is a metatype.
    @param rdtype: the type
    @type rdtype: int
    @rtype: bool"""

    # The contiguous range TKEY..ANY holds metatypes; _metatypes lists the
    # rest (OPT).  'in' replaces the deprecated dict.has_key().
    return (rdtype >= TKEY and rdtype <= ANY) or rdtype in _metatypes
+
def is_singleton(rdtype):
    """True if the type is a singleton, i.e. only one rdata of this type
    may exist in an rdataset.
    @param rdtype: the type
    @type rdtype: int
    @rtype: bool"""

    # Membership test via 'in' instead of the deprecated dict.has_key().
    return rdtype in _singletons
diff --git a/lib/dnspython/dns/rdtypes/ANY/AFSDB.py b/lib/dnspython/dns/rdtypes/ANY/AFSDB.py
new file mode 100644
index 0000000000..e8ca6f5cbb
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/AFSDB.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):
    """AFSDB record

    @ivar subtype: the subtype value
    @type subtype: int
    @ivar hostname: the hostname name
    @type hostname: dns.name.Name object"""

    # Use the property mechanism to make "subtype" an alias for the
    # "preference" attribute, and "hostname" an alias for the "exchange"
    # attribute.
    #
    # This lets us inherit the UncompressedMX implementation but lets
    # the caller use appropriate attribute names for the rdata type.
    #
    # We probably lose some performance vs. a cut-and-paste
    # implementation, but this way we don't copy code, and that's
    # good.

    def get_subtype(self):
        """Return the subtype (stored in the inherited 'preference')."""
        return self.preference

    def set_subtype(self, subtype):
        """Set the subtype (stored in the inherited 'preference')."""
        self.preference = subtype

    subtype = property(get_subtype, set_subtype)

    def get_hostname(self):
        """Return the hostname (stored in the inherited 'exchange')."""
        return self.exchange

    def set_hostname(self, hostname):
        """Set the hostname (stored in the inherited 'exchange')."""
        self.exchange = hostname

    hostname = property(get_hostname, set_hostname)
diff --git a/lib/dnspython/dns/rdtypes/ANY/CERT.py b/lib/dnspython/dns/rdtypes/ANY/CERT.py
new file mode 100644
index 0000000000..d2703519d5
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/CERT.py
@@ -0,0 +1,131 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+import dns.tokenizer
+
+_ctype_by_value = {
+ 1 : 'PKIX',
+ 2 : 'SPKI',
+ 3 : 'PGP',
+ 253 : 'URI',
+ 254 : 'OID',
+ }
+
+_ctype_by_name = {
+ 'PKIX' : 1,
+ 'SPKI' : 2,
+ 'PGP' : 3,
+ 'URI' : 253,
+ 'OID' : 254,
+ }
+
def _ctype_from_text(what):
    """Convert a certificate-type mnemonic or decimal string to its value."""
    known = _ctype_by_name.get(what)
    if known is not None:
        return known
    # Not a mnemonic; must be a plain number (int() raises otherwise).
    return int(what)
+
def _ctype_to_text(what):
    """Convert a certificate-type value to its mnemonic, or decimal text."""
    mnemonic = _ctype_by_value.get(what)
    if mnemonic is not None:
        return mnemonic
    return str(what)
+
class CERT(dns.rdata.Rdata):
    """CERT record

    @ivar certificate_type: certificate type
    @type certificate_type: int
    @ivar key_tag: key tag
    @type key_tag: int
    @ivar algorithm: algorithm
    @type algorithm: int
    @ivar certificate: the certificate or CRL
    @type certificate: string
    @see: RFC 2538"""

    __slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']

    def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
                 certificate):
        super(CERT, self).__init__(rdclass, rdtype)
        self.certificate_type = certificate_type
        self.key_tag = key_tag
        self.algorithm = algorithm
        self.certificate = certificate

    def to_text(self, origin=None, relativize=True, **kw):
        # Render as "<type> <key tag> <algorithm> <base64 certificate>".
        certificate_type = _ctype_to_text(self.certificate_type)
        return "%s %d %s %s" % (certificate_type, self.key_tag,
                                dns.dnssec.algorithm_to_text(self.algorithm),
                                dns.rdata._base64ify(self.certificate))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse a CERT rdata from master-file text: certificate type,
        key tag, algorithm, then base64 chunks up to end of line/file."""
        certificate_type = _ctype_from_text(tok.get_string())
        key_tag = tok.get_uint16()
        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
        if algorithm < 0 or algorithm > 255:
            raise dns.exception.SyntaxError("bad algorithm type")
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value)
        # Concatenate all base64 chunks before decoding.
        b64 = ''.join(chunks)
        certificate = b64.decode('base64_codec')
        return cls(rdclass, rdtype, certificate_type, key_tag,
                   algorithm, certificate)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Fixed 5-byte header (type, key tag, algorithm) then the raw
        # certificate body.
        prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
                             self.algorithm)
        file.write(prefix)
        file.write(self.certificate)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Parse a CERT rdata from wire format (inverse of to_wire)."""
        prefix = wire[current : current + 5]
        current += 5
        rdlen -= 5
        # Reject rdatas too short to hold the fixed header.
        if rdlen < 0:
            raise dns.exception.FormError
        (certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
        certificate = wire[current : current + rdlen]
        return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
                   certificate)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Order by wire form; reuse one buffer for both encodings.
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()

        return cmp(wire1, wire2)
diff --git a/lib/dnspython/dns/rdtypes/ANY/CNAME.py b/lib/dnspython/dns/rdtypes/ANY/CNAME.py
new file mode 100644
index 0000000000..7f5c4b3bd7
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/CNAME.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
class CNAME(dns.rdtypes.nsbase.NSBase):
    """CNAME record

    All behavior (text/wire conversion, comparison) is inherited from
    NSBase.

    Note: although CNAME is officially a singleton type, dnspython allows
    non-singleton CNAME rdatasets because such sets have been commonly
    used by BIND and other nameservers for load balancing."""
    pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/DLV.py b/lib/dnspython/dns/rdtypes/ANY/DLV.py
new file mode 100644
index 0000000000..07b9548342
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/DLV.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
class DLV(dns.rdtypes.dsbase.DSBase):
    """DLV record

    Shares the DS record's text and wire format; all behavior is
    inherited from DSBase."""
    pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/DNAME.py b/lib/dnspython/dns/rdtypes/ANY/DNAME.py
new file mode 100644
index 0000000000..99b5013f33
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/DNAME.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
class DNAME(dns.rdtypes.nsbase.UncompressedNS):
    """DNAME record"""
    def to_digestable(self, origin = None):
        # Delegate to the target name's digestable form.
        return self.target.to_digestable(origin)
diff --git a/lib/dnspython/dns/rdtypes/ANY/DNSKEY.py b/lib/dnspython/dns/rdtypes/ANY/DNSKEY.py
new file mode 100644
index 0000000000..ad66ef0c69
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/DNSKEY.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.keybase
+
+# flag constants
+SEP = 0x0001
+REVOKE = 0x0080
+ZONE = 0x0100
+
class DNSKEY(dns.rdtypes.keybase.KEYBase):
    """DNSKEY record

    All behavior is inherited from KEYBase; the module-level SEP, REVOKE,
    and ZONE constants above are the flag bits for this record's flags
    field."""
    pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/DS.py b/lib/dnspython/dns/rdtypes/ANY/DS.py
new file mode 100644
index 0000000000..3a06f448f7
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/DS.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
class DS(dns.rdtypes.dsbase.DSBase):
    """DS record

    All behavior is inherited from DSBase."""
    pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/GPOS.py b/lib/dnspython/dns/rdtypes/ANY/GPOS.py
new file mode 100644
index 0000000000..6f63cc05f6
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/GPOS.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+def _validate_float_string(what):
+ if what[0] == '-' or what[0] == '+':
+ what = what[1:]
+ if what.isdigit():
+ return
+ (left, right) = what.split('.')
+ if left == '' and right == '':
+ raise dns.exception.FormError
+ if not left == '' and not left.isdigit():
+ raise dns.exception.FormError
+ if not right == '' and not right.isdigit():
+ raise dns.exception.FormError
+
+class GPOS(dns.rdata.Rdata):
+ """GPOS record
+
+ @ivar latitude: latitude
+ @type latitude: string
+ @ivar longitude: longitude
+ @type longitude: string
+ @ivar altitude: altitude
+ @type altitude: string
+ @see: RFC 1712"""
+
+ __slots__ = ['latitude', 'longitude', 'altitude']
+
+ def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
+ super(GPOS, self).__init__(rdclass, rdtype)
+ if isinstance(latitude, float) or \
+ isinstance(latitude, int) or \
+ isinstance(latitude, long):
+ latitude = str(latitude)
+ if isinstance(longitude, float) or \
+ isinstance(longitude, int) or \
+ isinstance(longitude, long):
+ longitude = str(longitude)
+ if isinstance(altitude, float) or \
+ isinstance(altitude, int) or \
+ isinstance(altitude, long):
+ altitude = str(altitude)
+ _validate_float_string(latitude)
+ _validate_float_string(longitude)
+ _validate_float_string(altitude)
+ self.latitude = latitude
+ self.longitude = longitude
+ self.altitude = altitude
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%s %s %s' % (self.latitude, self.longitude, self.altitude)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ latitude = tok.get_string()
+ longitude = tok.get_string()
+ altitude = tok.get_string()
+ tok.get_eol()
+ return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ l = len(self.latitude)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.latitude)
+ l = len(self.longitude)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.longitude)
+ l = len(self.altitude)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.altitude)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ latitude = wire[current : current + l]
+ current += l
+ rdlen -= l
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ longitude = wire[current : current + l]
+ current += l
+ rdlen -= l
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ altitude = wire[current : current + l]
+ return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ v = cmp(self.latitude, other.latitude)
+ if v == 0:
+ v = cmp(self.longitude, other.longitude)
+ if v == 0:
+ v = cmp(self.altitude, other.altitude)
+ return v
+
+ def _get_float_latitude(self):
+ return float(self.latitude)
+
+ def _set_float_latitude(self, value):
+ self.latitude = str(value)
+
+ float_latitude = property(_get_float_latitude, _set_float_latitude,
+ doc="latitude as a floating point value")
+
+ def _get_float_longitude(self):
+ return float(self.longitude)
+
+ def _set_float_longitude(self, value):
+ self.longitude = str(value)
+
+ float_longitude = property(_get_float_longitude, _set_float_longitude,
+ doc="longitude as a floating point value")
+
+ def _get_float_altitude(self):
+ return float(self.altitude)
+
+ def _set_float_altitude(self, value):
+ self.altitude = str(value)
+
+ float_altitude = property(_get_float_altitude, _set_float_altitude,
+ doc="altitude as a floating point value")
diff --git a/lib/dnspython/dns/rdtypes/ANY/HINFO.py b/lib/dnspython/dns/rdtypes/ANY/HINFO.py
new file mode 100644
index 0000000000..e592ad39a7
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/HINFO.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class HINFO(dns.rdata.Rdata):
+ """HINFO record
+
+ @ivar cpu: the CPU type
+ @type cpu: string
+ @ivar os: the OS type
+ @type os: string
+ @see: RFC 1035"""
+
+ __slots__ = ['cpu', 'os']
+
+ def __init__(self, rdclass, rdtype, cpu, os):
+ super(HINFO, self).__init__(rdclass, rdtype)
+ self.cpu = cpu
+ self.os = os
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
+ dns.rdata._escapify(self.os))
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ cpu = tok.get_string()
+ os = tok.get_string()
+ tok.get_eol()
+ return cls(rdclass, rdtype, cpu, os)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ l = len(self.cpu)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.cpu)
+ l = len(self.os)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.os)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ cpu = wire[current : current + l]
+ current += l
+ rdlen -= l
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ os = wire[current : current + l]
+ return cls(rdclass, rdtype, cpu, os)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ v = cmp(self.cpu, other.cpu)
+ if v == 0:
+ v = cmp(self.os, other.os)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/ANY/HIP.py b/lib/dnspython/dns/rdtypes/ANY/HIP.py
new file mode 100644
index 0000000000..8f96ae93d6
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/HIP.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import string
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+class HIP(dns.rdata.Rdata):
+ """HIP record
+
+ @ivar hit: the host identity tag
+ @type hit: string
+ @ivar algorithm: the public key cryptographic algorithm
+ @type algorithm: int
+ @ivar key: the public key
+ @type key: string
+ @ivar servers: the rendezvous servers
+ @type servers: list of dns.name.Name objects
+ @see: RFC 5205"""
+
+ __slots__ = ['hit', 'algorithm', 'key', 'servers']
+
+ def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
+ super(HIP, self).__init__(rdclass, rdtype)
+ self.hit = hit
+ self.algorithm = algorithm
+ self.key = key
+ self.servers = servers
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ hit = self.hit.encode('hex-codec')
+ key = self.key.encode('base64-codec').replace('\n', '')
+ text = ''
+ servers = []
+ for server in self.servers:
+ servers.append(str(server.choose_relativity(origin, relativize)))
+ if len(servers) > 0:
+ text += (' ' + ' '.join(servers))
+ return '%u %s %s%s' % (self.algorithm, hit, key, text)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ algorithm = tok.get_uint8()
+ hit = tok.get_string().decode('hex-codec')
+ if len(hit) > 255:
+ raise dns.exception.SyntaxError("HIT too long")
+ key = tok.get_string().decode('base64-codec')
+ servers = []
+ while 1:
+ token = tok.get()
+ if token.is_eol_or_eof():
+ break
+ server = dns.name.from_text(token.value, origin)
+ server.choose_relativity(origin, relativize)
+ servers.append(server)
+ return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ lh = len(self.hit)
+ lk = len(self.key)
+ file.write(struct.pack("!BBH", lh, self.algorithm, lk))
+ file.write(self.hit)
+ file.write(self.key)
+ for server in self.servers:
+ server.to_wire(file, None, origin)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (lh, algorithm, lk) = struct.unpack('!BBH',
+ wire[current : current + 4])
+ current += 4
+ rdlen -= 4
+ hit = wire[current : current + lh]
+ current += lh
+ rdlen -= lh
+ key = wire[current : current + lk]
+ current += lk
+ rdlen -= lk
+ servers = []
+ while rdlen > 0:
+ (server, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ if not origin is None:
+ server = server.relativize(origin)
+ servers.append(server)
+ return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ servers = []
+ for server in self.servers:
+ server = server.choose_relativity(origin, relativize)
+ servers.append(server)
+ self.servers = servers
+
+ def _cmp(self, other):
+ b1 = cStringIO.StringIO()
+ lh = len(self.hit)
+ lk = len(self.key)
+ b1.write(struct.pack("!BBH", lh, self.algorithm, lk))
+ b1.write(self.hit)
+ b1.write(self.key)
+ b2 = cStringIO.StringIO()
+ lh = len(other.hit)
+ lk = len(other.key)
+ b2.write(struct.pack("!BBH", lh, other.algorithm, lk))
+ b2.write(other.hit)
+ b2.write(other.key)
+ v = cmp(b1.getvalue(), b2.getvalue())
+ if v != 0:
+ return v
+ ls = len(self.servers)
+ lo = len(other.servers)
+ count = min(ls, lo)
+ i = 0
+ while i < count:
+ v = cmp(self.servers[i], other.servers[i])
+ if v != 0:
+ return v
+ i += 1
+ return ls - lo
diff --git a/lib/dnspython/dns/rdtypes/ANY/ISDN.py b/lib/dnspython/dns/rdtypes/ANY/ISDN.py
new file mode 100644
index 0000000000..424d3a9a3c
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/ISDN.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class ISDN(dns.rdata.Rdata):
+ """ISDN record
+
+ @ivar address: the ISDN address
+ @type address: string
+ @ivar subaddress: the ISDN subaddress (or '' if not present)
+ @type subaddress: string
+ @see: RFC 1183"""
+
+ __slots__ = ['address', 'subaddress']
+
+ def __init__(self, rdclass, rdtype, address, subaddress):
+ super(ISDN, self).__init__(rdclass, rdtype)
+ self.address = address
+ self.subaddress = subaddress
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.subaddress:
+ return '"%s" "%s"' % (dns.rdata._escapify(self.address),
+ dns.rdata._escapify(self.subaddress))
+ else:
+ return '"%s"' % dns.rdata._escapify(self.address)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ address = tok.get_string()
+ t = tok.get()
+ if not t.is_eol_or_eof():
+ tok.unget(t)
+ subaddress = tok.get_string()
+ else:
+ tok.unget(t)
+ subaddress = ''
+ tok.get_eol()
+ return cls(rdclass, rdtype, address, subaddress)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ l = len(self.address)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.address)
+ l = len(self.subaddress)
+ if l > 0:
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.subaddress)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ address = wire[current : current + l]
+ current += l
+ rdlen -= l
+ if rdlen > 0:
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ subaddress = wire[current : current + l]
+ else:
+ subaddress = ''
+ return cls(rdclass, rdtype, address, subaddress)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ v = cmp(self.address, other.address)
+ if v == 0:
+ v = cmp(self.subaddress, other.subaddress)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/ANY/KEY.py b/lib/dnspython/dns/rdtypes/ANY/KEY.py
new file mode 100644
index 0000000000..c8581edbeb
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/KEY.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.keybase
+
+class KEY(dns.rdtypes.keybase.KEYBase):
+ """KEY record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/LOC.py b/lib/dnspython/dns/rdtypes/ANY/LOC.py
new file mode 100644
index 0000000000..518dd6010f
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/LOC.py
@@ -0,0 +1,334 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.rdata
+
+_pows = (1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L,
+ 100000000L, 1000000000L, 10000000000L)
+
+def _exponent_of(what, desc):
+ exp = None
+ for i in xrange(len(_pows)):
+ if what // _pows[i] == 0L:
+ exp = i - 1
+ break
+ if exp is None or exp < 0:
+ raise dns.exception.SyntaxError("%s value out of bounds" % desc)
+ return exp
+
+def _float_to_tuple(what):
+ if what < 0:
+ sign = -1
+ what *= -1
+ else:
+ sign = 1
+ what = long(round(what * 3600000))
+ degrees = int(what // 3600000)
+ what -= degrees * 3600000
+ minutes = int(what // 60000)
+ what -= minutes * 60000
+ seconds = int(what // 1000)
+ what -= int(seconds * 1000)
+ what = int(what)
+ return (degrees * sign, minutes, seconds, what)
+
+def _tuple_to_float(what):
+ if what[0] < 0:
+ sign = -1
+ value = float(what[0]) * -1
+ else:
+ sign = 1
+ value = float(what[0])
+ value += float(what[1]) / 60.0
+ value += float(what[2]) / 3600.0
+ value += float(what[3]) / 3600000.0
+ return sign * value
+
+def _encode_size(what, desc):
+ what = long(what);
+ exponent = _exponent_of(what, desc) & 0xF
+ base = what // pow(10, exponent) & 0xF
+ return base * 16 + exponent
+
+def _decode_size(what, desc):
+ exponent = what & 0x0F
+ if exponent > 9:
+ raise dns.exception.SyntaxError("bad %s exponent" % desc)
+ base = (what & 0xF0) >> 4
+ if base > 9:
+ raise dns.exception.SyntaxError("bad %s base" % desc)
+ return long(base) * pow(10, exponent)
+
+class LOC(dns.rdata.Rdata):
+ """LOC record
+
+ @ivar latitude: latitude
+ @type latitude: (int, int, int, int) tuple specifying the degrees, minutes,
+ seconds, and milliseconds of the coordinate.
+ @ivar longitude: longitude
+ @type longitude: (int, int, int, int) tuple specifying the degrees,
+ minutes, seconds, and milliseconds of the coordinate.
+ @ivar altitude: altitude
+ @type altitude: float
+ @ivar size: size of the sphere
+ @type size: float
+ @ivar horizontal_precision: horizontal precision
+ @type horizontal_precision: float
+ @ivar vertical_precision: vertical precision
+ @type vertical_precision: float
+ @see: RFC 1876"""
+
+ __slots__ = ['latitude', 'longitude', 'altitude', 'size',
+ 'horizontal_precision', 'vertical_precision']
+
+ def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
+ size=1.0, hprec=10000.0, vprec=10.0):
+ """Initialize a LOC record instance.
+
+ The parameters I{latitude} and I{longitude} may be either a 4-tuple
+ of integers specifying (degrees, minutes, seconds, milliseconds),
+ or they may be floating point values specifying the number of
+ degrees. The other parameters are floats."""
+
+ super(LOC, self).__init__(rdclass, rdtype)
+ if isinstance(latitude, int) or isinstance(latitude, long):
+ latitude = float(latitude)
+ if isinstance(latitude, float):
+ latitude = _float_to_tuple(latitude)
+ self.latitude = latitude
+ if isinstance(longitude, int) or isinstance(longitude, long):
+ longitude = float(longitude)
+ if isinstance(longitude, float):
+ longitude = _float_to_tuple(longitude)
+ self.longitude = longitude
+ self.altitude = float(altitude)
+ self.size = float(size)
+ self.horizontal_precision = float(hprec)
+ self.vertical_precision = float(vprec)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.latitude[0] > 0:
+ lat_hemisphere = 'N'
+ lat_degrees = self.latitude[0]
+ else:
+ lat_hemisphere = 'S'
+ lat_degrees = -1 * self.latitude[0]
+ if self.longitude[0] > 0:
+ long_hemisphere = 'E'
+ long_degrees = self.longitude[0]
+ else:
+ long_hemisphere = 'W'
+ long_degrees = -1 * self.longitude[0]
+ text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
+ lat_degrees, self.latitude[1], self.latitude[2], self.latitude[3],
+ lat_hemisphere, long_degrees, self.longitude[1], self.longitude[2],
+ self.longitude[3], long_hemisphere, self.altitude / 100.0
+ )
+
+ if self.size != 1.0 or self.horizontal_precision != 10000.0 or \
+ self.vertical_precision != 10.0:
+ text += " %0.2fm %0.2fm %0.2fm" % (
+ self.size / 100.0, self.horizontal_precision / 100.0,
+ self.vertical_precision / 100.0
+ )
+ return text
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ latitude = [0, 0, 0, 0]
+ longitude = [0, 0, 0, 0]
+ size = 1.0
+ hprec = 10000.0
+ vprec = 10.0
+
+ latitude[0] = tok.get_int()
+ t = tok.get_string()
+ if t.isdigit():
+ latitude[1] = int(t)
+ t = tok.get_string()
+ if '.' in t:
+ (seconds, milliseconds) = t.split('.')
+ if not seconds.isdigit():
+ raise dns.exception.SyntaxError('bad latitude seconds value')
+ latitude[2] = int(seconds)
+ if latitude[2] >= 60:
+ raise dns.exception.SyntaxError('latitude seconds >= 60')
+ l = len(milliseconds)
+ if l == 0 or l > 3 or not milliseconds.isdigit():
+ raise dns.exception.SyntaxError('bad latitude milliseconds value')
+ if l == 1:
+ m = 100
+ elif l == 2:
+ m = 10
+ else:
+ m = 1
+ latitude[3] = m * int(milliseconds)
+ t = tok.get_string()
+ elif t.isdigit():
+ latitude[2] = int(t)
+ t = tok.get_string()
+ if t == 'S':
+ latitude[0] *= -1
+ elif t != 'N':
+ raise dns.exception.SyntaxError('bad latitude hemisphere value')
+
+ longitude[0] = tok.get_int()
+ t = tok.get_string()
+ if t.isdigit():
+ longitude[1] = int(t)
+ t = tok.get_string()
+ if '.' in t:
+ (seconds, milliseconds) = t.split('.')
+ if not seconds.isdigit():
+ raise dns.exception.SyntaxError('bad longitude seconds value')
+ longitude[2] = int(seconds)
+ if longitude[2] >= 60:
+ raise dns.exception.SyntaxError('longitude seconds >= 60')
+ l = len(milliseconds)
+ if l == 0 or l > 3 or not milliseconds.isdigit():
+ raise dns.exception.SyntaxError('bad longitude milliseconds value')
+ if l == 1:
+ m = 100
+ elif l == 2:
+ m = 10
+ else:
+ m = 1
+ longitude[3] = m * int(milliseconds)
+ t = tok.get_string()
+ elif t.isdigit():
+ longitude[2] = int(t)
+ t = tok.get_string()
+ if t == 'W':
+ longitude[0] *= -1
+ elif t != 'E':
+ raise dns.exception.SyntaxError('bad longitude hemisphere value')
+
+ t = tok.get_string()
+ if t[-1] == 'm':
+ t = t[0 : -1]
+ altitude = float(t) * 100.0 # m -> cm
+
+ token = tok.get().unescape()
+ if not token.is_eol_or_eof():
+ value = token.value
+ if value[-1] == 'm':
+ value = value[0 : -1]
+ size = float(value) * 100.0 # m -> cm
+ token = tok.get().unescape()
+ if not token.is_eol_or_eof():
+ value = token.value
+ if value[-1] == 'm':
+ value = value[0 : -1]
+ hprec = float(value) * 100.0 # m -> cm
+ token = tok.get().unescape()
+ if not token.is_eol_or_eof():
+ value = token.value
+ if value[-1] == 'm':
+ value = value[0 : -1]
+ vprec = float(value) * 100.0 # m -> cm
+ tok.get_eol()
+
+ return cls(rdclass, rdtype, latitude, longitude, altitude,
+ size, hprec, vprec)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ if self.latitude[0] < 0:
+ sign = -1
+ degrees = long(-1 * self.latitude[0])
+ else:
+ sign = 1
+ degrees = long(self.latitude[0])
+ milliseconds = (degrees * 3600000 +
+ self.latitude[1] * 60000 +
+ self.latitude[2] * 1000 +
+ self.latitude[3]) * sign
+ latitude = 0x80000000L + milliseconds
+ if self.longitude[0] < 0:
+ sign = -1
+ degrees = long(-1 * self.longitude[0])
+ else:
+ sign = 1
+ degrees = long(self.longitude[0])
+ milliseconds = (degrees * 3600000 +
+ self.longitude[1] * 60000 +
+ self.longitude[2] * 1000 +
+ self.longitude[3]) * sign
+ longitude = 0x80000000L + milliseconds
+ altitude = long(self.altitude) + 10000000L
+ size = _encode_size(self.size, "size")
+ hprec = _encode_size(self.horizontal_precision, "horizontal precision")
+ vprec = _encode_size(self.vertical_precision, "vertical precision")
+ wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
+ longitude, altitude)
+ file.write(wire)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (version, size, hprec, vprec, latitude, longitude, altitude) = \
+ struct.unpack("!BBBBIII", wire[current : current + rdlen])
+ if latitude > 0x80000000L:
+ latitude = float(latitude - 0x80000000L) / 3600000
+ else:
+ latitude = -1 * float(0x80000000L - latitude) / 3600000
+ if latitude < -90.0 or latitude > 90.0:
+ raise dns.exception.FormError("bad latitude")
+ if longitude > 0x80000000L:
+ longitude = float(longitude - 0x80000000L) / 3600000
+ else:
+ longitude = -1 * float(0x80000000L - longitude) / 3600000
+ if longitude < -180.0 or longitude > 180.0:
+ raise dns.exception.FormError("bad longitude")
+ altitude = float(altitude) - 10000000.0
+ size = _decode_size(size, "size")
+ hprec = _decode_size(hprec, "horizontal precision")
+ vprec = _decode_size(vprec, "vertical precision")
+ return cls(rdclass, rdtype, latitude, longitude, altitude,
+ size, hprec, vprec)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ f = cStringIO.StringIO()
+ self.to_wire(f)
+ wire1 = f.getvalue()
+ f.seek(0)
+ f.truncate()
+ other.to_wire(f)
+ wire2 = f.getvalue()
+ f.close()
+
+ return cmp(wire1, wire2)
+
+ def _get_float_latitude(self):
+ return _tuple_to_float(self.latitude)
+
+ def _set_float_latitude(self, value):
+ self.latitude = _float_to_tuple(value)
+
+ float_latitude = property(_get_float_latitude, _set_float_latitude,
+ doc="latitude as a floating point value")
+
+ def _get_float_longitude(self):
+ return _tuple_to_float(self.longitude)
+
+ def _set_float_longitude(self, value):
+ self.longitude = _float_to_tuple(value)
+
+ float_longitude = property(_get_float_longitude, _set_float_longitude,
+ doc="longitude as a floating point value")
diff --git a/lib/dnspython/dns/rdtypes/ANY/MX.py b/lib/dnspython/dns/rdtypes/ANY/MX.py
new file mode 100644
index 0000000000..9cad260672
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/MX.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+class MX(dns.rdtypes.mxbase.MXBase):
+ """MX record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/NS.py b/lib/dnspython/dns/rdtypes/ANY/NS.py
new file mode 100644
index 0000000000..4b03a3ab47
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/NS.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class NS(dns.rdtypes.nsbase.NSBase):
+ """NS record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/NSEC.py b/lib/dnspython/dns/rdtypes/ANY/NSEC.py
new file mode 100644
index 0000000000..72859ce108
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/NSEC.py
@@ -0,0 +1,141 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+
+class NSEC(dns.rdata.Rdata):
+ """NSEC record
+
+ @ivar next: the next name
+ @type next: dns.name.Name object
+ @ivar windows: the windowed bitmap list
+ @type windows: list of (window number, string) tuples"""
+
+ __slots__ = ['next', 'windows']
+
+ def __init__(self, rdclass, rdtype, next, windows):
+ super(NSEC, self).__init__(rdclass, rdtype)
+ self.next = next
+ self.windows = windows
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = self.next.choose_relativity(origin, relativize)
+ text = ''
+ for (window, bitmap) in self.windows:
+ bits = []
+ for i in xrange(0, len(bitmap)):
+ byte = ord(bitmap[i])
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(dns.rdatatype.to_text(window * 256 + \
+ i * 8 + j))
+ text += (' ' + ' '.join(bits))
+ return '%s%s' % (next, text)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ next = tok.get_name()
+ next = next.choose_relativity(origin, relativize)
+ rdtypes = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ nrdtype = dns.rdatatype.from_text(token.value)
+ if nrdtype == 0:
+ raise dns.exception.SyntaxError("NSEC with bit 0")
+ if nrdtype > 65535:
+ raise dns.exception.SyntaxError("NSEC with bit > 65535")
+ rdtypes.append(nrdtype)
+ rdtypes.sort()
+ window = 0
+ octets = 0
+ prior_rdtype = 0
+ bitmap = ['\0'] * 32
+ windows = []
+ for nrdtype in rdtypes:
+ if nrdtype == prior_rdtype:
+ continue
+ prior_rdtype = nrdtype
+ new_window = nrdtype // 256
+ if new_window != window:
+ windows.append((window, ''.join(bitmap[0:octets])))
+ bitmap = ['\0'] * 32
+ window = new_window
+ offset = nrdtype % 256
+ byte = offset / 8
+ bit = offset % 8
+ octets = byte + 1
+ bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
+ windows.append((window, ''.join(bitmap[0:octets])))
+ return cls(rdclass, rdtype, next, windows)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ self.next.to_wire(file, None, origin)
+ for (window, bitmap) in self.windows:
+ file.write(chr(window))
+ file.write(chr(len(bitmap)))
+ file.write(bitmap)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ windows = []
+ while rdlen > 0:
+ if rdlen < 3:
+ raise dns.exception.FormError("NSEC too short")
+ window = ord(wire[current])
+ octets = ord(wire[current + 1])
+ if octets == 0 or octets > 32:
+ raise dns.exception.FormError("bad NSEC octets")
+ current += 2
+ rdlen -= 2
+ if rdlen < octets:
+ raise dns.exception.FormError("bad NSEC bitmap length")
+ bitmap = wire[current : current + octets]
+ current += octets
+ rdlen -= octets
+ windows.append((window, bitmap))
+ if not origin is None:
+ next = next.relativize(origin)
+ return cls(rdclass, rdtype, next, windows)
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ self.next = self.next.choose_relativity(origin, relativize)
+
+ def _cmp(self, other):
+ v = cmp(self.next, other.next)
+ if v == 0:
+ b1 = cStringIO.StringIO()
+ for (window, bitmap) in self.windows:
+ b1.write(chr(window))
+ b1.write(chr(len(bitmap)))
+ b1.write(bitmap)
+ b2 = cStringIO.StringIO()
+ for (window, bitmap) in other.windows:
+ b2.write(chr(window))
+ b2.write(chr(len(bitmap)))
+ b2.write(bitmap)
+ v = cmp(b1.getvalue(), b2.getvalue())
+ return v
diff --git a/lib/dnspython/dns/rdtypes/ANY/NSEC3.py b/lib/dnspython/dns/rdtypes/ANY/NSEC3.py
new file mode 100644
index 0000000000..932d7b4032
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/NSEC3.py
@@ -0,0 +1,182 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import cStringIO
+import string
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV',
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
+b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
+ '0123456789ABCDEFGHIJKLMNOPQRSTUV')
+
+# hash algorithm constants
+SHA1 = 1
+
+# flag constants
+OPTOUT = 1
+
+class NSEC3(dns.rdata.Rdata):
+ """NSEC3 record
+
+ @ivar algorithm: the hash algorithm number
+ @type algorithm: int
+ @ivar flags: the flags
+ @type flags: int
+ @ivar iterations: the number of iterations
+ @type iterations: int
+ @ivar salt: the salt
+ @type salt: string
+ @ivar next: the next name hash
+ @type next: string
+ @ivar windows: the windowed bitmap list
+ @type windows: list of (window number, string) tuples"""
+
+ __slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows']
+
+ def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt,
+ next, windows):
+ super(NSEC3, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.flags = flags
+ self.iterations = iterations
+ self.salt = salt
+ self.next = next
+ self.windows = windows
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = base64.b32encode(self.next).translate(b32_normal_to_hex).lower()
+ if self.salt == '':
+ salt = '-'
+ else:
+ salt = self.salt.encode('hex-codec')
+ text = ''
+ for (window, bitmap) in self.windows:
+ bits = []
+ for i in xrange(0, len(bitmap)):
+ byte = ord(bitmap[i])
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(dns.rdatatype.to_text(window * 256 + \
+ i * 8 + j))
+ text += (' ' + ' '.join(bits))
+ return '%u %u %u %s %s%s' % (self.algorithm, self.flags, self.iterations,
+ salt, next, text)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == '-':
+ salt = ''
+ else:
+ salt = salt.decode('hex-codec')
+ next = tok.get_string().upper().translate(b32_hex_to_normal)
+ next = base64.b32decode(next)
+ rdtypes = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ nrdtype = dns.rdatatype.from_text(token.value)
+ if nrdtype == 0:
+ raise dns.exception.SyntaxError("NSEC3 with bit 0")
+ if nrdtype > 65535:
+ raise dns.exception.SyntaxError("NSEC3 with bit > 65535")
+ rdtypes.append(nrdtype)
+ rdtypes.sort()
+ window = 0
+ octets = 0
+ prior_rdtype = 0
+ bitmap = ['\0'] * 32
+ windows = []
+ for nrdtype in rdtypes:
+ if nrdtype == prior_rdtype:
+ continue
+ prior_rdtype = nrdtype
+ new_window = nrdtype // 256
+ if new_window != window:
+ windows.append((window, ''.join(bitmap[0:octets])))
+ bitmap = ['\0'] * 32
+ window = new_window
+ offset = nrdtype % 256
+ byte = offset / 8
+ bit = offset % 8
+ octets = byte + 1
+ bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
+ windows.append((window, ''.join(bitmap[0:octets])))
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+ self.iterations, l))
+ file.write(self.salt)
+ l = len(self.next)
+ file.write(struct.pack("!B", l))
+ file.write(self.next)
+ for (window, bitmap) in self.windows:
+ file.write(chr(window))
+ file.write(chr(len(bitmap)))
+ file.write(bitmap)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
+ wire[current : current + 5])
+ current += 5
+ rdlen -= 5
+ salt = wire[current : current + slen]
+ current += slen
+ rdlen -= slen
+ (nlen, ) = struct.unpack('!B', wire[current])
+ current += 1
+ rdlen -= 1
+ next = wire[current : current + nlen]
+ current += nlen
+ rdlen -= nlen
+ windows = []
+ while rdlen > 0:
+ if rdlen < 3:
+ raise dns.exception.FormError("NSEC3 too short")
+ window = ord(wire[current])
+ octets = ord(wire[current + 1])
+ if octets == 0 or octets > 32:
+ raise dns.exception.FormError("bad NSEC3 octets")
+ current += 2
+ rdlen -= 2
+ if rdlen < octets:
+ raise dns.exception.FormError("bad NSEC3 bitmap length")
+ bitmap = wire[current : current + octets]
+ current += octets
+ rdlen -= octets
+ windows.append((window, bitmap))
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ b1 = cStringIO.StringIO()
+ self.to_wire(b1)
+ b2 = cStringIO.StringIO()
+ other.to_wire(b2)
+ return cmp(b1.getvalue(), b2.getvalue())
diff --git a/lib/dnspython/dns/rdtypes/ANY/NSEC3PARAM.py b/lib/dnspython/dns/rdtypes/ANY/NSEC3PARAM.py
new file mode 100644
index 0000000000..ec91e5e85c
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/NSEC3PARAM.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.rdata
+
+class NSEC3PARAM(dns.rdata.Rdata):
+ """NSEC3PARAM record
+
+ @ivar algorithm: the hash algorithm number
+ @type algorithm: int
+ @ivar flags: the flags
+ @type flags: int
+ @ivar iterations: the number of iterations
+ @type iterations: int
+ @ivar salt: the salt
+ @type salt: string"""
+
+ __slots__ = ['algorithm', 'flags', 'iterations', 'salt']
+
+ def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
+ super(NSEC3PARAM, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.flags = flags
+ self.iterations = iterations
+ self.salt = salt
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.salt == '':
+ salt = '-'
+ else:
+ salt = self.salt.encode('hex-codec')
+ return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, salt)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == '-':
+ salt = ''
+ else:
+ salt = salt.decode('hex-codec')
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+ self.iterations, l))
+ file.write(self.salt)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
+ wire[current : current + 5])
+ current += 5
+ rdlen -= 5
+ salt = wire[current : current + slen]
+ current += slen
+ rdlen -= slen
+ if rdlen != 0:
+ raise dns.exception.FormError
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ b1 = cStringIO.StringIO()
+ self.to_wire(b1)
+ b2 = cStringIO.StringIO()
+ other.to_wire(b2)
+ return cmp(b1.getvalue(), b2.getvalue())
diff --git a/lib/dnspython/dns/rdtypes/ANY/NXT.py b/lib/dnspython/dns/rdtypes/ANY/NXT.py
new file mode 100644
index 0000000000..99ae9b9dff
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/NXT.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+
+class NXT(dns.rdata.Rdata):
+ """NXT record
+
+ @ivar next: the next name
+ @type next: dns.name.Name object
+ @ivar bitmap: the type bitmap
+ @type bitmap: string
+ @see: RFC 2535"""
+
+ __slots__ = ['next', 'bitmap']
+
+ def __init__(self, rdclass, rdtype, next, bitmap):
+ super(NXT, self).__init__(rdclass, rdtype)
+ self.next = next
+ self.bitmap = bitmap
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = self.next.choose_relativity(origin, relativize)
+ bits = []
+ for i in xrange(0, len(self.bitmap)):
+ byte = ord(self.bitmap[i])
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(dns.rdatatype.to_text(i * 8 + j))
+ text = ' '.join(bits)
+ return '%s %s' % (next, text)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ next = tok.get_name()
+ next = next.choose_relativity(origin, relativize)
+ bitmap = ['\x00', '\x00', '\x00', '\x00',
+ '\x00', '\x00', '\x00', '\x00',
+ '\x00', '\x00', '\x00', '\x00',
+ '\x00', '\x00', '\x00', '\x00' ]
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ if token.value.isdigit():
+ nrdtype = int(token.value)
+ else:
+ nrdtype = dns.rdatatype.from_text(token.value)
+ if nrdtype == 0:
+ raise dns.exception.SyntaxError("NXT with bit 0")
+ if nrdtype > 127:
+ raise dns.exception.SyntaxError("NXT with bit > 127")
+ i = nrdtype // 8
+ bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (nrdtype % 8)))
+ bitmap = dns.rdata._truncate_bitmap(bitmap)
+ return cls(rdclass, rdtype, next, bitmap)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ self.next.to_wire(file, None, origin)
+ file.write(self.bitmap)
+
+ def to_digestable(self, origin = None):
+ return self.next.to_digestable(origin) + self.bitmap
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ bitmap = wire[current : current + rdlen]
+ if not origin is None:
+ next = next.relativize(origin)
+ return cls(rdclass, rdtype, next, bitmap)
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ self.next = self.next.choose_relativity(origin, relativize)
+
+ def _cmp(self, other):
+ v = cmp(self.next, other.next)
+ if v == 0:
+ v = cmp(self.bitmap, other.bitmap)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/ANY/PTR.py b/lib/dnspython/dns/rdtypes/ANY/PTR.py
new file mode 100644
index 0000000000..6c4b79eaac
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/PTR.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class PTR(dns.rdtypes.nsbase.NSBase):
+ """PTR record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/RP.py b/lib/dnspython/dns/rdtypes/ANY/RP.py
new file mode 100644
index 0000000000..421ce8e207
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/RP.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class RP(dns.rdata.Rdata):
+ """RP record
+
+ @ivar mbox: The responsible person's mailbox
+ @type mbox: dns.name.Name object
+ @ivar txt: The owner name of a node with TXT records, or the root name
+ if no TXT records are associated with this RP.
+ @type txt: dns.name.Name object
+ @see: RFC 1183"""
+
+ __slots__ = ['mbox', 'txt']
+
+ def __init__(self, rdclass, rdtype, mbox, txt):
+ super(RP, self).__init__(rdclass, rdtype)
+ self.mbox = mbox
+ self.txt = txt
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mbox = self.mbox.choose_relativity(origin, relativize)
+ txt = self.txt.choose_relativity(origin, relativize)
+ return "%s %s" % (str(mbox), str(txt))
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ mbox = tok.get_name()
+ txt = tok.get_name()
+ mbox = mbox.choose_relativity(origin, relativize)
+ txt = txt.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, mbox, txt)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ self.mbox.to_wire(file, None, origin)
+ self.txt.to_wire(file, None, origin)
+
+ def to_digestable(self, origin = None):
+ return self.mbox.to_digestable(origin) + \
+ self.txt.to_digestable(origin)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (mbox, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ if rdlen <= 0:
+ raise dns.exception.FormError
+ (txt, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if not origin is None:
+ mbox = mbox.relativize(origin)
+ txt = txt.relativize(origin)
+ return cls(rdclass, rdtype, mbox, txt)
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ self.mbox = self.mbox.choose_relativity(origin, relativize)
+ self.txt = self.txt.choose_relativity(origin, relativize)
+
+ def _cmp(self, other):
+ v = cmp(self.mbox, other.mbox)
+ if v == 0:
+ v = cmp(self.txt, other.txt)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/ANY/RRSIG.py b/lib/dnspython/dns/rdtypes/ANY/RRSIG.py
new file mode 100644
index 0000000000..0e4816f648
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/RRSIG.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.sigbase
+
+class RRSIG(dns.rdtypes.sigbase.SIGBase):
+ """RRSIG record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/RT.py b/lib/dnspython/dns/rdtypes/ANY/RT.py
new file mode 100644
index 0000000000..1efd3724d9
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/RT.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+class RT(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+ """RT record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/SIG.py b/lib/dnspython/dns/rdtypes/ANY/SIG.py
new file mode 100644
index 0000000000..501e29cc8c
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/SIG.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+import struct
+import dns.rdtypes.sigbase
+
+class SIG(dns.rdtypes.sigbase.SIGBase):
+ """SIG record"""
+ def to_digestable(self, origin = None):
+ return struct.pack('!HBBIIIH', self.type_covered,
+ self.algorithm, self.labels,
+ self.original_ttl, self.expiration,
+ self.inception, self.key_tag) + \
+ self.signer.to_digestable(origin) + \
+ self.signature
diff --git a/lib/dnspython/dns/rdtypes/ANY/SOA.py b/lib/dnspython/dns/rdtypes/ANY/SOA.py
new file mode 100644
index 0000000000..a25a35e29b
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/SOA.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class SOA(dns.rdata.Rdata):
+ """SOA record
+
+ @ivar mname: the SOA MNAME (master name) field
+ @type mname: dns.name.Name object
+ @ivar rname: the SOA RNAME (responsible name) field
+ @type rname: dns.name.Name object
+ @ivar serial: The zone's serial number
+ @type serial: int
+ @ivar refresh: The zone's refresh value (in seconds)
+ @type refresh: int
+ @ivar retry: The zone's retry value (in seconds)
+ @type retry: int
+ @ivar expire: The zone's expiration value (in seconds)
+ @type expire: int
+ @ivar minimum: The zone's negative caching time (in seconds, called
+ "minimum" for historical reasons)
+ @type minimum: int
+ @see: RFC 1035"""
+
+ __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
+ 'minimum']
+
+ def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
+ expire, minimum):
+ super(SOA, self).__init__(rdclass, rdtype)
+ self.mname = mname
+ self.rname = rname
+ self.serial = serial
+ self.refresh = refresh
+ self.retry = retry
+ self.expire = expire
+ self.minimum = minimum
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mname = self.mname.choose_relativity(origin, relativize)
+ rname = self.rname.choose_relativity(origin, relativize)
+ return '%s %s %d %d %d %d %d' % (
+ mname, rname, self.serial, self.refresh, self.retry,
+ self.expire, self.minimum )
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ mname = tok.get_name()
+ rname = tok.get_name()
+ mname = mname.choose_relativity(origin, relativize)
+ rname = rname.choose_relativity(origin, relativize)
+ serial = tok.get_uint32()
+ refresh = tok.get_ttl()
+ retry = tok.get_ttl()
+ expire = tok.get_ttl()
+ minimum = tok.get_ttl()
+ tok.get_eol()
+ return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
+ expire, minimum )
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ self.mname.to_wire(file, compress, origin)
+ self.rname.to_wire(file, compress, origin)
+ five_ints = struct.pack('!IIIII', self.serial, self.refresh,
+ self.retry, self.expire, self.minimum)
+ file.write(five_ints)
+
+ def to_digestable(self, origin = None):
+ return self.mname.to_digestable(origin) + \
+ self.rname.to_digestable(origin) + \
+ struct.pack('!IIIII', self.serial, self.refresh,
+ self.retry, self.expire, self.minimum)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ if rdlen != 20:
+ raise dns.exception.FormError
+ five_ints = struct.unpack('!IIIII',
+ wire[current : current + rdlen])
+ if not origin is None:
+ mname = mname.relativize(origin)
+ rname = rname.relativize(origin)
+ return cls(rdclass, rdtype, mname, rname,
+ five_ints[0], five_ints[1], five_ints[2], five_ints[3],
+ five_ints[4])
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ self.mname = self.mname.choose_relativity(origin, relativize)
+ self.rname = self.rname.choose_relativity(origin, relativize)
+
+ def _cmp(self, other):
+ v = cmp(self.mname, other.mname)
+ if v == 0:
+ v = cmp(self.rname, other.rname)
+ if v == 0:
+ self_ints = struct.pack('!IIIII', self.serial, self.refresh,
+ self.retry, self.expire, self.minimum)
+ other_ints = struct.pack('!IIIII', other.serial, other.refresh,
+ other.retry, other.expire,
+ other.minimum)
+ v = cmp(self_ints, other_ints)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/ANY/SPF.py b/lib/dnspython/dns/rdtypes/ANY/SPF.py
new file mode 100644
index 0000000000..9b5a9a9fed
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/SPF.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+class SPF(dns.rdtypes.txtbase.TXTBase):
+ """SPF record
+
+ @see: RFC 4408"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/SSHFP.py b/lib/dnspython/dns/rdtypes/ANY/SSHFP.py
new file mode 100644
index 0000000000..162dda5c48
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/SSHFP.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2005-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.rdata
+import dns.rdatatype
+
+class SSHFP(dns.rdata.Rdata):
+ """SSHFP record
+
+ @ivar algorithm: the algorithm
+ @type algorithm: int
+ @ivar fp_type: the digest type
+ @type fp_type: int
+ @ivar fingerprint: the fingerprint
+ @type fingerprint: string
+ @see: draft-ietf-secsh-dns-05.txt"""
+
+ __slots__ = ['algorithm', 'fp_type', 'fingerprint']
+
+ def __init__(self, rdclass, rdtype, algorithm, fp_type,
+ fingerprint):
+ super(SSHFP, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.fp_type = fp_type
+ self.fingerprint = fingerprint
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %s' % (self.algorithm,
+ self.fp_type,
+ dns.rdata._hexify(self.fingerprint,
+ chunksize=128))
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ algorithm = tok.get_uint8()
+ fp_type = tok.get_uint8()
+ fingerprint = tok.get_string()
+ fingerprint = fingerprint.decode('hex_codec')
+ tok.get_eol()
+ return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ header = struct.pack("!BB", self.algorithm, self.fp_type)
+ file.write(header)
+ file.write(self.fingerprint)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ header = struct.unpack("!BB", wire[current : current + 2])
+ current += 2
+ rdlen -= 2
+ fingerprint = wire[current : current + rdlen]
+ return cls(rdclass, rdtype, header[0], header[1], fingerprint)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ hs = struct.pack("!BB", self.algorithm, self.fp_type)
+ ho = struct.pack("!BB", other.algorithm, other.fp_type)
+ v = cmp(hs, ho)
+ if v == 0:
+ v = cmp(self.fingerprint, other.fingerprint)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/ANY/TXT.py b/lib/dnspython/dns/rdtypes/ANY/TXT.py
new file mode 100644
index 0000000000..23f4f3b7c6
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/TXT.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+class TXT(dns.rdtypes.txtbase.TXTBase):
+ """TXT record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/ANY/X25.py b/lib/dnspython/dns/rdtypes/ANY/X25.py
new file mode 100644
index 0000000000..c3632f7fc4
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/X25.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class X25(dns.rdata.Rdata):
+ """X25 record
+
+ @ivar address: the PSDN address
+ @type address: string
+ @see: RFC 1183"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(X25, self).__init__(rdclass, rdtype)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '"%s"' % dns.rdata._escapify(self.address)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ address = tok.get_string()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ l = len(self.address)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(self.address)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ address = wire[current : current + l]
+ return cls(rdclass, rdtype, address)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ return cmp(self.address, other.address)
diff --git a/lib/dnspython/dns/rdtypes/ANY/__init__.py b/lib/dnspython/dns/rdtypes/ANY/__init__.py
new file mode 100644
index 0000000000..0815dd5450
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/ANY/__init__.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class ANY (generic) rdata type classes."""
+
+__all__ = [
+ 'AFSDB',
+ 'CERT',
+ 'CNAME',
+ 'DLV',
+ 'DNAME',
+ 'DNSKEY',
+ 'DS',
+ 'GPOS',
+ 'HINFO',
+ 'HIP',
+ 'ISDN',
+ 'KEY',
+ 'LOC',
+ 'MX',
+ 'NS',
+ 'NSEC',
+ 'NSEC3',
+ 'NSEC3PARAM',
+ 'NXT',
+ 'PTR',
+ 'RP',
+ 'RRSIG',
+ 'RT',
+ 'SIG',
+ 'SOA',
+ 'SPF',
+ 'SSHFP',
+ 'TXT',
+ 'X25',
+]
diff --git a/lib/dnspython/dns/rdtypes/IN/A.py b/lib/dnspython/dns/rdtypes/IN/A.py
new file mode 100644
index 0000000000..e05f204a2f
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/A.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.ipv4
+import dns.rdata
+import dns.tokenizer
+
+class A(dns.rdata.Rdata):
+ """A record.
+
+ @ivar address: an IPv4 address
+ @type address: string (in the standard "dotted quad" format)"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(A, self).__init__(rdclass, rdtype)
+ # check that it's OK
+ junk = dns.ipv4.inet_aton(address)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ address = tok.get_identifier()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ file.write(dns.ipv4.inet_aton(self.address))
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ address = dns.ipv4.inet_ntoa(wire[current : current + rdlen])
+ return cls(rdclass, rdtype, address)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ sa = dns.ipv4.inet_aton(self.address)
+ oa = dns.ipv4.inet_aton(other.address)
+ return cmp(sa, oa)
diff --git a/lib/dnspython/dns/rdtypes/IN/AAAA.py b/lib/dnspython/dns/rdtypes/IN/AAAA.py
new file mode 100644
index 0000000000..2d812d39eb
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/AAAA.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+
+class AAAA(dns.rdata.Rdata):
+ """AAAA record.
+
+ @ivar address: an IPv6 address
+ @type address: string (in the standard IPv6 format)"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(AAAA, self).__init__(rdclass, rdtype)
+ # check that it's OK
+ junk = dns.inet.inet_pton(dns.inet.AF_INET6, address)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ address = tok.get_identifier()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.address))
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ address = dns.inet.inet_ntop(dns.inet.AF_INET6,
+ wire[current : current + rdlen])
+ return cls(rdclass, rdtype, address)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ sa = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
+ oa = dns.inet.inet_pton(dns.inet.AF_INET6, other.address)
+ return cmp(sa, oa)
diff --git a/lib/dnspython/dns/rdtypes/IN/APL.py b/lib/dnspython/dns/rdtypes/IN/APL.py
new file mode 100644
index 0000000000..7412c02d30
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/APL.py
@@ -0,0 +1,170 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+
+class APLItem(object):
+ """An APL list item.
+
+ @ivar family: the address family (IANA address family registry)
+ @type family: int
+ @ivar negation: is this item negated?
+ @type negation: bool
+ @ivar address: the address
+ @type address: string
+ @ivar prefix: the prefix length
+ @type prefix: int
+ """
+
+ __slots__ = ['family', 'negation', 'address', 'prefix']
+
+ def __init__(self, family, negation, address, prefix):
+ self.family = family
+ self.negation = negation
+ self.address = address
+ self.prefix = prefix
+
+ def __str__(self):
+ if self.negation:
+ return "!%d:%s/%s" % (self.family, self.address, self.prefix)
+ else:
+ return "%d:%s/%s" % (self.family, self.address, self.prefix)
+
+ def to_wire(self, file):
+ if self.family == 1:
+ address = dns.inet.inet_pton(dns.inet.AF_INET, self.address)
+ elif self.family == 2:
+ address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
+ else:
+ address = self.address.decode('hex_codec')
+ #
+ # Truncate least significant zero bytes.
+ #
+ last = 0
+ for i in xrange(len(address) - 1, -1, -1):
+ if address[i] != chr(0):
+ last = i + 1
+ break
+ address = address[0 : last]
+ l = len(address)
+ assert l < 128
+ if self.negation:
+ l |= 0x80
+ header = struct.pack('!HBB', self.family, self.prefix, l)
+ file.write(header)
+ file.write(address)
+
+class APL(dns.rdata.Rdata):
+ """APL record.
+
+ @ivar items: a list of APL items
+    @type items: list of APLItem
+ @see: RFC 3123"""
+
+ __slots__ = ['items']
+
+ def __init__(self, rdclass, rdtype, items):
+ super(APL, self).__init__(rdclass, rdtype)
+ self.items = items
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return ' '.join(map(lambda x: str(x), self.items))
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ items = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ item = token.value
+ if item[0] == '!':
+ negation = True
+ item = item[1:]
+ else:
+ negation = False
+ (family, rest) = item.split(':', 1)
+ family = int(family)
+ (address, prefix) = rest.split('/', 1)
+ prefix = int(prefix)
+ item = APLItem(family, negation, address, prefix)
+ items.append(item)
+
+ return cls(rdclass, rdtype, items)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ for item in self.items:
+ item.to_wire(file)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ items = []
+ while 1:
+ if rdlen < 4:
+ raise dns.exception.FormError
+ header = struct.unpack('!HBB', wire[current : current + 4])
+ afdlen = header[2]
+ if afdlen > 127:
+ negation = True
+ afdlen -= 128
+ else:
+ negation = False
+ current += 4
+ rdlen -= 4
+ if rdlen < afdlen:
+ raise dns.exception.FormError
+ address = wire[current : current + afdlen]
+ l = len(address)
+ if header[0] == 1:
+ if l < 4:
+ address += '\x00' * (4 - l)
+ address = dns.inet.inet_ntop(dns.inet.AF_INET, address)
+ elif header[0] == 2:
+ if l < 16:
+ address += '\x00' * (16 - l)
+ address = dns.inet.inet_ntop(dns.inet.AF_INET6, address)
+ else:
+ #
+ # This isn't really right according to the RFC, but it
+ # seems better than throwing an exception
+ #
+ address = address.encode('hex_codec')
+ current += afdlen
+ rdlen -= afdlen
+ item = APLItem(header[0], negation, address, header[1])
+ items.append(item)
+ if rdlen == 0:
+ break
+ return cls(rdclass, rdtype, items)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ f = cStringIO.StringIO()
+ self.to_wire(f)
+ wire1 = f.getvalue()
+ f.seek(0)
+ f.truncate()
+ other.to_wire(f)
+ wire2 = f.getvalue()
+ f.close()
+
+ return cmp(wire1, wire2)
diff --git a/lib/dnspython/dns/rdtypes/IN/DHCID.py b/lib/dnspython/dns/rdtypes/IN/DHCID.py
new file mode 100644
index 0000000000..2d35234bf0
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/DHCID.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+
+class DHCID(dns.rdata.Rdata):
+ """DHCID record
+
+ @ivar data: the data (the content of the RR is opaque as far as the
+ DNS is concerned)
+ @type data: string
+ @see: RFC 4701"""
+
+ __slots__ = ['data']
+
+ def __init__(self, rdclass, rdtype, data):
+ super(DHCID, self).__init__(rdclass, rdtype)
+ self.data = data
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._base64ify(self.data)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value)
+ b64 = ''.join(chunks)
+ data = b64.decode('base64_codec')
+ return cls(rdclass, rdtype, data)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ file.write(self.data)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ data = wire[current : current + rdlen]
+ return cls(rdclass, rdtype, data)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ return cmp(self.data, other.data)
diff --git a/lib/dnspython/dns/rdtypes/IN/IPSECKEY.py b/lib/dnspython/dns/rdtypes/IN/IPSECKEY.py
new file mode 100644
index 0000000000..9ab08d881c
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/IPSECKEY.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.inet
+import dns.name
+
+class IPSECKEY(dns.rdata.Rdata):
+ """IPSECKEY record
+
+ @ivar precedence: the precedence for this key data
+ @type precedence: int
+ @ivar gateway_type: the gateway type
+ @type gateway_type: int
+ @ivar algorithm: the algorithm to use
+ @type algorithm: int
+    @ivar gateway: the gateway
+    @type gateway: None, IPv4 address, IPv6 address, or domain name
+ @ivar key: the public key
+ @type key: string
+ @see: RFC 4025"""
+
+ __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
+
+ def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
+ gateway, key):
+ super(IPSECKEY, self).__init__(rdclass, rdtype)
+ if gateway_type == 0:
+ if gateway != '.' and not gateway is None:
+ raise SyntaxError('invalid gateway for gateway type 0')
+ gateway = None
+ elif gateway_type == 1:
+ # check that it's OK
+ junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
+ elif gateway_type == 2:
+ # check that it's OK
+ junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
+ elif gateway_type == 3:
+ pass
+ else:
+ raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
+ self.precedence = precedence
+ self.gateway_type = gateway_type
+ self.algorithm = algorithm
+ self.gateway = gateway
+ self.key = key
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.gateway_type == 0:
+ gateway = '.'
+ elif self.gateway_type == 1:
+ gateway = self.gateway
+ elif self.gateway_type == 2:
+ gateway = self.gateway
+ elif self.gateway_type == 3:
+ gateway = str(self.gateway.choose_relativity(origin, relativize))
+ else:
+ raise ValueError('invalid gateway type')
+ return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
+ self.algorithm, gateway,
+ dns.rdata._base64ify(self.key))
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ precedence = tok.get_uint8()
+ gateway_type = tok.get_uint8()
+ algorithm = tok.get_uint8()
+ if gateway_type == 3:
+ gateway = tok.get_name().choose_relativity(origin, relativize)
+ else:
+ gateway = tok.get_string()
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value)
+ b64 = ''.join(chunks)
+ key = b64.decode('base64_codec')
+ return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
+ gateway, key)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ header = struct.pack("!BBB", self.precedence, self.gateway_type,
+ self.algorithm)
+ file.write(header)
+ if self.gateway_type == 0:
+ pass
+ elif self.gateway_type == 1:
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
+ elif self.gateway_type == 2:
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
+ elif self.gateway_type == 3:
+ self.gateway.to_wire(file, None, origin)
+ else:
+ raise ValueError('invalid gateway type')
+ file.write(self.key)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ if rdlen < 3:
+ raise dns.exception.FormError
+ header = struct.unpack('!BBB', wire[current : current + 3])
+ gateway_type = header[1]
+ current += 3
+ rdlen -= 3
+ if gateway_type == 0:
+ gateway = None
+ elif gateway_type == 1:
+ gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
+ wire[current : current + 4])
+ current += 4
+ rdlen -= 4
+ elif gateway_type == 2:
+ gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
+ wire[current : current + 16])
+ current += 16
+ rdlen -= 16
+ elif gateway_type == 3:
+ (gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ else:
+ raise dns.exception.FormError('invalid IPSECKEY gateway type')
+ key = wire[current : current + rdlen]
+ return cls(rdclass, rdtype, header[0], gateway_type, header[2],
+ gateway, key)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ f = cStringIO.StringIO()
+ self.to_wire(f)
+ wire1 = f.getvalue()
+ f.seek(0)
+ f.truncate()
+ other.to_wire(f)
+ wire2 = f.getvalue()
+ f.close()
+
+ return cmp(wire1, wire2)
diff --git a/lib/dnspython/dns/rdtypes/IN/KX.py b/lib/dnspython/dns/rdtypes/IN/KX.py
new file mode 100644
index 0000000000..4d8a3a7d6b
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/KX.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+class KX(dns.rdtypes.mxbase.UncompressedMX):
+ """KX record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/IN/NAPTR.py b/lib/dnspython/dns/rdtypes/IN/NAPTR.py
new file mode 100644
index 0000000000..a3cca55e1c
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/NAPTR.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.name
+import dns.rdata
+
+def _write_string(file, s):
+ l = len(s)
+ assert l < 256
+ byte = chr(l)
+ file.write(byte)
+ file.write(s)
+
+class NAPTR(dns.rdata.Rdata):
+ """NAPTR record
+
+ @ivar order: order
+ @type order: int
+ @ivar preference: preference
+ @type preference: int
+ @ivar flags: flags
+ @type flags: string
+ @ivar service: service
+ @type service: string
+ @ivar regexp: regular expression
+ @type regexp: string
+ @ivar replacement: replacement name
+ @type replacement: dns.name.Name object
+ @see: RFC 3403"""
+
+ __slots__ = ['order', 'preference', 'flags', 'service', 'regexp',
+ 'replacement']
+
+ def __init__(self, rdclass, rdtype, order, preference, flags, service,
+ regexp, replacement):
+ super(NAPTR, self).__init__(rdclass, rdtype)
+ self.order = order
+ self.preference = preference
+ self.flags = flags
+ self.service = service
+ self.regexp = regexp
+ self.replacement = replacement
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ replacement = self.replacement.choose_relativity(origin, relativize)
+ return '%d %d "%s" "%s" "%s" %s' % \
+ (self.order, self.preference,
+ dns.rdata._escapify(self.flags),
+ dns.rdata._escapify(self.service),
+ dns.rdata._escapify(self.regexp),
+ self.replacement)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ order = tok.get_uint16()
+ preference = tok.get_uint16()
+ flags = tok.get_string()
+ service = tok.get_string()
+ regexp = tok.get_string()
+ replacement = tok.get_name()
+ replacement = replacement.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, order, preference, flags, service,
+ regexp, replacement)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ two_ints = struct.pack("!HH", self.order, self.preference)
+ file.write(two_ints)
+ _write_string(file, self.flags)
+ _write_string(file, self.service)
+ _write_string(file, self.regexp)
+ self.replacement.to_wire(file, compress, origin)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (order, preference) = struct.unpack('!HH', wire[current : current + 4])
+ current += 4
+ rdlen -= 4
+ strings = []
+ for i in xrange(3):
+ l = ord(wire[current])
+ current += 1
+ rdlen -= 1
+ if l > rdlen or rdlen < 0:
+ raise dns.exception.FormError
+ s = wire[current : current + l]
+ current += l
+ rdlen -= l
+ strings.append(s)
+ (replacement, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if not origin is None:
+ replacement = replacement.relativize(origin)
+ return cls(rdclass, rdtype, order, preference, strings[0], strings[1],
+ strings[2], replacement)
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ self.replacement = self.replacement.choose_relativity(origin,
+ relativize)
+
+ def _cmp(self, other):
+ sp = struct.pack("!HH", self.order, self.preference)
+ op = struct.pack("!HH", other.order, other.preference)
+ v = cmp(sp, op)
+ if v == 0:
+ v = cmp(self.flags, other.flags)
+ if v == 0:
+ v = cmp(self.service, other.service)
+ if v == 0:
+ v = cmp(self.regexp, other.regexp)
+ if v == 0:
+ v = cmp(self.replacement, other.replacement)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/IN/NSAP.py b/lib/dnspython/dns/rdtypes/IN/NSAP.py
new file mode 100644
index 0000000000..22b9131ccf
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/NSAP.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class NSAP(dns.rdata.Rdata):
+ """NSAP record.
+
+    @ivar address: an NSAP address
+ @type address: string
+ @see: RFC 1706"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(NSAP, self).__init__(rdclass, rdtype)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return "0x%s" % self.address.encode('hex_codec')
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ address = tok.get_string()
+ t = tok.get_eol()
+ if address[0:2] != '0x':
+ raise dns.exception.SyntaxError('string does not start with 0x')
+ address = address[2:].replace('.', '')
+ if len(address) % 2 != 0:
+ raise dns.exception.SyntaxError('hexstring has odd length')
+ address = address.decode('hex_codec')
+ return cls(rdclass, rdtype, address)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ file.write(self.address)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ address = wire[current : current + rdlen]
+ return cls(rdclass, rdtype, address)
+
+ from_wire = classmethod(from_wire)
+
+ def _cmp(self, other):
+ return cmp(self.address, other.address)
diff --git a/lib/dnspython/dns/rdtypes/IN/NSAP_PTR.py b/lib/dnspython/dns/rdtypes/IN/NSAP_PTR.py
new file mode 100644
index 0000000000..6f591f4ec0
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/NSAP_PTR.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+class NSAP_PTR(dns.rdtypes.nsbase.UncompressedNS):
+ """NSAP-PTR record"""
+ pass
diff --git a/lib/dnspython/dns/rdtypes/IN/PX.py b/lib/dnspython/dns/rdtypes/IN/PX.py
new file mode 100644
index 0000000000..0f11290724
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/PX.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class PX(dns.rdata.Rdata):
+ """PX record.
+
+ @ivar preference: the preference value
+ @type preference: int
+ @ivar map822: the map822 name
+ @type map822: dns.name.Name object
+ @ivar mapx400: the mapx400 name
+ @type mapx400: dns.name.Name object
+ @see: RFC 2163"""
+
+ __slots__ = ['preference', 'map822', 'mapx400']
+
+ def __init__(self, rdclass, rdtype, preference, map822, mapx400):
+ super(PX, self).__init__(rdclass, rdtype)
+ self.preference = preference
+ self.map822 = map822
+ self.mapx400 = mapx400
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ map822 = self.map822.choose_relativity(origin, relativize)
+ mapx400 = self.mapx400.choose_relativity(origin, relativize)
+ return '%d %s %s' % (self.preference, map822, mapx400)
+
+ def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+ preference = tok.get_uint16()
+ map822 = tok.get_name()
+ map822 = map822.choose_relativity(origin, relativize)
+ mapx400 = tok.get_name(None)
+ mapx400 = mapx400.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ from_text = classmethod(from_text)
+
+ def to_wire(self, file, compress = None, origin = None):
+ pref = struct.pack("!H", self.preference)
+ file.write(pref)
+ self.map822.to_wire(file, None, origin)
+ self.mapx400.to_wire(file, None, origin)
+
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+ (preference, ) = struct.unpack('!H', wire[current : current + 2])
+ current += 2
+ rdlen -= 2
+ (map822, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused > rdlen:
+ raise dns.exception.FormError
+ current += cused
+ rdlen -= cused
+ if not origin is None:
+ map822 = map822.relativize(origin)
+ (mapx400, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if not origin is None:
+ mapx400 = mapx400.relativize(origin)
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ from_wire = classmethod(from_wire)
+
+ def choose_relativity(self, origin = None, relativize = True):
+ self.map822 = self.map822.choose_relativity(origin, relativize)
+ self.mapx400 = self.mapx400.choose_relativity(origin, relativize)
+
+ def _cmp(self, other):
+ sp = struct.pack("!H", self.preference)
+ op = struct.pack("!H", other.preference)
+ v = cmp(sp, op)
+ if v == 0:
+ v = cmp(self.map822, other.map822)
+ if v == 0:
+ v = cmp(self.mapx400, other.mapx400)
+ return v
diff --git a/lib/dnspython/dns/rdtypes/IN/SRV.py b/lib/dnspython/dns/rdtypes/IN/SRV.py
new file mode 100644
index 0000000000..c9c5823381
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/SRV.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class SRV(dns.rdata.Rdata):
+    """SRV record
+
+    @ivar priority: the priority
+    @type priority: int
+    @ivar weight: the weight
+    @type weight: int
+    @ivar port: the port of the service
+    @type port: int
+    @ivar target: the target host
+    @type target: dns.name.Name object
+    @see: RFC 2782"""
+
+    __slots__ = ['priority', 'weight', 'port', 'target']
+
+    def __init__(self, rdclass, rdtype, priority, weight, port, target):
+        super(SRV, self).__init__(rdclass, rdtype)
+        self.priority = priority
+        self.weight = weight
+        self.port = port
+        self.target = target
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        target = self.target.choose_relativity(origin, relativize)
+        return '%d %d %d %s' % (self.priority, self.weight, self.port,
+                                target)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        # Parse "<priority> <weight> <port> <target>" from master-file text.
+        priority = tok.get_uint16()
+        weight = tok.get_uint16()
+        port = tok.get_uint16()
+        target = tok.get_name(None)
+        target = target.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, priority, weight, port, target)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Wire form: three 16-bit big-endian integers, then the target name.
+        three_ints = struct.pack("!HHH", self.priority, self.weight, self.port)
+        file.write(three_ints)
+        self.target.to_wire(file, compress, origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (priority, weight, port) = struct.unpack('!HHH',
+                                                 wire[current : current + 6])
+        current += 6
+        rdlen -= 6
+        # The target name must consume exactly the remaining rdata.
+        (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                             current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            target = target.relativize(origin)
+        return cls(rdclass, rdtype, priority, weight, port, target)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.target = self.target.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        # Compare packed fixed fields (byte order == numeric order), then
+        # the target name.
+        sp = struct.pack("!HHH", self.priority, self.weight, self.port)
+        op = struct.pack("!HHH", other.priority, other.weight, other.port)
+        v = cmp(sp, op)
+        if v == 0:
+            v = cmp(self.target, other.target)
+        return v
diff --git a/lib/dnspython/dns/rdtypes/IN/WKS.py b/lib/dnspython/dns/rdtypes/IN/WKS.py
new file mode 100644
index 0000000000..85aafb3d23
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/WKS.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+
+import dns.ipv4
+import dns.rdata
+
+_proto_tcp = socket.getprotobyname('tcp')
+_proto_udp = socket.getprotobyname('udp')
+
+class WKS(dns.rdata.Rdata):
+    """WKS record
+
+    @ivar address: the address
+    @type address: string
+    @ivar protocol: the protocol
+    @type protocol: int
+    @ivar bitmap: the bitmap
+    @type bitmap: string
+    @see: RFC 1035"""
+
+    __slots__ = ['address', 'protocol', 'bitmap']
+
+    def __init__(self, rdclass, rdtype, address, protocol, bitmap):
+        super(WKS, self).__init__(rdclass, rdtype)
+        self.address = address
+        self.protocol = protocol
+        self.bitmap = bitmap
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        # Decode the port bitmap: bit j (MSB first) of octet i set means
+        # port i * 8 + j is present.
+        bits = []
+        for i in xrange(0, len(self.bitmap)):
+            byte = ord(self.bitmap[i])
+            for j in xrange(0, 8):
+                if byte & (0x80 >> j):
+                    bits.append(str(i * 8 + j))
+        text = ' '.join(bits)
+        return '%s %d %s' % (self.address, self.protocol, text)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        address = tok.get_string()
+        # The protocol may be numeric or a name resolved via the system
+        # protocol database.
+        protocol = tok.get_string()
+        if protocol.isdigit():
+            protocol = int(protocol)
+        else:
+            protocol = socket.getprotobyname(protocol)
+        bitmap = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            if token.value.isdigit():
+                serv = int(token.value)
+            else:
+                # Service names can only be looked up for TCP and UDP.
+                if protocol != _proto_udp and protocol != _proto_tcp:
+                    raise NotImplementedError("protocol must be TCP or UDP")
+                if protocol == _proto_udp:
+                    protocol_text = "udp"
+                else:
+                    protocol_text = "tcp"
+                serv = socket.getservbyname(token.value, protocol_text)
+            # Grow the bitmap with zero octets as needed, then set the
+            # MSB-first bit corresponding to this port.
+            i = serv // 8
+            l = len(bitmap)
+            if l < i + 1:
+                for j in xrange(l, i + 1):
+                    bitmap.append('\x00')
+            bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (serv % 8)))
+        bitmap = dns.rdata._truncate_bitmap(bitmap)
+        return cls(rdclass, rdtype, address, protocol, bitmap)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Wire form: 4-byte IPv4 address, 1-byte protocol, then the bitmap.
+        file.write(dns.ipv4.inet_aton(self.address))
+        protocol = struct.pack('!B', self.protocol)
+        file.write(protocol)
+        file.write(self.bitmap)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        address = dns.ipv4.inet_ntoa(wire[current : current + 4])
+        protocol, = struct.unpack('!B', wire[current + 4 : current + 5])
+        current += 5
+        rdlen -= 5
+        # The bitmap occupies the rest of the rdata.
+        bitmap = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, address, protocol, bitmap)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        # Compare in wire-format order: address, protocol, then bitmap.
+        sa = dns.ipv4.inet_aton(self.address)
+        oa = dns.ipv4.inet_aton(other.address)
+        v = cmp(sa, oa)
+        if v == 0:
+            sp = struct.pack('!B', self.protocol)
+            op = struct.pack('!B', other.protocol)
+            v = cmp(sp, op)
+            if v == 0:
+                v = cmp(self.bitmap, other.bitmap)
+        return v
diff --git a/lib/dnspython/dns/rdtypes/IN/__init__.py b/lib/dnspython/dns/rdtypes/IN/__init__.py
new file mode 100644
index 0000000000..ab931296ec
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/IN/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class IN rdata type classes."""
+
+# Rdata type modules provided by this (class IN) package.
+__all__ = [
+    'A',
+    'AAAA',
+    'APL',
+    'DHCID',
+    'KX',
+    'NAPTR',
+    'NSAP',
+    'NSAP_PTR',
+    'PX',
+    'SRV',
+    'WKS',
+]
diff --git a/lib/dnspython/dns/rdtypes/__init__.py b/lib/dnspython/dns/rdtypes/__init__.py
new file mode 100644
index 0000000000..13282be73a
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata type classes"""
+
+# Subpackages and shared base-class modules of dns.rdtypes.
+__all__ = [
+    'ANY',
+    'IN',
+    'mxbase',
+    'nsbase',
+    'sigbase',
+    'keybase',
+]
diff --git a/lib/dnspython/dns/rdtypes/dsbase.py b/lib/dnspython/dns/rdtypes/dsbase.py
new file mode 100644
index 0000000000..aa46403a5f
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/dsbase.py
@@ -0,0 +1,92 @@
+# Copyright (C) 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.rdata
+import dns.rdatatype
+
+class DSBase(dns.rdata.Rdata):
+    """Base class for rdata that is like a DS record
+
+    @ivar key_tag: the key tag
+    @type key_tag: int
+    @ivar algorithm: the algorithm
+    @type algorithm: int
+    @ivar digest_type: the digest type
+    @type digest_type: int
+    @ivar digest: the digest
+    @type digest: int
+    @see: draft-ietf-dnsext-delegation-signer-14.txt"""
+
+    __slots__ = ['key_tag', 'algorithm', 'digest_type', 'digest']
+
+    def __init__(self, rdclass, rdtype, key_tag, algorithm, digest_type,
+                 digest):
+        super(DSBase, self).__init__(rdclass, rdtype)
+        self.key_tag = key_tag
+        self.algorithm = algorithm
+        self.digest_type = digest_type
+        self.digest = digest
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%d %d %d %s' % (self.key_tag, self.algorithm,
+                                self.digest_type,
+                                dns.rdata._hexify(self.digest,
+                                                  chunksize=128))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        key_tag = tok.get_uint16()
+        algorithm = tok.get_uint8()
+        digest_type = tok.get_uint8()
+        # The digest is hex text, possibly split across several tokens.
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                # NOTE(review): dns.exception is not imported by this module;
+                # it presumably resolves only because dns.rdata imports it
+                # (making the submodule an attribute of the dns package) --
+                # confirm, or add the import upstream.
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        digest = ''.join(chunks)
+        digest = digest.decode('hex_codec')
+        return cls(rdclass, rdtype, key_tag, algorithm, digest_type,
+                   digest)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        header = struct.pack("!HBB", self.key_tag, self.algorithm,
+                             self.digest_type)
+        file.write(header)
+        file.write(self.digest)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        # Fixed 4-byte header; the digest fills the rest of the rdata.
+        header = struct.unpack("!HBB", wire[current : current + 4])
+        current += 4
+        rdlen -= 4
+        digest = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], header[1], header[2], digest)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        hs = struct.pack("!HBB", self.key_tag, self.algorithm,
+                         self.digest_type)
+        ho = struct.pack("!HBB", other.key_tag, other.algorithm,
+                         other.digest_type)
+        v = cmp(hs, ho)
+        if v == 0:
+            v = cmp(self.digest, other.digest)
+        return v
diff --git a/lib/dnspython/dns/rdtypes/keybase.py b/lib/dnspython/dns/rdtypes/keybase.py
new file mode 100644
index 0000000000..75c9272670
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/keybase.py
@@ -0,0 +1,149 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+
+# Map of textual KEY flag mnemonics to (value, mask) pairs.  When parsing,
+# the mask's bits are cleared before the value is OR'd in, so mutually
+# exclusive mnemonics within one field (e.g. USER/ZONE/HOST/NTYP3, or the
+# SIG0..SIG15 signatory field) overwrite each other cleanly.
+_flags_from_text = {
+    'NOCONF': (0x4000, 0xC000),
+    'NOAUTH': (0x8000, 0xC000),
+    'NOKEY': (0xC000, 0xC000),
+    'FLAG2': (0x2000, 0x2000),
+    'EXTEND': (0x1000, 0x1000),
+    'FLAG4': (0x0800, 0x0800),
+    'FLAG5': (0x0400, 0x0400),
+    'USER': (0x0000, 0x0300),
+    'ZONE': (0x0100, 0x0300),
+    'HOST': (0x0200, 0x0300),
+    'NTYP3': (0x0300, 0x0300),
+    'FLAG8': (0x0080, 0x0080),
+    'FLAG9': (0x0040, 0x0040),
+    'FLAG10': (0x0020, 0x0020),
+    'FLAG11': (0x0010, 0x0010),
+    'SIG0': (0x0000, 0x000f),
+    'SIG1': (0x0001, 0x000f),
+    'SIG2': (0x0002, 0x000f),
+    'SIG3': (0x0003, 0x000f),
+    'SIG4': (0x0004, 0x000f),
+    'SIG5': (0x0005, 0x000f),
+    'SIG6': (0x0006, 0x000f),
+    'SIG7': (0x0007, 0x000f),
+    'SIG8': (0x0008, 0x000f),
+    'SIG9': (0x0009, 0x000f),
+    'SIG10': (0x000a, 0x000f),
+    'SIG11': (0x000b, 0x000f),
+    'SIG12': (0x000c, 0x000f),
+    'SIG13': (0x000d, 0x000f),
+    'SIG14': (0x000e, 0x000f),
+    'SIG15': (0x000f, 0x000f),
+    }
+
+# Textual names for the KEY protocol octet.
+_protocol_from_text = {
+    'NONE' : 0,
+    'TLS' : 1,
+    'EMAIL' : 2,
+    'DNSSEC' : 3,
+    'IPSEC' : 4,
+    'ALL' : 255,
+    }
+
+class KEYBase(dns.rdata.Rdata):
+    """KEY-like record base
+
+    @ivar flags: the key flags
+    @type flags: int
+    @ivar protocol: the protocol for which this key may be used
+    @type protocol: int
+    @ivar algorithm: the algorithm used for the key
+    @type algorithm: int
+    @ivar key: the public key
+    @type key: string"""
+
+    __slots__ = ['flags', 'protocol', 'algorithm', 'key']
+
+    def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key):
+        super(KEYBase, self).__init__(rdclass, rdtype)
+        self.flags = flags
+        self.protocol = protocol
+        self.algorithm = algorithm
+        self.key = key
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%d %d %d %s' % (self.flags, self.protocol, self.algorithm,
+                                dns.rdata._base64ify(self.key))
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        # Flags may be numeric or a '|'-separated list of mnemonics; each
+        # mnemonic clears its field mask and then sets its value.
+        flags = tok.get_string()
+        if flags.isdigit():
+            flags = int(flags)
+        else:
+            flag_names = flags.split('|')
+            flags = 0
+            for flag in flag_names:
+                v = _flags_from_text.get(flag)
+                if v is None:
+                    raise dns.exception.SyntaxError('unknown flag %s' % flag)
+                flags &= ~v[1]
+                flags |= v[0]
+        # The protocol is likewise numeric or mnemonic.  Keep the original
+        # token around so a failed lookup reports the offending text rather
+        # than None (bug fix: the lookup used to overwrite 'protocol' before
+        # the error message was formatted).
+        protocol = tok.get_string()
+        if protocol.isdigit():
+            protocol = int(protocol)
+        else:
+            protocol_text = protocol
+            protocol = _protocol_from_text.get(protocol_text)
+            if protocol is None:
+                raise dns.exception.SyntaxError('unknown protocol %s' % protocol_text)
+
+        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+        # The key material is base64 text, possibly split over tokens.
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        b64 = ''.join(chunks)
+        key = b64.decode('base64_codec')
+        return cls(rdclass, rdtype, flags, protocol, algorithm, key)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Wire form: 2-byte flags, 1-byte protocol, 1-byte algorithm, key.
+        header = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
+        file.write(header)
+        file.write(self.key)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        # Need at least the 4-byte fixed header.
+        if rdlen < 4:
+            raise dns.exception.FormError
+        header = struct.unpack('!HBB', wire[current : current + 4])
+        current += 4
+        rdlen -= 4
+        key = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], header[1], header[2],
+                   key)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        # Compare packed fixed fields, then the raw key bytes.
+        hs = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
+        ho = struct.pack("!HBB", other.flags, other.protocol, other.algorithm)
+        v = cmp(hs, ho)
+        if v == 0:
+            v = cmp(self.key, other.key)
+        return v
diff --git a/lib/dnspython/dns/rdtypes/mxbase.py b/lib/dnspython/dns/rdtypes/mxbase.py
new file mode 100644
index 0000000000..5e3515bec4
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/mxbase.py
@@ -0,0 +1,105 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""MX-like base classes."""
+
+import cStringIO
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class MXBase(dns.rdata.Rdata):
+    """Base class for rdata that is like an MX record.
+
+    @ivar preference: the preference value
+    @type preference: int
+    @ivar exchange: the exchange name
+    @type exchange: dns.name.Name object"""
+
+    __slots__ = ['preference', 'exchange']
+
+    def __init__(self, rdclass, rdtype, preference, exchange):
+        super(MXBase, self).__init__(rdclass, rdtype)
+        self.preference = preference
+        self.exchange = exchange
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        exchange = self.exchange.choose_relativity(origin, relativize)
+        return '%d %s' % (self.preference, exchange)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        preference = tok.get_uint16()
+        exchange = tok.get_name()
+        exchange = exchange.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, preference, exchange)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Wire form: 16-bit big-endian preference, then the exchange name
+        # (compressed when a compression table is supplied).
+        pref = struct.pack("!H", self.preference)
+        file.write(pref)
+        self.exchange.to_wire(file, compress, origin)
+
+    def to_digestable(self, origin = None):
+        return struct.pack("!H", self.preference) + \
+               self.exchange.to_digestable(origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        (preference, ) = struct.unpack('!H', wire[current : current + 2])
+        current += 2
+        rdlen -= 2
+        # The exchange name must consume exactly the remaining rdata.
+        (exchange, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                               current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            exchange = exchange.relativize(origin)
+        return cls(rdclass, rdtype, preference, exchange)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.exchange = self.exchange.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        # Compare the packed preference (byte order == numeric order),
+        # then the exchange name.
+        sp = struct.pack("!H", self.preference)
+        op = struct.pack("!H", other.preference)
+        v = cmp(sp, op)
+        if v == 0:
+            v = cmp(self.exchange, other.exchange)
+        return v
+
+class UncompressedMX(MXBase):
+    """Base class for rdata that is like an MX record, but whose name
+    is not compressed when converted to DNS wire format, and whose
+    digestable form is not downcased."""
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Force compress=None so the exchange name is never compressed.
+        super(UncompressedMX, self).to_wire(file, None, origin)
+
+    def to_digestable(self, origin = None):
+        # Digest over the exact uncompressed wire form.
+        f = cStringIO.StringIO()
+        self.to_wire(f, None, origin)
+        return f.getvalue()
+
+class UncompressedDowncasingMX(MXBase):
+    """Base class for rdata that is like an MX record, but whose name
+    is not compressed when converted to DNS wire format."""
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Force compress=None; to_digestable is inherited from MXBase.
+        super(UncompressedDowncasingMX, self).to_wire(file, None, origin)
diff --git a/lib/dnspython/dns/rdtypes/nsbase.py b/lib/dnspython/dns/rdtypes/nsbase.py
new file mode 100644
index 0000000000..7cdb2a0289
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/nsbase.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""NS-like base classes."""
+
+import cStringIO
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+class NSBase(dns.rdata.Rdata):
+    """Base class for rdata that is like an NS record.
+
+    @ivar target: the target name of the rdata
+    @type target: dns.name.Name object"""
+
+    __slots__ = ['target']
+
+    def __init__(self, rdclass, rdtype, target):
+        super(NSBase, self).__init__(rdclass, rdtype)
+        self.target = target
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        target = self.target.choose_relativity(origin, relativize)
+        return str(target)
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        target = tok.get_name()
+        target = target.choose_relativity(origin, relativize)
+        tok.get_eol()
+        return cls(rdclass, rdtype, target)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        self.target.to_wire(file, compress, origin)
+
+    def to_digestable(self, origin = None):
+        return self.target.to_digestable(origin)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        # The target name must consume exactly the whole rdata.
+        (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+                                             current)
+        if cused != rdlen:
+            raise dns.exception.FormError
+        if not origin is None:
+            target = target.relativize(origin)
+        return cls(rdclass, rdtype, target)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.target = self.target.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        return cmp(self.target, other.target)
+
+class UncompressedNS(NSBase):
+    """Base class for rdata that is like an NS record, but whose name
+    is not compressed when converted to DNS wire format, and whose
+    digestable form is not downcased."""
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Force compress=None so the name is always emitted uncompressed.
+        super(UncompressedNS, self).to_wire(file, None, origin)
+
+    def to_digestable(self, origin = None):
+        # Digest over the exact uncompressed wire form.
+        f = cStringIO.StringIO()
+        self.to_wire(f, None, origin)
+        return f.getvalue()
diff --git a/lib/dnspython/dns/rdtypes/sigbase.py b/lib/dnspython/dns/rdtypes/sigbase.py
new file mode 100644
index 0000000000..ccb6dd69ae
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/sigbase.py
@@ -0,0 +1,168 @@
+# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import calendar
+import struct
+import time
+
+import dns.dnssec
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+class BadSigTime(dns.exception.DNSException):
+    """Raised when a SIG or RRSIG RR's time cannot be parsed."""
+    pass
+
+def sigtime_to_posixtime(what):
+    # Parse a 14-digit YYYYMMDDHHMMSS timestamp (the SIG/RRSIG
+    # presentation format) into seconds since the epoch.  timegm()
+    # interprets the fields as UTC.
+    if len(what) != 14:
+        raise BadSigTime
+    year = int(what[0:4])
+    month = int(what[4:6])
+    day = int(what[6:8])
+    hour = int(what[8:10])
+    minute = int(what[10:12])
+    second = int(what[12:14])
+    return calendar.timegm((year, month, day, hour, minute, second,
+                            0, 0, 0))
+
+def posixtime_to_sigtime(what):
+    # Inverse of sigtime_to_posixtime: epoch seconds -> YYYYMMDDHHMMSS (UTC).
+    return time.strftime('%Y%m%d%H%M%S', time.gmtime(what))
+
+class SIGBase(dns.rdata.Rdata):
+    """SIG-like record base
+
+    @ivar type_covered: the rdata type this signature covers
+    @type type_covered: int
+    @ivar algorithm: the algorithm used for the sig
+    @type algorithm: int
+    @ivar labels: number of labels
+    @type labels: int
+    @ivar original_ttl: the original TTL
+    @type original_ttl: long
+    @ivar expiration: signature expiration time
+    @type expiration: long
+    @ivar inception: signature inception time
+    @type inception: long
+    @ivar key_tag: the key tag
+    @type key_tag: int
+    @ivar signer: the signer
+    @type signer: dns.name.Name object
+    @ivar signature: the signature
+    @type signature: string"""
+
+    __slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl',
+                 'expiration', 'inception', 'key_tag', 'signer',
+                 'signature']
+
+    def __init__(self, rdclass, rdtype, type_covered, algorithm, labels,
+                 original_ttl, expiration, inception, key_tag, signer,
+                 signature):
+        super(SIGBase, self).__init__(rdclass, rdtype)
+        self.type_covered = type_covered
+        self.algorithm = algorithm
+        self.labels = labels
+        self.original_ttl = original_ttl
+        self.expiration = expiration
+        self.inception = inception
+        self.key_tag = key_tag
+        self.signer = signer
+        self.signature = signature
+
+    def covers(self):
+        # A SIG/RRSIG "covers" the rdata type it signs.
+        return self.type_covered
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        return '%s %d %d %d %s %s %d %s %s' % (
+            dns.rdatatype.to_text(self.type_covered),
+            self.algorithm,
+            self.labels,
+            self.original_ttl,
+            posixtime_to_sigtime(self.expiration),
+            posixtime_to_sigtime(self.inception),
+            self.key_tag,
+            self.signer,
+            dns.rdata._base64ify(self.signature)
+            )
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        type_covered = dns.rdatatype.from_text(tok.get_string())
+        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+        labels = tok.get_int()
+        original_ttl = tok.get_ttl()
+        expiration = sigtime_to_posixtime(tok.get_string())
+        inception = sigtime_to_posixtime(tok.get_string())
+        key_tag = tok.get_int()
+        signer = tok.get_name()
+        signer = signer.choose_relativity(origin, relativize)
+        # The signature is base64 text, possibly split over tokens.
+        chunks = []
+        while 1:
+            t = tok.get().unescape()
+            if t.is_eol_or_eof():
+                break
+            if not t.is_identifier():
+                raise dns.exception.SyntaxError
+            chunks.append(t.value)
+        b64 = ''.join(chunks)
+        signature = b64.decode('base64_codec')
+        return cls(rdclass, rdtype, type_covered, algorithm, labels,
+                   original_ttl, expiration, inception, key_tag, signer,
+                   signature)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Wire form: 18-byte fixed header, uncompressed signer name, then
+        # the raw signature bytes.
+        header = struct.pack('!HBBIIIH', self.type_covered,
+                             self.algorithm, self.labels,
+                             self.original_ttl, self.expiration,
+                             self.inception, self.key_tag)
+        file.write(header)
+        self.signer.to_wire(file, None, origin)
+        file.write(self.signature)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        header = struct.unpack('!HBBIIIH', wire[current : current + 18])
+        current += 18
+        rdlen -= 18
+        (signer, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+        current += cused
+        rdlen -= cused
+        if not origin is None:
+            signer = signer.relativize(origin)
+        # Whatever remains after the signer name is the signature.
+        signature = wire[current : current + rdlen]
+        return cls(rdclass, rdtype, header[0], header[1], header[2],
+                   header[3], header[4], header[5], header[6], signer,
+                   signature)
+
+    from_wire = classmethod(from_wire)
+
+    def choose_relativity(self, origin = None, relativize = True):
+        self.signer = self.signer.choose_relativity(origin, relativize)
+
+    def _cmp(self, other):
+        # Compare packed fixed fields, then signer, then signature bytes.
+        hs = struct.pack('!HBBIIIH', self.type_covered,
+                         self.algorithm, self.labels,
+                         self.original_ttl, self.expiration,
+                         self.inception, self.key_tag)
+        ho = struct.pack('!HBBIIIH', other.type_covered,
+                         other.algorithm, other.labels,
+                         other.original_ttl, other.expiration,
+                         other.inception, other.key_tag)
+        v = cmp(hs, ho)
+        if v == 0:
+            v = cmp(self.signer, other.signer)
+        if v == 0:
+            v = cmp(self.signature, other.signature)
+        return v
diff --git a/lib/dnspython/dns/rdtypes/txtbase.py b/lib/dnspython/dns/rdtypes/txtbase.py
new file mode 100644
index 0000000000..43db2a48c0
--- /dev/null
+++ b/lib/dnspython/dns/rdtypes/txtbase.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""TXT-like base class."""
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+class TXTBase(dns.rdata.Rdata):
+    """Base class for rdata that is like a TXT record
+
+    @ivar strings: the text strings
+    @type strings: list of string
+    @see: RFC 1035"""
+
+    __slots__ = ['strings']
+
+    def __init__(self, rdclass, rdtype, strings):
+        super(TXTBase, self).__init__(rdclass, rdtype)
+        # Accept a bare string for convenience; always store a copy of the
+        # list so the caller's list cannot be mutated through us.
+        if isinstance(strings, str):
+            strings = [ strings ]
+        self.strings = strings[:]
+
+    def to_text(self, origin=None, relativize=True, **kw):
+        # Render each string quoted and escaped, space-separated.
+        txt = ''
+        prefix = ''
+        for s in self.strings:
+            txt += '%s"%s"' % (prefix, dns.rdata._escapify(s))
+            prefix = ' '
+        return txt
+
+    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
+        strings = []
+        while 1:
+            token = tok.get().unescape()
+            if token.is_eol_or_eof():
+                break
+            if not (token.is_quoted_string() or token.is_identifier()):
+                raise dns.exception.SyntaxError("expected a string")
+            # Each <character-string> is limited to 255 octets on the wire.
+            if len(token.value) > 255:
+                raise dns.exception.SyntaxError("string too long")
+            strings.append(token.value)
+        if len(strings) == 0:
+            raise dns.exception.UnexpectedEnd
+        return cls(rdclass, rdtype, strings)
+
+    from_text = classmethod(from_text)
+
+    def to_wire(self, file, compress = None, origin = None):
+        # Wire form: each string is a one-octet length followed by the data.
+        for s in self.strings:
+            l = len(s)
+            assert l < 256
+            byte = chr(l)
+            file.write(byte)
+            file.write(s)
+
+    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
+        strings = []
+        while rdlen > 0:
+            l = ord(wire[current])
+            current += 1
+            rdlen -= 1
+            # A string may not run past the end of the rdata.
+            if l > rdlen:
+                raise dns.exception.FormError
+            s = wire[current : current + l]
+            current += l
+            rdlen -= l
+            strings.append(s)
+        return cls(rdclass, rdtype, strings)
+
+    from_wire = classmethod(from_wire)
+
+    def _cmp(self, other):
+        return cmp(self.strings, other.strings)
diff --git a/lib/dnspython/dns/renderer.py b/lib/dnspython/dns/renderer.py
new file mode 100644
index 0000000000..bb0218ac30
--- /dev/null
+++ b/lib/dnspython/dns/renderer.py
@@ -0,0 +1,324 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Help for building DNS wire format messages"""
+
+import cStringIO
+import struct
+import random
+import time
+
+import dns.exception
+import dns.tsig
+
# Wire-format section indices, in the order sections must be rendered.
QUESTION = 0
ANSWER = 1
AUTHORITY = 2
ADDITIONAL = 3
+
class Renderer(object):
    """Helper class for building DNS wire-format messages.

    Most applications can use the higher-level L{dns.message.Message}
    class and its to_wire() method to generate wire-format messages.
    This class is for those applications which need finer control
    over the generation of messages.

    Typical use::

        r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
        r.add_question(qname, qtype, qclass)
        r.add_rrset(dns.renderer.ANSWER, rrset_1)
        r.add_rrset(dns.renderer.ANSWER, rrset_2)
        r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
        r.add_edns(0, 0, 4096)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1)
        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2)
        r.write_header()
        r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
        wire = r.get_wire()

    @ivar output: where rendering is written
    @type output: cStringIO.StringIO object
    @ivar id: the message id
    @type id: int
    @ivar flags: the message flags
    @type flags: int
    @ivar max_size: the maximum size of the message
    @type max_size: int
    @ivar origin: the origin to use when rendering relative names
    @type origin: dns.name.Name object
    @ivar compress: the compression table
    @type compress: dict
    @ivar section: the section currently being rendered
    @type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER,
    dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL)
    @ivar counts: list of the number of RRs in each section
    @type counts: int list of length 4
    @ivar mac: the MAC of the rendered message (if TSIG was used)
    @type mac: string
    """

    def __init__(self, id=None, flags=0, max_size=65535, origin=None):
        """Initialize a new renderer.

        @param id: the message id
        @type id: int
        @param flags: the DNS message flags
        @type flags: int
        @param max_size: the maximum message size; the default is 65535.
        If rendering results in a message greater than I{max_size},
        then L{dns.exception.TooBig} will be raised.
        @type max_size: int
        @param origin: the origin to use when rendering relative names
        @type origin: dns.name.Name or None.
        """

        self.output = cStringIO.StringIO()
        if id is None:
            self.id = random.randint(0, 65535)
        else:
            self.id = id
        self.flags = flags
        self.max_size = max_size
        self.origin = origin
        self.compress = {}
        self.section = QUESTION
        self.counts = [0, 0, 0, 0]
        # Reserve space for the 12-byte DNS header; it is filled in
        # later by write_header().
        self.output.write('\x00' * 12)
        self.mac = ''

    def _rollback(self, where):
        """Truncate the output buffer at offset I{where}, and remove any
        compression table entries that pointed beyond the truncation
        point.

        @param where: the offset
        @type where: int
        """

        self.output.seek(where)
        self.output.truncate()
        keys_to_delete = []
        for k, v in self.compress.iteritems():
            if v >= where:
                keys_to_delete.append(k)
        for k in keys_to_delete:
            del self.compress[k]

    def _set_section(self, section):
        """Set the renderer's current section.

        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
        ADDITIONAL.  Sections may be empty.

        @param section: the section
        @type section: int
        @raises dns.exception.FormError: an attempt was made to set
        a section value less than the current section.
        """

        if self.section != section:
            if self.section > section:
                raise dns.exception.FormError
            self.section = section

    def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
        """Add a question to the message.

        @param qname: the question name
        @type qname: dns.name.Name
        @param rdtype: the question rdata type
        @type rdtype: int
        @param rdclass: the question rdata class
        @type rdclass: int
        @raises dns.exception.TooBig: the question does not fit; the
        output is rolled back to its previous state.
        """

        # NOTE(review): dns.rdataclass/dns.rdatatype are used by this
        # module but not imported here directly; they are assumed to be
        # made available by imports elsewhere in the dns package --
        # confirm before using this module in isolation.
        self._set_section(QUESTION)
        before = self.output.tell()
        qname.to_wire(self.output, self.compress, self.origin)
        self.output.write(struct.pack("!HH", rdtype, rdclass))
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[QUESTION] += 1

    def add_rrset(self, section, rrset, **kw):
        """Add the rrset to the specified section.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param rrset: the rrset
        @type rrset: dns.rrset.RRset object
        @raises dns.exception.TooBig: the rrset does not fit; the
        output is rolled back to its previous state.
        """

        self._set_section(section)
        before = self.output.tell()
        n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_rdataset(self, section, name, rdataset, **kw):
        """Add the rdataset to the specified section, using the specified
        name as the owner name.

        Any keyword arguments are passed on to the rdataset's to_wire()
        routine.

        @param section: the section
        @type section: int
        @param name: the owner name
        @type name: dns.name.Name object
        @param rdataset: the rdataset
        @type rdataset: dns.rdataset.Rdataset object
        @raises dns.exception.TooBig: the rdataset does not fit; the
        output is rolled back to its previous state.
        """

        self._set_section(section)
        before = self.output.tell()
        n = rdataset.to_wire(name, self.output, self.compress, self.origin,
                             **kw)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[section] += n

    def add_edns(self, edns, ednsflags, payload, options=None):
        """Add an EDNS OPT record to the message.

        @param edns: The EDNS level to use.
        @type edns: int
        @param ednsflags: EDNS flag values.
        @type ednsflags: int
        @param payload: The EDNS sender's payload field, which is the maximum
        size of UDP datagram the sender can handle.
        @type payload: int
        @param options: The EDNS options list
        @type options: list of dns.edns.Option instances
        @see: RFC 2671
        """

        # make sure the EDNS version in ednsflags agrees with edns
        ednsflags &= 0xFF00FFFFL
        ednsflags |= (edns << 16)
        self._set_section(ADDITIONAL)
        before = self.output.tell()
        # The OPT record: root owner name (single zero octet), type OPT,
        # "class" carries the payload size, "TTL" carries the EDNS flags,
        # and RDLENGTH is written as 0 and patched below if options exist.
        self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload,
                                      ednsflags, 0))
        if not options is None:
            lstart = self.output.tell()
            for opt in options:
                # Write a placeholder option length, render the option,
                # then seek back and patch in the real length.
                stuff = struct.pack("!HH", opt.otype, 0)
                self.output.write(stuff)
                start = self.output.tell()
                opt.to_wire(self.output)
                end = self.output.tell()
                assert end - start < 65536
                self.output.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                self.output.write(stuff)
                self.output.seek(0, 2)
            lend = self.output.tell()
            assert lend - lstart < 65536
            # Patch the OPT record's RDLENGTH now that the total size of
            # the rendered options is known, then return to the end.
            self.output.seek(lstart - 2)
            stuff = struct.pack("!H", lend - lstart)
            self.output.write(stuff)
            self.output.seek(0, 2)
        after = self.output.tell()
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        self.counts[ADDITIONAL] += 1

    def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
                 request_mac, algorithm=dns.tsig.default_algorithm):
        """Add a TSIG signature to the message.

        @param keyname: the TSIG key name
        @type keyname: dns.name.Name object
        @param secret: the secret to use
        @type secret: string
        @param fudge: TSIG time fudge
        @type fudge: int
        @param id: the message id to encode in the tsig signature
        @type id: int
        @param tsig_error: TSIG error code; default is 0.
        @type tsig_error: int
        @param other_data: TSIG other data.
        @type other_data: string
        @param request_mac: This message is a response to the request which
        had the specified MAC.
        @type request_mac: string
        @param algorithm: the TSIG algorithm to use
        @type algorithm: string
        """

        self._set_section(ADDITIONAL)
        before = self.output.tell()
        # Sign everything rendered so far (header + all sections).
        s = self.output.getvalue()
        (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
                                                    keyname,
                                                    secret,
                                                    int(time.time()),
                                                    fudge,
                                                    id,
                                                    tsig_error,
                                                    other_data,
                                                    request_mac,
                                                    algorithm=algorithm)
        keyname.to_wire(self.output, self.compress, self.origin)
        # TSIG RR header: type TSIG, class ANY, TTL 0, RDLENGTH written as
        # 0 and patched below once the rdata has been written.
        self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG,
                                      dns.rdataclass.ANY, 0, 0))
        rdata_start = self.output.tell()
        self.output.write(tsig_rdata)
        after = self.output.tell()
        assert after - rdata_start < 65536
        if after >= self.max_size:
            self._rollback(before)
            raise dns.exception.TooBig
        # Patch the RDLENGTH of the TSIG record...
        self.output.seek(rdata_start - 2)
        self.output.write(struct.pack('!H', after - rdata_start))
        self.counts[ADDITIONAL] += 1
        # ...and the ARCOUNT field at offset 10 of the already-written
        # header, since write_header() ran before the TSIG was added.
        self.output.seek(10)
        self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
        self.output.seek(0, 2)

    def write_header(self):
        """Write the DNS message header.

        Writing the DNS message header is done after all sections
        have been rendered, but before the optional TSIG signature
        is added.
        """

        self.output.seek(0)
        self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
                                      self.counts[0], self.counts[1],
                                      self.counts[2], self.counts[3]))
        self.output.seek(0, 2)

    def get_wire(self):
        """Return the wire format message.

        @rtype: string
        """

        return self.output.getvalue()
diff --git a/lib/dnspython/dns/resolver.py b/lib/dnspython/dns/resolver.py
new file mode 100644
index 0000000000..30977f3a8b
--- /dev/null
+++ b/lib/dnspython/dns/resolver.py
@@ -0,0 +1,773 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS stub resolver.
+
+@var default_resolver: The default resolver object
+@type default_resolver: dns.resolver.Resolver object"""
+
import socket
import sys
import time

import dns.exception
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdataclass
import dns.rdatatype
import dns.tsig
+
+if sys.platform == 'win32':
+ import _winreg
+
class NXDOMAIN(dns.exception.DNSException):
    """The query name does not exist."""
    # Raised by Resolver.query() once every candidate name has returned
    # rcode NXDOMAIN.
    pass
+
# The definition of the Timeout exception has moved from here to the
# dns.exception module.  We keep dns.resolver.Timeout defined for
# backwards compatibility, so that code catching dns.resolver.Timeout
# keeps working.

Timeout = dns.exception.Timeout
+
class NoAnswer(dns.exception.DNSException):
    """The response did not contain an answer to the question."""
    # Raised by Answer.__init__ when no matching rrset (or CNAME chain
    # leading to one) is found in the answer section.
    pass
+
class NoNameservers(dns.exception.DNSException):
    """No non-broken nameservers are available to answer the query."""
    # Raised by Resolver.query() when every configured server has been
    # removed from the candidate list (e.g. due to FormError responses).
    pass
+
class NotAbsolute(dns.exception.DNSException):
    """Raised if an absolute domain name is required but a relative name
    was provided."""
    # Used by zone_for_name(), which cannot walk up from a relative name.
    pass
+
class NoRootSOA(dns.exception.DNSException):
    """Raised if for some reason there is no SOA at the root name.
    This should never happen!"""
    # zone_for_name() raises this when walking up past the root without
    # ever finding an SOA.
    pass
+
class NoMetaqueries(dns.exception.DNSException):
    """Metaqueries are not allowed."""
    # Raised by Resolver.query() for meta rdata types/classes (e.g. AXFR,
    # ANY-class), which a stub resolver cannot meaningfully issue.
    pass
+
+
class Answer(object):
    """DNS stub resolver answer

    Instances of this class bundle up the result of a successful DNS
    resolution.

    For convenience, the answer object implements much of the sequence
    protocol, forwarding to its rrset.  E.g. "for a in answer" is
    equivalent to "for a in answer.rrset", "answer[i]" is equivalent
    to "answer.rrset[i]", and "answer[i:j]" is equivalent to
    "answer.rrset[i:j]".

    Note that CNAMEs or DNAMEs in the response may mean that answer
    node's name might not be the query name.

    @ivar qname: The query name
    @type qname: dns.name.Name object
    @ivar rdtype: The query type
    @type rdtype: int
    @ivar rdclass: The query class
    @type rdclass: int
    @ivar response: The response message
    @type response: dns.message.Message object
    @ivar rrset: The answer
    @type rrset: dns.rrset.RRset object
    @ivar expiration: The time when the answer expires
    @type expiration: float (seconds since the epoch)
    """
    def __init__(self, qname, rdtype, rdclass, response):
        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass
        self.response = response
        min_ttl = -1
        rrset = None
        # Look for the answer rrset, following CNAMEs if necessary.  The
        # chain is capped at 15 links so a CNAME loop cannot spin forever;
        # running off the end raises NoAnswer below.
        for count in xrange(0, 15):
            try:
                rrset = response.find_rrset(response.answer, qname,
                                            rdclass, rdtype)
                # Track the smallest TTL seen along the chain; it bounds
                # how long the complete answer may be cached.
                if min_ttl == -1 or rrset.ttl < min_ttl:
                    min_ttl = rrset.ttl
                break
            except KeyError:
                if rdtype != dns.rdatatype.CNAME:
                    try:
                        crrset = response.find_rrset(response.answer,
                                                     qname,
                                                     rdclass,
                                                     dns.rdatatype.CNAME)
                        if min_ttl == -1 or crrset.ttl < min_ttl:
                            min_ttl = crrset.ttl
                        # Restart the search at the CNAME target (only the
                        # first rdata of the rrset is followed).
                        for rd in crrset:
                            qname = rd.target
                            break
                        continue
                    except KeyError:
                        raise NoAnswer
                raise NoAnswer
        if rrset is None:
            raise NoAnswer
        self.rrset = rrset
        self.expiration = time.time() + min_ttl

    def __getattr__(self, attr):
        # Forward selected attributes to the underlying rrset so the
        # answer can be used much like an rrset itself.
        if attr == 'name':
            return self.rrset.name
        elif attr == 'ttl':
            return self.rrset.ttl
        elif attr == 'covers':
            return self.rrset.covers
        elif attr == 'rdclass':
            return self.rrset.rdclass
        elif attr == 'rdtype':
            return self.rrset.rdtype
        else:
            raise AttributeError(attr)

    def __len__(self):
        return len(self.rrset)

    def __iter__(self):
        return iter(self.rrset)

    def __getitem__(self, i):
        return self.rrset[i]

    def __delitem__(self, i):
        del self.rrset[i]

    def __getslice__(self, i, j):
        return self.rrset[i:j]

    def __delslice__(self, i, j):
        del self.rrset[i:j]
+
class Cache(object):
    """Simple DNS answer cache.

    Entries expire according to their answer's expiration time, and are
    purged lazily: at most once per cleaning interval, stale entries are
    swept out as a side effect of get()/put().

    @ivar data: A dictionary of cached data
    @type data: dict
    @ivar cleaning_interval: The number of seconds between cleanings.  The
    default is 300 (5 minutes).
    @type cleaning_interval: float
    @ivar next_cleaning: The time the cache should next be cleaned (in seconds
    since the epoch.)
    @type next_cleaning: float
    """

    def __init__(self, cleaning_interval=300.0):
        """Initialize a DNS cache.

        @param cleaning_interval: the number of seconds between periodic
        cleanings.  The default is 300.0
        @type cleaning_interval: float.
        """

        self.data = {}
        self.cleaning_interval = cleaning_interval
        self.next_cleaning = time.time() + self.cleaning_interval

    def maybe_clean(self):
        """Clean the cache if it's time to do so."""

        now = time.time()
        if self.next_cleaning <= now:
            # Collect first, then delete, so the dict is not mutated
            # while being iterated.
            stale = [k for (k, v) in self.data.items()
                     if v.expiration <= now]
            for k in stale:
                del self.data[k]
            self.next_cleaning = time.time() + self.cleaning_interval

    def get(self, key):
        """Get the answer associated with I{key}.  Returns None if
        no answer is cached for the key.
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @rtype: dns.resolver.Answer object or None
        """

        self.maybe_clean()
        entry = self.data.get(key)
        # Treat missing and expired entries identically.
        if entry is not None and entry.expiration > time.time():
            return entry
        return None

    def put(self, key, value):
        """Associate key and value in the cache.
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @param value: The answer being cached
        @type value: dns.resolver.Answer object
        """

        self.maybe_clean()
        self.data[key] = value

    def flush(self, key=None):
        """Flush the cache.

        If I{key} is specified, only that item is flushed.  Otherwise
        the entire cache is flushed.

        @param key: the key to flush
        @type key: (dns.name.Name, int, int) tuple or None
        """

        if key is not None:
            # Silently ignore keys that are not cached.
            self.data.pop(key, None)
        else:
            self.data = {}
            self.next_cleaning = time.time() + self.cleaning_interval
+
class Resolver(object):
    """DNS stub resolver

    @ivar domain: The domain of this host
    @type domain: dns.name.Name object
    @ivar nameservers: A list of nameservers to query.  Each nameserver is
    a string which contains the IP address of a nameserver.
    @type nameservers: list of strings
    @ivar search: The search list.  If the query name is a relative name,
    the resolver will construct an absolute query name by appending the search
    names one by one to the query name.
    @type search: list of dns.name.Name objects
    @ivar port: The port to which to send queries.  The default is 53.
    @type port: int
    @ivar timeout: The number of seconds to wait for a response from a
    server, before timing out.
    @type timeout: float
    @ivar lifetime: The total number of seconds to spend trying to get an
    answer to the question.  If the lifetime expires, a Timeout exception
    will occur.
    @type lifetime: float
    @ivar keyring: The TSIG keyring to use.  The default is None.
    @type keyring: dict
    @ivar keyname: The TSIG keyname to use.  The default is None.
    @type keyname: dns.name.Name object
    @ivar keyalgorithm: The TSIG key algorithm to use.  The default is
    dns.tsig.default_algorithm.
    @type keyalgorithm: string
    @ivar edns: The EDNS level to use.  The default is -1, no Edns.
    @type edns: int
    @ivar ednsflags: The EDNS flags
    @type ednsflags: int
    @ivar payload: The EDNS payload size.  The default is 0.
    @type payload: int
    @ivar cache: The cache to use.  The default is None.
    @type cache: dns.resolver.Cache object
    """
    def __init__(self, filename='/etc/resolv.conf', configure=True):
        """Initialize a resolver instance.

        @param filename: The filename of a configuration file in
        standard /etc/resolv.conf format.  This parameter is meaningful
        only when I{configure} is true and the platform is POSIX.
        @type filename: string or file object
        @param configure: If True (the default), the resolver instance
        is configured in the normal fashion for the operating system
        the resolver is running on.  (I.e. a /etc/resolv.conf file on
        POSIX systems and from the registry on Windows systems.)
        @type configure: bool"""

        self.reset()
        if configure:
            if sys.platform == 'win32':
                self.read_registry()
            elif filename:
                self.read_resolv_conf(filename)

    def reset(self):
        """Reset all resolver configuration to the defaults."""
        # The default domain is the host name with its first label
        # removed (falling back to the root for single-label host names).
        self.domain = \
            dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
        if len(self.domain) == 0:
            self.domain = dns.name.root
        self.nameservers = []
        self.search = []
        self.port = 53
        self.timeout = 2.0
        self.lifetime = 30.0
        self.keyring = None
        self.keyname = None
        self.keyalgorithm = dns.tsig.default_algorithm
        self.edns = -1
        self.ednsflags = 0
        self.payload = 0
        self.cache = None

    def read_resolv_conf(self, f):
        """Process f as a file in the /etc/resolv.conf format.  If f is
        a string, it is used as the name of the file to open; otherwise it
        is treated as the file itself."""
        if isinstance(f, str) or isinstance(f, unicode):
            try:
                f = open(f, 'r')
            except IOError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
                # We'll just use the default resolver configuration.
                self.nameservers = ['127.0.0.1']
                return
            want_close = True
        else:
            want_close = False
        try:
            for l in f:
                if len(l) == 0 or l[0] == '#' or l[0] == ';':
                    continue
                tokens = l.split()
                if len(tokens) == 0:
                    continue
                # Only 'nameserver', 'domain' and 'search' directives are
                # understood; anything else is silently ignored.
                if tokens[0] == 'nameserver':
                    self.nameservers.append(tokens[1])
                elif tokens[0] == 'domain':
                    self.domain = dns.name.from_text(tokens[1])
                elif tokens[0] == 'search':
                    for suffix in tokens[1:]:
                        self.search.append(dns.name.from_text(suffix))
        finally:
            if want_close:
                f.close()
        if len(self.nameservers) == 0:
            self.nameservers.append('127.0.0.1')

    def _determine_split_char(self, entry):
        #
        # The windows registry irritatingly changes the list element
        # delimiter in between ' ' and ',' (and vice-versa) in various
        # versions of windows.
        #
        if entry.find(' ') >= 0:
            split_char = ' '
        elif entry.find(',') >= 0:
            split_char = ','
        else:
            # probably a singleton; treat as a space-separated list.
            split_char = ' '
        return split_char

    def _config_win32_nameservers(self, nameservers):
        """Configure a NameServer registry entry."""
        # we call str() on nameservers to convert it from unicode to ascii
        nameservers = str(nameservers)
        split_char = self._determine_split_char(nameservers)
        ns_list = nameservers.split(split_char)
        for ns in ns_list:
            if not ns in self.nameservers:
                self.nameservers.append(ns)

    def _config_win32_domain(self, domain):
        """Configure a Domain registry entry."""
        # we call str() on domain to convert it from unicode to ascii
        self.domain = dns.name.from_text(str(domain))

    def _config_win32_search(self, search):
        """Configure a Search registry entry."""
        # we call str() on search to convert it from unicode to ascii
        search = str(search)
        split_char = self._determine_split_char(search)
        search_list = search.split(split_char)
        for s in search_list:
            if not s in self.search:
                self.search.append(dns.name.from_text(s))

    def _config_win32_fromkey(self, key):
        """Extract DNS info from a registry key."""
        # Statically-configured values take precedence; DHCP-provided
        # NameServer/Domain values are consulted only when there is no
        # static NameServer entry.
        try:
            servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
        except WindowsError:
            servers = None
        if servers:
            self._config_win32_nameservers(servers)
            try:
                dom, rtype = _winreg.QueryValueEx(key, 'Domain')
                if dom:
                    self._config_win32_domain(dom)
            except WindowsError:
                pass
        else:
            try:
                servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
            except WindowsError:
                servers = None
            if servers:
                self._config_win32_nameservers(servers)
                try:
                    dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
                    if dom:
                        self._config_win32_domain(dom)
                except WindowsError:
                    pass
        try:
            search, rtype = _winreg.QueryValueEx(key, 'SearchList')
        except WindowsError:
            search = None
        if search:
            self._config_win32_search(search)

    def read_registry(self):
        """Extract resolver configuration from the Windows registry."""
        lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        want_scan = False
        try:
            try:
                # XP, 2000
                tcp_params = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\Tcpip\Parameters')
                want_scan = True
            except EnvironmentError:
                # ME
                tcp_params = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\VxD\MSTCP')
            try:
                self._config_win32_fromkey(tcp_params)
            finally:
                tcp_params.Close()
            if want_scan:
                # On NT-derived systems, also scan per-interface keys of
                # every enabled NIC for additional configuration.
                interfaces = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\Tcpip\Parameters'
                                             r'\Interfaces')
                try:
                    i = 0
                    while True:
                        try:
                            guid = _winreg.EnumKey(interfaces, i)
                            i += 1
                            key = _winreg.OpenKey(interfaces, guid)
                            if not self._win32_is_nic_enabled(lm, guid, key):
                                continue
                            try:
                                self._config_win32_fromkey(key)
                            finally:
                                key.Close()
                        except EnvironmentError:
                            # EnumKey raises when the index runs past the
                            # last subkey; that ends the scan.
                            break
                finally:
                    interfaces.Close()
        finally:
            lm.Close()

    def _win32_is_nic_enabled(self, lm, guid, interface_key):
        # Look in the Windows Registry to determine whether the network
        # interface corresponding to the given guid is enabled.
        #
        # (Code contributed by Paul Marks, thanks!)
        #
        try:
            # This hard-coded location seems to be consistent, at least
            # from Windows 2000 through Vista.
            connection_key = _winreg.OpenKey(
                lm,
                r'SYSTEM\CurrentControlSet\Control\Network'
                r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
                r'\%s\Connection' % guid)

            try:
                # The PnpInstanceID points to a key inside Enum
                (pnp_id, ttype) = _winreg.QueryValueEx(
                    connection_key, 'PnpInstanceID')

                if ttype != _winreg.REG_SZ:
                    raise ValueError

                device_key = _winreg.OpenKey(
                    lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)

                try:
                    # Get ConfigFlags for this device
                    (flags, ttype) = _winreg.QueryValueEx(
                        device_key, 'ConfigFlags')

                    if ttype != _winreg.REG_DWORD:
                        raise ValueError

                    # Based on experimentation, bit 0x1 indicates that the
                    # device is disabled.
                    return not (flags & 0x1)

                finally:
                    device_key.Close()
            finally:
                connection_key.Close()
        except (EnvironmentError, ValueError):
            # Pre-vista, enabled interfaces seem to have a non-empty
            # NTEContextList; this was how dnspython detected enabled
            # nics before the code above was contributed.  We've retained
            # the old method since we don't know if the code above works
            # on Windows 95/98/ME.
            try:
                (nte, ttype) = _winreg.QueryValueEx(interface_key,
                                                    'NTEContextList')
                return nte is not None
            except WindowsError:
                return False

    def _compute_timeout(self, start):
        # Return the per-attempt timeout: the smaller of self.timeout and
        # whatever remains of self.lifetime; raises Timeout when the
        # lifetime is exhausted.
        now = time.time()
        if now < start:
            if start - now > 1:
                # Time going backwards is bad.  Just give up.
                raise Timeout
            else:
                # Time went backwards, but only a little.  This can
                # happen, e.g. under vmware with older linux kernels.
                # Pretend it didn't happen.
                now = start
        duration = now - start
        if duration >= self.lifetime:
            raise Timeout
        return min(self.lifetime - duration, self.timeout)

    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None):
        """Query nameservers to find the answer to the question.

        The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
        of the appropriate type, or strings that can be converted into objects
        of the appropriate type.  E.g. For I{rdtype} the integer 2 and the
        the string 'NS' both mean to query for records with DNS rdata type NS.

        @param qname: the query name
        @type qname: dns.name.Name object or string
        @param rdtype: the query type
        @type rdtype: int or string
        @param rdclass: the query class
        @type rdclass: int or string
        @param tcp: use TCP to make the query (default is False).
        @type tcp: bool
        @param source: bind to this IP address (defaults to machine default IP).
        @type source: IP address in dotted quad notation
        @rtype: dns.resolver.Answer instance
        @raises Timeout: no answers could be found in the specified lifetime
        @raises NXDOMAIN: the query name does not exist
        @raises NoAnswer: the response did not contain an answer
        @raises NoNameservers: no non-broken nameservers are available to
        answer the question."""

        if isinstance(qname, (str, unicode)):
            qname = dns.name.from_text(qname, None)
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        if dns.rdatatype.is_metatype(rdtype):
            raise NoMetaqueries
        if isinstance(rdclass, (str, unicode)):
            rdclass = dns.rdataclass.from_text(rdclass)
        if dns.rdataclass.is_metaclass(rdclass):
            raise NoMetaqueries
        # Build the list of absolute names to try; relative names are
        # expanded with the search list (or the default domain).
        qnames_to_try = []
        if qname.is_absolute():
            qnames_to_try.append(qname)
        else:
            if len(qname) > 1:
                qnames_to_try.append(qname.concatenate(dns.name.root))
            if self.search:
                for suffix in self.search:
                    qnames_to_try.append(qname.concatenate(suffix))
            else:
                qnames_to_try.append(qname.concatenate(self.domain))
        all_nxdomain = True
        start = time.time()
        for qname in qnames_to_try:
            if self.cache:
                answer = self.cache.get((qname, rdtype, rdclass))
                if answer:
                    return answer
            request = dns.message.make_query(qname, rdtype, rdclass)
            if not self.keyname is None:
                request.use_tsig(self.keyring, self.keyname,
                                 algorithm=self.keyalgorithm)
            request.use_edns(self.edns, self.ednsflags, self.payload)
            response = None
            #
            # make a copy of the servers list so we can alter it later.
            #
            nameservers = self.nameservers[:]
            backoff = 0.10
            while response is None:
                if len(nameservers) == 0:
                    raise NoNameservers
                for nameserver in nameservers[:]:
                    timeout = self._compute_timeout(start)
                    try:
                        if tcp:
                            response = dns.query.tcp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source)
                        else:
                            response = dns.query.udp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source)
                    except (socket.error, dns.exception.Timeout):
                        #
                        # Communication failure or timeout.  Go to the
                        # next server
                        #
                        response = None
                        continue
                    except dns.query.UnexpectedSource:
                        #
                        # Who knows?  Keep going.
                        #
                        response = None
                        continue
                    except dns.exception.FormError:
                        #
                        # We don't understand what this server is
                        # saying.  Take it out of the mix and
                        # continue.
                        #
                        nameservers.remove(nameserver)
                        response = None
                        continue
                    rcode = response.rcode()
                    if rcode == dns.rcode.NOERROR or \
                           rcode == dns.rcode.NXDOMAIN:
                        break
                    #
                    # We got a response, but we're not happy with the
                    # rcode in it.  Remove the server from the mix if
                    # the rcode isn't SERVFAIL.
                    #
                    if rcode != dns.rcode.SERVFAIL:
                        nameservers.remove(nameserver)
                    response = None
                if not response is None:
                    break
                #
                # All nameservers failed!
                #
                if len(nameservers) > 0:
                    #
                    # But we still have servers to try.  Sleep a bit
                    # so we don't pound them!  Exponential backoff,
                    # capped by the remaining lifetime.
                    #
                    timeout = self._compute_timeout(start)
                    sleep_time = min(timeout, backoff)
                    backoff *= 2
                    time.sleep(sleep_time)
            # NXDOMAIN for this candidate; try the next search-list name.
            if response.rcode() == dns.rcode.NXDOMAIN:
                continue
            all_nxdomain = False
            break
        if all_nxdomain:
            raise NXDOMAIN
        answer = Answer(qname, rdtype, rdclass, response)
        if self.cache:
            self.cache.put((qname, rdtype, rdclass), answer)
        return answer

    def use_tsig(self, keyring, keyname=None,
                 algorithm=dns.tsig.default_algorithm):
        """Add a TSIG signature to the query.

        @param keyring: The TSIG keyring to use; defaults to None.
        @type keyring: dict
        @param keyname: The name of the TSIG key to use; defaults to None.
        The key must be defined in the keyring.  If a keyring is specified
        but a keyname is not, then the key used will be the first key in the
        keyring.  Note that the order of keys in a dictionary is not defined,
        so applications should supply a keyname when a keyring is used, unless
        they know the keyring contains only one key.
        @param algorithm: The TSIG key algorithm to use.  The default
        is dns.tsig.default_algorithm.
        @type algorithm: string"""
        self.keyring = keyring
        if keyname is None:
            # Arbitrary choice: the first key in the keyring (see note in
            # the docstring about dictionary ordering).
            self.keyname = self.keyring.keys()[0]
        else:
            self.keyname = keyname
        self.keyalgorithm = algorithm

    def use_edns(self, edns, ednsflags, payload):
        """Configure Edns.

        @param edns: The EDNS level to use.  The default is -1, no Edns.
        @type edns: int
        @param ednsflags: The EDNS flags
        @type ednsflags: int
        @param payload: The EDNS payload size.  The default is 0.
        @type payload: int"""

        # None is accepted as a synonym for "no EDNS".
        if edns is None:
            edns = -1
        self.edns = edns
        self.ednsflags = ednsflags
        self.payload = payload
+
# Lazily-created process-wide resolver shared by the module-level helpers.
default_resolver = None
+
def get_default_resolver():
    """Return the module-level default resolver, creating (and thereby
    configuring) it on first use."""
    global default_resolver
    if default_resolver is None:
        # First call: build a resolver configured from the OS settings.
        default_resolver = Resolver()
    return default_resolver
+
def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
          tcp=False, source=None):
    """Query nameservers to find the answer to the question.

    This is a convenience function that uses the default resolver
    object to make the query.
    @see: L{dns.resolver.Resolver.query} for more information on the
    parameters."""
    resolver = get_default_resolver()
    return resolver.query(qname, rdtype, rdclass, tcp, source)
+
def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
    """Find the name of the zone which contains the specified name.

    Walks up from I{name} toward the root, querying for SOA at each
    level, and returns the first name owning an SOA rrset.

    @param name: the query name
    @type name: absolute dns.name.Name object or string
    @param rdclass: The query class
    @type rdclass: int
    @param tcp: use TCP to make the query (default is False).
    @type tcp: bool
    @param resolver: the resolver to use
    @type resolver: dns.resolver.Resolver object or None
    @raises NotAbsolute: the name is relative
    @raises NoRootSOA: the walk reached the root without finding an SOA
    @rtype: dns.name.Name"""

    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, dns.name.root)
    if resolver is None:
        resolver = get_default_resolver()
    if not name.is_absolute():
        raise NotAbsolute(name)
    while True:
        try:
            answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
            if answer.rrset.name == name:
                return name
            # otherwise we were CNAMEd or DNAMEd and need to look higher
        # Use the module-local exception names rather than the fragile
        # self-qualified dns.resolver.* attribute lookups, which break if
        # this module is imported outside the dns package namespace.
        except (NXDOMAIN, NoAnswer):
            pass
        try:
            name = name.parent()
        except dns.name.NoParent:
            raise NoRootSOA
diff --git a/lib/dnspython/dns/reversename.py b/lib/dnspython/dns/reversename.py
new file mode 100644
index 0000000000..0a61b827b0
--- /dev/null
+++ b/lib/dnspython/dns/reversename.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Reverse Map Names.
+
+@var ipv4_reverse_domain: The DNS IPv4 reverse-map domain, in-addr.arpa.
+@type ipv4_reverse_domain: dns.name.Name object
+@var ipv6_reverse_domain: The DNS IPv6 reverse-map domain, ip6.arpa.
+@type ipv6_reverse_domain: dns.name.Name object
+"""
+
+import dns.name
+import dns.ipv6
+import dns.ipv4
+
# Roots of the DNS reverse-map trees (RFC 1035 for IPv4, RFC 3596 for IPv6).
ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.')
ipv6_reverse_domain = dns.name.from_text('ip6.arpa.')
+
def from_address(text):
    """Convert an IPv4 or IPv6 address in textual form into a Name object whose
    value is the reverse-map domain name of the address.
    @param text: an IPv4 or IPv6 address in textual form (e.g. '127.0.0.1',
    '::1')
    @type text: str
    @rtype: dns.name.Name object
    """
    try:
        # IPv6 first: one hex-digit label per nibble under ip6.arpa.
        parts = list(dns.ipv6.inet_aton(text).encode('hex_codec'))
        origin = ipv6_reverse_domain
    except Exception:
        # Not parseable as IPv6; treat it as IPv4 (one decimal octet
        # label under in-addr.arpa).  Catching Exception rather than a
        # bare "except:" avoids swallowing KeyboardInterrupt/SystemExit.
        parts = ['%d' % ord(byte) for byte in dns.ipv4.inet_aton(text)]
        origin = ipv4_reverse_domain
    # Reverse-map names list the address components least-significant first.
    parts.reverse()
    return dns.name.from_text('.'.join(parts), origin=origin)
+
def to_address(name):
    """Convert a reverse map domain name into textual address form.
    @param name: an IPv4 or IPv6 address in reverse-map form.
    @type name: dns.name.Name object
    @rtype: str
    """
    if name.is_subdomain(ipv4_reverse_domain):
        # IPv4: labels under in-addr.arpa are the decimal octets in
        # reverse order, e.g. 1.0.0.127.in-addr.arpa -> 127.0.0.1.
        name = name.relativize(ipv4_reverse_domain)
        labels = list(name.labels)
        labels.reverse()
        text = '.'.join(labels)
        # run through inet_aton() to check syntax and make pretty.
        return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
    elif name.is_subdomain(ipv6_reverse_domain):
        # IPv6: labels under ip6.arpa are single hex nibbles in reverse
        # order; regroup them four at a time into colon-separated words.
        name = name.relativize(ipv6_reverse_domain)
        labels = list(name.labels)
        labels.reverse()
        parts = []
        i = 0
        l = len(labels)
        while i < l:
            parts.append(''.join(labels[i:i+4]))
            i += 4
        text = ':'.join(parts)
        # run through inet_aton() to check syntax and make pretty.
        return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
    else:
        # NOTE(review): this module never imports dns.exception; it appears
        # to rely on dns.name importing it as a side effect -- confirm, or
        # add an explicit "import dns.exception" at module top.
        raise dns.exception.SyntaxError('unknown reverse-map address family')
diff --git a/lib/dnspython/dns/rrset.py b/lib/dnspython/dns/rrset.py
new file mode 100644
index 0000000000..21468174d4
--- /dev/null
+++ b/lib/dnspython/dns/rrset.py
@@ -0,0 +1,175 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS RRsets (an RRset is a named rdataset)"""
+
+import dns.name
+import dns.rdataset
+import dns.rdataclass
+import dns.renderer
+
class RRset(dns.rdataset.Rdataset):
    """A DNS RRset (named rdataset).

    RRset inherits from Rdataset, and RRsets can be treated as
    Rdatasets in most cases. There are, however, a few notable
    exceptions. RRsets have different to_wire() and to_text() method
    arguments, reflecting the fact that RRsets always have an owner
    name.
    """

    # name: the owner name of the RRset.
    # deleting: the rdata class used to signal deletion in DNS dynamic
    # updates, or None for an ordinary RRset.
    __slots__ = ['name', 'deleting']

    def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
                 deleting=None):
        """Create a new RRset.

        NOTE(review): dns.rdatatype is not imported by this module;
        presumably it is reachable as an attribute of the dns package via
        dns.rdataset's imports -- confirm.
        """

        super(RRset, self).__init__(rdclass, rdtype, covers)
        self.name = name
        self.deleting = deleting

    def _clone(self):
        # Extend Rdataset's clone protocol with our two extra slots.
        obj = super(RRset, self)._clone()
        obj.name = self.name
        obj.deleting = self.deleting
        return obj

    def __repr__(self):
        if self.covers == 0:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        if not self.deleting is None:
            dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
        else:
            dtext = ''
        return '<DNS ' + str(self.name) + ' ' + \
               dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'

    def __str__(self):
        return self.to_text()

    def __eq__(self, other):
        """Two RRsets are equal if they have the same name and the same
        rdataset

        @rtype: bool"""
        if not isinstance(other, RRset):
            return False
        if self.name != other.name:
            return False
        # Rdata comparison is delegated to Rdataset.__eq__.
        return super(RRset, self).__eq__(other)

    def match(self, name, rdclass, rdtype, covers, deleting=None):
        """Returns True if this rrset matches the specified class, type,
        covers, and deletion state."""

        if not super(RRset, self).match(rdclass, rdtype, covers):
            return False
        if self.name != name or self.deleting != deleting:
            return False
        return True

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert the RRset into DNS master file format.

        @see: L{dns.name.Name.choose_relativity} for more information
        on how I{origin} and I{relativize} determine the way names
        are emitted.

        Any additional keyword arguments are passed on to the rdata
        to_text() method.

        @param origin: The origin for relative names, or None.
        @type origin: dns.name.Name object
        @param relativize: True if names should names be relativized
        @type relativize: bool"""

        # Unlike Rdataset.to_text, the owner name is supplied implicitly.
        return super(RRset, self).to_text(self.name, origin, relativize,
                                          self.deleting, **kw)

    def to_wire(self, file, compress=None, origin=None, **kw):
        """Convert the RRset to wire format."""

        return super(RRset, self).to_wire(self.name, file, compress, origin,
                                          self.deleting, **kw)

    def to_rdataset(self):
        """Convert an RRset into an Rdataset.

        @rtype: dns.rdataset.Rdataset object
        """
        # Drops the owner name and deleting state; keeps ttl and rdatas.
        return dns.rdataset.from_rdata_list(self.ttl, list(self))
+
+
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
    """Create an RRset with the specified name, TTL, class, and type, and with
    the specified list of rdatas in text format.

    @rtype: dns.rrset.RRset object
    """

    # Accept textual forms for the name, class, and type.
    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, None)
    if isinstance(rdclass, (str, unicode)):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, (str, unicode)):
        rdtype = dns.rdatatype.from_text(rdtype)
    rrset = RRset(name, rdclass, rdtype)
    rrset.update_ttl(ttl)
    for text in text_rdatas:
        rrset.add(dns.rdata.from_text(rrset.rdclass, rrset.rdtype, text))
    return rrset
+
def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
    """Create an RRset with the specified name, TTL, class, and type and with
    the specified rdatas in text format.

    @rtype: dns.rrset.RRset object
    """

    # Varargs convenience wrapper around from_text_list().
    rrset = from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
    return rrset
+
def from_rdata_list(name, ttl, rdatas):
    """Create an RRset with the specified name and TTL, and with
    the specified list of rdata objects.

    The class and type of the RRset are taken from the first rdata.

    @rtype: dns.rrset.RRset object
    """

    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, None)

    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    r = None
    for rd in rdatas:
        if r is None:
            # First rdata: it determines the RRset's class and type.
            # (Removed a dead "first_time = False" assignment that was
            # never read anywhere.)
            r = RRset(name, rd.rdclass, rd.rdtype)
            r.update_ttl(ttl)
        r.add(rd)
    return r
+
def from_rdata(name, ttl, *rdatas):
    """Create an RRset with the specified name and TTL, and with
    the specified rdata objects.

    @rtype: dns.rrset.RRset object
    """

    # Varargs convenience wrapper around from_rdata_list().
    rrset = from_rdata_list(name, ttl, rdatas)
    return rrset
diff --git a/lib/dnspython/dns/set.py b/lib/dnspython/dns/set.py
new file mode 100644
index 0000000000..91f9fb8766
--- /dev/null
+++ b/lib/dnspython/dns/set.py
@@ -0,0 +1,263 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A simple Set class."""
+
class Set(object):
    """A simple set class.

    Sets are not in Python until 2.3, and rdata are not immutable so
    we cannot use sets.Set anyway.  This class implements subset of
    the 2.3 Set interface using a list as the container.

    Insertion order of items is preserved; equality is order-insensitive.

    @ivar items: A list of the items which are in the set
    @type items: list"""

    __slots__ = ['items']

    def __init__(self, items=None):
        """Initialize the set.

        @param items: the initial set of items
        @type items: any iterable or None
        """

        self.items = []
        if items is not None:
            for item in items:
                self.add(item)

    def __repr__(self):
        return "dns.simpleset.Set(%s)" % repr(self.items)

    def add(self, item):
        """Add an item to the set (a no-op if it is already present)."""
        if item not in self.items:
            self.items.append(item)

    def remove(self, item):
        """Remove an item from the set.

        @raises ValueError: the item is not in the set
        """
        self.items.remove(item)

    def discard(self, item):
        """Remove an item from the set if present."""
        try:
            self.items.remove(item)
        except ValueError:
            pass

    def _clone(self):
        """Make a (shallow) copy of the set.

        There is a 'clone protocol' that subclasses of this class
        should use.  To make a copy, first call your super's _clone()
        method, and use the object returned as the new instance.  Then
        make shallow copies of the attributes defined in the subclass.

        This protocol allows us to write the set algorithms that
        return new instances (e.g. union) once, and keep using them in
        subclasses.
        """

        cls = self.__class__
        obj = cls.__new__(cls)
        obj.items = list(self.items)
        return obj

    def __copy__(self):
        """Make a (shallow) copy of the set."""
        return self._clone()

    def copy(self):
        """Make a (shallow) copy of the set."""
        return self._clone()

    def union_update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.
        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        for item in other.items:
            self.add(item)

    def intersection_update(self, other):
        """Update the set, removing any elements from other which are not
        in both sets.
        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            return
        # we make a copy of the list so that we can remove items from
        # the list without breaking the iterator.
        for item in list(self.items):
            if item not in other.items:
                self.items.remove(item)

    def difference_update(self, other):
        """Update the set, removing any elements from other which are in
        the set.
        @param other: the collection of items with which to update the set
        @type other: Set object
        """
        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        if self is other:
            self.items = []
        else:
            for item in other.items:
                self.discard(item)

    def union(self, other):
        """Return a new set which is the union of I{self} and I{other}.

        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """

        obj = self._clone()
        obj.union_update(other)
        return obj

    def intersection(self, other):
        """Return a new set which is the intersection of I{self} and I{other}.

        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """

        obj = self._clone()
        obj.intersection_update(other)
        return obj

    def difference(self, other):
        """Return a new set which I{self} - I{other}, i.e. the items
        in I{self} which are not also in I{other}.

        @param other: the other set
        @type other: Set object
        @rtype: the same type as I{self}
        """

        obj = self._clone()
        obj.difference_update(other)
        return obj

    def __or__(self, other):
        return self.union(other)

    def __and__(self, other):
        return self.intersection(other)

    def __add__(self, other):
        return self.union(other)

    def __sub__(self, other):
        return self.difference(other)

    def __ior__(self, other):
        self.union_update(other)
        return self

    def __iand__(self, other):
        self.intersection_update(other)
        return self

    def __iadd__(self, other):
        self.union_update(other)
        return self

    def __isub__(self, other):
        self.difference_update(other)
        return self

    def update(self, other):
        """Update the set, adding any elements from other which are not
        already in the set.
        @param other: the collection of items with which to update the set
        @type other: any iterable type"""
        for item in other:
            self.add(item)

    def clear(self):
        """Make the set empty."""
        self.items = []

    def __eq__(self, other):
        # Fixed: previously comparing against a non-Set raised
        # AttributeError (no .items); now it simply reports inequality.
        if not isinstance(other, Set):
            return False
        # Yes, this is inefficient but the sets we're dealing with are
        # usually quite small, so it shouldn't hurt too much.
        for item in self.items:
            if item not in other.items:
                return False
        for item in other.items:
            if item not in self.items:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)

    def __getitem__(self, i):
        return self.items[i]

    def __delitem__(self, i):
        del self.items[i]

    def __getslice__(self, i, j):
        # Python 2 slice protocol; harmless (unused) on Python 3.
        return self.items[i:j]

    def __delslice__(self, i, j):
        del self.items[i:j]

    def issubset(self, other):
        """Is I{self} a subset of I{other}?

        @rtype: bool
        """

        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in self.items:
            if item not in other.items:
                return False
        return True

    def issuperset(self, other):
        """Is I{self} a superset of I{other}?

        @rtype: bool
        """

        if not isinstance(other, Set):
            raise ValueError('other must be a Set instance')
        for item in other.items:
            if item not in self.items:
                return False
        return True
diff --git a/lib/dnspython/dns/tokenizer.py b/lib/dnspython/dns/tokenizer.py
new file mode 100644
index 0000000000..4f68a2a495
--- /dev/null
+++ b/lib/dnspython/dns/tokenizer.py
@@ -0,0 +1,547 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Tokenize DNS master file format"""
+
+import cStringIO
+import sys
+
+import dns.exception
+import dns.name
+import dns.ttl
+
# Characters that terminate an unquoted token.  Stored as a dict so that
# membership tests ("c in _DELIMITERS") are fast on Python versions that
# predate the built-in set type.
_DELIMITERS = {
    ' ' : True,
    '\t' : True,
    '\n' : True,
    ';' : True,
    '(' : True,
    ')' : True,
    '"' : True }

# Inside a quoted string only the closing quote ends the token.
_QUOTING_DELIMITERS = { '"' : True }

# Token type codes (the ttype field of Token).
EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6

class UngetBufferFull(dns.exception.DNSException):
    """Raised when an attempt is made to unget a token when the unget
    buffer is full."""
    pass
+
class Token(object):
    """A single token from a DNS master file.

    @ivar ttype: The token type
    @type ttype: int
    @ivar value: The token value
    @type value: string
    @ivar has_escape: Does the token value contain escapes?
    @type has_escape: bool
    """

    def __init__(self, ttype, value='', has_escape=False):
        """Initialize a token instance.

        @param ttype: The token type
        @type ttype: int
        @param value: The token value
        @type value: string
        @param has_escape: Does the token value contain escapes?
        @type has_escape: bool
        """
        self.ttype = ttype
        self.value = value
        self.has_escape = has_escape

    def is_eof(self):
        return self.ttype == EOF

    def is_eol(self):
        return self.ttype == EOL

    def is_whitespace(self):
        return self.ttype == WHITESPACE

    def is_identifier(self):
        return self.ttype == IDENTIFIER

    def is_quoted_string(self):
        return self.ttype == QUOTED_STRING

    def is_comment(self):
        return self.ttype == COMMENT

    def is_delimiter(self):
        return self.ttype == DELIMITER

    def is_eol_or_eof(self):
        return self.ttype in (EOL, EOF)

    def __eq__(self, other):
        if not isinstance(other, Token):
            return False
        return self.ttype == other.ttype and self.value == other.value

    def __ne__(self, other):
        if not isinstance(other, Token):
            return True
        return self.ttype != other.ttype or self.value != other.value

    def __str__(self):
        return '%d "%s"' % (self.ttype, self.value)

    def unescape(self):
        """Return an equivalent token with backslash escapes in the value
        resolved; returns self unchanged if the value has no escapes."""
        if not self.has_escape:
            return self
        out = []
        value = self.value
        n = len(value)
        pos = 0
        while pos < n:
            ch = value[pos]
            pos += 1
            if ch == '\\':
                if pos >= n:
                    raise dns.exception.UnexpectedEnd
                ch = value[pos]
                pos += 1
                if ch.isdigit():
                    # \DDD decimal escape: exactly three digits required.
                    if pos + 1 >= n:
                        raise dns.exception.UnexpectedEnd
                    d2 = value[pos]
                    d3 = value[pos + 1]
                    pos += 2
                    if not (d2.isdigit() and d3.isdigit()):
                        raise dns.exception.SyntaxError
                    ch = chr(int(ch) * 100 + int(d2) * 10 + int(d3))
            out.append(ch)
        return Token(self.ttype, ''.join(out))

    # compatibility for old-style tuple tokens

    def __len__(self):
        return 2

    def __iter__(self):
        return iter((self.ttype, self.value))

    def __getitem__(self, i):
        if i == 0:
            return self.ttype
        if i == 1:
            return self.value
        raise IndexError
+
class Tokenizer(object):
    """A DNS master file format tokenizer.

    A token is a (type, value) tuple, where I{type} is an int, and
    I{value} is a string. The valid types are EOF, EOL, WHITESPACE,
    IDENTIFIER, QUOTED_STRING, COMMENT, and DELIMITER.

    @ivar file: The file to tokenize
    @type file: file
    @ivar ungotten_char: The most recently ungotten character, or None.
    @type ungotten_char: string
    @ivar ungotten_token: The most recently ungotten token, or None.
    @type ungotten_token: (int, string) token tuple
    @ivar multiline: The current multiline level. This value is increased
    by one every time a '(' delimiter is read, and decreased by one every time
    a ')' delimiter is read.
    @type multiline: int
    @ivar quoting: This variable is true if the tokenizer is currently
    reading a quoted string.
    @type quoting: bool
    @ivar eof: This variable is true if the tokenizer has encountered EOF.
    @type eof: bool
    @ivar delimiters: The current delimiter dictionary.
    @type delimiters: dict
    @ivar line_number: The current line number
    @type line_number: int
    @ivar filename: A filename that will be returned by the L{where} method.
    @type filename: string
    """

    def __init__(self, f=sys.stdin, filename=None):
        """Initialize a tokenizer instance.

        @param f: The file to tokenize. The default is sys.stdin.
        This parameter may also be a string, in which case the tokenizer
        will take its input from the contents of the string.
        @type f: file or string
        @param filename: the name of the filename that the L{where} method
        will return.
        @type filename: string
        """

        if isinstance(f, str):
            f = cStringIO.StringIO(f)
            if filename is None:
                filename = '<string>'
        else:
            if filename is None:
                if f is sys.stdin:
                    filename = '<stdin>'
                else:
                    filename = '<file>'
        self.file = f
        self.ungotten_char = None
        self.ungotten_token = None
        self.multiline = 0
        self.quoting = False
        self.eof = False
        self.delimiters = _DELIMITERS
        self.line_number = 1
        self.filename = filename

    def _get_char(self):
        """Read a character from input.
        @rtype: string
        """

        if self.ungotten_char is None:
            if self.eof:
                c = ''
            else:
                c = self.file.read(1)
                if c == '':
                    self.eof = True
                elif c == '\n':
                    self.line_number += 1
        else:
            c = self.ungotten_char
            self.ungotten_char = None
        return c

    def where(self):
        """Return the current location in the input.

        @rtype: (string, int) tuple. The first item is the filename of
        the input, the second is the current line number.
        """

        return (self.filename, self.line_number)

    def _unget_char(self, c):
        """Unget a character.

        The unget buffer for characters is only one character large; it is
        an error to try to unget a character when the unget buffer is not
        empty.

        @param c: the character to unget
        @type c: string
        @raises UngetBufferFull: there is already an ungotten char
        """

        if not self.ungotten_char is None:
            raise UngetBufferFull
        self.ungotten_char = c

    def skip_whitespace(self):
        """Consume input until a non-whitespace character is encountered.

        The non-whitespace character is then ungotten, and the number of
        whitespace characters consumed is returned.

        If the tokenizer is in multiline mode, then newlines are whitespace.

        @rtype: int
        """

        skipped = 0
        while True:
            c = self._get_char()
            if c != ' ' and c != '\t':
                if (c != '\n') or not self.multiline:
                    self._unget_char(c)
                    return skipped
            skipped += 1

    def get(self, want_leading=False, want_comment=False):
        """Get the next token.

        @param want_leading: If True, return a WHITESPACE token if the
        first character read is whitespace. The default is False.
        @type want_leading: bool
        @param want_comment: If True, return a COMMENT token if the
        first token read is a comment. The default is False.
        @type want_comment: bool
        @rtype: Token object
        @raises dns.exception.UnexpectedEnd: input ended prematurely
        @raises dns.exception.SyntaxError: input was badly formed
        """

        if not self.ungotten_token is None:
            token = self.ungotten_token
            self.ungotten_token = None
            # An ungotten whitespace/comment token is discarded unless
            # the caller asked for that kind of token.
            if token.is_whitespace():
                if want_leading:
                    return token
            elif token.is_comment():
                if want_comment:
                    return token
            else:
                return token
        skipped = self.skip_whitespace()
        if want_leading and skipped > 0:
            return Token(WHITESPACE, ' ')
        token = ''
        ttype = IDENTIFIER
        has_escape = False
        while True:
            c = self._get_char()
            if c == '' or c in self.delimiters:
                if c == '' and self.quoting:
                    raise dns.exception.UnexpectedEnd
                if token == '' and ttype != QUOTED_STRING:
                    if c == '(':
                        self.multiline += 1
                        self.skip_whitespace()
                        continue
                    elif c == ')':
                        if not self.multiline > 0:
                            raise dns.exception.SyntaxError
                        self.multiline -= 1
                        self.skip_whitespace()
                        continue
                    elif c == '"':
                        if not self.quoting:
                            self.quoting = True
                            self.delimiters = _QUOTING_DELIMITERS
                            ttype = QUOTED_STRING
                            continue
                        else:
                            self.quoting = False
                            self.delimiters = _DELIMITERS
                            self.skip_whitespace()
                            continue
                    elif c == '\n':
                        return Token(EOL, '\n')
                    elif c == ';':
                        while 1:
                            c = self._get_char()
                            if c == '\n' or c == '':
                                break
                            token += c
                        if want_comment:
                            self._unget_char(c)
                            return Token(COMMENT, token)
                        elif c == '':
                            if self.multiline:
                                raise dns.exception.SyntaxError('unbalanced parentheses')
                            return Token(EOF)
                        elif self.multiline:
                            self.skip_whitespace()
                            token = ''
                            continue
                        else:
                            return Token(EOL, '\n')
                    else:
                        # This code exists in case we ever want a
                        # delimiter to be returned. It never produces
                        # a token currently.
                        token = c
                        ttype = DELIMITER
                else:
                    self._unget_char(c)
                break
            elif self.quoting:
                if c == '\\':
                    c = self._get_char()
                    if c == '':
                        raise dns.exception.UnexpectedEnd
                    if c.isdigit():
                        c2 = self._get_char()
                        if c2 == '':
                            raise dns.exception.UnexpectedEnd
                        c3 = self._get_char()
                        # Fixed: this previously tested "c" (always a
                        # digit here), so EOF in the middle of a \DDD
                        # escape raised SyntaxError instead of
                        # UnexpectedEnd.
                        if c3 == '':
                            raise dns.exception.UnexpectedEnd
                        if not (c2.isdigit() and c3.isdigit()):
                            raise dns.exception.SyntaxError
                        c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
                elif c == '\n':
                    raise dns.exception.SyntaxError('newline in quoted string')
            elif c == '\\':
                #
                # It's an escape. Put it and the next character into
                # the token; it will be checked later for goodness.
                #
                token += c
                has_escape = True
                c = self._get_char()
                if c == '' or c == '\n':
                    raise dns.exception.UnexpectedEnd
            token += c
        if token == '' and ttype != QUOTED_STRING:
            if self.multiline:
                raise dns.exception.SyntaxError('unbalanced parentheses')
            ttype = EOF
        return Token(ttype, token, has_escape)

    def unget(self, token):
        """Unget a token.

        The unget buffer for tokens is only one token large; it is
        an error to try to unget a token when the unget buffer is not
        empty.

        @param token: the token to unget
        @type token: Token object
        @raises UngetBufferFull: there is already an ungotten token
        """

        if not self.ungotten_token is None:
            raise UngetBufferFull
        self.ungotten_token = token

    def next(self):
        """Return the next item in an iteration.
        @rtype: (int, string)
        """

        token = self.get()
        if token.is_eof():
            raise StopIteration
        return token

    def __iter__(self):
        return self

    # Helpers

    def get_int(self):
        """Read the next token and interpret it as an integer.

        @raises dns.exception.SyntaxError:
        @rtype: int
        """

        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        if not token.value.isdigit():
            raise dns.exception.SyntaxError('expecting an integer')
        return int(token.value)

    def get_uint8(self):
        """Read the next token and interpret it as an 8-bit unsigned
        integer.

        @raises dns.exception.SyntaxError:
        @rtype: int
        """

        value = self.get_int()
        if value < 0 or value > 255:
            raise dns.exception.SyntaxError('%d is not an unsigned 8-bit integer' % value)
        return value

    def get_uint16(self):
        """Read the next token and interpret it as a 16-bit unsigned
        integer.

        @raises dns.exception.SyntaxError:
        @rtype: int
        """

        value = self.get_int()
        if value < 0 or value > 65535:
            raise dns.exception.SyntaxError('%d is not an unsigned 16-bit integer' % value)
        return value

    def get_uint32(self):
        """Read the next token and interpret it as a 32-bit unsigned
        integer.

        @raises dns.exception.SyntaxError:
        @rtype: int
        """

        # Fixed: the bound was previously "> 4294967296L", which wrongly
        # accepted 2**32; the maximum unsigned 32-bit value is 2**32 - 1.
        # int() auto-promotes to long on Python 2, so long() was not needed,
        # and this now mirrors the other get_uintN helpers via get_int().
        value = self.get_int()
        if value < 0 or value > 4294967295:
            raise dns.exception.SyntaxError('%d is not an unsigned 32-bit integer' % value)
        return value

    def get_string(self, origin=None):
        """Read the next token and interpret it as a string.

        @raises dns.exception.SyntaxError:
        @rtype: string
        """

        token = self.get().unescape()
        if not (token.is_identifier() or token.is_quoted_string()):
            raise dns.exception.SyntaxError('expecting a string')
        return token.value

    def get_identifier(self, origin=None):
        """Read the next token and raise an exception if it is not an identifier.

        @raises dns.exception.SyntaxError:
        @rtype: string
        """

        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        return token.value

    def get_name(self, origin=None):
        """Read the next token and interpret it as a DNS name.

        @raises dns.exception.SyntaxError:
        @rtype: dns.name.Name object"""

        token = self.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        return dns.name.from_text(token.value, origin)

    def get_eol(self):
        """Read the next token and raise an exception if it isn't EOL or
        EOF.

        @raises dns.exception.SyntaxError:
        @rtype: string
        """

        token = self.get()
        if not token.is_eol_or_eof():
            raise dns.exception.SyntaxError('expected EOL or EOF, got %d "%s"' % (token.ttype, token.value))
        return token.value

    def get_ttl(self):
        """Read the next token and interpret it as a TTL.

        @raises dns.exception.SyntaxError:
        @rtype: int
        """
        token = self.get().unescape()
        if not token.is_identifier():
            raise dns.exception.SyntaxError('expecting an identifier')
        return dns.ttl.from_text(token.value)
diff --git a/lib/dnspython/dns/tsig.py b/lib/dnspython/dns/tsig.py
new file mode 100644
index 0000000000..5e58ea8841
--- /dev/null
+++ b/lib/dnspython/dns/tsig.py
@@ -0,0 +1,223 @@
+# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TSIG support."""
+
+import hmac
+import struct
+import sys
+
+import dns.exception
+import dns.hash
+import dns.rdataclass
+import dns.name
+
# TSIG exceptions.  BadTime and BadSignature are detected locally while
# validating a peer's signature; the Peer* family maps the TSIG error
# codes a remote peer reports back to us.

class BadTime(dns.exception.DNSException):
    """Raised if the current time is not within the TSIG's validity time."""
    pass

class BadSignature(dns.exception.DNSException):
    """Raised if the TSIG signature fails to verify."""
    pass

class PeerError(dns.exception.DNSException):
    """Base class for all TSIG errors generated by the remote peer"""
    pass

class PeerBadKey(PeerError):
    """Raised if the peer didn't know the key we used"""
    pass

class PeerBadSignature(PeerError):
    """Raised if the peer didn't like the signature we sent"""
    pass

class PeerBadTime(PeerError):
    """Raised if the peer didn't like the time we sent"""
    pass

class PeerBadTruncation(PeerError):
    """Raised if the peer didn't like amount of truncation in the TSIG we sent"""
    pass
+
+# TSIG Algorithms
+
+HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
+HMAC_SHA1 = dns.name.from_text("hmac-sha1")
+HMAC_SHA224 = dns.name.from_text("hmac-sha224")
+HMAC_SHA256 = dns.name.from_text("hmac-sha256")
+HMAC_SHA384 = dns.name.from_text("hmac-sha384")
+HMAC_SHA512 = dns.name.from_text("hmac-sha512")
+
+default_algorithm = HMAC_MD5
+
+BADSIG = 16
+BADKEY = 17
+BADTIME = 18
+BADTRUNC = 22
+
+def sign(wire, keyname, secret, time, fudge, original_id, error,
+ other_data, request_mac, ctx=None, multi=False, first=True,
+ algorithm=default_algorithm):
+ """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
+ for the input parameters, the HMAC MAC calculated by applying the
+ TSIG signature algorithm, and the TSIG digest context.
+ @rtype: (string, string, hmac.HMAC object)
+ @raises ValueError: I{other_data} is too long
+ @raises NotImplementedError: I{algorithm} is not supported
+ """
+
+ (algorithm_name, digestmod) = get_algorithm(algorithm)
+ if first:
+ ctx = hmac.new(secret, digestmod=digestmod)
+ ml = len(request_mac)
+ if ml > 0:
+ ctx.update(struct.pack('!H', ml))
+ ctx.update(request_mac)
+ id = struct.pack('!H', original_id)
+ ctx.update(id)
+ ctx.update(wire[2:])
+ if first:
+ ctx.update(keyname.to_digestable())
+ ctx.update(struct.pack('!H', dns.rdataclass.ANY))
+ ctx.update(struct.pack('!I', 0))
+ long_time = time + 0L
+ upper_time = (long_time >> 32) & 0xffffL
+ lower_time = long_time & 0xffffffffL
+ time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
+ pre_mac = algorithm_name + time_mac
+ ol = len(other_data)
+ if ol > 65535:
+ raise ValueError('TSIG Other Data is > 65535 bytes')
+ post_mac = struct.pack('!HH', error, ol) + other_data
+ if first:
+ ctx.update(pre_mac)
+ ctx.update(post_mac)
+ else:
+ ctx.update(time_mac)
+ mac = ctx.digest()
+ mpack = struct.pack('!H', len(mac))
+ tsig_rdata = pre_mac + mpack + mac + id + post_mac
+ if multi:
+ ctx = hmac.new(secret)
+ ml = len(mac)
+ ctx.update(struct.pack('!H', ml))
+ ctx.update(mac)
+ else:
+ ctx = None
+ return (tsig_rdata, mac, ctx)
+
def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
             other_data, request_mac, ctx=None, multi=False, first=True,
             algorithm=default_algorithm):
    # Backwards-compatible alias for sign().  Despite its name it signs
    # with whatever I{algorithm} is passed (the default is HMAC-MD5).
    return sign(wire, keyname, secret, time, fudge, original_id, error,
                other_data, request_mac, ctx, multi, first, algorithm)
+
def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
             tsig_rdlen, ctx=None, multi=False, first=True):
    """Validate the specified TSIG rdata against the other input parameters.

    @param wire: the wire-format message containing the TSIG RR
    @param now: the current time, compared against the signed time +/- fudge
    @param tsig_start: offset of the TSIG RR within I{wire}
    @param tsig_rdata: offset of the TSIG rdata within I{wire}
    @raises FormError: The TSIG is badly formed.
    @raises BadTime: There is too much time skew between the client and the
    server.
    @raises BadSignature: The TSIG signature did not validate
    @rtype: hmac.HMAC object"""

    (adcount,) = struct.unpack("!H", wire[10:12])
    if adcount == 0:
        raise dns.exception.FormError
    # Reconstruct the message as it looked before the TSIG RR was added:
    # decrement ARCOUNT and truncate at the start of the TSIG RR.
    adcount -= 1
    new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
    current = tsig_rdata
    # The first field of the rdata is the algorithm name.
    (aname, used) = dns.name.from_wire(wire, current)
    current = current + used
    # Then: 48-bit time signed (16 high + 32 low bits), 16-bit fudge,
    # 16-bit MAC size.
    (upper_time, lower_time, fudge, mac_size) = \
        struct.unpack("!HIHH", wire[current:current + 10])
    time = ((upper_time + 0L) << 32) + (lower_time + 0L)
    current += 10
    mac = wire[current:current + mac_size]
    current += mac_size
    (original_id, error, other_size) = \
        struct.unpack("!HHH", wire[current:current + 6])
    current += 6
    other_data = wire[current:current + other_size]
    current += other_size
    # The parsed fields must exactly consume the rdata.
    if current != tsig_rdata + tsig_rdlen:
        raise dns.exception.FormError
    if error != 0:
        # Map TSIG error codes reported by the peer onto exceptions.
        if error == BADSIG:
            raise PeerBadSignature
        elif error == BADKEY:
            raise PeerBadKey
        elif error == BADTIME:
            raise PeerBadTime
        elif error == BADTRUNC:
            raise PeerBadTruncation
        else:
            raise PeerError('unknown TSIG error code %d' % error)
    time_low = time - fudge
    time_high = time + fudge
    if now < time_low or now > time_high:
        raise BadTime
    # Re-sign locally using the algorithm named in the TSIG RR (aname)
    # and compare our MAC against the peer's.
    (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
                                original_id, error, other_data,
                                request_mac, ctx, multi, first, aname)
    if (our_mac != mac):
        raise BadSignature
    return ctx
+
+_hashes = None
+
def _maybe_add_hash(tsig_alg, hash_alg):
    # Register hash_alg for tsig_alg if this Python provides it;
    # dns.hash.get raises KeyError for an unavailable digest, which is
    # silently skipped so the algorithm is simply left unsupported.
    try:
        _hashes[tsig_alg] = dns.hash.get(hash_alg)
    except KeyError:
        pass
+
def _setup_hashes():
    # Build the TSIG algorithm -> hash constructor table, skipping any
    # digest this Python build does not provide.  Called lazily from
    # get_algorithm() on first use.
    global _hashes
    _hashes = {}
    _maybe_add_hash(HMAC_SHA224, 'SHA224')
    _maybe_add_hash(HMAC_SHA256, 'SHA256')
    _maybe_add_hash(HMAC_SHA384, 'SHA384')
    _maybe_add_hash(HMAC_SHA512, 'SHA512')
    _maybe_add_hash(HMAC_SHA1, 'SHA1')
    _maybe_add_hash(HMAC_MD5, 'MD5')
+
def get_algorithm(algorithm):
    """Returns the wire format string and the hash module to use for the
    specified TSIG algorithm

    @param algorithm: the TSIG algorithm name
    @type algorithm: dns.name.Name object or string
    @rtype: (string, hash constructor)
    @raises NotImplementedError: I{algorithm} is not supported
    """

    global _hashes
    if _hashes is None:
        # Lazily populate the algorithm table on first use.
        _setup_hashes()

    if isinstance(algorithm, (str, unicode)):
        algorithm = dns.name.from_text(algorithm)

    # NOTE(review): SHA-384/512 are gated on Python >= 2.5.2 — presumably
    # because of hashlib problems in earlier interpreters; confirm against
    # hashlib history before relaxing.
    if sys.hexversion < 0x02050200 and \
       (algorithm == HMAC_SHA384 or algorithm == HMAC_SHA512):
        raise NotImplementedError("TSIG algorithm " + str(algorithm) +
                                  " requires Python 2.5.2 or later")

    try:
        return (algorithm.to_digestable(), _hashes[algorithm])
    except KeyError:
        raise NotImplementedError("TSIG algorithm " + str(algorithm) +
                                  " is not supported")
diff --git a/lib/dnspython/dns/tsigkeyring.py b/lib/dnspython/dns/tsigkeyring.py
new file mode 100644
index 0000000000..cbd1a27bbf
--- /dev/null
+++ b/lib/dnspython/dns/tsigkeyring.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A place to store TSIG keys."""
+
+import base64
+
+import dns.name
+
def from_text(textring):
    """Convert a dictionary containing (textual DNS name, base64 secret) pairs
    into a binary keyring which has (dns.name.Name, binary secret) pairs.
    @rtype: dict"""

    keyring = {}
    for (keytext, keysecret) in textring.items():
        keyring[dns.name.from_text(keytext)] = base64.decodestring(keysecret)
    return keyring
+
def to_text(keyring):
    """Convert a dictionary containing (dns.name.Name, binary secret) pairs
    into a text keyring which has (textual DNS name, base64 secret) pairs.

    @param keyring: dict mapping dns.name.Name objects to binary secrets
    @rtype: dict"""

    textring = {}
    for keyname in keyring:
        # BUG FIX: was dns.name.to_text(keyname), but the dns.name module
        # exposes no module-level to_text() function; converting a Name to
        # text is a method on the Name object (cf. Name.to_digestable()
        # usage in dns.tsig).
        keytext = keyname.to_text()
        secret = base64.encodestring(keyring[keyname])
        textring[keytext] = secret
    return textring
diff --git a/lib/dnspython/dns/ttl.py b/lib/dnspython/dns/ttl.py
new file mode 100644
index 0000000000..f295300517
--- /dev/null
+++ b/lib/dnspython/dns/ttl.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TTL conversion."""
+
+import dns.exception
+
class BadTTL(dns.exception.SyntaxError):
    """Raised when a TTL string is malformed, has an unknown unit, or is
    outside the valid 0 .. 2^31 - 1 range."""
    pass
+
def from_text(text):
    """Convert the text form of a TTL to an integer.

    The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.

    @param text: the textual TTL
    @type text: string
    @raises dns.ttl.BadTTL: the TTL is not well-formed
    @rtype: int
    """

    if text.isdigit():
        # Pure number: a plain TTL in seconds.
        total = long(text)
    else:
        # Units syntax must start with a digit.
        if not text[0].isdigit():
            raise BadTTL
        total = 0L
        current = 0L
        for c in text:
            if c.isdigit():
                # Accumulate the digits of the current number.
                current *= 10
                current += long(c)
            else:
                c = c.lower()
                if c == 'w':
                    total += current * 604800L
                elif c == 'd':
                    total += current * 86400L
                elif c == 'h':
                    total += current * 3600L
                elif c == 'm':
                    total += current * 60L
                elif c == 's':
                    total += current
                else:
                    raise BadTTL("unknown unit '%s'" % c)
                current = 0
        # A trailing number with no unit (e.g. '1w30') is an error.
        if not current == 0:
            raise BadTTL("trailing integer")
    if total < 0L or total > 2147483647L:
        raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
    return total
diff --git a/lib/dnspython/dns/update.py b/lib/dnspython/dns/update.py
new file mode 100644
index 0000000000..e67acafec9
--- /dev/null
+++ b/lib/dnspython/dns/update.py
@@ -0,0 +1,245 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Dynamic Update Support"""
+
import dns.message
import dns.name
import dns.opcode
import dns.rdata
import dns.rdataclass
import dns.rdataset
import dns.rdatatype
import dns.tsig
+
class Update(dns.message.Message):
    """A DNS Dynamic Update message (RFC 2136).

    RFC 2136 reinterprets the standard message sections: the zone being
    updated is carried in the question section, prerequisites in the
    answer section, and update operations in the authority section.
    """

    def __init__(self, zone, rdclass=dns.rdataclass.IN, keyring=None,
                 keyname=None, keyalgorithm=dns.tsig.default_algorithm):
        """Initialize a new DNS Update object.

        @param zone: The zone which is being updated.
        @type zone: A dns.name.Name or string
        @param rdclass: The class of the zone; defaults to dns.rdataclass.IN.
        @type rdclass: An int designating the class, or a string whose value
        is the name of a class.
        @param keyring: The TSIG keyring to use; defaults to None.
        @type keyring: dict
        @param keyname: The name of the TSIG key to use; defaults to None.
        The key must be defined in the keyring. If a keyring is specified
        but a keyname is not, then the key used will be the first key in the
        keyring. Note that the order of keys in a dictionary is not defined,
        so applications should supply a keyname when a keyring is used, unless
        they know the keyring contains only one key.
        @type keyname: dns.name.Name or string
        @param keyalgorithm: The TSIG algorithm to use; defaults to
        dns.tsig.default_algorithm. Constants for TSIG algorithms are defined
        in dns.tsig, and the currently implemented algorithms are
        HMAC_MD5, HMAC_SHA1, HMAC_SHA224, HMAC_SHA256, HMAC_SHA384, and
        HMAC_SHA512.
        @type keyalgorithm: string
        """
        super(Update, self).__init__()
        self.flags |= dns.opcode.to_flags(dns.opcode.UPDATE)
        if isinstance(zone, (str, unicode)):
            zone = dns.name.from_text(zone)
        self.origin = zone
        if isinstance(rdclass, str):
            rdclass = dns.rdataclass.from_text(rdclass)
        self.zone_rdclass = rdclass
        # The zone section (question on the wire) holds a single SOA-type
        # entry naming the zone being updated.
        self.find_rrset(self.question, self.origin, rdclass, dns.rdatatype.SOA,
                        create=True, force_unique=True)
        if not keyring is None:
            self.use_tsig(keyring, keyname, algorithm=keyalgorithm)

    def _add_rr(self, name, ttl, rd, deleting=None, section=None):
        """Add a single RR to the update section.

        @param deleting: an rdata *class* sentinel (dns.rdataclass.NONE or
        dns.rdataclass.ANY) when the RR encodes a deletion; None otherwise.
        @param section: the section to add to; defaults to the authority
        (update) section.
        """

        if section is None:
            section = self.authority
        covers = rd.covers()
        rrset = self.find_rrset(section, name, self.zone_rdclass, rd.rdtype,
                                covers, deleting, True, True)
        rrset.add(rd, ttl)

    def _add(self, replace, section, name, *args):
        """Add records. The first argument is the replace mode. If
        false, RRs are added to an existing RRset; if true, the RRset
        is replaced with the specified contents. The second
        argument is the section to add to. The third argument
        is always a name. The other arguments can be:

            - rdataset...

            - ttl, rdata...

            - ttl, rdtype, string..."""

        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        if isinstance(args[0], dns.rdataset.Rdataset):
            # Form 1: one or more rdatasets.
            for rds in args:
                if replace:
                    self.delete(name, rds.rdtype)
                for rd in rds:
                    self._add_rr(name, rds.ttl, rd, section=section)
        else:
            args = list(args)
            ttl = int(args.pop(0))
            if isinstance(args[0], dns.rdata.Rdata):
                # Form 2: a ttl followed by rdata objects.
                if replace:
                    self.delete(name, args[0].rdtype)
                for rd in args:
                    self._add_rr(name, ttl, rd, section=section)
            else:
                # Form 3: a ttl, an rdtype, then textual rdata.
                rdtype = args.pop(0)
                if isinstance(rdtype, str):
                    rdtype = dns.rdatatype.from_text(rdtype)
                if replace:
                    self.delete(name, rdtype)
                for s in args:
                    rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
                                             self.origin)
                    self._add_rr(name, ttl, rd, section=section)

    def add(self, name, *args):
        """Add records. The first argument is always a name. The other
        arguments can be:

            - rdataset...

            - ttl, rdata...

            - ttl, rdtype, string..."""
        self._add(False, self.authority, name, *args)

    def delete(self, name, *args):
        """Delete records. The first argument is always a name. The other
        arguments can be:

            - I{nothing}

            - rdataset...

            - rdata...

            - rdtype, [string...]"""

        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        if len(args) == 0:
            # Delete the entire node.  FIX: the 'deleting' argument is a
            # class sentinel; this previously passed dns.rdatatype.ANY.
            # Both constants are 255, so the wire output is unchanged, but
            # dns.rdataclass.ANY is the semantically correct value and
            # matches the other branches below.
            self.find_rrset(self.authority, name, dns.rdataclass.ANY,
                            dns.rdatatype.ANY, dns.rdatatype.NONE,
                            dns.rdataclass.ANY, True, True)
        elif isinstance(args[0], dns.rdataset.Rdataset):
            # Delete specific rdatas given as rdatasets; class NONE marks
            # "delete this exact RR".
            for rds in args:
                for rd in rds:
                    self._add_rr(name, 0, rd, dns.rdataclass.NONE)
        else:
            args = list(args)
            if isinstance(args[0], dns.rdata.Rdata):
                for rd in args:
                    self._add_rr(name, 0, rd, dns.rdataclass.NONE)
            else:
                rdtype = args.pop(0)
                if isinstance(rdtype, (str, unicode)):
                    rdtype = dns.rdatatype.from_text(rdtype)
                if len(args) == 0:
                    # Delete an entire rrset of the given type.
                    self.find_rrset(self.authority, name,
                                    self.zone_rdclass, rdtype,
                                    dns.rdatatype.NONE,
                                    dns.rdataclass.ANY,
                                    True, True)
                else:
                    for s in args:
                        rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
                                                 self.origin)
                        self._add_rr(name, 0, rd, dns.rdataclass.NONE)

    def replace(self, name, *args):
        """Replace records. The first argument is always a name. The other
        arguments can be:

            - rdataset...

            - ttl, rdata...

            - ttl, rdtype, string...

        Note that if you want to replace the entire node, you should do
        a delete of the name followed by one or more calls to add."""

        self._add(True, self.authority, name, *args)

    def present(self, name, *args):
        """Require that an owner name (and optionally an rdata type,
        or specific rdataset) exists as a prerequisite to the
        execution of the update. The first argument is always a name.
        The other arguments can be:

            - rdataset...

            - rdata...

            - rdtype, string..."""

        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        if len(args) == 0:
            # Name-in-use prerequisite: class ANY, type ANY.
            self.find_rrset(self.answer, name,
                            dns.rdataclass.ANY, dns.rdatatype.ANY,
                            dns.rdatatype.NONE, None,
                            True, True)
        elif isinstance(args[0], dns.rdataset.Rdataset) or \
             isinstance(args[0], dns.rdata.Rdata) or \
             len(args) > 1:
            if not isinstance(args[0], dns.rdataset.Rdataset):
                # Add a 0 TTL so the arguments fit _add()'s (ttl, ...) form.
                args = list(args)
                args.insert(0, 0)
            self._add(False, self.answer, name, *args)
        else:
            # RRset-exists (value-independent) prerequisite.
            rdtype = args[0]
            if isinstance(rdtype, (str, unicode)):
                rdtype = dns.rdatatype.from_text(rdtype)
            self.find_rrset(self.answer, name,
                            dns.rdataclass.ANY, rdtype,
                            dns.rdatatype.NONE, None,
                            True, True)

    def absent(self, name, rdtype=None):
        """Require that an owner name (and optionally an rdata type) does
        not exist as a prerequisite to the execution of the update."""

        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        if rdtype is None:
            # Name-not-in-use prerequisite: class NONE, type ANY.
            self.find_rrset(self.answer, name,
                            dns.rdataclass.NONE, dns.rdatatype.ANY,
                            dns.rdatatype.NONE, None,
                            True, True)
        else:
            if isinstance(rdtype, (str, unicode)):
                rdtype = dns.rdatatype.from_text(rdtype)
            self.find_rrset(self.answer, name,
                            dns.rdataclass.NONE, rdtype,
                            dns.rdatatype.NONE, None,
                            True, True)

    def to_wire(self, origin=None, max_size=65535):
        """Return a string containing the update in DNS compressed wire
        format.

        @param origin: the origin used for name compression; defaults to
        the update's zone origin.
        @rtype: string"""
        if origin is None:
            origin = self.origin
        return super(Update, self).to_wire(origin, max_size)
diff --git a/lib/dnspython/dns/version.py b/lib/dnspython/dns/version.py
new file mode 100644
index 0000000000..46799a77d9
--- /dev/null
+++ b/lib/dnspython/dns/version.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython release version information."""
+
# Version components for this dnspython release.
MAJOR = 1
MINOR = 9
MICRO = 3
# RELEASELEVEL encodes the release stage: 0x0f is a final release,
# 0x00 is an experimental "x" release, and any other value is a
# pre-release stage rendered as a hex digit in the version string.
RELEASELEVEL = 0x0f
SERIAL = 0

if RELEASELEVEL == 0x0f:
    # Final release: plain dotted version.
    version = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
elif RELEASELEVEL == 0x00:
    # Experimental release: 'x' followed by the serial number.
    version = '%d.%d.%dx%d' % \
              (MAJOR, MINOR, MICRO, SERIAL)
else:
    # Pre-release: stage digit followed by the serial number.
    version = '%d.%d.%d%x%d' % \
              (MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL)

# Single-integer encoding of the full version, analogous to sys.hexversion.
hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | \
             SERIAL
diff --git a/lib/dnspython/dns/zone.py b/lib/dnspython/dns/zone.py
new file mode 100644
index 0000000000..db5fd5df85
--- /dev/null
+++ b/lib/dnspython/dns/zone.py
@@ -0,0 +1,855 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Zones."""
+
+from __future__ import generators
+
+import sys
+
+import dns.exception
+import dns.name
+import dns.node
+import dns.rdataclass
+import dns.rdatatype
+import dns.rdata
+import dns.rrset
+import dns.tokenizer
+import dns.ttl
+
# Exceptions describing malformed zones; NoSOA and NoNS are the specific
# structural checks, UnknownOrigin covers zones parsed without an origin.

class BadZone(dns.exception.DNSException):
    """The zone is malformed."""
    pass

class NoSOA(BadZone):
    """The zone has no SOA RR at its origin."""
    pass

class NoNS(BadZone):
    """The zone has no NS RRset at its origin."""
    pass

class UnknownOrigin(BadZone):
    """The zone's origin is unknown."""
    pass
+
+class Zone(object):
+ """A DNS zone.
+
+ A Zone is a mapping from names to nodes. The zone object may be
+ treated like a Python dictionary, e.g. zone[name] will retrieve
+ the node associated with that name. The I{name} may be a
+ dns.name.Name object, or it may be a string. In the either case,
+ if the name is relative it is treated as relative to the origin of
+ the zone.
+
+ @ivar rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int
+ @ivar origin: The origin of the zone.
+ @type origin: dns.name.Name object
+ @ivar nodes: A dictionary mapping the names of nodes in the zone to the
+ nodes themselves.
+ @type nodes: dict
+ @ivar relativize: should names in the zone be relativized?
+ @type relativize: bool
+ @cvar node_factory: the factory used to create a new node
+ @type node_factory: class or callable
+ """
+
+ node_factory = dns.node.Node
+
+ __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
+
    def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
        """Initialize a zone object.

        @param origin: The origin of the zone.
        @type origin: dns.name.Name object
        @param rdclass: The zone's rdata class; the default is class IN.
        @type rdclass: int
        @param relativize: should names in the zone be relativized to the
        origin?  The default is True.
        @type relativize: bool"""

        self.rdclass = rdclass
        self.origin = origin
        self.nodes = {}
        self.relativize = relativize
+
+ def __eq__(self, other):
+ """Two zones are equal if they have the same origin, class, and
+ nodes.
+ @rtype: bool
+ """
+
+ if not isinstance(other, Zone):
+ return False
+ if self.rdclass != other.rdclass or \
+ self.origin != other.origin or \
+ self.nodes != other.nodes:
+ return False
+ return True
+
    def __ne__(self, other):
        """Are two zones not equal?
        @rtype: bool
        """

        # Defined explicitly because Python 2 does not derive != from ==.
        return not self.__eq__(other)
+
    def _validate_name(self, name):
        # Normalize a user-supplied key: accept text or a dns.name.Name,
        # require absolute names to fall under the zone origin, and
        # relativize them when the zone is configured to do so.
        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        elif not isinstance(name, dns.name.Name):
            raise KeyError("name parameter must be convertable to a DNS name")
        if name.is_absolute():
            if not name.is_subdomain(self.origin):
                raise KeyError("name parameter must be a subdomain of the zone origin")
            if self.relativize:
                name = name.relativize(self.origin)
        return name
+
    # Mapping-protocol plumbing: a Zone behaves like a dict mapping names
    # to dns.node.Node objects.  Item access validates (and, if configured,
    # relativizes) the key; the iteration/view methods delegate directly to
    # self.nodes, so they yield names in the zone's internal form.
    def __getitem__(self, key):
        key = self._validate_name(key)
        return self.nodes[key]

    def __setitem__(self, key, value):
        key = self._validate_name(key)
        self.nodes[key] = value

    def __delitem__(self, key):
        key = self._validate_name(key)
        del self.nodes[key]

    def __iter__(self):
        return self.nodes.iterkeys()

    def iterkeys(self):
        return self.nodes.iterkeys()

    def keys(self):
        return self.nodes.keys()

    def itervalues(self):
        return self.nodes.itervalues()

    def values(self):
        return self.nodes.values()

    def iteritems(self):
        return self.nodes.iteritems()

    def items(self):
        return self.nodes.items()

    def get(self, key):
        key = self._validate_name(key)
        return self.nodes.get(key)

    def __contains__(self, other):
        # NOTE(review): unlike __getitem__/get, the key is not validated or
        # relativized here; callers must pass a name already in internal
        # form — confirm whether this asymmetry is intentional.
        return other in self.nodes
+
+ def find_node(self, name, create=False):
+ """Find a node in the zone, possibly creating it.
+
+ @param name: the name of the node to find
+ @type name: dns.name.Name object or string
+ @param create: should the node be created if it doesn't exist?
+ @type create: bool
+ @raises KeyError: the name is not known and create was not specified.
+ @rtype: dns.node.Node object
+ """
+
+ name = self._validate_name(name)
+ node = self.nodes.get(name)
+ if node is None:
+ if not create:
+ raise KeyError
+ node = self.node_factory()
+ self.nodes[name] = node
+ return node
+
+ def get_node(self, name, create=False):
+ """Get a node in the zone, possibly creating it.
+
+ This method is like L{find_node}, except it returns None instead
+ of raising an exception if the node does not exist and creation
+ has not been requested.
+
+ @param name: the name of the node to find
+ @type name: dns.name.Name object or string
+ @param create: should the node be created if it doesn't exist?
+ @type create: bool
+ @rtype: dns.node.Node object or None
+ """
+
+ try:
+ node = self.find_node(name, create)
+ except KeyError:
+ node = None
+ return node
+
+ def delete_node(self, name):
+ """Delete the specified node if it exists.
+
+ It is not an error if the node does not exist.
+ """
+
+ name = self._validate_name(name)
+ if self.nodes.has_key(name):
+ del self.nodes[name]
+
    def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
                      create=False):
        """Look for rdata with the specified name and type in the zone,
        and return an rdataset encapsulating it.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        The rdataset returned is not a copy; changes to it will change
        the zone.

        KeyError is raised if the name or type are not found.
        Use L{get_rdataset} if you want to have None returned instead.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @param create: should the node and rdataset be created if they do not
        exist?
        @type create: bool
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rdataset.Rdataset object
        """

        name = self._validate_name(name)
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, (str, unicode)):
            covers = dns.rdatatype.from_text(covers)
        node = self.find_node(name, create)
        return node.find_rdataset(self.rdclass, rdtype, covers, create)
+
+ def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Look for rdata with the specified name and type in the zone,
+ and return an rdataset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ The rdataset returned is not a copy; changes to it will change
+ the zone.
+
+ None is returned if the name or type are not found.
+ Use L{find_rdataset} if you want to have KeyError raised instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @param create: should the node and rdataset be created if they do not
+ exist?
+ @type create: bool
+ @rtype: dns.rrset.RRset object
+ """
+
+ try:
+ rdataset = self.find_rdataset(name, rdtype, covers, create)
+ except KeyError:
+ rdataset = None
+ return rdataset
+
    def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Delete the rdataset matching I{rdtype} and I{covers}, if it
        exists at the node specified by I{name}.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        It is not an error if the node does not exist, or if there is no
        matching rdataset at the node.

        If the node has no rdatasets after the deletion, it will itself
        be deleted.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """

        name = self._validate_name(name)
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, (str, unicode)):
            covers = dns.rdatatype.from_text(covers)
        node = self.get_node(name)
        if not node is None:
            node.delete_rdataset(self.rdclass, rdtype, covers)
            # Prune nodes left empty by the deletion.
            if len(node) == 0:
                self.delete_node(name)
+
    def replace_rdataset(self, name, replacement):
        """Replace an rdataset at name.

        It is not an error if there is no rdataset matching I{replacement}.

        Ownership of the I{replacement} object is transferred to the zone;
        in other words, this method does not store a copy of I{replacement}
        at the node, it stores I{replacement} itself.

        If the I{name} node does not exist, it is created.

        @param name: the owner name
        @type name: DNS.name.Name object or string
        @param replacement: the replacement rdataset
        @type replacement: dns.rdataset.Rdataset
        @raises ValueError: the replacement's class differs from the zone's
        """

        if replacement.rdclass != self.rdclass:
            raise ValueError('replacement.rdclass != zone.rdclass')
        # find_node validates/relativizes the name and creates the node.
        node = self.find_node(name, True)
        node.replace_rdataset(replacement)
+
    def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Look for rdata with the specified name and type in the zone,
        and return an RRset encapsulating it.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        This method is less efficient than the similar
        L{find_rdataset} because it creates an RRset instead of
        returning the matching rdataset. It may be more convenient
        for some uses since it returns an object which binds the owner
        name to the rdata.

        This method may not be used to create new nodes or rdatasets;
        use L{find_rdataset} instead.

        KeyError is raised if the name or type are not found.
        Use L{get_rrset} if you want to have None returned instead.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rrset.RRset object
        """

        name = self._validate_name(name)
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, (str, unicode)):
            covers = dns.rdatatype.from_text(covers)
        rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
        # Copy the matching rdatas into a fresh RRset bound to the owner
        # name; the zone's stored rdataset is not exposed directly.
        rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
        rrset.update(rdataset)
        return rrset
+
+    def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
+        """Look for rdata with the specified name and type in the zone,
+        and return an RRset encapsulating it.
+
+        The I{name}, I{rdtype}, and I{covers} parameters may be
+        strings, in which case they will be converted to their proper
+        type.
+
+        This method is less efficient than the similar L{get_rdataset}
+        because it creates an RRset instead of returning the matching
+        rdataset. It may be more convenient for some uses since it
+        returns an object which binds the owner name to the rdata.
+
+        This method may not be used to create new nodes or rdatasets;
+        use L{find_rdataset} instead.
+
+        None is returned if the name or type are not found.
+        Use L{find_rrset} if you want to have KeyError raised instead.
+
+        @param name: the owner name to look for
+        @type name: DNS.name.Name object or string
+        @param rdtype: the rdata type desired
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        @rtype: dns.rrset.RRset object
+        """
+
+        # Thin wrapper: same semantics as find_rrset, but a missing name or
+        # type yields None instead of KeyError.
+        try:
+            rrset = self.find_rrset(name, rdtype, covers)
+        except KeyError:
+            rrset = None
+        return rrset
+
+    def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
+                          covers=dns.rdatatype.NONE):
+        """Return a generator which yields (name, rdataset) tuples for
+        all rdatasets in the zone which have the specified I{rdtype}
+        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
+        then all rdatasets will be matched.
+
+        @param rdtype: int or string
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        """
+
+        # Accept textual type mnemonics; 'unicode' is the Python 2 text type.
+        if isinstance(rdtype, (str, unicode)):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(covers, (str, unicode)):
+            covers = dns.rdatatype.from_text(covers)
+        # Walk every node; ANY is a wildcard, otherwise both the type and
+        # the covered type must match exactly.
+        for (name, node) in self.iteritems():
+            for rds in node:
+                if rdtype == dns.rdatatype.ANY or \
+                   (rds.rdtype == rdtype and rds.covers == covers):
+                    yield (name, rds)
+
+    def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
+                       covers=dns.rdatatype.NONE):
+        """Return a generator which yields (name, ttl, rdata) tuples for
+        all rdatas in the zone which have the specified I{rdtype}
+        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
+        then all rdatas will be matched.
+
+        @param rdtype: int or string
+        @type rdtype: int or string
+        @param covers: the covered type (defaults to None)
+        @type covers: int or string
+        """
+
+        # Accept textual type mnemonics; 'unicode' is the Python 2 text type.
+        if isinstance(rdtype, (str, unicode)):
+            rdtype = dns.rdatatype.from_text(rdtype)
+        if isinstance(covers, (str, unicode)):
+            covers = dns.rdatatype.from_text(covers)
+        # Like iterate_rdatasets, but flattened one level: each individual
+        # rdata is yielded along with its rdataset's TTL.
+        for (name, node) in self.iteritems():
+            for rds in node:
+                if rdtype == dns.rdatatype.ANY or \
+                   (rds.rdtype == rdtype and rds.covers == covers):
+                    for rdata in rds:
+                        yield (name, rds.ttl, rdata)
+
+    def to_file(self, f, sorted=True, relativize=True, nl=None):
+        """Write a zone to a file.
+
+        @param f: file or string. If I{f} is a string, it is treated
+        as the name of a file to open.
+        @param sorted: if True, the file will be written with the
+        names sorted in DNSSEC order from least to greatest. Otherwise
+        the names will be written in whatever order they happen to have
+        in the zone's dictionary.
+        @param relativize: if True, domain names in the output will be
+        relativized to the zone's origin (if possible).
+        @type relativize: bool
+        @param nl: The end of line string. If not specified, the
+        output will use the platform's native end-of-line marker (i.e.
+        LF on POSIX, CRLF on Windows, CR on Macintosh).
+        @type nl: string or None
+        """
+
+        # On Python >= 2.3 a filename may be a unicode string too.
+        if sys.hexversion >= 0x02030000:
+            # allow Unicode filenames
+            str_type = basestring
+        else:
+            str_type = str
+        # Text mode lets the platform translate newlines; binary mode is
+        # used when the caller supplies an explicit line terminator.
+        if nl is None:
+            opts = 'w'
+        else:
+            opts = 'wb'
+        if isinstance(f, str_type):
+            # Python 2 built-in file(); we own the handle and must close it.
+            f = file(f, opts)
+            want_close = True
+        else:
+            want_close = False
+        try:
+            if sorted:
+                # keys() gives a list we can sort; this is DNSSEC name order
+                # only because dns.name.Name defines that ordering.
+                names = self.keys()
+                names.sort()
+            else:
+                names = self.iterkeys()
+            for n in names:
+                l = self[n].to_text(n, origin=self.origin,
+                                    relativize=relativize)
+                if nl is None:
+                    # Python 2 print statement appends the platform newline.
+                    print >> f, l
+                else:
+                    f.write(l)
+                    f.write(nl)
+        finally:
+            # Only close handles we opened ourselves.
+            if want_close:
+                f.close()
+
+    def check_origin(self):
+        """Do some simple checking of the zone's origin.
+
+        @raises dns.zone.NoSOA: there is no SOA RR
+        @raises dns.zone.NoNS: there is no NS RRset
+        @raises KeyError: there is no origin node
+        """
+        # In a relativized zone the origin node is stored under the empty
+        # name; otherwise it is stored under the absolute origin.
+        if self.relativize:
+            name = dns.name.empty
+        else:
+            name = self.origin
+        # A valid zone apex must have both an SOA record and an NS rrset.
+        if self.get_rdataset(name, dns.rdatatype.SOA) is None:
+            raise NoSOA
+        if self.get_rdataset(name, dns.rdatatype.NS) is None:
+            raise NoNS
+
+
+class _MasterReader(object):
+    """Read a DNS master file
+
+    @ivar tok: The tokenizer
+    @type tok: dns.tokenizer.Tokenizer object
+    @ivar ttl: The default TTL
+    @type ttl: int
+    @ivar last_name: The last name read
+    @type last_name: dns.name.Name object
+    @ivar current_origin: The current origin
+    @type current_origin: dns.name.Name object
+    @ivar relativize: should names in the zone be relativized?
+    @type relativize: bool
+    @ivar zone: the zone
+    @type zone: dns.zone.Zone object
+    @ivar saved_state: saved reader state (used when processing $INCLUDE)
+    @type saved_state: list of (tokenizer, current_origin, last_name, file)
+    tuples.
+    @ivar current_file: the file object of the $INCLUDed file being parsed
+    (None if no $INCLUDE is active).
+    @ivar allow_include: is $INCLUDE allowed?
+    @type allow_include: bool
+    @ivar check_origin: should sanity checks of the origin node be done?
+    The default is True.
+    @type check_origin: bool
+    """
+
+    def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
+                 allow_include=False, check_origin=True):
+        # Accept a textual origin; 'unicode' is the Python 2 text type.
+        if isinstance(origin, (str, unicode)):
+            origin = dns.name.from_text(origin)
+        self.tok = tok
+        self.current_origin = origin
+        self.relativize = relativize
+        # Default TTL used when an RR line omits it; updated by $TTL.
+        self.ttl = 0
+        self.last_name = None
+        self.zone = zone_factory(origin, rdclass, relativize=relativize)
+        # Stack of reader states pushed on $INCLUDE, popped at EOF of the
+        # included file.
+        self.saved_state = []
+        self.current_file = None
+        self.allow_include = allow_include
+        self.check_origin = check_origin
+
+    def _eat_line(self):
+        # Discard tokens through the next end-of-line (or EOF); used to
+        # skip RRs whose owner is outside the zone.
+        while 1:
+            token = self.tok.get()
+            if token.is_eol_or_eof():
+                break
+
+    def _rr_line(self):
+        """Process one line from a DNS master file."""
+        # Name
+        if self.current_origin is None:
+            raise UnknownOrigin
+        # want_leading=True so we can tell "name present" (token at column
+        # 0) from "name omitted" (leading whitespace => reuse last_name).
+        token = self.tok.get(want_leading = True)
+        if not token.is_whitespace():
+            self.last_name = dns.name.from_text(token.value, self.current_origin)
+        else:
+            token = self.tok.get()
+            if token.is_eol_or_eof():
+                # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
+                return
+            self.tok.unget(token)
+        name = self.last_name
+        # Silently skip (but fully consume) records that are out of zone.
+        if not name.is_subdomain(self.zone.origin):
+            self._eat_line()
+            return
+        if self.relativize:
+            name = name.relativize(self.zone.origin)
+        token = self.tok.get()
+        if not token.is_identifier():
+            raise dns.exception.SyntaxError
+        # TTL
+        # TTL and class are both optional; each parse attempt falls back to
+        # the current default when the token is not of that kind.
+        try:
+            ttl = dns.ttl.from_text(token.value)
+            token = self.tok.get()
+            if not token.is_identifier():
+                raise dns.exception.SyntaxError
+        except dns.ttl.BadTTL:
+            ttl = self.ttl
+        # Class
+        try:
+            rdclass = dns.rdataclass.from_text(token.value)
+            token = self.tok.get()
+            if not token.is_identifier():
+                raise dns.exception.SyntaxError
+        except dns.exception.SyntaxError:
+            raise dns.exception.SyntaxError
+        except:
+            # Bare except is intentional here: any non-syntax failure means
+            # the token was not a class, so assume the zone's class.
+            rdclass = self.zone.rdclass
+        if rdclass != self.zone.rdclass:
+            raise dns.exception.SyntaxError("RR class is not zone's class")
+        # Type
+        try:
+            rdtype = dns.rdatatype.from_text(token.value)
+        except:
+            raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value)
+        # Get (or lazily create) the node for this owner name.
+        n = self.zone.nodes.get(name)
+        if n is None:
+            n = self.zone.node_factory()
+            self.zone.nodes[name] = n
+        try:
+            rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
+                                     self.current_origin, False)
+        except dns.exception.SyntaxError:
+            # Catch and reraise.
+            (ty, va) = sys.exc_info()[:2]
+            raise va
+        except:
+            # All exceptions that occur in the processing of rdata
+            # are treated as syntax errors. This is not strictly
+            # correct, but it is correct almost all of the time.
+            # We convert them to syntax errors so that we can emit
+            # helpful filename:line info.
+            (ty, va) = sys.exc_info()[:2]
+            raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va)))
+
+        rd.choose_relativity(self.zone.origin, self.relativize)
+        covers = rd.covers()
+        # create=True: the rdataset is made on first use.
+        rds = n.find_rdataset(rdclass, rdtype, covers, True)
+        rds.add(rd, ttl)
+
+    def read(self):
+        """Read a DNS master file and build a zone object.
+
+        @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+        @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+        """
+
+        try:
+            while 1:
+                token = self.tok.get(True, True).unescape()
+                if token.is_eof():
+                    if not self.current_file is None:
+                        self.current_file.close()
+                    # EOF of an $INCLUDEd file: pop back to the including
+                    # file's saved reader state and keep going.
+                    if len(self.saved_state) > 0:
+                        (self.tok,
+                         self.current_origin,
+                         self.last_name,
+                         self.current_file,
+                         self.ttl) = self.saved_state.pop(-1)
+                        continue
+                    break
+                elif token.is_eol():
+                    continue
+                elif token.is_comment():
+                    self.tok.get_eol()
+                    continue
+                elif token.value[0] == '$':
+                    # Master file directive: $TTL, $ORIGIN or $INCLUDE.
+                    u = token.value.upper()
+                    if u == '$TTL':
+                        token = self.tok.get()
+                        if not token.is_identifier():
+                            raise dns.exception.SyntaxError("bad $TTL")
+                        self.ttl = dns.ttl.from_text(token.value)
+                        self.tok.get_eol()
+                    elif u == '$ORIGIN':
+                        self.current_origin = self.tok.get_name()
+                        self.tok.get_eol()
+                        # The first $ORIGIN fixes the zone origin when none
+                        # was supplied to the constructor.
+                        if self.zone.origin is None:
+                            self.zone.origin = self.current_origin
+                    elif u == '$INCLUDE' and self.allow_include:
+                        token = self.tok.get()
+                        if not token.is_quoted_string():
+                            raise dns.exception.SyntaxError("bad filename in $INCLUDE")
+                        filename = token.value
+                        token = self.tok.get()
+                        # Optional second argument: a new origin for the
+                        # included file.
+                        if token.is_identifier():
+                            new_origin = dns.name.from_text(token.value, \
+                                                            self.current_origin)
+                            self.tok.get_eol()
+                        elif not token.is_eol_or_eof():
+                            raise dns.exception.SyntaxError("bad origin in $INCLUDE")
+                        else:
+                            new_origin = self.current_origin
+                        # Push current state, then switch to the included
+                        # file; restored at its EOF above.
+                        self.saved_state.append((self.tok,
+                                                 self.current_origin,
+                                                 self.last_name,
+                                                 self.current_file,
+                                                 self.ttl))
+                        self.current_file = file(filename, 'r')
+                        self.tok = dns.tokenizer.Tokenizer(self.current_file,
+                                                           filename)
+                        self.current_origin = new_origin
+                    else:
+                        raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'")
+                    continue
+                # Not a directive: put the token back and parse it as an RR.
+                self.tok.unget(token)
+                self._rr_line()
+        except dns.exception.SyntaxError, detail:
+            # Re-raise with filename:line context from the tokenizer.
+            (filename, line_number) = self.tok.where()
+            if detail is None:
+                detail = "syntax error"
+            raise dns.exception.SyntaxError("%s:%d: %s" % (filename, line_number, detail))
+
+        # Now that we're done reading, do some basic checking of the zone.
+        if self.check_origin:
+            self.zone.check_origin()
+
+def from_text(text, origin = None, rdclass = dns.rdataclass.IN,
+              relativize = True, zone_factory=Zone, filename=None,
+              allow_include=False, check_origin=True):
+    """Build a zone object from a master file format string.
+
+    @param text: the master file format input
+    @type text: string.
+    @param origin: The origin of the zone; if not specified, the first
+    $ORIGIN statement in the master file will determine the origin of the
+    zone.
+    @type origin: dns.name.Name object or string
+    @param rdclass: The zone's rdata class; the default is class IN.
+    @type rdclass: int
+    @param relativize: should names be relativized? The default is True
+    @type relativize: bool
+    @param zone_factory: The zone factory to use
+    @type zone_factory: function returning a Zone
+    @param filename: The filename to emit when describing where an error
+    occurred; the default is '<string>'.
+    @type filename: string
+    @param allow_include: is $INCLUDE allowed?
+    @type allow_include: bool
+    @param check_origin: should sanity checks of the origin node be done?
+    The default is True.
+    @type check_origin: bool
+    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+    @rtype: dns.zone.Zone object
+    """
+
+    # 'text' can also be a file, but we don't publish that fact
+    # since it's an implementation detail. The official file
+    # interface is from_file().
+
+    if filename is None:
+        filename = '<string>'
+    tok = dns.tokenizer.Tokenizer(text, filename)
+    # The reader accumulates the zone as a side effect of read().
+    reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,
+                           allow_include=allow_include,
+                           check_origin=check_origin)
+    reader.read()
+    return reader.zone
+
+def from_file(f, origin = None, rdclass = dns.rdataclass.IN,
+              relativize = True, zone_factory=Zone, filename=None,
+              allow_include=True, check_origin=True):
+    """Read a master file and build a zone object.
+
+    @param f: file or string. If I{f} is a string, it is treated
+    as the name of a file to open.
+    @param origin: The origin of the zone; if not specified, the first
+    $ORIGIN statement in the master file will determine the origin of the
+    zone.
+    @type origin: dns.name.Name object or string
+    @param rdclass: The zone's rdata class; the default is class IN.
+    @type rdclass: int
+    @param relativize: should names be relativized? The default is True
+    @type relativize: bool
+    @param zone_factory: The zone factory to use
+    @type zone_factory: function returning a Zone
+    @param filename: The filename to emit when describing where an error
+    occurred; the default is '<file>', or the value of I{f} if I{f} is a
+    string.
+    @type filename: string
+    @param allow_include: is $INCLUDE allowed?
+    @type allow_include: bool
+    @param check_origin: should sanity checks of the origin node be done?
+    The default is True.
+    @type check_origin: bool
+    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+    @rtype: dns.zone.Zone object
+    """
+
+    # On Python >= 2.3 filenames may be unicode and we can ask for
+    # universal newline handling.
+    if sys.hexversion >= 0x02030000:
+        # allow Unicode filenames; turn on universal newline support
+        str_type = basestring
+        opts = 'rU'
+    else:
+        str_type = str
+        opts = 'r'
+    if isinstance(f, str_type):
+        if filename is None:
+            filename = f
+        # Python 2 built-in file(); we own the handle and must close it.
+        f = file(f, opts)
+        want_close = True
+    else:
+        if filename is None:
+            filename = '<file>'
+        want_close = False
+
+    try:
+        # from_text() accepts a file object as an implementation detail.
+        z = from_text(f, origin, rdclass, relativize, zone_factory,
+                      filename, allow_include, check_origin)
+    finally:
+        if want_close:
+            f.close()
+    return z
+
+def from_xfr(xfr, zone_factory=Zone, relativize=True):
+    """Convert the output of a zone transfer generator into a zone object.
+
+    @param xfr: The xfr generator
+    @type xfr: generator of dns.message.Message objects
+    @param relativize: should names be relativized? The default is True.
+    It is essential that the relativize setting matches the one specified
+    to dns.query.xfr().
+    @type relativize: bool
+    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+    @rtype: dns.zone.Zone object
+    """
+
+    z = None
+    for r in xfr:
+        # The zone is created lazily from the first message: its origin and
+        # class are taken from the first answer rrset.
+        if z is None:
+            if relativize:
+                origin = r.origin
+            else:
+                origin = r.answer[0].name
+            rdclass = r.answer[0].rdclass
+            z = zone_factory(origin, rdclass, relativize=relativize)
+        for rrset in r.answer:
+            # Get (or lazily create) the node for this owner name.
+            znode = z.nodes.get(rrset.name)
+            if not znode:
+                znode = z.node_factory()
+                z.nodes[rrset.name] = znode
+            # create=True: make the rdataset on first use.
+            zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
+                                       rrset.covers, True)
+            zrds.update_ttl(rrset.ttl)
+            for rd in rrset:
+                rd.choose_relativity(z.origin, relativize)
+                zrds.add(rd)
+    # Verify the transferred zone has a proper apex (SOA + NS).
+    z.check_origin()
+    return z
diff --git a/lib/dnspython/examples/ddns.py b/lib/dnspython/examples/ddns.py
new file mode 100755
index 0000000000..84814b73cf
--- /dev/null
+++ b/lib/dnspython/examples/ddns.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+#
+# Use a TSIG-signed DDNS update to update our hostname-to-address
+# mapping.
+#
+# usage: ddns.py <ip-address>
+#
+# On linux systems, you can automatically update your DNS any time an
+# interface comes up by adding an ifup-local script that invokes this
+# python code.
+#
+# E.g. on my systems I have this
+#
+# #!/bin/sh
+#
+# DEVICE=$1
+#
+# if [ "X${DEVICE}" == "Xeth0" ]; then
+# IPADDR=`LANG= LC_ALL= ifconfig ${DEVICE} | grep 'inet addr' |
+# awk -F: '{ print $2 } ' | awk '{ print $1 }'`
+# /usr/local/sbin/ddns.py $IPADDR
+# fi
+#
+# in /etc/ifup-local.
+#
+
+import sys
+
+import dns.update
+import dns.query
+import dns.tsigkeyring
+
+#
+# Replace the keyname and secret with appropriate values for your
+# configuration.
+#
+# NOTE: the secret is a base64-encoded TSIG key shared with the server.
+keyring = dns.tsigkeyring.from_text({
+    'keyname.' : 'NjHwPsMKjdN++dOfE5iAiQ=='
+    })
+
+#
+# Replace "example." with your domain, and "host" with your hostname.
+#
+# replace() deletes any existing A rrset for 'host' and installs the new
+# address (sys.argv[1]) with a 300 second TTL.
+update = dns.update.Update('example.', keyring=keyring)
+update.replace('host', 300, 'A', sys.argv[1])
+
+#
+# Replace "10.0.0.1" with the IP address of your master server.
+#
+# The update is sent over TCP; the response is discarded (best-effort).
+response = dns.query.tcp(update, '10.0.0.1', timeout=10)
diff --git a/lib/dnspython/examples/e164.py b/lib/dnspython/examples/e164.py
new file mode 100755
index 0000000000..ad40ccf84b
--- /dev/null
+++ b/lib/dnspython/examples/e164.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+import dns.e164
+# Convert an E.164 telephone number to its ENUM domain name and back.
+n = dns.e164.from_e164("+1 555 1212")
+print n
+print dns.e164.to_e164(n)
diff --git a/lib/dnspython/examples/mx.py b/lib/dnspython/examples/mx.py
new file mode 100755
index 0000000000..3036e70ddf
--- /dev/null
+++ b/lib/dnspython/examples/mx.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+import dns.resolver
+
+# Query the MX rrset for a domain and print each exchange/preference pair.
+answers = dns.resolver.query('nominum.com', 'MX')
+for rdata in answers:
+    print 'Host', rdata.exchange, 'has preference', rdata.preference
diff --git a/lib/dnspython/examples/name.py b/lib/dnspython/examples/name.py
new file mode 100755
index 0000000000..b099c49d16
--- /dev/null
+++ b/lib/dnspython/examples/name.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+
+import dns.name
+
+# Demonstrate dns.name.Name relations: sub/superdomain tests, DNSSEC
+# ordering, relativization and concatenation.
+n = dns.name.from_text('www.dnspython.org')
+o = dns.name.from_text('dnspython.org')
+print n.is_subdomain(o)         # True
+print n.is_superdomain(o)       # False
+print n > o                     # True
+rel = n.relativize(o)           # rel is the relative name www
+n2 = rel + o
+print n2 == n                   # True
+print n.labels                  # ['www', 'dnspython', 'org', '']
diff --git a/lib/dnspython/examples/reverse.py b/lib/dnspython/examples/reverse.py
new file mode 100755
index 0000000000..8657baed44
--- /dev/null
+++ b/lib/dnspython/examples/reverse.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Usage: reverse.py <zone_filename>...
+#
+# This demo script will load in all of the zones specified by the
+# filenames on the command line, find all the A RRs in them, and
+# construct a reverse mapping table that maps each IP address used to
+# the list of names mapping to that address. The table is then sorted
+# nicely and printed.
+#
+# Note! The zone name is taken from the basename of the filename, so
+# you must use filenames like "/wherever/you/like/dnspython.org" and
+# not something like "/wherever/you/like/foo.db" (unless you're
+# working with the ".db" GTLD, of course :)).
+#
+# If this weren't a demo script, there'd be a way of specifying the
+# origin for each zone instead of constructing it from the filename.
+
+import dns.zone
+import dns.ipv4
+import os.path
+import sys
+
+# address (text) -> list of owner names that have an A record for it
+reverse_map = {}
+
+for filename in sys.argv[1:]:
+    # The zone origin is derived from the file's basename (see header note).
+    zone = dns.zone.from_file(filename, os.path.basename(filename),
+                              relativize=False)
+    for (name, ttl, rdata) in zone.iterate_rdatas('A'):
+        try:
+            reverse_map[rdata.address].append(name.to_text())
+        except KeyError:
+            reverse_map[rdata.address] = [name.to_text()]
+
+# Sort addresses numerically (by packed binary form), not lexically.
+keys = reverse_map.keys()
+keys.sort(lambda a1, a2: cmp(dns.ipv4.inet_aton(a1), dns.ipv4.inet_aton(a2)))
+for k in keys:
+    v = reverse_map[k]
+    v.sort()
+    print k, v
diff --git a/lib/dnspython/examples/reverse_name.py b/lib/dnspython/examples/reverse_name.py
new file mode 100755
index 0000000000..351896b015
--- /dev/null
+++ b/lib/dnspython/examples/reverse_name.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+import dns.reversename
+# Map an IP address to its in-addr.arpa name and back.
+n = dns.reversename.from_address("127.0.0.1")
+print n
+print dns.reversename.to_address(n)
diff --git a/lib/dnspython/examples/xfr.py b/lib/dnspython/examples/xfr.py
new file mode 100755
index 0000000000..5cd6f55c06
--- /dev/null
+++ b/lib/dnspython/examples/xfr.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+import dns.query
+import dns.zone
+
+# Perform an AXFR of dnspython.org and print the zone in sorted name order.
+z = dns.zone.from_xfr(dns.query.xfr('204.152.189.147', 'dnspython.org'))
+names = z.nodes.keys()
+names.sort()
+for n in names:
+    print z[n].to_text(n)
diff --git a/lib/dnspython/examples/zonediff.py b/lib/dnspython/examples/zonediff.py
new file mode 100755
index 0000000000..ad81fb1d2d
--- /dev/null
+++ b/lib/dnspython/examples/zonediff.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+#
+# Small library and commandline tool to do logical diffs of zonefiles
+# ./zonediff -h gives you help output
+#
+# Requires dnspython to do all the heavy lifting
+#
+# (c)2009 Dennis Kaarsemaker <dennis@kaarsemaker.net>
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""See diff_zones.__doc__ for more information"""
+
+__all__ = ['diff_zones', 'format_changes_plain', 'format_changes_html']
+
+# dnspython is the only hard dependency; fail with a clear message when it
+# is missing rather than a bare ImportError traceback.
+try:
+    import dns.zone
+except ImportError:
+    import sys
+    sys.stderr.write("Please install dnspython")
+    sys.exit(1)
+
+def diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False):
+    """diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False) -> changes
+    Compares two dns.zone.Zone objects and returns a list of all changes
+    in the format (name, oldnode, newnode).
+
+    If ignore_ttl is true, a node will not be added to this list if the
+    only change is its TTL.
+
+    If ignore_soa is true, a node will not be added to this list if the
+    only changes is a change in a SOA Rdata set.
+
+    The returned nodes do include all Rdata sets, including unchanged ones.
+    """
+
+    changes = []
+    # Pass 1: names present in zone1 that were removed or modified.
+    for name in zone1:
+        name = str(name)
+        n1 = zone1.get_node(name)
+        n2 = zone2.get_node(name)
+        if not n2:
+            changes.append((str(name), n1, n2))
+        elif _nodes_differ(n1, n2, ignore_ttl, ignore_soa):
+            changes.append((str(name), n1, n2))
+
+    # Pass 2: names that only exist in zone2 (additions).
+    for name in zone2:
+        n1 = zone1.get_node(name)
+        if not n1:
+            n2 = zone2.get_node(name)
+            changes.append((str(name), n1, n2))
+    return changes
+
+def _nodes_differ(n1, n2, ignore_ttl, ignore_soa):
+    # Return True when two nodes differ under the given ignore flags.
+    if ignore_soa or not ignore_ttl:
+        # Compare datasets directly
+        for r in n1.rdatasets:
+            if ignore_soa and r.rdtype == dns.rdatatype.SOA:
+                continue
+            if r not in n2.rdatasets:
+                return True
+            if not ignore_ttl:
+                # NOTE(review): this returns on the FIRST rdataset common to
+                # both nodes, so TTL differences (and membership checks) on
+                # later rdatasets are never examined. Looks like it should
+                # be 'if r.ttl != ...: return True' with the loop continuing
+                # — confirm against upstream zonediff before relying on it.
+                return r.ttl != n2.find_rdataset(r.rdclass, r.rdtype).ttl
+
+        # Anything present in n2 but absent from n1 is also a difference.
+        for r in n2.rdatasets:
+            if ignore_soa and r.rdtype == dns.rdatatype.SOA:
+                continue
+            if r not in n1.rdatasets:
+                return True
+    else:
+        # ignore_ttl without ignore_soa: rely on node equality directly.
+        return n1 != n2
+
+def format_changes_plain(oldf, newf, changes, ignore_ttl=False):
+    """format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str
+    Given 2 filenames and a list of changes from diff_zones, produce diff-like
+    output. If ignore_ttl is True, TTL-only changes are not displayed"""
+
+    ret = "--- %s\n+++ %s\n" % (oldf, newf)
+    for name, old, new in changes:
+        ret += "@ %s\n" % name
+        if not old:
+            # Name only in the new zone: every rdataset is an addition.
+            ret += "+ %s\n" % str(r).replace('\n','\n+ ')
+        elif not new:
+            # Name only in the old zone: every rdataset is a removal.
+            # NOTE(review): the continuation prefix here is '\n+ ' although
+            # these are '-' lines; should presumably be '\n- '. Same issue
+            # in the mixed branch below. Confirm against upstream zonediff.
+            for r in old.rdatasets:
+                ret += "- %s\n" % str(r).replace('\n','\n+ ')
+        else:
+            # Name in both zones: emit removed then added rdatasets,
+            # honouring ignore_ttl for TTL-only differences.
+            for r in old.rdatasets:
+                if r not in new.rdatasets or (r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
+                    ret += "- %s\n" % str(r).replace('\n','\n+ ')
+            for r in new.rdatasets:
+                if r not in old.rdatasets or (r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
+                    ret += "+ %s\n" % str(r).replace('\n','\n+ ')
+    return ret
+
+def format_changes_html(oldf, newf, changes, ignore_ttl=False):
+    """format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str
+    Given 2 filenames and a list of changes from diff_zones, produce nice html
+    output. If ignore_ttl is True, TTL-only changes are not displayed"""
+
+    # Two-column table: old zone on the left, new zone on the right.
+    ret = '''<table class="zonediff">
+  <thead>
+    <tr>
+      <th>&nbsp;</th>
+      <th class="old">%s</th>
+      <th class="new">%s</th>
+    </tr>
+  </thead>
+  <tbody>\n''' % (oldf, newf)
+
+    for name, old, new in changes:
+        ret += '    <tr class="rdata">\n      <td class="rdname">%s</td>\n' % name
+        if not old:
+            # Addition: empty old cell, rdatasets in the new cell.
+            for r in new.rdatasets:
+                ret += '      <td class="old">&nbsp;</td>\n      <td class="new">%s</td>\n' % str(r).replace('\n','<br />')
+        elif not new:
+            # Removal: rdatasets in the old cell, empty new cell.
+            for r in old.rdatasets:
+                ret += '      <td class="old">%s</td>\n      <td class="new">&nbsp;</td>\n' % str(r).replace('\n','<br />')
+        else:
+            # Modification: show differing rdatasets side by side,
+            # honouring ignore_ttl for TTL-only differences.
+            ret += '      <td class="old">'
+            for r in old.rdatasets:
+                if r not in new.rdatasets or (r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
+                    ret += str(r).replace('\n','<br />')
+            ret += '</td>\n'
+            ret += '      <td class="new">'
+            for r in new.rdatasets:
+                if r not in old.rdatasets or (r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
+                    ret += str(r).replace('\n','<br />')
+            ret += '</td>\n'
+        ret += '    </tr>\n'
+    return ret + '  </tbody>\n</table>'
+
+# Make this module usable as a script too.
+if __name__ == '__main__':
+    import optparse
+    import subprocess
+    import sys
+    import traceback
+
+    usage = """%prog zonefile1 zonefile2 - Show differences between zones in a diff-like format
+%prog [--git|--bzr|--rcs] zonefile rev1 [rev2] - Show differences between two revisions of a zonefile
+
+The differences shown will be logical differences, not textual differences.
+"""
+    p = optparse.OptionParser(usage=usage)
+    p.add_option('-s', '--ignore-soa', action="store_true", default=False, dest="ignore_soa",
+                 help="Ignore SOA-only changes to records")
+    p.add_option('-t', '--ignore-ttl', action="store_true", default=False, dest="ignore_ttl",
+                 help="Ignore TTL-only changes to Rdata")
+    p.add_option('-T', '--traceback', action="store_true", default=False, dest="tracebacks",
+                 help="Show python tracebacks when errors occur")
+    p.add_option('-H', '--html', action="store_true", default=False, dest="html",
+                 help="Print HTML output")
+    p.add_option('-g', '--git', action="store_true", default=False, dest="use_git",
+                 help="Use git revisions instead of real files")
+    p.add_option('-b', '--bzr', action="store_true", default=False, dest="use_bzr",
+                 help="Use bzr revisions instead of real files")
+    p.add_option('-r', '--rcs', action="store_true", default=False, dest="use_rcs",
+                 help="Use rcs revisions instead of real files")
+    opts, args = p.parse_args()
+    # Convenience flag: any of the version-control modes selected?
+    opts.use_vc = opts.use_git or opts.use_bzr or opts.use_rcs
+
+    def _open(what, err):
+        # Open either a plain file (string argument) or the stdout of a
+        # version-control subprocess (list argument). Returns a readable
+        # file object, or None on failure (after printing 'err').
+        if isinstance(what, basestring):
+            # Open as normal file
+            try:
+                return open(what, 'rb')
+            except:
+                sys.stderr.write(err + "\n")
+                if opts.tracebacks:
+                    traceback.print_exc()
+        else:
+            # Must be a list, open subprocess
+            try:
+                proc = subprocess.Popen(what, stdout=subprocess.PIPE)
+                proc.wait()
+                if proc.returncode == 0:
+                    return proc.stdout
+                sys.stderr.write(err + "\n")
+            except:
+                sys.stderr.write(err + "\n")
+                if opts.tracebacks:
+                    traceback.print_exc()
+
+    # Argument count validation; exit 64 (EX_USAGE) on misuse.
+    if not opts.use_vc and len(args) != 2:
+        p.print_help()
+        sys.exit(64)
+    if opts.use_vc and len(args) not in (2,3):
+        p.print_help()
+        sys.exit(64)
+
+    # Open file desriptors
+    if not opts.use_vc:
+        oldn, newn = args
+    else:
+        if len(args) == 3:
+            # Two revisions of one file.
+            filename, oldr, newr = args
+            oldn = "%s:%s" % (oldr, filename)
+            newn = "%s:%s" % (newr, filename)
+        else:
+            # One revision vs the working copy.
+            filename, oldr = args
+            newr = None
+            oldn = "%s:%s" % (oldr, filename)
+            newn = filename
+
+
+    old, new = None, None
+    oldz, newz = None, None
+    if opts.use_bzr:
+        old = _open(["bzr", "cat", "-r" + oldr, filename],
+                    "Unable to retrieve revision %s of %s" % (oldr, filename))
+        if newr != None:
+            new = _open(["bzr", "cat", "-r" + newr, filename],
+                        "Unable to retrieve revision %s of %s" % (newr, filename))
+    elif opts.use_git:
+        old = _open(["git", "show", oldn],
+                    "Unable to retrieve revision %s of %s" % (oldr, filename))
+        if newr != None:
+            new = _open(["git", "show", newn],
+                        "Unable to retrieve revision %s of %s" % (newr, filename))
+    elif opts.use_rcs:
+        old = _open(["co", "-q", "-p", "-r" + oldr, filename],
+                    "Unable to retrieve revision %s of %s" % (oldr, filename))
+        if newr != None:
+            new = _open(["co", "-q", "-p", "-r" + newr, filename],
+                        "Unable to retrieve revision %s of %s" % (newr, filename))
+    # Plain files (and the working copy in single-revision VC mode).
+    if not opts.use_vc:
+        old = _open(oldn, "Unable to open %s" % oldn)
+    if not opts.use_vc or newr == None:
+        new = _open(newn, "Unable to open %s" % newn)
+
+    if not old or not new:
+        sys.exit(65)
+
+    # Parse the zones
+    try:
+        oldz = dns.zone.from_file(old, origin = '.', check_origin=False)
+    except dns.exception.DNSException:
+        # NOTE(review): write() takes a single string; this passes the value
+        # as a second positional argument ("," instead of "%"), which would
+        # raise TypeError if this error path is hit. Compare the correct
+        # "%" form used for 'new' below.
+        sys.stderr.write("Incorrect zonefile: %s\n", old)
+        if opts.tracebacks:
+            traceback.print_exc()
+    try:
+        newz = dns.zone.from_file(new, origin = '.', check_origin=False)
+    except dns.exception.DNSException:
+        sys.stderr.write("Incorrect zonefile: %s\n" % new)
+        if opts.tracebacks:
+            traceback.print_exc()
+    if not oldz or not newz:
+        sys.exit(65)
+
+    changes = diff_zones(oldz, newz, opts.ignore_ttl, opts.ignore_soa)
+    changes.sort()
+
+    # Exit status mirrors diff(1): 0 = no differences, 1 = differences.
+    if not changes:
+        sys.exit(0)
+    if opts.html:
+        print format_changes_html(oldn, newn, changes, opts.ignore_ttl)
+    else:
+        print format_changes_plain(oldn, newn, changes, opts.ignore_ttl)
+    sys.exit(1)
diff --git a/lib/dnspython/setup.py b/lib/dnspython/setup.py
new file mode 100755
index 0000000000..f84711f795
--- /dev/null
+++ b/lib/dnspython/setup.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import sys
+from distutils.core import setup
+
+version = '1.9.3'
+
+# Base distutils metadata, valid for every supported Python version.
+kwargs = {
+    'name' : 'dnspython',
+    'version' : version,
+    'description' : 'DNS toolkit',
+    'long_description' : \
+    """dnspython is a DNS toolkit for Python. It supports almost all
+record types. It can be used for queries, zone transfers, and dynamic
+updates. It supports TSIG authenticated messages and EDNS0.
+
+dnspython provides both high and low level access to DNS. The high
+level classes perform queries for data of a given name, type, and
+class, and return an answer set. The low level classes allow
+direct manipulation of DNS zones, messages, names, and records.""",
+    'author' : 'Bob Halley',
+    'author_email' : 'halley@dnspython.org',
+    'license' : 'BSD-like',
+    'url' : 'http://www.dnspython.org',
+    'packages' : ['dns', 'dns.rdtypes', 'dns.rdtypes.IN', 'dns.rdtypes.ANY'],
+    }
+
+# 'download_url' and 'classifiers' require distutils from Python >= 2.2.3.
+if sys.hexversion >= 0x02020300:
+    kwargs['download_url'] = \
+        'http://www.dnspython.org/kits/%s/dnspython-%s.tar.gz' % (version,
+                                                                  version)
+    kwargs['classifiers'] = [
+        "Development Status :: 5 - Production/Stable",
+        "Intended Audience :: Developers",
+        "Intended Audience :: System Administrators",
+        "License :: Freeware",
+        "Operating System :: Microsoft :: Windows :: Windows 95/98/2000",
+        "Operating System :: POSIX",
+        "Programming Language :: Python",
+        "Topic :: Internet :: Name Service (DNS)",
+        "Topic :: Software Development :: Libraries :: Python Modules",
+        ]
+
+# 'requires'/'provides' metadata fields appeared in Python 2.5's distutils.
+if sys.hexversion >= 0x02050000:
+    kwargs['requires'] = []
+    kwargs['provides'] = ['dns']
+
+setup(**kwargs)
diff --git a/lib/dnspython/tests/Makefile b/lib/dnspython/tests/Makefile
new file mode 100644
index 0000000000..584f6a7da7
--- /dev/null
+++ b/lib/dnspython/tests/Makefile
@@ -0,0 +1,26 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# $Id: Makefile,v 1.5 2004/03/19 00:17:27 halley Exp $
+
+PYTHON=python
+
+check: test
+
+test:
+ @for i in *.py; do \
+ echo "Running $$i:"; \
+ ${PYTHON} $$i || exit 1; \
+ done
diff --git a/lib/dnspython/tests/bugs.py b/lib/dnspython/tests/bugs.py
new file mode 100644
index 0000000000..0896e3f02d
--- /dev/null
+++ b/lib/dnspython/tests/bugs.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.ttl
+
+class BugsTestCase(unittest.TestCase):
+
+ def test_float_LOC(self):
+ rdata = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.LOC,
+ "30 30 0.000 N 100 30 0.000 W 10.00m 20m 2000m 20m")
+ self.failUnless(rdata.float_latitude == 30.5)
+ self.failUnless(rdata.float_longitude == -100.5)
+
+ def test_SOA_BIND8_TTL(self):
+ rdata1 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
+ "a b 100 1s 1m 1h 1d")
+ rdata2 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
+ "a b 100 1 60 3600 86400")
+ self.failUnless(rdata1 == rdata2)
+
+ def test_TTL_bounds_check(self):
+ def bad():
+ ttl = dns.ttl.from_text("2147483648")
+ self.failUnlessRaises(dns.ttl.BadTTL, bad)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/dnssec.py b/lib/dnspython/tests/dnssec.py
new file mode 100644
index 0000000000..b30e847fba
--- /dev/null
+++ b/lib/dnspython/tests/dnssec.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.dnssec
+import dns.name
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rrset
+
+abs_dnspython_org = dns.name.from_text('dnspython.org')
+
+abs_keys = { abs_dnspython_org :
+ dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'DNSKEY',
+ '257 3 5 AwEAAenVTr9L1OMlL1/N2ta0Qj9LLLnnmFWIr1dJoAsWM9BQfsbV7kFZ XbAkER/FY9Ji2o7cELxBwAsVBuWn6IUUAJXLH74YbC1anY0lifjgt29z SwDzuB7zmC7yVYZzUunBulVW4zT0tg1aePbpVL2EtTL8VzREqbJbE25R KuQYHZtFwG8S4iBxJUmT2Bbd0921LLxSQgVoFXlQx/gFV2+UERXcJ5ce iX6A6wc02M/pdg/YbJd2rBa0MYL3/Fz/Xltre0tqsImZGxzi6YtYDs45 NC8gH+44egz82e2DATCVM1ICPmRDjXYTLldQiWA2ZXIWnK0iitl5ue24 7EsWJefrIhE=',
+ '256 3 5 AwEAAdSSghOGjU33IQZgwZM2Hh771VGXX05olJK49FxpSyuEAjDBXY58 LGU9R2Zgeecnk/b9EAhFu/vCV9oECtiTCvwuVAkt9YEweqYDluQInmgP NGMJCKdSLlnX93DkjDw8rMYv5dqXCuSGPlKChfTJOLQxIAxGloS7lL+c 0CTZydAF')
+ }
+
+rel_keys = { dns.name.empty :
+ dns.rrset.from_text('@', 3600, 'IN', 'DNSKEY',
+ '257 3 5 AwEAAenVTr9L1OMlL1/N2ta0Qj9LLLnnmFWIr1dJoAsWM9BQfsbV7kFZ XbAkER/FY9Ji2o7cELxBwAsVBuWn6IUUAJXLH74YbC1anY0lifjgt29z SwDzuB7zmC7yVYZzUunBulVW4zT0tg1aePbpVL2EtTL8VzREqbJbE25R KuQYHZtFwG8S4iBxJUmT2Bbd0921LLxSQgVoFXlQx/gFV2+UERXcJ5ce iX6A6wc02M/pdg/YbJd2rBa0MYL3/Fz/Xltre0tqsImZGxzi6YtYDs45 NC8gH+44egz82e2DATCVM1ICPmRDjXYTLldQiWA2ZXIWnK0iitl5ue24 7EsWJefrIhE=',
+ '256 3 5 AwEAAdSSghOGjU33IQZgwZM2Hh771VGXX05olJK49FxpSyuEAjDBXY58 LGU9R2Zgeecnk/b9EAhFu/vCV9oECtiTCvwuVAkt9YEweqYDluQInmgP NGMJCKdSLlnX93DkjDw8rMYv5dqXCuSGPlKChfTJOLQxIAxGloS7lL+c 0CTZydAF')
+ }
+
+when = 1290250287
+
+abs_soa = dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'SOA',
+ 'howl.dnspython.org. hostmaster.dnspython.org. 2010020047 3600 1800 604800 3600')
+
+abs_other_soa = dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'SOA',
+ 'foo.dnspython.org. hostmaster.dnspython.org. 2010020047 3600 1800 604800 3600')
+
+abs_soa_rrsig = dns.rrset.from_text('dnspython.org.', 3600, 'IN', 'RRSIG',
+ 'SOA 5 2 3600 20101127004331 20101119213831 61695 dnspython.org. sDUlltRlFTQw5ITFxOXW3TgmrHeMeNpdqcZ4EXxM9FHhIlte6V9YCnDw t6dvM9jAXdIEi03l9H/RAd9xNNW6gvGMHsBGzpvvqFQxIBR2PoiZA1mX /SWHZFdbt4xjYTtXqpyYvrMK0Dt7bUYPadyhPFCJ1B+I8Zi7B5WJEOd0 8vs=')
+
+rel_soa = dns.rrset.from_text('@', 3600, 'IN', 'SOA',
+ 'howl hostmaster 2010020047 3600 1800 604800 3600')
+
+rel_other_soa = dns.rrset.from_text('@', 3600, 'IN', 'SOA',
+ 'foo hostmaster 2010020047 3600 1800 604800 3600')
+
+rel_soa_rrsig = dns.rrset.from_text('@', 3600, 'IN', 'RRSIG',
+ 'SOA 5 2 3600 20101127004331 20101119213831 61695 @ sDUlltRlFTQw5ITFxOXW3TgmrHeMeNpdqcZ4EXxM9FHhIlte6V9YCnDw t6dvM9jAXdIEi03l9H/RAd9xNNW6gvGMHsBGzpvvqFQxIBR2PoiZA1mX /SWHZFdbt4xjYTtXqpyYvrMK0Dt7bUYPadyhPFCJ1B+I8Zi7B5WJEOd0 8vs=')
+
+sep_key = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY,
+ '257 3 5 AwEAAenVTr9L1OMlL1/N2ta0Qj9LLLnnmFWIr1dJoAsWM9BQfsbV7kFZ XbAkER/FY9Ji2o7cELxBwAsVBuWn6IUUAJXLH74YbC1anY0lifjgt29z SwDzuB7zmC7yVYZzUunBulVW4zT0tg1aePbpVL2EtTL8VzREqbJbE25R KuQYHZtFwG8S4iBxJUmT2Bbd0921LLxSQgVoFXlQx/gFV2+UERXcJ5ce iX6A6wc02M/pdg/YbJd2rBa0MYL3/Fz/Xltre0tqsImZGxzi6YtYDs45 NC8gH+44egz82e2DATCVM1ICPmRDjXYTLldQiWA2ZXIWnK0iitl5ue24 7EsWJefrIhE=')
+
+good_ds = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS,
+ '57349 5 2 53A79A3E7488AB44FFC56B2D1109F0699D1796DD977E72108B841F96 E47D7013')
+
+when2 = 1290425644
+
+abs_example = dns.name.from_text('example')
+
+abs_dsa_keys = { abs_example :
+ dns.rrset.from_text('example.', 86400, 'IN', 'DNSKEY',
+ '257 3 3 CI3nCqyJsiCJHTjrNsJOT4RaszetzcJPYuoH3F9ZTVt3KJXncCVR3bwn 1w0iavKljb9hDlAYSfHbFCp4ic/rvg4p1L8vh5s8ToMjqDNl40A0hUGQ Ybx5hsECyK+qHoajilUX1phYSAD8d9WAGO3fDWzUPBuzR7o85NiZCDxz yXuNVfni0uhj9n1KYhEO5yAbbruDGN89wIZcxMKuQsdUY2GYD93ssnBv a55W6XRABYWayKZ90WkRVODLVYLSn53Pj/wwxGH+XdhIAZJXimrZL4yl My7rtBsLMqq8Ihs4Tows7LqYwY7cp6y/50tw6pj8tFqMYcPUjKZV36l1 M/2t5BVg3i7IK61Aidt6aoC3TDJtzAxg3ZxfjZWJfhHjMJqzQIfbW5b9 q1mjFsW5EUv39RaNnX+3JWPRLyDqD4pIwDyqfutMsdk/Py3paHn82FGp CaOg+nicqZ9TiMZURN/XXy5JoXUNQ3RNvbHCUiPUe18KUkY6mTfnyHld 1l9YCWmzXQVClkx/hOYxjJ4j8Ife58+Obu5X',
+ '256 3 3 CJE1yb9YRQiw5d2xZrMUMR+cGCTt1bp1KDCefmYKmS+Z1+q9f42ETVhx JRiQwXclYwmxborzIkSZegTNYIV6mrYwbNB27Q44c3UGcspb3PiOw5TC jNPRYEcdwGvDZ2wWy+vkSV/S9tHXY8O6ODiE6abZJDDg/RnITyi+eoDL R3KZ5n/V1f1T1b90rrV6EewhBGQJpQGDogaXb2oHww9Tm6NfXyo7SoMM pbwbzOckXv+GxRPJIQNSF4D4A9E8XCksuzVVdE/0lr37+uoiAiPia38U 5W2QWe/FJAEPLjIp2eTzf0TrADc1pKP1wrA2ASpdzpm/aX3IB5RPp8Ew S9U72eBFZJAUwg635HxJVxH1maG6atzorR566E+e0OZSaxXS9o1o6QqN 3oPlYLGPORDiExilKfez3C/x/yioOupW9K5eKF0gmtaqrHX0oq9s67f/ RIM2xVaKHgG9Vf2cgJIZkhv7sntujr+E4htnRmy9P9BxyFxsItYxPI6Z bzygHAZpGhlI/7ltEGlIwKxyTK3ZKBm67q7B')
+ }
+
+abs_dsa_soa = dns.rrset.from_text('example.', 86400, 'IN', 'SOA',
+ 'ns1.example. hostmaster.example. 2 10800 3600 604800 86400')
+
+abs_other_dsa_soa = dns.rrset.from_text('example.', 86400, 'IN', 'SOA',
+ 'ns1.example. hostmaster.example. 2 10800 3600 604800 86401')
+
+abs_dsa_soa_rrsig = dns.rrset.from_text('example.', 86400, 'IN', 'RRSIG',
+ 'SOA 3 1 86400 20101129143231 20101122112731 42088 example. CGul9SuBofsktunV8cJs4eRs6u+3NCS3yaPKvBbD+pB2C76OUXDZq9U=')
+
+example_sep_key = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY,
+ '257 3 3 CI3nCqyJsiCJHTjrNsJOT4RaszetzcJPYuoH3F9ZTVt3KJXncCVR3bwn 1w0iavKljb9hDlAYSfHbFCp4ic/rvg4p1L8vh5s8ToMjqDNl40A0hUGQ Ybx5hsECyK+qHoajilUX1phYSAD8d9WAGO3fDWzUPBuzR7o85NiZCDxz yXuNVfni0uhj9n1KYhEO5yAbbruDGN89wIZcxMKuQsdUY2GYD93ssnBv a55W6XRABYWayKZ90WkRVODLVYLSn53Pj/wwxGH+XdhIAZJXimrZL4yl My7rtBsLMqq8Ihs4Tows7LqYwY7cp6y/50tw6pj8tFqMYcPUjKZV36l1 M/2t5BVg3i7IK61Aidt6aoC3TDJtzAxg3ZxfjZWJfhHjMJqzQIfbW5b9 q1mjFsW5EUv39RaNnX+3JWPRLyDqD4pIwDyqfutMsdk/Py3paHn82FGp CaOg+nicqZ9TiMZURN/XXy5JoXUNQ3RNvbHCUiPUe18KUkY6mTfnyHld 1l9YCWmzXQVClkx/hOYxjJ4j8Ife58+Obu5X')
+
+example_ds_sha1 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS,
+ '18673 3 1 71b71d4f3e11bbd71b4eff12cde69f7f9215bbe7')
+
+example_ds_sha256 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS,
+ '18673 3 2 eb8344cbbf07c9d3d3d6c81d10c76653e28d8611a65e639ef8f716e4e4e5d913')
+
+class DNSSECValidatorTestCase(unittest.TestCase):
+
+ def testAbsoluteRSAGood(self):
+ dns.dnssec.validate(abs_soa, abs_soa_rrsig, abs_keys, None, when)
+
+ def testAbsoluteRSABad(self):
+ def bad():
+ dns.dnssec.validate(abs_other_soa, abs_soa_rrsig, abs_keys, None,
+ when)
+ self.failUnlessRaises(dns.dnssec.ValidationFailure, bad)
+
+ def testRelativeRSAGood(self):
+ dns.dnssec.validate(rel_soa, rel_soa_rrsig, rel_keys,
+ abs_dnspython_org, when)
+
+ def testRelativeRSABad(self):
+ def bad():
+ dns.dnssec.validate(rel_other_soa, rel_soa_rrsig, rel_keys,
+ abs_dnspython_org, when)
+ self.failUnlessRaises(dns.dnssec.ValidationFailure, bad)
+
+ def testMakeSHA256DS(self):
+ ds = dns.dnssec.make_ds(abs_dnspython_org, sep_key, 'SHA256')
+ self.failUnless(ds == good_ds)
+
+ def testAbsoluteDSAGood(self):
+ dns.dnssec.validate(abs_dsa_soa, abs_dsa_soa_rrsig, abs_dsa_keys, None,
+ when2)
+
+ def testAbsoluteDSABad(self):
+ def bad():
+ dns.dnssec.validate(abs_other_dsa_soa, abs_dsa_soa_rrsig,
+ abs_dsa_keys, None, when2)
+ self.failUnlessRaises(dns.dnssec.ValidationFailure, bad)
+
+ def testMakeExampleSHA1DS(self):
+ ds = dns.dnssec.make_ds(abs_example, example_sep_key, 'SHA1')
+ self.failUnless(ds == example_ds_sha1)
+
+ def testMakeExampleSHA256DS(self):
+ ds = dns.dnssec.make_ds(abs_example, example_sep_key, 'SHA256')
+ self.failUnless(ds == example_ds_sha256)
+
+if __name__ == '__main__':
+ import_ok = False
+ try:
+ import Crypto.Util.number
+ import_ok = True
+ except:
+ pass
+ if import_ok:
+ unittest.main()
+ else:
+ print 'skipping DNSSEC tests because pycrypto is not installed'
diff --git a/lib/dnspython/tests/example b/lib/dnspython/tests/example
new file mode 100644
index 0000000000..e8fed1161b
--- /dev/null
+++ b/lib/dnspython/tests/example
@@ -0,0 +1,225 @@
+; Copyright (C) 2000, 2001 Internet Software Consortium.
+;
+; Permission to use, copy, modify, and distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM
+; DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+; INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+; FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+; NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+; WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+; $Id: example,v 1.13 2004/03/19 00:06:37 halley Exp $
+
+$ORIGIN .
+$TTL 300 ; 5 minutes
+example IN SOA ns1.example. hostmaster.example. (
+ 1 ; serial
+ 2000 ; refresh (2000 seconds)
+ 2000 ; retry (2000 seconds)
+ 1814400 ; expire (3 weeks)
+ 3600 ; minimum (1 hour)
+ )
+example. NS ns1.example.
+ns1.example. A 10.53.0.1
+example. NS ns2.example.
+ns2.example. A 10.53.0.2
+
+$ORIGIN example.
+* MX 10 mail
+a TXT "foo foo foo"
+ PTR foo.net.
+;; The next line not starting with ';;' is leading whitespace followed by
+;; EOL. We want to treat that as if EOL had appeared alone.
+
+;; The next line not starting with ';;' is leading whitespace followed by
+;; a comment followed by EOL. We want to treat that as if EOL had appeared
+;; alone.
+ ; foo
+$TTL 3600 ; 1 hour
+a01 A 0.0.0.0
+a02 A 255.255.255.255
+;;
+;; XXXRTH dnspython doesn't currently implement A6, and since
+;; A6 records are effectively dead, it may never do so.
+;;
+;;a601 A6 0 ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+;; A6 64 ::ffff:ffff:ffff:ffff foo.
+;; A6 127 ::1 foo.
+;; A6 128 .
+aaaa01 AAAA ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+aaaa02 AAAA ::1
+afsdb01 AFSDB 0 hostname
+afsdb02 AFSDB 65535 .
+$TTL 300 ; 5 minutes
+b CNAME foo.net.
+c A 73.80.65.49
+$TTL 3600 ; 1 hour
+cert01 CERT 65534 65535 PRIVATEOID (
+ MxFcby9k/yvedMfQgKzhH5er0Mu/vILz45IkskceFGgi
+ WCn/GxHhai6VAuHAoNUz4YoU1tVfSCSqQYn6//11U6Nl
+ d80jEeC8aTrO+KKmCaY= )
+cname01 CNAME cname-target.
+cname02 CNAME cname-target
+cname03 CNAME .
+$TTL 300 ; 5 minutes
+d A 73.80.65.49
+$TTL 3600 ; 1 hour
+dhcid01 DHCID ( AAIBY2/AuCccgoJbsaxcQc9TUapptP69l
+ OjxfNuVAA2kjEA= )
+dhcid02 DHCID ( AAEBOSD+XR3Os/0LozeXVqcNc7FwCfQdW
+ L3b/NaiUDlW2No= )
+dhcid03 DHCID ( AAABxLmlskllE0MVjd57zHcWmEH3pCQ6V
+ ytcKD//7es/deY= )
+dname01 DNAME dname-target.
+dname02 DNAME dname-target
+dname03 DNAME .
+$TTL 300 ; 5 minutes
+e MX 10 mail
+ TXT "one"
+ TXT "three"
+ TXT "two"
+ A 73.80.65.49
+ A 73.80.65.50
+ A 73.80.65.52
+ A 73.80.65.51
+f A 73.80.65.52
+$TTL 3600 ; 1 hour
+gpos01 GPOS "-22.6882" "116.8652" "250.0"
+;;
+;; XXXRTH I have commented out the following line because I don't think
+;; it is a valid GPOS record.
+;;
+;;gpos02 GPOS "" "" ""
+hinfo01 HINFO "Generic PC clone" "NetBSD-1.4"
+hinfo02 HINFO "PC" "NetBSD"
+isdn01 ISDN "isdn-address"
+isdn02 ISDN "isdn-address" "subaddress"
+isdn03 ISDN "isdn-address"
+isdn04 ISDN "isdn-address" "subaddress"
+key01 KEY 512 255 1 (
+ AQMFD5raczCJHViKtLYhWGz8hMY9UGRuniJDBzC7w0aR
+ yzWZriO6i2odGWWQVucZqKVsENW91IOW4vqudngPZsY3
+ GvQ/xVA8/7pyFj6b7Esga60zyGW6LFe9r8n6paHrlG5o
+ jqf0BaqHT+8= )
+key02 KEY HOST|FLAG4 DNSSEC RSAMD5 (
+ AQMFD5raczCJHViKtLYhWGz8hMY9UGRuniJDBzC7w0aR
+ yzWZriO6i2odGWWQVucZqKVsENW91IOW4vqudngPZsY3
+ GvQ/xVA8/7pyFj6b7Esga60zyGW6LFe9r8n6paHrlG5o
+ jqf0BaqHT+8= )
+kx01 KX 10 kdc
+kx02 KX 10 .
+loc01 LOC 60 9 0.000 N 24 39 0.000 E 10.00m 20m 2000m 20m
+loc02 LOC 60 9 0.000 N 24 39 0.000 E 10.00m 20m 2000m 20m
+loc03 LOC 60 9 0.000 N 24 39 0.000 E 10.00m 90000000.00m 2000m 20m
+loc04 LOC 60 9 1.5 N 24 39 0.000 E 10.00m 20m 2000m 20m
+loc05 LOC 60 9 1.51 N 24 39 0.000 E 10.00m 20m 2000m 20m
+;;
+;; XXXRTH These are all obsolete and unused. dnspython doesn't implement
+;; them
+;;mb01 MG madname
+;;mb02 MG .
+;;mg01 MG mgmname
+;;mg02 MG .
+;;minfo01 MINFO rmailbx emailbx
+;;minfo02 MINFO . .
+;;mr01 MR mrname
+;;mr02 MR .
+mx01 MX 10 mail
+mx02 MX 10 .
+naptr01 NAPTR 0 0 "" "" "" .
+naptr02 NAPTR 65535 65535 "blurgh" "blorf" "blegh" foo.
+nsap-ptr01 NSAP-PTR foo.
+ NSAP-PTR .
+nsap01 NSAP 0x47000580005a0000000001e133ffffff00016100
+nsap02 NSAP 0x47.000580005a0000000001e133ffffff000161.00
+nxt01 NXT a.secure ( NS SOA MX SIG KEY LOC NXT )
+nxt02 NXT . ( NSAP-PTR NXT )
+nxt03 NXT . ( A )
+nxt04 NXT . ( 127 )
+ptr01 PTR example.
+px01 PX 65535 foo. bar.
+px02 PX 65535 . .
+rp01 RP mbox-dname txt-dname
+rp02 RP . .
+rt01 RT 0 intermediate-host
+rt02 RT 65535 .
+$TTL 300 ; 5 minutes
+s NS ns.s
+$ORIGIN s.example.
+ns A 73.80.65.49
+$ORIGIN example.
+$TTL 3600 ; 1 hour
+sig01 SIG NXT 1 3 3600 (
+ 20200101000000 20030101000000 2143 foo
+ MxFcby9k/yvedMfQgKzhH5er0Mu/vILz45IkskceFGgi
+ WCn/GxHhai6VAuHAoNUz4YoU1tVfSCSqQYn6//11U6Nl
+ d80jEeC8aTrO+KKmCaY= )
+srv01 SRV 0 0 0 .
+srv02 SRV 65535 65535 65535 old-slow-box.example.com.
+$TTL 301 ; 5 minutes 1 second
+t A 73.80.65.49
+$TTL 3600 ; 1 hour
+txt01 TXT "foo"
+txt02 TXT "foo" "bar"
+txt03 TXT "foo"
+txt04 TXT "foo" "bar"
+txt05 TXT "foo bar"
+txt06 TXT "foo bar"
+txt07 TXT "foo bar"
+txt08 TXT "foo\010bar"
+txt09 TXT "foo\010bar"
+txt10 TXT "foo bar"
+txt11 TXT "\"foo\""
+txt12 TXT "\"foo\""
+txt13 TXT foo
+$TTL 300 ; 5 minutes
+u TXT "txt-not-in-nxt"
+$ORIGIN u.example.
+a A 73.80.65.49
+b A 73.80.65.49
+$ORIGIN example.
+$TTL 3600 ; 1 hour
+wks01 WKS 10.0.0.1 6 ( 0 1 2 21 23 )
+wks02 WKS 10.0.0.1 17 ( 0 1 2 53 )
+wks03 WKS 10.0.0.2 6 ( 65535 )
+x2501 X25 "123456789"
+dlv01 DLV 12345 3 1 123456789abcdef67890123456789abcdef67890
+ds01 DS 12345 3 1 123456789abcdef67890123456789abcdef67890
+apl01 APL 1:192.168.32.0/21 !1:192.168.38.0/28
+apl02 APL 1:224.0.0.0/4 2:FF00:0:0:0:0:0:0:0/8
+unknown2 TYPE999 \# 8 0a0000010a000001
+rrsig01 RRSIG NSEC 1 3 3600 20200101000000 20030101000000 2143 foo MxFcby9k/yvedMfQgKzhH5er0Mu/ vILz45IkskceFGgiWCn/GxHhai6V AuHAoNUz4YoU1tVfSCSqQYn6//11 U6Nld80jEeC8aTrO+KKmCaY=
+nsec01 NSEC a.secure. A MX RRSIG NSEC TYPE1234
+nsec02 NSEC . NSAP-PTR NSEC
+nsec03 NSEC . NSEC TYPE65535
+dnskey01 DNSKEY 512 255 1 (
+ AQMFD5raczCJHViKtLYhWGz8hMY9UGRuniJDBzC7w0aR
+ yzWZriO6i2odGWWQVucZqKVsENW91IOW4vqudngPZsY3
+ GvQ/xVA8/7pyFj6b7Esga60zyGW6LFe9r8n6paHrlG5o
+ jqf0BaqHT+8= )
+dnskey02 DNSKEY HOST|FLAG4 DNSSEC RSAMD5 (
+ AQMFD5raczCJHViKtLYhWGz8hMY9UGRuniJDBzC7w0aR
+ yzWZriO6i2odGWWQVucZqKVsENW91IOW4vqudngPZsY3
+ GvQ/xVA8/7pyFj6b7Esga60zyGW6LFe9r8n6paHrlG5o
+ jqf0BaqHT+8= )
+;
+; test known type using unknown RR syntax
+;
+unknown3 A \# 4 7f000002
+sshfp1 SSHFP 1 1 aa549bfe898489c02d1715d97d79c57ba2fa76ab
+spf SPF "v=spf1 mx -all"
+ipseckey01 IPSECKEY 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==
+ipseckey02 IPSECKEY 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==
+ipseckey03 IPSECKEY 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==
+ipseckey04 IPSECKEY 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==
+ipseckey05 IPSECKEY 10 3 2 mygateway2 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==
+nsec301 NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr MX DNSKEY NS SOA NSEC3PARAM RRSIG
+nsec302 NSEC3 1 1 12 - 2t7b4g4vsa5smi47k61mv5bv1a22bojr MX DNSKEY NS SOA NSEC3PARAM RRSIG
+nsec3param01 NSEC3PARAM 1 1 12 aabbccdd
+nsec3param02 NSEC3PARAM 1 1 12 -
+hip01 HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs1.example.com. rvs2
diff --git a/lib/dnspython/tests/example1.good b/lib/dnspython/tests/example1.good
new file mode 100644
index 0000000000..ca5ead6379
--- /dev/null
+++ b/lib/dnspython/tests/example1.good
@@ -0,0 +1,121 @@
+@ 300 IN SOA ns1 hostmaster 1 2000 2000 1814400 3600
+@ 300 IN NS ns1
+@ 300 IN NS ns2
+* 300 IN MX 10 mail
+a 300 IN TXT "foo foo foo"
+a 300 IN PTR foo.net.
+a01 3600 IN A 0.0.0.0
+a02 3600 IN A 255.255.255.255
+aaaa01 3600 IN AAAA ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+aaaa02 3600 IN AAAA ::1
+afsdb01 3600 IN AFSDB 0 hostname
+afsdb02 3600 IN AFSDB 65535 .
+apl01 3600 IN APL 1:192.168.32.0/21 !1:192.168.38.0/28
+apl02 3600 IN APL 1:224.0.0.0/4 2:FF00:0:0:0:0:0:0:0/8
+b 300 IN CNAME foo.net.
+c 300 IN A 73.80.65.49
+cert01 3600 IN CERT 65534 65535 PRIVATEOID MxFcby9k/yvedMfQgKzhH5er0Mu/vILz 45IkskceFGgiWCn/GxHhai6VAuHAoNUz 4YoU1tVfSCSqQYn6//11U6Nld80jEeC8 aTrO+KKmCaY=
+cname01 3600 IN CNAME cname-target.
+cname02 3600 IN CNAME cname-target
+cname03 3600 IN CNAME .
+d 300 IN A 73.80.65.49
+dhcid01 3600 IN DHCID AAIBY2/AuCccgoJbsaxcQc9TUapptP69 lOjxfNuVAA2kjEA=
+dhcid02 3600 IN DHCID AAEBOSD+XR3Os/0LozeXVqcNc7FwCfQd WL3b/NaiUDlW2No=
+dhcid03 3600 IN DHCID AAABxLmlskllE0MVjd57zHcWmEH3pCQ6 VytcKD//7es/deY=
+dlv01 3600 IN DLV 12345 3 1 123456789abcdef67890123456789abcdef67890
+dname01 3600 IN DNAME dname-target.
+dname02 3600 IN DNAME dname-target
+dname03 3600 IN DNAME .
+dnskey01 3600 IN DNSKEY 512 255 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+dnskey02 3600 IN DNSKEY 2560 3 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+ds01 3600 IN DS 12345 3 1 123456789abcdef67890123456789abcdef67890
+e 300 IN MX 10 mail
+e 300 IN TXT "one"
+e 300 IN TXT "three"
+e 300 IN TXT "two"
+e 300 IN A 73.80.65.49
+e 300 IN A 73.80.65.50
+e 300 IN A 73.80.65.52
+e 300 IN A 73.80.65.51
+f 300 IN A 73.80.65.52
+gpos01 3600 IN GPOS -22.6882 116.8652 250.0
+hinfo01 3600 IN HINFO "Generic PC clone" "NetBSD-1.4"
+hinfo02 3600 IN HINFO "PC" "NetBSD"
+hip01 3600 IN HIP 2 200100107b1a74df365639cc39f1d578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs1.example.com. rvs2
+ipseckey01 3600 IN IPSECKEY 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey02 3600 IN IPSECKEY 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey03 3600 IN IPSECKEY 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey04 3600 IN IPSECKEY 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey05 3600 IN IPSECKEY 10 3 2 mygateway2 AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+isdn01 3600 IN ISDN "isdn-address"
+isdn02 3600 IN ISDN "isdn-address" "subaddress"
+isdn03 3600 IN ISDN "isdn-address"
+isdn04 3600 IN ISDN "isdn-address" "subaddress"
+key01 3600 IN KEY 512 255 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+key02 3600 IN KEY 2560 3 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+kx01 3600 IN KX 10 kdc
+kx02 3600 IN KX 10 .
+loc01 3600 IN LOC 60 9 0.000 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+loc02 3600 IN LOC 60 9 0.000 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+loc03 3600 IN LOC 60 9 0.000 N 24 39 0.000 E 10.00m 90000000.00m 2000.00m 20.00m
+loc04 3600 IN LOC 60 9 1.500 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+loc05 3600 IN LOC 60 9 1.510 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+mx01 3600 IN MX 10 mail
+mx02 3600 IN MX 10 .
+naptr01 3600 IN NAPTR 0 0 "" "" "" .
+naptr02 3600 IN NAPTR 65535 65535 "blurgh" "blorf" "blegh" foo.
+ns1 300 IN A 10.53.0.1
+ns2 300 IN A 10.53.0.2
+nsap-ptr01 3600 IN NSAP-PTR foo.
+nsap-ptr01 3600 IN NSAP-PTR .
+nsap01 3600 IN NSAP 0x47000580005a0000000001e133ffffff00016100
+nsap02 3600 IN NSAP 0x47000580005a0000000001e133ffffff00016100
+nsec01 3600 IN NSEC a.secure. A MX RRSIG NSEC TYPE1234
+nsec02 3600 IN NSEC . NSAP-PTR NSEC
+nsec03 3600 IN NSEC . NSEC TYPE65535
+nsec301 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
+nsec302 3600 IN NSEC3 1 1 12 - 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
+nsec3param01 3600 IN NSEC3PARAM 1 1 12 aabbccdd
+nsec3param02 3600 IN NSEC3PARAM 1 1 12 -
+nxt01 3600 IN NXT a.secure NS SOA MX SIG KEY LOC NXT
+nxt02 3600 IN NXT . NSAP-PTR NXT
+nxt03 3600 IN NXT . A
+nxt04 3600 IN NXT . TYPE127
+ptr01 3600 IN PTR @
+px01 3600 IN PX 65535 foo. bar.
+px02 3600 IN PX 65535 . .
+rp01 3600 IN RP mbox-dname txt-dname
+rp02 3600 IN RP . .
+rrsig01 3600 IN RRSIG NSEC 1 3 3600 20200101000000 20030101000000 2143 foo MxFcby9k/yvedMfQgKzhH5er0Mu/vILz 45IkskceFGgiWCn/GxHhai6VAuHAoNUz 4YoU1tVfSCSqQYn6//11U6Nld80jEeC8 aTrO+KKmCaY=
+rt01 3600 IN RT 0 intermediate-host
+rt02 3600 IN RT 65535 .
+s 300 IN NS ns.s
+ns.s 300 IN A 73.80.65.49
+sig01 3600 IN SIG NXT 1 3 3600 20200101000000 20030101000000 2143 foo MxFcby9k/yvedMfQgKzhH5er0Mu/vILz 45IkskceFGgiWCn/GxHhai6VAuHAoNUz 4YoU1tVfSCSqQYn6//11U6Nld80jEeC8 aTrO+KKmCaY=
+spf 3600 IN SPF "v=spf1 mx -all"
+srv01 3600 IN SRV 0 0 0 .
+srv02 3600 IN SRV 65535 65535 65535 old-slow-box.example.com.
+sshfp1 3600 IN SSHFP 1 1 aa549bfe898489c02d1715d97d79c57ba2fa76ab
+t 301 IN A 73.80.65.49
+txt01 3600 IN TXT "foo"
+txt02 3600 IN TXT "foo" "bar"
+txt03 3600 IN TXT "foo"
+txt04 3600 IN TXT "foo" "bar"
+txt05 3600 IN TXT "foo bar"
+txt06 3600 IN TXT "foo bar"
+txt07 3600 IN TXT "foo bar"
+txt08 3600 IN TXT "foo\010bar"
+txt09 3600 IN TXT "foo\010bar"
+txt10 3600 IN TXT "foo bar"
+txt11 3600 IN TXT "\"foo\""
+txt12 3600 IN TXT "\"foo\""
+txt13 3600 IN TXT "foo"
+u 300 IN TXT "txt-not-in-nxt"
+a.u 300 IN A 73.80.65.49
+b.u 300 IN A 73.80.65.49
+unknown2 3600 IN TYPE999 \# 8 0a0000010a000001
+unknown3 3600 IN A 127.0.0.2
+wks01 3600 IN WKS 10.0.0.1 6 0 1 2 21 23
+wks02 3600 IN WKS 10.0.0.1 17 0 1 2 53
+wks03 3600 IN WKS 10.0.0.2 6 65535
+x2501 3600 IN X25 "123456789"
diff --git a/lib/dnspython/tests/example2.good b/lib/dnspython/tests/example2.good
new file mode 100644
index 0000000000..c923c09b7c
--- /dev/null
+++ b/lib/dnspython/tests/example2.good
@@ -0,0 +1,121 @@
+example. 300 IN SOA ns1.example. hostmaster.example. 1 2000 2000 1814400 3600
+example. 300 IN NS ns1.example.
+example. 300 IN NS ns2.example.
+*.example. 300 IN MX 10 mail.example.
+a.example. 300 IN TXT "foo foo foo"
+a.example. 300 IN PTR foo.net.
+a01.example. 3600 IN A 0.0.0.0
+a02.example. 3600 IN A 255.255.255.255
+aaaa01.example. 3600 IN AAAA ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+aaaa02.example. 3600 IN AAAA ::1
+afsdb01.example. 3600 IN AFSDB 0 hostname.example.
+afsdb02.example. 3600 IN AFSDB 65535 .
+apl01.example. 3600 IN APL 1:192.168.32.0/21 !1:192.168.38.0/28
+apl02.example. 3600 IN APL 1:224.0.0.0/4 2:FF00:0:0:0:0:0:0:0/8
+b.example. 300 IN CNAME foo.net.
+c.example. 300 IN A 73.80.65.49
+cert01.example. 3600 IN CERT 65534 65535 PRIVATEOID MxFcby9k/yvedMfQgKzhH5er0Mu/vILz 45IkskceFGgiWCn/GxHhai6VAuHAoNUz 4YoU1tVfSCSqQYn6//11U6Nld80jEeC8 aTrO+KKmCaY=
+cname01.example. 3600 IN CNAME cname-target.
+cname02.example. 3600 IN CNAME cname-target.example.
+cname03.example. 3600 IN CNAME .
+d.example. 300 IN A 73.80.65.49
+dhcid01.example. 3600 IN DHCID AAIBY2/AuCccgoJbsaxcQc9TUapptP69 lOjxfNuVAA2kjEA=
+dhcid02.example. 3600 IN DHCID AAEBOSD+XR3Os/0LozeXVqcNc7FwCfQd WL3b/NaiUDlW2No=
+dhcid03.example. 3600 IN DHCID AAABxLmlskllE0MVjd57zHcWmEH3pCQ6 VytcKD//7es/deY=
+dlv01.example. 3600 IN DLV 12345 3 1 123456789abcdef67890123456789abcdef67890
+dname01.example. 3600 IN DNAME dname-target.
+dname02.example. 3600 IN DNAME dname-target.example.
+dname03.example. 3600 IN DNAME .
+dnskey01.example. 3600 IN DNSKEY 512 255 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+dnskey02.example. 3600 IN DNSKEY 2560 3 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+ds01.example. 3600 IN DS 12345 3 1 123456789abcdef67890123456789abcdef67890
+e.example. 300 IN MX 10 mail.example.
+e.example. 300 IN TXT "one"
+e.example. 300 IN TXT "three"
+e.example. 300 IN TXT "two"
+e.example. 300 IN A 73.80.65.49
+e.example. 300 IN A 73.80.65.50
+e.example. 300 IN A 73.80.65.52
+e.example. 300 IN A 73.80.65.51
+f.example. 300 IN A 73.80.65.52
+gpos01.example. 3600 IN GPOS -22.6882 116.8652 250.0
+hinfo01.example. 3600 IN HINFO "Generic PC clone" "NetBSD-1.4"
+hinfo02.example. 3600 IN HINFO "PC" "NetBSD"
+hip01.example. 3600 IN HIP 2 200100107b1a74df365639cc39f1d578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs1.example.com. rvs2.example.
+ipseckey01.example. 3600 IN IPSECKEY 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey02.example. 3600 IN IPSECKEY 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey03.example. 3600 IN IPSECKEY 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey04.example. 3600 IN IPSECKEY 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+ipseckey05.example. 3600 IN IPSECKEY 10 3 2 mygateway2.example. AQNRU3mG7TVTO2BkR47usntb102uFJtu gbo6BSGvgqt4AQ==
+isdn01.example. 3600 IN ISDN "isdn-address"
+isdn02.example. 3600 IN ISDN "isdn-address" "subaddress"
+isdn03.example. 3600 IN ISDN "isdn-address"
+isdn04.example. 3600 IN ISDN "isdn-address" "subaddress"
+key01.example. 3600 IN KEY 512 255 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+key02.example. 3600 IN KEY 2560 3 1 AQMFD5raczCJHViKtLYhWGz8hMY9UGRu niJDBzC7w0aRyzWZriO6i2odGWWQVucZ qKVsENW91IOW4vqudngPZsY3GvQ/xVA8 /7pyFj6b7Esga60zyGW6LFe9r8n6paHr lG5ojqf0BaqHT+8=
+kx01.example. 3600 IN KX 10 kdc.example.
+kx02.example. 3600 IN KX 10 .
+loc01.example. 3600 IN LOC 60 9 0.000 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+loc02.example. 3600 IN LOC 60 9 0.000 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+loc03.example. 3600 IN LOC 60 9 0.000 N 24 39 0.000 E 10.00m 90000000.00m 2000.00m 20.00m
+loc04.example. 3600 IN LOC 60 9 1.500 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+loc05.example. 3600 IN LOC 60 9 1.510 N 24 39 0.000 E 10.00m 20.00m 2000.00m 20.00m
+mx01.example. 3600 IN MX 10 mail.example.
+mx02.example. 3600 IN MX 10 .
+naptr01.example. 3600 IN NAPTR 0 0 "" "" "" .
+naptr02.example. 3600 IN NAPTR 65535 65535 "blurgh" "blorf" "blegh" foo.
+ns1.example. 300 IN A 10.53.0.1
+ns2.example. 300 IN A 10.53.0.2
+nsap-ptr01.example. 3600 IN NSAP-PTR foo.
+nsap-ptr01.example. 3600 IN NSAP-PTR .
+nsap01.example. 3600 IN NSAP 0x47000580005a0000000001e133ffffff00016100
+nsap02.example. 3600 IN NSAP 0x47000580005a0000000001e133ffffff00016100
+nsec01.example. 3600 IN NSEC a.secure. A MX RRSIG NSEC TYPE1234
+nsec02.example. 3600 IN NSEC . NSAP-PTR NSEC
+nsec03.example. 3600 IN NSEC . NSEC TYPE65535
+nsec301.example. 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
+nsec302.example. 3600 IN NSEC3 1 1 12 - 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
+nsec3param01.example. 3600 IN NSEC3PARAM 1 1 12 aabbccdd
+nsec3param02.example. 3600 IN NSEC3PARAM 1 1 12 -
+nxt01.example. 3600 IN NXT a.secure.example. NS SOA MX SIG KEY LOC NXT
+nxt02.example. 3600 IN NXT . NSAP-PTR NXT
+nxt03.example. 3600 IN NXT . A
+nxt04.example. 3600 IN NXT . TYPE127
+ptr01.example. 3600 IN PTR example.
+px01.example. 3600 IN PX 65535 foo. bar.
+px02.example. 3600 IN PX 65535 . .
+rp01.example. 3600 IN RP mbox-dname.example. txt-dname.example.
+rp02.example. 3600 IN RP . .
+rrsig01.example. 3600 IN RRSIG NSEC 1 3 3600 20200101000000 20030101000000 2143 foo.example. MxFcby9k/yvedMfQgKzhH5er0Mu/vILz 45IkskceFGgiWCn/GxHhai6VAuHAoNUz 4YoU1tVfSCSqQYn6//11U6Nld80jEeC8 aTrO+KKmCaY=
+rt01.example. 3600 IN RT 0 intermediate-host.example.
+rt02.example. 3600 IN RT 65535 .
+s.example. 300 IN NS ns.s.example.
+ns.s.example. 300 IN A 73.80.65.49
+sig01.example. 3600 IN SIG NXT 1 3 3600 20200101000000 20030101000000 2143 foo.example. MxFcby9k/yvedMfQgKzhH5er0Mu/vILz 45IkskceFGgiWCn/GxHhai6VAuHAoNUz 4YoU1tVfSCSqQYn6//11U6Nld80jEeC8 aTrO+KKmCaY=
+spf.example. 3600 IN SPF "v=spf1 mx -all"
+srv01.example. 3600 IN SRV 0 0 0 .
+srv02.example. 3600 IN SRV 65535 65535 65535 old-slow-box.example.com.
+sshfp1.example. 3600 IN SSHFP 1 1 aa549bfe898489c02d1715d97d79c57ba2fa76ab
+t.example. 301 IN A 73.80.65.49
+txt01.example. 3600 IN TXT "foo"
+txt02.example. 3600 IN TXT "foo" "bar"
+txt03.example. 3600 IN TXT "foo"
+txt04.example. 3600 IN TXT "foo" "bar"
+txt05.example. 3600 IN TXT "foo bar"
+txt06.example. 3600 IN TXT "foo bar"
+txt07.example. 3600 IN TXT "foo bar"
+txt08.example. 3600 IN TXT "foo\010bar"
+txt09.example. 3600 IN TXT "foo\010bar"
+txt10.example. 3600 IN TXT "foo bar"
+txt11.example. 3600 IN TXT "\"foo\""
+txt12.example. 3600 IN TXT "\"foo\""
+txt13.example. 3600 IN TXT "foo"
+u.example. 300 IN TXT "txt-not-in-nxt"
+a.u.example. 300 IN A 73.80.65.49
+b.u.example. 300 IN A 73.80.65.49
+unknown2.example. 3600 IN TYPE999 \# 8 0a0000010a000001
+unknown3.example. 3600 IN A 127.0.0.2
+wks01.example. 3600 IN WKS 10.0.0.1 6 0 1 2 21 23
+wks02.example. 3600 IN WKS 10.0.0.1 17 0 1 2 53
+wks03.example. 3600 IN WKS 10.0.0.2 6 65535
+x2501.example. 3600 IN X25 "123456789"
diff --git a/lib/dnspython/tests/flags.py b/lib/dnspython/tests/flags.py
new file mode 100644
index 0000000000..7ee2d8e12e
--- /dev/null
+++ b/lib/dnspython/tests/flags.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.flags
+import dns.rcode
+import dns.opcode
+
+class FlagsTestCase(unittest.TestCase):
+
+ def test_rcode1(self):
+ self.failUnless(dns.rcode.from_text('FORMERR') == dns.rcode.FORMERR)
+
+ def test_rcode2(self):
+ self.failUnless(dns.rcode.to_text(dns.rcode.FORMERR) == "FORMERR")
+
+ def test_rcode3(self):
+ self.failUnless(dns.rcode.to_flags(dns.rcode.FORMERR) == (1, 0))
+
+ def test_rcode4(self):
+ self.failUnless(dns.rcode.to_flags(dns.rcode.BADVERS) == \
+ (0, 0x01000000))
+
+ def test_rcode6(self):
+ self.failUnless(dns.rcode.from_flags(0, 0x01000000) == \
+ dns.rcode.BADVERS)
+
+ def test_rcode6(self):
+ self.failUnless(dns.rcode.from_flags(5, 0) == dns.rcode.REFUSED)
+
+ def test_rcode7(self):
+ def bad():
+ dns.rcode.to_flags(4096)
+ self.failUnlessRaises(ValueError, bad)
+
+ def test_flags1(self):
+ self.failUnless(dns.flags.from_text("RA RD AA QR") == \
+ dns.flags.QR|dns.flags.AA|dns.flags.RD|dns.flags.RA)
+
+ def test_flags2(self):
+ flags = dns.flags.QR|dns.flags.AA|dns.flags.RD|dns.flags.RA
+ self.failUnless(dns.flags.to_text(flags) == "QR AA RD RA")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/message.py b/lib/dnspython/tests/message.py
new file mode 100644
index 0000000000..7134661d3a
--- /dev/null
+++ b/lib/dnspython/tests/message.py
@@ -0,0 +1,179 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import os
+import unittest
+
+import dns.exception
+import dns.message
+
+query_text = """id 1234
+opcode QUERY
+rcode NOERROR
+flags RD
+edns 0
+eflags DO
+payload 4096
+;QUESTION
+wwww.dnspython.org. IN A
+;ANSWER
+;AUTHORITY
+;ADDITIONAL"""
+
+goodhex = '04d201000001000000000001047777777709646e73707974686f6e' \
+ '036f726700000100010000291000000080000000'
+
+goodwire = goodhex.decode('hex_codec')
+
+answer_text = """id 1234
+opcode QUERY
+rcode NOERROR
+flags QR AA RD
+;QUESTION
+dnspython.org. IN SOA
+;ANSWER
+dnspython.org. 3600 IN SOA woof.dnspython.org. hostmaster.dnspython.org. 2003052700 3600 1800 604800 3600
+;AUTHORITY
+dnspython.org. 3600 IN NS ns1.staff.nominum.org.
+dnspython.org. 3600 IN NS ns2.staff.nominum.org.
+dnspython.org. 3600 IN NS woof.play-bow.org.
+;ADDITIONAL
+woof.play-bow.org. 3600 IN A 204.152.186.150
+"""
+
+goodhex2 = '04d2 8500 0001 0001 0003 0001' \
+ '09646e73707974686f6e036f726700 0006 0001' \
+ 'c00c 0006 0001 00000e10 0028 ' \
+ '04776f6f66c00c 0a686f73746d6173746572c00c' \
+ '7764289c 00000e10 00000708 00093a80 00000e10' \
+ 'c00c 0002 0001 00000e10 0014' \
+ '036e7331057374616666076e6f6d696e756dc016' \
+ 'c00c 0002 0001 00000e10 0006 036e7332c063' \
+ 'c00c 0002 0001 00000e10 0010 04776f6f6608706c61792d626f77c016' \
+ 'c091 0001 0001 00000e10 0004 cc98ba96'
+
+
+goodwire2 = goodhex2.replace(' ', '').decode('hex_codec')
+
+query_text_2 = """id 1234
+opcode QUERY
+rcode 4095
+flags RD
+edns 0
+eflags DO
+payload 4096
+;QUESTION
+wwww.dnspython.org. IN A
+;ANSWER
+;AUTHORITY
+;ADDITIONAL"""
+
+goodhex3 = '04d2010f0001000000000001047777777709646e73707974686f6e' \
+ '036f726700000100010000291000ff0080000000'
+
+goodwire3 = goodhex3.decode('hex_codec')
+
+class MessageTestCase(unittest.TestCase):
+
+ def test_comparison_eq1(self):
+ q1 = dns.message.from_text(query_text)
+ q2 = dns.message.from_text(query_text)
+ self.failUnless(q1 == q2)
+
+ def test_comparison_ne1(self):
+ q1 = dns.message.from_text(query_text)
+ q2 = dns.message.from_text(query_text)
+ q2.id = 10
+ self.failUnless(q1 != q2)
+
+ def test_comparison_ne2(self):
+ q1 = dns.message.from_text(query_text)
+ q2 = dns.message.from_text(query_text)
+ q2.question = []
+ self.failUnless(q1 != q2)
+
+ def test_comparison_ne3(self):
+ q1 = dns.message.from_text(query_text)
+ self.failUnless(q1 != 1)
+
+ def test_EDNS_to_wire1(self):
+ q = dns.message.from_text(query_text)
+ w = q.to_wire()
+ self.failUnless(w == goodwire)
+
+ def test_EDNS_from_wire1(self):
+ m = dns.message.from_wire(goodwire)
+ self.failUnless(str(m) == query_text)
+
+ def test_EDNS_to_wire2(self):
+ q = dns.message.from_text(query_text_2)
+ w = q.to_wire()
+ self.failUnless(w == goodwire3)
+
+ def test_EDNS_from_wire2(self):
+ m = dns.message.from_wire(goodwire3)
+ self.failUnless(str(m) == query_text_2)
+
+ def test_TooBig(self):
+ def bad():
+ q = dns.message.from_text(query_text)
+ for i in xrange(0, 25):
+ rrset = dns.rrset.from_text('foo%d.' % i, 3600,
+ dns.rdataclass.IN,
+ dns.rdatatype.A,
+ '10.0.0.%d' % i)
+ q.additional.append(rrset)
+ w = q.to_wire(max_size=512)
+ self.failUnlessRaises(dns.exception.TooBig, bad)
+
+ def test_answer1(self):
+ a = dns.message.from_text(answer_text)
+ wire = a.to_wire(want_shuffle=False)
+ self.failUnless(wire == goodwire2)
+
+ def test_TrailingJunk(self):
+ def bad():
+ badwire = goodwire + '\x00'
+ m = dns.message.from_wire(badwire)
+ self.failUnlessRaises(dns.message.TrailingJunk, bad)
+
+ def test_ShortHeader(self):
+ def bad():
+ badwire = '\x00' * 11
+ m = dns.message.from_wire(badwire)
+ self.failUnlessRaises(dns.message.ShortHeader, bad)
+
+ def test_RespondingToResponse(self):
+ def bad():
+ q = dns.message.make_query('foo', 'A')
+ r1 = dns.message.make_response(q)
+ r2 = dns.message.make_response(r1)
+ self.failUnlessRaises(dns.exception.FormError, bad)
+
+ def test_ExtendedRcodeSetting(self):
+ m = dns.message.make_query('foo', 'A')
+ m.set_rcode(4095)
+ self.failUnless(m.rcode() == 4095)
+ m.set_rcode(2)
+ self.failUnless(m.rcode() == 2)
+
+ def test_EDNSVersionCoherence(self):
+ m = dns.message.make_query('foo', 'A')
+ m.use_edns(1)
+ self.failUnless((m.ednsflags >> 16) & 0xFF == 1)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/name.py b/lib/dnspython/tests/name.py
new file mode 100644
index 0000000000..1ab4f52814
--- /dev/null
+++ b/lib/dnspython/tests/name.py
@@ -0,0 +1,697 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import cStringIO
+import socket
+
+import dns.name
+import dns.reversename
+import dns.e164
+
+class NameTestCase(unittest.TestCase):
+ def setUp(self):
+ self.origin = dns.name.from_text('example.')
+
+ def testFromTextRel1(self):
+ n = dns.name.from_text('foo.bar')
+ self.failUnless(n.labels == ('foo', 'bar', ''))
+
+ def testFromTextRel2(self):
+ n = dns.name.from_text('foo.bar', origin=self.origin)
+ self.failUnless(n.labels == ('foo', 'bar', 'example', ''))
+
+ def testFromTextRel3(self):
+ n = dns.name.from_text('foo.bar', origin=None)
+ self.failUnless(n.labels == ('foo', 'bar'))
+
+ def testFromTextRel4(self):
+ n = dns.name.from_text('@', origin=None)
+ self.failUnless(n == dns.name.empty)
+
+ def testFromTextRel5(self):
+ n = dns.name.from_text('@', origin=self.origin)
+ self.failUnless(n == self.origin)
+
+ def testFromTextAbs1(self):
+ n = dns.name.from_text('foo.bar.')
+ self.failUnless(n.labels == ('foo', 'bar', ''))
+
+ def testTortureFromText(self):
+ good = [
+ r'.',
+ r'a',
+ r'a.',
+ r'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
+ r'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
+ r'\000.\008.\010.\032.\046.\092.\099.\255',
+ r'\\',
+ r'\..\.',
+ r'\\.\\',
+ r'!"#%&/()=+-',
+ r'\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255.\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255.\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255.\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255',
+ ]
+ bad = [
+ r'..',
+ r'.a',
+ r'\\..',
+ '\\', # yes, we don't want the 'r' prefix!
+ r'\0',
+ r'\00',
+ r'\00Z',
+ r'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
+ r'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
+ r'\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255.\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255.\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255.\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255',
+ ]
+ for t in good:
+ try:
+ n = dns.name.from_text(t)
+ except:
+ self.fail("good test '%s' raised an exception" % t)
+ for t in bad:
+ caught = False
+ try:
+ n = dns.name.from_text(t)
+ except:
+ caught = True
+ if not caught:
+ self.fail("bad test '%s' did not raise an exception" % t)
+
+ def testImmutable1(self):
+ def bad():
+ self.origin.labels = ()
+ self.failUnlessRaises(TypeError, bad)
+
+ def testImmutable2(self):
+ def bad():
+ self.origin.labels[0] = 'foo'
+ self.failUnlessRaises(TypeError, bad)
+
+ def testAbs1(self):
+ self.failUnless(dns.name.root.is_absolute())
+
+ def testAbs2(self):
+ self.failUnless(not dns.name.empty.is_absolute())
+
+ def testAbs3(self):
+ self.failUnless(self.origin.is_absolute())
+
+ def testAbs3(self):
+ n = dns.name.from_text('foo', origin=None)
+ self.failUnless(not n.is_absolute())
+
+ def testWild1(self):
+ n = dns.name.from_text('*.foo', origin=None)
+ self.failUnless(n.is_wild())
+
+ def testWild2(self):
+ n = dns.name.from_text('*a.foo', origin=None)
+ self.failUnless(not n.is_wild())
+
+ def testWild3(self):
+ n = dns.name.from_text('a.*.foo', origin=None)
+ self.failUnless(not n.is_wild())
+
+ def testWild4(self):
+ self.failUnless(not dns.name.root.is_wild())
+
+ def testWild5(self):
+ self.failUnless(not dns.name.empty.is_wild())
+
+ def testHash1(self):
+ n1 = dns.name.from_text('fOo.COM')
+ n2 = dns.name.from_text('foo.com')
+ self.failUnless(hash(n1) == hash(n2))
+
+ def testCompare1(self):
+ n1 = dns.name.from_text('a')
+ n2 = dns.name.from_text('b')
+ self.failUnless(n1 < n2)
+ self.failUnless(n2 > n1)
+
+ def testCompare2(self):
+ n1 = dns.name.from_text('')
+ n2 = dns.name.from_text('b')
+ self.failUnless(n1 < n2)
+ self.failUnless(n2 > n1)
+
+ def testCompare3(self):
+ self.failUnless(dns.name.empty < dns.name.root)
+ self.failUnless(dns.name.root > dns.name.empty)
+
+ def testCompare4(self):
+ self.failUnless(dns.name.root != 1)
+
+ def testCompare5(self):
+ self.failUnless(dns.name.root < 1 or dns.name.root > 1)
+
+ def testSubdomain1(self):
+ self.failUnless(not dns.name.empty.is_subdomain(dns.name.root))
+
+ def testSubdomain2(self):
+ self.failUnless(not dns.name.root.is_subdomain(dns.name.empty))
+
+ def testSubdomain3(self):
+ n = dns.name.from_text('foo', origin=self.origin)
+ self.failUnless(n.is_subdomain(self.origin))
+
+ def testSubdomain4(self):
+ n = dns.name.from_text('foo', origin=self.origin)
+ self.failUnless(n.is_subdomain(dns.name.root))
+
+ def testSubdomain5(self):
+ n = dns.name.from_text('foo', origin=self.origin)
+ self.failUnless(n.is_subdomain(n))
+
+ def testSuperdomain1(self):
+ self.failUnless(not dns.name.empty.is_superdomain(dns.name.root))
+
+ def testSuperdomain2(self):
+ self.failUnless(not dns.name.root.is_superdomain(dns.name.empty))
+
+ def testSuperdomain3(self):
+ n = dns.name.from_text('foo', origin=self.origin)
+ self.failUnless(self.origin.is_superdomain(n))
+
+ def testSuperdomain4(self):
+ n = dns.name.from_text('foo', origin=self.origin)
+ self.failUnless(dns.name.root.is_superdomain(n))
+
+ def testSuperdomain5(self):
+ n = dns.name.from_text('foo', origin=self.origin)
+ self.failUnless(n.is_superdomain(n))
+
+ def testCanonicalize1(self):
+ n = dns.name.from_text('FOO.bar', origin=self.origin)
+ c = n.canonicalize()
+ self.failUnless(c.labels == ('foo', 'bar', 'example', ''))
+
+ def testToText1(self):
+ n = dns.name.from_text('FOO.bar', origin=self.origin)
+ t = n.to_text()
+ self.failUnless(t == 'FOO.bar.example.')
+
+ def testToText2(self):
+ n = dns.name.from_text('FOO.bar', origin=self.origin)
+ t = n.to_text(True)
+ self.failUnless(t == 'FOO.bar.example')
+
+ def testToText3(self):
+ n = dns.name.from_text('FOO.bar', origin=None)
+ t = n.to_text()
+ self.failUnless(t == 'FOO.bar')
+
+ def testToText4(self):
+ t = dns.name.empty.to_text()
+ self.failUnless(t == '@')
+
+ def testToText5(self):
+ t = dns.name.root.to_text()
+ self.failUnless(t == '.')
+
+ def testToText6(self):
+ n = dns.name.from_text('FOO bar', origin=None)
+ t = n.to_text()
+ self.failUnless(t == r'FOO\032bar')
+
+ def testToText7(self):
+ n = dns.name.from_text(r'FOO\.bar', origin=None)
+ t = n.to_text()
+ self.failUnless(t == r'FOO\.bar')
+
+ def testToText8(self):
+ n = dns.name.from_text(r'\070OO\.bar', origin=None)
+ t = n.to_text()
+ self.failUnless(t == r'FOO\.bar')
+
+ def testSlice1(self):
+ n = dns.name.from_text(r'a.b.c.', origin=None)
+ s = n[:]
+ self.failUnless(s == ('a', 'b', 'c', ''))
+
+ def testSlice2(self):
+ n = dns.name.from_text(r'a.b.c.', origin=None)
+ s = n[:2]
+ self.failUnless(s == ('a', 'b'))
+
+ def testSlice3(self):
+ n = dns.name.from_text(r'a.b.c.', origin=None)
+ s = n[2:]
+ self.failUnless(s == ('c', ''))
+
+ def testEmptyLabel1(self):
+ def bad():
+ n = dns.name.Name(['a', '', 'b'])
+ self.failUnlessRaises(dns.name.EmptyLabel, bad)
+
+ def testEmptyLabel2(self):
+ def bad():
+ n = dns.name.Name(['', 'b'])
+ self.failUnlessRaises(dns.name.EmptyLabel, bad)
+
+ def testEmptyLabel3(self):
+ n = dns.name.Name(['b', ''])
+ self.failUnless(n)
+
+ def testLongLabel(self):
+ n = dns.name.Name(['a' * 63])
+ self.failUnless(n)
+
+ def testLabelTooLong(self):
+ def bad():
+ n = dns.name.Name(['a' * 64, 'b'])
+ self.failUnlessRaises(dns.name.LabelTooLong, bad)
+
+ def testLongName(self):
+ n = dns.name.Name(['a' * 63, 'a' * 63, 'a' * 63, 'a' * 62])
+ self.failUnless(n)
+
+ def testNameTooLong(self):
+ def bad():
+ n = dns.name.Name(['a' * 63, 'a' * 63, 'a' * 63, 'a' * 63])
+ self.failUnlessRaises(dns.name.NameTooLong, bad)
+
+ def testConcat1(self):
+ n1 = dns.name.Name(['a', 'b'])
+ n2 = dns.name.Name(['c', 'd'])
+ e = dns.name.Name(['a', 'b', 'c', 'd'])
+ r = n1 + n2
+ self.failUnless(r == e)
+
+ def testConcat2(self):
+ n1 = dns.name.Name(['a', 'b'])
+ n2 = dns.name.Name([])
+ e = dns.name.Name(['a', 'b'])
+ r = n1 + n2
+ self.failUnless(r == e)
+
+ def testConcat2(self):
+ n1 = dns.name.Name([])
+ n2 = dns.name.Name(['a', 'b'])
+ e = dns.name.Name(['a', 'b'])
+ r = n1 + n2
+ self.failUnless(r == e)
+
+ def testConcat3(self):
+ n1 = dns.name.Name(['a', 'b', ''])
+ n2 = dns.name.Name([])
+ e = dns.name.Name(['a', 'b', ''])
+ r = n1 + n2
+ self.failUnless(r == e)
+
+ def testConcat4(self):
+ n1 = dns.name.Name(['a', 'b'])
+ n2 = dns.name.Name(['c', ''])
+ e = dns.name.Name(['a', 'b', 'c', ''])
+ r = n1 + n2
+ self.failUnless(r == e)
+
+ def testConcat5(self):
+ def bad():
+ n1 = dns.name.Name(['a', 'b', ''])
+ n2 = dns.name.Name(['c'])
+ r = n1 + n2
+ self.failUnlessRaises(dns.name.AbsoluteConcatenation, bad)
+
+ def testBadEscape(self):
+ def bad():
+ n = dns.name.from_text(r'a.b\0q1.c.')
+ print n
+ self.failUnlessRaises(dns.name.BadEscape, bad)
+
+ def testDigestable1(self):
+ n = dns.name.from_text('FOO.bar')
+ d = n.to_digestable()
+ self.failUnless(d == '\x03foo\x03bar\x00')
+
+ def testDigestable2(self):
+ n1 = dns.name.from_text('FOO.bar')
+ n2 = dns.name.from_text('foo.BAR.')
+ d1 = n1.to_digestable()
+ d2 = n2.to_digestable()
+ self.failUnless(d1 == d2)
+
+ def testDigestable3(self):
+ d = dns.name.root.to_digestable()
+ self.failUnless(d == '\x00')
+
+ def testDigestable4(self):
+ n = dns.name.from_text('FOO.bar', None)
+ d = n.to_digestable(dns.name.root)
+ self.failUnless(d == '\x03foo\x03bar\x00')
+
+ def testBadDigestable(self):
+ def bad():
+ n = dns.name.from_text('FOO.bar', None)
+ d = n.to_digestable()
+ self.failUnlessRaises(dns.name.NeedAbsoluteNameOrOrigin, bad)
+
+ def testToWire1(self):
+ n = dns.name.from_text('FOO.bar')
+ f = cStringIO.StringIO()
+ compress = {}
+ n.to_wire(f, compress)
+ self.failUnless(f.getvalue() == '\x03FOO\x03bar\x00')
+
+ def testToWire2(self):
+ n = dns.name.from_text('FOO.bar')
+ f = cStringIO.StringIO()
+ compress = {}
+ n.to_wire(f, compress)
+ n.to_wire(f, compress)
+ self.failUnless(f.getvalue() == '\x03FOO\x03bar\x00\xc0\x00')
+
+ def testToWire3(self):
+ n1 = dns.name.from_text('FOO.bar')
+ n2 = dns.name.from_text('foo.bar')
+ f = cStringIO.StringIO()
+ compress = {}
+ n1.to_wire(f, compress)
+ n2.to_wire(f, compress)
+ self.failUnless(f.getvalue() == '\x03FOO\x03bar\x00\xc0\x00')
+
+ def testToWire4(self):
+ n1 = dns.name.from_text('FOO.bar')
+ n2 = dns.name.from_text('a.foo.bar')
+ f = cStringIO.StringIO()
+ compress = {}
+ n1.to_wire(f, compress)
+ n2.to_wire(f, compress)
+ self.failUnless(f.getvalue() == '\x03FOO\x03bar\x00\x01\x61\xc0\x00')
+
+ def testToWire5(self):
+ n1 = dns.name.from_text('FOO.bar')
+ n2 = dns.name.from_text('a.foo.bar')
+ f = cStringIO.StringIO()
+ compress = {}
+ n1.to_wire(f, compress)
+ n2.to_wire(f, None)
+ self.failUnless(f.getvalue() == \
+ '\x03FOO\x03bar\x00\x01\x61\x03foo\x03bar\x00')
+
+ def testToWire6(self):
+ n = dns.name.from_text('FOO.bar')
+ v = n.to_wire()
+ self.failUnless(v == '\x03FOO\x03bar\x00')
+
+ def testBadToWire(self):
+ def bad():
+ n = dns.name.from_text('FOO.bar', None)
+ f = cStringIO.StringIO()
+ compress = {}
+ n.to_wire(f, compress)
+ self.failUnlessRaises(dns.name.NeedAbsoluteNameOrOrigin, bad)
+
+ def testSplit1(self):
+ n = dns.name.from_text('foo.bar.')
+ (prefix, suffix) = n.split(2)
+ ep = dns.name.from_text('foo', None)
+ es = dns.name.from_text('bar.', None)
+ self.failUnless(prefix == ep and suffix == es)
+
+ def testSplit2(self):
+ n = dns.name.from_text('foo.bar.')
+ (prefix, suffix) = n.split(1)
+ ep = dns.name.from_text('foo.bar', None)
+ es = dns.name.from_text('.', None)
+ self.failUnless(prefix == ep and suffix == es)
+
+ def testSplit3(self):
+ n = dns.name.from_text('foo.bar.')
+ (prefix, suffix) = n.split(0)
+ ep = dns.name.from_text('foo.bar.', None)
+ es = dns.name.from_text('', None)
+ self.failUnless(prefix == ep and suffix == es)
+
+ def testSplit4(self):
+ n = dns.name.from_text('foo.bar.')
+ (prefix, suffix) = n.split(3)
+ ep = dns.name.from_text('', None)
+ es = dns.name.from_text('foo.bar.', None)
+ self.failUnless(prefix == ep and suffix == es)
+
+ def testBadSplit1(self):
+ def bad():
+ n = dns.name.from_text('foo.bar.')
+ (prefix, suffix) = n.split(-1)
+ self.failUnlessRaises(ValueError, bad)
+
+ def testBadSplit2(self):
+ def bad():
+ n = dns.name.from_text('foo.bar.')
+ (prefix, suffix) = n.split(4)
+ self.failUnlessRaises(ValueError, bad)
+
+ def testRelativize1(self):
+ n = dns.name.from_text('a.foo.bar.', None)
+ o = dns.name.from_text('bar.', None)
+ e = dns.name.from_text('a.foo', None)
+ self.failUnless(n.relativize(o) == e)
+
+ def testRelativize2(self):
+ n = dns.name.from_text('a.foo.bar.', None)
+ o = n
+ e = dns.name.empty
+ self.failUnless(n.relativize(o) == e)
+
+ def testRelativize3(self):
+ n = dns.name.from_text('a.foo.bar.', None)
+ o = dns.name.from_text('blaz.', None)
+ e = n
+ self.failUnless(n.relativize(o) == e)
+
+ def testRelativize4(self):
+ n = dns.name.from_text('a.foo', None)
+ o = dns.name.root
+ e = n
+ self.failUnless(n.relativize(o) == e)
+
+ def testDerelativize1(self):
+ n = dns.name.from_text('a.foo', None)
+ o = dns.name.from_text('bar.', None)
+ e = dns.name.from_text('a.foo.bar.', None)
+ self.failUnless(n.derelativize(o) == e)
+
+ def testDerelativize2(self):
+ n = dns.name.empty
+ o = dns.name.from_text('a.foo.bar.', None)
+ e = o
+ self.failUnless(n.derelativize(o) == e)
+
+ def testDerelativize3(self):
+ n = dns.name.from_text('a.foo.bar.', None)
+ o = dns.name.from_text('blaz.', None)
+ e = n
+ self.failUnless(n.derelativize(o) == e)
+
+ def testChooseRelativity1(self):
+ n = dns.name.from_text('a.foo.bar.', None)
+ o = dns.name.from_text('bar.', None)
+ e = dns.name.from_text('a.foo', None)
+ self.failUnless(n.choose_relativity(o, True) == e)
+
+ def testChooseRelativity2(self):
+ n = dns.name.from_text('a.foo.bar.', None)
+ o = dns.name.from_text('bar.', None)
+ e = n
+ self.failUnless(n.choose_relativity(o, False) == e)
+
+ def testChooseRelativity3(self):
+ n = dns.name.from_text('a.foo', None)
+ o = dns.name.from_text('bar.', None)
+ e = dns.name.from_text('a.foo.bar.', None)
+ self.failUnless(n.choose_relativity(o, False) == e)
+
+ def testChooseRelativity4(self):
+ n = dns.name.from_text('a.foo', None)
+ o = None
+ e = n
+ self.failUnless(n.choose_relativity(o, True) == e)
+
+ def testChooseRelativity5(self):
+ n = dns.name.from_text('a.foo', None)
+ o = None
+ e = n
+ self.failUnless(n.choose_relativity(o, False) == e)
+
+ def testChooseRelativity6(self):
+ n = dns.name.from_text('a.foo.', None)
+ o = None
+ e = n
+ self.failUnless(n.choose_relativity(o, True) == e)
+
+ def testChooseRelativity7(self):
+ n = dns.name.from_text('a.foo.', None)
+ o = None
+ e = n
+ self.failUnless(n.choose_relativity(o, False) == e)
+
+ def testFromWire1(self):
+ w = '\x03foo\x00\xc0\x00'
+ (n1, cused1) = dns.name.from_wire(w, 0)
+ (n2, cused2) = dns.name.from_wire(w, cused1)
+ en1 = dns.name.from_text('foo.')
+ en2 = en1
+ ecused1 = 5
+ ecused2 = 2
+ self.failUnless(n1 == en1 and cused1 == ecused1 and \
+ n2 == en2 and cused2 == ecused2)
+
+ def testFromWire1(self):
+ w = '\x03foo\x00\x01a\xc0\x00\x01b\xc0\x05'
+ current = 0
+ (n1, cused1) = dns.name.from_wire(w, current)
+ current += cused1
+ (n2, cused2) = dns.name.from_wire(w, current)
+ current += cused2
+ (n3, cused3) = dns.name.from_wire(w, current)
+ en1 = dns.name.from_text('foo.')
+ en2 = dns.name.from_text('a.foo.')
+ en3 = dns.name.from_text('b.a.foo.')
+ ecused1 = 5
+ ecused2 = 4
+ ecused3 = 4
+ self.failUnless(n1 == en1 and cused1 == ecused1 and \
+ n2 == en2 and cused2 == ecused2 and \
+ n3 == en3 and cused3 == ecused3)
+
+ def testBadFromWire1(self):
+ def bad():
+ w = '\x03foo\xc0\x04'
+ (n, cused) = dns.name.from_wire(w, 0)
+ self.failUnlessRaises(dns.name.BadPointer, bad)
+
+ def testBadFromWire2(self):
+ def bad():
+ w = '\x03foo\xc0\x05'
+ (n, cused) = dns.name.from_wire(w, 0)
+ self.failUnlessRaises(dns.name.BadPointer, bad)
+
+ def testBadFromWire3(self):
+ def bad():
+ w = '\xbffoo'
+ (n, cused) = dns.name.from_wire(w, 0)
+ self.failUnlessRaises(dns.name.BadLabelType, bad)
+
+ def testBadFromWire4(self):
+ def bad():
+ w = '\x41foo'
+ (n, cused) = dns.name.from_wire(w, 0)
+ self.failUnlessRaises(dns.name.BadLabelType, bad)
+
+ def testParent1(self):
+ n = dns.name.from_text('foo.bar.')
+ self.failUnless(n.parent() == dns.name.from_text('bar.'))
+ self.failUnless(n.parent().parent() == dns.name.root)
+
+ def testParent2(self):
+ n = dns.name.from_text('foo.bar', None)
+ self.failUnless(n.parent() == dns.name.from_text('bar', None))
+ self.failUnless(n.parent().parent() == dns.name.empty)
+
+ def testParent3(self):
+ def bad():
+ n = dns.name.root
+ n.parent()
+ self.failUnlessRaises(dns.name.NoParent, bad)
+
+ def testParent4(self):
+ def bad():
+ n = dns.name.empty
+ n.parent()
+ self.failUnlessRaises(dns.name.NoParent, bad)
+
+ def testFromUnicode1(self):
+ n = dns.name.from_text(u'foo.bar')
+ self.failUnless(n.labels == ('foo', 'bar', ''))
+
+ def testFromUnicode2(self):
+ n = dns.name.from_text(u'foo\u1234bar.bar')
+ self.failUnless(n.labels == ('xn--foobar-r5z', 'bar', ''))
+
+ def testFromUnicodeAlternateDot1(self):
+ n = dns.name.from_text(u'foo\u3002bar')
+ self.failUnless(n.labels == ('foo', 'bar', ''))
+
+ def testFromUnicodeAlternateDot2(self):
+ n = dns.name.from_text(u'foo\uff0ebar')
+ self.failUnless(n.labels == ('foo', 'bar', ''))
+
+ def testFromUnicodeAlternateDot3(self):
+ n = dns.name.from_text(u'foo\uff61bar')
+ self.failUnless(n.labels == ('foo', 'bar', ''))
+
+ def testToUnicode1(self):
+ n = dns.name.from_text(u'foo.bar')
+ s = n.to_unicode()
+ self.failUnless(s == u'foo.bar.')
+
+ def testToUnicode2(self):
+ n = dns.name.from_text(u'foo\u1234bar.bar')
+ s = n.to_unicode()
+ self.failUnless(s == u'foo\u1234bar.bar.')
+
+ def testToUnicode3(self):
+ n = dns.name.from_text('foo.bar')
+ s = n.to_unicode()
+ self.failUnless(s == u'foo.bar.')
+
+ def testReverseIPv4(self):
+ e = dns.name.from_text('1.0.0.127.in-addr.arpa.')
+ n = dns.reversename.from_address('127.0.0.1')
+ self.failUnless(e == n)
+
+ def testReverseIPv6(self):
+ e = dns.name.from_text('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.')
+ n = dns.reversename.from_address('::1')
+ self.failUnless(e == n)
+
+ def testBadReverseIPv4(self):
+ def bad():
+ n = dns.reversename.from_address('127.0.foo.1')
+ self.failUnlessRaises(socket.error, bad)
+
+ def testBadReverseIPv6(self):
+ def bad():
+ n = dns.reversename.from_address('::1::1')
+ self.failUnlessRaises(socket.error, bad)
+
+ def testForwardIPv4(self):
+ n = dns.name.from_text('1.0.0.127.in-addr.arpa.')
+ e = '127.0.0.1'
+ text = dns.reversename.to_address(n)
+ self.failUnless(text == e)
+
+ def testForwardIPv6(self):
+ n = dns.name.from_text('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.')
+ e = '::1'
+ text = dns.reversename.to_address(n)
+ self.failUnless(text == e)
+
+ def testE164ToEnum(self):
+ text = '+1 650 555 1212'
+ e = dns.name.from_text('2.1.2.1.5.5.5.0.5.6.1.e164.arpa.')
+ n = dns.e164.from_e164(text)
+ self.failUnless(n == e)
+
+ def testEnumToE164(self):
+ n = dns.name.from_text('2.1.2.1.5.5.5.0.5.6.1.e164.arpa.')
+ e = '+16505551212'
+ text = dns.e164.to_e164(n)
+ self.failUnless(text == e)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/namedict.py b/lib/dnspython/tests/namedict.py
new file mode 100644
index 0000000000..0261142186
--- /dev/null
+++ b/lib/dnspython/tests/namedict.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.name
+import dns.namedict
+
+class NameTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.ndict = dns.namedict.NameDict()
+ n1 = dns.name.from_text('foo.bar.')
+ n2 = dns.name.from_text('bar.')
+ self.ndict[n1] = 1
+ self.ndict[n2] = 2
+ self.rndict = dns.namedict.NameDict()
+ n1 = dns.name.from_text('foo.bar', None)
+ n2 = dns.name.from_text('bar', None)
+ self.rndict[n1] = 1
+ self.rndict[n2] = 2
+
+ def testDepth(self):
+ self.failUnless(self.ndict.max_depth == 3)
+
+ def testLookup1(self):
+ k = dns.name.from_text('foo.bar.')
+ self.failUnless(self.ndict[k] == 1)
+
+ def testLookup2(self):
+ k = dns.name.from_text('foo.bar.')
+ self.failUnless(self.ndict.get_deepest_match(k)[1] == 1)
+
+ def testLookup3(self):
+ k = dns.name.from_text('a.b.c.foo.bar.')
+ self.failUnless(self.ndict.get_deepest_match(k)[1] == 1)
+
+ def testLookup4(self):
+ k = dns.name.from_text('a.b.c.bar.')
+ self.failUnless(self.ndict.get_deepest_match(k)[1] == 2)
+
+ def testLookup5(self):
+ def bad():
+ n = dns.name.from_text('a.b.c.')
+ (k, v) = self.ndict.get_deepest_match(n)
+ self.failUnlessRaises(KeyError, bad)
+
+ def testLookup6(self):
+ def bad():
+ (k, v) = self.ndict.get_deepest_match(dns.name.empty)
+ self.failUnlessRaises(KeyError, bad)
+
+ def testLookup7(self):
+ self.ndict[dns.name.empty] = 100
+ n = dns.name.from_text('a.b.c.')
+ (k, v) = self.ndict.get_deepest_match(n)
+ self.failUnless(v == 100)
+
+ def testLookup8(self):
+ def bad():
+ self.ndict['foo'] = 100
+ self.failUnlessRaises(ValueError, bad)
+
+ def testRelDepth(self):
+ self.failUnless(self.rndict.max_depth == 2)
+
+ def testRelLookup1(self):
+ k = dns.name.from_text('foo.bar', None)
+ self.failUnless(self.rndict[k] == 1)
+
+ def testRelLookup2(self):
+ k = dns.name.from_text('foo.bar', None)
+ self.failUnless(self.rndict.get_deepest_match(k)[1] == 1)
+
+ def testRelLookup3(self):
+ k = dns.name.from_text('a.b.c.foo.bar', None)
+ self.failUnless(self.rndict.get_deepest_match(k)[1] == 1)
+
+ def testRelLookup4(self):
+ k = dns.name.from_text('a.b.c.bar', None)
+ self.failUnless(self.rndict.get_deepest_match(k)[1] == 2)
+
+ def testRelLookup7(self):
+ self.rndict[dns.name.empty] = 100
+ n = dns.name.from_text('a.b.c', None)
+ (k, v) = self.rndict.get_deepest_match(n)
+ self.failUnless(v == 100)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/ntoaaton.py b/lib/dnspython/tests/ntoaaton.py
new file mode 100644
index 0000000000..77befd26e3
--- /dev/null
+++ b/lib/dnspython/tests/ntoaaton.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.exception
+import dns.ipv6
+
+class NtoAAtoNTestCase(unittest.TestCase):
+
+ def test_aton1(self):
+ a = dns.ipv6.inet_aton('::')
+ self.failUnless(a == '\x00' * 16)
+
+ def test_aton2(self):
+ a = dns.ipv6.inet_aton('::1')
+ self.failUnless(a == '\x00' * 15 + '\x01')
+
+ def test_aton3(self):
+ a = dns.ipv6.inet_aton('::10.0.0.1')
+ self.failUnless(a == '\x00' * 12 + '\x0a\x00\x00\x01')
+
+ def test_aton4(self):
+ a = dns.ipv6.inet_aton('abcd::dcba')
+ self.failUnless(a == '\xab\xcd' + '\x00' * 12 + '\xdc\xba')
+
+ def test_aton5(self):
+ a = dns.ipv6.inet_aton('1:2:3:4:5:6:7:8')
+ self.failUnless(a == \
+ '00010002000300040005000600070008'.decode('hex_codec'))
+
+ def test_bad_aton1(self):
+ def bad():
+ a = dns.ipv6.inet_aton('abcd:dcba')
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+ def test_bad_aton2(self):
+ def bad():
+ a = dns.ipv6.inet_aton('abcd::dcba::1')
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+ def test_bad_aton3(self):
+ def bad():
+ a = dns.ipv6.inet_aton('1:2:3:4:5:6:7:8:9')
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+ def test_aton1(self):
+ a = dns.ipv6.inet_aton('::')
+ self.failUnless(a == '\x00' * 16)
+
+ def test_aton2(self):
+ a = dns.ipv6.inet_aton('::1')
+ self.failUnless(a == '\x00' * 15 + '\x01')
+
+ def test_aton3(self):
+ a = dns.ipv6.inet_aton('::10.0.0.1')
+ self.failUnless(a == '\x00' * 12 + '\x0a\x00\x00\x01')
+
+ def test_aton4(self):
+ a = dns.ipv6.inet_aton('abcd::dcba')
+ self.failUnless(a == '\xab\xcd' + '\x00' * 12 + '\xdc\xba')
+
+ def test_ntoa1(self):
+ b = '00010002000300040005000600070008'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '1:2:3:4:5:6:7:8')
+
+ def test_ntoa2(self):
+ b = '\x00' * 16
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::')
+
+ def test_ntoa3(self):
+ b = '\x00' * 15 + '\x01'
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::1')
+
+ def test_ntoa4(self):
+ b = '\x80' + '\x00' * 15
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '8000::')
+
+ def test_ntoa5(self):
+ b = '\x01\xcd' + '\x00' * 12 + '\x03\xef'
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '1cd::3ef')
+
+ def test_ntoa6(self):
+ b = 'ffff00000000ffff000000000000ffff'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == 'ffff:0:0:ffff::ffff')
+
+ def test_ntoa7(self):
+ b = '00000000ffff000000000000ffffffff'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '0:0:ffff::ffff:ffff')
+
+ def test_ntoa8(self):
+ b = 'ffff0000ffff00000000ffff00000000'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == 'ffff:0:ffff::ffff:0:0')
+
+ def test_ntoa9(self):
+ b = '0000000000000000000000000a000001'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::10.0.0.1')
+
+ def test_ntoa10(self):
+ b = '0000000000000000000000010a000001'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::1:a00:1')
+
+ def test_ntoa11(self):
+ b = '00000000000000000000ffff0a000001'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::ffff:10.0.0.1')
+
+ def test_ntoa12(self):
+ b = '000000000000000000000000ffffffff'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::255.255.255.255')
+
+ def test_ntoa13(self):
+ b = '00000000000000000000ffffffffffff'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::ffff:255.255.255.255')
+
+ def test_ntoa14(self):
+ b = '0000000000000000000000000001ffff'.decode('hex_codec')
+ t = dns.ipv6.inet_ntoa(b)
+ self.failUnless(t == '::0.1.255.255')
+
+ def test_bad_ntoa1(self):
+ def bad():
+ a = dns.ipv6.inet_ntoa('')
+ self.failUnlessRaises(ValueError, bad)
+
+ def test_bad_ntoa2(self):
+ def bad():
+ a = dns.ipv6.inet_ntoa('\x00' * 17)
+ self.failUnlessRaises(ValueError, bad)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/rdtypeandclass.py b/lib/dnspython/tests/rdtypeandclass.py
new file mode 100644
index 0000000000..0c8a830e27
--- /dev/null
+++ b/lib/dnspython/tests/rdtypeandclass.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.rdataclass
+import dns.rdatatype
+
+class RdTypeAndClassTestCase(unittest.TestCase):
+
+ # Classes
+
+ def test_class_meta1(self):
+ self.failUnless(dns.rdataclass.is_metaclass(dns.rdataclass.ANY))
+
+ def test_class_meta2(self):
+ self.failUnless(not dns.rdataclass.is_metaclass(dns.rdataclass.IN))
+
+ def test_class_bytext1(self):
+ self.failUnless(dns.rdataclass.from_text('IN') == dns.rdataclass.IN)
+
+ def test_class_bytext2(self):
+ self.failUnless(dns.rdataclass.from_text('CLASS1') ==
+ dns.rdataclass.IN)
+
+ def test_class_bytext_bounds1(self):
+ self.failUnless(dns.rdataclass.from_text('CLASS0') == 0)
+ self.failUnless(dns.rdataclass.from_text('CLASS65535') == 65535)
+
+ def test_class_bytext_bounds2(self):
+ def bad():
+ junk = dns.rdataclass.from_text('CLASS65536')
+ self.failUnlessRaises(ValueError, bad)
+
+ def test_class_bytext_unknown(self):
+ def bad():
+ junk = dns.rdataclass.from_text('XXX')
+ self.failUnlessRaises(dns.rdataclass.UnknownRdataclass, bad)
+
+ def test_class_totext1(self):
+ self.failUnless(dns.rdataclass.to_text(dns.rdataclass.IN) == 'IN')
+
+    def test_class_totext2(self):
+ self.failUnless(dns.rdataclass.to_text(999) == 'CLASS999')
+
+ def test_class_totext_bounds1(self):
+ def bad():
+ junk = dns.rdataclass.to_text(-1)
+ self.failUnlessRaises(ValueError, bad)
+
+ def test_class_totext_bounds2(self):
+ def bad():
+ junk = dns.rdataclass.to_text(65536)
+ self.failUnlessRaises(ValueError, bad)
+
+ # Types
+
+ def test_type_meta1(self):
+ self.failUnless(dns.rdatatype.is_metatype(dns.rdatatype.ANY))
+
+ def test_type_meta2(self):
+ self.failUnless(dns.rdatatype.is_metatype(dns.rdatatype.OPT))
+
+ def test_type_meta3(self):
+ self.failUnless(not dns.rdatatype.is_metatype(dns.rdatatype.A))
+
+ def test_type_singleton1(self):
+ self.failUnless(dns.rdatatype.is_singleton(dns.rdatatype.SOA))
+
+ def test_type_singleton2(self):
+ self.failUnless(not dns.rdatatype.is_singleton(dns.rdatatype.A))
+
+ def test_type_bytext1(self):
+ self.failUnless(dns.rdatatype.from_text('A') == dns.rdatatype.A)
+
+ def test_type_bytext2(self):
+ self.failUnless(dns.rdatatype.from_text('TYPE1') ==
+ dns.rdatatype.A)
+
+ def test_type_bytext_bounds1(self):
+ self.failUnless(dns.rdatatype.from_text('TYPE0') == 0)
+ self.failUnless(dns.rdatatype.from_text('TYPE65535') == 65535)
+
+ def test_type_bytext_bounds2(self):
+ def bad():
+ junk = dns.rdatatype.from_text('TYPE65536')
+ self.failUnlessRaises(ValueError, bad)
+
+ def test_type_bytext_unknown(self):
+ def bad():
+ junk = dns.rdatatype.from_text('XXX')
+ self.failUnlessRaises(dns.rdatatype.UnknownRdatatype, bad)
+
+ def test_type_totext1(self):
+ self.failUnless(dns.rdatatype.to_text(dns.rdatatype.A) == 'A')
+
+    def test_type_totext2(self):
+ self.failUnless(dns.rdatatype.to_text(999) == 'TYPE999')
+
+ def test_type_totext_bounds1(self):
+ def bad():
+ junk = dns.rdatatype.to_text(-1)
+ self.failUnlessRaises(ValueError, bad)
+
+ def test_type_totext_bounds2(self):
+ def bad():
+ junk = dns.rdatatype.to_text(65536)
+ self.failUnlessRaises(ValueError, bad)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/resolver.py b/lib/dnspython/tests/resolver.py
new file mode 100644
index 0000000000..bd6dc5fbc2
--- /dev/null
+++ b/lib/dnspython/tests/resolver.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import select
+import sys
+import time
+import unittest
+
+import dns.name
+import dns.message
+import dns.query
+import dns.rdataclass
+import dns.rdatatype
+import dns.resolver
+
+resolv_conf = """
+ /t/t
+# comment 1
+; comment 2
+domain foo
+nameserver 10.0.0.1
+nameserver 10.0.0.2
+"""
+
+message_text = """id 1234
+opcode QUERY
+rcode NOERROR
+flags QR AA RD
+;QUESTION
+example. IN A
+;ANSWER
+example. 1 IN A 10.0.0.1
+;AUTHORITY
+;ADDITIONAL
+"""
+
+class BaseResolverTests(object):
+
+ if sys.platform != 'win32':
+ def testRead(self):
+ f = cStringIO.StringIO(resolv_conf)
+ r = dns.resolver.Resolver(f)
+ self.failUnless(r.nameservers == ['10.0.0.1', '10.0.0.2'] and
+ r.domain == dns.name.from_text('foo'))
+
+ def testCacheExpiration(self):
+ message = dns.message.from_text(message_text)
+ name = dns.name.from_text('example.')
+ answer = dns.resolver.Answer(name, dns.rdatatype.A, dns.rdataclass.IN,
+ message)
+ cache = dns.resolver.Cache()
+ cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer)
+ time.sleep(2)
+ self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN))
+ is None)
+
+ def testCacheCleaning(self):
+ message = dns.message.from_text(message_text)
+ name = dns.name.from_text('example.')
+ answer = dns.resolver.Answer(name, dns.rdatatype.A, dns.rdataclass.IN,
+ message)
+ cache = dns.resolver.Cache(cleaning_interval=1.0)
+ cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer)
+ time.sleep(2)
+ self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN))
+ is None)
+
+ def testZoneForName1(self):
+ name = dns.name.from_text('www.dnspython.org.')
+ ezname = dns.name.from_text('dnspython.org.')
+ zname = dns.resolver.zone_for_name(name)
+ self.failUnless(zname == ezname)
+
+ def testZoneForName2(self):
+ name = dns.name.from_text('a.b.www.dnspython.org.')
+ ezname = dns.name.from_text('dnspython.org.')
+ zname = dns.resolver.zone_for_name(name)
+ self.failUnless(zname == ezname)
+
+ def testZoneForName3(self):
+ name = dns.name.from_text('dnspython.org.')
+ ezname = dns.name.from_text('dnspython.org.')
+ zname = dns.resolver.zone_for_name(name)
+ self.failUnless(zname == ezname)
+
+ def testZoneForName4(self):
+ def bad():
+ name = dns.name.from_text('dnspython.org', None)
+ zname = dns.resolver.zone_for_name(name)
+ self.failUnlessRaises(dns.resolver.NotAbsolute, bad)
+
+class PollingMonkeyPatchMixin(object):
+ def setUp(self):
+ self.__native_polling_backend = dns.query._polling_backend
+ dns.query._set_polling_backend(self.polling_backend())
+
+ unittest.TestCase.setUp(self)
+
+ def tearDown(self):
+ dns.query._set_polling_backend(self.__native_polling_backend)
+
+ unittest.TestCase.tearDown(self)
+
+class SelectResolverTestCase(PollingMonkeyPatchMixin, BaseResolverTests, unittest.TestCase):
+ def polling_backend(self):
+ return dns.query._select_for
+
+if hasattr(select, 'poll'):
+ class PollResolverTestCase(PollingMonkeyPatchMixin, BaseResolverTests, unittest.TestCase):
+ def polling_backend(self):
+ return dns.query._poll_for
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/rrset.py b/lib/dnspython/tests/rrset.py
new file mode 100644
index 0000000000..3cafbee891
--- /dev/null
+++ b/lib/dnspython/tests/rrset.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.rrset
+
+class RRsetTestCase(unittest.TestCase):
+
+ def testEqual1(self):
+ r1 = dns.rrset.from_text('foo', 300, 'in', 'a', '10.0.0.1', '10.0.0.2')
+ r2 = dns.rrset.from_text('FOO', 300, 'in', 'a', '10.0.0.2', '10.0.0.1')
+ self.failUnless(r1 == r2)
+
+ def testEqual2(self):
+ r1 = dns.rrset.from_text('foo', 300, 'in', 'a', '10.0.0.1', '10.0.0.2')
+ r2 = dns.rrset.from_text('FOO', 600, 'in', 'a', '10.0.0.2', '10.0.0.1')
+ self.failUnless(r1 == r2)
+
+ def testNotEqual1(self):
+ r1 = dns.rrset.from_text('fooa', 30, 'in', 'a', '10.0.0.1', '10.0.0.2')
+ r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
+ self.failUnless(r1 != r2)
+
+ def testNotEqual2(self):
+ r1 = dns.rrset.from_text('foo', 30, 'in', 'a', '10.0.0.1', '10.0.0.3')
+ r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
+ self.failUnless(r1 != r2)
+
+ def testNotEqual3(self):
+ r1 = dns.rrset.from_text('foo', 30, 'in', 'a', '10.0.0.1', '10.0.0.2',
+ '10.0.0.3')
+ r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
+ self.failUnless(r1 != r2)
+
+ def testNotEqual4(self):
+ r1 = dns.rrset.from_text('foo', 30, 'in', 'a', '10.0.0.1')
+ r2 = dns.rrset.from_text('FOO', 30, 'in', 'a', '10.0.0.2', '10.0.0.1')
+ self.failUnless(r1 != r2)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/set.py b/lib/dnspython/tests/set.py
new file mode 100644
index 0000000000..e2bca51b82
--- /dev/null
+++ b/lib/dnspython/tests/set.py
@@ -0,0 +1,208 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.set
+
+# for convenience
+S = dns.set.Set
+
+class SimpleSetTestCase(unittest.TestCase):
+
+ def testLen1(self):
+ s1 = S()
+ self.failUnless(len(s1) == 0)
+
+ def testLen2(self):
+ s1 = S([1, 2, 3])
+ self.failUnless(len(s1) == 3)
+
+ def testLen3(self):
+ s1 = S([1, 2, 3, 3, 3])
+ self.failUnless(len(s1) == 3)
+
+ def testUnion1(self):
+ s1 = S([1, 2, 3])
+ s2 = S([1, 2, 3])
+ e = S([1, 2, 3])
+ self.failUnless(s1 | s2 == e)
+
+ def testUnion2(self):
+ s1 = S([1, 2, 3])
+ s2 = S([])
+ e = S([1, 2, 3])
+ self.failUnless(s1 | s2 == e)
+
+ def testUnion3(self):
+ s1 = S([1, 2, 3])
+ s2 = S([3, 4])
+ e = S([1, 2, 3, 4])
+ self.failUnless(s1 | s2 == e)
+
+ def testIntersection1(self):
+ s1 = S([1, 2, 3])
+ s2 = S([1, 2, 3])
+ e = S([1, 2, 3])
+ self.failUnless(s1 & s2 == e)
+
+ def testIntersection2(self):
+ s1 = S([0, 1, 2, 3])
+ s2 = S([1, 2, 3, 4])
+ e = S([1, 2, 3])
+ self.failUnless(s1 & s2 == e)
+
+ def testIntersection3(self):
+ s1 = S([1, 2, 3])
+ s2 = S([])
+ e = S([])
+ self.failUnless(s1 & s2 == e)
+
+ def testIntersection4(self):
+ s1 = S([1, 2, 3])
+ s2 = S([5, 4])
+ e = S([])
+ self.failUnless(s1 & s2 == e)
+
+ def testDifference1(self):
+ s1 = S([1, 2, 3])
+ s2 = S([5, 4])
+ e = S([1, 2, 3])
+ self.failUnless(s1 - s2 == e)
+
+ def testDifference2(self):
+ s1 = S([1, 2, 3])
+ s2 = S([])
+ e = S([1, 2, 3])
+ self.failUnless(s1 - s2 == e)
+
+ def testDifference3(self):
+ s1 = S([1, 2, 3])
+ s2 = S([3, 2])
+ e = S([1])
+ self.failUnless(s1 - s2 == e)
+
+ def testDifference4(self):
+ s1 = S([1, 2, 3])
+ s2 = S([3, 2, 1])
+ e = S([])
+ self.failUnless(s1 - s2 == e)
+
+ def testSubset1(self):
+ s1 = S([1, 2, 3])
+ s2 = S([3, 2, 1])
+ self.failUnless(s1.issubset(s2))
+
+ def testSubset2(self):
+ s1 = S([1, 2, 3])
+ self.failUnless(s1.issubset(s1))
+
+ def testSubset3(self):
+ s1 = S([])
+ s2 = S([1, 2, 3])
+ self.failUnless(s1.issubset(s2))
+
+ def testSubset4(self):
+ s1 = S([1])
+ s2 = S([1, 2, 3])
+ self.failUnless(s1.issubset(s2))
+
+ def testSubset5(self):
+ s1 = S([])
+ s2 = S([])
+ self.failUnless(s1.issubset(s2))
+
+ def testSubset6(self):
+ s1 = S([1, 4])
+ s2 = S([1, 2, 3])
+ self.failUnless(not s1.issubset(s2))
+
+ def testSuperset1(self):
+ s1 = S([1, 2, 3])
+ s2 = S([3, 2, 1])
+ self.failUnless(s1.issuperset(s2))
+
+ def testSuperset2(self):
+ s1 = S([1, 2, 3])
+ self.failUnless(s1.issuperset(s1))
+
+ def testSuperset3(self):
+ s1 = S([1, 2, 3])
+ s2 = S([])
+ self.failUnless(s1.issuperset(s2))
+
+ def testSuperset4(self):
+ s1 = S([1, 2, 3])
+ s2 = S([1])
+ self.failUnless(s1.issuperset(s2))
+
+ def testSuperset5(self):
+ s1 = S([])
+ s2 = S([])
+ self.failUnless(s1.issuperset(s2))
+
+ def testSuperset6(self):
+ s1 = S([1, 2, 3])
+ s2 = S([1, 4])
+ self.failUnless(not s1.issuperset(s2))
+
+ def testUpdate1(self):
+ s1 = S([1, 2, 3])
+ u = (4, 5, 6)
+ e = S([1, 2, 3, 4, 5, 6])
+ s1.update(u)
+ self.failUnless(s1 == e)
+
+ def testUpdate2(self):
+ s1 = S([1, 2, 3])
+ u = []
+ e = S([1, 2, 3])
+ s1.update(u)
+ self.failUnless(s1 == e)
+
+ def testGetitem(self):
+ s1 = S([1, 2, 3])
+ i0 = s1[0]
+ i1 = s1[1]
+ i2 = s1[2]
+ s2 = S([i0, i1, i2])
+ self.failUnless(s1 == s2)
+
+ def testGetslice(self):
+ s1 = S([1, 2, 3])
+ slice = s1[0:2]
+ self.failUnless(len(slice) == 2)
+ item = s1[2]
+ slice.append(item)
+ s2 = S(slice)
+ self.failUnless(s1 == s2)
+
+ def testDelitem(self):
+ s1 = S([1, 2, 3])
+ del s1[0]
+ i1 = s1[0]
+ i2 = s1[1]
+ self.failUnless(i1 != i2)
+ self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
+ self.failUnless(i2 == 1 or i2 == 2 or i2 == 3)
+
+ def testDelslice(self):
+ s1 = S([1, 2, 3])
+ del s1[0:2]
+ i1 = s1[0]
+ self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/tokenizer.py b/lib/dnspython/tests/tokenizer.py
new file mode 100644
index 0000000000..4f4a1bdc90
--- /dev/null
+++ b/lib/dnspython/tests/tokenizer.py
@@ -0,0 +1,190 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.exception
+import dns.tokenizer
+
+Token = dns.tokenizer.Token
+
+class TokenizerTestCase(unittest.TestCase):
+
+ def testQuotedString1(self):
+ tok = dns.tokenizer.Tokenizer(r'"foo"')
+ token = tok.get()
+ self.failUnless(token == Token(dns.tokenizer.QUOTED_STRING, 'foo'))
+
+ def testQuotedString2(self):
+ tok = dns.tokenizer.Tokenizer(r'""')
+ token = tok.get()
+ self.failUnless(token == Token(dns.tokenizer.QUOTED_STRING, ''))
+
+ def testQuotedString3(self):
+ tok = dns.tokenizer.Tokenizer(r'"\"foo\""')
+ token = tok.get()
+ self.failUnless(token == Token(dns.tokenizer.QUOTED_STRING, '"foo"'))
+
+ def testQuotedString4(self):
+ tok = dns.tokenizer.Tokenizer(r'"foo\010bar"')
+ token = tok.get()
+ self.failUnless(token == Token(dns.tokenizer.QUOTED_STRING, 'foo\x0abar'))
+
+ def testQuotedString5(self):
+ def bad():
+ tok = dns.tokenizer.Tokenizer(r'"foo')
+ token = tok.get()
+ self.failUnlessRaises(dns.exception.UnexpectedEnd, bad)
+
+ def testQuotedString6(self):
+ def bad():
+ tok = dns.tokenizer.Tokenizer(r'"foo\01')
+ token = tok.get()
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+ def testQuotedString7(self):
+ def bad():
+ tok = dns.tokenizer.Tokenizer('"foo\nbar"')
+ token = tok.get()
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+ def testEmpty1(self):
+ tok = dns.tokenizer.Tokenizer('')
+ token = tok.get()
+ self.failUnless(token.is_eof())
+
+ def testEmpty2(self):
+ tok = dns.tokenizer.Tokenizer('')
+ token1 = tok.get()
+ token2 = tok.get()
+ self.failUnless(token1.is_eof() and token2.is_eof())
+
+ def testEOL(self):
+ tok = dns.tokenizer.Tokenizer('\n')
+ token1 = tok.get()
+ token2 = tok.get()
+ self.failUnless(token1.is_eol() and token2.is_eof())
+
+ def testWS1(self):
+ tok = dns.tokenizer.Tokenizer(' \n')
+ token1 = tok.get()
+ self.failUnless(token1.is_eol())
+
+ def testWS2(self):
+ tok = dns.tokenizer.Tokenizer(' \n')
+ token1 = tok.get(want_leading=True)
+ self.failUnless(token1.is_whitespace())
+
+ def testComment1(self):
+ tok = dns.tokenizer.Tokenizer(' ;foo\n')
+ token1 = tok.get()
+ self.failUnless(token1.is_eol())
+
+ def testComment2(self):
+ tok = dns.tokenizer.Tokenizer(' ;foo\n')
+ token1 = tok.get(want_comment = True)
+ token2 = tok.get()
+ self.failUnless(token1 == Token(dns.tokenizer.COMMENT, 'foo') and
+ token2.is_eol())
+
+ def testComment3(self):
+ tok = dns.tokenizer.Tokenizer(' ;foo bar\n')
+ token1 = tok.get(want_comment = True)
+ token2 = tok.get()
+ self.failUnless(token1 == Token(dns.tokenizer.COMMENT, 'foo bar') and
+ token2.is_eol())
+
+ def testMultiline1(self):
+ tok = dns.tokenizer.Tokenizer('( foo\n\n bar\n)')
+ tokens = list(iter(tok))
+ self.failUnless(tokens == [Token(dns.tokenizer.IDENTIFIER, 'foo'),
+ Token(dns.tokenizer.IDENTIFIER, 'bar')])
+
+ def testMultiline2(self):
+ tok = dns.tokenizer.Tokenizer('( foo\n\n bar\n)\n')
+ tokens = list(iter(tok))
+ self.failUnless(tokens == [Token(dns.tokenizer.IDENTIFIER, 'foo'),
+ Token(dns.tokenizer.IDENTIFIER, 'bar'),
+ Token(dns.tokenizer.EOL, '\n')])
+ def testMultiline3(self):
+ def bad():
+ tok = dns.tokenizer.Tokenizer('foo)')
+ tokens = list(iter(tok))
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+ def testMultiline4(self):
+ def bad():
+ tok = dns.tokenizer.Tokenizer('((foo)')
+ tokens = list(iter(tok))
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+ def testUnget1(self):
+ tok = dns.tokenizer.Tokenizer('foo')
+ t1 = tok.get()
+ tok.unget(t1)
+ t2 = tok.get()
+ self.failUnless(t1 == t2 and t1.ttype == dns.tokenizer.IDENTIFIER and \
+ t1.value == 'foo')
+
+ def testUnget2(self):
+ def bad():
+ tok = dns.tokenizer.Tokenizer('foo')
+ t1 = tok.get()
+ tok.unget(t1)
+ tok.unget(t1)
+ self.failUnlessRaises(dns.tokenizer.UngetBufferFull, bad)
+
+ def testGetEOL1(self):
+ tok = dns.tokenizer.Tokenizer('\n')
+ t = tok.get_eol()
+ self.failUnless(t == '\n')
+
+ def testGetEOL2(self):
+ tok = dns.tokenizer.Tokenizer('')
+ t = tok.get_eol()
+ self.failUnless(t == '')
+
+ def testEscapedDelimiter1(self):
+ tok = dns.tokenizer.Tokenizer(r'ch\ ld')
+ t = tok.get()
+ self.failUnless(t.ttype == dns.tokenizer.IDENTIFIER and t.value == r'ch\ ld')
+
+ def testEscapedDelimiter2(self):
+ tok = dns.tokenizer.Tokenizer(r'ch\032ld')
+ t = tok.get()
+ self.failUnless(t.ttype == dns.tokenizer.IDENTIFIER and t.value == r'ch\032ld')
+
+ def testEscapedDelimiter3(self):
+ tok = dns.tokenizer.Tokenizer(r'ch\ild')
+ t = tok.get()
+ self.failUnless(t.ttype == dns.tokenizer.IDENTIFIER and t.value == r'ch\ild')
+
+ def testEscapedDelimiter1u(self):
+ tok = dns.tokenizer.Tokenizer(r'ch\ ld')
+ t = tok.get().unescape()
+ self.failUnless(t.ttype == dns.tokenizer.IDENTIFIER and t.value == r'ch ld')
+
+ def testEscapedDelimiter2u(self):
+ tok = dns.tokenizer.Tokenizer(r'ch\032ld')
+ t = tok.get().unescape()
+ self.failUnless(t.ttype == dns.tokenizer.IDENTIFIER and t.value == 'ch ld')
+
+ def testEscapedDelimiter3u(self):
+ tok = dns.tokenizer.Tokenizer(r'ch\ild')
+ t = tok.get().unescape()
+ self.failUnless(t.ttype == dns.tokenizer.IDENTIFIER and t.value == r'child')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/update.py b/lib/dnspython/tests/update.py
new file mode 100644
index 0000000000..5f7b31f23f
--- /dev/null
+++ b/lib/dnspython/tests/update.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+
+import dns.update
+import dns.rdata
+import dns.rdataset
+
+goodhex = '0001 2800 0001 0005 0007 0000' \
+ '076578616d706c6500 0006 0001' \
+ '03666f6fc00c 00ff 00ff 00000000 0000' \
+ 'c019 0001 00ff 00000000 0000' \
+ '03626172c00c 0001 0001 00000000 0004 0a000005' \
+ '05626c617a32c00c 00ff 00fe 00000000 0000' \
+ 'c049 0001 00fe 00000000 0000' \
+ 'c019 0001 00ff 00000000 0000' \
+ 'c019 0001 0001 0000012c 0004 0a000001' \
+ 'c019 0001 0001 0000012c 0004 0a000002' \
+ 'c035 0001 0001 0000012c 0004 0a000003' \
+ 'c035 0001 00fe 00000000 0004 0a000004' \
+ '04626c617ac00c 0001 00ff 00000000 0000' \
+ 'c049 00ff 00ff 00000000 0000'
+
+goodwire = goodhex.replace(' ', '').decode('hex_codec')
+
+update_text="""id 1
+opcode UPDATE
+rcode NOERROR
+;ZONE
+example. IN SOA
+;PREREQ
+foo ANY ANY
+foo ANY A
+bar 0 IN A 10.0.0.5
+blaz2 NONE ANY
+blaz2 NONE A
+;UPDATE
+foo ANY A
+foo 300 IN A 10.0.0.1
+foo 300 IN A 10.0.0.2
+bar 300 IN A 10.0.0.3
+bar 0 NONE A 10.0.0.4
+blaz ANY A
+blaz2 ANY ANY
+"""
+
+class UpdateTestCase(unittest.TestCase):
+
+ def test_to_wire1(self):
+ update = dns.update.Update('example')
+ update.id = 1
+ update.present('foo')
+ update.present('foo', 'a')
+ update.present('bar', 'a', '10.0.0.5')
+ update.absent('blaz2')
+ update.absent('blaz2', 'a')
+ update.replace('foo', 300, 'a', '10.0.0.1', '10.0.0.2')
+ update.add('bar', 300, 'a', '10.0.0.3')
+ update.delete('bar', 'a', '10.0.0.4')
+ update.delete('blaz','a')
+ update.delete('blaz2')
+ self.failUnless(update.to_wire() == goodwire)
+
+ def test_to_wire2(self):
+ update = dns.update.Update('example')
+ update.id = 1
+ update.present('foo')
+ update.present('foo', 'a')
+ update.present('bar', 'a', '10.0.0.5')
+ update.absent('blaz2')
+ update.absent('blaz2', 'a')
+ update.replace('foo', 300, 'a', '10.0.0.1', '10.0.0.2')
+ update.add('bar', 300, dns.rdata.from_text(1, 1, '10.0.0.3'))
+ update.delete('bar', 'a', '10.0.0.4')
+ update.delete('blaz','a')
+ update.delete('blaz2')
+ self.failUnless(update.to_wire() == goodwire)
+
+ def test_to_wire3(self):
+ update = dns.update.Update('example')
+ update.id = 1
+ update.present('foo')
+ update.present('foo', 'a')
+ update.present('bar', 'a', '10.0.0.5')
+ update.absent('blaz2')
+ update.absent('blaz2', 'a')
+ update.replace('foo', 300, 'a', '10.0.0.1', '10.0.0.2')
+ update.add('bar', dns.rdataset.from_text(1, 1, 300, '10.0.0.3'))
+ update.delete('bar', 'a', '10.0.0.4')
+ update.delete('blaz','a')
+ update.delete('blaz2')
+ self.failUnless(update.to_wire() == goodwire)
+
+ def test_from_text1(self):
+ update = dns.message.from_text(update_text)
+ w = update.to_wire(origin=dns.name.from_text('example'),
+ want_shuffle=False)
+ self.failUnless(w == goodwire)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/tests/zone.py b/lib/dnspython/tests/zone.py
new file mode 100644
index 0000000000..075be0a48a
--- /dev/null
+++ b/lib/dnspython/tests/zone.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import cStringIO
+import filecmp
+import os
+import unittest
+
+import dns.exception
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rrset
+import dns.zone
+
+example_text = """$TTL 3600
+$ORIGIN example.
+@ soa foo bar 1 2 3 4 5
+@ ns ns1
+@ ns ns2
+ns1 a 10.0.0.1
+ns2 a 10.0.0.2
+$TTL 300
+$ORIGIN foo.example.
+bar mx 0 blaz
+"""
+
+example_text_output = """@ 3600 IN SOA foo bar 1 2 3 4 5
+@ 3600 IN NS ns1
+@ 3600 IN NS ns2
+bar.foo 300 IN MX 0 blaz.foo
+ns1 3600 IN A 10.0.0.1
+ns2 3600 IN A 10.0.0.2
+"""
+
+something_quite_similar = """@ 3600 IN SOA foo bar 1 2 3 4 5
+@ 3600 IN NS ns1
+@ 3600 IN NS ns2
+bar.foo 300 IN MX 0 blaz.foo
+ns1 3600 IN A 10.0.0.1
+ns2 3600 IN A 10.0.0.3
+"""
+
+something_different = """@ 3600 IN SOA fooa bar 1 2 3 4 5
+@ 3600 IN NS ns11
+@ 3600 IN NS ns21
+bar.fooa 300 IN MX 0 blaz.fooa
+ns11 3600 IN A 10.0.0.11
+ns21 3600 IN A 10.0.0.21
+"""
+
+ttl_example_text = """$TTL 1h
+$ORIGIN example.
+@ soa foo bar 1 2 3 4 5
+@ ns ns1
+@ ns ns2
+ns1 1d1s a 10.0.0.1
+ns2 1w1D1h1m1S a 10.0.0.2
+"""
+
+no_soa_text = """$TTL 1h
+$ORIGIN example.
+@ ns ns1
+@ ns ns2
+ns1 1d1s a 10.0.0.1
+ns2 1w1D1h1m1S a 10.0.0.2
+"""
+
+no_ns_text = """$TTL 1h
+$ORIGIN example.
+@ soa foo bar 1 2 3 4 5
+"""
+
+include_text = """$INCLUDE "example"
+"""
+
+bad_directive_text = """$FOO bar
+$ORIGIN example.
+@ soa foo bar 1 2 3 4 5
+@ ns ns1
+@ ns ns2
+ns1 1d1s a 10.0.0.1
+ns2 1w1D1h1m1S a 10.0.0.2
+"""
+
+_keep_output = False
+
+class ZoneTestCase(unittest.TestCase):
+
+ def testFromFile1(self):
+ z = dns.zone.from_file('example', 'example')
+ ok = False
+ try:
+ z.to_file('example1.out', nl='\x0a')
+ ok = filecmp.cmp('example1.out', 'example1.good')
+ finally:
+ if not _keep_output:
+ os.unlink('example1.out')
+ self.failUnless(ok)
+
+ def testFromFile2(self):
+ z = dns.zone.from_file('example', 'example', relativize=False)
+ ok = False
+ try:
+ z.to_file('example2.out', relativize=False, nl='\x0a')
+ ok = filecmp.cmp('example2.out', 'example2.good')
+ finally:
+ if not _keep_output:
+ os.unlink('example2.out')
+ self.failUnless(ok)
+
+ def testFromText(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ f = cStringIO.StringIO()
+ names = z.nodes.keys()
+ names.sort()
+ for n in names:
+ print >> f, z[n].to_text(n)
+ self.failUnless(f.getvalue() == example_text_output)
+
+ def testTorture1(self):
+ #
+ # Read a zone containing all our supported RR types, and
+ # for each RR in the zone, convert the rdata into wire format
+ # and then back out, and see if we get equal rdatas.
+ #
+ f = cStringIO.StringIO()
+ o = dns.name.from_text('example.')
+ z = dns.zone.from_file('example', o)
+ for (name, node) in z.iteritems():
+ for rds in node:
+ for rd in rds:
+ f.seek(0)
+ f.truncate()
+ rd.to_wire(f, origin=o)
+ wire = f.getvalue()
+ rd2 = dns.rdata.from_wire(rds.rdclass, rds.rdtype,
+ wire, 0, len(wire),
+ origin = o)
+ self.failUnless(rd == rd2)
+
+ def testEqual(self):
+ z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
+ z2 = dns.zone.from_text(example_text_output, 'example.',
+ relativize=True)
+ self.failUnless(z1 == z2)
+
+ def testNotEqual1(self):
+ z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
+ z2 = dns.zone.from_text(something_quite_similar, 'example.',
+ relativize=True)
+ self.failUnless(z1 != z2)
+
+ def testNotEqual2(self):
+ z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
+ z2 = dns.zone.from_text(something_different, 'example.',
+ relativize=True)
+ self.failUnless(z1 != z2)
+
+ def testNotEqual3(self):
+ z1 = dns.zone.from_text(example_text, 'example.', relativize=True)
+ z2 = dns.zone.from_text(something_different, 'example2.',
+ relativize=True)
+ self.failUnless(z1 != z2)
+
+ def testFindRdataset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rds = z.find_rdataset('@', 'soa')
+ exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
+ self.failUnless(rds == exrds)
+
+ def testFindRdataset2(self):
+ def bad():
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rds = z.find_rdataset('@', 'loc')
+ self.failUnlessRaises(KeyError, bad)
+
+ def testFindRRset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rrs = z.find_rrset('@', 'soa')
+ exrrs = dns.rrset.from_text('@', 300, 'IN', 'SOA', 'foo bar 1 2 3 4 5')
+ self.failUnless(rrs == exrrs)
+
+ def testFindRRset2(self):
+ def bad():
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rrs = z.find_rrset('@', 'loc')
+ self.failUnlessRaises(KeyError, bad)
+
+ def testGetRdataset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rds = z.get_rdataset('@', 'soa')
+ exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
+ self.failUnless(rds == exrds)
+
+ def testGetRdataset2(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rds = z.get_rdataset('@', 'loc')
+ self.failUnless(rds == None)
+
+ def testGetRRset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rrs = z.get_rrset('@', 'soa')
+ exrrs = dns.rrset.from_text('@', 300, 'IN', 'SOA', 'foo bar 1 2 3 4 5')
+ self.failUnless(rrs == exrrs)
+
+ def testGetRRset2(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rrs = z.get_rrset('@', 'loc')
+ self.failUnless(rrs == None)
+
+ def testReplaceRdataset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rdataset = dns.rdataset.from_text('in', 'ns', 300, 'ns3', 'ns4')
+ z.replace_rdataset('@', rdataset)
+ rds = z.get_rdataset('@', 'ns')
+ self.failUnless(rds is rdataset)
+
+ def testReplaceRdataset2(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ rdataset = dns.rdataset.from_text('in', 'txt', 300, '"foo"')
+ z.replace_rdataset('@', rdataset)
+ rds = z.get_rdataset('@', 'txt')
+ self.failUnless(rds is rdataset)
+
+ def testDeleteRdataset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ z.delete_rdataset('@', 'ns')
+ rds = z.get_rdataset('@', 'ns')
+ self.failUnless(rds is None)
+
+ def testDeleteRdataset2(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ z.delete_rdataset('ns1', 'a')
+ node = z.get_node('ns1')
+ self.failUnless(node is None)
+
+ def testNodeFindRdataset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ node = z['@']
+ rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
+ exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
+ self.failUnless(rds == exrds)
+
+ def testNodeFindRdataset2(self):
+ def bad():
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ node = z['@']
+ rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
+ self.failUnlessRaises(KeyError, bad)
+
+ def testNodeGetRdataset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ node = z['@']
+ rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
+ exrds = dns.rdataset.from_text('IN', 'SOA', 300, 'foo bar 1 2 3 4 5')
+ self.failUnless(rds == exrds)
+
+ def testNodeGetRdataset2(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ node = z['@']
+ rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
+ self.failUnless(rds == None)
+
+ def testNodeDeleteRdataset1(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ node = z['@']
+ rds = node.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
+ rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
+ self.failUnless(rds == None)
+
+ def testNodeDeleteRdataset2(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ node = z['@']
+ rds = node.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
+ rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.LOC)
+ self.failUnless(rds == None)
+
+ def testIterateRdatasets(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ ns = [n for n, r in z.iterate_rdatasets('A')]
+ ns.sort()
+ self.failUnless(ns == [dns.name.from_text('ns1', None),
+ dns.name.from_text('ns2', None)])
+
+ def testIterateAllRdatasets(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ ns = [n for n, r in z.iterate_rdatasets()]
+ ns.sort()
+ self.failUnless(ns == [dns.name.from_text('@', None),
+ dns.name.from_text('@', None),
+ dns.name.from_text('bar.foo', None),
+ dns.name.from_text('ns1', None),
+ dns.name.from_text('ns2', None)])
+
+ def testIterateRdatas(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ l = list(z.iterate_rdatas('A'))
+ l.sort()
+ exl = [(dns.name.from_text('ns1', None),
+ 3600,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
+ '10.0.0.1')),
+ (dns.name.from_text('ns2', None),
+ 3600,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
+ '10.0.0.2'))]
+ self.failUnless(l == exl)
+
+ def testIterateAllRdatas(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ l = list(z.iterate_rdatas())
+ l.sort()
+ exl = [(dns.name.from_text('@', None),
+ 3600,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
+ 'ns1')),
+ (dns.name.from_text('@', None),
+ 3600,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NS,
+ 'ns2')),
+ (dns.name.from_text('@', None),
+ 3600,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SOA,
+ 'foo bar 1 2 3 4 5')),
+ (dns.name.from_text('bar.foo', None),
+ 300,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
+ '0 blaz.foo')),
+ (dns.name.from_text('ns1', None),
+ 3600,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
+ '10.0.0.1')),
+ (dns.name.from_text('ns2', None),
+ 3600,
+ dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
+ '10.0.0.2'))]
+ self.failUnless(l == exl)
+
+ def testTTLs(self):
+ z = dns.zone.from_text(ttl_example_text, 'example.', relativize=True)
+ n = z['@']
+ rds = n.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
+ self.failUnless(rds.ttl == 3600)
+ n = z['ns1']
+ rds = n.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
+ self.failUnless(rds.ttl == 86401)
+ n = z['ns2']
+ rds = n.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
+ self.failUnless(rds.ttl == 694861)
+
+ def testNoSOA(self):
+ def bad():
+ z = dns.zone.from_text(no_soa_text, 'example.',
+ relativize=True)
+ self.failUnlessRaises(dns.zone.NoSOA, bad)
+
+ def testNoNS(self):
+ def bad():
+ z = dns.zone.from_text(no_ns_text, 'example.',
+ relativize=True)
+ self.failUnlessRaises(dns.zone.NoNS, bad)
+
+ def testInclude(self):
+ z1 = dns.zone.from_text(include_text, 'example.', relativize=True,
+ allow_include=True)
+ z2 = dns.zone.from_file('example', 'example.', relativize=True)
+ self.failUnless(z1 == z2)
+
+ def testBadDirective(self):
+ def bad():
+ z = dns.zone.from_text(bad_directive_text, 'example.',
+ relativize=True)
+ self.failUnlessRaises(dns.exception.SyntaxError, bad)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/lib/dnspython/util/COPYRIGHT b/lib/dnspython/util/COPYRIGHT
new file mode 100644
index 0000000000..7390363fbb
--- /dev/null
+++ b/lib/dnspython/util/COPYRIGHT
@@ -0,0 +1,14 @@
+Copyright (C) @YEARS@ Nominum, Inc.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose with or without fee is hereby granted,
+provided that the above copyright notice and this permission notice
+appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/lib/dnspython/util/copyrights b/lib/dnspython/util/copyrights
new file mode 100644
index 0000000000..ae578381cb
--- /dev/null
+++ b/lib/dnspython/util/copyrights
@@ -0,0 +1,117 @@
+./.gitignore X 2009
+./ChangeLog X 2003,2004,2005,2006,2007
+./LICENSE X 2003,2004,2005,2006,2007
+./MANIFEST.in X 2003,2004,2005,2006,2007
+./Makefile MAKE 2003,2004,2005,2006,2007,2009
+./README X 2003,2004,2005,2006,2007
+./TODO X 2003,2004,2005,2006,2007
+./dns/__init__.py PYTHON 2003,2004,2005,2006,2007,2009
+./dns/dnssec.py PYTHON 2003,2004,2005,2006,2007,2009
+./dns/e164.py PYTHON 2006,2007,2009
+./dns/edns.py PYTHON 2009
+./dns/entropy.py PYTHON 2009
+./dns/exception.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/flags.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/inet.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/ipv4.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/ipv6.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/message.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/name.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/namedict.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/node.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/opcode.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/query.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rcode.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/rdata.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/rdataclass.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/rdataset.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/rdatatype.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/AFSDB.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/CERT.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/CNAME.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/DLV.py PYTHON 2009,2010
+./dns/rdtypes/ANY/DNAME.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/DNSKEY.py PYTHON 2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/DS.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/GPOS.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/HINFO.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/HIP.py PYTHON 2010
+./dns/rdtypes/ANY/ISDN.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/KEY.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/LOC.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/MX.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/NS.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/NSEC.py PYTHON 2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/NSEC3.py PYTHON 2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/NSEC3PARAM.py PYTHON 2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/NXT.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/PTR.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/RP.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/RRSIG.py PYTHON 2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/RT.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/SIG.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/SOA.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/SPF.py PYTHON 2006,2007,2009,2010
+./dns/rdtypes/ANY/SSHFP.py PYTHON 2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/TXT.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/X25.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/ANY/__init__.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/A.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/AAAA.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/APL.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/DHCID.py PYTHON 2006,2007,2009,2010
+./dns/rdtypes/IN/IPSECKEY.py PYTHON 2006,2007,2009,2010
+./dns/rdtypes/IN/KX.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/NAPTR.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/NSAP.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/NSAP_PTR.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/PX.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/SRV.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/WKS.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/IN/__init__.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/__init__.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/dsbase.py PYTHON 2010
+./dns/rdtypes/keybase.py PYTHON 2004,2005,2006,2007,2009,2010
+./dns/rdtypes/mxbase.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/nsbase.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/rdtypes/sigbase.py PYTHON 2004,2005,2006,2007,2009,2010
+./dns/rdtypes/txtbase.py PYTHON 2006,2007,2009,2010
+./dns/renderer.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/resolver.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/reversename.py PYTHON 2006,2007,2009,2010
+./dns/rrset.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/set.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/tokenizer.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/tsig.py PYTHON 2001,2002,2003,2004,2005,2006,2007,2009,2010
+./dns/tsigkeyring.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/ttl.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/update.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/version.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./dns/zone.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./examples/ddns.py X 2006,2007
+./examples/e164.py X 2006,2007
+./examples/mx.py X 2003,2004,2005,2006,2007
+./examples/name.py X 2003,2004,2005,2006,2007
+./examples/reverse.py X 2003,2004,2005,2006,2007
+./examples/reverse_name.py X 2006,2007
+./examples/xfr.py X 2003,2004,2005,2006,2007
+./setup.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/Makefile MAKE 2003,2004,2005,2006,2007,2009,2010
+./tests/bugs.py PYTHON 2006,2007,2009,2010
+./tests/example X 2003,2004,2005,2006,2007
+./tests/example1.good X 2003,2004,2005,2006,2007
+./tests/example2.good X 2003,2004,2005,2006,2007
+./tests/flags.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/message.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/name.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/namedict.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/ntoaaton.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/rdtypeandclass.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/resolver.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/rrset.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/set.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/tokenizer.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/update.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./tests/zone.py PYTHON 2003,2004,2005,2006,2007,2009,2010
+./util/COPYRIGHT X 2003,2004,2005,2006,2007
+./util/copyrights X 2003,2004,2005,2006,2007
diff --git a/lib/iniparser/AUTHORS b/lib/iniparser/AUTHORS
new file mode 100644
index 0000000000..f3dc876574
--- /dev/null
+++ b/lib/iniparser/AUTHORS
@@ -0,0 +1,5 @@
+Author for this package:
+Nicolas Devillard <ndevilla@free.fr>
+
+Many thanks to the many people who contributed ideas, code, suggestions,
+corrections, enhancements.
diff --git a/lib/iniparser/INSTALL b/lib/iniparser/INSTALL
new file mode 100644
index 0000000000..0d319b2670
--- /dev/null
+++ b/lib/iniparser/INSTALL
@@ -0,0 +1,12 @@
+
+iniParser installation instructions
+-----------------------------------
+
+1. Modify the Makefile to suit your environment.
+2. Type 'make' to make the library.
+3. Type 'make check' to make the test program.
+4. Type 'test/iniexample' to launch the test program.
+
+Enjoy!
+N. Devillard
+Tue Jan 14 11:52:03 CET 2003
diff --git a/lib/iniparser/LICENSE b/lib/iniparser/LICENSE
new file mode 100644
index 0000000000..2eae408337
--- /dev/null
+++ b/lib/iniparser/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2000 by Nicolas Devillard.
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
diff --git a/lib/iniparser/Makefile b/lib/iniparser/Makefile
new file mode 100644
index 0000000000..bc5d61211a
--- /dev/null
+++ b/lib/iniparser/Makefile
@@ -0,0 +1,63 @@
+#
+# iniparser Makefile
+#
+
+# Compiler settings
+CC = gcc
+CFLAGS = -O3 -fPIC
+
+# Ar settings to build the library
+AR = ar
+ARFLAGS = rcv
+
+SHLD = ${CC} ${CFLAGS}
+LDSHFLAGS = -shared -Wl,-Bsymbolic -Wl,-rpath -Wl,/usr/lib -Wl,-rpath,/usr/lib
+LDFLAGS = -Wl,-rpath -Wl,/usr/lib -Wl,-rpath,/usr/lib
+
+# Set RANLIB to ranlib on systems that require it (Sun OS < 4, Mac OSX)
+# RANLIB = ranlib
+RANLIB = true
+
+RM = rm -f
+
+
+# Implicit rules
+
+SUFFIXES = .o .c .h .a .so .sl
+
+COMPILE.c=$(CC) $(CFLAGS) -c
+.c.o:
+ @(echo "compiling $< ...")
+ @($(COMPILE.c) -o $@ $<)
+
+
+SRCS = src/iniparser.c \
+ src/dictionary.c \
+ src/strlib.c
+
+OBJS = $(SRCS:.c=.o)
+
+
+default: libiniparser.a libiniparser.so
+
+libiniparser.a: $(OBJS)
+ @($(AR) $(ARFLAGS) libiniparser.a $(OBJS))
+ @($(RANLIB) libiniparser.a)
+
+libiniparser.so: $(OBJS)
+ @$(SHLD) $(LDSHFLAGS) -o $@.0 $(OBJS) $(LDFLAGS) \
+ -Wl,-soname=`basename $@`.0
+
+clean:
+ $(RM) $(OBJS)
+
+veryclean:
+ $(RM) $(OBJS) libiniparser.a libiniparser.so*
+ rm -rf ./html ; mkdir html
+ cd test ; $(MAKE) veryclean
+
+docs:
+ @(cd doc ; $(MAKE))
+
+check:
+ @(cd test ; $(MAKE))
diff --git a/lib/iniparser/README b/lib/iniparser/README
new file mode 100644
index 0000000000..466d079ba5
--- /dev/null
+++ b/lib/iniparser/README
@@ -0,0 +1,11 @@
+
+Welcome to iniParser!
+
+This modules offers parsing of ini files from the C level.
+See a complete documentation in HTML format, from this directory
+open the file html/index.html with any HTML-capable browser.
+
+Enjoy!
+
+N.Devillard
+Thu Nov 17 12:31:42 CET 2005
diff --git a/lib/iniparser/html/doxygen.css b/lib/iniparser/html/doxygen.css
new file mode 100644
index 0000000000..c7db1a8a04
--- /dev/null
+++ b/lib/iniparser/html/doxygen.css
@@ -0,0 +1,358 @@
+BODY,H1,H2,H3,H4,H5,H6,P,CENTER,TD,TH,UL,DL,DIV {
+ font-family: Geneva, Arial, Helvetica, sans-serif;
+}
+BODY,TD {
+ font-size: 90%;
+}
+H1 {
+ text-align: center;
+ font-size: 160%;
+}
+H2 {
+ font-size: 120%;
+}
+H3 {
+ font-size: 100%;
+}
+CAPTION { font-weight: bold }
+DIV.qindex {
+ width: 100%;
+ background-color: #e8eef2;
+ border: 1px solid #84b0c7;
+ text-align: center;
+ margin: 2px;
+ padding: 2px;
+ line-height: 140%;
+}
+DIV.nav {
+ width: 100%;
+ background-color: #e8eef2;
+ border: 1px solid #84b0c7;
+ text-align: center;
+ margin: 2px;
+ padding: 2px;
+ line-height: 140%;
+}
+DIV.navtab {
+ background-color: #e8eef2;
+ border: 1px solid #84b0c7;
+ text-align: center;
+ margin: 2px;
+ margin-right: 15px;
+ padding: 2px;
+}
+TD.navtab {
+ font-size: 70%;
+}
+A.qindex {
+ text-decoration: none;
+ font-weight: bold;
+ color: #1A419D;
+}
+A.qindex:visited {
+ text-decoration: none;
+ font-weight: bold;
+ color: #1A419D
+}
+A.qindex:hover {
+ text-decoration: none;
+ background-color: #ddddff;
+}
+A.qindexHL {
+ text-decoration: none;
+ font-weight: bold;
+ background-color: #6666cc;
+ color: #ffffff;
+ border: 1px double #9295C2;
+}
+A.qindexHL:hover {
+ text-decoration: none;
+ background-color: #6666cc;
+ color: #ffffff;
+}
+A.qindexHL:visited { text-decoration: none; background-color: #6666cc; color: #ffffff }
+A.el { text-decoration: none; font-weight: bold }
+A.elRef { font-weight: bold }
+A.code:link { text-decoration: none; font-weight: normal; color: #0000FF}
+A.code:visited { text-decoration: none; font-weight: normal; color: #0000FF}
+A.codeRef:link { font-weight: normal; color: #0000FF}
+A.codeRef:visited { font-weight: normal; color: #0000FF}
+A:hover { text-decoration: none; background-color: #f2f2ff }
+DL.el { margin-left: -1cm }
+.fragment {
+ font-family: monospace, fixed;
+ font-size: 95%;
+}
+PRE.fragment {
+ border: 1px solid #CCCCCC;
+ background-color: #f5f5f5;
+ margin-top: 4px;
+ margin-bottom: 4px;
+ margin-left: 2px;
+ margin-right: 8px;
+ padding-left: 6px;
+ padding-right: 6px;
+ padding-top: 4px;
+ padding-bottom: 4px;
+}
+DIV.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px }
+
+DIV.groupHeader {
+ margin-left: 16px;
+ margin-top: 12px;
+ margin-bottom: 6px;
+ font-weight: bold;
+}
+DIV.groupText { margin-left: 16px; font-style: italic; font-size: 90% }
+BODY {
+ background: white;
+ color: black;
+ margin-right: 20px;
+ margin-left: 20px;
+}
+TD.indexkey {
+ background-color: #e8eef2;
+ font-weight: bold;
+ padding-right : 10px;
+ padding-top : 2px;
+ padding-left : 10px;
+ padding-bottom : 2px;
+ margin-left : 0px;
+ margin-right : 0px;
+ margin-top : 2px;
+ margin-bottom : 2px;
+ border: 1px solid #CCCCCC;
+}
+TD.indexvalue {
+ background-color: #e8eef2;
+ font-style: italic;
+ padding-right : 10px;
+ padding-top : 2px;
+ padding-left : 10px;
+ padding-bottom : 2px;
+ margin-left : 0px;
+ margin-right : 0px;
+ margin-top : 2px;
+ margin-bottom : 2px;
+ border: 1px solid #CCCCCC;
+}
+TR.memlist {
+ background-color: #f0f0f0;
+}
+P.formulaDsp { text-align: center; }
+IMG.formulaDsp { }
+IMG.formulaInl { vertical-align: middle; }
+SPAN.keyword { color: #008000 }
+SPAN.keywordtype { color: #604020 }
+SPAN.keywordflow { color: #e08000 }
+SPAN.comment { color: #800000 }
+SPAN.preprocessor { color: #806020 }
+SPAN.stringliteral { color: #002080 }
+SPAN.charliteral { color: #008080 }
+.mdescLeft {
+ padding: 0px 8px 4px 8px;
+ font-size: 80%;
+ font-style: italic;
+ background-color: #FAFAFA;
+ border-top: 1px none #E0E0E0;
+ border-right: 1px none #E0E0E0;
+ border-bottom: 1px none #E0E0E0;
+ border-left: 1px none #E0E0E0;
+ margin: 0px;
+}
+.mdescRight {
+ padding: 0px 8px 4px 8px;
+ font-size: 80%;
+ font-style: italic;
+ background-color: #FAFAFA;
+ border-top: 1px none #E0E0E0;
+ border-right: 1px none #E0E0E0;
+ border-bottom: 1px none #E0E0E0;
+ border-left: 1px none #E0E0E0;
+ margin: 0px;
+}
+.memItemLeft {
+ padding: 1px 0px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: solid;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memItemRight {
+ padding: 1px 8px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: solid;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memTemplItemLeft {
+ padding: 1px 0px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: none;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memTemplItemRight {
+ padding: 1px 8px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: none;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.memTemplParams {
+ padding: 1px 0px 0px 8px;
+ margin: 4px;
+ border-top-width: 1px;
+ border-right-width: 1px;
+ border-bottom-width: 1px;
+ border-left-width: 1px;
+ border-top-color: #E0E0E0;
+ border-right-color: #E0E0E0;
+ border-bottom-color: #E0E0E0;
+ border-left-color: #E0E0E0;
+ border-top-style: solid;
+ border-right-style: none;
+ border-bottom-style: none;
+ border-left-style: none;
+ color: #606060;
+ background-color: #FAFAFA;
+ font-size: 80%;
+}
+.search { color: #003399;
+ font-weight: bold;
+}
+FORM.search {
+ margin-bottom: 0px;
+ margin-top: 0px;
+}
+INPUT.search { font-size: 75%;
+ color: #000080;
+ font-weight: normal;
+ background-color: #e8eef2;
+}
+TD.tiny { font-size: 75%;
+}
+a {
+ color: #1A41A8;
+}
+a:visited {
+ color: #2A3798;
+}
+.dirtab { padding: 4px;
+ border-collapse: collapse;
+ border: 1px solid #84b0c7;
+}
+TH.dirtab { background: #e8eef2;
+ font-weight: bold;
+}
+HR { height: 1px;
+ border: none;
+ border-top: 1px solid black;
+}
+
+/* Style for detailed member documentation */
+.memtemplate {
+ font-size: 80%;
+ color: #606060;
+ font-weight: normal;
+}
+.memnav {
+ background-color: #e8eef2;
+ border: 1px solid #84b0c7;
+ text-align: center;
+ margin: 2px;
+ margin-right: 15px;
+ padding: 2px;
+}
+.memitem {
+ padding: 4px;
+ background-color: #eef3f5;
+ border-width: 1px;
+ border-style: solid;
+ border-color: #dedeee;
+ -moz-border-radius: 8px 8px 8px 8px;
+}
+.memname {
+ white-space: nowrap;
+ font-weight: bold;
+}
+.memdoc{
+ padding-left: 10px;
+}
+.memproto {
+ background-color: #d5e1e8;
+ width: 100%;
+ border-width: 1px;
+ border-style: solid;
+ border-color: #84b0c7;
+ font-weight: bold;
+ -moz-border-radius: 8px 8px 8px 8px;
+}
+.paramkey {
+ text-align: right;
+}
+.paramtype {
+ white-space: nowrap;
+}
+.paramname {
+ color: #602020;
+ font-style: italic;
+ white-space: nowrap;
+}
+/* End Styling for detailed member documentation */
+
+/* for the tree view */
+.ftvtree {
+ font-family: sans-serif;
+ margin:0.5em;
+}
+.directory { font-size: 9pt; font-weight: bold; }
+.directory h3 { margin: 0px; margin-top: 1em; font-size: 11pt; }
+.directory > h3 { margin-top: 0; }
+.directory p { margin: 0px; white-space: nowrap; }
+.directory div { display: none; margin: 0px; }
+.directory img { vertical-align: -30%; }
diff --git a/lib/iniparser/html/doxygen.png b/lib/iniparser/html/doxygen.png
new file mode 100644
index 0000000000..f0a274bbaf
--- /dev/null
+++ b/lib/iniparser/html/doxygen.png
Binary files differ
diff --git a/lib/iniparser/html/globals_func.html b/lib/iniparser/html/globals_func.html
new file mode 100644
index 0000000000..dc5c7126f0
--- /dev/null
+++ b/lib/iniparser/html/globals_func.html
@@ -0,0 +1,54 @@
+<html>
+<head>
+ <meta name="author" content="ndevilla@free.fr">
+ <meta name="keywords" content="ini file, config file, parser, C library">
+ <link href="doxygen.css" rel="stylesheet" type="text/css">
+<title>iniparser 2.x</title>
+</head>
+
+<body text="#000000" bgcolor="#ffffff">
+
+
+
+<!-- Generated by Doxygen 1.5.1 -->
+<div class="tabs">
+ <ul>
+ <li><a href="globals.html"><span>All</span></a></li>
+ <li id="current"><a href="globals_func.html"><span>Functions</span></a></li>
+ </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>iniparser_dump()
+: <a class="el" href="iniparser_8h.html#046436b3489cd8854ba8e29109250324">iniparser.h</a>
+<li>iniparser_dump_ini()
+: <a class="el" href="iniparser_8h.html#ece0e32de371c9e9592d8333f816dfac">iniparser.h</a>
+<li>iniparser_find_entry()
+: <a class="el" href="iniparser_8h.html#3d67c98bbc0cb5239f024ad54bdc63f1">iniparser.h</a>
+<li>iniparser_freedict()
+: <a class="el" href="iniparser_8h.html#90549ee518523921886b74454ff872eb">iniparser.h</a>
+<li>iniparser_getboolean()
+: <a class="el" href="iniparser_8h.html#eb93c13fcbb75efaa396f53bfd73ff4d">iniparser.h</a>
+<li>iniparser_getdouble()
+: <a class="el" href="iniparser_8h.html#480d35322f1252344cf2246ac21ee559">iniparser.h</a>
+<li>iniparser_getint()
+: <a class="el" href="iniparser_8h.html#694eb1110f4200db8648820a0bb405fa">iniparser.h</a>
+<li>iniparser_getnsec()
+: <a class="el" href="iniparser_8h.html#0b5d6cdc7587e2d27a30f5cdc4a91931">iniparser.h</a>
+<li>iniparser_getsecname()
+: <a class="el" href="iniparser_8h.html#393212be805f395bbfdeb1bafa8bb72a">iniparser.h</a>
+<li>iniparser_getstr()
+: <a class="el" href="iniparser_8h.html#587eafb48937fdee8ae414ad7a666db8">iniparser.h</a>
+<li>iniparser_getstring()
+: <a class="el" href="iniparser_8h.html#7894f8480e1f254d4a1b4a31bdc51b46">iniparser.h</a>
+<li>iniparser_load()
+: <a class="el" href="iniparser_8h.html#b0be559bfb769224b3f1b75e26242a67">iniparser.h</a>
+<li>iniparser_setstr()
+: <a class="el" href="iniparser_8h.html#605a88057bac4c3249513fc588421c32">iniparser.h</a>
+<li>iniparser_unset()
+: <a class="el" href="iniparser_8h.html#7b1a7f2492a35043867fa801b8f21e52">iniparser.h</a>
+</ul>
+
+</body>
+</html>
diff --git a/lib/iniparser/html/index.html b/lib/iniparser/html/index.html
new file mode 100644
index 0000000000..a09575587d
--- /dev/null
+++ b/lib/iniparser/html/index.html
@@ -0,0 +1,156 @@
+<html>
+<head>
+ <meta name="author" content="ndevilla@free.fr">
+ <meta name="keywords" content="ini file, config file, parser, C library">
+ <link href="doxygen.css" rel="stylesheet" type="text/css">
+<title>iniparser 2.x</title>
+</head>
+
+<body text="#000000" bgcolor="#ffffff">
+
+
+
+<!-- Generated by Doxygen 1.5.1 -->
+<h1>iniparser documentation</h1>
+<p>
+<h3 align="center">2.x </h3><hr>
+<h2><a class="anchor" name="welcome">
+Introduction</a></h2>
+iniParser is a simple C library offering ini file parsing services. The library is pretty small (less than 1500 lines of C) and robust, and does not depend on any other external library to compile. It is written in ANSI C and should compile anywhere without difficulty.<p>
+<hr>
+<h2><a class="anchor" name="inidef">
+What is an ini file?</a></h2>
+An ini file is an ASCII file describing simple parameters (character strings, integers, floating-point values or booleans) in an explicit format, easy to use and modify for users.<p>
+An ini file is segmented into Sections, declared by the following syntax:<p>
+<div class="fragment"><pre class="fragment">
+ [Section Name]
+ </pre></div><p>
+i.e. the section name enclosed in square brackets, alone on a line. Section names are allowed to contain any character but square brackets or linefeeds. Slashes (/) are also reserved for hierarchical sections (see below).<p>
+In any section are zero or more variables, declared with the following syntax:<p>
+<div class="fragment"><pre class="fragment">
+ Key = value ; comment
+ </pre></div><p>
+The key is any string (possibly containing blanks). The value is any character on the right side of the equal sign. Values can be given enclosed with quotes. If no quotes are present, the value is understood as containing all characters between the first and the last non-blank characters. The following declarations are identical:<p>
+<div class="fragment"><pre class="fragment">
+ Hello = "this is a long string value" ; comment
+ Hello = this is a long string value ; comment
+ </pre></div><p>
+The semicolon and comment at the end of the line are optional. If there is a comment, it starts from the first character after the semicolon up to the end of the line.<p>
+Comments in an ini file are:<p>
+<ul>
+<li>Lines starting with a hash sign</li><li>Blank lines (only blanks or tabs)</li><li>Comments given on value lines after the semicolon (if present)</li></ul>
+<p>
+<hr>
+<h2><a class="anchor" name="install">
+Compiling/installing the library</a></h2>
+Edit the Makefile to indicate the C compiler you want to use, the options to provide to compile ANSI C, and possibly the options to pass to the <code>ar</code> program on your machine to build a library (.a) from a set of object (.o) files.<p>
+Defaults are set for the gcc compiler and the standard ar library builder.<p>
+Type 'make', that should do it.<p>
+To use the library in your programs, add the following line on top of your module:<p>
+<div class="fragment"><pre class="fragment"><span class="preprocessor"> #include "<a class="code" href="iniparser_8h.html">iniparser.h</a>"</span>
+</pre></div><p>
+And link your program with the iniparser library by adding <code>-liniparser.a</code> to the compile line.<p>
+See the file test/initest.c for an example.<p>
+<hr>
+<h2><a class="anchor" name="reference">
+Library reference</a></h2>
+The library is completely documented in its header file. On-line documentation has been generated and can be consulted here:<p>
+<ul>
+<li><a class="el" href="iniparser_8h.html">iniparser.h</a></li></ul>
+<p>
+<hr>
+<h2><a class="anchor" name="usage">
+Using the parser</a></h2>
+Comments are discarded by the parser. Then sections are identified, and in each section a new entry is created for every keyword found. The keywords are stored with the following syntax:<p>
+<div class="fragment"><pre class="fragment">
+ [Section]
+ Keyword = value ; comment
+ </pre></div><p>
+is converted to the following key pair:<p>
+<div class="fragment"><pre class="fragment">
+ ("section:keyword", "value")
+ </pre></div><p>
+This means that if you want to retrieve the value that was stored in the section called <code>Pizza</code>, in the keyword <code>Cheese</code>, you would make a request to the dictionary for <code>"pizza:cheese"</code>. All section and keyword names are converted to lowercase before storage in the structure. The value side is conserved as it has been parsed, though.<p>
+Section names are also stored in the structure. They are stored using as key the section name, and a NULL associated value. They can be queried through <a class="el" href="iniparser_8h.html#3d67c98bbc0cb5239f024ad54bdc63f1">iniparser_find_entry()</a>.<p>
+To launch the parser, simply use the function called <a class="el" href="iniparser_8h.html#b0be559bfb769224b3f1b75e26242a67">iniparser_load()</a>, which takes an input file name and returns a newly allocated <em>dictionary</em> structure. This latter object should remain opaque to the user and only accessed through the following accessor functions:<p>
+<ul>
+<li><a class="el" href="iniparser_8h.html#587eafb48937fdee8ae414ad7a666db8">iniparser_getstr()</a></li><li><a class="el" href="iniparser_8h.html#694eb1110f4200db8648820a0bb405fa">iniparser_getint()</a></li><li><a class="el" href="iniparser_8h.html#480d35322f1252344cf2246ac21ee559">iniparser_getdouble()</a></li><li><a class="el" href="iniparser_8h.html#eb93c13fcbb75efaa396f53bfd73ff4d">iniparser_getboolean()</a></li></ul>
+<p>
+Finally, discard this structure using <a class="el" href="iniparser_8h.html#90549ee518523921886b74454ff872eb">iniparser_freedict()</a>.<p>
+All values parsed from the ini file are stored as strings. The getint, getdouble and getboolean accessors are just converting these strings to the requested type on the fly, but you could basically perform this conversion by yourself after having called the getstr accessor.<p>
+Notice that the <a class="el" href="iniparser_8h.html#eb93c13fcbb75efaa396f53bfd73ff4d">iniparser_getboolean()</a> function will return an integer (0 or 1), trying to make sense of what was found in the file. Strings starting with "y", "Y", "t", "T" or "1" are considered true values (return 1), strings starting with "n", "N", "f", "F", "0" are considered false (return 0). This allows flexible handling of boolean answers.<p>
+If you want to add extra information into the structure that was not present in the ini file, you can use <a class="el" href="iniparser_8h.html#605a88057bac4c3249513fc588421c32">iniparser_setstr()</a> to insert a string.<p>
+<hr>
+<h2><a class="anchor" name="implementation">
+A word about the implementation</a></h2>
+The dictionary structure is a pretty simple dictionary implementation which might find some uses in other applications. If you are curious, look into the source.<p>
+<hr>
+<h2><a class="anchor" name="hierarchical">
+Hierarchical ini files</a></h2>
+ini files are nice to present information to the user in a readable format, but lack a very useful feature: the possibility of organizing data in a hierarchical (tree-like) fashion. The following convention can be used to make ini files obtain this second dimension:<p>
+A section depends on another section if it contains its name as a prefix, separated by slashes (/). For example: we have 2 main sections in the ini file. The first one is called <code>Pizza</code> and has two child subsections called <code>Cheese</code> and <code>Ham</code>. The second main section in the ini file is called <code>Wine</code> and has two child subsections called <code>Year</code> and <code>Grape</code>. As a tree, this could appear as:<p>
+<div class="fragment"><pre class="fragment">
+ |
+ +-- Pizza
+ | +-- Cheese
+ | +-- Ham
+ +-- Wine
+ +--- Year
+ +--- Grape
+ </pre></div><p>
+In an ini file, that would be converted to:<p>
+<div class="fragment"><pre class="fragment">
+ [Pizza]
+
+ [Pizza/Cheese]
+ Name = Gorgonzola ;
+ Origin = Italy ;
+
+ [Pizza/Ham]
+ Name = Parma ;
+ Origin = Italy ;
+
+ [Wine]
+
+ [Wine/Year]
+ Value = 1998 ;
+
+ [Wine/Grape]
+ Name = Cabernet Sauvignon ;
+ Origin = Chile ;
+ </pre></div><p>
+This proposal is actually more related to the way people write ini files, more than the parser presented here. But it is certainly a useful way of making tree-like data declarations without going through painful formats like XML.<p>
+Accessing the above tree would give something like (error checking removed for clarity's sake):<p>
+<div class="fragment"><pre class="fragment"> dictionary * d ;
+
+ d = <a class="code" href="iniparser_8h.html#b0be559bfb769224b3f1b75e26242a67">iniparser_load</a>(<span class="stringliteral">"example.ini"</span>);
+
+ printf(<span class="stringliteral">"cheese name is %s\n"</span>, <a class="code" href="iniparser_8h.html#587eafb48937fdee8ae414ad7a666db8">iniparser_getstr</a>(d, <span class="stringliteral">"pizza/cheese:name"</span>));
+ printf(<span class="stringliteral">"grape name is %s\n"</span>, <a class="code" href="iniparser_8h.html#587eafb48937fdee8ae414ad7a666db8">iniparser_getstr</a>(d, <span class="stringliteral">"wine/grape:name"</span>));
+
+ <a class="code" href="iniparser_8h.html#90549ee518523921886b74454ff872eb">iniparser_freedict</a>(d);
+</pre></div><p>
+The whole ini file above is represented in the dictionary as the following list of pairs:<p>
+<div class="fragment"><pre class="fragment">
+ key value
+
+ "pizza" NULL
+ "pizza/cheese" NULL
+ "pizza/cheese:name" "Gorgonzola"
+ "pizza/cheese:origin" "Italy"
+ "pizza/ham" NULL
+ "pizza/ham:name" "Parma"
+ "pizza/ham:origin" "Italy"
+ "wine" NULL
+ "wine/year" NULL
+ "wine/year:value" "1998"
+ "wine/grape" NULL
+ "wine/grape:name" "Cabernet Sauvignon"
+ "wine/grape:origin" "Chile"
+ </pre></div><p>
+<hr>
+<h2><a class="anchor" name="authors">
+Authors</a></h2>
+Nicolas Devillard (ndevilla AT free DOT fr).
+</body>
+</html>
diff --git a/lib/iniparser/html/iniparser_8h.html b/lib/iniparser/html/iniparser_8h.html
new file mode 100644
index 0000000000..9a7d5ecbc3
--- /dev/null
+++ b/lib/iniparser/html/iniparser_8h.html
@@ -0,0 +1,629 @@
+<html>
+<head>
+ <meta name="author" content="ndevilla@free.fr">
+ <meta name="keywords" content="ini file, config file, parser, C library">
+ <link href="doxygen.css" rel="stylesheet" type="text/css">
+<title>iniparser 2.x</title>
+</head>
+
+<body text="#000000" bgcolor="#ffffff">
+
+
+
+<!-- Generated by Doxygen 1.5.1 -->
+<h1>iniparser.h File Reference</h1>Parser for ini files. <a href="#_details">More...</a>
+<p>
+<table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#0b5d6cdc7587e2d27a30f5cdc4a91931">iniparser_getnsec</a> (dictionary *d)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get number of sections in a dictionary. <a href="#0b5d6cdc7587e2d27a30f5cdc4a91931"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">char *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#393212be805f395bbfdeb1bafa8bb72a">iniparser_getsecname</a> (dictionary *d, int n)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get name for section n in a dictionary. <a href="#393212be805f395bbfdeb1bafa8bb72a"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#ece0e32de371c9e9592d8333f816dfac">iniparser_dump_ini</a> (dictionary *d, FILE *f)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Save a dictionary to a loadable ini file. <a href="#ece0e32de371c9e9592d8333f816dfac"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#046436b3489cd8854ba8e29109250324">iniparser_dump</a> (dictionary *d, FILE *f)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Dump a dictionary to an opened file pointer. <a href="#046436b3489cd8854ba8e29109250324"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">char *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#587eafb48937fdee8ae414ad7a666db8">iniparser_getstr</a> (dictionary *d, const char *key)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get the string associated to a key, return NULL if not found. <a href="#587eafb48937fdee8ae414ad7a666db8"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">char *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#7894f8480e1f254d4a1b4a31bdc51b46">iniparser_getstring</a> (dictionary *d, const char *key, char *def)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get the string associated to a key. <a href="#7894f8480e1f254d4a1b4a31bdc51b46"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#694eb1110f4200db8648820a0bb405fa">iniparser_getint</a> (dictionary *d, const char *key, int notfound)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get the string associated to a key, convert to an int. <a href="#694eb1110f4200db8648820a0bb405fa"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">double&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#480d35322f1252344cf2246ac21ee559">iniparser_getdouble</a> (dictionary *d, char *key, double notfound)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get the string associated to a key, convert to a double. <a href="#480d35322f1252344cf2246ac21ee559"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#eb93c13fcbb75efaa396f53bfd73ff4d">iniparser_getboolean</a> (dictionary *d, const char *key, int notfound)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get the string associated to a key, convert to a boolean. <a href="#eb93c13fcbb75efaa396f53bfd73ff4d"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#605a88057bac4c3249513fc588421c32">iniparser_setstr</a> (dictionary *ini, char *entry, char *val)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Set an entry in a dictionary. <a href="#605a88057bac4c3249513fc588421c32"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#7b1a7f2492a35043867fa801b8f21e52">iniparser_unset</a> (dictionary *ini, char *entry)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Delete an entry in a dictionary. <a href="#7b1a7f2492a35043867fa801b8f21e52"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#3d67c98bbc0cb5239f024ad54bdc63f1">iniparser_find_entry</a> (dictionary *ini, char *entry)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Finds out if a given entry exists in a dictionary. <a href="#3d67c98bbc0cb5239f024ad54bdc63f1"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">dictionary *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#b0be559bfb769224b3f1b75e26242a67">iniparser_load</a> (const char *ininame)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parse an ini file and return an allocated dictionary object. <a href="#b0be559bfb769224b3f1b75e26242a67"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="iniparser_8h.html#90549ee518523921886b74454ff872eb">iniparser_freedict</a> (dictionary *d)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Free all memory associated to an ini dictionary. <a href="#90549ee518523921886b74454ff872eb"></a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Parser for ini files.
+<p>
+<dl class="author" compact><dt><b>Author:</b></dt><dd>N. Devillard </dd></dl>
+<dl class="date" compact><dt><b>Date:</b></dt><dd>Mar 2000 </dd></dl>
+<dl class="version" compact><dt><b>Version:</b></dt><dd><dl class="rcs" compact><dt><b>Revision</b></dt><dd>1.23 </dd></dl>
+</dd></dl>
+<hr><h2>Function Documentation</h2>
+<a class="anchor" name="046436b3489cd8854ba8e29109250324"></a><!-- doxytag: member="iniparser.h::iniparser_dump" ref="046436b3489cd8854ba8e29109250324" args="(dictionary *d, FILE *f)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">void iniparser_dump </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">FILE *&nbsp;</td>
+ <td class="paramname"> <em>f</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Dump a dictionary to an opened file pointer.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to dump. </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>f</em>&nbsp;</td><td>Opened file pointer to dump to. </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>void</dd></dl>
+This function prints out the contents of a dictionary, one element by line, onto the provided file pointer. It is OK to specify <code>stderr</code> or <code>stdout</code> as output files. This function is meant for debugging purposes mostly.
+</div>
+</div><p>
+<a class="anchor" name="ece0e32de371c9e9592d8333f816dfac"></a><!-- doxytag: member="iniparser.h::iniparser_dump_ini" ref="ece0e32de371c9e9592d8333f816dfac" args="(dictionary *d, FILE *f)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">void iniparser_dump_ini </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">FILE *&nbsp;</td>
+ <td class="paramname"> <em>f</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Save a dictionary to a loadable ini file.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to dump </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>f</em>&nbsp;</td><td>Opened file pointer to dump to </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>void</dd></dl>
+This function dumps a given dictionary into a loadable ini file. It is OK to specify <code>stderr</code> or <code>stdout</code> as output files.
+</div>
+</div><p>
+<a class="anchor" name="3d67c98bbc0cb5239f024ad54bdc63f1"></a><!-- doxytag: member="iniparser.h::iniparser_find_entry" ref="3d67c98bbc0cb5239f024ad54bdc63f1" args="(dictionary *ini, char *entry)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">int iniparser_find_entry </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>ini</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">char *&nbsp;</td>
+ <td class="paramname"> <em>entry</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Finds out if a given entry exists in a dictionary.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>ini</em>&nbsp;</td><td>Dictionary to search </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>entry</em>&nbsp;</td><td>Name of the entry to look for </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>integer 1 if entry exists, 0 otherwise</dd></dl>
+Finds out if a given entry exists in the dictionary. Since sections are stored as keys with NULL associated values, this is the only way of querying for the presence of sections in a dictionary.
+</div>
+</div><p>
+<a class="anchor" name="90549ee518523921886b74454ff872eb"></a><!-- doxytag: member="iniparser.h::iniparser_freedict" ref="90549ee518523921886b74454ff872eb" args="(dictionary *d)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">void iniparser_freedict </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em> </td>
+ <td>&nbsp;)&nbsp;</td>
+ <td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Free all memory associated to an ini dictionary.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to free </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>void</dd></dl>
+Free all memory associated to an ini dictionary. It is mandatory to call this function before the dictionary object gets out of the current context.
+</div>
+</div><p>
+<a class="anchor" name="eb93c13fcbb75efaa396f53bfd73ff4d"></a><!-- doxytag: member="iniparser.h::iniparser_getboolean" ref="eb93c13fcbb75efaa396f53bfd73ff4d" args="(dictionary *d, const char *key, int notfound)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">int iniparser_getboolean </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">const char *&nbsp;</td>
+ <td class="paramname"> <em>key</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">int&nbsp;</td>
+ <td class="paramname"> <em>notfound</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get the string associated to a key, convert to a boolean.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to search </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>key</em>&nbsp;</td><td>Key string to look for </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>notfound</em>&nbsp;</td><td>Value to return in case of error </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>integer</dd></dl>
+This function queries a dictionary for a key. A key as read from an ini file is given as "section:key". If the key cannot be found, the notfound value is returned.<p>
+A true boolean is found if one of the following is matched:<p>
+<ul>
+<li>A string starting with 'y'</li><li>A string starting with 'Y'</li><li>A string starting with 't'</li><li>A string starting with 'T'</li><li>A string starting with '1'</li></ul>
+<p>
+A false boolean is found if one of the following is matched:<p>
+<ul>
+<li>A string starting with 'n'</li><li>A string starting with 'N'</li><li>A string starting with 'f'</li><li>A string starting with 'F'</li><li>A string starting with '0'</li></ul>
+<p>
+The notfound value returned if no boolean is identified, does not necessarily have to be 0 or 1.
+</div>
+</div><p>
+<a class="anchor" name="480d35322f1252344cf2246ac21ee559"></a><!-- doxytag: member="iniparser.h::iniparser_getdouble" ref="480d35322f1252344cf2246ac21ee559" args="(dictionary *d, char *key, double notfound)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">double iniparser_getdouble </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">char *&nbsp;</td>
+ <td class="paramname"> <em>key</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">double&nbsp;</td>
+ <td class="paramname"> <em>notfound</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get the string associated to a key, convert to a double.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to search </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>key</em>&nbsp;</td><td>Key string to look for </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>notfound</em>&nbsp;</td><td>Value to return in case of error </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>double</dd></dl>
+This function queries a dictionary for a key. A key as read from an ini file is given as "section:key". If the key cannot be found, the notfound value is returned.
+</div>
+</div><p>
+<a class="anchor" name="694eb1110f4200db8648820a0bb405fa"></a><!-- doxytag: member="iniparser.h::iniparser_getint" ref="694eb1110f4200db8648820a0bb405fa" args="(dictionary *d, const char *key, int notfound)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">int iniparser_getint </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">const char *&nbsp;</td>
+ <td class="paramname"> <em>key</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">int&nbsp;</td>
+ <td class="paramname"> <em>notfound</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get the string associated to a key, convert to an int.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to search </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>key</em>&nbsp;</td><td>Key string to look for </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>notfound</em>&nbsp;</td><td>Value to return in case of error </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>integer</dd></dl>
+This function queries a dictionary for a key. A key as read from an ini file is given as "section:key". If the key cannot be found, the notfound value is returned.<p>
+Supported values for integers include the usual C notation so decimal, octal (starting with 0) and hexadecimal (starting with 0x) are supported. Examples:<p>
+<ul>
+<li>"42" -&gt; 42</li><li>"042" -&gt; 34 (octal -&gt; decimal)</li><li>"0x42" -&gt; 66 (hexa -&gt; decimal)</li></ul>
+<p>
+Warning: the conversion may overflow in various ways. Conversion is totally outsourced to strtol(), see the associated man page for overflow handling.<p>
+Credits: Thanks to A. Becker for suggesting strtol()
+</div>
+</div><p>
+<a class="anchor" name="0b5d6cdc7587e2d27a30f5cdc4a91931"></a><!-- doxytag: member="iniparser.h::iniparser_getnsec" ref="0b5d6cdc7587e2d27a30f5cdc4a91931" args="(dictionary *d)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">int iniparser_getnsec </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em> </td>
+ <td>&nbsp;)&nbsp;</td>
+ <td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get number of sections in a dictionary.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to examine </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>int Number of sections found in dictionary</dd></dl>
+This function returns the number of sections found in a dictionary. The test to recognize sections is done on the string stored in the dictionary: a section name is given as "section" whereas a key is stored as "section:key", thus the test looks for entries that do not contain a colon.<p>
+This clearly fails in the case a section name contains a colon, but this should simply be avoided.<p>
+This function returns -1 in case of error.
+</div>
+</div><p>
+<a class="anchor" name="393212be805f395bbfdeb1bafa8bb72a"></a><!-- doxytag: member="iniparser.h::iniparser_getsecname" ref="393212be805f395bbfdeb1bafa8bb72a" args="(dictionary *d, int n)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">char* iniparser_getsecname </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">int&nbsp;</td>
+ <td class="paramname"> <em>n</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get name for section n in a dictionary.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to examine </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>n</em>&nbsp;</td><td>Section number (from 0 to nsec-1). </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>Pointer to char string</dd></dl>
+This function locates the n-th section in a dictionary and returns its name as a pointer to a string statically allocated inside the dictionary. Do not free or modify the returned string!<p>
+This function returns NULL in case of error.
+</div>
+</div><p>
+<a class="anchor" name="587eafb48937fdee8ae414ad7a666db8"></a><!-- doxytag: member="iniparser.h::iniparser_getstr" ref="587eafb48937fdee8ae414ad7a666db8" args="(dictionary *d, const char *key)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">char* iniparser_getstr </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">const char *&nbsp;</td>
+ <td class="paramname"> <em>key</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get the string associated to a key, return NULL if not found.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to search </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>key</em>&nbsp;</td><td>Key string to look for </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>pointer to statically allocated character string, or NULL.</dd></dl>
+This function queries a dictionary for a key. A key as read from an ini file is given as "section:key". If the key cannot be found, NULL is returned. The returned char pointer is pointing to a string allocated in the dictionary, do not free or modify it.<p>
+This function is only provided for backwards compatibility with previous versions of iniparser. It is recommended to use <a class="el" href="iniparser_8h.html#7894f8480e1f254d4a1b4a31bdc51b46">iniparser_getstring()</a> instead.
+</div>
+</div><p>
+<a class="anchor" name="7894f8480e1f254d4a1b4a31bdc51b46"></a><!-- doxytag: member="iniparser.h::iniparser_getstring" ref="7894f8480e1f254d4a1b4a31bdc51b46" args="(dictionary *d, const char *key, char *def)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">char* iniparser_getstring </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>d</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">const char *&nbsp;</td>
+ <td class="paramname"> <em>key</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">char *&nbsp;</td>
+ <td class="paramname"> <em>def</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get the string associated to a key.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>d</em>&nbsp;</td><td>Dictionary to search </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>key</em>&nbsp;</td><td>Key string to look for </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>def</em>&nbsp;</td><td>Default value to return if key not found. </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>pointer to statically allocated character string</dd></dl>
+This function queries a dictionary for a key. A key as read from an ini file is given as "section:key". If the key cannot be found, the pointer passed as 'def' is returned. The returned char pointer is pointing to a string allocated in the dictionary, do not free or modify it.
+</div>
+</div><p>
+<a class="anchor" name="b0be559bfb769224b3f1b75e26242a67"></a><!-- doxytag: member="iniparser.h::iniparser_load" ref="b0be559bfb769224b3f1b75e26242a67" args="(const char *ininame)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">dictionary* iniparser_load </td>
+ <td>(</td>
+ <td class="paramtype">const char *&nbsp;</td>
+ <td class="paramname"> <em>ininame</em> </td>
+ <td>&nbsp;)&nbsp;</td>
+ <td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Parse an ini file and return an allocated dictionary object.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>ininame</em>&nbsp;</td><td>Name of the ini file to read. </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>Pointer to newly allocated dictionary</dd></dl>
+This is the parser for ini files. This function is called, providing the name of the file to be read. It returns a dictionary object that should not be accessed directly, but through accessor functions instead.<p>
+The returned dictionary must be freed using <a class="el" href="iniparser_8h.html#90549ee518523921886b74454ff872eb">iniparser_freedict()</a>.
+</div>
+</div><p>
+<a class="anchor" name="605a88057bac4c3249513fc588421c32"></a><!-- doxytag: member="iniparser.h::iniparser_setstr" ref="605a88057bac4c3249513fc588421c32" args="(dictionary *ini, char *entry, char *val)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">int iniparser_setstr </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>ini</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">char *&nbsp;</td>
+ <td class="paramname"> <em>entry</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">char *&nbsp;</td>
+ <td class="paramname"> <em>val</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Set an entry in a dictionary.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>ini</em>&nbsp;</td><td>Dictionary to modify. </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>entry</em>&nbsp;</td><td>Entry to modify (entry name) </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>val</em>&nbsp;</td><td>New value to associate to the entry. </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>int 0 if Ok, -1 otherwise.</dd></dl>
+If the given entry can be found in the dictionary, it is modified to contain the provided value. If it cannot be found, -1 is returned. It is Ok to set val to NULL.
+</div>
+</div><p>
+<a class="anchor" name="7b1a7f2492a35043867fa801b8f21e52"></a><!-- doxytag: member="iniparser.h::iniparser_unset" ref="7b1a7f2492a35043867fa801b8f21e52" args="(dictionary *ini, char *entry)" -->
+<div class="memitem">
+<div class="memproto">
+ <table class="memname">
+ <tr>
+ <td class="memname">void iniparser_unset </td>
+ <td>(</td>
+ <td class="paramtype">dictionary *&nbsp;</td>
+ <td class="paramname"> <em>ini</em>, </td>
+ </tr>
+ <tr>
+ <td class="paramkey"></td>
+ <td></td>
+ <td class="paramtype">char *&nbsp;</td>
+ <td class="paramname"> <em>entry</em></td><td>&nbsp;</td>
+ </tr>
+ <tr>
+ <td></td>
+ <td>)</td>
+ <td></td><td></td><td width="100%"></td>
+ </tr>
+ </table>
+</div>
+<div class="memdoc">
+
+<p>
+Delete an entry in a dictionary.
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+ <table border="0" cellspacing="2" cellpadding="0">
+ <tr><td valign="top"></td><td valign="top"><em>ini</em>&nbsp;</td><td>Dictionary to modify </td></tr>
+ <tr><td valign="top"></td><td valign="top"><em>entry</em>&nbsp;</td><td>Entry to delete (entry name) </td></tr>
+ </table>
+</dl>
+<dl class="return" compact><dt><b>Returns:</b></dt><dd>void</dd></dl>
+If the given entry can be found, it is deleted from the dictionary.
+</div>
+</div><p>
+
+</body>
+</html>
diff --git a/lib/iniparser/html/iniparser_8main.html b/lib/iniparser/html/iniparser_8main.html
new file mode 100644
index 0000000000..a00eed2f9e
--- /dev/null
+++ b/lib/iniparser/html/iniparser_8main.html
@@ -0,0 +1,19 @@
+<html>
+<head>
+ <meta name="author" content="ndevilla@free.fr">
+ <meta name="keywords" content="ini file, config file, parser, C library">
+ <link href="doxygen.css" rel="stylesheet" type="text/css">
+<title>iniparser 2.x</title>
+</head>
+
+<body text="#000000" bgcolor="#ffffff">
+
+
+
+<!-- Generated by Doxygen 1.5.1 -->
+<h1>iniparser.main File Reference</h1><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+</table>
+
+</body>
+</html>
diff --git a/lib/iniparser/html/tab_b.gif b/lib/iniparser/html/tab_b.gif
new file mode 100644
index 0000000000..0d623483ff
--- /dev/null
+++ b/lib/iniparser/html/tab_b.gif
Binary files differ
diff --git a/lib/iniparser/html/tab_l.gif b/lib/iniparser/html/tab_l.gif
new file mode 100644
index 0000000000..9b1e6337c9
--- /dev/null
+++ b/lib/iniparser/html/tab_l.gif
Binary files differ
diff --git a/lib/iniparser/html/tab_r.gif b/lib/iniparser/html/tab_r.gif
new file mode 100644
index 0000000000..ce9dd9f533
--- /dev/null
+++ b/lib/iniparser/html/tab_r.gif
Binary files differ
diff --git a/lib/iniparser/html/tabs.css b/lib/iniparser/html/tabs.css
new file mode 100644
index 0000000000..a61552a67a
--- /dev/null
+++ b/lib/iniparser/html/tabs.css
@@ -0,0 +1,102 @@
+/* tabs styles, based on http://www.alistapart.com/articles/slidingdoors */
+
+DIV.tabs
+{
+ float : left;
+ width : 100%;
+ background : url("tab_b.gif") repeat-x bottom;
+ margin-bottom : 4px;
+}
+
+DIV.tabs UL
+{
+ margin : 0px;
+ padding-left : 10px;
+ list-style : none;
+}
+
+DIV.tabs LI, DIV.tabs FORM
+{
+ display : inline;
+ margin : 0px;
+ padding : 0px;
+}
+
+DIV.tabs FORM
+{
+ float : right;
+}
+
+DIV.tabs A
+{
+ float : left;
+ background : url("tab_r.gif") no-repeat right top;
+ border-bottom : 1px solid #84B0C7;
+ font-size : x-small;
+ font-weight : bold;
+ text-decoration : none;
+}
+
+DIV.tabs A:hover
+{
+ background-position: 100% -150px;
+}
+
+DIV.tabs A:link, DIV.tabs A:visited,
+DIV.tabs A:active, DIV.tabs A:hover
+{
+ color: #1A419D;
+}
+
+DIV.tabs SPAN
+{
+ float : left;
+ display : block;
+ background : url("tab_l.gif") no-repeat left top;
+ padding : 5px 9px;
+ white-space : nowrap;
+}
+
+DIV.tabs INPUT
+{
+ float : right;
+ display : inline;
+ font-size : 1em;
+}
+
+DIV.tabs TD
+{
+ font-size : x-small;
+ font-weight : bold;
+ text-decoration : none;
+}
+
+
+
+/* Commented Backslash Hack hides rule from IE5-Mac \*/
+DIV.tabs SPAN {float : none;}
+/* End IE5-Mac hack */
+
+DIV.tabs A:hover SPAN
+{
+ background-position: 0% -150px;
+}
+
+DIV.tabs LI#current A
+{
+ background-position: 100% -150px;
+ border-width : 0px;
+}
+
+DIV.tabs LI#current SPAN
+{
+ background-position: 0% -150px;
+ padding-bottom : 6px;
+}
+
+DIV.nav
+{
+ background : none;
+ border : none;
+ border-bottom : 1px solid #84B0C7;
+}
diff --git a/lib/iniparser/src/dictionary.c b/lib/iniparser/src/dictionary.c
new file mode 100644
index 0000000000..b9d426dc7e
--- /dev/null
+++ b/lib/iniparser/src/dictionary.c
@@ -0,0 +1,514 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+ @file dictionary.c
+ @author N. Devillard
+ @date Aug 2000
+ @version $Revision: 1.25 $
+ @brief Implements a dictionary for string variables.
+
+ This module implements a simple dictionary object, i.e. a list
+ of string/string associations. This object is useful to store e.g.
+  information retrieved from a configuration file (ini files).
+*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ $Id: dictionary.c,v 1.25 2007-05-27 13:03:43 ndevilla Exp $
+ $Author: ndevilla $
+ $Date: 2007-05-27 13:03:43 $
+ $Revision: 1.25 $
+*/
+
+/*---------------------------------------------------------------------------
+ Includes
+ ---------------------------------------------------------------------------*/
+
+#include "dictionary.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+
+/** Maximum value size for integers and doubles. */
+#define MAXVALSZ 1024
+
+/** Minimal allocated number of entries in a dictionary */
+#define DICTMINSZ 128
+
+/** Invalid key token */
+#define DICT_INVALID_KEY ((char*)-1)
+
+
+/*---------------------------------------------------------------------------
+ Private functions
+ ---------------------------------------------------------------------------*/
+
/* Double the allocated size associated to a pointer. */
/* 'size' is the current allocated size in bytes.  Returns the new,
 * zero-padded block of 2*size bytes, or NULL on allocation failure,
 * in which case 'ptr' is left untouched and still owned by the caller.
 * NOTE(review): 2*size can overflow int for very large sizes — callers
 * only pass small table sizes here, but worth confirming. */
static void * mem_double(void * ptr, int size)
{
    void * newptr ;

    newptr = calloc(2*size, 1);
    if (newptr == NULL) {
        /* Out of memory: do NOT memcpy into NULL (undefined behavior);
           keep the original block valid for the caller. */
        return NULL ;
    }
    if (ptr != NULL) {
        memcpy(newptr, ptr, size);
        free(ptr);
    }
    return newptr ;
}
+
+
+/*---------------------------------------------------------------------------
+ Function codes
+ ---------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Compute the hash key for a string.
+ @param key Character string to use for key.
+ @return 1 unsigned int on at least 32 bits.
+
+ This hash function has been taken from an Article in Dr Dobbs Journal.
+ This is normally a collision-free function, distributing keys evenly.
+ The key is stored anyway in the struct so that collision can be avoided
+ by comparing the key itself in last resort.
+ */
+/*--------------------------------------------------------------------------*/
+
unsigned dictionary_hash(char * key)
{
    unsigned h = 0 ;
    size_t len ;
    size_t i ;

    len = strlen(key);
    /* One-at-a-time mixing: fold every byte of the key into the state. */
    for (i = 0 ; i < len ; i++) {
        h += (unsigned)key[i] ;
        h += h << 10 ;
        h ^= h >> 6 ;
    }
    /* Final avalanche so that all input bits affect all output bits. */
    h += h << 3 ;
    h ^= h >> 11 ;
    h += h << 15 ;
    return h ;
}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Create a new dictionary object.
+ @param size Optional initial size of the dictionary.
+  @return 1 newly allocated dictionary object.
+
+ This function allocates a new dictionary object of given size and returns
+ it. If you do not know in advance (roughly) the number of entries in the
+ dictionary, give size=0.
+ */
+/*--------------------------------------------------------------------------*/
+
+dictionary * dictionary_new(int size)
+{
+ dictionary * d ;
+
+ /* If no size was specified, allocate space for DICTMINSZ */
+ if (size<DICTMINSZ) size=DICTMINSZ ;
+
+ if (!(d = (dictionary *)calloc(1, sizeof(dictionary)))) {
+ return NULL;
+ }
+ d->size = size ;
+ d->val = (char **)calloc(size, sizeof(char*));
+ d->key = (char **)calloc(size, sizeof(char*));
+ d->hash = (unsigned int *)calloc(size, sizeof(unsigned));
+ return d ;
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Delete a dictionary object
+ @param d dictionary object to deallocate.
+ @return void
+
+ Deallocate a dictionary object and all memory associated to it.
+ */
+/*--------------------------------------------------------------------------*/
+
+void dictionary_del(dictionary * d)
+{
+ int i ;
+
+ if (d==NULL) return ;
+ for (i=0 ; i<d->size ; i++) {
+ if (d->key[i]!=NULL)
+ free(d->key[i]);
+ if (d->val[i]!=NULL)
+ free(d->val[i]);
+ }
+ free(d->val);
+ free(d->key);
+ free(d->hash);
+ free(d);
+ return ;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value to return if key not found.
+ @return 1 pointer to internally allocated character string.
+
+ This function locates a key in a dictionary and returns a pointer to its
+ value, or the passed 'def' pointer if no such key can be found in
+ dictionary. The returned character pointer points to data internal to the
+ dictionary object, you should not try to free it or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+char * dictionary_get(dictionary * d, char * key, char * def)
+{
+ unsigned hash ;
+ int i ;
+
+ hash = dictionary_hash(key);
+ for (i=0 ; i<d->size ; i++) {
+ if (d->key==NULL)
+ continue ;
+ /* Compare hash */
+ if (hash==d->hash[i]) {
+ /* Compare string, to avoid hash collisions */
+ if (!strcmp(key, d->key[i])) {
+ return d->val[i] ;
+ }
+ }
+ }
+ return def ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary, as a char.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value for the key if not found.
+ @return char
+
+ This function locates a key in a dictionary using dictionary_get,
+ and returns the first char of the found string.
+ */
+/*--------------------------------------------------------------------------*/
+char dictionary_getchar(dictionary * d, char * key, char def)
+{
+ char * v ;
+
+ if ((v=dictionary_get(d,key,DICT_INVALID_KEY))==DICT_INVALID_KEY) {
+ return def ;
+ } else {
+ return v[0] ;
+ }
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary, as an int.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value for the key if not found.
+ @return int
+
+ This function locates a key in a dictionary using dictionary_get,
+ and applies atoi on it to return an int. If the value cannot be found
+ in the dictionary, the default is returned.
+ */
+/*--------------------------------------------------------------------------*/
+int dictionary_getint(dictionary * d, char * key, int def)
+{
+ char * v ;
+
+ if ((v=dictionary_get(d,key,DICT_INVALID_KEY))==DICT_INVALID_KEY) {
+ return def ;
+ } else {
+ return atoi(v);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary, as a double.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value for the key if not found.
+ @return double
+
+ This function locates a key in a dictionary using dictionary_get,
+ and applies atof on it to return a double. If the value cannot be found
+ in the dictionary, the default is returned.
+ */
+/*--------------------------------------------------------------------------*/
+double dictionary_getdouble(dictionary * d, char * key, double def)
+{
+ char * v ;
+
+ if ((v=dictionary_get(d,key,DICT_INVALID_KEY))==DICT_INVALID_KEY) {
+ return def ;
+ } else {
+ return atof(v);
+ }
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set a value in a dictionary.
+ @param d dictionary object to modify.
+ @param key Key to modify or add.
+ @param val Value to add.
+ @return void
+
+ If the given key is found in the dictionary, the associated value is
+ replaced by the provided one. If the key cannot be found in the
+ dictionary, it is added to it.
+
+ It is Ok to provide a NULL value for val, but NULL values for the dictionary
+ or the key are considered as errors: the function will return immediately
+ in such a case.
+
+ Notice that if you dictionary_set a variable to NULL, a call to
+ dictionary_get will return a NULL value: the variable will be found, and
+ its value (NULL) is returned. In other words, setting the variable
+ content to NULL is equivalent to deleting the variable from the
+ dictionary. It is not possible (in this implementation) to have a key in
+ the dictionary without value.
+ */
+/*--------------------------------------------------------------------------*/
+
+void dictionary_set(dictionary * d, char * key, char * val)
+{
+ int i ;
+ unsigned hash ;
+
+ if (d==NULL || key==NULL) return ;
+
+ /* Compute hash for this key */
+ hash = dictionary_hash(key) ;
+ /* Find if value is already in blackboard */
+ if (d->n>0) {
+ for (i=0 ; i<d->size ; i++) {
+ if (d->key[i]==NULL)
+ continue ;
+ if (hash==d->hash[i]) { /* Same hash value */
+ if (!strcmp(key, d->key[i])) { /* Same key */
+ /* Found a value: modify and return */
+ if (d->val[i]!=NULL)
+ free(d->val[i]);
+ d->val[i] = val ? strdup(val) : NULL ;
+ /* Value has been modified: return */
+ return ;
+ }
+ }
+ }
+ }
+ /* Add a new value */
+ /* See if dictionary needs to grow */
+ if (d->n==d->size) {
+
+ /* Reached maximum size: reallocate blackboard */
+ d->val = (char **)mem_double(d->val, d->size * sizeof(char*)) ;
+ d->key = (char **)mem_double(d->key, d->size * sizeof(char*)) ;
+ d->hash = (unsigned int *)mem_double(d->hash, d->size * sizeof(unsigned)) ;
+
+ /* Double size */
+ d->size *= 2 ;
+ }
+
+ /* Insert key in the first empty slot */
+ for (i=0 ; i<d->size ; i++) {
+ if (d->key[i]==NULL) {
+ /* Add key here */
+ break ;
+ }
+ }
+ /* Copy key */
+ d->key[i] = strdup(key);
+ d->val[i] = val ? strdup(val) : NULL ;
+ d->hash[i] = hash;
+ d->n ++ ;
+ return ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Delete a key in a dictionary
+ @param d dictionary object to modify.
+ @param key Key to remove.
+ @return void
+
+ This function deletes a key in a dictionary. Nothing is done if the
+ key cannot be found.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_unset(dictionary * d, char * key)
+{
+ unsigned hash ;
+ int i ;
+
+ if (key == NULL) {
+ return;
+ }
+
+ hash = dictionary_hash(key);
+ for (i=0 ; i<d->size ; i++) {
+ if (d->key[i]==NULL)
+ continue ;
+ /* Compare hash */
+ if (hash==d->hash[i]) {
+ /* Compare string, to avoid hash collisions */
+ if (!strcmp(key, d->key[i])) {
+ /* Found key */
+ break ;
+ }
+ }
+ }
+ if (i>=d->size)
+ /* Key not found */
+ return ;
+
+ free(d->key[i]);
+ d->key[i] = NULL ;
+ if (d->val[i]!=NULL) {
+ free(d->val[i]);
+ d->val[i] = NULL ;
+ }
+ d->hash[i] = 0 ;
+ d->n -- ;
+ return ;
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set a key in a dictionary, providing an int.
+ @param d Dictionary to update.
+ @param key Key to modify or add
+ @param val Integer value to store (will be stored as a string).
+ @return void
+
+ This helper function calls dictionary_set() with the provided integer
+ converted to a string using %d.
+ */
+/*--------------------------------------------------------------------------*/
+
+
+void dictionary_setint(dictionary * d, char * key, int val)
+{
+ char sval[MAXVALSZ];
+ sprintf(sval, "%d", val);
+ dictionary_set(d, key, sval);
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set a key in a dictionary, providing a double.
+ @param d Dictionary to update.
+ @param key Key to modify or add
+ @param val Double value to store (will be stored as a string).
+ @return void
+
+ This helper function calls dictionary_set() with the provided double
+ converted to a string using %g.
+ */
+/*--------------------------------------------------------------------------*/
+
+
+void dictionary_setdouble(dictionary * d, char * key, double val)
+{
+ char sval[MAXVALSZ];
+ sprintf(sval, "%g", val);
+ dictionary_set(d, key, sval);
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Dump a dictionary to an opened file pointer.
+ @param d Dictionary to dump
+ @param f Opened file pointer.
+ @return void
+
+ Dumps a dictionary onto an opened file pointer. Key pairs are printed out
+ as @c [Key]=[Value], one per line. It is Ok to provide stdout or stderr as
+ output file pointers.
+ */
+/*--------------------------------------------------------------------------*/
+
+void dictionary_dump(dictionary * d, FILE * out)
+{
+ int i ;
+
+ if (d==NULL || out==NULL) return ;
+ if (d->n<1) {
+ fprintf(out, "empty dictionary\n");
+ return ;
+ }
+ for (i=0 ; i<d->size ; i++) {
+ if (d->key[i]) {
+ fprintf(out, "%20s\t[%s]\n",
+ d->key[i],
+ d->val[i] ? d->val[i] : "UNDEF");
+ }
+ }
+ return ;
+}
+
+
+
+/* Example code */
+#ifdef TESTDIC
+#define NVALS 20000
+int main(int argc, char *argv[])
+{
+ dictionary * d ;
+ char * val ;
+ int i ;
+ char cval[90] ;
+
+ /* allocate blackboard */
+ printf("allocating...\n");
+ d = dictionary_new(0);
+
+ /* Set values in blackboard */
+ printf("setting %d values...\n", NVALS);
+ for (i=0 ; i<NVALS ; i++) {
+ sprintf(cval, "%04d", i);
+ dictionary_set(d, cval, "salut");
+ }
+ printf("getting %d values...\n", NVALS);
+ for (i=0 ; i<NVALS ; i++) {
+ sprintf(cval, "%04d", i);
+ val = dictionary_get(d, cval, DICT_INVALID_KEY);
+ if (val==DICT_INVALID_KEY) {
+ printf("cannot get value for key [%s]\n", cval);
+ }
+ }
+ printf("unsetting %d values...\n", NVALS);
+ for (i=0 ; i<NVALS ; i++) {
+ sprintf(cval, "%04d", i);
+ dictionary_unset(d, cval);
+ }
+ if (d->n != 0) {
+ printf("error deleting values\n");
+ }
+
+ printf("deallocating...\n");
+ dictionary_del(d);
+ return 0 ;
+}
+#endif
+/* vim: set ts=4 et sw=4 tw=75 */
diff --git a/lib/iniparser/src/dictionary.h b/lib/iniparser/src/dictionary.h
new file mode 100644
index 0000000000..b332680b04
--- /dev/null
+++ b/lib/iniparser/src/dictionary.h
@@ -0,0 +1,244 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+ @file dictionary.h
+ @author N. Devillard
+ @date Aug 2000
+ @version $Revision: 1.11 $
+ @brief Implements a dictionary for string variables.
+
+ This module implements a simple dictionary object, i.e. a list
+ of string/string associations. This object is useful to store e.g.
+  information retrieved from a configuration file (ini files).
+*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ $Id: dictionary.h,v 1.11 2002-06-17 09:30:46 ndevilla Exp $
+ $Author: ndevilla $
+ $Date: 2002-06-17 09:30:46 $
+ $Revision: 1.11 $
+*/
+
+#ifndef _DICTIONARY_H_
+#define _DICTIONARY_H_
+
+/*---------------------------------------------------------------------------
+ Includes
+ ---------------------------------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/*---------------------------------------------------------------------------
+ New types
+ ---------------------------------------------------------------------------*/
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Dictionary object
+
+ This object contains a list of string/string associations. Each
+ association is identified by a unique string key. Looking up values
+ in the dictionary is speeded up by the use of a (hopefully collision-free)
+ hash function.
+ */
+/*-------------------------------------------------------------------------*/
typedef struct _dictionary_ {
    int n ; /**< Number of entries in dictionary */
    int size ; /**< Storage size */
    char ** val ; /**< List of string values */
    char ** key ; /**< List of string keys */
    unsigned * hash ; /**< List of hash values for keys */
} dictionary ;
+
+
+/*---------------------------------------------------------------------------
+ Function prototypes
+ ---------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Compute the hash key for a string.
+ @param key Character string to use for key.
+ @return 1 unsigned int on at least 32 bits.
+
+ This hash function has been taken from an Article in Dr Dobbs Journal.
+ This is normally a collision-free function, distributing keys evenly.
+ The key is stored anyway in the struct so that collision can be avoided
+ by comparing the key itself in last resort.
+ */
+/*--------------------------------------------------------------------------*/
+unsigned dictionary_hash(char * key);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Create a new dictionary object.
+ @param size Optional initial size of the dictionary.
+  @return 1 newly allocated dictionary object.
+
+ This function allocates a new dictionary object of given size and returns
+ it. If you do not know in advance (roughly) the number of entries in the
+ dictionary, give size=0.
+ */
+/*--------------------------------------------------------------------------*/
+dictionary * dictionary_new(int size);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Delete a dictionary object
+ @param d dictionary object to deallocate.
+ @return void
+
+ Deallocate a dictionary object and all memory associated to it.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_del(dictionary * vd);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value to return if key not found.
+ @return 1 pointer to internally allocated character string.
+
+ This function locates a key in a dictionary and returns a pointer to its
+ value, or the passed 'def' pointer if no such key can be found in
+ dictionary. The returned character pointer points to data internal to the
+ dictionary object, you should not try to free it or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+char * dictionary_get(dictionary * d, char * key, char * def);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary, as a char.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value for the key if not found.
+ @return char
+
+ This function locates a key in a dictionary using dictionary_get,
+ and returns the first char of the found string.
+ */
+/*--------------------------------------------------------------------------*/
+char dictionary_getchar(dictionary * d, char * key, char def) ;
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary, as an int.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value for the key if not found.
+ @return int
+
+ This function locates a key in a dictionary using dictionary_get,
+ and applies atoi on it to return an int. If the value cannot be found
+ in the dictionary, the default is returned.
+ */
+/*--------------------------------------------------------------------------*/
+int dictionary_getint(dictionary * d, char * key, int def);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get a value from a dictionary, as a double.
+ @param d dictionary object to search.
+ @param key Key to look for in the dictionary.
+ @param def Default value for the key if not found.
+ @return double
+
+ This function locates a key in a dictionary using dictionary_get,
+ and applies atof on it to return a double. If the value cannot be found
+ in the dictionary, the default is returned.
+ */
+/*--------------------------------------------------------------------------*/
+double dictionary_getdouble(dictionary * d, char * key, double def);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set a value in a dictionary.
+ @param d dictionary object to modify.
+ @param key Key to modify or add.
+ @param val Value to add.
+ @return void
+
+ If the given key is found in the dictionary, the associated value is
+ replaced by the provided one. If the key cannot be found in the
+ dictionary, it is added to it.
+
+ It is Ok to provide a NULL value for val, but NULL values for the dictionary
+ or the key are considered as errors: the function will return immediately
+ in such a case.
+
+ Notice that if you dictionary_set a variable to NULL, a call to
+ dictionary_get will return a NULL value: the variable will be found, and
+ its value (NULL) is returned. In other words, setting the variable
+ content to NULL is equivalent to deleting the variable from the
+ dictionary. It is not possible (in this implementation) to have a key in
+ the dictionary without value.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_set(dictionary * vd, char * key, char * val);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Delete a key in a dictionary
+ @param d dictionary object to modify.
+ @param key Key to remove.
+ @return void
+
+ This function deletes a key in a dictionary. Nothing is done if the
+ key cannot be found.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_unset(dictionary * d, char * key);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set a key in a dictionary, providing an int.
+ @param d Dictionary to update.
+ @param key Key to modify or add
+ @param val Integer value to store (will be stored as a string).
+ @return void
+
+ This helper function calls dictionary_set() with the provided integer
+ converted to a string using %d.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_setint(dictionary * d, char * key, int val);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set a key in a dictionary, providing a double.
+ @param d Dictionary to update.
+ @param key Key to modify or add
+ @param val Double value to store (will be stored as a string).
+ @return void
+
+ This helper function calls dictionary_set() with the provided double
+ converted to a string using %g.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_setdouble(dictionary * d, char * key, double val);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Dump a dictionary to an opened file pointer.
+ @param d Dictionary to dump
+ @param f Opened file pointer.
+ @return void
+
+ Dumps a dictionary onto an opened file pointer. Key pairs are printed out
+ as @c [Key]=[Value], one per line. It is Ok to provide stdout or stderr as
+ output file pointers.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_dump(dictionary * d, FILE * out);
+
+#endif
diff --git a/lib/iniparser/src/iniparser.c b/lib/iniparser/src/iniparser.c
new file mode 100644
index 0000000000..09340876d8
--- /dev/null
+++ b/lib/iniparser/src/iniparser.c
@@ -0,0 +1,536 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+ @file iniparser.c
+ @author N. Devillard
+ @date Mar 2000
+ @version $Revision: 2.17 $
+ @brief Parser for ini files.
+*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ $Id: iniparser.c,v 2.17 2007-05-27 13:03:43 ndevilla Exp $
+ $Author: ndevilla $
+ $Date: 2007-05-27 13:03:43 $
+ $Revision: 2.17 $
+*/
+
+/*---------------------------------------------------------------------------
+ Includes
+ ---------------------------------------------------------------------------*/
+
+#include "iniparser.h"
+#include "strlib.h"
+
+#define ASCIILINESZ 1024
+#define INI_INVALID_KEY ((char*)-1)
+
+/*---------------------------------------------------------------------------
+ Private to this module
+ ---------------------------------------------------------------------------*/
+
+/* Private: add an entry to the dictionary */
+/* Private: add an entry to the dictionary.
+ *
+ * Builds the storage key as "section:key", or just the section name when
+ * key is NULL (sections are stored as keys with a NULL value), and hands
+ * the pair to dictionary_set().
+ */
+static void iniparser_add_entry(
+    dictionary * d,
+    char * sec,
+    char * key,
+    char * val)
+{
+    char longkey[2*ASCIILINESZ+1];
+
+    /* Make a key as section:keyword.  Use snprintf: sec and key may each
+     * be up to ASCIILINESZ characters, so "sec:key" plus the NUL
+     * terminator can need 2*ASCIILINESZ+2 bytes, one more than longkey
+     * holds -- the previous sprintf could overflow by one byte. */
+    if (key!=NULL) {
+        snprintf(longkey, sizeof(longkey), "%s:%s", sec, key);
+    } else {
+        snprintf(longkey, sizeof(longkey), "%s", sec);
+    }
+
+    /* Add (key,val) to dictionary */
+    dictionary_set(d, longkey, val);
+    return ;
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get number of sections in a dictionary
+ @param d Dictionary to examine
+ @return int Number of sections found in dictionary
+
+ This function returns the number of sections found in a dictionary.
+ The test to recognize sections is done on the string stored in the
+ dictionary: a section name is given as "section" whereas a key is
+ stored as "section:key", thus the test looks for entries that do not
+ contain a colon.
+
+ This clearly fails in the case a section name contains a colon, but
+ this should simply be avoided.
+
+ This function returns -1 in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+
+int iniparser_getnsec(dictionary * d)
+{
+    int idx;
+    int count = 0;
+
+    if (d == NULL)
+        return -1;
+    /* Section names are stored without a colon, whereas keys are stored
+       as "section:key".  Count the colon-free live entries. */
+    for (idx = 0; idx < d->size; idx++) {
+        if (d->key[idx] != NULL && strchr(d->key[idx], ':') == NULL)
+            count++;
+    }
+    return count;
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get name for section n in a dictionary.
+ @param d Dictionary to examine
+ @param n Section number (from 0 to nsec-1).
+ @return Pointer to char string
+
+ This function locates the n-th section in a dictionary and returns
+ its name as a pointer to a string statically allocated inside the
+ dictionary. Do not free or modify the returned string!
+
+ This function returns NULL in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+
+char * iniparser_getsecname(dictionary * d, int n)
+{
+    int idx;
+    int remaining;
+
+    if (d == NULL || n < 0)
+        return NULL;
+    /* Walk the table and return the (n+1)-th colon-free key, i.e. the
+       n-th section name (counting from 0).  The pointer belongs to the
+       dictionary: callers must not free or modify it. */
+    remaining = n;
+    for (idx = 0; idx < d->size; idx++) {
+        if (d->key[idx] == NULL)
+            continue;
+        if (strchr(d->key[idx], ':') != NULL)
+            continue;
+        if (remaining == 0)
+            return d->key[idx];
+        remaining--;
+    }
+    /* Fewer than n+1 sections present. */
+    return NULL;
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Dump a dictionary to an opened file pointer.
+ @param d Dictionary to dump.
+ @param f Opened file pointer to dump to.
+ @return void
+
+ This function prints out the contents of a dictionary, one element by
+ line, onto the provided file pointer. It is OK to specify @c stderr
+ or @c stdout as output files. This function is meant for debugging
+ purposes mostly.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_dump(dictionary * d, FILE * f)
+{
+    int idx;
+
+    if (d == NULL || f == NULL)
+        return;
+    /* Debug dump: one "[key]=[value]" line per live entry; entries with
+       a NULL value (sections) are shown as UNDEF. */
+    for (idx = 0; idx < d->size; idx++) {
+        if (d->key[idx] == NULL)
+            continue;
+        if (d->val[idx] == NULL) {
+            fprintf(f, "[%s]=UNDEF\n", d->key[idx]);
+        } else {
+            fprintf(f, "[%s]=[%s]\n", d->key[idx], d->val[idx]);
+        }
+    }
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Save a dictionary to a loadable ini file
+ @param d Dictionary to dump
+ @param f Opened file pointer to dump to
+ @return void
+
+ This function dumps a given dictionary into a loadable ini file.
+ It is Ok to specify @c stderr or @c stdout as output files.
+ */
+/*--------------------------------------------------------------------------*/
+
+/* Dump a dictionary as a loadable ini file onto an opened FILE pointer. */
+void iniparser_dump_ini(dictionary * d, FILE * f)
+{
+    int i, j ;
+    char keym[ASCIILINESZ+1];
+    int nsec ;
+    char * secname ;
+    int seclen ;
+
+    if (d==NULL || f==NULL) return ;
+
+    nsec = iniparser_getnsec(d);
+    if (nsec<1) {
+        /* No section in file: dump all keys as they are.  Entries may
+         * carry a NULL value (that is how sections are stored), so guard
+         * against passing NULL to printf's %s, which is undefined
+         * behavior. */
+        for (i=0 ; i<d->size ; i++) {
+            if (d->key[i]==NULL)
+                continue ;
+            fprintf(f, "%s = %s\n", d->key[i],
+                    d->val[i] ? d->val[i] : "");
+        }
+        return ;
+    }
+    for (i=0 ; i<nsec ; i++) {
+        secname = iniparser_getsecname(d, i) ;
+        seclen = (int)strlen(secname);
+        fprintf(f, "\n[%s]\n", secname);
+        /* Build the "section:" prefix used to match this section's keys.
+         * snprintf: a maximal-length section name plus ':' would overflow
+         * keym by one byte with sprintf. */
+        snprintf(keym, sizeof(keym), "%s:", secname);
+        for (j=0 ; j<d->size ; j++) {
+            if (d->key[j]==NULL)
+                continue ;
+            if (!strncmp(d->key[j], keym, seclen+1)) {
+                fprintf(f,
+                        "%-30s = %s\n",
+                        d->key[j]+seclen+1,
+                        d->val[j] ? d->val[j] : "");
+            }
+        }
+    }
+    fprintf(f, "\n");
+    return ;
+}
+
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, return NULL if not found
+ @param d Dictionary to search
+ @param key Key string to look for
+ @return pointer to statically allocated character string, or NULL.
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ NULL is returned.
+ The returned char pointer is pointing to a string allocated in
+ the dictionary, do not free or modify it.
+
+ This function is only provided for backwards compatibility with
+ previous versions of iniparser. It is recommended to use
+ iniparser_getstring() instead.
+ */
+/*--------------------------------------------------------------------------*/
+char * iniparser_getstr(dictionary * d, const char * key)
+{
+    char * value;
+
+    /* Legacy alias for iniparser_getstring() with a NULL default:
+       a key absent from the dictionary yields NULL. */
+    value = iniparser_getstring(d, key, NULL);
+    return value;
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param def Default value to return if key not found.
+ @return pointer to statically allocated character string
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the pointer passed as 'def' is returned.
+ The returned char pointer is pointing to a string allocated in
+ the dictionary, do not free or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+/* Look up "section:key" in the dictionary; return def when absent.
+ * The returned pointer belongs to the dictionary: do not free or modify. */
+char * iniparser_getstring(dictionary * d, const char * key, char * def)
+{
+    char * lc_key ;
+    char * sval ;
+
+    if (d==NULL || key==NULL)
+        return def ;
+
+    /* Lowercase the query to match stored keys.  strlwc() returns a
+     * static buffer, so take a private copy before the lookup. */
+    if (!(lc_key = strdup(strlwc(key)))) {
+        /* Out of memory: fall back to the caller's default rather than
+         * NULL.  Returning NULL here broke sentinel defaults -- e.g.
+         * iniparser_getint() passes INI_INVALID_KEY and would have
+         * treated the NULL as a found value, then crashed in strtol(). */
+        return def;
+    }
+    sval = dictionary_get(d, lc_key, def);
+    free(lc_key);
+    return sval ;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, convert to an int
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param notfound Value to return in case of error
+ @return integer
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the notfound value is returned.
+
+ Supported values for integers include the usual C notation
+ so decimal, octal (starting with 0) and hexadecimal (starting with 0x)
+ are supported. Examples:
+
+ "42" -> 42
+ "042" -> 34 (octal -> decimal)
+ "0x42" -> 66 (hexa -> decimal)
+
+ Warning: the conversion may overflow in various ways. Conversion is
+ totally outsourced to strtol(), see the associated man page for overflow
+ handling.
+
+ Credits: Thanks to A. Becker for suggesting strtol()
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getint(dictionary * d, const char * key, int notfound)
+{
+    char * raw;
+
+    /* INI_INVALID_KEY distinguishes "missing key" from a key whose
+       stored value happens to be NULL. */
+    raw = iniparser_getstring(d, key, INI_INVALID_KEY);
+    if (raw == INI_INVALID_KEY)
+        return notfound;
+    /* Base 0: strtol accepts decimal, octal (0...) and hex (0x...). */
+    return (int)strtol(raw, NULL, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, convert to a double
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param notfound Value to return in case of error
+ @return double
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the notfound value is returned.
+ */
+/*--------------------------------------------------------------------------*/
+double iniparser_getdouble(dictionary * d, char * key, double notfound)
+{
+    char * raw = iniparser_getstring(d, key, INI_INVALID_KEY);
+
+    /* INI_INVALID_KEY marks a missing key; otherwise convert with atof. */
+    return (raw == INI_INVALID_KEY) ? notfound : atof(raw);
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, convert to a boolean
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param notfound Value to return in case of error
+ @return integer
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the notfound value is returned.
+
+ A true boolean is found if one of the following is matched:
+
+ - A string starting with 'y'
+ - A string starting with 'Y'
+ - A string starting with 't'
+ - A string starting with 'T'
+ - A string starting with '1'
+
+ A false boolean is found if one of the following is matched:
+
+ - A string starting with 'n'
+ - A string starting with 'N'
+ - A string starting with 'f'
+ - A string starting with 'F'
+ - A string starting with '0'
+
+ The notfound value returned if no boolean is identified, does not
+ necessarily have to be 0 or 1.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getboolean(dictionary * d, const char * key, int notfound)
+{
+    char * sval;
+
+    sval = iniparser_getstring(d, key, INI_INVALID_KEY);
+    if (sval == INI_INVALID_KEY)
+        return notfound;
+    /* Only the first character decides: y/Y/t/T/1 are true,
+       n/N/f/F/0 are false, anything else yields notfound. */
+    switch (sval[0]) {
+    case 'y': case 'Y': case 't': case 'T': case '1':
+        return 1;
+    case 'n': case 'N': case 'f': case 'F': case '0':
+        return 0;
+    default:
+        return notfound;
+    }
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Finds out if a given entry exists in a dictionary
+ @param ini Dictionary to search
+ @param entry Name of the entry to look for
+ @return integer 1 if entry exists, 0 otherwise
+
+ Finds out if a given entry exists in the dictionary. Since sections
+ are stored as keys with NULL associated values, this is the only way
+ of querying for the presence of sections in a dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+
+int iniparser_find_entry(
+    dictionary * ini,
+    char * entry
+)
+{
+    /* An entry exists iff the lookup does not fall back to the
+       INI_INVALID_KEY sentinel.  This also detects sections, which are
+       stored as keys with a NULL value. */
+    return (iniparser_getstring(ini, entry, INI_INVALID_KEY)
+            != INI_INVALID_KEY) ? 1 : 0;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set an entry in a dictionary.
+ @param ini Dictionary to modify.
+ @param entry Entry to modify (entry name)
+ @param val New value to associate to the entry.
+ @return int 0 if Ok, -1 otherwise (note: this implementation currently always returns 0).
+
+ If the given entry can be found in the dictionary, it is modified to
+ contain the provided value. If it cannot be found, -1 is returned.
+ It is Ok to set val to NULL.
+ */
+/*--------------------------------------------------------------------------*/
+
+int iniparser_setstr(dictionary * ini, char * entry, char * val)
+{
+    /* Keys are stored lowercased, so lowercase the entry name before
+     * storing.  NOTE(review): the header documents a -1 error return,
+     * but this implementation always returns 0 -- dictionary_set()
+     * reports no failure to us. */
+    dictionary_set(ini, strlwc(entry), val);
+    return 0 ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Delete an entry in a dictionary
+ @param ini Dictionary to modify
+ @param entry Entry to delete (entry name)
+ @return void
+
+ If the given entry can be found, it is deleted from the dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_unset(dictionary * ini, char * entry)
+{
+    /* Entries are stored with lowercased keys: lowercase before the
+     * delete.  Removing a key that is absent is a no-op. */
+    dictionary_unset(ini, strlwc(entry));
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Parse an ini file and return an allocated dictionary object
+ @param ininame Name of the ini file to read.
+ @return Pointer to newly allocated dictionary
+
+ This is the parser for ini files. This function is called, providing
+ the name of the file to be read. It returns a dictionary object that
+ should not be accessed directly, but through accessor functions
+ instead.
+
+ The returned dictionary must be freed using iniparser_freedict().
+ */
+/*--------------------------------------------------------------------------*/
+
+dictionary * iniparser_load(const char * ininame)
+{
+    dictionary * d ;
+    char lin[ASCIILINESZ+1];
+    char sec[ASCIILINESZ+1];
+    char key[ASCIILINESZ+1];
+    char val[ASCIILINESZ+1];
+    char * where ;
+    FILE * ini ;
+    int lineno ;
+
+    if ((ini=fopen(ininame, "r"))==NULL) {
+        return NULL ;
+    }
+
+    /* Current section name; empty until the first [section] header, so
+     * keys seen before any section are stored under the empty name. */
+    sec[0]=0;
+
+    /*
+     * Initialize a new dictionary entry
+     */
+    if (!(d = dictionary_new(0))) {
+        fclose(ini);
+        return NULL;
+    }
+    /* lineno is incremented per fgets() call but never read afterwards
+     * in this implementation. */
+    lineno = 0 ;
+    /* fgets keeps at most ASCIILINESZ-1 characters per call; a longer
+     * physical line is consumed as several reads. */
+    while (fgets(lin, ASCIILINESZ, ini)!=NULL) {
+        lineno++ ;
+        where = strskp(lin); /* Skip leading spaces */
+        if (*where==';' || *where=='#' || *where==0)
+            continue ; /* Comment lines */
+        else {
+            if (sscanf(where, "[%[^]]", sec)==1) {
+                /* Valid section name: sections are stored lowercased,
+                 * as a key with no value. */
+                strcpy(sec, strlwc(sec));
+                iniparser_add_entry(d, sec, NULL, NULL);
+            /* key=value: try double-quoted, then single-quoted, then
+             * bare values; a bare value ends at ';' or '#' (inline
+             * comment markers). */
+            } else if (sscanf (where, "%[^=] = \"%[^\"]\"", key, val) == 2
+                   || sscanf (where, "%[^=] = '%[^\']'", key, val) == 2
+                   || sscanf (where, "%[^=] = %[^;#]", key, val) == 2) {
+                /* Normalize the key: strcrop() then lowercase (see
+                 * strlib.c for the crop semantics). */
+                strcpy(key, strlwc(strcrop(key)));
+                /*
+                 * sscanf cannot handle "" or '' as empty value,
+                 * this is done here
+                 */
+                if (!strcmp(val, "\"\"") || !strcmp(val, "''")) {
+                    val[0] = (char)0;
+                } else {
+                    strcpy(val, strcrop(val));
+                }
+                iniparser_add_entry(d, sec, key, val);
+            }
+        }
+    }
+    fclose(ini);
+    return d ;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Free all memory associated to an ini dictionary
+ @param d Dictionary to free
+ @return void
+
+ Free all memory associated to an ini dictionary.
+ It is mandatory to call this function before the dictionary object
+ gets out of the current context.
+ */
+/*--------------------------------------------------------------------------*/
+
+void iniparser_freedict(dictionary * d)
+{
+    /* Thin wrapper: dictionary_del() deallocates the dictionary object
+     * and all memory associated to it (see dictionary.h). */
+    dictionary_del(d);
+}
+
+/* vim: set ts=4 et sw=4 tw=75 */
diff --git a/lib/iniparser/src/iniparser.h b/lib/iniparser/src/iniparser.h
new file mode 100644
index 0000000000..5bbd9045cf
--- /dev/null
+++ b/lib/iniparser/src/iniparser.h
@@ -0,0 +1,296 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+ @file iniparser.h
+ @author N. Devillard
+ @date Mar 2000
+ @version $Revision: 1.23 $
+ @brief Parser for ini files.
+*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ $Id: iniparser.h,v 1.23 2006-09-27 11:03:35 ndevilla Exp $
+ $Author: ndevilla $
+ $Date: 2006-09-27 11:03:35 $
+ $Revision: 1.23 $
+*/
+
+#ifndef _INIPARSER_H_
+#define _INIPARSER_H_
+
+/*---------------------------------------------------------------------------
+ Includes
+ ---------------------------------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * The following #include is necessary on many Unixes but not Linux.
+ * It is not needed for Windows platforms.
+ * Uncomment it if needed.
+ */
+/* #include <unistd.h> */
+
+#include "dictionary.h"
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get number of sections in a dictionary
+ @param d Dictionary to examine
+ @return int Number of sections found in dictionary
+
+ This function returns the number of sections found in a dictionary.
+ The test to recognize sections is done on the string stored in the
+ dictionary: a section name is given as "section" whereas a key is
+ stored as "section:key", thus the test looks for entries that do not
+ contain a colon.
+
+ This clearly fails in the case a section name contains a colon, but
+ this should simply be avoided.
+
+ This function returns -1 in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+
+int iniparser_getnsec(dictionary * d);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get name for section n in a dictionary.
+ @param d Dictionary to examine
+ @param n Section number (from 0 to nsec-1).
+ @return Pointer to char string
+
+ This function locates the n-th section in a dictionary and returns
+ its name as a pointer to a string statically allocated inside the
+ dictionary. Do not free or modify the returned string!
+
+ This function returns NULL in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+
+char * iniparser_getsecname(dictionary * d, int n);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Save a dictionary to a loadable ini file
+ @param d Dictionary to dump
+ @param f Opened file pointer to dump to
+ @return void
+
+ This function dumps a given dictionary into a loadable ini file.
+ It is Ok to specify @c stderr or @c stdout as output files.
+ */
+/*--------------------------------------------------------------------------*/
+
+void iniparser_dump_ini(dictionary * d, FILE * f);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Dump a dictionary to an opened file pointer.
+ @param d Dictionary to dump.
+ @param f Opened file pointer to dump to.
+ @return void
+
+ This function prints out the contents of a dictionary, one element by
+ line, onto the provided file pointer. It is OK to specify @c stderr
+ or @c stdout as output files. This function is meant for debugging
+ purposes mostly.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_dump(dictionary * d, FILE * f);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, return NULL if not found
+ @param d Dictionary to search
+ @param key Key string to look for
+ @return pointer to statically allocated character string, or NULL.
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ NULL is returned.
+ The returned char pointer is pointing to a string allocated in
+ the dictionary, do not free or modify it.
+
+ This function is only provided for backwards compatibility with
+ previous versions of iniparser. It is recommended to use
+ iniparser_getstring() instead.
+ */
+/*--------------------------------------------------------------------------*/
+char * iniparser_getstr(dictionary * d, const char * key);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param def Default value to return if key not found.
+ @return pointer to statically allocated character string
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the pointer passed as 'def' is returned.
+ The returned char pointer is pointing to a string allocated in
+ the dictionary, do not free or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+char * iniparser_getstring(dictionary * d, const char * key, char * def);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, convert to an int
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param notfound Value to return in case of error
+ @return integer
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the notfound value is returned.
+
+ Supported values for integers include the usual C notation
+ so decimal, octal (starting with 0) and hexadecimal (starting with 0x)
+ are supported. Examples:
+
+ - "42" -> 42
+ - "042" -> 34 (octal -> decimal)
+ - "0x42" -> 66 (hexa -> decimal)
+
+ Warning: the conversion may overflow in various ways. Conversion is
+ totally outsourced to strtol(), see the associated man page for overflow
+ handling.
+
+ Credits: Thanks to A. Becker for suggesting strtol()
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getint(dictionary * d, const char * key, int notfound);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, convert to a double
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param notfound Value to return in case of error
+ @return double
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the notfound value is returned.
+ */
+/*--------------------------------------------------------------------------*/
+double iniparser_getdouble(dictionary * d, char * key, double notfound);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Get the string associated to a key, convert to a boolean
+ @param d Dictionary to search
+ @param key Key string to look for
+ @param notfound Value to return in case of error
+ @return integer
+
+ This function queries a dictionary for a key. A key as read from an
+ ini file is given as "section:key". If the key cannot be found,
+ the notfound value is returned.
+
+ A true boolean is found if one of the following is matched:
+
+ - A string starting with 'y'
+ - A string starting with 'Y'
+ - A string starting with 't'
+ - A string starting with 'T'
+ - A string starting with '1'
+
+ A false boolean is found if one of the following is matched:
+
+ - A string starting with 'n'
+ - A string starting with 'N'
+ - A string starting with 'f'
+ - A string starting with 'F'
+ - A string starting with '0'
+
+ The notfound value returned if no boolean is identified, does not
+ necessarily have to be 0 or 1.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getboolean(dictionary * d, const char * key, int notfound);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Set an entry in a dictionary.
+ @param ini Dictionary to modify.
+ @param entry Entry to modify (entry name)
+ @param val New value to associate to the entry.
+ @return int 0 if Ok, -1 otherwise (note: the current implementation always returns 0).
+
+ If the given entry can be found in the dictionary, it is modified to
+ contain the provided value. If it cannot be found, -1 is returned.
+ It is Ok to set val to NULL.
+ */
+/*--------------------------------------------------------------------------*/
+
+int iniparser_setstr(dictionary * ini, char * entry, char * val);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Delete an entry in a dictionary
+ @param ini Dictionary to modify
+ @param entry Entry to delete (entry name)
+ @return void
+
+ If the given entry can be found, it is deleted from the dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_unset(dictionary * ini, char * entry);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Finds out if a given entry exists in a dictionary
+ @param ini Dictionary to search
+ @param entry Name of the entry to look for
+ @return integer 1 if entry exists, 0 otherwise
+
+ Finds out if a given entry exists in the dictionary. Since sections
+ are stored as keys with NULL associated values, this is the only way
+ of querying for the presence of sections in a dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_find_entry(dictionary * ini, char * entry) ;
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Parse an ini file and return an allocated dictionary object
+ @param ininame Name of the ini file to read.
+ @return Pointer to newly allocated dictionary
+
+ This is the parser for ini files. This function is called, providing
+ the name of the file to be read. It returns a dictionary object that
+ should not be accessed directly, but through accessor functions
+ instead.
+
+ The returned dictionary must be freed using iniparser_freedict().
+ */
+/*--------------------------------------------------------------------------*/
+dictionary * iniparser_load(const char * ininame);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Free all memory associated to an ini dictionary
+ @param d Dictionary to free
+ @return void
+
+ Free all memory associated to an ini dictionary.
+ It is mandatory to call this function before the dictionary object
+ gets out of the current context.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_freedict(dictionary * d);
+
+#endif
diff --git a/lib/iniparser/src/strlib.c b/lib/iniparser/src/strlib.c
new file mode 100644
index 0000000000..f0d85aea58
--- /dev/null
+++ b/lib/iniparser/src/strlib.c
@@ -0,0 +1,211 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+ @file strlib.c
+ @author N. Devillard
+ @date Jan 2001
+ @version $Revision: 1.9 $
+ @brief Various string handling routines to complement the C lib.
+
+ This modules adds a few complementary string routines usually missing
+ in the standard C library.
+*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ $Id: strlib.c,v 1.9 2006-09-27 11:04:11 ndevilla Exp $
+ $Author: ndevilla $
+ $Date: 2006-09-27 11:04:11 $
+ $Revision: 1.9 $
+*/
+
+/*---------------------------------------------------------------------------
+ Includes
+ ---------------------------------------------------------------------------*/
+
+#include <string.h>
+#include <ctype.h>
+
+#include "strlib.h"
+
+/*---------------------------------------------------------------------------
+ Defines
+ ---------------------------------------------------------------------------*/
+#define ASCIILINESZ 1024
+
+/*---------------------------------------------------------------------------
+ Function codes
+ ---------------------------------------------------------------------------*/
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Convert a string to lowercase.
+ @param s String to convert.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string
+ containing a lowercased version of the input string. Do not free
+ or modify the returned string! Since the returned string is statically
+ allocated, it will be modified at each function call (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+
+char * strlwc(const char * s)
+{
+ static char l[ASCIILINESZ+1];
+ int i ;
+
+ if (s==NULL) return NULL ;
+ memset(l, 0, ASCIILINESZ+1);
+ i=0 ;
+ while (s[i] && i<ASCIILINESZ) {
+ l[i] = (char)tolower((int)s[i]);
+ i++ ;
+ }
+ l[ASCIILINESZ]=(char)0;
+ return l ;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Convert a string to uppercase.
+ @param s String to convert.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string
+ containing an uppercased version of the input string. Do not free
+ or modify the returned string! Since the returned string is statically
+ allocated, it will be modified at each function call (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+
+char * strupc(char * s)
+{
+ static char l[ASCIILINESZ+1];
+ int i ;
+
+ if (s==NULL) return NULL ;
+ memset(l, 0, ASCIILINESZ+1);
+ i=0 ;
+ while (s[i] && i<ASCIILINESZ) {
+ l[i] = (char)toupper((int)s[i]);
+ i++ ;
+ }
+ l[ASCIILINESZ]=(char)0;
+ return l ;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Skip blanks until the first non-blank character.
+ @param s String to parse.
+ @return Pointer to char inside given string.
+
+ This function returns a pointer to the first non-blank character in the
+ given string.
+ */
+/*--------------------------------------------------------------------------*/
+
+char * strskp(char * s)
+{
+ char * skip = s;
+ if (s==NULL) return NULL ;
+ while (isspace((int)*skip) && *skip) skip++;
+ return skip ;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Remove blanks at the end of a string.
+ @param s String to parse.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string,
+ which is identical to the input string, except that all blank
+ characters at the end of the string have been removed.
+ Do not free or modify the returned string! Since the returned string
+ is statically allocated, it will be modified at each function call
+ (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+
+char * strcrop(char * s)
+{
+ static char l[ASCIILINESZ+1];
+ char * last ;
+
+ if (s==NULL) return NULL ;
+ memset(l, 0, ASCIILINESZ+1);
+ strcpy(l, s);
+ last = l + strlen(l);
+ while (last > l) {
+ if (!isspace((int)*(last-1)))
+ break ;
+ last -- ;
+ }
+ *last = (char)0;
+ return l ;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Remove blanks at the beginning and the end of a string.
+ @param s String to parse.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string,
+ which is identical to the input string, except that all blank
+ characters at the end and the beg. of the string have been removed.
+ Do not free or modify the returned string! Since the returned string
+ is statically allocated, it will be modified at each function call
+ (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+char * strstrip(char * s)
+{
+ static char l[ASCIILINESZ+1];
+ char * last ;
+
+ if (s==NULL) return NULL ;
+
+ while (isspace((int)*s) && *s) s++;
+
+ memset(l, 0, ASCIILINESZ+1);
+ strcpy(l, s);
+ last = l + strlen(l);
+ while (last > l) {
+ if (!isspace((int)*(last-1)))
+ break ;
+ last -- ;
+ }
+ *last = (char)0;
+
+ return (char*)l ;
+}
+
+/* Test code */
+#ifdef TEST
+int main(int argc, char * argv[])
+{
+ char * str ;
+
+ str = "\t\tI'm a lumberkack and I'm OK " ;
+ printf("lowercase: [%s]\n", strlwc(str));
+ printf("uppercase: [%s]\n", strupc(str));
+ printf("skipped : [%s]\n", strskp(str));
+ printf("cropped : [%s]\n", strcrop(str));
+ printf("stripped : [%s]\n", strstrip(str));
+
+ return 0 ;
+}
+#endif
+/* vim: set ts=4 et sw=4 tw=75 */
diff --git a/lib/iniparser/src/strlib.h b/lib/iniparser/src/strlib.h
new file mode 100644
index 0000000000..cd70a6287d
--- /dev/null
+++ b/lib/iniparser/src/strlib.h
@@ -0,0 +1,108 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+ @file strlib.h
+ @author N. Devillard
+ @date Jan 2001
+ @version $Revision: 1.4 $
+ @brief Various string handling routines to complement the C lib.
+
+ This modules adds a few complementary string routines usually missing
+ in the standard C library.
+*/
+/*--------------------------------------------------------------------------*/
+
+/*
+ $Id: strlib.h,v 1.4 2006-09-27 11:04:11 ndevilla Exp $
+ $Author: ndevilla $
+ $Date: 2006-09-27 11:04:11 $
+ $Revision: 1.4 $
+*/
+
+#ifndef _STRLIB_H_
+#define _STRLIB_H_
+
+/*---------------------------------------------------------------------------
+ Includes
+ ---------------------------------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/*---------------------------------------------------------------------------
+ Function codes
+ ---------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Convert a string to lowercase.
+ @param s String to convert.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string
+ containing a lowercased version of the input string. Do not free
+ or modify the returned string! Since the returned string is statically
+ allocated, it will be modified at each function call (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+char * strlwc(const char * s);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Convert a string to uppercase.
+ @param s String to convert.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string
+ containing an uppercased version of the input string. Do not free
+ or modify the returned string! Since the returned string is statically
+ allocated, it will be modified at each function call (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+char * strupc(char * s);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Skip blanks until the first non-blank character.
+ @param s String to parse.
+ @return Pointer to char inside given string.
+
+ This function returns a pointer to the first non-blank character in the
+ given string.
+ */
+/*--------------------------------------------------------------------------*/
+char * strskp(char * s);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Remove blanks at the end of a string.
+ @param s String to parse.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string,
+ which is identical to the input string, except that all blank
+ characters at the end of the string have been removed.
+ Do not free or modify the returned string! Since the returned string
+ is statically allocated, it will be modified at each function call
+ (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+char * strcrop(char * s);
+
+/*-------------------------------------------------------------------------*/
+/**
+ @brief Remove blanks at the beginning and the end of a string.
+ @param s String to parse.
+ @return ptr to statically allocated string.
+
+ This function returns a pointer to a statically allocated string,
+ which is identical to the input string, except that all blank
+ characters at the end and the beg. of the string have been removed.
+ Do not free or modify the returned string! Since the returned string
+ is statically allocated, it will be modified at each function call
+ (not re-entrant).
+ */
+/*--------------------------------------------------------------------------*/
+char * strstrip(char * s) ;
+
+#endif
diff --git a/lib/iniparser/src/wscript_build b/lib/iniparser/src/wscript_build
new file mode 100644
index 0000000000..aa15c9f66e
--- /dev/null
+++ b/lib/iniparser/src/wscript_build
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+bld.SAMBA_SUBSYSTEM('LIBINIPARSER',
+ source='../../iniparser_build/iniparser.c ../../iniparser_build/dictionary.c ../../iniparser_build/strlib.c',
+ deps='replace',
+ cflags=''
+ )
diff --git a/lib/iniparser/test/Makefile b/lib/iniparser/test/Makefile
new file mode 100644
index 0000000000..aa8fcb24b5
--- /dev/null
+++ b/lib/iniparser/test/Makefile
@@ -0,0 +1,24 @@
+#
+# iniparser tests Makefile
+#
+
+CC = gcc
+CFLAGS = -g -I../src
+LFLAGS = -L.. -liniparser
+AR = ar
+ARFLAGS = rcv
+RM = rm -f
+
+
+default: all
+
+all: iniexample
+
+iniexample: iniexample.c
+ $(CC) $(CFLAGS) -o iniexample iniexample.c -I../src -L.. -liniparser
+
+clean veryclean:
+ $(RM) iniexample example.ini
+
+
+
diff --git a/lib/iniparser/test/iniexample.c b/lib/iniparser/test/iniexample.c
new file mode 100644
index 0000000000..5e8e71cdf0
--- /dev/null
+++ b/lib/iniparser/test/iniexample.c
@@ -0,0 +1,117 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "iniparser.h"
+
+void create_example_ini_file(void);
+int parse_ini_file(char * ini_name);
+
+int main(int argc, char * argv[])
+{
+ int status ;
+
+ if (argc<2) {
+ create_example_ini_file();
+ status = parse_ini_file("example.ini");
+ } else {
+ status = parse_ini_file(argv[1]);
+ }
+ return status ;
+}
+
+void create_example_ini_file(void)
+{
+ FILE * ini ;
+
+ ini = fopen("example.ini", "w");
+ fprintf(ini, "\n\
+#\n\
+# This is an example of ini file\n\
+#\n\
+\n\
+[Pizza]\n\
+\n\
+Ham = yes ;\n\
+Mushrooms = TRUE ;\n\
+Capres = 0 ;\n\
+Cheese = NO ;\n\
+\n\
+\n\
+[Wine]\n\
+\n\
+Grape = Cabernet Sauvignon ;\n\
+Year = 1989 ;\n\
+Country = Spain ;\n\
+Alcohol = 12.5 ;\n\
+\n\
+#\n\
+# end of file\n\
+#\n");
+
+ fclose(ini);
+}
+
+
+int parse_ini_file(char * ini_name)
+{
+ dictionary * ini ;
+
+ /* Some temporary variables to hold query results */
+ int b ;
+ int i ;
+ double d ;
+ char * s ;
+
+ ini = iniparser_load(ini_name);
+ if (ini==NULL) {
+ fprintf(stderr, "cannot parse file [%s]", ini_name);
+ return -1 ;
+ }
+ iniparser_dump(ini, stderr);
+
+ /* Get pizza attributes */
+ printf("Pizza:\n");
+
+ b = iniparser_getboolean(ini, "pizza:ham", -1);
+ printf("Ham: [%d]\n", b);
+ b = iniparser_getboolean(ini, "pizza:mushrooms", -1);
+ printf("Mushrooms: [%d]\n", b);
+ b = iniparser_getboolean(ini, "pizza:capres", -1);
+ printf("Capres: [%d]\n", b);
+ b = iniparser_getboolean(ini, "pizza:cheese", -1);
+ printf("Cheese: [%d]\n", b);
+
+ /* Get wine attributes */
+ printf("Wine:\n");
+ s = iniparser_getstr(ini, "wine:grape");
+ if (s) {
+ printf("grape: [%s]\n", s);
+ } else {
+ printf("grape: not found\n");
+ }
+ i = iniparser_getint(ini, "wine:year", -1);
+ if (i>0) {
+ printf("year: [%d]\n", i);
+ } else {
+ printf("year: not found\n");
+ }
+ s = iniparser_getstr(ini, "wine:country");
+ if (s) {
+ printf("country: [%s]\n", s);
+ } else {
+ printf("country: not found\n");
+ }
+ d = iniparser_getdouble(ini, "wine:alcohol", -1.0);
+ if (d>0.0) {
+ printf("alcohol: [%g]\n", d);
+ } else {
+ printf("alcohol: not found\n");
+ }
+
+ iniparser_freedict(ini);
+ return 0 ;
+}
+
+
diff --git a/lib/iniparser_build/config.m4 b/lib/iniparser_build/config.m4
new file mode 100644
index 0000000000..282c977c18
--- /dev/null
+++ b/lib/iniparser_build/config.m4
@@ -0,0 +1,45 @@
+AC_ARG_WITH(included-iniparser,
+[AS_HELP_STRING([--with-included-iniparser], [use bundled iniparser library, not from system])],
+[
+ case "$withval" in
+ yes)
+ INCLUDED_INIPARSER=yes
+ ;;
+ no)
+ INCLUDED_INIPARSER=no
+ ;;
+ esac ],
+)
+if test x"$INCLUDED_INIPARSER" != x"yes"; then
+ AC_CHECK_LIB_EXT(iniparser, LIBINIPARSER_LIBS, iniparser_load)
+
+fi
+
+AC_MSG_CHECKING(whether to use included iniparser)
+if test x"$ac_cv_lib_ext_iniparser" != x"yes"; then
+
+ iniparserpaths="../iniparser ../lib/iniparser"
+ for d in $iniparserpaths; do
+ if test -f "$srcdir/$d/src/iniparser.c"; then
+ iniparserdir="$d"
+ break;
+ fi
+ done
+ if test x"$iniparserdir" = "x"; then
+ AC_MSG_ERROR([cannot find iniparser source in $iniparserpaths])
+ fi
+ INIPARSER_CFLAGS="-I$srcdir/$iniparserdir/src"
+ AC_MSG_RESULT(yes)
+
+ INIPARSER_OBJS=""
+ INIPARSER_OBJS="$INIPARSER_OBJS $srcdir/$iniparserdir/../iniparser_build/iniparser.o"
+ INIPARSER_OBJS="$INIPARSER_OBJS $srcdir/$iniparserdir/../iniparser_build/dictionary.o"
+ INIPARSER_OBJS="$INIPARSER_OBJS $srcdir/$iniparserdir/../iniparser_build/strlib.o"
+
+ SMB_SUBSYSTEM(LIBINIPARSER,[$INIPARSER_OBJS],[],[$INIPARSER_CFLAGS])
+else
+ AC_MSG_RESULT(no)
+ SMB_EXT_LIB(LIBINIPARSER,,,,${LIBINIPARSER_LIBS})
+ SMB_ENABLE(LIBINIPARSER,YES)
+fi
+
diff --git a/lib/iniparser_build/dictionary.c b/lib/iniparser_build/dictionary.c
new file mode 100644
index 0000000000..a6b2a12484
--- /dev/null
+++ b/lib/iniparser_build/dictionary.c
@@ -0,0 +1,7 @@
+/*
+ for someplatforms it's needed to inject replace.h into
+ the iniparser source code
+ --metze
+*/
+#include "../replace/replace.h"
+#include "../iniparser/src/dictionary.c"
diff --git a/lib/iniparser_build/iniparser.c b/lib/iniparser_build/iniparser.c
new file mode 100644
index 0000000000..e20369963b
--- /dev/null
+++ b/lib/iniparser_build/iniparser.c
@@ -0,0 +1,7 @@
+/*
+ for someplatforms it's needed to inject replace.h into
+ the iniparser source code
+ --metze
+*/
+#include "../replace/replace.h"
+#include "../iniparser/src/iniparser.c"
diff --git a/lib/iniparser_build/strlib.c b/lib/iniparser_build/strlib.c
new file mode 100644
index 0000000000..12ef233552
--- /dev/null
+++ b/lib/iniparser_build/strlib.c
@@ -0,0 +1,7 @@
+/*
+ for someplatforms it's needed to inject replace.h into
+ the iniparser source code
+ --metze
+*/
+#include "../replace/replace.h"
+#include "../iniparser/src/strlib.c"
diff --git a/lib/nss_wrapper/config.mk b/lib/nss_wrapper/config.mk
deleted file mode 100644
index 015fbe511c..0000000000
--- a/lib/nss_wrapper/config.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-##############################
-# Start SUBSYSTEM NSS_WRAPPER
-[SUBSYSTEM::NSS_WRAPPER]
-# End SUBSYSTEM NSS_WRAPPER
-##############################
-
-NSS_WRAPPER_OBJ_FILES = $(nsswrappersrcdir)/nss_wrapper.o
diff --git a/lib/nss_wrapper/nss_wrapper.c b/lib/nss_wrapper/nss_wrapper.c
index dc2d4f58fd..cfa5a68712 100644
--- a/lib/nss_wrapper/nss_wrapper.c
+++ b/lib/nss_wrapper/nss_wrapper.c
@@ -34,6 +34,10 @@
#ifdef _SAMBA_BUILD_
+/* defining this gives us the posix getpwnam_r() calls on solaris
+ Thanks to heimdal for this */
+#define _POSIX_PTHREAD_SEMANTICS
+
#define NSS_WRAPPER_NOT_REPLACE
#include "../replace/replace.h"
#include "system/passwd.h"
@@ -207,7 +211,7 @@ struct nwrap_ops {
void (*nw_endgrent)(struct nwrap_backend *b);
};
-/* protoypes for files backend */
+/* prototypes for files backend */
static struct passwd *nwrap_files_getpwnam(struct nwrap_backend *b,
@@ -245,7 +249,7 @@ static int nwrap_files_getgrent_r(struct nwrap_backend *b,
size_t buflen, struct group **grdstp);
static void nwrap_files_endgrent(struct nwrap_backend *b);
-/* protoypes for module backend */
+/* prototypes for module backend */
static struct passwd *nwrap_module_getpwent(struct nwrap_backend *b);
static int nwrap_module_getpwent_r(struct nwrap_backend *b,
@@ -460,6 +464,8 @@ static bool nwrap_module_init(const char *name,
int *num_backends,
struct nwrap_backend **backends)
{
+ struct nwrap_backend *b;
+
*backends = (struct nwrap_backend *)realloc(*backends,
sizeof(struct nwrap_backend) * ((*num_backends) + 1));
if (!*backends) {
@@ -468,11 +474,22 @@ static bool nwrap_module_init(const char *name,
return false;
}
- (*backends)[*num_backends].name = name;
- (*backends)[*num_backends].ops = ops;
- (*backends)[*num_backends].so_path = so_path;
- (*backends)[*num_backends].so_handle = nwrap_load_module(so_path);
- (*backends)[*num_backends].fns = nwrap_load_module_fns(&((*backends)[*num_backends]));
+ b = &((*backends)[*num_backends]);
+
+ b->name = name;
+ b->ops = ops;
+ b->so_path = so_path;
+
+ if (so_path != NULL) {
+ b->so_handle = nwrap_load_module(so_path);
+ b->fns = nwrap_load_module_fns(b);
+ if (b->fns == NULL) {
+ return false;
+ }
+ } else {
+ b->so_handle = NULL;
+ b->fns = NULL;
+ }
(*num_backends)++;
@@ -2168,7 +2185,7 @@ _PUBLIC_ int nwrap_getgrouplist(const char *user, gid_t group, gid_t *groups, in
struct group *grp;
gid_t *groups_tmp;
int count = 1;
- const char *name_of_group = NULL;
+ const char *name_of_group = "";
if (!nwrap_enabled()) {
return real_getgrouplist(user, group, groups, ngroups);
diff --git a/lib/nss_wrapper/nss_wrapper.pl b/lib/nss_wrapper/nss_wrapper.pl
index 1f1aef1118..48fa2c52b8 100644..100755
--- a/lib/nss_wrapper/nss_wrapper.pl
+++ b/lib/nss_wrapper/nss_wrapper.pl
@@ -13,18 +13,19 @@ my $opt_action = undef;
my $opt_type = undef;
my $opt_name = undef;
my $opt_member = undef;
+my $opt_gid = 65534;# nogroup gid
my $passwdfn = undef;
my $groupfn = undef;
my $memberfn = undef;
my $actionfn = undef;
-sub passwd_add($$$$);
-sub passwd_delete($$$$);
-sub group_add($$$$);
-sub group_delete($$$$);
-sub member_add($$$$);
-sub member_delete($$$$);
+sub passwd_add($$$$$);
+sub passwd_delete($$$$$);
+sub group_add($$$$$);
+sub group_delete($$$$$);
+sub member_add($$$$$);
+sub member_delete($$$$$);
sub check_path($$);
@@ -35,7 +36,8 @@ my $result = GetOptions(
'action=s' => \$opt_action,
'type=s' => \$opt_type,
'name=s' => \$opt_name,
- 'member=s' => \$opt_member
+ 'member=s' => \$opt_member,
+ 'gid=i' => \$opt_gid
);
sub usage($;$)
@@ -58,6 +60,8 @@ sub usage($;$)
--name <name> The name of the object.
--member <member> The name of the member.
+
+ --gid <gid> Primary Group ID for new users.
";
exit($ret);
}
@@ -110,7 +114,7 @@ if ($opt_name eq "") {
usage(1, "invalid: --name <name>");
}
-exit $actionfn->($opt_fullpath_passwd, $opt_member, $opt_fullpath_group, $opt_name);
+exit $actionfn->($opt_fullpath_passwd, $opt_member, $opt_fullpath_group, $opt_name, $opt_gid);
sub check_path($$)
{
@@ -388,9 +392,9 @@ sub group_save($)
rename($tmppath, $path) or die("Unable to rename $tmppath => $path");
}
-sub passwd_add($$$$)
+sub passwd_add($$$$$)
{
- my ($path, $dummy, $dummy2, $name) = @_;
+ my ($path, $dummy, $dummy2, $name, $gid) = @_;
#print "passwd_add: '$name' in '$path'\n";
@@ -400,7 +404,6 @@ sub passwd_add($$$$)
die("account[$name] already exists in '$path'") if defined($e);
my $uid = passwd_get_free_uid($passwd);
- my $gid = 65534;# nogroup gid
my $pwent = $name.":x:".$uid.":".$gid.":".$name." gecos:/nodir:/bin/false";
@@ -411,9 +414,9 @@ sub passwd_add($$$$)
return 0;
}
-sub passwd_delete($$$$)
+sub passwd_delete($$$$$)
{
- my ($path, $dummy, $dummy2, $name) = @_;
+ my ($path, $dummy, $dummy2, $name, $dummy3) = @_;
#print "passwd_delete: '$name' in '$path'\n";
@@ -429,9 +432,9 @@ sub passwd_delete($$$$)
return 0;
}
-sub group_add($$$$)
+sub group_add($$$$$)
{
- my ($dummy, $dummy2, $path, $name) = @_;
+ my ($dummy, $dummy2, $path, $name, $dummy3) = @_;
#print "group_add: '$name' in '$path'\n";
@@ -453,9 +456,9 @@ sub group_add($$$$)
return 0;
}
-sub group_delete($$$$)
+sub group_delete($$$$$)
{
- my ($dummy, $dummy2, $path, $name) = @_;
+ my ($dummy, $dummy2, $path, $name, $dummy3) = @_;
#print "group_delete: '$name' in '$path'\n";
@@ -471,9 +474,9 @@ sub group_delete($$$$)
return 0;
}
-sub member_add($$$$)
+sub member_add($$$$$)
{
- my ($passwd_path, $username, $group_path, $groupname) = @_;
+ my ($passwd_path, $username, $group_path, $groupname, $dummy) = @_;
#print "member_add: adding '$username' in '$passwd_path' to '$groupname' in '$group_path'\n";
@@ -494,9 +497,9 @@ sub member_add($$$$)
return 0;
}
-sub member_delete($$$$)
+sub member_delete($$$$$)
{
- my ($passwd_path, $username, $group_path, $groupname) = @_;
+ my ($passwd_path, $username, $group_path, $groupname, $dummy) = @_;
#print "member_delete: removing '$username' in '$passwd_path' from '$groupname' in '$group_path'\n";
diff --git a/lib/nss_wrapper/testsuite.c b/lib/nss_wrapper/testsuite.c
index 02a10e36c1..3d3f748da4 100644
--- a/lib/nss_wrapper/testsuite.c
+++ b/lib/nss_wrapper/testsuite.c
@@ -3,7 +3,7 @@
local testing of the nss wrapper
- Copyright (C) Guenther Deschner 2009
+ Copyright (C) Guenther Deschner 2009-2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -20,9 +20,13 @@
*/
#include "includes.h"
+
+#ifndef NSS_WRAPPER
+#define NSS_WRAPPER
+#endif
+
#include "torture/torture.h"
#include "lib/replace/system/passwd.h"
-#include "lib/nss_wrapper/nss_wrapper.h"
static bool copy_passwd(struct torture_context *tctx,
const struct passwd *pwd,
@@ -175,7 +179,7 @@ static void print_group(struct group *grp)
grp->gr_passwd,
(unsigned long)grp->gr_gid);
- if (!grp->gr_mem[0]) {
+ if ((grp->gr_mem == NULL) || !grp->gr_mem[0]) {
printf("\n");
return;
}
@@ -772,8 +776,8 @@ static bool test_nwrap_membership(struct torture_context *tctx)
int i;
if (!old_pwd || !old_group) {
+ torture_comment(tctx, "ENV NSS_WRAPPER_PASSWD or NSS_WRAPPER_GROUP not set\n");
torture_skip(tctx, "nothing to test\n");
- return true;
}
torture_assert(tctx, test_nwrap_enum_passwd(tctx, &pwd, &num_pwd),
@@ -797,8 +801,8 @@ static bool test_nwrap_enumeration(struct torture_context *tctx)
const char *old_group = getenv("NSS_WRAPPER_GROUP");
if (!old_pwd || !old_group) {
+ torture_comment(tctx, "ENV NSS_WRAPPER_PASSWD or NSS_WRAPPER_GROUP not set\n");
torture_skip(tctx, "nothing to test\n");
- return true;
}
torture_assert(tctx, test_nwrap_passwd(tctx),
@@ -815,8 +819,8 @@ static bool test_nwrap_reentrant_enumeration(struct torture_context *tctx)
const char *old_group = getenv("NSS_WRAPPER_GROUP");
if (!old_pwd || !old_group) {
+ torture_comment(tctx, "ENV NSS_WRAPPER_PASSWD or NSS_WRAPPER_GROUP not set\n");
torture_skip(tctx, "nothing to test\n");
- return true;
}
torture_comment(tctx, "Testing re-entrant calls\n");
@@ -835,8 +839,8 @@ static bool test_nwrap_reentrant_enumeration_crosschecks(struct torture_context
const char *old_group = getenv("NSS_WRAPPER_GROUP");
if (!old_pwd || !old_group) {
+ torture_comment(tctx, "ENV NSS_WRAPPER_PASSWD or NSS_WRAPPER_GROUP not set\n");
torture_skip(tctx, "nothing to test\n");
- return true;
}
torture_comment(tctx, "Testing re-entrant calls with cross checks\n");
@@ -849,14 +853,105 @@ static bool test_nwrap_reentrant_enumeration_crosschecks(struct torture_context
return true;
}
+static bool test_nwrap_passwd_duplicates(struct torture_context *tctx)
+{
+ int i, d;
+ struct passwd *pwd;
+ size_t num_pwd;
+ int duplicates = 0;
+
+ torture_assert(tctx, test_nwrap_enum_passwd(tctx, &pwd, &num_pwd),
+ "failed to enumerate passwd");
+
+ for (i=0; i < num_pwd; i++) {
+ const char *current_name = pwd[i].pw_name;
+ for (d=0; d < num_pwd; d++) {
+ const char *dup_name = pwd[d].pw_name;
+ if (d == i) {
+ continue;
+ }
+ if (!strequal(current_name, dup_name)) {
+ continue;
+ }
+
+ torture_warning(tctx, "found duplicate names:");
+ print_passwd(&pwd[d]);
+ print_passwd(&pwd[i]);
+ duplicates++;
+ }
+ }
+
+ if (duplicates) {
+ torture_fail(tctx, talloc_asprintf(tctx, "found %d duplicate names", duplicates));
+ }
+
+ return true;
+}
+
+static bool test_nwrap_group_duplicates(struct torture_context *tctx)
+{
+ int i, d;
+ struct group *grp;
+ size_t num_grp;
+ int duplicates = 0;
+
+ torture_assert(tctx, test_nwrap_enum_group(tctx, &grp, &num_grp),
+ "failed to enumerate group");
+
+ for (i=0; i < num_grp; i++) {
+ const char *current_name = grp[i].gr_name;
+ for (d=0; d < num_grp; d++) {
+ const char *dup_name = grp[d].gr_name;
+ if (d == i) {
+ continue;
+ }
+ if (!strequal(current_name, dup_name)) {
+ continue;
+ }
+
+ torture_warning(tctx, "found duplicate names:");
+ print_group(&grp[d]);
+ print_group(&grp[i]);
+ duplicates++;
+ }
+ }
+
+ if (duplicates) {
+ torture_fail(tctx, talloc_asprintf(tctx, "found %d duplicate names", duplicates));
+ }
+
+ return true;
+}
+
+
+static bool test_nwrap_duplicates(struct torture_context *tctx)
+{
+ const char *old_pwd = getenv("NSS_WRAPPER_PASSWD");
+ const char *old_group = getenv("NSS_WRAPPER_GROUP");
+
+ if (!old_pwd || !old_group) {
+ torture_comment(tctx, "ENV NSS_WRAPPER_PASSWD or NSS_WRAPPER_GROUP not set\n");
+ torture_skip(tctx, "nothing to test\n");
+ }
+
+ torture_assert(tctx, test_nwrap_passwd_duplicates(tctx),
+ "failed to test users");
+ torture_assert(tctx, test_nwrap_group_duplicates(tctx),
+ "failed to test groups");
+
+ return true;
+}
+
+
struct torture_suite *torture_local_nss_wrapper(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "NSS-WRAPPER");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "nss-wrapper");
torture_suite_add_simple_test(suite, "enumeration", test_nwrap_enumeration);
torture_suite_add_simple_test(suite, "reentrant enumeration", test_nwrap_reentrant_enumeration);
torture_suite_add_simple_test(suite, "reentrant enumeration crosschecks", test_nwrap_reentrant_enumeration_crosschecks);
torture_suite_add_simple_test(suite, "membership", test_nwrap_membership);
+ torture_suite_add_simple_test(suite, "duplicates", test_nwrap_duplicates);
return suite;
}
diff --git a/lib/nss_wrapper/wscript b/lib/nss_wrapper/wscript
new file mode 100644
index 0000000000..5cfff1f76c
--- /dev/null
+++ b/lib/nss_wrapper/wscript
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+import Options
+
+def set_options(opt):
+ gr = opt.option_group('developer options')
+ gr.add_option('--enable-nss-wrapper',
+ help=("Turn on nss wrapper library (default=no)"),
+ action="store_true", dest='enable_nss_wrapper', default=False)
+
+def configure(conf):
+ if (Options.options.enable_nss_wrapper or
+ Options.options.developer or
+ Options.options.enable_selftest):
+ conf.DEFINE('NSS_WRAPPER', 1)
+ conf.ADD_GLOBAL_DEPENDENCY('nss_wrapper')
+
diff --git a/lib/nss_wrapper/wscript_build b/lib/nss_wrapper/wscript_build
new file mode 100644
index 0000000000..5f9df3a600
--- /dev/null
+++ b/lib/nss_wrapper/wscript_build
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+
+bld.SAMBA_LIBRARY('nss_wrapper',
+ source='nss_wrapper.c',
+ deps='replace',
+ private_library=True,
+ enabled=bld.CONFIG_SET("NSS_WRAPPER"),
+ )
+
diff --git a/lib/popt/config.mk b/lib/popt/config.mk
deleted file mode 100644
index 04cc2c7ea5..0000000000
--- a/lib/popt/config.mk
+++ /dev/null
@@ -1,5 +0,0 @@
-[SUBSYSTEM::LIBPOPT]
-CFLAGS = -I$(poptsrcdir)
-
-LIBPOPT_OBJ_FILES = $(addprefix $(poptsrcdir)/, findme.o popt.o poptconfig.o popthelp.o poptparse.o)
-
diff --git a/lib/popt/popt.h b/lib/popt/popt.h
index 7b94a98d03..c60ae29c19 100644
--- a/lib/popt/popt.h
+++ b/lib/popt/popt.h
@@ -458,7 +458,7 @@ void poptSetExecPath(poptContext con, const char * path, int allowAbsolute)
/** \ingroup popt
* Print detailed description of options.
* @param con context
- * @param fp ouput file handle
+ * @param fp output file handle
* @param flags (unused)
*/
void poptPrintHelp(poptContext con, FILE * fp, /*@unused@*/ int flags)
@@ -468,7 +468,7 @@ void poptPrintHelp(poptContext con, FILE * fp, /*@unused@*/ int flags)
/** \ingroup popt
* Print terse description of options.
* @param con context
- * @param fp ouput file handle
+ * @param fp output file handle
* @param flags (unused)
*/
void poptPrintUsage(poptContext con, FILE * fp, /*@unused@*/ int flags)
diff --git a/lib/popt/wscript b/lib/popt/wscript
new file mode 100644
index 0000000000..7fd15ed53a
--- /dev/null
+++ b/lib/popt/wscript
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+import Options
+
+def configure(conf):
+ conf.CHECK_HEADERS('float.h')
+
+ if conf.CHECK_BUNDLED_SYSTEM('popt', checkfunctions='poptGetContext', headers='popt.h'):
+ conf.define('USING_SYSTEM_POPT', 1)
+
+def build(bld):
+ if bld.CONFIG_SET('USING_SYSTEM_POPT'):
+ return
+
+ bld.SAMBA_LIBRARY('popt',
+ source='findme.c popt.c poptconfig.c popthelp.c poptparse.c',
+ cflags='-DDBL_EPSILON=__DBL_EPSILON__',
+ private_library=True)
diff --git a/lib/replace/Makefile b/lib/replace/Makefile
new file mode 100644
index 0000000000..364990153e
--- /dev/null
+++ b/lib/replace/Makefile
@@ -0,0 +1,63 @@
+# simple makefile wrapper to run waf
+
+WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf
+
+all:
+ $(WAF) build
+
+install:
+ $(WAF) install
+
+uninstall:
+ $(WAF) uninstall
+
+test:
+ $(WAF) test $(TEST_OPTIONS)
+
+testenv:
+ $(WAF) test --testenv $(TEST_OPTIONS)
+
+quicktest:
+ $(WAF) test --quick $(TEST_OPTIONS)
+
+dist:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) dist
+
+distcheck:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) distcheck
+
+clean:
+ $(WAF) clean
+
+distclean:
+ $(WAF) distclean
+
+reconfigure: configure
+ $(WAF) reconfigure
+
+show_waf_options:
+ $(WAF) --help
+
+# some compatibility make targets
+everything: all
+
+testsuite: all
+
+check: test
+
+torture: all
+
+# this should do an install as well, once install is finished
+installcheck: test
+
+etags:
+ $(WAF) etags
+
+ctags:
+ $(WAF) ctags
+
+bin/%:: FORCE
+ $(WAF) --targets=`basename $@`
+FORCE:
diff --git a/lib/replace/README b/lib/replace/README
index 26383bc89a..bf4e67ff0c 100644
--- a/lib/replace/README
+++ b/lib/replace/README
@@ -33,6 +33,7 @@ opendir
readdir
telldir
seekdir
+clock_gettime
closedir
dlopen
dlclose
@@ -75,7 +76,6 @@ realpath
Types:
bool
socklen_t
-uint_t
uint{8,16,32,64}_t
int{8,16,32,64}_t
intptr_t
diff --git a/lib/replace/autoconf-2.60.m4 b/lib/replace/autoconf-2.60.m4
index 2d5dbc1661..b2694fde62 100644
--- a/lib/replace/autoconf-2.60.m4
+++ b/lib/replace/autoconf-2.60.m4
@@ -181,32 +181,56 @@ AC_DEFUN([AC_PROG_CC_C99],
# typically due to standards-conformance issues.
m4_ifndef([AC_USE_SYSTEM_EXTENSIONS],[
AC_DEFUN([AC_USE_SYSTEM_EXTENSIONS],
-[
- AC_BEFORE([$0], [AC_COMPILE_IFELSE])
- AC_BEFORE([$0], [AC_RUN_IFELSE])
+[AC_BEFORE([$0], [AC_COMPILE_IFELSE])dnl
+AC_BEFORE([$0], [AC_RUN_IFELSE])dnl
- AC_REQUIRE([AC_GNU_SOURCE])
- AC_REQUIRE([AC_AIX])
- AC_REQUIRE([AC_MINIX])
+ AC_CHECK_HEADER([minix/config.h], [MINIX=yes], [MINIX=])
+ if test "$MINIX" = yes; then
+ AC_DEFINE([_POSIX_SOURCE], [1],
+ [Define to 1 if you need to in order for `stat' and other
+ things to work.])
+ AC_DEFINE([_POSIX_1_SOURCE], [2],
+ [Define to 2 if the system does not provide POSIX.1 features
+ except with this defined.])
+ AC_DEFINE([_MINIX], [1],
+ [Define to 1 if on MINIX.])
+ fi
AH_VERBATIM([__EXTENSIONS__],
-[/* Enable extensions on Solaris. */
-#ifndef __EXTENSIONS__
-# undef __EXTENSIONS__
+[/* Enable extensions on AIX 3, Interix. */
+#ifndef _ALL_SOURCE
+# undef _ALL_SOURCE
#endif
+/* Enable GNU extensions on systems that have them. */
+#ifndef _GNU_SOURCE
+# undef _GNU_SOURCE
+#endif
+/* Enable threading extensions on Solaris. */
#ifndef _POSIX_PTHREAD_SEMANTICS
# undef _POSIX_PTHREAD_SEMANTICS
-#endif])
+#endif
+/* Enable extensions on HP NonStop. */
+#ifndef _TANDEM_SOURCE
+# undef _TANDEM_SOURCE
+#endif
+/* Enable general extensions on Solaris. */
+#ifndef __EXTENSIONS__
+# undef __EXTENSIONS__
+#endif
+])
AC_CACHE_CHECK([whether it is safe to define __EXTENSIONS__],
[ac_cv_safe_to_define___extensions__],
[AC_COMPILE_IFELSE(
- [AC_LANG_PROGRAM([
+ [AC_LANG_PROGRAM([[
# define __EXTENSIONS__ 1
- AC_INCLUDES_DEFAULT])],
+ ]AC_INCLUDES_DEFAULT])],
[ac_cv_safe_to_define___extensions__=yes],
[ac_cv_safe_to_define___extensions__=no])])
test $ac_cv_safe_to_define___extensions__ = yes &&
AC_DEFINE([__EXTENSIONS__])
+ AC_DEFINE([_ALL_SOURCE])
+ AC_DEFINE([_GNU_SOURCE])
AC_DEFINE([_POSIX_PTHREAD_SEMANTICS])
-])
+ AC_DEFINE([_TANDEM_SOURCE])
+])# AC_USE_SYSTEM_EXTENSIONS
])
diff --git a/lib/replace/autogen.sh b/lib/replace/autogen-autotools.sh
index d46a4279f3..d46a4279f3 100755
--- a/lib/replace/autogen.sh
+++ b/lib/replace/autogen-autotools.sh
diff --git a/lib/replace/configure b/lib/replace/configure
new file mode 100755
index 0000000000..6a9f875511
--- /dev/null
+++ b/lib/replace/configure
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+PREVPATH=`dirname $0`
+
+if [ -f $PREVPATH/../../buildtools/bin/waf ]; then
+ WAF=../../buildtools/bin/waf
+elif [ -f $PREVPATH/buildtools/bin/waf ]; then
+ WAF=./buildtools/bin/waf
+else
+ echo "replace: Unable to find waf"
+ exit 1
+fi
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+cd . || exit 1
+$WAF configure "$@" || exit 1
+cd $PREVPATH
diff --git a/lib/replace/crypt.c b/lib/replace/crypt.c
index 22341ce511..3a067bcc77 100644
--- a/lib/replace/crypt.c
+++ b/lib/replace/crypt.c
@@ -46,11 +46,11 @@
#ifndef long32
-#define long32 int32
+#define long32 int32_t
#endif
#ifndef long64
-#define long64 int64
+#define long64 int64_t
#endif
#ifndef ufc_long
@@ -665,7 +665,7 @@ char *ufc_crypt(const char *key,const char *salt)
* Setup key schedule
*/
clearmem(ktab, sizeof ktab);
- StrnCpy(ktab, key, 8);
+ strncpy(ktab, key, 8);
ufc_mk_keytab(ktab);
/*
diff --git a/lib/replace/getifaddrs.c b/lib/replace/getifaddrs.c
index 3a91bb40d2..84d790689d 100644
--- a/lib/replace/getifaddrs.c
+++ b/lib/replace/getifaddrs.c
@@ -282,7 +282,7 @@ int rep_getifaddrs(struct ifaddrs **ifap)
i = ifc.ifc_len;
while (i > 0) {
- uint_t inc;
+ unsigned int inc;
inc = ifr->ifr_addr.sa_len;
diff --git a/lib/replace/getpass.c b/lib/replace/getpass.c
index 0be618fc91..f95109f2b4 100644
--- a/lib/replace/getpass.c
+++ b/lib/replace/getpass.c
@@ -34,10 +34,6 @@ typedef int sig_atomic_t;
#define SIGCLD SIGCHLD
#endif
-#ifndef SIGNAL_CAST
-#define SIGNAL_CAST (RETSIGTYPE (*)(int))
-#endif
-
#ifdef SYSV_TERMIO
/* SYSTEM V TERMIO HANDLING */
@@ -99,7 +95,7 @@ static int tcsetattr(int fd, int flags, struct sgttyb *_t)
static struct termios t;
#endif /* SYSV_TERMIO */
-static void catch_signal(int signum,void (*handler)(int ))
+static void catch_signal(int signum, void (*handler)(int ))
{
#ifdef HAVE_SIGACTION
struct sigaction act;
@@ -131,7 +127,7 @@ static int in_fd = -1;
Signal function to tell us were ^C'ed.
****************************************************************/
-static void gotintr_sig(void)
+static void gotintr_sig(int signum)
{
gotintr = 1;
if (in_fd != -1)
@@ -148,7 +144,7 @@ char *rep_getpass(const char *prompt)
size_t nread;
/* Catch problematic signals */
- catch_signal(SIGINT, SIGNAL_CAST gotintr_sig);
+ catch_signal(SIGINT, gotintr_sig);
/* Try to write to and read from the terminal if we can.
If we can't open the terminal, use stderr and stdin. */
@@ -211,10 +207,10 @@ char *rep_getpass(const char *prompt)
fclose(in);
/* Catch problematic signals */
- catch_signal(SIGINT, SIGNAL_CAST SIG_DFL);
+ catch_signal(SIGINT, SIG_DFL);
if (gotintr) {
- printf("Interupted by signal.\n");
+ printf("Interrupted by signal.\n");
fflush(stdout);
exit(1);
}
diff --git a/lib/replace/hdr_replace.h b/lib/replace/hdr_replace.h
new file mode 100644
index 0000000000..6cfa50f809
--- /dev/null
+++ b/lib/replace/hdr_replace.h
@@ -0,0 +1,2 @@
+/* this is a replacement header for a missing system header */
+#include "replace.h"
diff --git a/lib/replace/libreplace.m4 b/lib/replace/libreplace.m4
index af8587938d..808d5d1c06 100644
--- a/lib/replace/libreplace.m4
+++ b/lib/replace/libreplace.m4
@@ -51,7 +51,6 @@ AC_SUBST(LIBREPLACEOBJ)
LIBREPLACEOBJ="${LIBREPLACEOBJ} $libreplacedir/snprintf.o"
-AC_TYPE_SIGNAL
AC_TYPE_UID_T
AC_TYPE_MODE_T
AC_TYPE_OFF_T
@@ -90,6 +89,8 @@ AC_INCLUDES_DEFAULT
#endif]
)
+AC_CHECK_HEADERS(linux/types.h)
+
AC_CACHE_CHECK([for working mmap],libreplace_cv_HAVE_MMAP,[
AC_TRY_RUN([#include "$libreplacedir/test/shared_mmap.c"],
libreplace_cv_HAVE_MMAP=yes,libreplace_cv_HAVE_MMAP=no,libreplace_cv_HAVE_MMAP=cross)])
@@ -105,11 +106,25 @@ AC_CHECK_HEADERS(sys/mount.h mntent.h)
AC_CHECK_HEADERS(stropts.h)
AC_CHECK_HEADERS(unix.h)
-AC_CHECK_FUNCS(seteuid setresuid setegid setresgid chroot bzero strerror)
+AC_CHECK_FUNCS(seteuid setresuid setegid setresgid chroot bzero strerror strerror_r)
AC_CHECK_FUNCS(vsyslog setlinebuf mktime ftruncate chsize rename)
AC_CHECK_FUNCS(waitpid wait4 strlcpy strlcat initgroups memmove strdup)
-AC_CHECK_FUNCS(pread pwrite strndup strcasestr strtok_r mkdtemp dup2)
+AC_CHECK_FUNCS(pread pwrite strndup strcasestr strtok_r mkdtemp dup2 dprintf vdprintf)
AC_CHECK_FUNCS(isatty chown lchown link readlink symlink realpath)
+AC_CHECK_FUNCS(fdatasync,,[
+ # if we didn't find it, look in librt (Solaris hides it there...)
+ AC_CHECK_LIB(rt, fdatasync,
+ [libreplace_cv_HAVE_FDATASYNC_IN_LIBRT=yes
+ AC_DEFINE(HAVE_FDATASYNC, 1, Define to 1 if there is support for fdatasync)])
+])
+AC_HAVE_DECL(fdatasync, [#include <unistd.h>])
+AC_CHECK_FUNCS(clock_gettime,libreplace_cv_have_clock_gettime=yes,[
+ AC_CHECK_LIB(rt, clock_gettime,
+ [libreplace_cv_HAVE_CLOCK_GETTIME_IN_LIBRT=yes
+ libreplace_cv_have_clock_gettime=yes
+ AC_DEFINE(HAVE_CLOCK_GETTIME, 1, Define to 1 if there is support for clock_gettime)])
+])
+AC_CHECK_FUNCS(get_current_dir_name)
AC_HAVE_DECL(setresuid, [#include <unistd.h>])
AC_HAVE_DECL(setresgid, [#include <unistd.h>])
AC_HAVE_DECL(errno, [#include <errno.h>])
@@ -228,6 +243,8 @@ AC_HAVE_DECL(environ, [#include <unistd.h>])
AC_CHECK_FUNCS(strnlen)
AC_CHECK_FUNCS(strtoull __strtoull strtouq strtoll __strtoll strtoq)
+AC_CHECK_FUNCS(memmem)
+
# this test disabled as we don't actually need __VA_ARGS__ yet
AC_TRY_CPP([
#define eprintf(...) fprintf(stderr, __VA_ARGS__)
@@ -280,6 +297,35 @@ m4_include(timegm.m4)
m4_include(repdir.m4)
m4_include(crypt.m4)
+if test x$libreplace_cv_have_clock_gettime = xyes ; then
+ SMB_CHECK_CLOCK_ID(CLOCK_MONOTONIC)
+ SMB_CHECK_CLOCK_ID(CLOCK_PROCESS_CPUTIME_ID)
+ SMB_CHECK_CLOCK_ID(CLOCK_REALTIME)
+fi
+
+AC_CACHE_CHECK([for struct timespec type],libreplace_cv_struct_timespec, [
+ AC_TRY_COMPILE([
+#include <sys/types.h>
+#if STDC_HEADERS
+#include <stdlib.h>
+#include <stddef.h>
+#endif
+#if TIME_WITH_SYS_TIME
+# include <sys/time.h>
+# include <time.h>
+#else
+# if HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+#endif
+],[struct timespec ts;],
+ libreplace_cv_struct_timespec=yes,libreplace_cv_struct_timespec=no)])
+if test x"$libreplace_cv_struct_timespec" = x"yes"; then
+ AC_DEFINE(HAVE_STRUCT_TIMESPEC,1,[Whether we have struct timespec])
+fi
+
AC_CHECK_FUNCS([printf memset memcpy],,[AC_MSG_ERROR([Required function not found])])
echo "LIBREPLACE_BROKEN_CHECKS: END"
@@ -308,4 +354,35 @@ m4_include(libreplace_ld.m4)
m4_include(libreplace_network.m4)
m4_include(libreplace_macros.m4)
+
+dnl SMB_CHECK_CLOCK_ID(clockid)
+dnl Test whether the specified clock_gettime clock ID is available. If it
+dnl is, we define HAVE_clockid
+AC_DEFUN([SMB_CHECK_CLOCK_ID],
+[
+ AC_MSG_CHECKING(for $1)
+ AC_TRY_LINK([
+#if TIME_WITH_SYS_TIME
+# include <sys/time.h>
+# include <time.h>
+#else
+# if HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+#endif
+ ],
+ [
+clockid_t clk = $1;
+ ],
+ [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_$1, 1,
+ [Whether the clock_gettime clock ID $1 is available])
+ ],
+ [
+ AC_MSG_RESULT(no)
+ ])
+])
m4_ifndef([AC_USE_SYSTEM_EXTENSIONS],[m4_include(autoconf-2.60.m4)])
diff --git a/lib/replace/libreplace_cc.m4 b/lib/replace/libreplace_cc.m4
index a26dee498e..48d9e84a32 100644
--- a/lib/replace/libreplace_cc.m4
+++ b/lib/replace/libreplace_cc.m4
@@ -115,7 +115,6 @@ AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
-AC_CHECK_TYPE(uint_t, unsigned int)
AC_CHECK_TYPE(int8_t, char)
AC_CHECK_TYPE(uint8_t, unsigned char)
AC_CHECK_TYPE(int16_t, short)
diff --git a/lib/replace/libreplace_macros.m4 b/lib/replace/libreplace_macros.m4
index 1856eacf66..46aedd3a83 100644
--- a/lib/replace/libreplace_macros.m4
+++ b/lib/replace/libreplace_macros.m4
@@ -293,9 +293,9 @@ AC_DEFUN(AC_VERIFY_C_PROTOTYPE,
$2
}
])],[
- AS_TR_SH([ac_cv_c_prototype_$1])=yes
+ eval AS_TR_SH([ac_cv_c_prototype_$1])=yes
],[
- AS_TR_SH([ac_cv_c_prototype_$1])=no
+ eval AS_TR_SH([ac_cv_c_prototype_$1])=no
])
)
AS_IF([test $AS_TR_SH([ac_cv_c_prototype_$1]) = yes],[$3],[$4])
diff --git a/lib/replace/libreplace_network.m4 b/lib/replace/libreplace_network.m4
index d8ed8a1d53..eadcc6bfc1 100644
--- a/lib/replace/libreplace_network.m4
+++ b/lib/replace/libreplace_network.m4
@@ -114,7 +114,7 @@ if test x"$libreplace_cv_HAVE_UNIXSOCKET" = x"yes"; then
AC_DEFINE(HAVE_UNIXSOCKET,1,[If we need to build with unixscoket support])
fi
-dnl The following test is roughl taken from the cvs sources.
+dnl The following test is roughly taken from the cvs sources.
dnl
dnl If we can't find connect, try looking in -lsocket, -lnsl, and -linet.
dnl The Irix 5 libc.so has connect and gethostbyname, but Irix 5 also has
@@ -226,6 +226,44 @@ ret = getnameinfo(&sa, sizeof(sa),
],
libreplace_cv_HAVE_GETADDRINFO=yes,libreplace_cv_HAVE_GETADDRINFO=no)])
+
+if test x"$libreplace_cv_HAVE_GETADDRINFO" = x"yes"; then
+ # getaddrinfo is broken on some AIX systems
+ # see bug 5910, use our replacements if we detect
+ # a broken system.
+ AC_TRY_RUN([
+ #include <stddef.h>
+ #include <sys/types.h>
+ #include <sys/socket.h>
+ #include <netdb.h>
+ int main(int argc, const char *argv[])
+ {
+ struct addrinfo hints = {0,};
+ struct addrinfo *ppres;
+ const char hostname1[] = "0.0.0.0";
+ const char hostname2[] = "127.0.0.1";
+ const char hostname3[] = "::";
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_flags =
+ AI_NUMERICHOST|AI_PASSIVE|AI_ADDRCONFIG;
+ /* Test for broken flag combination on AIX. */
+ if (getaddrinfo(hostname1, NULL, &hints, &ppres) == EAI_BADFLAGS) {
+ /* This fails on an IPv6-only box, but not with
+ the EAI_BADFLAGS error. */
+ return 1;
+ }
+ if (getaddrinfo(hostname2, NULL, &hints, &ppres) == 0) {
+ /* IPv4 lookup works - good enough. */
+ return 0;
+ }
+ /* Uh-oh, no IPv4. Are we IPv6-only ? */
+ return getaddrinfo(hostname3, NULL, &hints, &ppres) != 0 ? 1 : 0;
+ }],
+ libreplace_cv_HAVE_GETADDRINFO=yes,
+ libreplace_cv_HAVE_GETADDRINFO=no)
+fi
+
if test x"$libreplace_cv_HAVE_GETADDRINFO" = x"yes"; then
AC_DEFINE(HAVE_GETADDRINFO,1,[Whether the system has getaddrinfo])
AC_DEFINE(HAVE_GETNAMEINFO,1,[Whether the system has getnameinfo])
diff --git a/lib/replace/poll.c b/lib/replace/poll.c
new file mode 100644
index 0000000000..e41548dd39
--- /dev/null
+++ b/lib/replace/poll.c
@@ -0,0 +1,133 @@
+/*
+ Unix SMB/CIFS implementation.
+ poll.c - poll wrapper
+
+ This file is based on code from libssh (LGPLv2.1+ at the time it
+ was downloaded), thus the following copyrights:
+
+ Copyright (c) 2009-2010 by Andreas Schneider <mail@cynapses.org>
+ Copyright (c) 2003-2009 by Aris Adamantiadis
+ Copyright (c) 2009 Aleksandar Kanchev
+ Copyright (C) Volker Lendecke 2011
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "replace.h"
+#include "system/select.h"
+
+
+int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout)
+{
+ fd_set rfds, wfds, efds;
+ struct timeval tv, *ptv;
+ int max_fd;
+ int rc;
+ nfds_t i;
+
+ if (fds == NULL) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ FD_ZERO(&rfds);
+ FD_ZERO(&wfds);
+ FD_ZERO(&efds);
+
+ rc = 0;
+ max_fd = 0;
+
+ /* compute fd_sets and find largest descriptor */
+ for (i = 0; i < nfds; i++) {
+ if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) {
+ fds[i].revents = POLLNVAL;
+ continue;
+ }
+
+ if (fds[i].events & (POLLIN | POLLRDNORM)) {
+ FD_SET(fds[i].fd, &rfds);
+ }
+ if (fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) {
+ FD_SET(fds[i].fd, &wfds);
+ }
+ if (fds[i].events & (POLLPRI | POLLRDBAND)) {
+ FD_SET(fds[i].fd, &efds);
+ }
+ if (fds[i].fd > max_fd &&
+ (fds[i].events & (POLLIN | POLLOUT | POLLPRI |
+ POLLRDNORM | POLLRDBAND |
+ POLLWRNORM | POLLWRBAND))) {
+ max_fd = fds[i].fd;
+ }
+ }
+
+ if (timeout < 0) {
+ ptv = NULL;
+ } else {
+ ptv = &tv;
+ if (timeout == 0) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000;
+ }
+ }
+
+ rc = select(max_fd + 1, &rfds, &wfds, &efds, ptv);
+ if (rc < 0) {
+ return -1;
+ }
+
+ for (rc = 0, i = 0; i < nfds; i++) {
+ if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) {
+ continue;
+ }
+
+ fds[i].revents = 0;
+
+ if (FD_ISSET(fds[i].fd, &rfds)) {
+ int err = errno;
+ int available = 0;
+ int ret;
+
+ /* support for POLLHUP */
+ ret = ioctl(fds[i].fd, FIONREAD, &available);
+ if ((ret == -1) || (available == 0)) {
+ fds[i].revents |= POLLHUP;
+ } else {
+ fds[i].revents |= fds[i].events
+ & (POLLIN | POLLRDNORM);
+ }
+
+ errno = err;
+ }
+ if (FD_ISSET(fds[i].fd, &wfds)) {
+ fds[i].revents |= fds[i].events
+ & (POLLOUT | POLLWRNORM | POLLWRBAND);
+ }
+ if (FD_ISSET(fds[i].fd, &efds)) {
+ fds[i].revents |= fds[i].events
+ & (POLLPRI | POLLRDBAND);
+ }
+ if (fds[i].revents & ~POLLHUP) {
+ rc++;
+ }
+ }
+ return rc;
+}
diff --git a/lib/replace/replace-test.h b/lib/replace/replace-test.h
new file mode 100644
index 0000000000..95fb7abe1c
--- /dev/null
+++ b/lib/replace/replace-test.h
@@ -0,0 +1,10 @@
+#ifndef __LIB_REPLACE_REPLACE_TEST_H__
+#define __LIB_REPLACE_REPLACE_TEST_H__
+
+bool torture_local_replace(struct torture_context *ctx);
+int libreplace_test_strptime(void);
+int test_readdir_os2_delete(void);
+int getifaddrs_test(void);
+
+#endif /* __LIB_REPLACE_REPLACE_TEST_H__ */
+
diff --git a/lib/replace/replace.c b/lib/replace/replace.c
index fc15717349..d9a96ff8ef 100644
--- a/lib/replace/replace.c
+++ b/lib/replace/replace.c
@@ -3,6 +3,7 @@
replacement routines for broken systems
Copyright (C) Andrew Tridgell 1992-1998
Copyright (C) Jelmer Vernooij 2005-2008
+ Copyright (C) Matthieu Patou 2010
** NOTE! The following LGPL license applies to the replace
** library. This does NOT imply that all of Samba is released
@@ -409,7 +410,7 @@ int rep_chroot(const char *dname)
int rep_mkstemp(char *template)
{
/* have a reasonable go at emulating it. Hope that
- the system mktemp() isn't completly hopeless */
+ the system mktemp() isn't completely hopeless */
char *p = mktemp(template);
if (!p)
return -1;
@@ -502,6 +503,7 @@ char *rep_strtok_r(char *s, const char *delim, char **save_ptr)
}
#endif
+
#ifndef HAVE_STRTOLL
long long int rep_strtoll(const char *str, char **endptr, int base)
{
@@ -515,7 +517,29 @@ long long int rep_strtoll(const char *str, char **endptr, int base)
# error "You need a strtoll function"
#endif
}
-#endif
+#else
+#ifdef HAVE_BSD_STRTOLL
+#ifdef HAVE_STRTOQ
+long long int rep_strtoll(const char *str, char **endptr, int base)
+{
+ long long int nb = strtoq(str, endptr, base);
+ /* In linux EINVAL is only returned if base is not ok */
+ if (errno == EINVAL) {
+ if (base == 0 || (base >1 && base <37)) {
+ /* Base was ok so it's because we were not
+ * able to make the conversion.
+ * Let's reset errno.
+ */
+ errno = 0;
+ }
+ }
+ return nb;
+}
+#else
+#error "You need the strtoq function"
+#endif /* HAVE_STRTOQ */
+#endif /* HAVE_BSD_STRTOLL */
+#endif /* HAVE_STRTOLL */
#ifndef HAVE_STRTOULL
@@ -531,7 +555,29 @@ unsigned long long int rep_strtoull(const char *str, char **endptr, int base)
# error "You need a strtoull function"
#endif
}
-#endif
+#else
+#ifdef HAVE_BSD_STRTOLL
+#ifdef HAVE_STRTOUQ
+unsigned long long int rep_strtoull(const char *str, char **endptr, int base)
+{
+ unsigned long long int nb = strtouq(str, endptr, base);
+ /* In linux EINVAL is only returned if base is not ok */
+ if (errno == EINVAL) {
+ if (base == 0 || (base >1 && base <37)) {
+ /* Base was ok so it's because we were not
+ * able to make the conversion.
+ * Let's reset errno.
+ */
+ errno = 0;
+ }
+ }
+ return nb;
+}
+#else
+#error "You need the strtouq function"
+#endif /* HAVE_STRTOUQ */
+#endif /* HAVE_BSD_STRTOLL */
+#endif /* HAVE_STRTOULL */
#ifndef HAVE_SETENV
int rep_setenv(const char *name, const char *value, int overwrite)
@@ -681,3 +727,104 @@ char *rep_realpath(const char *path, char *resolved_path)
return NULL;
}
#endif
+
+
+#ifndef HAVE_MEMMEM
+void *rep_memmem(const void *haystack, size_t haystacklen,
+ const void *needle, size_t needlelen)
+{
+ if (needlelen == 0) {
+ return discard_const(haystack);
+ }
+ while (haystacklen >= needlelen) {
+ char *p = (char *)memchr(haystack, *(const char *)needle,
+ haystacklen-(needlelen-1));
+ if (!p) return NULL;
+ if (memcmp(p, needle, needlelen) == 0) {
+ return p;
+ }
+ haystack = p+1;
+ haystacklen -= (p - (const char *)haystack) + 1;
+ }
+ return NULL;
+}
+#endif
+
+#ifndef HAVE_VDPRINTF
+int rep_vdprintf(int fd, const char *format, va_list ap)
+{
+ char *s = NULL;
+ int ret;
+
+ vasprintf(&s, format, ap);
+ if (s == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+ ret = write(fd, s, strlen(s));
+ free(s);
+ return ret;
+}
+#endif
+
+#ifndef HAVE_DPRINTF
+int rep_dprintf(int fd, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+
+ va_start(ap, format);
+ ret = vdprintf(fd, format, ap);
+ va_end(ap);
+
+ return ret;
+}
+#endif
+
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+char *rep_get_current_dir_name(void)
+{
+ char buf[PATH_MAX+1];
+ char *p;
+ p = getcwd(buf, sizeof(buf));
+ if (p == NULL) {
+ return NULL;
+ }
+ return strdup(p);
+}
+#endif
+
+#if !defined(HAVE_STRERROR_R) || !defined(STRERROR_R_PROTO_COMPATIBLE)
+int rep_strerror_r(int errnum, char *buf, size_t buflen)
+{
+ char *s = strerror(errnum);
+ if (strlen(s)+1 > buflen) {
+ errno = ERANGE;
+ return -1;
+ }
+ strncpy(buf, s, buflen);
+ return 0;
+}
+#endif
+
+#ifndef HAVE_CLOCK_GETTIME
+int rep_clock_gettime(clockid_t clk_id, struct timespec *tp)
+{
+ struct timeval tval;
+ switch (clk_id) {
+ case 0: /* CLOCK_REALTIME :*/
+#ifdef HAVE_GETTIMEOFDAY_TZ
+ gettimeofday(&tval,NULL);
+#else
+ gettimeofday(&tval);
+#endif
+ tp->tv_sec = tval.tv_sec;
+ tp->tv_nsec = tval.tv_usec * 1000;
+ break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+ return 0;
+}
+#endif
diff --git a/lib/replace/replace.h b/lib/replace/replace.h
index 6424d10c0f..c081f23c26 100644
--- a/lib/replace/replace.h
+++ b/lib/replace/replace.h
@@ -81,13 +81,13 @@
#ifndef PRIi8
# define PRIi8 "i"
#endif
-#ifndef PRIi8
+#ifndef PRIi16
# define PRIi16 "i"
#endif
-#ifndef PRIi8
+#ifndef PRIi32
# define PRIi32 "i"
#endif
-#ifndef PRIi8
+#ifndef PRIi64
# define PRIi64 __PRI64_PREFIX "i"
#endif
@@ -121,6 +121,13 @@
#include <stddef.h>
#endif
+#ifdef HAVE_LINUX_TYPES_H
+/*
+ * This is needed as some broken header files require this to be included early
+ */
+#include <linux/types.h>
+#endif
+
#ifndef HAVE_STRERROR
extern char *sys_errlist[];
#define strerror(i) sys_errlist[i]
@@ -140,6 +147,12 @@ char *rep_strdup(const char *s);
void *rep_memmove(void *dest,const void *src,int size);
#endif
+#ifndef HAVE_MEMMEM
+#define memmem rep_memmem
+void *rep_memmem(const void *haystack, size_t haystacklen,
+ const void *needle, size_t needlelen);
+#endif
+
#ifndef HAVE_MKTIME
#define mktime rep_mktime
/* prototype is in "system/time.h" */
@@ -277,14 +290,26 @@ char *rep_strcasestr(const char *haystack, const char *needle);
char *rep_strtok_r(char *s, const char *delim, char **save_ptr);
#endif
+
+
#ifndef HAVE_STRTOLL
#define strtoll rep_strtoll
long long int rep_strtoll(const char *str, char **endptr, int base);
+#else
+#ifdef HAVE_BSD_STRTOLL
+#define strtoll rep_strtoll
+long long int rep_strtoll(const char *str, char **endptr, int base);
+#endif
#endif
#ifndef HAVE_STRTOULL
#define strtoull rep_strtoull
unsigned long long int rep_strtoull(const char *str, char **endptr, int base);
+#else
+#ifdef HAVE_BSD_STRTOLL /* yes, it's not HAVE_BSD_STRTOULL */
+#define strtoull rep_strtoull
+unsigned long long int rep_strtoull(const char *str, char **endptr, int base);
+#endif
#endif
#ifndef HAVE_FTRUNCATE
@@ -330,6 +355,16 @@ int rep_dlclose(void *handle);
/* prototype is in system/network.h */
#endif
+#ifndef HAVE_VDPRINTF
+#define vdprintf rep_vdprintf
+int rep_vdprintf(int fd, const char *format, va_list ap);
+#endif
+
+#ifndef HAVE_DPRINTF
+#define dprintf rep_dprintf
+int rep_dprintf(int fd, const char *format, ...);
+#endif
+
#ifndef PRINTF_ATTRIBUTE
#if (__GNUC__ >= 3) && (__GNUC_MINOR__ >= 1 )
/** Use gcc attribute to check printf fns. a1 is the 1-based index of
@@ -491,6 +526,21 @@ ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset)
/* prototype is in "system/network.h" */
#endif
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+#define get_current_dir_name rep_get_current_dir_name
+char *rep_get_current_dir_name(void);
+#endif
+
+#if !defined(HAVE_STRERROR_R) || !defined(STRERROR_R_PROTO_COMPATIBLE)
+#undef strerror_r
+#define strerror_r rep_strerror_r
+int rep_strerror_r(int errnum, char *buf, size_t buflen);
+#endif
+
+#if !defined(HAVE_CLOCK_GETTIME)
+#define clock_gettime rep_clock_gettime
+#endif
+
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
@@ -723,4 +773,33 @@ char *ufc_crypt(const char *key, const char *salt);
#endif
#endif
+#ifndef HAVE_FDATASYNC
+#define fdatasync(fd) fsync(fd)
+#elif !defined(HAVE_DECL_FDATASYNC)
+int fdatasync(int );
+#endif
+
+/* these are used to mark symbols as local to a shared lib, or
+ * publicly available via the shared lib API */
+#ifndef _PUBLIC_
+#ifdef HAVE_VISIBILITY_ATTR
+#define _PUBLIC_ __attribute__((visibility("default")))
+#else
+#define _PUBLIC_
+#endif
+#endif
+
+#ifndef _PRIVATE_
+#ifdef HAVE_VISIBILITY_ATTR
+# define _PRIVATE_ __attribute__((visibility("hidden")))
+#else
+# define _PRIVATE_
+#endif
+#endif
+
+#ifndef HAVE_POLL
+#define poll rep_poll
+/* prototype is in "system/network.h" */
+#endif
+
#endif /* _LIBREPLACE_REPLACE_H */
diff --git a/lib/replace/system/config.m4 b/lib/replace/system/config.m4
index 39c2f58283..b7cdf1414a 100644
--- a/lib/replace/system/config.m4
+++ b/lib/replace/system/config.m4
@@ -1,16 +1,30 @@
# filesys
AC_HEADER_DIRENT
AC_CHECK_HEADERS(fcntl.h sys/fcntl.h sys/resource.h sys/ioctl.h sys/mode.h sys/filio.h sys/fs/s5param.h sys/filsys.h)
-AC_CHECK_HEADERS(sys/acl.h acl/libacl.h)
+AC_CHECK_HEADERS(sys/acl.h acl/libacl.h sys/file.h)
# select
AC_CHECK_HEADERS(sys/select.h)
+# poll
+AC_CHECK_HEADERS(poll.h)
+AC_CHECK_FUNCS(poll,[],[LIBREPLACEOBJ="${LIBREPLACEOBJ} $libreplacedir/poll.o"])
+
# time
AC_CHECK_HEADERS(sys/time.h utime.h)
AC_HEADER_TIME
AC_CHECK_FUNCS(utime utimes)
+AC_CACHE_CHECK([if gettimeofday takes TZ argument],libreplace_cv_HAVE_GETTIMEOFDAY_TZ,[
+AC_TRY_RUN([
+#include <sys/time.h>
+#include <unistd.h>
+main() { struct timeval tv; exit(gettimeofday(&tv, NULL));}],
+ libreplace_cv_HAVE_GETTIMEOFDAY_TZ=yes,libreplace_cv_HAVE_GETTIMEOFDAY_TZ=no,libreplace_cv_HAVE_GETTIMEOFDAY_TZ=yes)])
+if test x"$libreplace_cv_HAVE_GETTIMEOFDAY_TZ" = x"yes"; then
+ AC_DEFINE(HAVE_GETTIMEOFDAY_TZ,1,[Whether gettimeofday() is available])
+fi
+
# wait
AC_HEADER_SYS_WAIT
diff --git a/lib/replace/system/filesys.h b/lib/replace/system/filesys.h
index 22e3d23f3e..6cf2dd287a 100644
--- a/lib/replace/system/filesys.h
+++ b/lib/replace/system/filesys.h
@@ -77,7 +77,9 @@
#include <sys/filio.h>
#endif
+#ifdef HAVE_SYS_FILE_H
#include <sys/file.h>
+#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
diff --git a/lib/replace/system/network.h b/lib/replace/system/network.h
index 93d533c9b9..a4e6a7e31a 100644
--- a/lib/replace/system/network.h
+++ b/lib/replace/system/network.h
@@ -307,12 +307,12 @@ typedef unsigned short int sa_family_t;
#define sockaddr_storage sockaddr_in6
#define ss_family sin6_family
#define HAVE_SS_FAMILY 1
-#else
+#else /*HAVE_STRUCT_SOCKADDR_IN6*/
#define sockaddr_storage sockaddr_in
#define ss_family sin_family
#define HAVE_SS_FAMILY 1
-#endif
-#endif
+#endif /*HAVE_STRUCT_SOCKADDR_IN6*/
+#endif /*HAVE_STRUCT_SOCKADDR_STORAGE*/
#ifndef HAVE_SS_FAMILY
#ifdef HAVE___SS_FAMILY
@@ -331,8 +331,6 @@ typedef unsigned short int sa_family_t;
* which might return 512 or bigger
*/
# define IOV_MAX 512
-# else
-# error IOV_MAX and UIO_MAXIOV undefined
# endif
# endif
#endif
diff --git a/lib/replace/system/passwd.h b/lib/replace/system/passwd.h
index b41608c551..aaea9c8344 100644
--- a/lib/replace/system/passwd.h
+++ b/lib/replace/system/passwd.h
@@ -101,10 +101,12 @@ char *rep_getpass(const char *prompt);
#endif
#ifdef NSS_WRAPPER
+#ifndef NSS_WRAPPER_DISABLE
#ifndef NSS_WRAPPER_NOT_REPLACE
#define NSS_WRAPPER_REPLACE
-#endif
+#endif /* NSS_WRAPPER_NOT_REPLACE */
#include "../nss_wrapper/nss_wrapper.h"
-#endif
+#endif /* NSS_WRAPPER_DISABLE */
+#endif /* NSS_WRAPPER */
#endif
diff --git a/lib/replace/system/readline.h b/lib/replace/system/readline.h
index ba34dc6a61..e6b8fb9129 100644
--- a/lib/replace/system/readline.h
+++ b/lib/replace/system/readline.h
@@ -43,7 +43,13 @@
#endif
#ifdef HAVE_NEW_LIBREADLINE
-# define RL_COMPLETION_CAST (rl_completion_func_t *)
+#ifdef HAVE_CPPFUNCTION
+# define RL_COMPLETION_CAST (CPPFunction *)
+#elif HAVE_RL_COMPLETION_T
+# define RL_COMPLETION_CAST (rl_completion_t *)
+#else
+# define RL_COMPLETION_CAST
+#endif
#else
/* This type is missing from libreadline<4.0 (approximately) */
# define RL_COMPLETION_CAST
diff --git a/lib/replace/system/select.h b/lib/replace/system/select.h
index da18de0cfc..11c5390d90 100644
--- a/lib/replace/system/select.h
+++ b/lib/replace/system/select.h
@@ -38,4 +38,40 @@
#define SELECT_CAST
#endif
+#ifdef HAVE_POLL
+
+#include <poll.h>
+
+#else
+
+/* Type used for the number of file descriptors. */
+typedef unsigned long int nfds_t;
+
+/* Data structure describing a polling request. */
+struct pollfd
+{
+ int fd; /* File descriptor to poll. */
+ short int events; /* Types of events poller cares about. */
+ short int revents; /* Types of events that actually occurred. */
+};
+
+/* Event types that can be polled for. These bits may be set in `events'
+ to indicate the interesting event types; they will appear in `revents'
+ to indicate the status of the file descriptor. */
+#define POLLIN 0x001 /* There is data to read. */
+#define POLLPRI 0x002 /* There is urgent data to read. */
+#define POLLOUT 0x004 /* Writing now will not block. */
+#define POLLRDNORM 0x040 /* Normal data may be read. */
+#define POLLRDBAND 0x080 /* Priority data may be read. */
+#define POLLWRNORM 0x100 /* Writing now will not block. */
+#define POLLWRBAND 0x200 /* Priority data may be written. */
+#define POLLERR 0x008 /* Error condition. */
+#define POLLHUP 0x010 /* Hung up. */
+#define POLLNVAL 0x020 /* Invalid polling request. */
+
+/* define is in "replace.h" */
+int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout);
+
+#endif
+
#endif
diff --git a/lib/replace/system/time.h b/lib/replace/system/time.h
index 4abf295d1a..b6d2609289 100644
--- a/lib/replace/system/time.h
+++ b/lib/replace/system/time.h
@@ -46,6 +46,13 @@ struct utimbuf {
};
#endif
+#ifndef HAVE_STRUCT_TIMESPEC
+struct timespec {
+ time_t tv_sec; /* Seconds. */
+ long tv_nsec; /* Nanoseconds. */
+};
+#endif
+
#ifndef HAVE_MKTIME
/* define is in "replace.h" */
time_t rep_mktime(struct tm *t);
@@ -66,4 +73,19 @@ int rep_utime(const char *filename, const struct utimbuf *buf);
int rep_utimes(const char *filename, const struct timeval tv[2]);
#endif
+#ifndef HAVE_CLOCK_GETTIME
+/* CLOCK_REALTIME is required by POSIX */
+#define CLOCK_REALTIME 0
+typedef int clockid_t;
+int rep_clock_gettime(clockid_t clk_id, struct timespec *tp);
+#endif
+/* make sure we have a best effort CUSTOM_CLOCK_MONOTONIC we can rely on */
+#if defined(CLOCK_MONOTONIC)
+#define CUSTOM_CLOCK_MONOTONIC CLOCK_MONOTONIC
+#elif defined(CLOCK_HIGHRES)
+#define CUSTOM_CLOCK_MONOTONIC CLOCK_HIGHRES
+#else
+#define CUSTOM_CLOCK_MONOTONIC CLOCK_REALTIME
+#endif
+
#endif
diff --git a/lib/replace/system/wait.h b/lib/replace/system/wait.h
index 79583ad2ab..41db1806a5 100644
--- a/lib/replace/system/wait.h
+++ b/lib/replace/system/wait.h
@@ -36,10 +36,6 @@
#define SIGCLD SIGCHLD
#endif
-#ifndef SIGNAL_CAST
-#define SIGNAL_CAST (RETSIGTYPE (*)(int))
-#endif
-
#ifdef HAVE_SETJMP_H
#include <setjmp.h>
#endif
diff --git a/lib/replace/system/wscript_configure b/lib/replace/system/wscript_configure
new file mode 100644
index 0000000000..2035474b32
--- /dev/null
+++ b/lib/replace/system/wscript_configure
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+conf.CHECK_HEADERS('sys/capability.h')
+conf.CHECK_FUNCS('getpwnam_r getpwuid_r getpwent_r')
+
+# solaris variants of getXXent_r
+conf.CHECK_C_PROTOTYPE('getpwent_r',
+ 'struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)',
+ define='SOLARIS_GETPWENT_R', headers='pwd.h')
+conf.CHECK_C_PROTOTYPE('getgrent_r',
+ 'struct group *getgrent_r(struct group *src, char *buf, int buflen)',
+ define='SOLARIS_GETGRENT_R', headers='grp.h')
+
+# the irix variants
+conf.CHECK_C_PROTOTYPE('getpwent_r',
+ 'struct passwd *getpwent_r(struct passwd *src, char *buf, size_t buflen)',
+ define='SOLARIS_GETPWENT_R', headers='pwd.h')
+conf.CHECK_C_PROTOTYPE('getgrent_r',
+ 'struct group *getgrent_r(struct group *src, char *buf, size_t buflen)',
+ define='SOLARIS_GETGRENT_R', headers='grp.h')
+
+conf.CHECK_FUNCS('getgrouplist')
+conf.CHECK_HEADERS('ctype.h locale.h langinfo.h')
+conf.CHECK_HEADERS('fnmatch.h locale.h langinfo.h')
+conf.CHECK_HEADERS('sys/ipc.h sys/mman.h sys/shm.h')
+conf.CHECK_HEADERS('termios.h termio.h sys/termio.h')
diff --git a/lib/replace/test/os2_delete.c b/lib/replace/test/os2_delete.c
index 8b52837018..9d760bfdc2 100644
--- a/lib/replace/test/os2_delete.c
+++ b/lib/replace/test/os2_delete.c
@@ -46,7 +46,7 @@ static void create_files(void)
for (i=0;i<NUM_FILES;i++) {
char fname[40];
int fd;
- sprintf(fname, TESTDIR "/test%u.txt", i);
+ snprintf(fname, sizeof(fname), TESTDIR "/test%u.txt", i);
fd = open(fname, O_CREAT|O_RDWR, 0600);
if (fd < 0) {
FAILED("open");
@@ -79,7 +79,7 @@ static int os2_delete(DIR *d)
/* delete the first few */
for (j=0; j<MIN(i, DELETE_SIZE); j++) {
char fname[40];
- sprintf(fname, TESTDIR "/%s", names[j]);
+ snprintf(fname, sizeof(fname), TESTDIR "/%s", names[j]);
unlink(fname) == 0 || FAILED("unlink");
}
diff --git a/lib/replace/test/snprintf.c b/lib/replace/test/snprintf.c
new file mode 100644
index 0000000000..d06630bcc9
--- /dev/null
+++ b/lib/replace/test/snprintf.c
@@ -0,0 +1,29 @@
+void foo(const char *format, ...)
+{
+ va_list ap;
+ int len;
+ char buf[20];
+ long long l = 1234567890;
+ l *= 100;
+
+ va_start(ap, format);
+ len = vsnprintf(buf, 0, format, ap);
+ va_end(ap);
+ if (len != 5) exit(1);
+
+ va_start(ap, format);
+ len = vsnprintf(0, 0, format, ap);
+ va_end(ap);
+ if (len != 5) exit(2);
+
+ if (snprintf(buf, 3, "hello") != 5 || strcmp(buf, "he") != 0) exit(3);
+
+ if (snprintf(buf, 20, "%lld", l) != 12 || strcmp(buf, "123456789000") != 0) exit(4);
+ if (snprintf(buf, 20, "%zu", 123456789) != 9 || strcmp(buf, "123456789") != 0) exit(5);
+ if (snprintf(buf, 20, "%2\$d %1\$d", 3, 4) != 3 || strcmp(buf, "4 3") != 0) exit(6);
+ if (snprintf(buf, 20, "%s", 0) < 3) exit(7);
+
+ printf("1");
+ exit(0);
+}
+main() { foo("hello"); }
diff --git a/lib/replace/test/testsuite.c b/lib/replace/test/testsuite.c
index 7929f11add..0e455f2317 100644
--- a/lib/replace/test/testsuite.c
+++ b/lib/replace/test/testsuite.c
@@ -751,7 +751,6 @@ FIXME:
Types:
bool
socklen_t
-uint_t
uint{8,16,32,64}_t
int{8,16,32,64}_t
intptr_t
@@ -772,7 +771,7 @@ static int test_FUNCTION(void)
{
printf("test: FUNCTION\n");
if (strcmp(__FUNCTION__, "test_FUNCTION") != 0) {
- printf("failure: FAILURE [\nFAILURE invalid\n]\n");
+ printf("failure: FUNCTION [\nFUNCTION invalid\n]\n");
return false;
}
printf("success: FUNCTION\n");
@@ -1015,6 +1014,44 @@ static int test_utimes(void)
return true;
}
+static int test_memmem(void)
+{
+ char *s;
+
+ printf("test: memmem\n");
+
+ s = (char *)memmem("foo", 3, "fo", 2);
+ if (strcmp(s, "foo") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ s = (char *)memmem("foo", 3, "", 0);
+ /* it is allowable for this to return NULL (as happens on
+ FreeBSD) */
+ if (s && strcmp(s, "foo") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ s = (char *)memmem("foo", 4, "o", 1);
+ if (strcmp(s, "oo") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ s = (char *)memmem("foobarfodx", 11, "fod", 3);
+ if (strcmp(s, "fodx") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ printf("success: memmem\n");
+
+ return true;
+}
+
+
struct torture_context;
bool torture_local_replace(struct torture_context *ctx)
{
@@ -1065,6 +1102,7 @@ bool torture_local_replace(struct torture_context *ctx)
ret &= test_getifaddrs();
ret &= test_utime();
ret &= test_utimes();
+ ret &= test_memmem();
return ret;
}
diff --git a/lib/replace/wscript b/lib/replace/wscript
new file mode 100644
index 0000000000..c24d6e76b3
--- /dev/null
+++ b/lib/replace/wscript
@@ -0,0 +1,465 @@
+#!/usr/bin/env python
+
+APPNAME = 'libreplace'
+VERSION = '1.2.1'
+
+blddir = 'bin'
+
+import sys, os, Utils
+
+# find the buildtools directory
+srcdir = '.'
+while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5:
+ srcdir = '../' + srcdir
+sys.path.insert(0, srcdir + '/buildtools/wafsamba')
+
+import wafsamba, samba_dist
+import Options, os, preproc
+
+samba_dist.DIST_DIRS('lib/replace buildtools:buildtools')
+
+def set_options(opt):
+ opt.BUILTIN_DEFAULT('NONE')
+ opt.PRIVATE_EXTENSION_DEFAULT('')
+ opt.RECURSE('buildtools/wafsamba')
+
+@wafsamba.runonce
+def configure(conf):
+ conf.RECURSE('buildtools/wafsamba')
+
+ conf.env.standalone_replace = conf.IN_LAUNCH_DIR()
+
+ conf.DEFINE('LIBREPLACE_NETWORK_CHECKS', 1)
+
+ # on Tru64 certain features are only available with _OSF_SOURCE set to 1
+ # and _XOPEN_SOURCE set to 600
+ if conf.env['SYSTEM_UNAME_SYSNAME'] == 'OSF1':
+ conf.DEFINE('_OSF_SOURCE', 1, add_to_cflags=True)
+ conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True)
+
+ conf.CHECK_HEADERS('linux/types.h crypt.h locale.h acl/libacl.h compat.h')
+ conf.CHECK_HEADERS('acl/libacl.h attr/xattr.h compat.h ctype.h dustat.h')
+ conf.CHECK_HEADERS('fcntl.h fnmatch.h glob.h history.h krb5.h langinfo.h')
+ conf.CHECK_HEADERS('libaio.h locale.h ndir.h pwd.h')
+ conf.CHECK_HEADERS('shadow.h sys/acl.h')
+ conf.CHECK_HEADERS('sys/attributes.h sys/capability.h sys/dir.h sys/epoll.h')
+ conf.CHECK_HEADERS('sys/fcntl.h sys/filio.h sys/filsys.h sys/fs/s5param.h sys/fs/vx/quota.h')
+ conf.CHECK_HEADERS('sys/id.h sys/ioctl.h sys/ipc.h sys/mman.h sys/mode.h sys/ndir.h sys/priv.h')
+ conf.CHECK_HEADERS('sys/resource.h sys/security.h sys/shm.h sys/statfs.h sys/statvfs.h sys/termio.h')
+ conf.CHECK_HEADERS('sys/vfs.h sys/xattr.h termio.h termios.h sys/file.h')
+ conf.CHECK_HEADERS('sys/wait.h sys/stat.h malloc.h grp.h')
+ conf.CHECK_HEADERS('sys/select.h setjmp.h utime.h sys/syslog.h syslog.h')
+ conf.CHECK_HEADERS('stdarg.h vararg.h sys/mount.h mntent.h')
+ conf.CHECK_HEADERS('stropts.h unix.h string.h strings.h sys/param.h limits.h')
+ conf.CHECK_HEADERS('''sys/socket.h netinet/in.h netdb.h arpa/inet.h netinet/in_systm.h
+ netinet/ip.h netinet/tcp.h netinet/in_ip.h
+ sys/sockio.h sys/un.h''', together=True)
+ conf.CHECK_HEADERS('sys/uio.h ifaddrs.h direct.h dirent.h')
+ conf.CHECK_HEADERS('windows.h winsock2.h ws2tcpip.h')
+ conf.CHECK_HEADERS('libintl.h errno.h')
+ conf.CHECK_HEADERS('gcrypt.h getopt.h iconv.h')
+ conf.CHECK_HEADERS('sys/inotify.h memory.h nss.h sasl/sasl.h')
+ conf.CHECK_HEADERS('security/pam_appl.h sys/inotify.h zlib.h asm/unistd.h')
+ conf.CHECK_HEADERS('aio.h sys/unistd.h rpc/rpc.h rpc/nettype.h alloca.h float.h')
+
+ conf.CHECK_HEADERS('rpcsvc/nis.h rpcsvc/ypclnt.h sys/prctl.h sys/sysctl.h')
+ conf.CHECK_HEADERS('sys/fileio.h sys/filesys.h sys/dustat.h sys/sysmacros.h')
+ conf.CHECK_HEADERS('xfs/libxfs.h netgroup.h rpcsvc/yp_prot.h')
+ conf.CHECK_HEADERS('valgrind.h valgrind/valgrind.h valgrind/memcheck.h')
+ conf.CHECK_HEADERS('nss_common.h nsswitch.h ns_api.h')
+ conf.CHECK_HEADERS('sys/extattr.h sys/ea.h sys/proplist.h sys/cdefs.h')
+ conf.CHECK_HEADERS('utmp.h utmpx.h lastlog.h')
+ conf.CHECK_HEADERS('syscall.h sys/syscall.h inttypes.h')
+
+ conf.CHECK_TYPES('"long long" intptr_t uintptr_t ptrdiff_t comparison_fn_t')
+ conf.CHECK_TYPE('_Bool', define='HAVE__Bool')
+ conf.CHECK_TYPE('bool', define='HAVE_BOOL')
+
+ conf.CHECK_TYPE('int8_t', 'char')
+ conf.CHECK_TYPE('uint8_t', 'unsigned char')
+ conf.CHECK_TYPE('int16_t', 'short')
+ conf.CHECK_TYPE('uint16_t', 'unsigned short')
+ conf.CHECK_TYPE('int32_t', 'int')
+ conf.CHECK_TYPE('uint32_t', 'unsigned')
+ conf.CHECK_TYPE('int64_t', 'long long')
+ conf.CHECK_TYPE('uint64_t', 'unsigned long long')
+ conf.CHECK_TYPE('size_t', 'unsigned int')
+ conf.CHECK_TYPE('ssize_t', 'int')
+ conf.CHECK_TYPE('ino_t', 'unsigned')
+ conf.CHECK_TYPE('loff_t', 'off_t')
+ conf.CHECK_TYPE('offset_t', 'loff_t')
+ conf.CHECK_TYPE('volatile int', define='HAVE_VOLATILE')
+ conf.CHECK_TYPE('uint_t', 'unsigned int')
+
+ conf.CHECK_SIZEOF('bool char int "long long" long short size_t ssize_t')
+ conf.CHECK_SIZEOF('int8_t uint8_t int16_t uint16_t int32_t uint32_t int64_t uint64_t')
+ conf.CHECK_SIZEOF('void*', define='SIZEOF_VOID_P')
+ conf.CHECK_SIZEOF('off_t dev_t ino_t time_t')
+
+ conf.CHECK_TYPES('socklen_t', headers='sys/socket.h')
+ conf.CHECK_TYPE_IN('struct ifaddrs', 'ifaddrs.h')
+ conf.CHECK_TYPE_IN('struct addrinfo', 'netdb.h')
+ conf.CHECK_TYPE_IN('struct sockaddr', 'sys/socket.h')
+ conf.CHECK_CODE('struct sockaddr_in6 x', define='HAVE_STRUCT_SOCKADDR_IN6',
+ headers='sys/socket.h netdb.h netinet/in.h')
+ conf.CHECK_TYPE_IN('struct sockaddr_storage', 'sys/socket.h')
+ conf.CHECK_TYPE_IN('sa_family_t', 'sys/socket.h')
+
+ conf.CHECK_TYPE_IN('sig_atomic_t', 'signal.h', define='HAVE_SIG_ATOMIC_T_TYPE')
+
+ conf.CHECK_FUNCS_IN('''inet_ntoa inet_aton inet_ntop inet_pton connect gethostbyname
+ getaddrinfo getnameinfo freeaddrinfo gai_strerror socketpair''',
+ 'socket nsl', checklibc=True,
+ headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h')
+
+ # Some old Linux systems have broken header files and
+ # miss the IPV6_V6ONLY define in netinet/in.h,
+ # but have it in linux/in6.h.
+    # We can't include both files so we just check if the value
+    # is defined and do the replacement in system/network.h
+ if not conf.CHECK_VARIABLE('IPV6_V6ONLY',
+ headers='sys/socket.h netdb.h netinet/in.h'):
+ conf.CHECK_CODE('''
+ #include <linux/in6.h>
+ #if (IPV6_V6ONLY != 26)
+ #error no IPV6_V6ONLY support on linux
+ #endif
+ int main(void) { return IPV6_V6ONLY; }
+ ''',
+ define='HAVE_LINUX_IPV6_V6ONLY_26',
+ addmain=False,
+ msg='Checking for IPV6_V6ONLY in linux/in6.h',
+ local_include=False)
+
+ conf.CHECK_CODE('''
+ struct sockaddr_storage sa_store;
+ struct addrinfo *ai = NULL;
+ struct in6_addr in6addr;
+ int idx = if_nametoindex("iface1");
+ int s = socket(AF_INET6, SOCK_STREAM, 0);
+ int ret = getaddrinfo(NULL, NULL, NULL, &ai);
+ if (ret != 0) {
+ const char *es = gai_strerror(ret);
+ }
+ freeaddrinfo(ai);
+ {
+ int val = 1;
+ #ifdef HAVE_LINUX_IPV6_V6ONLY_26
+ #define IPV6_V6ONLY 26
+ #endif
+ ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
+ (const void *)&val, sizeof(val));
+ }
+ ''',
+ define='HAVE_IPV6',
+ lib='nsl socket',
+ headers='sys/socket.h netdb.h netinet/in.h')
+
+ # these may be builtins, so we need the link=False strategy
+ conf.CHECK_FUNCS('strdup memmem printf memset memcpy memmove strcpy strncpy bzero', link=False)
+
+ conf.CHECK_FUNCS('shl_load shl_unload shl_findsym')
+ conf.CHECK_FUNCS('pipe strftime srandom random srand rand usleep setbuffer')
+ conf.CHECK_FUNCS('lstat getpgrp utime utimes seteuid setresuid setegid')
+ conf.CHECK_FUNCS('setresgid chroot strerror vsyslog setlinebuf mktime')
+ conf.CHECK_FUNCS('ftruncate chsize rename waitpid wait4 strlcpy strlcat')
+ conf.CHECK_FUNCS('initgroups pread pwrite strndup strcasestr')
+ conf.CHECK_FUNCS('strtok_r mkdtemp dup2 dprintf vdprintf isatty chown lchown')
+ conf.CHECK_FUNCS('link readlink symlink realpath snprintf vsnprintf')
+ conf.CHECK_FUNCS('asprintf vasprintf setenv unsetenv strnlen strtoull __strtoull')
+ conf.CHECK_FUNCS('strtouq strtoll __strtoll strtoq')
+    #Some OS (i.e. freebsd) return EINVAL if the conversion could not be done, it's not what we expect
+ #Let's detect those cases
+ if conf.CONFIG_SET('HAVE_STRTOLL'):
+ conf.CHECK_CODE('''
+ long long nb = strtoll("Text", NULL, 0);
+ if (errno == EINVAL) {
+ return 0;
+ } else {
+ return 1;
+ }
+ ''',
+ msg="Checking correct behavior of strtoll",
+ headers = 'errno.h',
+ execute = True,
+ define_ret = True,
+ define = 'HAVE_BSD_STRTOLL',
+ )
+ conf.CHECK_FUNCS('if_nametoindex strerror_r')
+ conf.CHECK_FUNCS('getdirentries getdents syslog')
+ conf.CHECK_FUNCS('gai_strerror get_current_dir_name')
+ conf.CHECK_FUNCS('timegm getifaddrs freeifaddrs mmap setgroups setsid')
+ conf.CHECK_FUNCS('getgrent_r getgrgid_r getgrnam_r getgrouplist getpagesize')
+ conf.CHECK_FUNCS('getpwent_r getpwnam_r getpwuid_r epoll_create')
+
+ conf.CHECK_FUNCS_IN('dlopen dlsym dlerror dlclose', 'dl',
+ checklibc=True, headers='dlfcn.h dl.h')
+
+ conf.CHECK_C_PROTOTYPE('dlopen', 'void *dlopen(const char* filename, unsigned int flags)',
+ define='DLOPEN_TAKES_UNSIGNED_FLAGS', headers='dlfcn.h dl.h')
+
+ if conf.CHECK_FUNCS_IN('fdatasync', 'rt', checklibc=True):
+ # some systems are missing the declaration
+ conf.CHECK_DECLS('fdatasync')
+
+ if conf.CHECK_FUNCS_IN('clock_gettime', 'rt', checklibc=True):
+ for c in ['CLOCK_MONOTONIC', 'CLOCK_PROCESS_CPUTIME_ID', 'CLOCK_REALTIME']:
+ conf.CHECK_CODE('''
+ #if TIME_WITH_SYS_TIME
+ # include <sys/time.h>
+ # include <time.h>
+ #else
+ # if HAVE_SYS_TIME_H
+ # include <sys/time.h>
+ # else
+ # include <time.h>
+ # endif
+ #endif
+ clockid_t clk = %s''' % c,
+ 'HAVE_%s' % c,
+ msg='Checking whether the clock_gettime clock ID %s is available' % c)
+
+ conf.CHECK_TYPE('struct timespec', headers='sys/time.h time.h')
+
+ # these headers need to be tested as a group on freebsd
+ conf.CHECK_HEADERS(headers='sys/socket.h net/if.h', together=True)
+ conf.CHECK_HEADERS(headers='netinet/in.h arpa/nameser.h resolv.h', together=True)
+ conf.CHECK_FUNCS_IN('res_search', 'resolv', checklibc=True,
+ headers='netinet/in.h arpa/nameser.h resolv.h')
+
+
+ if not conf.CHECK_FUNCS_IN('gettext', 'intl', checklibc=True, headers='libintl.h'):
+ # Some hosts need lib iconv for linking with lib intl
+ # So we try with flags just in case it helps.
+ oldflags = conf.env['LDFLAGS_INTL']
+ conf.env['LDFLAGS_INTL'] = "-liconv"
+ if not conf.CHECK_LIB('intl'):
+ conf.env['LDFLAGS_INTL'] = oldflags
+ else:
+ conf.CHECK_FUNCS_IN('gettext', 'intl', checklibc=True, headers='libintl.h')
+
+ conf.CHECK_FUNCS_IN('dgettext gettext', 'intl', headers='libintl.h')
+ conf.CHECK_FUNCS_IN('pthread_create', 'pthread', checklibc=True, headers='pthread.h')
+
+ conf.CHECK_FUNCS_IN('crypt', 'crypt', checklibc=True)
+
+ conf.CHECK_VARIABLE('rl_event_hook', define='HAVE_DECL_RL_EVENT_HOOK', always=True,
+ headers='readline.h readline/readline.h readline/history.h')
+
+ conf.CHECK_DECLS('snprintf vsnprintf asprintf vasprintf')
+
+ conf.CHECK_DECLS('errno', headers='errno.h', reverse=True)
+ conf.CHECK_DECLS('environ getgrent_r getpwent_r', reverse=True, headers='pwd.h grp.h')
+ conf.CHECK_DECLS('pread pwrite setenv setresgid setresuid', reverse=True)
+
+ if conf.CONFIG_SET('HAVE_EPOLL_CREATE') and conf.CONFIG_SET('HAVE_SYS_EPOLL_H'):
+ conf.DEFINE('HAVE_EPOLL', 1)
+
+ conf.CHECK_HEADERS('poll.h')
+ conf.CHECK_FUNCS('poll')
+
+ if not conf.CHECK_CODE('''#define LIBREPLACE_CONFIGURE_TEST_STRPTIME
+ #include "test/strptime.c"''',
+ define='HAVE_STRPTIME',
+ addmain=False,
+ msg='Checking for working strptime'):
+ conf.DEFINE('REPLACE_STRPTIME', 1)
+ else:
+ conf.CHECK_CODE('''
+ const char *s = "20070414101546Z";
+ char *ret;
+ struct tm t;
+ memset(&t, 0, sizeof(t));
+ ret = strptime(s, "%Y%m%d%H%M%S", &t);
+ if (ret == NULL || t.tm_wday != 6) {
+ return 0;
+ } else {
+ return 1;
+ }
+ ''',
+ msg="Checking correct behavior of strptime",
+ headers = 'time.h',
+ execute = True,
+ define_ret = True,
+ define = 'REPLACE_STRPTIME',
+ )
+
+ conf.CHECK_CODE('gettimeofday(NULL, NULL)', 'HAVE_GETTIMEOFDAY_TZ', execute=False)
+
+ conf.CHECK_CODE('#include "test/snprintf.c"',
+ define="HAVE_C99_VSNPRINTF",
+ execute=1,
+ addmain=False,
+ msg="Checking for C99 vsnprintf")
+
+ conf.SAMBA_BUILD_ENV()
+
+ conf.CHECK_CODE('''
+ typedef struct {unsigned x;} FOOBAR;
+ #define X_FOOBAR(x) ((FOOBAR) { x })
+ #define FOO_ONE X_FOOBAR(1)
+ FOOBAR f = FOO_ONE;
+ static const struct {
+ FOOBAR y;
+ } f2[] = {
+ {FOO_ONE}
+ };
+ static const FOOBAR f3[] = {FOO_ONE};
+ ''',
+ define='HAVE_IMMEDIATE_STRUCTURES')
+
+ conf.CHECK_CODE('mkdir("foo",0777)', define='HAVE_MKDIR_MODE', headers='sys/stat.h')
+
+ conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_mtim.tv_nsec', define='HAVE_STAT_TV_NSEC',
+ headers='sys/stat.h')
+ # we need the st_rdev test under two names
+ conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev',
+ define='HAVE_STRUCT_STAT_ST_RDEV',
+ headers='sys/stat.h')
+ conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_ST_RDEV',
+ headers='sys/stat.h')
+ conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', 'ss_family',
+ headers='sys/socket.h netinet/in.h')
+ conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', '__ss_family',
+ headers='sys/socket.h netinet/in.h')
+
+
+ if conf.CHECK_STRUCTURE_MEMBER('struct sockaddr', 'sa_len',
+ headers='sys/socket.h netinet/in.h',
+ define='HAVE_SOCKADDR_SA_LEN'):
+ # the old build system produced both defines
+ conf.DEFINE('HAVE_STRUCT_SOCKADDR_SA_LEN', 1)
+
+ conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in', 'sin_len',
+ headers='sys/socket.h netinet/in.h',
+ define='HAVE_SOCK_SIN_LEN')
+
+ conf.CHECK_CODE('struct sockaddr_un sunaddr; sunaddr.sun_family = AF_UNIX;',
+ define='HAVE_UNIXSOCKET', headers='sys/socket.h sys/un.h')
+
+
+ conf.CHECK_CODE('''
+ struct stat st;
+ char tpl[20]="/tmp/test.XXXXXX";
+ int fd = mkstemp(tpl);
+ if (fd == -1) exit(1);
+ unlink(tpl);
+ if (fstat(fd, &st) != 0) exit(1);
+ if ((st.st_mode & 0777) != 0600) exit(1);
+ exit(0);
+ ''',
+ define='HAVE_SECURE_MKSTEMP',
+ execute=True,
+ mandatory=True) # lets see if we get a mandatory failure for this one
+
+ if conf.CHECK_CFLAGS('-fvisibility=hidden'):
+ conf.env.VISIBILITY_CFLAGS = '-fvisibility=hidden'
+ conf.CHECK_CODE('''void vis_foo1(void) {}
+ __attribute__((visibility("default"))) void vis_foo2(void) {}''',
+ cflags=conf.env.VISIBILITY_CFLAGS,
+ define='HAVE_VISIBILITY_ATTR')
+
+ # look for a method of finding the list of network interfaces
+ for method in ['HAVE_IFACE_GETIFADDRS', 'HAVE_IFACE_AIX', 'HAVE_IFACE_IFCONF', 'HAVE_IFACE_IFREQ']:
+ if conf.CHECK_CODE('''
+ #define %s 1
+ #define NO_CONFIG_H 1
+ #define AUTOCONF_TEST 1
+ #define SOCKET_WRAPPER_NOT_REPLACE
+ #include "replace.c"
+ #include "inet_ntop.c"
+ #include "snprintf.c"
+ #include "getifaddrs.c"
+ #define getifaddrs_test main
+ #include "test/getifaddrs.c"
+ ''' % method,
+ method,
+ lib='nsl socket',
+ addmain=False,
+ execute=True):
+ break
+
+ if conf.CHECK_FUNCS('getpass getpassphrase'):
+ # if we have both, then we prefer getpassphrase
+ conf.DEFINE('REPLACE_GETPASS_BY_GETPASSPHRASE', 1)
+ conf.DEFINE('REPLACE_GETPASS', 1)
+ else:
+ conf.CHECK_CODE('''#include "getpass.c"
+ int main(void) { return 0; }''',
+ addmain=False,
+ define='REPLACE_GETPASS',
+ cflags='-DNO_CONFIG_H')
+
+ conf.RECURSE('system')
+ conf.SAMBA_CONFIG_H()
+
+
+def build(bld):
+ bld.RECURSE('buildtools/wafsamba')
+
+ REPLACE_HOSTCC_SOURCE = 'replace.c snprintf.c'
+
+ if bld.CONFIG_SET('REPLACE_STRPTIME'): REPLACE_HOSTCC_SOURCE += ' strptime.c'
+ if not bld.CONFIG_SET('HAVE_TIMEGM'): REPLACE_HOSTCC_SOURCE += ' timegm.c'
+
+ bld.SAMBA_SUBSYSTEM('LIBREPLACE_HOSTCC',
+ REPLACE_HOSTCC_SOURCE,
+ use_hostcc=True,
+ use_global_deps=False,
+ cflags='-DSOCKET_WRAPPER_DISABLE=1 -DNSS_WRAPPER_DISABLE=1 -D_SAMBA_HOSTCC_',
+ group='compiler_libraries'
+ )
+
+ REPLACE_SOURCE = REPLACE_HOSTCC_SOURCE
+
+ if bld.CONFIG_SET('REPLACE_GETPASS'): REPLACE_SOURCE += ' getpass.c'
+ if not bld.CONFIG_SET('HAVE_CRYPT'): REPLACE_SOURCE += ' crypt.c'
+ if not bld.CONFIG_SET('HAVE_DLOPEN'): REPLACE_SOURCE += ' dlfcn.c'
+ if not bld.CONFIG_SET('HAVE_POLL'): REPLACE_SOURCE += ' poll.c'
+
+ if not bld.CONFIG_SET('HAVE_SOCKETPAIR'): REPLACE_SOURCE += ' socketpair.c'
+ if not bld.CONFIG_SET('HAVE_CONNECT'): REPLACE_SOURCE += ' socket.c'
+ if not bld.CONFIG_SET('HAVE_GETIFADDRS'): REPLACE_SOURCE += ' getifaddrs.c'
+ if not bld.CONFIG_SET('HAVE_GETADDRINFO'): REPLACE_SOURCE += ' getaddrinfo.c'
+ if not bld.CONFIG_SET('HAVE_INET_NTOA'): REPLACE_SOURCE += ' inet_ntoa.c'
+ if not bld.CONFIG_SET('HAVE_INET_ATON'): REPLACE_SOURCE += ' inet_aton.c'
+ if not bld.CONFIG_SET('HAVE_INET_NTOP'): REPLACE_SOURCE += ' inet_ntop.c'
+ if not bld.CONFIG_SET('HAVE_INET_PTON'): REPLACE_SOURCE += ' inet_pton.c'
+
+ bld.SAMBA_LIBRARY('replace',
+ source=REPLACE_SOURCE,
+ group='base_libraries',
+ # FIXME: Ideally symbols should be hidden here so they
+ # don't appear in the global namespace when Samba
+ # libraries are loaded, but this doesn't appear to work
+ # at the moment:
+ # hide_symbols=bld.BUILTIN_LIBRARY('replace'),
+ private_library=True,
+ deps='crypt dl nsl socket rt')
+
+ bld.SAMBA_SUBSYSTEM('replace-test',
+ source='''test/testsuite.c test/strptime.c
+ test/os2_delete.c test/getifaddrs.c''',
+ deps='replace')
+
+ if bld.env.standalone_replace:
+ bld.SAMBA_BINARY('replace_testsuite',
+ source='test/main.c',
+ deps='replace replace-test',
+ install=False)
+
+ # build replacements for stdint.h and stdbool.h if needed
+ bld.SAMBA_GENERATOR('replace_stdint_h',
+ rule='cp ${SRC} ${TGT}',
+ source='hdr_replace.h',
+ target='stdint.h',
+ enabled = not bld.CONFIG_SET('HAVE_STDINT_H'))
+ bld.SAMBA_GENERATOR('replace_stdbool_h',
+ rule='cp ${SRC} ${TGT}',
+ source='hdr_replace.h',
+ target='stdbool.h',
+ enabled = not bld.CONFIG_SET('HAVE_STDBOOL_H'))
+
+def dist():
+ '''makes a tarball for distribution'''
+ samba_dist.dist()
diff --git a/lib/smbconf/config.mk b/lib/smbconf/config.mk
deleted file mode 100644
index de66b90918..0000000000
--- a/lib/smbconf/config.mk
+++ /dev/null
@@ -1,3 +0,0 @@
-[SUBSYSTEM::LIBSMBCONF]
-
-LIBSMBCONF_OBJ_FILES = $(addprefix ../lib/smbconf/, smbconf.o smbconf_txt.o smbconf_util.o)
diff --git a/lib/smbconf/smbconf.c b/lib/smbconf/smbconf.c
index 80fe9aac37..e0441ed985 100644
--- a/lib/smbconf/smbconf.c
+++ b/lib/smbconf/smbconf.c
@@ -27,12 +27,13 @@
*
**********************************************************************/
-static WERROR smbconf_global_check(struct smbconf_ctx *ctx)
+static sbcErr smbconf_global_check(struct smbconf_ctx *ctx)
{
if (!smbconf_share_exists(ctx, GLOBAL_NAME)) {
return smbconf_create_share(ctx, GLOBAL_NAME);
}
- return WERR_OK;
+
+ return SBC_ERR_OK;
}
@@ -42,6 +43,41 @@ static WERROR smbconf_global_check(struct smbconf_ctx *ctx)
*
**********************************************************************/
+const char *sbcErrorString(sbcErr error)
+{
+ switch (error) {
+ case SBC_ERR_OK:
+ return "SBC_ERR_OK";
+ case SBC_ERR_NOT_IMPLEMENTED:
+ return "SBC_ERR_NOT_IMPLEMENTED";
+ case SBC_ERR_NOT_SUPPORTED:
+ return "SBC_ERR_NOT_SUPPORTED";
+ case SBC_ERR_UNKNOWN_FAILURE:
+ return "SBC_ERR_UNKNOWN_FAILURE";
+ case SBC_ERR_NOMEM:
+ return "SBC_ERR_NOMEM";
+ case SBC_ERR_INVALID_PARAM:
+ return "SBC_ERR_INVALID_PARAM";
+ case SBC_ERR_BADFILE:
+ return "SBC_ERR_BADFILE";
+ case SBC_ERR_NO_SUCH_SERVICE:
+ return "SBC_ERR_NO_SUCH_SERVICE";
+ case SBC_ERR_IO_FAILURE:
+ return "SBC_ERR_IO_FAILURE";
+ case SBC_ERR_CAN_NOT_COMPLETE:
+ return "SBC_ERR_CAN_NOT_COMPLETE";
+ case SBC_ERR_NO_MORE_ITEMS:
+ return "SBC_ERR_NO_MORE_ITEMS";
+ case SBC_ERR_FILE_EXISTS:
+ return "SBC_ERR_FILE_EXISTS";
+ case SBC_ERR_ACCESS_DENIED:
+ return "SBC_ERR_ACCESS_DENIED";
+ }
+
+ return "unknown sbcErr value";
+}
+
+
/**
* Tell whether the backend requires messaging to be set up
* for the backend to work correctly.
@@ -91,7 +127,7 @@ bool smbconf_changed(struct smbconf_ctx *ctx, struct smbconf_csn *csn,
/**
* Drop the whole configuration (restarting empty).
*/
-WERROR smbconf_drop(struct smbconf_ctx *ctx)
+sbcErr smbconf_drop(struct smbconf_ctx *ctx)
{
return ctx->ops->drop(ctx);
}
@@ -105,12 +141,12 @@ WERROR smbconf_drop(struct smbconf_ctx *ctx)
* param_names : list of lists of parameter names for each share
* param_values : list of lists of parameter values for each share
*/
-WERROR smbconf_get_config(struct smbconf_ctx *ctx,
+sbcErr smbconf_get_config(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_shares,
struct smbconf_service ***services)
{
- WERROR werr = WERR_OK;
+ sbcErr err;
TALLOC_CTX *tmp_ctx = NULL;
uint32_t tmp_num_shares;
char **tmp_share_names;
@@ -118,36 +154,35 @@ WERROR smbconf_get_config(struct smbconf_ctx *ctx,
uint32_t count;
if ((num_shares == NULL) || (services == NULL)) {
- werr = WERR_INVALID_PARAM;
+ err = SBC_ERR_INVALID_PARAM;
goto done;
}
tmp_ctx = talloc_stackframe();
- werr = smbconf_get_share_names(ctx, tmp_ctx, &tmp_num_shares,
- &tmp_share_names);
- if (!W_ERROR_IS_OK(werr)) {
+ err = smbconf_get_share_names(ctx, tmp_ctx, &tmp_num_shares,
+ &tmp_share_names);
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
tmp_services = talloc_array(tmp_ctx, struct smbconf_service *,
tmp_num_shares);
-
if (tmp_services == NULL) {
- werr = WERR_NOMEM;
+ err = SBC_ERR_NOMEM;
goto done;
}
for (count = 0; count < tmp_num_shares; count++) {
- werr = smbconf_get_share(ctx, tmp_services,
- tmp_share_names[count],
- &tmp_services[count]);
- if (!W_ERROR_IS_OK(werr)) {
+ err = smbconf_get_share(ctx, tmp_services,
+ tmp_share_names[count],
+ &tmp_services[count]);
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
}
- werr = WERR_OK;
+ err = SBC_ERR_OK;
*num_shares = tmp_num_shares;
if (tmp_num_shares > 0) {
@@ -158,13 +193,13 @@ WERROR smbconf_get_config(struct smbconf_ctx *ctx,
done:
talloc_free(tmp_ctx);
- return werr;
+ return err;
}
/**
* get the list of share names defined in the configuration.
*/
-WERROR smbconf_get_share_names(struct smbconf_ctx *ctx,
+sbcErr smbconf_get_share_names(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_shares,
char ***share_names)
@@ -185,11 +220,11 @@ bool smbconf_share_exists(struct smbconf_ctx *ctx,
/**
* Add a service if it does not already exist.
*/
-WERROR smbconf_create_share(struct smbconf_ctx *ctx,
+sbcErr smbconf_create_share(struct smbconf_ctx *ctx,
const char *servicename)
{
if ((servicename != NULL) && smbconf_share_exists(ctx, servicename)) {
- return WERR_FILE_EXISTS;
+ return SBC_ERR_FILE_EXISTS;
}
return ctx->ops->create_share(ctx, servicename);
@@ -198,7 +233,7 @@ WERROR smbconf_create_share(struct smbconf_ctx *ctx,
/**
* get a definition of a share (service) from configuration.
*/
-WERROR smbconf_get_share(struct smbconf_ctx *ctx,
+sbcErr smbconf_get_share(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *servicename,
struct smbconf_service **service)
@@ -209,10 +244,10 @@ WERROR smbconf_get_share(struct smbconf_ctx *ctx,
/**
* delete a service from configuration
*/
-WERROR smbconf_delete_share(struct smbconf_ctx *ctx, const char *servicename)
+sbcErr smbconf_delete_share(struct smbconf_ctx *ctx, const char *servicename)
{
if (!smbconf_share_exists(ctx, servicename)) {
- return WERR_NO_SUCH_SERVICE;
+ return SBC_ERR_NO_SUCH_SERVICE;
}
return ctx->ops->delete_share(ctx, servicename);
@@ -221,7 +256,7 @@ WERROR smbconf_delete_share(struct smbconf_ctx *ctx, const char *servicename)
/**
* set a configuration parameter to the value provided.
*/
-WERROR smbconf_set_parameter(struct smbconf_ctx *ctx,
+sbcErr smbconf_set_parameter(struct smbconf_ctx *ctx,
const char *service,
const char *param,
const char *valstr)
@@ -235,30 +270,31 @@ WERROR smbconf_set_parameter(struct smbconf_ctx *ctx,
*
* This also creates [global] when it does not exist.
*/
-WERROR smbconf_set_global_parameter(struct smbconf_ctx *ctx,
+sbcErr smbconf_set_global_parameter(struct smbconf_ctx *ctx,
const char *param, const char *val)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_global_check(ctx);
- if (W_ERROR_IS_OK(werr)) {
- werr = smbconf_set_parameter(ctx, GLOBAL_NAME, param, val);
+ err = smbconf_global_check(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
+ err = smbconf_set_parameter(ctx, GLOBAL_NAME, param, val);
- return werr;
+ return err;
}
/**
* get the value of a configuration parameter as a string
*/
-WERROR smbconf_get_parameter(struct smbconf_ctx *ctx,
+sbcErr smbconf_get_parameter(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
const char *param,
char **valstr)
{
if (valstr == NULL) {
- return WERR_INVALID_PARAM;
+ return SBC_ERR_INVALID_PARAM;
}
return ctx->ops->get_parameter(ctx, mem_ctx, service, param, valstr);
@@ -269,26 +305,28 @@ WERROR smbconf_get_parameter(struct smbconf_ctx *ctx,
*
* Create [global] if it does not exist.
*/
-WERROR smbconf_get_global_parameter(struct smbconf_ctx *ctx,
+sbcErr smbconf_get_global_parameter(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *param,
char **valstr)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_global_check(ctx);
- if (W_ERROR_IS_OK(werr)) {
- werr = smbconf_get_parameter(ctx, mem_ctx, GLOBAL_NAME, param,
- valstr);
+ err = smbconf_global_check(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
- return werr;
+ err = smbconf_get_parameter(ctx, mem_ctx, GLOBAL_NAME, param,
+ valstr);
+
+ return err;
}
/**
* delete a parameter from configuration
*/
-WERROR smbconf_delete_parameter(struct smbconf_ctx *ctx,
+sbcErr smbconf_delete_parameter(struct smbconf_ctx *ctx,
const char *service, const char *param)
{
return ctx->ops->delete_parameter(ctx, service, param);
@@ -299,20 +337,21 @@ WERROR smbconf_delete_parameter(struct smbconf_ctx *ctx,
*
* Create [global] if it does not exist.
*/
-WERROR smbconf_delete_global_parameter(struct smbconf_ctx *ctx,
+sbcErr smbconf_delete_global_parameter(struct smbconf_ctx *ctx,
const char *param)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_global_check(ctx);
- if (W_ERROR_IS_OK(werr)) {
- werr = smbconf_delete_parameter(ctx, GLOBAL_NAME, param);
+ err = smbconf_global_check(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
+ err = smbconf_delete_parameter(ctx, GLOBAL_NAME, param);
- return werr;
+ return err;
}
-WERROR smbconf_get_includes(struct smbconf_ctx *ctx,
+sbcErr smbconf_get_includes(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
uint32_t *num_includes, char ***includes)
@@ -321,72 +360,75 @@ WERROR smbconf_get_includes(struct smbconf_ctx *ctx,
includes);
}
-WERROR smbconf_get_global_includes(struct smbconf_ctx *ctx,
+sbcErr smbconf_get_global_includes(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_includes, char ***includes)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_global_check(ctx);
- if (W_ERROR_IS_OK(werr)) {
- werr = smbconf_get_includes(ctx, mem_ctx, GLOBAL_NAME,
- num_includes, includes);
+ err = smbconf_global_check(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
+ err = smbconf_get_includes(ctx, mem_ctx, GLOBAL_NAME,
+ num_includes, includes);
- return werr;
+ return err;
}
-WERROR smbconf_set_includes(struct smbconf_ctx *ctx,
+sbcErr smbconf_set_includes(struct smbconf_ctx *ctx,
const char *service,
uint32_t num_includes, const char **includes)
{
return ctx->ops->set_includes(ctx, service, num_includes, includes);
}
-WERROR smbconf_set_global_includes(struct smbconf_ctx *ctx,
+sbcErr smbconf_set_global_includes(struct smbconf_ctx *ctx,
uint32_t num_includes,
const char **includes)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_global_check(ctx);
- if (W_ERROR_IS_OK(werr)) {
- werr = smbconf_set_includes(ctx, GLOBAL_NAME,
- num_includes, includes);
+ err = smbconf_global_check(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
+ err = smbconf_set_includes(ctx, GLOBAL_NAME,
+ num_includes, includes);
- return werr;
+ return err;
}
-WERROR smbconf_delete_includes(struct smbconf_ctx *ctx, const char *service)
+sbcErr smbconf_delete_includes(struct smbconf_ctx *ctx, const char *service)
{
return ctx->ops->delete_includes(ctx, service);
}
-WERROR smbconf_delete_global_includes(struct smbconf_ctx *ctx)
+sbcErr smbconf_delete_global_includes(struct smbconf_ctx *ctx)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_global_check(ctx);
- if (W_ERROR_IS_OK(werr)) {
- werr = smbconf_delete_includes(ctx, GLOBAL_NAME);
+ err = smbconf_global_check(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
+ err = smbconf_delete_includes(ctx, GLOBAL_NAME);
- return werr;
+ return err;
}
-WERROR smbconf_transaction_start(struct smbconf_ctx *ctx)
+sbcErr smbconf_transaction_start(struct smbconf_ctx *ctx)
{
return ctx->ops->transaction_start(ctx);
}
-WERROR smbconf_transaction_commit(struct smbconf_ctx *ctx)
+sbcErr smbconf_transaction_commit(struct smbconf_ctx *ctx)
{
return ctx->ops->transaction_commit(ctx);
}
-WERROR smbconf_transaction_cancel(struct smbconf_ctx *ctx)
+sbcErr smbconf_transaction_cancel(struct smbconf_ctx *ctx)
{
return ctx->ops->transaction_cancel(ctx);
}
diff --git a/lib/smbconf/smbconf.h b/lib/smbconf/smbconf.h
index 517302ac88..7f62b06af4 100644
--- a/lib/smbconf/smbconf.h
+++ b/lib/smbconf/smbconf.h
@@ -20,6 +20,39 @@
#ifndef __LIBSMBCONF_H__
#define __LIBSMBCONF_H__
+/**
+ * @defgroup libsmbconf The smbconf API
+ *
+ * libsmbconf is a library to read or, based on the backend, modify the Samba
+ * configuration.
+ *
+ * @{
+ */
+
+/**
+ * @brief Status codes returned from smbconf functions
+ */
+enum _sbcErrType {
+ SBC_ERR_OK = 0, /**< Successful completion **/
+ SBC_ERR_NOT_IMPLEMENTED, /**< Function not implemented **/
+ SBC_ERR_NOT_SUPPORTED, /**< Function not supported **/
+ SBC_ERR_UNKNOWN_FAILURE, /**< General failure **/
+ SBC_ERR_NOMEM, /**< Memory allocation error **/
+ SBC_ERR_INVALID_PARAM, /**< An invalid parameter was supplied **/
+ SBC_ERR_BADFILE, /**< A bad file was supplied **/
+ SBC_ERR_NO_SUCH_SERVICE, /**< There is no such service provided **/
+ SBC_ERR_IO_FAILURE, /**< There was an IO error **/
+ SBC_ERR_CAN_NOT_COMPLETE,/**< Can not complete action **/
+ SBC_ERR_NO_MORE_ITEMS, /**< No more items left **/
+ SBC_ERR_FILE_EXISTS, /**< File already exists **/
+ SBC_ERR_ACCESS_DENIED, /**< Access has been denied **/
+};
+
+typedef enum _sbcErrType sbcErr;
+
+#define SBC_ERROR_IS_OK(x) ((x) == SBC_ERR_OK)
+#define SBC_ERROR_EQUAL(x,y) ((x) == (y))
+
struct smbconf_ctx;
/* the change sequence number */
@@ -27,75 +60,428 @@ struct smbconf_csn {
uint64_t csn;
};
+/** Information about a service */
struct smbconf_service {
- char *name;
- uint32_t num_params;
- char **param_names;
- char **param_values;
+ char *name; /**< The name of the share */
+ uint32_t num_params; /**< The number of parameters of this share */
+ char **param_names; /**< List of the parameter names of this share */
+ char **param_values; /**< List of the parameter values of this share */
};
/*
- * the smbconf API functions
+ * The smbconf API functions
+ */
+
+/**
+ * @brief Translate an error value into a string
+ *
+ * @param error
+ *
+ * @return a pointer to a static string
+ **/
+const char *sbcErrorString(sbcErr error);
+
+/**
+ * @brief Check if the backend requires messaging to be set up.
+ *
+ * Tell whether the backend requires messaging to be set up
+ * for the backend to work correctly.
+ *
+ * @param[in] ctx The smbconf context to check.
+ *
+ * @return True if needed, false if not.
*/
bool smbconf_backend_requires_messaging(struct smbconf_ctx *ctx);
+
+/**
+ * @brief Tell whether the source is writeable.
+ *
+ * @param[in] ctx The smbconf context to check.
+ *
+ * @return True if it is writeable, false if not.
+ */
bool smbconf_is_writeable(struct smbconf_ctx *ctx);
+
+/**
+ * @brief Close the configuration.
+ *
+ * @param[in] ctx The smbconf context to close.
+ */
void smbconf_shutdown(struct smbconf_ctx *ctx);
+
+/**
+ * @brief Detect changes in the configuration.
+ *
+ * Get the change sequence number of the given service/parameter. Service and
+ * parameter strings may be NULL.
+ *
+ * The given change sequence number (csn) struct is filled with the current
+ * csn. smbconf_changed() can also be used for initial retrieval of the csn.
+ *
+ * @param[in] ctx The smbconf context to check for changes.
+ *
+ * @param[in,out] csn The smbconf csn to be filled.
+ *
+ * @param[in] service The service name to check or NULL.
+ *
+ * @param[in] param The param to check or NULL.
+ *
+ * @return True if it has been changed, false if not.
+ */
bool smbconf_changed(struct smbconf_ctx *ctx, struct smbconf_csn *csn,
const char *service, const char *param);
-WERROR smbconf_drop(struct smbconf_ctx *ctx);
-WERROR smbconf_get_config(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Drop the whole configuration (restarting empty).
+ *
+ * @param[in] ctx The smbconf context to drop the config.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_drop(struct smbconf_ctx *ctx);
+
+/**
+ * @brief Get the whole configuration as lists of strings with counts.
+ *
+ * @param[in] ctx The smbconf context to get the lists from.
+ *
+ * @param[in] mem_ctx The memory context to use.
+ *
+ * @param[out] num_shares A pointer to store the number of shares.
+ *
+ * @param[out] services A pointer to store the services.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ *
+ * @see smbconf_service
+ */
+sbcErr smbconf_get_config(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_shares,
struct smbconf_service ***services);
-WERROR smbconf_get_share_names(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Get the list of share names defined in the configuration.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] mem_ctx The memory context to use.
+ *
+ * @param[out] num_shares A pointer to store the number of shares.
+ *
+ * @param[out] share_names A pointer to store the share names.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_get_share_names(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_shares,
char ***share_names);
+
+/**
+ * @brief Check if a share/service of a given name exists.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] servicename The service name to check if it exists.
+ *
+ * @return True if it exists, false if not.
+ */
bool smbconf_share_exists(struct smbconf_ctx *ctx, const char *servicename);
-WERROR smbconf_create_share(struct smbconf_ctx *ctx, const char *servicename);
-WERROR smbconf_get_share(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Add a service if it does not already exist.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] servicename The name of the service to add.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_create_share(struct smbconf_ctx *ctx, const char *servicename);
+
+/**
+ * @brief Get a definition of a share (service) from configuration.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] mem_ctx A memory context to allocate the result.
+ *
+ * @param[in] servicename The service name to get the information from.
+ *
+ * @param[out] service A pointer to store the service information about the
+ * share.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ *
+ * @see smbconf_service
+ */
+sbcErr smbconf_get_share(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *servicename,
struct smbconf_service **service);
-WERROR smbconf_delete_share(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Delete a service from configuration.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] servicename The service name to delete.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_delete_share(struct smbconf_ctx *ctx,
const char *servicename);
-WERROR smbconf_set_parameter(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Set a configuration parameter to the value provided.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] service The service name to set the parameter.
+ *
+ * @param[in] param The name of the parameter to set.
+ *
+ * @param[in] valstr The value to set.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_set_parameter(struct smbconf_ctx *ctx,
const char *service,
const char *param,
const char *valstr);
-WERROR smbconf_set_global_parameter(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Set a global configuration parameter to the value provided.
+ *
+ * This adds a parameter in the [global] service. It also creates [global] if it
+ * doesn't exist.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] param The name of the parameter to set.
+ *
+ * @param[in] val The value to set.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_set_global_parameter(struct smbconf_ctx *ctx,
const char *param, const char *val);
-WERROR smbconf_get_parameter(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Get the value of a configuration parameter as a string.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] mem_ctx The memory context to allocate the string on.
+ *
+ * @param[in] service The name of the service where to find the parameter.
+ *
+ * @param[in] param The parameter to get.
+ *
+ * @param[out] valstr A pointer to store the value as a string.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_get_parameter(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
const char *param,
char **valstr);
-WERROR smbconf_get_global_parameter(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Get the value of a global configuration parameter as a string.
+ *
+ * It also creates [global] if it doesn't exist.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] mem_ctx The memory context to allocate the string on.
+ *
+ * @param[in] param The parameter to get.
+ *
+ * @param[out] valstr A pointer to store the value as a string.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_get_global_parameter(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *param,
char **valstr);
-WERROR smbconf_delete_parameter(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Delete a parameter from the configuration.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] service The service where the parameter can be found.
+ *
+ * @param[in] param The name of the parameter to delete.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_delete_parameter(struct smbconf_ctx *ctx,
const char *service, const char *param);
-WERROR smbconf_delete_global_parameter(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Delete a global parameter from the configuration.
+ *
+ * It also creates [global] if it doesn't exist.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] param The name of the parameter to delete.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_delete_global_parameter(struct smbconf_ctx *ctx,
const char *param);
-WERROR smbconf_get_includes(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Get the list of names of included files.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] mem_ctx The memory context to allocate the names.
+ *
+ * @param[in] service The service name to get the include files.
+ *
+ * @param[out] num_includes A pointer to store the number of included files.
+ *
+ * @param[out] includes A pointer to store the paths of the included files.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_get_includes(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
uint32_t *num_includes, char ***includes);
-WERROR smbconf_get_global_includes(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Get the list of globally included files.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] mem_ctx The memory context to allocate the names.
+ *
+ * @param[out] num_includes A pointer to store the number of included files.
+ *
+ * @param[out] includes A pointer to store the paths of the included files.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_get_global_includes(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_includes, char ***includes);
-WERROR smbconf_set_includes(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Set a list of config files to include on the given service.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] service The service to add includes.
+ *
+ * @param[in] num_includes The number of includes to set.
+ *
+ * @param[in] includes A list of paths to include.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_set_includes(struct smbconf_ctx *ctx,
const char *service,
uint32_t num_includes, const char **includes);
-WERROR smbconf_set_global_includes(struct smbconf_ctx *ctx,
+
+/**
+ * @brief Set a list of config files to include globally.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] num_includes The number of includes to set.
+ *
+ * @param[in] includes A list of paths to include.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_set_global_includes(struct smbconf_ctx *ctx,
uint32_t num_includes,
const char **includes);
-WERROR smbconf_delete_includes(struct smbconf_ctx *ctx, const char *service);
-WERROR smbconf_delete_global_includes(struct smbconf_ctx *ctx);
-WERROR smbconf_transaction_start(struct smbconf_ctx *ctx);
-WERROR smbconf_transaction_commit(struct smbconf_ctx *ctx);
-WERROR smbconf_transaction_cancel(struct smbconf_ctx *ctx);
+/**
+ * @brief Delete include parameter on the given service.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @param[in] service The name of the service to delete the includes from.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_delete_includes(struct smbconf_ctx *ctx, const char *service);
+
+/**
+ * @brief Delete include parameter from the global service.
+ *
+ * @param[in] ctx The smbconf context to use.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_delete_global_includes(struct smbconf_ctx *ctx);
+
+/**
+ * @brief Start a transaction on the configuration backend.
+ *
+ * This is to speed up writes to the registry based backend.
+ *
+ * @param[in] ctx The smbconf context to start the transaction.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ */
+sbcErr smbconf_transaction_start(struct smbconf_ctx *ctx);
+
+/**
+ * @brief Commit a transaction on the configuration backend.
+ *
+ * This is to speed up writes to the registry based backend.
+ *
+ * @param[in] ctx The smbconf context to commit the transaction.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ *
+ * @see smbconf_transaction_start()
+ */
+sbcErr smbconf_transaction_commit(struct smbconf_ctx *ctx);
+
+/**
+ * @brief Cancel a transaction on the configuration backend.
+ *
+ * @param[in] ctx The smbconf context to cancel the transaction.
+ *
+ * @return SBC_ERR_OK on success, a corresponding sbcErr if an
+ * error occurred.
+ *
+ * @see smbconf_transaction_start()
+ */
+sbcErr smbconf_transaction_cancel(struct smbconf_ctx *ctx);
+
+/* @} ******************************************************************/
#endif /* _LIBSMBCONF_H_ */
diff --git a/lib/smbconf/smbconf_private.h b/lib/smbconf/smbconf_private.h
index e6998ad639..e768c30b91 100644
--- a/lib/smbconf/smbconf_private.h
+++ b/lib/smbconf/smbconf_private.h
@@ -27,50 +27,50 @@
#include "lib/smbconf/smbconf.h"
struct smbconf_ops {
- WERROR (*init)(struct smbconf_ctx *ctx, const char *path);
+ sbcErr (*init)(struct smbconf_ctx *ctx, const char *path);
int (*shutdown)(struct smbconf_ctx *ctx);
bool (*requires_messaging)(struct smbconf_ctx *ctx);
bool (*is_writeable)(struct smbconf_ctx *ctx);
- WERROR (*open_conf)(struct smbconf_ctx *ctx);
+ sbcErr (*open_conf)(struct smbconf_ctx *ctx);
int (*close_conf)(struct smbconf_ctx *ctx);
void (*get_csn)(struct smbconf_ctx *ctx, struct smbconf_csn *csn,
const char *service, const char *param);
- WERROR (*drop)(struct smbconf_ctx *ctx);
- WERROR (*get_share_names)(struct smbconf_ctx *ctx,
+ sbcErr (*drop)(struct smbconf_ctx *ctx);
+ sbcErr (*get_share_names)(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_shares,
char ***share_names);
bool (*share_exists)(struct smbconf_ctx *ctx, const char *service);
- WERROR (*create_share)(struct smbconf_ctx *ctx, const char *service);
- WERROR (*get_share)(struct smbconf_ctx *ctx,
+ sbcErr (*create_share)(struct smbconf_ctx *ctx, const char *service);
+ sbcErr (*get_share)(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *servicename,
struct smbconf_service **service);
- WERROR (*delete_share)(struct smbconf_ctx *ctx,
+ sbcErr (*delete_share)(struct smbconf_ctx *ctx,
const char *servicename);
- WERROR (*set_parameter)(struct smbconf_ctx *ctx,
+ sbcErr (*set_parameter)(struct smbconf_ctx *ctx,
const char *service,
const char *param,
const char *valstr);
- WERROR (*get_parameter)(struct smbconf_ctx *ctx,
+ sbcErr (*get_parameter)(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
const char *param,
char **valstr);
- WERROR (*delete_parameter)(struct smbconf_ctx *ctx,
+ sbcErr (*delete_parameter)(struct smbconf_ctx *ctx,
const char *service, const char *param);
- WERROR (*get_includes)(struct smbconf_ctx *ctx,
+ sbcErr (*get_includes)(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
uint32_t *num_includes, char ***includes);
- WERROR (*set_includes)(struct smbconf_ctx *ctx,
+ sbcErr (*set_includes)(struct smbconf_ctx *ctx,
const char *service,
uint32_t num_includes, const char **includes);
- WERROR (*delete_includes)(struct smbconf_ctx *ctx,
+ sbcErr (*delete_includes)(struct smbconf_ctx *ctx,
const char *service);
- WERROR (*transaction_start)(struct smbconf_ctx *ctx);
- WERROR (*transaction_commit)(struct smbconf_ctx *ctx);
- WERROR (*transaction_cancel)(struct smbconf_ctx *ctx);
+ sbcErr (*transaction_start)(struct smbconf_ctx *ctx);
+ sbcErr (*transaction_commit)(struct smbconf_ctx *ctx);
+ sbcErr (*transaction_cancel)(struct smbconf_ctx *ctx);
};
struct smbconf_ctx {
@@ -79,10 +79,10 @@ struct smbconf_ctx {
void *data; /* private data for use in backends */
};
-WERROR smbconf_init_internal(TALLOC_CTX *mem_ctx, struct smbconf_ctx **conf_ctx,
+sbcErr smbconf_init_internal(TALLOC_CTX *mem_ctx, struct smbconf_ctx **conf_ctx,
const char *path, struct smbconf_ops *ops);
-WERROR smbconf_add_string_to_array(TALLOC_CTX *mem_ctx,
+sbcErr smbconf_add_string_to_array(TALLOC_CTX *mem_ctx,
char ***array,
uint32_t count,
const char *string);
diff --git a/lib/smbconf/smbconf_txt.c b/lib/smbconf/smbconf_txt.c
index 501382cc5f..5c4bd27b9d 100644
--- a/lib/smbconf/smbconf_txt.c
+++ b/lib/smbconf/smbconf_txt.c
@@ -27,6 +27,7 @@
#include "includes.h"
#include "smbconf_private.h"
+#include "lib/smbconf/smbconf_txt.h"
struct txt_cache {
uint32_t current_share;
@@ -59,7 +60,7 @@ static struct txt_private_data *pd(struct smbconf_ctx *ctx)
static bool smbconf_txt_do_section(const char *section, void *private_data)
{
- WERROR werr;
+ sbcErr err;
uint32_t idx;
struct txt_private_data *tpd = (struct txt_private_data *)private_data;
struct txt_cache *cache = tpd->cache;
@@ -71,9 +72,9 @@ static bool smbconf_txt_do_section(const char *section, void *private_data)
return true;
}
- werr = smbconf_add_string_to_array(cache, &(cache->share_names),
- cache->num_shares, section);
- if (!W_ERROR_IS_OK(werr)) {
+ err = smbconf_add_string_to_array(cache, &(cache->share_names),
+ cache->num_shares, section);
+ if (!SBC_ERROR_IS_OK(err)) {
return false;
}
cache->current_share = cache->num_shares;
@@ -113,7 +114,7 @@ static bool smbconf_txt_do_parameter(const char *param_name,
const char *param_value,
void *private_data)
{
- WERROR werr;
+ sbcErr err;
char **param_names, **param_values;
uint32_t num_params;
uint32_t idx;
@@ -145,17 +146,17 @@ static bool smbconf_txt_do_parameter(const char *param_name,
}
return true;
}
- werr = smbconf_add_string_to_array(cache,
+ err = smbconf_add_string_to_array(cache,
&(cache->param_names[cache->current_share]),
num_params, param_name);
- if (!W_ERROR_IS_OK(werr)) {
+ if (!SBC_ERROR_IS_OK(err)) {
return false;
}
- werr = smbconf_add_string_to_array(cache,
+ err = smbconf_add_string_to_array(cache,
&(cache->param_values[cache->current_share]),
num_params, param_value);
cache->num_params[cache->current_share]++;
- return W_ERROR_IS_OK(werr);
+ return SBC_ERROR_IS_OK(err);
}
static void smbconf_txt_flush_cache(struct smbconf_ctx *ctx)
@@ -164,7 +165,7 @@ static void smbconf_txt_flush_cache(struct smbconf_ctx *ctx)
pd(ctx)->cache = NULL;
}
-static WERROR smbconf_txt_init_cache(struct smbconf_ctx *ctx)
+static sbcErr smbconf_txt_init_cache(struct smbconf_ctx *ctx)
{
if (pd(ctx)->cache != NULL) {
smbconf_txt_flush_cache(ctx);
@@ -173,40 +174,40 @@ static WERROR smbconf_txt_init_cache(struct smbconf_ctx *ctx)
pd(ctx)->cache = talloc_zero(pd(ctx), struct txt_cache);
if (pd(ctx)->cache == NULL) {
- return WERR_NOMEM;
+ return SBC_ERR_NOMEM;
}
- return WERR_OK;
+ return SBC_ERR_OK;
}
-static WERROR smbconf_txt_load_file(struct smbconf_ctx *ctx)
+static sbcErr smbconf_txt_load_file(struct smbconf_ctx *ctx)
{
- WERROR werr;
+ sbcErr err;
uint64_t new_csn;
if (!file_exist(ctx->path)) {
- return WERR_BADFILE;
+ return SBC_ERR_BADFILE;
}
new_csn = (uint64_t)file_modtime(ctx->path);
if (new_csn == pd(ctx)->csn) {
- return WERR_OK;
+ return SBC_ERR_OK;
}
- werr = smbconf_txt_init_cache(ctx);
- if (!W_ERROR_IS_OK(werr)) {
- return werr;
+ err = smbconf_txt_init_cache(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
if (!pm_process(ctx->path, smbconf_txt_do_section,
smbconf_txt_do_parameter, pd(ctx)))
{
- return WERR_CAN_NOT_COMPLETE;
+ return SBC_ERR_CAN_NOT_COMPLETE;
}
pd(ctx)->csn = new_csn;
- return WERR_OK;
+ return SBC_ERR_OK;
}
@@ -219,24 +220,24 @@ static WERROR smbconf_txt_load_file(struct smbconf_ctx *ctx)
/**
* initialize the text based smbconf backend
*/
-static WERROR smbconf_txt_init(struct smbconf_ctx *ctx, const char *path)
+static sbcErr smbconf_txt_init(struct smbconf_ctx *ctx, const char *path)
{
if (path == NULL) {
- return WERR_BADFILE;
+ return SBC_ERR_BADFILE;
}
ctx->path = talloc_strdup(ctx, path);
if (ctx->path == NULL) {
- return WERR_NOMEM;
+ return SBC_ERR_NOMEM;
}
ctx->data = talloc_zero(ctx, struct txt_private_data);
if (ctx->data == NULL) {
- return WERR_NOMEM;
+ return SBC_ERR_NOMEM;
}
pd(ctx)->verbatim = true;
- return WERR_OK;
+ return SBC_ERR_OK;
}
static int smbconf_txt_shutdown(struct smbconf_ctx *ctx)
@@ -255,7 +256,7 @@ static bool smbconf_txt_is_writeable(struct smbconf_ctx *ctx)
return false;
}
-static WERROR smbconf_txt_open(struct smbconf_ctx *ctx)
+static sbcErr smbconf_txt_open(struct smbconf_ctx *ctx)
{
return smbconf_txt_load_file(ctx);
}
@@ -284,15 +285,15 @@ static void smbconf_txt_get_csn(struct smbconf_ctx *ctx,
/**
* Drop the whole configuration (restarting empty)
*/
-static WERROR smbconf_txt_drop(struct smbconf_ctx *ctx)
+static sbcErr smbconf_txt_drop(struct smbconf_ctx *ctx)
{
- return WERR_NOT_SUPPORTED;
+ return SBC_ERR_NOT_SUPPORTED;
}
/**
* get the list of share names defined in the configuration.
*/
-static WERROR smbconf_txt_get_share_names(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_get_share_names(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
uint32_t *num_shares,
char ***share_names)
@@ -300,17 +301,16 @@ static WERROR smbconf_txt_get_share_names(struct smbconf_ctx *ctx,
uint32_t count;
uint32_t added_count = 0;
TALLOC_CTX *tmp_ctx = NULL;
- WERROR werr = WERR_OK;
+ sbcErr err = SBC_ERR_OK;
char **tmp_share_names = NULL;
if ((num_shares == NULL) || (share_names == NULL)) {
- werr = WERR_INVALID_PARAM;
- goto done;
+ return SBC_ERR_INVALID_PARAM;
}
- werr = smbconf_txt_load_file(ctx);
- if (!W_ERROR_IS_OK(werr)) {
- return werr;
+ err = smbconf_txt_load_file(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
tmp_ctx = talloc_stackframe();
@@ -319,18 +319,18 @@ static WERROR smbconf_txt_get_share_names(struct smbconf_ctx *ctx,
* possibly after NULL section */
if (smbconf_share_exists(ctx, NULL)) {
- werr = smbconf_add_string_to_array(tmp_ctx, &tmp_share_names,
- 0, NULL);
- if (!W_ERROR_IS_OK(werr)) {
+ err = smbconf_add_string_to_array(tmp_ctx, &tmp_share_names,
+ 0, NULL);
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
added_count++;
}
if (smbconf_share_exists(ctx, GLOBAL_NAME)) {
- werr = smbconf_add_string_to_array(tmp_ctx, &tmp_share_names,
+ err = smbconf_add_string_to_array(tmp_ctx, &tmp_share_names,
added_count, GLOBAL_NAME);
- if (!W_ERROR_IS_OK(werr)) {
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
added_count++;
@@ -343,10 +343,10 @@ static WERROR smbconf_txt_get_share_names(struct smbconf_ctx *ctx,
continue;
}
- werr = smbconf_add_string_to_array(tmp_ctx, &tmp_share_names,
+ err = smbconf_add_string_to_array(tmp_ctx, &tmp_share_names,
added_count,
pd(ctx)->cache->share_names[count]);
- if (!W_ERROR_IS_OK(werr)) {
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
added_count++;
@@ -361,7 +361,7 @@ static WERROR smbconf_txt_get_share_names(struct smbconf_ctx *ctx,
done:
talloc_free(tmp_ctx);
- return werr;
+ return err;
}
/**
@@ -370,10 +370,10 @@ done:
static bool smbconf_txt_share_exists(struct smbconf_ctx *ctx,
const char *servicename)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_txt_load_file(ctx);
- if (!W_ERROR_IS_OK(werr)) {
+ err = smbconf_txt_load_file(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
return false;
}
@@ -385,29 +385,29 @@ static bool smbconf_txt_share_exists(struct smbconf_ctx *ctx,
/**
* Add a service if it does not already exist
*/
-static WERROR smbconf_txt_create_share(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_create_share(struct smbconf_ctx *ctx,
const char *servicename)
{
- return WERR_NOT_SUPPORTED;
+ return SBC_ERR_NOT_SUPPORTED;
}
/**
* get a definition of a share (service) from configuration.
*/
-static WERROR smbconf_txt_get_share(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_get_share(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *servicename,
struct smbconf_service **service)
{
- WERROR werr;
+ sbcErr err;
uint32_t sidx, count;
bool found;
TALLOC_CTX *tmp_ctx = NULL;
struct smbconf_service *tmp_service = NULL;
- werr = smbconf_txt_load_file(ctx);
- if (!W_ERROR_IS_OK(werr)) {
- return werr;
+ err = smbconf_txt_load_file(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
found = smbconf_find_in_array(servicename,
@@ -415,90 +415,86 @@ static WERROR smbconf_txt_get_share(struct smbconf_ctx *ctx,
pd(ctx)->cache->num_shares,
&sidx);
if (!found) {
- return WERR_NO_SUCH_SERVICE;
+ return SBC_ERR_NO_SUCH_SERVICE;
}
tmp_ctx = talloc_stackframe();
tmp_service = talloc_zero(tmp_ctx, struct smbconf_service);
if (tmp_service == NULL) {
- werr = WERR_NOMEM;
+ err = SBC_ERR_NOMEM;
goto done;
}
if (servicename != NULL) {
tmp_service->name = talloc_strdup(tmp_service, servicename);
if (tmp_service->name == NULL) {
- werr = WERR_NOMEM;
+ err = SBC_ERR_NOMEM;
goto done;
}
}
for (count = 0; count < pd(ctx)->cache->num_params[sidx]; count++) {
- werr = smbconf_add_string_to_array(tmp_service,
+ err = smbconf_add_string_to_array(tmp_service,
&(tmp_service->param_names),
count,
pd(ctx)->cache->param_names[sidx][count]);
- if (!W_ERROR_IS_OK(werr)) {
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
- werr = smbconf_add_string_to_array(tmp_service,
+ err = smbconf_add_string_to_array(tmp_service,
&(tmp_service->param_values),
count,
pd(ctx)->cache->param_values[sidx][count]);
- if (!W_ERROR_IS_OK(werr)) {
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
}
tmp_service->num_params = count;
- if (count > 0) {
- *service = talloc_move(mem_ctx, &tmp_service);
- } else {
- *service = NULL;
- }
+ *service = talloc_move(mem_ctx, &tmp_service);
done:
talloc_free(tmp_ctx);
- return werr;
+ return err;
}
/**
* delete a service from configuration
*/
-static WERROR smbconf_txt_delete_share(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_delete_share(struct smbconf_ctx *ctx,
const char *servicename)
{
- return WERR_NOT_SUPPORTED;
+ return SBC_ERR_NOT_SUPPORTED;
}
/**
* set a configuration parameter to the value provided.
*/
-static WERROR smbconf_txt_set_parameter(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_set_parameter(struct smbconf_ctx *ctx,
const char *service,
const char *param,
const char *valstr)
{
- return WERR_NOT_SUPPORTED;
+ return SBC_ERR_NOT_SUPPORTED;
}
/**
* get the value of a configuration parameter as a string
*/
-static WERROR smbconf_txt_get_parameter(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_get_parameter(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
const char *param,
char **valstr)
{
- WERROR werr;
+ sbcErr err;
bool found;
uint32_t share_index, param_index;
- werr = smbconf_txt_load_file(ctx);
- if (!W_ERROR_IS_OK(werr)) {
- return werr;
+ err = smbconf_txt_load_file(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
found = smbconf_find_in_array(service,
@@ -506,7 +502,7 @@ static WERROR smbconf_txt_get_parameter(struct smbconf_ctx *ctx,
pd(ctx)->cache->num_shares,
&share_index);
if (!found) {
- return WERR_NO_SUCH_SERVICE;
+ return SBC_ERR_NO_SUCH_SERVICE;
}
found = smbconf_reverse_find_in_array(param,
@@ -514,45 +510,45 @@ static WERROR smbconf_txt_get_parameter(struct smbconf_ctx *ctx,
pd(ctx)->cache->num_params[share_index],
&param_index);
if (!found) {
- return WERR_INVALID_PARAM;
+ return SBC_ERR_INVALID_PARAM;
}
*valstr = talloc_strdup(mem_ctx,
pd(ctx)->cache->param_values[share_index][param_index]);
if (*valstr == NULL) {
- return WERR_NOMEM;
+ return SBC_ERR_NOMEM;
}
- return WERR_OK;
+ return SBC_ERR_OK;
}
/**
* delete a parameter from configuration
*/
-static WERROR smbconf_txt_delete_parameter(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_delete_parameter(struct smbconf_ctx *ctx,
const char *service,
const char *param)
{
- return WERR_NOT_SUPPORTED;
+ return SBC_ERR_NOT_SUPPORTED;
}
-static WERROR smbconf_txt_get_includes(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_get_includes(struct smbconf_ctx *ctx,
TALLOC_CTX *mem_ctx,
const char *service,
uint32_t *num_includes,
char ***includes)
{
- WERROR werr;
+ sbcErr err;
bool found;
uint32_t sidx, count;
TALLOC_CTX *tmp_ctx = NULL;
uint32_t tmp_num_includes = 0;
char **tmp_includes = NULL;
- werr = smbconf_txt_load_file(ctx);
- if (!W_ERROR_IS_OK(werr)) {
- return werr;
+ err = smbconf_txt_load_file(ctx);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
found = smbconf_find_in_array(service,
@@ -560,7 +556,7 @@ static WERROR smbconf_txt_get_includes(struct smbconf_ctx *ctx,
pd(ctx)->cache->num_shares,
&sidx);
if (!found) {
- return WERR_NO_SUCH_SERVICE;
+ return SBC_ERR_NO_SUCH_SERVICE;
}
tmp_ctx = talloc_stackframe();
@@ -569,11 +565,11 @@ static WERROR smbconf_txt_get_includes(struct smbconf_ctx *ctx,
if (strequal(pd(ctx)->cache->param_names[sidx][count],
"include"))
{
- werr = smbconf_add_string_to_array(tmp_ctx,
+ err = smbconf_add_string_to_array(tmp_ctx,
&tmp_includes,
tmp_num_includes,
pd(ctx)->cache->param_values[sidx][count]);
- if (!W_ERROR_IS_OK(werr)) {
+ if (!SBC_ERROR_IS_OK(err)) {
goto done;
}
tmp_num_includes++;
@@ -584,47 +580,47 @@ static WERROR smbconf_txt_get_includes(struct smbconf_ctx *ctx,
if (*num_includes > 0) {
*includes = talloc_move(mem_ctx, &tmp_includes);
if (*includes == NULL) {
- werr = WERR_NOMEM;
+ err = SBC_ERR_NOMEM;
goto done;
}
} else {
*includes = NULL;
}
- werr = WERR_OK;
+ err = SBC_ERR_OK;
done:
talloc_free(tmp_ctx);
- return werr;
+ return err;
}
-static WERROR smbconf_txt_set_includes(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_set_includes(struct smbconf_ctx *ctx,
const char *service,
uint32_t num_includes,
const char **includes)
{
- return WERR_NOT_SUPPORTED;
+ return SBC_ERR_NOT_SUPPORTED;
}
-static WERROR smbconf_txt_delete_includes(struct smbconf_ctx *ctx,
+static sbcErr smbconf_txt_delete_includes(struct smbconf_ctx *ctx,
const char *service)
{
- return WERR_NOT_SUPPORTED;
+ return SBC_ERR_NOT_SUPPORTED;
}
-static WERROR smbconf_txt_transaction_start(struct smbconf_ctx *ctx)
+static sbcErr smbconf_txt_transaction_start(struct smbconf_ctx *ctx)
{
- return WERR_OK;
+ return SBC_ERR_OK;
}
-static WERROR smbconf_txt_transaction_commit(struct smbconf_ctx *ctx)
+static sbcErr smbconf_txt_transaction_commit(struct smbconf_ctx *ctx)
{
- return WERR_OK;
+ return SBC_ERR_OK;
}
-static WERROR smbconf_txt_transaction_cancel(struct smbconf_ctx *ctx)
+static sbcErr smbconf_txt_transaction_cancel(struct smbconf_ctx *ctx)
{
- return WERR_OK;
+ return SBC_ERR_OK;
}
static struct smbconf_ops smbconf_ops_txt = {
@@ -657,15 +653,15 @@ static struct smbconf_ops smbconf_ops_txt = {
* initialize the smbconf text backend
* the only function that is exported from this module
*/
-WERROR smbconf_init_txt(TALLOC_CTX *mem_ctx,
+sbcErr smbconf_init_txt(TALLOC_CTX *mem_ctx,
struct smbconf_ctx **conf_ctx,
const char *path)
{
- WERROR werr;
+ sbcErr err;
- werr = smbconf_init_internal(mem_ctx, conf_ctx, path, &smbconf_ops_txt);
- if (!W_ERROR_IS_OK(werr)) {
- return werr;
+ err = smbconf_init_internal(mem_ctx, conf_ctx, path, &smbconf_ops_txt);
+ if (!SBC_ERROR_IS_OK(err)) {
+ return err;
}
return smbconf_txt_load_file(*conf_ctx);
diff --git a/lib/smbconf/smbconf_txt.h b/lib/smbconf/smbconf_txt.h
index 688bbc9d48..72d6207521 100644
--- a/lib/smbconf/smbconf_txt.h
+++ b/lib/smbconf/smbconf_txt.h
@@ -26,7 +26,7 @@ struct smbconf_ctx;
* initialization functions for the text/file backend modules
*/
-WERROR smbconf_init_txt(TALLOC_CTX *mem_ctx,
+sbcErr smbconf_init_txt(TALLOC_CTX *mem_ctx,
struct smbconf_ctx **conf_ctx,
const char *path);
diff --git a/lib/smbconf/smbconf_util.c b/lib/smbconf/smbconf_util.c
index b309a3454b..86a95988f1 100644
--- a/lib/smbconf/smbconf_util.c
+++ b/lib/smbconf/smbconf_util.c
@@ -39,43 +39,43 @@ static int smbconf_destroy_ctx(struct smbconf_ctx *ctx)
* After the work with the configuration is completed, smbconf_shutdown()
* should be called.
*/
-WERROR smbconf_init_internal(TALLOC_CTX *mem_ctx, struct smbconf_ctx **conf_ctx,
+sbcErr smbconf_init_internal(TALLOC_CTX *mem_ctx, struct smbconf_ctx **conf_ctx,
const char *path, struct smbconf_ops *ops)
{
- WERROR werr = WERR_OK;
+ sbcErr err = SBC_ERR_OK;
struct smbconf_ctx *ctx;
if (conf_ctx == NULL) {
- return WERR_INVALID_PARAM;
+ return SBC_ERR_INVALID_PARAM;
}
ctx = talloc_zero(mem_ctx, struct smbconf_ctx);
if (ctx == NULL) {
- return WERR_NOMEM;
+ return SBC_ERR_NOMEM;
}
ctx->ops = ops;
- werr = ctx->ops->init(ctx, path);
- if (!W_ERROR_IS_OK(werr)) {
+ err = ctx->ops->init(ctx, path);
+ if (!SBC_ERROR_IS_OK(err)) {
goto fail;
}
talloc_set_destructor(ctx, smbconf_destroy_ctx);
*conf_ctx = ctx;
- return werr;
+ return err;
fail:
talloc_free(ctx);
- return werr;
+ return err;
}
/**
* add a string to a talloced array of strings.
*/
-WERROR smbconf_add_string_to_array(TALLOC_CTX *mem_ctx,
+sbcErr smbconf_add_string_to_array(TALLOC_CTX *mem_ctx,
char ***array,
uint32_t count,
const char *string)
@@ -83,12 +83,12 @@ WERROR smbconf_add_string_to_array(TALLOC_CTX *mem_ctx,
char **new_array = NULL;
if (array == NULL) {
- return WERR_INVALID_PARAM;
+ return SBC_ERR_INVALID_PARAM;
}
new_array = talloc_realloc(mem_ctx, *array, char *, count + 1);
if (new_array == NULL) {
- return WERR_NOMEM;
+ return SBC_ERR_NOMEM;
}
if (string == NULL) {
@@ -97,13 +97,13 @@ WERROR smbconf_add_string_to_array(TALLOC_CTX *mem_ctx,
new_array[count] = talloc_strdup(new_array, string);
if (new_array[count] == NULL) {
talloc_free(new_array);
- return WERR_NOMEM;
+ return SBC_ERR_NOMEM;
}
}
*array = new_array;
- return WERR_OK;
+ return SBC_ERR_OK;
}
bool smbconf_find_in_array(const char *string, char **list,
diff --git a/lib/smbconf/wscript_build b/lib/smbconf/wscript_build
new file mode 100644
index 0000000000..cf92c3ba6b
--- /dev/null
+++ b/lib/smbconf/wscript_build
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+bld.SAMBA_SUBSYSTEM('LIBSMBCONF',
+ source='smbconf.c smbconf_txt.c smbconf_util.c',
+ deps='talloc'
+ )
+
diff --git a/lib/socket_wrapper/config.mk b/lib/socket_wrapper/config.mk
deleted file mode 100644
index 60cfb3209a..0000000000
--- a/lib/socket_wrapper/config.mk
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################
-# Start SUBSYSTEM SOCKET_WRAPPER
-[SUBSYSTEM::SOCKET_WRAPPER]
-PRIVATE_DEPENDENCIES = LIBREPLACE_NETWORK
-# End SUBSYSTEM SOCKET_WRAPPER
-##############################
-
-SOCKET_WRAPPER_OBJ_FILES = $(socketwrappersrcdir)/socket_wrapper.o
diff --git a/lib/socket_wrapper/socket_wrapper.c b/lib/socket_wrapper/socket_wrapper.c
index 9d732ee652..6eb4979d3f 100644
--- a/lib/socket_wrapper/socket_wrapper.c
+++ b/lib/socket_wrapper/socket_wrapper.c
@@ -296,8 +296,8 @@ static int convert_un_in(const struct sockaddr_un *un, struct sockaddr *in, sock
switch(type) {
case SOCKET_TYPE_CHAR_TCP:
case SOCKET_TYPE_CHAR_UDP: {
- struct sockaddr_in *in2 = (struct sockaddr_in *)in;
-
+ struct sockaddr_in *in2 = (struct sockaddr_in *)(void *)in;
+
if ((*len) < sizeof(*in2)) {
errno = EINVAL;
return -1;
@@ -314,8 +314,8 @@ static int convert_un_in(const struct sockaddr_un *un, struct sockaddr *in, sock
#ifdef HAVE_IPV6
case SOCKET_TYPE_CHAR_TCP_V6:
case SOCKET_TYPE_CHAR_UDP_V6: {
- struct sockaddr_in6 *in2 = (struct sockaddr_in6 *)in;
-
+ struct sockaddr_in6 *in2 = (struct sockaddr_in6 *)(void *)in;
+
if ((*len) < sizeof(*in2)) {
errno = EINVAL;
return -1;
@@ -352,7 +352,7 @@ static int convert_in_un_remote(struct socket_info *si, const struct sockaddr *i
switch (inaddr->sa_family) {
case AF_INET: {
const struct sockaddr_in *in =
- (const struct sockaddr_in *)inaddr;
+ (const struct sockaddr_in *)(const void *)inaddr;
unsigned int addr = ntohl(in->sin_addr.s_addr);
char u_type = '\0';
char b_type = '\0';
@@ -395,8 +395,8 @@ static int convert_in_un_remote(struct socket_info *si, const struct sockaddr *i
#ifdef HAVE_IPV6
case AF_INET6: {
const struct sockaddr_in6 *in =
- (const struct sockaddr_in6 *)inaddr;
- struct in6_addr cmp;
+ (const struct sockaddr_in6 *)(const void *)inaddr;
+ struct in6_addr cmp1, cmp2;
switch (si->type) {
case SOCK_STREAM:
@@ -411,9 +411,10 @@ static int convert_in_un_remote(struct socket_info *si, const struct sockaddr *i
prt = ntohs(in->sin6_port);
- cmp = in->sin6_addr;
- cmp.s6_addr[15] = 0;
- if (IN6_ARE_ADDR_EQUAL(swrap_ipv6(), &cmp)) {
+ cmp1 = *swrap_ipv6();
+ cmp2 = in->sin6_addr;
+ cmp2.s6_addr[15] = 0;
+ if (IN6_ARE_ADDR_EQUAL(&cmp1, &cmp2)) {
iface = in->sin6_addr.s6_addr[15];
} else {
errno = ENETUNREACH;
@@ -460,7 +461,7 @@ static int convert_in_un_alloc(struct socket_info *si, const struct sockaddr *in
switch (si->family) {
case AF_INET: {
const struct sockaddr_in *in =
- (const struct sockaddr_in *)inaddr;
+ (const struct sockaddr_in *)(const void *)inaddr;
unsigned int addr = ntohl(in->sin_addr.s_addr);
char u_type = '\0';
char d_type = '\0';
@@ -511,8 +512,8 @@ static int convert_in_un_alloc(struct socket_info *si, const struct sockaddr *in
#ifdef HAVE_IPV6
case AF_INET6: {
const struct sockaddr_in6 *in =
- (const struct sockaddr_in6 *)inaddr;
- struct in6_addr cmp;
+ (const struct sockaddr_in6 *)(const void *)inaddr;
+ struct in6_addr cmp1, cmp2;
switch (si->type) {
case SOCK_STREAM:
@@ -527,11 +528,12 @@ static int convert_in_un_alloc(struct socket_info *si, const struct sockaddr *in
prt = ntohs(in->sin6_port);
- cmp = in->sin6_addr;
- cmp.s6_addr[15] = 0;
+ cmp1 = *swrap_ipv6();
+ cmp2 = in->sin6_addr;
+ cmp2.s6_addr[15] = 0;
if (IN6_IS_ADDR_UNSPECIFIED(&in->sin6_addr)) {
iface = socket_wrapper_default_iface();
- } else if (IN6_ARE_ADDR_EQUAL(swrap_ipv6(), &cmp)) {
+ } else if (IN6_ARE_ADDR_EQUAL(&cmp1, &cmp2)) {
iface = in->sin6_addr.s6_addr[15];
} else {
errno = EADDRNOTAVAIL;
@@ -584,10 +586,14 @@ static struct socket_info *find_socket_info(int fd)
static int sockaddr_convert_to_un(struct socket_info *si, const struct sockaddr *in_addr, socklen_t in_len,
struct sockaddr_un *out_addr, int alloc_sock, int *bcast)
{
+ struct sockaddr *out = (struct sockaddr *)(void *)out_addr;
if (!out_addr)
return 0;
- out_addr->sun_family = AF_UNIX;
+ out->sa_family = AF_UNIX;
+#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ out->sa_len = sizeof(*out_addr);
+#endif
switch (in_addr->sa_family) {
case AF_INET:
@@ -610,7 +616,7 @@ static int sockaddr_convert_to_un(struct socket_info *si, const struct sockaddr
default:
break;
}
-
+
errno = EAFNOSUPPORT;
return -1;
}
@@ -622,6 +628,8 @@ static int sockaddr_convert_from_un(const struct socket_info *si,
struct sockaddr *out_addr,
socklen_t *out_addrlen)
{
+ int ret;
+
if (out_addr == NULL || out_addrlen == NULL)
return 0;
@@ -643,7 +651,11 @@ static int sockaddr_convert_from_un(const struct socket_info *si,
errno = ESOCKTNOSUPPORT;
return -1;
}
- return convert_un_in(in_addr, out_addr, out_addrlen);
+ ret = convert_un_in(in_addr, out_addr, out_addrlen);
+#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ out_addr->sa_len = *out_addrlen;
+#endif
+ return ret;
default:
break;
}
@@ -959,7 +971,7 @@ static uint8_t *swrap_packet_init(struct timeval *tval,
ip->v6.ver_prio = 0x60; /* version 4 and 5 * 32 bit words */
ip->v6.flow_label_high = 0x00;
ip->v6.flow_label_low = 0x0000;
- ip->v6.payload_length = htons(wire_len - icmp_truncate_len);//TODO
+ ip->v6.payload_length = htons(wire_len - icmp_truncate_len); /* TODO */
ip->v6.next_header = protocol;
memcpy(ip->v6.src_addr, src_in6->sin6_addr.s6_addr, 16);
memcpy(ip->v6.dest_addr, dest_in6->sin6_addr.s6_addr, 16);
@@ -1009,7 +1021,7 @@ static uint8_t *swrap_packet_init(struct timeval *tval,
ip->v6.ver_prio = 0x60; /* version 4 and 5 * 32 bit words */
ip->v6.flow_label_high = 0x00;
ip->v6.flow_label_low = 0x0000;
- ip->v6.payload_length = htons(wire_len - icmp_truncate_len);//TODO
+ ip->v6.payload_length = htons(wire_len - icmp_truncate_len); /* TODO */
ip->v6.next_header = protocol;
memcpy(ip->v6.src_addr, dest_in6->sin6_addr.s6_addr, 16);
memcpy(ip->v6.dest_addr, src_in6->sin6_addr.s6_addr, 16);
@@ -1494,7 +1506,7 @@ _PUBLIC_ int swrap_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
memset(&un_addr, 0, sizeof(un_addr));
memset(&un_my_addr, 0, sizeof(un_my_addr));
- ret = real_accept(s, (struct sockaddr *)&un_addr, &un_addrlen);
+ ret = real_accept(s, (struct sockaddr *)(void *)&un_addr, &un_addrlen);
if (ret == -1) {
free(my_addr);
return ret;
@@ -1526,13 +1538,15 @@ _PUBLIC_ int swrap_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
child_si->peername = sockaddr_dup(my_addr, len);
if (addr != NULL && addrlen != NULL) {
- *addrlen = len;
- if (*addrlen >= len)
- memcpy(addr, my_addr, len);
- *addrlen = 0;
+ size_t copy_len = MIN(*addrlen, len);
+ if (copy_len > 0) {
+ memcpy(addr, my_addr, copy_len);
+ }
+ *addrlen = len;
}
- ret = real_getsockname(fd, (struct sockaddr *)&un_my_addr, &un_my_addrlen);
+ ret = real_getsockname(fd, (struct sockaddr *)(void *)&un_my_addr,
+ &un_my_addrlen);
if (ret == -1) {
free(child_si);
close(fd);
@@ -1659,8 +1673,9 @@ static int swrap_auto_bind(struct socket_info *si, int family)
"%s/"SOCKET_FORMAT, socket_wrapper_dir(),
type, socket_wrapper_default_iface(), port);
if (stat(un_addr.sun_path, &st) == 0) continue;
-
- ret = real_bind(si->fd, (struct sockaddr *)&un_addr, sizeof(un_addr));
+
+ ret = real_bind(si->fd, (struct sockaddr *)(void *)&un_addr,
+ sizeof(un_addr));
if (ret == -1) return ret;
si->tmp_path = strdup(un_addr.sun_path);
@@ -1685,6 +1700,7 @@ _PUBLIC_ int swrap_connect(int s, const struct sockaddr *serv_addr, socklen_t ad
int ret;
struct sockaddr_un un_addr;
struct socket_info *si = find_socket_info(s);
+ int bcast = 0;
if (!si) {
return real_connect(s, serv_addr, addrlen);
@@ -1700,16 +1716,22 @@ _PUBLIC_ int swrap_connect(int s, const struct sockaddr *serv_addr, socklen_t ad
return -1;
}
- ret = sockaddr_convert_to_un(si, (const struct sockaddr *)serv_addr, addrlen, &un_addr, 0, NULL);
+ ret = sockaddr_convert_to_un(si, serv_addr,
+ addrlen, &un_addr, 0, &bcast);
if (ret == -1) return -1;
+ if (bcast) {
+ errno = ENETUNREACH;
+ return -1;
+ }
+
if (si->type == SOCK_DGRAM) {
si->defer_connect = 1;
ret = 0;
} else {
swrap_dump_packet(si, serv_addr, SWRAP_CONNECT_SEND, NULL, 0);
- ret = real_connect(s, (struct sockaddr *)&un_addr,
+ ret = real_connect(s, (struct sockaddr *)(void *)&un_addr,
sizeof(struct sockaddr_un));
}
@@ -1745,12 +1767,12 @@ _PUBLIC_ int swrap_bind(int s, const struct sockaddr *myaddr, socklen_t addrlen)
si->myname_len = addrlen;
si->myname = sockaddr_dup(myaddr, addrlen);
- ret = sockaddr_convert_to_un(si, (const struct sockaddr *)myaddr, addrlen, &un_addr, 1, &si->bcast);
+ ret = sockaddr_convert_to_un(si, myaddr, addrlen, &un_addr, 1, &si->bcast);
if (ret == -1) return -1;
unlink(un_addr.sun_path);
- ret = real_bind(s, (struct sockaddr *)&un_addr,
+ ret = real_bind(s, (struct sockaddr *)(void *)&un_addr,
sizeof(struct sockaddr_un));
if (ret == 0) {
@@ -1849,6 +1871,223 @@ _PUBLIC_ int swrap_setsockopt(int s, int level, int optname, const void *o
}
}
+_PUBLIC_ int swrap_ioctl(int s, int r, void *p)
+{
+ int ret;
+ struct socket_info *si = find_socket_info(s);
+ int value;
+
+ if (!si) {
+ return real_ioctl(s, r, p);
+ }
+
+ ret = real_ioctl(s, r, p);
+
+ switch (r) {
+ case FIONREAD:
+ value = *((int *)p);
+ if (ret == -1 && errno != EAGAIN && errno != ENOBUFS) {
+ swrap_dump_packet(si, NULL, SWRAP_PENDING_RST, NULL, 0);
+ } else if (value == 0) { /* END OF FILE */
+ swrap_dump_packet(si, NULL, SWRAP_PENDING_RST, NULL, 0);
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t swrap_sendmsg_before(struct socket_info *si,
+ struct msghdr *msg,
+ struct iovec *tmp_iov,
+ struct sockaddr_un *tmp_un,
+ const struct sockaddr_un **to_un,
+ const struct sockaddr **to,
+ int *bcast)
+{
+ size_t i, len = 0;
+ ssize_t ret;
+
+ if (to_un) {
+ *to_un = NULL;
+ }
+ if (to) {
+ *to = NULL;
+ }
+ if (bcast) {
+ *bcast = 0;
+ }
+
+ switch (si->type) {
+ case SOCK_STREAM:
+ if (!si->connected) {
+ errno = ENOTCONN;
+ return -1;
+ }
+
+ if (msg->msg_iovlen == 0) {
+ break;
+ }
+
+ /*
+ * cut down to 1500 byte packets for stream sockets,
+ * which makes it easier to format PCAP capture files
+ * (as the caller will simply continue from here)
+ */
+
+ for (i=0; i < msg->msg_iovlen; i++) {
+ size_t nlen;
+ nlen = len + msg->msg_iov[i].iov_len;
+ if (nlen > 1500) {
+ break;
+ }
+ }
+ msg->msg_iovlen = i;
+ if (msg->msg_iovlen == 0) {
+ *tmp_iov = msg->msg_iov[0];
+ tmp_iov->iov_len = MIN(tmp_iov->iov_len, 1500);
+ msg->msg_iov = tmp_iov;
+ msg->msg_iovlen = 1;
+ }
+ break;
+
+ case SOCK_DGRAM:
+ if (si->connected) {
+ if (msg->msg_name) {
+ errno = EISCONN;
+ return -1;
+ }
+ } else {
+ const struct sockaddr *msg_name;
+ msg_name = (const struct sockaddr *)msg->msg_name;
+
+ if (msg_name == NULL) {
+ errno = ENOTCONN;
+ return -1;
+ }
+
+
+ ret = sockaddr_convert_to_un(si, msg_name, msg->msg_namelen,
+ tmp_un, 0, bcast);
+ if (ret == -1) return -1;
+
+ if (to_un) {
+ *to_un = tmp_un;
+ }
+ if (to) {
+ *to = msg_name;
+ }
+ msg->msg_name = tmp_un;
+ msg->msg_namelen = sizeof(*tmp_un);
+ }
+
+ if (si->bound == 0) {
+ ret = swrap_auto_bind(si, si->family);
+ if (ret == -1) return -1;
+ }
+
+ if (!si->defer_connect) {
+ break;
+ }
+
+ ret = sockaddr_convert_to_un(si, si->peername, si->peername_len,
+ tmp_un, 0, NULL);
+ if (ret == -1) return -1;
+
+ ret = real_connect(si->fd, (struct sockaddr *)(void *)tmp_un,
+ sizeof(*tmp_un));
+
+ /* to give better errors */
+ if (ret == -1 && errno == ENOENT) {
+ errno = EHOSTUNREACH;
+ }
+
+ if (ret == -1) {
+ return ret;
+ }
+
+ si->defer_connect = 0;
+ break;
+ default:
+ errno = EHOSTUNREACH;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void swrap_sendmsg_after(struct socket_info *si,
+ struct msghdr *msg,
+ const struct sockaddr *to,
+ ssize_t ret)
+{
+ int saved_errno = errno;
+ size_t i, len = 0;
+ uint8_t *buf;
+ off_t ofs = 0;
+ size_t avail = 0;
+ size_t remain;
+
+ /* to give better errors */
+ if (ret == -1 && saved_errno == ENOENT) {
+ saved_errno = EHOSTUNREACH;
+ }
+
+ for (i=0; i < msg->msg_iovlen; i++) {
+ avail += msg->msg_iov[i].iov_len;
+ }
+
+ if (ret == -1) {
+ remain = MIN(80, avail);
+ } else {
+ remain = ret;
+ }
+
+ /* we capture it as one single packet */
+ buf = (uint8_t *)malloc(remain);
+ if (!buf) {
+ /* we just not capture the packet */
+ errno = saved_errno;
+ return;
+ }
+
+ for (i=0; i < msg->msg_iovlen; i++) {
+ size_t this_time = MIN(remain, msg->msg_iov[i].iov_len);
+ memcpy(buf + ofs,
+ msg->msg_iov[i].iov_base,
+ this_time);
+ ofs += this_time;
+ remain -= this_time;
+ }
+ len = ofs;
+
+ switch (si->type) {
+ case SOCK_STREAM:
+ if (ret == -1) {
+ swrap_dump_packet(si, NULL, SWRAP_SEND, buf, len);
+ swrap_dump_packet(si, NULL, SWRAP_SEND_RST, NULL, 0);
+ } else {
+ swrap_dump_packet(si, NULL, SWRAP_SEND, buf, len);
+ }
+ break;
+
+ case SOCK_DGRAM:
+ if (si->connected) {
+ to = si->peername;
+ }
+ if (ret == -1) {
+ swrap_dump_packet(si, to, SWRAP_SENDTO, buf, len);
+ swrap_dump_packet(si, to, SWRAP_SENDTO_UNREACH, buf, len);
+ } else {
+ swrap_dump_packet(si, to, SWRAP_SENDTO, buf, len);
+ }
+ break;
+ }
+
+ free(buf);
+ errno = saved_errno;
+}
+
_PUBLIC_ ssize_t swrap_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlen)
{
struct sockaddr_un un_addr;
@@ -1863,7 +2102,7 @@ _PUBLIC_ ssize_t swrap_recvfrom(int s, void *buf, size_t len, int flags, struct
}
if (!from) {
- from = (struct sockaddr *)&ss;
+ from = (struct sockaddr *)(void *)&ss;
fromlen = &ss_len;
}
@@ -1876,7 +2115,8 @@ _PUBLIC_ ssize_t swrap_recvfrom(int s, void *buf, size_t len, int flags, struct
/* irix 6.4 forgets to null terminate the sun_path string :-( */
memset(&un_addr, 0, sizeof(un_addr));
- ret = real_recvfrom(s, buf, len, flags, (struct sockaddr *)&un_addr, &un_addrlen);
+ ret = real_recvfrom(s, buf, len, flags,
+ (struct sockaddr *)(void *)&un_addr, &un_addrlen);
if (ret == -1)
return ret;
@@ -1893,8 +2133,11 @@ _PUBLIC_ ssize_t swrap_recvfrom(int s, void *buf, size_t len, int flags, struct
_PUBLIC_ ssize_t swrap_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen)
{
+ struct msghdr msg;
+ struct iovec tmp;
struct sockaddr_un un_addr;
- int ret;
+ const struct sockaddr_un *to_un = NULL;
+ ssize_t ret;
struct socket_info *si = find_socket_info(s);
int bcast = 0;
@@ -1902,116 +2145,53 @@ _PUBLIC_ ssize_t swrap_sendto(int s, const void *buf, size_t len, int flags, con
return real_sendto(s, buf, len, flags, to, tolen);
}
- if (si->connected) {
- if (to) {
- errno = EISCONN;
- return -1;
- }
-
- to = si->peername;
- tolen = si->peername_len;
- }
+ tmp.iov_base = discard_const_p(char, buf);
+ tmp.iov_len = len;
- switch (si->type) {
- case SOCK_STREAM:
- /* cut down to 1500 byte packets for stream sockets,
- * which makes it easier to format PCAP capture files
- * (as the caller will simply continue from here) */
- len = MIN(len, 1500);
-
- ret = real_send(s, buf, len, flags);
- break;
- case SOCK_DGRAM:
- if (si->bound == 0) {
- ret = swrap_auto_bind(si, si->family);
- if (ret == -1) return -1;
- }
-
- ret = sockaddr_convert_to_un(si, to, tolen, &un_addr, 0, &bcast);
- if (ret == -1) return -1;
-
- if (bcast) {
- struct stat st;
- unsigned int iface;
- unsigned int prt = ntohs(((const struct sockaddr_in *)to)->sin_port);
- char type;
-
- type = SOCKET_TYPE_CHAR_UDP;
-
- for(iface=0; iface <= MAX_WRAPPED_INTERFACES; iface++) {
- snprintf(un_addr.sun_path, sizeof(un_addr.sun_path), "%s/"SOCKET_FORMAT,
- socket_wrapper_dir(), type, iface, prt);
- if (stat(un_addr.sun_path, &st) != 0) continue;
-
- /* ignore the any errors in broadcast sends */
- real_sendto(s, buf, len, flags, (struct sockaddr *)&un_addr, sizeof(un_addr));
- }
-
- swrap_dump_packet(si, to, SWRAP_SENDTO, buf, len);
-
- return len;
- }
+ ZERO_STRUCT(msg);
+ msg.msg_name = discard_const_p(struct sockaddr, to); /* optional address */
+ msg.msg_namelen = tolen; /* size of address */
+ msg.msg_iov = &tmp; /* scatter/gather array */
+ msg.msg_iovlen = 1; /* # elements in msg_iov */
+#if 0 /* not available on solaris */
+ msg.msg_control = NULL; /* ancillary data, see below */
+ msg.msg_controllen = 0; /* ancillary data buffer len */
+ msg.msg_flags = 0; /* flags on received message */
+#endif
- if (si->defer_connect) {
- ret = real_connect(s, (struct sockaddr *)&un_addr,
- sizeof(un_addr));
+ ret = swrap_sendmsg_before(si, &msg, &tmp, &un_addr, &to_un, &to, &bcast);
+ if (ret == -1) return -1;
- /* to give better errors */
- if (ret == -1 && errno == ENOENT) {
- errno = EHOSTUNREACH;
- }
+ buf = msg.msg_iov[0].iov_base;
+ len = msg.msg_iov[0].iov_len;
- if (ret == -1) {
- return ret;
- }
- si->defer_connect = 0;
- }
+ if (bcast) {
+ struct stat st;
+ unsigned int iface;
+ unsigned int prt = ntohs(((const struct sockaddr_in *)to)->sin_port);
+ char type;
- ret = real_sendto(s, buf, len, flags, (struct sockaddr *)&un_addr, sizeof(un_addr));
- break;
- default:
- ret = -1;
- errno = EHOSTUNREACH;
- break;
- }
-
- /* to give better errors */
- if (ret == -1 && errno == ENOENT) {
- errno = EHOSTUNREACH;
- }
+ type = SOCKET_TYPE_CHAR_UDP;
- if (ret == -1) {
- swrap_dump_packet(si, to, SWRAP_SENDTO, buf, len);
- swrap_dump_packet(si, to, SWRAP_SENDTO_UNREACH, buf, len);
- } else {
- swrap_dump_packet(si, to, SWRAP_SENDTO, buf, ret);
- }
+ for(iface=0; iface <= MAX_WRAPPED_INTERFACES; iface++) {
+ snprintf(un_addr.sun_path, sizeof(un_addr.sun_path), "%s/"SOCKET_FORMAT,
+ socket_wrapper_dir(), type, iface, prt);
+ if (stat(un_addr.sun_path, &st) != 0) continue;
- return ret;
-}
+ /* ignore the any errors in broadcast sends */
+ real_sendto(s, buf, len, flags,
+ (struct sockaddr *)(void *)&un_addr,
+ sizeof(un_addr));
+ }
-_PUBLIC_ int swrap_ioctl(int s, int r, void *p)
-{
- int ret;
- struct socket_info *si = find_socket_info(s);
- int value;
+ swrap_dump_packet(si, to, SWRAP_SENDTO, buf, len);
- if (!si) {
- return real_ioctl(s, r, p);
+ return len;
}
- ret = real_ioctl(s, r, p);
+ ret = real_sendto(s, buf, len, flags, msg.msg_name, msg.msg_namelen);
- switch (r) {
- case FIONREAD:
- value = *((int *)p);
- if (ret == -1 && errno != EAGAIN && errno != ENOBUFS) {
- swrap_dump_packet(si, NULL, SWRAP_PENDING_RST, NULL, 0);
- } else if (value == 0) { /* END OF FILE */
- swrap_dump_packet(si, NULL, SWRAP_PENDING_RST, NULL, 0);
- }
- break;
- }
+ swrap_sendmsg_after(si, &msg, to, ret);
return ret;
}
@@ -2075,126 +2255,133 @@ _PUBLIC_ ssize_t swrap_read(int s, void *buf, size_t len)
_PUBLIC_ ssize_t swrap_send(int s, const void *buf, size_t len, int flags)
{
- int ret;
+ struct msghdr msg;
+ struct iovec tmp;
+ struct sockaddr_un un_addr;
+ ssize_t ret;
struct socket_info *si = find_socket_info(s);
if (!si) {
return real_send(s, buf, len, flags);
}
- if (si->type == SOCK_STREAM) {
- /* cut down to 1500 byte packets for stream sockets,
- * which makes it easier to format PCAP capture files
- * (as the caller will simply continue from here) */
- len = MIN(len, 1500);
- }
-
- if (si->defer_connect) {
- struct sockaddr_un un_addr;
- int bcast = 0;
-
- if (si->bound == 0) {
- ret = swrap_auto_bind(si, si->family);
- if (ret == -1) return -1;
- }
-
- ret = sockaddr_convert_to_un(si, si->peername, si->peername_len,
- &un_addr, 0, &bcast);
- if (ret == -1) return -1;
+ tmp.iov_base = discard_const_p(char, buf);
+ tmp.iov_len = len;
- ret = real_connect(s, (struct sockaddr *)&un_addr,
- sizeof(un_addr));
+ ZERO_STRUCT(msg);
+ msg.msg_name = NULL; /* optional address */
+ msg.msg_namelen = 0; /* size of address */
+ msg.msg_iov = &tmp; /* scatter/gather array */
+ msg.msg_iovlen = 1; /* # elements in msg_iov */
+#if 0 /* not available on solaris */
+ msg.msg_control = NULL; /* ancillary data, see below */
+ msg.msg_controllen = 0; /* ancillary data buffer len */
+ msg.msg_flags = 0; /* flags on received message */
+#endif
- /* to give better errors */
- if (ret == -1 && errno == ENOENT) {
- errno = EHOSTUNREACH;
- }
+ ret = swrap_sendmsg_before(si, &msg, &tmp, &un_addr, NULL, NULL, NULL);
+ if (ret == -1) return -1;
- if (ret == -1) {
- return ret;
- }
- si->defer_connect = 0;
- }
+ buf = msg.msg_iov[0].iov_base;
+ len = msg.msg_iov[0].iov_len;
ret = real_send(s, buf, len, flags);
- if (ret == -1) {
- swrap_dump_packet(si, NULL, SWRAP_SEND, buf, len);
- swrap_dump_packet(si, NULL, SWRAP_SEND_RST, NULL, 0);
- } else {
- swrap_dump_packet(si, NULL, SWRAP_SEND, buf, ret);
- }
+ swrap_sendmsg_after(si, &msg, NULL, ret);
return ret;
}
-_PUBLIC_ ssize_t swrap_sendmsg(int s, const struct msghdr *msg, int flags)
+_PUBLIC_ ssize_t swrap_sendmsg(int s, const struct msghdr *omsg, int flags)
{
- int ret;
- uint8_t *buf;
- off_t ofs = 0;
- size_t i;
- size_t remain;
-
+ struct msghdr msg;
+ struct iovec tmp;
+ struct sockaddr_un un_addr;
+ const struct sockaddr_un *to_un = NULL;
+ const struct sockaddr *to = NULL;
+ ssize_t ret;
struct socket_info *si = find_socket_info(s);
+ int bcast = 0;
if (!si) {
- return real_sendmsg(s, msg, flags);
- }
+ return real_sendmsg(s, omsg, flags);
+ }
+
+ tmp.iov_base = NULL;
+ tmp.iov_len = 0;
+
+ msg = *omsg;
+#if 0
+ msg.msg_name = omsg->msg_name; /* optional address */
+ msg.msg_namelen = omsg->msg_namelen; /* size of address */
+ msg.msg_iov = omsg->msg_iov; /* scatter/gather array */
+ msg.msg_iovlen = omsg->msg_iovlen; /* # elements in msg_iov */
+ /* the following is not available on solaris */
+ msg.msg_control = omsg->msg_control; /* ancillary data, see below */
+ msg.msg_controllen = omsg->msg_controllen; /* ancillary data buffer len */
+ msg.msg_flags = omsg->msg_flags; /* flags on received message */
+#endif
- if (si->defer_connect) {
- struct sockaddr_un un_addr;
- int bcast = 0;
+ ret = swrap_sendmsg_before(si, &msg, &tmp, &un_addr, &to_un, &to, &bcast);
+ if (ret == -1) return -1;
- if (si->bound == 0) {
- ret = swrap_auto_bind(si, si->family);
- if (ret == -1) return -1;
+ if (bcast) {
+ struct stat st;
+ unsigned int iface;
+ unsigned int prt = ntohs(((const struct sockaddr_in *)to)->sin_port);
+ char type;
+ size_t i, len = 0;
+ uint8_t *buf;
+ off_t ofs = 0;
+ size_t avail = 0;
+ size_t remain;
+
+ for (i=0; i < msg.msg_iovlen; i++) {
+ avail += msg.msg_iov[i].iov_len;
}
- ret = sockaddr_convert_to_un(si, si->peername, si->peername_len,
- &un_addr, 0, &bcast);
- if (ret == -1) return -1;
+ len = avail;
+ remain = avail;
- ret = real_connect(s, (struct sockaddr *)&un_addr,
- sizeof(un_addr));
+ /* we capture it as one single packet */
+ buf = (uint8_t *)malloc(remain);
+ if (!buf) {
+ return -1;
+ }
- /* to give better errors */
- if (ret == -1 && errno == ENOENT) {
- errno = EHOSTUNREACH;
+ for (i=0; i < msg.msg_iovlen; i++) {
+ size_t this_time = MIN(remain, msg.msg_iov[i].iov_len);
+ memcpy(buf + ofs,
+ msg.msg_iov[i].iov_base,
+ this_time);
+ ofs += this_time;
+ remain -= this_time;
}
- if (ret == -1) {
- return ret;
+ type = SOCKET_TYPE_CHAR_UDP;
+
+ for(iface=0; iface <= MAX_WRAPPED_INTERFACES; iface++) {
+ snprintf(un_addr.sun_path, sizeof(un_addr.sun_path), "%s/"SOCKET_FORMAT,
+ socket_wrapper_dir(), type, iface, prt);
+ if (stat(un_addr.sun_path, &st) != 0) continue;
+
+ msg.msg_name = &un_addr; /* optional address */
+ msg.msg_namelen = sizeof(un_addr); /* size of address */
+
+ /* ignore the any errors in broadcast sends */
+ real_sendmsg(s, &msg, flags);
}
- si->defer_connect = 0;
- }
- ret = real_sendmsg(s, msg, flags);
- remain = ret;
-
- /* we capture it as one single packet */
- buf = (uint8_t *)malloc(ret);
- if (!buf) {
- /* we just not capture the packet */
- errno = 0;
- return ret;
- }
-
- for (i=0; i < msg->msg_iovlen; i++) {
- size_t this_time = MIN(remain, msg->msg_iov[i].iov_len);
- memcpy(buf + ofs,
- msg->msg_iov[i].iov_base,
- this_time);
- ofs += this_time;
- remain -= this_time;
- }
-
- swrap_dump_packet(si, NULL, SWRAP_SEND, buf, ret);
- free(buf);
- if (ret == -1) {
- swrap_dump_packet(si, NULL, SWRAP_SEND_RST, NULL, 0);
+ swrap_dump_packet(si, to, SWRAP_SENDTO, buf, len);
+ free(buf);
+
+ return len;
}
+ ret = real_sendmsg(s, &msg, flags);
+
+ swrap_sendmsg_after(si, &msg, to, ret);
+
return ret;
}
@@ -2208,12 +2395,17 @@ int swrap_readv(int s, const struct iovec *vector, size_t count)
return real_readv(s, vector, count);
}
+ if (!si->connected) {
+ errno = ENOTCONN;
+ return -1;
+ }
+
if (si->type == SOCK_STREAM && count > 0) {
/* cut down to 1500 byte packets for stream sockets,
* which makes it easier to format PCAP capture files
* (as the caller will simply continue from here) */
size_t i, len = 0;
-
+
for (i=0; i < count; i++) {
size_t nlen;
nlen = len + vector[i].iov_len;
@@ -2267,65 +2459,36 @@ int swrap_readv(int s, const struct iovec *vector, size_t count)
int swrap_writev(int s, const struct iovec *vector, size_t count)
{
- int ret;
+ struct msghdr msg;
+ struct iovec tmp;
+ struct sockaddr_un un_addr;
+ ssize_t ret;
struct socket_info *si = find_socket_info(s);
- struct iovec v;
if (!si) {
return real_writev(s, vector, count);
}
- if (si->type == SOCK_STREAM && count > 0) {
- /* cut down to 1500 byte packets for stream sockets,
- * which makes it easier to format PCAP capture files
- * (as the caller will simply continue from here) */
- size_t i, len = 0;
+ tmp.iov_base = NULL;
+ tmp.iov_len = 0;
- for (i=0; i < count; i++) {
- size_t nlen;
- nlen = len + vector[i].iov_len;
- if (nlen > 1500) {
- break;
- }
- }
- count = i;
- if (count == 0) {
- v = vector[0];
- v.iov_len = MIN(v.iov_len, 1500);
- vector = &v;
- count = 1;
- }
- }
-
- ret = real_writev(s, vector, count);
- if (ret == -1) {
- swrap_dump_packet(si, NULL, SWRAP_SEND_RST, NULL, 0);
- } else {
- uint8_t *buf;
- off_t ofs = 0;
- size_t i;
- size_t remain = ret;
+ ZERO_STRUCT(msg);
+ msg.msg_name = NULL; /* optional address */
+ msg.msg_namelen = 0; /* size of address */
+ msg.msg_iov = discard_const_p(struct iovec, vector); /* scatter/gather array */
+ msg.msg_iovlen = count; /* # elements in msg_iov */
+#if 0 /* not available on solaris */
+ msg.msg_control = NULL; /* ancillary data, see below */
+ msg.msg_controllen = 0; /* ancillary data buffer len */
+ msg.msg_flags = 0; /* flags on received message */
+#endif
- /* we capture it as one single packet */
- buf = (uint8_t *)malloc(ret);
- if (!buf) {
- /* we just not capture the packet */
- errno = 0;
- return ret;
- }
+ ret = swrap_sendmsg_before(si, &msg, &tmp, &un_addr, NULL, NULL, NULL);
+ if (ret == -1) return -1;
- for (i=0; i < count; i++) {
- size_t this_time = MIN(remain, vector[i].iov_len);
- memcpy(buf + ofs,
- vector[i].iov_base,
- this_time);
- ofs += this_time;
- remain -= this_time;
- }
+ ret = real_writev(s, msg.msg_iov, msg.msg_iovlen);
- swrap_dump_packet(si, NULL, SWRAP_SEND, buf, ret);
- free(buf);
- }
+ swrap_sendmsg_after(si, &msg, NULL, ret);
return ret;
}
diff --git a/lib/socket_wrapper/socket_wrapper.h b/lib/socket_wrapper/socket_wrapper.h
index 472aa19ced..77af6feadd 100644
--- a/lib/socket_wrapper/socket_wrapper.h
+++ b/lib/socket_wrapper/socket_wrapper.h
@@ -50,6 +50,7 @@ int swrap_setsockopt(int s, int level, int optname, const void *optval, so
ssize_t swrap_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlen);
ssize_t swrap_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen);
ssize_t swrap_sendmsg(int s, const struct msghdr *msg, int flags);
+ssize_t swrap_recvmsg(int s, struct msghdr *msg, int flags);
int swrap_ioctl(int s, int req, void *ptr);
ssize_t swrap_recv(int s, void *buf, size_t len, int flags);
ssize_t swrap_read(int s, void *buf, size_t len);
@@ -115,6 +116,11 @@ int swrap_close(int);
#endif
#define sendmsg(s,msg,flags) swrap_sendmsg(s,msg,flags)
+#ifdef recvmsg
+#undef recvmsg
+#endif
+#define recvmsg(s,msg,flags) swrap_recvmsg(s,msg,flags)
+
#ifdef ioctl
#undef ioctl
#endif
diff --git a/lib/socket_wrapper/testsuite.c b/lib/socket_wrapper/testsuite.c
index e6e08e3be6..9274e7f485 100644
--- a/lib/socket_wrapper/testsuite.c
+++ b/lib/socket_wrapper/testsuite.c
@@ -95,7 +95,7 @@ static bool test_socket_wrapper_default_iface(struct torture_context *tctx)
struct torture_suite *torture_local_socket_wrapper(TALLOC_CTX *mem_ctx)
{
struct torture_suite *suite = torture_suite_create(mem_ctx,
- "SOCKET-WRAPPER");
+ "socket-wrapper");
torture_suite_add_simple_test(suite, "socket_wrapper_dir", test_socket_wrapper_dir);
torture_suite_add_simple_test(suite, "socket", test_swrap_socket);
diff --git a/lib/socket_wrapper/wscript b/lib/socket_wrapper/wscript
new file mode 100644
index 0000000000..9da578fb63
--- /dev/null
+++ b/lib/socket_wrapper/wscript
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+import Options
+
+def set_options(opt):
+ gr = opt.option_group('developer options')
+ gr.add_option('--enable-socket-wrapper',
+ help=("Turn on socket wrapper library (default=no)"),
+ action="store_true", dest='enable_socket_wrapper', default=False)
+
+def configure(conf):
+ if (Options.options.enable_socket_wrapper or Options.options.developer or Options.options.enable_selftest):
+ conf.DEFINE('SOCKET_WRAPPER', 1)
+ conf.ADD_GLOBAL_DEPENDENCY('socket_wrapper')
+
diff --git a/lib/socket_wrapper/wscript_build b/lib/socket_wrapper/wscript_build
new file mode 100644
index 0000000000..a81c7aa61a
--- /dev/null
+++ b/lib/socket_wrapper/wscript_build
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+bld.SAMBA_LIBRARY('socket_wrapper',
+ source='socket_wrapper.c',
+ group='base_libraries',
+ private_library=True,
+ enabled=bld.CONFIG_SET('SOCKET_WRAPPER'),
+ deps='replace')
+
diff --git a/lib/subunit/Apache-2.0 b/lib/subunit/Apache-2.0
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/lib/subunit/Apache-2.0
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lib/subunit/BSD b/lib/subunit/BSD
new file mode 100644
index 0000000000..fa130cd529
--- /dev/null
+++ b/lib/subunit/BSD
@@ -0,0 +1,26 @@
+Copyright (c) Robert Collins and Subunit contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Robert Collins nor the names of Subunit contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS''
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
diff --git a/lib/subunit/COPYING b/lib/subunit/COPYING
new file mode 100644
index 0000000000..3ba50f8e08
--- /dev/null
+++ b/lib/subunit/COPYING
@@ -0,0 +1,36 @@
+Subunit is licensed under two licenses, the Apache License, Version 2.0 or the
+3-clause BSD License. You may use this project under either of these licenses
+- choose the one that works best for you.
+
+We require contributions to be licensed under both licenses. The primary
+difference between them is that the Apache license takes care of potential
+issues with Patents and other intellectual property concerns. This is
+important to Subunit as Subunit wants to be license compatible in a very
+broad manner to allow reuse and incorporation into other projects.
+
+Generally every source file in Subunit needs a license grant under both these
+licenses. As the code is shipped as a single unit, a brief form is used:
+----
+Copyright (c) [yyyy][,yyyy]* [name or 'Subunit Contributors']
+
+Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+license at the users choice. A copy of both licenses are available in the
+project source as Apache-2.0 and BSD. You may not use this file except in
+compliance with one of these two licences.
+
+Unless required by applicable law or agreed to in writing, software
+distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+license you chose for the specific language governing permissions and
+limitations under that license.
+----
+
+Code that has been incorporated into Subunit from other projects will
+naturally be under its own license, and will retain that license.
+
+A known list of such code is maintained here:
+* The python/iso8601 module by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+* The runtests.py and python/subunit/tests/TestUtil.py module are GPL test
+ support modules. They are not installed by Subunit - they are only ever
+ used on the build machine. Copyright 2004 Canonical Limited.
diff --git a/lib/subunit/INSTALL b/lib/subunit/INSTALL
new file mode 100644
index 0000000000..06122552ed
--- /dev/null
+++ b/lib/subunit/INSTALL
@@ -0,0 +1,32 @@
+To install subunit
+------------------
+
+Bootstrap::
+ autoreconf -vi
+Configure::
+ ./configure
+Install::
+ make install
+
+Dependencies
+------------
+
+* Python for the filters
+* 'testtools' (On Debian and Ubuntu systems the 'python-testtools' package,
+ the testtools package on pypi, or https://launchpad.net/testtools) for
+ the extended test API which permits attachments. Version 0.9.8 or newer is
+ required. Of particular note, http://testtools.python-hosting.com/ is not
+ the testtools you want.
+* A C compiler for the C bindings
+* Perl for the Perl tools (including subunit-diff)
+* Check to run the subunit test suite.
+* python-gtk2 if you wish to use subunit2gtk
+* python-junitxml if you wish to use subunit2junitxml
+* pkg-config for configure detection of supporting libraries.
+
+Binary packages
+---------------
+
+A number of distributions now include subunit, you can try via your package
+manager. The authors maintain a personal package archive on Launchpad::
+ https://launchpad.net/~testing-cabal/+archive/archive
diff --git a/lib/subunit/MANIFEST.in b/lib/subunit/MANIFEST.in
new file mode 100644
index 0000000000..7c449cf7e7
--- /dev/null
+++ b/lib/subunit/MANIFEST.in
@@ -0,0 +1,21 @@
+exclude .bzrignore
+exclude aclocal.m4
+prune autom4te.cache
+prune c
+prune c++
+prune compile
+exclude configure*
+exclude depcomp
+exclude INSTALL
+exclude install-sh
+exclude lib*
+exclude ltmain.sh
+prune m4
+exclude Makefile*
+exclude missing
+prune perl
+exclude py-compile
+prune shell
+prune python/iso8601
+exclude stamp-h1
+include NEWS
diff --git a/lib/subunit/Makefile.am b/lib/subunit/Makefile.am
new file mode 100644
index 0000000000..716fa0fe21
--- /dev/null
+++ b/lib/subunit/Makefile.am
@@ -0,0 +1,136 @@
+EXTRA_DIST = \
+ .bzrignore \
+ Apache-2.0 \
+ BSD \
+ INSTALL \
+ Makefile.am \
+ NEWS \
+ README \
+ c++/README \
+ c/README \
+ c/check-subunit-0.9.3.patch \
+ c/check-subunit-0.9.5.patch \
+ c/check-subunit-0.9.6.patch \
+ perl/Makefile.PL.in \
+ perl/lib/Subunit.pm \
+ perl/lib/Subunit/Diff.pm \
+ perl/subunit-diff \
+ python/iso8601/LICENSE \
+ python/iso8601/README \
+ python/iso8601/README.subunit \
+ python/iso8601/setup.py \
+ python/iso8601/test_iso8601.py \
+ python/subunit/tests/TestUtil.py \
+ python/subunit/tests/__init__.py \
+ python/subunit/tests/sample-script.py \
+ python/subunit/tests/sample-two-script.py \
+ python/subunit/tests/test_chunked.py \
+ python/subunit/tests/test_details.py \
+ python/subunit/tests/test_progress_model.py \
+ python/subunit/tests/test_subunit_filter.py \
+ python/subunit/tests/test_subunit_stats.py \
+ python/subunit/tests/test_subunit_tags.py \
+ python/subunit/tests/test_tap2subunit.py \
+ python/subunit/tests/test_test_protocol.py \
+ python/subunit/tests/test_test_results.py \
+ runtests.py \
+ shell/README \
+ shell/share/subunit.sh \
+ shell/subunit-ui.patch \
+ shell/tests/test_function_output.sh \
+ shell/tests/test_source_library.sh
+
+ACLOCAL_AMFLAGS = -I m4
+
+include_subunitdir = $(includedir)/subunit
+
+dist_bin_SCRIPTS = \
+ filters/subunit-filter \
+ filters/subunit-ls \
+ filters/subunit-stats \
+ filters/subunit-tags \
+ filters/subunit2gtk \
+ filters/subunit2junitxml \
+ filters/subunit2pyunit \
+ filters/tap2subunit
+
+TESTS_ENVIRONMENT = SHELL_SHARE='$(top_srcdir)/shell/share/' PYTHONPATH='$(abs_top_srcdir)/python':${PYTHONPATH}
+TESTS = runtests.py $(check_PROGRAMS)
+
+## install libsubunit.pc
+pcdatadir = $(libdir)/pkgconfig
+pcdata_DATA = \
+ libsubunit.pc \
+ libcppunit_subunit.pc
+
+pkgpython_PYTHON = \
+ python/subunit/__init__.py \
+ python/subunit/chunked.py \
+ python/subunit/details.py \
+ python/subunit/iso8601.py \
+ python/subunit/progress_model.py \
+ python/subunit/run.py \
+ python/subunit/test_results.py
+
+lib_LTLIBRARIES = libsubunit.la
+lib_LTLIBRARIES += libcppunit_subunit.la
+
+include_subunit_HEADERS = \
+ c/include/subunit/child.h \
+ c++/SubunitTestProgressListener.h
+
+check_PROGRAMS = \
+ c/tests/test_child
+
+check_SCRIPTS = \
+ runtests.py
+
+libsubunit_la_SOURCES = \
+ c/lib/child.c \
+ c/include/subunit/child.h
+
+libcppunit_subunit_la_SOURCES = \
+ c++/SubunitTestProgressListener.cpp \
+ c++/SubunitTestProgressListener.h
+
+tests_LDADD = @CHECK_LIBS@ $(top_builddir)/libsubunit.la
+c_tests_test_child_CFLAGS = -I$(top_srcdir)/c/include $(SUBUNIT_CFLAGS) @CHECK_CFLAGS@
+c_tests_test_child_LDADD = $(tests_LDADD)
+
+
+all-local: perl/Makefile
+ $(MAKE) -C perl all
+
+check-local: perl/Makefile
+ $(MAKE) -C perl check
+
+clean-local:
+ find . -type f -name "*.pyc" -exec rm {} ';'
+ rm -f perl/Makefile
+
+# Remove perl dir for VPATH builds.
+distclean-local:
+ -rmdir perl > /dev/null
+ -rm perl/Makefile.PL > /dev/null
+
+install-exec-local: perl/Makefile
+ $(MAKE) -C perl install
+
+mostlyclean-local:
+ rm -rf perl/blib
+ rm -rf perl/pm_to_blib
+
+# 'uninstall' perl files during distcheck
+uninstall-local:
+ if [ "_inst" = `basename ${prefix}` ]; then \
+ $(MAKE) -C perl uninstall_distcheck; \
+ rm -f "$(DESTDIR)$(bindir)"/subunit-diff; \
+ fi
+
+# The default for MakeMaker; can be overridden by exporting
+INSTALLDIRS ?= site
+
+perl/Makefile: perl/Makefile.PL
+ mkdir -p perl
+ cd perl && perl Makefile.PL INSTALLDIRS=${INSTALLDIRS}
+ -rm perl/Makefile.old > /dev/null
diff --git a/lib/subunit/NEWS b/lib/subunit/NEWS
new file mode 100644
index 0000000000..f1fd9ce06f
--- /dev/null
+++ b/lib/subunit/NEWS
@@ -0,0 +1,224 @@
+---------------------
+subunit release notes
+---------------------
+
+NEXT (In development)
+---------------------
+
+The Subunit Python test runner ``python -m subunit.run`` can now report the
+test ids and also filter via a test id list file thanks to improvements in
+``testtools.run``. See the testtools manual, or testrepository - a major
+user of such functionality.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* The ``subunit.run`` Python module supports ``-l`` and ``--load-list`` as
+ per ``testtools.run``. This required a dependency bump due to a small
+ API change in ``testtools``. (Robert Collins)
+
+
+0.0.6
+-----
+
+This release of subunit fixes a number of unicode related bugs. This depends on
+testtools 0.9.4 and will not function without it. Thanks to Tres Seaver there
+is also an optional native setup.py file for use with easy_install and the
+like.
+
+BUG FIXES
+~~~~~~~~~
+
+* Be consistent about delivering unicode content to testtools StringException
+ class which has become (appropriately) conservative. (Robert Collins)
+
+* Fix incorrect reference to subunit_test_failf in c/README.
+ (Brad Hards, #524341)
+
+* Fix incorrect ordering of tags method parameters in TestResultDecorator. This
+ is purely cosmetic as the parameters are passed down with no interpretation.
+ (Robert Collins, #537611)
+
+* Old style tracebacks with no encoding info are now treated as UTF8 rather
+ than some-random-codec-like-ascii. (Robert Collins)
+
+* On windows, ProtocolTestCase and TestProtocolClient will set their streams to
+ binary mode by calling into msvcrt; this avoids having their input or output
+ mangled by the default line ending translation on that platform.
+ (Robert Collins, Martin [gz], #579296)
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Subunit now has a setup.py for python deployments that are not using
+ distribution packages. (Tres Seaver, #538181)
+
+* Subunit now supports test discovery by building on the testtools support for
+ it. You can take advantage of it with "python -m subunit.run discover [path]"
+ and see "python -m subunit.run discover --help" for more options.
+
+* Subunit now uses the improved unicode support in testtools when outputting
+ non-details based test information; this should consistently UTF8 encode such
+ strings.
+
+* The Python TestProtocolClient now flushes output on startTest and stopTest.
+ (Martin [gz]).
+
+
+0.0.5
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* make check was failing if subunit wasn't installed due to a missing include
+ path for the test program test_child.
+
+* make distcheck was failing due to a missing $(top_srcdir) rune.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* New filter `subunit-notify` that will show a notification window with test
+ statistics when the test run finishes.
+
+* subunit.run will now pipe its output to the command in the
+ SUBUNIT_FORMATTER environment variable, if set.
+
+0.0.4
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* subunit2junitxml -f required a value, this is now fixed and -f acts as a
+ boolean switch with no parameter.
+
+* Building with autoconf 2.65 is now supported.
+
+
+0.0.3
+-----
+
+ CHANGES:
+
+ * License change, by unanimous agreement of contributors to BSD/Apache
+ License Version 2.0. This makes Subunit compatible with more testing
+ frameworks.
+
+ IMPROVEMENTS:
+
+ * CPPUnit is now directly supported: subunit builds a cppunit listener
+ ``libcppunit-subunit``.
+
+ * In the python API ``addExpectedFailure`` and ``addUnexpectedSuccess``
+ from python 2.7/3.1 are now supported. ``addExpectedFailure`` is
+ serialised as ``xfail``, and ``addUnexpectedSuccess`` as ``success``.
+ The ``ProtocolTestCase`` parser now calls outcomes using an extended
+ API that permits attaching arbitrary MIME resources such as text files
+ log entries and so on. This extended API is being developed with the
+ Python testing community, and is in flux. ``TestResult`` objects that
+ do not support the API will be detected and transparently downgraded
+ back to the regular Python unittest API.
+
+ * INSTALLDIRS can be set to control the perl MakeMaker 'INSTALLDIRS'
+ variable when installing.
+
+ * Multipart test outcomes are tentatively supported; the exact protocol
+ for them, both serialiser and object is not yet finalised. Testers and
+ early adopters are sought. As part of this and also in an attempt to
+ provider a more precise focus on the wire protocol and toolchain,
+ Subunit now depends on testtools (http://launchpad.net/testtools)
+ release 0.9.0 or newer.
+
+ * subunit2junitxml supports a new option, --forward which causes it
+ to forward the raw subunit stream in a similar manner to tee. This
+ is used with the -o option to both write a xml report and get some
+ other subunit filter to process the stream.
+
+ * The C library now has ``subunit_test_skip``.
+
+ BUG FIXES:
+
+ * Install progress_model.py correctly.
+
+ * Non-gcc builds will no longer try to use gcc specific flags.
+ (Thanks trondn-norbye)
+
+ API CHANGES:
+
+ INTERNALS:
+
+0.0.2
+-----
+
+ CHANGES:
+
+ IMPROVEMENTS:
+
+ * A number of filters now support ``--no-passthrough`` to cause all
+ non-subunit content to be discarded. This is useful when precise control
+ over what is output is required - such as with subunit2junitxml.
+
+ * A small perl parser is now included, and a new ``subunit-diff`` tool
+ using that is included. (Jelmer Vernooij)
+
+ * Subunit streams can now include optional, incremental lookahead
+ information about progress. This allows reporters to make estimates
+ about completion, when such information is available. See the README
+ under ``progress`` for more details.
+
+ * ``subunit-filter`` now supports regex filtering via ``--with`` and
+ ``without`` options. (Martin Pool)
+
+ * ``subunit2gtk`` has been added, a filter that shows a GTK summary of a
+ test stream.
+
+ * ``subunit2pyunit`` has a --progress flag which will cause the bzrlib
+ test reporter to be used, which has a textual progress bar. This requires
+ a recent bzrlib as a minor bugfix was required in bzrlib to support this.
+
+ * ``subunit2junitxml`` has been added. This filter converts a subunit
+ stream to a single JUnit style XML stream using the pyjunitxml
+ python library.
+
+ * The shell functions support skipping via ``subunit_skip_test`` now.
+
+ BUG FIXES:
+
+ * ``xfail`` outcomes are now passed to python TestResult's via
+ addExpectedFailure if it is present on the TestResult. Python 2.6 and
+ earlier which do not have this function will have ``xfail`` outcomes
+ passed through as success outcomes as earlier versions of subunit did.
+
+ API CHANGES:
+
+ * tags are no longer passed around in python via the ``TestCase.tags``
+ attribute. Instead ``TestResult.tags(new_tags, gone_tags)`` is called,
+ and like in the protocol, if called while a test is active only applies
+ to that test. (Robert Collins)
+
+ * ``TestResultFilter`` takes a new optional constructor parameter
+ ``filter_predicate``. (Martin Pool)
+
+ * When a progress: directive is encountered in a subunit stream, the
+ python bindings now call the ``progress(offset, whence)`` method on
+ ``TestResult``.
+
+ * When a time: directive is encountered in a subunit stream, the python
+ bindings now call the ``time(seconds)`` method on ``TestResult``.
+
+ INTERNALS:
+
+ * (python) Added ``subunit.test_results.AutoTimingTestResultDecorator``. Most
+ users of subunit will want to wrap their ``TestProtocolClient`` objects
+ in this decorator to get test timing data for performance analysis.
+
+ * (python) ExecTestCase supports passing arguments to test scripts.
+
+ * (python) New helper ``subunit.test_results.HookedTestResultDecorator``
+ which can be used to call some code on every event, without having to
+ implement all the event methods.
+
+ * (python) ``TestProtocolClient.time(a_datetime)`` has been added which
+ causes a timestamp to be output to the stream.
diff --git a/lib/subunit/README b/lib/subunit/README
new file mode 100644
index 0000000000..6ac258485f
--- /dev/null
+++ b/lib/subunit/README
@@ -0,0 +1,212 @@
+
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2009 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+Subunit
+-------
+
+Subunit is a streaming protocol for test results. The protocol is human
+readable and easily generated and parsed. By design all the components of
+the protocol conceptually fit into the xUnit TestCase->TestResult interaction.
+
+Subunit comes with command line filters to process a subunit stream and
+language bindings for python, C, C++ and shell. Bindings are easy to write
+for other languages.
+
+A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+ other can be run separately and then aggregated, rather than interfering
+ with each other.
+ * Grid testing: subunit can act as the necessary serialisation and
+ deserialisation to get test runs on distributed machines to be reported in
+ real time.
+
+Subunit supplies the following filters:
+ * tap2subunit - convert perl's TestAnythingProtocol to subunit.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+Integration with other tools
+----------------------------
+
+Subunit's language bindings act as integration with various test runners like
+'check', 'cppunit', Python's 'unittest'. Beyond that a small amount of glue
+(typically a few lines) will allow Subunit to be used in more sophisticated
+ways.
+
+Python
+======
+
+Subunit has excellent Python support: most of the filters and tools are written
+in python and there are facilities for using Subunit to increase test isolation
+seamlessly within a test suite.
+
+One simple way to run an existing python test suite and have it output subunit
+is the module ``subunit.run``::
+
+ $ python -m subunit.run mypackage.tests.test_suite
+
+For more information on the Python support Subunit offers, please see
+``pydoc subunit``, or the source in ``python/subunit/__init__.py``
+
+C
+=
+
+Subunit has C bindings to emit the protocol, and comes with a patch for 'check'
+which has been nominally accepted by the 'check' developers. See 'c/README' for
+more details.
+
+C++
+===
+
+The C library is includable and usable directly from C++. A TestListener for
+CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+shell
+=====
+
+Similar to C, the shell bindings consist of simple functions to output protocol
+elements, and a patch for adding subunit output to the 'ShUnit' shell test
+runner. See 'shell/README' for details.
+
+Filter recipes
+--------------
+
+To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
+
+
+The protocol
+------------
+
+Sample subunit wire contents
+----------------------------
+
+The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+Subunit protocol description
+============================
+
+This description is being ported to an EBNF style. Currently its only partly in
+that style, but should be fairly clear all the same. When in doubt, refer the
+source (and ideally help fix up the description!). Generally the protocol is
+line orientated and consists of either directives and their parameters, or
+when outside a DETAILS region unexpected lines which are not interpreted by
+the parser - they should be forwarded unaltered.
+
+test|testing|test:|testing: test label
+success|success:|successful|successful: test label
+success|success:|successful|successful: test label DETAILS
+failure: test label
+failure: test label DETAILS
+error: test label
+error: test label DETAILS
+skip[:] test label
+skip[:] test label DETAILS
+xfail[:] test label
+xfail[:] test label DETAILS
+progress: [+|-]X
+progress: push
+progress: pop
+tags: [-]TAG ...
+time: YYYY-MM-DD HH:MM:SSZ
+
+DETAILS ::= BRACKETED | MULTIPART
+BRACKETED ::= '[' CR UTF8-lines ']' CR
+MULTIPART ::= '[ multipart' CR PART* ']' CR
+PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+unexpected output on stdout -> stdout.
+exit w/0 or last test completing -> error
+
+Tags given outside a test are applied to all following tests
+Tags given after a test: line and before the result line for the same test
+apply only to that test, and inherit the current global tags.
+A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+applying to a single test, or to cancel a global tag.
+
+The progress directive is used to provide progress information about a stream
+so that stream consumer can provide completion estimates, progress bars and so
+on. Stream generators that know how many tests will be present in the stream
+should output "progress: COUNT". Stream filters that add tests should output
+"progress: +COUNT", and those that remove tests should output
+"progress: -COUNT". An absolute count should reset the progress indicators in
+use - it indicates that two separate streams from different generators have
+been trivially concatenated together, and there is no knowledge of how many
+more complete streams are incoming. Smart concatenation could scan each stream
+for their count and sum them, or alternatively translate absolute counts into
+relative counts inline. It is recommended that outputters avoid absolute counts
+unless necessary. The push and pop directives are used to provide local regions
+for progress reporting. This fits with hierarchically operating test
+environments - such as those that organise tests into suites - the top-most
+runner can report on the number of suites, and each suite surround its output
+with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+the progress of the restored level by one step. Encountering progress
+directives between the start and end of a test pair indicates that a previous
+test was interrupted and did not cleanly terminate: it should be implicitly
+closed with an error (the same as when a stream ends with no closing test
+directive for the most recently started test).
+
+The time directive acts as a clock event - it sets the time for all future
+events. The value should be a valid ISO8601 time.
+
+The skip result is used to indicate a test that was found by the runner but not
+fully executed due to some policy or dependency issue. This is represented in
+python using the addSkip interface that testtools
+(https://edge.launchpad.net/testtools) defines. When communicating with a non
+skip aware test result, the test is reported as an error.
+The xfail result is used to indicate a test that was expected to fail failing
+in the expected manner. As this is a normal condition for such tests it is
+represented as a successful test in Python.
+In future, skip and xfail results will be represented semantically in Python,
+but some discussion is underway on the right way to do this.
diff --git a/lib/subunit/c++/README b/lib/subunit/c++/README
new file mode 100644
index 0000000000..7b8184400e
--- /dev/null
+++ b/lib/subunit/c++/README
@@ -0,0 +1,50 @@
+#
+# subunit C++ bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+Currently there are no native C++ bindings for subunit. However the C library
+can be used from C++ safely. A CPPUnit listener is built as part of Subunit to
+allow CPPUnit users to simply get Subunit output.
+
+To use the listener, use pkg-config (or your preferred replacement) to get the
+cflags and link settings from libcppunit_subunit.pc.
+
+In your test driver main, use SubunitTestProgressListener, as shown in this
+example main::
+
+ {
+ // Create the event manager and test controller
+ CPPUNIT_NS::TestResult controller;
+
+ // Add a listener that collects test result
+ // so we can get the overall status.
+ // note this isn't needed for subunit...
+ CPPUNIT_NS::TestResultCollector result;
+ controller.addListener( &result );
+
+ // Add a listener that print test activity in subunit format.
+ CPPUNIT_NS::SubunitTestProgressListener progress;
+ controller.addListener( &progress );
+
+ // Add the top suite to the test runner
+ CPPUNIT_NS::TestRunner runner;
+ runner.addTest( CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest() );
+ runner.run( controller );
+
+ return result.wasSuccessful() ? 0 : 1;
+ }
diff --git a/lib/subunit/c++/SubunitTestProgressListener.cpp b/lib/subunit/c++/SubunitTestProgressListener.cpp
new file mode 100644
index 0000000000..76cd9e1194
--- /dev/null
+++ b/lib/subunit/c++/SubunitTestProgressListener.cpp
@@ -0,0 +1,63 @@
+/* Subunit test listener for cppunit (http://cppunit.sourceforge.net).
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ */
+
+#include <cppunit/Exception.h>
+#include <cppunit/Test.h>
+#include <cppunit/TestFailure.h>
+#include <cppunit/TextOutputter.h>
+#include <iostream>
+
+// Have to be able to import the public interface without config.h.
+#include "SubunitTestProgressListener.h"
+#include "config.h"
+#include "subunit/child.h"
+
+
+CPPUNIT_NS_BEGIN
+
+
+void
+SubunitTestProgressListener::startTest( Test *test )
+{
+ subunit_test_start(test->getName().c_str());
+ last_test_failed = false;
+}
+
+void
+SubunitTestProgressListener::addFailure( const TestFailure &failure )
+{
+ std::ostringstream capture_stream;
+ TextOutputter outputter(NULL, capture_stream);
+ outputter.printFailureLocation(failure.sourceLine());
+ outputter.printFailureDetail(failure.thrownException());
+
+ if (failure.isError())
+ subunit_test_error(failure.failedTestName().c_str(),
+ capture_stream.str().c_str());
+ else
+ subunit_test_fail(failure.failedTestName().c_str(),
+ capture_stream.str().c_str());
+ last_test_failed = true;
+}
+
+void
+SubunitTestProgressListener::endTest( Test *test)
+{
+ if (!last_test_failed)
+ subunit_test_pass(test->getName().c_str());
+}
+
+
+CPPUNIT_NS_END
diff --git a/lib/subunit/c++/SubunitTestProgressListener.h b/lib/subunit/c++/SubunitTestProgressListener.h
new file mode 100644
index 0000000000..5206d833c7
--- /dev/null
+++ b/lib/subunit/c++/SubunitTestProgressListener.h
@@ -0,0 +1,56 @@
+/* Subunit test listener for cppunit (http://cppunit.sourceforge.net).
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ */
+#ifndef CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+#define CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+
+#include <cppunit/TestListener.h>
+
+
+CPPUNIT_NS_BEGIN
+
+
+/*!
+ * \brief TestListener that outputs subunit
+ * (http://www.robertcollins.net/unittest/subunit) compatible output.
+ * \ingroup TrackingTestExecution
+ */
+class CPPUNIT_API SubunitTestProgressListener : public TestListener
+{
+public:
+
+ SubunitTestProgressListener() {}
+
+ void startTest( Test *test );
+
+ void addFailure( const TestFailure &failure );
+
+ void endTest( Test *test );
+
+private:
+ /// Prevents the use of the copy constructor.
+ SubunitTestProgressListener( const SubunitTestProgressListener &copy );
+
+ /// Prevents the use of the copy operator.
+ void operator =( const SubunitTestProgressListener &copy );
+
+private:
+ int last_test_failed;
+};
+
+
+CPPUNIT_NS_END
+
+#endif // CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+
diff --git a/lib/subunit/c/README b/lib/subunit/c/README
new file mode 100644
index 0000000000..b62fd45395
--- /dev/null
+++ b/lib/subunit/c/README
@@ -0,0 +1,68 @@
+#
+# subunit C bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+This subtree contains an implementation of the subunit child protocol.
+Currently I have no plans to write a test runner in C, so I have not written
+an implementation of the parent protocol. [but will happily accept patches].
+This implementation is built using SCons and tested via 'check'.
+See the tests/ directory for the test programs.
+You can use `make check` or `scons check` to run the tests.
+
+The C protocol consists of four functions which you can use to output test
+metadata trivially. See lib/subunit_child.[ch] for details.
+
+However, this is not a test runner - subunit provides no support for [for
+instance] managing assertions, cleaning up on errors etc. You can look at
+'check' (http://check.sourceforge.net/) or
+'gunit' (https://garage.maemo.org/projects/gunit) for C unit test
+frameworks.
+There is a patch for 'check' (check-subunit-*.patch) in this source tree.
+It's also available as request ID #1470750 in the sourceforge request tracker
+http://sourceforge.net/tracker/index.php. The 'check' developers have indicated
+they will merge this during the current release cycle.
+
+If you are a test environment maintainer - either homegrown, or 'check' or
+'gunit' or some other, you will want to know how the subunit calls should be used.
+Here is what a manually written test using the bindings might look like:
+
+
+void
+a_test(void) {
+ int result;
+ subunit_test_start("test name");
+ # determine if test passes or fails
+ result = SOME_VALUE;
+ if (!result) {
+ subunit_test_pass("test name");
+ } else {
+ subunit_test_fail("test name",
+ "Something went wrong running something:\n"
+ "exited with result: '%s'", result);
+ }
+}
+
+Which when run with a subunit test runner will generate something like:
+test name ... ok
+
+on success, and:
+
+test name ... FAIL
+
+======================================================================
+FAIL: test name
+----------------------------------------------------------------------
+RemoteError:
+Something went wrong running something:
+exited with result: '1'
diff --git a/lib/subunit/c/include/subunit/child.h b/lib/subunit/c/include/subunit/child.h
new file mode 100644
index 0000000000..896d2dfad0
--- /dev/null
+++ b/lib/subunit/c/include/subunit/child.h
@@ -0,0 +1,96 @@
+/**
+ *
+ * subunit C bindings.
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ **/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * subunit_test_start:
+ *
+ * Report that a test is starting.
+ * @name: test case name
+ */
+extern void subunit_test_start(char const * const name);
+
+
+/**
+ * subunit_test_pass:
+ *
+ * Report that a test has passed.
+ *
+ * @name: test case name
+ */
+extern void subunit_test_pass(char const * const name);
+
+
+/**
+ * subunit_test_fail:
+ *
+ * Report that a test has failed.
+ * @name: test case name
+ * @error: a string describing the error.
+ */
+extern void subunit_test_fail(char const * const name, char const * const error);
+
+
+/**
+ * subunit_test_error:
+ *
+ * Report that a test has errored. An error is an unintentional failure - i.e.
+ * a segfault rather than a failed assertion.
+ * @name: test case name
+ * @error: a string describing the error.
+ */
+extern void subunit_test_error(char const * const name,
+ char const * const error);
+
+
+/**
+ * subunit_test_skip:
+ *
+ * Report that a test has been skipped. An skip is a test that has not run to
+ * conclusion but hasn't given an error either - its result is unknown.
+ * @name: test case name
+ * @reason: a string describing the reason for the skip.
+ */
+extern void subunit_test_skip(char const * const name,
+ char const * const reason);
+
+
+enum subunit_progress_whence {
+ SUBUNIT_PROGRESS_SET,
+ SUBUNIT_PROGRESS_CUR,
+ SUBUNIT_PROGRESS_POP,
+ SUBUNIT_PROGRESS_PUSH,
+};
+
+/**
+ * subunit_progress:
+ *
+ * Report the progress of a test run.
+ * @whence: The type of progress update to report.
+ * @offset: Offset of the progress (only for SUBUNIT_PROGRESS_SET
+ * and SUBUNIT_PROGRESS_CUR).
+ */
+extern void subunit_progress(enum subunit_progress_whence whence, int offset);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/lib/subunit/c/lib/child.c b/lib/subunit/c/lib/child.c
new file mode 100644
index 0000000000..20f38da8c9
--- /dev/null
+++ b/lib/subunit/c/lib/child.c
@@ -0,0 +1,104 @@
+/**
+ *
+ * subunit C child-side bindings: report on tests being run.
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ **/
+
+#include <stdio.h>
+#include <string.h>
+#include "subunit/child.h"
+
+/* Write details about a test event. It is the callers responsibility to ensure
+ * that details are only provided for events the protocol expects details on.
+ * @event: The event - e.g. 'skip'
+ * @name: The test name/id.
+ * @details: The details of the event, may be NULL if no details are present.
+ */
+static void
+subunit_send_event(char const * const event, char const * const name,
+ char const * const details)
+{
+ if (NULL == details) {
+ fprintf(stdout, "%s: %s\n", event, name);
+ } else {
+ fprintf(stdout, "%s: %s [\n", event, name);
+ fprintf(stdout, "%s", details);
+ if (details[strlen(details) - 1] != '\n')
+ fprintf(stdout, "\n");
+ fprintf(stdout, "]\n");
+ }
+ fflush(stdout);
+}
+
+/* these functions all flush to ensure that the test runner knows the action
+ * that has been taken even if the subsequent test etc takes a long time or
+ * never completes (i.e. a segfault).
+ */
+
+void
+subunit_test_start(char const * const name)
+{
+ subunit_send_event("test", name, NULL);
+}
+
+
+void
+subunit_test_pass(char const * const name)
+{
+ /* TODO: add success details as an option */
+ subunit_send_event("success", name, NULL);
+}
+
+
+void
+subunit_test_fail(char const * const name, char const * const error)
+{
+ subunit_send_event("failure", name, error);
+}
+
+
+void
+subunit_test_error(char const * const name, char const * const error)
+{
+ subunit_send_event("error", name, error);
+}
+
+
+void
+subunit_test_skip(char const * const name, char const * const reason)
+{
+ subunit_send_event("skip", name, reason);
+}
+
+void
+subunit_progress(enum subunit_progress_whence whence, int offset)
+{
+ switch (whence) {
+ case SUBUNIT_PROGRESS_SET:
+ printf("progress: %d\n", offset);
+ break;
+ case SUBUNIT_PROGRESS_CUR:
+ printf("progress: %+-d\n", offset);
+ break;
+ case SUBUNIT_PROGRESS_POP:
+ printf("progress: pop\n");
+ break;
+ case SUBUNIT_PROGRESS_PUSH:
+ printf("progress: push\n");
+ break;
+ default:
+ fprintf(stderr, "Invalid whence %d in subunit_progress()\n", whence);
+ break;
+ }
+}
diff --git a/lib/subunit/c/tests/test_child.c b/lib/subunit/c/tests/test_child.c
new file mode 100644
index 0000000000..0744599b9f
--- /dev/null
+++ b/lib/subunit/c/tests/test_child.c
@@ -0,0 +1,231 @@
+/**
+ *
+ * subunit C bindings.
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ **/
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <check.h>
+
+#include "subunit/child.h"
+
+/**
+ * Helper function to capture stdout, run some call, and check what
+ * was written.
+ * @expected the expected stdout content
+ * @function the function to call.
+ **/
+static void
+test_stdout_function(char const * expected,
+ void (*function)(void))
+{
+ /* test that the start function emits a correct test: line. */
+ int bytecount;
+ int old_stdout;
+ int new_stdout[2];
+ char buffer[100];
+ /* we need a socketpair to capture stdout in */
+ fail_if(pipe(new_stdout), "Failed to create a socketpair.");
+ /* backup stdout so we can replace it */
+ old_stdout = dup(1);
+ if (old_stdout == -1) {
+ close(new_stdout[0]);
+ close(new_stdout[1]);
+ fail("Failed to backup stdout before replacing.");
+ }
+ /* redirect stdout so we can analyse it */
+ if (dup2(new_stdout[1], 1) != 1) {
+ close(old_stdout);
+ close(new_stdout[0]);
+ close(new_stdout[1]);
+ fail("Failed to redirect stdout");
+ }
+ /* yes this can block. Its a test case with < 100 bytes of output.
+ * DEAL.
+ */
+ function();
+ /* restore stdout now */
+ if (dup2(old_stdout, 1) != 1) {
+ close(old_stdout);
+ close(new_stdout[0]);
+ close(new_stdout[1]);
+ fail("Failed to restore stdout");
+ }
+ /* and we dont need the write side any more */
+ if (close(new_stdout[1])) {
+ close(new_stdout[0]);
+ fail("Failed to close write side of socketpair.");
+ }
+ /* get the output */
+ bytecount = read(new_stdout[0], buffer, 100);
+ if (0 > bytecount) {
+ close(new_stdout[0]);
+ fail("Failed to read captured output.");
+ }
+ buffer[bytecount]='\0';
+ /* and we dont need the read side any more */
+ fail_if(close(new_stdout[0]), "Failed to close write side of socketpair.");
+ /* compare with expected outcome */
+ fail_if(strcmp(expected, buffer), "Did not get expected output [%s], got [%s]", expected, buffer);
+}
+
+
+static void
+call_test_start(void)
+{
+ subunit_test_start("test case");
+}
+
+
+START_TEST (test_start)
+{
+ test_stdout_function("test: test case\n", call_test_start);
+}
+END_TEST
+
+
+static void
+call_test_pass(void)
+{
+ subunit_test_pass("test case");
+}
+
+
+START_TEST (test_pass)
+{
+ test_stdout_function("success: test case\n", call_test_pass);
+}
+END_TEST
+
+
+static void
+call_test_fail(void)
+{
+ subunit_test_fail("test case", "Multiple lines\n of error\n");
+}
+
+
+START_TEST (test_fail)
+{
+ test_stdout_function("failure: test case [\n"
+ "Multiple lines\n"
+ " of error\n"
+ "]\n",
+ call_test_fail);
+}
+END_TEST
+
+
+static void
+call_test_error(void)
+{
+ subunit_test_error("test case", "Multiple lines\n of output\n");
+}
+
+
+START_TEST (test_error)
+{
+ test_stdout_function("error: test case [\n"
+ "Multiple lines\n"
+ " of output\n"
+ "]\n",
+ call_test_error);
+}
+END_TEST
+
+
+static void
+call_test_skip(void)
+{
+ subunit_test_skip("test case", "Multiple lines\n of output\n");
+}
+
+
+START_TEST (test_skip)
+{
+ test_stdout_function("skip: test case [\n"
+ "Multiple lines\n"
+ " of output\n"
+ "]\n",
+ call_test_skip);
+}
+END_TEST
+
+
+static void
+call_test_progress_pop(void)
+{
+ subunit_progress(SUBUNIT_PROGRESS_POP, 0);
+}
+
+static void
+call_test_progress_set(void)
+{
+ subunit_progress(SUBUNIT_PROGRESS_SET, 5);
+}
+
+static void
+call_test_progress_push(void)
+{
+ subunit_progress(SUBUNIT_PROGRESS_PUSH, 0);
+}
+
+static void
+call_test_progress_cur(void)
+{
+ subunit_progress(SUBUNIT_PROGRESS_CUR, -6);
+}
+
+START_TEST (test_progress)
+{
+ test_stdout_function("progress: pop\n",
+ call_test_progress_pop);
+ test_stdout_function("progress: push\n",
+ call_test_progress_push);
+ test_stdout_function("progress: 5\n",
+ call_test_progress_set);
+ test_stdout_function("progress: -6\n",
+ call_test_progress_cur);
+}
+END_TEST
+
+static Suite *
+child_suite(void)
+{
+ Suite *s = suite_create("subunit_child");
+ TCase *tc_core = tcase_create("Core");
+ suite_add_tcase (s, tc_core);
+ tcase_add_test (tc_core, test_start);
+ tcase_add_test (tc_core, test_pass);
+ tcase_add_test (tc_core, test_fail);
+ tcase_add_test (tc_core, test_error);
+ tcase_add_test (tc_core, test_skip);
+ tcase_add_test (tc_core, test_progress);
+ return s;
+}
+
+
+int
+main(void)
+{
+ int nf;
+ Suite *s = child_suite();
+ SRunner *sr = srunner_create(s);
+ srunner_run_all(sr, CK_NORMAL);
+ nf = srunner_ntests_failed(sr);
+ srunner_free(sr);
+ return (nf == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/lib/subunit/c/wscript b/lib/subunit/c/wscript
new file mode 100644
index 0000000000..3e5311d79f
--- /dev/null
+++ b/lib/subunit/c/wscript
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import Options
+
+def configure(conf):
+ if conf.CHECK_BUNDLED_SYSTEM('subunit', checkfunctions='subunit_test_start', headers='subunit/child.h'):
+ conf.define('USING_SYSTEM_SUBUNIT', 1)
+
+def build(bld):
+ if bld.CONFIG_SET('USING_SYSTEM_SUBUNIT'):
+ return
+
+ bld.SAMBA_LIBRARY('subunit',
+ source='lib/child.c',
+ private_library=True,
+ includes='include')
diff --git a/lib/subunit/configure.ac b/lib/subunit/configure.ac
new file mode 100644
index 0000000000..5696573464
--- /dev/null
+++ b/lib/subunit/configure.ac
@@ -0,0 +1,75 @@
+m4_define([SUBUNIT_MAJOR_VERSION], [0])
+m4_define([SUBUNIT_MINOR_VERSION], [0])
+m4_define([SUBUNIT_MICRO_VERSION], [6])
+m4_define([SUBUNIT_VERSION],
+m4_defn([SUBUNIT_MAJOR_VERSION]).m4_defn([SUBUNIT_MINOR_VERSION]).m4_defn([SUBUNIT_MICRO_VERSION]))
+AC_PREREQ([2.59])
+AC_INIT([subunit], [SUBUNIT_VERSION], [subunit-dev@lists.launchpad.net])
+AC_CONFIG_SRCDIR([c/lib/child.c])
+AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects])
+AC_CONFIG_MACRO_DIR([m4])
+[SUBUNIT_MAJOR_VERSION]=SUBUNIT_MAJOR_VERSION
+[SUBUNIT_MINOR_VERSION]=SUBUNIT_MINOR_VERSION
+[SUBUNIT_MICRO_VERSION]=SUBUNIT_MICRO_VERSION
+[SUBUNIT_VERSION]=SUBUNIT_VERSION
+AC_SUBST([SUBUNIT_MAJOR_VERSION])
+AC_SUBST([SUBUNIT_MINOR_VERSION])
+AC_SUBST([SUBUNIT_MICRO_VERSION])
+AC_SUBST([SUBUNIT_VERSION])
+AC_USE_SYSTEM_EXTENSIONS
+AC_PROG_CC
+AC_PROG_CXX
+AM_PROG_CC_C_O
+AC_PROG_INSTALL
+AC_PROG_LN_S
+AC_PROG_LIBTOOL
+AM_PATH_PYTHON
+
+AS_IF([test "$GCC" = "yes"],
+ [
+ SUBUNIT_CFLAGS="-Wall -Werror -Wextra -Wstrict-prototypes "
+ SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wmissing-prototypes -Wwrite-strings "
+ SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wno-variadic-macros "
+ SUBUNIT_CXXFLAGS="-Wall -Werror -Wextra -Wwrite-strings -Wno-variadic-macros"
+ ])
+
+AM_CFLAGS="$SUBUNIT_CFLAGS -I\$(top_srcdir)/c/include"
+AM_CXXFLAGS="$SUBUNIT_CXXFLAGS -I\$(top_srcdir)/c/include"
+AC_SUBST(AM_CFLAGS)
+AC_SUBST(AM_CXXFLAGS)
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_CHECK_HEADERS([stdlib.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+AC_HEADER_TIME
+AC_STRUCT_TM
+
+AC_CHECK_SIZEOF(int, 4)
+AC_CHECK_SIZEOF(short, 2)
+AC_CHECK_SIZEOF(long, 4)
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_REALLOC
+
+# Easier memory management.
+# C unit testing.
+PKG_CHECK_MODULES([CHECK], [check >= 0.9.4])
+# C++ unit testing.
+PKG_CHECK_MODULES([CPPUNIT], [cppunit])
+
+# Output files
+AC_CONFIG_HEADERS([config.h])
+
+AC_CONFIG_FILES([libsubunit.pc
+ libcppunit_subunit.pc
+ Makefile
+ perl/Makefile.PL
+ ])
+AC_OUTPUT
diff --git a/lib/subunit/filters/subunit-filter b/lib/subunit/filters/subunit-filter
new file mode 100755
index 0000000000..c06a03a827
--- /dev/null
+++ b/lib/subunit/filters/subunit-filter
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
+# (C) 2009 Martin Pool
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to include/exclude tests.
+
+The default is to strip successful tests.
+
+Tests can be filtered by Python regular expressions with --with and --without,
+which match both the test name and the error text (if any). The result
+contains tests which match any of the --with expressions and none of the
+--without expressions. For case-insensitive matching prepend '(?i)'.
+Remember to quote shell metacharacters.
+"""
+
+from optparse import OptionParser
+import sys
+import unittest
+import re
+
+from subunit import (
+ DiscardStream,
+ ProtocolTestCase,
+ TestProtocolClient,
+ )
+from subunit.test_results import TestResultFilter
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--error", action="store_false",
+ help="include errors", default=False, dest="error")
+parser.add_option("-e", "--no-error", action="store_true",
+ help="exclude errors", dest="error")
+parser.add_option("--failure", action="store_false",
+ help="include failures", default=False, dest="failure")
+parser.add_option("-f", "--no-failure", action="store_true",
+	help="exclude failures", dest="failure")
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("-s", "--success", action="store_false",
+ help="include successes", dest="success")
+parser.add_option("--no-skip", action="store_true",
+ help="exclude skips", dest="skip")
+parser.add_option("--no-success", action="store_true",
+ help="exclude successes", default=True, dest="success")
+parser.add_option("-m", "--with", type=str,
+ help="regexp to include (case-sensitive by default)",
+ action="append", dest="with_regexps")
+parser.add_option("--without", type=str,
+ help="regexp to exclude (case-sensitive by default)",
+ action="append", dest="without_regexps")
+
+(options, args) = parser.parse_args()
+
+
+def _compile_re_from_list(l):
+ return re.compile("|".join(l), re.MULTILINE)
+
+
+def _make_regexp_filter(with_regexps, without_regexps):
+ """Make a callback that checks tests against regexps.
+
+ with_regexps and without_regexps are each either a list of regexp strings,
+ or None.
+ """
+ with_re = with_regexps and _compile_re_from_list(with_regexps)
+ without_re = without_regexps and _compile_re_from_list(without_regexps)
+
+ def check_regexps(test, outcome, err, details):
+ """Check if this test and error match the regexp filters."""
+ test_str = str(test) + outcome + str(err) + str(details)
+ if with_re and not with_re.search(test_str):
+ return False
+ if without_re and without_re.search(test_str):
+ return False
+ return True
+ return check_regexps
+
+
+regexp_filter = _make_regexp_filter(options.with_regexps,
+ options.without_regexps)
+result = TestProtocolClient(sys.stdout)
+result = TestResultFilter(result, filter_error=options.error,
+ filter_failure=options.failure, filter_success=options.success,
+ filter_skip=options.skip,
+ filter_predicate=regexp_filter)
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+test.run(result)
+sys.exit(0)
diff --git a/lib/subunit/filters/subunit-ls b/lib/subunit/filters/subunit-ls
new file mode 100755
index 0000000000..86461347d3
--- /dev/null
+++ b/lib/subunit/filters/subunit-ls
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""List tests in a subunit stream."""
+
+from optparse import OptionParser
+import sys
+
+from subunit import DiscardStream, ProtocolTestCase
+from subunit.test_results import TestIdPrintingResult
+
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--times", action="store_true",
+ help="list the time each test took (requires a timestamped stream)",
+ default=False)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+(options, args) = parser.parse_args()
+result = TestIdPrintingResult(sys.stdout, options.times)
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+test.run(result)
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-notify b/lib/subunit/filters/subunit-notify
new file mode 100755
index 0000000000..758e7fc8ff
--- /dev/null
+++ b/lib/subunit/filters/subunit-notify
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Notify the user of a finished test run."""
+
+from optparse import OptionParser
+import sys
+
+import pygtk
+pygtk.require('2.0')
+import pynotify
+
+from subunit import DiscardStream, ProtocolTestCase, TestResultStats
+
+if not pynotify.init("Subunit-notify"):
+ sys.exit(1)
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("-f", "--forward", action="store_true", default=False,
+ help="Forward subunit stream on stdout.")
+(options, args) = parser.parse_args()
+result = TestResultStats(sys.stdout)
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+if options.forward:
+ forward_stream = sys.stdout
+else:
+ forward_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream,
+ forward=forward_stream)
+test.run(result)
+if result.failed_tests > 0:
+ summary = "Test run failed"
+else:
+ summary = "Test run successful"
+body = "Total tests: %d; Passed: %d; Failed: %d" % (
+ result.total_tests,
+ result.passed_tests,
+ result.failed_tests,
+ )
+nw = pynotify.Notification(summary, body)
+nw.show()
+
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-stats b/lib/subunit/filters/subunit-stats
new file mode 100755
index 0000000000..4734988fc2
--- /dev/null
+++ b/lib/subunit/filters/subunit-stats
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+from optparse import OptionParser
+import sys
+import unittest
+
+from subunit import DiscardStream, ProtocolTestCase, TestResultStats
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+(options, args) = parser.parse_args()
+result = TestResultStats(sys.stdout)
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+test.run(result)
+result.formatStats()
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-tags b/lib/subunit/filters/subunit-tags
new file mode 100755
index 0000000000..edbbfce480
--- /dev/null
+++ b/lib/subunit/filters/subunit-tags
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""A filter to change tags on a subunit stream.
+
+subunit-tags foo -> adds foo
+subunit-tags foo -bar -> adds foo and removes bar
+"""
+
+import sys
+
+from subunit import tag_stream
+sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
diff --git a/lib/subunit/filters/subunit2gtk b/lib/subunit/filters/subunit2gtk
new file mode 100755
index 0000000000..c2cb2de3ce
--- /dev/null
+++ b/lib/subunit/filters/subunit2gtk
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+### The GTK progress bar __init__ function is derived from the pygtk tutorial:
+# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
+#
+# The GTK Tutorial is Copyright (C) 1997 Ian Main.
+#
+# Copyright (C) 1998-1999 Tony Gale.
+#
+# Permission is granted to make and distribute verbatim copies of this manual
+# provided the copyright notice and this permission notice are preserved on all
+# copies.
+#
+# Permission is granted to copy and distribute modified versions of this
+# document under the conditions for verbatim copying, provided that this
+# copyright notice is included exactly as in the original, and that the entire
+# resulting derived work is distributed under the terms of a permission notice
+# identical to this one.
+#
+# Permission is granted to copy and distribute translations of this document
+# into another language, under the above conditions for modified versions.
+#
+# If you are intending to incorporate this document into a published work,
+# please contact the maintainer, and we will make an effort to ensure that you
+# have the most up to date information available.
+#
+# There is no guarantee that this document lives up to its intended purpose.
+# This is simply provided as a free resource. As such, the authors and
+# maintainers of the information provided within can not make any guarantee
+# that the information is even accurate.
+
+"""Display a subunit stream in a gtk progress window."""
+
+import sys
+import unittest
+
+import pygtk
+pygtk.require('2.0')
+import gtk, gtk.gdk, gobject
+
+from subunit import (
+ PROGRESS_POP,
+ PROGRESS_PUSH,
+ PROGRESS_SET,
+ TestProtocolServer,
+ )
+from subunit.progress_model import ProgressModel
+
+
+class GTKTestResult(unittest.TestResult):
+
+ def __init__(self):
+ super(GTKTestResult, self).__init__()
+ # Instance variables (in addition to TestResult)
+ self.window = None
+ self.run_label = None
+ self.ok_label = None
+ self.not_ok_label = None
+ self.total_tests = None
+
+ self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+ self.window.set_resizable(True)
+
+ self.window.connect("destroy", gtk.main_quit)
+ self.window.set_title("Tests...")
+ self.window.set_border_width(0)
+
+ vbox = gtk.VBox(False, 5)
+ vbox.set_border_width(10)
+ self.window.add(vbox)
+ vbox.show()
+
+ # Create a centering alignment object
+ align = gtk.Alignment(0.5, 0.5, 0, 0)
+ vbox.pack_start(align, False, False, 5)
+ align.show()
+
+ # Create the ProgressBar
+ self.pbar = gtk.ProgressBar()
+ align.add(self.pbar)
+ self.pbar.set_text("Running")
+ self.pbar.show()
+ self.progress_model = ProgressModel()
+
+ separator = gtk.HSeparator()
+ vbox.pack_start(separator, False, False, 0)
+ separator.show()
+
+ # rows, columns, homogeneous
+ table = gtk.Table(2, 3, False)
+ vbox.pack_start(table, False, True, 0)
+ table.show()
+ # Show summary details about the run. Could use an expander.
+ label = gtk.Label("Run:")
+ table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.run_label = gtk.Label("N/A")
+ table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.run_label.show()
+
+ label = gtk.Label("OK:")
+ table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.ok_label = gtk.Label("N/A")
+ table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.ok_label.show()
+
+ label = gtk.Label("Not OK:")
+ table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.not_ok_label = gtk.Label("N/A")
+ table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.not_ok_label.show()
+
+ self.window.show()
+ # For the demo.
+ self.window.set_keep_above(True)
+ self.window.present()
+
+ def stopTest(self, test):
+ super(GTKTestResult, self).stopTest(test)
+ self.progress_model.advance()
+ if self.progress_model.width() == 0:
+ self.pbar.pulse()
+ else:
+ pos = self.progress_model.pos()
+ width = self.progress_model.width()
+ percentage = (pos / float(width))
+ self.pbar.set_fraction(percentage)
+
+ def stopTestRun(self):
+ try:
+ super(GTKTestResult, self).stopTestRun()
+ except AttributeError:
+ pass
+ self.pbar.set_text('Finished')
+
+ def addError(self, test, err):
+ super(GTKTestResult, self).addError(test, err)
+ self.update_counts()
+
+ def addFailure(self, test, err):
+ super(GTKTestResult, self).addFailure(test, err)
+ self.update_counts()
+
+ def addSuccess(self, test):
+ super(GTKTestResult, self).addSuccess(test)
+ self.update_counts()
+
+ def addSkip(self, test, reason):
+ # addSkip is new in Python 2.7/3.1
+ addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
+ if callable(addSkip):
+ addSkip(test, reason)
+ self.update_counts()
+
+ def addExpectedFailure(self, test, err):
+ # addExpectedFailure is new in Python 2.7/3.1
+ addExpectedFailure = getattr(super(GTKTestResult, self),
+ 'addExpectedFailure', None)
+ if callable(addExpectedFailure):
+ addExpectedFailure(test, err)
+ self.update_counts()
+
+ def addUnexpectedSuccess(self, test):
+ # addUnexpectedSuccess is new in Python 2.7/3.1
+ addUnexpectedSuccess = getattr(super(GTKTestResult, self),
+ 'addUnexpectedSuccess', None)
+ if callable(addUnexpectedSuccess):
+ addUnexpectedSuccess(test)
+ self.update_counts()
+
+ def progress(self, offset, whence):
+ if whence == PROGRESS_PUSH:
+ self.progress_model.push()
+ elif whence == PROGRESS_POP:
+ self.progress_model.pop()
+ elif whence == PROGRESS_SET:
+ self.total_tests = offset
+ self.progress_model.set_width(offset)
+ else:
+ self.total_tests += offset
+ self.progress_model.adjust_width(offset)
+
+ def time(self, a_datetime):
+ # We don't try to estimate completion yet.
+ pass
+
+ def update_counts(self):
+ self.run_label.set_text(str(self.testsRun))
+ bad = len(self.failures + self.errors)
+ self.ok_label.set_text(str(self.testsRun - bad))
+ self.not_ok_label.set_text(str(bad))
+
+
+class GIOProtocolTestCase(object):
+
+ def __init__(self, stream, result, on_finish):
+ self.stream = stream
+ self.schedule_read()
+ self.hup_id = gobject.io_add_watch(stream, gobject.IO_HUP, self.hup)
+ self.protocol = TestProtocolServer(result)
+ self.on_finish = on_finish
+
+ def read(self, source, condition, all=False):
+ #NB: \o/ actually blocks
+ line = source.readline()
+ if not line:
+ self.protocol.lostConnection()
+ self.on_finish()
+ return False
+ self.protocol.lineReceived(line)
+ # schedule more IO shortly - if we say we're willing to do it
+ # immediately we starve things.
+ if not all:
+ source_id = gobject.timeout_add(1, self.schedule_read)
+ return False
+ else:
+ return True
+
+ def schedule_read(self):
+ self.read_id = gobject.io_add_watch(self.stream, gobject.IO_IN, self.read)
+
+ def hup(self, source, condition):
+ while self.read(source, condition, all=True): pass
+ self.protocol.lostConnection()
+ gobject.source_remove(self.read_id)
+ self.on_finish()
+ return False
+
+
+result = GTKTestResult()
+test = GIOProtocolTestCase(sys.stdin, result, result.stopTestRun)
+gtk.main()
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit2junitxml b/lib/subunit/filters/subunit2junitxml
new file mode 100755
index 0000000000..bea795d2bd
--- /dev/null
+++ b/lib/subunit/filters/subunit2junitxml
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream into a JUnit XML report."""
+
+from optparse import OptionParser
+import sys
+import unittest
+
+from subunit import DiscardStream, ProtocolTestCase
+try:
+ from junitxml import JUnitXmlResult
+except ImportError:
+ sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
+ "http://pypi.python.org/pypi/junitxml) is required for this filter.")
+ raise
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("-o", "--output-to",
+ help="Output the XML to this path rather than stdout.")
+parser.add_option("-f", "--forward", action="store_true", default=False,
+ help="Forward subunit stream on stdout.")
+(options, args) = parser.parse_args()
+if options.output_to is None:
+ output_to = sys.stdout
+else:
+ output_to = file(options.output_to, 'wb')
+try:
+ result = JUnitXmlResult(output_to)
+ if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+ else:
+ passthrough_stream = None
+ if options.forward:
+ forward_stream = sys.stdout
+ else:
+ forward_stream = None
+ test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream,
+ forward=forward_stream)
+ result.startTestRun()
+ test.run(result)
+ result.stopTestRun()
+finally:
+ if options.output_to is not None:
+ output_to.close()
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit2pyunit b/lib/subunit/filters/subunit2pyunit
new file mode 100755
index 0000000000..83a23d14d1
--- /dev/null
+++ b/lib/subunit/filters/subunit2pyunit
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Display a subunit stream through python's unittest test runner."""
+
+from optparse import OptionParser
+import sys
+import unittest
+
+from subunit import DiscardStream, ProtocolTestCase, TestProtocolServer
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("--progress", action="store_true",
+ help="Use bzrlib's test reporter (requires bzrlib)",
+ default=False)
+(options, args) = parser.parse_args()
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+if options.progress:
+ from bzrlib.tests import TextTestRunner
+ from bzrlib import ui
+ ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
+ runner = TextTestRunner()
+else:
+ runner = unittest.TextTestRunner(verbosity=2)
+if runner.run(test).wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/tap2subunit b/lib/subunit/filters/tap2subunit
new file mode 100755
index 0000000000..c571972225
--- /dev/null
+++ b/lib/subunit/filters/tap2subunit
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""A filter that reads a TAP stream and outputs a subunit stream.
+
+More information on TAP is available at
+http://testanything.org/wiki/index.php/Main_Page.
+"""
+
+import sys
+
+from subunit import TAP2SubUnit
+sys.exit(TAP2SubUnit(sys.stdin, sys.stdout))
diff --git a/lib/subunit/libcppunit_subunit.pc.in b/lib/subunit/libcppunit_subunit.pc.in
new file mode 100644
index 0000000000..98982c78ae
--- /dev/null
+++ b/lib/subunit/libcppunit_subunit.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: cppunit subunit listener
+Description: Subunit output listener for the CPPUnit test library.
+URL: http://launchpad.net/subunit
+Version: @VERSION@
+Libs: -L${libdir} -lsubunit
+Cflags: -I${includedir}
diff --git a/lib/subunit/libsubunit.pc.in b/lib/subunit/libsubunit.pc.in
new file mode 100644
index 0000000000..67564148e8
--- /dev/null
+++ b/lib/subunit/libsubunit.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: subunit
+Description: Subunit test protocol library.
+URL: http://launchpad.net/subunit
+Version: @VERSION@
+Libs: -L${libdir} -lsubunit
+Cflags: -I${includedir}
diff --git a/lib/subunit/perl/Makefile.PL.in b/lib/subunit/perl/Makefile.PL.in
new file mode 100755
index 0000000000..26e1c181f0
--- /dev/null
+++ b/lib/subunit/perl/Makefile.PL.in
@@ -0,0 +1,20 @@
+use ExtUtils::MakeMaker;
+WriteMakefile(
+ 'INSTALL_BASE' => '@prefix@',
+ 'NAME' => 'Subunit',
+ 'VERSION' => '@SUBUNIT_VERSION@',
+ 'test' => { 'TESTS' => 'tests/*.pl' },
+ 'PMLIBDIRS' => [ 'lib' ],
+ 'EXE_FILES' => [ '@abs_srcdir@/subunit-diff' ],
+);
+sub MY::postamble {
+<<'EOT';
+check: # test
+
+uninstall_distcheck:
+ rm -fr $(DESTINSTALLARCHLIB)
+
+VPATH = @srcdir@
+.PHONY: uninstall_distcheck
+EOT
+}
diff --git a/lib/subunit/perl/lib/Subunit.pm b/lib/subunit/perl/lib/Subunit.pm
new file mode 100644
index 0000000000..dac4a2601d
--- /dev/null
+++ b/lib/subunit/perl/lib/Subunit.pm
@@ -0,0 +1,183 @@
+# Perl module for parsing and generating the Subunit protocol
+# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+package Subunit;
+use POSIX;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(parse_results $VERSION);
+
+use vars qw ( $VERSION );
+
+$VERSION = '0.0.2';
+
+use strict;
+
+sub parse_results($$$)
+{
+ my ($msg_ops, $statistics, $fh) = @_;
+ my $expected_fail = 0;
+ my $unexpected_fail = 0;
+ my $unexpected_err = 0;
+ my $open_tests = [];
+
+ while(<$fh>) {
+ if (/^test: (.+)\n/) {
+ $msg_ops->control_msg($_);
+ $msg_ops->start_test($1);
+ push (@$open_tests, $1);
+ } elsif (/^time: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)Z\n/) {
+ $msg_ops->report_time(mktime($6, $5, $4, $3, $2, $1-1900));
+ } elsif (/^(success|successful|failure|fail|skip|knownfail|error|xfail): (.*?)( \[)?([ \t]*)\n/) {
+ $msg_ops->control_msg($_);
+ my $result = $1;
+ my $testname = $2;
+ my $reason = undef;
+ if ($3) {
+ $reason = "";
+ # reason may be specified in next lines
+ my $terminated = 0;
+ while(<$fh>) {
+ $msg_ops->control_msg($_);
+ if ($_ eq "]\n") { $terminated = 1; last; } else { $reason .= $_; }
+ }
+
+ unless ($terminated) {
+ $statistics->{TESTS_ERROR}++;
+ $msg_ops->end_test($testname, "error", 1, "reason ($result) interrupted");
+ return 1;
+ }
+ }
+ if ($result eq "success" or $result eq "successful") {
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $statistics->{TESTS_EXPECTED_OK}++;
+ $msg_ops->end_test($testname, $result, 0, $reason);
+ } elsif ($result eq "xfail" or $result eq "knownfail") {
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $statistics->{TESTS_EXPECTED_FAIL}++;
+ $msg_ops->end_test($testname, $result, 0, $reason);
+ $expected_fail++;
+ } elsif ($result eq "failure" or $result eq "fail") {
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $statistics->{TESTS_UNEXPECTED_FAIL}++;
+ $msg_ops->end_test($testname, $result, 1, $reason);
+ $unexpected_fail++;
+ } elsif ($result eq "skip") {
+ $statistics->{TESTS_SKIP}++;
+ my $last = pop(@$open_tests);
+ if (defined($last) and $last ne $testname) {
+ push (@$open_tests, $testname);
+ }
+ $msg_ops->end_test($testname, $result, 0, $reason);
+ } elsif ($result eq "error") {
+ $statistics->{TESTS_ERROR}++;
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $msg_ops->end_test($testname, $result, 1, $reason);
+ $unexpected_err++;
+ }
+ } else {
+ $msg_ops->output_msg($_);
+ }
+ }
+
+ while ($#$open_tests+1 > 0) {
+ $msg_ops->end_test(pop(@$open_tests), "error", 1,
+ "was started but never finished!");
+ $statistics->{TESTS_ERROR}++;
+ $unexpected_err++;
+ }
+
+ return 1 if $unexpected_err > 0;
+ return 1 if $unexpected_fail > 0;
+ return 0;
+}
+
+sub start_test($)
+{
+ my ($testname) = @_;
+ print "test: $testname\n";
+}
+
+sub end_test($$;$)
+{
+ my $name = shift;
+ my $result = shift;
+ my $reason = shift;
+ if ($reason) {
+ print "$result: $name [\n";
+ print "$reason";
+ print "]\n";
+ } else {
+ print "$result: $name\n";
+ }
+}
+
+sub skip_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "skip", $reason);
+}
+
+sub fail_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "fail", $reason);
+}
+
+sub success_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "success", $reason);
+}
+
+sub xfail_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "xfail", $reason);
+}
+
+sub report_time($)
+{
+ my ($time) = @_;
+ my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = localtime($time);
+ printf "time: %04d-%02d-%02d %02d:%02d:%02dZ\n", $year+1900, $mon, $mday, $hour, $min, $sec;
+}
+
+sub progress_pop()
+{
+ print "progress: pop\n";
+}
+
+sub progress_push()
+{
+ print "progress: push\n";
+}
+
+sub progress($;$)
+{
+ my ($count, $whence) = @_;
+
+ unless(defined($whence)) {
+ $whence = "";
+ }
+
+ print "progress: $whence$count\n";
+}
+
+1;
diff --git a/lib/subunit/perl/lib/Subunit/Diff.pm b/lib/subunit/perl/lib/Subunit/Diff.pm
new file mode 100644
index 0000000000..e7841c3b00
--- /dev/null
+++ b/lib/subunit/perl/lib/Subunit/Diff.pm
@@ -0,0 +1,85 @@
+#!/usr/bin/perl
+# Diff two subunit streams
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+package Subunit::Diff;
+
+use strict;
+
+use Subunit qw(parse_results);
+
+sub control_msg() { }
+sub report_time($$) { }
+
+sub output_msg($$)
+{
+ my ($self, $msg) = @_;
+
+ # No output for now, perhaps later diff this as well ?
+}
+
+sub start_test($$)
+{
+ my ($self, $testname) = @_;
+}
+
+sub end_test($$$$$)
+{
+ my ($self, $testname, $result, $unexpected, $reason) = @_;
+
+ $self->{$testname} = $result;
+}
+
+sub new {
+ my ($class) = @_;
+
+ my $self = {
+ };
+ bless($self, $class);
+}
+
+sub from_file($)
+{
+ my ($path) = @_;
+ my $statistics = {
+ TESTS_UNEXPECTED_OK => 0,
+ TESTS_EXPECTED_OK => 0,
+ TESTS_UNEXPECTED_FAIL => 0,
+ TESTS_EXPECTED_FAIL => 0,
+ TESTS_ERROR => 0,
+ TESTS_SKIP => 0,
+ };
+
+ my $ret = new Subunit::Diff();
+ open(IN, $path) or return;
+ parse_results($ret, $statistics, *IN);
+ close(IN);
+ return $ret;
+}
+
+sub diff($$)
+{
+ my ($old, $new) = @_;
+ my $ret = {};
+
+ foreach my $testname (keys %$old) {
+ if ($new->{$testname} ne $old->{$testname}) {
+ $ret->{$testname} = [$old->{$testname}, $new->{$testname}];
+ }
+ }
+
+ return $ret;
+}
+
+1;
diff --git a/lib/subunit/perl/subunit-diff b/lib/subunit/perl/subunit-diff
new file mode 100755
index 0000000000..581e832ae3
--- /dev/null
+++ b/lib/subunit/perl/subunit-diff
@@ -0,0 +1,31 @@
+#!/usr/bin/perl
+# Diff two subunit streams
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+use Getopt::Long;
+use strict;
+use FindBin qw($RealBin $Script);
+use lib "$RealBin/lib";
+use Subunit::Diff;
+
+my $old = Subunit::Diff::from_file($ARGV[0]);
+my $new = Subunit::Diff::from_file($ARGV[1]);
+
+my $ret = Subunit::Diff::diff($old, $new);
+
+foreach my $e (sort(keys %$ret)) {
+ printf "%s: %s -> %s\n", $e, $ret->{$e}[0], $ret->{$e}[1];
+}
+
+0;
diff --git a/lib/subunit/python/iso8601/LICENSE b/lib/subunit/python/iso8601/LICENSE
new file mode 100644
index 0000000000..5ca93dae79
--- /dev/null
+++ b/lib/subunit/python/iso8601/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2007 Michael Twomey
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/subunit/python/iso8601/README b/lib/subunit/python/iso8601/README
new file mode 100644
index 0000000000..5ec9d45597
--- /dev/null
+++ b/lib/subunit/python/iso8601/README
@@ -0,0 +1,26 @@
+A simple package to deal with ISO 8601 date time formats.
+
+ISO 8601 defines a neutral, unambiguous date string format, which also
+has the property of sorting naturally.
+
+e.g. YYYY-MM-DDTHH:MM:SSZ or 2007-01-25T12:00:00Z
+
+Currently this covers only the most common date formats encountered, not
+all of ISO 8601 is handled.
+
+Currently the following formats are handled:
+
+* 2006-01-01T00:00:00Z
+* 2006-01-01T00:00:00[+-]00:00
+
+I'll add more as I encounter them in my day to day life. Patches with
+new formats and tests will be gratefully accepted of course :)
+
+References:
+
+* http://www.cl.cam.ac.uk/~mgk25/iso-time.html - simple overview
+
+* http://hydracen.com/dx/iso8601.htm - more detailed enumeration of
+ valid formats.
+
+See the LICENSE file for the license this package is released under.
diff --git a/lib/subunit/python/iso8601/README.subunit b/lib/subunit/python/iso8601/README.subunit
new file mode 100644
index 0000000000..d1ed8a11a6
--- /dev/null
+++ b/lib/subunit/python/iso8601/README.subunit
@@ -0,0 +1,5 @@
+This is a [slightly rearranged] import of http://pypi.python.org/pypi/iso8601/
+version 0.1.4. The OS X hidden files have been stripped, and the package
+turned into a single module, to simplify installation. The remainder of the
+source distribution is included in the subunit source tree at python/iso8601
+for reference.
diff --git a/lib/subunit/python/iso8601/setup.py b/lib/subunit/python/iso8601/setup.py
new file mode 100644
index 0000000000..cdb61ecf6a
--- /dev/null
+++ b/lib/subunit/python/iso8601/setup.py
@@ -0,0 +1,58 @@
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils import setup
+
+long_description="""Simple module to parse ISO 8601 dates
+
+This module parses the most common forms of ISO 8601 date strings (e.g.
+2007-01-14T20:34:22+00:00) into datetime objects.
+
+>>> import iso8601
+>>> iso8601.parse_date("2007-01-25T12:00:00Z")
+datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
+>>>
+
+Changes
+=======
+
+0.1.4
+-----
+
+* The default_timezone argument wasn't being passed through correctly,
+ UTC was being used in every case. Fixes issue 10.
+
+0.1.3
+-----
+
+* Fixed the microsecond handling, the generated microsecond values were
+ way too small. Fixes issue 9.
+
+0.1.2
+-----
+
+* Adding ParseError to __all__ in iso8601 module, allows people to import it.
+ Addresses issue 7.
+* Be a little more flexible when dealing with dates without leading zeroes.
+ This violates the spec a little, but handles more dates as seen in the
+ field. Addresses issue 6.
+* Allow date/time separators other than T.
+
+0.1.1
+-----
+
+* When parsing dates without a timezone the specified default is used. If no
+ default is specified then UTC is used. Addresses issue 4.
+"""
+
+setup(
+ name="iso8601",
+ version="0.1.4",
+ description=long_description.split("\n")[0],
+ long_description=long_description,
+ author="Michael Twomey",
+ author_email="micktwomey+iso8601@gmail.com",
+ url="http://code.google.com/p/pyiso8601/",
+ packages=["iso8601"],
+ license="MIT",
+)
diff --git a/lib/subunit/python/iso8601/test_iso8601.py b/lib/subunit/python/iso8601/test_iso8601.py
new file mode 100644
index 0000000000..ff9e2731cf
--- /dev/null
+++ b/lib/subunit/python/iso8601/test_iso8601.py
@@ -0,0 +1,111 @@
+import iso8601
+
+def test_iso8601_regex():
+ assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
+
+def test_timezone_regex():
+ assert iso8601.TIMEZONE_REGEX.match("+01:00")
+ assert iso8601.TIMEZONE_REGEX.match("+00:00")
+ assert iso8601.TIMEZONE_REGEX.match("+01:20")
+ assert iso8601.TIMEZONE_REGEX.match("-01:00")
+
+def test_parse_date():
+ d = iso8601.parse_date("2006-10-20T15:34:56Z")
+ assert d.year == 2006
+ assert d.month == 10
+ assert d.day == 20
+ assert d.hour == 15
+ assert d.minute == 34
+ assert d.second == 56
+ assert d.tzinfo == iso8601.UTC
+
+def test_parse_date_fraction():
+ d = iso8601.parse_date("2006-10-20T15:34:56.123Z")
+ assert d.year == 2006
+ assert d.month == 10
+ assert d.day == 20
+ assert d.hour == 15
+ assert d.minute == 34
+ assert d.second == 56
+ assert d.microsecond == 123000
+ assert d.tzinfo == iso8601.UTC
+
+def test_parse_date_fraction_2():
+ """From bug 6
+
+ """
+ d = iso8601.parse_date("2007-5-7T11:43:55.328Z'")
+ assert d.year == 2007
+ assert d.month == 5
+ assert d.day == 7
+ assert d.hour == 11
+ assert d.minute == 43
+ assert d.second == 55
+ assert d.microsecond == 328000
+ assert d.tzinfo == iso8601.UTC
+
+def test_parse_date_tz():
+ d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30")
+ assert d.year == 2006
+ assert d.month == 10
+ assert d.day == 20
+ assert d.hour == 15
+ assert d.minute == 34
+ assert d.second == 56
+ assert d.microsecond == 123000
+ assert d.tzinfo.tzname(None) == "+02:30"
+ offset = d.tzinfo.utcoffset(None)
+ assert offset.days == 0
+ assert offset.seconds == 60 * 60 * 2.5
+
+def test_parse_invalid_date():
+ try:
+ iso8601.parse_date(None)
+ except iso8601.ParseError:
+ pass
+ else:
+ assert 1 == 2
+
+def test_parse_invalid_date2():
+ try:
+ iso8601.parse_date("23")
+ except iso8601.ParseError:
+ pass
+ else:
+ assert 1 == 2
+
+def test_parse_no_timezone():
+ """issue 4 - Handle datetime string without timezone
+
+ This tests what happens when you parse a date with no timezone. While not
+ strictly correct this is quite common. I'll assume UTC for the time zone
+ in this case.
+ """
+ d = iso8601.parse_date("2007-01-01T08:00:00")
+ assert d.year == 2007
+ assert d.month == 1
+ assert d.day == 1
+ assert d.hour == 8
+ assert d.minute == 0
+ assert d.second == 0
+ assert d.microsecond == 0
+ assert d.tzinfo == iso8601.UTC
+
+def test_parse_no_timezone_different_default():
+ tz = iso8601.FixedOffset(2, 0, "test offset")
+ d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
+ assert d.tzinfo == tz
+
+def test_space_separator():
+ """Handle a separator other than T
+
+ """
+ d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
+ assert d.year == 2007
+ assert d.month == 6
+ assert d.day == 23
+ assert d.hour == 6
+ assert d.minute == 40
+ assert d.second == 34
+ assert d.microsecond == 0
+ assert d.tzinfo == iso8601.UTC
diff --git a/lib/subunit/python/subunit/__init__.py b/lib/subunit/python/subunit/__init__.py
index 406cd8765b..b2c7a29237 100644
--- a/lib/subunit/python/subunit/__init__.py
+++ b/lib/subunit/python/subunit/__init__.py
@@ -1,28 +1,149 @@
#
-# subunit: extensions to python unittest to get test results from subprocesses.
+# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-# Copyright (C) 2007 Jelmer Vernooij <jelmer@samba.org>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
#
+"""Subunit - a streaming test protocol
+
+Overview
+++++++++
+
+The ``subunit`` Python package provides a number of ``unittest`` extensions
+which can be used to cause tests to output Subunit, to parse Subunit streams
+into test activity, perform seamless test isolation within a regular test
+case and variously sort, filter and report on test runs.
+
+
+Key Classes
+-----------
+
+The ``subunit.TestProtocolClient`` class is a ``unittest.TestResult``
+extension which will translate a test run into a Subunit stream.
+
+The ``subunit.ProtocolTestCase`` class is an adapter between the Subunit wire
+protocol and the ``unittest.TestCase`` object protocol. It is used to translate
+a stream into a test run, which regular ``unittest.TestResult`` objects can
+process and report/inspect.
+
+Subunit has support for non-blocking usage too, for use with asyncore or
+Twisted. See the ``TestProtocolServer`` parser class for more details.
+
+Subunit includes extensions to the Python ``TestResult`` protocol. These are
+all done in a compatible manner: ``TestResult`` objects that do not implement
+the extension methods will not cause errors to be raised, instead the extension
+will either lose fidelity (for instance, folding expected failures to success
+in Python versions < 2.7 or 3.1), or discard the extended data (for extra
+details, tags, timestamping and progress markers).
+
+The test outcome methods ``addSuccess``, ``addError``, ``addExpectedFailure``,
+``addFailure``, ``addSkip`` take an optional keyword parameter ``details``
+which can be used instead of the usual python unittest parameter.
+When used the value of details should be a dict from ``string`` to
+``testtools.content.Content`` objects. This is a draft API being worked on with
+the Python Testing In Python mail list, with the goal of permitting a common
+way to provide additional data beyond a traceback, such as captured data from
+disk, logging messages etc. The reference for this API is in testtools (0.9.0
+and newer).
+
+The ``tags(new_tags, gone_tags)`` method is called (if present) to add or
+remove tags in the test run that is currently executing. If called when no
+test is in progress (that is, if called outside of the ``startTest``,
+``stopTest`` pair), the tags apply to all subsequent tests. If called
+when a test is in progress, then the tags only apply to that test.
+
+The ``time(a_datetime)`` method is called (if present) when a ``time:``
+directive is encountered in a Subunit stream. This is used to tell a TestResult
+about the time that events in the stream occurred at, to allow reconstructing
+test timing from a stream.
+
+The ``progress(offset, whence)`` method controls progress data for a stream.
+The offset parameter is an int, and whence is one of subunit.PROGRESS_CUR,
+subunit.PROGRESS_SET, PROGRESS_PUSH, PROGRESS_POP. Push and pop operations
+ignore the offset parameter.
+
+
+Python test support
+-------------------
+
+``subunit.run`` is a convenience wrapper to run a Python test suite via
+the command line, reporting via Subunit::
+
+ $ python -m subunit.run mylib.tests.test_suite
+
+The ``IsolatedTestSuite`` class is a TestSuite that forks before running its
+tests, allowing isolation between the test runner and some tests.
+
+Similarly, ``IsolatedTestCase`` is a base class which can be subclassed to get
+tests that will fork() before that individual test is run.
+
+``ExecTestCase`` is a convenience wrapper for running an external
+program to get a Subunit stream and then report that back to an arbitrary
+result object::
+
+ class AggregateTests(subunit.ExecTestCase):
+
+ def test_script_one(self):
+ './bin/script_one'
+
+ def test_script_two(self):
+ './bin/script_two'
+
+    # Normally your normal test loading would take care of this automatically,
+ # It is only spelt out in detail here for clarity.
+ suite = unittest.TestSuite([AggregateTests("test_script_one"),
+ AggregateTests("test_script_two")])
+ # Create any TestResult class you like.
+ result = unittest._TextTestResult(sys.stdout)
+ # And run your suite as normal, Subunit will exec each external script as
+ # needed and report to your result object.
+ suite.run(result)
+
+Utility modules
+---------------
+
+* subunit.chunked contains HTTP chunked encoding/decoding logic.
+* subunit.test_results contains TestResult helper classes.
+"""
+
+import datetime
import os
+import re
from StringIO import StringIO
+import subprocess
import sys
import unittest
+import iso8601
+from testtools import content, content_type, ExtendedToOriginalDecorator
+try:
+ from testtools.testresult.real import _StringException
+ RemoteException = _StringException
+ _remote_exception_str = '_StringException' # For testing.
+except ImportError:
+ raise ImportError ("testtools.testresult.real does not contain "
+ "_StringException, check your version.")
+from testtools import testresult
+
+import chunked, details, test_results
+
+
+PROGRESS_SET = 0
+PROGRESS_CUR = 1
+PROGRESS_PUSH = 2
+PROGRESS_POP = 3
+
+
def test_suite():
import subunit.tests
return subunit.tests.test_suite()
@@ -42,211 +163,567 @@ def join_dir(base_path, path):
return os.path.join(os.path.dirname(os.path.abspath(base_path)), path)
-class TestProtocolServer(object):
- """A class for receiving results from a TestProtocol client."""
-
- OUTSIDE_TEST = 0
- TEST_STARTED = 1
- READING_FAILURE = 2
- READING_ERROR = 3
-
- def __init__(self, client, stream=sys.stdout):
- """Create a TestProtocol server instance.
-
- client should be an object that provides
- - startTest
- - addSuccess
- - addFailure
- - addError
- - stopTest
- methods, i.e. a TestResult.
- """
- self.state = TestProtocolServer.OUTSIDE_TEST
- self.client = client
- self._stream = stream
-
- def _addError(self, offset, line):
- if (self.state == TestProtocolServer.TEST_STARTED and
- self.current_test_description == line[offset:-1]):
- self.state = TestProtocolServer.OUTSIDE_TEST
- self.current_test_description = None
- self.client.addError(self._current_test, RemoteError(""))
- self.client.stopTest(self._current_test)
- self._current_test = None
- elif (self.state == TestProtocolServer.TEST_STARTED and
- self.current_test_description + " [" == line[offset:-1]):
- self.state = TestProtocolServer.READING_ERROR
- self._message = ""
+def tags_to_new_gone(tags):
+ """Split a list of tags into a new_set and a gone_set."""
+ new_tags = set()
+ gone_tags = set()
+ for tag in tags:
+ if tag[0] == '-':
+ gone_tags.add(tag[1:])
else:
- self.stdOutLineReceived(line)
-
- def _addFailure(self, offset, line):
- if (self.state == TestProtocolServer.TEST_STARTED and
- self.current_test_description == line[offset:-1]):
- self.state = TestProtocolServer.OUTSIDE_TEST
- self.current_test_description = None
- self.client.addFailure(self._current_test, RemoteError())
- self.client.stopTest(self._current_test)
- elif (self.state == TestProtocolServer.TEST_STARTED and
- self.current_test_description + " [" == line[offset:-1]):
- self.state = TestProtocolServer.READING_FAILURE
- self._message = ""
- else:
- self.stdOutLineReceived(line)
-
- def _addSuccess(self, offset, line):
- if (self.state == TestProtocolServer.TEST_STARTED and
- self.current_test_description == line[offset:-1]):
- self.client.addSuccess(self._current_test)
- self.client.stopTest(self._current_test)
- self.current_test_description = None
- self._current_test = None
- self.state = TestProtocolServer.OUTSIDE_TEST
+ new_tags.add(tag)
+ return new_tags, gone_tags
+
+
+class DiscardStream(object):
+ """A filelike object which discards what is written to it."""
+
+ def write(self, bytes):
+ pass
+
+
+class _ParserState(object):
+ """State for the subunit parser."""
+
+ def __init__(self, parser):
+ self.parser = parser
+
+ def addError(self, offset, line):
+ """An 'error:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addExpectedFail(self, offset, line):
+ """An 'xfail:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addFailure(self, offset, line):
+ """A 'failure:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addSkip(self, offset, line):
+ """A 'skip:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addSuccess(self, offset, line):
+ """A 'success:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def lineReceived(self, line):
+ """a line has been received."""
+ parts = line.split(None, 1)
+ if len(parts) == 2 and line.startswith(parts[0]):
+ cmd, rest = parts
+ offset = len(cmd) + 1
+ cmd = cmd.rstrip(':')
+ if cmd in ('test', 'testing'):
+ self.startTest(offset, line)
+ elif cmd == 'error':
+ self.addError(offset, line)
+ elif cmd == 'failure':
+ self.addFailure(offset, line)
+ elif cmd == 'progress':
+ self.parser._handleProgress(offset, line)
+ elif cmd == 'skip':
+ self.addSkip(offset, line)
+ elif cmd in ('success', 'successful'):
+ self.addSuccess(offset, line)
+ elif cmd in ('tags',):
+ self.parser._handleTags(offset, line)
+ self.parser.subunitLineReceived(line)
+ elif cmd in ('time',):
+ self.parser._handleTime(offset, line)
+ self.parser.subunitLineReceived(line)
+ elif cmd == 'xfail':
+ self.addExpectedFail(offset, line)
+ else:
+ self.parser.stdOutLineReceived(line)
else:
- self.stdOutLineReceived(line)
+ self.parser.stdOutLineReceived(line)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(u'unknown state of ')
+
+ def startTest(self, offset, line):
+ """A test start command received."""
+ self.parser.stdOutLineReceived(line)
+
- def _appendMessage(self, line):
- if line[0:2] == " ]":
- # quoted ] start
- self._message += line[1:]
+class _InTest(_ParserState):
+ """State for the subunit parser after reading a test: directive."""
+
+ def _outcome(self, offset, line, no_details, details_state):
+ """An outcome directive has been read.
+
+ :param no_details: Callable to call when no details are presented.
+ :param details_state: The state to switch to for details
+ processing of this outcome.
+ """
+ if self.parser.current_test_description == line[offset:-1]:
+ self.parser._state = self.parser._outside_test
+ self.parser.current_test_description = None
+ no_details()
+ self.parser.client.stopTest(self.parser._current_test)
+ self.parser._current_test = None
+ self.parser.subunitLineReceived(line)
+ elif self.parser.current_test_description + " [" == line[offset:-1]:
+ self.parser._state = details_state
+ details_state.set_simple()
+ self.parser.subunitLineReceived(line)
+ elif self.parser.current_test_description + " [ multipart" == \
+ line[offset:-1]:
+ self.parser._state = details_state
+ details_state.set_multipart()
+ self.parser.subunitLineReceived(line)
else:
- self._message += line
-
- def endQuote(self, line):
- if self.state == TestProtocolServer.READING_FAILURE:
- self.state = TestProtocolServer.OUTSIDE_TEST
- self.current_test_description = None
- self.client.addFailure(self._current_test,
- RemoteError(self._message))
- self.client.stopTest(self._current_test)
- elif self.state == TestProtocolServer.READING_ERROR:
- self.state = TestProtocolServer.OUTSIDE_TEST
- self.current_test_description = None
- self.client.addError(self._current_test,
- RemoteError(self._message))
- self.client.stopTest(self._current_test)
+ self.parser.stdOutLineReceived(line)
+
+ def _error(self):
+ self.parser.client.addError(self.parser._current_test,
+ details={})
+
+ def addError(self, offset, line):
+ """An 'error:' directive has been read."""
+ self._outcome(offset, line, self._error,
+ self.parser._reading_error_details)
+
+ def _xfail(self):
+ self.parser.client.addExpectedFailure(self.parser._current_test,
+ details={})
+
+ def addExpectedFail(self, offset, line):
+ """An 'xfail:' directive has been read."""
+ self._outcome(offset, line, self._xfail,
+ self.parser._reading_xfail_details)
+
+ def _failure(self):
+ self.parser.client.addFailure(self.parser._current_test, details={})
+
+ def addFailure(self, offset, line):
+ """A 'failure:' directive has been read."""
+ self._outcome(offset, line, self._failure,
+ self.parser._reading_failure_details)
+
+ def _skip(self):
+ self.parser.client.addSkip(self.parser._current_test, details={})
+
+ def addSkip(self, offset, line):
+ """A 'skip:' directive has been read."""
+ self._outcome(offset, line, self._skip,
+ self.parser._reading_skip_details)
+
+ def _succeed(self):
+ self.parser.client.addSuccess(self.parser._current_test, details={})
+
+ def addSuccess(self, offset, line):
+ """A 'success:' directive has been read."""
+ self._outcome(offset, line, self._succeed,
+ self.parser._reading_success_details)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(u'')
+
+
+class _OutSideTest(_ParserState):
+ """State for the subunit parser outside of a test context."""
+
+ def lostConnection(self):
+ """Connection lost."""
+
+ def startTest(self, offset, line):
+ """A test start command received."""
+ self.parser._state = self.parser._in_test
+ self.parser._current_test = RemotedTestCase(line[offset:-1])
+ self.parser.current_test_description = line[offset:-1]
+ self.parser.client.startTest(self.parser._current_test)
+ self.parser.subunitLineReceived(line)
+
+
+class _ReadingDetails(_ParserState):
+    """Common logic for reading state details."""
+
+ def endDetails(self):
+ """The end of a details section has been reached."""
+ self.parser._state = self.parser._outside_test
+ self.parser.current_test_description = None
+ self._report_outcome()
+ self.parser.client.stopTest(self.parser._current_test)
+
+ def lineReceived(self, line):
+ """a line has been received."""
+ self.details_parser.lineReceived(line)
+ self.parser.subunitLineReceived(line)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(u'%s report of ' %
+ self._outcome_label())
+
+ def _outcome_label(self):
+ """The label to describe this outcome."""
+ raise NotImplementedError(self._outcome_label)
+
+ def set_simple(self):
+ """Start a simple details parser."""
+ self.details_parser = details.SimpleDetailsParser(self)
+
+ def set_multipart(self):
+ """Start a multipart details parser."""
+ self.details_parser = details.MultipartDetailsParser(self)
+
+
+class _ReadingFailureDetails(_ReadingDetails):
+ """State for the subunit parser when reading failure details."""
+
+ def _report_outcome(self):
+ self.parser.client.addFailure(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "failure"
+
+
+class _ReadingErrorDetails(_ReadingDetails):
+ """State for the subunit parser when reading error details."""
+
+ def _report_outcome(self):
+ self.parser.client.addError(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "error"
+
+
+class _ReadingExpectedFailureDetails(_ReadingDetails):
+ """State for the subunit parser when reading xfail details."""
+
+ def _report_outcome(self):
+ self.parser.client.addExpectedFailure(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "xfail"
+
+
+class _ReadingSkipDetails(_ReadingDetails):
+ """State for the subunit parser when reading skip details."""
+
+ def _report_outcome(self):
+ self.parser.client.addSkip(self.parser._current_test,
+ details=self.details_parser.get_details("skip"))
+
+ def _outcome_label(self):
+ return "skip"
+
+
+class _ReadingSuccessDetails(_ReadingDetails):
+ """State for the subunit parser when reading success details."""
+
+ def _report_outcome(self):
+ self.parser.client.addSuccess(self.parser._current_test,
+ details=self.details_parser.get_details("success"))
+
+ def _outcome_label(self):
+ return "success"
+
+
+class TestProtocolServer(object):
+ """A parser for subunit.
+
+ :ivar tags: The current tags associated with the protocol stream.
+ """
+
+ def __init__(self, client, stream=None, forward_stream=None):
+ """Create a TestProtocolServer instance.
+
+ :param client: An object meeting the unittest.TestResult protocol.
+ :param stream: The stream that lines received which are not part of the
+ subunit protocol should be written to. This allows custom handling
+ of mixed protocols. By default, sys.stdout will be used for
+ convenience.
+ :param forward_stream: A stream to forward subunit lines to. This
+ allows a filter to forward the entire stream while still parsing
+ and acting on it. By default forward_stream is set to
+ DiscardStream() and no forwarding happens.
+ """
+ self.client = ExtendedToOriginalDecorator(client)
+ if stream is None:
+ stream = sys.stdout
+ self._stream = stream
+ self._forward_stream = forward_stream or DiscardStream()
+ # state objects we can switch too
+ self._in_test = _InTest(self)
+ self._outside_test = _OutSideTest(self)
+ self._reading_error_details = _ReadingErrorDetails(self)
+ self._reading_failure_details = _ReadingFailureDetails(self)
+ self._reading_skip_details = _ReadingSkipDetails(self)
+ self._reading_success_details = _ReadingSuccessDetails(self)
+ self._reading_xfail_details = _ReadingExpectedFailureDetails(self)
+ # start with outside test.
+ self._state = self._outside_test
+
+ def _handleProgress(self, offset, line):
+ """Process a progress directive."""
+ line = line[offset:].strip()
+ if line[0] in '+-':
+ whence = PROGRESS_CUR
+ delta = int(line)
+ elif line == "push":
+ whence = PROGRESS_PUSH
+ delta = None
+ elif line == "pop":
+ whence = PROGRESS_POP
+ delta = None
else:
- self.stdOutLineReceived(line)
+ whence = PROGRESS_SET
+ delta = int(line)
+ self.client.progress(delta, whence)
+
+ def _handleTags(self, offset, line):
+ """Process a tags command."""
+ tags = line[offset:].split()
+ new_tags, gone_tags = tags_to_new_gone(tags)
+ self.client.tags(new_tags, gone_tags)
+
+ def _handleTime(self, offset, line):
+ # Accept it, but do not do anything with it yet.
+ try:
+ event_time = iso8601.parse_date(line[offset:-1])
+ except TypeError, e:
+ raise TypeError("Failed to parse %r, got %r" % (line, e))
+ self.client.time(event_time)
def lineReceived(self, line):
"""Call the appropriate local method for the received line."""
- if line == "]\n":
- self.endQuote(line)
- elif (self.state == TestProtocolServer.READING_FAILURE or
- self.state == TestProtocolServer.READING_ERROR):
- self._appendMessage(line)
- else:
- parts = line.split(None, 1)
- if len(parts) == 2:
- cmd, rest = parts
- offset = len(cmd) + 1
- cmd = cmd.strip(':')
- if cmd in ('test', 'testing'):
- self._startTest(offset, line)
- elif cmd == 'error':
- self._addError(offset, line)
- elif cmd == 'failure':
- self._addFailure(offset, line)
- elif cmd in ('success', 'successful'):
- self._addSuccess(offset, line)
- else:
- self.stdOutLineReceived(line)
- else:
- self.stdOutLineReceived(line)
+ self._state.lineReceived(line)
+
+ def _lostConnectionInTest(self, state_string):
+ error_string = u"lost connection during %stest '%s'" % (
+ state_string, self.current_test_description)
+ self.client.addError(self._current_test, RemoteError(error_string))
+ self.client.stopTest(self._current_test)
def lostConnection(self):
"""The input connection has finished."""
- if self.state == TestProtocolServer.TEST_STARTED:
- self.client.addError(self._current_test,
- RemoteError("lost connection during test '%s'"
- % self.current_test_description))
- self.client.stopTest(self._current_test)
- elif self.state == TestProtocolServer.READING_ERROR:
- self.client.addError(self._current_test,
- RemoteError("lost connection during "
- "error report of test "
- "'%s'" %
- self.current_test_description))
- self.client.stopTest(self._current_test)
- elif self.state == TestProtocolServer.READING_FAILURE:
- self.client.addError(self._current_test,
- RemoteError("lost connection during "
- "failure report of test "
- "'%s'" %
- self.current_test_description))
- self.client.stopTest(self._current_test)
+ self._state.lostConnection()
def readFrom(self, pipe):
+ """Blocking convenience API to parse an entire stream.
+
+ :param pipe: A file-like object supporting readlines().
+ :return: None.
+ """
for line in pipe.readlines():
self.lineReceived(line)
self.lostConnection()
def _startTest(self, offset, line):
"""Internal call to change state machine. Override startTest()."""
- if self.state == TestProtocolServer.OUTSIDE_TEST:
- self.state = TestProtocolServer.TEST_STARTED
- self._current_test = RemotedTestCase(line[offset:-1])
- self.current_test_description = line[offset:-1]
- self.client.startTest(self._current_test)
- else:
- self.stdOutLineReceived(line)
+ self._state.startTest(offset, line)
+
+ def subunitLineReceived(self, line):
+ self._forward_stream.write(line)
def stdOutLineReceived(self, line):
self._stream.write(line)
-class RemoteException(Exception):
- """An exception that occured remotely to python."""
-
- def __eq__(self, other):
- try:
- return self.args == other.args
- except AttributeError:
- return False
-
-
-class TestProtocolClient(unittest.TestResult):
- """A class that looks like a TestResult and informs a TestProtocolServer."""
+class TestProtocolClient(testresult.TestResult):
+ """A TestResult which generates a subunit stream for a test run.
+
+ # Get a TestSuite or TestCase to run
+ suite = make_suite()
+ # Create a stream (any object with a 'write' method)
+ stream = file('tests.log', 'wb')
+ # Create a subunit result object which will output to the stream
+ result = subunit.TestProtocolClient(stream)
+ # Optionally, to get timing data for performance analysis, wrap the
+ # serialiser with a timing decorator
+ result = subunit.test_results.AutoTimingTestResultDecorator(result)
+ # Run the test suite reporting to the subunit result object
+ suite.run(result)
+ # Close the stream.
+ stream.close()
+ """
def __init__(self, stream):
- super(TestProtocolClient, self).__init__()
+ testresult.TestResult.__init__(self)
self._stream = stream
-
- def addError(self, test, error):
- """Report an error in test test."""
- self._stream.write("error: %s [\n" % (test.shortDescription() or str(test)))
- for line in self._exc_info_to_string(error, test).splitlines():
- self._stream.write("%s\n" % line)
+ _make_stream_binary(stream)
+
+ def addError(self, test, error=None, details=None):
+ """Report an error in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addError(self, test, error)
+ addError(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("error", test, error=error, details=details)
+
+ def addExpectedFailure(self, test, error=None, details=None):
+ """Report an expected failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addError(self, test, error)
+ addError(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("xfail", test, error=error, details=details)
+
+ def addFailure(self, test, error=None, details=None):
+ """Report a failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addFailure(self, test, error)
+ addFailure(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("failure", test, error=error, details=details)
+
+ def _addOutcome(self, outcome, test, error=None, details=None):
+ """Report a failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addOutcome(self, test, error)
+ addOutcome(self, test, details)
+
+ :param outcome: A string describing the outcome - used as the
+ event name in the subunit stream.
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._stream.write("%s: %s" % (outcome, test.id()))
+ if error is None and details is None:
+ raise ValueError
+ if error is not None:
+ self._stream.write(" [\n")
+ # XXX: this needs to be made much stricter, along the lines of
+ # Martin[gz]'s work in testtools. Perhaps subunit can use that?
+ for line in self._exc_info_to_unicode(error, test).splitlines():
+ self._stream.write(("%s\n" % line).encode('utf8'))
+ else:
+ self._write_details(details)
self._stream.write("]\n")
- super(TestProtocolClient, self).addError(test, error)
- def addFailure(self, test, error):
- """Report a failure in test test."""
- self._stream.write("failure: %s [\n" % (test.shortDescription() or str(test)))
- for line in self._exc_info_to_string(error, test).splitlines():
- self._stream.write("%s\n" % line)
- self._stream.write("]\n")
- super(TestProtocolClient, self).addFailure(test, error)
+ def addSkip(self, test, reason=None, details=None):
+ """Report a skipped test."""
+ if reason is None:
+ self._addOutcome("skip", test, error=None, details=details)
+ else:
+ self._stream.write("skip: %s [\n" % test.id())
+ self._stream.write("%s\n" % reason)
+ self._stream.write("]\n")
- def addSuccess(self, test):
+ def addSuccess(self, test, details=None):
"""Report a success in a test."""
- self._stream.write("successful: %s\n" % (test.shortDescription() or str(test)))
- super(TestProtocolClient, self).addSuccess(test)
+ self._stream.write("successful: %s" % test.id())
+ if not details:
+ self._stream.write("\n")
+ else:
+ self._write_details(details)
+ self._stream.write("]\n")
+ addUnexpectedSuccess = addSuccess
def startTest(self, test):
"""Mark a test as starting its test run."""
- self._stream.write("test: %s\n" % (test.shortDescription() or str(test)))
super(TestProtocolClient, self).startTest(test)
+ self._stream.write("test: %s\n" % test.id())
+ self._stream.flush()
+
+ def stopTest(self, test):
+ super(TestProtocolClient, self).stopTest(test)
+ self._stream.flush()
+
+ def progress(self, offset, whence):
+ """Provide indication about the progress/length of the test run.
+
+ :param offset: Information about the number of tests remaining. If
+ whence is PROGRESS_CUR, then offset increases/decreases the
+ remaining test count. If whence is PROGRESS_SET, then offset
+ specifies exactly the remaining test count.
+ :param whence: One of PROGRESS_CUR, PROGRESS_SET, PROGRESS_PUSH,
+ PROGRESS_POP.
+ """
+ if whence == PROGRESS_CUR and offset > -1:
+ prefix = "+"
+ elif whence == PROGRESS_PUSH:
+ prefix = ""
+ offset = "push"
+ elif whence == PROGRESS_POP:
+ prefix = ""
+ offset = "pop"
+ else:
+ prefix = ""
+ self._stream.write("progress: %s%s\n" % (prefix, offset))
+ def time(self, a_datetime):
+ """Inform the client of the time.
-def RemoteError(description=""):
- if description == "":
- description = "\n"
- return (RemoteException, RemoteException(description), None)
+ ":param datetime: A datetime.datetime object.
+ """
+ time = a_datetime.astimezone(iso8601.Utc())
+ self._stream.write("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
+ time.year, time.month, time.day, time.hour, time.minute,
+ time.second, time.microsecond))
+
+ def _write_details(self, details):
+ """Output details to the stream.
+
+ :param details: An extended details dict for a test outcome.
+ """
+ self._stream.write(" [ multipart\n")
+ for name, content in sorted(details.iteritems()):
+ self._stream.write("Content-Type: %s/%s" %
+ (content.content_type.type, content.content_type.subtype))
+ parameters = content.content_type.parameters
+ if parameters:
+ self._stream.write(";")
+ param_strs = []
+ for param, value in parameters.iteritems():
+ param_strs.append("%s=%s" % (param, value))
+ self._stream.write(",".join(param_strs))
+ self._stream.write("\n%s\n" % name)
+ encoder = chunked.Encoder(self._stream)
+ map(encoder.write, content.iter_bytes())
+ encoder.close()
+
+ def done(self):
+ """Obey the testtools result.done() interface."""
+
+
+def RemoteError(description=u""):
+ return (_StringException, _StringException(description), None)
class RemotedTestCase(unittest.TestCase):
- """A class to represent test cases run in child processes."""
+ """A class to represent test cases run in child processes.
+
+ Instances of this class are used to provide the Python test API a TestCase
+ that can be printed to the screen, introspected for metadata and so on.
+ However, as they are simply a memoisation of a test that was actually
+ run in the past by a separate process, they cannot perform any interactive
+ actions.
+ """
def __eq__ (self, other):
try:
@@ -272,7 +749,7 @@ class RemotedTestCase(unittest.TestCase):
return self.__description
def id(self):
- return "%s.%s" % (self._strclass(), self.__description)
+ return "%s" % (self.__description,)
def __str__(self):
return "%s (%s)" % (self.__description, self._strclass())
@@ -284,7 +761,7 @@ class RemotedTestCase(unittest.TestCase):
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
- result.addError(self, RemoteError("Cannot run RemotedTestCases.\n"))
+ result.addError(self, RemoteError(u"Cannot run RemotedTestCases.\n"))
result.stopTest(self)
def _strclass(self):
@@ -314,16 +791,22 @@ class ExecTestCase(unittest.TestCase):
def debug(self):
"""Run the test without collecting errors in a TestResult"""
- self._run(unittest.TestResult())
+ self._run(testresult.TestResult())
def _run(self, result):
protocol = TestProtocolServer(result)
- output = os.popen(self.script, mode='r')
- protocol.readFrom(output)
+ output = subprocess.Popen(self.script, shell=True,
+ stdout=subprocess.PIPE).communicate()[0]
+ protocol.readFrom(StringIO(output))
class IsolatedTestCase(unittest.TestCase):
- """A TestCase which runs its tests in a forked process."""
+ """A TestCase which executes in a forked process.
+
+ Each test gets its own process, which has a performance overhead but will
+ provide excellent isolation from global state (such as django configs,
+ zope utilities and so on).
+ """
def run(self, result=None):
if result is None: result = self.defaultTestResult()
@@ -331,10 +814,16 @@ class IsolatedTestCase(unittest.TestCase):
class IsolatedTestSuite(unittest.TestSuite):
- """A TestCase which runs its tests in a forked process."""
+ """A TestSuite which runs its tests in a forked process.
+
+ This decorator that will fork() before running the tests and report the
+ results from the child process using a Subunit stream. This is useful for
+ handling tests that mutate global state, or are testing C extensions that
+ could crash the VM.
+ """
def run(self, result=None):
- if result is None: result = unittest.TestResult()
+ if result is None: result = testresult.TestResult()
run_isolated(unittest.TestSuite, self, result)
@@ -376,13 +865,282 @@ def run_isolated(klass, self, result):
return result
-class SubunitTestRunner(object):
- def __init__(self, stream=sys.stdout):
- self.stream = stream
+def TAP2SubUnit(tap, subunit):
+ """Filter a TAP pipe into a subunit pipe.
+
+ :param tap: A tap pipe/stream/file object.
+ :param subunit: A pipe/stream/file object to write subunit results to.
+ :return: The exit code to exit with.
+ """
+ BEFORE_PLAN = 0
+ AFTER_PLAN = 1
+ SKIP_STREAM = 2
+ client = TestProtocolClient(subunit)
+ state = BEFORE_PLAN
+ plan_start = 1
+ plan_stop = 0
+ def _skipped_test(subunit, plan_start):
+ # Some tests were skipped.
+ subunit.write('test test %d\n' % plan_start)
+ subunit.write('error test %d [\n' % plan_start)
+ subunit.write('test missing from TAP output\n')
+ subunit.write(']\n')
+ return plan_start + 1
+ # Test data for the next test to emit
+ test_name = None
+ log = []
+ result = None
+ def _emit_test():
+ "write out a test"
+ if test_name is None:
+ return
+ subunit.write("test %s\n" % test_name)
+ if not log:
+ subunit.write("%s %s\n" % (result, test_name))
+ else:
+ subunit.write("%s %s [\n" % (result, test_name))
+ if log:
+ for line in log:
+ subunit.write("%s\n" % line)
+ subunit.write("]\n")
+ del log[:]
+ for line in tap:
+ if state == BEFORE_PLAN:
+ match = re.match("(\d+)\.\.(\d+)\s*(?:\#\s+(.*))?\n", line)
+ if match:
+ state = AFTER_PLAN
+ _, plan_stop, comment = match.groups()
+ plan_stop = int(plan_stop)
+ if plan_start > plan_stop and plan_stop == 0:
+ # skipped file
+ state = SKIP_STREAM
+ subunit.write("test file skip\n")
+ subunit.write("skip file skip [\n")
+ subunit.write("%s\n" % comment)
+ subunit.write("]\n")
+ continue
+ # not a plan line, or have seen one before
+ match = re.match("(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
+ if match:
+ # new test, emit current one.
+ _emit_test()
+ status, number, description, directive, directive_comment = match.groups()
+ if status == 'ok':
+ result = 'success'
+ else:
+ result = "failure"
+ if description is None:
+ description = ''
+ else:
+ description = ' ' + description
+ if directive is not None:
+ if directive.upper() == 'TODO':
+ result = 'xfail'
+ elif directive.upper() == 'SKIP':
+ result = 'skip'
+ if directive_comment is not None:
+ log.append(directive_comment)
+ if number is not None:
+ number = int(number)
+ while plan_start < number:
+ plan_start = _skipped_test(subunit, plan_start)
+ test_name = "test %d%s" % (plan_start, description)
+ plan_start += 1
+ continue
+ match = re.match("Bail out\!(?:\s*(.*))?\n", line)
+ if match:
+ reason, = match.groups()
+ if reason is None:
+ extra = ''
+ else:
+ extra = ' %s' % reason
+ _emit_test()
+ test_name = "Bail out!%s" % extra
+ result = "error"
+ state = SKIP_STREAM
+ continue
+ match = re.match("\#.*\n", line)
+ if match:
+ log.append(line[:-1])
+ continue
+ subunit.write(line)
+ _emit_test()
+ while plan_start <= plan_stop:
+ # record missed tests
+ plan_start = _skipped_test(subunit, plan_start)
+ return 0
+
+
+def tag_stream(original, filtered, tags):
+ """Alter tags on a stream.
+
+ :param original: The input stream.
+ :param filtered: The output stream.
+ :param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
+ '-TAG' commands.
+
+ A 'TAG' command will add the tag to the output stream,
+ and override any existing '-TAG' command in that stream.
+ Specifically:
+ * A global 'tags: TAG' will be added to the start of the stream.
+ * Any tags commands with -TAG will have the -TAG removed.
+
+ A '-TAG' command will remove the TAG command from the stream.
+ Specifically:
+ * A 'tags: -TAG' command will be added to the start of the stream.
+ * Any 'tags: TAG' command will have 'TAG' removed from it.
+ Additionally, any redundant tagging commands (adding a tag globally
+ present, or removing a tag globally removed) are stripped as a
+ by-product of the filtering.
+ :return: 0
+ """
+ new_tags, gone_tags = tags_to_new_gone(tags)
+ def write_tags(new_tags, gone_tags):
+ if new_tags or gone_tags:
+ filtered.write("tags: " + ' '.join(new_tags))
+ if gone_tags:
+ for tag in gone_tags:
+ filtered.write("-" + tag)
+ filtered.write("\n")
+ write_tags(new_tags, gone_tags)
+ # TODO: use the protocol parser and thus don't mangle test comments.
+ for line in original:
+ if line.startswith("tags:"):
+ line_tags = line[5:].split()
+ line_new, line_gone = tags_to_new_gone(line_tags)
+ line_new = line_new - gone_tags
+ line_gone = line_gone - new_tags
+ write_tags(line_new, line_gone)
+ else:
+ filtered.write(line)
+ return 0
+
+
+class ProtocolTestCase(object):
+ """Subunit wire protocol to unittest.TestCase adapter.
+
+ ProtocolTestCase honours the core of ``unittest.TestCase`` protocol -
+ calling a ProtocolTestCase or invoking the run() method will make a 'test
+ run' happen. The 'test run' will simply be a replay of the test activity
+ that has been encoded into the stream. The ``unittest.TestCase`` ``debug``
+ and ``countTestCases`` methods are not supported because there isn't a
+ sensible mapping for those methods.
+
+ # Get a stream (any object with a readline() method), in this case the
+ # stream output by the example from ``subunit.TestProtocolClient``.
+ stream = file('tests.log', 'rb')
+ # Create a parser which will read from the stream and emit
+ # activity to a unittest.TestResult when run() is called.
+ suite = subunit.ProtocolTestCase(stream)
+ # Create a result object to accept the contents of that stream.
+ result = unittest._TextTestResult(sys.stdout)
+ # 'run' the tests - process the stream and feed its contents to result.
+ suite.run(result)
+ stream.close()
+
+ :seealso: TestProtocolServer (the subunit wire protocol parser).
+ """
+
+ def __init__(self, stream, passthrough=None, forward=False):
+ """Create a ProtocolTestCase reading from stream.
+
+ :param stream: A filelike object which a subunit stream can be read
+ from.
+ :param passthrough: A stream pass non subunit input on to. If not
+ supplied, the TestProtocolServer default is used.
+ :param forward: A stream to pass subunit input on to. If not supplied
+ subunit input is not forwarded.
+ """
+ self._stream = stream
+ _make_stream_binary(stream)
+ self._passthrough = passthrough
+ self._forward = forward
+ _make_stream_binary(forward)
+
+ def __call__(self, result=None):
+ return self.run(result)
+
+ def run(self, result=None):
+ if result is None:
+ result = self.defaultTestResult()
+ protocol = TestProtocolServer(result, self._passthrough, self._forward)
+ line = self._stream.readline()
+ while line:
+ protocol.lineReceived(line)
+ line = self._stream.readline()
+ protocol.lostConnection()
+
+
+class TestResultStats(testresult.TestResult):
+ """A pyunit TestResult interface implementation for making statistics.
+
+ :ivar total_tests: The total tests seen.
+ :ivar passed_tests: The tests that passed.
+ :ivar failed_tests: The tests that failed.
+ :ivar seen_tags: The tags seen across all tests.
+ """
+
+ def __init__(self, stream):
+ """Create a TestResultStats which outputs to stream."""
+ testresult.TestResult.__init__(self)
+ self._stream = stream
+ self.failed_tests = 0
+ self.skipped_tests = 0
+ self.seen_tags = set()
+
+ @property
+ def total_tests(self):
+ return self.testsRun
+
+ def addError(self, test, err, details=None):
+ self.failed_tests += 1
+
+ def addFailure(self, test, err, details=None):
+ self.failed_tests += 1
+
+ def addSkip(self, test, reason, details=None):
+ self.skipped_tests += 1
+
+ def formatStats(self):
+ self._stream.write("Total tests: %5d\n" % self.total_tests)
+ self._stream.write("Passed tests: %5d\n" % self.passed_tests)
+ self._stream.write("Failed tests: %5d\n" % self.failed_tests)
+ self._stream.write("Skipped tests: %5d\n" % self.skipped_tests)
+ tags = sorted(self.seen_tags)
+ self._stream.write("Seen tags: %s\n" % (", ".join(tags)))
+
+ @property
+ def passed_tests(self):
+ return self.total_tests - self.failed_tests - self.skipped_tests
+
+ def tags(self, new_tags, gone_tags):
+ """Accumulate the seen tags."""
+ self.seen_tags.update(new_tags)
+
+ def wasSuccessful(self):
+ """Tells whether or not this result was a success"""
+ return self.failed_tests == 0
+
+
+def get_default_formatter():
+ """Obtain the default formatter to write to.
+
+ :return: A file-like object.
+ """
+ formatter = os.getenv("SUBUNIT_FORMATTER")
+ if formatter:
+ return os.popen(formatter, "w")
+ else:
+ return sys.stdout
+
- def run(self, test):
- "Run the given test case or test suite."
- result = TestProtocolClient(self.stream)
- test(result)
- return result
+def _make_stream_binary(stream):
+ """Ensure that a stream will be binary safe. See _make_binary_on_windows."""
+ if getattr(stream, 'fileno', None) is not None:
+ _make_binary_on_windows(stream.fileno())
+def _make_binary_on_windows(fileno):
+ """Win32 mangles \r\n to \n and that breaks streams. See bug lp:505078."""
+ if sys.platform == "win32":
+ import msvcrt
+ msvcrt.setmode(fileno, os.O_BINARY)
diff --git a/lib/subunit/python/subunit/chunked.py b/lib/subunit/python/subunit/chunked.py
new file mode 100644
index 0000000000..82e4b0ddfc
--- /dev/null
+++ b/lib/subunit/python/subunit/chunked.py
@@ -0,0 +1,164 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Encoder/decoder for http style chunked encoding."""
+
+class Decoder(object):
+ """Decode chunked content to a byte stream."""
+
+ def __init__(self, output):
+ """Create a decoder decoding to output.
+
+ :param output: A file-like object. Bytes written to the Decoder are
+ decoded to strip off the chunking and written to the output.
+ Up to a full write worth of data or a single control line may be
+ buffered (whichever is larger). The close method should be called
+ when no more data is available, to detect short streams; the
+ write method will return non-None when the end of a stream is
+ detected.
+ """
+ self.output = output
+ self.buffered_bytes = []
+ self.state = self._read_length
+ self.body_length = 0
+
+ def close(self):
+ """Close the decoder.
+
+ :raises ValueError: If the stream is incomplete ValueError is raised.
+ """
+ if self.state != self._finished:
+ raise ValueError("incomplete stream")
+
+ def _finished(self):
+ """Finished reading, return any remaining bytes."""
+ if self.buffered_bytes:
+ buffered_bytes = self.buffered_bytes
+ self.buffered_bytes = []
+ return ''.join(buffered_bytes)
+ else:
+ raise ValueError("stream is finished")
+
+ def _read_body(self):
+ """Pass body bytes to the output."""
+ while self.body_length and self.buffered_bytes:
+ if self.body_length >= len(self.buffered_bytes[0]):
+ self.output.write(self.buffered_bytes[0])
+ self.body_length -= len(self.buffered_bytes[0])
+ del self.buffered_bytes[0]
+ # No more data available.
+ if not self.body_length:
+ self.state = self._read_length
+ else:
+ self.output.write(self.buffered_bytes[0][:self.body_length])
+ self.buffered_bytes[0] = \
+ self.buffered_bytes[0][self.body_length:]
+ self.body_length = 0
+ self.state = self._read_length
+ return self.state()
+
+ def _read_length(self):
+ """Try to decode a length from the bytes."""
+ count = -1
+ match_chars = "0123456789abcdefABCDEF\r\n"
+ count_chars = []
+ for bytes in self.buffered_bytes:
+ for byte in bytes:
+ if byte not in match_chars:
+ break
+ count_chars.append(byte)
+ if byte == '\n':
+ break
+ if not count_chars:
+ return
+ if count_chars[-1][-1] != '\n':
+ return
+ count_str = ''.join(count_chars)
+ self.body_length = int(count_str[:-2], 16)
+ excess_bytes = len(count_str)
+ while excess_bytes:
+ if excess_bytes >= len(self.buffered_bytes[0]):
+ excess_bytes -= len(self.buffered_bytes[0])
+ del self.buffered_bytes[0]
+ else:
+ self.buffered_bytes[0] = self.buffered_bytes[0][excess_bytes:]
+ excess_bytes = 0
+ if not self.body_length:
+ self.state = self._finished
+ if not self.buffered_bytes:
+ # May not call into self._finished with no buffered data.
+ return ''
+ else:
+ self.state = self._read_body
+ return self.state()
+
+ def write(self, bytes):
+ """Decode bytes to the output stream.
+
+ :raises ValueError: If the stream has already seen the end of file
+ marker.
+ :returns: None, or the excess bytes beyond the end of file marker.
+ """
+ if bytes:
+ self.buffered_bytes.append(bytes)
+ return self.state()
+
+
+class Encoder(object):
+ """Encode content to a stream using HTTP Chunked coding."""
+
+ def __init__(self, output):
+ """Create an encoder encoding to output.
+
+ :param output: A file-like object. Bytes written to the Encoder
+ will be encoded using HTTP chunking. Small writes may be buffered
+ and the ``close`` method must be called to finish the stream.
+ """
+ self.output = output
+ self.buffered_bytes = []
+ self.buffer_size = 0
+
+ def flush(self, extra_len=0):
+ """Flush the encoder to the output stream.
+
+ :param extra_len: Increase the size of the chunk by this many bytes
+ to allow for a subsequent write.
+ """
+ if not self.buffer_size and not extra_len:
+ return
+ buffered_bytes = self.buffered_bytes
+ buffer_size = self.buffer_size
+ self.buffered_bytes = []
+ self.buffer_size = 0
+ self.output.write("%X\r\n" % (buffer_size + extra_len))
+ if buffer_size:
+ self.output.write(''.join(buffered_bytes))
+ return True
+
+ def write(self, bytes):
+ """Encode bytes to the output stream."""
+ bytes_len = len(bytes)
+ if self.buffer_size + bytes_len >= 65536:
+ self.flush(bytes_len)
+ self.output.write(bytes)
+ else:
+ self.buffered_bytes.append(bytes)
+ self.buffer_size += bytes_len
+
+ def close(self):
+ """Finish the stream. This does not close the output stream."""
+ self.flush()
+ self.output.write("0\r\n")
diff --git a/lib/subunit/python/subunit/details.py b/lib/subunit/python/subunit/details.py
new file mode 100644
index 0000000000..a37b2acb93
--- /dev/null
+++ b/lib/subunit/python/subunit/details.py
@@ -0,0 +1,113 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Handlers for outcome details."""
+
+from cStringIO import StringIO
+
+from testtools import content, content_type
+
+import chunked
+
+
+class DetailsParser(object):
+ """Base class/API reference for details parsing."""
+
+
+class SimpleDetailsParser(DetailsParser):
+ """Parser for single-part [] delimited details."""
+
+ def __init__(self, state):
+ self._message = ""
+ self._state = state
+
+ def lineReceived(self, line):
+ if line == "]\n":
+ self._state.endDetails()
+ return
+ if line[0:2] == " ]":
+ # quoted ] start
+ self._message += line[1:]
+ else:
+ self._message += line
+
+ def get_details(self, style=None):
+ result = {}
+ if not style:
+ # We know that subunit/testtools serialise [] formatted
+ # tracebacks as utf8, but perhaps we need a ReplacingContent
+ # or something like that.
+ result['traceback'] = content.Content(
+ content_type.ContentType("text", "x-traceback",
+ {"charset": "utf8"}),
+ lambda:[self._message])
+ else:
+ if style == 'skip':
+ name = 'reason'
+ else:
+ name = 'message'
+ result[name] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[self._message])
+ return result
+
+ def get_message(self):
+ return self._message
+
+
+class MultipartDetailsParser(DetailsParser):
+ """Parser for multi-part [] surrounded MIME typed chunked details."""
+
+ def __init__(self, state):
+ self._state = state
+ self._details = {}
+ self._parse_state = self._look_for_content
+
+ def _look_for_content(self, line):
+ if line == "]\n":
+ self._state.endDetails()
+ return
+ # TODO error handling
+ field, value = line[:-1].split(' ', 1)
+ main, sub = value.split('/')
+ self._content_type = content_type.ContentType(main, sub)
+ self._parse_state = self._get_name
+
+ def _get_name(self, line):
+ self._name = line[:-1]
+ self._body = StringIO()
+ self._chunk_parser = chunked.Decoder(self._body)
+ self._parse_state = self._feed_chunks
+
+ def _feed_chunks(self, line):
+ residue = self._chunk_parser.write(line)
+ if residue is not None:
+ # Line based use always ends on no residue.
+ assert residue == '', 'residue: %r' % (residue,)
+ body = self._body
+ self._details[self._name] = content.Content(
+ self._content_type, lambda:[body.getvalue()])
+ self._chunk_parser.close()
+ self._parse_state = self._look_for_content
+
+ def get_details(self, for_skip=False):
+ return self._details
+
+ def get_message(self):
+ return None
+
+ def lineReceived(self, line):
+ self._parse_state(line)
diff --git a/lib/subunit/python/subunit/iso8601.py b/lib/subunit/python/subunit/iso8601.py
new file mode 100644
index 0000000000..93c92fb516
--- /dev/null
+++ b/lib/subunit/python/subunit/iso8601.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2007 Michael Twomey
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""ISO 8601 date time string parsing
+
+Basic usage:
+>>> import iso8601
+>>> iso8601.parse_date("2007-01-25T12:00:00Z")
+datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
+>>>
+
+"""
+
+from datetime import datetime, timedelta, tzinfo
+import re
+
+__all__ = ["parse_date", "ParseError"]
+
+# Adapted from http://delete.me.uk/2005/03/iso8601.html
+ISO8601_REGEX = re.compile(r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
+ r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
+ r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
+)
+TIMEZONE_REGEX = re.compile("(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})")
+
+class ParseError(Exception):
+ """Raised when there is a problem parsing a date string"""
+
+# Yoinked from python docs
+ZERO = timedelta(0)
+class Utc(tzinfo):
+ """UTC
+
+ """
+ def utcoffset(self, dt):
+ return ZERO
+
+ def tzname(self, dt):
+ return "UTC"
+
+ def dst(self, dt):
+ return ZERO
+UTC = Utc()
+
+class FixedOffset(tzinfo):
+ """Fixed offset in hours and minutes from UTC
+
+ """
+ def __init__(self, offset_hours, offset_minutes, name):
+ self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return ZERO
+
+ def __repr__(self):
+ return "<FixedOffset %r>" % self.__name
+
+def parse_timezone(tzstring, default_timezone=UTC):
+ """Parses ISO 8601 time zone specs into tzinfo offsets
+
+ """
+ if tzstring == "Z":
+ return default_timezone
+ # This isn't strictly correct, but it's common to encounter dates without
+ # timezones so I'll assume the default (which defaults to UTC).
+ # Addresses issue 4.
+ if tzstring is None:
+ return default_timezone
+ m = TIMEZONE_REGEX.match(tzstring)
+ prefix, hours, minutes = m.groups()
+ hours, minutes = int(hours), int(minutes)
+ if prefix == "-":
+ hours = -hours
+ minutes = -minutes
+ return FixedOffset(hours, minutes, tzstring)
+
+def parse_date(datestring, default_timezone=UTC):
+ """Parses ISO 8601 dates into datetime objects
+
+ The timezone is parsed from the date string. However it is quite common to
+ have dates without a timezone (not strictly correct). In this case the
+ default timezone specified in default_timezone is used. This is UTC by
+ default.
+ """
+ if not isinstance(datestring, basestring):
+ raise ParseError("Expecting a string %r" % datestring)
+ m = ISO8601_REGEX.match(datestring)
+ if not m:
+ raise ParseError("Unable to parse date string %r" % datestring)
+ groups = m.groupdict()
+ tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
+ if groups["fraction"] is None:
+ groups["fraction"] = 0
+ else:
+ groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
+ return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
+ int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
+ int(groups["fraction"]), tz)
diff --git a/lib/subunit/python/subunit/progress_model.py b/lib/subunit/python/subunit/progress_model.py
new file mode 100644
index 0000000000..3a6af89a33
--- /dev/null
+++ b/lib/subunit/python/subunit/progress_model.py
@@ -0,0 +1,106 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Support for dealing with progress state."""
+
+class ProgressModel(object):
+ """A model of progress indicators as subunit defines it.
+
+ Instances of this class represent a single logical operation that is
+ progressing. The operation may have many steps, and some of those steps may
+ supply their own progress information. ProgressModel uses a nested concept
+ where the overall state can be pushed, creating new starting state, and
+ later pushed to return to the prior state. Many user interfaces will want
+ to display an overall summary though, and accordingly the pos() and width()
+ methods return overall summary information rather than information on the
+ current subtask.
+
+ The default state is 0/0 - indicating that the overall progress is unknown.
+ Anytime the denominator of pos/width is 0, rendering of a ProgressModel
+    should take this into consideration.
+
+ :ivar: _tasks. This private attribute stores the subtasks. Each is a tuple:
+ pos, width, overall_numerator, overall_denominator. The overall fields
+ store the calculated overall numerator and denominator for the state
+ that was pushed.
+ """
+
+ def __init__(self):
+ """Create a ProgressModel.
+
+ The new model has no progress data at all - it will claim a summary
+ width of zero and position of 0.
+ """
+ self._tasks = []
+ self.push()
+
+ def adjust_width(self, offset):
+        """Adjust the width of the current subtask."""
+ self._tasks[-1][1] += offset
+
+ def advance(self):
+ """Advance the current subtask."""
+ self._tasks[-1][0] += 1
+
+ def pop(self):
+ """Pop a subtask off the ProgressModel.
+
+ See push for a description of how push and pop work.
+ """
+ self._tasks.pop()
+
+ def pos(self):
+ """Return how far through the operation has progressed."""
+ if not self._tasks:
+ return 0
+ task = self._tasks[-1]
+ if len(self._tasks) > 1:
+ # scale up the overall pos by the current task or preserve it if
+ # no current width is known.
+ offset = task[2] * (task[1] or 1)
+ else:
+ offset = 0
+ return offset + task[0]
+
+ def push(self):
+ """Push a new subtask.
+
+ After pushing a new subtask, the overall progress hasn't changed. Calls
+        to adjust_width, advance, set_width will only affect the progress
+        within the range that calling 'advance' would have affected before -
+        the subtask represents progressing one step in the earlier task.
+
+ Call pop() to restore the progress model to the state before push was
+ called.
+ """
+ self._tasks.append([0, 0, self.pos(), self.width()])
+
+ def set_width(self, width):
+ """Set the width of the current subtask."""
+ self._tasks[-1][1] = width
+
+ def width(self):
+ """Return the total width of the operation."""
+ if not self._tasks:
+ return 0
+ task = self._tasks[-1]
+ if len(self._tasks) > 1:
+ # scale up the overall width by the current task or preserve it if
+ # no current width is known.
+ return task[3] * (task[1] or 1)
+ else:
+ return task[1]
+
diff --git a/lib/subunit/python/subunit/run.py b/lib/subunit/python/subunit/run.py
new file mode 100755
index 0000000000..b390de33f7
--- /dev/null
+++ b/lib/subunit/python/subunit/run.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+#
+# Simple subunit testrunner for python
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Run a unittest testcase reporting results as Subunit.
+
+ $ python -m subunit.run mylib.tests.test_suite
+"""
+
+import sys
+
+from subunit import TestProtocolClient, get_default_formatter
+from testtools.run import (
+ BUFFEROUTPUT,
+ CATCHBREAK,
+ FAILFAST,
+ TestProgram,
+ USAGE_AS_MAIN,
+ )
+
+
+class SubunitTestRunner(object):
+ def __init__(self, stream=sys.stdout):
+ self.stream = stream
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result = TestProtocolClient(self.stream)
+ test(result)
+ return result
+
+
+class SubunitTestProgram(TestProgram):
+
+ USAGE = USAGE_AS_MAIN
+
+ def usageExit(self, msg=None):
+ if msg:
+ print msg
+ usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+ 'buffer': ''}
+ if self.failfast != False:
+ usage['failfast'] = FAILFAST
+ if self.catchbreak != False:
+ usage['catchbreak'] = CATCHBREAK
+ if self.buffer != False:
+ usage['buffer'] = BUFFEROUTPUT
+ usage_text = self.USAGE % usage
+ usage_lines = usage_text.split('\n')
+ usage_lines.insert(2, "Run a test suite with a subunit reporter.")
+ usage_lines.insert(3, "")
+ print('\n'.join(usage_lines))
+ sys.exit(2)
+
+
+if __name__ == '__main__':
+ stream = get_default_formatter()
+ runner = SubunitTestRunner(stream)
+ SubunitTestProgram(module=None, argv=sys.argv, testRunner=runner,
+ stdout=sys.stdout)
diff --git a/lib/subunit/python/subunit/test_results.py b/lib/subunit/python/subunit/test_results.py
new file mode 100644
index 0000000000..1c91daadc6
--- /dev/null
+++ b/lib/subunit/python/subunit/test_results.py
@@ -0,0 +1,382 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""TestResult helper classes used to by subunit."""
+
+import datetime
+
+import iso8601
+import testtools
+
+
+# NOT a TestResult, because we are implementing the interface, not inheriting
+# it.
+class TestResultDecorator(object):
+ """General pass-through decorator.
+
+ This provides a base that other TestResults can inherit from to
+ gain basic forwarding functionality. It also takes care of
+ handling the case where the target doesn't support newer methods
+ or features by degrading them.
+ """
+
+ def __init__(self, decorated):
+ """Create a TestResultDecorator forwarding to decorated."""
+ # Make every decorator degrade gracefully.
+ self.decorated = testtools.ExtendedToOriginalDecorator(decorated)
+
+ def startTest(self, test):
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ return self.decorated.startTestRun()
+
+ def stopTest(self, test):
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ return self.decorated.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ return self.decorated.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self.decorated.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self.decorated.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self.decorated.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self.decorated.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self.decorated.addUnexpectedSuccess(test, details=details)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def stop(self):
+ return self.decorated.stop()
+
+    def tags(self, new_tags, gone_tags):
+        # Forward tag changes to the decorated result. (Was mistakenly
+        # forwarding to decorated.time(), which takes one argument and
+        # would raise TypeError, losing the tag information.)
+        return self.decorated.tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ return self.decorated.time(a_datetime)
+
+
+class HookedTestResultDecorator(TestResultDecorator):
+ """A TestResult which calls a hook on every event."""
+
+ def __init__(self, decorated):
+ self.super = super(HookedTestResultDecorator, self)
+ self.super.__init__(decorated)
+
+ def startTest(self, test):
+ self._before_event()
+ return self.super.startTest(test)
+
+ def startTestRun(self):
+ self._before_event()
+ return self.super.startTestRun()
+
+ def stopTest(self, test):
+ self._before_event()
+ return self.super.stopTest(test)
+
+ def stopTestRun(self):
+ self._before_event()
+ return self.super.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ self._before_event()
+ return self.super.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ self._before_event()
+ return self.super.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._before_event()
+ return self.super.addUnexpectedSuccess(test, details=details)
+
+ def progress(self, offset, whence):
+ self._before_event()
+ return self.super.progress(offset, whence)
+
+ def wasSuccessful(self):
+ self._before_event()
+ return self.super.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ self._before_event()
+ return self.super.shouldStop
+
+ def stop(self):
+ self._before_event()
+ return self.super.stop()
+
+ def time(self, a_datetime):
+ self._before_event()
+ return self.super.time(a_datetime)
+
+
+class AutoTimingTestResultDecorator(HookedTestResultDecorator):
+ """Decorate a TestResult to add time events to a test run.
+
+ By default this will cause a time event before every test event,
+ but if explicit time data is being provided by the test run, then
+ this decorator will turn itself off to prevent causing confusion.
+ """
+
+ def __init__(self, decorated):
+ self._time = None
+ super(AutoTimingTestResultDecorator, self).__init__(decorated)
+
+ def _before_event(self):
+ time = self._time
+ if time is not None:
+ return
+ time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
+ self.decorated.time(time)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def time(self, a_datetime):
+ """Provide a timestamp for the current test activity.
+
+ :param a_datetime: If None, automatically add timestamps before every
+ event (this is the default behaviour if time() is not called at
+ all). If not None, pass the provided time onto the decorated
+ result object and disable automatic timestamps.
+ """
+ self._time = a_datetime
+ return self.decorated.time(a_datetime)
+
+
+class TestResultFilter(TestResultDecorator):
+ """A pyunit TestResult interface implementation which filters tests.
+
+ Tests that pass the filter are handed on to another TestResult instance
+ for further processing/reporting. To obtain the filtered results,
+ the other instance must be interrogated.
+
+ :ivar result: The result that tests are passed to after filtering.
+ :ivar filter_predicate: The callback run to decide whether to pass
+ a result.
+ """
+
+ def __init__(self, result, filter_error=False, filter_failure=False,
+ filter_success=True, filter_skip=False,
+ filter_predicate=None):
+ """Create a FilterResult object filtering to result.
+
+ :param filter_error: Filter out errors.
+ :param filter_failure: Filter out failures.
+ :param filter_success: Filter out successful tests.
+ :param filter_skip: Filter out skipped tests.
+ :param filter_predicate: A callable taking (test, outcome, err,
+ details) and returning True if the result should be passed
+ through. err and details may be none if no error or extra
+ metadata is available. outcome is the name of the outcome such
+ as 'success' or 'failure'.
+ """
+ TestResultDecorator.__init__(self, result)
+ self._filter_error = filter_error
+ self._filter_failure = filter_failure
+ self._filter_success = filter_success
+ self._filter_skip = filter_skip
+ if filter_predicate is None:
+ filter_predicate = lambda test, outcome, err, details: True
+ self.filter_predicate = filter_predicate
+ # The current test (for filtering tags)
+ self._current_test = None
+ # Has the current test been filtered (for outputting test tags)
+ self._current_test_filtered = None
+ # The (new, gone) tags for the current test.
+ self._current_test_tags = None
+
+ def addError(self, test, err=None, details=None):
+ if (not self._filter_error and
+ self.filter_predicate(test, 'error', err, details)):
+ self.decorated.startTest(test)
+ self.decorated.addError(test, err, details=details)
+ else:
+ self._filtered()
+
+ def addFailure(self, test, err=None, details=None):
+ if (not self._filter_failure and
+ self.filter_predicate(test, 'failure', err, details)):
+ self.decorated.startTest(test)
+ self.decorated.addFailure(test, err, details=details)
+ else:
+ self._filtered()
+
+ def addSkip(self, test, reason=None, details=None):
+ if (not self._filter_skip and
+ self.filter_predicate(test, 'skip', reason, details)):
+ self.decorated.startTest(test)
+ self.decorated.addSkip(test, reason, details=details)
+ else:
+ self._filtered()
+
+ def addSuccess(self, test, details=None):
+ if (not self._filter_success and
+ self.filter_predicate(test, 'success', None, details)):
+ self.decorated.startTest(test)
+ self.decorated.addSuccess(test, details=details)
+ else:
+ self._filtered()
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ if self.filter_predicate(test, 'expectedfailure', err, details):
+ self.decorated.startTest(test)
+ return self.decorated.addExpectedFailure(test, err,
+ details=details)
+ else:
+ self._filtered()
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self.decorated.startTest(test)
+ return self.decorated.addUnexpectedSuccess(test, details=details)
+
+ def _filtered(self):
+ self._current_test_filtered = True
+
+ def startTest(self, test):
+ """Start a test.
+
+ Not directly passed to the client, but used for handling of tags
+ correctly.
+ """
+ self._current_test = test
+ self._current_test_filtered = False
+ self._current_test_tags = set(), set()
+
+ def stopTest(self, test):
+ """Stop a test.
+
+ Not directly passed to the client, but used for handling of tags
+ correctly.
+ """
+ if not self._current_test_filtered:
+ # Tags to output for this test.
+ if self._current_test_tags[0] or self._current_test_tags[1]:
+ self.decorated.tags(*self._current_test_tags)
+ self.decorated.stopTest(test)
+ self._current_test = None
+ self._current_test_filtered = None
+ self._current_test_tags = None
+
+ def tags(self, new_tags, gone_tags):
+ """Handle tag instructions.
+
+ Adds and removes tags as appropriate. If a test is currently running,
+ tags are not affected for subsequent tests.
+
+ :param new_tags: Tags to add,
+ :param gone_tags: Tags to remove.
+ """
+ if self._current_test is not None:
+ # gather the tags until the test stops.
+ self._current_test_tags[0].update(new_tags)
+ self._current_test_tags[0].difference_update(gone_tags)
+ self._current_test_tags[1].update(gone_tags)
+ self._current_test_tags[1].difference_update(new_tags)
+ return self.decorated.tags(new_tags, gone_tags)
+
+ def id_to_orig_id(self, id):
+ if id.startswith("subunit.RemotedTestCase."):
+ return id[len("subunit.RemotedTestCase."):]
+ return id
+
+
+class TestIdPrintingResult(testtools.TestResult):
+
+ def __init__(self, stream, show_times=False):
+ """Create a FilterResult object outputting to stream."""
+ testtools.TestResult.__init__(self)
+ self._stream = stream
+ self.failed_tests = 0
+ self.__time = 0
+ self.show_times = show_times
+ self._test = None
+ self._test_duration = 0
+
+ def addError(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addFailure(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addSuccess(self, test):
+ self._test = test
+
+ def reportTest(self, test, duration):
+ if self.show_times:
+ seconds = duration.seconds
+ seconds += duration.days * 3600 * 24
+ seconds += duration.microseconds / 1000000.0
+ self._stream.write(test.id() + ' %0.3f\n' % seconds)
+ else:
+ self._stream.write(test.id() + '\n')
+
+ def startTest(self, test):
+ self._start_time = self._time()
+
+ def stopTest(self, test):
+ test_duration = self._time() - self._start_time
+ self.reportTest(self._test, test_duration)
+
+ def time(self, time):
+ self.__time = time
+
+ def _time(self):
+ return self.__time
+
+ def wasSuccessful(self):
+ "Tells whether or not this result was a success"
+ return self.failed_tests == 0
diff --git a/lib/subunit/python/subunit/tests/__init__.py b/lib/subunit/python/subunit/tests/__init__.py
index 544d0e704f..a78cec8572 100644
--- a/lib/subunit/python/subunit/tests/__init__.py
+++ b/lib/subunit/python/subunit/tests/__init__.py
@@ -2,24 +2,40 @@
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
#
-from subunit.tests import TestUtil, test_test_protocol
+from subunit.tests import (
+ TestUtil,
+ test_chunked,
+ test_details,
+ test_progress_model,
+ test_subunit_filter,
+ test_subunit_stats,
+ test_subunit_tags,
+ test_tap2subunit,
+ test_test_protocol,
+ test_test_results,
+ )
def test_suite():
result = TestUtil.TestSuite()
+ result.addTest(test_chunked.test_suite())
+ result.addTest(test_details.test_suite())
+ result.addTest(test_progress_model.test_suite())
+ result.addTest(test_test_results.test_suite())
result.addTest(test_test_protocol.test_suite())
+ result.addTest(test_tap2subunit.test_suite())
+ result.addTest(test_subunit_filter.test_suite())
+ result.addTest(test_subunit_tags.test_suite())
+ result.addTest(test_subunit_stats.test_suite())
return result
diff --git a/lib/subunit/python/subunit/tests/sample-script.py b/lib/subunit/python/subunit/tests/sample-script.py
index 223d2f5d9f..0ee019ae4a 100755
--- a/lib/subunit/python/subunit/tests/sample-script.py
+++ b/lib/subunit/python/subunit/tests/sample-script.py
@@ -1,5 +1,12 @@
#!/usr/bin/env python
import sys
+if len(sys.argv) == 2:
+ # subunit.tests.test_test_protocol.TestExecTestCase.test_sample_method_args
+ # uses this code path to be sure that the arguments were passed to
+ # sample-script.py
+ print "test fail"
+ print "error fail"
+ sys.exit(0)
print "test old mcdonald"
print "success old mcdonald"
print "test bing crosby"
diff --git a/lib/subunit/python/subunit/tests/test_chunked.py b/lib/subunit/python/subunit/tests/test_chunked.py
new file mode 100644
index 0000000000..a24e31e0c2
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_chunked.py
@@ -0,0 +1,127 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+from cStringIO import StringIO
+import unittest
+
+import subunit.chunked
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
+
+
+class TestDecode(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.output = StringIO()
+ self.decoder = subunit.chunked.Decoder(self.output)
+
+ def test_close_read_length_short_errors(self):
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_body_short_errors(self):
+ self.assertEqual(None, self.decoder.write('2\r\na'))
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_body_buffered_data_errors(self):
+ self.assertEqual(None, self.decoder.write('2\r'))
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_after_finished_stream_safe(self):
+ self.assertEqual(None, self.decoder.write('2\r\nab'))
+ self.assertEqual('', self.decoder.write('0\r\n'))
+ self.decoder.close()
+
+ def test_decode_nothing(self):
+ self.assertEqual('', self.decoder.write('0\r\n'))
+ self.assertEqual('', self.output.getvalue())
+
+ def test_decode_serialised_form(self):
+ self.assertEqual(None, self.decoder.write("F\r\n"))
+ self.assertEqual(None, self.decoder.write("serialised\n"))
+ self.assertEqual('', self.decoder.write("form0\r\n"))
+
+ def test_decode_short(self):
+ self.assertEqual('', self.decoder.write('3\r\nabc0\r\n'))
+ self.assertEqual('abc', self.output.getvalue())
+
+ def test_decode_combines_short(self):
+ self.assertEqual('', self.decoder.write('6\r\nabcdef0\r\n'))
+ self.assertEqual('abcdef', self.output.getvalue())
+
+ def test_decode_excess_bytes_from_write(self):
+ self.assertEqual('1234', self.decoder.write('3\r\nabc0\r\n1234'))
+ self.assertEqual('abc', self.output.getvalue())
+
+ def test_decode_write_after_finished_errors(self):
+ self.assertEqual('1234', self.decoder.write('3\r\nabc0\r\n1234'))
+ self.assertRaises(ValueError, self.decoder.write, '')
+
+ def test_decode_hex(self):
+ self.assertEqual('', self.decoder.write('A\r\n12345678900\r\n'))
+ self.assertEqual('1234567890', self.output.getvalue())
+
+ def test_decode_long_ranges(self):
+ self.assertEqual(None, self.decoder.write('10000\r\n'))
+ self.assertEqual(None, self.decoder.write('1' * 65536))
+ self.assertEqual(None, self.decoder.write('10000\r\n'))
+ self.assertEqual(None, self.decoder.write('2' * 65536))
+ self.assertEqual('', self.decoder.write('0\r\n'))
+ self.assertEqual('1' * 65536 + '2' * 65536, self.output.getvalue())
+
+
+class TestEncode(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.output = StringIO()
+ self.encoder = subunit.chunked.Encoder(self.output)
+
+ def test_encode_nothing(self):
+ self.encoder.close()
+ self.assertEqual('0\r\n', self.output.getvalue())
+
+ def test_encode_empty(self):
+ self.encoder.write('')
+ self.encoder.close()
+ self.assertEqual('0\r\n', self.output.getvalue())
+
+ def test_encode_short(self):
+ self.encoder.write('abc')
+ self.encoder.close()
+ self.assertEqual('3\r\nabc0\r\n', self.output.getvalue())
+
+ def test_encode_combines_short(self):
+ self.encoder.write('abc')
+ self.encoder.write('def')
+ self.encoder.close()
+ self.assertEqual('6\r\nabcdef0\r\n', self.output.getvalue())
+
+ def test_encode_over_9_is_in_hex(self):
+ self.encoder.write('1234567890')
+ self.encoder.close()
+ self.assertEqual('A\r\n12345678900\r\n', self.output.getvalue())
+
+ def test_encode_long_ranges_not_combined(self):
+ self.encoder.write('1' * 65536)
+ self.encoder.write('2' * 65536)
+ self.encoder.close()
+ self.assertEqual('10000\r\n' + '1' * 65536 + '10000\r\n' +
+ '2' * 65536 + '0\r\n', self.output.getvalue())
diff --git a/lib/subunit/python/subunit/tests/test_details.py b/lib/subunit/python/subunit/tests/test_details.py
new file mode 100644
index 0000000000..41c32129d0
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_details.py
@@ -0,0 +1,111 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+from cStringIO import StringIO
+import unittest
+
+import subunit.tests
+from subunit import content, content_type, details
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
+
+
+class TestSimpleDetails(unittest.TestCase):
+
+ def test_lineReceived(self):
+ parser = details.SimpleDetailsParser(None)
+ parser.lineReceived("foo\n")
+ parser.lineReceived("bar\n")
+ self.assertEqual("foo\nbar\n", parser._message)
+
+ def test_lineReceived_escaped_bracket(self):
+ parser = details.SimpleDetailsParser(None)
+ parser.lineReceived("foo\n")
+ parser.lineReceived(" ]are\n")
+ parser.lineReceived("bar\n")
+ self.assertEqual("foo\n]are\nbar\n", parser._message)
+
+ def test_get_message(self):
+ parser = details.SimpleDetailsParser(None)
+ self.assertEqual("", parser.get_message())
+
+ def test_get_details(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['traceback'] = content.Content(
+ content_type.ContentType("text", "x-traceback",
+ {'charset': 'utf8'}),
+ lambda:[""])
+ found = parser.get_details()
+ self.assertEqual(expected.keys(), found.keys())
+ self.assertEqual(expected['traceback'].content_type,
+ found['traceback'].content_type)
+ self.assertEqual(''.join(expected['traceback'].iter_bytes()),
+ ''.join(found['traceback'].iter_bytes()))
+
+ def test_get_details_skip(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['reason'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[""])
+ found = parser.get_details("skip")
+ self.assertEqual(expected, found)
+
+ def test_get_details_success(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['message'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[""])
+ found = parser.get_details("success")
+ self.assertEqual(expected, found)
+
+
+class TestMultipartDetails(unittest.TestCase):
+
+ def test_get_message_is_None(self):
+ parser = details.MultipartDetailsParser(None)
+ self.assertEqual(None, parser.get_message())
+
+ def test_get_details(self):
+ parser = details.MultipartDetailsParser(None)
+ self.assertEqual({}, parser.get_details())
+
+ def test_parts(self):
+ parser = details.MultipartDetailsParser(None)
+ parser.lineReceived("Content-Type: text/plain\n")
+ parser.lineReceived("something\n")
+ parser.lineReceived("F\r\n")
+ parser.lineReceived("serialised\n")
+ parser.lineReceived("form0\r\n")
+ expected = {}
+ expected['something'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:["serialised\nform"])
+ found = parser.get_details()
+ self.assertEqual(expected.keys(), found.keys())
+ self.assertEqual(expected['something'].content_type,
+ found['something'].content_type)
+ self.assertEqual(''.join(expected['something'].iter_bytes()),
+ ''.join(found['something'].iter_bytes()))
diff --git a/lib/subunit/python/subunit/tests/test_progress_model.py b/lib/subunit/python/subunit/tests/test_progress_model.py
new file mode 100644
index 0000000000..76200c6107
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_progress_model.py
@@ -0,0 +1,118 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import unittest
+
+import subunit
+from subunit.progress_model import ProgressModel
+
+
+class TestProgressModel(unittest.TestCase):
+
+ def assertProgressSummary(self, pos, total, progress):
+ """Assert that a progress model has reached a particular point."""
+ self.assertEqual(pos, progress.pos())
+ self.assertEqual(total, progress.width())
+
+ def test_new_progress_0_0(self):
+ progress = ProgressModel()
+ self.assertProgressSummary(0, 0, progress)
+
+ def test_advance_0_0(self):
+ progress = ProgressModel()
+ progress.advance()
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_advance_1_0(self):
+ progress = ProgressModel()
+ progress.advance()
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_set_width_absolute(self):
+ progress = ProgressModel()
+ progress.set_width(10)
+ self.assertProgressSummary(0, 10, progress)
+
+ def test_set_width_absolute_preserves_pos(self):
+ progress = ProgressModel()
+ progress.advance()
+ progress.set_width(2)
+ self.assertProgressSummary(1, 2, progress)
+
+ def test_adjust_width(self):
+ progress = ProgressModel()
+ progress.adjust_width(10)
+ self.assertProgressSummary(0, 10, progress)
+ progress.adjust_width(-10)
+ self.assertProgressSummary(0, 0, progress)
+
+ def test_adjust_width_preserves_pos(self):
+ progress = ProgressModel()
+ progress.advance()
+ progress.adjust_width(10)
+ self.assertProgressSummary(1, 10, progress)
+ progress.adjust_width(-10)
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_push_preserves_progress(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ self.assertProgressSummary(1, 3, progress)
+
+ def test_advance_advances_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(1)
+ progress.advance()
+ self.assertProgressSummary(2, 3, progress)
+
+ def test_adjust_width_adjusts_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(2)
+ progress.advance()
+ self.assertProgressSummary(3, 6, progress)
+
+ def test_set_width_adjusts_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.set_width(2)
+ progress.advance()
+ self.assertProgressSummary(3, 6, progress)
+
+ def test_pop_restores_progress(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(1)
+ progress.advance()
+ progress.pop()
+ self.assertProgressSummary(1, 3, progress)
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
diff --git a/lib/subunit/python/subunit/tests/test_subunit_filter.py b/lib/subunit/python/subunit/tests/test_subunit_filter.py
new file mode 100644
index 0000000000..3c65ed3afc
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_subunit_filter.py
@@ -0,0 +1,136 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.TestResultFilter."""
+
+import unittest
+from StringIO import StringIO
+
+import subunit
+from subunit.test_results import TestResultFilter
+
+
+class TestTestResultFilter(unittest.TestCase):
+ """Test for TestResultFilter, a TestResult object which filters tests."""
+
+ def _setUp(self):
+ self.output = StringIO()
+
+ def test_default(self):
+ """The default is to exclude success and include everything else."""
+ self.filtered_result = unittest.TestResult()
+ self.filter = TestResultFilter(self.filtered_result)
+ self.run_tests()
+ # skips are seen as success by default python TestResult.
+ self.assertEqual(['error'],
+ [error[0].id() for error in self.filtered_result.errors])
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ self.filtered_result.failures])
+ self.assertEqual(4, self.filtered_result.testsRun)
+
+ def test_exclude_errors(self):
+ self.filtered_result = unittest.TestResult()
+ self.filter = TestResultFilter(self.filtered_result,
+ filter_error=True)
+ self.run_tests()
+ # skips are seen as errors by default python TestResult.
+ self.assertEqual([], self.filtered_result.errors)
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ self.filtered_result.failures])
+ self.assertEqual(3, self.filtered_result.testsRun)
+
+ def test_exclude_failure(self):
+ self.filtered_result = unittest.TestResult()
+ self.filter = TestResultFilter(self.filtered_result,
+ filter_failure=True)
+ self.run_tests()
+ self.assertEqual(['error'],
+ [error[0].id() for error in self.filtered_result.errors])
+ self.assertEqual([],
+ [failure[0].id() for failure in
+ self.filtered_result.failures])
+ self.assertEqual(3, self.filtered_result.testsRun)
+
+ def test_exclude_skips(self):
+ self.filtered_result = subunit.TestResultStats(None)
+ self.filter = TestResultFilter(self.filtered_result,
+ filter_skip=True)
+ self.run_tests()
+ self.assertEqual(0, self.filtered_result.skipped_tests)
+ self.assertEqual(2, self.filtered_result.failed_tests)
+ self.assertEqual(3, self.filtered_result.testsRun)
+
+ def test_include_success(self):
+ """Successes can be included if requested."""
+ self.filtered_result = unittest.TestResult()
+ self.filter = TestResultFilter(self.filtered_result,
+ filter_success=False)
+ self.run_tests()
+ self.assertEqual(['error'],
+ [error[0].id() for error in self.filtered_result.errors])
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ self.filtered_result.failures])
+ self.assertEqual(5, self.filtered_result.testsRun)
+
+ def test_filter_predicate(self):
+ """You can filter by predicate callbacks"""
+ self.filtered_result = unittest.TestResult()
+ def filter_cb(test, outcome, err, details):
+ return outcome == 'success'
+ self.filter = TestResultFilter(self.filtered_result,
+ filter_predicate=filter_cb,
+ filter_success=False)
+ self.run_tests()
+ # Only success should pass
+ self.assertEqual(1, self.filtered_result.testsRun)
+
+ def run_tests(self):
+ self.setUpTestStream()
+ self.test = subunit.ProtocolTestCase(self.input_stream)
+ self.test.run(self.filter)
+
+ def setUpTestStream(self):
+ # While TestResultFilter works on python objects, using a subunit
+ # stream is an easy pithy way of getting a series of test objects to
+ # call into the TestResult, and as TestResultFilter is intended for
+ # use with subunit also has the benefit of detecting any interface
+ # skew issues.
+ self.input_stream = StringIO()
+ self.input_stream.write("""tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error [
+error details
+]
+test skipped
+skip skipped
+test todo
+xfail todo
+""")
+ self.input_stream.seek(0)
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
diff --git a/lib/subunit/python/subunit/tests/test_subunit_stats.py b/lib/subunit/python/subunit/tests/test_subunit_stats.py
new file mode 100644
index 0000000000..a7f8fca675
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_subunit_stats.py
@@ -0,0 +1,83 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.TestResultStats."""
+
+import unittest
+from StringIO import StringIO
+
+import subunit
+
+
+class TestTestResultStats(unittest.TestCase):
+ """Test for TestResultStats, a TestResult object that generates stats."""
+
+ def setUp(self):
+ self.output = StringIO()
+ self.result = subunit.TestResultStats(self.output)
+ self.input_stream = StringIO()
+ self.test = subunit.ProtocolTestCase(self.input_stream)
+
+ def test_stats_empty(self):
+ self.test.run(self.result)
+ self.assertEqual(0, self.result.total_tests)
+ self.assertEqual(0, self.result.passed_tests)
+ self.assertEqual(0, self.result.failed_tests)
+ self.assertEqual(set(), self.result.seen_tags)
+
+ def setUpUsedStream(self):
+ self.input_stream.write("""tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error
+test skipped
+skip skipped
+test todo
+xfail todo
+""")
+ self.input_stream.seek(0)
+ self.test.run(self.result)
+
+ def test_stats_smoke_everything(self):
+ # Statistics are calculated usefully.
+ self.setUpUsedStream()
+ self.assertEqual(5, self.result.total_tests)
+ self.assertEqual(2, self.result.passed_tests)
+ self.assertEqual(2, self.result.failed_tests)
+ self.assertEqual(1, self.result.skipped_tests)
+ self.assertEqual(set(["global", "local"]), self.result.seen_tags)
+
+ def test_stat_formatting(self):
+ expected = ("""
+Total tests: 5
+Passed tests: 2
+Failed tests: 2
+Skipped tests: 1
+Seen tags: global, local
+""")[1:]
+ self.setUpUsedStream()
+ self.result.formatStats()
+ self.assertEqual(expected, self.output.getvalue())
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
diff --git a/lib/subunit/python/subunit/tests/test_subunit_tags.py b/lib/subunit/python/subunit/tests/test_subunit_tags.py
new file mode 100644
index 0000000000..227e2b7475
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_subunit_tags.py
@@ -0,0 +1,68 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.tag_stream."""
+
+import unittest
+from StringIO import StringIO
+
+import subunit
+import subunit.test_results
+
+
+class TestSubUnitTags(unittest.TestCase):
+
+ def setUp(self):
+ self.original = StringIO()
+ self.filtered = StringIO()
+
+ def test_add_tag(self):
+ self.original.write("tags: foo\n")
+ self.original.write("test: test\n")
+ self.original.write("tags: bar -quux\n")
+ self.original.write("success: test\n")
+ self.original.seek(0)
+ result = subunit.tag_stream(self.original, self.filtered, ["quux"])
+ self.assertEqual([
+ "tags: quux",
+ "tags: foo",
+ "test: test",
+ "tags: bar",
+ "success: test",
+ ],
+ self.filtered.getvalue().splitlines())
+
+ def test_remove_tag(self):
+ self.original.write("tags: foo\n")
+ self.original.write("test: test\n")
+ self.original.write("tags: bar -quux\n")
+ self.original.write("success: test\n")
+ self.original.seek(0)
+ result = subunit.tag_stream(self.original, self.filtered, ["-bar"])
+ self.assertEqual([
+ "tags: -bar",
+ "tags: foo",
+ "test: test",
+ "tags: -quux",
+ "success: test",
+ ],
+ self.filtered.getvalue().splitlines())
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
diff --git a/lib/subunit/python/subunit/tests/test_tap2subunit.py b/lib/subunit/python/subunit/tests/test_tap2subunit.py
new file mode 100644
index 0000000000..c4ca4cdb3a
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_tap2subunit.py
@@ -0,0 +1,443 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for TAP2SubUnit."""
+
+import unittest
+from StringIO import StringIO
+import subunit
+
+
+class TestTAP2SubUnit(unittest.TestCase):
+ """Tests for TAP2SubUnit.
+
+ These tests test TAP string data in, and subunit string data out.
+ This is ok because the subunit protocol is intended to be stable,
+ but it might be easier/pithier to write tests against TAP string in,
+ parsed subunit objects out (by hooking the subunit stream to a subunit
+ protocol server.
+ """
+
+ def setUp(self):
+ self.tap = StringIO()
+ self.subunit = StringIO()
+
+ def test_skip_entire_file(self):
+ # A file
+ # 1..0 # Skipped: comment
+ # results in a single skipped test.
+ self.tap.write("1..0 # Skipped: entire file skipped\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test file skip",
+ "skip file skip [",
+ "Skipped: entire file skipped",
+ "]",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_test_pass(self):
+ # A file
+ # ok
+ # results in a passed test with name 'test 1' (a synthetic name as tap
+ # does not require named fixtures - it is the first test in the tap
+ # stream).
+ self.tap.write("ok\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1",
+ "success test 1",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_test_number_pass(self):
+ # A file
+ # ok 1
+ # results in a passed test with name 'test 1'
+ self.tap.write("ok 1\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1",
+ "success test 1",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_test_number_description_pass(self):
+ # A file
+ # ok 1 - There is a description
+ # results in a passed test with name 'test 1 - There is a description'
+ self.tap.write("ok 1 - There is a description\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1 - There is a description",
+ "success test 1 - There is a description",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_test_description_pass(self):
+ # A file
+ # ok There is a description
+ # results in a passed test with name 'test 1 There is a description'
+ self.tap.write("ok There is a description\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1 There is a description",
+ "success test 1 There is a description",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_SKIP_skip(self):
+ # A file
+ # ok # SKIP
+ # results in a skip test with name 'test 1'
+ self.tap.write("ok # SKIP\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1",
+ "skip test 1",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_skip_number_comment_lowercase(self):
+ self.tap.write("ok 1 # skip no samba environment available, skipping compilation\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1",
+ "skip test 1 [",
+ "no samba environment available, skipping compilation",
+ "]"
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_number_description_SKIP_skip_comment(self):
+ # A file
+ # ok 1 foo # SKIP Not done yet
+ # results in a skip test with name 'test 1 foo' and a log of
+ # Not done yet
+ self.tap.write("ok 1 foo # SKIP Not done yet\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1 foo",
+ "skip test 1 foo [",
+ "Not done yet",
+ "]",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_SKIP_skip_comment(self):
+ # A file
+ # ok # SKIP Not done yet
+ # results in a skip test with name 'test 1' and a log of Not done yet
+ self.tap.write("ok # SKIP Not done yet\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1",
+ "skip test 1 [",
+ "Not done yet",
+ "]",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_TODO_xfail(self):
+ # A file
+ # ok # TODO
+ # results in a xfail test with name 'test 1'
+ self.tap.write("ok # TODO\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1",
+ "xfail test 1",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_ok_TODO_xfail_comment(self):
+ # A file
+ # ok # TODO Not done yet
+ # results in a xfail test with name 'test 1' and a log of Not done yet
+ self.tap.write("ok # TODO Not done yet\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1",
+ "xfail test 1 [",
+ "Not done yet",
+ "]",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_bail_out_errors(self):
+ # A file with line in it
+ # Bail out! COMMENT
+ # is treated as an error
+ self.tap.write("ok 1 foo\n")
+ self.tap.write("Bail out! Lifejacket engaged\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ "test test 1 foo",
+ "success test 1 foo",
+ "test Bail out! Lifejacket engaged",
+ "error Bail out! Lifejacket engaged",
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_missing_test_at_end_with_plan_adds_error(self):
+ # A file
+ # 1..3
+ # ok first test
+ # not ok third test
+ # results in three tests, with the third being created
+ self.tap.write('1..3\n')
+ self.tap.write('ok first test\n')
+ self.tap.write('not ok second test\n')
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1 first test',
+ 'success test 1 first test',
+ 'test test 2 second test',
+ 'failure test 2 second test',
+ 'test test 3',
+ 'error test 3 [',
+ 'test missing from TAP output',
+ ']',
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_missing_test_with_plan_adds_error(self):
+ # A file
+ # 1..3
+ # ok first test
+ # not ok 3 third test
+ # results in three tests, with the second being created
+ self.tap.write('1..3\n')
+ self.tap.write('ok first test\n')
+ self.tap.write('not ok 3 third test\n')
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1 first test',
+ 'success test 1 first test',
+ 'test test 2',
+ 'error test 2 [',
+ 'test missing from TAP output',
+ ']',
+ 'test test 3 third test',
+ 'failure test 3 third test',
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_missing_test_no_plan_adds_error(self):
+ # A file
+ # ok first test
+ # not ok 3 third test
+ # results in three tests, with the second being created
+ self.tap.write('ok first test\n')
+ self.tap.write('not ok 3 third test\n')
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1 first test',
+ 'success test 1 first test',
+ 'test test 2',
+ 'error test 2 [',
+ 'test missing from TAP output',
+ ']',
+ 'test test 3 third test',
+ 'failure test 3 third test',
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_four_tests_in_a_row_trailing_plan(self):
+ # A file
+ # ok 1 - first test in a script with trailing plan
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # 1..4
+ # results in four tests numbered and named
+ self.tap.write('ok 1 - first test in a script with trailing plan\n')
+ self.tap.write('not ok 2 - second\n')
+ self.tap.write('ok 3 - third\n')
+ self.tap.write('not ok 4 - fourth\n')
+ self.tap.write('1..4\n')
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1 - first test in a script with trailing plan',
+ 'success test 1 - first test in a script with trailing plan',
+ 'test test 2 - second',
+ 'failure test 2 - second',
+ 'test test 3 - third',
+ 'success test 3 - third',
+ 'test test 4 - fourth',
+ 'failure test 4 - fourth'
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_four_tests_in_a_row_with_plan(self):
+ # A file
+ # 1..4
+ # ok 1 - first test in a script with a plan
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # results in four tests numbered and named
+ self.tap.write('1..4\n')
+ self.tap.write('ok 1 - first test in a script with a plan\n')
+ self.tap.write('not ok 2 - second\n')
+ self.tap.write('ok 3 - third\n')
+ self.tap.write('not ok 4 - fourth\n')
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1 - first test in a script with a plan',
+ 'success test 1 - first test in a script with a plan',
+ 'test test 2 - second',
+ 'failure test 2 - second',
+ 'test test 3 - third',
+ 'success test 3 - third',
+ 'test test 4 - fourth',
+ 'failure test 4 - fourth'
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_four_tests_in_a_row_no_plan(self):
+ # A file
+ # ok 1 - first test in a script with no plan at all
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # results in four tests numbered and named
+ self.tap.write('ok 1 - first test in a script with no plan at all\n')
+ self.tap.write('not ok 2 - second\n')
+ self.tap.write('ok 3 - third\n')
+ self.tap.write('not ok 4 - fourth\n')
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1 - first test in a script with no plan at all',
+ 'success test 1 - first test in a script with no plan at all',
+ 'test test 2 - second',
+ 'failure test 2 - second',
+ 'test test 3 - third',
+ 'success test 3 - third',
+ 'test test 4 - fourth',
+ 'failure test 4 - fourth'
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_todo_and_skip(self):
+ # A file
+ # not ok 1 - a fail but # TODO but is TODO
+ # not ok 2 - another fail # SKIP instead
+ # results in two tests, numbered and commented.
+ self.tap.write("not ok 1 - a fail but # TODO but is TODO\n")
+ self.tap.write("not ok 2 - another fail # SKIP instead\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1 - a fail but',
+ 'xfail test 1 - a fail but [',
+ 'but is TODO',
+ ']',
+ 'test test 2 - another fail',
+ 'skip test 2 - another fail [',
+ 'instead',
+ ']',
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_leading_comments_add_to_next_test_log(self):
+ # A file
+ # # comment
+ # ok
+ # ok
+ # results in a single test with the comment included
+ # in the first test and not the second.
+ self.tap.write("# comment\n")
+ self.tap.write("ok\n")
+ self.tap.write("ok\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1',
+ 'success test 1 [',
+ '# comment',
+ ']',
+ 'test test 2',
+ 'success test 2',
+ ],
+ self.subunit.getvalue().splitlines())
+
+ def test_trailing_comments_are_included_in_last_test_log(self):
+ # A file
+ # ok
+ # ok
+ # # comment
+ # results in a two tests, with the second having the comment
+ # attached to its log.
+ self.tap.write("ok\n")
+ self.tap.write("ok\n")
+ self.tap.write("# comment\n")
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.assertEqual([
+ 'test test 1',
+ 'success test 1',
+ 'test test 2',
+ 'success test 2 [',
+ '# comment',
+ ']',
+ ],
+ self.subunit.getvalue().splitlines())
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
diff --git a/lib/subunit/python/subunit/tests/test_test_protocol.py b/lib/subunit/python/subunit/tests/test_test_protocol.py
index af31584a97..e1287b6c81 100644
--- a/lib/subunit/python/subunit/tests/test_test_protocol.py
+++ b/lib/subunit/python/subunit/tests/test_test_protocol.py
@@ -1,126 +1,80 @@
#
-# subunit: extensions to python unittest to get test results from subprocesses.
+# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
#
+import datetime
import unittest
from StringIO import StringIO
import os
-import subunit
import sys
-try:
- class MockTestProtocolServerClient(object):
- """A mock protocol server client to test callbacks."""
-
- def __init__(self):
- self.end_calls = []
- self.error_calls = []
- self.failure_calls = []
- self.start_calls = []
- self.success_calls = []
- super(MockTestProtocolServerClient, self).__init__()
-
- def addError(self, test, error):
- self.error_calls.append((test, error))
-
- def addFailure(self, test, error):
- self.failure_calls.append((test, error))
-
- def addSuccess(self, test):
- self.success_calls.append(test)
-
- def stopTest(self, test):
- self.end_calls.append(test)
-
- def startTest(self, test):
- self.start_calls.append(test)
-
-except AttributeError:
- MockTestProtocolServer = None
+from testtools.content import Content, TracebackContent
+from testtools.content_type import ContentType
+from testtools.tests.helpers import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
-
-class TestMockTestProtocolServer(unittest.TestCase):
-
- def test_start_test(self):
- protocol = MockTestProtocolServerClient()
- protocol.startTest(subunit.RemotedTestCase("test old mcdonald"))
- self.assertEqual(protocol.start_calls,
- [subunit.RemotedTestCase("test old mcdonald")])
- self.assertEqual(protocol.end_calls, [])
- self.assertEqual(protocol.error_calls, [])
- self.assertEqual(protocol.failure_calls, [])
- self.assertEqual(protocol.success_calls, [])
-
- def test_add_error(self):
- protocol = MockTestProtocolServerClient()
- protocol.addError(subunit.RemotedTestCase("old mcdonald"),
- subunit.RemoteError("omg it works"))
- self.assertEqual(protocol.start_calls, [])
- self.assertEqual(protocol.end_calls, [])
- self.assertEqual(protocol.error_calls, [(
- subunit.RemotedTestCase("old mcdonald"),
- subunit.RemoteError("omg it works"))])
- self.assertEqual(protocol.failure_calls, [])
- self.assertEqual(protocol.success_calls, [])
-
- def test_add_failure(self):
- protocol = MockTestProtocolServerClient()
- protocol.addFailure(subunit.RemotedTestCase("old mcdonald"),
- subunit.RemoteError("omg it works"))
- self.assertEqual(protocol.start_calls, [])
- self.assertEqual(protocol.end_calls, [])
- self.assertEqual(protocol.error_calls, [])
- self.assertEqual(protocol.failure_calls, [
- (subunit.RemotedTestCase("old mcdonald"),
- subunit.RemoteError("omg it works"))])
- self.assertEqual(protocol.success_calls, [])
-
- def test_add_success(self):
- protocol = MockTestProtocolServerClient()
- protocol.addSuccess(subunit.RemotedTestCase("test old mcdonald"))
- self.assertEqual(protocol.start_calls, [])
- self.assertEqual(protocol.end_calls, [])
- self.assertEqual(protocol.error_calls, [])
- self.assertEqual(protocol.failure_calls, [])
- self.assertEqual(protocol.success_calls,
- [subunit.RemotedTestCase("test old mcdonald")])
-
- def test_end_test(self):
- protocol = MockTestProtocolServerClient()
- protocol.stopTest(subunit.RemotedTestCase("test old mcdonald"))
- self.assertEqual(protocol.end_calls,
- [subunit.RemotedTestCase("test old mcdonald")])
- self.assertEqual(protocol.error_calls, [])
- self.assertEqual(protocol.failure_calls, [])
- self.assertEqual(protocol.success_calls, [])
- self.assertEqual(protocol.start_calls, [])
+import subunit
+from subunit import _remote_exception_str
+import subunit.iso8601 as iso8601
class TestTestImports(unittest.TestCase):
def test_imports(self):
+ from subunit import DiscardStream
from subunit import TestProtocolServer
from subunit import RemotedTestCase
from subunit import RemoteError
from subunit import ExecTestCase
from subunit import IsolatedTestCase
from subunit import TestProtocolClient
+ from subunit import ProtocolTestCase
+
+
+class TestDiscardStream(unittest.TestCase):
+
+ def test_write(self):
+ subunit.DiscardStream().write("content")
+
+
+class TestProtocolServerForward(unittest.TestCase):
+
+ def test_story(self):
+ client = unittest.TestResult()
+ out = StringIO()
+ protocol = subunit.TestProtocolServer(client, forward_stream=out)
+ pipe = StringIO("test old mcdonald\n"
+ "success old mcdonald\n")
+ protocol.readFrom(pipe)
+ mcdonald = subunit.RemotedTestCase("old mcdonald")
+ self.assertEqual(client.testsRun, 1)
+ self.assertEqual(pipe.getvalue(), out.getvalue())
+ def test_not_command(self):
+ client = unittest.TestResult()
+ out = StringIO()
+ protocol = subunit.TestProtocolServer(client,
+ stream=subunit.DiscardStream(), forward_stream=out)
+ pipe = StringIO("success old mcdonald\n")
+ protocol.readFrom(pipe)
+ self.assertEqual(client.testsRun, 0)
+ self.assertEqual("", out.getvalue())
+
class TestTestProtocolServerPipe(unittest.TestCase):
@@ -140,47 +94,55 @@ class TestTestProtocolServerPipe(unittest.TestCase):
bing = subunit.RemotedTestCase("bing crosby")
an_error = subunit.RemotedTestCase("an error")
self.assertEqual(client.errors,
- [(an_error, 'RemoteException: \n\n')])
+ [(an_error, _remote_exception_str + '\n')])
self.assertEqual(
client.failures,
- [(bing, "RemoteException: foo.c:53:ERROR invalid state\n\n")])
+ [(bing, _remote_exception_str + ": Text attachment: traceback\n"
+ "------------\nfoo.c:53:ERROR invalid state\n"
+ "------------\n\n")])
self.assertEqual(client.testsRun, 3)
+ def test_non_test_characters_forwarded_immediately(self):
+ pass
+
class TestTestProtocolServerStartTest(unittest.TestCase):
def setUp(self):
- self.client = MockTestProtocolServerClient()
+ self.client = Python26TestResult()
self.protocol = subunit.TestProtocolServer(self.client)
def test_start_test(self):
self.protocol.lineReceived("test old mcdonald\n")
- self.assertEqual(self.client.start_calls,
- [subunit.RemotedTestCase("old mcdonald")])
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
def test_start_testing(self):
self.protocol.lineReceived("testing old mcdonald\n")
- self.assertEqual(self.client.start_calls,
- [subunit.RemotedTestCase("old mcdonald")])
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
def test_start_test_colon(self):
self.protocol.lineReceived("test: old mcdonald\n")
- self.assertEqual(self.client.start_calls,
- [subunit.RemotedTestCase("old mcdonald")])
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+ def test_indented_test_colon_ignored(self):
+ self.protocol.lineReceived(" test: old mcdonald\n")
+ self.assertEqual([], self.client._events)
def test_start_testing_colon(self):
self.protocol.lineReceived("testing: old mcdonald\n")
- self.assertEqual(self.client.start_calls,
- [subunit.RemotedTestCase("old mcdonald")])
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
class TestTestProtocolServerPassThrough(unittest.TestCase):
def setUp(self):
- from StringIO import StringIO
self.stdout = StringIO()
self.test = subunit.RemotedTestCase("old mcdonald")
- self.client = MockTestProtocolServerClient()
+ self.client = ExtendedTestResult()
self.protocol = subunit.TestProtocolServer(self.client, self.stdout)
def keywords_before_test(self):
@@ -205,42 +167,37 @@ class TestTestProtocolServerPassThrough(unittest.TestCase):
def test_keywords_before_test(self):
self.keywords_before_test()
- self.assertEqual(self.client.start_calls, [])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ self.assertEqual(self.client._events, [])
def test_keywords_after_error(self):
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lineReceived("error old mcdonald\n")
self.keywords_before_test()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls,
- [(self.test, subunit.RemoteError(""))])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, {}),
+ ('stopTest', self.test),
+ ], self.client._events)
def test_keywords_after_failure(self):
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lineReceived("failure old mcdonald\n")
self.keywords_before_test()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls,
- [(self.test, subunit.RemoteError())])
- self.assertEqual(self.client.success_calls, [])
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, {}),
+ ('stopTest', self.test),
+ ])
def test_keywords_after_success(self):
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lineReceived("success old mcdonald\n")
self.keywords_before_test()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [self.test])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
def test_keywords_after_test(self):
self.protocol.lineReceived("test old mcdonald\n")
@@ -265,14 +222,15 @@ class TestTestProtocolServerPassThrough(unittest.TestCase):
"successful a\n"
"successful: a\n"
"]\n")
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.failure_calls,
- [(self.test, subunit.RemoteError())])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, {}),
+ ('stopTest', self.test),
+ ])
def test_keywords_during_failure(self):
+ # A smoke test to make sure that the details parsers have control
+ # appropriately.
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lineReceived("failure: old mcdonald [\n")
self.protocol.lineReceived("test old mcdonald\n")
@@ -287,21 +245,25 @@ class TestTestProtocolServerPassThrough(unittest.TestCase):
self.protocol.lineReceived(" ]\n")
self.protocol.lineReceived("]\n")
self.assertEqual(self.stdout.getvalue(), "")
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.failure_calls,
- [(self.test, subunit.RemoteError("test old mcdonald\n"
- "failure a\n"
- "failure: a\n"
- "error a\n"
- "error: a\n"
- "success a\n"
- "success: a\n"
- "successful a\n"
- "successful: a\n"
- "]\n"))])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}),
+ lambda:[
+ "test old mcdonald\n"
+ "failure a\n"
+ "failure: a\n"
+ "error a\n"
+ "error: a\n"
+ "success a\n"
+ "success: a\n"
+ "successful a\n"
+ "successful: a\n"
+ "]\n"])
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, details),
+ ('stopTest', self.test),
+ ])
def test_stdout_passthrough(self):
"""Lines received which cannot be interpreted as any protocol action
@@ -315,103 +277,133 @@ class TestTestProtocolServerPassThrough(unittest.TestCase):
class TestTestProtocolServerLostConnection(unittest.TestCase):
def setUp(self):
- self.client = MockTestProtocolServerClient()
+ self.client = Python26TestResult()
self.protocol = subunit.TestProtocolServer(self.client)
self.test = subunit.RemotedTestCase("old mcdonald")
def test_lost_connection_no_input(self):
self.protocol.lostConnection()
- self.assertEqual(self.client.start_calls, [])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ self.assertEqual([], self.client._events)
def test_lost_connection_after_start(self):
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lostConnection()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [
- (self.test, subunit.RemoteError("lost connection during "
- "test 'old mcdonald'"))])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ failure = subunit.RemoteError(
+ u"lost connection during test 'old mcdonald'")
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, failure),
+ ('stopTest', self.test),
+ ], self.client._events)
def test_lost_connected_after_error(self):
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lineReceived("error old mcdonald\n")
self.protocol.lostConnection()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [
- (self.test, subunit.RemoteError(""))])
- self.assertEqual(self.client.success_calls, [])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, subunit.RemoteError(u"")),
+ ('stopTest', self.test),
+ ], self.client._events)
- def test_lost_connection_during_error(self):
+ def do_connection_lost(self, outcome, opening):
self.protocol.lineReceived("test old mcdonald\n")
- self.protocol.lineReceived("error old mcdonald [\n")
+ self.protocol.lineReceived("%s old mcdonald %s" % (outcome, opening))
self.protocol.lostConnection()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [
- (self.test, subunit.RemoteError("lost connection during error "
- "report of test 'old mcdonald'"))])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ failure = subunit.RemoteError(
+ u"lost connection during %s report of test 'old mcdonald'" %
+ outcome)
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, failure),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_error(self):
+ self.do_connection_lost("error", "[\n")
+
+ def test_lost_connection_during_error_details(self):
+ self.do_connection_lost("error", "[ multipart\n")
def test_lost_connected_after_failure(self):
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lineReceived("failure old mcdonald\n")
self.protocol.lostConnection()
- test = subunit.RemotedTestCase("old mcdonald")
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls,
- [(self.test, subunit.RemoteError())])
- self.assertEqual(self.client.success_calls, [])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test, subunit.RemoteError(u"")),
+ ('stopTest', self.test),
+ ], self.client._events)
def test_lost_connection_during_failure(self):
- self.protocol.lineReceived("test old mcdonald\n")
- self.protocol.lineReceived("failure old mcdonald [\n")
- self.protocol.lostConnection()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls,
- [(self.test,
- subunit.RemoteError("lost connection during "
- "failure report"
- " of test 'old mcdonald'"))])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [])
+ self.do_connection_lost("failure", "[\n")
+
+ def test_lost_connection_during_failure_details(self):
+ self.do_connection_lost("failure", "[ multipart\n")
def test_lost_connection_after_success(self):
self.protocol.lineReceived("test old mcdonald\n")
self.protocol.lineReceived("success old mcdonald\n")
self.protocol.lostConnection()
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls, [])
- self.assertEqual(self.client.success_calls, [self.test])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_success(self):
+ self.do_connection_lost("success", "[\n")
+
+ def test_lost_connection_during_success_details(self):
+ self.do_connection_lost("success", "[ multipart\n")
+
+ def test_lost_connection_during_skip(self):
+ self.do_connection_lost("skip", "[\n")
+
+ def test_lost_connection_during_skip_details(self):
+ self.do_connection_lost("skip", "[ multipart\n")
+
+ def test_lost_connection_during_xfail(self):
+ self.do_connection_lost("xfail", "[\n")
+
+ def test_lost_connection_during_xfail_details(self):
+ self.do_connection_lost("xfail", "[ multipart\n")
+
+
+class TestInTestMultipart(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived("test mcdonalds farm\n")
+ self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+ def test__outcome_sets_details_parser(self):
+ self.protocol._reading_success_details.details_parser = None
+ self.protocol._state._outcome(0, "mcdonalds farm [ multipart\n",
+ None, self.protocol._reading_success_details)
+ parser = self.protocol._reading_success_details.details_parser
+ self.assertNotEqual(None, parser)
+ self.assertTrue(isinstance(parser,
+ subunit.details.MultipartDetailsParser))
class TestTestProtocolServerAddError(unittest.TestCase):
def setUp(self):
- self.client = MockTestProtocolServerClient()
+ self.client = ExtendedTestResult()
self.protocol = subunit.TestProtocolServer(self.client)
self.protocol.lineReceived("test mcdonalds farm\n")
self.test = subunit.RemotedTestCase("mcdonalds farm")
def simple_error_keyword(self, keyword):
self.protocol.lineReceived("%s mcdonalds farm\n" % keyword)
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [
- (self.test, subunit.RemoteError(""))])
- self.assertEqual(self.client.failure_calls, [])
+ details = {}
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
def test_simple_error(self):
self.simple_error_keyword("error")
@@ -422,21 +414,27 @@ class TestTestProtocolServerAddError(unittest.TestCase):
def test_error_empty_message(self):
self.protocol.lineReceived("error mcdonalds farm [\n")
self.protocol.lineReceived("]\n")
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [
- (self.test, subunit.RemoteError(""))])
- self.assertEqual(self.client.failure_calls, [])
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[""])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
def error_quoted_bracket(self, keyword):
self.protocol.lineReceived("%s mcdonalds farm [\n" % keyword)
self.protocol.lineReceived(" ]\n")
self.protocol.lineReceived("]\n")
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [
- (self.test, subunit.RemoteError("]\n"))])
- self.assertEqual(self.client.failure_calls, [])
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:["]\n"])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
def test_error_quoted_bracket(self):
self.error_quoted_bracket("error")
@@ -448,18 +446,22 @@ class TestTestProtocolServerAddError(unittest.TestCase):
class TestTestProtocolServerAddFailure(unittest.TestCase):
def setUp(self):
- self.client = MockTestProtocolServerClient()
+ self.client = ExtendedTestResult()
self.protocol = subunit.TestProtocolServer(self.client)
self.protocol.lineReceived("test mcdonalds farm\n")
self.test = subunit.RemotedTestCase("mcdonalds farm")
+ def assertFailure(self, details):
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
def simple_failure_keyword(self, keyword):
self.protocol.lineReceived("%s mcdonalds farm\n" % keyword)
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls,
- [(self.test, subunit.RemoteError())])
+ details = {}
+ self.assertFailure(details)
def test_simple_failure(self):
self.simple_failure_keyword("failure")
@@ -470,21 +472,19 @@ class TestTestProtocolServerAddFailure(unittest.TestCase):
def test_failure_empty_message(self):
self.protocol.lineReceived("failure mcdonalds farm [\n")
self.protocol.lineReceived("]\n")
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls,
- [(self.test, subunit.RemoteError())])
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[""])
+ self.assertFailure(details)
def failure_quoted_bracket(self, keyword):
self.protocol.lineReceived("%s mcdonalds farm [\n" % keyword)
self.protocol.lineReceived(" ]\n")
self.protocol.lineReceived("]\n")
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.failure_calls,
- [(self.test, subunit.RemoteError("]\n"))])
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:["]\n"])
+ self.assertFailure(details)
def test_failure_quoted_bracket(self):
self.failure_quoted_bracket("failure")
@@ -493,20 +493,192 @@ class TestTestProtocolServerAddFailure(unittest.TestCase):
self.failure_quoted_bracket("failure:")
+class TestTestProtocolServerAddxFail(unittest.TestCase):
+ """Tests for the xfail keyword.
+
+ In Python this can thunk through to Success due to stdlib limitations (see
+ README).
+ """
+
+ def capture_expected_failure(self, test, err):
+ self._events.append((test, err))
+
+ def setup_python26(self):
+ """Setup a test object ready to be xfailed and thunk to success."""
+ self.client = Python26TestResult()
+ self.setup_protocol()
+
+ def setup_python27(self):
+ """Setup a test object ready to be xfailed."""
+ self.client = Python27TestResult()
+ self.setup_protocol()
+
+ def setup_python_ex(self):
+ """Setup a test object ready to be xfailed with details."""
+ self.client = ExtendedTestResult()
+ self.setup_protocol()
+
+ def setup_protocol(self):
+ """Setup the protocol based on self.client."""
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived("test mcdonalds farm\n")
+ self.test = self.client._events[-1][-1]
+
+ def simple_xfail_keyword(self, keyword, as_success):
+ self.protocol.lineReceived("%s mcdonalds farm\n" % keyword)
+ self.check_success_or_xfail(as_success)
+
+ def check_success_or_xfail(self, as_success, error_message=None):
+ if as_success:
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+ else:
+ details = {}
+ if error_message is not None:
+ details['traceback'] = Content(
+ ContentType("text", "x-traceback", {'charset': 'utf8'}),
+ lambda:[error_message])
+ if isinstance(self.client, ExtendedTestResult):
+ value = details
+ else:
+ if error_message is not None:
+ value = subunit.RemoteError(u'Text attachment: traceback\n'
+ '------------\n' + error_message + '------------\n')
+ else:
+ value = subunit.RemoteError()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addExpectedFailure', self.test, value),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_xfail(self):
+ self.setup_python26()
+ self.simple_xfail_keyword("xfail", True)
+ self.setup_python27()
+ self.simple_xfail_keyword("xfail", False)
+ self.setup_python_ex()
+ self.simple_xfail_keyword("xfail", False)
+
+ def test_simple_xfail_colon(self):
+ self.setup_python26()
+ self.simple_xfail_keyword("xfail:", True)
+ self.setup_python27()
+ self.simple_xfail_keyword("xfail:", False)
+ self.setup_python_ex()
+ self.simple_xfail_keyword("xfail:", False)
+
+ def test_xfail_empty_message(self):
+ self.setup_python26()
+ self.empty_message(True)
+ self.setup_python27()
+ self.empty_message(False)
+ self.setup_python_ex()
+ self.empty_message(False, error_message="")
+
+ def empty_message(self, as_success, error_message="\n"):
+ self.protocol.lineReceived("xfail mcdonalds farm [\n")
+ self.protocol.lineReceived("]\n")
+ self.check_success_or_xfail(as_success, error_message)
+
+ def xfail_quoted_bracket(self, keyword, as_success):
+ # This tests it is accepted, but cannot test it is used today, because
+ # of not having a way to expose it in Python so far.
+ self.protocol.lineReceived("%s mcdonalds farm [\n" % keyword)
+ self.protocol.lineReceived(" ]\n")
+ self.protocol.lineReceived("]\n")
+ self.check_success_or_xfail(as_success, "]\n")
+
+ def test_xfail_quoted_bracket(self):
+ self.setup_python26()
+ self.xfail_quoted_bracket("xfail", True)
+ self.setup_python27()
+ self.xfail_quoted_bracket("xfail", False)
+ self.setup_python_ex()
+ self.xfail_quoted_bracket("xfail", False)
+
+ def test_xfail_colon_quoted_bracket(self):
+ self.setup_python26()
+ self.xfail_quoted_bracket("xfail:", True)
+ self.setup_python27()
+ self.xfail_quoted_bracket("xfail:", False)
+ self.setup_python_ex()
+ self.xfail_quoted_bracket("xfail:", False)
+
+
+class TestTestProtocolServerAddSkip(unittest.TestCase):
+ """Tests for the skip keyword.
+
+ In Python this meets the testtools extended TestResult contract.
+ (See https://launchpad.net/testtools).
+ """
+
+ def setUp(self):
+ """Setup a test object ready to be skipped."""
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived("test mcdonalds farm\n")
+ self.test = self.client._events[-1][-1]
+
+ def assertSkip(self, reason):
+ details = {}
+ if reason is not None:
+ details['reason'] = Content(
+ ContentType("text", "plain"), lambda:[reason])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSkip', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def simple_skip_keyword(self, keyword):
+ self.protocol.lineReceived("%s mcdonalds farm\n" % keyword)
+ self.assertSkip(None)
+
+ def test_simple_skip(self):
+ self.simple_skip_keyword("skip")
+
+ def test_simple_skip_colon(self):
+ self.simple_skip_keyword("skip:")
+
+ def test_skip_empty_message(self):
+ self.protocol.lineReceived("skip mcdonalds farm [\n")
+ self.protocol.lineReceived("]\n")
+ self.assertSkip("")
+
+ def skip_quoted_bracket(self, keyword):
+ # This tests it is accepted, but cannot test it is used today, because
+ # of not having a way to expose it in Python so far.
+ self.protocol.lineReceived("%s mcdonalds farm [\n" % keyword)
+ self.protocol.lineReceived(" ]\n")
+ self.protocol.lineReceived("]\n")
+ self.assertSkip("]\n")
+
+ def test_skip_quoted_bracket(self):
+ self.skip_quoted_bracket("skip")
+
+ def test_skip_colon_quoted_bracket(self):
+ self.skip_quoted_bracket("skip:")
+
+
class TestTestProtocolServerAddSuccess(unittest.TestCase):
def setUp(self):
- self.client = MockTestProtocolServerClient()
+ self.client = ExtendedTestResult()
self.protocol = subunit.TestProtocolServer(self.client)
self.protocol.lineReceived("test mcdonalds farm\n")
self.test = subunit.RemotedTestCase("mcdonalds farm")
def simple_success_keyword(self, keyword):
self.protocol.lineReceived("%s mcdonalds farm\n" % keyword)
- self.assertEqual(self.client.start_calls, [self.test])
- self.assertEqual(self.client.end_calls, [self.test])
- self.assertEqual(self.client.error_calls, [])
- self.assertEqual(self.client.success_calls, [self.test])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
def test_simple_success(self):
self.simple_success_keyword("failure")
@@ -520,6 +692,134 @@ class TestTestProtocolServerAddSuccess(unittest.TestCase):
def test_simple_success_colon(self):
self.simple_success_keyword("successful:")
+ def assertSuccess(self, details):
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_success_empty_message(self):
+ self.protocol.lineReceived("success mcdonalds farm [\n")
+ self.protocol.lineReceived("]\n")
+ details = {}
+ details['message'] = Content(ContentType("text", "plain"),
+ lambda:[""])
+ self.assertSuccess(details)
+
+ def success_quoted_bracket(self, keyword):
+ # This tests it is accepted, but cannot test it is used today, because
+ # of not having a way to expose it in Python so far.
+ self.protocol.lineReceived("%s mcdonalds farm [\n" % keyword)
+ self.protocol.lineReceived(" ]\n")
+ self.protocol.lineReceived("]\n")
+ details = {}
+ details['message'] = Content(ContentType("text", "plain"),
+ lambda:["]\n"])
+ self.assertSuccess(details)
+
+ def test_success_quoted_bracket(self):
+ self.success_quoted_bracket("success")
+
+ def test_success_colon_quoted_bracket(self):
+ self.success_quoted_bracket("success:")
+
+
+class TestTestProtocolServerProgress(unittest.TestCase):
+ """Test receipt of progress: directives."""
+
+ def test_progress_accepted_stdlib(self):
+ self.result = Python26TestResult()
+ self.stream = StringIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived("progress: 23")
+ self.protocol.lineReceived("progress: -2")
+ self.protocol.lineReceived("progress: +4")
+ self.assertEqual("", self.stream.getvalue())
+
+ def test_progress_accepted_extended(self):
+ # With a progress capable TestResult, progress events are emitted.
+ self.result = ExtendedTestResult()
+ self.stream = StringIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived("progress: 23")
+ self.protocol.lineReceived("progress: push")
+ self.protocol.lineReceived("progress: -2")
+ self.protocol.lineReceived("progress: pop")
+ self.protocol.lineReceived("progress: +4")
+ self.assertEqual("", self.stream.getvalue())
+ self.assertEqual([
+ ('progress', 23, subunit.PROGRESS_SET),
+ ('progress', None, subunit.PROGRESS_PUSH),
+ ('progress', -2, subunit.PROGRESS_CUR),
+ ('progress', None, subunit.PROGRESS_POP),
+ ('progress', 4, subunit.PROGRESS_CUR),
+ ], self.result._events)
+
+
+class TestTestProtocolServerStreamTags(unittest.TestCase):
+ """Test managing tags on the protocol level."""
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+
+ def test_initial_tags(self):
+ self.protocol.lineReceived("tags: foo bar:baz quux\n")
+ self.assertEqual([
+ ('tags', set(["foo", "bar:baz", "quux"]), set()),
+ ], self.client._events)
+
+ def test_minus_removes_tags(self):
+ self.protocol.lineReceived("tags: -bar quux\n")
+ self.assertEqual([
+ ('tags', set(["quux"]), set(["bar"])),
+ ], self.client._events)
+
+ def test_tags_do_not_get_set_on_test(self):
+ self.protocol.lineReceived("test mcdonalds farm\n")
+ test = self.client._events[0][-1]
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+ def test_tags_do_not_get_set_on_global_tags(self):
+ self.protocol.lineReceived("tags: foo bar\n")
+ self.protocol.lineReceived("test mcdonalds farm\n")
+ test = self.client._events[-1][-1]
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+ def test_tags_get_set_on_test_tags(self):
+ self.protocol.lineReceived("test mcdonalds farm\n")
+ test = self.client._events[-1][-1]
+ self.protocol.lineReceived("tags: foo bar\n")
+ self.protocol.lineReceived("success mcdonalds farm\n")
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+
+class TestTestProtocolServerStreamTime(unittest.TestCase):
+ """Test managing time information at the protocol level."""
+
+ def test_time_accepted_stdlib(self):
+ self.result = Python26TestResult()
+ self.stream = StringIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived("time: 2001-12-12 12:59:59Z\n")
+ self.assertEqual("", self.stream.getvalue())
+
+ def test_time_accepted_extended(self):
+ self.result = ExtendedTestResult()
+ self.stream = StringIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived("time: 2001-12-12 12:59:59Z\n")
+ self.assertEqual("", self.stream.getvalue())
+ self.assertEqual([
+ ('time', datetime.datetime(2001, 12, 12, 12, 59, 59, 0,
+ iso8601.Utc()))
+ ], self.result._events)
+
class TestRemotedTestCase(unittest.TestCase):
@@ -529,14 +829,14 @@ class TestRemotedTestCase(unittest.TestCase):
self.assertRaises(NotImplementedError, test.tearDown)
self.assertEqual("A test description",
test.shortDescription())
- self.assertEqual("subunit.RemotedTestCase.A test description",
+ self.assertEqual("A test description",
test.id())
self.assertEqual("A test description (subunit.RemotedTestCase)", "%s" % test)
self.assertEqual("<subunit.RemotedTestCase description="
"'A test description'>", "%r" % test)
result = unittest.TestResult()
test.run(result)
- self.assertEqual([(test, "RemoteException: "
+ self.assertEqual([(test, _remote_exception_str + ": "
"Cannot run RemotedTestCases.\n\n")],
result.errors)
self.assertEqual(1, result.testsRun)
@@ -550,15 +850,15 @@ class TestRemotedTestCase(unittest.TestCase):
class TestRemoteError(unittest.TestCase):
def test_eq(self):
- error = subunit.RemoteError("Something went wrong")
- another_error = subunit.RemoteError("Something went wrong")
- different_error = subunit.RemoteError("boo!")
+ error = subunit.RemoteError(u"Something went wrong")
+ another_error = subunit.RemoteError(u"Something went wrong")
+ different_error = subunit.RemoteError(u"boo!")
self.assertEqual(error, another_error)
self.assertNotEqual(error, different_error)
self.assertNotEqual(different_error, another_error)
def test_empty_constructor(self):
- self.assertEqual(subunit.RemoteError(), subunit.RemoteError(""))
+ self.assertEqual(subunit.RemoteError(), subunit.RemoteError(u""))
class TestExecTestCase(unittest.TestCase):
@@ -570,27 +870,43 @@ class TestExecTestCase(unittest.TestCase):
# the sample script runs three tests, one each
# that fails, errors and succeeds
+ def test_sample_method_args(self):
+ """sample-script.py foo"""
+ # sample that will run just one test.
def test_construct(self):
test = self.SampleExecTestCase("test_sample_method")
self.assertEqual(test.script,
subunit.join_dir(__file__, 'sample-script.py'))
+ def test_args(self):
+ result = unittest.TestResult()
+ test = self.SampleExecTestCase("test_sample_method_args")
+ test.run(result)
+ self.assertEqual(1, result.testsRun)
+
def test_run(self):
- runner = MockTestProtocolServerClient()
+ result = ExtendedTestResult()
test = self.SampleExecTestCase("test_sample_method")
- test.run(runner)
+ test.run(result)
mcdonald = subunit.RemotedTestCase("old mcdonald")
bing = subunit.RemotedTestCase("bing crosby")
+ bing_details = {}
+ bing_details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:["foo.c:53:ERROR invalid state\n"])
an_error = subunit.RemotedTestCase("an error")
- self.assertEqual(runner.error_calls,
- [(an_error, subunit.RemoteError())])
- self.assertEqual(runner.failure_calls,
- [(bing,
- subunit.RemoteError(
- "foo.c:53:ERROR invalid state\n"))])
- self.assertEqual(runner.start_calls, [mcdonald, bing, an_error])
- self.assertEqual(runner.end_calls, [mcdonald, bing, an_error])
+ error_details = {}
+ self.assertEqual([
+ ('startTest', mcdonald),
+ ('addSuccess', mcdonald),
+ ('stopTest', mcdonald),
+ ('startTest', bing),
+ ('addFailure', bing, bing_details),
+ ('stopTest', bing),
+ ('startTest', an_error),
+ ('addError', an_error, error_details),
+ ('stopTest', an_error),
+ ], result._events)
def test_debug(self):
test = self.SampleExecTestCase("test_sample_method")
@@ -689,7 +1005,11 @@ class TestTestProtocolClient(unittest.TestCase):
self.io = StringIO()
self.protocol = subunit.TestProtocolClient(self.io)
self.test = TestTestProtocolClient("test_start_test")
-
+ self.sample_details = {'something':Content(
+ ContentType('text', 'plain'), lambda:['serialised\nform'])}
+ self.sample_tb_details = dict(self.sample_details)
+ self.sample_tb_details['traceback'] = TracebackContent(
+ subunit.RemoteError(u"boo qux"), self.test)
def test_start_test(self):
"""Test startTest on a TestProtocolClient."""
@@ -697,7 +1017,7 @@ class TestTestProtocolClient(unittest.TestCase):
self.assertEqual(self.io.getvalue(), "test: %s\n" % self.test.id())
def test_stop_test(self):
- """Test stopTest on a TestProtocolClient."""
+ # stopTest doesn't output anything.
self.protocol.stopTest(self.test)
self.assertEqual(self.io.getvalue(), "")
@@ -707,22 +1027,154 @@ class TestTestProtocolClient(unittest.TestCase):
self.assertEqual(
self.io.getvalue(), "successful: %s\n" % self.test.id())
+ def test_add_success_details(self):
+ """Test addSuccess on a TestProtocolClient with details."""
+ self.protocol.addSuccess(self.test, details=self.sample_details)
+ self.assertEqual(
+ self.io.getvalue(), "successful: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n]\n" % self.test.id())
+
def test_add_failure(self):
"""Test addFailure on a TestProtocolClient."""
- self.protocol.addFailure(self.test, subunit.RemoteError("boo"))
+ self.protocol.addFailure(
+ self.test, subunit.RemoteError(u"boo qux"))
+ self.assertEqual(
+ self.io.getvalue(),
+ ('failure: %s [\n' + _remote_exception_str + ': boo qux\n]\n')
+ % self.test.id())
+
+ def test_add_failure_details(self):
+ """Test addFailure on a TestProtocolClient with details."""
+ self.protocol.addFailure(
+ self.test, details=self.sample_tb_details)
self.assertEqual(
self.io.getvalue(),
- 'failure: %s [\nRemoteException: boo\n]\n' % self.test.id())
+ ("failure: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n"
+ "1A\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+ "]\n") % self.test.id())
def test_add_error(self):
"""Test stopTest on a TestProtocolClient."""
- self.protocol.addError(self.test, subunit.RemoteError("phwoar"))
+ self.protocol.addError(
+ self.test, subunit.RemoteError(u"phwoar crikey"))
+ self.assertEqual(
+ self.io.getvalue(),
+ ('error: %s [\n' +
+ _remote_exception_str + ": phwoar crikey\n"
+ "]\n") % self.test.id())
+
+ def test_add_error_details(self):
+ """Test stopTest on a TestProtocolClient with details."""
+ self.protocol.addError(
+ self.test, details=self.sample_tb_details)
+ self.assertEqual(
+ self.io.getvalue(),
+ ("error: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n"
+ "1A\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+ "]\n") % self.test.id())
+
+ def test_add_expected_failure(self):
+ """Test addExpectedFailure on a TestProtocolClient."""
+ self.protocol.addExpectedFailure(
+ self.test, subunit.RemoteError(u"phwoar crikey"))
self.assertEqual(
self.io.getvalue(),
- 'error: %s [\n'
- "RemoteException: phwoar\n"
+ ('xfail: %s [\n' +
+ _remote_exception_str + ": phwoar crikey\n"
+ "]\n") % self.test.id())
+
+ def test_add_expected_failure_details(self):
+ """Test addExpectedFailure on a TestProtocolClient with details."""
+ self.protocol.addExpectedFailure(
+ self.test, details=self.sample_tb_details)
+ self.assertEqual(
+ self.io.getvalue(),
+ ("xfail: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n"
+ "1A\r\n"+ _remote_exception_str + ": boo qux\n0\r\n"
+ "]\n") % self.test.id())
+
+ def test_add_skip(self):
+ """Test addSkip on a TestProtocolClient."""
+ self.protocol.addSkip(
+ self.test, "Has it really?")
+ self.assertEqual(
+ self.io.getvalue(),
+ 'skip: %s [\nHas it really?\n]\n' % self.test.id())
+
+ def test_add_skip_details(self):
+ """Test addSkip on a TestProtocolClient with details."""
+ details = {'reason':Content(
+ ContentType('text', 'plain'), lambda:['Has it really?'])}
+ self.protocol.addSkip(
+ self.test, details=details)
+ self.assertEqual(
+ self.io.getvalue(),
+ "skip: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "reason\n"
+ "E\r\nHas it really?0\r\n"
"]\n" % self.test.id())
+ def test_progress_set(self):
+ self.protocol.progress(23, subunit.PROGRESS_SET)
+ self.assertEqual(self.io.getvalue(), 'progress: 23\n')
+
+ def test_progress_neg_cur(self):
+ self.protocol.progress(-23, subunit.PROGRESS_CUR)
+ self.assertEqual(self.io.getvalue(), 'progress: -23\n')
+
+ def test_progress_pos_cur(self):
+ self.protocol.progress(23, subunit.PROGRESS_CUR)
+ self.assertEqual(self.io.getvalue(), 'progress: +23\n')
+
+ def test_progress_pop(self):
+ self.protocol.progress(1234, subunit.PROGRESS_POP)
+ self.assertEqual(self.io.getvalue(), 'progress: pop\n')
+
+ def test_progress_push(self):
+ self.protocol.progress(1234, subunit.PROGRESS_PUSH)
+ self.assertEqual(self.io.getvalue(), 'progress: push\n')
+
+ def test_time(self):
+ # Calling time() outputs a time signal immediately.
+ self.protocol.time(
+ datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc()))
+ self.assertEqual(
+ "time: 2009-10-11 12:13:14.000015Z\n",
+ self.io.getvalue())
+
+ def test_add_unexpected_success(self):
+ """Test addUnexpectedSuccess on a TestProtocolClient."""
+ self.protocol.addUnexpectedSuccess(self.test)
+ self.assertEqual(
+ self.io.getvalue(), "successful: %s\n" % self.test.id())
+
+ def test_add_unexpected_success_details(self):
+ """Test addUnexpectedSuccess on a TestProtocolClient with details."""
+ self.protocol.addUnexpectedSuccess(self.test, details=self.sample_details)
+ self.assertEqual(
+ self.io.getvalue(), "successful: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n]\n" % self.test.id())
+
def test_suite():
loader = subunit.tests.TestUtil.TestLoader()
diff --git a/lib/subunit/python/subunit/tests/test_test_results.py b/lib/subunit/python/subunit/tests/test_test_results.py
new file mode 100644
index 0000000000..fe82c04b06
--- /dev/null
+++ b/lib/subunit/python/subunit/tests/test_test_results.py
@@ -0,0 +1,199 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import datetime
+import unittest
+from StringIO import StringIO
+import os
+import sys
+
+from testtools.content_type import ContentType
+from testtools.content import Content
+
+import subunit
+import subunit.iso8601 as iso8601
+import subunit.test_results
+
+
+class LoggingDecorator(subunit.test_results.HookedTestResultDecorator):
+
+ def __init__(self, decorated):
+ self._calls = 0
+ super(LoggingDecorator, self).__init__(decorated)
+
+ def _before_event(self):
+ self._calls += 1
+
+
+class AssertBeforeTestResult(LoggingDecorator):
+ """A TestResult for checking preconditions."""
+
+ def __init__(self, decorated, test):
+ self.test = test
+ super(AssertBeforeTestResult, self).__init__(decorated)
+
+ def _before_event(self):
+ self.test.assertEqual(1, self.earlier._calls)
+ super(AssertBeforeTestResult, self)._before_event()
+
+
+class TimeCapturingResult(unittest.TestResult):
+
+ def __init__(self):
+ super(TimeCapturingResult, self).__init__()
+ self._calls = []
+
+ def time(self, a_datetime):
+ self._calls.append(a_datetime)
+
+
+class TestHookedTestResultDecorator(unittest.TestCase):
+
+ def setUp(self):
+ # An end to the chain
+ terminal = unittest.TestResult()
+ # Asserts that the call was made to self.result before asserter was
+ # called.
+ asserter = AssertBeforeTestResult(terminal, self)
+ # The result object we call, which much increase its call count.
+ self.result = LoggingDecorator(asserter)
+ asserter.earlier = self.result
+ self.decorated = asserter
+
+ def tearDown(self):
+ # The hook in self.result must have been called
+ self.assertEqual(1, self.result._calls)
+ # The hook in asserter must have been called too, otherwise the
+ # assertion about ordering won't have completed.
+ self.assertEqual(1, self.decorated._calls)
+
+ def test_startTest(self):
+ self.result.startTest(self)
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+
+ def test_stopTest(self):
+ self.result.stopTest(self)
+
+ def test_stopTestRun(self):
+ self.result.stopTestRun()
+
+ def test_addError(self):
+ self.result.addError(self, subunit.RemoteError())
+
+ def test_addError_details(self):
+ self.result.addError(self, details={})
+
+ def test_addFailure(self):
+ self.result.addFailure(self, subunit.RemoteError())
+
+ def test_addFailure_details(self):
+ self.result.addFailure(self, details={})
+
+ def test_addSuccess(self):
+ self.result.addSuccess(self)
+
+ def test_addSuccess_details(self):
+ self.result.addSuccess(self, details={})
+
+ def test_addSkip(self):
+ self.result.addSkip(self, "foo")
+
+ def test_addSkip_details(self):
+ self.result.addSkip(self, details={})
+
+ def test_addExpectedFailure(self):
+ self.result.addExpectedFailure(self, subunit.RemoteError())
+
+ def test_addExpectedFailure_details(self):
+ self.result.addExpectedFailure(self, details={})
+
+ def test_addUnexpectedSuccess(self):
+ self.result.addUnexpectedSuccess(self)
+
+ def test_addUnexpectedSuccess_details(self):
+ self.result.addUnexpectedSuccess(self, details={})
+
+ def test_progress(self):
+ self.result.progress(1, subunit.PROGRESS_SET)
+
+ def test_wasSuccessful(self):
+ self.result.wasSuccessful()
+
+ def test_shouldStop(self):
+ self.result.shouldStop
+
+ def test_stop(self):
+ self.result.stop()
+
+ def test_time(self):
+ self.result.time(None)
+
+
+class TestAutoTimingTestResultDecorator(unittest.TestCase):
+
+ def setUp(self):
+ # And end to the chain which captures time events.
+ terminal = TimeCapturingResult()
+ # The result object under test.
+ self.result = subunit.test_results.AutoTimingTestResultDecorator(
+ terminal)
+ self.decorated = terminal
+
+ def test_without_time_calls_time_is_called_and_not_None(self):
+ self.result.startTest(self)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertNotEqual(None, self.decorated._calls[0])
+
+ def test_no_time_from_progress(self):
+ self.result.progress(1, subunit.PROGRESS_CUR)
+ self.assertEqual(0, len(self.decorated._calls))
+
+ def test_no_time_from_shouldStop(self):
+ self.decorated.stop()
+ self.result.shouldStop
+ self.assertEqual(0, len(self.decorated._calls))
+
+ def test_calling_time_inhibits_automatic_time(self):
+ # Calling time() outputs a time signal immediately and prevents
+ # automatically adding one when other methods are called.
+ time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+ self.result.time(time)
+ self.result.startTest(self)
+ self.result.stopTest(self)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertEqual(time, self.decorated._calls[0])
+
+ def test_calling_time_None_enables_automatic_time(self):
+ time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+ self.result.time(time)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertEqual(time, self.decorated._calls[0])
+ # Calling None passes the None through, in case other results care.
+ self.result.time(None)
+ self.assertEqual(2, len(self.decorated._calls))
+ self.assertEqual(None, self.decorated._calls[1])
+ # Calling other methods doesn't generate an automatic time event.
+ self.result.startTest(self)
+ self.assertEqual(3, len(self.decorated._calls))
+ self.assertNotEqual(None, self.decorated._calls[2])
+
+
+def test_suite():
+ loader = subunit.tests.TestUtil.TestLoader()
+ result = loader.loadTestsFromName(__name__)
+ return result
diff --git a/lib/subunit/runtests.py b/lib/subunit/runtests.py
new file mode 100755
index 0000000000..8ecc6cd3fb
--- /dev/null
+++ b/lib/subunit/runtests.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+# -*- Mode: python -*-
+#
+# Copyright (C) 2004 Canonical.com
+# Author: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import unittest
+from subunit.tests.TestUtil import TestVisitor, TestSuite
+import subunit
+import sys
+import os
+import shutil
+import logging
+
+class ParameterisableTextTestRunner(unittest.TextTestRunner):
+ """I am a TextTestRunner whose result class is
+ parameterisable without further subclassing"""
+ def __init__(self, **args):
+ unittest.TextTestRunner.__init__(self, **args)
+ self._resultFactory=None
+ def resultFactory(self, *args):
+ """set or retrieve the result factory"""
+ if args:
+ self._resultFactory=args[0]
+ return self
+ if self._resultFactory is None:
+ self._resultFactory=unittest._TextTestResult
+ return self._resultFactory
+
+ def _makeResult(self):
+ return self.resultFactory()(self.stream, self.descriptions, self.verbosity)
+
+
+class EarlyStoppingTextTestResult(unittest._TextTestResult):
+ """I am a TextTestResult that can optionally stop at the first failure
+ or error"""
+
+ def addError(self, test, err):
+ unittest._TextTestResult.addError(self, test, err)
+ if self.stopOnError():
+ self.stop()
+
+ def addFailure(self, test, err):
+ unittest._TextTestResult.addError(self, test, err)
+ if self.stopOnFailure():
+ self.stop()
+
+ def stopOnError(self, *args):
+ """should this result indicate an abort when an error occurs?
+ TODO parameterise this"""
+ return True
+
+ def stopOnFailure(self, *args):
+ """should this result indicate an abort when a failure error occurs?
+ TODO parameterise this"""
+ return True
+
+
+def earlyStopFactory(*args, **kwargs):
+ """return a an early stopping text test result"""
+ result=EarlyStoppingTextTestResult(*args, **kwargs)
+ return result
+
+
+class ShellTests(subunit.ExecTestCase):
+
+ def test_sourcing(self):
+ """./shell/tests/test_source_library.sh"""
+
+ def test_functions(self):
+ """./shell/tests/test_function_output.sh"""
+
+
+def test_suite():
+ result = TestSuite()
+ result.addTest(subunit.test_suite())
+ result.addTest(ShellTests('test_sourcing'))
+ result.addTest(ShellTests('test_functions'))
+ return result
+
+
+class filteringVisitor(TestVisitor):
+ """I accrue all the testCases I visit that pass a regexp filter on id
+ into my suite
+ """
+
+ def __init__(self, filter):
+ import re
+ TestVisitor.__init__(self)
+ self._suite=None
+ self.filter=re.compile(filter)
+
+ def suite(self):
+ """answer the suite we are building"""
+ if self._suite is None:
+ self._suite=TestSuite()
+ return self._suite
+
+ def visitCase(self, aCase):
+ if self.filter.match(aCase.id()):
+ self.suite().addTest(aCase)
+
+
+def main(argv):
+ """To parameterise what tests are run, run this script like so:
+ python test_all.py REGEX
+ i.e.
+ python test_all.py .*Protocol.*
+ to run all tests with Protocol in their id."""
+ if len(argv) > 1:
+ pattern = argv[1]
+ else:
+ pattern = ".*"
+ visitor = filteringVisitor(pattern)
+ test_suite().visit(visitor)
+ runner = ParameterisableTextTestRunner(verbosity=2)
+ runner.resultFactory(unittest._TextTestResult)
+ if not runner.run(visitor.suite()).wasSuccessful():
+ return 1
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/lib/subunit/setup.py b/lib/subunit/setup.py
new file mode 100755
index 0000000000..2038d04826
--- /dev/null
+++ b/lib/subunit/setup.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+try:
+ # If the user has setuptools / distribute installed, use it
+ from setuptools import setup
+except ImportError:
+ # Otherwise, fall back to distutils.
+ from distutils.core import setup
+ extra = {}
+else:
+ extra = {
+ 'install_requires': [
+ 'testtools>=0.9.6',
+ ]
+ }
+
+try:
+ # Assume we are in a distribution, which has PKG-INFO
+ version_lines = [x for x in open('PKG-INFO').readlines()
+ if x.startswith('Version:')]
+ version_line = version_lines and version_lines[-1] or 'VERSION = 0.0'
+ VERSION = version_line.split(':')[1].strip()
+
+except IOError:
+ # Must be a development checkout, so use the Makefile
+ version_lines = [x for x in open('Makefile').readlines()
+ if x.startswith('VERSION')]
+ version_line = version_lines and version_lines[-1] or 'VERSION = 0.0'
+ VERSION = version_line.split('=')[1].strip()
+
+
+setup(
+ name='python-subunit',
+ version=VERSION,
+ description=('Python implementation of subunit test streaming protocol'),
+ long_description=open('README').read(),
+ classifiers=[
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python',
+ 'Topic :: Software Development :: Testing',
+ ],
+ keywords='python test streaming',
+ author='Robert Collins',
+ author_email='subunit-dev@lists.launchpad.net',
+ url='http://launchpad.net/subunit',
+ packages=['subunit'],
+ package_dir={'subunit': 'python/subunit'},
+ scripts = [
+ 'filters/subunit2gtk',
+ 'filters/subunit2junitxml',
+ 'filters/subunit2pyunit',
+ 'filters/subunit-filter',
+ 'filters/subunit-ls',
+ 'filters/subunit-notify',
+ 'filters/subunit-stats',
+ 'filters/subunit-tags',
+ 'filters/tap2subunit',
+ ],
+ **extra
+)
diff --git a/lib/subunit/shell/README b/lib/subunit/shell/README
new file mode 100644
index 0000000000..af894a2bd3
--- /dev/null
+++ b/lib/subunit/shell/README
@@ -0,0 +1,62 @@
+#
+# subunit shell bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+This tree contains shell bindings to the subunit protocol. They are written
+entirely in shell, and unit tested in shell. See the tests/ directory for the
+test scripts. You can use `make check` to run the tests. There is a trivial
+python test_shell.py which uses the pyunit gui to expose the test results in a
+compact form.
+
+The shell bindings consist of four functions which you can use to output test
+metadata trivially. See share/subunit.sh for the functions and comments.
+
+However, this is not a full test environment, its support code for reporting to
+subunit. You can look at ShUnit (http://shunit.sourceforge.net) for 'proper'
+shell based xUnit functionality. There is a patch for ShUnit 1.3
+(subunit-ui.patch) in the subunit source tree. I hope to have that integrated
+upstream in the near future. I will delete the copy of the patch in the subunit
+tree a release or two later.
+
+If you are a test environment maintainer - either homegrown, or ShUnit or some
+such, you will need to see how the subunit calls should be used. Here is what
+a manually written test using the bindings might look like:
+
+
+subunit_start_test "test name"
+# determine if test passes or fails
+result=$(something)
+if [ $result == 0 ]; then
+ subunit_pass_test "test name"
+else
+ subunit_fail_test "test name" <<END
+Something went wrong running something:
+exited with result: '$func_status'
+END
+fi
+
+Which when run with a subunit test runner will generate something like:
+test name ... ok
+
+on success, and:
+
+test name ... FAIL
+
+======================================================================
+FAIL: test name
+----------------------------------------------------------------------
+RemoteError:
+Something went wrong running something:
+exited with result: '1'
diff --git a/lib/subunit/shell/share/subunit.sh b/lib/subunit/shell/share/subunit.sh
new file mode 100644
index 0000000000..82737276b8
--- /dev/null
+++ b/lib/subunit/shell/share/subunit.sh
@@ -0,0 +1,56 @@
+#
+# subunit.sh: shell functions to report test status via the subunit protocol.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+subunit_start_test () {
+ # emit the current protocol start-marker for test $1
+ echo "test: $1"
+}
+
+
+subunit_pass_test () {
+ # emit the current protocol test passed marker for test $1
+ echo "success: $1"
+}
+
+
+subunit_fail_test () {
+ # emit the current protocol fail-marker for test $1, and emit stdin as
+ # the error text.
+ # we use stdin because the failure message can be arbitrarily long, and this
+ # makes it convenient to write in scripts (using <<END syntax.
+ echo "failure: $1 ["
+ cat -
+ echo "]"
+}
+
+
+subunit_error_test () {
+ # emit the current protocol error-marker for test $1, and emit stdin as
+ # the error text.
+ # we use stdin because the failure message can be arbitrarily long, and this
+ # makes it convenient to write in scripts (using <<END syntax.
+ echo "error: $1 ["
+ cat -
+ echo "]"
+}
+
+
+subunit_skip_test () {
+ # emit the current protocol test skipped marker for test $1
+ echo "skip: $1"
+}
+
+
diff --git a/lib/subunit/shell/tests/test_function_output.sh b/lib/subunit/shell/tests/test_function_output.sh
new file mode 100755
index 0000000000..b78eee6946
--- /dev/null
+++ b/lib/subunit/shell/tests/test_function_output.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+# subunit shell bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+
+# this script tests the output of the methods. As each is tested we start using
+# it.
+# So the first test manually implements the entire protocol, the next uses the
+# start method and so on.
+# it is assumed that we are running from the 'shell' tree root in the source
+# of subunit, and that the library sourcing tests have all passed - if they
+# have not, this test script may well fail strangely.
+
+# import the library.
+. ${SHELL_SHARE}subunit.sh
+
+echo 'test: subunit_start_test output'
+func_output=$(subunit_start_test "foo bar")
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xtest: foo bar" ]; then
+ echo 'success: subunit_start_test output'
+else
+ echo 'failure: subunit_start_test output ['
+ echo 'got an error code or incorrect output:'
+ echo "exit: $func_status"
+ echo "output: '$func_output'"
+ echo ']' ;
+fi
+
+subunit_start_test "subunit_pass_test output"
+func_output=$(subunit_pass_test "foo bar")
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xsuccess: foo bar" ]; then
+ subunit_pass_test "subunit_pass_test output"
+else
+ echo 'failure: subunit_pass_test output ['
+ echo 'got an error code or incorrect output:'
+ echo "exit: $func_status"
+ echo "output: '$func_output'"
+ echo ']' ;
+fi
+
+subunit_start_test "subunit_fail_test output"
+func_output=$(subunit_fail_test "foo bar" <<END
+something
+ wrong
+here
+END
+)
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xfailure: foo bar [
+something
+ wrong
+here
+]" ]; then
+ subunit_pass_test "subunit_fail_test output"
+else
+ echo 'failure: subunit_fail_test output ['
+ echo 'got an error code or incorrect output:'
+ echo "exit: $func_status"
+ echo "output: '$func_output'"
+ echo ']' ;
+fi
+
+subunit_start_test "subunit_error_test output"
+func_output=$(subunit_error_test "foo bar" <<END
+something
+ died
+here
+END
+)
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xerror: foo bar [
+something
+ died
+here
+]" ]; then
+ subunit_pass_test "subunit_error_test output"
+else
+ subunit_fail_test "subunit_error_test output" <<END
+got an error code or incorrect output:
+exit: $func_status
+output: '$func_output'
+END
+fi
diff --git a/lib/subunit/shell/tests/test_source_library.sh b/lib/subunit/shell/tests/test_source_library.sh
new file mode 100755
index 0000000000..699f1281bc
--- /dev/null
+++ b/lib/subunit/shell/tests/test_source_library.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# subunit shell bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+
+# this script tests that we can source the subunit shell bindings successfully.
+# It manually implements the control protocol so that it des not depend on the
+# bindings being complete yet.
+
+# we expect to be run from the tree root.
+
+echo 'test: shell bindings can be sourced'
+# if any output occurs, this has failed to source cleanly
+source_output=$(. ${SHELL_SHARE}subunit.sh 2>&1)
+if [ $? == 0 -a "x$source_output" = "x" ]; then
+ echo 'success: shell bindings can be sourced'
+else
+ echo 'failure: shell bindings can be sourced ['
+ echo 'got an error code or output during sourcing.:'
+ echo $source_output
+ echo ']' ;
+fi
+
+# now source it for real
+. ${SHELL_SHARE}subunit.sh
+
+# we should have a start_test function
+echo 'test: subunit_start_test exists'
+found_type=$(type -t subunit_start_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_start_test exists'
+else
+ echo 'failure: subunit_start_test exists ['
+ echo 'subunit_start_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have a pass_test function
+echo 'test: subunit_pass_test exists'
+found_type=$(type -t subunit_pass_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_pass_test exists'
+else
+ echo 'failure: subunit_pass_test exists ['
+ echo 'subunit_pass_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have a fail_test function
+echo 'test: subunit_fail_test exists'
+found_type=$(type -t subunit_fail_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_fail_test exists'
+else
+ echo 'failure: subunit_fail_test exists ['
+ echo 'subunit_fail_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have a error_test function
+echo 'test: subunit_error_test exists'
+found_type=$(type -t subunit_error_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_error_test exists'
+else
+ echo 'failure: subunit_error_test exists ['
+ echo 'subunit_error_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have a skip_test function
+echo 'test: subunit_skip_test exists'
+found_type=$(type -t subunit_skip_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_skip_test exists'
+else
+ echo 'failure: subunit_skip_test exists ['
+ echo 'subunit_skip_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
diff --git a/lib/subunit/tap2subunit b/lib/subunit/tap2subunit
deleted file mode 100755
index 9e335168f5..0000000000
--- a/lib/subunit/tap2subunit
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/perl
-# Simple script that converts Perl test harness output to
-# Subunit
-# Copyright (C) 2008 Jelmer Vernooij <jelmer@samba.org>
-# Published under the GNU GPL, v3 or later
-
-my $firstline = 1;
-my $error = 0;
-while(<STDIN>) {
- if ($firstline) {
- $firstline = 0;
- next;
- }
- if (/^not ok (\d+) - (.*)$/) {
- print "test: $2\n";
- print "failure: $2\n";
- $error = 1;
- } elsif (/^ok (\d+) - (.*)$/) {
- print "test: $2\n";
- print "success: $2\n";
- } elsif (/^ok (\d+)$/) {
- print "test: $1\n";
- print "success: $1\n";
- } elsif (/^ok (\d+) # skip (.*)$/) {
- print "test: $1\n";
- print "skip: $1 [\n$2\n]\n";
- } elsif (/^not ok (\d+)$/) {
- print "test: $1\n";
- print "failure: $1\n";
- $error = 1;
- } else {
- print;
- }
-}
-exit $error;
diff --git a/lib/talloc/ABI/talloc-2.0.2.sigs b/lib/talloc/ABI/talloc-2.0.2.sigs
new file mode 100644
index 0000000000..6e236d52ac
--- /dev/null
+++ b/lib/talloc/ABI/talloc-2.0.2.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/lib/talloc/ABI/talloc-2.0.3.sigs b/lib/talloc/ABI/talloc-2.0.3.sigs
new file mode 100644
index 0000000000..6e236d52ac
--- /dev/null
+++ b/lib/talloc/ABI/talloc-2.0.3.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/lib/talloc/ABI/talloc-2.0.4.sigs b/lib/talloc/ABI/talloc-2.0.4.sigs
new file mode 100644
index 0000000000..6e236d52ac
--- /dev/null
+++ b/lib/talloc/ABI/talloc-2.0.4.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/lib/talloc/ABI/talloc-2.0.5.sigs b/lib/talloc/ABI/talloc-2.0.5.sigs
new file mode 100644
index 0000000000..6e236d52ac
--- /dev/null
+++ b/lib/talloc/ABI/talloc-2.0.5.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/lib/talloc/Makefile b/lib/talloc/Makefile
new file mode 100644
index 0000000000..4c28653446
--- /dev/null
+++ b/lib/talloc/Makefile
@@ -0,0 +1,66 @@
+# simple makefile wrapper to run waf
+
+WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf
+
+all:
+ $(WAF) build
+
+install:
+ $(WAF) install
+
+uninstall:
+ $(WAF) uninstall
+
+test:
+ $(WAF) test $(TEST_OPTIONS)
+
+testenv:
+ $(WAF) test --testenv $(TEST_OPTIONS)
+
+quicktest:
+ $(WAF) test --quick $(TEST_OPTIONS)
+
+dist:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) dist
+
+distcheck:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) distcheck
+
+clean:
+ $(WAF) clean
+
+distclean:
+ $(WAF) distclean
+
+reconfigure: configure
+ $(WAF) reconfigure
+
+show_waf_options:
+ $(WAF) --help
+
+# some compatibility make targets
+everything: all
+
+testsuite: all
+
+check: test
+
+torture: all
+
+# this should do an install as well, once install is finished
+installcheck: test
+
+etags:
+ $(WAF) etags
+
+ctags:
+ $(WAF) ctags
+
+pydoctor:
+ $(WAF) pydoctor
+
+bin/%:: FORCE
+ $(WAF) --targets=`basename $@`
+FORCE:
diff --git a/lib/talloc/Makefile.in b/lib/talloc/Makefile.in
deleted file mode 100644
index 94f031a4bb..0000000000
--- a/lib/talloc/Makefile.in
+++ /dev/null
@@ -1,62 +0,0 @@
-#!gmake
-#
-prefix = @prefix@
-datarootdir = @datarootdir@
-exec_prefix = @exec_prefix@
-includedir = @includedir@
-libdir = @libdir@
-mandir = @mandir@
-VPATH = @srcdir@:@libreplacedir@
-srcdir = @srcdir@
-builddir = @builddir@
-sharedbuilddir = @sharedbuilddir@
-XSLTPROC = @XSLTPROC@
-INSTALLCMD = @INSTALL@
-CC = @CC@
-CFLAGS = @CFLAGS@ -DHAVE_CONFIG_H= -I. -I@srcdir@
-EXTRA_TARGETS = @DOC_TARGET@
-PICFLAG = @PICFLAG@
-TALLOC_VERSION = @TALLOC_VERSION@
-TALLOC_VERSION_MAJOR = @TALLOC_VERSION_MAJOR@
-TALLOC_VERSION_MINOR = @TALLOC_VERSION_MINOR@
-TALLOC_VERSION_RELEASE = @TALLOC_VERSION_RELEASE@
-SHLIBEXT = @SHLIBEXT@
-SHLD = @SHLD@
-SHLD_FLAGS = @SHLD_FLAGS@
-tallocdir = @tallocdir@
-
-LIBOBJ = $(TALLOC_OBJ) @LIBREPLACEOBJ@
-
-SONAMEFLAG = @SONAMEFLAG@
-VERSIONSCRIPT = @VERSIONSCRIPT@
-EXPORTSFILE = @EXPORTSFILE@
-
-all:: showflags $(EXTRA_TARGETS)
-
-include $(tallocdir)/rules.mk
-include $(tallocdir)/talloc.mk
-
-@TALLOC_COMPAT1_MK@
-
-$(TALLOC_SOLIB): $(LIBOBJ)
- $(SHLD) $(SHLD_FLAGS) -o $@ $(LIBOBJ) $(VERSIONSCRIPT) $(EXPORTSFILE) $(SONAMEFLAG)$(TALLOC_SONAME)
-
-shared-build: all
- ${INSTALLCMD} -d $(sharedbuilddir)/lib
- ${INSTALLCMD} -m 644 libtalloc.a $(sharedbuilddir)/lib
- ${INSTALLCMD} -m 755 $(TALLOC_SOLIB) $(sharedbuilddir)/lib
- ln -sf $(TALLOC_SOLIB) $(sharedbuilddir)/lib/$(TALLOC_SONAME)
- ln -sf $(TALLOC_SOLIB) $(sharedbuilddir)/lib/libtalloc.so
- ${INSTALLCMD} -d $(sharedbuilddir)/include
- ${INSTALLCMD} -m 644 $(srcdir)/talloc.h $(sharedbuilddir)/include
-
-check: test
-
-installcheck:: test install
-
-distclean:: clean
- rm -f Makefile
- rm -f config.log config.status config.h config.cache
-
-realdistclean:: distclean
- rm -f configure config.h.in
diff --git a/lib/talloc/aclocal.m4 b/lib/talloc/aclocal.m4
deleted file mode 100644
index 5605e476ba..0000000000
--- a/lib/talloc/aclocal.m4
+++ /dev/null
@@ -1 +0,0 @@
-m4_include(libreplace.m4)
diff --git a/lib/talloc/autogen.sh b/lib/talloc/autogen.sh
deleted file mode 100755
index bf84eeee19..0000000000
--- a/lib/talloc/autogen.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-rm -rf autom4te.cache
-rm -f configure config.h.in
-
-IPATHS="-I libreplace -I lib/replace -I ../libreplace -I ../replace"
-autoconf $IPATHS || exit 1
-autoheader $IPATHS || exit 1
-
-rm -rf autom4te.cache
-
-echo "Now run ./configure and then make."
-exit 0
-
diff --git a/lib/talloc/build_macros.m4 b/lib/talloc/build_macros.m4
deleted file mode 100644
index c036668cd1..0000000000
--- a/lib/talloc/build_macros.m4
+++ /dev/null
@@ -1,14 +0,0 @@
-AC_DEFUN(BUILD_WITH_SHARED_BUILD_DIR,
- [ AC_ARG_WITH([shared-build-dir],
- [AC_HELP_STRING([--with-shared-build-dir=DIR],
- [temporary build directory where libraries are installed [$srcdir/sharedbuild]])])
-
- sharedbuilddir="$srcdir/sharedbuild"
- if test x"$with_shared_build_dir" != x; then
- sharedbuilddir=$with_shared_build_dir
- CFLAGS="$CFLAGS -I$with_shared_build_dir/include"
- LDFLAGS="$LDFLAGS -L$with_shared_build_dir/lib"
- fi
- AC_SUBST(sharedbuilddir)
- ])
-
diff --git a/lib/talloc/config.guess b/lib/talloc/config.guess
deleted file mode 100755
index da83314608..0000000000
--- a/lib/talloc/config.guess
+++ /dev/null
@@ -1,1561 +0,0 @@
-#! /bin/sh
-# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
-# Free Software Foundation, Inc.
-
-timestamp='2009-04-27'
-
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner <per@bothner.com>.
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
-#
-# The plan is that this can be called by configure scripts if you
-# don't specify an explicit build system type.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION]
-
-Output the configuration name of the system \`$me' is run on.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.guess ($timestamp)
-
-Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help" >&2
- exit 1 ;;
- * )
- break ;;
- esac
-done
-
-if test $# != 0; then
- echo "$me: too many arguments$help" >&2
- exit 1
-fi
-
-trap 'exit 1' 1 2 15
-
-# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
-# compiler to aid in system detection is discouraged as it requires
-# temporary files to be created and, as you can see below, it is a
-# headache to deal with in a portable fashion.
-
-# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
-# use `HOST_CC' if defined, but it is deprecated.
-
-# Portable tmp directory creation inspired by the Autoconf team.
-
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,) echo "int x;" > $dummy.c ;
- for c in cc gcc c89 c99 ; do
- if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
- CC_FOR_BUILD="$c"; break ;
- fi ;
- done ;
- if test x"$CC_FOR_BUILD" = x ; then
- CC_FOR_BUILD=no_compiler_found ;
- fi
- ;;
- ,,*) CC_FOR_BUILD=$CC ;;
- ,*,*) CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
-
-# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
-# (ghazi@noc.rutgers.edu 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
- PATH=$PATH:/.attbin ; export PATH
-fi
-
-UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
-UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
-UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
-UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
-
-# Note: order is significant - the case branches are not exclusive.
-
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
- *:NetBSD:*:*)
- # NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
- # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
- # switched to ELF, *-*-netbsd* would select the old
- # object file format. This provides both forward
- # compatibility and a consistent mechanism for selecting the
- # object file format.
- #
- # Note: NetBSD doesn't particularly care about the vendor
- # portion of the name. We always set it to "unknown".
- sysctl="sysctl -n hw.machine_arch"
- UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
- case "${UNAME_MACHINE_ARCH}" in
- armeb) machine=armeb-unknown ;;
- arm*) machine=arm-unknown ;;
- sh3el) machine=shl-unknown ;;
- sh3eb) machine=sh-unknown ;;
- sh5el) machine=sh5le-unknown ;;
- *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
- esac
- # The Operating System including object format, if it has switched
- # to ELF recently, or will in the future.
- case "${UNAME_MACHINE_ARCH}" in
- arm*|i386|m68k|ns32k|sh3*|sparc|vax)
- eval $set_cc_for_build
- if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
- then
- # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
- # Return netbsd for either. FIX?
- os=netbsd
- else
- os=netbsdelf
- fi
- ;;
- *)
- os=netbsd
- ;;
- esac
- # The OS release
- # Debian GNU/NetBSD machines have a different userland, and
- # thus, need a distinct triplet. However, they do not need
- # kernel version information, so it can be replaced with a
- # suitable tag, in the style of linux-gnu.
- case "${UNAME_VERSION}" in
- Debian*)
- release='-gnu'
- ;;
- *)
- release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
- ;;
- esac
- # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
- # contains redundant information, the shorter form:
- # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}"
- exit ;;
- *:OpenBSD:*:*)
- UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
- exit ;;
- *:ekkoBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
- exit ;;
- *:SolidBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
- exit ;;
- macppc:MirBSD:*:*)
- echo powerpc-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- *:MirBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- alpha:OSF1:*:*)
- case $UNAME_RELEASE in
- *4.0)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
- ;;
- *5.*)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
- ;;
- esac
- # According to Compaq, /usr/sbin/psrinfo has been available on
- # OSF/1 and Tru64 systems produced since 1995. I hope that
- # covers most systems running today. This code pipes the CPU
- # types through head -n 1, so we only detect the type of CPU 0.
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
- case "$ALPHA_CPU_TYPE" in
- "EV4 (21064)")
- UNAME_MACHINE="alpha" ;;
- "EV4.5 (21064)")
- UNAME_MACHINE="alpha" ;;
- "LCA4 (21066/21068)")
- UNAME_MACHINE="alpha" ;;
- "EV5 (21164)")
- UNAME_MACHINE="alphaev5" ;;
- "EV5.6 (21164A)")
- UNAME_MACHINE="alphaev56" ;;
- "EV5.6 (21164PC)")
- UNAME_MACHINE="alphapca56" ;;
- "EV5.7 (21164PC)")
- UNAME_MACHINE="alphapca57" ;;
- "EV6 (21264)")
- UNAME_MACHINE="alphaev6" ;;
- "EV6.7 (21264A)")
- UNAME_MACHINE="alphaev67" ;;
- "EV6.8CB (21264C)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8AL (21264B)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8CX (21264D)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.9A (21264/EV69A)")
- UNAME_MACHINE="alphaev69" ;;
- "EV7 (21364)")
- UNAME_MACHINE="alphaev7" ;;
- "EV7.9 (21364A)")
- UNAME_MACHINE="alphaev79" ;;
- esac
- # A Pn.n version is a patched version.
- # A Vn.n version is a released version.
- # A Tn.n version is a released field test version.
- # A Xn.n version is an unreleased experimental baselevel.
- # 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- exit ;;
- Alpha\ *:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # Should we change UNAME_MACHINE based on the output of uname instead
- # of the specific Alpha model?
- echo alpha-pc-interix
- exit ;;
- 21064:Windows_NT:50:3)
- echo alpha-dec-winnt3.5
- exit ;;
- Amiga*:UNIX_System_V:4.0:*)
- echo m68k-unknown-sysv4
- exit ;;
- *:[Aa]miga[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-amigaos
- exit ;;
- *:[Mm]orph[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-morphos
- exit ;;
- *:OS/390:*:*)
- echo i370-ibm-openedition
- exit ;;
- *:z/VM:*:*)
- echo s390-ibm-zvmoe
- exit ;;
- *:OS400:*:*)
- echo powerpc-ibm-os400
- exit ;;
- arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
- echo arm-acorn-riscix${UNAME_RELEASE}
- exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
- echo arm-unknown-riscos
- exit ;;
- SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
- echo hppa1.1-hitachi-hiuxmpp
- exit ;;
- Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
- # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
- if test "`(/bin/universe) 2>/dev/null`" = att ; then
- echo pyramid-pyramid-sysv3
- else
- echo pyramid-pyramid-bsd
- fi
- exit ;;
- NILE*:*:*:dcosx)
- echo pyramid-pyramid-svr4
- exit ;;
- DRS?6000:unix:4.0:6*)
- echo sparc-icl-nx6
- exit ;;
- DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
- case `/usr/bin/uname -p` in
- sparc) echo sparc-icl-nx7; exit ;;
- esac ;;
- s390x:SunOS:*:*)
- echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4H:SunOS:5.*:*)
- echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
- echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
- eval $set_cc_for_build
- SUN_ARCH="i386"
- # If there is a compiler, see if it is configured for 64-bit objects.
- # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
- # This test works for both compilers.
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
- if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
- grep IS_64BIT_ARCH >/dev/null
- then
- SUN_ARCH="x86_64"
- fi
- fi
- echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:6*:*)
- # According to config.sub, this is the proper way to canonicalize
- # SunOS6. Hard to guess exactly what SunOS6 will be like, but
- # it's likely to be more like Solaris than SunOS4.
- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:*:*)
- case "`/usr/bin/arch -k`" in
- Series*|S4*)
- UNAME_RELEASE=`uname -v`
- ;;
- esac
- # Japanese Language versions have a version number like `4.1.3-JL'.
- echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
- exit ;;
- sun3*:SunOS:*:*)
- echo m68k-sun-sunos${UNAME_RELEASE}
- exit ;;
- sun*:*:4.2BSD:*)
- UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
- test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
- case "`/bin/arch`" in
- sun3)
- echo m68k-sun-sunos${UNAME_RELEASE}
- ;;
- sun4)
- echo sparc-sun-sunos${UNAME_RELEASE}
- ;;
- esac
- exit ;;
- aushp:SunOS:*:*)
- echo sparc-auspex-sunos${UNAME_RELEASE}
- exit ;;
- # The situation for MiNT is a little confusing. The machine name
- # can be virtually everything (everything which is not
- # "atarist" or "atariste" at least should have a processor
- # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
- # to the lowercase version "mint" (or "freemint"). Finally
- # the system name "TOS" denotes a system which is actually not
- # MiNT. But MiNT is downward compatible to TOS, so this should
- # be no problem.
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
- exit ;;
- hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
- exit ;;
- *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
- exit ;;
- m68k:machten:*:*)
- echo m68k-apple-machten${UNAME_RELEASE}
- exit ;;
- powerpc:machten:*:*)
- echo powerpc-apple-machten${UNAME_RELEASE}
- exit ;;
- RISC*:Mach:*:*)
- echo mips-dec-mach_bsd4.3
- exit ;;
- RISC*:ULTRIX:*:*)
- echo mips-dec-ultrix${UNAME_RELEASE}
- exit ;;
- VAX*:ULTRIX*:*:*)
- echo vax-dec-ultrix${UNAME_RELEASE}
- exit ;;
- 2020:CLIX:*:* | 2430:CLIX:*:*)
- echo clipper-intergraph-clix${UNAME_RELEASE}
- exit ;;
- mips:*:*:UMIPS | mips:*:*:RISCos)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-#ifdef __cplusplus
-#include <stdio.h> /* for printf() prototype */
- int main (int argc, char *argv[]) {
-#else
- int main (argc, argv) int argc; char *argv[]; {
-#endif
- #if defined (host_mips) && defined (MIPSEB)
- #if defined (SYSTYPE_SYSV)
- printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_SVR4)
- printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
- printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
- #endif
- #endif
- exit (-1);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c &&
- dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
- SYSTEM_NAME=`$dummy $dummyarg` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo mips-mips-riscos${UNAME_RELEASE}
- exit ;;
- Motorola:PowerMAX_OS:*:*)
- echo powerpc-motorola-powermax
- exit ;;
- Motorola:*:4.3:PL8-*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:Power_UNIX:*:*)
- echo powerpc-harris-powerunix
- exit ;;
- m88k:CX/UX:7*:*)
- echo m88k-harris-cxux7
- exit ;;
- m88k:*:4*:R4*)
- echo m88k-motorola-sysv4
- exit ;;
- m88k:*:3*:R3*)
- echo m88k-motorola-sysv3
- exit ;;
- AViiON:dgux:*:*)
- # DG/UX returns AViiON for all architectures
- UNAME_PROCESSOR=`/usr/bin/uname -p`
- if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
- then
- if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
- [ ${TARGET_BINARY_INTERFACE}x = x ]
- then
- echo m88k-dg-dgux${UNAME_RELEASE}
- else
- echo m88k-dg-dguxbcs${UNAME_RELEASE}
- fi
- else
- echo i586-dg-dgux${UNAME_RELEASE}
- fi
- exit ;;
- M88*:DolphinOS:*:*) # DolphinOS (SVR3)
- echo m88k-dolphin-sysv3
- exit ;;
- M88*:*:R3*:*)
- # Delta 88k system running SVR3
- echo m88k-motorola-sysv3
- exit ;;
- XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
- echo m88k-tektronix-sysv3
- exit ;;
- Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
- echo m68k-tektronix-bsd
- exit ;;
- *:IRIX*:*:*)
- echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
- exit ;;
- ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
- exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
- i*86:AIX:*:*)
- echo i386-ibm-aix
- exit ;;
- ia64:AIX:*:*)
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:2:3)
- if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <sys/systemcfg.h>
-
- main()
- {
- if (!__power_pc())
- exit(1);
- puts("powerpc-ibm-aix3.2.5");
- exit(0);
- }
-EOF
- if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
- then
- echo "$SYSTEM_NAME"
- else
- echo rs6000-ibm-aix3.2.5
- fi
- elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
- echo rs6000-ibm-aix3.2.4
- else
- echo rs6000-ibm-aix3.2
- fi
- exit ;;
- *:AIX:*:[456])
- IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
- if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
- IBM_ARCH=rs6000
- else
- IBM_ARCH=powerpc
- fi
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${IBM_ARCH}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:*:*)
- echo rs6000-ibm-aix
- exit ;;
- ibmrt:4.4BSD:*|romp-ibm:BSD:*)
- echo romp-ibm-bsd4.4
- exit ;;
- ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
- echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
- exit ;; # report: romp-ibm BSD 4.3
- *:BOSX:*:*)
- echo rs6000-bull-bosx
- exit ;;
- DPX/2?00:B.O.S.:*:*)
- echo m68k-bull-sysv3
- exit ;;
- 9000/[34]??:4.3bsd:1.*:*)
- echo m68k-hp-bsd
- exit ;;
- hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
- echo m68k-hp-bsd4.4
- exit ;;
- 9000/[34678]??:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- case "${UNAME_MACHINE}" in
- 9000/31? ) HP_ARCH=m68000 ;;
- 9000/[34]?? ) HP_ARCH=m68k ;;
- 9000/[678][0-9][0-9])
- if [ -x /usr/bin/getconf ]; then
- sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
- 532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
- 32) HP_ARCH="hppa2.0n" ;;
- 64) HP_ARCH="hppa2.0w" ;;
- '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
- esac ;;
- esac
- fi
- if [ "${HP_ARCH}" = "" ]; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-
- #define _HPUX_SOURCE
- #include <stdlib.h>
- #include <unistd.h>
-
- int main ()
- {
- #if defined(_SC_KERNEL_BITS)
- long bits = sysconf(_SC_KERNEL_BITS);
- #endif
- long cpu = sysconf (_SC_CPU_VERSION);
-
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
- case CPU_PA_RISC2_0:
- #if defined(_SC_KERNEL_BITS)
- switch (bits)
- {
- case 64: puts ("hppa2.0w"); break;
- case 32: puts ("hppa2.0n"); break;
- default: puts ("hppa2.0"); break;
- } break;
- #else /* !defined(_SC_KERNEL_BITS) */
- puts ("hppa2.0"); break;
- #endif
- default: puts ("hppa1.0"); break;
- }
- exit (0);
- }
-EOF
- (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
- test -z "$HP_ARCH" && HP_ARCH=hppa
- fi ;;
- esac
- if [ ${HP_ARCH} = "hppa2.0w" ]
- then
- eval $set_cc_for_build
-
- # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
- # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
- # generating 64-bit code. GNU and HP use different nomenclature:
- #
- # $ CC_FOR_BUILD=cc ./config.guess
- # => hppa2.0w-hp-hpux11.23
- # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
- # => hppa64-hp-hpux11.23
-
- if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
- grep __LP64__ >/dev/null
- then
- HP_ARCH="hppa2.0w"
- else
- HP_ARCH="hppa64"
- fi
- fi
- echo ${HP_ARCH}-hp-hpux${HPUX_REV}
- exit ;;
- ia64:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- echo ia64-hp-hpux${HPUX_REV}
- exit ;;
- 3050*:HI-UX:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <unistd.h>
- int
- main ()
- {
- long cpu = sysconf (_SC_CPU_VERSION);
- /* The order matters, because CPU_IS_HP_MC68K erroneously returns
- true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
- results, however. */
- if (CPU_IS_PA_RISC (cpu))
- {
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
- default: puts ("hppa-hitachi-hiuxwe2"); break;
- }
- }
- else if (CPU_IS_HP_MC68K (cpu))
- puts ("m68k-hitachi-hiuxwe2");
- else puts ("unknown-hitachi-hiuxwe2");
- exit (0);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo unknown-hitachi-hiuxwe2
- exit ;;
- 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
- echo hppa1.1-hp-bsd
- exit ;;
- 9000/8??:4.3bsd:*:*)
- echo hppa1.0-hp-bsd
- exit ;;
- *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
- echo hppa1.0-hp-mpeix
- exit ;;
- hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
- echo hppa1.1-hp-osf
- exit ;;
- hp8??:OSF1:*:*)
- echo hppa1.0-hp-osf
- exit ;;
- i*86:OSF1:*:*)
- if [ -x /usr/sbin/sysversion ] ; then
- echo ${UNAME_MACHINE}-unknown-osf1mk
- else
- echo ${UNAME_MACHINE}-unknown-osf1
- fi
- exit ;;
- parisc*:Lites*:*:*)
- echo hppa1.1-hp-lites
- exit ;;
- C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
- echo c1-convex-bsd
- exit ;;
- C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
- echo c34-convex-bsd
- exit ;;
- C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
- echo c38-convex-bsd
- exit ;;
- C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
- echo c4-convex-bsd
- exit ;;
- CRAY*Y-MP:*:*:*)
- echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*[A-Z]90:*:*:*)
- echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
- | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
- -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
- -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*TS:*:*:*)
- echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*T3E:*:*:*)
- echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*SV1:*:*:*)
- echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- *:UNICOS/mp:*:*)
- echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
- FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- 5000:UNIX_System_V:4.*:*)
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
- echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
- echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
- exit ;;
- sparc*:BSD/OS:*:*)
- echo sparc-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:BSD/OS:*:*)
- echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:FreeBSD:*:*)
- case ${UNAME_MACHINE} in
- pc98)
- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- amd64)
- echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- *)
- echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- esac
- exit ;;
- i*:CYGWIN*:*)
- echo ${UNAME_MACHINE}-pc-cygwin
- exit ;;
- *:MINGW*:*)
- echo ${UNAME_MACHINE}-pc-mingw32
- exit ;;
- i*:windows32*:*)
- # uname -m includes "-pc" on this system.
- echo ${UNAME_MACHINE}-mingw32
- exit ;;
- i*:PW*:*)
- echo ${UNAME_MACHINE}-pc-pw32
- exit ;;
- *:Interix*:[3456]*)
- case ${UNAME_MACHINE} in
- x86)
- echo i586-pc-interix${UNAME_RELEASE}
- exit ;;
- EM64T | authenticamd | genuineintel)
- echo x86_64-unknown-interix${UNAME_RELEASE}
- exit ;;
- IA64)
- echo ia64-unknown-interix${UNAME_RELEASE}
- exit ;;
- esac ;;
- [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
- echo i${UNAME_MACHINE}-pc-mks
- exit ;;
- i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
- # UNAME_MACHINE based on the output of uname instead of i386?
- echo i586-pc-interix
- exit ;;
- i*:UWIN*:*)
- echo ${UNAME_MACHINE}-pc-uwin
- exit ;;
- amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
- echo x86_64-unknown-cygwin
- exit ;;
- p*:CYGWIN*:*)
- echo powerpcle-unknown-cygwin
- exit ;;
- prep*:SunOS:5.*:*)
- echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- *:GNU:*:*)
- # the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
- exit ;;
- *:GNU/*:*:*)
- # other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
- exit ;;
- i*86:Minix:*:*)
- echo ${UNAME_MACHINE}-pc-minix
- exit ;;
- arm*:Linux:*:*)
- eval $set_cc_for_build
- if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep -q __ARM_EABI__
- then
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- else
- echo ${UNAME_MACHINE}-unknown-linux-gnueabi
- fi
- exit ;;
- avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- cris:Linux:*:*)
- echo cris-axis-linux-gnu
- exit ;;
- crisv32:Linux:*:*)
- echo crisv32-axis-linux-gnu
- exit ;;
- frv:Linux:*:*)
- echo frv-unknown-linux-gnu
- exit ;;
- ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- mips:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips
- #undef mipsel
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- mips64:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips64
- #undef mips64el
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- or32:Linux:*:*)
- echo or32-unknown-linux-gnu
- exit ;;
- ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
- exit ;;
- ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
- exit ;;
- alpha:Linux:*:*)
- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
- EV5) UNAME_MACHINE=alphaev5 ;;
- EV56) UNAME_MACHINE=alphaev56 ;;
- PCA56) UNAME_MACHINE=alphapca56 ;;
- PCA57) UNAME_MACHINE=alphapca56 ;;
- EV6) UNAME_MACHINE=alphaev6 ;;
- EV67) UNAME_MACHINE=alphaev67 ;;
- EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
- exit ;;
- padre:Linux:*:*)
- echo sparc-unknown-linux-gnu
- exit ;;
- parisc:Linux:*:* | hppa:Linux:*:*)
- # Look for CPU level
- case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
- esac
- exit ;;
- parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
- exit ;;
- s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
- exit ;;
- sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-gnu
- exit ;;
- x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
- exit ;;
- xtensa*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- i*86:Linux:*:*)
- # The BFD linker knows what the default object file format is, so
- # first see if it will tell us. cd to the root directory to prevent
- # problems with other programs or directories called `ld' in the path.
- # Set LC_ALL=C to ensure ld outputs messages in English.
- ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
- | sed -ne '/supported targets:/!d
- s/[ ][ ]*/ /g
- s/.*supported targets: *//
- s/ .*//
- p'`
- case "$ld_supported_targets" in
- elf32-i386)
- TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
- ;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit ;;
- esac
- # Determine whether the default compiler is a.out or elf
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #ifdef __ELF__
- # ifdef __GLIBC__
- # if __GLIBC__ >= 2
- LIBC=gnu
- # else
- LIBC=gnulibc1
- # endif
- # else
- LIBC=gnulibc1
- # endif
- #else
- #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- LIBC=gnu
- #else
- LIBC=gnuaout
- #endif
- #endif
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^LIBC/{
- s: ::g
- p
- }'`"
- test x"${LIBC}" != x && {
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
- exit
- }
- test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
- ;;
- i*86:DYNIX/ptx:4*:*)
- # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
- # earlier versions are messed up and put the nodename in both
- # sysname and nodename.
- echo i386-sequent-sysv4
- exit ;;
- i*86:UNIX_SV:4.2MP:2.*)
- # Unixware is an offshoot of SVR4, but it has its own version
- # number series starting with 2...
- # I am not positive that other SVR4 systems won't match this,
- # I just have to hope. -- rms.
- # Use sysv4.2uw... so that sysv4* matches it.
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
- exit ;;
- i*86:OS/2:*:*)
- # If we were able to find `uname', then EMX Unix compatibility
- # is probably installed.
- echo ${UNAME_MACHINE}-pc-os2-emx
- exit ;;
- i*86:XTS-300:*:STOP)
- echo ${UNAME_MACHINE}-unknown-stop
- exit ;;
- i*86:atheos:*:*)
- echo ${UNAME_MACHINE}-unknown-atheos
- exit ;;
- i*86:syllable:*:*)
- echo ${UNAME_MACHINE}-pc-syllable
- exit ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
- echo i386-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- i*86:*DOS:*:*)
- echo ${UNAME_MACHINE}-pc-msdosdjgpp
- exit ;;
- i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
- UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
- if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
- echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
- else
- echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
- fi
- exit ;;
- i*86:*:5:[678]*)
- # UnixWare 7.x, OpenUNIX and OpenServer 6.
- case `/bin/uname -X | grep "^Machine"` in
- *486*) UNAME_MACHINE=i486 ;;
- *Pentium) UNAME_MACHINE=i586 ;;
- *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
- esac
- echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
- exit ;;
- i*86:*:3.2:*)
- if test -f /usr/options/cb.name; then
- UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
- echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
- elif /bin/uname -X 2>/dev/null >/dev/null ; then
- UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
- (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
- (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
- && UNAME_MACHINE=i586
- (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
- && UNAME_MACHINE=i686
- (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
- && UNAME_MACHINE=i686
- echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
- else
- echo ${UNAME_MACHINE}-pc-sysv32
- fi
- exit ;;
- pc:*:*:*)
- # Left here for compatibility:
- # uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i586.
- # Note: whatever this is, it MUST be the same as what config.sub
- # prints for the "djgpp" host, or else GDB configury will decide that
- # this is a cross-build.
- echo i586-pc-msdosdjgpp
- exit ;;
- Intel:Mach:3*:*)
- echo i386-pc-mach3
- exit ;;
- paragon:*:*:*)
- echo i860-intel-osf1
- exit ;;
- i860:*:4.*:*) # i860-SVR4
- if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
- echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
- else # Add other i860-SVR4 vendors below as they are discovered.
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
- fi
- exit ;;
- mini*:CTIX:SYS*5:*)
- # "miniframe"
- echo m68010-convergent-sysv
- exit ;;
- mc68k:UNIX:SYSTEM5:3.51m)
- echo m68k-convergent-sysv
- exit ;;
- M680?0:D-NIX:5.3:*)
- echo m68k-diab-dnix
- exit ;;
- M68*:*:R3V[5678]*:*)
- test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
- 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
- OS_REL=''
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4; exit; } ;;
- NCR*:*:4.2:* | MPRAS*:*:4.2:*)
- OS_REL='.3'
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
- echo m68k-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- mc68030:UNIX_System_V:4.*:*)
- echo m68k-atari-sysv4
- exit ;;
- TSUNAMI:LynxOS:2.*:*)
- echo sparc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- rs6000:LynxOS:2.*:*)
- echo rs6000-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
- echo powerpc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- SM[BE]S:UNIX_SV:*:*)
- echo mips-dde-sysv${UNAME_RELEASE}
- exit ;;
- RM*:ReliantUNIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- RM*:SINIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- *:SINIX-*:*:*)
- if uname -p 2>/dev/null >/dev/null ; then
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- echo ${UNAME_MACHINE}-sni-sysv4
- else
- echo ns32k-sni-sysv
- fi
- exit ;;
- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
- # says <Richard.M.Bartel@ccMail.Census.GOV>
- echo i586-unisys-sysv4
- exit ;;
- *:UNIX_System_V:4*:FTX*)
- # From Gerald Hewes <hewes@openmarket.com>.
- # How about differentiating between stratus architectures? -djm
- echo hppa1.1-stratus-sysv4
- exit ;;
- *:*:*:FTX*)
- # From seanf@swdc.stratus.com.
- echo i860-stratus-sysv4
- exit ;;
- i*86:VOS:*:*)
- # From Paul.Green@stratus.com.
- echo ${UNAME_MACHINE}-stratus-vos
- exit ;;
- *:VOS:*:*)
- # From Paul.Green@stratus.com.
- echo hppa1.1-stratus-vos
- exit ;;
- mc68*:A/UX:*:*)
- echo m68k-apple-aux${UNAME_RELEASE}
- exit ;;
- news*:NEWS-OS:6*:*)
- echo mips-sony-newsos6
- exit ;;
- R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
- if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
- else
- echo mips-unknown-sysv${UNAME_RELEASE}
- fi
- exit ;;
- BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
- echo powerpc-be-beos
- exit ;;
- BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
- echo powerpc-apple-beos
- exit ;;
- BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
- echo i586-pc-beos
- exit ;;
- BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
- echo i586-pc-haiku
- exit ;;
- SX-4:SUPER-UX:*:*)
- echo sx4-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-5:SUPER-UX:*:*)
- echo sx5-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-6:SUPER-UX:*:*)
- echo sx6-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-7:SUPER-UX:*:*)
- echo sx7-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8:SUPER-UX:*:*)
- echo sx8-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8R:SUPER-UX:*:*)
- echo sx8r-nec-superux${UNAME_RELEASE}
- exit ;;
- Power*:Rhapsody:*:*)
- echo powerpc-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Rhapsody:*:*)
- echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Darwin:*:*)
- UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- case $UNAME_PROCESSOR in
- unknown) UNAME_PROCESSOR=powerpc ;;
- esac
- echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
- exit ;;
- *:procnto*:*:* | *:QNX:[0123456789]*:*)
- UNAME_PROCESSOR=`uname -p`
- if test "$UNAME_PROCESSOR" = "x86"; then
- UNAME_PROCESSOR=i386
- UNAME_MACHINE=pc
- fi
- echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
- exit ;;
- *:QNX:*:4*)
- echo i386-pc-qnx
- exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
- echo nse-tandem-nsk${UNAME_RELEASE}
- exit ;;
- NSR-?:NONSTOP_KERNEL:*:*)
- echo nsr-tandem-nsk${UNAME_RELEASE}
- exit ;;
- *:NonStop-UX:*:*)
- echo mips-compaq-nonstopux
- exit ;;
- BS2000:POSIX*:*:*)
- echo bs2000-siemens-sysv
- exit ;;
- DS/*:UNIX_System_V:*:*)
- echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
- exit ;;
- *:Plan9:*:*)
- # "uname -m" is not consistent, so use $cputype instead. 386
- # is converted to i386 for consistency with other x86
- # operating systems.
- if test "$cputype" = "386"; then
- UNAME_MACHINE=i386
- else
- UNAME_MACHINE="$cputype"
- fi
- echo ${UNAME_MACHINE}-unknown-plan9
- exit ;;
- *:TOPS-10:*:*)
- echo pdp10-unknown-tops10
- exit ;;
- *:TENEX:*:*)
- echo pdp10-unknown-tenex
- exit ;;
- KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
- echo pdp10-dec-tops20
- exit ;;
- XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
- echo pdp10-xkl-tops20
- exit ;;
- *:TOPS-20:*:*)
- echo pdp10-unknown-tops20
- exit ;;
- *:ITS:*:*)
- echo pdp10-unknown-its
- exit ;;
- SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
- exit ;;
- *:DragonFly:*:*)
- echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
- exit ;;
- *:*VMS:*:*)
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- case "${UNAME_MACHINE}" in
- A*) echo alpha-dec-vms ; exit ;;
- I*) echo ia64-dec-vms ; exit ;;
- V*) echo vax-dec-vms ; exit ;;
- esac ;;
- *:XENIX:*:SysV)
- echo i386-pc-xenix
- exit ;;
- i*86:skyos:*:*)
- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
- exit ;;
- i*86:rdos:*:*)
- echo ${UNAME_MACHINE}-pc-rdos
- exit ;;
- i*86:AROS:*:*)
- echo ${UNAME_MACHINE}-pc-aros
- exit ;;
-esac
-
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
- I don't know.... */
- printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
- printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
- "4"
-#else
- ""
-#endif
- ); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
- printf ("arm-acorn-riscix\n"); exit (0);
-#endif
-
-#if defined (hp300) && !defined (hpux)
- printf ("m68k-hp-bsd\n"); exit (0);
-#endif
-
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
- int version;
- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
- if (version < 4)
- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
- else
- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
- exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
- printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
- printf ("ns32k-encore-mach\n"); exit (0);
-#else
- printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
- printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
- printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
- printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
- struct utsname un;
-
- uname(&un);
-
- if (strncmp(un.version, "V2", 2) == 0) {
- printf ("i386-sequent-ptx2\n"); exit (0);
- }
- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
- printf ("i386-sequent-ptx1\n"); exit (0);
- }
- printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-# include <sys/param.h>
-# if defined (BSD)
-# if BSD == 43
- printf ("vax-dec-bsd4.3\n"); exit (0);
-# else
-# if BSD == 199006
- printf ("vax-dec-bsd4.3reno\n"); exit (0);
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# endif
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# else
- printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
- printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
- exit (1);
-}
-EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
- case `getsysinfo -f cpu_type` in
- c1*)
- echo c1-convex-bsd
- exit ;;
- c2*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- c34*)
- echo c34-convex-bsd
- exit ;;
- c38*)
- echo c38-convex-bsd
- exit ;;
- c4*)
- echo c4-convex-bsd
- exit ;;
- esac
-fi
-
-cat >&2 <<EOF
-$0: unable to guess system type
-
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
-
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
-and
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
-
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
-
-config.guess timestamp = $timestamp
-
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
-/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
-
-hostinfo = `(hostinfo) 2>/dev/null`
-/bin/universe = `(/bin/universe) 2>/dev/null`
-/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
-/bin/arch = `(/bin/arch) 2>/dev/null`
-/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
-
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
-EOF
-
-exit 1
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/talloc/config.mk b/lib/talloc/config.mk
deleted file mode 100644
index 5cdf3a1a8e..0000000000
--- a/lib/talloc/config.mk
+++ /dev/null
@@ -1,7 +0,0 @@
-[LIBRARY::LIBTALLOC]
-OUTPUT_TYPE = MERGED_OBJ
-CFLAGS = -I$(tallocsrcdir)
-
-LIBTALLOC_OBJ_FILES = $(tallocsrcdir)/talloc.o
-
-MANPAGES += $(tallocdir)/talloc.3
diff --git a/lib/talloc/config.sub b/lib/talloc/config.sub
deleted file mode 100755
index a39437d015..0000000000
--- a/lib/talloc/config.sub
+++ /dev/null
@@ -1,1686 +0,0 @@
-#! /bin/sh
-# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
-# Free Software Foundation, Inc.
-
-timestamp='2009-04-17'
-
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# Configuration subroutine to validate and canonicalize a configuration type.
-# Supply the specified configuration type as an argument.
-# If it is invalid, we print an error message on stderr and exit with code 1.
-# Otherwise, we print the canonical config type on stdout and succeed.
-
-# This file is supposed to be the same for all GNU packages
-# and recognize all the CPU types, system types and aliases
-# that are meaningful with *any* GNU software.
-# Each package is responsible for reporting which valid configurations
-# it does not support. The user should be able to distinguish
-# a failure to support a valid configuration from a meaningless
-# configuration.
-
-# The goal of this file is to map all the various variations of a given
-# machine specification into a single specification in the form:
-# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
-# or in some cases, the newer four-part form:
-# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
-# It is wrong to echo any other type of specification.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
- $0 [OPTION] ALIAS
-
-Canonicalize a configuration name.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.sub ($timestamp)
-
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help"
- exit 1 ;;
-
- *local*)
- # First pass through any local machine types.
- echo $1
- exit ;;
-
- * )
- break ;;
- esac
-done
-
-case $# in
- 0) echo "$me: missing argument$help" >&2
- exit 1;;
- 1) ;;
- *) echo "$me: too many arguments$help" >&2
- exit 1;;
-esac
-
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
- nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
- uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
- kopensolaris*-gnu* | \
- storm-chaos* | os2-emx* | rtmk-nova*)
- os=-$maybe_os
- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
- ;;
- *)
- basic_machine=`echo $1 | sed 's/-[^-]*$//'`
- if [ $basic_machine != $1 ]
- then os=`echo $1 | sed 's/.*-/-/'`
- else os=; fi
- ;;
-esac
-
-### Let's recognize common machines as not being operating systems so
-### that things like config.sub decstation-3100 work. We also
-### recognize some manufacturers as not being operating systems, so we
-### can provide default operating systems below.
-case $os in
- -sun*os*)
- # Prevent following clause from handling this invalid input.
- ;;
- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
- -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
- -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
- -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
- -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
- -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray)
- os=
- basic_machine=$1
- ;;
- -sim | -cisco | -oki | -wec | -winbond)
- os=
- basic_machine=$1
- ;;
- -scout)
- ;;
- -wrs)
- os=-vxworks
- basic_machine=$1
- ;;
- -chorusos*)
- os=-chorusos
- basic_machine=$1
- ;;
- -chorusrdb)
- os=-chorusrdb
- basic_machine=$1
- ;;
- -hiux*)
- os=-hiuxwe2
- ;;
- -sco6)
- os=-sco5v6
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5)
- os=-sco3.2v5
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco4)
- os=-sco3.2v4
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2.[4-9]*)
- os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2v[4-9]*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5v6*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco*)
- os=-sco3.2v2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -udk*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -isc)
- os=-isc2.2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -clix*)
- basic_machine=clipper-intergraph
- ;;
- -isc*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -lynx*)
- os=-lynxos
- ;;
- -ptx*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
- ;;
- -windowsnt*)
- os=`echo $os | sed -e 's/windowsnt/winnt/'`
- ;;
- -psos*)
- os=-psos
- ;;
- -mint | -mint[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
-esac
-
-# Decode aliases for certain CPU-COMPANY combinations.
-case $basic_machine in
- # Recognize the basic CPU types without company name.
- # Some are omitted here because they have special meanings below.
- 1750a | 580 \
- | a29k \
- | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
- | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
- | am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
- | bfin \
- | c4x | clipper \
- | d10v | d30v | dlx | dsp16xx \
- | fido | fr30 | frv \
- | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
- | i370 | i860 | i960 | ia64 \
- | ip2k | iq2000 \
- | lm32 \
- | m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep | metag \
- | mips | mipsbe | mipseb | mipsel | mipsle \
- | mips16 \
- | mips64 | mips64el \
- | mips64octeon | mips64octeonel \
- | mips64orion | mips64orionel \
- | mips64r5900 | mips64r5900el \
- | mips64vr | mips64vrel \
- | mips64vr4100 | mips64vr4100el \
- | mips64vr4300 | mips64vr4300el \
- | mips64vr5000 | mips64vr5000el \
- | mips64vr5900 | mips64vr5900el \
- | mipsisa32 | mipsisa32el \
- | mipsisa32r2 | mipsisa32r2el \
- | mipsisa64 | mipsisa64el \
- | mipsisa64r2 | mipsisa64r2el \
- | mipsisa64sb1 | mipsisa64sb1el \
- | mipsisa64sr71k | mipsisa64sr71kel \
- | mipstx39 | mipstx39el \
- | mn10200 | mn10300 \
- | moxie \
- | mt \
- | msp430 \
- | nios | nios2 \
- | ns16k | ns32k \
- | or32 \
- | pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
- | pyramid \
- | score \
- | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
- | sh64 | sh64le \
- | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
- | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
- | spu | strongarm \
- | tahoe | thumb | tic4x | tic80 | tron \
- | v850 | v850e \
- | we32k \
- | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
- | z8k | z80)
- basic_machine=$basic_machine-unknown
- ;;
- m6811 | m68hc11 | m6812 | m68hc12)
- # Motorola 68HC11/12.
- basic_machine=$basic_machine-unknown
- os=-none
- ;;
- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
- ;;
- ms1)
- basic_machine=mt-unknown
- ;;
-
- # We use `pc' rather than `unknown'
- # because (1) that's what they normally are, and
- # (2) the word "unknown" tends to confuse beginning users.
- i*86 | x86_64)
- basic_machine=$basic_machine-pc
- ;;
- # Object if more than one company name word.
- *-*-*)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
- # Recognize the basic CPU types with company name.
- 580-* \
- | a29k-* \
- | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
- | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
- | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
- | avr-* | avr32-* \
- | bfin-* | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
- | clipper-* | craynv-* | cydra-* \
- | d10v-* | d30v-* | dlx-* \
- | elxsi-* \
- | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
- | h8300-* | h8500-* \
- | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
- | i*86-* | i860-* | i960-* | ia64-* \
- | ip2k-* | iq2000-* \
- | lm32-* \
- | m32c-* | m32r-* | m32rle-* \
- | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
- | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
- | mips16-* \
- | mips64-* | mips64el-* \
- | mips64octeon-* | mips64octeonel-* \
- | mips64orion-* | mips64orionel-* \
- | mips64r5900-* | mips64r5900el-* \
- | mips64vr-* | mips64vrel-* \
- | mips64vr4100-* | mips64vr4100el-* \
- | mips64vr4300-* | mips64vr4300el-* \
- | mips64vr5000-* | mips64vr5000el-* \
- | mips64vr5900-* | mips64vr5900el-* \
- | mipsisa32-* | mipsisa32el-* \
- | mipsisa32r2-* | mipsisa32r2el-* \
- | mipsisa64-* | mipsisa64el-* \
- | mipsisa64r2-* | mipsisa64r2el-* \
- | mipsisa64sb1-* | mipsisa64sb1el-* \
- | mipsisa64sr71k-* | mipsisa64sr71kel-* \
- | mipstx39-* | mipstx39el-* \
- | mmix-* \
- | mt-* \
- | msp430-* \
- | nios-* | nios2-* \
- | none-* | np1-* | ns16k-* | ns32k-* \
- | orion-* \
- | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
- | pyramid-* \
- | romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
- | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
- | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
- | sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
- | tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
- | tron-* \
- | v850-* | v850e-* | vax-* \
- | we32k-* \
- | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
- | xstormy16-* | xtensa*-* \
- | ymp-* \
- | z8k-* | z80-*)
- ;;
- # Recognize the basic CPU types without company name, with glob match.
- xtensa*)
- basic_machine=$basic_machine-unknown
- ;;
- # Recognize the various machine names and aliases which stand
- # for a CPU type and a company and sometimes even an OS.
- 386bsd)
- basic_machine=i386-unknown
- os=-bsd
- ;;
- 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
- basic_machine=m68000-att
- ;;
- 3b*)
- basic_machine=we32k-att
- ;;
- a29khif)
- basic_machine=a29k-amd
- os=-udi
- ;;
- abacus)
- basic_machine=abacus-unknown
- ;;
- adobe68k)
- basic_machine=m68010-adobe
- os=-scout
- ;;
- alliant | fx80)
- basic_machine=fx80-alliant
- ;;
- altos | altos3068)
- basic_machine=m68k-altos
- ;;
- am29k)
- basic_machine=a29k-none
- os=-bsd
- ;;
- amd64)
- basic_machine=x86_64-pc
- ;;
- amd64-*)
- basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- amdahl)
- basic_machine=580-amdahl
- os=-sysv
- ;;
- amiga | amiga-*)
- basic_machine=m68k-unknown
- ;;
- amigaos | amigados)
- basic_machine=m68k-unknown
- os=-amigaos
- ;;
- amigaunix | amix)
- basic_machine=m68k-unknown
- os=-sysv4
- ;;
- apollo68)
- basic_machine=m68k-apollo
- os=-sysv
- ;;
- apollo68bsd)
- basic_machine=m68k-apollo
- os=-bsd
- ;;
- aros)
- basic_machine=i386-pc
- os=-aros
- ;;
- aux)
- basic_machine=m68k-apple
- os=-aux
- ;;
- balance)
- basic_machine=ns32k-sequent
- os=-dynix
- ;;
- blackfin)
- basic_machine=bfin-unknown
- os=-linux
- ;;
- blackfin-*)
- basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- c90)
- basic_machine=c90-cray
- os=-unicos
- ;;
- cegcc)
- basic_machine=arm-unknown
- os=-cegcc
- ;;
- convex-c1)
- basic_machine=c1-convex
- os=-bsd
- ;;
- convex-c2)
- basic_machine=c2-convex
- os=-bsd
- ;;
- convex-c32)
- basic_machine=c32-convex
- os=-bsd
- ;;
- convex-c34)
- basic_machine=c34-convex
- os=-bsd
- ;;
- convex-c38)
- basic_machine=c38-convex
- os=-bsd
- ;;
- cray | j90)
- basic_machine=j90-cray
- os=-unicos
- ;;
- craynv)
- basic_machine=craynv-cray
- os=-unicosmp
- ;;
- cr16)
- basic_machine=cr16-unknown
- os=-elf
- ;;
- crds | unos)
- basic_machine=m68k-crds
- ;;
- crisv32 | crisv32-* | etraxfs*)
- basic_machine=crisv32-axis
- ;;
- cris | cris-* | etrax*)
- basic_machine=cris-axis
- ;;
- crx)
- basic_machine=crx-unknown
- os=-elf
- ;;
- da30 | da30-*)
- basic_machine=m68k-da30
- ;;
- decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
- basic_machine=mips-dec
- ;;
- decsystem10* | dec10*)
- basic_machine=pdp10-dec
- os=-tops10
- ;;
- decsystem20* | dec20*)
- basic_machine=pdp10-dec
- os=-tops20
- ;;
- delta | 3300 | motorola-3300 | motorola-delta \
- | 3300-motorola | delta-motorola)
- basic_machine=m68k-motorola
- ;;
- delta88)
- basic_machine=m88k-motorola
- os=-sysv3
- ;;
- dicos)
- basic_machine=i686-pc
- os=-dicos
- ;;
- djgpp)
- basic_machine=i586-pc
- os=-msdosdjgpp
- ;;
- dpx20 | dpx20-*)
- basic_machine=rs6000-bull
- os=-bosx
- ;;
- dpx2* | dpx2*-bull)
- basic_machine=m68k-bull
- os=-sysv3
- ;;
- ebmon29k)
- basic_machine=a29k-amd
- os=-ebmon
- ;;
- elxsi)
- basic_machine=elxsi-elxsi
- os=-bsd
- ;;
- encore | umax | mmax)
- basic_machine=ns32k-encore
- ;;
- es1800 | OSE68k | ose68k | ose | OSE)
- basic_machine=m68k-ericsson
- os=-ose
- ;;
- fx2800)
- basic_machine=i860-alliant
- ;;
- genix)
- basic_machine=ns32k-ns
- ;;
- gmicro)
- basic_machine=tron-gmicro
- os=-sysv
- ;;
- go32)
- basic_machine=i386-pc
- os=-go32
- ;;
- h3050r* | hiux*)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- h8300hms)
- basic_machine=h8300-hitachi
- os=-hms
- ;;
- h8300xray)
- basic_machine=h8300-hitachi
- os=-xray
- ;;
- h8500hms)
- basic_machine=h8500-hitachi
- os=-hms
- ;;
- harris)
- basic_machine=m88k-harris
- os=-sysv3
- ;;
- hp300-*)
- basic_machine=m68k-hp
- ;;
- hp300bsd)
- basic_machine=m68k-hp
- os=-bsd
- ;;
- hp300hpux)
- basic_machine=m68k-hp
- os=-hpux
- ;;
- hp3k9[0-9][0-9] | hp9[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k2[0-9][0-9] | hp9k31[0-9])
- basic_machine=m68000-hp
- ;;
- hp9k3[2-9][0-9])
- basic_machine=m68k-hp
- ;;
- hp9k6[0-9][0-9] | hp6[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k7[0-79][0-9] | hp7[0-79][0-9])
- basic_machine=hppa1.1-hp
- ;;
- hp9k78[0-9] | hp78[0-9])
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][13679] | hp8[0-9][13679])
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][0-9] | hp8[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hppa-next)
- os=-nextstep3
- ;;
- hppaosf)
- basic_machine=hppa1.1-hp
- os=-osf
- ;;
- hppro)
- basic_machine=hppa1.1-hp
- os=-proelf
- ;;
- i370-ibm* | ibm*)
- basic_machine=i370-ibm
- ;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
- i*86v32)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv32
- ;;
- i*86v4*)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv4
- ;;
- i*86v)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv
- ;;
- i*86sol2)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-solaris2
- ;;
- i386mach)
- basic_machine=i386-mach
- os=-mach
- ;;
- i386-vsta | vsta)
- basic_machine=i386-unknown
- os=-vsta
- ;;
- iris | iris4d)
- basic_machine=mips-sgi
- case $os in
- -irix*)
- ;;
- *)
- os=-irix4
- ;;
- esac
- ;;
- isi68 | isi)
- basic_machine=m68k-isi
- os=-sysv
- ;;
- m68knommu)
- basic_machine=m68k-unknown
- os=-linux
- ;;
- m68knommu-*)
- basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- m88k-omron*)
- basic_machine=m88k-omron
- ;;
- magnum | m3230)
- basic_machine=mips-mips
- os=-sysv
- ;;
- merlin)
- basic_machine=ns32k-utek
- os=-sysv
- ;;
- mingw32)
- basic_machine=i386-pc
- os=-mingw32
- ;;
- mingw32ce)
- basic_machine=arm-unknown
- os=-mingw32ce
- ;;
- miniframe)
- basic_machine=m68000-convergent
- ;;
- *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
- mips3*-*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
- ;;
- mips3*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
- ;;
- monitor)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- morphos)
- basic_machine=powerpc-unknown
- os=-morphos
- ;;
- msdos)
- basic_machine=i386-pc
- os=-msdos
- ;;
- ms1-*)
- basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
- ;;
- mvs)
- basic_machine=i370-ibm
- os=-mvs
- ;;
- ncr3000)
- basic_machine=i486-ncr
- os=-sysv4
- ;;
- netbsd386)
- basic_machine=i386-unknown
- os=-netbsd
- ;;
- netwinder)
- basic_machine=armv4l-rebel
- os=-linux
- ;;
- news | news700 | news800 | news900)
- basic_machine=m68k-sony
- os=-newsos
- ;;
- news1000)
- basic_machine=m68030-sony
- os=-newsos
- ;;
- news-3600 | risc-news)
- basic_machine=mips-sony
- os=-newsos
- ;;
- necv70)
- basic_machine=v70-nec
- os=-sysv
- ;;
- next | m*-next )
- basic_machine=m68k-next
- case $os in
- -nextstep* )
- ;;
- -ns2*)
- os=-nextstep2
- ;;
- *)
- os=-nextstep3
- ;;
- esac
- ;;
- nh3000)
- basic_machine=m68k-harris
- os=-cxux
- ;;
- nh[45]000)
- basic_machine=m88k-harris
- os=-cxux
- ;;
- nindy960)
- basic_machine=i960-intel
- os=-nindy
- ;;
- mon960)
- basic_machine=i960-intel
- os=-mon960
- ;;
- nonstopux)
- basic_machine=mips-compaq
- os=-nonstopux
- ;;
- np1)
- basic_machine=np1-gould
- ;;
- nsr-tandem)
- basic_machine=nsr-tandem
- ;;
- op50n-* | op60c-*)
- basic_machine=hppa1.1-oki
- os=-proelf
- ;;
- openrisc | openrisc-*)
- basic_machine=or32-unknown
- ;;
- os400)
- basic_machine=powerpc-ibm
- os=-os400
- ;;
- OSE68000 | ose68000)
- basic_machine=m68000-ericsson
- os=-ose
- ;;
- os68k)
- basic_machine=m68k-none
- os=-os68k
- ;;
- pa-hitachi)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- paragon)
- basic_machine=i860-intel
- os=-osf
- ;;
- parisc)
- basic_machine=hppa-unknown
- os=-linux
- ;;
- parisc-*)
- basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- pbd)
- basic_machine=sparc-tti
- ;;
- pbb)
- basic_machine=m68k-tti
- ;;
- pc532 | pc532-*)
- basic_machine=ns32k-pc532
- ;;
- pc98)
- basic_machine=i386-pc
- ;;
- pc98-*)
- basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium | p5 | k5 | k6 | nexgen | viac3)
- basic_machine=i586-pc
- ;;
- pentiumpro | p6 | 6x86 | athlon | athlon_*)
- basic_machine=i686-pc
- ;;
- pentiumii | pentium2 | pentiumiii | pentium3)
- basic_machine=i686-pc
- ;;
- pentium4)
- basic_machine=i786-pc
- ;;
- pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
- basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumpro-* | p6-* | 6x86-* | athlon-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium4-*)
- basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pn)
- basic_machine=pn-gould
- ;;
- power) basic_machine=power-ibm
- ;;
- ppc) basic_machine=powerpc-unknown
- ;;
- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppcle | powerpclittle | ppc-le | powerpc-little)
- basic_machine=powerpcle-unknown
- ;;
- ppcle-* | powerpclittle-*)
- basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64) basic_machine=powerpc64-unknown
- ;;
- ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64le | powerpc64little | ppc64-le | powerpc64-little)
- basic_machine=powerpc64le-unknown
- ;;
- ppc64le-* | powerpc64little-*)
- basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ps2)
- basic_machine=i386-ibm
- ;;
- pw32)
- basic_machine=i586-unknown
- os=-pw32
- ;;
- rdos)
- basic_machine=i386-pc
- os=-rdos
- ;;
- rom68k)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- rm[46]00)
- basic_machine=mips-siemens
- ;;
- rtpc | rtpc-*)
- basic_machine=romp-ibm
- ;;
- s390 | s390-*)
- basic_machine=s390-ibm
- ;;
- s390x | s390x-*)
- basic_machine=s390x-ibm
- ;;
- sa29200)
- basic_machine=a29k-amd
- os=-udi
- ;;
- sb1)
- basic_machine=mipsisa64sb1-unknown
- ;;
- sb1el)
- basic_machine=mipsisa64sb1el-unknown
- ;;
- sde)
- basic_machine=mipsisa32-sde
- os=-elf
- ;;
- sei)
- basic_machine=mips-sei
- os=-seiux
- ;;
- sequent)
- basic_machine=i386-sequent
- ;;
- sh)
- basic_machine=sh-hitachi
- os=-hms
- ;;
- sh5el)
- basic_machine=sh5le-unknown
- ;;
- sh64)
- basic_machine=sh64-unknown
- ;;
- sparclite-wrs | simso-wrs)
- basic_machine=sparclite-wrs
- os=-vxworks
- ;;
- sps7)
- basic_machine=m68k-bull
- os=-sysv2
- ;;
- spur)
- basic_machine=spur-unknown
- ;;
- st2000)
- basic_machine=m68k-tandem
- ;;
- stratus)
- basic_machine=i860-stratus
- os=-sysv4
- ;;
- sun2)
- basic_machine=m68000-sun
- ;;
- sun2os3)
- basic_machine=m68000-sun
- os=-sunos3
- ;;
- sun2os4)
- basic_machine=m68000-sun
- os=-sunos4
- ;;
- sun3os3)
- basic_machine=m68k-sun
- os=-sunos3
- ;;
- sun3os4)
- basic_machine=m68k-sun
- os=-sunos4
- ;;
- sun4os3)
- basic_machine=sparc-sun
- os=-sunos3
- ;;
- sun4os4)
- basic_machine=sparc-sun
- os=-sunos4
- ;;
- sun4sol2)
- basic_machine=sparc-sun
- os=-solaris2
- ;;
- sun3 | sun3-*)
- basic_machine=m68k-sun
- ;;
- sun4)
- basic_machine=sparc-sun
- ;;
- sun386 | sun386i | roadrunner)
- basic_machine=i386-sun
- ;;
- sv1)
- basic_machine=sv1-cray
- os=-unicos
- ;;
- symmetry)
- basic_machine=i386-sequent
- os=-dynix
- ;;
- t3e)
- basic_machine=alphaev5-cray
- os=-unicos
- ;;
- t90)
- basic_machine=t90-cray
- os=-unicos
- ;;
- tic54x | c54x*)
- basic_machine=tic54x-unknown
- os=-coff
- ;;
- tic55x | c55x*)
- basic_machine=tic55x-unknown
- os=-coff
- ;;
- tic6x | c6x*)
- basic_machine=tic6x-unknown
- os=-coff
- ;;
- tile*)
- basic_machine=tile-unknown
- os=-linux-gnu
- ;;
- tx39)
- basic_machine=mipstx39-unknown
- ;;
- tx39el)
- basic_machine=mipstx39el-unknown
- ;;
- toad1)
- basic_machine=pdp10-xkl
- os=-tops20
- ;;
- tower | tower-32)
- basic_machine=m68k-ncr
- ;;
- tpf)
- basic_machine=s390x-ibm
- os=-tpf
- ;;
- udi29k)
- basic_machine=a29k-amd
- os=-udi
- ;;
- ultra3)
- basic_machine=a29k-nyu
- os=-sym1
- ;;
- v810 | necv810)
- basic_machine=v810-nec
- os=-none
- ;;
- vaxv)
- basic_machine=vax-dec
- os=-sysv
- ;;
- vms)
- basic_machine=vax-dec
- os=-vms
- ;;
- vpp*|vx|vx-*)
- basic_machine=f301-fujitsu
- ;;
- vxworks960)
- basic_machine=i960-wrs
- os=-vxworks
- ;;
- vxworks68)
- basic_machine=m68k-wrs
- os=-vxworks
- ;;
- vxworks29k)
- basic_machine=a29k-wrs
- os=-vxworks
- ;;
- w65*)
- basic_machine=w65-wdc
- os=-none
- ;;
- w89k-*)
- basic_machine=hppa1.1-winbond
- os=-proelf
- ;;
- xbox)
- basic_machine=i686-pc
- os=-mingw32
- ;;
- xps | xps100)
- basic_machine=xps100-honeywell
- ;;
- ymp)
- basic_machine=ymp-cray
- os=-unicos
- ;;
- z8k-*-coff)
- basic_machine=z8k-unknown
- os=-sim
- ;;
- z80-*-coff)
- basic_machine=z80-unknown
- os=-sim
- ;;
- none)
- basic_machine=none-none
- os=-none
- ;;
-
-# Here we handle the default manufacturer of certain CPU types. It is in
-# some cases the only manufacturer, in others, it is the most popular.
- w89k)
- basic_machine=hppa1.1-winbond
- ;;
- op50n)
- basic_machine=hppa1.1-oki
- ;;
- op60c)
- basic_machine=hppa1.1-oki
- ;;
- romp)
- basic_machine=romp-ibm
- ;;
- mmix)
- basic_machine=mmix-knuth
- ;;
- rs6000)
- basic_machine=rs6000-ibm
- ;;
- vax)
- basic_machine=vax-dec
- ;;
- pdp10)
- # there are many clones, so DEC is not a safe bet
- basic_machine=pdp10-unknown
- ;;
- pdp11)
- basic_machine=pdp11-dec
- ;;
- we32k)
- basic_machine=we32k-att
- ;;
- sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
- basic_machine=sh-unknown
- ;;
- sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
- basic_machine=sparc-sun
- ;;
- cydra)
- basic_machine=cydra-cydrome
- ;;
- orion)
- basic_machine=orion-highlevel
- ;;
- orion105)
- basic_machine=clipper-highlevel
- ;;
- mac | mpw | mac-mpw)
- basic_machine=m68k-apple
- ;;
- pmac | pmac-mpw)
- basic_machine=powerpc-apple
- ;;
- *-unknown)
- # Make sure to match an already-canonicalized machine name.
- ;;
- *)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
-esac
-
-# Here we canonicalize certain aliases for manufacturers.
-case $basic_machine in
- *-digital*)
- basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
- ;;
- *-commodore*)
- basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
- ;;
- *)
- ;;
-esac
-
-# Decode manufacturer-specific aliases for certain operating systems.
-
-if [ x"$os" != x"" ]
-then
-case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
- # -solaris* is a basic system type, with this one exception.
- -solaris1 | -solaris1.*)
- os=`echo $os | sed -e 's|solaris1|sunos4|'`
- ;;
- -solaris)
- os=-solaris2
- ;;
- -svr4*)
- os=-sysv4
- ;;
- -unixware*)
- os=-sysv4.2uw
- ;;
- -gnu/linux*)
- os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
- ;;
- # First accept the basic system types.
- # The portable systems comes first.
- # Each alternative MUST END IN A *, to match a version number.
- # -sysv* is not here because it comes later, after sysvr4.
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
- | -kopensolaris* \
- | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* | -aros* \
- | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
- | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
- | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
- | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
- | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
- | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
- | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* | -cegcc* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
- | -uxpv* | -beos* | -mpeix* | -udk* \
- | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
- | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
- | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
- | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
- | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
- # Remember, each alternative MUST END IN *, to match a version number.
- ;;
- -qnx*)
- case $basic_machine in
- x86-* | i*86-*)
- ;;
- *)
- os=-nto$os
- ;;
- esac
- ;;
- -nto-qnx*)
- ;;
- -nto*)
- os=`echo $os | sed -e 's|nto|nto-qnx|'`
- ;;
- -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
- | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
- | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
- ;;
- -mac*)
- os=`echo $os | sed -e 's|mac|macos|'`
- ;;
- -linux-dietlibc)
- os=-linux-dietlibc
- ;;
- -linux*)
- os=`echo $os | sed -e 's|linux|linux-gnu|'`
- ;;
- -sunos5*)
- os=`echo $os | sed -e 's|sunos5|solaris2|'`
- ;;
- -sunos6*)
- os=`echo $os | sed -e 's|sunos6|solaris3|'`
- ;;
- -opened*)
- os=-openedition
- ;;
- -os400*)
- os=-os400
- ;;
- -wince*)
- os=-wince
- ;;
- -osfrose*)
- os=-osfrose
- ;;
- -osf*)
- os=-osf
- ;;
- -utek*)
- os=-bsd
- ;;
- -dynix*)
- os=-bsd
- ;;
- -acis*)
- os=-aos
- ;;
- -atheos*)
- os=-atheos
- ;;
- -syllable*)
- os=-syllable
- ;;
- -386bsd)
- os=-bsd
- ;;
- -ctix* | -uts*)
- os=-sysv
- ;;
- -nova*)
- os=-rtmk-nova
- ;;
- -ns2 )
- os=-nextstep2
- ;;
- -nsk*)
- os=-nsk
- ;;
- # Preserve the version number of sinix5.
- -sinix5.*)
- os=`echo $os | sed -e 's|sinix|sysv|'`
- ;;
- -sinix*)
- os=-sysv4
- ;;
- -tpf*)
- os=-tpf
- ;;
- -triton*)
- os=-sysv3
- ;;
- -oss*)
- os=-sysv3
- ;;
- -svr4)
- os=-sysv4
- ;;
- -svr3)
- os=-sysv3
- ;;
- -sysvr4)
- os=-sysv4
- ;;
- # This must come after -sysvr4.
- -sysv*)
- ;;
- -ose*)
- os=-ose
- ;;
- -es1800*)
- os=-ose
- ;;
- -xenix)
- os=-xenix
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- os=-mint
- ;;
- -aros*)
- os=-aros
- ;;
- -kaos*)
- os=-kaos
- ;;
- -zvmoe)
- os=-zvmoe
- ;;
- -dicos*)
- os=-dicos
- ;;
- -none)
- ;;
- *)
- # Get rid of the `-' at the beginning of $os.
- os=`echo $os | sed 's/[^-]*-//'`
- echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
- exit 1
- ;;
-esac
-else
-
-# Here we handle the default operating systems that come with various machines.
-# The value should be what the vendor currently ships out the door with their
-# machine or put another way, the most popular os provided with the machine.
-
-# Note that if you're going to try to match "-MANUFACTURER" here (say,
-# "-sun"), then you have to tell the case statement up towards the top
-# that MANUFACTURER isn't an operating system. Otherwise, code above
-# will signal an error saying that MANUFACTURER isn't an operating
-# system, and we'll never get to this point.
-
-case $basic_machine in
- score-*)
- os=-elf
- ;;
- spu-*)
- os=-elf
- ;;
- *-acorn)
- os=-riscix1.2
- ;;
- arm*-rebel)
- os=-linux
- ;;
- arm*-semi)
- os=-aout
- ;;
- c4x-* | tic4x-*)
- os=-coff
- ;;
- # This must come before the *-dec entry.
- pdp10-*)
- os=-tops20
- ;;
- pdp11-*)
- os=-none
- ;;
- *-dec | vax-*)
- os=-ultrix4.2
- ;;
- m68*-apollo)
- os=-domain
- ;;
- i386-sun)
- os=-sunos4.0.2
- ;;
- m68000-sun)
- os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
- ;;
- m68*-cisco)
- os=-aout
- ;;
- mep-*)
- os=-elf
- ;;
- mips*-cisco)
- os=-elf
- ;;
- mips*-*)
- os=-elf
- ;;
- or32-*)
- os=-coff
- ;;
- *-tti) # must be before sparc entry or we get the wrong os.
- os=-sysv3
- ;;
- sparc-* | *-sun)
- os=-sunos4.1.1
- ;;
- *-be)
- os=-beos
- ;;
- *-haiku)
- os=-haiku
- ;;
- *-ibm)
- os=-aix
- ;;
- *-knuth)
- os=-mmixware
- ;;
- *-wec)
- os=-proelf
- ;;
- *-winbond)
- os=-proelf
- ;;
- *-oki)
- os=-proelf
- ;;
- *-hp)
- os=-hpux
- ;;
- *-hitachi)
- os=-hiux
- ;;
- i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
- os=-sysv
- ;;
- *-cbm)
- os=-amigaos
- ;;
- *-dg)
- os=-dgux
- ;;
- *-dolphin)
- os=-sysv3
- ;;
- m68k-ccur)
- os=-rtu
- ;;
- m88k-omron*)
- os=-luna
- ;;
- *-next )
- os=-nextstep
- ;;
- *-sequent)
- os=-ptx
- ;;
- *-crds)
- os=-unos
- ;;
- *-ns)
- os=-genix
- ;;
- i370-*)
- os=-mvs
- ;;
- *-next)
- os=-nextstep3
- ;;
- *-gould)
- os=-sysv
- ;;
- *-highlevel)
- os=-bsd
- ;;
- *-encore)
- os=-bsd
- ;;
- *-sgi)
- os=-irix
- ;;
- *-siemens)
- os=-sysv4
- ;;
- *-masscomp)
- os=-rtu
- ;;
- f30[01]-fujitsu | f700-fujitsu)
- os=-uxpv
- ;;
- *-rom68k)
- os=-coff
- ;;
- *-*bug)
- os=-coff
- ;;
- *-apple)
- os=-macos
- ;;
- *-atari*)
- os=-mint
- ;;
- *)
- os=-none
- ;;
-esac
-fi
-
-# Here we handle the case where we know the os, and the CPU type, but not the
-# manufacturer. We pick the logical manufacturer.
-vendor=unknown
-case $basic_machine in
- *-unknown)
- case $os in
- -riscix*)
- vendor=acorn
- ;;
- -sunos*)
- vendor=sun
- ;;
- -aix*)
- vendor=ibm
- ;;
- -beos*)
- vendor=be
- ;;
- -hpux*)
- vendor=hp
- ;;
- -mpeix*)
- vendor=hp
- ;;
- -hiux*)
- vendor=hitachi
- ;;
- -unos*)
- vendor=crds
- ;;
- -dgux*)
- vendor=dg
- ;;
- -luna*)
- vendor=omron
- ;;
- -genix*)
- vendor=ns
- ;;
- -mvs* | -opened*)
- vendor=ibm
- ;;
- -os400*)
- vendor=ibm
- ;;
- -ptx*)
- vendor=sequent
- ;;
- -tpf*)
- vendor=ibm
- ;;
- -vxsim* | -vxworks* | -windiss*)
- vendor=wrs
- ;;
- -aux*)
- vendor=apple
- ;;
- -hms*)
- vendor=hitachi
- ;;
- -mpw* | -macos*)
- vendor=apple
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- vendor=atari
- ;;
- -vos*)
- vendor=stratus
- ;;
- esac
- basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
- ;;
-esac
-
-echo $basic_machine$os
-exit
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/talloc/configure b/lib/talloc/configure
new file mode 100755
index 0000000000..6a9f875511
--- /dev/null
+++ b/lib/talloc/configure
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+PREVPATH=`dirname $0`
+
+if [ -f $PREVPATH/../../buildtools/bin/waf ]; then
+ WAF=../../buildtools/bin/waf
+elif [ -f $PREVPATH/buildtools/bin/waf ]; then
+ WAF=./buildtools/bin/waf
+else
+ echo "replace: Unable to find waf"
+ exit 1
+fi
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+cd . || exit 1
+$WAF configure "$@" || exit 1
+cd $PREVPATH
diff --git a/lib/talloc/configure.ac b/lib/talloc/configure.ac
deleted file mode 100644
index c1b1d2e4a1..0000000000
--- a/lib/talloc/configure.ac
+++ /dev/null
@@ -1,49 +0,0 @@
-AC_PREREQ(2.50)
-AC_INIT(talloc, 2.0.1)
-AC_CONFIG_SRCDIR([talloc.c])
-AC_SUBST(datarootdir)
-AC_CONFIG_HEADER(config.h)
-
-TALLOC_VERSION=${PACKAGE_VERSION}
-TALLOC_VERSION_MAJOR=`echo ${PACKAGE_VERSION} | cut -d '.' -f1`
-TALLOC_VERSION_MINOR=`echo ${PACKAGE_VERSION} | cut -d '.' -f2`
-TALLOC_VERSION_RELEASE=`echo ${PACKAGE_VERSION} | cut -d '.' -f3`
-
-AC_SUBST(TALLOC_VERSION)
-AC_SUBST(TALLOC_VERSION_MAJOR)
-AC_SUBST(TALLOC_VERSION_MINOR)
-AC_SUBST(TALLOC_VERSION_RELEASE)
-
-AC_DEFINE_UNQUOTED(TALLOC_BUILD_VERSION_MAJOR,
- [${TALLOC_VERSION_MAJOR}],
- [talloc major version])
-AC_DEFINE_UNQUOTED(TALLOC_BUILD_VERSION_MINOR,
- [${TALLOC_VERSION_MINOR}],
- [talloc minor version])
-AC_DEFINE_UNQUOTED(TALLOC_BUILD_VERSION_RELEASE,
- [${TALLOC_VERSION_RELEASE}],
- [talloc release version])
-
-AC_LIBREPLACE_ALL_CHECKS
-
-AC_LD_PICFLAG
-AC_LD_SHLIBEXT
-AC_LD_SONAMEFLAG
-AC_LD_VERSIONSCRIPT
-AC_LIBREPLACE_SHLD
-AC_LIBREPLACE_SHLD_FLAGS
-
-m4_include(libtalloc.m4)
-m4_include(compat/talloc_compat1.m4)
-
-AC_PATH_PROG(XSLTPROC,xsltproc)
-DOC_TARGET=""
-if test -n "$XSLTPROC"; then
- DOC_TARGET=doc
-fi
-AC_SUBST(DOC_TARGET)
-
-m4_include(build_macros.m4)
-BUILD_WITH_SHARED_BUILD_DIR
-
-AC_OUTPUT(Makefile talloc.pc)
diff --git a/lib/talloc/doc/mainpage.dox b/lib/talloc/doc/mainpage.dox
new file mode 100644
index 0000000000..3204e8a5c2
--- /dev/null
+++ b/lib/talloc/doc/mainpage.dox
@@ -0,0 +1,105 @@
+/**
+ * @mainpage
+ *
+ * talloc is a hierarchical, reference counted memory pool system with
+ * destructors. It is the core memory allocator used in Samba.
+ *
+ * @section talloc_download Download
+ *
+ * You can download the latest releases of talloc from the
+ * <a href="http://samba.org/ftp/talloc" target="_blank">talloc directory</a>
+ * on the samba public source archive.
+ *
+ * @section talloc_bugs Discussion and bug reports
+ *
+ * talloc does not currently have its own mailing list or bug tracking system.
+ * For now, please use the
+ * <a href="https://lists.samba.org/mailman/listinfo/samba-technical" target="_blank">samba-technical</a>
+ * mailing list, and the
+ * <a href="http://bugzilla.samba.org/" target="_blank">Samba bugzilla</a>
+ * bug tracking system.
+ *
+ * @section talloc_devel Development
+ * You can download the latest code either via git or rsync.
+ *
+ * To fetch via git see the following guide:
+ *
+ * <a href="http://wiki.samba.org/index.php/Using_Git_for_Samba_Development" target="_blank">Using Git for Samba Development</a>
+ *
+ * Once you have cloned the tree switch to the master branch and cd into the
+ * lib/tevent directory.
+ *
+ * To fetch via rsync use this command:
+ *
+ * rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/talloc .
+ *
+ * @section talloc_preample Preamble
+ *
+ * talloc is a hierarchical, reference counted memory pool system with
+ * destructors.
+ *
+ * Perhaps the biggest difference from other memory pool systems is that there
+ * is no distinction between a "talloc context" and a "talloc pointer". Any
+ * pointer returned from talloc() is itself a valid talloc context. This means
+ * you can do this:
+ *
+ * @code
+ * struct foo *X = talloc(mem_ctx, struct foo);
+ * X->name = talloc_strdup(X, "foo");
+ * @endcode
+ *
+ * The pointer X->name would be a "child" of the talloc context "X" which is
+ * itself a child of mem_ctx. So if you do talloc_free(mem_ctx) then it is all
+ * destroyed, whereas if you do talloc_free(X) then just X and X->name are
+ * destroyed, and if you do talloc_free(X->name) then just the name element of
+ * X is destroyed.
+ *
+ * If you think about this, then what this effectively gives you is an n-ary
+ * tree, where you can free any part of the tree with talloc_free().
+ *
+ * If you find this confusing, then run the testsuite to watch talloc in
+ * action. You may also like to add your own tests to testsuite.c to clarify
+ * how some particular situation is handled.
+ *
+ * @section talloc_performance Performance
+ *
+ * All the additional features of talloc() over malloc() do come at a price. We
+ * have a simple performance test in Samba4 that measures talloc() versus
+ * malloc() performance, and it seems that talloc() is about 4% slower than
+ * malloc() on my x86 Debian Linux box. For Samba, the great reduction in code
+ * complexity that we get by using talloc makes this worthwhile, especially as
+ * the total overhead of talloc/malloc in Samba is already quite small.
+ *
+ * @section talloc_named Named blocks
+ *
+ * Every talloc chunk has a name that can be used as a dynamic type-checking
+ * system. If for some reason like a callback function you had to cast a
+ * "struct foo *" to a "void *" variable, later you can safely reassign the
+ * "void *" pointer to a "struct foo *" by using the talloc_get_type() or
+ * talloc_get_type_abort() macros.
+ *
+ * @code
+ * struct foo *X = talloc_get_type_abort(ptr, struct foo);
+ * @endcode
+ *
+ * This will abort if "ptr" does not contain a pointer that has been created
+ * with talloc(mem_ctx, struct foo).
+ *
+ * @section talloc_threading Multi-threading
+ *
+ * talloc itself does not deal with threads. It is thread-safe (assuming the
+ * underlying "malloc" is), as long as each thread uses different memory
+ * contexts.
+ *
+ * If two threads uses the same context then they need to synchronize in order
+ * to be safe. In particular:
+ *
+ * - when using talloc_enable_leak_report(), giving directly NULL as a parent
+ * context implicitly refers to a hidden "null context" global variable, so
+ * this should not be used in a multi-threaded environment without proper
+ * synchronization.
+ * - the context returned by talloc_autofree_context() is also global so
+ * shouldn't be used by several threads simultaneously without
+ * synchronization.
+ *
+ */
diff --git a/lib/talloc/doxy.config b/lib/talloc/doxy.config
new file mode 100644
index 0000000000..5e3a3197ba
--- /dev/null
+++ b/lib/talloc/doxy.config
@@ -0,0 +1,1538 @@
+# Doxyfile 1.6.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = talloc
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 2.0
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful is your file systems
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it parses.
+# With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this tag.
+# The format is ext=language, where ext is a file extension, and language is one of
+# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP,
+# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat
+# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen to replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penality.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will rougly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = NO
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespace are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = YES
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = YES
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by
+# doxygen. The layout file controls the global structure of the generated output files
+# in an output format independent way. The create the layout file that represents
+# doxygen's defaults, run doxygen with the -l option. You can optionally specify a
+# file name after the option, if omitted DoxygenLayout.xml will be used as the name
+# of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = . doc
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS = *.cpp \
+ *.cc \
+ *.c \
+ *.h \
+ *.hh \
+ *.hpp \
+ *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = */.git/* \
+ */.svn/* \
+ */cmake/* \
+ */build/*
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# If the HTML_FOOTER_DESCRIPTION tag is set to YES, Doxygen will
+# add generated date, project name and doxygen version to HTML footer.
+
+HTML_FOOTER_DESCRIPTION= NO
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER
+# are set, an additional index file will be generated that can be used as input for
+# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated
+# HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add.
+# For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NONE
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP)
+# there is already a search function so this one should typically
+# be disabled.
+
+SEARCHENGINE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = YES
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = DOXYGEN PRINTF_ATTRIBUTE(x,y)=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a differently looking font) you can specify the font name
+# using DOT_FONTNAME. You need need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that doxygen if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = YES
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/lib/talloc/install-sh b/lib/talloc/install-sh
deleted file mode 100755
index 58719246f0..0000000000
--- a/lib/talloc/install-sh
+++ /dev/null
@@ -1,238 +0,0 @@
-#! /bin/sh
-#
-# install - install a program, script, or datafile
-# This comes from X11R5.
-#
-# Calling this script install-sh is preferred over install.sh, to prevent
-# `make' implicit rules from creating a file called install from it
-# when there is no Makefile.
-#
-# This script is compatible with the BSD install script, but was written
-# from scratch.
-#
-
-
-# set DOITPROG to echo to test this script
-
-# Don't use :- since 4.3BSD and earlier shells don't like it.
-doit="${DOITPROG-}"
-
-
-# put in absolute paths if you don't have them in your path; or use env. vars.
-
-mvprog="${MVPROG-mv}"
-cpprog="${CPPROG-cp}"
-chmodprog="${CHMODPROG-chmod}"
-chownprog="${CHOWNPROG-chown}"
-chgrpprog="${CHGRPPROG-chgrp}"
-stripprog="${STRIPPROG-strip}"
-rmprog="${RMPROG-rm}"
-mkdirprog="${MKDIRPROG-mkdir}"
-
-transformbasename=""
-transform_arg=""
-instcmd="$mvprog"
-chmodcmd="$chmodprog 0755"
-chowncmd=""
-chgrpcmd=""
-stripcmd=""
-rmcmd="$rmprog -f"
-mvcmd="$mvprog"
-src=""
-dst=""
-dir_arg=""
-
-while [ x"$1" != x ]; do
- case $1 in
- -c) instcmd="$cpprog"
- shift
- continue;;
-
- -d) dir_arg=true
- shift
- continue;;
-
- -m) chmodcmd="$chmodprog $2"
- shift
- shift
- continue;;
-
- -o) chowncmd="$chownprog $2"
- shift
- shift
- continue;;
-
- -g) chgrpcmd="$chgrpprog $2"
- shift
- shift
- continue;;
-
- -s) stripcmd="$stripprog"
- shift
- continue;;
-
- -t=*) transformarg=`echo $1 | sed 's/-t=//'`
- shift
- continue;;
-
- -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
- shift
- continue;;
-
- *) if [ x"$src" = x ]
- then
- src=$1
- else
- # this colon is to work around a 386BSD /bin/sh bug
- :
- dst=$1
- fi
- shift
- continue;;
- esac
-done
-
-if [ x"$src" = x ]
-then
- echo "install: no input file specified"
- exit 1
-else
- true
-fi
-
-if [ x"$dir_arg" != x ]; then
- dst=$src
- src=""
-
- if [ -d $dst ]; then
- instcmd=:
- else
- instcmd=mkdir
- fi
-else
-
-# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
-# might cause directories to be created, which would be especially bad
-# if $src (and thus $dsttmp) contains '*'.
-
- if [ -f $src -o -d $src ]
- then
- true
- else
- echo "install: $src does not exist"
- exit 1
- fi
-
- if [ x"$dst" = x ]
- then
- echo "install: no destination specified"
- exit 1
- else
- true
- fi
-
-# If destination is a directory, append the input filename; if your system
-# does not like double slashes in filenames, you may need to add some logic
-
- if [ -d $dst ]
- then
- dst="$dst"/`basename $src`
- else
- true
- fi
-fi
-
-## this sed command emulates the dirname command
-dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
-
-# Make sure that the destination directory exists.
-# this part is taken from Noah Friedman's mkinstalldirs script
-
-# Skip lots of stat calls in the usual case.
-if [ ! -d "$dstdir" ]; then
-defaultIFS='
-'
-IFS="${IFS-${defaultIFS}}"
-
-oIFS="${IFS}"
-# Some sh's can't handle IFS=/ for some reason.
-IFS='%'
-set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
-IFS="${oIFS}"
-
-pathcomp=''
-
-while [ $# -ne 0 ] ; do
- pathcomp="${pathcomp}${1}"
- shift
-
- if [ ! -d "${pathcomp}" ] ;
- then
- $mkdirprog "${pathcomp}"
- else
- true
- fi
-
- pathcomp="${pathcomp}/"
-done
-fi
-
-if [ x"$dir_arg" != x ]
-then
- $doit $instcmd $dst &&
-
- if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
- if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
- if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
- if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
-else
-
-# If we're going to rename the final executable, determine the name now.
-
- if [ x"$transformarg" = x ]
- then
- dstfile=`basename $dst`
- else
- dstfile=`basename $dst $transformbasename |
- sed $transformarg`$transformbasename
- fi
-
-# don't allow the sed command to completely eliminate the filename
-
- if [ x"$dstfile" = x ]
- then
- dstfile=`basename $dst`
- else
- true
- fi
-
-# Make a temp file name in the proper directory.
-
- dsttmp=$dstdir/#inst.$$#
-
-# Move or copy the file name to the temp name
-
- $doit $instcmd $src $dsttmp &&
-
- trap "rm -f ${dsttmp}" 0 &&
-
-# and set any options; do chmod last to preserve setuid bits
-
-# If any of these fail, we abort the whole thing. If we want to
-# ignore errors from any of these, just make sure not to ignore
-# errors from the above "$doit $instcmd $src $dsttmp" command.
-
- if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
- if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
- if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
- if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
-
-# Now rename the file to the real destination.
-
- $doit $rmcmd -f $dstdir/$dstfile &&
- $doit $mvcmd $dsttmp $dstdir/$dstfile
-
-fi &&
-
-
-exit 0
diff --git a/lib/talloc/pytalloc-util.pc.in b/lib/talloc/pytalloc-util.pc.in
new file mode 100644
index 0000000000..bc704b4929
--- /dev/null
+++ b/lib/talloc/pytalloc-util.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: pytalloc-util
+Description: Utility functions for using talloc objects with Python
+Version: @TALLOC_VERSION@
+Libs: -L${libdir} -lpytalloc-util
+Cflags: @LIB_RPATH@ -I${includedir}
+URL: http://talloc.samba.org/
diff --git a/lib/talloc/pytalloc.c b/lib/talloc/pytalloc.c
index c6decf33f1..614b81f057 100644
--- a/lib/talloc/pytalloc.c
+++ b/lib/talloc/pytalloc.c
@@ -1,79 +1,85 @@
/*
Unix SMB/CIFS implementation.
- Python/Talloc glue
- Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
-
+ Python Talloc Module
+ Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2010
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "replace.h"
+#include <Python.h>
#include <talloc.h>
#include <pytalloc.h>
-/**
- * Simple dealloc for talloc-wrapping PyObjects
- */
-void py_talloc_dealloc(PyObject* self)
-{
- py_talloc_Object *obj = (py_talloc_Object *)self;
- talloc_free(obj->talloc_ctx);
- obj->talloc_ctx = NULL;
- self->ob_type->tp_free(self);
-}
+void inittalloc(void);
-/**
- * Import an existing talloc pointer into a Python object.
- */
-PyObject *py_talloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx,
- void *ptr)
+/* print a talloc tree report for a talloc python object */
+static PyObject *py_talloc_report_full(PyObject *self, PyObject *args)
{
- py_talloc_Object *ret = (py_talloc_Object *)py_type->tp_alloc(py_type, 0);
- ret->talloc_ctx = talloc_new(NULL);
- if (ret->talloc_ctx == NULL) {
- return NULL;
- }
- if (talloc_steal(ret->talloc_ctx, mem_ctx) == NULL) {
+ PyObject *py_obj = Py_None;
+ PyTypeObject *type;
+
+ if (!PyArg_ParseTuple(args, "|O", &py_obj))
return NULL;
+
+ if (py_obj == Py_None) {
+ talloc_report_full(NULL, stdout);
+ } else {
+ type = (PyTypeObject*)PyObject_Type(py_obj);
+ talloc_report_full(py_talloc_get_mem_ctx(py_obj), stdout);
}
- ret->ptr = ptr;
- return (PyObject *)ret;
+ return Py_None;
}
+/* enable null tracking */
+static PyObject *py_talloc_enable_null_tracking(PyObject *self)
+{
+ talloc_enable_null_tracking();
+ return Py_None;
+}
-/**
- * Import an existing talloc pointer into a Python object, leaving the
- * original parent, and creating a reference to the object in the python
- * object
- */
-PyObject *py_talloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr)
+/* return the number of talloc blocks */
+static PyObject *py_talloc_total_blocks(PyObject *self, PyObject *args)
{
- py_talloc_Object *ret = (py_talloc_Object *)py_type->tp_alloc(py_type, 0);
- ret->talloc_ctx = talloc_new(NULL);
- if (ret->talloc_ctx == NULL) {
- return NULL;
- }
- if (talloc_reference(ret->talloc_ctx, mem_ctx) == NULL) {
+ PyObject *py_obj = Py_None;
+ PyTypeObject *type;
+
+ if (!PyArg_ParseTuple(args, "|O", &py_obj))
return NULL;
+
+ if (py_obj == Py_None) {
+ return PyLong_FromLong(talloc_total_blocks(NULL));
}
- ret->ptr = ptr;
- return (PyObject *)ret;
+
+ type = (PyTypeObject*)PyObject_Type(py_obj);
+
+ return PyLong_FromLong(talloc_total_blocks(py_talloc_get_mem_ctx(py_obj)));
}
+static PyMethodDef talloc_methods[] = {
+ { "report_full", (PyCFunction)py_talloc_report_full, METH_VARARGS,
+ "show a talloc tree for an object"},
+ { "enable_null_tracking", (PyCFunction)py_talloc_enable_null_tracking, METH_NOARGS,
+ "enable tracking of the NULL object"},
+ { "total_blocks", (PyCFunction)py_talloc_total_blocks, METH_VARARGS,
+ "return talloc block count"},
+ { NULL }
+};
+
/**
- * Default (but slightly more useful than the default) implementation of Repr().
+ * Default (but only slightly more useful than the default) implementation of Repr().
*/
-PyObject *py_talloc_default_repr(PyObject *obj)
+static PyObject *py_talloc_default_repr(PyObject *obj)
{
py_talloc_Object *talloc_obj = (py_talloc_Object *)obj;
PyTypeObject *type = (PyTypeObject*)PyObject_Type(obj);
@@ -82,12 +88,52 @@ PyObject *py_talloc_default_repr(PyObject *obj)
type->tp_name, talloc_obj->ptr);
}
-static void py_cobject_talloc_free(void *ptr)
+/**
+ * Simple dealloc for talloc-wrapping PyObjects
+ */
+static void py_talloc_dealloc(PyObject* self)
+{
+ py_talloc_Object *obj = (py_talloc_Object *)self;
+ assert(talloc_unlink(NULL, obj->talloc_ctx) != -1);
+ obj->talloc_ctx = NULL;
+ self->ob_type->tp_free(self);
+}
+
+/**
+ * Default (but only slightly more useful than the default) implementation of cmp.
+ */
+static int py_talloc_default_cmp(PyObject *_obj1, PyObject *_obj2)
{
- talloc_free(ptr);
+ py_talloc_Object *obj1 = (py_talloc_Object *)_obj1,
+ *obj2 = (py_talloc_Object *)_obj2;
+ if (obj1->ob_type != obj2->ob_type)
+ return (obj1->ob_type - obj2->ob_type);
+
+ return ((char *)py_talloc_get_ptr(obj1) - (char *)py_talloc_get_ptr(obj2));
}
-PyObject *PyCObject_FromTallocPtr(void *ptr)
+static PyTypeObject TallocObject_Type = {
+ .tp_name = "talloc.Object",
+ .tp_doc = "Python wrapper for a talloc-maintained object.",
+ .tp_basicsize = sizeof(py_talloc_Object),
+ .tp_dealloc = (destructor)py_talloc_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .tp_repr = py_talloc_default_repr,
+ .tp_compare = py_talloc_default_cmp,
+};
+
+void inittalloc(void)
{
- return PyCObject_FromVoidPtr(ptr, py_cobject_talloc_free);
+ PyObject *m;
+
+ if (PyType_Ready(&TallocObject_Type) < 0)
+ return;
+
+ m = Py_InitModule3("talloc", talloc_methods,
+ "Python wrapping of talloc-maintained objects.");
+ if (m == NULL)
+ return;
+
+ Py_INCREF(&TallocObject_Type);
+ PyModule_AddObject(m, "Object", (PyObject *)&TallocObject_Type);
}
diff --git a/lib/talloc/pytalloc.h b/lib/talloc/pytalloc.h
index 9b6587261c..bfd9c2e017 100644
--- a/lib/talloc/pytalloc.h
+++ b/lib/talloc/pytalloc.h
@@ -29,8 +29,8 @@ typedef struct {
void *ptr;
} py_talloc_Object;
-/* Deallocate a py_talloc_Object */
-void py_talloc_dealloc(PyObject* self);
+PyTypeObject *PyTalloc_GetObjectType(void);
+int PyTalloc_Check(PyObject *);
/* Retrieve the pointer for a py_talloc_object. Like talloc_get_type()
* but for py_talloc_Objects. */
@@ -43,15 +43,14 @@ void py_talloc_dealloc(PyObject* self);
#define py_talloc_get_mem_ctx(py_obj) ((py_talloc_Object *)py_obj)->talloc_ctx
PyObject *py_talloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr);
+PyObject *py_talloc_steal(PyTypeObject *py_type, void *ptr);
PyObject *py_talloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr);
-#define py_talloc_steal(py_type, talloc_ptr) py_talloc_steal_ex(py_type, talloc_ptr, talloc_ptr)
#define py_talloc_reference(py_type, talloc_ptr) py_talloc_reference_ex(py_type, talloc_ptr, talloc_ptr)
-/* Sane default implementation of reprfunc. */
-PyObject *py_talloc_default_repr(PyObject *py_obj);
-
#define py_talloc_new(type, typeobj) py_talloc_steal(typeobj, talloc_zero(NULL, type))
PyObject *PyCObject_FromTallocPtr(void *);
+PyObject *PyString_FromString_check_null(const char *ptr);
+
#endif /* _PY_TALLOC_H_ */
diff --git a/lib/talloc/pytalloc_util.c b/lib/talloc/pytalloc_util.c
new file mode 100644
index 0000000000..c8a7e6ac58
--- /dev/null
+++ b/lib/talloc/pytalloc_util.c
@@ -0,0 +1,118 @@
+/*
+ Unix SMB/CIFS implementation.
+ Python/Talloc glue
+ Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <Python.h>
+#include "replace.h"
+#include <talloc.h>
+#include "pytalloc.h"
+#include <assert.h>
+
+_PUBLIC_ PyTypeObject *PyTalloc_GetObjectType(void)
+{
+ static PyTypeObject *type = NULL;
+ PyObject *mod;
+
+ if (type != NULL) {
+ return type;
+ }
+
+ mod = PyImport_ImportModule("talloc");
+ if (mod == NULL) {
+ return NULL;
+ }
+
+ type = (PyTypeObject *)PyObject_GetAttrString(mod, "Object");
+ Py_DECREF(mod);
+
+ return type;
+}
+
+/**
+ * Import an existing talloc pointer into a Python object.
+ */
+_PUBLIC_ PyObject *py_talloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx,
+ void *ptr)
+{
+ py_talloc_Object *ret = (py_talloc_Object *)py_type->tp_alloc(py_type, 0);
+ ret->talloc_ctx = talloc_new(NULL);
+ if (ret->talloc_ctx == NULL) {
+ return NULL;
+ }
+ if (talloc_steal(ret->talloc_ctx, mem_ctx) == NULL) {
+ return NULL;
+ }
+ talloc_set_name_const(ret->talloc_ctx, py_type->tp_name);
+ ret->ptr = ptr;
+ return (PyObject *)ret;
+}
+
+/**
+ * Import an existing talloc pointer into a Python object.
+ */
+_PUBLIC_ PyObject *py_talloc_steal(PyTypeObject *py_type, void *ptr)
+{
+ return py_talloc_steal_ex(py_type, ptr, ptr);
+}
+
+
+/**
+ * Import an existing talloc pointer into a Python object, leaving the
+ * original parent, and creating a reference to the object in the python
+ * object
+ */
+_PUBLIC_ PyObject *py_talloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr)
+{
+ py_talloc_Object *ret;
+
+ if (ptr == NULL) {
+ Py_RETURN_NONE;
+ }
+
+ ret = (py_talloc_Object *)py_type->tp_alloc(py_type, 0);
+ ret->talloc_ctx = talloc_new(NULL);
+ if (ret->talloc_ctx == NULL) {
+ return NULL;
+ }
+ if (talloc_reference(ret->talloc_ctx, mem_ctx) == NULL) {
+ return NULL;
+ }
+ talloc_set_name_const(ret->talloc_ctx, py_type->tp_name);
+ ret->ptr = ptr;
+ return (PyObject *)ret;
+}
+
+static void py_cobject_talloc_free(void *ptr)
+{
+ talloc_free(ptr);
+}
+
+_PUBLIC_ PyObject *PyCObject_FromTallocPtr(void *ptr)
+{
+ if (ptr == NULL) {
+ Py_RETURN_NONE;
+ }
+ return PyCObject_FromVoidPtr(ptr, py_cobject_talloc_free);
+}
+
+_PUBLIC_ int PyTalloc_Check(PyObject *obj)
+{
+ PyTypeObject *tp = PyTalloc_GetObjectType();
+
+ return PyObject_TypeCheck(obj, tp);
+}
diff --git a/lib/talloc/rules.mk b/lib/talloc/rules.mk
deleted file mode 100644
index 00c909ee2b..0000000000
--- a/lib/talloc/rules.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-.SUFFIXES: .c .o .3 .3.xml .xml .html
-
-showflags::
- @echo 'talloc will be compiled with flags:'
- @echo ' CFLAGS = $(CFLAGS)'
- @echo ' LIBS = $(LIBS)'
-
-.c.o:
- $(CC) $(PICFLAG) $(ABI_CHECK) -o $@ -c $< $(CFLAGS)
-
-.3.xml.3:
- -test -z "$(XSLTPROC)" || $(XSLTPROC) -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $<
-
-.xml.html:
- -test -z "$(XSLTPROC)" || $(XSLTPROC) -o $@ http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl $<
-
-distclean::
- rm -f *~ */*~
diff --git a/lib/talloc/script/mksigs.pl b/lib/talloc/script/mksigs.pl
index 755cd79603..dfe36bc138 100755
--- a/lib/talloc/script/mksigs.pl
+++ b/lib/talloc/script/mksigs.pl
@@ -27,6 +27,7 @@ use strict;
use warnings;
my $in_comment = 0;
+my $in_doxygen = 0;
my $extern_C_block = 0;
while (my $LINE = <>) {
@@ -41,6 +42,14 @@ while (my $LINE = <>) {
}
}
+ # find end of DOXYGEN section
+ if ($in_doxygen) {
+ if ($LINE =~ /^#\s*else(?:\s+.*)?$/) {
+ $in_doxygen = 0;
+ }
+ next;
+ }
+
# strip C++-style comments
$LINE =~ s/^(.*?)\/\/.*$/$1/;
@@ -69,6 +78,13 @@ while (my $LINE = <>) {
$LINE .= " " . $LINE2;
}
+ # remove DOXYGEN sections
+ if ($LINE =~ /^#\s*ifdef\s+DOXYGEN(?:\s+.*)?$/) {
+ $in_doxygen = 1;
+ next;
+ }
+
+
# remove all preprocessor directives
next if ($LINE =~ /^#/);
diff --git a/lib/talloc/script/mksyms.awk b/lib/talloc/script/mksyms.awk
index 8775faff3f..83497a7a06 100644
--- a/lib/talloc/script/mksyms.awk
+++ b/lib/talloc/script/mksyms.awk
@@ -8,6 +8,7 @@
#
BEGIN {
inheader=0;
+ indoxygen=0;
}
END {
@@ -20,9 +21,15 @@ END {
}
next;
}
+ if (indoxygen) {
+ if (match($0,"^#[ \t]*else[ \t]*.*$")) {
+ indoxygen = 0;
+ }
+ next;
+ }
}
-/^static/ || /^[ \t]*typedef/ || !/^[a-zA-Z\_]/ {
+/^static/ || /^[ \t]*typedef/ || !/^[a-zA-Z\_\#]/ {
next;
}
@@ -33,11 +40,16 @@ END {
next;
}
+/^#[ \t]*ifdef[ \t]*DOXYGEN[ \t]*.*$/ {
+ indoxygen=1;
+ next;
+}
+
# look for function headers:
{
gotstart = 0;
if ($0 ~ /^[A-Za-z_][A-Za-z0-9_]+/) {
- gotstart = 1;
+ gotstart = 1;
}
if(!gotstart) {
next;
diff --git a/lib/talloc/script/release-script.sh b/lib/talloc/script/release-script.sh
index fd5c1eff5d..4b8aac7d3c 100755
--- a/lib/talloc/script/release-script.sh
+++ b/lib/talloc/script/release-script.sh
@@ -1,58 +1,68 @@
#!/bin/bash
+LNAME=talloc
+LINCLUDE=talloc.h
+
if [ "$1" = "" ]; then
echo "Please provide version string, eg: 1.2.0"
exit 1
fi
-if [ ! -d "lib/talloc" ]; then
+if [ ! -d "lib/${LNAME}" ]; then
echo "Run this script from the samba base directory."
exit 1
fi
-# Check exports and signatures are up to date
-pushd lib/talloc
-./script/abi_checks.sh talloc talloc.h
-abicheck=$?
-popd
-if [ ! "$abicheck" = "0" ]; then
- echo "ERROR: ABI Checks produced warnings!"
- exit 1
-fi
-
-git clean -f -x -d lib/talloc
-git clean -f -x -d lib/replace
-
curbranch=`git branch |grep "^*" | tr -d "* "`
version=$1
strver=`echo ${version} | tr "." "-"`
# Checkout the release tag
-git branch -f talloc-release-script-${strver} talloc-${strver}
+git branch -f ${LNAME}-release-script-${strver} ${LNAME}-${strver}
if [ ! "$?" = "0" ]; then
- echo "Unable to checkout talloc-${strver} release"
+ echo "Unable to checkout ${LNAME}-${strver} release"
exit 1
fi
-git checkout talloc-release-script-${strver}
+function cleanquit {
+ #Clean up
+ git checkout $curbranch
+ git branch -d ${LNAME}-release-script-${strver}
+ exit $1
+}
+
+# NOTE: use cleanquit after this point
+git checkout ${LNAME}-release-script-${strver}
# Test configure agrees with us
-confver=`grep "^AC_INIT" lib/talloc/configure.ac | tr -d "AC_INIT(talloc, " | tr -d ")"`
+confver=`grep "^AC_INIT" lib/${LNAME}/configure.ac | tr -d "AC_INIT(${LNAME}, " | tr -d ")"`
if [ ! "$confver" = "$version" ]; then
echo "Wrong version, requested release for ${version}, found ${confver}"
- exit 1
+ cleanquit 1
fi
+# Check exports and signatures are up to date
+pushd lib/${LNAME}
+./script/abi_checks.sh ${LNAME} ${LINCLUDE}
+abicheck=$?
+popd
+if [ ! "$abicheck" = "0" ]; then
+ echo "ERROR: ABI Checks produced warnings!"
+ cleanquit 1
+fi
+
+git clean -f -x -d lib/${LNAME}
+git clean -f -x -d lib/replace
+
# Now build tarball
-cp -a lib/talloc talloc-${version}
-cp -a lib/replace talloc-${version}/libreplace
-pushd talloc-${version}
+cp -a lib/${LNAME} ${LNAME}-${version}
+cp -a lib/replace ${LNAME}-${version}/libreplace
+pushd ${LNAME}-${version}
./autogen.sh
popd
-tar cvzf talloc-${version}.tar.gz talloc-${version}
-rm -fr talloc-${version}
+tar cvzf ${LNAME}-${version}.tar.gz ${LNAME}-${version}
+rm -fr ${LNAME}-${version}
+
+cleanquit 0
-#Clean up
-git checkout $curbranch
-git branch -d talloc-release-script-${strver}
diff --git a/lib/talloc/talloc.3.xml b/lib/talloc/talloc.3.xml
index 8d9e08226d..a327922dbe 100644
--- a/lib/talloc/talloc.3.xml
+++ b/lib/talloc/talloc.3.xml
@@ -10,7 +10,7 @@
<refpurpose>hierarchical reference counted memory pool system with destructors</refpurpose>
</refnamediv>
<refsynopsisdiv>
-<synopsis>#include &lt;talloc/talloc.h&gt;</synopsis>
+<synopsis>#include &lt;talloc.h&gt;</synopsis>
</refsynopsisdiv>
<refsect1><title>DESCRIPTION</title>
<para>
@@ -362,7 +362,7 @@ talloc_set_name_const(ptr, name);</programlisting>
<refsect2><title>void *talloc_new(void *<emphasis role="italic">ctx</emphasis>);</title>
<para>
This is a utility macro that creates a new memory context hanging
- off an exiting context, automatically naming it "talloc_new:
+ off an existing context, automatically naming it "talloc_new:
__location__" where __location__ is the source line it is called
from. It is particularly useful for creating a new temporary
working context.
@@ -645,17 +645,6 @@ if (ptr) memcpy(ptr, p, strlen(p)+1);</programlisting>
</para>
<programlisting>talloc_set_name_const(ptr, ptr)</programlisting>
</refsect2>
- <refsect2><title>char *talloc_append_string(const void *<emphasis role="italic">t</emphasis>, char *<emphasis role="italic">orig</emphasis>, const char *<emphasis role="italic">append</emphasis>);</title>
- <para>
- The talloc_append_string() function appends the given formatted
- string to the given string.
- </para>
- <para>
- This function sets the name of the new pointer to the new
- string. This is equivalent to:
- </para>
- <programlisting>talloc_set_name_const(ptr, ptr)</programlisting>
- </refsect2>
<refsect2><title>char *talloc_vasprintf(const void *<emphasis role="italic">t</emphasis>, const char *<emphasis role="italic">fmt</emphasis>, va_list <emphasis role="italic">ap</emphasis>);</title>
<para>
The talloc_vasprintf() function is the talloc equivalent of the C
@@ -689,7 +678,7 @@ if (ptr) memcpy(ptr, p, strlen(p)+1);</programlisting>
</para>
<programlisting>talloc_set_name_const(ptr, ptr)</programlisting>
</refsect2>
- <refsect2><title>(type *)talloc_array(const void *ctx, type, uint_t count);</title>
+ <refsect2><title>(type *)talloc_array(const void *ctx, type, unsigned int count);</title>
<para>
The talloc_array() macro is equivalent to:
</para>
@@ -699,14 +688,14 @@ if (ptr) memcpy(ptr, p, strlen(p)+1);</programlisting>
multiply, returning NULL if the multiply overflows.
</para>
</refsect2>
- <refsect2><title>void *talloc_array_size(const void *ctx, size_t size, uint_t count);</title>
+ <refsect2><title>void *talloc_array_size(const void *ctx, size_t size, unsigned int count);</title>
<para>
The talloc_array_size() function is useful when the type is not
known. It operates in the same way as talloc_array(), but takes a
size instead of a type.
</para>
</refsect2>
- <refsect2><title>(typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, uint_t count);</title>
+ <refsect2><title>(typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, unsigned int count);</title>
<para>
The talloc_ptrtype() macro should be used when you have a pointer to an array
and want to allocate memory of an array to point at with this pointer. When compiling
diff --git a/lib/talloc/talloc.c b/lib/talloc/talloc.c
index f7b1ac3dbd..4700aa99e8 100644
--- a/lib/talloc/talloc.c
+++ b/lib/talloc/talloc.c
@@ -45,6 +45,15 @@
#endif
#endif
+/* Special macros that are no-ops except when run under Valgrind on
+ * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
+#ifdef HAVE_VALGRIND_MEMCHECK_H
+ /* memcheck.h includes valgrind.h */
+#include <valgrind/memcheck.h>
+#elif defined(HAVE_VALGRIND_H)
+#include <valgrind.h>
+#endif
+
/* use this to force every realloc to change the pointer, to stress test
code that might not cope */
#define ALWAYS_REALLOC 0
@@ -104,6 +113,114 @@
static void *null_context;
static void *autofree_context;
+/* used to enable fill of memory on free, which can be useful for
+ * catching use after free errors when valgrind is too slow
+ */
+static struct {
+ bool initialised;
+ bool enabled;
+ uint8_t fill_value;
+} talloc_fill;
+
+#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
+
+/*
+ * do not wipe the header, to allow the
+ * double-free logic to still work
+ */
+#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
+ if (unlikely(talloc_fill.enabled)) { \
+ size_t _flen = (_tc)->size; \
+ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
+ memset(_fptr, talloc_fill.fill_value, _flen); \
+ } \
+} while (0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
+/* Mark the whole chunk as not accessable */
+#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
+ size_t _flen = TC_HDR_SIZE + (_tc)->size; \
+ char *_fptr = (char *)(_tc); \
+ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
+} while(0)
+#else
+#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
+#endif
+
+#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
+ TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
+ TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
+} while (0)
+
+#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
+ if (unlikely(talloc_fill.enabled)) { \
+ size_t _flen = (_tc)->size - (_new_size); \
+ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
+ _fptr += (_new_size); \
+ memset(_fptr, talloc_fill.fill_value, _flen); \
+ } \
+} while (0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
+/* Mark the unused bytes not accessable */
+#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
+ size_t _flen = (_tc)->size - (_new_size); \
+ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
+ _fptr += (_new_size); \
+ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
+} while (0)
+#else
+#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
+#endif
+
+#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
+ TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
+ TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
+} while (0)
+
+#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
+ if (unlikely(talloc_fill.enabled)) { \
+ size_t _flen = (_tc)->size - (_new_size); \
+ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
+ _fptr += (_new_size); \
+ memset(_fptr, talloc_fill.fill_value, _flen); \
+ } \
+} while (0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
+/* Mark the unused bytes as undefined */
+#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
+ size_t _flen = (_tc)->size - (_new_size); \
+ char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
+ _fptr += (_new_size); \
+ VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
+} while (0)
+#else
+#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
+#endif
+
+#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
+ TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
+ TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
+} while (0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
+/* Mark the new bytes as undefined */
+#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
+ size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
+ size_t _new_used = TC_HDR_SIZE + (_new_size); \
+ size_t _flen = _new_used - _old_used; \
+ char *_fptr = _old_used + (char *)(_tc); \
+ VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
+} while (0)
+#else
+#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
+#endif
+
+#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
+ TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
+} while (0)
+
struct talloc_reference_handle {
struct talloc_reference_handle *next, *prev;
void *ptr;
@@ -136,22 +253,23 @@ struct talloc_chunk {
};
/* 16 byte alignment seems to keep everyone happy */
-#define TC_HDR_SIZE ((sizeof(struct talloc_chunk)+15)&~15)
+#define TC_ALIGN16(s) (((s)+15)&~15)
+#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
-int talloc_version_major(void)
+_PUBLIC_ int talloc_version_major(void)
{
return TALLOC_VERSION_MAJOR;
}
-int talloc_version_minor(void)
+_PUBLIC_ int talloc_version_minor(void)
{
return TALLOC_VERSION_MINOR;
}
static void (*talloc_log_fn)(const char *message);
-void talloc_set_log_fn(void (*log_fn)(const char *message))
+_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
{
talloc_log_fn = log_fn;
}
@@ -179,14 +297,14 @@ static void talloc_log_stderr(const char *message)
fprintf(stderr, "%s", message);
}
-void talloc_set_log_stderr(void)
+_PUBLIC_ void talloc_set_log_stderr(void)
{
talloc_set_log_fn(talloc_log_stderr);
}
static void (*talloc_abort_fn)(const char *reason);
-void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
+_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
{
talloc_abort_fn = abort_fn;
}
@@ -213,9 +331,9 @@ static void talloc_abort_magic(unsigned magic)
talloc_abort("Bad talloc magic value - wrong talloc version used/mixed");
}
-static void talloc_abort_double_free(void)
+static void talloc_abort_access_after_free(void)
{
- talloc_abort("Bad talloc magic value - double free");
+ talloc_abort("Bad talloc magic value - access after free");
}
static void talloc_abort_unknown_value(void)
@@ -235,8 +353,8 @@ static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
}
if (tc->flags & TALLOC_FLAG_FREE) {
- talloc_log("talloc: double free error - first free may be at %s\n", tc->name);
- talloc_abort_double_free();
+ talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
+ talloc_abort_access_after_free();
return NULL;
} else {
talloc_abort_unknown_value();
@@ -291,7 +409,7 @@ static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
return tc->parent;
}
-void *talloc_parent(const void *ptr)
+_PUBLIC_ void *talloc_parent(const void *ptr)
{
struct talloc_chunk *tc = talloc_parent_chunk(ptr);
return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
@@ -300,7 +418,7 @@ void *talloc_parent(const void *ptr)
/*
find parents name
*/
-const char *talloc_parent_name(const void *ptr)
+_PUBLIC_ const char *talloc_parent_name(const void *ptr)
{
struct talloc_chunk *tc = talloc_parent_chunk(ptr);
return tc? tc->name : NULL;
@@ -321,9 +439,47 @@ const char *talloc_parent_name(const void *ptr)
#define TALLOC_POOL_HDR_SIZE 16
+#define TC_POOL_SPACE_LEFT(_pool_tc) \
+ PTR_DIFF(TC_HDR_SIZE + (_pool_tc)->size + (char *)(_pool_tc), \
+ (_pool_tc)->pool)
+
+#define TC_POOL_FIRST_CHUNK(_pool_tc) \
+ ((void *)(TC_HDR_SIZE + TALLOC_POOL_HDR_SIZE + (char *)(_pool_tc)))
+
+#define TC_POOLMEM_CHUNK_SIZE(_tc) \
+ TC_ALIGN16(TC_HDR_SIZE + (_tc)->size)
+
+#define TC_POOLMEM_NEXT_CHUNK(_tc) \
+ ((void *)(TC_POOLMEM_CHUNK_SIZE(tc) + (char*)(_tc)))
+
+/* Mark the whole remaining pool as not accessable */
+#define TC_INVALIDATE_FILL_POOL(_pool_tc) do { \
+ if (unlikely(talloc_fill.enabled)) { \
+ size_t _flen = TC_POOL_SPACE_LEFT(_pool_tc); \
+ char *_fptr = (char *)(_pool_tc)->pool; \
+ memset(_fptr, talloc_fill.fill_value, _flen); \
+ } \
+} while(0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
+/* Mark the whole remaining pool as not accessable */
+#define TC_INVALIDATE_VALGRIND_POOL(_pool_tc) do { \
+ size_t _flen = TC_POOL_SPACE_LEFT(_pool_tc); \
+ char *_fptr = (char *)(_pool_tc)->pool; \
+ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
+} while(0)
+#else
+#define TC_INVALIDATE_VALGRIND_POOL(_pool_tc) do { } while (0)
+#endif
+
+#define TC_INVALIDATE_POOL(_pool_tc) do { \
+ TC_INVALIDATE_FILL_POOL(_pool_tc); \
+ TC_INVALIDATE_VALGRIND_POOL(_pool_tc); \
+} while (0)
+
static unsigned int *talloc_pool_objectcount(struct talloc_chunk *tc)
{
- return (unsigned int *)((char *)tc + sizeof(struct talloc_chunk));
+ return (unsigned int *)((char *)tc + TC_HDR_SIZE);
}
/*
@@ -353,13 +509,12 @@ static struct talloc_chunk *talloc_alloc_pool(struct talloc_chunk *parent,
return NULL;
}
- space_left = ((char *)pool_ctx + TC_HDR_SIZE + pool_ctx->size)
- - ((char *)pool_ctx->pool);
+ space_left = TC_POOL_SPACE_LEFT(pool_ctx);
/*
* Align size to 16 bytes
*/
- chunk_size = ((size + 15) & ~15);
+ chunk_size = TC_ALIGN16(size);
if (space_left < chunk_size) {
return NULL;
@@ -438,7 +593,7 @@ static inline void *__talloc(const void *context, size_t size)
* Create a talloc pool
*/
-void *talloc_pool(const void *context, size_t size)
+_PUBLIC_ void *talloc_pool(const void *context, size_t size)
{
void *result = __talloc(context, size + TALLOC_POOL_HDR_SIZE);
struct talloc_chunk *tc;
@@ -450,13 +605,11 @@ void *talloc_pool(const void *context, size_t size)
tc = talloc_chunk_from_ptr(result);
tc->flags |= TALLOC_FLAG_POOL;
- tc->pool = (char *)result + TALLOC_POOL_HDR_SIZE;
+ tc->pool = TC_POOL_FIRST_CHUNK(tc);
*talloc_pool_objectcount(tc) = 1;
-#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
- VALGRIND_MAKE_MEM_NOACCESS(tc->pool, size);
-#endif
+ TC_INVALIDATE_POOL(tc);
return result;
}
@@ -467,7 +620,7 @@ void *talloc_pool(const void *context, size_t size)
if the destructor fails then the free is failed, and the memory can
be continued to be used
*/
-void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
+_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
{
struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
tc->destructor = destructor;
@@ -476,7 +629,7 @@ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
/*
increase the reference count on a piece of memory.
*/
-int talloc_increase_ref_count(const void *ptr)
+_PUBLIC_ int talloc_increase_ref_count(const void *ptr)
{
if (unlikely(!talloc_reference(null_context, ptr))) {
return -1;
@@ -532,7 +685,7 @@ static inline void *_talloc_named_const(const void *context, size_t size, const
same underlying data, and you want to be able to free the two instances separately,
and in either order
*/
-void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
+_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
{
struct talloc_chunk *tc;
struct talloc_reference_handle *handle;
@@ -556,6 +709,69 @@ void *_talloc_reference_loc(const void *context, const void *ptr, const char *lo
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
+static inline void _talloc_free_poolmem(struct talloc_chunk *tc,
+ const char *location)
+{
+ struct talloc_chunk *pool;
+ void *next_tc;
+ unsigned int *pool_object_count;
+
+ pool = (struct talloc_chunk *)tc->pool;
+ next_tc = TC_POOLMEM_NEXT_CHUNK(tc);
+
+ tc->flags |= TALLOC_FLAG_FREE;
+
+ /* we mark the freed memory with where we called the free
+ * from. This means on a double free error we can report where
+ * the first free came from
+ */
+ tc->name = location;
+
+ TC_INVALIDATE_FULL_CHUNK(tc);
+
+ pool_object_count = talloc_pool_objectcount(pool);
+
+ if (unlikely(*pool_object_count == 0)) {
+ talloc_abort("Pool object count zero!");
+ return;
+ }
+
+ *pool_object_count -= 1;
+
+ if (unlikely(*pool_object_count == 1 && !(pool->flags & TALLOC_FLAG_FREE))) {
+ /*
+ * if there is just one object left in the pool
+ * and pool->flags does not have TALLOC_FLAG_FREE,
+ * it means this is the pool itself and
+ * the rest is available for new objects
+ * again.
+ */
+ pool->pool = TC_POOL_FIRST_CHUNK(pool);
+ TC_INVALIDATE_POOL(pool);
+ } else if (unlikely(*pool_object_count == 0)) {
+ /*
+ * we mark the freed memory with where we called the free
+ * from. This means on a double free error we can report where
+ * the first free came from
+ */
+ pool->name = location;
+
+ TC_INVALIDATE_FULL_CHUNK(pool);
+ free(pool);
+ } else if (pool->pool == next_tc) {
+ /*
+ * if pool->pool still points to end of
+ * 'tc' (which is stored in the 'next_tc' variable),
+ * we can reclaim the memory of 'tc'.
+ */
+ pool->pool = tc;
+ }
+}
+
+static inline void _talloc_free_children_internal(struct talloc_chunk *tc,
+ void *ptr,
+ const char *location);
+
/*
internal talloc_free call
*/
@@ -567,12 +783,22 @@ static inline int _talloc_free_internal(void *ptr, const char *location)
return -1;
}
+ /* possibly initialised the talloc fill value */
+ if (unlikely(!talloc_fill.initialised)) {
+ const char *fill = getenv(TALLOC_FILL_ENV);
+ if (fill != NULL) {
+ talloc_fill.enabled = true;
+ talloc_fill.fill_value = strtoul(fill, NULL, 0);
+ }
+ talloc_fill.initialised = true;
+ }
+
tc = talloc_chunk_from_ptr(ptr);
if (unlikely(tc->refs)) {
int is_child;
- /* check this is a reference from a child or grantchild
- * back to it's parent or grantparent
+ /* check if this is a reference from a child or
+ * grandchild back to it's parent or grandparent
*
* in that case we need to remove the reference and
* call another instance of talloc_free() on the current
@@ -616,26 +842,7 @@ static inline int _talloc_free_internal(void *ptr, const char *location)
tc->flags |= TALLOC_FLAG_LOOP;
- while (tc->child) {
- /* we need to work out who will own an abandoned child
- if it cannot be freed. In priority order, the first
- choice is owner of any remaining reference to this
- pointer, the second choice is our parent, and the
- final choice is the null context. */
- void *child = TC_PTR_FROM_CHUNK(tc->child);
- const void *new_parent = null_context;
- if (unlikely(tc->child->refs)) {
- struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
- if (p) new_parent = TC_PTR_FROM_CHUNK(p);
- }
- if (unlikely(_talloc_free_internal(child, location) == -1)) {
- if (new_parent == null_context) {
- struct talloc_chunk *p = talloc_parent_chunk(ptr);
- if (p) new_parent = TC_PTR_FROM_CHUNK(p);
- }
- _talloc_steal_internal(new_parent, child);
- }
- }
+ _talloc_free_children_internal(tc, ptr, location);
tc->flags |= TALLOC_FLAG_FREE;
@@ -645,27 +852,26 @@ static inline int _talloc_free_internal(void *ptr, const char *location)
*/
tc->name = location;
- if (tc->flags & (TALLOC_FLAG_POOL|TALLOC_FLAG_POOLMEM)) {
- struct talloc_chunk *pool;
+ if (tc->flags & TALLOC_FLAG_POOL) {
unsigned int *pool_object_count;
- pool = (tc->flags & TALLOC_FLAG_POOL)
- ? tc : (struct talloc_chunk *)tc->pool;
-
- pool_object_count = talloc_pool_objectcount(pool);
+ pool_object_count = talloc_pool_objectcount(tc);
- if (*pool_object_count == 0) {
+ if (unlikely(*pool_object_count == 0)) {
talloc_abort("Pool object count zero!");
return 0;
}
*pool_object_count -= 1;
- if (*pool_object_count == 0) {
- free(pool);
+ if (unlikely(*pool_object_count == 0)) {
+ TC_INVALIDATE_FULL_CHUNK(tc);
+ free(tc);
}
- }
- else {
+ } else if (tc->flags & TALLOC_FLAG_POOLMEM) {
+ _talloc_free_poolmem(tc, location);
+ } else {
+ TC_INVALIDATE_FULL_CHUNK(tc);
free(tc);
}
return 0;
@@ -733,7 +939,7 @@ static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
ptr on success, or NULL if it could not be transferred.
passing NULL as ptr will always return NULL with no side effects.
*/
-void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
+_PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
{
struct talloc_chunk *tc;
@@ -754,6 +960,14 @@ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *locati
h->location);
}
}
+
+#if 0
+ /* this test is probably too expensive to have on in the
+ normal build, but it useful for debugging */
+ if (talloc_is_parent(new_ctx, ptr)) {
+ talloc_log("WARNING: stealing into talloc child at %s\n", location);
+ }
+#endif
return _talloc_steal_internal(new_ctx, ptr);
}
@@ -765,7 +979,7 @@ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *locati
The old parent can be either a reference or a parent
*/
-void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
+_PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
{
struct talloc_chunk *tc;
struct talloc_reference_handle *h;
@@ -825,7 +1039,7 @@ static inline int talloc_unreference(const void *context, const void *ptr)
remove a specific parent context from a pointer. This is a more
controlled varient of talloc_free()
*/
-int talloc_unlink(const void *context, void *ptr)
+_PUBLIC_ int talloc_unlink(const void *context, void *ptr)
{
struct talloc_chunk *tc_p, *new_p;
void *new_parent;
@@ -892,7 +1106,7 @@ static inline const char *talloc_set_name_v(const void *ptr, const char *fmt, va
/*
add a name to an existing pointer
*/
-const char *talloc_set_name(const void *ptr, const char *fmt, ...)
+_PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
{
const char *name;
va_list ap;
@@ -908,7 +1122,7 @@ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
talloc_named() operates just like talloc() except that it allows you
to name the pointer.
*/
-void *talloc_named(const void *context, size_t size, const char *fmt, ...)
+_PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
{
va_list ap;
void *ptr;
@@ -932,7 +1146,7 @@ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
/*
return the name of a talloc ptr, or "UNNAMED"
*/
-const char *talloc_get_name(const void *ptr)
+_PUBLIC_ const char *talloc_get_name(const void *ptr)
{
struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
@@ -949,7 +1163,7 @@ const char *talloc_get_name(const void *ptr)
check if a pointer has the given name. If it does, return the pointer,
otherwise return NULL
*/
-void *talloc_check_name(const void *ptr, const char *name)
+_PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
{
const char *pname;
if (unlikely(ptr == NULL)) return NULL;
@@ -978,7 +1192,7 @@ static void talloc_abort_type_missmatch(const char *location,
talloc_abort(reason);
}
-void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
+_PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
{
const char *pname;
@@ -999,19 +1213,12 @@ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *loca
/*
this is for compatibility with older versions of talloc
*/
-void *talloc_init(const char *fmt, ...)
+_PUBLIC_ void *talloc_init(const char *fmt, ...)
{
va_list ap;
void *ptr;
const char *name;
- /*
- * samba3 expects talloc_report_depth_cb(NULL, ...)
- * reports all talloc'ed memory, so we need to enable
- * null_tracking
- */
- talloc_enable_null_tracking();
-
ptr = __talloc(NULL, 0);
if (unlikely(ptr == NULL)) return NULL;
@@ -1027,21 +1234,10 @@ void *talloc_init(const char *fmt, ...)
return ptr;
}
-/*
- this is a replacement for the Samba3 talloc_destroy_pool functionality. It
- should probably not be used in new code. It's in here to keep the talloc
- code consistent across Samba 3 and 4.
-*/
-void talloc_free_children(void *ptr)
+static inline void _talloc_free_children_internal(struct talloc_chunk *tc,
+ void *ptr,
+ const char *location)
{
- struct talloc_chunk *tc;
-
- if (unlikely(ptr == NULL)) {
- return;
- }
-
- tc = talloc_chunk_from_ptr(ptr);
-
while (tc->child) {
/* we need to work out who will own an abandoned child
if it cannot be freed. In priority order, the first
@@ -1050,33 +1246,57 @@ void talloc_free_children(void *ptr)
final choice is the null context. */
void *child = TC_PTR_FROM_CHUNK(tc->child);
const void *new_parent = null_context;
+ struct talloc_chunk *old_parent = NULL;
if (unlikely(tc->child->refs)) {
struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
if (p) new_parent = TC_PTR_FROM_CHUNK(p);
}
- if (unlikely(talloc_free(child) == -1)) {
+ /* finding the parent here is potentially quite
+ expensive, but the alternative, which is to change
+ talloc to always have a valid tc->parent pointer,
+ makes realloc more expensive where there are a
+ large number of children.
+
+ The reason we need the parent pointer here is that
+ if _talloc_free_internal() fails due to references
+ or a failing destructor we need to re-parent, but
+ the free call can invalidate the prev pointer.
+ */
+ if (new_parent == null_context && (tc->child->refs || tc->child->destructor)) {
+ old_parent = talloc_parent_chunk(ptr);
+ }
+ if (unlikely(_talloc_free_internal(child, location) == -1)) {
if (new_parent == null_context) {
- struct talloc_chunk *p = talloc_parent_chunk(ptr);
+ struct talloc_chunk *p = old_parent;
if (p) new_parent = TC_PTR_FROM_CHUNK(p);
}
_talloc_steal_internal(new_parent, child);
}
}
+}
- if ((tc->flags & TALLOC_FLAG_POOL)
- && (*talloc_pool_objectcount(tc) == 1)) {
- tc->pool = ((char *)tc + TC_HDR_SIZE + TALLOC_POOL_HDR_SIZE);
-#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
- VALGRIND_MAKE_MEM_NOACCESS(
- tc->pool, tc->size - TALLOC_POOL_HDR_SIZE);
-#endif
+/*
+ this is a replacement for the Samba3 talloc_destroy_pool functionality. It
+ should probably not be used in new code. It's in here to keep the talloc
+ code consistent across Samba 3 and 4.
+*/
+_PUBLIC_ void talloc_free_children(void *ptr)
+{
+ struct talloc_chunk *tc;
+
+ if (unlikely(ptr == NULL)) {
+ return;
}
+
+ tc = talloc_chunk_from_ptr(ptr);
+
+ _talloc_free_children_internal(tc, ptr, __location__);
}
/*
Allocate a bit of memory as a child of an existing pointer
*/
-void *_talloc(const void *context, size_t size)
+_PUBLIC_ void *_talloc(const void *context, size_t size)
{
return __talloc(context, size);
}
@@ -1084,7 +1304,7 @@ void *_talloc(const void *context, size_t size)
/*
externally callable talloc_set_name_const()
*/
-void talloc_set_name_const(const void *ptr, const char *name)
+_PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
{
_talloc_set_name_const(ptr, name);
}
@@ -1094,7 +1314,7 @@ void talloc_set_name_const(const void *ptr, const char *name)
talloc_named() operates just like talloc() except that it allows you
to name the pointer.
*/
-void *talloc_named_const(const void *context, size_t size, const char *name)
+_PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
{
return _talloc_named_const(context, size, name);
}
@@ -1107,7 +1327,7 @@ void *talloc_named_const(const void *context, size_t size, const char *name)
will not be freed if the ref_count is > 1 or the destructor (if
any) returns non-zero
*/
-int _talloc_free(void *ptr, const char *location)
+_PUBLIC_ int _talloc_free(void *ptr, const char *location)
{
struct talloc_chunk *tc;
@@ -1120,6 +1340,13 @@ int _talloc_free(void *ptr, const char *location)
if (unlikely(tc->refs != NULL)) {
struct talloc_reference_handle *h;
+ if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
+ /* in this case we do know which parent should
+ get this pointer, as there is really only
+ one parent */
+ return talloc_unlink(null_context, ptr);
+ }
+
talloc_log("ERROR: talloc_free with references at %s\n",
location);
@@ -1139,11 +1366,12 @@ int _talloc_free(void *ptr, const char *location)
A talloc version of realloc. The context argument is only used if
ptr is NULL
*/
-void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
+_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
{
struct talloc_chunk *tc;
void *new_ptr;
bool malloced = false;
+ struct talloc_chunk *pool_tc = NULL;
/* size zero is equivalent to free() */
if (unlikely(size == 0)) {
@@ -1172,27 +1400,154 @@ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *n
return NULL;
}
+ /* don't let anybody try to realloc a talloc_pool */
+ if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
+ pool_tc = (struct talloc_chunk *)tc->pool;
+ }
+
+#if (ALWAYS_REALLOC == 0)
/* don't shrink if we have less than 1k to gain */
- if ((size < tc->size) && ((tc->size - size) < 1024)) {
- tc->size = size;
+ if (size < tc->size) {
+ if (pool_tc) {
+ void *next_tc = TC_POOLMEM_NEXT_CHUNK(tc);
+ TC_INVALIDATE_SHRINK_CHUNK(tc, size);
+ tc->size = size;
+ if (next_tc == pool_tc->pool) {
+ pool_tc->pool = TC_POOLMEM_NEXT_CHUNK(tc);
+ }
+ return ptr;
+ } else if ((tc->size - size) < 1024) {
+ /*
+ * if we call TC_INVALIDATE_SHRINK_CHUNK() here
+ * we would need to call TC_UNDEFINE_GROW_CHUNK()
+ * after each realloc call, which slows down
+ * testing a lot :-(.
+ *
+ * That is why we only mark memory as undefined here.
+ */
+ TC_UNDEFINE_SHRINK_CHUNK(tc, size);
+
+ /* do not shrink if we have less than 1k to gain */
+ tc->size = size;
+ return ptr;
+ }
+ } else if (tc->size == size) {
+ /*
+ * do not change the pointer if it is exactly
+ * the same size.
+ */
return ptr;
}
+#endif
/* by resetting magic we catch users of the old memory */
tc->flags |= TALLOC_FLAG_FREE;
#if ALWAYS_REALLOC
- new_ptr = malloc(size + TC_HDR_SIZE);
- if (new_ptr) {
- memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
- free(tc);
+ if (pool_tc) {
+ new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE);
+ *talloc_pool_objectcount(pool_tc) -= 1;
+
+ if (new_ptr == NULL) {
+ new_ptr = malloc(TC_HDR_SIZE+size);
+ malloced = true;
+ }
+
+ if (new_ptr) {
+ memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
+ TC_INVALIDATE_FULL_CHUNK(tc);
+ }
+ } else {
+ new_ptr = malloc(size + TC_HDR_SIZE);
+ if (new_ptr) {
+ memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
+ free(tc);
+ }
}
#else
- if (tc->flags & TALLOC_FLAG_POOLMEM) {
+ if (pool_tc) {
+ void *next_tc = TC_POOLMEM_NEXT_CHUNK(tc);
+ size_t old_chunk_size = TC_POOLMEM_CHUNK_SIZE(tc);
+ size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
+ size_t space_needed;
+ size_t space_left;
+ unsigned int chunk_count = *talloc_pool_objectcount(pool_tc);
+
+ if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
+ chunk_count -= 1;
+ }
+
+ if (chunk_count == 1) {
+ /*
+ * optimize for the case where 'tc' is the only
+ * chunk in the pool.
+ */
+ space_needed = new_chunk_size;
+ space_left = pool_tc->size - TALLOC_POOL_HDR_SIZE;
+
+ if (space_left >= space_needed) {
+ size_t old_used = TC_HDR_SIZE + tc->size;
+ size_t new_used = TC_HDR_SIZE + size;
+ pool_tc->pool = TC_POOL_FIRST_CHUNK(pool_tc);
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
+ /*
+ * we need to prepare the memmove into
+ * the unaccessable area.
+ */
+ {
+ size_t diff = PTR_DIFF(tc, pool_tc->pool);
+ size_t flen = MIN(diff, old_used);
+ char *fptr = (char *)pool_tc->pool;
+ VALGRIND_MAKE_MEM_UNDEFINED(fptr, flen);
+ }
+#endif
+ memmove(pool_tc->pool, tc, old_used);
+ new_ptr = pool_tc->pool;
+
+ tc = (struct talloc_chunk *)new_ptr;
+ TC_UNDEFINE_GROW_CHUNK(tc, size);
+
+ /*
+ * first we do not align the pool pointer
+ * because we want to invalidate the padding
+ * too.
+ */
+ pool_tc->pool = new_used + (char *)new_ptr;
+ TC_INVALIDATE_POOL(pool_tc);
+
+ /* now the aligned pointer */
+ pool_tc->pool = new_chunk_size + (char *)new_ptr;
+ goto got_new_ptr;
+ }
+
+ next_tc = NULL;
+ }
+
+ if (new_chunk_size == old_chunk_size) {
+ TC_UNDEFINE_GROW_CHUNK(tc, size);
+ tc->flags &= ~TALLOC_FLAG_FREE;
+ tc->size = size;
+ return ptr;
+ }
+
+ if (next_tc == pool_tc->pool) {
+ /*
+ * optimize for the case where 'tc' is the last
+ * chunk in the pool.
+ */
+ space_needed = new_chunk_size - old_chunk_size;
+ space_left = TC_POOL_SPACE_LEFT(pool_tc);
+
+ if (space_left >= space_needed) {
+ TC_UNDEFINE_GROW_CHUNK(tc, size);
+ tc->flags &= ~TALLOC_FLAG_FREE;
+ tc->size = size;
+ pool_tc->pool = TC_POOLMEM_NEXT_CHUNK(tc);
+ return ptr;
+ }
+ }
new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE);
- *talloc_pool_objectcount((struct talloc_chunk *)
- (tc->pool)) -= 1;
if (new_ptr == NULL) {
new_ptr = malloc(TC_HDR_SIZE+size);
@@ -1201,11 +1556,14 @@ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *n
if (new_ptr) {
memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
+
+ _talloc_free_poolmem(tc, __location__ "_talloc_realloc");
}
}
else {
new_ptr = realloc(tc, size + TC_HDR_SIZE);
}
+got_new_ptr:
#endif
if (unlikely(!new_ptr)) {
tc->flags &= ~TALLOC_FLAG_FREE;
@@ -1241,7 +1599,7 @@ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *n
a wrapper around talloc_steal() for situations where you are moving a pointer
between two structures, and want the old pointer to be set to NULL
*/
-void *_talloc_move(const void *new_ctx, const void *_pptr)
+_PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
{
const void **pptr = discard_const_p(const void *,_pptr);
void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
@@ -1252,7 +1610,7 @@ void *_talloc_move(const void *new_ctx, const void *_pptr)
/*
return the total size of a talloc pool (subtree)
*/
-size_t talloc_total_size(const void *ptr)
+_PUBLIC_ size_t talloc_total_size(const void *ptr)
{
size_t total = 0;
struct talloc_chunk *c, *tc;
@@ -1287,7 +1645,7 @@ size_t talloc_total_size(const void *ptr)
/*
return the total number of blocks in a talloc pool (subtree)
*/
-size_t talloc_total_blocks(const void *ptr)
+_PUBLIC_ size_t talloc_total_blocks(const void *ptr)
{
size_t total = 0;
struct talloc_chunk *c, *tc;
@@ -1320,7 +1678,7 @@ size_t talloc_total_blocks(const void *ptr)
/*
return the number of external references to a pointer
*/
-size_t talloc_reference_count(const void *ptr)
+_PUBLIC_ size_t talloc_reference_count(const void *ptr)
{
struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
struct talloc_reference_handle *h;
@@ -1335,7 +1693,7 @@ size_t talloc_reference_count(const void *ptr)
/*
report on memory usage by all children of a pointer, giving a full tree view
*/
-void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
+_PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
void (*callback)(const void *ptr,
int depth, int max_depth,
int is_ref,
@@ -1419,7 +1777,7 @@ static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_
/*
report on memory usage by all children of a pointer, giving a full tree view
*/
-void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
+_PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
{
if (f) {
talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
@@ -1430,7 +1788,7 @@ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f
/*
report on memory usage by all children of a pointer, giving a full tree view
*/
-void talloc_report_full(const void *ptr, FILE *f)
+_PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
{
talloc_report_depth_file(ptr, 0, -1, f);
}
@@ -1438,7 +1796,7 @@ void talloc_report_full(const void *ptr, FILE *f)
/*
report on memory usage by all children of a pointer
*/
-void talloc_report(const void *ptr, FILE *f)
+_PUBLIC_ void talloc_report(const void *ptr, FILE *f)
{
talloc_report_depth_file(ptr, 0, 1, f);
}
@@ -1466,7 +1824,7 @@ static void talloc_report_null_full(void)
/*
enable tracking of the NULL context
*/
-void talloc_enable_null_tracking(void)
+_PUBLIC_ void talloc_enable_null_tracking(void)
{
if (null_context == NULL) {
null_context = _talloc_named_const(NULL, 0, "null_context");
@@ -1480,7 +1838,7 @@ void talloc_enable_null_tracking(void)
enable tracking of the NULL context, not moving the autofree context
into the NULL context. This is needed for the talloc testsuite
*/
-void talloc_enable_null_tracking_no_autofree(void)
+_PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
{
if (null_context == NULL) {
null_context = _talloc_named_const(NULL, 0, "null_context");
@@ -1490,7 +1848,7 @@ void talloc_enable_null_tracking_no_autofree(void)
/*
disable tracking of the NULL context
*/
-void talloc_disable_null_tracking(void)
+_PUBLIC_ void talloc_disable_null_tracking(void)
{
if (null_context != NULL) {
/* we have to move any children onto the real NULL
@@ -1515,7 +1873,7 @@ void talloc_disable_null_tracking(void)
/*
enable leak reporting on exit
*/
-void talloc_enable_leak_report(void)
+_PUBLIC_ void talloc_enable_leak_report(void)
{
talloc_enable_null_tracking();
atexit(talloc_report_null);
@@ -1524,7 +1882,7 @@ void talloc_enable_leak_report(void)
/*
enable full leak reporting on exit
*/
-void talloc_enable_leak_report_full(void)
+_PUBLIC_ void talloc_enable_leak_report_full(void)
{
talloc_enable_null_tracking();
atexit(talloc_report_null_full);
@@ -1533,7 +1891,7 @@ void talloc_enable_leak_report_full(void)
/*
talloc and zero memory.
*/
-void *_talloc_zero(const void *ctx, size_t size, const char *name)
+_PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
{
void *p = _talloc_named_const(ctx, size, name);
@@ -1547,7 +1905,7 @@ void *_talloc_zero(const void *ctx, size_t size, const char *name)
/*
memdup with a talloc.
*/
-void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
+_PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
{
void *newp = _talloc_named_const(t, size, name);
@@ -1575,7 +1933,7 @@ static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
/*
strdup with a talloc
*/
-char *talloc_strdup(const void *t, const char *p)
+_PUBLIC_ char *talloc_strdup(const void *t, const char *p)
{
if (unlikely(!p)) return NULL;
return __talloc_strlendup(t, p, strlen(p));
@@ -1584,7 +1942,7 @@ char *talloc_strdup(const void *t, const char *p)
/*
strndup with a talloc
*/
-char *talloc_strndup(const void *t, const char *p, size_t n)
+_PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
{
if (unlikely(!p)) return NULL;
return __talloc_strlendup(t, p, strnlen(p, n));
@@ -1609,7 +1967,7 @@ static inline char *__talloc_strlendup_append(char *s, size_t slen,
/*
* Appends at the end of the string.
*/
-char *talloc_strdup_append(char *s, const char *a)
+_PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
{
if (unlikely(!s)) {
return talloc_strdup(NULL, a);
@@ -1626,7 +1984,7 @@ char *talloc_strdup_append(char *s, const char *a)
* Appends at the end of the talloc'ed buffer,
* not the end of the string.
*/
-char *talloc_strdup_append_buffer(char *s, const char *a)
+_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
{
size_t slen;
@@ -1649,7 +2007,7 @@ char *talloc_strdup_append_buffer(char *s, const char *a)
/*
* Appends at the end of the string.
*/
-char *talloc_strndup_append(char *s, const char *a, size_t n)
+_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
{
if (unlikely(!s)) {
return talloc_strdup(NULL, a);
@@ -1666,7 +2024,7 @@ char *talloc_strndup_append(char *s, const char *a, size_t n)
* Appends at the end of the talloc'ed buffer,
* not the end of the string.
*/
-char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
+_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
{
size_t slen;
@@ -1694,7 +2052,7 @@ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
#endif
#endif
-char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
+_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
{
int len;
char *ret;
@@ -1725,7 +2083,7 @@ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
Perform string formatting, and return a pointer to newly allocated
memory holding the result, inside a memory pool.
*/
-char *talloc_asprintf(const void *t, const char *fmt, ...)
+_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
{
va_list ap;
char *ret;
@@ -1778,7 +2136,7 @@ static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
* accumulating output into a string buffer. Appends at the end
* of the string.
**/
-char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
+_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
{
if (unlikely(!s)) {
return talloc_vasprintf(NULL, fmt, ap);
@@ -1792,7 +2150,7 @@ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
* and return @p s, which may have moved. Always appends at the
* end of the talloc'ed buffer, not the end of the string.
**/
-char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
+_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
{
size_t slen;
@@ -1813,7 +2171,7 @@ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
s, which may have moved. Good for gradually accumulating output
into a string buffer.
*/
-char *talloc_asprintf_append(char *s, const char *fmt, ...)
+_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
{
va_list ap;
@@ -1828,7 +2186,7 @@ char *talloc_asprintf_append(char *s, const char *fmt, ...)
s, which may have moved. Good for gradually accumulating output
into a buffer.
*/
-char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
+_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
{
va_list ap;
@@ -1841,7 +2199,7 @@ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
/*
alloc an array, checking for integer overflow in the array size
*/
-void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
+_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
if (count >= MAX_TALLOC_SIZE/el_size) {
return NULL;
@@ -1852,7 +2210,7 @@ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char
/*
alloc an zero array, checking for integer overflow in the array size
*/
-void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
+_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
{
if (count >= MAX_TALLOC_SIZE/el_size) {
return NULL;
@@ -1863,7 +2221,7 @@ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const
/*
realloc an array, checking for integer overflow in the array size
*/
-void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
+_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
{
if (count >= MAX_TALLOC_SIZE/el_size) {
return NULL;
@@ -1876,7 +2234,7 @@ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned
to libraries that want a realloc function (a realloc function encapsulates
all the basic capabilities of an allocation library, which is why this is useful)
*/
-void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
+_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
{
return _talloc_realloc(context, ptr, size, NULL);
}
@@ -1897,7 +2255,7 @@ static void talloc_autofree(void)
return a context which will be auto-freed on exit
this is useful for reducing the noise in leak reports
*/
-void *talloc_autofree_context(void)
+_PUBLIC_ void *talloc_autofree_context(void)
{
if (autofree_context == NULL) {
autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
@@ -1907,7 +2265,7 @@ void *talloc_autofree_context(void)
return autofree_context;
}
-size_t talloc_get_size(const void *context)
+_PUBLIC_ size_t talloc_get_size(const void *context)
{
struct talloc_chunk *tc;
@@ -1926,7 +2284,7 @@ size_t talloc_get_size(const void *context)
/*
find a parent of this context that has the given name, if any
*/
-void *talloc_find_parent_byname(const void *context, const char *name)
+_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
{
struct talloc_chunk *tc;
@@ -1950,7 +2308,7 @@ void *talloc_find_parent_byname(const void *context, const char *name)
/*
show the parentage of a context
*/
-void talloc_show_parents(const void *context, FILE *file)
+_PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
{
struct talloc_chunk *tc;
@@ -1974,7 +2332,7 @@ void talloc_show_parents(const void *context, FILE *file)
/*
return 1 if ptr is a parent of context
*/
-int talloc_is_parent(const void *context, const void *ptr)
+static int _talloc_is_parent(const void *context, const void *ptr, int depth)
{
struct talloc_chunk *tc;
@@ -1983,12 +2341,21 @@ int talloc_is_parent(const void *context, const void *ptr)
}
tc = talloc_chunk_from_ptr(context);
- while (tc) {
+ while (tc && depth > 0) {
if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
while (tc && tc->prev) tc = tc->prev;
if (tc) {
tc = tc->parent;
+ depth--;
}
}
return 0;
}
+
+/*
+ return 1 if ptr is a parent of context
+*/
+_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
+{
+ return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
+}
diff --git a/lib/talloc/talloc.exports b/lib/talloc/talloc.exports
deleted file mode 100644
index 1b8062f4a0..0000000000
--- a/lib/talloc/talloc.exports
+++ /dev/null
@@ -1,68 +0,0 @@
-# This file is autogenerated, please DO NOT EDIT
-{
- global:
- _talloc;
- _talloc_array;
- _talloc_free;
- _talloc_get_type_abort;
- _talloc_memdup;
- _talloc_move;
- _talloc_realloc;
- _talloc_realloc_array;
- _talloc_reference_loc;
- _talloc_set_destructor;
- _talloc_steal_loc;
- _talloc_zero;
- _talloc_zero_array;
- talloc_asprintf;
- talloc_asprintf_append;
- talloc_asprintf_append_buffer;
- talloc_autofree_context;
- talloc_check_name;
- talloc_disable_null_tracking;
- talloc_enable_leak_report;
- talloc_enable_leak_report_full;
- talloc_enable_null_tracking;
- talloc_enable_null_tracking_no_autofree;
- talloc_find_parent_byname;
- talloc_free_children;
- talloc_get_name;
- talloc_get_size;
- talloc_increase_ref_count;
- talloc_init;
- talloc_is_parent;
- talloc_named;
- talloc_named_const;
- talloc_parent;
- talloc_parent_name;
- talloc_pool;
- talloc_realloc_fn;
- talloc_reference_count;
- talloc_reparent;
- talloc_report;
- talloc_report_depth_cb;
- talloc_report_depth_file;
- talloc_report_full;
- talloc_set_abort_fn;
- talloc_set_log_fn;
- talloc_set_log_stderr;
- talloc_set_name;
- talloc_set_name_const;
- talloc_show_parents;
- talloc_strdup;
- talloc_strdup_append;
- talloc_strdup_append_buffer;
- talloc_strndup;
- talloc_strndup_append;
- talloc_strndup_append_buffer;
- talloc_total_blocks;
- talloc_total_size;
- talloc_unlink;
- talloc_vasprintf;
- talloc_vasprintf_append;
- talloc_vasprintf_append_buffer;
- talloc_version_major;
- talloc_version_minor;
-
- local: *;
-};
diff --git a/lib/talloc/talloc.h b/lib/talloc/talloc.h
index f549a17fba..571086193c 100644
--- a/lib/talloc/talloc.h
+++ b/lib/talloc/talloc.h
@@ -29,13 +29,48 @@
#include <stdio.h>
#include <stdarg.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup talloc The talloc API
+ *
+ * talloc is a hierarchical, reference counted memory pool system with
+ * destructors. It is the core memory allocator used in Samba.
+ *
+ * @{
+ */
+
#define TALLOC_VERSION_MAJOR 2
#define TALLOC_VERSION_MINOR 0
int talloc_version_major(void);
int talloc_version_minor(void);
-/* this is only needed for compatibility with the old talloc */
+/**
+ * @brief Define a talloc parent type
+ *
+ * As talloc is a hierarchial memory allocator, every talloc chunk is a
+ * potential parent to other talloc chunks. So defining a separate type for a
+ * talloc chunk is not strictly necessary. TALLOC_CTX is defined nevertheless,
+ * as it provides an indicator for function arguments. You will frequently
+ * write code like
+ *
+ * @code
+ * struct foo *foo_create(TALLOC_CTX *mem_ctx)
+ * {
+ * struct foo *result;
+ * result = talloc(mem_ctx, struct foo);
+ * if (result == NULL) return NULL;
+ * ... initialize foo ...
+ * return result;
+ * }
+ * @endcode
+ *
+ * In this type of allocating functions it is handy to have a general
+ * TALLOC_CTX type to indicate which parent to put allocated structures on.
+ */
typedef void TALLOC_CTX;
/*
@@ -64,6 +99,240 @@ typedef void TALLOC_CTX;
#endif
#endif
+#ifdef DOXYGEN
+/**
+ * @brief Create a new talloc context.
+ *
+ * The talloc() macro is the core of the talloc library. It takes a memory
+ * context and a type, and returns a pointer to a new area of memory of the
+ * given type.
+ *
+ * The returned pointer is itself a talloc context, so you can use it as the
+ * context argument to more calls to talloc if you wish.
+ *
+ * The returned pointer is a "child" of the supplied context. This means that if
+ * you talloc_free() the context then the new child disappears as well.
+ * Alternatively you can free just the child.
+ *
+ * @param[in] ctx A talloc context to create a new reference on or NULL to
+ * create a new top level context.
+ *
+ * @param[in] type The type of memory to allocate.
+ *
+ * @return A type casted talloc context or NULL on error.
+ *
+ * @code
+ * unsigned int *a, *b;
+ *
+ * a = talloc(NULL, unsigned int);
+ * b = talloc(a, unsigned int);
+ * @endcode
+ *
+ * @see talloc_zero
+ * @see talloc_array
+ * @see talloc_steal
+ * @see talloc_free
+ */
+void *talloc(const void *ctx, #type);
+#else
+#define talloc(ctx, type) (type *)talloc_named_const(ctx, sizeof(type), #type)
+void *_talloc(const void *context, size_t size);
+#endif
+
+/**
+ * @brief Create a new top level talloc context.
+ *
+ * This function creates a zero length named talloc context as a top level
+ * context. It is equivalent to:
+ *
+ * @code
+ * talloc_named(NULL, 0, fmt, ...);
+ * @endcode
+ * @param[in] fmt Format string for the name.
+ *
+ * @param[in] ... Additional printf-style arguments.
+ *
+ * @return The allocated memory chunk, NULL on error.
+ *
+ * @see talloc_named()
+ */
+void *talloc_init(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
+
+#ifdef DOXYGEN
+/**
+ * @brief Free a chunk of talloc memory.
+ *
+ * The talloc_free() function frees a piece of talloc memory, and all its
+ * children. You can call talloc_free() on any pointer returned by
+ * talloc().
+ *
+ * The return value of talloc_free() indicates success or failure, with 0
+ * returned for success and -1 for failure. A possible failure condition
+ * is if the pointer had a destructor attached to it and the destructor
+ * returned -1. See talloc_set_destructor() for details on
+ * destructors. Likewise, if "ptr" is NULL, then the function will make
+ * no modifications and return -1.
+ *
+ * If this pointer has an additional parent when talloc_free() is called
+ * then the memory is not actually released, but instead the most
+ * recently established parent is destroyed. See talloc_reference() for
+ * details on establishing additional parents.
+ *
+ * For more control on which parent is removed, see talloc_unlink()
+ *
+ * talloc_free() operates recursively on its children.
+ *
+ * From the 2.0 version of talloc, as a special case, talloc_free() is
+ * refused on pointers that have more than one parent, as talloc would
+ * have no way of knowing which parent should be removed. To free a
+ * pointer that has more than one parent please use talloc_unlink().
+ *
+ * To help you find problems in your code caused by this behaviour, if
+ * you do try and free a pointer with more than one parent then the
+ * talloc logging function will be called to give output like this:
+ *
+ * @code
+ * ERROR: talloc_free with references at some_dir/source/foo.c:123
+ * reference at some_dir/source/other.c:325
+ * reference at some_dir/source/third.c:121
+ * @endcode
+ *
+ * Please see the documentation for talloc_set_log_fn() and
+ * talloc_set_log_stderr() for more information on talloc logging
+ * functions.
+ *
+ * @param[in] ptr The chunk to be freed.
+ *
+ * @return Returns 0 on success and -1 on error. A possible
+ * failure condition is if the pointer had a destructor
+ * attached to it and the destructor returned -1. Likewise,
+ * if "ptr" is NULL, then the function will make no
+ * modifications and returns -1.
+ *
+ * Example:
+ * @code
+ * unsigned int *a, *b;
+ * a = talloc(NULL, unsigned int);
+ * b = talloc(a, unsigned int);
+ *
+ * talloc_free(a); // Frees a and b
+ * @endcode
+ *
+ * @see talloc_set_destructor()
+ * @see talloc_unlink()
+ */
+int talloc_free(void *ptr);
+#else
+#define talloc_free(ctx) _talloc_free(ctx, __location__)
+int _talloc_free(void *ptr, const char *location);
+#endif
+
+/**
+ * @brief Free a talloc chunk's children.
+ *
+ * The function walks along the list of all children of a talloc context and
+ * talloc_free()s only the children, not the context itself.
+ *
+ * @param[in] ptr The chunk that you want to free the children of.
+ */
+void talloc_free_children(void *ptr);
+
+#ifdef DOXYGEN
+/**
+ * @brief Assign a destructor function to be called when a chunk is freed.
+ *
+ * The function talloc_set_destructor() sets the "destructor" for the pointer
+ * "ptr". A destructor is a function that is called when the memory used by a
+ * pointer is about to be released. The destructor receives the pointer as an
+ * argument, and should return 0 for success and -1 for failure.
+ *
+ * The destructor can do anything it wants to, including freeing other pieces
+ * of memory. A common use for destructors is to clean up operating system
+ * resources (such as open file descriptors) contained in the structure the
+ * destructor is placed on.
+ *
+ * You can only place one destructor on a pointer. If you need more than one
+ * destructor then you can create a zero-length child of the pointer and place
+ * an additional destructor on that.
+ *
+ * To remove a destructor call talloc_set_destructor() with NULL for the
+ * destructor.
+ *
+ * If your destructor attempts to talloc_free() the pointer that it is the
+ * destructor for then talloc_free() will return -1 and the free will be
+ * ignored. This would be a pointless operation anyway, as the destructor is
+ * only called when the memory is just about to go away.
+ *
+ * @param[in] ptr The talloc chunk to add a destructor to.
+ *
+ * @param[in] destructor The destructor function to be called. NULL to remove
+ * it.
+ *
+ * Example:
+ * @code
+ * static int destroy_fd(int *fd) {
+ * close(*fd);
+ * return 0;
+ * }
+ *
+ * int *open_file(const char *filename) {
+ * int *fd = talloc(NULL, int);
+ * *fd = open(filename, O_RDONLY);
+ * if (*fd < 0) {
+ * talloc_free(fd);
+ * return NULL;
+ * }
+ * // Whenever they free this, we close the file.
+ * talloc_set_destructor(fd, destroy_fd);
+ * return fd;
+ * }
+ * @endcode
+ *
+ * @see talloc()
+ * @see talloc_free()
+ */
+void talloc_set_destructor(const void *ptr, int (*destructor)(void *));
+
+/**
+ * @brief Change a talloc chunk's parent.
+ *
+ * The talloc_steal() function changes the parent context of a talloc
+ * pointer. It is typically used when the context that the pointer is
+ * currently a child of is going to be freed and you wish to keep the
+ * memory for a longer time.
+ *
+ * To make the changed hierarchy less error-prone, you might consider to use
+ * talloc_move().
+ *
+ * If you try and call talloc_steal() on a pointer that has more than one
+ * parent then the result is ambiguous. Talloc will choose to remove the
+ * parent that is currently indicated by talloc_parent() and replace it with
+ * the chosen parent. You will also get a message like this via the talloc
+ * logging functions:
+ *
+ * @code
+ * WARNING: talloc_steal with references at some_dir/source/foo.c:123
+ * reference at some_dir/source/other.c:325
+ * reference at some_dir/source/third.c:121
+ * @endcode
+ *
+ * To unambiguously change the parent of a pointer please see the function
+ * talloc_reparent(). See the talloc_set_log_fn() documentation for more
+ * information on talloc logging.
+ *
+ * @param[in] new_ctx The new parent context.
+ *
+ * @param[in] ptr The talloc chunk to move.
+ *
+ * @return Returns the pointer that you pass it. It does not have
+ * any failure modes.
+ *
+ * @note It is possible to produce loops in the parent/child relationship
+ * if you are not careful with talloc_steal(). No guarantees are provided
+ * as to your sanity or the safety of your data if you do this.
+ */
+void *talloc_steal(const void *new_ctx, const void *ptr);
+#else /* DOXYGEN */
/* try to make talloc_set_destructor() and talloc_steal() type safe,
if we have a recent gcc */
#if (__GNUC__ >= 3)
@@ -76,127 +345,1368 @@ typedef void TALLOC_CTX;
/* this extremely strange macro is to avoid some braindamaged warning
stupidity in gcc 4.1.x */
#define talloc_steal(ctx, ptr) ({ _TALLOC_TYPEOF(ptr) __talloc_steal_ret = (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__); __talloc_steal_ret; })
-#else
+#else /* __GNUC__ >= 3 */
#define talloc_set_destructor(ptr, function) \
_talloc_set_destructor((ptr), (int (*)(void *))(function))
#define _TALLOC_TYPEOF(ptr) void *
#define talloc_steal(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__)
-#endif
+#endif /* __GNUC__ >= 3 */
+void _talloc_set_destructor(const void *ptr, int (*_destructor)(void *));
+void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location);
+#endif /* DOXYGEN */
-#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference_loc((ctx),(ptr), __location__)
+/**
+ * @brief Assign a name to a talloc chunk.
+ *
+ * Each talloc pointer has a "name". The name is used principally for
+ * debugging purposes, although it is also possible to set and get the name on
+ * a pointer in as a way of "marking" pointers in your code.
+ *
+ * The main use for names on pointer is for "talloc reports". See
+ * talloc_report() and talloc_report_full() for details. Also see
+ * talloc_enable_leak_report() and talloc_enable_leak_report_full().
+ *
+ * The talloc_set_name() function allocates memory as a child of the
+ * pointer. It is logically equivalent to:
+ *
+ * @code
+ * talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...));
+ * @endcode
+ *
+ * @param[in] ptr The talloc chunk to assign a name to.
+ *
+ * @param[in] fmt Format string for the name.
+ *
+ * @param[in] ... Add printf-style additional arguments.
+ *
+ * @return The assigned name, NULL on error.
+ *
+ * @note Multiple calls to talloc_set_name() will allocate more memory without
+ * releasing the name. All of the memory is released when the ptr is freed
+ * using talloc_free().
+ */
+const char *talloc_set_name(const void *ptr, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+
+#ifdef DOXYGEN
+/**
+ * @brief Change a talloc chunk's parent.
+ *
+ * This function has the same effect as talloc_steal(), and additionally sets
+ * the source pointer to NULL. You would use it like this:
+ *
+ * @code
+ * struct foo *X = talloc(tmp_ctx, struct foo);
+ * struct foo *Y;
+ * Y = talloc_move(new_ctx, &X);
+ * @endcode
+ *
+ * @param[in] new_ctx The new parent context.
+ *
+ * @param[in] ptr Pointer to the talloc chunk to move.
+ *
+ * @return The pointer of the talloc chunk it has been moved to,
+ * NULL on error.
+ */
+void *talloc_move(const void *new_ctx, const void *ptr);
+#else
#define talloc_move(ctx, ptr) (_TALLOC_TYPEOF(*(ptr)))_talloc_move((ctx),(void *)(ptr))
+void *_talloc_move(const void *new_ctx, const void *pptr);
+#endif
-/* useful macros for creating type checked pointers */
-#define talloc(ctx, type) (type *)talloc_named_const(ctx, sizeof(type), #type)
+/**
+ * @brief Assign a name to a talloc chunk.
+ *
+ * The function is just like talloc_set_name(), but it takes a string constant,
+ * and is much faster. It is extensively used by the "auto naming" macros, such
+ * as talloc_p().
+ *
+ * This function does not allocate any memory. It just copies the supplied
+ * pointer into the internal representation of the talloc ptr. This means you
+ * must not pass a name pointer to memory that will disappear before the ptr
+ * is freed with talloc_free().
+ *
+ * @param[in] ptr The talloc chunk to assign a name to.
+ *
+ * @param[in] name Format string for the name.
+ */
+void talloc_set_name_const(const void *ptr, const char *name);
+
+/**
+ * @brief Create a named talloc chunk.
+ *
+ * The talloc_named() function creates a named talloc pointer. It is
+ * equivalent to:
+ *
+ * @code
+ * ptr = talloc_size(context, size);
+ * talloc_set_name(ptr, fmt, ....);
+ * @endcode
+ *
+ * @param[in] context The talloc context to hang the result off.
+ *
+ * @param[in] size Number of char's that you want to allocate.
+ *
+ * @param[in] fmt Format string for the name.
+ *
+ * @param[in] ... Additional printf-style arguments.
+ *
+ * @return The allocated memory chunk, NULL on error.
+ *
+ * @see talloc_set_name()
+ */
+void *talloc_named(const void *context, size_t size,
+ const char *fmt, ...) PRINTF_ATTRIBUTE(3,4);
+
+/**
+ * @brief Basic routine to allocate a chunk of memory.
+ *
+ * This is equivalent to:
+ *
+ * @code
+ * ptr = talloc_size(context, size);
+ * talloc_set_name_const(ptr, name);
+ * @endcode
+ *
+ * @param[in] context The parent context.
+ *
+ * @param[in] size The number of char's that we want to allocate.
+ *
+ * @param[in] name The name the talloc block has.
+ *
+ * @return The allocated memory chunk, NULL on error.
+ */
+void *talloc_named_const(const void *context, size_t size, const char *name);
+
+#ifdef DOXYGEN
+/**
+ * @brief Untyped allocation.
+ *
+ * The function should be used when you don't have a convenient type to pass to
+ * talloc(). Unlike talloc(), it is not type safe (as it returns a void *), so
+ * you are on your own for type checking.
+ *
+ * Best to use talloc() or talloc_array() instead.
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] size Number of char's that you want to allocate.
+ *
+ * @return The allocated memory chunk, NULL on error.
+ *
+ * Example:
+ * @code
+ * void *mem = talloc_size(NULL, 100);
+ * @endcode
+ */
+void *talloc_size(const void *ctx, size_t size);
+#else
#define talloc_size(ctx, size) talloc_named_const(ctx, size, __location__)
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Allocate into a typed pointer.
+ *
+ * The talloc_ptrtype() macro should be used when you have a pointer and want
+ * to allocate memory to point at with this pointer. When compiling with
+ * gcc >= 3 it is typesafe. Note this is a wrapper of talloc_size() and
+ * talloc_get_name() will return the current location in the source file and
+ * not the type.
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] type The pointer you want to assign the result to.
+ *
+ * @return The properly casted allocated memory chunk, NULL on
+ * error.
+ *
+ * Example:
+ * @code
+ * unsigned int *a = talloc_ptrtype(NULL, a);
+ * @endcode
+ */
+void *talloc_ptrtype(const void *ctx, #type);
+#else
#define talloc_ptrtype(ctx, ptr) (_TALLOC_TYPEOF(ptr))talloc_size(ctx, sizeof(*(ptr)))
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Allocate a new 0-sized talloc chunk.
+ *
+ * This is a utility macro that creates a new memory context hanging off an
+ * existing context, automatically naming it "talloc_new: __location__" where
+ * __location__ is the source line it is called from. It is particularly
+ * useful for creating a new temporary working context.
+ *
+ * @param[in] ctx The talloc parent context.
+ *
+ * @return A new talloc chunk, NULL on error.
+ */
+void *talloc_new(const void *ctx);
+#else
#define talloc_new(ctx) talloc_named_const(ctx, 0, "talloc_new: " __location__)
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Allocate a 0-initizialized structure.
+ *
+ * The macro is equivalent to:
+ *
+ * @code
+ * ptr = talloc(ctx, type);
+ * if (ptr) memset(ptr, 0, sizeof(type));
+ * @endcode
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] type The type that we want to allocate.
+ *
+ * @return Pointer to a piece of memory, properly cast to 'type *',
+ * NULL on error.
+ *
+ * Example:
+ * @code
+ * unsigned int *a, *b;
+ * a = talloc_zero(NULL, unsigned int);
+ * b = talloc_zero(a, unsigned int);
+ * @endcode
+ *
+ * @see talloc()
+ * @see talloc_zero_size()
+ * @see talloc_zero_array()
+ */
+void *talloc_zero(const void *ctx, #type);
+/**
+ * @brief Allocate untyped, 0-initialized memory.
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] size Number of char's that you want to allocate.
+ *
+ * @return The allocated memory chunk.
+ */
+void *talloc_zero_size(const void *ctx, size_t size);
+#else
#define talloc_zero(ctx, type) (type *)_talloc_zero(ctx, sizeof(type), #type)
#define talloc_zero_size(ctx, size) _talloc_zero(ctx, size, __location__)
+void *_talloc_zero(const void *ctx, size_t size, const char *name);
+#endif
-#define talloc_zero_array(ctx, type, count) (type *)_talloc_zero_array(ctx, sizeof(type), count, #type)
-#define talloc_array(ctx, type, count) (type *)_talloc_array(ctx, sizeof(type), count, #type)
-#define talloc_array_size(ctx, size, count) _talloc_array(ctx, size, count, __location__)
-#define talloc_array_ptrtype(ctx, ptr, count) (_TALLOC_TYPEOF(ptr))talloc_array_size(ctx, sizeof(*(ptr)), count)
-#define talloc_array_length(ctx) (talloc_get_size(ctx)/sizeof(*ctx))
+/**
+ * @brief Return the name of a talloc chunk.
+ *
+ * @param[in] ptr The talloc chunk.
+ *
+ * @return The current name for the given talloc pointer.
+ *
+ * @see talloc_set_name()
+ */
+const char *talloc_get_name(const void *ptr);
-#define talloc_realloc(ctx, p, type, count) (type *)_talloc_realloc_array(ctx, p, sizeof(type), count, #type)
-#define talloc_realloc_size(ctx, ptr, size) _talloc_realloc(ctx, ptr, size, __location__)
+/**
+ * @brief Verify that a talloc chunk carries a specified name.
+ *
+ * This function checks if a pointer has the specified name. If it does
+ * then the pointer is returned.
+ *
+ * @param[in] ptr The talloc chunk to check.
+ *
+ * @param[in] name The name to check against.
+ *
+ * @return The pointer if the name matches, NULL if it doesn't.
+ */
+void *talloc_check_name(const void *ptr, const char *name);
+
+/**
+ * @brief Get the parent chunk of a pointer.
+ *
+ * @param[in] ptr The talloc pointer to inspect.
+ *
+ * @return The talloc parent of ptr, NULL on error.
+ */
+void *talloc_parent(const void *ptr);
+/**
+ * @brief Get a talloc chunk's parent name.
+ *
+ * @param[in] ptr The talloc pointer to inspect.
+ *
+ * @return The name of ptr's parent chunk.
+ */
+const char *talloc_parent_name(const void *ptr);
+
+/**
+ * @brief Get the total size of a talloc chunk including its children.
+ *
+ * The function returns the total size in bytes used by this pointer and all
+ * child pointers. Mostly useful for debugging.
+ *
+ * Passing NULL is allowed, but it will only give a meaningful result if
+ * talloc_enable_leak_report() or talloc_enable_leak_report_full() has
+ * been called.
+ *
+ * @param[in] ptr The talloc chunk.
+ *
+ * @return The total size.
+ */
+size_t talloc_total_size(const void *ptr);
+
+/**
+ * @brief Get the number of talloc chunks hanging off a chunk.
+ *
+ * The talloc_total_blocks() function returns the total memory block
+ * count used by this pointer and all child pointers. Mostly useful for
+ * debugging.
+ *
+ * Passing NULL is allowed, but it will only give a meaningful result if
+ * talloc_enable_leak_report() or talloc_enable_leak_report_full() has
+ * been called.
+ *
+ * @param[in] ptr The talloc chunk.
+ *
+ * @return The total size.
+ */
+size_t talloc_total_blocks(const void *ptr);
+
+#ifdef DOXYGEN
+/**
+ * @brief Duplicate a memory area into a talloc chunk.
+ *
+ * The function is equivalent to:
+ *
+ * @code
+ * ptr = talloc_size(ctx, size);
+ * if (ptr) memcpy(ptr, p, size);
+ * @endcode
+ *
+ * @param[in] t The talloc context to hang the result off.
+ *
+ * @param[in] p The memory chunk you want to duplicate.
+ *
+ * @param[in] size Number of char's that you want copy.
+ *
+ * @return The allocated memory chunk.
+ *
+ * @see talloc_size()
+ */
+void *talloc_memdup(const void *t, const void *p, size_t size);
+#else
#define talloc_memdup(t, p, size) _talloc_memdup(t, p, size, __location__)
+void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name);
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Assign a type to a talloc chunk.
+ *
+ * This macro allows you to force the name of a pointer to be a particular type.
+ * This can be used in conjunction with talloc_get_type() to do type checking on
+ * void* pointers.
+ *
+ * It is equivalent to this:
+ *
+ * @code
+ * talloc_set_name_const(ptr, #type)
+ * @endcode
+ *
+ * @param[in] ptr The talloc chunk to assign the type to.
+ *
+ * @param[in] type The type to assign.
+ */
+void talloc_set_type(const char *ptr, #type);
+
+/**
+ * @brief Get a typed pointer out of a talloc pointer.
+ *
+ * This macro allows you to do type checking on talloc pointers. It is
+ * particularly useful for void* private pointers. It is equivalent to
+ * this:
+ *
+ * @code
+ * (type *)talloc_check_name(ptr, #type)
+ * @endcode
+ *
+ * @param[in] ptr The talloc pointer to check.
+ *
+ * @param[in] type The type to check against.
+ *
+ * @return The properly casted pointer given by ptr, NULL on error.
+ */
+type *talloc_get_type(const void *ptr, #type);
+#else
#define talloc_set_type(ptr, type) talloc_set_name_const(ptr, #type)
#define talloc_get_type(ptr, type) (type *)talloc_check_name(ptr, #type)
-#define talloc_get_type_abort(ptr, type) (type *)_talloc_get_type_abort(ptr, #type, __location__)
+#endif
-#define talloc_find_parent_bytype(ptr, type) (type *)talloc_find_parent_byname(ptr, #type)
-#define talloc_free(ctx) _talloc_free(ctx, __location__)
+#ifdef DOXYGEN
+/**
+ * @brief Safely turn a void pointer into a typed pointer.
+ *
+ * This macro is used together with talloc(mem_ctx, struct foo). If you had to
+ * assign the talloc chunk pointer to some void pointer variable,
+ * talloc_get_type_abort() is the recommended way to convert the void
+ * pointer back to a typed pointer.
+ *
+ * @param[in] ptr The void pointer to convert.
+ *
+ * @param[in] type The type that this chunk contains
+ *
+ * @return The same value as ptr, type-checked and properly cast.
+ */
+void *talloc_get_type_abort(const void *ptr, #type);
+#else
+#define talloc_get_type_abort(ptr, type) (type *)_talloc_get_type_abort(ptr, #type, __location__)
+void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location);
+#endif
+/**
+ * @brief Find a parent context by name.
+ *
+ * Find a parent memory context of the current context that has the given
+ * name. This can be very useful in complex programs where it may be
+ * difficult to pass all information down to the level you need, but you
+ * know the structure you want is a parent of another context.
+ *
+ * @param[in] ctx The talloc chunk to start from.
+ *
+ * @param[in] name The name of the parent we look for.
+ *
+ * @return The memory context we are looking for, NULL if not
+ * found.
+ */
+void *talloc_find_parent_byname(const void *ctx, const char *name);
-#if TALLOC_DEPRECATED
-#define talloc_zero_p(ctx, type) talloc_zero(ctx, type)
-#define talloc_p(ctx, type) talloc(ctx, type)
-#define talloc_array_p(ctx, type, count) talloc_array(ctx, type, count)
-#define talloc_realloc_p(ctx, p, type, count) talloc_realloc(ctx, p, type, count)
-#define talloc_destroy(ctx) talloc_free(ctx)
-#define talloc_append_string(c, s, a) (s?talloc_strdup_append(s,a):talloc_strdup(c, a))
+#ifdef DOXYGEN
+/**
+ * @brief Find a parent context by type.
+ *
+ * Find a parent memory context of the current context that has the given
+ * name. This can be very useful in complex programs where it may be
+ * difficult to pass all information down to the level you need, but you
+ * know the structure you want is a parent of another context.
+ *
+ * Like talloc_find_parent_byname() but takes a type, making it typesafe.
+ *
+ * @param[in] ptr The talloc chunk to start from.
+ *
+ * @param[in] type The type of the parent to look for.
+ *
+ * @return The memory context we are looking for, NULL if not
+ * found.
+ */
+void *talloc_find_parent_bytype(const void *ptr, #type);
+#else
+#define talloc_find_parent_bytype(ptr, type) (type *)talloc_find_parent_byname(ptr, #type)
#endif
+/**
+ * @brief Allocate a talloc pool.
+ *
+ * A talloc pool is a pure optimization for specific situations. In the
+ * release process for Samba 3.2 we found out that we had become considerably
+ * slower than Samba 3.0 was. Profiling showed that malloc(3) was a large CPU
+ * consumer in benchmarks. For Samba 3.2 we have internally converted many
+ * static buffers to dynamically allocated ones, so malloc(3) being beaten
+ * more was no surprise. But it made us slower.
+ *
+ * talloc_pool() is an optimization to call malloc(3) a lot less for the use
+ * pattern Samba has: The SMB protocol is mainly a request/response protocol
+ * where we have to allocate a certain amount of memory per request and free
+ * that after the SMB reply is sent to the client.
+ *
+ * talloc_pool() creates a talloc chunk that you can use as a talloc parent
+ * exactly as you would use any other ::TALLOC_CTX. The difference is that
+ * when you talloc a child of this pool, no malloc(3) is done. Instead, talloc
+ * just increments a pointer inside the talloc_pool. This also works
+ * recursively. If you use the child of the talloc pool as a parent for
+ * grand-children, their memory is also taken from the talloc pool.
+ *
+ * If you talloc_free() children of a talloc pool, the memory is not given
+ * back to the system. Instead, free(3) is only called if the talloc_pool()
+ * itself is released with talloc_free().
+ *
+ * The downside of a talloc pool is that if you talloc_move() a child of a
+ * talloc pool to a talloc parent outside the pool, the whole pool memory is
+ * not free(3)'ed until that moved chunk is also talloc_free()ed.
+ *
+ * @param[in] context The talloc context to hang the result off.
+ *
+ * @param[in] size Size of the talloc pool.
+ *
+ * @return The allocated talloc pool, NULL on error.
+ */
+void *talloc_pool(const void *context, size_t size);
+
+/**
+ * @brief Free a talloc chunk and NULL out the pointer.
+ *
+ * TALLOC_FREE() frees a pointer and sets it to NULL. Use this if you want
+ * immediate feedback (i.e. crash) if you use a pointer after having free'ed
+ * it.
+ *
+ * @param[in] ctx The chunk to be freed.
+ */
#define TALLOC_FREE(ctx) do { talloc_free(ctx); ctx=NULL; } while(0)
-/* The following definitions come from talloc.c */
-void *_talloc(const void *context, size_t size);
-void *talloc_pool(const void *context, size_t size);
-void _talloc_set_destructor(const void *ptr, int (*_destructor)(void *));
+/* @} ******************************************************************/
+
+/**
+ * \defgroup talloc_ref The talloc reference function.
+ * @ingroup talloc
+ *
+ * This module contains the definitions around talloc references
+ *
+ * @{
+ */
+
+/**
+ * @brief Increase the reference count of a talloc chunk.
+ *
+ * The talloc_increase_ref_count(ptr) function is exactly equivalent to:
+ *
+ * @code
+ * talloc_reference(NULL, ptr);
+ * @endcode
+ *
+ * You can use either syntax, depending on which you think is clearer in
+ * your code.
+ *
+ * @param[in] ptr The pointer to increase the reference count.
+ *
+ * @return 0 on success, -1 on error.
+ */
int talloc_increase_ref_count(const void *ptr);
+
+/**
+ * @brief Get the number of references to a talloc chunk.
+ *
+ * @param[in] ptr The pointer to retrieve the reference count from.
+ *
+ * @return The number of references.
+ */
size_t talloc_reference_count(const void *ptr);
+
+#ifdef DOXYGEN
+/**
+ * @brief Create an additional talloc parent to a pointer.
+ *
+ * The talloc_reference() function makes "context" an additional parent of
+ * ptr. Each additional reference consumes around 48 bytes of memory on intel
+ * x86 platforms.
+ *
+ * If ptr is NULL, then the function is a no-op, and simply returns NULL.
+ *
+ * After creating a reference you can free it in one of the following ways:
+ *
+ * - you can talloc_free() any parent of the original pointer. That
+ * will reduce the number of parents of this pointer by 1, and will
+ * cause this pointer to be freed if it runs out of parents.
+ *
+ * - you can talloc_free() the pointer itself. That will destroy the
+ * most recently established parent to the pointer and leave the
+ * pointer as a child of its current parent.
+ *
+ * For more control on which parent to remove, see talloc_unlink()
+ * @param[in] ctx The additional parent.
+ *
+ * @param[in] ptr The pointer you want to create an additional parent for.
+ *
+ * @return The original pointer 'ptr', NULL if talloc ran out of
+ * memory in creating the reference.
+ *
+ * Example:
+ * @code
+ * unsigned int *a, *b, *c;
+ * a = talloc(NULL, unsigned int);
+ * b = talloc(NULL, unsigned int);
+ * c = talloc(a, unsigned int);
+ * // b also serves as a parent of c.
+ * talloc_reference(b, c);
+ * @endcode
+ *
+ * @see talloc_unlink()
+ */
+void *talloc_reference(const void *ctx, const void *ptr);
+#else
+#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference_loc((ctx),(ptr), __location__)
void *_talloc_reference_loc(const void *context, const void *ptr, const char *location);
+#endif
+
+/**
+ * @brief Remove a specific parent from a talloc chunk.
+ *
+ * The function removes a specific parent from ptr. The context passed must
+ * either be a context used in talloc_reference() with this pointer, or must be
+ * a direct parent of ptr.
+ *
+ * Usually you can just use talloc_free() instead of talloc_unlink(), but
+ * sometimes it is useful to have the additional control on which parent is
+ * removed.
+ *
+ * @param[in] context The talloc parent to remove.
+ *
+ * @param[in] ptr The talloc ptr you want to remove the parent from.
+ *
+ * @return 0 on success, -1 on error.
+ *
+ * @note If the parent has already been removed using talloc_free() then
+ * this function will fail and will return -1. Likewise, if ptr is NULL,
+ * then the function will make no modifications and return -1.
+ *
+ * Example:
+ * @code
+ * unsigned int *a, *b, *c;
+ * a = talloc(NULL, unsigned int);
+ * b = talloc(NULL, unsigned int);
+ * c = talloc(a, unsigned int);
+ * // b also serves as a parent of c.
+ * talloc_reference(b, c);
+ * talloc_unlink(b, c);
+ * @endcode
+ */
int talloc_unlink(const void *context, void *ptr);
-const char *talloc_set_name(const void *ptr, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
-void talloc_set_name_const(const void *ptr, const char *name);
-void *talloc_named(const void *context, size_t size,
- const char *fmt, ...) PRINTF_ATTRIBUTE(3,4);
-void *talloc_named_const(const void *context, size_t size, const char *name);
-const char *talloc_get_name(const void *ptr);
-void *talloc_check_name(const void *ptr, const char *name);
-void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location);
-void *talloc_parent(const void *ptr);
-const char *talloc_parent_name(const void *ptr);
-void *talloc_init(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
-int _talloc_free(void *ptr, const char *location);
-void talloc_free_children(void *ptr);
-void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name);
-void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location);
-void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr);
-void *_talloc_move(const void *new_ctx, const void *pptr);
-size_t talloc_total_size(const void *ptr);
-size_t talloc_total_blocks(const void *ptr);
-void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
- void (*callback)(const void *ptr,
- int depth, int max_depth,
- int is_ref,
- void *private_data),
- void *private_data);
-void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f);
-void talloc_report_full(const void *ptr, FILE *f);
-void talloc_report(const void *ptr, FILE *f);
-void talloc_enable_null_tracking(void);
-void talloc_enable_null_tracking_no_autofree(void);
-void talloc_disable_null_tracking(void);
-void talloc_enable_leak_report(void);
-void talloc_enable_leak_report_full(void);
-void *_talloc_zero(const void *ctx, size_t size, const char *name);
-void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name);
-void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name);
-void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name);
-void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name);
-void *talloc_realloc_fn(const void *context, void *ptr, size_t size);
+
+/**
+ * @brief Provide a talloc context that is freed at program exit.
+ *
+ * This is a handy utility function that returns a talloc context
+ * which will be automatically freed on program exit. This can be used
+ * to reduce the noise in memory leak reports.
+ *
+ * Never use this in code that might be used in objects loaded with
+ * dlopen and unloaded with dlclose. talloc_autofree_context()
+ * internally uses atexit(3). Some platforms like modern Linux handle
+ * this fine, but for example FreeBSD does not deal well with dlopen()
+ * and atexit() used simultaneously: dlclose() does not clean up the
+ * list of atexit-handlers, so when the program exits the code that
+ * was registered from within talloc_autofree_context() is gone, the
+ * program crashes at exit.
+ *
+ * @return A talloc context, NULL on error.
+ */
void *talloc_autofree_context(void);
+
+/**
+ * @brief Get the size of a talloc chunk.
+ *
+ * This function lets you know the amount of memory allocated so far by
+ * this context. It does NOT account for subcontext memory.
+ * This can be used to calculate the size of an array.
+ *
+ * @param[in] ctx The talloc chunk.
+ *
+ * @return The size of the talloc chunk.
+ */
size_t talloc_get_size(const void *ctx);
-void *talloc_find_parent_byname(const void *ctx, const char *name);
+
+/**
+ * @brief Show the parentage of a context.
+ *
+ * @param[in] context The talloc context to look at.
+ *
+ * @param[in] file The output to use, a file, stdout or stderr.
+ */
void talloc_show_parents(const void *context, FILE *file);
+
+/**
+ * @brief Check if a context is parent of a talloc chunk.
+ *
+ * This checks if context is referenced in the talloc hierarchy above ptr.
+ *
+ * @param[in] context The assumed talloc context.
+ *
+ * @param[in] ptr The talloc chunk to check.
+ *
+ * @return Return 1 if this is the case, 0 if not.
+ */
int talloc_is_parent(const void *context, const void *ptr);
+/**
+ * @brief Change the parent context of a talloc pointer.
+ *
+ * The function changes the parent context of a talloc pointer. It is typically
+ * used when the context that the pointer is currently a child of is going to be
+ * freed and you wish to keep the memory for a longer time.
+ *
+ * The difference between talloc_reparent() and talloc_steal() is that
+ * talloc_reparent() can specify which parent you wish to change. This is
+ * useful when a pointer has multiple parents via references.
+ *
+ * @param[in] old_parent
+ * @param[in] new_parent
+ * @param[in] ptr
+ *
+ * @return Return the pointer you passed. It does not have any
+ * failure modes.
+ */
+void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr);
+
+/* @} ******************************************************************/
+
+/**
+ * @defgroup talloc_array The talloc array functions
+ * @ingroup talloc
+ *
+ * Talloc contains some handy helpers for handling Arrays conveniently
+ *
+ * @{
+ */
+
+#ifdef DOXYGEN
+/**
+ * @brief Allocate an array.
+ *
+ * The macro is equivalent to:
+ *
+ * @code
+ * (type *)talloc_size(ctx, sizeof(type) * count);
+ * @endcode
+ *
+ * except that it provides integer overflow protection for the multiply,
+ * returning NULL if the multiply overflows.
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] type The type that we want to allocate.
+ *
+ * @param[in] count The number of 'type' elements you want to allocate.
+ *
+ * @return The allocated result, properly cast to 'type *', NULL on
+ * error.
+ *
+ * Example:
+ * @code
+ * unsigned int *a, *b;
+ * a = talloc_zero(NULL, unsigned int);
+ * b = talloc_array(a, unsigned int, 100);
+ * @endcode
+ *
+ * @see talloc()
+ * @see talloc_zero_array()
+ */
+void *talloc_array(const void *ctx, #type, unsigned count);
+#else
+#define talloc_array(ctx, type, count) (type *)_talloc_array(ctx, sizeof(type), count, #type)
+void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name);
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Allocate an array.
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] size The size of an array element.
+ *
+ * @param[in] count The number of elements you want to allocate.
+ *
+ * @return The allocated result, NULL on error.
+ */
+void *talloc_array_size(const void *ctx, size_t size, unsigned count);
+#else
+#define talloc_array_size(ctx, size, count) _talloc_array(ctx, size, count, __location__)
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Allocate an array into a typed pointer.
+ *
+ * The macro should be used when you have a pointer to an array and want to
+ * allocate memory of an array to point at with this pointer. When compiling
+ * with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_array_size()
+ * and talloc_get_name() will return the current location in the source file
+ * and not the type.
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] ptr The pointer you want to assign the result to.
+ *
+ * @param[in] count The number of elements you want to allocate.
+ *
+ * @return The allocated memory chunk, properly casted. NULL on
+ * error.
+ */
+void *talloc_array_ptrtype(const void *ctx, const void *ptr, unsigned count);
+#else
+#define talloc_array_ptrtype(ctx, ptr, count) (_TALLOC_TYPEOF(ptr))talloc_array_size(ctx, sizeof(*(ptr)), count)
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Get the number of elements in a talloc'ed array.
+ *
+ * A talloc chunk carries its own size, so for talloc'ed arrays it is not
+ * necessary to store the number of elements explicitly.
+ *
+ * @param[in] ctx The allocated array.
+ *
+ * @return The number of elements in ctx.
+ */
+size_t talloc_array_length(const void *ctx);
+#else
+#define talloc_array_length(ctx) (talloc_get_size(ctx)/sizeof(*ctx))
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Allocate a zero-initialized array
+ *
+ * @param[in] ctx The talloc context to hang the result off.
+ *
+ * @param[in] type The type that we want to allocate.
+ *
+ * @param[in] count The number of "type" elements you want to allocate.
+ *
+ * @return The allocated result casted to "type *", NULL on error.
+ *
+ * The talloc_zero_array() macro is equivalent to:
+ *
+ * @code
+ * ptr = talloc_array(ctx, type, count);
+ * if (ptr) memset(ptr, 0, sizeof(type) * count);
+ * @endcode
+ */
+void *talloc_zero_array(const void *ctx, #type, unsigned count);
+#else
+#define talloc_zero_array(ctx, type, count) (type *)_talloc_zero_array(ctx, sizeof(type), count, #type)
+void *_talloc_zero_array(const void *ctx,
+ size_t el_size,
+ unsigned count,
+ const char *name);
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Change the size of a talloc array.
+ *
+ * The macro changes the size of a talloc pointer. The 'count' argument is the
+ * number of elements of type 'type' that you want the resulting pointer to
+ * hold.
+ *
+ * talloc_realloc() has the following equivalences:
+ *
+ * @code
+ * talloc_realloc(ctx, NULL, type, 1) ==> talloc(ctx, type);
+ * talloc_realloc(ctx, NULL, type, N) ==> talloc_array(ctx, type, N);
+ * talloc_realloc(ctx, ptr, type, 0) ==> talloc_free(ptr);
+ * @endcode
+ *
+ * The "context" argument is only used if "ptr" is NULL, otherwise it is
+ * ignored.
+ *
+ * @param[in] ctx The parent context used if ptr is NULL.
+ *
+ * @param[in] ptr The chunk to be resized.
+ *
+ * @param[in] type The type of the array element inside ptr.
+ *
+ * @param[in] count The intended number of array elements.
+ *
+ * @return The new array, NULL on error. The call will fail either
+ * due to a lack of memory, or because the pointer has more
+ * than one parent (see talloc_reference()).
+ */
+void *talloc_realloc(const void *ctx, void *ptr, #type, size_t count);
+#else
+#define talloc_realloc(ctx, p, type, count) (type *)_talloc_realloc_array(ctx, p, sizeof(type), count, #type)
+void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name);
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Untyped realloc to change the size of a talloc array.
+ *
+ * The macro is useful when the type is not known so the typesafe
+ * talloc_realloc() cannot be used.
+ *
+ * @param[in] ctx The parent context used if 'ptr' is NULL.
+ *
+ * @param[in] ptr The chunk to be resized.
+ *
+ * @param[in] size The new chunk size.
+ *
+ * @return The new array, NULL on error.
+ */
+void *talloc_realloc_size(const void *ctx, void *ptr, size_t size);
+#else
+#define talloc_realloc_size(ctx, ptr, size) _talloc_realloc(ctx, ptr, size, __location__)
+void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name);
+#endif
+
+/**
+ * @brief Provide a function version of talloc_realloc_size.
+ *
+ * This is a non-macro version of talloc_realloc(), which is useful as
+ * libraries sometimes want a realloc function pointer. A realloc()
+ * implementation encapsulates the functionality of malloc(), free() and
+ * realloc() in one call, which is why it is useful to be able to pass around
+ * a single function pointer.
+ *
+ * @param[in] context The parent context used if ptr is NULL.
+ *
+ * @param[in] ptr The chunk to be resized.
+ *
+ * @param[in] size The new chunk size.
+ *
+ * @return The new chunk, NULL on error.
+ */
+void *talloc_realloc_fn(const void *context, void *ptr, size_t size);
+
+/* @} ******************************************************************/
+
+/**
+ * @defgroup talloc_string The talloc string functions.
+ * @ingroup talloc
+ *
+ * talloc string allocation and manipulation functions.
+ * @{
+ */
+
+/**
+ * @brief Duplicate a string into a talloc chunk.
+ *
+ * This function is equivalent to:
+ *
+ * @code
+ * ptr = talloc_size(ctx, strlen(p)+1);
+ * if (ptr) memcpy(ptr, p, strlen(p)+1);
+ * @endcode
+ *
+ * This functions sets the name of the new pointer to the passed
+ * string. This is equivalent to:
+ *
+ * @code
+ * talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in] t The talloc context to hang the result off.
+ *
+ * @param[in] p The string you want to duplicate.
+ *
+ * @return The duplicated string, NULL on error.
+ */
char *talloc_strdup(const void *t, const char *p);
+
+/**
+ * @brief Append a string to given string and duplicate the result.
+ *
+ * @param[in] s The destination to append to.
+ *
+ * @param[in] a The string you want to append.
+ *
+ * @return The duplicated string, NULL on error.
+ *
+ * @see talloc_strdup()
+ */
char *talloc_strdup_append(char *s, const char *a);
+
+/**
+ * @brief Append a string to a given buffer and duplicate the result.
+ *
+ * @param[in] s The destination buffer to append to.
+ *
+ * @param[in] a The string you want to append.
+ *
+ * @return The duplicated string, NULL on error.
+ *
+ * @see talloc_strdup()
+ */
char *talloc_strdup_append_buffer(char *s, const char *a);
+/**
+ * @brief Duplicate a length-limited string into a talloc chunk.
+ *
+ * This function is the talloc equivalent of the C library function strndup(3).
+ *
+ * This functions sets the name of the new pointer to the passed string. This is
+ * equivalent to:
+ *
+ * @code
+ * talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in] t The talloc context to hang the result off.
+ *
+ * @param[in] p The string you want to duplicate.
+ *
+ * @param[in] n The maximum string length to duplicate.
+ *
+ * @return The duplicated string, NULL on error.
+ */
char *talloc_strndup(const void *t, const char *p, size_t n);
+
+/**
+ * @brief Append at most n characters of a string to given string and duplicate
+ * the result.
+ *
+ * @param[in] s The destination string to append to.
+ *
+ * @param[in] a The source string you want to append.
+ *
+ * @param[in] n The number of characters you want to append from the
+ * string.
+ *
+ * @return The duplicated string, NULL on error.
+ *
+ * @see talloc_strndup()
+ */
char *talloc_strndup_append(char *s, const char *a, size_t n);
+
+/**
+ * @brief Append at most n characters of a string to given buffer and duplicate
+ * the result.
+ *
+ * @param[in] s The destination buffer to append to.
+ *
+ * @param[in] a The source string you want to append.
+ *
+ * @param[in] n The number of characters you want to append from the
+ * string.
+ *
+ * @return The duplicated string, NULL on error.
+ *
+ * @see talloc_strndup()
+ */
char *talloc_strndup_append_buffer(char *s, const char *a, size_t n);
+/**
+ * @brief Format a string given a va_list.
+ *
+ * This function is the talloc equivalent of the C library function
+ * vasprintf(3).
+ *
+ * This functions sets the name of the new pointer to the new string. This is
+ * equivalent to:
+ *
+ * @code
+ * talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in] t The talloc context to hang the result off.
+ *
+ * @param[in] fmt The format string.
+ *
+ * @param[in] ap The parameters used to fill fmt.
+ *
+ * @return The formatted string, NULL on error.
+ */
char *talloc_vasprintf(const void *t, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
+
+/**
+ * @brief Format a string given a va_list and append it to the given destination
+ * string.
+ *
+ * @param[in] s The destination string to append to.
+ *
+ * @param[in] fmt The format string.
+ *
+ * @param[in] ap The parameters used to fill fmt.
+ *
+ * @return The formatted string, NULL on error.
+ *
+ * @see talloc_vasprintf()
+ */
char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
+
+/**
+ * @brief Format a string given a va_list and append it to the given destination
+ * buffer.
+ *
+ * @param[in] s The destination buffer to append to.
+ *
+ * @param[in] fmt The format string.
+ *
+ * @param[in] ap The parameters used to fill fmt.
+ *
+ * @return The formatted string, NULL on error.
+ *
+ * @see talloc_vasprintf()
+ */
char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
+/**
+ * @brief Format a string.
+ *
+ * This function is the talloc equivalent of the C library function asprintf(3).
+ *
+ * This functions sets the name of the new pointer to the new string. This is
+ * equivalent to:
+ *
+ * @code
+ * talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in] t The talloc context to hang the result off.
+ *
+ * @param[in] fmt The format string.
+ *
+ * @param[in] ... The parameters used to fill fmt.
+ *
+ * @return The formatted string, NULL on error.
+ */
char *talloc_asprintf(const void *t, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+
+/**
+ * @brief Append a formatted string to another string.
+ *
+ * This function appends the given formatted string to the given string. Use
+ * this variant when the string in the current talloc buffer may have been
+ * truncated in length.
+ *
+ * This functions sets the name of the new pointer to the new
+ * string. This is equivalent to:
+ *
+ * @code
+ * talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in] s The string to append to.
+ *
+ * @param[in] fmt The format string.
+ *
+ * @param[in] ... The parameters used to fill fmt.
+ *
+ * @return The formatted string, NULL on error.
+ */
char *talloc_asprintf_append(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+
+/**
+ * @brief Append a formatted string to another string.
+ *
+ * @param[in] s The string to append to
+ *
+ * @param[in] fmt The format string.
+ *
+ * @param[in] ... The parameters used to fill fmt.
+ *
+ * @return The formatted string, NULL on error.
+ */
char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+/* @} ******************************************************************/
+
+/**
+ * @defgroup talloc_debug The talloc debugging support functions
+ * @ingroup talloc
+ *
+ * To aid memory debugging, talloc contains routines to inspect the currently
+ * allocated memory hierarchy.
+ *
+ * @{
+ */
+
+/**
+ * @brief Walk a complete talloc hierarchy.
+ *
+ * This provides a more flexible report than talloc_report(). It
+ * will recursively call the callback for the entire tree of memory
+ * referenced by the pointer. References in the tree are passed with
+ * is_ref = 1 and the pointer that is referenced.
+ *
+ * You can pass NULL for the pointer, in which case a report is
+ * printed for the top level memory context, but only if
+ * talloc_enable_leak_report() or talloc_enable_leak_report_full()
+ * has been called.
+ *
+ * The recursion is stopped when depth >= max_depth.
+ * max_depth = -1 means only stop at leaf nodes.
+ *
+ * @param[in] ptr The talloc chunk.
+ *
+ * @param[in] depth Internal parameter to control recursion. Call with 0.
+ *
+ * @param[in] max_depth Maximum recursion level.
+ *
+ * @param[in] callback Function to be called on every chunk.
+ *
+ * @param[in] private_data Private pointer passed to callback.
+ */
+void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
+ void (*callback)(const void *ptr,
+ int depth, int max_depth,
+ int is_ref,
+ void *private_data),
+ void *private_data);
+
+/**
+ * @brief Print a talloc hierarchy.
+ *
+ * This provides a more flexible report than talloc_report(). It
+ * will let you specify the depth and max_depth.
+ *
+ * @param[in] ptr The talloc chunk.
+ *
+ * @param[in] depth Internal parameter to control recursion. Call with 0.
+ *
+ * @param[in] max_depth Maximum recursion level.
+ *
+ * @param[in] f The file handle to print to.
+ */
+void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f);
+
+/**
+ * @brief Print a summary report of all memory used by ptr.
+ *
+ * This provides a more detailed report than talloc_report(). It will
+ * recursively print the entire tree of memory referenced by the
+ * pointer. References in the tree are shown by giving the name of the
+ * pointer that is referenced.
+ *
+ * You can pass NULL for the pointer, in which case a report is printed
+ * for the top level memory context, but only if
+ * talloc_enable_leak_report() or talloc_enable_leak_report_full() has
+ * been called.
+ *
+ * @param[in] ptr The talloc chunk.
+ *
+ * @param[in] f The file handle to print to.
+ *
+ * Example:
+ * @code
+ * unsigned int *a, *b;
+ * a = talloc(NULL, unsigned int);
+ * b = talloc(a, unsigned int);
+ * fprintf(stderr, "Dumping memory tree for a:\n");
+ * talloc_report_full(a, stderr);
+ * @endcode
+ *
+ * @see talloc_report()
+ */
+void talloc_report_full(const void *ptr, FILE *f);
+
+/**
+ * @brief Print a summary report of all memory used by ptr.
+ *
+ * This function prints a summary report of all memory used by ptr. One line of
+ * report is printed for each immediate child of ptr, showing the total memory
+ * and number of blocks used by that child.
+ *
+ * You can pass NULL for the pointer, in which case a report is printed
+ * for the top level memory context, but only if talloc_enable_leak_report()
+ * or talloc_enable_leak_report_full() has been called.
+ *
+ * @param[in] ptr The talloc chunk.
+ *
+ * @param[in] f The file handle to print to.
+ *
+ * Example:
+ * @code
+ * unsigned int *a, *b;
+ * a = talloc(NULL, unsigned int);
+ * b = talloc(a, unsigned int);
+ * fprintf(stderr, "Summary of memory tree for a:\n");
+ * talloc_report(a, stderr);
+ * @endcode
+ *
+ * @see talloc_report_full()
+ */
+void talloc_report(const void *ptr, FILE *f);
+
+/**
+ * @brief Enable tracking the use of NULL memory contexts.
+ *
+ * This enables tracking of the NULL memory context without enabling leak
+ * reporting on exit. Useful for when you want to do your own leak
+ * reporting call via talloc_report_null_full();
+ */
+void talloc_enable_null_tracking(void);
+
+/**
+ * @brief Enable tracking the use of NULL memory contexts.
+ *
+ * This enables tracking of the NULL memory context without enabling leak
+ * reporting on exit. Useful for when you want to do your own leak
+ * reporting call via talloc_report_null_full();
+ */
+void talloc_enable_null_tracking_no_autofree(void);
+
+/**
+ * @brief Disable tracking of the NULL memory context.
+ *
+ * This disables tracking of the NULL memory context.
+ */
+void talloc_disable_null_tracking(void);
+
+/**
+ * @brief Enable leak report when a program exits.
+ *
+ * This enables calling of talloc_report(NULL, stderr) when the program
+ * exits. In Samba4 this is enabled by using the --leak-report command
+ * line option.
+ *
+ * For it to be useful, this function must be called before any other
+ * talloc function as it establishes a "null context" that acts as the
+ * top of the tree. If you don't call this function first then passing
+ * NULL to talloc_report() or talloc_report_full() won't give you the
+ * full tree printout.
+ *
+ * Here is a typical talloc report:
+ *
+ * @code
+ * talloc report on 'null_context' (total 267 bytes in 15 blocks)
+ * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks
+ * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks
+ * iconv(UTF8,CP850) contains 42 bytes in 2 blocks
+ * libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks
+ * iconv(CP850,UTF8) contains 42 bytes in 2 blocks
+ * iconv(UTF8,UTF-16LE) contains 45 bytes in 2 blocks
+ * iconv(UTF-16LE,UTF8) contains 45 bytes in 2 blocks
+ * @endcode
+ */
+void talloc_enable_leak_report(void);
+
+/**
+ * @brief Enable full leak report when a program exits.
+ *
+ * This enables calling of talloc_report_full(NULL, stderr) when the
+ * program exits. In Samba4 this is enabled by using the
+ * --leak-report-full command line option.
+ *
+ * For it to be useful, this function must be called before any other
+ * talloc function as it establishes a "null context" that acts as the
+ * top of the tree. If you don't call this function first then passing
+ * NULL to talloc_report() or talloc_report_full() won't give you the
+ * full tree printout.
+ *
+ * Here is a typical full report:
+ *
+ * @code
+ * full talloc report on 'root' (total 18 bytes in 8 blocks)
+ * p1 contains 18 bytes in 7 blocks (ref 0)
+ * r1 contains 13 bytes in 2 blocks (ref 0)
+ * reference to: p2
+ * p2 contains 1 bytes in 1 blocks (ref 1)
+ * x3 contains 1 bytes in 1 blocks (ref 0)
+ * x2 contains 1 bytes in 1 blocks (ref 0)
+ * x1 contains 1 bytes in 1 blocks (ref 0)
+ * @endcode
+ */
+void talloc_enable_leak_report_full(void);
+
+/* @} ******************************************************************/
+
void talloc_set_abort_fn(void (*abort_fn)(const char *reason));
void talloc_set_log_fn(void (*log_fn)(const char *message));
void talloc_set_log_stderr(void);
+#if TALLOC_DEPRECATED
+#define talloc_zero_p(ctx, type) talloc_zero(ctx, type)
+#define talloc_p(ctx, type) talloc(ctx, type)
+#define talloc_array_p(ctx, type, count) talloc_array(ctx, type, count)
+#define talloc_realloc_p(ctx, p, type, count) talloc_realloc(ctx, p, type, count)
+#define talloc_destroy(ctx) talloc_free(ctx)
+#define talloc_append_string(c, s, a) (s?talloc_strdup_append(s,a):talloc_strdup(c, a))
+#endif
+
+#ifndef TALLOC_MAX_DEPTH
+#define TALLOC_MAX_DEPTH 10000
+#endif
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
#endif
diff --git a/lib/talloc/talloc.mk b/lib/talloc/talloc.mk
deleted file mode 100644
index fc90f4d41e..0000000000
--- a/lib/talloc/talloc.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-TALLOC_OBJ = $(tallocdir)/talloc.o
-
-TALLOC_SHLIB = libtalloc.$(SHLIBEXT)
-TALLOC_SOLIB = libtalloc.$(SHLIBEXT).$(TALLOC_VERSION)
-TALLOC_SONAME = libtalloc.$(SHLIBEXT).$(TALLOC_VERSION_MAJOR)
-TALLOC_STLIB = libtalloc.a
-
-all:: $(TALLOC_STLIB) $(TALLOC_SOLIB) testsuite
-
-testsuite:: $(LIBOBJ) testsuite.o testsuite_main.o
- $(CC) $(CFLAGS) -o testsuite testsuite.o testsuite_main.o $(LIBOBJ) $(LIBS)
-
-$(TALLOC_STLIB): $(LIBOBJ)
- ar -rv $@ $(LIBOBJ)
- @-ranlib $@
-
-install:: all
- ${INSTALLCMD} -d $(DESTDIR)$(libdir)
- ${INSTALLCMD} -d $(DESTDIR)$(libdir)/pkgconfig
- ${INSTALLCMD} -m 755 $(TALLOC_STLIB) $(DESTDIR)$(libdir)
- ${INSTALLCMD} -m 755 $(TALLOC_SOLIB) $(DESTDIR)$(libdir)
- ${INSTALLCMD} -d $(DESTDIR)${includedir}
- ${INSTALLCMD} -m 644 $(srcdir)/talloc.h $(DESTDIR)$(includedir)
- ${INSTALLCMD} -m 644 talloc.pc $(DESTDIR)$(libdir)/pkgconfig
- if [ -f talloc.3 ];then ${INSTALLCMD} -d $(DESTDIR)$(mandir)/man3; fi
- if [ -f talloc.3 ];then ${INSTALLCMD} -m 644 talloc.3 $(DESTDIR)$(mandir)/man3; fi
- which swig >/dev/null 2>&1 && ${INSTALLCMD} -d $(DESTDIR)`swig -swiglib` || true
- which swig >/dev/null 2>&1 && ${INSTALLCMD} -m 644 talloc.i $(DESTDIR)`swig -swiglib` || true
- rm -f $(DESTDIR)$(libdir)/$(TALLOC_SONAME)
- ln -s $(TALLOC_SOLIB) $(DESTDIR)$(libdir)/$(TALLOC_SONAME)
- rm -f $(DESTDIR)$(libdir)/$(TALLOC_SHLIB)
- ln -s $(TALLOC_SOLIB) $(DESTDIR)$(libdir)/$(TALLOC_SHLIB)
-
-doc:: talloc.3 talloc.3.html
-
-clean::
- rm -f *~ $(LIBOBJ) $(TALLOC_SOLIB) $(TALLOC_STLIB) testsuite testsuite.o testsuite_main.o *.gc?? talloc.3 talloc.3.html
- rm -fr abi
- rm -f talloc.exports.sort talloc.exports.check talloc.exports.check.sort
- rm -f talloc.signatures.sort talloc.signatures.check talloc.signatures.check.sort
-
-test:: testsuite
- ./testsuite
-
-abi_checks::
- @echo ABI checks:
- @./script/abi_checks.sh talloc talloc.h
-
-test:: abi_checks
-
-gcov::
- gcov talloc.c
diff --git a/lib/talloc/talloc.pc.in b/lib/talloc/talloc.pc.in
index 5ce2109866..437281a69a 100644
--- a/lib/talloc/talloc.pc.in
+++ b/lib/talloc/talloc.pc.in
@@ -6,6 +6,6 @@ includedir=@includedir@
Name: talloc
Description: A hierarchical pool based memory system with destructors
Version: @TALLOC_VERSION@
-Libs: -L${libdir} -ltalloc
+Libs: @LIB_RPATH@ -L${libdir} -ltalloc
Cflags: -I${includedir}
URL: http://talloc.samba.org/
diff --git a/lib/talloc/talloc.signatures b/lib/talloc/talloc.signatures
deleted file mode 100644
index f2868e8269..0000000000
--- a/lib/talloc/talloc.signatures
+++ /dev/null
@@ -1,62 +0,0 @@
-char *talloc_asprintf (const void *, const char *, ...);
-char *talloc_asprintf_append (char *, const char *, ...);
-char *talloc_asprintf_append_buffer (char *, const char *, ...);
-char *talloc_strdup (const void *, const char *);
-char *talloc_strdup_append (char *, const char *);
-char *talloc_strdup_append_buffer (char *, const char *);
-char *talloc_strndup (const void *, const char *, size_t);
-char *talloc_strndup_append (char *, const char *, size_t);
-char *talloc_strndup_append_buffer (char *, const char *, size_t);
-char *talloc_vasprintf (const void *, const char *, va_list);
-char *talloc_vasprintf_append (char *, const char *, va_list);
-char *talloc_vasprintf_append_buffer (char *, const char *, va_list);
-const char *talloc_get_name (const void *);
-const char *talloc_parent_name (const void *);
-const char *talloc_set_name (const void *, const char *, ...);
-int _talloc_free (void *, const char *);
-int talloc_increase_ref_count (const void *);
-int talloc_is_parent (const void *, const void *);
-int talloc_unlink (const void *, void *);
-int talloc_version_major (void);
-int talloc_version_minor (void);
-size_t talloc_get_size (const void *);
-size_t talloc_reference_count (const void *);
-size_t talloc_total_blocks (const void *);
-size_t talloc_total_size (const void *);
-void *_talloc (const void *, size_t);
-void *_talloc_array (const void *, size_t, unsigned int, const char *);
-void *_talloc_get_type_abort (const void *, const char *, const char *);
-void *_talloc_memdup (const void *, const void *, size_t, const char *);
-void *_talloc_move (const void *, const void *);
-void *_talloc_realloc (const void *, void *, size_t, const char *);
-void *_talloc_realloc_array (const void *, void *, size_t, unsigned int, const char *);
-void *_talloc_reference_loc (const void *, const void *, const char *);
-void *_talloc_steal_loc (const void *, const void *, const char *);
-void *_talloc_zero (const void *, size_t, const char *);
-void *_talloc_zero_array (const void *, size_t, unsigned int, const char *);
-void *talloc_autofree_context (void);
-void *talloc_check_name (const void *, const char *);
-void *talloc_find_parent_byname (const void *, const char *);
-void *talloc_init (const char *, ...);
-void *talloc_named (const void *, size_t, const char *, ...);
-void *talloc_named_const (const void *, size_t, const char *);
-void *talloc_parent (const void *);
-void *talloc_pool (const void *, size_t);
-void *talloc_realloc_fn (const void *, void *, size_t);
-void *talloc_reparent (const void *, const void *, const void *);
-void _talloc_set_destructor (const void *, int (*) (void *));
-void talloc_disable_null_tracking (void);
-void talloc_enable_leak_report (void);
-void talloc_enable_leak_report_full (void);
-void talloc_enable_null_tracking (void);
-void talloc_enable_null_tracking_no_autofree (void);
-void talloc_free_children (void *);
-void talloc_report (const void *, FILE *);
-void talloc_report_depth_cb (const void *, int, int, void (*) (const void *, int, int, int, void *), void *);
-void talloc_report_depth_file (const void *, int, int, FILE *);
-void talloc_report_full (const void *, FILE *);
-void talloc_set_abort_fn (void (*) (const char *));
-void talloc_set_log_fn (void (*) (const char *));
-void talloc_set_log_stderr (void);
-void talloc_set_name_const (const void *, const char *);
-void talloc_show_parents (const void *, FILE *);
diff --git a/lib/talloc/talloc_guide.txt b/lib/talloc/talloc_guide.txt
index 01de806662..f29b1d699a 100644
--- a/lib/talloc/talloc_guide.txt
+++ b/lib/talloc/talloc_guide.txt
@@ -74,6 +74,19 @@ without proper synchronization ;
shouldn't be used by several threads simultaneously without
synchronization.
+talloc and shared objects
+-------------------------
+
+talloc can be used in shared objects. Special care needs to be taken
+to never use talloc_autofree_context() in code that might be loaded
+with dlopen() and unloaded with dlclose(), as talloc_autofree_context()
+internally uses atexit(3). Some platforms like modern Linux handle
+this fine, but for example FreeBSD does not deal well with dlopen()
+and atexit() used simultaneously: dlclose() does not clean up the list
+of atexit handlers, so when the program exits, the code that was
+registered from within talloc_autofree_context() is gone and the
+program crashes at exit.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
(type *)talloc(const void *context, type);
@@ -117,10 +130,11 @@ children. You can call talloc_free() on any pointer returned by
talloc().
The return value of talloc_free() indicates success or failure, with 0
-returned for success and -1 for failure. The only possible failure
-condition is if the pointer had a destructor attached to it and the
-destructor returned -1. See talloc_set_destructor() for details on
-destructors.
+returned for success and -1 for failure. A possible failure condition
+is if the pointer had a destructor attached to it and the destructor
+returned -1. See talloc_set_destructor() for details on
+destructors. Likewise, if "ptr" is NULL, then the function will make
+no modifications and return -1.
If this pointer has an additional parent when talloc_free() is called
then the memory is not actually released, but instead the most
@@ -652,7 +666,7 @@ string. This is equivalent to::
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-((type *)talloc_array(const void *ctx, type, uint_t count);
+((type *)talloc_array(const void *ctx, type, unsigned int count);
The talloc_array() macro is equivalent to::
@@ -663,14 +677,14 @@ returning NULL if the multiply overflows.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-void *talloc_array_size(const void *ctx, size_t size, uint_t count);
+void *talloc_array_size(const void *ctx, size_t size, unsigned int count);
The talloc_array_size() function is useful when the type is not
known. It operates in the same way as talloc_array(), but takes a size
instead of a type.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-(typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, uint_t count);
+(typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, unsigned int count);
The talloc_ptrtype() macro should be used when you have a pointer to an array
and want to allocate memory of an array to point at with this pointer. When compiling
diff --git a/lib/talloc/talloc_testsuite.h b/lib/talloc/talloc_testsuite.h
new file mode 100644
index 0000000000..acb9701041
--- /dev/null
+++ b/lib/talloc/talloc_testsuite.h
@@ -0,0 +1,7 @@
+#ifndef __LIB_TALLOC_TALLOC_TESTSUITE_H__
+#define __LIB_TALLOC_TALLOC_TESTSUITE_H__
+
+struct torture_context;
+bool torture_local_talloc(struct torture_context *tctx);
+
+#endif
diff --git a/lib/talloc/testsuite.c b/lib/talloc/testsuite.c
index 08aa20863a..90417c6ade 100644
--- a/lib/talloc/testsuite.c
+++ b/lib/talloc/testsuite.c
@@ -25,7 +25,9 @@
#include "replace.h"
#include "system/time.h"
-#include "talloc.h"
+#include <talloc.h>
+
+#include "talloc_testsuite.h"
static struct timeval timeval_current(void)
{
@@ -101,6 +103,7 @@ static double timeval_elapsed(struct timeval *tv)
static unsigned int test_abort_count;
+#if 0
static void test_abort_fn(const char *reason)
{
printf("# test_abort_fn(%s)\n", reason);
@@ -112,6 +115,7 @@ static void test_abort_start(void)
test_abort_count = 0;
talloc_set_abort_fn(test_abort_fn);
}
+#endif
static void test_abort_stop(void)
{
@@ -1119,19 +1123,204 @@ static bool test_pool(void)
{
void *pool;
void *p1, *p2, *p3, *p4;
+ void *p2_2;
pool = talloc_pool(NULL, 1024);
p1 = talloc_size(pool, 80);
+ memset(p1, 0x11, talloc_get_size(p1));
p2 = talloc_size(pool, 20);
+ memset(p2, 0x11, talloc_get_size(p2));
p3 = talloc_size(p1, 50);
+ memset(p3, 0x11, talloc_get_size(p3));
p4 = talloc_size(p3, 1000);
+ memset(p4, 0x11, talloc_get_size(p4));
+
+#if 1 /* this relies on ALWAYS_REALLOC == 0 in talloc.c */
+ p2_2 = talloc_realloc_size(pool, p2, 20+1);
+ torture_assert("pool realloc 20+1", p2_2 == p2, "failed: pointer changed");
+ memset(p2, 0x11, talloc_get_size(p2));
+ p2_2 = talloc_realloc_size(pool, p2, 20-1);
+ torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed");
+ memset(p2, 0x11, talloc_get_size(p2));
+ p2_2 = talloc_realloc_size(pool, p2, 20-1);
+ torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed");
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ talloc_free(p3);
+
+ /* this should reclaim the memory of p4 and p3 */
+ p2_2 = talloc_realloc_size(pool, p2, 400);
+ torture_assert("pool realloc 400", p2_2 == p2, "failed: pointer changed");
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ talloc_free(p1);
+
+ /* this should reclaim the memory of p1 */
+ p2_2 = talloc_realloc_size(pool, p2, 800);
+ torture_assert("pool realloc 800", p2_2 == p1, "failed: pointer not changed");
+ p2 = p2_2;
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ /* this should do a malloc */
+ p2_2 = talloc_realloc_size(pool, p2, 1800);
+ torture_assert("pool realloc 1800", p2_2 != p2, "failed: pointer not changed");
+ p2 = p2_2;
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ /* this should reclaim the memory from the pool */
+ p3 = talloc_size(pool, 80);
+ torture_assert("pool alloc 80", p3 == p1, "failed: pointer changed");
+ memset(p3, 0x11, talloc_get_size(p3));
+
+ talloc_free(p2);
+ talloc_free(p3);
+
+ p1 = talloc_size(pool, 80);
+ memset(p1, 0x11, talloc_get_size(p1));
+ p2 = talloc_size(pool, 20);
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ talloc_free(p1);
+
+ p2_2 = talloc_realloc_size(pool, p2, 20-1);
+ torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed");
+ memset(p2, 0x11, talloc_get_size(p2));
+ p2_2 = talloc_realloc_size(pool, p2, 20-1);
+ torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed");
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ /* this should do a malloc */
+ p2_2 = talloc_realloc_size(pool, p2, 1800);
+ torture_assert("pool realloc 1800", p2_2 != p2, "failed: pointer not changed");
+ p2 = p2_2;
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ /* this should reclaim the memory from the pool */
+ p3 = talloc_size(pool, 800);
+ torture_assert("pool alloc 800", p3 == p1, "failed: pointer changed");
+ memset(p3, 0x11, talloc_get_size(p3));
+
+#endif /* this relies on ALWAYS_REALLOC == 0 in talloc.c */
+
+ talloc_free(pool);
+
+ return true;
+}
+
+static bool test_pool_steal(void)
+{
+ void *root;
+ void *pool;
+ void *p1, *p2;
+ void *p1_2, *p2_2;
+ size_t hdr;
+ size_t ofs1, ofs2;
+
+ root = talloc_new(NULL);
+ pool = talloc_pool(root, 1024);
+
+ p1 = talloc_size(pool, 4 * 16);
+ torture_assert("pool allocate 4 * 16", p1 != NULL, "failed ");
+ memset(p1, 0x11, talloc_get_size(p1));
+ p2 = talloc_size(pool, 4 * 16);
+ torture_assert("pool allocate 4 * 16", p2 > p1, "failed: !(p2 > p1) ");
+ memset(p2, 0x11, talloc_get_size(p2));
+
+ ofs1 = PTR_DIFF(p2, p1);
+ hdr = ofs1 - talloc_get_size(p1);
+
+ talloc_steal(root, p1);
+ talloc_steal(root, p2);
talloc_free(pool);
+ p1_2 = p1;
+
+#if 1 /* this relies on ALWAYS_REALLOC == 0 in talloc.c */
+ p1_2 = talloc_realloc_size(root, p1, 5 * 16);
+ torture_assert("pool realloc 5 * 16", p1_2 > p2, "failed: pointer not changed");
+ memset(p1_2, 0x11, talloc_get_size(p1_2));
+ ofs1 = PTR_DIFF(p1_2, p2);
+ ofs2 = talloc_get_size(p2) + hdr;
+
+ torture_assert("pool realloc ", ofs1 == ofs2, "failed: pointer offset unexpected");
+
+ p2_2 = talloc_realloc_size(root, p2, 3 * 16);
+ torture_assert("pool realloc 5 * 16", p2_2 == p2, "failed: pointer changed");
+ memset(p2_2, 0x11, talloc_get_size(p2_2));
+#endif /* this relies on ALWAYS_REALLOC == 0 in talloc.c */
+
+ talloc_free(p1_2);
+
+ p2_2 = p2;
+
+#if 1 /* this relies on ALWAYS_REALLOC == 0 in talloc.c */
+ /* now we should reclaim the full pool */
+ p2_2 = talloc_realloc_size(root, p2, 8 * 16);
+ torture_assert("pool realloc 8 * 16", p2_2 == p1, "failed: pointer not expected");
+ p2 = p2_2;
+ memset(p2_2, 0x11, talloc_get_size(p2_2));
+
+ /* now we malloc and free the full pool space */
+ p2_2 = talloc_realloc_size(root, p2, 2 * 1024);
+ torture_assert("pool realloc 2 * 1024", p2_2 != p1, "failed: pointer not expected");
+ memset(p2_2, 0x11, talloc_get_size(p2_2));
+
+#endif /* this relies on ALWAYS_REALLOC == 0 in talloc.c */
+
+ talloc_free(p2_2);
+
+ talloc_free(root);
+
+ return true;
+}
+
+static bool test_free_ref_null_context(void)
+{
+ void *p1, *p2, *p3;
+ int ret;
+
+ talloc_disable_null_tracking();
+ p1 = talloc_new(NULL);
+ p2 = talloc_new(NULL);
+
+ p3 = talloc_reference(p2, p1);
+ torture_assert("reference", p3 == p1, "failed: reference on null");
+
+ ret = talloc_free(p1);
+ torture_assert("ref free with null parent", ret == 0, "failed: free with null parent");
+ talloc_free(p2);
+
+ talloc_enable_null_tracking_no_autofree();
+ p1 = talloc_new(NULL);
+ p2 = talloc_new(NULL);
+
+ p3 = talloc_reference(p2, p1);
+ torture_assert("reference", p3 == p1, "failed: reference on null");
+
+ ret = talloc_free(p1);
+ torture_assert("ref free with null tracked parent", ret == 0, "failed: free with null parent");
+ talloc_free(p2);
+
return true;
}
+static bool test_rusty(void)
+{
+ void *root;
+ const char *p1;
+
+ talloc_enable_null_tracking();
+ root = talloc_new(NULL);
+ p1 = talloc_strdup(root, "foo");
+ talloc_increase_ref_count(p1);
+ talloc_report_full(root, stdout);
+ talloc_free(root);
+ return true;
+}
+
+
static void test_reset(void)
{
talloc_set_log_fn(test_log_stdout);
@@ -1140,7 +1329,6 @@ static void test_reset(void)
talloc_enable_null_tracking_no_autofree();
}
-struct torture_context;
bool torture_local_talloc(struct torture_context *tctx)
{
bool ret = true;
@@ -1185,6 +1373,12 @@ bool torture_local_talloc(struct torture_context *tctx)
ret &= test_talloc_free_in_destructor();
test_reset();
ret &= test_pool();
+ test_reset();
+ ret &= test_pool_steal();
+ test_reset();
+ ret &= test_free_ref_null_context();
+ test_reset();
+ ret &= test_rusty();
if (ret) {
test_reset();
@@ -1194,6 +1388,6 @@ bool torture_local_talloc(struct torture_context *tctx)
ret &= test_autofree();
test_reset();
-
+ talloc_disable_null_tracking();
return ret;
}
diff --git a/lib/talloc/testsuite_main.c b/lib/talloc/testsuite_main.c
index 1b51333278..50ce0f8e3b 100644
--- a/lib/talloc/testsuite_main.c
+++ b/lib/talloc/testsuite_main.c
@@ -25,8 +25,7 @@
#include "replace.h"
-struct torture_context;
-bool torture_local_talloc(struct torture_context *tctx);
+#include "talloc_testsuite.h"
int main(void)
{
diff --git a/lib/talloc/wscript b/lib/talloc/wscript
new file mode 100644
index 0000000000..c96c69cdd9
--- /dev/null
+++ b/lib/talloc/wscript
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+
+APPNAME = 'talloc'
+VERSION = '2.0.5'
+
+
+blddir = 'bin'
+
+import Logs
+import os, sys
+
+# find the buildtools directory
+srcdir = '.'
+while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5:
+ srcdir = '../' + srcdir
+sys.path.insert(0, srcdir + '/buildtools/wafsamba')
+
+import sys
+sys.path.insert(0, srcdir+"/buildtools/wafsamba")
+import wafsamba, samba_dist, Options
+
+# setup what directories to put in a tarball
+samba_dist.DIST_DIRS('lib/talloc:. lib/replace:lib/replace buildtools:buildtools')
+
+
+def set_options(opt):
+ opt.BUILTIN_DEFAULT('replace')
+ opt.PRIVATE_EXTENSION_DEFAULT('talloc', noextension='talloc')
+ opt.RECURSE('lib/replace')
+ opt.add_option('--enable-talloc-compat1',
+ help=("Build talloc 1.x.x compat library [False]"),
+ action="store_true", dest='TALLOC_COMPAT1', default=False)
+ if opt.IN_LAUNCH_DIR():
+ opt.add_option('--disable-python',
+ help=("disable the pytalloc module"),
+ action="store_true", dest='disable_python', default=False)
+
+
+def configure(conf):
+ conf.RECURSE('lib/replace')
+
+ conf.env.standalone_talloc = conf.IN_LAUNCH_DIR()
+
+ conf.env.disable_python = getattr(Options.options, 'disable_python', False)
+
+ if not conf.env.standalone_talloc:
+ if conf.CHECK_BUNDLED_SYSTEM('talloc', minversion=VERSION,
+ implied_deps='replace'):
+ conf.define('USING_SYSTEM_TALLOC', 1)
+ if conf.CHECK_BUNDLED_SYSTEM('pytalloc-util', minversion=VERSION,
+ implied_deps='talloc replace'):
+ conf.define('USING_SYSTEM_PYTALLOC_UTIL', 1)
+
+ conf.env.TALLOC_COMPAT1 = Options.options.TALLOC_COMPAT1
+
+ conf.CHECK_XSLTPROC_MANPAGES()
+
+ if not conf.env.disable_python:
+ # also disable if we don't have the python libs installed
+ conf.check_tool('python')
+ conf.check_python_version((2,4,2))
+ conf.SAMBA_CHECK_PYTHON_HEADERS(mandatory=False)
+ if not conf.env.HAVE_PYTHON_H:
+ Logs.warn('Disabling pytalloc-util as python devel libs not found')
+ conf.env.disable_python = True
+
+ conf.SAMBA_CONFIG_H()
+
+
+def build(bld):
+ bld.RECURSE('lib/replace')
+
+ if bld.env.standalone_talloc:
+ bld.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig'
+ bld.env.TALLOC_VERSION = VERSION
+ bld.PKG_CONFIG_FILES('talloc.pc', vnum=VERSION)
+ private_library = False
+
+ # should we also install the symlink to libtalloc1.so here?
+ bld.SAMBA_LIBRARY('talloc-compat1-%s' % (VERSION),
+ 'compat/talloc_compat1.c',
+ public_deps='talloc',
+ soname='libtalloc.so.1',
+ enabled=bld.env.TALLOC_COMPAT1)
+
+ if not bld.env.disable_python:
+ bld.PKG_CONFIG_FILES('pytalloc-util.pc', vnum=VERSION)
+ else:
+ private_library = True
+
+ if not bld.CONFIG_SET('USING_SYSTEM_TALLOC'):
+
+ bld.SAMBA_LIBRARY('talloc',
+ 'talloc.c',
+ deps='replace',
+ abi_directory='ABI',
+ abi_match='talloc* _talloc*',
+ hide_symbols=True,
+ vnum=VERSION,
+ public_headers='talloc.h',
+ public_headers_install=not private_library,
+ private_library=private_library,
+ manpages='talloc.3')
+
+ if not bld.CONFIG_SET('USING_SYSTEM_PYTALLOC_UTIL') and not bld.env.disable_python:
+
+ bld.SAMBA_LIBRARY('pytalloc-util',
+ source='pytalloc_util.c',
+ public_deps='talloc',
+ pyext=True,
+ vnum=VERSION,
+ private_library=private_library,
+ public_headers='pytalloc.h'
+ )
+ bld.SAMBA_PYTHON('pytalloc',
+ 'pytalloc.c',
+ deps='talloc pytalloc-util',
+ enabled=True,
+ realname='talloc.so')
+
+ if not getattr(bld.env, '_SAMBA_BUILD_', 0) == 4:
+ # s4 already has the talloc testsuite builtin to smbtorture
+ bld.SAMBA_BINARY('talloc_testsuite',
+ 'testsuite_main.c testsuite.c',
+ deps='talloc',
+ install=False)
+
+
+def test(ctx):
+ '''run talloc testsuite'''
+ import Utils, samba_utils
+ cmd = os.path.join(Utils.g_module.blddir, 'talloc_testsuite')
+ ret = samba_utils.RUN_COMMAND(cmd)
+ print("testsuite returned %d" % ret)
+ sys.exit(ret)
+
+def dist():
+ '''makes a tarball for distribution'''
+ samba_dist.dist()
+
+def reconfigure(ctx):
+ '''reconfigure if config scripts have changed'''
+ import samba_utils
+ samba_utils.reconfigure(ctx)
+
+
+def pydoctor(ctx):
+ '''build python apidocs'''
+ cmd='PYTHONPATH=bin/python pydoctor --project-name=talloc --project-url=http://talloc.samba.org/ --make-html --docformat=restructuredtext --introspect-c-modules --add-module bin/python/talloc.*'
+ print("Running: %s" % cmd)
+ os.system(cmd)
diff --git a/lib/tdb/ABI/tdb-1.2.1.sigs b/lib/tdb/ABI/tdb-1.2.1.sigs
new file mode 100644
index 0000000000..84f200745e
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.1.sigs
@@ -0,0 +1,95 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_alloc_read: unsigned char *(struct tdb_context *, tdb_off_t, tdb_len_t)
+tdb_allocate: tdb_off_t (struct tdb_context *, tdb_len_t, struct tdb_record *)
+tdb_allrecord_lock: int (struct tdb_context *, int, enum tdb_lock_flags, bool)
+tdb_allrecord_unlock: int (struct tdb_context *, int, bool)
+tdb_allrecord_upgrade: int (struct tdb_context *)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_brlock: int (struct tdb_context *, int, tdb_off_t, size_t, enum tdb_lock_flags)
+tdb_brunlock: int (struct tdb_context *, int, tdb_off_t, size_t)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_convert: void *(void *, uint32_t)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_do_delete: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_expand: int (struct tdb_context *, tdb_off_t)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_find_lock_hash: tdb_off_t (struct tdb_context *, TDB_DATA, uint32_t, int, struct tdb_record *)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_free: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_have_extra_locks: bool (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_io_init: void (struct tdb_context *)
+tdb_lock: int (struct tdb_context *, int, int)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lock_record: int (struct tdb_context *, tdb_off_t)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_mmap: void (struct tdb_context *)
+tdb_munmap: int (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_needs_recovery: bool (struct tdb_context *)
+tdb_nest_lock: int (struct tdb_context *, uint32_t, int, enum tdb_lock_flags)
+tdb_nest_unlock: int (struct tdb_context *, uint32_t, int, bool)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_ofs_read: int (struct tdb_context *, tdb_off_t, tdb_off_t *)
+tdb_ofs_write: int (struct tdb_context *, tdb_off_t, tdb_off_t *)
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_data: int (struct tdb_context *, TDB_DATA, tdb_off_t, tdb_len_t, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_rec_free_read: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_rec_read: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_rec_write: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_release_transaction_locks: void (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_lock: int (struct tdb_context *, int, enum tdb_lock_flags)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_recover: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_unlock: int (struct tdb_context *, int)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlock_record: int (struct tdb_context *, tdb_off_t)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
+tdb_write_lock_record: int (struct tdb_context *, tdb_off_t)
+tdb_write_unlock_record: int (struct tdb_context *, tdb_off_t)
diff --git a/lib/tdb/ABI/tdb-1.2.2.sigs b/lib/tdb/ABI/tdb-1.2.2.sigs
new file mode 100644
index 0000000000..043790d27e
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.2.sigs
@@ -0,0 +1,60 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/ABI/tdb-1.2.3.sigs b/lib/tdb/ABI/tdb-1.2.3.sigs
new file mode 100644
index 0000000000..043790d27e
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.3.sigs
@@ -0,0 +1,60 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/ABI/tdb-1.2.4.sigs b/lib/tdb/ABI/tdb-1.2.4.sigs
new file mode 100644
index 0000000000..043790d27e
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.4.sigs
@@ -0,0 +1,60 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/ABI/tdb-1.2.5.sigs b/lib/tdb/ABI/tdb-1.2.5.sigs
new file mode 100644
index 0000000000..1e01f3ba24
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.5.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/ABI/tdb-1.2.6.sigs b/lib/tdb/ABI/tdb-1.2.6.sigs
new file mode 100644
index 0000000000..1e01f3ba24
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.6.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/ABI/tdb-1.2.7.sigs b/lib/tdb/ABI/tdb-1.2.7.sigs
new file mode 100644
index 0000000000..1e01f3ba24
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.7.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/ABI/tdb-1.2.8.sigs b/lib/tdb/ABI/tdb-1.2.8.sigs
new file mode 100644
index 0000000000..1e01f3ba24
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.8.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/ABI/tdb-1.2.9.sigs b/lib/tdb/ABI/tdb-1.2.9.sigs
new file mode 100644
index 0000000000..9e4149b4e5
--- /dev/null
+++ b/lib/tdb/ABI/tdb-1.2.9.sigs
@@ -0,0 +1,62 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/lib/tdb/Makefile b/lib/tdb/Makefile
new file mode 100644
index 0000000000..4c28653446
--- /dev/null
+++ b/lib/tdb/Makefile
@@ -0,0 +1,66 @@
+# simple makefile wrapper to run waf
+
+WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf
+
+all:
+ $(WAF) build
+
+install:
+ $(WAF) install
+
+uninstall:
+ $(WAF) uninstall
+
+test:
+ $(WAF) test $(TEST_OPTIONS)
+
+testenv:
+ $(WAF) test --testenv $(TEST_OPTIONS)
+
+quicktest:
+ $(WAF) test --quick $(TEST_OPTIONS)
+
+dist:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) dist
+
+distcheck:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) distcheck
+
+clean:
+ $(WAF) clean
+
+distclean:
+ $(WAF) distclean
+
+reconfigure: configure
+ $(WAF) reconfigure
+
+show_waf_options:
+ $(WAF) --help
+
+# some compatibility make targets
+everything: all
+
+testsuite: all
+
+check: test
+
+torture: all
+
+# this should do an install as well, once install is finished
+installcheck: test
+
+etags:
+ $(WAF) etags
+
+ctags:
+ $(WAF) ctags
+
+pydoctor:
+ $(WAF) pydoctor
+
+bin/%:: FORCE
+ $(WAF) --targets=`basename $@`
+FORCE:
diff --git a/lib/tdb/Makefile.in b/lib/tdb/Makefile.in
deleted file mode 100644
index dc22ee3fea..0000000000
--- a/lib/tdb/Makefile.in
+++ /dev/null
@@ -1,79 +0,0 @@
-#!gmake
-#
-# Makefile for tdb directory
-#
-
-CC = @CC@
-prefix = @prefix@
-exec_prefix = @exec_prefix@
-bindir = @bindir@
-includedir = @includedir@
-libdir = @libdir@
-mandir = @mandir@
-VPATH = @srcdir@:@libreplacedir@
-srcdir = @srcdir@
-builddir = @builddir@
-sharedbuilddir = @sharedbuilddir@
-INSTALLCMD = @INSTALL@
-CPPFLAGS = @CPPFLAGS@ -I$(srcdir)/include -Iinclude
-CFLAGS = $(CPPFLAGS) @CFLAGS@
-LDFLAGS = @LDFLAGS@
-EXEEXT = @EXEEXT@
-SHLD = @SHLD@
-SHLD_FLAGS = @SHLD_FLAGS@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PICFLAG = @PICFLAG@
-SHLIBEXT = @SHLIBEXT@
-PYTHON = @PYTHON@
-PYTHON_CONFIG = @PYTHON_CONFIG@
-PYTHON_BUILD_TARGET = @PYTHON_BUILD_TARGET@
-PYTHON_INSTALL_TARGET = @PYTHON_INSTALL_TARGET@
-PYTHON_CHECK_TARGET = @PYTHON_CHECK_TARGET@
-LIB_PATH_VAR = @LIB_PATH_VAR@
-tdbdir = @tdbdir@
-
-EXTRA_TARGETS = @DOC_TARGET@
-
-TDB_OBJ = @TDB_OBJ@ @LIBREPLACEOBJ@
-
-SONAMEFLAG = @SONAMEFLAG@
-VERSIONSCRIPT = @VERSIONSCRIPT@
-EXPORTSFILE = @EXPORTSFILE@
-
-XSLTPROC = @XSLTPROC@
-
-default: all
-
-include $(tdbdir)/tdb.mk
-include $(tdbdir)/rules.mk
-
-all:: showflags dirs $(PROGS) $(TDB_SOLIB) libtdb.a $(PYTHON_BUILD_TARGET) $(EXTRA_TARGETS)
-
-install:: all
-$(TDB_SOLIB): $(TDB_OBJ)
- $(SHLD) $(SHLD_FLAGS) -o $@ $(TDB_OBJ) $(VERSIONSCRIPT) $(EXPORTSFILE) $(SONAMEFLAG)$(TDB_SONAME)
-
-shared-build: all
- ${INSTALLCMD} -d $(sharedbuilddir)/lib
- ${INSTALLCMD} -m 644 libtdb.a $(sharedbuilddir)/lib
- ${INSTALLCMD} -m 755 $(TDB_SOLIB) $(sharedbuilddir)/lib
- ln -sf $(TDB_SOLIB) $(sharedbuilddir)/lib/$(TDB_SONAME)
- ln -sf $(TDB_SOLIB) $(sharedbuilddir)/lib/libtdb.so
- ${INSTALLCMD} -d $(sharedbuilddir)/include
- ${INSTALLCMD} -m 644 $(srcdir)/include/tdb.h $(sharedbuilddir)/include
-
-check: test
-
-test:: $(PYTHON_CHECK_TARGET)
-installcheck:: test install
-
-clean::
- rm -f *.o *.a */*.o
- rm -fr abi
-
-distclean:: clean
- rm -f config.log config.status include/config.h config.cache
- rm -f Makefile
-
-realdistclean:: distclean
- rm -f configure include/config.h.in
diff --git a/lib/tdb/aclocal.m4 b/lib/tdb/aclocal.m4
deleted file mode 100644
index 5605e476ba..0000000000
--- a/lib/tdb/aclocal.m4
+++ /dev/null
@@ -1 +0,0 @@
-m4_include(libreplace.m4)
diff --git a/lib/tdb/autogen.sh b/lib/tdb/autogen.sh
deleted file mode 100755
index bf84eeee19..0000000000
--- a/lib/tdb/autogen.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-rm -rf autom4te.cache
-rm -f configure config.h.in
-
-IPATHS="-I libreplace -I lib/replace -I ../libreplace -I ../replace"
-autoconf $IPATHS || exit 1
-autoheader $IPATHS || exit 1
-
-rm -rf autom4te.cache
-
-echo "Now run ./configure and then make."
-exit 0
-
diff --git a/lib/tdb/build_macros.m4 b/lib/tdb/build_macros.m4
deleted file mode 100644
index c036668cd1..0000000000
--- a/lib/tdb/build_macros.m4
+++ /dev/null
@@ -1,14 +0,0 @@
-AC_DEFUN(BUILD_WITH_SHARED_BUILD_DIR,
- [ AC_ARG_WITH([shared-build-dir],
- [AC_HELP_STRING([--with-shared-build-dir=DIR],
- [temporary build directory where libraries are installed [$srcdir/sharedbuild]])])
-
- sharedbuilddir="$srcdir/sharedbuild"
- if test x"$with_shared_build_dir" != x; then
- sharedbuilddir=$with_shared_build_dir
- CFLAGS="$CFLAGS -I$with_shared_build_dir/include"
- LDFLAGS="$LDFLAGS -L$with_shared_build_dir/lib"
- fi
- AC_SUBST(sharedbuilddir)
- ])
-
diff --git a/lib/tdb/common/check.c b/lib/tdb/common/check.c
index f0a15f801b..3387fbd098 100644
--- a/lib/tdb/common/check.c
+++ b/lib/tdb/common/check.c
@@ -28,8 +28,9 @@
static bool tdb_check_header(struct tdb_context *tdb, tdb_off_t *recovery)
{
struct tdb_header hdr;
+ uint32_t h1, h2;
- if (tdb->methods->tdb_read(tdb, 0, &hdr, sizeof(hdr), DOCONV()) == -1)
+ if (tdb->methods->tdb_read(tdb, 0, &hdr, sizeof(hdr), 0) == -1)
return false;
if (strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0)
goto corrupt;
@@ -38,7 +39,12 @@ static bool tdb_check_header(struct tdb_context *tdb, tdb_off_t *recovery)
if (hdr.version != TDB_VERSION)
goto corrupt;
- if (hdr.rwlocks != 0)
+ if (hdr.rwlocks != 0 && hdr.rwlocks != TDB_HASH_RWLOCK_MAGIC)
+ goto corrupt;
+
+ tdb_header_hash(tdb, &h1, &h2);
+ if (hdr.magic1_hash && hdr.magic2_hash &&
+ (hdr.magic1_hash != h1 || hdr.magic2_hash != h2))
goto corrupt;
if (hdr.hash_size == 0)
@@ -301,7 +307,22 @@ static bool tdb_check_free_record(struct tdb_context *tdb,
return true;
}
-int tdb_check(struct tdb_context *tdb,
+/* Slow, but should be very rare. */
+size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off)
+{
+ size_t len;
+
+ for (len = 0; off + len < tdb->map_size; len++) {
+ char c;
+ if (tdb->methods->tdb_read(tdb, off, &c, 1, 0))
+ return 0;
+ if (c != 0 && c != 0x42)
+ break;
+ }
+ return len;
+}
+
+_PUBLIC_ int tdb_check(struct tdb_context *tdb,
int (*check)(TDB_DATA key, TDB_DATA data, void *private_data),
void *private_data)
{
@@ -310,9 +331,18 @@ int tdb_check(struct tdb_context *tdb,
tdb_off_t off, recovery_start;
struct tdb_record rec;
bool found_recovery = false;
-
- if (tdb_lockall(tdb) == -1)
- return -1;
+ tdb_len_t dead;
+ bool locked;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return -1;
+ locked = true;
+ }
/* Make sure we know true size of the underlying file. */
tdb->methods->tdb_oob(tdb, tdb->map_size + 1, 1);
@@ -369,8 +399,23 @@ int tdb_check(struct tdb_context *tdb,
if (!tdb_check_free_record(tdb, off, &rec, hashes))
goto free;
break;
+ /* If we crash after ftruncate, we can get zeroes or fill. */
+ case TDB_RECOVERY_INVALID_MAGIC:
+ case 0x42424242:
+ if (recovery_start == off) {
+ found_recovery = true;
+ break;
+ }
+ dead = tdb_dead_space(tdb, off);
+ if (dead < sizeof(rec))
+ goto corrupt;
+
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Dead space at %d-%d (of %u)\n",
+ off, off + dead, tdb->map_size));
+ rec.rec_len = dead - sizeof(rec);
+ break;
case TDB_RECOVERY_MAGIC:
- case 0: /* Used for invalid (or in-progress) recovery area. */
if (recovery_start != off) {
TDB_LOG((tdb, TDB_DEBUG_ERROR,
"Unexpected recovery record at offset %d\n",
@@ -379,7 +424,8 @@ int tdb_check(struct tdb_context *tdb,
}
found_recovery = true;
break;
- default:
+ default: ;
+ corrupt:
tdb->ecode = TDB_ERR_CORRUPT;
TDB_LOG((tdb, TDB_DEBUG_ERROR,
"Bad magic 0x%x at offset %d\n",
@@ -405,19 +451,22 @@ int tdb_check(struct tdb_context *tdb,
/* We must have found recovery area if there was one. */
if (recovery_start != 0 && !found_recovery) {
TDB_LOG((tdb, TDB_DEBUG_ERROR,
- "Expected %s recovery area, got %s\n",
- recovery_start ? "a" : "no",
- found_recovery ? "one" : "none"));
+ "Expected a recovery area at %u\n",
+ recovery_start));
goto free;
}
free(hashes);
- tdb_unlockall(tdb);
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
return 0;
free:
free(hashes);
unlock:
- tdb_unlockall(tdb);
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
return -1;
}
diff --git a/lib/tdb/common/dump.c b/lib/tdb/common/dump.c
index bdcbfab139..67de04e37c 100644
--- a/lib/tdb/common/dump.c
+++ b/lib/tdb/common/dump.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -80,7 +80,7 @@ static int tdb_dump_chain(struct tdb_context *tdb, int i)
return tdb_unlock(tdb, i, F_WRLCK);
}
-void tdb_dump_all(struct tdb_context *tdb)
+_PUBLIC_ void tdb_dump_all(struct tdb_context *tdb)
{
int i;
for (i=0;i<tdb->header.hash_size;i++) {
@@ -90,7 +90,7 @@ void tdb_dump_all(struct tdb_context *tdb)
tdb_dump_chain(tdb, -1);
}
-int tdb_printfreelist(struct tdb_context *tdb)
+_PUBLIC_ int tdb_printfreelist(struct tdb_context *tdb)
{
int ret;
long total_free = 0;
diff --git a/lib/tdb/common/error.c b/lib/tdb/common/error.c
index 195ab23815..2aaaa8134e 100644
--- a/lib/tdb/common/error.c
+++ b/lib/tdb/common/error.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -27,7 +27,7 @@
#include "tdb_private.h"
-enum TDB_ERROR tdb_error(struct tdb_context *tdb)
+_PUBLIC_ enum TDB_ERROR tdb_error(struct tdb_context *tdb)
{
return tdb->ecode;
}
@@ -46,7 +46,7 @@ static struct tdb_errname {
{TDB_ERR_RDONLY, "write not permitted"} };
/* Error string for the last tdb error */
-const char *tdb_errorstr(struct tdb_context *tdb)
+_PUBLIC_ const char *tdb_errorstr(struct tdb_context *tdb)
{
uint32_t i;
for (i = 0; i < sizeof(emap) / sizeof(struct tdb_errname); i++)
diff --git a/lib/tdb/common/freelist.c b/lib/tdb/common/freelist.c
index 8113b54951..927078a7aa 100644
--- a/lib/tdb/common/freelist.c
+++ b/lib/tdb/common/freelist.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -98,7 +98,7 @@ static int update_tailer(struct tdb_context *tdb, tdb_off_t offset,
}
/* Add an element into the freelist. Merge adjacent records if
- neccessary. */
+ necessary. */
int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
{
/* Allocation and tailer lock */
@@ -143,7 +143,7 @@ left:
tdb_off_t left = offset - sizeof(tdb_off_t);
struct tdb_record l;
tdb_off_t leftsize;
-
+
/* Read in tailer and jump back to header */
if (tdb_ofs_read(tdb, left, &leftsize) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: left offset read failed at %u\n", left));
@@ -334,7 +334,7 @@ tdb_off_t tdb_allocate(struct tdb_context *tdb, tdb_len_t length, struct tdb_rec
bestfit.rec_len < length * multiplier) {
break;
}
-
+
/* this multiplier means we only extremely rarely
search more than 50 or so records. At 50 records we
accept records up to 11 times larger than what we
@@ -367,7 +367,7 @@ tdb_off_t tdb_allocate(struct tdb_context *tdb, tdb_len_t length, struct tdb_rec
/*
return the size of the freelist - used to decide if we should repack
*/
-int tdb_freelist_size(struct tdb_context *tdb)
+_PUBLIC_ int tdb_freelist_size(struct tdb_context *tdb)
{
tdb_off_t ptr;
int count=0;
diff --git a/lib/tdb/common/freelistcheck.c b/lib/tdb/common/freelistcheck.c
index 8d1ebabe04..ab6e78f02d 100644
--- a/lib/tdb/common/freelistcheck.c
+++ b/lib/tdb/common/freelistcheck.c
@@ -43,7 +43,7 @@ static int seen_insert(struct tdb_context *mem_tdb, tdb_off_t rec_ptr)
return tdb_store(mem_tdb, key, data, TDB_INSERT);
}
-int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries)
+_PUBLIC_ int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries)
{
struct tdb_context *mem_tdb = NULL;
struct tdb_record rec;
diff --git a/lib/tdb/common/hash.c b/lib/tdb/common/hash.c
new file mode 100644
index 0000000000..2472ed1ace
--- /dev/null
+++ b/lib/tdb/common/hash.c
@@ -0,0 +1,380 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Rusty Russell 2010
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+/* This is based on the hash algorithm from gdbm */
+unsigned int tdb_old_hash(TDB_DATA *key)
+{
+ uint32_t value; /* Used to compute the hash value. */
+ uint32_t i; /* Used to cycle through random values. */
+
+ /* Set the initial value from the key size. */
+ for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
+ value = (value + (key->dptr[i] << (i*5 % 24)));
+
+ return (1103515243 * value + 12345);
+}
+
+#ifndef WORDS_BIGENDIAN
+# define HASH_LITTLE_ENDIAN 1
+# define HASH_BIG_ENDIAN 0
+#else
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 1
+#endif
+
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions. Routines to test the hash are included
+if SELF_TEST is defined. You can use this free for any purpose. It's in
+the public domain. It has no warranty.
+
+You probably want to use hashlittle(). hashlittle() and hashbig()
+hash byte arrays. hashlittle() is faster than hashbig() on
+little-endian machines. Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+ a = i1; b = i2; c = i3;
+ mix(a,b,c);
+ a += i4; b += i5; c += i6;
+ mix(a,b,c);
+ a += i7;
+ final(a,b,c);
+then use c as the hash value. If you have a variable length array of
+4-byte integers to hash, use hash_word(). If you have a byte array (like
+a character string), use hashlittle(). If you have several byte arrays, or
+a mix of things, see the comments above hashlittle().
+
+Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+then mix those integers. This is fast (you can do a lot more thorough
+mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+*/
+
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+
+/*
+-------------------------------------------------------------------------------
+hashlittle() -- hash a variable-length key into a 32-bit value
+ k : the key (the unaligned variable-length array of bytes)
+ length : the length of the key, counting by bytes
+  (this single-hash variant has no val2 second-hash output parameter)
+Returns a 32-bit value.  Every bit of the key affects every bit of
+the return value.  Two keys differing by one or two bits will have
+totally different hash values.  (The original lookup3.c could also
+return a second, less well mixed hash through a val2 parameter.)
+
+The best hash table sizes are powers of 2. There is no need to do
+mod a prime (mod is sooo slow!). If you need less than 32 bits,
+use a bitmask. For example, if you need only 10 bits, do
+ h = (h & hashmask(10));
+In which case, the hash table should have hashsize(10) elements.
+
+If you are hashing n strings (uint8_t **)k, do it like this:
+ for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
+
+By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+code any way you wish, private, educational, or commercial. It's free.
+
+Use for hash table lookup, or anything where one collision in 2^^32 is
+acceptable. Do NOT use for cryptographic purposes.
+-------------------------------------------------------------------------------
+*/
+
+static uint32_t hashlittle( const void *key, size_t length )
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length);
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+#ifdef VALGRIND
+ const uint8_t *k8;
+#endif
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But VALGRIND will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#ifndef VALGRIND
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+
+#endif /* !valgrind */
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+_PUBLIC_ unsigned int tdb_jenkins_hash(TDB_DATA *key)
+{
+ return hashlittle(key->dptr, key->dsize);
+}
diff --git a/lib/tdb/common/io.c b/lib/tdb/common/io.c
index d549715f83..78bbf2ec77 100644
--- a/lib/tdb/common/io.c
+++ b/lib/tdb/common/io.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -299,7 +299,7 @@ static int tdb_expand_file(struct tdb_context *tdb, tdb_off_t size, tdb_off_t ad
int tdb_expand(struct tdb_context *tdb, tdb_off_t size)
{
struct tdb_record rec;
- tdb_off_t offset, new_size;
+ tdb_off_t offset, new_size, top_size, map_size;
if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "lock failed in tdb_expand\n"));
@@ -309,10 +309,25 @@ int tdb_expand(struct tdb_context *tdb, tdb_off_t size)
/* must know about any previous expansions by another process */
tdb->methods->tdb_oob(tdb, tdb->map_size + 1, 1);
- /* always make room for at least 100 more records, and at
- least 25% more space. Round the database up to a multiple
- of the page size */
- new_size = MAX(tdb->map_size + size*100, tdb->map_size * 1.25);
+ /* limit size in order to avoid using up huge amounts of memory for
+ * in memory tdbs if an oddball huge record creeps in */
+ if (size > 100 * 1024) {
+ top_size = tdb->map_size + size * 2;
+ } else {
+ top_size = tdb->map_size + size * 100;
+ }
+
+ /* always make room for at least top_size more records, and at
+ least 25% more space, if the DB is smaller than 100MiB;
+ otherwise grow it by 10% only. */
+ if (tdb->map_size > 100 * 1024 * 1024) {
+ map_size = tdb->map_size * 1.10;
+ } else {
+ map_size = tdb->map_size * 1.25;
+ }
+
+ /* Round the database up to a multiple of the page size */
+ new_size = MAX(top_size, map_size);
size = TDB_ALIGN(new_size, tdb->page_size) - tdb->map_size;
if (!(tdb->flags & TDB_INTERNAL))
@@ -461,7 +476,6 @@ static const struct tdb_methods io_methods = {
tdb_next_hash_chain,
tdb_oob,
tdb_expand_file,
- tdb_brlock
};
/*
diff --git a/lib/tdb/common/lock.c b/lib/tdb/common/lock.c
index 0984e516ea..c6a2485171 100644
--- a/lib/tdb/common/lock.c
+++ b/lib/tdb/common/lock.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -27,13 +27,104 @@
#include "tdb_private.h"
-#define TDB_MARK_LOCK 0x80000000
-
-void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
+_PUBLIC_ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
{
tdb->interrupt_sig_ptr = ptr;
}
+static int fcntl_lock(struct tdb_context *tdb,
+ int rw, off_t off, off_t len, bool waitflag)
+{
+ struct flock fl;
+
+ fl.l_type = rw;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = off;
+ fl.l_len = len;
+ fl.l_pid = 0;
+
+ if (waitflag)
+ return fcntl(tdb->fd, F_SETLKW, &fl);
+ else
+ return fcntl(tdb->fd, F_SETLK, &fl);
+}
+
+static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
+{
+ struct flock fl;
+#if 0 /* Check they matched up locks and unlocks correctly. */
+ char line[80];
+ FILE *locks;
+ bool found = false;
+
+ locks = fopen("/proc/locks", "r");
+
+ while (fgets(line, 80, locks)) {
+ char *p;
+ int type, start, l;
+
+ /* eg. 1: FLOCK ADVISORY WRITE 2440 08:01:2180826 0 EOF */
+ p = strchr(line, ':') + 1;
+ if (strncmp(p, " POSIX ADVISORY ", strlen(" POSIX ADVISORY ")))
+ continue;
+ p += strlen(" FLOCK ADVISORY ");
+ if (strncmp(p, "READ ", strlen("READ ")) == 0)
+ type = F_RDLCK;
+ else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
+ type = F_WRLCK;
+ else
+ abort();
+ p += 6;
+ if (atoi(p) != getpid())
+ continue;
+ p = strchr(strchr(p, ' ') + 1, ' ') + 1;
+ start = atoi(p);
+ p = strchr(p, ' ') + 1;
+ if (strncmp(p, "EOF", 3) == 0)
+ l = 0;
+ else
+ l = atoi(p) - start + 1;
+
+ if (off == start) {
+ if (len != l) {
+ fprintf(stderr, "Len %u should be %u: %s",
+ (int)len, l, line);
+ abort();
+ }
+ if (type != rw) {
+ fprintf(stderr, "Type %s wrong: %s",
+ rw == F_RDLCK ? "READ" : "WRITE", line);
+ abort();
+ }
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ fprintf(stderr, "Unlock on %u@%u not found!\n",
+ (int)off, (int)len);
+ abort();
+ }
+
+ fclose(locks);
+#endif
+
+ fl.l_type = F_UNLCK;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = off;
+ fl.l_len = len;
+ fl.l_pid = 0;
+
+ return fcntl(tdb->fd, F_SETLKW, &fl);
+}
+
+/* list -1 is the alloc list, otherwise a hash chain. */
+static tdb_off_t lock_offset(int list)
+{
+ return FREELIST_TOP + 4*list;
+}
+
/* a byte range locking function - return 0 on success
this functions locks/unlocks 1 byte at the specified offset.
@@ -42,30 +133,28 @@ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
note that a len of zero means lock to end of file
*/
-int tdb_brlock(struct tdb_context *tdb, tdb_off_t offset,
- int rw_type, int lck_type, int probe, size_t len)
+int tdb_brlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags)
{
- struct flock fl;
int ret;
if (tdb->flags & TDB_NOLOCK) {
return 0;
}
+ if (flags & TDB_LOCK_MARK_ONLY) {
+ return 0;
+ }
+
if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) {
tdb->ecode = TDB_ERR_RDONLY;
return -1;
}
- fl.l_type = rw_type;
- fl.l_whence = SEEK_SET;
- fl.l_start = offset;
- fl.l_len = len;
- fl.l_pid = 0;
-
do {
- ret = fcntl(tdb->fd,lck_type,&fl);
-
+ ret = fcntl_lock(tdb, rw_type, offset, len,
+ flags & TDB_LOCK_WAIT);
/* Check for a sigalarm break. */
if (ret == -1 && errno == EINTR &&
tdb->interrupt_sig_ptr &&
@@ -79,15 +168,34 @@ int tdb_brlock(struct tdb_context *tdb, tdb_off_t offset,
/* Generic lock error. errno set by fcntl.
* EAGAIN is an expected return from non-blocking
* locks. */
- if (!probe && lck_type != F_SETLK) {
- TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %d rw_type=%d lck_type=%d len=%d\n",
- tdb->fd, offset, rw_type, lck_type, (int)len));
+ if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %d rw_type=%d flags=%d len=%d\n",
+ tdb->fd, offset, rw_type, flags, (int)len));
}
return -1;
}
return 0;
}
+int tdb_brunlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len)
+{
+ int ret;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ do {
+ ret = fcntl_unlock(tdb, rw_type, offset, len);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %d rw_type=%d len=%d\n",
+ tdb->fd, offset, rw_type, (int)len));
+ }
+ return ret;
+}
/*
upgrade a read lock to a write lock. This needs to be handled in a
@@ -95,12 +203,29 @@ int tdb_brlock(struct tdb_context *tdb, tdb_off_t offset,
deadlock detection and claim a deadlock when progress can be
made. For those OSes we may loop for a while.
*/
-int tdb_brlock_upgrade(struct tdb_context *tdb, tdb_off_t offset, size_t len)
+int tdb_allrecord_upgrade(struct tdb_context *tdb)
{
int count = 1000;
+
+ if (tdb->allrecord_lock.count != 1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_allrecord_upgrade failed: count %u too high\n",
+ tdb->allrecord_lock.count));
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.off != 1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_allrecord_upgrade failed: already upgraded?\n"));
+ return -1;
+ }
+
while (count--) {
struct timeval tv;
- if (tdb_brlock(tdb, offset, F_WRLCK, F_SETLKW, 1, len) == 0) {
+ if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0,
+ TDB_LOCK_WAIT|TDB_LOCK_PROBE) == 0) {
+ tdb->allrecord_lock.ltype = F_WRLCK;
+ tdb->allrecord_lock.off = 0;
return 0;
}
if (errno != EDEADLK) {
@@ -111,57 +236,46 @@ int tdb_brlock_upgrade(struct tdb_context *tdb, tdb_off_t offset, size_t len)
tv.tv_usec = 1;
select(0, NULL, NULL, NULL, &tv);
}
- TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock_upgrade failed at offset %d\n", offset));
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_allrecord_upgrade failed\n"));
return -1;
}
-
-/* lock a list in the database. list -1 is the alloc list */
-static int _tdb_lock(struct tdb_context *tdb, int list, int ltype, int op)
+static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
+ tdb_off_t offset)
{
- struct tdb_lock_type *new_lck;
- int i;
- bool mark_lock = ((ltype & TDB_MARK_LOCK) == TDB_MARK_LOCK);
+ unsigned int i;
- ltype &= ~TDB_MARK_LOCK;
-
- /* a global lock allows us to avoid per chain locks */
- if (tdb->global_lock.count &&
- (ltype == tdb->global_lock.ltype || ltype == F_RDLCK)) {
- return 0;
+ for (i=0; i<tdb->num_lockrecs; i++) {
+ if (tdb->lockrecs[i].off == offset) {
+ return &tdb->lockrecs[i];
+ }
}
+ return NULL;
+}
- if (tdb->global_lock.count) {
- tdb->ecode = TDB_ERR_LOCK;
- return -1;
- }
+/* lock an offset in the database. */
+int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ enum tdb_lock_flags flags)
+{
+ struct tdb_lock_type *new_lck;
- if (list < -1 || list >= (int)tdb->header.hash_size) {
+ if (offset >= lock_offset(tdb->header.hash_size)) {
tdb->ecode = TDB_ERR_LOCK;
- TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid list %d for ltype=%d\n",
- list, ltype));
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n",
+ offset, ltype));
return -1;
}
if (tdb->flags & TDB_NOLOCK)
return 0;
- for (i=0; i<tdb->num_lockrecs; i++) {
- if (tdb->lockrecs[i].list == list) {
- if (tdb->lockrecs[i].count == 0) {
- /*
- * Can't happen, see tdb_unlock(). It should
- * be an assert.
- */
- TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock: "
- "lck->count == 0 for list %d", list));
- }
- /*
- * Just increment the in-memory struct, posix locks
- * don't stack.
- */
- tdb->lockrecs[i].count++;
- return 0;
- }
+ new_lck = find_nestlock(tdb, offset);
+ if (new_lck) {
+ /*
+ * Just increment the in-memory struct, posix locks
+ * don't stack.
+ */
+ new_lck->count++;
+ return 0;
}
new_lck = (struct tdb_lock_type *)realloc(
@@ -175,27 +289,89 @@ static int _tdb_lock(struct tdb_context *tdb, int list, int ltype, int op)
/* Since fcntl locks don't nest, we do a lock for the first one,
and simply bump the count for future ones */
- if (!mark_lock &&
- tdb->methods->tdb_brlock(tdb,FREELIST_TOP+4*list, ltype, op,
- 0, 1)) {
+ if (tdb_brlock(tdb, ltype, offset, 1, flags)) {
return -1;
}
- tdb->num_locks++;
-
- tdb->lockrecs[tdb->num_lockrecs].list = list;
+ tdb->lockrecs[tdb->num_lockrecs].off = offset;
tdb->lockrecs[tdb->num_lockrecs].count = 1;
tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
- tdb->num_lockrecs += 1;
+ tdb->num_lockrecs++;
return 0;
}
+static int tdb_lock_and_recover(struct tdb_context *tdb)
+{
+ int ret;
+
+ /* We need to match locking order in transaction commit. */
+ if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
+ return -1;
+ }
+
+ if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+ return -1;
+ }
+
+ ret = tdb_transaction_recover(tdb);
+
+ tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+
+ return ret;
+}
+
+static bool have_data_locks(const struct tdb_context *tdb)
+{
+ unsigned int i;
+
+ for (i = 0; i < tdb->num_lockrecs; i++) {
+ if (tdb->lockrecs[i].off >= lock_offset(-1))
+ return true;
+ }
+ return false;
+}
+
+static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
+ enum tdb_lock_flags waitflag)
+{
+ int ret;
+ bool check = false;
+
+ /* an allrecord lock allows us to avoid per chain locks */
+ if (tdb->allrecord_lock.count &&
+ (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
+ return 0;
+ }
+
+ if (tdb->allrecord_lock.count) {
+ tdb->ecode = TDB_ERR_LOCK;
+ ret = -1;
+ } else {
+ /* Only check when we grab first data lock. */
+ check = !have_data_locks(tdb);
+ ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag);
+
+ if (ret == 0 && check && tdb_needs_recovery(tdb)) {
+ tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
+
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_lock_list(tdb, list, ltype, waitflag);
+ }
+ }
+ return ret;
+}
+
/* lock a list in the database. list -1 is the alloc list */
int tdb_lock(struct tdb_context *tdb, int list, int ltype)
{
int ret;
- ret = _tdb_lock(tdb, list, ltype, F_SETLKW);
+
+ ret = tdb_lock_list(tdb, list, ltype, TDB_LOCK_WAIT);
if (ret) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
"ltype=%d (%s)\n", list, ltype, strerror(errno)));
@@ -206,49 +382,26 @@ int tdb_lock(struct tdb_context *tdb, int list, int ltype)
/* lock a list in the database. list -1 is the alloc list. non-blocking lock */
int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype)
{
- return _tdb_lock(tdb, list, ltype, F_SETLK);
+ return tdb_lock_list(tdb, list, ltype, TDB_LOCK_NOWAIT);
}
-/* unlock the database: returns void because it's too late for errors. */
- /* changed to return int it may be interesting to know there
- has been an error --simo */
-int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
+int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ bool mark_lock)
{
int ret = -1;
- int i;
- struct tdb_lock_type *lck = NULL;
- bool mark_lock = ((ltype & TDB_MARK_LOCK) == TDB_MARK_LOCK);
-
- ltype &= ~TDB_MARK_LOCK;
-
- /* a global lock allows us to avoid per chain locks */
- if (tdb->global_lock.count &&
- (ltype == tdb->global_lock.ltype || ltype == F_RDLCK)) {
- return 0;
- }
-
- if (tdb->global_lock.count) {
- tdb->ecode = TDB_ERR_LOCK;
- return -1;
- }
+ struct tdb_lock_type *lck;
if (tdb->flags & TDB_NOLOCK)
return 0;
/* Sanity checks */
- if (list < -1 || list >= (int)tdb->header.hash_size) {
- TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: list %d invalid (%d)\n", list, tdb->header.hash_size));
+ if (offset >= lock_offset(tdb->header.hash_size)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->header.hash_size));
return ret;
}
- for (i=0; i<tdb->num_lockrecs; i++) {
- if (tdb->lockrecs[i].list == list) {
- lck = &tdb->lockrecs[i];
- break;
- }
- }
-
+ lck = find_nestlock(tdb, offset);
if ((lck == NULL) || (lck->count == 0)) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
return -1;
@@ -269,20 +422,14 @@ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
if (mark_lock) {
ret = 0;
} else {
- ret = tdb->methods->tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK,
- F_SETLKW, 0, 1);
+ ret = tdb_brunlock(tdb, ltype, offset, 1);
}
- tdb->num_locks--;
/*
* Shrink the array by overwriting the element just unlocked with the
* last array element.
*/
-
- if (tdb->num_lockrecs > 1) {
- *lck = tdb->lockrecs[tdb->num_lockrecs-1];
- }
- tdb->num_lockrecs -= 1;
+ *lck = tdb->lockrecs[--tdb->num_lockrecs];
/*
* We don't bother with realloc when the array shrinks, but if we have
@@ -298,93 +445,161 @@ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
return ret;
}
-/*
- get the transaction lock
- */
-int tdb_transaction_lock(struct tdb_context *tdb, int ltype)
+int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
{
- if (tdb->global_lock.count) {
- return 0;
- }
- if (tdb->transaction_lock_count > 0) {
- tdb->transaction_lock_count++;
+ /* a global lock allows us to avoid per chain locks */
+ if (tdb->allrecord_lock.count &&
+ (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
return 0;
}
- if (tdb->methods->tdb_brlock(tdb, TRANSACTION_LOCK, ltype,
- F_SETLKW, 0, 1) == -1) {
- TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_lock: failed to get transaction lock\n"));
+ if (tdb->allrecord_lock.count) {
tdb->ecode = TDB_ERR_LOCK;
return -1;
}
- tdb->transaction_lock_count++;
- return 0;
+
+ return tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
}
/*
- release the transaction lock
+ get the transaction lock
*/
-int tdb_transaction_unlock(struct tdb_context *tdb)
+int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags lockflags)
{
- int ret;
- if (tdb->global_lock.count) {
- return 0;
- }
- if (tdb->transaction_lock_count > 1) {
- tdb->transaction_lock_count--;
- return 0;
- }
- ret = tdb->methods->tdb_brlock(tdb, TRANSACTION_LOCK, F_UNLCK, F_SETLKW, 0, 1);
- if (ret == 0) {
- tdb->transaction_lock_count = 0;
- }
- return ret;
+ return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, lockflags);
}
-
-
-
-/* lock/unlock entire database */
-static int _tdb_lockall(struct tdb_context *tdb, int ltype, int op)
+/*
+ release the transaction lock
+ */
+int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
- bool mark_lock = ((ltype & TDB_MARK_LOCK) == TDB_MARK_LOCK);
-
- ltype &= ~TDB_MARK_LOCK;
+ return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
+}
+/* Returns 0 if all done, -1 if error, 1 if ok. */
+static int tdb_allrecord_check(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
+{
/* There are no locks on read-only dbs */
if (tdb->read_only || tdb->traverse_read) {
tdb->ecode = TDB_ERR_LOCK;
return -1;
}
- if (tdb->global_lock.count && tdb->global_lock.ltype == ltype) {
- tdb->global_lock.count++;
+ if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == ltype) {
+ tdb->allrecord_lock.count++;
return 0;
}
- if (tdb->global_lock.count) {
+ if (tdb->allrecord_lock.count) {
/* a global lock of a different type exists */
tdb->ecode = TDB_ERR_LOCK;
return -1;
}
-
- if (tdb->num_locks != 0) {
+
+ if (tdb_have_extra_locks(tdb)) {
/* can't combine global and chain locks */
tdb->ecode = TDB_ERR_LOCK;
return -1;
}
- if (!mark_lock &&
- tdb->methods->tdb_brlock(tdb, FREELIST_TOP, ltype, op,
- 0, 4*tdb->header.hash_size)) {
- if (op == F_SETLKW) {
- TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lockall failed (%s)\n", strerror(errno)));
- }
+ if (upgradable && ltype != F_RDLCK) {
+ /* tdb error: you can't upgrade a write lock! */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+ return 1;
+}
+
+/* We only need to lock individual bytes, but Linux merges consecutive locks
+ * so we lock in contiguous ranges. */
+static int tdb_chainlock_gradual(struct tdb_context *tdb,
+ int ltype, enum tdb_lock_flags flags,
+ size_t off, size_t len)
+{
+ int ret;
+ enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);
+
+ if (len <= 4) {
+ /* Single record. Just do blocking lock. */
+ return tdb_brlock(tdb, ltype, off, len, flags);
+ }
+
+ /* First we try non-blocking. */
+ ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
+ if (ret == 0) {
+ return 0;
+ }
+
+ /* Try locking first half, then second. */
+ ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2);
+ if (ret == -1)
+ return -1;
+
+ ret = tdb_chainlock_gradual(tdb, ltype, flags,
+ off + len / 2, len - len / 2);
+ if (ret == -1) {
+ tdb_brunlock(tdb, ltype, off, len / 2);
+ return -1;
+ }
+ return 0;
+}
+
+/* lock/unlock entire database. It can only be upgradable if you have some
+ * other way of guaranteeing exclusivity (ie. transaction write lock).
+ * We do the locking gradually to avoid being starved by smaller locks. */
+int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
+{
+ switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) {
+ case -1:
return -1;
+ case 0:
+ return 0;
}
- tdb->global_lock.count = 1;
- tdb->global_lock.ltype = ltype;
+ /* We cover two kinds of locks:
+ * 1) Normal chain locks. Taken for almost all operations.
+ * 2) Individual record locks. Taken after normal or free
+ * chain locks.
+ *
+ * It is (1) which causes the starvation problem, so we're only
+ * gradual for that. */
+ if (tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP,
+ tdb->header.hash_size * 4) == -1) {
+ return -1;
+ }
+
+ /* Grab individual record locks. */
+ if (tdb_brlock(tdb, ltype, lock_offset(tdb->header.hash_size), 0,
+ flags) == -1) {
+ tdb_brunlock(tdb, ltype, FREELIST_TOP,
+ tdb->header.hash_size * 4);
+ return -1;
+ }
+
+ tdb->allrecord_lock.count = 1;
+ /* If it's upgradable, it's actually exclusive so we can treat
+ * it as a write lock. */
+ tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
+ tdb->allrecord_lock.off = upgradable;
+
+ if (tdb_needs_recovery(tdb)) {
+ bool mark = flags & TDB_LOCK_MARK_ONLY;
+ tdb_allrecord_unlock(tdb, ltype, mark);
+ if (mark) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_lockall_mark cannot do recovery\n"));
+ return -1;
+ }
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_allrecord_lock(tdb, ltype, flags, upgradable);
+ }
return 0;
}
@@ -392,102 +607,103 @@ static int _tdb_lockall(struct tdb_context *tdb, int ltype, int op)
/* unlock entire db */
-static int _tdb_unlockall(struct tdb_context *tdb, int ltype)
+int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock)
{
- bool mark_lock = ((ltype & TDB_MARK_LOCK) == TDB_MARK_LOCK);
-
- ltype &= ~TDB_MARK_LOCK;
-
/* There are no locks on read-only dbs */
if (tdb->read_only || tdb->traverse_read) {
tdb->ecode = TDB_ERR_LOCK;
return -1;
}
- if (tdb->global_lock.ltype != ltype || tdb->global_lock.count == 0) {
+ if (tdb->allrecord_lock.count == 0) {
tdb->ecode = TDB_ERR_LOCK;
return -1;
}
- if (tdb->global_lock.count > 1) {
- tdb->global_lock.count--;
+ /* Upgradable locks are marked as write locks. */
+ if (tdb->allrecord_lock.ltype != ltype
+ && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.count > 1) {
+ tdb->allrecord_lock.count--;
return 0;
}
- if (!mark_lock &&
- tdb->methods->tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW,
- 0, 4*tdb->header.hash_size)) {
+ if (!mark_lock && tdb_brunlock(tdb, ltype, FREELIST_TOP, 0)) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed (%s)\n", strerror(errno)));
return -1;
}
- tdb->global_lock.count = 0;
- tdb->global_lock.ltype = 0;
+ tdb->allrecord_lock.count = 0;
+ tdb->allrecord_lock.ltype = 0;
return 0;
}
/* lock entire database with write lock */
-int tdb_lockall(struct tdb_context *tdb)
+_PUBLIC_ int tdb_lockall(struct tdb_context *tdb)
{
tdb_trace(tdb, "tdb_lockall");
- return _tdb_lockall(tdb, F_WRLCK, F_SETLKW);
+ return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}
/* lock entire database with write lock - mark only */
-int tdb_lockall_mark(struct tdb_context *tdb)
+_PUBLIC_ int tdb_lockall_mark(struct tdb_context *tdb)
{
tdb_trace(tdb, "tdb_lockall_mark");
- return _tdb_lockall(tdb, F_WRLCK | TDB_MARK_LOCK, F_SETLKW);
+ return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY, false);
}
/* unlock entire database with write lock - unmark only */
-int tdb_lockall_unmark(struct tdb_context *tdb)
+_PUBLIC_ int tdb_lockall_unmark(struct tdb_context *tdb)
{
tdb_trace(tdb, "tdb_lockall_unmark");
- return _tdb_unlockall(tdb, F_WRLCK | TDB_MARK_LOCK);
+ return tdb_allrecord_unlock(tdb, F_WRLCK, true);
}
/* lock entire database with write lock - nonblocking varient */
-int tdb_lockall_nonblock(struct tdb_context *tdb)
+_PUBLIC_ int tdb_lockall_nonblock(struct tdb_context *tdb)
{
- int ret = _tdb_lockall(tdb, F_WRLCK, F_SETLK);
+ int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
return ret;
}
/* unlock entire database with write lock */
-int tdb_unlockall(struct tdb_context *tdb)
+_PUBLIC_ int tdb_unlockall(struct tdb_context *tdb)
{
tdb_trace(tdb, "tdb_unlockall");
- return _tdb_unlockall(tdb, F_WRLCK);
+ return tdb_allrecord_unlock(tdb, F_WRLCK, false);
}
/* lock entire database with read lock */
-int tdb_lockall_read(struct tdb_context *tdb)
+_PUBLIC_ int tdb_lockall_read(struct tdb_context *tdb)
{
tdb_trace(tdb, "tdb_lockall_read");
- return _tdb_lockall(tdb, F_RDLCK, F_SETLKW);
+ return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}
/* lock entire database with read lock - nonblock varient */
-int tdb_lockall_read_nonblock(struct tdb_context *tdb)
+_PUBLIC_ int tdb_lockall_read_nonblock(struct tdb_context *tdb)
{
- int ret = _tdb_lockall(tdb, F_RDLCK, F_SETLK);
+ int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false);
tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
return ret;
}
/* unlock entire database with read lock */
-int tdb_unlockall_read(struct tdb_context *tdb)
+_PUBLIC_ int tdb_unlockall_read(struct tdb_context *tdb)
{
tdb_trace(tdb, "tdb_unlockall_read");
- return _tdb_unlockall(tdb, F_RDLCK);
+ return tdb_allrecord_unlock(tdb, F_RDLCK, false);
}
/* lock/unlock one hash chain. This is meant to be used to reduce
contention - it cannot guarantee how many records will be locked */
-int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
tdb_trace_1rec(tdb, "tdb_chainlock", key);
@@ -497,7 +713,7 @@ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
/* lock/unlock one hash chain, non-blocking. This is meant to be used
to reduce contention - it cannot guarantee how many records will be
locked */
-int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
@@ -505,27 +721,29 @@ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
}
/* mark a chain as locked without actually locking it. Warning! use with great caution! */
-int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
- int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK | TDB_MARK_LOCK);
+ int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
+ F_WRLCK, TDB_LOCK_MARK_ONLY);
tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
return ret;
}
/* unmark a chain as locked without actually locking it. Warning! use with great caution! */
-int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
- return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK | TDB_MARK_LOCK);
+ return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
+ F_WRLCK, true);
}
-int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
tdb_trace_1rec(tdb, "tdb_chainunlock", key);
return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
}
-int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
{
int ret;
ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
@@ -533,21 +751,19 @@ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
return ret;
}
-int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
}
-
-
/* record lock stops delete underneath */
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
- if (tdb->global_lock.count) {
+ if (tdb->allrecord_lock.count) {
return 0;
}
- return off ? tdb->methods->tdb_brlock(tdb, off, F_RDLCK, F_SETLKW, 0, 1) : 0;
+ return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
}
/*
@@ -561,16 +777,21 @@ int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
for (i = &tdb->travlocks; i; i = i->next)
if (i->off == off)
return -1;
- return tdb->methods->tdb_brlock(tdb, off, F_WRLCK, F_SETLK, 1, 1);
+ if (tdb->allrecord_lock.count) {
+ if (tdb->allrecord_lock.ltype == F_WRLCK) {
+ return 0;
+ }
+ return -1;
+ }
+ return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
}
-/*
- Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
- an error to fail to get the lock here.
-*/
int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
- return tdb->methods->tdb_brlock(tdb, off, F_UNLCK, F_SETLK, 0, 1);
+ if (tdb->allrecord_lock.count) {
+ return 0;
+ }
+ return tdb_brunlock(tdb, F_WRLCK, off, 1);
}
/* fcntl locks don't stack: avoid unlocking someone else's */
@@ -579,7 +800,7 @@ int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
struct tdb_traverse_lock *i;
uint32_t count = 0;
- if (tdb->global_lock.count) {
+ if (tdb->allrecord_lock.count) {
return 0;
}
@@ -588,5 +809,53 @@ int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
for (i = &tdb->travlocks; i; i = i->next)
if (i->off == off)
count++;
- return (count == 1 ? tdb->methods->tdb_brlock(tdb, off, F_UNLCK, F_SETLKW, 0, 1) : 0);
+ return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0);
+}
+
+bool tdb_have_extra_locks(struct tdb_context *tdb)
+{
+ unsigned int extra = tdb->num_lockrecs;
+
+ /* A transaction holds the lock for all records. */
+ if (!tdb->transaction && tdb->allrecord_lock.count) {
+ return true;
+ }
+
+ /* We always hold the active lock if CLEAR_IF_FIRST. */
+ if (find_nestlock(tdb, ACTIVE_LOCK)) {
+ extra--;
+ }
+
+ /* In a transaction, we expect to hold the transaction lock */
+ if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) {
+ extra--;
+ }
+
+ return extra;
+}
+
+/* The transaction code uses this to remove all locks. */
+void tdb_release_transaction_locks(struct tdb_context *tdb)
+{
+ unsigned int i, active = 0;
+
+ if (tdb->allrecord_lock.count != 0) {
+ tdb_brunlock(tdb, tdb->allrecord_lock.ltype, FREELIST_TOP, 0);
+ tdb->allrecord_lock.count = 0;
+ }
+
+ for (i=0;i<tdb->num_lockrecs;i++) {
+ struct tdb_lock_type *lck = &tdb->lockrecs[i];
+
+ /* Don't release the active lock! Copy it to first entry. */
+ if (lck->off == ACTIVE_LOCK) {
+ tdb->lockrecs[active++] = *lck;
+ } else {
+ tdb_brunlock(tdb, lck->ltype, lck->off, 1);
+ }
+ }
+ tdb->num_lockrecs = active;
+ if (tdb->num_lockrecs == 0) {
+ SAFE_FREE(tdb->lockrecs);
+ }
}
diff --git a/lib/tdb/common/open.c b/lib/tdb/common/open.c
index 4d4f95a3da..ec45689ffc 100644
--- a/lib/tdb/common/open.c
+++ b/lib/tdb/common/open.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -30,20 +30,25 @@
/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;
-
-/* This is based on the hash algorithm from gdbm */
-static unsigned int default_tdb_hash(TDB_DATA *key)
+/* We use two hashes to double-check they're using the right hash function. */
+void tdb_header_hash(struct tdb_context *tdb,
+ uint32_t *magic1_hash, uint32_t *magic2_hash)
{
- uint32_t value; /* Used to compute the hash value. */
- uint32_t i; /* Used to cycle through random values. */
+ TDB_DATA hash_key;
+ uint32_t tdb_magic = TDB_MAGIC;
- /* Set the initial value from the key size. */
- for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
- value = (value + (key->dptr[i] << (i*5 % 24)));
+ hash_key.dptr = discard_const_p(unsigned char, TDB_MAGIC_FOOD);
+ hash_key.dsize = sizeof(TDB_MAGIC_FOOD);
+ *magic1_hash = tdb->hash_fn(&hash_key);
- return (1103515243 * value + 12345);
-}
+ hash_key.dptr = (unsigned char *)CONVERT(tdb_magic);
+ hash_key.dsize = sizeof(tdb_magic);
+ *magic2_hash = tdb->hash_fn(&hash_key);
+ /* Make sure at least one hash is non-zero! */
+ if (*magic1_hash == 0 && *magic2_hash == 0)
+ *magic1_hash = 1;
+}
/* initialise a new database with a specified hash size */
static int tdb_new_database(struct tdb_context *tdb, int hash_size)
@@ -51,7 +56,6 @@ static int tdb_new_database(struct tdb_context *tdb, int hash_size)
struct tdb_header *newdb;
size_t size;
int ret = -1;
- ssize_t written;
/* We make it up in memory, then write it out if not internal */
size = sizeof(struct tdb_header) + (hash_size+1)*sizeof(tdb_off_t);
@@ -63,6 +67,14 @@ static int tdb_new_database(struct tdb_context *tdb, int hash_size)
/* Fill in the header */
newdb->version = TDB_VERSION;
newdb->hash_size = hash_size;
+
+ tdb_header_hash(tdb, &newdb->magic1_hash, &newdb->magic2_hash);
+
+ /* Make sure older tdbs (which don't check the magic hash fields)
+ * will refuse to open this TDB. */
+ if (tdb->flags & TDB_INCOMPATIBLE_HASH)
+ newdb->rwlocks = TDB_HASH_RWLOCK_MAGIC;
+
if (tdb->flags & TDB_INTERNAL) {
tdb->map_size = size;
tdb->map_ptr = (char *)newdb;
@@ -83,22 +95,8 @@ static int tdb_new_database(struct tdb_context *tdb, int hash_size)
/* Don't endian-convert the magic food! */
memcpy(newdb->magic_food, TDB_MAGIC_FOOD, strlen(TDB_MAGIC_FOOD)+1);
/* we still have "ret == -1" here */
- written = write(tdb->fd, newdb, size);
- if (written == size) {
- ret = 0;
- } else if (written != -1) {
- /* call write once again, this usually should return -1 and
- * set errno appropriately */
- size -= written;
- written = write(tdb->fd, newdb+written, size);
- if (written == size) {
+ if (tdb_write_all(tdb->fd, newdb, size))
ret = 0;
- } else if (written >= 0) {
- /* a second incomplete write - we give up.
- * guessing the errno... */
- errno = ENOSPC;
- }
- }
fail:
SAFE_FREE(newdb);
@@ -111,7 +109,7 @@ static int tdb_already_open(dev_t device,
ino_t ino)
{
struct tdb_context *i;
-
+
for (i = tdbs; i; i = i->next) {
if (i->device == device && i->inode == ino) {
return 1;
@@ -131,7 +129,7 @@ static int tdb_already_open(dev_t device,
try to call tdb_error or tdb_errname, just do strerror(errno).
@param name may be NULL for internal databases. */
-struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags,
+_PUBLIC_ struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags,
int open_flags, mode_t mode)
{
return tdb_open_ex(name, hash_size, tdb_flags, open_flags, mode, NULL, NULL);
@@ -143,8 +141,28 @@ static void null_log_fn(struct tdb_context *tdb, enum tdb_debug_level level, con
{
}
+static bool check_header_hash(struct tdb_context *tdb,
+ bool default_hash, uint32_t *m1, uint32_t *m2)
+{
+ tdb_header_hash(tdb, m1, m2);
+ if (tdb->header.magic1_hash == *m1 &&
+ tdb->header.magic2_hash == *m2) {
+ return true;
+ }
+
+ /* If they explicitly set a hash, always respect it. */
+ if (!default_hash)
+ return false;
+
+ /* Otherwise, try the other inbuilt hash. */
+ if (tdb->hash_fn == tdb_old_hash)
+ tdb->hash_fn = tdb_jenkins_hash;
+ else
+ tdb->hash_fn = tdb_old_hash;
+ return check_header_hash(tdb, false, m1, m2);
+}
-struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
+_PUBLIC_ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
int open_flags, mode_t mode,
const struct tdb_logging_context *log_ctx,
tdb_hash_func hash_fn)
@@ -155,6 +173,8 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
unsigned char *vp;
uint32_t vertest;
unsigned v;
+ const char *hash_alg;
+ uint32_t magic1, magic2;
if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {
/* Can't log this */
@@ -176,7 +196,45 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
tdb->log.log_fn = null_log_fn;
tdb->log.log_private = NULL;
}
- tdb->hash_fn = hash_fn ? hash_fn : default_tdb_hash;
+
+ if (name == NULL && (tdb_flags & TDB_INTERNAL)) {
+ name = "__TDB_INTERNAL__";
+ }
+
+ if (name == NULL) {
+ tdb->name = discard_const_p(char, "__NULL__");
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: called with name == NULL\n"));
+ tdb->name = NULL;
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /* now make a copy of the name, as the caller's memory might have gone away */
+ if (!(tdb->name = (char *)strdup(name))) {
+ /*
+ * set the name as the given string, so that tdb_name() will
+ * work in case of an error.
+ */
+ tdb->name = discard_const_p(char, name);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: can't strdup(%s)\n",
+ name));
+ tdb->name = NULL;
+ errno = ENOMEM;
+ goto fail;
+ }
+
+ if (hash_fn) {
+ tdb->hash_fn = hash_fn;
+ hash_alg = "the user defined";
+ } else {
+ /* This controls what we use when creating a tdb. */
+ if (tdb->flags & TDB_INCOMPATIBLE_HASH) {
+ tdb->hash_fn = tdb_jenkins_hash;
+ } else {
+ tdb->hash_fn = tdb_old_hash;
+ }
+ hash_alg = "either default";
+ }
/* cache the page size */
tdb->page_size = getpagesize();
@@ -192,7 +250,7 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
errno = EINVAL;
goto fail;
}
-
+
if (hash_size == 0)
hash_size = DEFAULT_HASH_SIZE;
if ((open_flags & O_ACCMODE) == O_RDONLY) {
@@ -211,6 +269,10 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
goto fail;
}
+ if (getenv("TDB_NO_FSYNC")) {
+ tdb->flags |= TDB_NOSYNC;
+ }
+
/*
* TDB_ALLOW_NESTING is the default behavior.
* Note: this may change in future versions!
@@ -241,8 +303,8 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);
/* ensure there is only one process initialising at once */
- if (tdb->methods->tdb_brlock(tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0, 1) == -1) {
- TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to get global lock on %s: %s\n",
+ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to get open lock on %s: %s\n",
name, strerror(errno)));
goto fail; /* errno set by tdb_brlock */
}
@@ -250,7 +312,7 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
/* we need to zero database if we are the only one with it open */
if ((tdb_flags & TDB_CLEAR_IF_FIRST) &&
(!tdb->read_only) &&
- (locked = (tdb->methods->tdb_brlock(tdb, ACTIVE_LOCK, F_WRLCK, F_SETLK, 0, 1) == 0))) {
+ (locked = (tdb_nest_lock(tdb, ACTIVE_LOCK, F_WRLCK, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE) == 0))) {
open_flags |= O_CREAT;
if (ftruncate(tdb->fd, 0) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
@@ -289,11 +351,31 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
if (fstat(tdb->fd, &st) == -1)
goto fail;
- if (tdb->header.rwlocks != 0) {
+ if (tdb->header.rwlocks != 0 &&
+ tdb->header.rwlocks != TDB_HASH_RWLOCK_MAGIC) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: spinlocks no longer supported\n"));
goto fail;
}
+ if ((tdb->header.magic1_hash == 0) && (tdb->header.magic2_hash == 0)) {
+ /* older TDB without magic hash references */
+ tdb->hash_fn = tdb_old_hash;
+ } else if (!check_header_hash(tdb, !hash_fn, &magic1, &magic2)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "%s was not created with %s hash function we are using\n"
+ "magic1_hash[0x%08X %s 0x%08X] "
+ "magic2_hash[0x%08X %s 0x%08X]\n",
+ name, hash_alg,
+ tdb->header.magic1_hash,
+ (tdb->header.magic1_hash == magic1) ? "==" : "!=",
+ magic1,
+ tdb->header.magic2_hash,
+ (tdb->header.magic2_hash == magic2) ? "==" : "!=",
+ magic2));
+ errno = EINVAL;
+ goto fail;
+ }
+
/* Is it already in the open list? If so, fail. */
if (tdb_already_open(st.st_dev, st.st_ino)) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
@@ -303,19 +385,14 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
goto fail;
}
- if (!(tdb->name = (char *)strdup(name))) {
- errno = ENOMEM;
- goto fail;
- }
-
tdb->map_size = st.st_size;
tdb->device = st.st_dev;
tdb->inode = st.st_ino;
tdb_mmap(tdb);
if (locked) {
- if (tdb->methods->tdb_brlock(tdb, ACTIVE_LOCK, F_UNLCK, F_SETLK, 0, 1) == -1) {
+ if (tdb_nest_unlock(tdb, ACTIVE_LOCK, F_WRLCK, false) == -1) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
- "failed to take ACTIVE_LOCK on %s: %s\n",
+ "failed to release ACTIVE_LOCK on %s: %s\n",
name, strerror(errno)));
goto fail;
}
@@ -328,8 +405,9 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
if (tdb_flags & TDB_CLEAR_IF_FIRST) {
/* leave this lock in place to indicate it's in use */
- if (tdb->methods->tdb_brlock(tdb, ACTIVE_LOCK, F_RDLCK, F_SETLKW, 0, 1) == -1)
+ if (tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT) == -1) {
goto fail;
+ }
}
/* if needed, run recovery */
@@ -356,9 +434,10 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
internal:
/* Internal (memory-only) databases skip all the code above to
* do with disk files, and resume here by releasing their
- * global lock and hooking into the active list. */
- if (tdb->methods->tdb_brlock(tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0, 1) == -1)
+ * open lock and hooking into the active list. */
+ if (tdb_nest_unlock(tdb, OPEN_LOCK, F_WRLCK, false) == -1) {
goto fail;
+ }
tdb->next = tdbs;
tdbs = tdb;
return tdb;
@@ -378,10 +457,11 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
else
tdb_munmap(tdb);
}
- SAFE_FREE(tdb->name);
if (tdb->fd != -1)
if (close(tdb->fd) != 0)
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to close tdb->fd on error!\n"));
+ SAFE_FREE(tdb->lockrecs);
+ SAFE_FREE(tdb->name);
SAFE_FREE(tdb);
errno = save_errno;
return NULL;
@@ -392,7 +472,7 @@ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
* Set the maximum number of dead records per hash chain
*/
-void tdb_set_max_dead(struct tdb_context *tdb, int max_dead)
+_PUBLIC_ void tdb_set_max_dead(struct tdb_context *tdb, int max_dead)
{
tdb->max_dead_records = max_dead;
}
@@ -402,15 +482,15 @@ void tdb_set_max_dead(struct tdb_context *tdb, int max_dead)
*
* @returns -1 for error; 0 for success.
**/
-int tdb_close(struct tdb_context *tdb)
+_PUBLIC_ int tdb_close(struct tdb_context *tdb)
{
struct tdb_context **i;
int ret = 0;
- tdb_trace(tdb, "tdb_close");
if (tdb->transaction) {
- _tdb_transaction_cancel(tdb);
+ tdb_transaction_cancel(tdb);
}
+ tdb_trace(tdb, "tdb_close");
if (tdb->map_ptr) {
if (tdb->flags & TDB_INTERNAL)
@@ -443,13 +523,13 @@ int tdb_close(struct tdb_context *tdb)
}
/* register a loging function */
-void tdb_set_logging_function(struct tdb_context *tdb,
- const struct tdb_logging_context *log_ctx)
+_PUBLIC_ void tdb_set_logging_function(struct tdb_context *tdb,
+ const struct tdb_logging_context *log_ctx)
{
tdb->log = *log_ctx;
}
-void *tdb_get_logging_private(struct tdb_context *tdb)
+_PUBLIC_ void *tdb_get_logging_private(struct tdb_context *tdb)
{
return tdb->log.log_private;
}
@@ -465,7 +545,7 @@ static int tdb_reopen_internal(struct tdb_context *tdb, bool active_lock)
return 0; /* Nothing to do. */
}
- if (tdb->num_locks != 0 || tdb->global_lock.count) {
+ if (tdb_have_extra_locks(tdb)) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_reopen: reopen not allowed with locks held\n"));
goto fail;
}
@@ -500,8 +580,11 @@ static int tdb_reopen_internal(struct tdb_context *tdb, bool active_lock)
tdb_mmap(tdb);
#endif /* fake pread or pwrite */
- if (active_lock &&
- (tdb->methods->tdb_brlock(tdb, ACTIVE_LOCK, F_RDLCK, F_SETLKW, 0, 1) == -1)) {
+ /* We may still think we hold the active lock. */
+ tdb->num_lockrecs = 0;
+ SAFE_FREE(tdb->lockrecs);
+
+ if (active_lock && tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: failed to obtain active lock\n"));
goto fail;
}
@@ -515,13 +598,13 @@ fail:
/* reopen a tdb - this can be used after a fork to ensure that we have an independent
seek pointer from our parent and to re-establish locks */
-int tdb_reopen(struct tdb_context *tdb)
+_PUBLIC_ int tdb_reopen(struct tdb_context *tdb)
{
return tdb_reopen_internal(tdb, tdb->flags & TDB_CLEAR_IF_FIRST);
}
/* reopen all tdb's */
-int tdb_reopen_all(int parent_longlived)
+_PUBLIC_ int tdb_reopen_all(int parent_longlived)
{
struct tdb_context *tdb;
diff --git a/lib/tdb/common/summary.c b/lib/tdb/common/summary.c
new file mode 100644
index 0000000000..171a1a2055
--- /dev/null
+++ b/lib/tdb/common/summary.c
@@ -0,0 +1,201 @@
+ /*
+ Trivial Database: human-readable summary code
+ Copyright (C) Rusty Russell 2010
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+#define SUMMARY_FORMAT \
+ "Size of file/data: %u/%zu\n" \
+ "Number of records: %zu\n" \
+ "Smallest/average/largest keys: %zu/%zu/%zu\n" \
+ "Smallest/average/largest data: %zu/%zu/%zu\n" \
+ "Smallest/average/largest padding: %zu/%zu/%zu\n" \
+ "Number of dead records: %zu\n" \
+ "Smallest/average/largest dead records: %zu/%zu/%zu\n" \
+ "Number of free records: %zu\n" \
+ "Smallest/average/largest free records: %zu/%zu/%zu\n" \
+ "Number of hash chains: %zu\n" \
+ "Smallest/average/largest hash chains: %zu/%zu/%zu\n" \
+ "Number of uncoalesced records: %zu\n" \
+ "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n" \
+ "Percentage keys/data/padding/free/dead/rechdrs&tailers/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"
+
+/* We don't use tally module, to keep upstream happy. */
+struct tally {
+ size_t min, max, total;
+ size_t num;
+};
+
+static void tally_init(struct tally *tally)
+{
+ tally->total = 0;
+ tally->num = 0;
+ tally->min = tally->max = 0;
+}
+
+static void tally_add(struct tally *tally, size_t len)
+{
+ if (tally->num == 0)
+ tally->max = tally->min = len;
+ else if (len > tally->max)
+ tally->max = len;
+ else if (len < tally->min)
+ tally->min = len;
+ tally->num++;
+ tally->total += len;
+}
+
+static size_t tally_mean(const struct tally *tally)
+{
+ if (!tally->num)
+ return 0;
+ return tally->total / tally->num;
+}
+
+static size_t get_hash_length(struct tdb_context *tdb, unsigned int i)
+{
+ tdb_off_t rec_ptr;
+ size_t count = 0;
+
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(i), &rec_ptr) == -1)
+ return 0;
+
+ /* keep looking until we find the right record */
+ while (rec_ptr) {
+ struct tdb_record r;
+ ++count;
+ if (tdb_rec_read(tdb, rec_ptr, &r) == -1)
+ return 0;
+ rec_ptr = r.next;
+ }
+ return count;
+}
+
+_PUBLIC_ char *tdb_summary(struct tdb_context *tdb)
+{
+ tdb_off_t off, rec_off;
+ struct tally freet, keys, data, dead, extra, hash, uncoal;
+ struct tdb_record rec;
+ char *ret = NULL;
+ bool locked;
+ size_t len, unc = 0;
+ struct tdb_record recovery;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return NULL;
+ locked = true;
+ }
+
+ if (tdb_recovery_area(tdb, tdb->methods, &rec_off, &recovery) != 0) {
+ goto unlock;
+ }
+
+ tally_init(&freet);
+ tally_init(&keys);
+ tally_init(&data);
+ tally_init(&dead);
+ tally_init(&extra);
+ tally_init(&hash);
+ tally_init(&uncoal);
+
+ for (off = TDB_DATA_START(tdb->header.hash_size);
+ off < tdb->map_size - 1;
+ off += sizeof(rec) + rec.rec_len) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == -1)
+ goto unlock;
+ switch (rec.magic) {
+ case TDB_MAGIC:
+ tally_add(&keys, rec.key_len);
+ tally_add(&data, rec.data_len);
+ tally_add(&extra, rec.rec_len - (rec.key_len
+ + rec.data_len));
+ if (unc > 1)
+ tally_add(&uncoal, unc - 1);
+ unc = 0;
+ break;
+ case TDB_FREE_MAGIC:
+ tally_add(&freet, rec.rec_len);
+ unc++;
+ break;
+ /* If we crash after ftruncate, we can get zeroes or fill. */
+ case TDB_RECOVERY_INVALID_MAGIC:
+ case 0x42424242:
+ unc++;
+ /* If it's a valid recovery, we can trust rec_len. */
+ if (off != rec_off) {
+ rec.rec_len = tdb_dead_space(tdb, off)
+ - sizeof(rec);
+ }
+ /* Fall through */
+ case TDB_DEAD_MAGIC:
+ tally_add(&dead, rec.rec_len);
+ break;
+ default:
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Unexpected record magic 0x%x at offset %d\n",
+ rec.magic, off));
+ goto unlock;
+ }
+ }
+ if (unc > 1)
+ tally_add(&uncoal, unc - 1);
+
+ for (off = 0; off < tdb->header.hash_size; off++)
+ tally_add(&hash, get_hash_length(tdb, off));
+
+ /* 20 is max length of a %zu. */
+ len = strlen(SUMMARY_FORMAT) + 35*20 + 1;
+ ret = (char *)malloc(len);
+ if (!ret)
+ goto unlock;
+
+ snprintf(ret, len, SUMMARY_FORMAT,
+ tdb->map_size, keys.total+data.total,
+ keys.num,
+ keys.min, tally_mean(&keys), keys.max,
+ data.min, tally_mean(&data), data.max,
+ extra.min, tally_mean(&extra), extra.max,
+ dead.num,
+ dead.min, tally_mean(&dead), dead.max,
+ freet.num,
+ freet.min, tally_mean(&freet), freet.max,
+ hash.num,
+ hash.min, tally_mean(&hash), hash.max,
+ uncoal.total,
+ uncoal.min, tally_mean(&uncoal), uncoal.max,
+ keys.total * 100.0 / tdb->map_size,
+ data.total * 100.0 / tdb->map_size,
+ extra.total * 100.0 / tdb->map_size,
+ freet.total * 100.0 / tdb->map_size,
+ dead.total * 100.0 / tdb->map_size,
+ (keys.num + freet.num + dead.num)
+ * (sizeof(struct tdb_record) + sizeof(uint32_t))
+ * 100.0 / tdb->map_size,
+ tdb->header.hash_size * sizeof(tdb_off_t)
+ * 100.0 / tdb->map_size);
+
+unlock:
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return ret;
+}
diff --git a/lib/tdb/common/tdb.c b/lib/tdb/common/tdb.c
index d2688def04..66be555a06 100644
--- a/lib/tdb/common/tdb.c
+++ b/lib/tdb/common/tdb.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -27,16 +27,16 @@
#include "tdb_private.h"
-TDB_DATA tdb_null;
+_PUBLIC_ TDB_DATA tdb_null;
/*
non-blocking increment of the tdb sequence number if the tdb has been opened using
the TDB_SEQNUM flag
*/
-void tdb_increment_seqnum_nonblock(struct tdb_context *tdb)
+_PUBLIC_ void tdb_increment_seqnum_nonblock(struct tdb_context *tdb)
{
tdb_off_t seqnum=0;
-
+
if (!(tdb->flags & TDB_SEQNUM)) {
return;
}
@@ -59,13 +59,14 @@ static void tdb_increment_seqnum(struct tdb_context *tdb)
return;
}
- if (tdb_brlock(tdb, TDB_SEQNUM_OFS, F_WRLCK, F_SETLKW, 1, 1) != 0) {
+ if (tdb_nest_lock(tdb, TDB_SEQNUM_OFS, F_WRLCK,
+ TDB_LOCK_WAIT|TDB_LOCK_PROBE) != 0) {
return;
}
tdb_increment_seqnum_nonblock(tdb);
- tdb_brlock(tdb, TDB_SEQNUM_OFS, F_UNLCK, F_SETLKW, 1, 1);
+ tdb_nest_unlock(tdb, TDB_SEQNUM_OFS, F_WRLCK, false);
}
static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
@@ -79,7 +80,7 @@ static tdb_off_t tdb_find(struct tdb_context *tdb, TDB_DATA key, uint32_t hash,
struct tdb_record *r)
{
tdb_off_t rec_ptr;
-
+
/* read in the hash top */
if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
return 0;
@@ -153,7 +154,6 @@ static int tdb_update_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash,
free(data.dptr);
}
}
-
/* must be long enough key, data and tailer */
if (rec.rec_len < key.dsize + dbuf.dsize + sizeof(tdb_off_t)) {
@@ -170,7 +170,7 @@ static int tdb_update_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash,
rec.data_len = dbuf.dsize;
return tdb_rec_write(tdb, rec_ptr, &rec);
}
-
+
return 0;
}
@@ -199,7 +199,7 @@ static TDB_DATA _tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
return ret;
}
-TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
{
TDB_DATA ret = _tdb_fetch(tdb, key);
@@ -212,7 +212,7 @@ TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
* function. The parsing function is executed under the chain read lock, so it
* should be fast and should not block on other syscalls.
*
- * DONT CALL OTHER TDB CALLS FROM THE PARSER, THIS MIGHT LEAD TO SEGFAULTS.
+ * DON'T CALL OTHER TDB CALLS FROM THE PARSER, THIS MIGHT LEAD TO SEGFAULTS.
*
* For mmapped tdb's that do not have a transaction open it points the parsing
* function directly at the mmap area, it avoids the malloc/memcpy in this
@@ -221,9 +221,11 @@ TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
*
* This is interesting for all readers of potentially large data structures in
* the tdb records, ldb indexes being one example.
+ *
+ * Return -1 if the record was not found.
*/
-int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
+_PUBLIC_ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
int (*parser)(TDB_DATA key, TDB_DATA data,
void *private_data),
void *private_data)
@@ -237,9 +239,10 @@ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
hash = tdb->hash_fn(&key);
if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) {
+ /* record not found */
tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, -1);
tdb->ecode = TDB_ERR_NOEXIST;
- return 0;
+ return -1;
}
tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, 0);
@@ -260,14 +263,14 @@ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
static int tdb_exists_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash)
{
struct tdb_record rec;
-
+
if (tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec) == 0)
return 0;
tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
return 1;
}
-int tdb_exists(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_exists(struct tdb_context *tdb, TDB_DATA key)
{
uint32_t hash = tdb->hash_fn(&key);
int ret;
@@ -318,7 +321,7 @@ static int tdb_count_dead(struct tdb_context *tdb, uint32_t hash)
int res = 0;
tdb_off_t rec_ptr;
struct tdb_record rec;
-
+
/* read in the hash top */
if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
return 0;
@@ -347,7 +350,7 @@ static int tdb_purge_dead(struct tdb_context *tdb, uint32_t hash)
if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
return -1;
}
-
+
/* read in the hash top */
if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
goto fail;
@@ -426,7 +429,7 @@ static int tdb_delete_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash)
return ret;
}
-int tdb_delete(struct tdb_context *tdb, TDB_DATA key)
+_PUBLIC_ int tdb_delete(struct tdb_context *tdb, TDB_DATA key)
{
uint32_t hash = tdb->hash_fn(&key);
int ret;
@@ -443,7 +446,7 @@ static tdb_off_t tdb_find_dead(struct tdb_context *tdb, uint32_t hash,
struct tdb_record *r, tdb_len_t length)
{
tdb_off_t rec_ptr;
-
+
/* read in the hash top */
if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
return 0;
@@ -596,7 +599,7 @@ static int _tdb_store(struct tdb_context *tdb, TDB_DATA key,
return 0 on success, -1 on failure
*/
-int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
+_PUBLIC_ int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
{
uint32_t hash;
int ret;
@@ -619,7 +622,7 @@ int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
}
/* Append to an entry. Create if not exist. */
-int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf)
+_PUBLIC_ int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf)
{
uint32_t hash;
TDB_DATA dbuf;
@@ -658,7 +661,7 @@ int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf)
ret = _tdb_store(tdb, key, dbuf, 0, hash);
tdb_trace_2rec_retrec(tdb, "tdb_append", key, new_dbuf, dbuf);
-
+
failed:
tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
SAFE_FREE(dbuf.dptr);
@@ -670,7 +673,7 @@ failed:
return the name of the current tdb file
useful for external logging functions
*/
-const char *tdb_name(struct tdb_context *tdb)
+_PUBLIC_ const char *tdb_name(struct tdb_context *tdb)
{
return tdb->name;
}
@@ -680,7 +683,7 @@ const char *tdb_name(struct tdb_context *tdb)
useful for external routines that want to check the device/inode
of the fd
*/
-int tdb_fd(struct tdb_context *tdb)
+_PUBLIC_ int tdb_fd(struct tdb_context *tdb)
{
return tdb->fd;
}
@@ -689,7 +692,7 @@ int tdb_fd(struct tdb_context *tdb)
return the current logging function
useful for external tdb routines that wish to log tdb errors
*/
-tdb_log_func tdb_log_fn(struct tdb_context *tdb)
+_PUBLIC_ tdb_log_func tdb_log_fn(struct tdb_context *tdb)
{
return tdb->log.log_fn;
}
@@ -705,7 +708,7 @@ tdb_log_func tdb_log_fn(struct tdb_context *tdb)
The aim of this sequence number is to allow for a very lightweight
test of a possible tdb change.
*/
-int tdb_get_seqnum(struct tdb_context *tdb)
+_PUBLIC_ int tdb_get_seqnum(struct tdb_context *tdb)
{
tdb_off_t seqnum=0;
@@ -713,22 +716,22 @@ int tdb_get_seqnum(struct tdb_context *tdb)
return seqnum;
}
-int tdb_hash_size(struct tdb_context *tdb)
+_PUBLIC_ int tdb_hash_size(struct tdb_context *tdb)
{
return tdb->header.hash_size;
}
-size_t tdb_map_size(struct tdb_context *tdb)
+_PUBLIC_ size_t tdb_map_size(struct tdb_context *tdb)
{
return tdb->map_size;
}
-int tdb_get_flags(struct tdb_context *tdb)
+_PUBLIC_ int tdb_get_flags(struct tdb_context *tdb)
{
return tdb->flags;
}
-void tdb_add_flags(struct tdb_context *tdb, unsigned flags)
+_PUBLIC_ void tdb_add_flags(struct tdb_context *tdb, unsigned flags)
{
if ((flags & TDB_ALLOW_NESTING) &&
(flags & TDB_DISALLOW_NESTING)) {
@@ -748,7 +751,7 @@ void tdb_add_flags(struct tdb_context *tdb, unsigned flags)
tdb->flags |= flags;
}
-void tdb_remove_flags(struct tdb_context *tdb, unsigned flags)
+_PUBLIC_ void tdb_remove_flags(struct tdb_context *tdb, unsigned flags)
{
if ((flags & TDB_ALLOW_NESTING) &&
(flags & TDB_DISALLOW_NESTING)) {
@@ -772,7 +775,7 @@ void tdb_remove_flags(struct tdb_context *tdb, unsigned flags)
/*
enable sequence number handling on an open tdb
*/
-void tdb_enable_seqnum(struct tdb_context *tdb)
+_PUBLIC_ void tdb_enable_seqnum(struct tdb_context *tdb)
{
tdb->flags |= TDB_SEQNUM;
}
@@ -804,12 +807,12 @@ static int tdb_free_region(struct tdb_context *tdb, tdb_off_t offset, ssize_t le
/*
wipe the entire database, deleting all records. This can be done
- very fast by using a global lock. The entire data portion of the
+ very fast by using a allrecord lock. The entire data portion of the
file becomes a single entry in the freelist.
This code carefully steps around the recovery area, leaving it alone
*/
-int tdb_wipe_all(struct tdb_context *tdb)
+_PUBLIC_ int tdb_wipe_all(struct tdb_context *tdb)
{
int i;
tdb_off_t offset = 0;
@@ -916,7 +919,7 @@ static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
/*
repack a tdb
*/
-int tdb_repack(struct tdb_context *tdb)
+_PUBLIC_ int tdb_repack(struct tdb_context *tdb)
{
struct tdb_context *tmp_db;
struct traverse_state state;
@@ -986,10 +989,24 @@ int tdb_repack(struct tdb_context *tdb)
return 0;
}
+/* Even on files, we can get partial writes due to signals. */
+bool tdb_write_all(int fd, const void *buf, size_t count)
+{
+ while (count) {
+ ssize_t ret;
+ ret = write(fd, buf, count);
+ if (ret < 0)
+ return false;
+ buf = (const char *)buf + ret;
+ count -= ret;
+ }
+ return true;
+}
+
#ifdef TDB_TRACE
static void tdb_trace_write(struct tdb_context *tdb, const char *str)
{
- if (write(tdb->tracefd, str, strlen(str)) != strlen(str)) {
+ if (!tdb_write_all(tdb->tracefd, str, strlen(str))) {
close(tdb->tracefd);
tdb->tracefd = -1;
}
diff --git a/lib/tdb/common/tdb_private.h b/lib/tdb/common/tdb_private.h
index be9be72b15..140d4ecec5 100644
--- a/lib/tdb/common/tdb_private.h
+++ b/lib/tdb/common/tdb_private.h
@@ -4,11 +4,11 @@
trivial database library - private includes
Copyright (C) Andrew Tridgell 2005
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -49,6 +49,8 @@ typedef uint32_t tdb_off_t;
#define TDB_FREE_MAGIC (~TDB_MAGIC)
#define TDB_DEAD_MAGIC (0xFEE1DEAD)
#define TDB_RECOVERY_MAGIC (0xf53bc0e7U)
+#define TDB_RECOVERY_INVALID_MAGIC (0x0)
+#define TDB_HASH_RWLOCK_MAGIC (0xbad1a51U)
#define TDB_ALIGNMENT 4
#define DEFAULT_HASH_SIZE 131
#define FREELIST_TOP (sizeof(struct tdb_header))
@@ -101,7 +103,7 @@ void tdb_trace_2rec_retrec(struct tdb_context *tdb, const char *op,
#endif /* !TDB_TRACE */
/* lock offsets */
-#define GLOBAL_LOCK 0
+#define OPEN_LOCK 0
#define ACTIVE_LOCK 4
#define TRANSACTION_LOCK 8
@@ -146,11 +148,13 @@ struct tdb_header {
tdb_off_t rwlocks; /* obsolete - kept to detect old formats */
tdb_off_t recovery_start; /* offset of transaction recovery region */
tdb_off_t sequence_number; /* used when TDB_SEQNUM is set */
- tdb_off_t reserved[29];
+ uint32_t magic1_hash; /* hash of TDB_MAGIC_FOOD. */
+ uint32_t magic2_hash; /* hash of TDB_MAGIC. */
+ tdb_off_t reserved[27];
};
struct tdb_lock_type {
- int list;
+ uint32_t off;
uint32_t count;
uint32_t ltype;
};
@@ -162,6 +166,15 @@ struct tdb_traverse_lock {
int lock_rw;
};
+enum tdb_lock_flags {
+ /* WAIT == F_SETLKW, NOWAIT == F_SETLK */
+ TDB_LOCK_NOWAIT = 0,
+ TDB_LOCK_WAIT = 1,
+ /* If set, don't log an error on failure. */
+ TDB_LOCK_PROBE = 2,
+ /* If set, don't actually lock at all. */
+ TDB_LOCK_MARK_ONLY = 4,
+};
struct tdb_methods {
int (*tdb_read)(struct tdb_context *, tdb_off_t , void *, tdb_len_t , int );
@@ -169,7 +182,6 @@ struct tdb_methods {
void (*next_hash_chain)(struct tdb_context *, uint32_t *);
int (*tdb_oob)(struct tdb_context *, tdb_off_t , int );
int (*tdb_expand_file)(struct tdb_context *, tdb_off_t , tdb_off_t );
- int (*tdb_brlock)(struct tdb_context *, tdb_off_t , int, int, int, size_t);
};
struct tdb_context {
@@ -180,7 +192,7 @@ struct tdb_context {
int read_only; /* opened read-only */
int traverse_read; /* read-only traversal */
int traverse_write; /* read-write traversal */
- struct tdb_lock_type global_lock;
+ struct tdb_lock_type allrecord_lock; /* .offset == upgradable */
int num_lockrecs;
struct tdb_lock_type *lockrecs; /* only real locks, all with count>0 */
enum TDB_ERROR ecode; /* error code for last tdb error */
@@ -193,12 +205,10 @@ struct tdb_context {
struct tdb_logging_context log;
unsigned int (*hash_fn)(TDB_DATA *key);
int open_flags; /* flags used in the open - needed by reopen */
- unsigned int num_locks; /* number of chain locks held */
const struct tdb_methods *methods;
struct tdb_transaction *transaction;
int page_size;
int max_dead_records;
- int transaction_lock_count;
#ifdef TDB_TRACE
int tracefd;
#endif
@@ -213,11 +223,29 @@ int tdb_munmap(struct tdb_context *tdb);
void tdb_mmap(struct tdb_context *tdb);
int tdb_lock(struct tdb_context *tdb, int list, int ltype);
int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype);
+int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ enum tdb_lock_flags flags);
+int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ bool mark_lock);
int tdb_unlock(struct tdb_context *tdb, int list, int ltype);
-int tdb_brlock(struct tdb_context *tdb, tdb_off_t offset, int rw_type, int lck_type, int probe, size_t len);
-int tdb_transaction_lock(struct tdb_context *tdb, int ltype);
-int tdb_transaction_unlock(struct tdb_context *tdb);
-int tdb_brlock_upgrade(struct tdb_context *tdb, tdb_off_t offset, size_t len);
+int tdb_brlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags);
+int tdb_brunlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len);
+bool tdb_have_extra_locks(struct tdb_context *tdb);
+void tdb_release_transaction_locks(struct tdb_context *tdb);
+int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags lockflags);
+int tdb_transaction_unlock(struct tdb_context *tdb, int ltype);
+int tdb_recovery_area(struct tdb_context *tdb,
+ const struct tdb_methods *methods,
+ tdb_off_t *recovery_offset,
+ struct tdb_record *rec);
+int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable);
+int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock);
+int tdb_allrecord_upgrade(struct tdb_context *tdb);
int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off);
int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off);
int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
@@ -229,7 +257,7 @@ int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off);
int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off);
-int _tdb_transaction_cancel(struct tdb_context *tdb);
+bool tdb_needs_recovery(struct tdb_context *tdb);
int tdb_rec_read(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
int tdb_rec_write(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
int tdb_do_delete(struct tdb_context *tdb, tdb_off_t rec_ptr, struct tdb_record *rec);
@@ -245,5 +273,9 @@ void tdb_io_init(struct tdb_context *tdb);
int tdb_expand(struct tdb_context *tdb, tdb_off_t size);
int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off,
struct tdb_record *rec);
-
-
+bool tdb_write_all(int fd, const void *buf, size_t count);
+int tdb_transaction_recover(struct tdb_context *tdb);
+void tdb_header_hash(struct tdb_context *tdb,
+ uint32_t *magic1_hash, uint32_t *magic2_hash);
+unsigned int tdb_old_hash(TDB_DATA *key);
+size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off);
diff --git a/lib/tdb/common/transaction.c b/lib/tdb/common/transaction.c
index b8988ea830..e4573cb0a9 100644
--- a/lib/tdb/common/transaction.c
+++ b/lib/tdb/common/transaction.c
@@ -8,7 +8,7 @@
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -59,7 +59,7 @@
- allow for nested calls to tdb_transaction_start(), re-using the
existing transaction record. If the inner transaction is cancelled
then a subsequent commit will fail
-
+
- keep a mirrored copy of the tdb hash chain heads to allow for the
fast hash heads scan on traverse, updating the mirrored copy in
the transaction version of tdb_write
@@ -76,7 +76,7 @@
to reduce this to 3 or even 2 with some more work.
- check for a valid recovery record on open of the tdb, while the
- global lock is held. Automatically recover from the transaction
+ open lock is held. Automatically recover from the transaction
recovery area if needed, then continue with the open as
usual. This allows for smooth crash recovery with no administrator
intervention.
@@ -135,14 +135,11 @@ struct tdb_transaction {
bool prepared;
tdb_off_t magic_offset;
- /* set when the GLOBAL_LOCK has been taken */
- bool global_lock_taken;
-
/* old file size before transaction */
tdb_len_t old_map_size;
- /* we should re-pack on commit */
- bool need_repack;
+ /* did we expand in this transaction */
+ bool expanded;
};
@@ -188,7 +185,7 @@ static int transaction_read(struct tdb_context *tdb, tdb_off_t off, void *buf,
goto fail;
}
}
-
+
/* now copy it out of this block */
memcpy(buf, tdb->transaction->blocks[blk] + (off % tdb->transaction->block_size), len);
if (cv) {
@@ -295,7 +292,7 @@ static int transaction_write(struct tdb_context *tdb, tdb_off_t off,
}
}
}
-
+
/* overwrite part of an existing block */
if (buf == NULL) {
memset(tdb->transaction->blocks[blk] + off, 0, len);
@@ -406,17 +403,8 @@ static int transaction_expand_file(struct tdb_context *tdb, tdb_off_t size,
return -1;
}
- tdb->transaction->need_repack = true;
-
- return 0;
-}
+ tdb->transaction->expanded = true;
-/*
- brlock during a transaction - ignore them
-*/
-static int transaction_brlock(struct tdb_context *tdb, tdb_off_t offset,
- int rw_type, int lck_type, int probe, size_t len)
-{
return 0;
}
@@ -426,7 +414,6 @@ static const struct tdb_methods transaction_methods = {
transaction_next_hash_chain,
transaction_oob,
transaction_expand_file,
- transaction_brlock
};
@@ -434,7 +421,8 @@ static const struct tdb_methods transaction_methods = {
start a tdb transaction. No token is returned, as only a single
transaction is allowed to be pending per tdb_context
*/
-int tdb_transaction_start(struct tdb_context *tdb)
+static int _tdb_transaction_start(struct tdb_context *tdb,
+ enum tdb_lock_flags lockflags)
{
/* some sanity checks */
if (tdb->read_only || (tdb->flags & TDB_INTERNAL) || tdb->traverse_read) {
@@ -455,7 +443,7 @@ int tdb_transaction_start(struct tdb_context *tdb)
return 0;
}
- if (tdb->num_locks != 0 || tdb->global_lock.count) {
+ if (tdb_have_extra_locks(tdb)) {
/* the caller must not have any locks when starting a
transaction as otherwise we'll be screwed by lack
of nested locks in posix */
@@ -486,18 +474,20 @@ int tdb_transaction_start(struct tdb_context *tdb)
/* get the transaction write lock. This is a blocking lock. As
discussed with Volker, there are a number of ways we could
make this async, which we will probably do in the future */
- if (tdb_transaction_lock(tdb, F_WRLCK) == -1) {
+ if (tdb_transaction_lock(tdb, F_WRLCK, lockflags) == -1) {
SAFE_FREE(tdb->transaction->blocks);
SAFE_FREE(tdb->transaction);
+ if ((lockflags & TDB_LOCK_WAIT) == 0) {
+ tdb->ecode = TDB_ERR_NOLOCK;
+ }
return -1;
}
-
+
/* get a read lock from the freelist to the end of file. This
is upgraded to a write lock during the commit */
- if (tdb_brlock(tdb, FREELIST_TOP, F_RDLCK, F_SETLKW, 0, 0) == -1) {
+ if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true) == -1) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to get hash locks\n"));
- tdb->ecode = TDB_ERR_LOCK;
- goto fail;
+ goto fail_allrecord_lock;
}
/* setup a copy of the hash table heads so the hash scan in
@@ -528,16 +518,26 @@ int tdb_transaction_start(struct tdb_context *tdb)
/* Trace at the end, so we get sequence number correct. */
tdb_trace(tdb, "tdb_transaction_start");
return 0;
-
+
fail:
- tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0);
- tdb_transaction_unlock(tdb);
+ tdb_allrecord_unlock(tdb, F_RDLCK, false);
+fail_allrecord_lock:
+ tdb_transaction_unlock(tdb, F_WRLCK);
SAFE_FREE(tdb->transaction->blocks);
SAFE_FREE(tdb->transaction->hash_heads);
SAFE_FREE(tdb->transaction);
return -1;
}
+_PUBLIC_ int tdb_transaction_start(struct tdb_context *tdb)
+{
+ return _tdb_transaction_start(tdb, TDB_LOCK_WAIT);
+}
+
+_PUBLIC_ int tdb_transaction_start_nonblock(struct tdb_context *tdb)
+{
+ return _tdb_transaction_start(tdb, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+}
/*
sync to disk
@@ -548,7 +548,11 @@ static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t
return 0;
}
+#ifdef HAVE_FDATASYNC
+ if (fdatasync(tdb->fd) != 0) {
+#else
if (fsync(tdb->fd) != 0) {
+#endif
tdb->ecode = TDB_ERR_IO;
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: fsync failed\n"));
return -1;
@@ -569,7 +573,7 @@ static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t
}
-int _tdb_transaction_cancel(struct tdb_context *tdb)
+static int _tdb_transaction_cancel(struct tdb_context *tdb)
{
int i, ret = 0;
@@ -596,53 +600,32 @@ int _tdb_transaction_cancel(struct tdb_context *tdb)
if (tdb->transaction->magic_offset) {
const struct tdb_methods *methods = tdb->transaction->io_methods;
- uint32_t zero = 0;
+ const uint32_t invalid = TDB_RECOVERY_INVALID_MAGIC;
/* remove the recovery marker */
- if (methods->tdb_write(tdb, tdb->transaction->magic_offset, &zero, 4) == -1 ||
+ if (methods->tdb_write(tdb, tdb->transaction->magic_offset, &invalid, 4) == -1 ||
transaction_sync(tdb, tdb->transaction->magic_offset, 4) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_cancel: failed to remove recovery magic\n"));
ret = -1;
}
}
- if (tdb->transaction->global_lock_taken) {
- tdb_brlock(tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0, 1);
- tdb->transaction->global_lock_taken = false;
- }
-
- /* remove any global lock created during the transaction */
- if (tdb->global_lock.count != 0) {
- tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 4*tdb->header.hash_size);
- tdb->global_lock.count = 0;
- }
-
- /* remove any locks created during the transaction */
- if (tdb->num_locks != 0) {
- for (i=0;i<tdb->num_lockrecs;i++) {
- tdb_brlock(tdb,FREELIST_TOP+4*tdb->lockrecs[i].list,
- F_UNLCK,F_SETLKW, 0, 1);
- }
- tdb->num_locks = 0;
- tdb->num_lockrecs = 0;
- SAFE_FREE(tdb->lockrecs);
- }
+ /* This also removes the OPEN_LOCK, if we have it. */
+ tdb_release_transaction_locks(tdb);
/* restore the normal io methods */
tdb->methods = tdb->transaction->io_methods;
- tdb_brlock(tdb, FREELIST_TOP, F_UNLCK, F_SETLKW, 0, 0);
- tdb_transaction_unlock(tdb);
SAFE_FREE(tdb->transaction->hash_heads);
SAFE_FREE(tdb->transaction);
-
+
return ret;
}
/*
cancel the current transaction
*/
-int tdb_transaction_cancel(struct tdb_context *tdb)
+_PUBLIC_ int tdb_transaction_cancel(struct tdb_context *tdb)
{
tdb_trace(tdb, "tdb_transaction_cancel");
return _tdb_transaction_cancel(tdb);
@@ -675,6 +658,34 @@ static tdb_len_t tdb_recovery_size(struct tdb_context *tdb)
return recovery_size;
}
+int tdb_recovery_area(struct tdb_context *tdb,
+ const struct tdb_methods *methods,
+ tdb_off_t *recovery_offset,
+ struct tdb_record *rec)
+{
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, recovery_offset) == -1) {
+ return -1;
+ }
+
+ if (*recovery_offset == 0) {
+ rec->rec_len = 0;
+ return 0;
+ }
+
+ if (methods->tdb_read(tdb, *recovery_offset, rec, sizeof(*rec),
+ DOCONV()) == -1) {
+ return -1;
+ }
+
+ /* ignore invalid recovery regions: can happen in crash */
+ if (rec->magic != TDB_RECOVERY_MAGIC &&
+ rec->magic != TDB_RECOVERY_INVALID_MAGIC) {
+ *recovery_offset = 0;
+ rec->rec_len = 0;
+ }
+ return 0;
+}
+
/*
allocate the recovery area, or use an existing recovery area if it is
large enough
@@ -688,19 +699,11 @@ static int tdb_recovery_allocate(struct tdb_context *tdb,
const struct tdb_methods *methods = tdb->transaction->io_methods;
tdb_off_t recovery_head;
- if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ if (tdb_recovery_area(tdb, methods, &recovery_head, &rec) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to read recovery head\n"));
return -1;
}
- rec.rec_len = 0;
-
- if (recovery_head != 0 &&
- methods->tdb_read(tdb, recovery_head, &rec, sizeof(rec), DOCONV()) == -1) {
- TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to read recovery record\n"));
- return -1;
- }
-
*recovery_size = tdb_recovery_size(tdb);
if (recovery_head != 0 && *recovery_size <= rec.rec_len) {
@@ -793,11 +796,11 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
rec = (struct tdb_record *)data;
memset(rec, 0, sizeof(*rec));
- rec->magic = 0;
+ rec->magic = TDB_RECOVERY_INVALID_MAGIC;
rec->data_len = recovery_size;
rec->rec_len = recovery_max_size;
rec->key_len = old_map_size;
- CONVERT(rec);
+ CONVERT(*rec);
/* build the recovery data into a single blob to allow us to do a single
large write, which should be more efficient */
@@ -815,7 +818,7 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
if (i == tdb->transaction->num_blocks-1) {
length = tdb->transaction->last_block_size;
}
-
+
if (offset >= old_map_size) {
continue;
}
@@ -844,7 +847,9 @@ static int transaction_setup_recovery(struct tdb_context *tdb,
/* and the tailer */
tailer = sizeof(*rec) + recovery_max_size;
memcpy(p, &tailer, 4);
- CONVERT(p);
+ if (DOCONV()) {
+ tdb_convert(p, 4);
+ }
/* write the recovery data to the recovery area */
if (methods->tdb_write(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) {
@@ -928,10 +933,10 @@ static int _tdb_transaction_prepare_commit(struct tdb_context *tdb)
}
methods = tdb->transaction->io_methods;
-
+
/* if there are any locks pending then the caller has not
nested their locks properly, so fail the transaction */
- if (tdb->num_locks || tdb->global_lock.count) {
+ if (tdb_have_extra_locks(tdb)) {
tdb->ecode = TDB_ERR_LOCK;
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: locks pending on commit\n"));
_tdb_transaction_cancel(tdb);
@@ -939,24 +944,20 @@ static int _tdb_transaction_prepare_commit(struct tdb_context *tdb)
}
/* upgrade the main transaction lock region to a write lock */
- if (tdb_brlock_upgrade(tdb, FREELIST_TOP, 0) == -1) {
+ if (tdb_allrecord_upgrade(tdb) == -1) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: failed to upgrade hash locks\n"));
- tdb->ecode = TDB_ERR_LOCK;
_tdb_transaction_cancel(tdb);
return -1;
}
- /* get the global lock - this prevents new users attaching to the database
+ /* get the open lock - this prevents new users attaching to the database
during the commit */
- if (tdb_brlock(tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0, 1) == -1) {
- TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: failed to get global lock\n"));
- tdb->ecode = TDB_ERR_LOCK;
+ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: failed to get open lock\n"));
_tdb_transaction_cancel(tdb);
return -1;
}
- tdb->transaction->global_lock_taken = true;
-
if (!(tdb->flags & TDB_NOSYNC)) {
/* write the recovery data to the end of the file */
if (transaction_setup_recovery(tdb, &tdb->transaction->magic_offset) == -1) {
@@ -982,7 +983,7 @@ static int _tdb_transaction_prepare_commit(struct tdb_context *tdb)
methods->tdb_oob(tdb, tdb->map_size + 1, 1);
}
- /* Keep the global lock until the actual commit */
+ /* Keep the open lock until the actual commit */
return 0;
}
@@ -990,20 +991,42 @@ static int _tdb_transaction_prepare_commit(struct tdb_context *tdb)
/*
prepare to commit the current transaction
*/
-int tdb_transaction_prepare_commit(struct tdb_context *tdb)
-{
+_PUBLIC_ int tdb_transaction_prepare_commit(struct tdb_context *tdb)
+{
tdb_trace(tdb, "tdb_transaction_prepare_commit");
return _tdb_transaction_prepare_commit(tdb);
}
+/* A repack is worthwhile if the largest is less than half total free. */
+static bool repack_worthwhile(struct tdb_context *tdb)
+{
+ tdb_off_t ptr;
+ struct tdb_record rec;
+ tdb_len_t total = 0, largest = 0;
+
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &ptr) == -1) {
+ return false;
+ }
+
+ while (ptr != 0 && tdb_rec_free_read(tdb, ptr, &rec) == 0) {
+ total += rec.rec_len;
+ if (rec.rec_len > largest) {
+ largest = rec.rec_len;
+ }
+ ptr = rec.next;
+ }
+
+ return total > largest * 2;
+}
+
/*
commit the current transaction
*/
-int tdb_transaction_commit(struct tdb_context *tdb)
-{
+_PUBLIC_ int tdb_transaction_commit(struct tdb_context *tdb)
+{
const struct tdb_methods *methods;
int i;
- bool need_repack;
+ bool need_repack = false;
if (tdb->transaction == NULL) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: no transaction\n"));
@@ -1056,7 +1079,7 @@ int tdb_transaction_commit(struct tdb_context *tdb)
if (methods->tdb_write(tdb, offset, tdb->transaction->blocks[i], length) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed during commit\n"));
-
+
/* we've overwritten part of the data and
possibly expanded the file, so we need to
run the crash recovery code */
@@ -1071,6 +1094,11 @@ int tdb_transaction_commit(struct tdb_context *tdb)
SAFE_FREE(tdb->transaction->blocks[i]);
}
+ /* Do this before we drop lock or blocks. */
+ if (tdb->transaction->expanded) {
+ need_repack = repack_worthwhile(tdb);
+ }
+
SAFE_FREE(tdb->transaction->blocks);
tdb->transaction->num_blocks = 0;
@@ -1094,8 +1122,6 @@ int tdb_transaction_commit(struct tdb_context *tdb)
utime(tdb->name, NULL);
#endif
- need_repack = tdb->transaction->need_repack;
-
/* use a transaction cancel to free memory and remove the
transaction locks */
_tdb_transaction_cancel(tdb);
@@ -1110,7 +1136,7 @@ int tdb_transaction_commit(struct tdb_context *tdb)
/*
recover from an aborted transaction. Must be called with exclusive
- database write access already established (including the global
+ database write access already established (including the open
lock to prevent new processes attaching)
*/
int tdb_transaction_recover(struct tdb_context *tdb)
@@ -1211,16 +1237,6 @@ int tdb_transaction_recover(struct tdb_context *tdb)
tdb->ecode = TDB_ERR_IO;
return -1;
}
-
- /* reduce the file size to the old size */
- tdb_munmap(tdb);
- if (ftruncate(tdb->fd, recovery_eof) != 0) {
- TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to reduce to recovery size\n"));
- tdb->ecode = TDB_ERR_IO;
- return -1;
- }
- tdb->map_size = recovery_eof;
- tdb_mmap(tdb);
if (transaction_sync(tdb, 0, recovery_eof) == -1) {
TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync2 recovery\n"));
@@ -1234,3 +1250,28 @@ int tdb_transaction_recover(struct tdb_context *tdb)
/* all done */
return 0;
}
+
+/* Any I/O failures we say "needs recovery". */
+bool tdb_needs_recovery(struct tdb_context *tdb)
+{
+ tdb_off_t recovery_head;
+ struct tdb_record rec;
+
+ /* find the recovery area */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ return true;
+ }
+
+ if (recovery_head == 0) {
+ /* we have never allocated a recovery record */
+ return false;
+ }
+
+ /* read the recovery record */
+ if (tdb->methods->tdb_read(tdb, recovery_head, &rec,
+ sizeof(rec), DOCONV()) == -1) {
+ return true;
+ }
+
+ return (rec.magic == TDB_RECOVERY_MAGIC);
+}
diff --git a/lib/tdb/common/traverse.c b/lib/tdb/common/traverse.c
index c340dd354b..517fecb4fc 100644
--- a/lib/tdb/common/traverse.c
+++ b/lib/tdb/common/traverse.c
@@ -6,11 +6,11 @@
Copyright (C) Andrew Tridgell 1999-2005
Copyright (C) Paul `Rusty' Russell 2000
Copyright (C) Jeremy Allison 2000-2003
-
+
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -44,7 +44,7 @@ static tdb_off_t tdb_next_lock(struct tdb_context *tdb, struct tdb_traverse_lock
common for the use of tdb with ldb, where large
hashes are used. In that case we spend most of our
time in tdb_brlock(), locking empty hash chains.
-
+
To avoid this, we do an unlocked pre-check to see
if the hash chain is empty before starting to look
inside it. If it is empty then we can avoid that
@@ -52,7 +52,7 @@ static tdb_off_t tdb_next_lock(struct tdb_context *tdb, struct tdb_traverse_lock
the value we get back, as we read it without a
lock, so instead we get the lock and re-fetch the
value below.
-
+
Notice that not doing this optimisation on the
first hash chain is critical. We must guarantee
that we have done at least one fcntl lock at the
@@ -62,7 +62,7 @@ static tdb_off_t tdb_next_lock(struct tdb_context *tdb, struct tdb_traverse_lock
could possibly miss those with this trick, but we
could miss them anyway without this trick, so the
semantics don't change.
-
+
With a non-indexed ldb search this trick gains us a
factor of around 80 in speed on a linux 2.6.x
system (testing using ldbtest).
@@ -212,7 +212,7 @@ out:
/*
a write style traverse - temporarily marks the db read only
*/
-int tdb_traverse_read(struct tdb_context *tdb,
+_PUBLIC_ int tdb_traverse_read(struct tdb_context *tdb,
tdb_traverse_func fn, void *private_data)
{
struct tdb_traverse_lock tl = { NULL, 0, 0, F_RDLCK };
@@ -220,7 +220,7 @@ int tdb_traverse_read(struct tdb_context *tdb,
/* we need to get a read lock on the transaction lock here to
cope with the lock ordering semantics of solaris10 */
- if (tdb_transaction_lock(tdb, F_RDLCK)) {
+ if (tdb_transaction_lock(tdb, F_RDLCK, TDB_LOCK_WAIT)) {
return -1;
}
@@ -229,7 +229,7 @@ int tdb_traverse_read(struct tdb_context *tdb,
ret = tdb_traverse_internal(tdb, fn, private_data, &tl);
tdb->traverse_read--;
- tdb_transaction_unlock(tdb);
+ tdb_transaction_unlock(tdb, F_RDLCK);
return ret;
}
@@ -241,7 +241,7 @@ int tdb_traverse_read(struct tdb_context *tdb,
WARNING: The data buffer given to the callback fn does NOT meet the
alignment restrictions malloc gives you.
*/
-int tdb_traverse(struct tdb_context *tdb,
+_PUBLIC_ int tdb_traverse(struct tdb_context *tdb,
tdb_traverse_func fn, void *private_data)
{
struct tdb_traverse_lock tl = { NULL, 0, 0, F_WRLCK };
@@ -251,7 +251,7 @@ int tdb_traverse(struct tdb_context *tdb,
return tdb_traverse_read(tdb, fn, private_data);
}
- if (tdb_transaction_lock(tdb, F_WRLCK)) {
+ if (tdb_transaction_lock(tdb, F_WRLCK, TDB_LOCK_WAIT)) {
return -1;
}
@@ -260,14 +260,14 @@ int tdb_traverse(struct tdb_context *tdb,
ret = tdb_traverse_internal(tdb, fn, private_data, &tl);
tdb->traverse_write--;
- tdb_transaction_unlock(tdb);
+ tdb_transaction_unlock(tdb, F_WRLCK);
return ret;
}
/* find the first entry in the database and return its key */
-TDB_DATA tdb_firstkey(struct tdb_context *tdb)
+_PUBLIC_ TDB_DATA tdb_firstkey(struct tdb_context *tdb)
{
TDB_DATA key;
struct tdb_record rec;
@@ -298,7 +298,7 @@ TDB_DATA tdb_firstkey(struct tdb_context *tdb)
}
/* find the next entry in the database, returning its key */
-TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA oldkey)
+_PUBLIC_ TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA oldkey)
{
uint32_t oldhash;
TDB_DATA key = tdb_null;
diff --git a/lib/tdb/config.guess b/lib/tdb/config.guess
deleted file mode 100755
index da83314608..0000000000
--- a/lib/tdb/config.guess
+++ /dev/null
@@ -1,1561 +0,0 @@
-#! /bin/sh
-# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
-# Free Software Foundation, Inc.
-
-timestamp='2009-04-27'
-
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner <per@bothner.com>.
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
-#
-# The plan is that this can be called by configure scripts if you
-# don't specify an explicit build system type.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION]
-
-Output the configuration name of the system \`$me' is run on.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.guess ($timestamp)
-
-Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help" >&2
- exit 1 ;;
- * )
- break ;;
- esac
-done
-
-if test $# != 0; then
- echo "$me: too many arguments$help" >&2
- exit 1
-fi
-
-trap 'exit 1' 1 2 15
-
-# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
-# compiler to aid in system detection is discouraged as it requires
-# temporary files to be created and, as you can see below, it is a
-# headache to deal with in a portable fashion.
-
-# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
-# use `HOST_CC' if defined, but it is deprecated.
-
-# Portable tmp directory creation inspired by the Autoconf team.
-
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,) echo "int x;" > $dummy.c ;
- for c in cc gcc c89 c99 ; do
- if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
- CC_FOR_BUILD="$c"; break ;
- fi ;
- done ;
- if test x"$CC_FOR_BUILD" = x ; then
- CC_FOR_BUILD=no_compiler_found ;
- fi
- ;;
- ,,*) CC_FOR_BUILD=$CC ;;
- ,*,*) CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
-
-# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
-# (ghazi@noc.rutgers.edu 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
- PATH=$PATH:/.attbin ; export PATH
-fi
-
-UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
-UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
-UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
-UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
-
-# Note: order is significant - the case branches are not exclusive.
-
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
- *:NetBSD:*:*)
- # NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
- # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
- # switched to ELF, *-*-netbsd* would select the old
- # object file format. This provides both forward
- # compatibility and a consistent mechanism for selecting the
- # object file format.
- #
- # Note: NetBSD doesn't particularly care about the vendor
- # portion of the name. We always set it to "unknown".
- sysctl="sysctl -n hw.machine_arch"
- UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
- case "${UNAME_MACHINE_ARCH}" in
- armeb) machine=armeb-unknown ;;
- arm*) machine=arm-unknown ;;
- sh3el) machine=shl-unknown ;;
- sh3eb) machine=sh-unknown ;;
- sh5el) machine=sh5le-unknown ;;
- *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
- esac
- # The Operating System including object format, if it has switched
- # to ELF recently, or will in the future.
- case "${UNAME_MACHINE_ARCH}" in
- arm*|i386|m68k|ns32k|sh3*|sparc|vax)
- eval $set_cc_for_build
- if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
- then
- # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
- # Return netbsd for either. FIX?
- os=netbsd
- else
- os=netbsdelf
- fi
- ;;
- *)
- os=netbsd
- ;;
- esac
- # The OS release
- # Debian GNU/NetBSD machines have a different userland, and
- # thus, need a distinct triplet. However, they do not need
- # kernel version information, so it can be replaced with a
- # suitable tag, in the style of linux-gnu.
- case "${UNAME_VERSION}" in
- Debian*)
- release='-gnu'
- ;;
- *)
- release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
- ;;
- esac
- # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
- # contains redundant information, the shorter form:
- # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}"
- exit ;;
- *:OpenBSD:*:*)
- UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
- exit ;;
- *:ekkoBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
- exit ;;
- *:SolidBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
- exit ;;
- macppc:MirBSD:*:*)
- echo powerpc-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- *:MirBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- alpha:OSF1:*:*)
- case $UNAME_RELEASE in
- *4.0)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
- ;;
- *5.*)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
- ;;
- esac
- # According to Compaq, /usr/sbin/psrinfo has been available on
- # OSF/1 and Tru64 systems produced since 1995. I hope that
- # covers most systems running today. This code pipes the CPU
- # types through head -n 1, so we only detect the type of CPU 0.
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
- case "$ALPHA_CPU_TYPE" in
- "EV4 (21064)")
- UNAME_MACHINE="alpha" ;;
- "EV4.5 (21064)")
- UNAME_MACHINE="alpha" ;;
- "LCA4 (21066/21068)")
- UNAME_MACHINE="alpha" ;;
- "EV5 (21164)")
- UNAME_MACHINE="alphaev5" ;;
- "EV5.6 (21164A)")
- UNAME_MACHINE="alphaev56" ;;
- "EV5.6 (21164PC)")
- UNAME_MACHINE="alphapca56" ;;
- "EV5.7 (21164PC)")
- UNAME_MACHINE="alphapca57" ;;
- "EV6 (21264)")
- UNAME_MACHINE="alphaev6" ;;
- "EV6.7 (21264A)")
- UNAME_MACHINE="alphaev67" ;;
- "EV6.8CB (21264C)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8AL (21264B)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8CX (21264D)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.9A (21264/EV69A)")
- UNAME_MACHINE="alphaev69" ;;
- "EV7 (21364)")
- UNAME_MACHINE="alphaev7" ;;
- "EV7.9 (21364A)")
- UNAME_MACHINE="alphaev79" ;;
- esac
- # A Pn.n version is a patched version.
- # A Vn.n version is a released version.
- # A Tn.n version is a released field test version.
- # A Xn.n version is an unreleased experimental baselevel.
- # 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- exit ;;
- Alpha\ *:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # Should we change UNAME_MACHINE based on the output of uname instead
- # of the specific Alpha model?
- echo alpha-pc-interix
- exit ;;
- 21064:Windows_NT:50:3)
- echo alpha-dec-winnt3.5
- exit ;;
- Amiga*:UNIX_System_V:4.0:*)
- echo m68k-unknown-sysv4
- exit ;;
- *:[Aa]miga[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-amigaos
- exit ;;
- *:[Mm]orph[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-morphos
- exit ;;
- *:OS/390:*:*)
- echo i370-ibm-openedition
- exit ;;
- *:z/VM:*:*)
- echo s390-ibm-zvmoe
- exit ;;
- *:OS400:*:*)
- echo powerpc-ibm-os400
- exit ;;
- arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
- echo arm-acorn-riscix${UNAME_RELEASE}
- exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
- echo arm-unknown-riscos
- exit ;;
- SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
- echo hppa1.1-hitachi-hiuxmpp
- exit ;;
- Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
- # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
- if test "`(/bin/universe) 2>/dev/null`" = att ; then
- echo pyramid-pyramid-sysv3
- else
- echo pyramid-pyramid-bsd
- fi
- exit ;;
- NILE*:*:*:dcosx)
- echo pyramid-pyramid-svr4
- exit ;;
- DRS?6000:unix:4.0:6*)
- echo sparc-icl-nx6
- exit ;;
- DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
- case `/usr/bin/uname -p` in
- sparc) echo sparc-icl-nx7; exit ;;
- esac ;;
- s390x:SunOS:*:*)
- echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4H:SunOS:5.*:*)
- echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
- echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
- eval $set_cc_for_build
- SUN_ARCH="i386"
- # If there is a compiler, see if it is configured for 64-bit objects.
- # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
- # This test works for both compilers.
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
- if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
- grep IS_64BIT_ARCH >/dev/null
- then
- SUN_ARCH="x86_64"
- fi
- fi
- echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:6*:*)
- # According to config.sub, this is the proper way to canonicalize
- # SunOS6. Hard to guess exactly what SunOS6 will be like, but
- # it's likely to be more like Solaris than SunOS4.
- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:*:*)
- case "`/usr/bin/arch -k`" in
- Series*|S4*)
- UNAME_RELEASE=`uname -v`
- ;;
- esac
- # Japanese Language versions have a version number like `4.1.3-JL'.
- echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
- exit ;;
- sun3*:SunOS:*:*)
- echo m68k-sun-sunos${UNAME_RELEASE}
- exit ;;
- sun*:*:4.2BSD:*)
- UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
- test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
- case "`/bin/arch`" in
- sun3)
- echo m68k-sun-sunos${UNAME_RELEASE}
- ;;
- sun4)
- echo sparc-sun-sunos${UNAME_RELEASE}
- ;;
- esac
- exit ;;
- aushp:SunOS:*:*)
- echo sparc-auspex-sunos${UNAME_RELEASE}
- exit ;;
- # The situation for MiNT is a little confusing. The machine name
- # can be virtually everything (everything which is not
- # "atarist" or "atariste" at least should have a processor
- # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
- # to the lowercase version "mint" (or "freemint"). Finally
- # the system name "TOS" denotes a system which is actually not
- # MiNT. But MiNT is downward compatible to TOS, so this should
- # be no problem.
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
- exit ;;
- hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
- exit ;;
- *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
- exit ;;
- m68k:machten:*:*)
- echo m68k-apple-machten${UNAME_RELEASE}
- exit ;;
- powerpc:machten:*:*)
- echo powerpc-apple-machten${UNAME_RELEASE}
- exit ;;
- RISC*:Mach:*:*)
- echo mips-dec-mach_bsd4.3
- exit ;;
- RISC*:ULTRIX:*:*)
- echo mips-dec-ultrix${UNAME_RELEASE}
- exit ;;
- VAX*:ULTRIX*:*:*)
- echo vax-dec-ultrix${UNAME_RELEASE}
- exit ;;
- 2020:CLIX:*:* | 2430:CLIX:*:*)
- echo clipper-intergraph-clix${UNAME_RELEASE}
- exit ;;
- mips:*:*:UMIPS | mips:*:*:RISCos)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-#ifdef __cplusplus
-#include <stdio.h> /* for printf() prototype */
- int main (int argc, char *argv[]) {
-#else
- int main (argc, argv) int argc; char *argv[]; {
-#endif
- #if defined (host_mips) && defined (MIPSEB)
- #if defined (SYSTYPE_SYSV)
- printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_SVR4)
- printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
- printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
- #endif
- #endif
- exit (-1);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c &&
- dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
- SYSTEM_NAME=`$dummy $dummyarg` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo mips-mips-riscos${UNAME_RELEASE}
- exit ;;
- Motorola:PowerMAX_OS:*:*)
- echo powerpc-motorola-powermax
- exit ;;
- Motorola:*:4.3:PL8-*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:Power_UNIX:*:*)
- echo powerpc-harris-powerunix
- exit ;;
- m88k:CX/UX:7*:*)
- echo m88k-harris-cxux7
- exit ;;
- m88k:*:4*:R4*)
- echo m88k-motorola-sysv4
- exit ;;
- m88k:*:3*:R3*)
- echo m88k-motorola-sysv3
- exit ;;
- AViiON:dgux:*:*)
- # DG/UX returns AViiON for all architectures
- UNAME_PROCESSOR=`/usr/bin/uname -p`
- if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
- then
- if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
- [ ${TARGET_BINARY_INTERFACE}x = x ]
- then
- echo m88k-dg-dgux${UNAME_RELEASE}
- else
- echo m88k-dg-dguxbcs${UNAME_RELEASE}
- fi
- else
- echo i586-dg-dgux${UNAME_RELEASE}
- fi
- exit ;;
- M88*:DolphinOS:*:*) # DolphinOS (SVR3)
- echo m88k-dolphin-sysv3
- exit ;;
- M88*:*:R3*:*)
- # Delta 88k system running SVR3
- echo m88k-motorola-sysv3
- exit ;;
- XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
- echo m88k-tektronix-sysv3
- exit ;;
- Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
- echo m68k-tektronix-bsd
- exit ;;
- *:IRIX*:*:*)
- echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
- exit ;;
- ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
- exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
- i*86:AIX:*:*)
- echo i386-ibm-aix
- exit ;;
- ia64:AIX:*:*)
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:2:3)
- if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <sys/systemcfg.h>
-
- main()
- {
- if (!__power_pc())
- exit(1);
- puts("powerpc-ibm-aix3.2.5");
- exit(0);
- }
-EOF
- if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
- then
- echo "$SYSTEM_NAME"
- else
- echo rs6000-ibm-aix3.2.5
- fi
- elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
- echo rs6000-ibm-aix3.2.4
- else
- echo rs6000-ibm-aix3.2
- fi
- exit ;;
- *:AIX:*:[456])
- IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
- if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
- IBM_ARCH=rs6000
- else
- IBM_ARCH=powerpc
- fi
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${IBM_ARCH}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:*:*)
- echo rs6000-ibm-aix
- exit ;;
- ibmrt:4.4BSD:*|romp-ibm:BSD:*)
- echo romp-ibm-bsd4.4
- exit ;;
- ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
- echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
- exit ;; # report: romp-ibm BSD 4.3
- *:BOSX:*:*)
- echo rs6000-bull-bosx
- exit ;;
- DPX/2?00:B.O.S.:*:*)
- echo m68k-bull-sysv3
- exit ;;
- 9000/[34]??:4.3bsd:1.*:*)
- echo m68k-hp-bsd
- exit ;;
- hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
- echo m68k-hp-bsd4.4
- exit ;;
- 9000/[34678]??:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- case "${UNAME_MACHINE}" in
- 9000/31? ) HP_ARCH=m68000 ;;
- 9000/[34]?? ) HP_ARCH=m68k ;;
- 9000/[678][0-9][0-9])
- if [ -x /usr/bin/getconf ]; then
- sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
- 532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
- 32) HP_ARCH="hppa2.0n" ;;
- 64) HP_ARCH="hppa2.0w" ;;
- '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
- esac ;;
- esac
- fi
- if [ "${HP_ARCH}" = "" ]; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-
- #define _HPUX_SOURCE
- #include <stdlib.h>
- #include <unistd.h>
-
- int main ()
- {
- #if defined(_SC_KERNEL_BITS)
- long bits = sysconf(_SC_KERNEL_BITS);
- #endif
- long cpu = sysconf (_SC_CPU_VERSION);
-
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
- case CPU_PA_RISC2_0:
- #if defined(_SC_KERNEL_BITS)
- switch (bits)
- {
- case 64: puts ("hppa2.0w"); break;
- case 32: puts ("hppa2.0n"); break;
- default: puts ("hppa2.0"); break;
- } break;
- #else /* !defined(_SC_KERNEL_BITS) */
- puts ("hppa2.0"); break;
- #endif
- default: puts ("hppa1.0"); break;
- }
- exit (0);
- }
-EOF
- (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
- test -z "$HP_ARCH" && HP_ARCH=hppa
- fi ;;
- esac
- if [ ${HP_ARCH} = "hppa2.0w" ]
- then
- eval $set_cc_for_build
-
- # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
- # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
- # generating 64-bit code. GNU and HP use different nomenclature:
- #
- # $ CC_FOR_BUILD=cc ./config.guess
- # => hppa2.0w-hp-hpux11.23
- # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
- # => hppa64-hp-hpux11.23
-
- if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
- grep __LP64__ >/dev/null
- then
- HP_ARCH="hppa2.0w"
- else
- HP_ARCH="hppa64"
- fi
- fi
- echo ${HP_ARCH}-hp-hpux${HPUX_REV}
- exit ;;
- ia64:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- echo ia64-hp-hpux${HPUX_REV}
- exit ;;
- 3050*:HI-UX:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <unistd.h>
- int
- main ()
- {
- long cpu = sysconf (_SC_CPU_VERSION);
- /* The order matters, because CPU_IS_HP_MC68K erroneously returns
- true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
- results, however. */
- if (CPU_IS_PA_RISC (cpu))
- {
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
- default: puts ("hppa-hitachi-hiuxwe2"); break;
- }
- }
- else if (CPU_IS_HP_MC68K (cpu))
- puts ("m68k-hitachi-hiuxwe2");
- else puts ("unknown-hitachi-hiuxwe2");
- exit (0);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo unknown-hitachi-hiuxwe2
- exit ;;
- 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
- echo hppa1.1-hp-bsd
- exit ;;
- 9000/8??:4.3bsd:*:*)
- echo hppa1.0-hp-bsd
- exit ;;
- *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
- echo hppa1.0-hp-mpeix
- exit ;;
- hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
- echo hppa1.1-hp-osf
- exit ;;
- hp8??:OSF1:*:*)
- echo hppa1.0-hp-osf
- exit ;;
- i*86:OSF1:*:*)
- if [ -x /usr/sbin/sysversion ] ; then
- echo ${UNAME_MACHINE}-unknown-osf1mk
- else
- echo ${UNAME_MACHINE}-unknown-osf1
- fi
- exit ;;
- parisc*:Lites*:*:*)
- echo hppa1.1-hp-lites
- exit ;;
- C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
- echo c1-convex-bsd
- exit ;;
- C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
- echo c34-convex-bsd
- exit ;;
- C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
- echo c38-convex-bsd
- exit ;;
- C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
- echo c4-convex-bsd
- exit ;;
- CRAY*Y-MP:*:*:*)
- echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*[A-Z]90:*:*:*)
- echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
- | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
- -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
- -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*TS:*:*:*)
- echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*T3E:*:*:*)
- echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*SV1:*:*:*)
- echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- *:UNICOS/mp:*:*)
- echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
- FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- 5000:UNIX_System_V:4.*:*)
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
- echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
- echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
- exit ;;
- sparc*:BSD/OS:*:*)
- echo sparc-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:BSD/OS:*:*)
- echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:FreeBSD:*:*)
- case ${UNAME_MACHINE} in
- pc98)
- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- amd64)
- echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- *)
- echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- esac
- exit ;;
- i*:CYGWIN*:*)
- echo ${UNAME_MACHINE}-pc-cygwin
- exit ;;
- *:MINGW*:*)
- echo ${UNAME_MACHINE}-pc-mingw32
- exit ;;
- i*:windows32*:*)
- # uname -m includes "-pc" on this system.
- echo ${UNAME_MACHINE}-mingw32
- exit ;;
- i*:PW*:*)
- echo ${UNAME_MACHINE}-pc-pw32
- exit ;;
- *:Interix*:[3456]*)
- case ${UNAME_MACHINE} in
- x86)
- echo i586-pc-interix${UNAME_RELEASE}
- exit ;;
- EM64T | authenticamd | genuineintel)
- echo x86_64-unknown-interix${UNAME_RELEASE}
- exit ;;
- IA64)
- echo ia64-unknown-interix${UNAME_RELEASE}
- exit ;;
- esac ;;
- [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
- echo i${UNAME_MACHINE}-pc-mks
- exit ;;
- i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
- # UNAME_MACHINE based on the output of uname instead of i386?
- echo i586-pc-interix
- exit ;;
- i*:UWIN*:*)
- echo ${UNAME_MACHINE}-pc-uwin
- exit ;;
- amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
- echo x86_64-unknown-cygwin
- exit ;;
- p*:CYGWIN*:*)
- echo powerpcle-unknown-cygwin
- exit ;;
- prep*:SunOS:5.*:*)
- echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- *:GNU:*:*)
- # the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
- exit ;;
- *:GNU/*:*:*)
- # other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
- exit ;;
- i*86:Minix:*:*)
- echo ${UNAME_MACHINE}-pc-minix
- exit ;;
- arm*:Linux:*:*)
- eval $set_cc_for_build
- if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep -q __ARM_EABI__
- then
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- else
- echo ${UNAME_MACHINE}-unknown-linux-gnueabi
- fi
- exit ;;
- avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- cris:Linux:*:*)
- echo cris-axis-linux-gnu
- exit ;;
- crisv32:Linux:*:*)
- echo crisv32-axis-linux-gnu
- exit ;;
- frv:Linux:*:*)
- echo frv-unknown-linux-gnu
- exit ;;
- ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- mips:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips
- #undef mipsel
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- mips64:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips64
- #undef mips64el
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- or32:Linux:*:*)
- echo or32-unknown-linux-gnu
- exit ;;
- ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
- exit ;;
- ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
- exit ;;
- alpha:Linux:*:*)
- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
- EV5) UNAME_MACHINE=alphaev5 ;;
- EV56) UNAME_MACHINE=alphaev56 ;;
- PCA56) UNAME_MACHINE=alphapca56 ;;
- PCA57) UNAME_MACHINE=alphapca56 ;;
- EV6) UNAME_MACHINE=alphaev6 ;;
- EV67) UNAME_MACHINE=alphaev67 ;;
- EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
- exit ;;
- padre:Linux:*:*)
- echo sparc-unknown-linux-gnu
- exit ;;
- parisc:Linux:*:* | hppa:Linux:*:*)
- # Look for CPU level
- case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
- esac
- exit ;;
- parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
- exit ;;
- s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
- exit ;;
- sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-gnu
- exit ;;
- x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
- exit ;;
- xtensa*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- i*86:Linux:*:*)
- # The BFD linker knows what the default object file format is, so
- # first see if it will tell us. cd to the root directory to prevent
- # problems with other programs or directories called `ld' in the path.
- # Set LC_ALL=C to ensure ld outputs messages in English.
- ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
- | sed -ne '/supported targets:/!d
- s/[ ][ ]*/ /g
- s/.*supported targets: *//
- s/ .*//
- p'`
- case "$ld_supported_targets" in
- elf32-i386)
- TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
- ;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit ;;
- esac
- # Determine whether the default compiler is a.out or elf
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #ifdef __ELF__
- # ifdef __GLIBC__
- # if __GLIBC__ >= 2
- LIBC=gnu
- # else
- LIBC=gnulibc1
- # endif
- # else
- LIBC=gnulibc1
- # endif
- #else
- #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- LIBC=gnu
- #else
- LIBC=gnuaout
- #endif
- #endif
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^LIBC/{
- s: ::g
- p
- }'`"
- test x"${LIBC}" != x && {
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
- exit
- }
- test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
- ;;
- i*86:DYNIX/ptx:4*:*)
- # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
- # earlier versions are messed up and put the nodename in both
- # sysname and nodename.
- echo i386-sequent-sysv4
- exit ;;
- i*86:UNIX_SV:4.2MP:2.*)
- # Unixware is an offshoot of SVR4, but it has its own version
- # number series starting with 2...
- # I am not positive that other SVR4 systems won't match this,
- # I just have to hope. -- rms.
- # Use sysv4.2uw... so that sysv4* matches it.
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
- exit ;;
- i*86:OS/2:*:*)
- # If we were able to find `uname', then EMX Unix compatibility
- # is probably installed.
- echo ${UNAME_MACHINE}-pc-os2-emx
- exit ;;
- i*86:XTS-300:*:STOP)
- echo ${UNAME_MACHINE}-unknown-stop
- exit ;;
- i*86:atheos:*:*)
- echo ${UNAME_MACHINE}-unknown-atheos
- exit ;;
- i*86:syllable:*:*)
- echo ${UNAME_MACHINE}-pc-syllable
- exit ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
- echo i386-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- i*86:*DOS:*:*)
- echo ${UNAME_MACHINE}-pc-msdosdjgpp
- exit ;;
- i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
- UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
- if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
- echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
- else
- echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
- fi
- exit ;;
- i*86:*:5:[678]*)
- # UnixWare 7.x, OpenUNIX and OpenServer 6.
- case `/bin/uname -X | grep "^Machine"` in
- *486*) UNAME_MACHINE=i486 ;;
- *Pentium) UNAME_MACHINE=i586 ;;
- *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
- esac
- echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
- exit ;;
- i*86:*:3.2:*)
- if test -f /usr/options/cb.name; then
- UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
- echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
- elif /bin/uname -X 2>/dev/null >/dev/null ; then
- UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
- (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
- (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
- && UNAME_MACHINE=i586
- (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
- && UNAME_MACHINE=i686
- (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
- && UNAME_MACHINE=i686
- echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
- else
- echo ${UNAME_MACHINE}-pc-sysv32
- fi
- exit ;;
- pc:*:*:*)
- # Left here for compatibility:
- # uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i586.
- # Note: whatever this is, it MUST be the same as what config.sub
- # prints for the "djgpp" host, or else GDB configury will decide that
- # this is a cross-build.
- echo i586-pc-msdosdjgpp
- exit ;;
- Intel:Mach:3*:*)
- echo i386-pc-mach3
- exit ;;
- paragon:*:*:*)
- echo i860-intel-osf1
- exit ;;
- i860:*:4.*:*) # i860-SVR4
- if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
- echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
- else # Add other i860-SVR4 vendors below as they are discovered.
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
- fi
- exit ;;
- mini*:CTIX:SYS*5:*)
- # "miniframe"
- echo m68010-convergent-sysv
- exit ;;
- mc68k:UNIX:SYSTEM5:3.51m)
- echo m68k-convergent-sysv
- exit ;;
- M680?0:D-NIX:5.3:*)
- echo m68k-diab-dnix
- exit ;;
- M68*:*:R3V[5678]*:*)
- test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
- 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
- OS_REL=''
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4; exit; } ;;
- NCR*:*:4.2:* | MPRAS*:*:4.2:*)
- OS_REL='.3'
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
- echo m68k-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- mc68030:UNIX_System_V:4.*:*)
- echo m68k-atari-sysv4
- exit ;;
- TSUNAMI:LynxOS:2.*:*)
- echo sparc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- rs6000:LynxOS:2.*:*)
- echo rs6000-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
- echo powerpc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- SM[BE]S:UNIX_SV:*:*)
- echo mips-dde-sysv${UNAME_RELEASE}
- exit ;;
- RM*:ReliantUNIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- RM*:SINIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- *:SINIX-*:*:*)
- if uname -p 2>/dev/null >/dev/null ; then
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- echo ${UNAME_MACHINE}-sni-sysv4
- else
- echo ns32k-sni-sysv
- fi
- exit ;;
- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
- # says <Richard.M.Bartel@ccMail.Census.GOV>
- echo i586-unisys-sysv4
- exit ;;
- *:UNIX_System_V:4*:FTX*)
- # From Gerald Hewes <hewes@openmarket.com>.
- # How about differentiating between stratus architectures? -djm
- echo hppa1.1-stratus-sysv4
- exit ;;
- *:*:*:FTX*)
- # From seanf@swdc.stratus.com.
- echo i860-stratus-sysv4
- exit ;;
- i*86:VOS:*:*)
- # From Paul.Green@stratus.com.
- echo ${UNAME_MACHINE}-stratus-vos
- exit ;;
- *:VOS:*:*)
- # From Paul.Green@stratus.com.
- echo hppa1.1-stratus-vos
- exit ;;
- mc68*:A/UX:*:*)
- echo m68k-apple-aux${UNAME_RELEASE}
- exit ;;
- news*:NEWS-OS:6*:*)
- echo mips-sony-newsos6
- exit ;;
- R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
- if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
- else
- echo mips-unknown-sysv${UNAME_RELEASE}
- fi
- exit ;;
- BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
- echo powerpc-be-beos
- exit ;;
- BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
- echo powerpc-apple-beos
- exit ;;
- BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
- echo i586-pc-beos
- exit ;;
- BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
- echo i586-pc-haiku
- exit ;;
- SX-4:SUPER-UX:*:*)
- echo sx4-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-5:SUPER-UX:*:*)
- echo sx5-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-6:SUPER-UX:*:*)
- echo sx6-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-7:SUPER-UX:*:*)
- echo sx7-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8:SUPER-UX:*:*)
- echo sx8-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8R:SUPER-UX:*:*)
- echo sx8r-nec-superux${UNAME_RELEASE}
- exit ;;
- Power*:Rhapsody:*:*)
- echo powerpc-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Rhapsody:*:*)
- echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Darwin:*:*)
- UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- case $UNAME_PROCESSOR in
- unknown) UNAME_PROCESSOR=powerpc ;;
- esac
- echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
- exit ;;
- *:procnto*:*:* | *:QNX:[0123456789]*:*)
- UNAME_PROCESSOR=`uname -p`
- if test "$UNAME_PROCESSOR" = "x86"; then
- UNAME_PROCESSOR=i386
- UNAME_MACHINE=pc
- fi
- echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
- exit ;;
- *:QNX:*:4*)
- echo i386-pc-qnx
- exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
- echo nse-tandem-nsk${UNAME_RELEASE}
- exit ;;
- NSR-?:NONSTOP_KERNEL:*:*)
- echo nsr-tandem-nsk${UNAME_RELEASE}
- exit ;;
- *:NonStop-UX:*:*)
- echo mips-compaq-nonstopux
- exit ;;
- BS2000:POSIX*:*:*)
- echo bs2000-siemens-sysv
- exit ;;
- DS/*:UNIX_System_V:*:*)
- echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
- exit ;;
- *:Plan9:*:*)
- # "uname -m" is not consistent, so use $cputype instead. 386
- # is converted to i386 for consistency with other x86
- # operating systems.
- if test "$cputype" = "386"; then
- UNAME_MACHINE=i386
- else
- UNAME_MACHINE="$cputype"
- fi
- echo ${UNAME_MACHINE}-unknown-plan9
- exit ;;
- *:TOPS-10:*:*)
- echo pdp10-unknown-tops10
- exit ;;
- *:TENEX:*:*)
- echo pdp10-unknown-tenex
- exit ;;
- KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
- echo pdp10-dec-tops20
- exit ;;
- XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
- echo pdp10-xkl-tops20
- exit ;;
- *:TOPS-20:*:*)
- echo pdp10-unknown-tops20
- exit ;;
- *:ITS:*:*)
- echo pdp10-unknown-its
- exit ;;
- SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
- exit ;;
- *:DragonFly:*:*)
- echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
- exit ;;
- *:*VMS:*:*)
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- case "${UNAME_MACHINE}" in
- A*) echo alpha-dec-vms ; exit ;;
- I*) echo ia64-dec-vms ; exit ;;
- V*) echo vax-dec-vms ; exit ;;
- esac ;;
- *:XENIX:*:SysV)
- echo i386-pc-xenix
- exit ;;
- i*86:skyos:*:*)
- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
- exit ;;
- i*86:rdos:*:*)
- echo ${UNAME_MACHINE}-pc-rdos
- exit ;;
- i*86:AROS:*:*)
- echo ${UNAME_MACHINE}-pc-aros
- exit ;;
-esac
-
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
- I don't know.... */
- printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
- printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
- "4"
-#else
- ""
-#endif
- ); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
- printf ("arm-acorn-riscix\n"); exit (0);
-#endif
-
-#if defined (hp300) && !defined (hpux)
- printf ("m68k-hp-bsd\n"); exit (0);
-#endif
-
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
- int version;
- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
- if (version < 4)
- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
- else
- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
- exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
- printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
- printf ("ns32k-encore-mach\n"); exit (0);
-#else
- printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
- printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
- printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
- printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
- struct utsname un;
-
- uname(&un);
-
- if (strncmp(un.version, "V2", 2) == 0) {
- printf ("i386-sequent-ptx2\n"); exit (0);
- }
- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
- printf ("i386-sequent-ptx1\n"); exit (0);
- }
- printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-# include <sys/param.h>
-# if defined (BSD)
-# if BSD == 43
- printf ("vax-dec-bsd4.3\n"); exit (0);
-# else
-# if BSD == 199006
- printf ("vax-dec-bsd4.3reno\n"); exit (0);
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# endif
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# else
- printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
- printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
- exit (1);
-}
-EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
- case `getsysinfo -f cpu_type` in
- c1*)
- echo c1-convex-bsd
- exit ;;
- c2*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- c34*)
- echo c34-convex-bsd
- exit ;;
- c38*)
- echo c38-convex-bsd
- exit ;;
- c4*)
- echo c4-convex-bsd
- exit ;;
- esac
-fi
-
-cat >&2 <<EOF
-$0: unable to guess system type
-
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
-
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
-and
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
-
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
-
-config.guess timestamp = $timestamp
-
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
-/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
-
-hostinfo = `(hostinfo) 2>/dev/null`
-/bin/universe = `(/bin/universe) 2>/dev/null`
-/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
-/bin/arch = `(/bin/arch) 2>/dev/null`
-/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
-
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
-EOF
-
-exit 1
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/tdb/config.mk b/lib/tdb/config.mk
deleted file mode 100644
index b2e322cebc..0000000000
--- a/lib/tdb/config.mk
+++ /dev/null
@@ -1,57 +0,0 @@
-################################################
-# Start SUBSYSTEM LIBTDB
-[LIBRARY::LIBTDB]
-OUTPUT_TYPE = MERGED_OBJ
-CFLAGS = -I$(tdbsrcdir)/include
-#
-# End SUBSYSTEM ldb
-################################################
-
-LIBTDB_OBJ_FILES = $(addprefix $(tdbsrcdir)/common/, \
- tdb.o dump.o io.o lock.o \
- open.o traverse.o freelist.o \
- error.o transaction.o check.o)
-
-################################################
-# Start BINARY tdbtool
-[BINARY::tdbtool]
-INSTALLDIR = BINDIR
-PRIVATE_DEPENDENCIES = \
- LIBTDB
-# End BINARY tdbtool
-################################################
-
-tdbtool_OBJ_FILES = $(tdbsrcdir)/tools/tdbtool.o
-
-################################################
-# Start BINARY tdbtorture
-[BINARY::tdbtorture]
-INSTALLDIR = BINDIR
-PRIVATE_DEPENDENCIES = \
- LIBTDB
-# End BINARY tdbtorture
-################################################
-
-tdbtorture_OBJ_FILES = $(tdbsrcdir)/tools/tdbtorture.o
-
-################################################
-# Start BINARY tdbdump
-[BINARY::tdbdump]
-INSTALLDIR = BINDIR
-PRIVATE_DEPENDENCIES = \
- LIBTDB
-# End BINARY tdbdump
-################################################
-
-tdbdump_OBJ_FILES = $(tdbsrcdir)/tools/tdbdump.o
-
-################################################
-# Start BINARY tdbbackup
-[BINARY::tdbbackup]
-INSTALLDIR = BINDIR
-PRIVATE_DEPENDENCIES = \
- LIBTDB
-# End BINARY tdbbackup
-################################################
-
-tdbbackup_OBJ_FILES = $(tdbsrcdir)/tools/tdbbackup.o
diff --git a/lib/tdb/config.sub b/lib/tdb/config.sub
deleted file mode 100755
index a39437d015..0000000000
--- a/lib/tdb/config.sub
+++ /dev/null
@@ -1,1686 +0,0 @@
-#! /bin/sh
-# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
-# Free Software Foundation, Inc.
-
-timestamp='2009-04-17'
-
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# Configuration subroutine to validate and canonicalize a configuration type.
-# Supply the specified configuration type as an argument.
-# If it is invalid, we print an error message on stderr and exit with code 1.
-# Otherwise, we print the canonical config type on stdout and succeed.
-
-# This file is supposed to be the same for all GNU packages
-# and recognize all the CPU types, system types and aliases
-# that are meaningful with *any* GNU software.
-# Each package is responsible for reporting which valid configurations
-# it does not support. The user should be able to distinguish
-# a failure to support a valid configuration from a meaningless
-# configuration.
-
-# The goal of this file is to map all the various variations of a given
-# machine specification into a single specification in the form:
-# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
-# or in some cases, the newer four-part form:
-# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
-# It is wrong to echo any other type of specification.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
- $0 [OPTION] ALIAS
-
-Canonicalize a configuration name.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.sub ($timestamp)
-
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help"
- exit 1 ;;
-
- *local*)
- # First pass through any local machine types.
- echo $1
- exit ;;
-
- * )
- break ;;
- esac
-done
-
-case $# in
- 0) echo "$me: missing argument$help" >&2
- exit 1;;
- 1) ;;
- *) echo "$me: too many arguments$help" >&2
- exit 1;;
-esac
-
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
- nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
- uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
- kopensolaris*-gnu* | \
- storm-chaos* | os2-emx* | rtmk-nova*)
- os=-$maybe_os
- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
- ;;
- *)
- basic_machine=`echo $1 | sed 's/-[^-]*$//'`
- if [ $basic_machine != $1 ]
- then os=`echo $1 | sed 's/.*-/-/'`
- else os=; fi
- ;;
-esac
-
-### Let's recognize common machines as not being operating systems so
-### that things like config.sub decstation-3100 work. We also
-### recognize some manufacturers as not being operating systems, so we
-### can provide default operating systems below.
-case $os in
- -sun*os*)
- # Prevent following clause from handling this invalid input.
- ;;
- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
- -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
- -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
- -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
- -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
- -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray)
- os=
- basic_machine=$1
- ;;
- -sim | -cisco | -oki | -wec | -winbond)
- os=
- basic_machine=$1
- ;;
- -scout)
- ;;
- -wrs)
- os=-vxworks
- basic_machine=$1
- ;;
- -chorusos*)
- os=-chorusos
- basic_machine=$1
- ;;
- -chorusrdb)
- os=-chorusrdb
- basic_machine=$1
- ;;
- -hiux*)
- os=-hiuxwe2
- ;;
- -sco6)
- os=-sco5v6
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5)
- os=-sco3.2v5
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco4)
- os=-sco3.2v4
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2.[4-9]*)
- os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2v[4-9]*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5v6*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco*)
- os=-sco3.2v2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -udk*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -isc)
- os=-isc2.2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -clix*)
- basic_machine=clipper-intergraph
- ;;
- -isc*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -lynx*)
- os=-lynxos
- ;;
- -ptx*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
- ;;
- -windowsnt*)
- os=`echo $os | sed -e 's/windowsnt/winnt/'`
- ;;
- -psos*)
- os=-psos
- ;;
- -mint | -mint[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
-esac
-
-# Decode aliases for certain CPU-COMPANY combinations.
-case $basic_machine in
- # Recognize the basic CPU types without company name.
- # Some are omitted here because they have special meanings below.
- 1750a | 580 \
- | a29k \
- | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
- | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
- | am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
- | bfin \
- | c4x | clipper \
- | d10v | d30v | dlx | dsp16xx \
- | fido | fr30 | frv \
- | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
- | i370 | i860 | i960 | ia64 \
- | ip2k | iq2000 \
- | lm32 \
- | m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep | metag \
- | mips | mipsbe | mipseb | mipsel | mipsle \
- | mips16 \
- | mips64 | mips64el \
- | mips64octeon | mips64octeonel \
- | mips64orion | mips64orionel \
- | mips64r5900 | mips64r5900el \
- | mips64vr | mips64vrel \
- | mips64vr4100 | mips64vr4100el \
- | mips64vr4300 | mips64vr4300el \
- | mips64vr5000 | mips64vr5000el \
- | mips64vr5900 | mips64vr5900el \
- | mipsisa32 | mipsisa32el \
- | mipsisa32r2 | mipsisa32r2el \
- | mipsisa64 | mipsisa64el \
- | mipsisa64r2 | mipsisa64r2el \
- | mipsisa64sb1 | mipsisa64sb1el \
- | mipsisa64sr71k | mipsisa64sr71kel \
- | mipstx39 | mipstx39el \
- | mn10200 | mn10300 \
- | moxie \
- | mt \
- | msp430 \
- | nios | nios2 \
- | ns16k | ns32k \
- | or32 \
- | pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
- | pyramid \
- | score \
- | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
- | sh64 | sh64le \
- | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
- | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
- | spu | strongarm \
- | tahoe | thumb | tic4x | tic80 | tron \
- | v850 | v850e \
- | we32k \
- | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
- | z8k | z80)
- basic_machine=$basic_machine-unknown
- ;;
- m6811 | m68hc11 | m6812 | m68hc12)
- # Motorola 68HC11/12.
- basic_machine=$basic_machine-unknown
- os=-none
- ;;
- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
- ;;
- ms1)
- basic_machine=mt-unknown
- ;;
-
- # We use `pc' rather than `unknown'
- # because (1) that's what they normally are, and
- # (2) the word "unknown" tends to confuse beginning users.
- i*86 | x86_64)
- basic_machine=$basic_machine-pc
- ;;
- # Object if more than one company name word.
- *-*-*)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
- # Recognize the basic CPU types with company name.
- 580-* \
- | a29k-* \
- | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
- | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
- | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
- | avr-* | avr32-* \
- | bfin-* | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
- | clipper-* | craynv-* | cydra-* \
- | d10v-* | d30v-* | dlx-* \
- | elxsi-* \
- | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
- | h8300-* | h8500-* \
- | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
- | i*86-* | i860-* | i960-* | ia64-* \
- | ip2k-* | iq2000-* \
- | lm32-* \
- | m32c-* | m32r-* | m32rle-* \
- | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
- | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
- | mips16-* \
- | mips64-* | mips64el-* \
- | mips64octeon-* | mips64octeonel-* \
- | mips64orion-* | mips64orionel-* \
- | mips64r5900-* | mips64r5900el-* \
- | mips64vr-* | mips64vrel-* \
- | mips64vr4100-* | mips64vr4100el-* \
- | mips64vr4300-* | mips64vr4300el-* \
- | mips64vr5000-* | mips64vr5000el-* \
- | mips64vr5900-* | mips64vr5900el-* \
- | mipsisa32-* | mipsisa32el-* \
- | mipsisa32r2-* | mipsisa32r2el-* \
- | mipsisa64-* | mipsisa64el-* \
- | mipsisa64r2-* | mipsisa64r2el-* \
- | mipsisa64sb1-* | mipsisa64sb1el-* \
- | mipsisa64sr71k-* | mipsisa64sr71kel-* \
- | mipstx39-* | mipstx39el-* \
- | mmix-* \
- | mt-* \
- | msp430-* \
- | nios-* | nios2-* \
- | none-* | np1-* | ns16k-* | ns32k-* \
- | orion-* \
- | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
- | pyramid-* \
- | romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
- | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
- | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
- | sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
- | tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
- | tron-* \
- | v850-* | v850e-* | vax-* \
- | we32k-* \
- | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
- | xstormy16-* | xtensa*-* \
- | ymp-* \
- | z8k-* | z80-*)
- ;;
- # Recognize the basic CPU types without company name, with glob match.
- xtensa*)
- basic_machine=$basic_machine-unknown
- ;;
- # Recognize the various machine names and aliases which stand
- # for a CPU type and a company and sometimes even an OS.
- 386bsd)
- basic_machine=i386-unknown
- os=-bsd
- ;;
- 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
- basic_machine=m68000-att
- ;;
- 3b*)
- basic_machine=we32k-att
- ;;
- a29khif)
- basic_machine=a29k-amd
- os=-udi
- ;;
- abacus)
- basic_machine=abacus-unknown
- ;;
- adobe68k)
- basic_machine=m68010-adobe
- os=-scout
- ;;
- alliant | fx80)
- basic_machine=fx80-alliant
- ;;
- altos | altos3068)
- basic_machine=m68k-altos
- ;;
- am29k)
- basic_machine=a29k-none
- os=-bsd
- ;;
- amd64)
- basic_machine=x86_64-pc
- ;;
- amd64-*)
- basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- amdahl)
- basic_machine=580-amdahl
- os=-sysv
- ;;
- amiga | amiga-*)
- basic_machine=m68k-unknown
- ;;
- amigaos | amigados)
- basic_machine=m68k-unknown
- os=-amigaos
- ;;
- amigaunix | amix)
- basic_machine=m68k-unknown
- os=-sysv4
- ;;
- apollo68)
- basic_machine=m68k-apollo
- os=-sysv
- ;;
- apollo68bsd)
- basic_machine=m68k-apollo
- os=-bsd
- ;;
- aros)
- basic_machine=i386-pc
- os=-aros
- ;;
- aux)
- basic_machine=m68k-apple
- os=-aux
- ;;
- balance)
- basic_machine=ns32k-sequent
- os=-dynix
- ;;
- blackfin)
- basic_machine=bfin-unknown
- os=-linux
- ;;
- blackfin-*)
- basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- c90)
- basic_machine=c90-cray
- os=-unicos
- ;;
- cegcc)
- basic_machine=arm-unknown
- os=-cegcc
- ;;
- convex-c1)
- basic_machine=c1-convex
- os=-bsd
- ;;
- convex-c2)
- basic_machine=c2-convex
- os=-bsd
- ;;
- convex-c32)
- basic_machine=c32-convex
- os=-bsd
- ;;
- convex-c34)
- basic_machine=c34-convex
- os=-bsd
- ;;
- convex-c38)
- basic_machine=c38-convex
- os=-bsd
- ;;
- cray | j90)
- basic_machine=j90-cray
- os=-unicos
- ;;
- craynv)
- basic_machine=craynv-cray
- os=-unicosmp
- ;;
- cr16)
- basic_machine=cr16-unknown
- os=-elf
- ;;
- crds | unos)
- basic_machine=m68k-crds
- ;;
- crisv32 | crisv32-* | etraxfs*)
- basic_machine=crisv32-axis
- ;;
- cris | cris-* | etrax*)
- basic_machine=cris-axis
- ;;
- crx)
- basic_machine=crx-unknown
- os=-elf
- ;;
- da30 | da30-*)
- basic_machine=m68k-da30
- ;;
- decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
- basic_machine=mips-dec
- ;;
- decsystem10* | dec10*)
- basic_machine=pdp10-dec
- os=-tops10
- ;;
- decsystem20* | dec20*)
- basic_machine=pdp10-dec
- os=-tops20
- ;;
- delta | 3300 | motorola-3300 | motorola-delta \
- | 3300-motorola | delta-motorola)
- basic_machine=m68k-motorola
- ;;
- delta88)
- basic_machine=m88k-motorola
- os=-sysv3
- ;;
- dicos)
- basic_machine=i686-pc
- os=-dicos
- ;;
- djgpp)
- basic_machine=i586-pc
- os=-msdosdjgpp
- ;;
- dpx20 | dpx20-*)
- basic_machine=rs6000-bull
- os=-bosx
- ;;
- dpx2* | dpx2*-bull)
- basic_machine=m68k-bull
- os=-sysv3
- ;;
- ebmon29k)
- basic_machine=a29k-amd
- os=-ebmon
- ;;
- elxsi)
- basic_machine=elxsi-elxsi
- os=-bsd
- ;;
- encore | umax | mmax)
- basic_machine=ns32k-encore
- ;;
- es1800 | OSE68k | ose68k | ose | OSE)
- basic_machine=m68k-ericsson
- os=-ose
- ;;
- fx2800)
- basic_machine=i860-alliant
- ;;
- genix)
- basic_machine=ns32k-ns
- ;;
- gmicro)
- basic_machine=tron-gmicro
- os=-sysv
- ;;
- go32)
- basic_machine=i386-pc
- os=-go32
- ;;
- h3050r* | hiux*)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- h8300hms)
- basic_machine=h8300-hitachi
- os=-hms
- ;;
- h8300xray)
- basic_machine=h8300-hitachi
- os=-xray
- ;;
- h8500hms)
- basic_machine=h8500-hitachi
- os=-hms
- ;;
- harris)
- basic_machine=m88k-harris
- os=-sysv3
- ;;
- hp300-*)
- basic_machine=m68k-hp
- ;;
- hp300bsd)
- basic_machine=m68k-hp
- os=-bsd
- ;;
- hp300hpux)
- basic_machine=m68k-hp
- os=-hpux
- ;;
- hp3k9[0-9][0-9] | hp9[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k2[0-9][0-9] | hp9k31[0-9])
- basic_machine=m68000-hp
- ;;
- hp9k3[2-9][0-9])
- basic_machine=m68k-hp
- ;;
- hp9k6[0-9][0-9] | hp6[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k7[0-79][0-9] | hp7[0-79][0-9])
- basic_machine=hppa1.1-hp
- ;;
- hp9k78[0-9] | hp78[0-9])
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][13679] | hp8[0-9][13679])
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][0-9] | hp8[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hppa-next)
- os=-nextstep3
- ;;
- hppaosf)
- basic_machine=hppa1.1-hp
- os=-osf
- ;;
- hppro)
- basic_machine=hppa1.1-hp
- os=-proelf
- ;;
- i370-ibm* | ibm*)
- basic_machine=i370-ibm
- ;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
- i*86v32)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv32
- ;;
- i*86v4*)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv4
- ;;
- i*86v)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv
- ;;
- i*86sol2)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-solaris2
- ;;
- i386mach)
- basic_machine=i386-mach
- os=-mach
- ;;
- i386-vsta | vsta)
- basic_machine=i386-unknown
- os=-vsta
- ;;
- iris | iris4d)
- basic_machine=mips-sgi
- case $os in
- -irix*)
- ;;
- *)
- os=-irix4
- ;;
- esac
- ;;
- isi68 | isi)
- basic_machine=m68k-isi
- os=-sysv
- ;;
- m68knommu)
- basic_machine=m68k-unknown
- os=-linux
- ;;
- m68knommu-*)
- basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- m88k-omron*)
- basic_machine=m88k-omron
- ;;
- magnum | m3230)
- basic_machine=mips-mips
- os=-sysv
- ;;
- merlin)
- basic_machine=ns32k-utek
- os=-sysv
- ;;
- mingw32)
- basic_machine=i386-pc
- os=-mingw32
- ;;
- mingw32ce)
- basic_machine=arm-unknown
- os=-mingw32ce
- ;;
- miniframe)
- basic_machine=m68000-convergent
- ;;
- *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
- mips3*-*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
- ;;
- mips3*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
- ;;
- monitor)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- morphos)
- basic_machine=powerpc-unknown
- os=-morphos
- ;;
- msdos)
- basic_machine=i386-pc
- os=-msdos
- ;;
- ms1-*)
- basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
- ;;
- mvs)
- basic_machine=i370-ibm
- os=-mvs
- ;;
- ncr3000)
- basic_machine=i486-ncr
- os=-sysv4
- ;;
- netbsd386)
- basic_machine=i386-unknown
- os=-netbsd
- ;;
- netwinder)
- basic_machine=armv4l-rebel
- os=-linux
- ;;
- news | news700 | news800 | news900)
- basic_machine=m68k-sony
- os=-newsos
- ;;
- news1000)
- basic_machine=m68030-sony
- os=-newsos
- ;;
- news-3600 | risc-news)
- basic_machine=mips-sony
- os=-newsos
- ;;
- necv70)
- basic_machine=v70-nec
- os=-sysv
- ;;
- next | m*-next )
- basic_machine=m68k-next
- case $os in
- -nextstep* )
- ;;
- -ns2*)
- os=-nextstep2
- ;;
- *)
- os=-nextstep3
- ;;
- esac
- ;;
- nh3000)
- basic_machine=m68k-harris
- os=-cxux
- ;;
- nh[45]000)
- basic_machine=m88k-harris
- os=-cxux
- ;;
- nindy960)
- basic_machine=i960-intel
- os=-nindy
- ;;
- mon960)
- basic_machine=i960-intel
- os=-mon960
- ;;
- nonstopux)
- basic_machine=mips-compaq
- os=-nonstopux
- ;;
- np1)
- basic_machine=np1-gould
- ;;
- nsr-tandem)
- basic_machine=nsr-tandem
- ;;
- op50n-* | op60c-*)
- basic_machine=hppa1.1-oki
- os=-proelf
- ;;
- openrisc | openrisc-*)
- basic_machine=or32-unknown
- ;;
- os400)
- basic_machine=powerpc-ibm
- os=-os400
- ;;
- OSE68000 | ose68000)
- basic_machine=m68000-ericsson
- os=-ose
- ;;
- os68k)
- basic_machine=m68k-none
- os=-os68k
- ;;
- pa-hitachi)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- paragon)
- basic_machine=i860-intel
- os=-osf
- ;;
- parisc)
- basic_machine=hppa-unknown
- os=-linux
- ;;
- parisc-*)
- basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- pbd)
- basic_machine=sparc-tti
- ;;
- pbb)
- basic_machine=m68k-tti
- ;;
- pc532 | pc532-*)
- basic_machine=ns32k-pc532
- ;;
- pc98)
- basic_machine=i386-pc
- ;;
- pc98-*)
- basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium | p5 | k5 | k6 | nexgen | viac3)
- basic_machine=i586-pc
- ;;
- pentiumpro | p6 | 6x86 | athlon | athlon_*)
- basic_machine=i686-pc
- ;;
- pentiumii | pentium2 | pentiumiii | pentium3)
- basic_machine=i686-pc
- ;;
- pentium4)
- basic_machine=i786-pc
- ;;
- pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
- basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumpro-* | p6-* | 6x86-* | athlon-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium4-*)
- basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pn)
- basic_machine=pn-gould
- ;;
- power) basic_machine=power-ibm
- ;;
- ppc) basic_machine=powerpc-unknown
- ;;
- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppcle | powerpclittle | ppc-le | powerpc-little)
- basic_machine=powerpcle-unknown
- ;;
- ppcle-* | powerpclittle-*)
- basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64) basic_machine=powerpc64-unknown
- ;;
- ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64le | powerpc64little | ppc64-le | powerpc64-little)
- basic_machine=powerpc64le-unknown
- ;;
- ppc64le-* | powerpc64little-*)
- basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ps2)
- basic_machine=i386-ibm
- ;;
- pw32)
- basic_machine=i586-unknown
- os=-pw32
- ;;
- rdos)
- basic_machine=i386-pc
- os=-rdos
- ;;
- rom68k)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- rm[46]00)
- basic_machine=mips-siemens
- ;;
- rtpc | rtpc-*)
- basic_machine=romp-ibm
- ;;
- s390 | s390-*)
- basic_machine=s390-ibm
- ;;
- s390x | s390x-*)
- basic_machine=s390x-ibm
- ;;
- sa29200)
- basic_machine=a29k-amd
- os=-udi
- ;;
- sb1)
- basic_machine=mipsisa64sb1-unknown
- ;;
- sb1el)
- basic_machine=mipsisa64sb1el-unknown
- ;;
- sde)
- basic_machine=mipsisa32-sde
- os=-elf
- ;;
- sei)
- basic_machine=mips-sei
- os=-seiux
- ;;
- sequent)
- basic_machine=i386-sequent
- ;;
- sh)
- basic_machine=sh-hitachi
- os=-hms
- ;;
- sh5el)
- basic_machine=sh5le-unknown
- ;;
- sh64)
- basic_machine=sh64-unknown
- ;;
- sparclite-wrs | simso-wrs)
- basic_machine=sparclite-wrs
- os=-vxworks
- ;;
- sps7)
- basic_machine=m68k-bull
- os=-sysv2
- ;;
- spur)
- basic_machine=spur-unknown
- ;;
- st2000)
- basic_machine=m68k-tandem
- ;;
- stratus)
- basic_machine=i860-stratus
- os=-sysv4
- ;;
- sun2)
- basic_machine=m68000-sun
- ;;
- sun2os3)
- basic_machine=m68000-sun
- os=-sunos3
- ;;
- sun2os4)
- basic_machine=m68000-sun
- os=-sunos4
- ;;
- sun3os3)
- basic_machine=m68k-sun
- os=-sunos3
- ;;
- sun3os4)
- basic_machine=m68k-sun
- os=-sunos4
- ;;
- sun4os3)
- basic_machine=sparc-sun
- os=-sunos3
- ;;
- sun4os4)
- basic_machine=sparc-sun
- os=-sunos4
- ;;
- sun4sol2)
- basic_machine=sparc-sun
- os=-solaris2
- ;;
- sun3 | sun3-*)
- basic_machine=m68k-sun
- ;;
- sun4)
- basic_machine=sparc-sun
- ;;
- sun386 | sun386i | roadrunner)
- basic_machine=i386-sun
- ;;
- sv1)
- basic_machine=sv1-cray
- os=-unicos
- ;;
- symmetry)
- basic_machine=i386-sequent
- os=-dynix
- ;;
- t3e)
- basic_machine=alphaev5-cray
- os=-unicos
- ;;
- t90)
- basic_machine=t90-cray
- os=-unicos
- ;;
- tic54x | c54x*)
- basic_machine=tic54x-unknown
- os=-coff
- ;;
- tic55x | c55x*)
- basic_machine=tic55x-unknown
- os=-coff
- ;;
- tic6x | c6x*)
- basic_machine=tic6x-unknown
- os=-coff
- ;;
- tile*)
- basic_machine=tile-unknown
- os=-linux-gnu
- ;;
- tx39)
- basic_machine=mipstx39-unknown
- ;;
- tx39el)
- basic_machine=mipstx39el-unknown
- ;;
- toad1)
- basic_machine=pdp10-xkl
- os=-tops20
- ;;
- tower | tower-32)
- basic_machine=m68k-ncr
- ;;
- tpf)
- basic_machine=s390x-ibm
- os=-tpf
- ;;
- udi29k)
- basic_machine=a29k-amd
- os=-udi
- ;;
- ultra3)
- basic_machine=a29k-nyu
- os=-sym1
- ;;
- v810 | necv810)
- basic_machine=v810-nec
- os=-none
- ;;
- vaxv)
- basic_machine=vax-dec
- os=-sysv
- ;;
- vms)
- basic_machine=vax-dec
- os=-vms
- ;;
- vpp*|vx|vx-*)
- basic_machine=f301-fujitsu
- ;;
- vxworks960)
- basic_machine=i960-wrs
- os=-vxworks
- ;;
- vxworks68)
- basic_machine=m68k-wrs
- os=-vxworks
- ;;
- vxworks29k)
- basic_machine=a29k-wrs
- os=-vxworks
- ;;
- w65*)
- basic_machine=w65-wdc
- os=-none
- ;;
- w89k-*)
- basic_machine=hppa1.1-winbond
- os=-proelf
- ;;
- xbox)
- basic_machine=i686-pc
- os=-mingw32
- ;;
- xps | xps100)
- basic_machine=xps100-honeywell
- ;;
- ymp)
- basic_machine=ymp-cray
- os=-unicos
- ;;
- z8k-*-coff)
- basic_machine=z8k-unknown
- os=-sim
- ;;
- z80-*-coff)
- basic_machine=z80-unknown
- os=-sim
- ;;
- none)
- basic_machine=none-none
- os=-none
- ;;
-
-# Here we handle the default manufacturer of certain CPU types. It is in
-# some cases the only manufacturer, in others, it is the most popular.
- w89k)
- basic_machine=hppa1.1-winbond
- ;;
- op50n)
- basic_machine=hppa1.1-oki
- ;;
- op60c)
- basic_machine=hppa1.1-oki
- ;;
- romp)
- basic_machine=romp-ibm
- ;;
- mmix)
- basic_machine=mmix-knuth
- ;;
- rs6000)
- basic_machine=rs6000-ibm
- ;;
- vax)
- basic_machine=vax-dec
- ;;
- pdp10)
- # there are many clones, so DEC is not a safe bet
- basic_machine=pdp10-unknown
- ;;
- pdp11)
- basic_machine=pdp11-dec
- ;;
- we32k)
- basic_machine=we32k-att
- ;;
- sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
- basic_machine=sh-unknown
- ;;
- sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
- basic_machine=sparc-sun
- ;;
- cydra)
- basic_machine=cydra-cydrome
- ;;
- orion)
- basic_machine=orion-highlevel
- ;;
- orion105)
- basic_machine=clipper-highlevel
- ;;
- mac | mpw | mac-mpw)
- basic_machine=m68k-apple
- ;;
- pmac | pmac-mpw)
- basic_machine=powerpc-apple
- ;;
- *-unknown)
- # Make sure to match an already-canonicalized machine name.
- ;;
- *)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
-esac
-
-# Here we canonicalize certain aliases for manufacturers.
-case $basic_machine in
- *-digital*)
- basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
- ;;
- *-commodore*)
- basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
- ;;
- *)
- ;;
-esac
-
-# Decode manufacturer-specific aliases for certain operating systems.
-
-if [ x"$os" != x"" ]
-then
-case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
- # -solaris* is a basic system type, with this one exception.
- -solaris1 | -solaris1.*)
- os=`echo $os | sed -e 's|solaris1|sunos4|'`
- ;;
- -solaris)
- os=-solaris2
- ;;
- -svr4*)
- os=-sysv4
- ;;
- -unixware*)
- os=-sysv4.2uw
- ;;
- -gnu/linux*)
- os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
- ;;
- # First accept the basic system types.
- # The portable systems comes first.
- # Each alternative MUST END IN A *, to match a version number.
- # -sysv* is not here because it comes later, after sysvr4.
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
- | -kopensolaris* \
- | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* | -aros* \
- | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
- | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
- | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
- | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
- | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
- | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
- | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* | -cegcc* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
- | -uxpv* | -beos* | -mpeix* | -udk* \
- | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
- | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
- | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
- | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
- | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
- # Remember, each alternative MUST END IN *, to match a version number.
- ;;
- -qnx*)
- case $basic_machine in
- x86-* | i*86-*)
- ;;
- *)
- os=-nto$os
- ;;
- esac
- ;;
- -nto-qnx*)
- ;;
- -nto*)
- os=`echo $os | sed -e 's|nto|nto-qnx|'`
- ;;
- -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
- | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
- | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
- ;;
- -mac*)
- os=`echo $os | sed -e 's|mac|macos|'`
- ;;
- -linux-dietlibc)
- os=-linux-dietlibc
- ;;
- -linux*)
- os=`echo $os | sed -e 's|linux|linux-gnu|'`
- ;;
- -sunos5*)
- os=`echo $os | sed -e 's|sunos5|solaris2|'`
- ;;
- -sunos6*)
- os=`echo $os | sed -e 's|sunos6|solaris3|'`
- ;;
- -opened*)
- os=-openedition
- ;;
- -os400*)
- os=-os400
- ;;
- -wince*)
- os=-wince
- ;;
- -osfrose*)
- os=-osfrose
- ;;
- -osf*)
- os=-osf
- ;;
- -utek*)
- os=-bsd
- ;;
- -dynix*)
- os=-bsd
- ;;
- -acis*)
- os=-aos
- ;;
- -atheos*)
- os=-atheos
- ;;
- -syllable*)
- os=-syllable
- ;;
- -386bsd)
- os=-bsd
- ;;
- -ctix* | -uts*)
- os=-sysv
- ;;
- -nova*)
- os=-rtmk-nova
- ;;
- -ns2 )
- os=-nextstep2
- ;;
- -nsk*)
- os=-nsk
- ;;
- # Preserve the version number of sinix5.
- -sinix5.*)
- os=`echo $os | sed -e 's|sinix|sysv|'`
- ;;
- -sinix*)
- os=-sysv4
- ;;
- -tpf*)
- os=-tpf
- ;;
- -triton*)
- os=-sysv3
- ;;
- -oss*)
- os=-sysv3
- ;;
- -svr4)
- os=-sysv4
- ;;
- -svr3)
- os=-sysv3
- ;;
- -sysvr4)
- os=-sysv4
- ;;
- # This must come after -sysvr4.
- -sysv*)
- ;;
- -ose*)
- os=-ose
- ;;
- -es1800*)
- os=-ose
- ;;
- -xenix)
- os=-xenix
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- os=-mint
- ;;
- -aros*)
- os=-aros
- ;;
- -kaos*)
- os=-kaos
- ;;
- -zvmoe)
- os=-zvmoe
- ;;
- -dicos*)
- os=-dicos
- ;;
- -none)
- ;;
- *)
- # Get rid of the `-' at the beginning of $os.
- os=`echo $os | sed 's/[^-]*-//'`
- echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
- exit 1
- ;;
-esac
-else
-
-# Here we handle the default operating systems that come with various machines.
-# The value should be what the vendor currently ships out the door with their
-# machine or put another way, the most popular os provided with the machine.
-
-# Note that if you're going to try to match "-MANUFACTURER" here (say,
-# "-sun"), then you have to tell the case statement up towards the top
-# that MANUFACTURER isn't an operating system. Otherwise, code above
-# will signal an error saying that MANUFACTURER isn't an operating
-# system, and we'll never get to this point.
-
-case $basic_machine in
- score-*)
- os=-elf
- ;;
- spu-*)
- os=-elf
- ;;
- *-acorn)
- os=-riscix1.2
- ;;
- arm*-rebel)
- os=-linux
- ;;
- arm*-semi)
- os=-aout
- ;;
- c4x-* | tic4x-*)
- os=-coff
- ;;
- # This must come before the *-dec entry.
- pdp10-*)
- os=-tops20
- ;;
- pdp11-*)
- os=-none
- ;;
- *-dec | vax-*)
- os=-ultrix4.2
- ;;
- m68*-apollo)
- os=-domain
- ;;
- i386-sun)
- os=-sunos4.0.2
- ;;
- m68000-sun)
- os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
- ;;
- m68*-cisco)
- os=-aout
- ;;
- mep-*)
- os=-elf
- ;;
- mips*-cisco)
- os=-elf
- ;;
- mips*-*)
- os=-elf
- ;;
- or32-*)
- os=-coff
- ;;
- *-tti) # must be before sparc entry or we get the wrong os.
- os=-sysv3
- ;;
- sparc-* | *-sun)
- os=-sunos4.1.1
- ;;
- *-be)
- os=-beos
- ;;
- *-haiku)
- os=-haiku
- ;;
- *-ibm)
- os=-aix
- ;;
- *-knuth)
- os=-mmixware
- ;;
- *-wec)
- os=-proelf
- ;;
- *-winbond)
- os=-proelf
- ;;
- *-oki)
- os=-proelf
- ;;
- *-hp)
- os=-hpux
- ;;
- *-hitachi)
- os=-hiux
- ;;
- i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
- os=-sysv
- ;;
- *-cbm)
- os=-amigaos
- ;;
- *-dg)
- os=-dgux
- ;;
- *-dolphin)
- os=-sysv3
- ;;
- m68k-ccur)
- os=-rtu
- ;;
- m88k-omron*)
- os=-luna
- ;;
- *-next )
- os=-nextstep
- ;;
- *-sequent)
- os=-ptx
- ;;
- *-crds)
- os=-unos
- ;;
- *-ns)
- os=-genix
- ;;
- i370-*)
- os=-mvs
- ;;
- *-next)
- os=-nextstep3
- ;;
- *-gould)
- os=-sysv
- ;;
- *-highlevel)
- os=-bsd
- ;;
- *-encore)
- os=-bsd
- ;;
- *-sgi)
- os=-irix
- ;;
- *-siemens)
- os=-sysv4
- ;;
- *-masscomp)
- os=-rtu
- ;;
- f30[01]-fujitsu | f700-fujitsu)
- os=-uxpv
- ;;
- *-rom68k)
- os=-coff
- ;;
- *-*bug)
- os=-coff
- ;;
- *-apple)
- os=-macos
- ;;
- *-atari*)
- os=-mint
- ;;
- *)
- os=-none
- ;;
-esac
-fi
-
-# Here we handle the case where we know the os, and the CPU type, but not the
-# manufacturer. We pick the logical manufacturer.
-vendor=unknown
-case $basic_machine in
- *-unknown)
- case $os in
- -riscix*)
- vendor=acorn
- ;;
- -sunos*)
- vendor=sun
- ;;
- -aix*)
- vendor=ibm
- ;;
- -beos*)
- vendor=be
- ;;
- -hpux*)
- vendor=hp
- ;;
- -mpeix*)
- vendor=hp
- ;;
- -hiux*)
- vendor=hitachi
- ;;
- -unos*)
- vendor=crds
- ;;
- -dgux*)
- vendor=dg
- ;;
- -luna*)
- vendor=omron
- ;;
- -genix*)
- vendor=ns
- ;;
- -mvs* | -opened*)
- vendor=ibm
- ;;
- -os400*)
- vendor=ibm
- ;;
- -ptx*)
- vendor=sequent
- ;;
- -tpf*)
- vendor=ibm
- ;;
- -vxsim* | -vxworks* | -windiss*)
- vendor=wrs
- ;;
- -aux*)
- vendor=apple
- ;;
- -hms*)
- vendor=hitachi
- ;;
- -mpw* | -macos*)
- vendor=apple
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- vendor=atari
- ;;
- -vos*)
- vendor=stratus
- ;;
- esac
- basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
- ;;
-esac
-
-echo $basic_machine$os
-exit
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/tdb/configure b/lib/tdb/configure
new file mode 100755
index 0000000000..6a9f875511
--- /dev/null
+++ b/lib/tdb/configure
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+PREVPATH=`dirname $0`
+
+if [ -f $PREVPATH/../../buildtools/bin/waf ]; then
+ WAF=../../buildtools/bin/waf
+elif [ -f $PREVPATH/buildtools/bin/waf ]; then
+ WAF=./buildtools/bin/waf
+else
+ echo "replace: Unable to find waf"
+ exit 1
+fi
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+cd . || exit 1
+$WAF configure "$@" || exit 1
+cd $PREVPATH
diff --git a/lib/tdb/configure.ac b/lib/tdb/configure.ac
deleted file mode 100644
index 395121937f..0000000000
--- a/lib/tdb/configure.ac
+++ /dev/null
@@ -1,51 +0,0 @@
-AC_PREREQ(2.50)
-AC_DEFUN([SMB_MODULE_DEFAULT], [echo -n ""])
-AC_DEFUN([SMB_LIBRARY_ENABLE], [echo -n ""])
-AC_DEFUN([SMB_ENABLE], [echo -n ""])
-AC_INIT(tdb, 1.2.1)
-AC_CONFIG_SRCDIR([common/tdb.c])
-AC_CONFIG_HEADER(include/config.h)
-AC_LIBREPLACE_ALL_CHECKS
-AC_LD_SONAMEFLAG
-AC_LD_VERSIONSCRIPT
-AC_LD_PICFLAG
-AC_LD_SHLIBEXT
-AC_LIBREPLACE_SHLD
-AC_LIBREPLACE_SHLD_FLAGS
-AC_LIBREPLACE_RUNTIME_LIB_PATH_VAR
-m4_include(libtdb.m4)
-AC_PATH_PROGS([PYTHON_CONFIG], [python2.6-config python2.5-config python2.4-config python-config])
-AC_PATH_PROGS([PYTHON], [python2.6 python2.5 python2.4 python])
-
-PYTHON_BUILD_TARGET="build-python"
-PYTHON_INSTALL_TARGET="install-python"
-PYTHON_CHECK_TARGET="check-python"
-AC_SUBST(PYTHON_BUILD_TARGET)
-AC_SUBST(PYTHON_INSTALL_TARGET)
-AC_SUBST(PYTHON_CHECK_TARGET)
-if test -z "$PYTHON_CONFIG"; then
- PYTHON_BUILD_TARGET=""
- PYTHON_INSTALL_TARGET=""
- PYTHON_CHECK_TARGET=""
-fi
-
-AC_ARG_ENABLE(python,
- AS_HELP_STRING([--enable-python], [Enables python binding]),
- [ if test "x$enableval" = "xno" ; then
- PYTHON_BUILD_TARGET=""
- PYTHON_INSTALL_TARGET=""
- PYTHON_CHECK_TARGET=""
- fi
- ])
-
-AC_PATH_PROG(XSLTPROC,xsltproc)
-DOC_TARGET=""
-if test -n "$XSLTPROC"; then
- DOC_TARGET=doc
-fi
-AC_SUBST(DOC_TARGET)
-
-m4_include(build_macros.m4)
-BUILD_WITH_SHARED_BUILD_DIR
-
-AC_OUTPUT(Makefile tdb.pc)
diff --git a/lib/tdb/docs/README b/lib/tdb/docs/README
index c02ee0e030..fe0e258183 100644
--- a/lib/tdb/docs/README
+++ b/lib/tdb/docs/README
@@ -105,6 +105,25 @@ TDB_DATA tdb_fetch(TDB_CONTEXT *tdb, TDB_DATA key);
caller must free the resulting data
----------------------------------------------------------------------
+int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data);
+
+ Hand a record to a parser function without allocating it.
+
+ This function is meant as a fast tdb_fetch alternative for large records
+ that are frequently read. The "key" and "data" arguments point directly
+ into the tdb shared memory, they are not aligned at any boundary.
+
+ WARNING: The parser is called while tdb holds a lock on the record. DO NOT
+ call other tdb routines from within the parser. Also, for good performance
+ you should make the parser fast to allow parallel operations.
+
+ tdb_parse_record returns -1 if the record was not found. If the record was
+ found, the return value of "parser" is passed up to the caller.
+
+----------------------------------------------------------------------
int tdb_exists(TDB_CONTEXT *tdb, TDB_DATA key);
check if an entry in the database exists
diff --git a/lib/tdb/docs/mainpage.dox b/lib/tdb/docs/mainpage.dox
new file mode 100644
index 0000000000..d130769356
--- /dev/null
+++ b/lib/tdb/docs/mainpage.dox
@@ -0,0 +1,61 @@
+/**
+
+@mainpage
+
+This is a simple database API. It was inspired by the realisation that in Samba
+we have several ad-hoc bits of code that essentially implement small databases
+for sharing structures between parts of Samba.
+
+The interface is based on gdbm. gdbm couldn't be use as we needed to be able to
+have multiple writers to the databases at one time.
+
+@section tdb_download Download
+
+You can download the latest releases of tdb from the
+<a href="http://samba.org/ftp/tdb">tdb directory</a> on the samba public source
+archive.
+
+You can download the latest code either via git or rsync.
+
+To fetch via git see the following guide:
+
+<a href="http://wiki.samba.org/index.php/Using_Git_for_Samba_Development">Using Git for Samba Development</a>
+Once you have cloned the tree switch to the master branch and cd into the source/lib/tdb directory.
+
+To fetch via rsync use these commands:
+
+<pre>
+ rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/tdb .
+ rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/replace .
+</pre>
+
+and build in tdb. It will find the replace library in the directory above
+automatically.
+
+@section tdb_bugs Discussion and bug reports
+
+tdb does not currently have its own mailing list or bug tracking system. For now,
+please use the
+<a href="https://lists.samba.org/mailman/listinfo/samba-technical">samba-technical</a>
+mailing list, and the <a href="http://bugzilla.samba.org/">Samba bugzilla</a> bug
+tracking system.
+
+
+@section tdb_compilation Compilation
+
+add HAVE_MMAP=1 to use mmap instead of read/write
+add NOLOCK=1 to disable locking code
+
+@section tdb_testing Testing
+
+Compile tdbtest.c and link with gdbm for testing. tdbtest will perform
+identical operations via tdb and gdbm then make sure the result is the
+same
+
+Also included is tdbtool, which allows simple database manipulation
+on the commandline.
+
+tdbtest and tdbtool are not built as part of Samba, but are included
+for completeness.
+
+*/
diff --git a/lib/tdb/doxy.config b/lib/tdb/doxy.config
new file mode 100644
index 0000000000..5376486c2a
--- /dev/null
+++ b/lib/tdb/doxy.config
@@ -0,0 +1,1700 @@
+# Doxyfile 1.7.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = tdb
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 1.2.9
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = docs
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = NO
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = YES
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = YES
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even if there is only one candidate or it is obvious which candidate to choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = include \
+ docs
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.cpp \
+ *.cc \
+ *.c \
+ *.h \
+ *.hh \
+ *.hpp \
+ *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = */.git/* \
+ */.svn/* \
+ */cmake/* \
+ */build/*
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [0,1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+# Note that a value of 0 will completely suppress the enum values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NONE
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing
+# MathJax, but it is strongly recommended to install a local copy of MathJax
+# before deployment.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to setup
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = YES
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = DOXYGEN \
+ PRINTF_ATTRIBUTE(x,y)=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will write a font called Helvetica to the output
+# directory and reference it in all dot files that doxygen generates.
+# When you want a differently looking font you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, svg, gif or jpg.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = YES
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/lib/tdb/include/tdb.h b/lib/tdb/include/tdb.h
index c9e946a885..5eb95625da 100644
--- a/lib/tdb/include/tdb.h
+++ b/lib/tdb/include/tdb.h
@@ -30,37 +30,68 @@
extern "C" {
#endif
-#include "signal.h"
-
-/* flags to tdb_store() */
-#define TDB_REPLACE 1 /* Unused */
-#define TDB_INSERT 2 /* Don't overwrite an existing entry */
-#define TDB_MODIFY 3 /* Don't create an existing entry */
-
-/* flags for tdb_open() */
-#define TDB_DEFAULT 0 /* just a readability place holder */
-#define TDB_CLEAR_IF_FIRST 1
-#define TDB_INTERNAL 2 /* don't store on disk */
-#define TDB_NOLOCK 4 /* don't do any locking */
-#define TDB_NOMMAP 8 /* don't use mmap */
-#define TDB_CONVERT 16 /* convert endian (internal use) */
-#define TDB_BIGENDIAN 32 /* header is big-endian (internal use) */
-#define TDB_NOSYNC 64 /* don't use synchronous transactions */
-#define TDB_SEQNUM 128 /* maintain a sequence number */
-#define TDB_VOLATILE 256 /* Activate the per-hashchain freelist, default 5 */
-#define TDB_ALLOW_NESTING 512 /* Allow transactions to nest */
-#define TDB_DISALLOW_NESTING 1024 /* Disallow transactions to nest */
-
-/* error codes */
+#include <signal.h>
+
+/**
+ * @defgroup tdb The tdb API
+ *
+ * tdb is a Trivial database. In concept, it is very much like GDBM, and BSD's
+ * DB except that it allows multiple simultaneous writers and uses locking
+ * internally to keep writers from trampling on each other. tdb is also
+ * extremely small.
+ *
+ * @section tdb_interface Interface
+ *
+ * The interface is very similar to gdbm except for the following:
+ *
+ * <ul>
+ * <li>different open interface. The tdb_open call is more similar to a
+ * traditional open()</li>
+ * <li>no tdbm_reorganise() function</li>
+ * <li>no tdbm_sync() function. No operations are cached in the library
+ * anyway</li>
+ * <li>added a tdb_traverse() function for traversing the whole database</li>
+ * <li>added transactions support</li>
+ * </ul>
+ *
+ * A general rule for using tdb is that the caller frees any returned TDB_DATA
+ * structures. Just call free(p.dptr) to free a TDB_DATA return value called p.
+ * This is the same as gdbm.
+ *
+ * @{
+ */
+
+/** Flags to tdb_store() */
+#define TDB_REPLACE 1 /** Unused */
+#define TDB_INSERT 2 /** Don't overwrite an existing entry */
+#define TDB_MODIFY 3 /** Don't create an existing entry */
+
+/** Flags for tdb_open() */
+#define TDB_DEFAULT 0 /** just a readability place holder */
+#define TDB_CLEAR_IF_FIRST 1 /** If this is the first open, wipe the db */
+#define TDB_INTERNAL 2 /** Don't store on disk */
+#define TDB_NOLOCK 4 /** Don't do any locking */
+#define TDB_NOMMAP 8 /** Don't use mmap */
+#define TDB_CONVERT 16 /** Convert endian (internal use) */
+#define TDB_BIGENDIAN 32 /** Header is big-endian (internal use) */
+#define TDB_NOSYNC 64 /** Don't use synchronous transactions */
+#define TDB_SEQNUM 128 /** Maintain a sequence number */
+#define TDB_VOLATILE 256 /** Activate the per-hashchain freelist, default 5 */
+#define TDB_ALLOW_NESTING 512 /** Allow transactions to nest */
+#define TDB_DISALLOW_NESTING 1024 /** Disallow transactions to nest */
+#define TDB_INCOMPATIBLE_HASH 2048 /** Better hashing: can't be opened by tdb < 1.2.6. */
+
+/** The tdb error codes */
enum TDB_ERROR {TDB_SUCCESS=0, TDB_ERR_CORRUPT, TDB_ERR_IO, TDB_ERR_LOCK,
TDB_ERR_OOM, TDB_ERR_EXISTS, TDB_ERR_NOLOCK, TDB_ERR_LOCK_TIMEOUT,
TDB_ERR_NOEXIST, TDB_ERR_EINVAL, TDB_ERR_RDONLY,
TDB_ERR_NESTING};
-/* debugging uses one of the following levels */
+/** Debugging uses one of the following levels */
enum tdb_debug_level {TDB_DEBUG_FATAL = 0, TDB_DEBUG_ERROR,
TDB_DEBUG_WARNING, TDB_DEBUG_TRACE};
+/** The tdb data structure */
typedef struct TDB_DATA {
unsigned char *dptr;
size_t dsize;
@@ -78,7 +109,7 @@ typedef struct TDB_DATA {
#endif
#endif
-/* this is the context structure that is returned from a db open */
+/** This is the context structure that is returned from a db open. */
typedef struct tdb_context TDB_CONTEXT;
typedef int (*tdb_traverse_func)(struct tdb_context *, TDB_DATA, TDB_DATA, void *);
@@ -90,62 +121,701 @@ struct tdb_logging_context {
void *log_private;
};
+/**
+ * @brief Open the database, creating it if necessary.
+ *
+ * @param[in] name The name of the db to open.
+ *
+ * @param[in] hash_size The hash size is advisory, use zero for a default
+ * value.
+ *
+ * @param[in] tdb_flags The flags to use to open the db:\n\n
+ * TDB_CLEAR_IF_FIRST - Clear database if we are the
+ * only one with it open\n
+ *                       TDB_INTERNAL - Don't use a file, instead store the
+ * data in memory. The filename is
+ * ignored in this case.\n
+ * TDB_NOLOCK - Don't do any locking\n
+ * TDB_NOMMAP - Don't use mmap\n
+ * TDB_NOSYNC - Don't synchronise transactions to disk\n
+ * TDB_SEQNUM - Maintain a sequence number\n
+ * TDB_VOLATILE - activate the per-hashchain freelist,
+ * default 5.\n
+ * TDB_ALLOW_NESTING - Allow transactions to nest.\n
+ * TDB_DISALLOW_NESTING - Disallow transactions to nest.\n
+ *
+ * @param[in] open_flags Flags for the open(2) function.
+ *
+ * @param[in] mode The mode for the open(2) function.
+ *
+ * @return A tdb context structure, NULL on error.
+ */
struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags,
int open_flags, mode_t mode);
+
+/**
+ * @brief Open the database, creating it if necessary.
+ *
+ * This is like tdb_open(), but allows you to pass an initial logging and
+ * hash function. Be careful when passing a hash function - all users of the
+ * database must use the same hash function or you will get data corruption.
+ *
+ * @param[in] name The name of the db to open.
+ *
+ * @param[in] hash_size The hash size is advisory, use zero for a default
+ * value.
+ *
+ * @param[in] tdb_flags The flags to use to open the db:\n\n
+ * TDB_CLEAR_IF_FIRST - Clear database if we are the
+ * only one with it open\n
+ *                       TDB_INTERNAL - Don't use a file, instead store the
+ * data in memory. The filename is
+ * ignored in this case.\n
+ * TDB_NOLOCK - Don't do any locking\n
+ * TDB_NOMMAP - Don't use mmap\n
+ * TDB_NOSYNC - Don't synchronise transactions to disk\n
+ * TDB_SEQNUM - Maintain a sequence number\n
+ * TDB_VOLATILE - activate the per-hashchain freelist,
+ * default 5.\n
+ * TDB_ALLOW_NESTING - Allow transactions to nest.\n
+ * TDB_DISALLOW_NESTING - Disallow transactions to nest.\n
+ *
+ * @param[in] open_flags Flags for the open(2) function.
+ *
+ * @param[in] mode The mode for the open(2) function.
+ *
+ * @param[in] log_ctx The logging function to use.
+ *
+ * @param[in] hash_fn The hash function you want to use.
+ *
+ * @return A tdb context structure, NULL on error.
+ *
+ * @see tdb_open()
+ */
struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
int open_flags, mode_t mode,
const struct tdb_logging_context *log_ctx,
tdb_hash_func hash_fn);
+
+/**
+ * @brief Set the maximum number of dead records per hash chain.
+ *
+ * @param[in] tdb The database handle to set the maximum.
+ *
+ * @param[in] max_dead The maximum number of dead records per hash chain.
+ */
void tdb_set_max_dead(struct tdb_context *tdb, int max_dead);
+/**
+ * @brief Reopen a tdb.
+ *
+ * This can be used after a fork to ensure that we have an independent seek
+ * pointer from our parent and to re-establish locks.
+ *
+ * @param[in] tdb The database to reopen.
+ *
+ * @return 0 on success, -1 on error.
+ */
int tdb_reopen(struct tdb_context *tdb);
+
+/**
+ * @brief Reopen all tdb's
+ *
+ * If the parent is longlived (ie. a parent daemon architecture), we know it
+ * will keep its active lock on a tdb opened with CLEAR_IF_FIRST. Thus for
+ * child processes we don't have to add an active lock. This is essential to
+ * improve performance on systems that keep POSIX locks as a non-scalable data
+ * structure in the kernel.
+ *
+ * @param[in]  parent_longlived Whether the parent is longlived or not.
+ *
+ * @return 0 on success, -1 on error.
+ */
int tdb_reopen_all(int parent_longlived);
+
+/**
+ * @brief Set a different tdb logging function.
+ *
+ * @param[in] tdb The tdb to set the logging function.
+ *
+ * @param[in] log_ctx The logging function to set.
+ */
void tdb_set_logging_function(struct tdb_context *tdb, const struct tdb_logging_context *log_ctx);
+
+/**
+ * @brief Get the tdb last error code.
+ *
+ * @param[in] tdb The tdb to get the error code from.
+ *
+ * @return A TDB_ERROR code.
+ *
+ * @see TDB_ERROR
+ */
enum TDB_ERROR tdb_error(struct tdb_context *tdb);
+
+/**
+ * @brief Get an error string for the last tdb error
+ *
+ * @param[in] tdb The tdb to get the error code from.
+ *
+ * @return An error string.
+ */
const char *tdb_errorstr(struct tdb_context *tdb);
+
+/**
+ * @brief Fetch an entry in the database given a key.
+ *
+ * The caller must free the resulting data.
+ *
+ * @param[in] tdb The tdb to fetch the key.
+ *
+ * @param[in] key The key to fetch.
+ *
+ * @return The key entry found in the database, NULL on error with
+ * TDB_ERROR set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key);
+
+/**
+ * @brief Hand a record to a parser function without allocating it.
+ *
+ * This function is meant as a fast tdb_fetch alternative for large records
+ * that are frequently read. The "key" and "data" arguments point directly
+ * into the tdb shared memory, they are not aligned at any boundary.
+ *
+ * @warning The parser is called while tdb holds a lock on the record. DO NOT
+ * call other tdb routines from within the parser. Also, for good performance
+ * you should make the parser fast to allow parallel operations.
+ *
+ * @param[in] tdb The tdb to parse the record.
+ *
+ * @param[in] key The key to parse.
+ *
+ * @param[in] parser The parser to use to parse the data.
+ *
+ * @param[in] private_data A private data pointer which is passed to the parser
+ * function.
+ *
+ * @return -1 if the record was not found. If the record was found,
+ * the return value of "parser" is passed up to the caller.
+ */
int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
- int (*parser)(TDB_DATA key, TDB_DATA data,
- void *private_data),
- void *private_data);
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data);
+
+/**
+ * @brief Delete an entry in the database given a key.
+ *
+ * @param[in] tdb The tdb to delete the key.
+ *
+ * @param[in] key The key to delete.
+ *
+ * @return 0 on success, -1 if the key doesn't exist.
+ */
int tdb_delete(struct tdb_context *tdb, TDB_DATA key);
+
+/**
+ * @brief Store an element in the database.
+ *
+ * This replaces any existing element with the same key.
+ *
+ * @param[in] tdb The tdb to store the entry.
+ *
+ * @param[in] key The key to use to store the entry.
+ *
+ * @param[in] dbuf The data to store under the key.
+ *
+ * @param[in] flag The flags to store the key:\n\n
+ * TDB_INSERT: Don't overwrite an existing entry.\n
+ * TDB_MODIFY: Don't create a new entry\n
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag);
+
+/**
+ * @brief Append data to an entry.
+ *
+ * If the entry doesn't exist, it will create a new one.
+ *
+ * @param[in] tdb The database to use.
+ *
+ * @param[in] key The key to append the data.
+ *
+ * @param[in] new_dbuf The data to append to the key.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf);
+
+/**
+ * @brief Close a database.
+ *
+ * @param[in] tdb The database to close.
+ *
+ * @return 0 for success, -1 on error.
+ */
int tdb_close(struct tdb_context *tdb);
+
+/**
+ * @brief Find the first entry in the database and return its key.
+ *
+ * The caller must free the returned data.
+ *
+ * @param[in] tdb The database to use.
+ *
+ * @return The first entry of the database, an empty TDB_DATA entry
+ * if the database is empty.
+ */
TDB_DATA tdb_firstkey(struct tdb_context *tdb);
+
+/**
+ * @brief Find the next entry in the database, returning its key.
+ *
+ * The caller must free the returned data.
+ *
+ * @param[in] tdb The database to use.
+ *
+ * @param[in] key The key from which you want the next key.
+ *
+ * @return The next entry of the current key, an empty TDB_DATA
+ * entry if there is no entry.
+ */
TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA key);
-int tdb_traverse(struct tdb_context *tdb, tdb_traverse_func fn, void *);
-int tdb_traverse_read(struct tdb_context *tdb, tdb_traverse_func fn, void *);
+
+/**
+ * @brief Traverse the entire database.
+ *
+ * While traversing, the function fn(tdb, key, data, state) is called on each
+ * element. If fn is NULL then it is not called. A non-zero return value from
+ * fn() indicates that the traversal should stop. Traversal callbacks may not
+ * start transactions.
+ *
+ * @warning The data buffer given to the callback fn does NOT meet the alignment
+ * restrictions malloc gives you.
+ *
+ * @param[in] tdb The database to traverse.
+ *
+ * @param[in] fn The function to call on each entry.
+ *
+ * @param[in] private_data The private data which should be passed to the
+ * traversing function.
+ *
+ * @return The record count traversed, -1 on error.
+ */
+int tdb_traverse(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data);
+
+/**
+ * @brief Traverse the entire database.
+ *
+ * While traversing the database the function fn(tdb, key, data, state) is
+ * called on each element, but marking the database read only during the
+ * traversal, so any write operations will fail. This allows tdb to use read
+ * locks, which increases the parallelism possible during the traversal.
+ *
+ * @param[in] tdb The database to traverse.
+ *
+ * @param[in] fn The function to call on each entry.
+ *
+ * @param[in] private_data The private data which should be passed to the
+ * traversing function.
+ *
+ * @return The record count traversed, -1 on error.
+ */
+int tdb_traverse_read(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data);
+
+/**
+ * @brief Check if an entry in the database exists.
+ *
+ * @note 1 is returned if the key is found and 0 is returned if not found. This
+ * doesn't match the conventions in the rest of this module, but is compatible
+ * with gdbm.
+ *
+ * @param[in] tdb The database to check if the entry exists.
+ *
+ * @param[in] key The key to check if the entry exists.
+ *
+ * @return 1 if the key is found, 0 if not.
+ */
int tdb_exists(struct tdb_context *tdb, TDB_DATA key);
+
+/**
+ * @brief Lock entire database with a write lock.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_lockall(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with a write lock.
+ *
+ * This is the non-blocking call.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_lockall_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Unlock entire database with write lock.
+ *
+ * @param[in] tdb The database to unlock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_unlockall(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with a read lock.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_lockall_read(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with a read lock.
+ *
+ * This is the non-blocking call.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall_read()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_lockall_read_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Unlock entire database with read lock.
+ *
+ * @param[in] tdb The database to unlock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall_read()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_unlockall_read(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with write lock - mark only.
+ *
+ * @todo Add more details.
+ *
+ * @param[in] tdb The database to mark.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_lockall_mark(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with write lock - unmark only.
+ *
+ * @todo Add more details.
+ *
+ * @param[in] tdb The database to mark.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_lockall_unmark(struct tdb_context *tdb);
+
+/**
+ * @brief Get the name of the current tdb file.
+ *
+ * This is useful for external logging functions.
+ *
+ * @param[in] tdb The database to get the name from.
+ *
+ * @return The name of the database.
+ */
const char *tdb_name(struct tdb_context *tdb);
+
+/**
+ * @brief Get the underlying file descriptor being used by tdb.
+ *
+ * This is useful for external routines that want to check the device/inode
+ * of the fd.
+ *
+ * @param[in] tdb The database to get the fd from.
+ *
+ * @return The file descriptor or -1.
+ */
int tdb_fd(struct tdb_context *tdb);
+
+/**
+ * @brief Get the current logging function.
+ *
+ * This is useful for external tdb routines that wish to log tdb errors.
+ *
+ * @param[in] tdb The database to get the logging function from.
+ *
+ * @return The logging function of the database.
+ *
+ * @see tdb_get_logging_private()
+ */
tdb_log_func tdb_log_fn(struct tdb_context *tdb);
+
+/**
+ * @brief Get the private data of the logging function.
+ *
+ * @param[in] tdb The database to get the data from.
+ *
+ * @return The private data pointer of the logging function.
+ *
+ * @see tdb_log_fn()
+ */
void *tdb_get_logging_private(struct tdb_context *tdb);
+
+/**
+ * @brief Start a transaction.
+ *
+ * All operations after the transaction start can either be committed with
+ * tdb_transaction_commit() or cancelled with tdb_transaction_cancel().
+ *
+ * If you call tdb_transaction_start() again on the same tdb context while a
+ * transaction is in progress, then the same transaction buffer is re-used. The
+ * number of tdb_transaction_{commit,cancel} operations must match the number
+ * of successful tdb_transaction_start() calls.
+ *
+ * Note that transactions are by default disk synchronous, and use a recover
+ * area in the database to automatically recover the database on the next open
+ * if the system crashes during a transaction. You can disable the synchronous
+ * transaction recovery setup using the TDB_NOSYNC flag, which will greatly
+ * speed up operations at the risk of corrupting your database if the system
+ * crashes.
+ *
+ * Operations made within a transaction are not visible to other users of the
+ * database until a successful commit.
+ *
+ * @param[in] tdb The database to start the transaction.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_transaction_start(struct tdb_context *tdb);
+
+/**
+ * @brief Start a transaction, non-blocking.
+ *
+ * @param[in] tdb The database to start the transaction.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ * @see tdb_transaction_start()
+ */
+int tdb_transaction_start_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Prepare to commit a current transaction, for two-phase commits.
+ *
+ * Once prepared for commit, the only allowed calls are tdb_transaction_commit()
+ * or tdb_transaction_cancel(). Preparing allocates disk space for the pending
+ * updates, so a subsequent commit should succeed (barring any hardware
+ * failures).
+ *
+ * @param[in] tdb The database to prepare the commit.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_transaction_prepare_commit(struct tdb_context *tdb);
+
+/**
+ * @brief Commit a current transaction.
+ *
+ * This updates the database and releases the current transaction locks.
+ *
+ * @param[in] tdb The database to commit the transaction.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_transaction_commit(struct tdb_context *tdb);
+
+/**
+ * @brief Cancel a current transaction.
+ *
+ * This discards all write and lock operations that have been made since the
+ * transaction started.
+ *
+ * @param[in] tdb The tdb to cancel the transaction on.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_transaction_cancel(struct tdb_context *tdb);
-int tdb_transaction_recover(struct tdb_context *tdb);
+
+/**
+ * @brief Get the tdb sequence number.
+ *
+ * Only makes sense if the writers opened with TDB_SEQNUM set. Note that this
+ * sequence number will wrap quite quickly, so it should only be used for a
+ * 'has something changed' test, not for code that relies on the count of the
+ * number of changes made. If you want a counter then use a tdb record.
+ *
+ * The aim of this sequence number is to allow for a very lightweight test of a
+ * possible tdb change.
+ *
+ * @param[in] tdb The database to get the sequence number from.
+ *
+ * @return The sequence number or 0.
+ *
+ * @see tdb_open()
+ * @see tdb_enable_seqnum()
+ */
int tdb_get_seqnum(struct tdb_context *tdb);
+
+/**
+ * @brief Get the hash size.
+ *
+ * @param[in] tdb The database to get the hash size from.
+ *
+ * @return The hash size.
+ */
int tdb_hash_size(struct tdb_context *tdb);
+
+/**
+ * @brief Get the map size.
+ *
+ * @param[in] tdb The database to get the map size from.
+ *
+ * @return The map size.
+ */
size_t tdb_map_size(struct tdb_context *tdb);
+
+/**
+ * @brief Get the tdb flags set during open.
+ *
+ * @param[in]  tdb     The database to get the flags from.
+ *
+ * @return              The flags set on the database.
+ */
int tdb_get_flags(struct tdb_context *tdb);
+
+/**
+ * @brief Add flags to the database.
+ *
+ * @param[in] tdb The database to add the flags.
+ *
+ * @param[in] flag The tdb flags to add.
+ */
void tdb_add_flags(struct tdb_context *tdb, unsigned flag);
+
+/**
+ * @brief Remove flags from the database.
+ *
+ * @param[in] tdb The database to remove the flags.
+ *
+ * @param[in] flag The tdb flags to remove.
+ */
void tdb_remove_flags(struct tdb_context *tdb, unsigned flag);
+
+/**
+ * @brief Enable sequence number handling on an open tdb.
+ *
+ * @param[in] tdb The database to enable sequence number handling.
+ *
+ * @see tdb_get_seqnum()
+ */
void tdb_enable_seqnum(struct tdb_context *tdb);
+
+/**
+ * @brief Increment the tdb sequence number.
+ *
+ * This only works if the tdb has been opened using the TDB_SEQNUM flag or
+ * enabled using tdb_enable_seqnum().
+ *
+ * @param[in] tdb The database to increment the sequence number.
+ *
+ * @see tdb_enable_seqnum()
+ * @see tdb_get_seqnum()
+ */
void tdb_increment_seqnum_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Create a hash of the key.
+ *
+ * @param[in] key The key to hash
+ *
+ * @return The hash.
+ */
+unsigned int tdb_jenkins_hash(TDB_DATA *key);
+
+/**
+ * @brief Check the consistency of the database.
+ *
+ * This checks the consistency of the database, calling back the check function
+ * (if non-NULL) on each record. If some consistency check fails, or the
+ * supplied check function returns -1, tdb_check returns -1, otherwise 0.
+ *
+ * @note The logging function (if set) will be called with additional
+ * information on the corruption found.
+ *
+ * @param[in] tdb The database to check.
+ *
+ * @param[in] check The check function to use.
+ *
+ * @param[in] private_data the private data to pass to the check function.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
int tdb_check(struct tdb_context *tdb,
int (*check) (TDB_DATA key, TDB_DATA data, void *private_data),
void *private_data);
+/* @} ******************************************************************/
+
/* Low level locking functions: use with care */
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key);
int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key);
@@ -166,6 +836,7 @@ void tdb_dump_all(struct tdb_context *tdb);
int tdb_printfreelist(struct tdb_context *tdb);
int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries);
int tdb_freelist_size(struct tdb_context *tdb);
+char *tdb_summary(struct tdb_context *tdb);
extern TDB_DATA tdb_null;
diff --git a/lib/tdb/install-sh b/lib/tdb/install-sh
deleted file mode 100755
index 58719246f0..0000000000
--- a/lib/tdb/install-sh
+++ /dev/null
@@ -1,238 +0,0 @@
-#! /bin/sh
-#
-# install - install a program, script, or datafile
-# This comes from X11R5.
-#
-# Calling this script install-sh is preferred over install.sh, to prevent
-# `make' implicit rules from creating a file called install from it
-# when there is no Makefile.
-#
-# This script is compatible with the BSD install script, but was written
-# from scratch.
-#
-
-
-# set DOITPROG to echo to test this script
-
-# Don't use :- since 4.3BSD and earlier shells don't like it.
-doit="${DOITPROG-}"
-
-
-# put in absolute paths if you don't have them in your path; or use env. vars.
-
-mvprog="${MVPROG-mv}"
-cpprog="${CPPROG-cp}"
-chmodprog="${CHMODPROG-chmod}"
-chownprog="${CHOWNPROG-chown}"
-chgrpprog="${CHGRPPROG-chgrp}"
-stripprog="${STRIPPROG-strip}"
-rmprog="${RMPROG-rm}"
-mkdirprog="${MKDIRPROG-mkdir}"
-
-transformbasename=""
-transform_arg=""
-instcmd="$mvprog"
-chmodcmd="$chmodprog 0755"
-chowncmd=""
-chgrpcmd=""
-stripcmd=""
-rmcmd="$rmprog -f"
-mvcmd="$mvprog"
-src=""
-dst=""
-dir_arg=""
-
-while [ x"$1" != x ]; do
- case $1 in
- -c) instcmd="$cpprog"
- shift
- continue;;
-
- -d) dir_arg=true
- shift
- continue;;
-
- -m) chmodcmd="$chmodprog $2"
- shift
- shift
- continue;;
-
- -o) chowncmd="$chownprog $2"
- shift
- shift
- continue;;
-
- -g) chgrpcmd="$chgrpprog $2"
- shift
- shift
- continue;;
-
- -s) stripcmd="$stripprog"
- shift
- continue;;
-
- -t=*) transformarg=`echo $1 | sed 's/-t=//'`
- shift
- continue;;
-
- -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
- shift
- continue;;
-
- *) if [ x"$src" = x ]
- then
- src=$1
- else
- # this colon is to work around a 386BSD /bin/sh bug
- :
- dst=$1
- fi
- shift
- continue;;
- esac
-done
-
-if [ x"$src" = x ]
-then
- echo "install: no input file specified"
- exit 1
-else
- true
-fi
-
-if [ x"$dir_arg" != x ]; then
- dst=$src
- src=""
-
- if [ -d $dst ]; then
- instcmd=:
- else
- instcmd=mkdir
- fi
-else
-
-# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
-# might cause directories to be created, which would be especially bad
-# if $src (and thus $dsttmp) contains '*'.
-
- if [ -f $src -o -d $src ]
- then
- true
- else
- echo "install: $src does not exist"
- exit 1
- fi
-
- if [ x"$dst" = x ]
- then
- echo "install: no destination specified"
- exit 1
- else
- true
- fi
-
-# If destination is a directory, append the input filename; if your system
-# does not like double slashes in filenames, you may need to add some logic
-
- if [ -d $dst ]
- then
- dst="$dst"/`basename $src`
- else
- true
- fi
-fi
-
-## this sed command emulates the dirname command
-dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
-
-# Make sure that the destination directory exists.
-# this part is taken from Noah Friedman's mkinstalldirs script
-
-# Skip lots of stat calls in the usual case.
-if [ ! -d "$dstdir" ]; then
-defaultIFS='
-'
-IFS="${IFS-${defaultIFS}}"
-
-oIFS="${IFS}"
-# Some sh's can't handle IFS=/ for some reason.
-IFS='%'
-set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
-IFS="${oIFS}"
-
-pathcomp=''
-
-while [ $# -ne 0 ] ; do
- pathcomp="${pathcomp}${1}"
- shift
-
- if [ ! -d "${pathcomp}" ] ;
- then
- $mkdirprog "${pathcomp}"
- else
- true
- fi
-
- pathcomp="${pathcomp}/"
-done
-fi
-
-if [ x"$dir_arg" != x ]
-then
- $doit $instcmd $dst &&
-
- if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
- if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
- if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
- if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
-else
-
-# If we're going to rename the final executable, determine the name now.
-
- if [ x"$transformarg" = x ]
- then
- dstfile=`basename $dst`
- else
- dstfile=`basename $dst $transformbasename |
- sed $transformarg`$transformbasename
- fi
-
-# don't allow the sed command to completely eliminate the filename
-
- if [ x"$dstfile" = x ]
- then
- dstfile=`basename $dst`
- else
- true
- fi
-
-# Make a temp file name in the proper directory.
-
- dsttmp=$dstdir/#inst.$$#
-
-# Move or copy the file name to the temp name
-
- $doit $instcmd $src $dsttmp &&
-
- trap "rm -f ${dsttmp}" 0 &&
-
-# and set any options; do chmod last to preserve setuid bits
-
-# If any of these fail, we abort the whole thing. If we want to
-# ignore errors from any of these, just make sure not to ignore
-# errors from the above "$doit $instcmd $src $dsttmp" command.
-
- if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
- if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
- if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
- if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
-
-# Now rename the file to the real destination.
-
- $doit $rmcmd -f $dstdir/$dstfile &&
- $doit $mvcmd $dsttmp $dstdir/$dstfile
-
-fi &&
-
-
-exit 0
diff --git a/lib/tdb/libtdb.m4 b/lib/tdb/libtdb.m4
index feae1c2cc6..b5164fc5b2 100644
--- a/lib/tdb/libtdb.m4
+++ b/lib/tdb/libtdb.m4
@@ -13,13 +13,19 @@ if test x"$tdbdir" = "x"; then
AC_MSG_ERROR([cannot find tdb source in $tdbpaths])
fi
TDB_OBJ="common/tdb.o common/dump.o common/transaction.o common/error.o common/traverse.o"
-TDB_OBJ="$TDB_OBJ common/freelist.o common/freelistcheck.o common/io.o common/lock.o common/open.o common/check.o"
+TDB_OBJ="$TDB_OBJ common/freelist.o common/freelistcheck.o common/io.o common/lock.o common/open.o common/check.o common/hash.o common/summary.o"
AC_SUBST(TDB_OBJ)
AC_SUBST(LIBREPLACEOBJ)
TDB_LIBS=""
AC_SUBST(TDB_LIBS)
+TDB_DEPS=""
+if test x$libreplace_cv_HAVE_FDATASYNC_IN_LIBRT = xyes ; then
+ TDB_DEPS="$TDB_DEPS -lrt"
+fi
+AC_SUBST(TDB_DEPS)
+
TDB_CFLAGS="-I$tdbdir/include"
AC_SUBST(TDB_CFLAGS)
diff --git a/lib/tdb/manpages/tdbbackup.8.xml b/lib/tdb/manpages/tdbbackup.8.xml
index c3a6e2b051..78fe32eb8e 100644
--- a/lib/tdb/manpages/tdbbackup.8.xml
+++ b/lib/tdb/manpages/tdbbackup.8.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="iso-8859-1"?>
-<!DOCTYPE refentry PUBLIC "-//Samba-Team//DTD DocBook V4.2-Based Variant V1.0//EN" "http://www.samba.org/samba/DTD/samba-doc">
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<refentry id="tdbbackup.8">
<refmeta>
@@ -7,7 +7,7 @@
<manvolnum>8</manvolnum>
<refmiscinfo class="source">Samba</refmiscinfo>
<refmiscinfo class="manual">System Administration tools</refmiscinfo>
- <refmiscinfo class="version">3.5</refmiscinfo>
+ <refmiscinfo class="version">3.6</refmiscinfo>
</refmeta>
diff --git a/lib/tdb/manpages/tdbdump.8.xml b/lib/tdb/manpages/tdbdump.8.xml
index 5c0028db42..90465e53e8 100644
--- a/lib/tdb/manpages/tdbdump.8.xml
+++ b/lib/tdb/manpages/tdbdump.8.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="iso-8859-1"?>
-<!DOCTYPE refentry PUBLIC "-//Samba-Team//DTD DocBook V4.2-Based Variant V1.0//EN" "http://www.samba.org/samba/DTD/samba-doc">
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<refentry id="tdbdump.8">
<refmeta>
@@ -7,7 +7,7 @@
<manvolnum>8</manvolnum>
<refmiscinfo class="source">Samba</refmiscinfo>
<refmiscinfo class="manual">System Administration tools</refmiscinfo>
- <refmiscinfo class="version">3.5</refmiscinfo>
+ <refmiscinfo class="version">3.6</refmiscinfo>
</refmeta>
diff --git a/lib/tdb/manpages/tdbrestore.8.xml b/lib/tdb/manpages/tdbrestore.8.xml
new file mode 100644
index 0000000000..64c0ba2dac
--- /dev/null
+++ b/lib/tdb/manpages/tdbrestore.8.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="iso-8859-1"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<refentry id="tdbrestore.8">
+
+<refmeta>
+ <refentrytitle>tdbrestore</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo class="source">Samba</refmiscinfo>
+ <refmiscinfo class="manual">System Administration tools</refmiscinfo>
+ <refmiscinfo class="version">3.6</refmiscinfo>
+</refmeta>
+
+
+<refnamediv>
+ <refname>tdbrestore</refname>
+ <refpurpose>tool for creating a TDB file out of a tdbdump output</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+ <cmdsynopsis>
+ <command>tdbrestore</command>
+ <arg choice="req">tdbfilename</arg>
+ </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsect1>
+ <title>DESCRIPTION</title>
+
+ <para>This tool is part of the <citerefentry><refentrytitle>samba</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry> suite.</para>
+
+ <para><command>tdbrestore</command> is a very simple utility that 'restores' the
+ contents of dump file into TDB (Trivial DataBase) file. The dump file is obtained from the tdbdump
+ command.
+ </para>
+
+ <para>This tool wait on the standard input for the content of the dump and will write the tdb in the tdbfilename
+ parameter.
+ </para>
+ <para>This tool can be used for unpacking the content of tdb as backup mean.
+ </para>
+</refsect1>
+
+
+<refsect1>
+ <title>VERSION</title>
+
+ <para>This man page is correct for version 3 of the Samba suite.</para>
+</refsect1>
+
+<refsect1>
+ <title>AUTHOR</title>
+
+ <para>
+ The original Samba software and related utilities were created by Andrew Tridgell.
+ Samba is now developed by the Samba Team as an Open Source project similar to the way
+ the Linux kernel is developed.
+
+ This tool was initially written by Volker Lendecke based on an
+ idea by Simon McVittie.
+ </para>
+
+ <para>The tdbrestore man page was written by Matthieu Patou.</para>
+</refsect1>
+
+</refentry>
diff --git a/lib/tdb/manpages/tdbtool.8.xml b/lib/tdb/manpages/tdbtool.8.xml
index a755653106..9f96db277d 100644
--- a/lib/tdb/manpages/tdbtool.8.xml
+++ b/lib/tdb/manpages/tdbtool.8.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="iso-8859-1"?>
-<!DOCTYPE refentry PUBLIC "-//Samba-Team//DTD DocBook V4.2-Based Variant V1.0//EN" "http://www.samba.org/samba/DTD/samba-doc">
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<refentry id="tdbtool.8">
<refmeta>
@@ -7,7 +7,7 @@
<manvolnum>8</manvolnum>
<refmiscinfo class="source">Samba</refmiscinfo>
<refmiscinfo class="manual">System Administration tools</refmiscinfo>
- <refmiscinfo class="version">3.5</refmiscinfo>
+ <refmiscinfo class="version">3.6</refmiscinfo>
</refmeta>
diff --git a/lib/tdb/pytdb.c b/lib/tdb/pytdb.c
index 202dca1571..3dd785e7be 100644
--- a/lib/tdb/pytdb.c
+++ b/lib/tdb/pytdb.c
@@ -9,7 +9,7 @@
** NOTE! The following LGPL license applies to the tdb
** library. This does NOT imply that all of Samba is released
** under the LGPL
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
@@ -24,10 +24,10 @@
License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include <Python.h>
#include "replace.h"
#include "system/filesys.h"
-#include <Python.h>
#ifndef Py_RETURN_NONE
#define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None
#endif
@@ -41,7 +41,7 @@ typedef struct {
bool closed;
} PyTdbObject;
-PyAPI_DATA(PyTypeObject) PyTdb;
+staticforward PyTypeObject PyTdb;
static void PyErr_SetTDBError(TDB_CONTEXT *tdb)
{
@@ -77,15 +77,19 @@ static PyObject *PyString_FromTDB_DATA(TDB_DATA data)
static PyObject *py_tdb_open(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
- char *name;
+ char *name = NULL;
int hash_size = 0, tdb_flags = TDB_DEFAULT, flags = O_RDWR, mode = 0600;
TDB_CONTEXT *ctx;
PyTdbObject *ret;
const char *kwnames[] = { "name", "hash_size", "tdb_flags", "flags", "mode", NULL };
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|iiii", (char **)kwnames, &name, &hash_size, &tdb_flags, &flags, &mode))
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|siiii", (char **)kwnames, &name, &hash_size, &tdb_flags, &flags, &mode))
return NULL;
+ if (name == NULL) {
+ tdb_flags |= TDB_INTERNAL;
+ }
+
ctx = tdb_open(name, hash_size, tdb_flags, flags, mode);
if (ctx == NULL) {
PyErr_SetFromErrno(PyExc_IOError);
@@ -93,6 +97,11 @@ static PyObject *py_tdb_open(PyTypeObject *type, PyObject *args, PyObject *kwarg
}
ret = PyObject_New(PyTdbObject, &PyTdb);
+ if (!ret) {
+ tdb_close(ctx);
+ return NULL;
+ }
+
ret->ctx = ctx;
ret->closed = false;
return (PyObject *)ret;
@@ -112,9 +121,9 @@ static PyObject *obj_transaction_commit(PyTdbObject *self)
Py_RETURN_NONE;
}
-static PyObject *obj_transaction_recover(PyTdbObject *self)
+static PyObject *obj_transaction_prepare_commit(PyTdbObject *self)
{
- int ret = tdb_transaction_recover(self->ctx);
+ int ret = tdb_transaction_prepare_commit(self->ctx);
PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
Py_RETURN_NONE;
}
@@ -266,6 +275,27 @@ static PyObject *obj_store(PyTdbObject *self, PyObject *args)
Py_RETURN_NONE;
}
+static PyObject *obj_add_flags(PyTdbObject *self, PyObject *args)
+{
+ unsigned flags;
+
+ if (!PyArg_ParseTuple(args, "I", &flags))
+ return NULL;
+
+ tdb_add_flags(self->ctx, flags);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_remove_flags(PyTdbObject *self, PyObject *args)
+{
+ unsigned flags;
+
+ if (!PyArg_ParseTuple(args, "I", &flags))
+ return NULL;
+
+ tdb_remove_flags(self->ctx, flags);
+ Py_RETURN_NONE;
+}
typedef struct {
PyObject_HEAD
@@ -305,6 +335,8 @@ static PyObject *tdb_object_iter(PyTdbObject *self)
PyTdbIteratorObject *ret;
ret = PyObject_New(PyTdbIteratorObject, &PyTdbIterator);
+ if (!ret)
+ return NULL;
ret->current = tdb_firstkey(self->ctx);
ret->iteratee = self;
Py_INCREF(self);
@@ -318,6 +350,25 @@ static PyObject *obj_clear(PyTdbObject *self)
Py_RETURN_NONE;
}
+static PyObject *obj_repack(PyTdbObject *self)
+{
+ int ret = tdb_repack(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_enable_seqnum(PyTdbObject *self)
+{
+ tdb_enable_seqnum(self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_increment_seqnum_nonblock(PyTdbObject *self)
+{
+ tdb_increment_seqnum_nonblock(self->ctx);
+ Py_RETURN_NONE;
+}
+
static PyMethodDef tdb_object_methods[] = {
{ "transaction_cancel", (PyCFunction)obj_transaction_cancel, METH_NOARGS,
"S.transaction_cancel() -> None\n"
@@ -325,9 +376,9 @@ static PyMethodDef tdb_object_methods[] = {
{ "transaction_commit", (PyCFunction)obj_transaction_commit, METH_NOARGS,
"S.transaction_commit() -> None\n"
"Commit the currently active transaction." },
- { "transaction_recover", (PyCFunction)obj_transaction_recover, METH_NOARGS,
- "S.transaction_recover() -> None\n"
- "Recover the currently active transaction." },
+ { "transaction_prepare_commit", (PyCFunction)obj_transaction_prepare_commit, METH_NOARGS,
+ "S.transaction_prepare_commit() -> None\n"
+ "Prepare to commit the currently active transaction" },
{ "transaction_start", (PyCFunction)obj_transaction_start, METH_NOARGS,
"S.transaction_start() -> None\n"
"Start a new transaction." },
@@ -351,9 +402,17 @@ static PyMethodDef tdb_object_methods[] = {
"Check whether key exists in this database." },
{ "store", (PyCFunction)obj_store, METH_VARARGS, "S.store(key, data, flag=REPLACE) -> None"
"Store data." },
+ { "add_flags", (PyCFunction)obj_add_flags, METH_VARARGS, "S.add_flags(flags) -> None" },
+ { "remove_flags", (PyCFunction)obj_remove_flags, METH_VARARGS, "S.remove_flags(flags) -> None" },
{ "iterkeys", (PyCFunction)tdb_object_iter, METH_NOARGS, "S.iterkeys() -> iterator" },
{ "clear", (PyCFunction)obj_clear, METH_NOARGS, "S.clear() -> None\n"
"Wipe the entire database." },
+ { "repack", (PyCFunction)obj_repack, METH_NOARGS, "S.repack() -> None\n"
+ "Repack the entire database." },
+ { "enable_seqnum", (PyCFunction)obj_enable_seqnum, METH_NOARGS,
+ "S.enable_seqnum() -> None" },
+ { "increment_seqnum_nonblock", (PyCFunction)obj_increment_seqnum_nonblock, METH_NOARGS,
+ "S.increment_seqnum_nonblock() -> None" },
{ NULL }
};
@@ -375,6 +434,11 @@ static PyObject *obj_get_map_size(PyTdbObject *self, void *closure)
return PyInt_FromLong(tdb_map_size(self->ctx));
}
+static PyObject *obj_get_freelist_size(PyTdbObject *self, void *closure)
+{
+ return PyInt_FromLong(tdb_freelist_size(self->ctx));
+}
+
static PyObject *obj_get_flags(PyTdbObject *self, void *closure)
{
return PyInt_FromLong(tdb_get_flags(self->ctx));
@@ -385,25 +449,37 @@ static PyObject *obj_get_filename(PyTdbObject *self, void *closure)
return PyString_FromString(tdb_name(self->ctx));
}
+static PyObject *obj_get_seqnum(PyTdbObject *self, void *closure)
+{
+ return PyInt_FromLong(tdb_get_seqnum(self->ctx));
+}
+
+
static PyGetSetDef tdb_object_getsetters[] = {
{ (char *)"hash_size", (getter)obj_get_hash_size, NULL, NULL },
{ (char *)"map_size", (getter)obj_get_map_size, NULL, NULL },
+ { (char *)"freelist_size", (getter)obj_get_freelist_size, NULL, NULL },
{ (char *)"flags", (getter)obj_get_flags, NULL, NULL },
{ (char *)"max_dead", NULL, (setter)obj_set_max_dead, NULL },
{ (char *)"filename", (getter)obj_get_filename, NULL, (char *)"The filename of this TDB file."},
+ { (char *)"seqnum", (getter)obj_get_seqnum, NULL, NULL },
{ NULL }
};
static PyObject *tdb_object_repr(PyTdbObject *self)
{
- return PyString_FromFormat("Tdb('%s')", tdb_name(self->ctx));
+ if (tdb_get_flags(self->ctx) & TDB_INTERNAL) {
+ return PyString_FromString("Tdb(<internal>)");
+ } else {
+ return PyString_FromFormat("Tdb('%s')", tdb_name(self->ctx));
+ }
}
static void tdb_object_dealloc(PyTdbObject *self)
{
if (!self->closed)
tdb_close(self->ctx);
- PyObject_Del(self);
+ self->ob_type->tp_free(self);
}
static PyObject *obj_getitem(PyTdbObject *self, PyObject *key)
@@ -462,7 +538,7 @@ static PyMappingMethods tdb_object_mapping = {
.mp_subscript = (binaryfunc)obj_getitem,
.mp_ass_subscript = (objobjargproc)obj_setitem,
};
-PyTypeObject PyTdb = {
+static PyTypeObject PyTdb = {
.tp_name = "Tdb",
.tp_basicsize = sizeof(PyTdbObject),
.tp_methods = tdb_object_methods,
@@ -482,6 +558,7 @@ static PyMethodDef tdb_methods[] = {
{ NULL }
};
+void inittdb(void);
void inittdb(void)
{
PyObject *m;
@@ -507,8 +584,17 @@ void inittdb(void)
PyModule_AddObject(m, "NOMMAP", PyInt_FromLong(TDB_NOMMAP));
PyModule_AddObject(m, "CONVERT", PyInt_FromLong(TDB_CONVERT));
PyModule_AddObject(m, "BIGENDIAN", PyInt_FromLong(TDB_BIGENDIAN));
+ PyModule_AddObject(m, "NOSYNC", PyInt_FromLong(TDB_NOSYNC));
+ PyModule_AddObject(m, "SEQNUM", PyInt_FromLong(TDB_SEQNUM));
+ PyModule_AddObject(m, "VOLATILE", PyInt_FromLong(TDB_VOLATILE));
+ PyModule_AddObject(m, "ALLOW_NESTING", PyInt_FromLong(TDB_ALLOW_NESTING));
+ PyModule_AddObject(m, "DISALLOW_NESTING", PyInt_FromLong(TDB_DISALLOW_NESTING));
+ PyModule_AddObject(m, "INCOMPATIBLE_HASH", PyInt_FromLong(TDB_INCOMPATIBLE_HASH));
+
PyModule_AddObject(m, "__docformat__", PyString_FromString("restructuredText"));
+ PyModule_AddObject(m, "__version__", PyString_FromString(PACKAGE_VERSION));
+
Py_INCREF(&PyTdb);
PyModule_AddObject(m, "Tdb", (PyObject *)&PyTdb);
diff --git a/lib/tdb/python.mk b/lib/tdb/python.mk
deleted file mode 100644
index 1f2d4ca4a8..0000000000
--- a/lib/tdb/python.mk
+++ /dev/null
@@ -1,6 +0,0 @@
-[PYTHON::pytdb]
-LIBRARY_REALNAME = tdb.$(SHLIBEXT)
-PUBLIC_DEPENDENCIES = LIBTDB DYNCONFIG
-
-pytdb_OBJ_FILES = $(tdbsrcdir)/pytdb.o
-
diff --git a/lib/tdb/python/tdbdump.py b/lib/tdb/python/tdbdump.py
index d759d771c8..01859ebce2 100644
--- a/lib/tdb/python/tdbdump.py
+++ b/lib/tdb/python/tdbdump.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Trivial reimplementation of tdbdump in Python
import tdb, sys
diff --git a/lib/tdb/python/tests/simple.py b/lib/tdb/python/tests/simple.py
index c7443c0d43..f5484a0523 100644
--- a/lib/tdb/python/tests/simple.py
+++ b/lib/tdb/python/tests/simple.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Some simple tests for the Python bindings for TDB
# Note that this tests the interface of the Python bindings
# It does not test tdb itself.
@@ -12,12 +12,16 @@ import os, tempfile
class OpenTdbTests(TestCase):
+
def test_nonexistant_read(self):
- self.assertRaises(IOError, tdb.Tdb, "/some/nonexistant/file", 0, tdb.DEFAULT, os.O_RDWR)
+ self.assertRaises(IOError, tdb.Tdb, "/some/nonexistant/file", 0,
+ tdb.DEFAULT, os.O_RDWR)
class CloseTdbTests(TestCase):
+
def test_double_close(self):
- self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT, os.O_CREAT|os.O_RDWR)
+ self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT,
+ os.O_CREAT|os.O_RDWR)
self.assertNotEqual(None, self.tdb)
# ensure that double close does not crash python
@@ -25,10 +29,21 @@ class CloseTdbTests(TestCase):
self.tdb.close()
+class InternalTdbTests(TestCase):
+
+ def test_repr(self):
+ self.tdb = tdb.Tdb()
+
+ # repr used to crash on internal db
+ self.assertEquals(repr(self.tdb), "Tdb(<internal>)")
+
+
class SimpleTdbTests(TestCase):
+
def setUp(self):
super(SimpleTdbTests, self).setUp()
- self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT, os.O_CREAT|os.O_RDWR)
+ self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT,
+ os.O_CREAT|os.O_RDWR)
self.assertNotEqual(None, self.tdb)
def tearDown(self):
@@ -81,6 +96,9 @@ class SimpleTdbTests(TestCase):
def test_map_size(self):
self.tdb.map_size
+ def test_freelist_size(self):
+ self.tdb.freelist_size
+
def test_name(self):
self.tdb.filename
@@ -103,11 +121,13 @@ class SimpleTdbTests(TestCase):
self.tdb.transaction_commit()
self.assertEquals("1", self.tdb["bloe"])
- def test_iterator(self):
+ def test_transaction_prepare_commit(self):
self.tdb["bloe"] = "2"
- self.tdb["bla"] = "hoi"
- i = iter(self.tdb)
- self.assertEquals(set(["bloe", "bla"]), set([i.next(), i.next()]))
+ self.tdb.transaction_start()
+ self.tdb["bloe"] = "1"
+ self.tdb.transaction_prepare_commit()
+ self.tdb.transaction_commit()
+ self.assertEquals("1", self.tdb["bloe"])
def test_iterkeys(self):
self.tdb["bloe"] = "2"
@@ -122,11 +142,34 @@ class SimpleTdbTests(TestCase):
self.tdb.clear()
self.assertEquals(0, len(list(self.tdb)))
+ def test_repack(self):
+ self.tdb["foo"] = "abc"
+ self.tdb["bar"] = "def"
+ del self.tdb["foo"]
+ self.tdb.repack()
+
+ def test_seqnum(self):
+ self.tdb.enable_seqnum()
+ seq1 = self.tdb.seqnum
+ self.tdb.increment_seqnum_nonblock()
+ seq2 = self.tdb.seqnum
+ self.assertEquals(seq2-seq1, 1)
+
def test_len(self):
self.assertEquals(0, len(list(self.tdb)))
self.tdb["entry"] = "value"
self.assertEquals(1, len(list(self.tdb)))
+ def test_add_flags(self):
+ self.tdb.add_flags(tdb.NOMMAP)
+ self.tdb.remove_flags(tdb.NOMMAP)
+
+
+class VersionTests(TestCase):
+
+ def test_present(self):
+ self.assertTrue(isinstance(tdb.__version__, str))
+
if __name__ == '__main__':
import unittest
diff --git a/lib/tdb/release-script.sh b/lib/tdb/release-script.sh
deleted file mode 100755
index 273ca30be8..0000000000
--- a/lib/tdb/release-script.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-if [ "$1" = "" ]; then
- echo "Please provide version string, eg: 1.2.0"
- exit 1
-fi
-
-if [ ! -d "lib/tdb" ]; then
- echo "Run this script from the samba base directory."
- exit 1
-fi
-
-git clean -f -x -d lib/tdb
-git clean -f -x -d lib/replace
-
-curbranch=`git branch |grep "^*" | tr -d "* "`
-
-version=$1
-strver=`echo ${version} | tr "." "-"`
-
-# Checkout the release tag
-git branch -f tdb-release-script-${strver} tdb-${strver}
-if [ ! "$?" = "0" ]; then
- echo "Unable to checkout tdb-${strver} release"
- exit 1
-fi
-
-git checkout tdb-release-script-${strver}
-
-# Test configure agrees with us
-confver=`grep "^AC_INIT" lib/tdb/configure.ac | tr -d "AC_INIT(tdb, " | tr -d ")"`
-if [ ! "$confver" = "$version" ]; then
- echo "Wrong version, requested release for ${version}, found ${confver}"
- exit 1
-fi
-
-# Now build tarball
-cp -a lib/tdb tdb-${version}
-cp -a lib/replace tdb-${version}/libreplace
-pushd tdb-${version}
-./autogen.sh
-popd
-tar cvzf tdb-${version}.tar.gz tdb-${version}
-rm -fr tdb-${version}
-
-#Clean up
-git checkout $curbranch
-git branch -d tdb-release-script-${strver}
diff --git a/lib/tdb/rules.mk b/lib/tdb/rules.mk
deleted file mode 100644
index 023e0ce534..0000000000
--- a/lib/tdb/rules.mk
+++ /dev/null
@@ -1,16 +0,0 @@
-showflags::
- @echo 'tdb will be compiled with flags:'
- @echo ' CFLAGS = $(CFLAGS)'
- @echo ' CPPFLAGS = $(CPPFLAGS)'
- @echo ' LDFLAGS = $(LDFLAGS)'
- @echo ' LIBS = $(LIBS)'
-
-.SUFFIXES: .c .o
-
-.c.o:
- @echo Compiling $*.c
- @mkdir -p `dirname $@`
- @$(CC) $(PICFLAG) $(CFLAGS) $(ABI_CHECK) -c $< -o $@
-
-distclean::
- rm -f *~ */*~
diff --git a/lib/tdb/script/release-script.sh b/lib/tdb/script/release-script.sh
new file mode 100755
index 0000000000..e9a023d7a5
--- /dev/null
+++ b/lib/tdb/script/release-script.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+LNAME=tdb
+LINCLUDE=include/tdb.h
+
+if [ "$1" = "" ]; then
+ echo "Please provide version string, eg: 1.2.0"
+ exit 1
+fi
+
+if [ ! -d "lib/${LNAME}" ]; then
+ echo "Run this script from the samba base directory."
+ exit 1
+fi
+
+curbranch=`git branch |grep "^*" | tr -d "* "`
+
+version=$1
+strver=`echo ${version} | tr "." "-"`
+
+# Checkout the release tag
+git branch -f ${LNAME}-release-script-${strver} ${LNAME}-${strver}
+if [ ! "$?" = "0" ]; then
+ echo "Unable to checkout ${LNAME}-${strver} release"
+ exit 1
+fi
+
+function cleanquit {
+ #Clean up
+ git checkout $curbranch
+ git branch -d ${LNAME}-release-script-${strver}
+ exit $1
+}
+
+# NOTE: use cleanquit after this point
+git checkout ${LNAME}-release-script-${strver}
+
+# Test configure agrees with us
+confver=`grep "^AC_INIT" lib/${LNAME}/configure.ac | tr -d "AC_INIT(${LNAME}, " | tr -d ")"`
+if [ ! "$confver" = "$version" ]; then
+ echo "Wrong version, requested release for ${version}, found ${confver}"
+ exit 1
+fi
+
+# Check exports and signatures are up to date
+pushd lib/${LNAME}
+./script/abi_checks.sh ${LNAME} ${LINCLUDE}
+abicheck=$?
+popd
+if [ ! "$abicheck" = "0" ]; then
+ echo "ERROR: ABI Checks produced warnings!"
+ cleanquit 1
+fi
+
+git clean -f -x -d lib/${LNAME}
+git clean -f -x -d lib/replace
+
+# Now build tarball
+cp -a lib/${LNAME} ${LNAME}-${version}
+cp -a lib/replace ${LNAME}-${version}/libreplace
+pushd ${LNAME}-${version}
+./autogen.sh
+popd
+tar cvzf ${LNAME}-${version}.tar.gz ${LNAME}-${version}
+rm -fr ${LNAME}-${version}
+
+cleanquit 0
diff --git a/lib/tdb/tdb.exports b/lib/tdb/tdb.exports
deleted file mode 100644
index cf287d8f32..0000000000
--- a/lib/tdb/tdb.exports
+++ /dev/null
@@ -1,65 +0,0 @@
-{
- global:
- tdb_add_flags;
- tdb_append;
- tdb_chainlock;
- tdb_chainlock_mark;
- tdb_chainlock_nonblock;
- tdb_chainlock_read;
- tdb_chainlock_unmark;
- tdb_chainunlock;
- tdb_chainunlock_read;
- tdb_check;
- tdb_close;
- tdb_delete;
- tdb_dump_all;
- tdb_enable_seqnum;
- tdb_error;
- tdb_errorstr;
- tdb_exists;
- tdb_fd;
- tdb_fetch;
- tdb_firstkey;
- tdb_freelist_size;
- tdb_get_flags;
- tdb_get_logging_private;
- tdb_get_seqnum;
- tdb_hash_size;
- tdb_increment_seqnum_nonblock;
- tdb_lockall;
- tdb_lockall_mark;
- tdb_lockall_nonblock;
- tdb_lockall_read;
- tdb_lockall_read_nonblock;
- tdb_lockall_unmark;
- tdb_log_fn;
- tdb_map_size;
- tdb_name;
- tdb_nextkey;
- tdb_open;
- tdb_open_ex;
- tdb_parse_record;
- tdb_printfreelist;
- tdb_remove_flags;
- tdb_reopen;
- tdb_reopen_all;
- tdb_repack;
- tdb_setalarm_sigptr;
- tdb_set_logging_function;
- tdb_set_max_dead;
- tdb_store;
- tdb_transaction_cancel;
- tdb_transaction_commit;
- tdb_transaction_prepare_commit;
- tdb_transaction_recover;
- tdb_transaction_start;
- tdb_traverse;
- tdb_traverse_read;
- tdb_unlockall;
- tdb_unlockall_read;
- tdb_validate_freelist;
- tdb_wipe_all;
- tdb_null;
-
- local: *;
-};
diff --git a/lib/tdb/tdb.mk b/lib/tdb/tdb.mk
deleted file mode 100644
index ecc6f9fd08..0000000000
--- a/lib/tdb/tdb.mk
+++ /dev/null
@@ -1,106 +0,0 @@
-dirs::
- @mkdir -p bin common tools
-
-PROGS = bin/tdbtool$(EXEEXT) bin/tdbdump$(EXEEXT) bin/tdbbackup$(EXEEXT)
-PROGS_NOINSTALL = bin/tdbtest$(EXEEXT) bin/tdbtorture$(EXEEXT)
-ALL_PROGS = $(PROGS) $(PROGS_NOINSTALL)
-
-TDB_SONAME = libtdb.$(SHLIBEXT).1
-TDB_SOLIB = libtdb.$(SHLIBEXT).$(PACKAGE_VERSION)
-TDB_STLIB = libtdb.a
-
-TDB_LIB = $(TDB_STLIB)
-
-bin/tdbtest$(EXEEXT): tools/tdbtest.o $(TDB_LIB)
- $(CC) $(CFLAGS) $(LDFLAGS) -o bin/tdbtest tools/tdbtest.o -L. -ltdb -lgdbm
-
-bin/tdbtool$(EXEEXT): tools/tdbtool.o $(TDB_LIB)
- $(CC) $(CFLAGS) $(LDFLAGS) -o bin/tdbtool tools/tdbtool.o -L. -ltdb
-
-bin/tdbtorture$(EXEEXT): tools/tdbtorture.o $(TDB_LIB)
- $(CC) $(CFLAGS) $(LDFLAGS) -o bin/tdbtorture tools/tdbtorture.o -L. -ltdb
-
-bin/tdbdump$(EXEEXT): tools/tdbdump.o $(TDB_LIB)
- $(CC) $(CFLAGS) $(LDFLAGS) -o bin/tdbdump tools/tdbdump.o -L. -ltdb
-
-bin/tdbbackup$(EXEEXT): tools/tdbbackup.o $(TDB_LIB)
- $(CC) $(CFLAGS) $(LDFLAGS) -o bin/tdbbackup tools/tdbbackup.o -L. -ltdb
-
-test:: abi_checks
-
-test:: bin/tdbtorture$(EXEEXT) $(TDB_SONAME)
- $(LIB_PATH_VAR)=. bin/tdbtorture$(EXEEXT)
-
-abi_checks::
- @echo ABI checks:
- @./script/abi_checks.sh tdb include/tdb.h
-
-clean::
- rm -f test.db test.tdb torture.tdb test.gdbm
- rm -f $(TDB_SONAME) $(TDB_SOLIB) $(TDB_STLIB) libtdb.$(SHLIBEXT)
- rm -f $(ALL_PROGS) tdb.pc
- rm -f tdb.exports.sort tdb.exports.check tdb.exports.check.sort
- rm -f tdb.signatures.sort tdb.signatures.check tdb.signatures.check.sort
-
-build-python:: tdb.$(SHLIBEXT)
-
-pytdb.o: $(tdbdir)/pytdb.c
- $(CC) $(PICFLAG) -c $(tdbdir)/pytdb.c $(CFLAGS) `$(PYTHON_CONFIG) --cflags`
-
-tdb.$(SHLIBEXT): libtdb.$(SHLIBEXT) pytdb.o
- $(SHLD) $(SHLD_FLAGS) -o $@ pytdb.o -L. -ltdb `$(PYTHON_CONFIG) --ldflags`
-
-install:: installdirs installbin installheaders installlibs \
- $(PYTHON_INSTALL_TARGET) installdocs
-
-doc:: manpages/tdbbackup.8 manpages/tdbdump.8 manpages/tdbtool.8
-
-.SUFFIXES: .8.xml .8
-
-.8.xml.8:
- -test -z "$(XSLTPROC)" || $(XSLTPROC) -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $<
-
-installdocs::
- ${INSTALLCMD} -d $(DESTDIR)$(mandir)/man8
- for I in manpages/*.8; do \
- ${INSTALLCMD} -m 644 $$I $(DESTDIR)$(mandir)/man8; \
- done
-
-install-python:: build-python
- mkdir -p $(DESTDIR)`$(PYTHON) -c "import distutils.sysconfig; print distutils.sysconfig.get_python_lib(1, prefix='$(prefix)')"`
- cp tdb.$(SHLIBEXT) $(DESTDIR)`$(PYTHON) -c "import distutils.sysconfig; print distutils.sysconfig.get_python_lib(1, prefix='$(prefix)')"`
-
-check-python:: build-python $(TDB_SONAME)
- $(LIB_PATH_VAR)=. PYTHONPATH=".:$(tdbdir)" $(PYTHON) $(tdbdir)/python/tests/simple.py
-
-clean::
- rm -f tdb.$(SHLIBEXT)
-
-installdirs::
- mkdir -p $(DESTDIR)$(bindir)
- mkdir -p $(DESTDIR)$(includedir)
- mkdir -p $(DESTDIR)$(libdir)
- mkdir -p $(DESTDIR)$(libdir)/pkgconfig
-
-installbin:: all installdirs
- cp $(PROGS) $(DESTDIR)$(bindir)
-
-installheaders:: installdirs
- cp $(srcdir)/include/tdb.h $(DESTDIR)$(includedir)
-
-installlibs:: all installdirs
- cp tdb.pc $(DESTDIR)$(libdir)/pkgconfig
- cp $(TDB_STLIB) $(TDB_SOLIB) $(DESTDIR)$(libdir)
- rm -f $(DESTDIR)$(libdir)/libtdb.$(SHLIBEXT)
- ln -s $(TDB_SOLIB) $(DESTDIR)$(libdir)/libtdb.$(SHLIBEXT)
- rm -f $(DESTDIR)$(libdir)/$(TDB_SONAME)
- ln -s $(TDB_SOLIB) $(DESTDIR)$(libdir)/$(TDB_SONAME)
-
-$(TDB_STLIB): $(TDB_OBJ)
- ar -rv $(TDB_STLIB) $(TDB_OBJ)
-
-libtdb.$(SHLIBEXT): $(TDB_SOLIB)
- ln -fs $< $@
-
-$(TDB_SONAME): $(TDB_SOLIB)
- ln -fs $< $@
diff --git a/lib/tdb/tdb.pc.in b/lib/tdb/tdb.pc.in
index 6f8f553736..b78419ea78 100644
--- a/lib/tdb/tdb.pc.in
+++ b/lib/tdb/tdb.pc.in
@@ -6,6 +6,6 @@ includedir=@includedir@
Name: tdb
Description: A trivial database
Version: @PACKAGE_VERSION@
-Libs: -L${libdir} -ltdb
+Libs: @LIB_RPATH@ -L${libdir} -ltdb
Cflags: -I${includedir}
URL: http://tdb.samba.org/
diff --git a/lib/tdb/tdb.signatures b/lib/tdb/tdb.signatures
deleted file mode 100644
index 93edb071be..0000000000
--- a/lib/tdb/tdb.signatures
+++ /dev/null
@@ -1,60 +0,0 @@
-const char *tdb_errorstr (struct tdb_context *);
-const char *tdb_name (struct tdb_context *);
-enum TDB_ERROR tdb_error (struct tdb_context *);
-int tdb_append (struct tdb_context *, TDB_DATA, TDB_DATA);
-int tdb_chainlock_mark (struct tdb_context *, TDB_DATA);
-int tdb_chainlock_nonblock (struct tdb_context *, TDB_DATA);
-int tdb_chainlock_read (struct tdb_context *, TDB_DATA);
-int tdb_chainlock (struct tdb_context *, TDB_DATA);
-int tdb_chainlock_unmark (struct tdb_context *, TDB_DATA);
-int tdb_chainunlock_read (struct tdb_context *, TDB_DATA);
-int tdb_chainunlock (struct tdb_context *, TDB_DATA);
-int tdb_close (struct tdb_context *);
-int tdb_delete (struct tdb_context *, TDB_DATA);
-int tdb_exists (struct tdb_context *, TDB_DATA);
-int tdb_fd (struct tdb_context *);
-int tdb_freelist_size (struct tdb_context *);
-int tdb_get_flags (struct tdb_context *);
-int tdb_get_seqnum (struct tdb_context *);
-int tdb_hash_size (struct tdb_context *);
-int tdb_lockall_mark (struct tdb_context *);
-int tdb_lockall_nonblock (struct tdb_context *);
-int tdb_lockall_read_nonblock (struct tdb_context *);
-int tdb_lockall_read (struct tdb_context *);
-int tdb_lockall (struct tdb_context *);
-int tdb_lockall_unmark (struct tdb_context *);
-int tdb_parse_record (struct tdb_context *, TDB_DATA, int (*) (TDB_DATA, TDB_DATA, void *), void *);
-int tdb_printfreelist (struct tdb_context *);
-int tdb_reopen_all (int);
-int tdb_reopen (struct tdb_context *);
-int tdb_repack (struct tdb_context *);
-int tdb_store (struct tdb_context *, TDB_DATA, TDB_DATA, int);
-int tdb_transaction_cancel (struct tdb_context *);
-int tdb_transaction_commit (struct tdb_context *);
-int tdb_transaction_prepare_commit (struct tdb_context *);
-int tdb_transaction_recover (struct tdb_context *);
-int tdb_transaction_start (struct tdb_context *);
-int tdb_traverse_read (struct tdb_context *, tdb_traverse_func, void *);
-int tdb_traverse (struct tdb_context *, tdb_traverse_func, void *);
-int tdb_unlockall_read (struct tdb_context *);
-int tdb_unlockall (struct tdb_context *);
-int tdb_validate_freelist (struct tdb_context *, int *);
-int tdb_wipe_all (struct tdb_context *);
-size_t tdb_map_size (struct tdb_context *);
-struct tdb_context *tdb_open (const char *, int, int, int, mode_t);
-struct tdb_context *tdb_open_ex (const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func);
-TDB_DATA tdb_fetch (struct tdb_context *, TDB_DATA);
-TDB_DATA tdb_firstkey (struct tdb_context *);
-TDB_DATA tdb_nextkey (struct tdb_context *, TDB_DATA);
-tdb_log_func tdb_log_fn (struct tdb_context *);
-void tdb_add_flags (struct tdb_context *, unsigned int);
-void tdb_dump_all (struct tdb_context *);
-void tdb_enable_seqnum (struct tdb_context *);
-void *tdb_get_logging_private (struct tdb_context *);
-void tdb_increment_seqnum_nonblock (struct tdb_context *);
-void tdb_remove_flags (struct tdb_context *, unsigned int);
-void tdb_setalarm_sigptr (struct tdb_context *, volatile sig_atomic_t *);
-void tdb_set_logging_function (struct tdb_context *, const struct tdb_logging_context *);
-void tdb_set_max_dead (struct tdb_context *, int);
-int tdb_check (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *);
-TDB_DATA tdb_null;
diff --git a/lib/tdb/tools/tdbbackup.c b/lib/tdb/tools/tdbbackup.c
index 6aca8dd99c..11ecaa0290 100644
--- a/lib/tdb/tools/tdbbackup.c
+++ b/lib/tdb/tools/tdbbackup.c
@@ -152,8 +152,9 @@ static int backup_tdb(const char *old_name, const char *new_name, int hash_size)
return 1;
}
- if (tdb_transaction_start(tdb_new) != 0) {
- printf("Failed to start transaction on new tdb\n");
+ /* lock the backup tdb so that nobody else can change it */
+ if (tdb_lockall(tdb_new) != 0) {
+ printf("Failed to lock backup tdb\n");
tdb_close(tdb);
tdb_close(tdb_new);
unlink(tmp_name);
@@ -177,12 +178,16 @@ static int backup_tdb(const char *old_name, const char *new_name, int hash_size)
/* close the old tdb */
tdb_close(tdb);
- if (tdb_transaction_commit(tdb_new) != 0) {
- fprintf(stderr, "Failed to commit new tdb\n");
- tdb_close(tdb_new);
- unlink(tmp_name);
- free(tmp_name);
- return 1;
+ /* copy done, unlock the backup tdb */
+ tdb_unlockall(tdb_new);
+
+#ifdef HAVE_FDATASYNC
+ if (fdatasync(tdb_fd(tdb_new)) != 0) {
+#else
+ if (fsync(tdb_fd(tdb_new)) != 0) {
+#endif
+ /* not fatal */
+ fprintf(stderr, "failed to fsync backup file\n");
}
/* close the new tdb and re-open read-only */
diff --git a/lib/tdb/tools/tdbrestore.c b/lib/tdb/tools/tdbrestore.c
new file mode 100644
index 0000000000..95ee360647
--- /dev/null
+++ b/lib/tdb/tools/tdbrestore.c
@@ -0,0 +1,225 @@
+/*
+ tdbrestore -- construct a tdb from tdbdump output.
+ Copyright (C) Volker Lendecke 2010
+ Copyright (C) Simon McVittie 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <assert.h>
+#include "replace.h"
+#include "system/locale.h"
+#include "system/time.h"
+#include "system/filesys.h"
+#include "system/wait.h"
+#include "tdb.h"
+
+#define debug_fprintf(file, fmt, ...) do {/*nothing*/} while (0)
+
+static int read_linehead(FILE *f)
+{
+ int i, c;
+ int num_bytes;
+ char prefix[128];
+
+ while (1) {
+ c = getc(f);
+ if (c == EOF) {
+ return -1;
+ }
+ if (c == '(') {
+ break;
+ }
+ }
+ for (i=0; i<sizeof(prefix); i++) {
+ c = getc(f);
+ if (c == EOF) {
+ return -1;
+ }
+ prefix[i] = c;
+ if (c == '"') {
+ break;
+ }
+ }
+ if (i == sizeof(prefix)) {
+ return -1;
+ }
+ prefix[i] = '\0';
+
+ if (sscanf(prefix, "%d) = ", &num_bytes) != 1) {
+ return -1;
+ }
+ return num_bytes;
+}
+
+static int read_hex(void) {
+ int c;
+ c = getchar();
+ if (c == EOF) {
+ fprintf(stderr, "Unexpected EOF in data\n");
+ return -1;
+ } else if (c == '"') {
+ fprintf(stderr, "Unexpected \\\" sequence\n");
+ return -1;
+ } else if ('0' <= c && c <= '9') {
+ return c - '0';
+ } else if ('A' <= c && c <= 'F') {
+ return c - 'A' + 10;
+ } else if ('a' <= c && c <= 'f') {
+ return c - 'a' + 10;
+ } else {
+ fprintf(stderr, "Invalid hex: %c\n", c);
+ return -1;
+ }
+}
+
+static int read_data(FILE *f, TDB_DATA *d, size_t size) {
+ int c, low, high;
+ int i;
+
+ d->dptr = (unsigned char *)malloc(size);
+ if (d->dptr == NULL) {
+ return -1;
+ }
+ d->dsize = size;
+
+ for (i=0; i<size; i++) {
+ c = getc(f);
+ if (c == EOF) {
+ fprintf(stderr, "Unexpected EOF in data\n");
+ return 1;
+ } else if (c == '"') {
+ return 0;
+ } else if (c == '\\') {
+ high = read_hex();
+ if (high < 0) {
+ return -1;
+ }
+ high = high << 4;
+ assert(high == (high & 0xf0));
+ low = read_hex();
+ if (low < 0) {
+ return -1;
+ }
+ assert(low == (low & 0x0f));
+ d->dptr[i] = (low|high);
+ } else {
+ d->dptr[i] = c;
+ }
+ }
+ return 0;
+}
+
+static int swallow(FILE *f, const char *s, int *eof)
+{
+ char line[128];
+
+ if (fgets(line, sizeof(line), f) == NULL) {
+ if (eof != NULL) {
+ *eof = 1;
+ }
+ return -1;
+ }
+ if (strcmp(line, s) != 0) {
+ return -1;
+ }
+ return 0;
+}
+
+static int read_rec(FILE *f, TDB_CONTEXT *tdb, int *eof)
+{
+ int length;
+ TDB_DATA key, data;
+ int ret = -1;
+
+ key.dptr = NULL;
+ data.dptr = NULL;
+
+ if (swallow(f, "{\n", eof) == -1) {
+ goto fail;
+ }
+ length = read_linehead(f);
+ if (length == -1) {
+ goto fail;
+ }
+ if (read_data(f, &key, length) == -1) {
+ goto fail;
+ }
+ if (swallow(f, "\"\n", NULL) == -1) {
+ goto fail;
+ }
+ length = read_linehead(f);
+ if (length == -1) {
+ goto fail;
+ }
+ if (read_data(f, &data, length) == -1) {
+ goto fail;
+ }
+ if ((swallow(f, "\"\n", NULL) == -1)
+ || (swallow(f, "}\n", NULL) == -1)) {
+ goto fail;
+ }
+ if (tdb_store(tdb, key, data, TDB_INSERT) == -1) {
+ fprintf(stderr, "TDB error: %s\n", tdb_errorstr(tdb));
+ goto fail;
+ }
+
+ ret = 0;
+fail:
+ free(key.dptr);
+ free(data.dptr);
+ return ret;
+}
+
+static int restore_tdb(const char *fname)
+{
+ TDB_CONTEXT *tdb;
+
+ tdb = tdb_open(fname, 0, 0, O_RDWR|O_CREAT|O_EXCL, 0666);
+ if (!tdb) {
+ perror("tdb_open");
+ fprintf(stderr, "Failed to open %s\n", fname);
+ return 1;
+ }
+
+ while (1) {
+ int eof = 0;
+ if (read_rec(stdin, tdb, &eof) == -1) {
+ if (eof) {
+ break;
+ }
+ return 1;
+ }
+ }
+ if (tdb_close(tdb)) {
+ fprintf(stderr, "Error closing tdb\n");
+ return 1;
+ }
+ fprintf(stderr, "EOF\n");
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ char *fname;
+
+ if (argc < 2) {
+ printf("Usage: %s dbname < tdbdump_output\n", argv[0]);
+ exit(1);
+ }
+
+ fname = argv[1];
+
+ return restore_tdb(fname);
+}
diff --git a/lib/tdb/tools/tdbtest.c b/lib/tdb/tools/tdbtest.c
index 416bc50a5b..44c78efda5 100644
--- a/lib/tdb/tools/tdbtest.c
+++ b/lib/tdb/tools/tdbtest.c
@@ -215,16 +215,38 @@ static void merge_test(void)
tdb_delete(db, key);
}
+static char *test_path(const char *filename)
+{
+ const char *prefix = getenv("TEST_DATA_PREFIX");
+
+ if (prefix) {
+ char *path = NULL;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", prefix, filename);
+ if (ret == -1) {
+ return NULL;
+ }
+ return path;
+ }
+
+ return strdup(filename);
+}
+
int main(int argc, const char *argv[])
{
int i, seed=0;
int loops = 10000;
int num_entries;
- char test_gdbm[] = "test.gdbm";
+ char test_gdbm[1] = "test.gdbm";
+ char *test_tdb;
- unlink("test.gdbm");
+ test_gdbm[0] = test_path("test.gdbm");
+ test_tdb = test_path("test.tdb");
- db = tdb_open("test.tdb", 0, TDB_CLEAR_IF_FIRST,
+ unlink(test_gdbm[0]);
+
+ db = tdb_open(test_tdb, 0, TDB_CLEAR_IF_FIRST,
O_RDWR | O_CREAT | O_TRUNC, 0600);
gdbm = gdbm_open(test_gdbm, 512, GDBM_WRITER|GDBM_NEWDB|GDBM_FAST,
0600, NULL);
@@ -261,5 +283,8 @@ static void merge_test(void)
tdb_close(db);
gdbm_close(gdbm);
+ free(test_gdbm[0]);
+ free(test_tdb);
+
return 0;
}
diff --git a/lib/tdb/tools/tdbtool.c b/lib/tdb/tools/tdbtool.c
index 2ba7efc8ab..3511dc1e56 100644
--- a/lib/tdb/tools/tdbtool.c
+++ b/lib/tdb/tools/tdbtool.c
@@ -409,12 +409,14 @@ static int traverse_fn(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *
static void info_tdb(void)
{
- int count;
- total_bytes = 0;
- if ((count = tdb_traverse(tdb, traverse_fn, NULL)) == -1)
+ char *summary = tdb_summary(tdb);
+
+ if (!summary) {
printf("Error = %s\n", tdb_errorstr(tdb));
- else
- printf("%d records totalling %d bytes\n", count, total_bytes);
+ } else {
+ printf("%s", summary);
+ free(summary);
+ }
}
static void speed_tdb(const char *tlimit)
diff --git a/lib/tdb/tools/tdbtorture.c b/lib/tdb/tools/tdbtorture.c
index b0221a2503..64c5043441 100644
--- a/lib/tdb/tools/tdbtorture.c
+++ b/lib/tdb/tools/tdbtorture.c
@@ -30,6 +30,10 @@ static struct tdb_context *db;
static int in_transaction;
static int error_count;
static int always_transaction = 0;
+static int hash_size = 2;
+static int loopnum;
+static int count_pipe;
+static struct tdb_logging_context log_ctx;
#ifdef PRINTF_ATTRIBUTE
static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) PRINTF_ATTRIBUTE(3,4);
@@ -48,8 +52,9 @@ static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const c
va_end(ap);
fflush(stdout);
#if 0
- {
+ if (level != TDB_DEBUG_TRACE) {
char *ptr;
+ signal(SIGUSR1, SIG_IGN);
asprintf(&ptr,"xterm -e gdb /proc/%d/exe %d", getpid(), getpid());
system(ptr);
free(ptr);
@@ -211,24 +216,93 @@ static int traverse_fn(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf,
static void usage(void)
{
- printf("Usage: tdbtorture [-t] [-n NUM_PROCS] [-l NUM_LOOPS] [-s SEED] [-H HASH_SIZE]\n");
+ printf("Usage: tdbtorture [-t] [-k] [-n NUM_PROCS] [-l NUM_LOOPS] [-s SEED] [-H HASH_SIZE]\n");
exit(0);
}
- int main(int argc, char * const *argv)
+static void send_count_and_suicide(int sig)
+{
+ /* This ensures our successor can continue where we left off. */
+ write(count_pipe, &loopnum, sizeof(loopnum));
+ /* This gives a unique signature. */
+ kill(getpid(), SIGUSR2);
+}
+
+static int run_child(const char *filename, int i, int seed, unsigned num_loops, unsigned start)
+{
+ db = tdb_open_ex(filename, hash_size, TDB_DEFAULT,
+ O_RDWR | O_CREAT, 0600, &log_ctx, NULL);
+ if (!db) {
+ fatal("db open failed");
+ }
+
+ srand(seed + i);
+ srandom(seed + i);
+
+ /* Set global, then we're ready to handle being killed. */
+ loopnum = start;
+ signal(SIGUSR1, send_count_and_suicide);
+
+ for (;loopnum<num_loops && error_count == 0;loopnum++) {
+ addrec_db();
+ }
+
+ if (error_count == 0) {
+ tdb_traverse_read(db, NULL, NULL);
+ if (always_transaction) {
+ while (in_transaction) {
+ tdb_transaction_cancel(db);
+ in_transaction--;
+ }
+ if (tdb_transaction_start(db) != 0)
+ fatal("tdb_transaction_start failed");
+ }
+ tdb_traverse(db, traverse_fn, NULL);
+ tdb_traverse(db, traverse_fn, NULL);
+ if (always_transaction) {
+ if (tdb_transaction_commit(db) != 0)
+ fatal("tdb_transaction_commit failed");
+ }
+ }
+
+ tdb_close(db);
+
+ return (error_count < 100 ? error_count : 100);
+}
+
+static char *test_path(const char *filename)
+{
+ const char *prefix = getenv("TEST_DATA_PREFIX");
+
+ if (prefix) {
+ char *path = NULL;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", prefix, filename);
+ if (ret == -1) {
+ return NULL;
+ }
+ return path;
+ }
+
+ return strdup(filename);
+}
+
+int main(int argc, char * const *argv)
{
int i, seed = -1;
- int num_procs = 3;
int num_loops = 5000;
- int hash_size = 2;
- int c;
+ int num_procs = 3;
+ int c, pfds[2];
extern char *optarg;
pid_t *pids;
+ int kill_random = 0;
+ int *done;
+ char *test_tdb;
- struct tdb_logging_context log_ctx;
log_ctx.log_fn = tdb_log;
- while ((c = getopt(argc, argv, "n:l:s:H:th")) != -1) {
+ while ((c = getopt(argc, argv, "n:l:s:H:thk")) != -1) {
switch (c) {
case 'n':
num_procs = strtol(optarg, NULL, 0);
@@ -245,102 +319,135 @@ static void usage(void)
case 't':
always_transaction = 1;
break;
+ case 'k':
+ kill_random = 1;
+ break;
default:
usage();
}
}
- unlink("torture.tdb");
-
- pids = (pid_t *)calloc(sizeof(pid_t), num_procs);
- pids[0] = getpid();
-
- for (i=0;i<num_procs-1;i++) {
- if ((pids[i+1]=fork()) == 0) break;
- }
+ test_tdb = test_path("torture.tdb");
- db = tdb_open_ex("torture.tdb", hash_size, TDB_CLEAR_IF_FIRST,
- O_RDWR | O_CREAT, 0600, &log_ctx, NULL);
- if (!db) {
- fatal("db open failed");
- }
+ unlink(test_tdb);
if (seed == -1) {
seed = (getpid() + time(NULL)) & 0x7FFFFFFF;
}
- if (i == 0) {
- printf("testing with %d processes, %d loops, %d hash_size, seed=%d%s\n",
- num_procs, num_loops, hash_size, seed, always_transaction ? " (all within transactions)" : "");
+ if (num_procs == 1 && !kill_random) {
+ /* Don't fork for this case, makes debugging easier. */
+ error_count = run_child(test_tdb, 0, seed, num_loops, 0);
+ goto done;
}
- srand(seed + i);
- srandom(seed + i);
+ pids = (pid_t *)calloc(sizeof(pid_t), num_procs);
+ done = (int *)calloc(sizeof(int), num_procs);
- for (i=0;i<num_loops && error_count == 0;i++) {
- addrec_db();
+ if (pipe(pfds) != 0) {
+ perror("Creating pipe");
+ exit(1);
}
-
- if (error_count == 0) {
- tdb_traverse_read(db, NULL, NULL);
- if (always_transaction) {
- while (in_transaction) {
- tdb_transaction_cancel(db);
- in_transaction--;
+ count_pipe = pfds[1];
+
+ for (i=0;i<num_procs;i++) {
+ if ((pids[i]=fork()) == 0) {
+ close(pfds[0]);
+ if (i == 0) {
+ printf("Testing with %d processes, %d loops, %d hash_size, seed=%d%s\n",
+ num_procs, num_loops, hash_size, seed, always_transaction ? " (all within transactions)" : "");
}
- if (tdb_transaction_start(db) != 0)
- fatal("tdb_transaction_start failed");
- }
- tdb_traverse(db, traverse_fn, NULL);
- tdb_traverse(db, traverse_fn, NULL);
- if (always_transaction) {
- if (tdb_transaction_commit(db) != 0)
- fatal("tdb_transaction_commit failed");
+ exit(run_child(test_tdb, i, seed, num_loops, 0));
}
}
- tdb_close(db);
-
- if (getpid() != pids[0]) {
- return error_count;
- }
-
- for (i=1;i<num_procs;i++) {
+ while (num_procs) {
int status, j;
pid_t pid;
+
if (error_count != 0) {
/* try and stop the test on any failure */
- for (j=1;j<num_procs;j++) {
+ for (j=0;j<num_procs;j++) {
if (pids[j] != 0) {
kill(pids[j], SIGTERM);
}
}
}
- pid = waitpid(-1, &status, 0);
+
+ pid = waitpid(-1, &status, kill_random ? WNOHANG : 0);
+ if (pid == 0) {
+ struct timeval tv;
+
+ /* Sleep for 1/10 second. */
+ tv.tv_sec = 0;
+ tv.tv_usec = 100000;
+ select(0, NULL, NULL, NULL, &tv);
+
+ /* Kill someone. */
+ kill(pids[random() % num_procs], SIGUSR1);
+ continue;
+ }
+
if (pid == -1) {
perror("failed to wait for child\n");
exit(1);
}
- for (j=1;j<num_procs;j++) {
+
+ for (j=0;j<num_procs;j++) {
if (pids[j] == pid) break;
}
if (j == num_procs) {
printf("unknown child %d exited!?\n", (int)pid);
exit(1);
}
- if (WEXITSTATUS(status) != 0) {
- printf("child %d exited with status %d\n",
- (int)pid, WEXITSTATUS(status));
+ if (WIFSIGNALED(status)) {
+ if (WTERMSIG(status) == SIGUSR2
+ || WTERMSIG(status) == SIGUSR1) {
+ /* SIGUSR2 means they wrote to pipe. */
+ if (WTERMSIG(status) == SIGUSR2) {
+ read(pfds[0], &done[j],
+ sizeof(done[j]));
+ }
+ pids[j] = fork();
+ if (pids[j] == 0)
+ exit(run_child(test_tdb, j, seed,
+ num_loops, done[j]));
+ printf("Restarting child %i for %u-%u\n",
+ j, done[j], num_loops);
+ continue;
+ }
+ printf("child %d exited with signal %d\n",
+ (int)pid, WTERMSIG(status));
error_count++;
+ } else {
+ if (WEXITSTATUS(status) != 0) {
+ printf("child %d exited with status %d\n",
+ (int)pid, WEXITSTATUS(status));
+ error_count++;
+ }
}
- pids[j] = 0;
+ memmove(&pids[j], &pids[j+1],
+ (num_procs - j - 1)*sizeof(pids[0]));
+ num_procs--;
}
free(pids);
+done:
if (error_count == 0) {
+ db = tdb_open_ex(test_tdb, hash_size, TDB_DEFAULT,
+ O_RDWR, 0, &log_ctx, NULL);
+ if (!db) {
+ fatal("db open failed");
+ }
+ if (tdb_check(db, NULL, NULL) == -1) {
+ printf("db check failed");
+ exit(1);
+ }
+ tdb_close(db);
printf("OK\n");
}
+ free(test_tdb);
return error_count;
}
diff --git a/lib/tdb/wscript b/lib/tdb/wscript
new file mode 100644
index 0000000000..9041f8c1ba
--- /dev/null
+++ b/lib/tdb/wscript
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+
+APPNAME = 'tdb'
+VERSION = '1.2.9'
+
+blddir = 'bin'
+
+import sys, os
+
+# find the buildtools directory
+srcdir = '.'
+while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5:
+ srcdir = '../' + srcdir
+sys.path.insert(0, srcdir + '/buildtools/wafsamba')
+
+import wafsamba, samba_dist, Options, Logs
+
+samba_dist.DIST_DIRS('lib/tdb:. lib/replace:lib/replace buildtools:buildtools')
+
+def set_options(opt):
+ opt.BUILTIN_DEFAULT('replace')
+ opt.PRIVATE_EXTENSION_DEFAULT('tdb', noextension='tdb')
+ opt.RECURSE('lib/replace')
+ if opt.IN_LAUNCH_DIR():
+ opt.add_option('--disable-python',
+ help=("disable the pytdb module"),
+ action="store_true", dest='disable_python', default=False)
+
+
+def configure(conf):
+ conf.RECURSE('lib/replace')
+
+ conf.env.standalone_tdb = conf.IN_LAUNCH_DIR()
+
+ if not conf.env.standalone_tdb:
+ if conf.CHECK_BUNDLED_SYSTEM('tdb', minversion=VERSION,
+ implied_deps='replace'):
+ conf.define('USING_SYSTEM_TDB', 1)
+ if conf.CHECK_BUNDLED_SYSTEM_PYTHON('pytdb', 'tdb', minversion=VERSION):
+ conf.define('USING_SYSTEM_PYTDB', 1)
+
+ conf.env.disable_python = getattr(Options.options, 'disable_python', False)
+
+ conf.CHECK_XSLTPROC_MANPAGES()
+
+ if not conf.env.disable_python:
+ # also disable if we don't have the python libs installed
+ conf.check_tool('python')
+ conf.check_python_version((2,4,2))
+ conf.SAMBA_CHECK_PYTHON_HEADERS(mandatory=False)
+ if not conf.env.HAVE_PYTHON_H:
+ Logs.warn('Disabling pytdb as python devel libs not found')
+ conf.env.disable_python = True
+
+ conf.SAMBA_CONFIG_H()
+
+def build(bld):
+ bld.RECURSE('lib/replace')
+
+ COMMON_SRC = bld.SUBDIR('common',
+ '''check.c error.c tdb.c traverse.c
+ freelistcheck.c lock.c dump.c freelist.c
+ io.c open.c transaction.c hash.c summary.c''')
+
+ if bld.env.standalone_tdb:
+ bld.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig'
+ bld.PKG_CONFIG_FILES('tdb.pc', vnum=VERSION)
+ private_library = False
+ else:
+ private_library = True
+
+ if not bld.CONFIG_SET('USING_SYSTEM_TDB'):
+ bld.SAMBA_LIBRARY('tdb',
+ COMMON_SRC,
+ deps='replace',
+ includes='include',
+ abi_directory='ABI',
+ abi_match='tdb_*',
+ hide_symbols=True,
+ vnum=VERSION,
+ public_headers='include/tdb.h',
+ public_headers_install=not private_library,
+ private_library=private_library)
+
+ bld.SAMBA_BINARY('tdbtorture',
+ 'tools/tdbtorture.c',
+ 'tdb',
+ install=False)
+
+ bld.SAMBA_BINARY('tdbrestore',
+ 'tools/tdbrestore.c',
+ 'tdb', manpages='manpages/tdbrestore.8')
+
+ bld.SAMBA_BINARY('tdbdump',
+ 'tools/tdbdump.c',
+ 'tdb', manpages='manpages/tdbdump.8')
+
+ bld.SAMBA_BINARY('tdbbackup',
+ 'tools/tdbbackup.c',
+ 'tdb',
+ manpages='manpages/tdbbackup.8')
+
+ bld.SAMBA_BINARY('tdbtool',
+ 'tools/tdbtool.c',
+ 'tdb', manpages='manpages/tdbtool.8')
+
+ if not bld.CONFIG_SET('USING_SYSTEM_PYTDB'):
+ bld.SAMBA_PYTHON('pytdb',
+ 'pytdb.c',
+ deps='tdb',
+ enabled=not bld.env.disable_python,
+ realname='tdb.so',
+ cflags='-DPACKAGE_VERSION=\"%s\"' % VERSION)
+
+
+
+def test(ctx):
+ '''run tdb testsuite'''
+ import Utils, samba_utils, shutil
+ test_prefix = "%s/st" % (Utils.g_module.blddir)
+ shutil.rmtree(test_prefix, ignore_errors=True)
+ os.makedirs(test_prefix)
+ os.environ['TEST_DATA_PREFIX'] = test_prefix
+ cmd = os.path.join(Utils.g_module.blddir, 'tdbtorture')
+ ret = samba_utils.RUN_COMMAND(cmd)
+ print("testsuite returned %d" % ret)
+ sys.exit(ret)
+
+def dist():
+ '''makes a tarball for distribution'''
+ samba_dist.dist()
+
+def reconfigure(ctx):
+ '''reconfigure if config scripts have changed'''
+ import samba_utils
+ samba_utils.reconfigure(ctx)
diff --git a/lib/tdr/config.mk b/lib/tdr/config.mk
deleted file mode 100644
index 07506ec647..0000000000
--- a/lib/tdr/config.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-[SUBSYSTEM::TDR]
-CFLAGS = -Ilib/tdr
-PUBLIC_DEPENDENCIES = LIBTALLOC LIBSAMBA-UTIL
-
-TDR_OBJ_FILES = $(libtdrsrcdir)/tdr.o
-
-$(eval $(call proto_header_template,$(libtdrsrcdir)/tdr_proto.h,$(TDR_OBJ_FILES:.o=.c)))
-
-PUBLIC_HEADERS += $(libtdrsrcdir)/tdr.h
diff --git a/lib/tdr/tdr.c b/lib/tdr/tdr.c
index ce67003f8b..ab016d0bc4 100644
--- a/lib/tdr/tdr.c
+++ b/lib/tdr/tdr.c
@@ -163,7 +163,7 @@ NTSTATUS tdr_pull_charset(struct tdr_pull *tdr, TALLOC_CTX *ctx, const char **v,
TDR_PULL_NEED_BYTES(tdr, el_size*length);
- if (!convert_string_talloc_convenience(ctx, tdr->iconv_convenience, chset, CH_UNIX, tdr->data.data+tdr->offset, el_size*length, discard_const_p(void *, v), &ret, false)) {
+ if (!convert_string_talloc(ctx, chset, CH_UNIX, tdr->data.data+tdr->offset, el_size*length, discard_const_p(void *, v), &ret, false)) {
return NT_STATUS_INVALID_PARAMETER;
}
@@ -183,7 +183,8 @@ NTSTATUS tdr_push_charset(struct tdr_push *tdr, const char **v, uint32_t length,
required = el_size * length;
TDR_PUSH_NEED_BYTES(tdr, required);
- if (!convert_string_convenience(tdr->iconv_convenience, CH_UNIX, chset, *v, strlen(*v), tdr->data.data+tdr->data.length, required, &ret, false)) {
+ ret = convert_string(CH_UNIX, chset, *v, strlen(*v), tdr->data.data+tdr->data.length, required, false);
+ if (ret == -1) {
return NT_STATUS_INVALID_PARAMETER;
}
@@ -343,33 +344,29 @@ NTSTATUS tdr_pull_DATA_BLOB(struct tdr_pull *tdr, TALLOC_CTX *ctx, DATA_BLOB *bl
return NT_STATUS_OK;
}
-struct tdr_push *tdr_push_init(TALLOC_CTX *mem_ctx, struct smb_iconv_convenience *ic)
+struct tdr_push *tdr_push_init(TALLOC_CTX *mem_ctx)
{
struct tdr_push *push = talloc_zero(mem_ctx, struct tdr_push);
if (push == NULL)
return NULL;
- push->iconv_convenience = talloc_reference(push, ic);
-
return push;
}
-struct tdr_pull *tdr_pull_init(TALLOC_CTX *mem_ctx, struct smb_iconv_convenience *ic)
+struct tdr_pull *tdr_pull_init(TALLOC_CTX *mem_ctx)
{
struct tdr_pull *pull = talloc_zero(mem_ctx, struct tdr_pull);
if (pull == NULL)
return NULL;
- pull->iconv_convenience = talloc_reference(pull, ic);
-
return pull;
}
-NTSTATUS tdr_push_to_fd(int fd, struct smb_iconv_convenience *iconv_convenience, tdr_push_fn_t push_fn, const void *p)
+NTSTATUS tdr_push_to_fd(int fd, tdr_push_fn_t push_fn, const void *p)
{
- struct tdr_push *push = tdr_push_init(NULL, iconv_convenience);
+ struct tdr_push *push = tdr_push_init(NULL);
if (push == NULL)
return NT_STATUS_NO_MEMORY;
@@ -390,7 +387,7 @@ NTSTATUS tdr_push_to_fd(int fd, struct smb_iconv_convenience *iconv_convenience,
return NT_STATUS_OK;
}
-void tdr_print_debug_helper(struct tdr_print *tdr, const char *format, ...) _PRINTF_ATTRIBUTE(2,3)
+void tdr_print_debug_helper(struct tdr_print *tdr, const char *format, ...)
{
va_list ap;
char *s = NULL;
diff --git a/lib/tdr/tdr.h b/lib/tdr/tdr.h
index 84f3e50c2b..fa0a4d7a77 100644
--- a/lib/tdr/tdr.h
+++ b/lib/tdr/tdr.h
@@ -2,17 +2,17 @@
Unix SMB/CIFS implementation.
TDR definitions
Copyright (C) Jelmer Vernooij 2005
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -33,13 +33,11 @@ struct tdr_pull {
DATA_BLOB data;
uint32_t offset;
int flags;
- struct smb_iconv_convenience *iconv_convenience;
};
struct tdr_push {
DATA_BLOB data;
int flags;
- struct smb_iconv_convenience *iconv_convenience;
};
struct tdr_print {
@@ -62,6 +60,41 @@ struct tdr_print {
typedef NTSTATUS (*tdr_push_fn_t) (struct tdr_push *, const void *);
typedef NTSTATUS (*tdr_pull_fn_t) (struct tdr_pull *, TALLOC_CTX *, void *);
-#include "../lib/tdr/tdr_proto.h"
+NTSTATUS tdr_push_expand(struct tdr_push *tdr, uint32_t size);
+NTSTATUS tdr_pull_uint8(struct tdr_pull *tdr, TALLOC_CTX *ctx, uint8_t *v);
+NTSTATUS tdr_push_uint8(struct tdr_push *tdr, const uint8_t *v);
+NTSTATUS tdr_print_uint8(struct tdr_print *tdr, const char *name, uint8_t *v);
+NTSTATUS tdr_pull_uint16(struct tdr_pull *tdr, TALLOC_CTX *ctx, uint16_t *v);
+NTSTATUS tdr_pull_uint1632(struct tdr_pull *tdr, TALLOC_CTX *ctx, uint16_t *v);
+NTSTATUS tdr_push_uint16(struct tdr_push *tdr, const uint16_t *v);
+NTSTATUS tdr_push_uint1632(struct tdr_push *tdr, const uint16_t *v);
+NTSTATUS tdr_print_uint16(struct tdr_print *tdr, const char *name, uint16_t *v);
+NTSTATUS tdr_pull_uint32(struct tdr_pull *tdr, TALLOC_CTX *ctx, uint32_t *v);
+NTSTATUS tdr_push_uint32(struct tdr_push *tdr, const uint32_t *v);
+NTSTATUS tdr_print_uint32(struct tdr_print *tdr, const char *name, uint32_t *v);
+NTSTATUS tdr_pull_charset(struct tdr_pull *tdr, TALLOC_CTX *ctx, const char **v, uint32_t length, uint32_t el_size, charset_t chset);
+NTSTATUS tdr_push_charset(struct tdr_push *tdr, const char **v, uint32_t length, uint32_t el_size, charset_t chset);
+NTSTATUS tdr_print_charset(struct tdr_print *tdr, const char *name, const char **v, uint32_t length, uint32_t el_size, charset_t chset);
+
+NTSTATUS tdr_pull_hyper(struct tdr_pull *tdr, TALLOC_CTX *ctx, uint64_t *v);
+NTSTATUS tdr_push_hyper(struct tdr_push *tdr, uint64_t *v);
+
+NTSTATUS tdr_push_NTTIME(struct tdr_push *tdr, NTTIME *t);
+NTSTATUS tdr_pull_NTTIME(struct tdr_pull *tdr, TALLOC_CTX *ctx, NTTIME *t);
+NTSTATUS tdr_print_NTTIME(struct tdr_print *tdr, const char *name, NTTIME *t);
+
+NTSTATUS tdr_push_time_t(struct tdr_push *tdr, time_t *t);
+NTSTATUS tdr_pull_time_t(struct tdr_pull *tdr, TALLOC_CTX *ctx, time_t *t);
+NTSTATUS tdr_print_time_t(struct tdr_print *tdr, const char *name, time_t *t);
+
+NTSTATUS tdr_print_DATA_BLOB(struct tdr_print *tdr, const char *name, DATA_BLOB *r);
+NTSTATUS tdr_push_DATA_BLOB(struct tdr_push *tdr, DATA_BLOB *blob);
+NTSTATUS tdr_pull_DATA_BLOB(struct tdr_pull *tdr, TALLOC_CTX *ctx, DATA_BLOB *blob);
+
+struct tdr_push *tdr_push_init(TALLOC_CTX *mem_ctx);
+struct tdr_pull *tdr_pull_init(TALLOC_CTX *mem_ctx);
+
+NTSTATUS tdr_push_to_fd(int fd, tdr_push_fn_t push_fn, const void *p);
+void tdr_print_debug_helper(struct tdr_print *tdr, const char *format, ...) PRINTF_ATTRIBUTE(2,3);
#endif /* __TDR_H__ */
diff --git a/lib/tdr/testsuite.c b/lib/tdr/testsuite.c
index 36bb164a9a..bea78f53ec 100644
--- a/lib/tdr/testsuite.c
+++ b/lib/tdr/testsuite.c
@@ -25,7 +25,7 @@
static bool test_push_uint8(struct torture_context *tctx)
{
uint8_t v = 4;
- struct tdr_push *tdr = tdr_push_init(tctx, global_iconv_convenience);
+ struct tdr_push *tdr = tdr_push_init(tctx);
torture_assert_ntstatus_ok(tctx, tdr_push_uint8(tdr, &v), "push failed");
torture_assert_int_equal(tctx, tdr->data.length, 1, "length incorrect");
@@ -37,7 +37,7 @@ static bool test_pull_uint8(struct torture_context *tctx)
{
uint8_t d = 2;
uint8_t l;
- struct tdr_pull *tdr = tdr_pull_init(tctx, global_iconv_convenience);
+ struct tdr_pull *tdr = tdr_pull_init(tctx);
tdr->data.data = &d;
tdr->data.length = 1;
tdr->offset = 0;
@@ -52,7 +52,7 @@ static bool test_pull_uint8(struct torture_context *tctx)
static bool test_push_uint16(struct torture_context *tctx)
{
uint16_t v = 0xF32;
- struct tdr_push *tdr = tdr_push_init(tctx, global_iconv_convenience);
+ struct tdr_push *tdr = tdr_push_init(tctx);
torture_assert_ntstatus_ok(tctx, tdr_push_uint16(tdr, &v), "push failed");
torture_assert_int_equal(tctx, tdr->data.length, 2, "length incorrect");
@@ -65,7 +65,7 @@ static bool test_pull_uint16(struct torture_context *tctx)
{
uint8_t d[2] = { 782 & 0xFF, (782 & 0xFF00) / 0x100 };
uint16_t l;
- struct tdr_pull *tdr = tdr_pull_init(tctx, global_iconv_convenience);
+ struct tdr_pull *tdr = tdr_pull_init(tctx);
tdr->data.data = d;
tdr->data.length = 2;
tdr->offset = 0;
@@ -80,7 +80,7 @@ static bool test_pull_uint16(struct torture_context *tctx)
static bool test_push_uint32(struct torture_context *tctx)
{
uint32_t v = 0x100F32;
- struct tdr_push *tdr = tdr_push_init(tctx, global_iconv_convenience);
+ struct tdr_push *tdr = tdr_push_init(tctx);
torture_assert_ntstatus_ok(tctx, tdr_push_uint32(tdr, &v), "push failed");
torture_assert_int_equal(tctx, tdr->data.length, 4, "length incorrect");
@@ -95,7 +95,7 @@ static bool test_pull_uint32(struct torture_context *tctx)
{
uint8_t d[4] = { 782 & 0xFF, (782 & 0xFF00) / 0x100, 0, 0 };
uint32_t l;
- struct tdr_pull *tdr = tdr_pull_init(tctx, global_iconv_convenience);
+ struct tdr_pull *tdr = tdr_pull_init(tctx);
tdr->data.data = d;
tdr->data.length = 4;
tdr->offset = 0;
@@ -109,7 +109,7 @@ static bool test_pull_uint32(struct torture_context *tctx)
static bool test_pull_charset(struct torture_context *tctx)
{
- struct tdr_pull *tdr = tdr_pull_init(tctx, global_iconv_convenience);
+ struct tdr_pull *tdr = tdr_pull_init(tctx);
const char *l = NULL;
tdr->data.data = (uint8_t *)talloc_strdup(tctx, "bla");
tdr->data.length = 4;
@@ -131,7 +131,7 @@ static bool test_pull_charset(struct torture_context *tctx)
static bool test_pull_charset_empty(struct torture_context *tctx)
{
- struct tdr_pull *tdr = tdr_pull_init(tctx, global_iconv_convenience);
+ struct tdr_pull *tdr = tdr_pull_init(tctx);
const char *l = NULL;
tdr->data.data = (uint8_t *)talloc_strdup(tctx, "bla");
tdr->data.length = 4;
@@ -150,11 +150,11 @@ static bool test_pull_charset_empty(struct torture_context *tctx)
static bool test_push_charset(struct torture_context *tctx)
{
const char *l = "bloe";
- struct tdr_push *tdr = tdr_push_init(tctx, global_iconv_convenience);
+ struct tdr_push *tdr = tdr_push_init(tctx);
torture_assert_ntstatus_ok(tctx, tdr_push_charset(tdr, &l, 4, 1, CH_UTF8),
"push failed");
torture_assert_int_equal(tctx, 4, tdr->data.length, "offset invalid");
- torture_assert(tctx, strcmp("bloe", (const char *)tdr->data.data) == 0, "right string push");
+ torture_assert(tctx, strncmp("bloe", (const char *)tdr->data.data, 4) == 0, "right string push");
torture_assert_ntstatus_ok(tctx, tdr_push_charset(tdr, &l, -1, 1, CH_UTF8),
"push failed");
@@ -166,7 +166,7 @@ static bool test_push_charset(struct torture_context *tctx)
struct torture_suite *torture_local_tdr(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "TDR");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "tdr");
torture_suite_add_simple_test(suite, "pull_uint8", test_pull_uint8);
torture_suite_add_simple_test(suite, "push_uint8", test_push_uint8);
diff --git a/lib/tdr/wscript_build b/lib/tdr/wscript_build
new file mode 100644
index 0000000000..67fdfeb0ca
--- /dev/null
+++ b/lib/tdr/wscript_build
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+
+bld.SAMBA_SUBSYSTEM('TDR',
+ source='tdr.c',
+ public_deps='talloc samba-util',
+ public_headers='tdr.h'
+ )
+
diff --git a/lib/testtools/.testr.conf b/lib/testtools/.testr.conf
new file mode 100644
index 0000000000..12d6685d2b
--- /dev/null
+++ b/lib/testtools/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=PYTHONPATH=. python -m subunit.run $LISTOPT $IDOPTION testtools.tests.test_suite
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/lib/testtools/HACKING b/lib/testtools/HACKING
new file mode 100644
index 0000000000..e9ece73585
--- /dev/null
+++ b/lib/testtools/HACKING
@@ -0,0 +1,135 @@
+===================================
+Notes for contributing to testtools
+===================================
+
+Coding style
+------------
+
+In general, follow PEP 8 <http://www.python.org/dev/peps/pep-0008/>.
+
+For consistency with the standard library's ``unittest`` module, method names
+are generally ``camelCase``.
+
+testtools supports Python 2.4 and later, so avoid any 2.5-only features like
+the ``with`` statement.
+
+
+Copyright assignment
+--------------------
+
+Part of testtools' raison d'etre is to provide Python with improvements to the
+testing code it ships. For that reason we require all contributions (that are
+non-trivial) to meet one of the following rules:
+
+ - be inapplicable for inclusion in Python.
+ - be able to be included in Python without further contact with the
+ contributor.
+ - be copyright assigned to Jonathan M. Lange.
+
+Please pick one of these and specify it when contributing code to testtools.
+
+
+Licensing
+---------
+
+All code that is not copyright assigned to Jonathan M. Lange (see Copyright
+Assignment above) needs to be licensed under the MIT license that testtools
+uses, so that testtools can ship it.
+
+
+Testing
+-------
+
+Please write tests for every feature. This project ought to be a model
+example of well-tested Python code!
+
+Take particular care to make sure the *intent* of each test is clear.
+
+You can run tests with ``make check``, or by running ``./run-tests`` directly.
+
+
+Source layout
+-------------
+
+The top-level directory contains the ``testtools/`` package directory, and
+miscellaneous files like README and setup.py.
+
+The ``testtools/`` directory is the Python package itself. It is separated
+into submodules for internal clarity, but all public APIs should be “promoted”
+into the top-level package by importing them in ``testtools/__init__.py``.
+Users of testtools should never import a submodule, they are just
+implementation details.
+
+Tests belong in ``testtools/tests/``.
+
+
+Committing to trunk
+------------------
+
+Testtools is maintained using bzr, with its trunk at lp:testtools. This gives
+every contributor the ability to commit their work to their own branches.
+However permission must be granted to allow contributors to commit to the trunk
+branch.
+
+Commit access to trunk is obtained by joining the testtools-devs Launchpad
+team. Membership in this team is contingent on obeying the testtools
+contribution policy, including assigning copyright of all the work one creates
+and places in trunk to Jonathan Lange.
+
+
+Code Review
+-----------
+
+All code must be reviewed before landing on trunk. The process is to create a
+branch in launchpad, and submit it for merging to lp:testtools. It will then
+be reviewed before it can be merged to trunk. It will be reviewed by someone:
+
+ * not the author
+ * a committer (member of the testtools-devs team)
+
+As a special exception, while the testtools committers team is small and prone
+to blocking, a merge request from a committer that has not been reviewed after
+24 hours may be merged by that committer. When the team is larger this policy
+will be revisited.
+
+Code reviewers should look for the quality of what is being submitted,
+including conformance with this HACKING file.
+
+Changes which all users should be made aware of should be documented in NEWS.
+
+
+NEWS management
+---------------
+
+The file NEWS is structured as a sorted list of releases. Each release can have
+a free form description and one or more sections with bullet point items.
+Sections in use today are 'Improvements' and 'Changes'. To ease merging between
+branches, the bullet points are kept alphabetically sorted. The release NEXT is
+permanently present at the top of the list.
+
+
+Release tasks
+-------------
+
+ 1. Choose a version number, say X.Y.Z
+ 1. Branch from trunk to testtools-X.Y.Z
+ 1. In testtools-X.Y.Z, ensure __init__ has version X.Y.Z.
+ 1. Replace NEXT in NEWS with the version number X.Y.Z, adjusting the reST.
+ 1. Possibly write a blurb into NEWS.
+ 1. Replace any additional references to NEXT with the version being
+ released. (should be none).
+ 1. Commit the changes.
+ 1. Tag the release, bzr tag testtools-X.Y.Z
+ 1. Create a source distribution and upload to pypi ('make release').
+ 1. Make sure all "Fix committed" bugs are in the 'next' milestone on
+ Launchpad
+ 1. Rename the 'next' milestone on Launchpad to 'X.Y.Z'
+ 1. Create a release on the newly-renamed 'X.Y.Z' milestone
+ 1. Upload the tarball and asc file to Launchpad
+ 1. Merge the release branch testtools-X.Y.Z into trunk. Before the commit,
+ add a NEXT heading to the top of NEWS and bump the version in __init__.py.
+ Push trunk to Launchpad
+ 1. If a new series has been created (e.g. 0.10.0), make the series on Launchpad.
+ 1. Make a new milestone for the *next release*.
+ 1. During release we rename NEXT to $version.
+ 1. We call new milestones NEXT.
diff --git a/lib/testtools/LICENSE b/lib/testtools/LICENSE
new file mode 100644
index 0000000000..071d7359d2
--- /dev/null
+++ b/lib/testtools/LICENSE
@@ -0,0 +1,39 @@
+Copyright (c) 2008-2010 Jonathan M. Lange <jml@mumak.net> and the testtools
+authors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Some code in testtools/run.py taken from Python's unittest module:
+Copyright (c) 1999-2003 Steve Purcell
+Copyright (c) 2003-2010 Python Software Foundation
+
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
diff --git a/lib/testtools/MANIFEST.in b/lib/testtools/MANIFEST.in
new file mode 100644
index 0000000000..6d1bf1170f
--- /dev/null
+++ b/lib/testtools/MANIFEST.in
@@ -0,0 +1,8 @@
+include LICENSE
+include HACKING
+include Makefile
+include MANIFEST.in
+include MANUAL
+include NEWS
+include README
+include .bzrignore
diff --git a/lib/testtools/MANUAL b/lib/testtools/MANUAL
new file mode 100644
index 0000000000..7e7853c7e7
--- /dev/null
+++ b/lib/testtools/MANUAL
@@ -0,0 +1,349 @@
+======
+Manual
+======
+
+Introduction
+------------
+
+This document provides an overview of the features provided by testtools. Refer
+to the API docs (i.e. docstrings) for full details on a particular feature.
+
+Extensions to TestCase
+----------------------
+
+Custom exception handling
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+testtools provides a way to control how test exceptions are handled. To do
+this, add a new exception to self.exception_handlers on a TestCase. For
+example::
+
+ >>> self.exception_handlers.insert(-1, (ExceptionClass, handler)).
+
+Having done this, if any of setUp, tearDown, or the test method raise
+ExceptionClass, handler will be called with the test case, test result and the
+raised exception.
+
+Controlling test execution
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you want to control more than just how exceptions are raised, you can
+provide a custom `RunTest` to a TestCase. The `RunTest` object can change
+everything about how the test executes.
+
+To work with `testtools.TestCase`, a `RunTest` must have a factory that takes
+a test and an optional list of exception handlers. Instances returned by the
+factory must have a `run()` method that takes an optional `TestResult` object.
+
+The default is `testtools.runtest.RunTest` and calls 'setUp', the test method
+and 'tearDown' in the normal, vanilla way that Python's standard unittest
+does.
+
+To specify a `RunTest` for all the tests in a `TestCase` class, do something
+like this::
+
+ class SomeTests(TestCase):
+ run_tests_with = CustomRunTestFactory
+
+To specify a `RunTest` for a specific test in a `TestCase` class, do::
+
+ class SomeTests(TestCase):
+ @run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever')
+ def test_something(self):
+ pass
+
+In addition, either of these can be overridden by passing a factory in to the
+`TestCase` constructor with the optional 'runTest' argument.
+
+TestCase.addCleanup
+~~~~~~~~~~~~~~~~~~~
+
+addCleanup is a robust way to arrange for a cleanup function to be called
+before tearDown. This is a powerful and simple alternative to putting cleanup
+logic in a try/finally block or tearDown method. e.g.::
+
+ def test_foo(self):
+ foo.lock()
+ self.addCleanup(foo.unlock)
+ ...
+
+Cleanups can also report multiple errors, if appropriate by wrapping them in
+a testtools.MultipleExceptions object::
+
+ raise MultipleExceptions(exc_info1, exc_info2)
+
+
+TestCase.addOnException
+~~~~~~~~~~~~~~~~~~~~~~~
+
+addOnException adds an exception handler that will be called from the test
+framework when it detects an exception from your test code. The handler is
+given the exc_info for the exception, and can use this opportunity to attach
+more data (via the addDetails API) and potentially other uses.
+
+
+TestCase.patch
+~~~~~~~~~~~~~~
+
+``patch`` is a convenient way to monkey-patch a Python object for the duration
+of your test. It's especially useful for testing legacy code. e.g.::
+
+ def test_foo(self):
+ my_stream = StringIO()
+ self.patch(sys, 'stderr', my_stream)
+ run_some_code_that_prints_to_stderr()
+ self.assertEqual('', my_stream.getvalue())
+
+The call to ``patch`` above masks sys.stderr with 'my_stream' so that anything
+printed to stderr will be captured in a StringIO variable that can be actually
+tested. Once the test is done, the real sys.stderr is restored to its rightful
+place.
+
+
+TestCase.skipTest
+~~~~~~~~~~~~~~~~~
+
+``skipTest`` is a simple way to have a test stop running and be reported as a
+skipped test, rather than a success/error/failure. This is an alternative to
+convoluted logic during test loading, permitting later and more localized
+decisions about the appropriateness of running a test. Many reasons exist to
+skip a test - for instance when a dependency is missing, or if the test is
+expensive and should not be run while on laptop battery power, or if the test
+is testing an incomplete feature (this is sometimes called a TODO). Using this
+feature when running your test suite with a TestResult object that is missing
+the ``addSkip`` method will result in the ``addError`` method being invoked
+instead. ``skipTest`` was previously known as ``skip`` but as Python 2.7 adds
+``skipTest`` support, the ``skip`` name is now deprecated (but no warning
+is emitted yet - some time in the future we may do so).
+
+TestCase.useFixture
+~~~~~~~~~~~~~~~~~~~
+
+``useFixture(fixture)`` calls setUp on the fixture, schedules a cleanup to
+clean it up, and schedules a cleanup to attach all details held by the
+fixture to the details dict of the test case. The fixture object should meet
+the ``fixtures.Fixture`` protocol (version 0.3.4 or newer). This is useful
+for moving code out of setUp and tearDown methods and into composable side
+classes.
+
+
+New assertion methods
+~~~~~~~~~~~~~~~~~~~~~
+
+testtools adds several assertion methods:
+
+ * assertIn
+ * assertNotIn
+ * assertIs
+ * assertIsNot
+ * assertIsInstance
+ * assertThat
+
+
+Improved assertRaises
+~~~~~~~~~~~~~~~~~~~~~
+
+TestCase.assertRaises returns the caught exception. This is useful for
+asserting more things about the exception than just the type::
+
+ error = self.assertRaises(UnauthorisedError, thing.frobnicate)
+ self.assertEqual('bob', error.username)
+ self.assertEqual('User bob cannot frobnicate', str(error))
+
+Note that this is incompatible with the assertRaises in unittest2/Python2.7.
+While we have no immediate plans to change to be compatible consider using the
+new assertThat facility instead::
+
+ self.assertThat(
+ lambda: thing.frobnicate('foo', 'bar'),
+ Raises(MatchesException(UnauthorisedError('bob')))
+
+There is also a convenience function to handle this common case::
+
+ self.assertThat(
+ lambda: thing.frobnicate('foo', 'bar'),
+ raises(UnauthorisedError('bob')))
+
+
+TestCase.assertThat
+~~~~~~~~~~~~~~~~~~~
+
+assertThat is a clean way to write complex assertions without tying them to
+the TestCase inheritance hierarchy (and thus making them easier to reuse).
+
+assertThat takes an object to be matched, and a matcher, and fails if the
+matcher does not match the matchee.
+
+See pydoc testtools.Matcher for the protocol that matchers need to implement.
+
+testtools includes some matchers in testtools.matchers.
+python -c 'import testtools.matchers; print testtools.matchers.__all__' will
+list those matchers.
+
+An example using the DocTestMatches matcher which uses doctests example
+matching logic::
+
+ def test_foo(self):
+ self.assertThat([1,2,3,4], DocTestMatches('[1, 2, 3, 4]'))
+
+
+Creation methods
+~~~~~~~~~~~~~~~~
+
+testtools.TestCase implements creation methods called ``getUniqueString`` and
+``getUniqueInteger``. See pages 419-423 of *xUnit Test Patterns* by Meszaros
+for a detailed discussion of creation methods.
+
+
+Test renaming
+~~~~~~~~~~~~~
+
+``testtools.clone_test_with_new_id`` is a function to copy a test case
+instance to one with a new name. This is helpful for implementing test
+parameterization.
+
+
+Extensions to TestResult
+------------------------
+
+TestResult.addSkip
+~~~~~~~~~~~~~~~~~~
+
+This method is called on result objects when a test skips. The
+``testtools.TestResult`` class records skips in its ``skip_reasons`` instance
+dict. These can be reported on in much the same way as successful tests.
+
+
+TestResult.time
+~~~~~~~~~~~~~~~
+
+This method controls the time used by a TestResult, permitting accurate
+timing of test results gathered on different machines or in different threads.
+See pydoc testtools.TestResult.time for more details.
+
+
+ThreadsafeForwardingResult
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A TestResult which forwards activity to another test result, but synchronises
+on a semaphore to ensure that all the activity for a single test arrives in a
+batch. This allows simple TestResults which do not expect concurrent test
+reporting to be fed the activity from multiple test threads, or processes.
+
+Note that when you provide multiple errors for a single test, the target sees
+each error as a distinct complete test.
+
+
+TextTestResult
+~~~~~~~~~~~~~~
+
+A TestResult that provides a text UI very similar to the Python standard
+library UI. Key differences are that it supports the extended outcomes and
+details API, and is completely encapsulated into the result object, permitting
+it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes
+are displayed (yet). It is also a 'quiet' result with no dots or verbose mode.
+These limitations will be corrected soon.
+
+
+Test Doubles
+~~~~~~~~~~~~
+
+In testtools.testresult.doubles there are three test doubles that testtools
+uses for its own testing: Python26TestResult, Python27TestResult,
+ExtendedTestResult. These TestResult objects implement a single variation of
+the TestResult API each, and log activity to a list self._events. These are
+made available for the convenience of people writing their own extensions.
+
+
+startTestRun and stopTestRun
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Python 2.7 added hooks 'startTestRun' and 'stopTestRun' which are called
+before and after the entire test run. 'stopTestRun' is particularly useful for
+test results that wish to produce summary output.
+
+testtools.TestResult provides empty startTestRun and stopTestRun methods, and
+the default testtools runner will call these methods appropriately.
+
+
+Extensions to TestSuite
+-----------------------
+
+ConcurrentTestSuite
+~~~~~~~~~~~~~~~~~~~
+
+A TestSuite for parallel testing. This is used in conjunction with a helper that
+runs a single suite in some parallel fashion (for instance, forking, handing
+off to a subprocess, to a compute cloud, or simple threads).
+ConcurrentTestSuite uses the helper to get a number of separate runnable
+objects with a run(result), runs them all in threads using the
+ThreadsafeForwardingResult to coalesce their activity.
+
+
+Running tests
+-------------
+
+testtools provides a convenient way to run a test suite using the testtools
+result object: python -m testtools.run testspec [testspec...].
+
+To run tests with Python 2.4, you'll have to do something like:
+ python2.4 /path/to/testtools/run.py testspec [testspec ...].
+
+
+Test discovery
+--------------
+
+testtools includes a backported version of the Python 2.7 glue for using the
+discover test discovery module. If you either have Python 2.7/3.1 or newer, or
+install the 'discover' module, then you can invoke discovery::
+
+ python -m testtools.run discover [path]
+
+For more information see the Python 2.7 unittest documentation, or::
+
+ python -m testtools.run --help
+
+
+Twisted support
+---------------
+
+Support for running Twisted tests is very experimental right now. You
+shouldn't really do it. However, if you are going to, here are some tips for
+converting your Trial tests into testtools tests.
+
+ * Use the AsynchronousDeferredRunTest runner
+ * Make sure to upcall to setUp and tearDown
+ * Don't use setUpClass or tearDownClass
+ * Don't expect setting .todo, .timeout or .skip attributes to do anything
+ * flushLoggedErrors is not there for you. Sorry.
+ * assertFailure is not there for you. Even more sorry.
+
+
+General helpers
+---------------
+
+Lots of the time we would like to conditionally import modules. testtools
+needs to do this itself, and graciously extends the ability to its users.
+
+Instead of::
+
+ try:
+ from twisted.internet import defer
+ except ImportError:
+ defer = None
+
+You can do::
+
+ defer = try_import('twisted.internet.defer')
+
+
+Instead of::
+
+ try:
+ from StringIO import StringIO
+ except ImportError:
+ from io import StringIO
+
+You can do::
+
+ StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
diff --git a/lib/testtools/Makefile b/lib/testtools/Makefile
new file mode 100644
index 0000000000..c36fbd8012
--- /dev/null
+++ b/lib/testtools/Makefile
@@ -0,0 +1,35 @@
+# See README for copyright and licensing details.
+
+PYTHON=python
+SOURCES=$(shell find testtools -name "*.py")
+
+check:
+ PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run testtools.tests.test_suite
+
+TAGS: ${SOURCES}
+ ctags -e -R testtools/
+
+tags: ${SOURCES}
+ ctags -R testtools/
+
+clean:
+ rm -f TAGS tags
+ find testtools -name "*.pyc" -exec rm '{}' \;
+
+prerelease:
+ # An existing MANIFEST breaks distutils sometimes. Avoid that.
+ -rm MANIFEST
+
+release:
+ ./setup.py sdist upload --sign
+
+snapshot: prerelease
+ ./setup.py sdist
+
+apidocs:
+ pydoctor --make-html --add-package testtools \
+ --docformat=restructuredtext --project-name=testtools \
+ --project-url=https://launchpad.net/testtools
+
+
+.PHONY: check clean prerelease release apidocs
diff --git a/lib/testtools/NEWS b/lib/testtools/NEWS
new file mode 100644
index 0000000000..4d2a74430f
--- /dev/null
+++ b/lib/testtools/NEWS
@@ -0,0 +1,468 @@
+testtools NEWS
+++++++++++++++
+
+NEXT
+~~~~
+
+Changes
+-------
+
+* The timestamps generated by ``TestResult`` objects when no timing data has
+ been received are now datetime-with-timezone, which allows them to be
+ sensibly serialised and transported. (Robert Collins, #692297)
+
+Improvements
+------------
+
+* ``MultiTestResult`` now forwards the ``time`` API. (Robert Collins, #692294)
+
+0.9.8
+~~~~~
+
+In this release we bring some very interesting improvements:
+
+* new matchers for exceptions, sets, lists, dicts and more.
+
+* experimental (works but the contract isn't supported) twisted reactor
+ support.
+
+* The built in runner can now list tests and filter tests (the -l and
+ --load-list options).
+
+Changes
+-------
+
+* addUnexpectedSuccess is translated to addFailure for test results that don't
+ know about addUnexpectedSuccess. Further, it fails the entire result for
+ all testtools TestResults (i.e. wasSuccessful() returns False after
+ addUnexpectedSuccess has been called). Note that when using a delegating
+ result such as ThreadsafeForwardingResult, MultiTestResult or
+ ExtendedToOriginalDecorator then the behaviour of addUnexpectedSuccess is
+ determined by the delegated to result(s).
+ (Jonathan Lange, Robert Collins, #654474, #683332)
+
+* startTestRun will reset any errors on the result. That is, wasSuccessful()
+ will always return True immediately after startTestRun() is called. This
+ only applies to delegated test results (ThreadsafeForwardingResult,
+ MultiTestResult and ExtendedToOriginalDecorator) if the delegated to result
+ is a testtools test result - we cannot reliably reset the state of unknown
+ test result class instances. (Jonathan Lange, Robert Collins, #683332)
+
+* Responsibility for running test cleanups has been moved to ``RunTest``.
+ This change does not affect public APIs and can be safely ignored by test
+ authors. (Jonathan Lange, #662647)
+
+Improvements
+------------
+
+* ``assertIsInstance`` supports a custom error message to be supplied, which
+ is necessary when using ``assertDictEqual`` on Python 2.7 with a
+ ``testtools.TestCase`` base class. (Jelmer Vernooij)
+
+* Experimental support for running tests that return Deferreds.
+ (Jonathan Lange, Martin [gz])
+
+* Provide a per-test decorator, run_test_with, to specify which RunTest
+ object to use for a given test. (Jonathan Lange, #657780)
+
+* Fix the runTest parameter of TestCase to actually work, rather than raising
+ a TypeError. (Jonathan Lange, #657760)
+
+* New matcher ``EndsWith`` added to complement the existing ``StartsWith``
+ matcher. (Jonathan Lange, #669165)
+
+* Non-release snapshots of testtools will now work with buildout.
+ (Jonathan Lange, #613734)
+
+* Malformed SyntaxErrors no longer blow up the test suite. (Martin [gz])
+
+* ``MatchesException`` added to the ``testtools.matchers`` module - matches
+ an exception class and parameters. (Robert Collins)
+
+* ``MismatchesAll.describe`` no longer appends a trailing newline.
+ (Michael Hudson-Doyle, #686790)
+
+* New ``KeysEqual`` matcher. (Jonathan Lange)
+
+* New helpers for conditionally importing modules, ``try_import`` and
+ ``try_imports``. (Jonathan Lange)
+
+* ``Raises`` added to the ``testtools.matchers`` module - matches if the
+ supplied callable raises, and delegates to an optional matcher for validation
+ of the exception. (Robert Collins)
+
+* ``raises`` added to the ``testtools.matchers`` module - matches if the
+ supplied callable raises and delegates to ``MatchesException`` to validate
+ the exception. (Jonathan Lange)
+
+* Tests will now pass on Python 2.6.4 : an ``Exception`` change made only in
+ 2.6.4 and reverted in Python 2.6.5 was causing test failures on that version.
+ (Martin [gz], #689858).
+
+* ``testtools.TestCase.useFixture`` has been added to glue with fixtures nicely.
+ (Robert Collins)
+
+* ``testtools.run`` now supports ``-l`` to list tests rather than executing
+ them. This is useful for integration with external test analysis/processing
+ tools like subunit and testrepository. (Robert Collins)
+
+* ``testtools.run`` now supports ``--load-list``, which takes a file containing
+ test ids, one per line, and intersects those ids with the tests found. This
+ allows fine grained control of what tests are run even when the tests cannot
+ be named as objects to import (e.g. due to test parameterisation via
+ testscenarios). (Robert Collins)
+
+* Update documentation to say how to use testtools.run() on Python 2.4.
+ (Jonathan Lange, #501174)
+
+* ``text_content`` conveniently converts a Python string to a Content object.
+ (Jonathan Lange, James Westby)
+
+
+
+0.9.7
+~~~~~
+
+Lots of little cleanups in this release; many small improvements to make your
+testing life more pleasant.
+
+Improvements
+------------
+
+* Cleanups can raise ``testtools.MultipleExceptions`` if they have multiple
+ exceptions to report. For instance, a cleanup which is itself responsible for
+ running several different internal cleanup routines might use this.
+
+* Code duplication between assertEqual and the matcher Equals has been removed.
+
+* In normal circumstances, a TestCase will no longer share details with clones
+ of itself. (Andrew Bennetts, bug #637725)
+
+* Fewer exception object cycles are generated (reduces peak memory use between
+ garbage collection). (Martin [gz])
+
+* New matchers 'DoesNotStartWith' and 'StartsWith' contributed by Canonical
+ from the Launchpad project. Written by James Westby.
+
+* Timestamps as produced by subunit protocol clients are now forwarded in the
+ ThreadsafeForwardingResult so correct test durations can be reported.
+ (Martin [gz], Robert Collins, #625594)
+
+* With unittest from Python 2.7 skipped tests will now show only the reason
+ rather than a serialisation of all details. (Martin [gz], #625583)
+
+* The testtools release process is now a little better documented and a little
+ smoother. (Jonathan Lange, #623483, #623487)
+
+
+0.9.6
+~~~~~
+
+Nothing major in this release, just enough small bits and pieces to make it
+useful enough to upgrade to.
+
+In particular, a serious bug in assertThat() has been fixed, it's easier to
+write Matchers, there's a TestCase.patch() method for those inevitable monkey
+patches and TestCase.assertEqual gives slightly nicer errors.
+
+Improvements
+------------
+
+* 'TestCase.assertEqual' now formats errors a little more nicely, in the
+ style of bzrlib.
+
+* Added `PlaceHolder` and `ErrorHolder`, TestCase-like objects that can be
+ used to add results to a `TestResult`.
+
+* 'Mismatch' now takes optional description and details parameters, so
+ custom Matchers aren't compelled to make their own subclass.
+
+* jml added a built-in UTF8_TEXT ContentType to make it slightly easier to
+ add details to test results. See bug #520044.
+
+* Fix a bug in our built-in matchers where assertThat would blow up if any
+ of them failed. All built-in mismatch objects now provide get_details().
+
+* New 'Is' matcher, which lets you assert that a thing is identical to
+ another thing.
+
+* New 'LessThan' matcher which lets you assert that a thing is less than
+ another thing.
+
+* TestCase now has a 'patch()' method to make it easier to monkey-patching
+ objects in tests. See the manual for more information. Fixes bug #310770.
+
+* MultiTestResult methods now pass back return values from the results it
+ forwards to.
+
+0.9.5
+~~~~~
+
+This release fixes some obscure traceback formatting issues that probably
+weren't affecting you but were certainly breaking our own test suite.
+
+Changes
+-------
+
+* Jamu Kakar has updated classes in testtools.matchers and testtools.runtest
+ to be new-style classes, fixing bug #611273.
+
+Improvements
+------------
+
+* Martin[gz] fixed traceback handling to handle cases where extract_tb returns
+ a source line of None. Fixes bug #611307.
+
+* Martin[gz] fixed a unicode issue that was causing the tests to fail,
+ closing bug #604187.
+
+* testtools now handles string exceptions (although why would you want to use
+ them?) and formats their tracebacks correctly. Thanks to Martin[gz] for
+ fixing bug #592262.
+
+0.9.4
+~~~~~
+
+This release overhauls the traceback formatting layer to deal with Python 2
+line numbers and traceback objects often being local user encoded strings
+rather than unicode objects. Test discovery has also been added and Python 3.1
+is also supported. Finally, the Mismatch protocol has been extended to let
+Matchers collaborate with tests in supplying detailed data about failures.
+
+Changes
+-------
+
+* testtools.utils has been renamed to testtools.compat. Importing
+ testtools.utils will now generate a deprecation warning.
+
+Improvements
+------------
+
+* Add machinery for Python 2 to create unicode tracebacks like those used by
+ Python 3. This means testtools no longer throws on encountering non-ascii
+ filenames, source lines, or exception strings when displaying test results.
+ Largely contributed by Martin[gz] with some tweaks from Robert Collins.
+
+* James Westby has supplied test discovery support using the Python 2.7
+ TestRunner in testtools.run. This requires the 'discover' module. This
+ closes bug #250764.
+
+* Python 3.1 is now supported, thanks to Martin[gz] for a partial patch.
+ This fixes bug #592375.
+
+* TestCase.addCleanup has had its docstring corrected about when cleanups run.
+
+* TestCase.skip is now deprecated in favour of TestCase.skipTest, which is the
+ Python2.7 spelling for skip. This closes bug #560436.
+
+* Tests work on IronPython patch from Martin[gz] applied.
+
+* Thanks to a patch from James Westby testtools.matchers.Mismatch can now
+ supply a get_details method, which assertThat will query to provide
+ additional attachments. This can be used to provide additional detail
+  about the mismatch that doesn't suit being included in describe(). For
+ instance, if the match process was complex, a log of the process could be
+ included, permitting debugging.
+
+* testtools.testresults.real._StringException will now answer __str__ if its
+ value is unicode by encoding with UTF8, and vice versa to answer __unicode__.
+ This permits subunit decoded exceptions to contain unicode and still format
+ correctly.
+
+0.9.3
+~~~~~
+
+More matchers, Python 2.4 support, faster test cloning by switching to copy
+rather than deepcopy and better output when exceptions occur in cleanups are
+the defining characteristics of this release.
+
+Improvements
+------------
+
+* New matcher "Annotate" that adds a simple string message to another matcher,
+ much like the option 'message' parameter to standard library assertFoo
+ methods.
+
+* New matchers "Not" and "MatchesAll". "Not" will invert another matcher, and
+  "MatchesAll" needs a successful match for all of its arguments.
+
+* On Python 2.4, where types.FunctionType cannot be deepcopied, testtools will
+ now monkeypatch copy._deepcopy_dispatch using the same trivial patch that
+ added such support to Python 2.5. The monkey patch is triggered by the
+ absence of FunctionType from the dispatch dict rather than a version check.
+ Bug #498030.
+
+* On windows the test 'test_now_datetime_now' should now work reliably.
+
+* TestCase.getUniqueInteger and TestCase.getUniqueString now have docstrings.
+
+* TestCase.getUniqueString now takes an optional prefix parameter, so you can
+ now use it in circumstances that forbid strings with '.'s, and such like.
+
+* testtools.testcase.clone_test_with_new_id now uses copy.copy, rather than
+ copy.deepcopy. Tests that need a deeper copy should use the copy protocol to
+ control how they are copied. Bug #498869.
+
+* The backtrace test result output tests should now pass on windows and other
+ systems where os.sep is not '/'.
+
+* When a cleanUp or tearDown exception occurs, it is now accumulated as a new
+ traceback in the test details, rather than as a separate call to addError /
+ addException. This makes testtools work better with most TestResult objects
+ and fixes bug #335816.
+
+
+0.9.2
+~~~~~
+
+Python 3 support, more matchers and better consistency with Python 2.7 --
+you'd think that would be enough for a point release. Well, we here on the
+testtools project think that you deserve more.
+
+We've added a hook so that user code can be called just-in-time whenever there
+is an exception, and we've also factored out the "run" logic of test cases so
+that new outcomes can be added without fiddling with the actual flow of logic.
+
+It might sound like small potatoes, but it's changes like these that will
+bring about the end of test frameworks.
+
+
+Improvements
+------------
+
+* A failure in setUp and tearDown now report as failures not as errors.
+
+* Cleanups now run after tearDown to be consistent with Python 2.7's cleanup
+ feature.
+
+* ExtendedToOriginalDecorator now passes unrecognised attributes through
+ to the decorated result object, permitting other extensions to the
+ TestCase -> TestResult protocol to work.
+
+* It is now possible to trigger code just-in-time after an exception causes
+ a test outcome such as failure or skip. See the testtools MANUAL or
+ ``pydoc testtools.TestCase.addOnException``. (bug #469092)
+
+* New matcher Equals which performs a simple equality test.
+
+* New matcher MatchesAny which looks for a match of any of its arguments.
+
+* TestCase no longer breaks if a TestSkipped exception is raised with no
+ parameters.
+
+* TestCase.run now clones test cases before they are run and runs the clone.
+ This reduces memory footprint in large test runs - state accumulated on
+ test objects during their setup and execution gets freed when test case
+ has finished running unless the TestResult object keeps a reference.
+ NOTE: As test cloning uses deepcopy, this can potentially interfere if
+ a test suite has shared state (such as the testscenarios or testresources
+ projects use). Use the __deepcopy__ hook to control the copying of such
+ objects so that the shared references stay shared.
+
+* Testtools now accepts contributions without copyright assignment under some
+ circumstances. See HACKING for details.
+
+* Testtools now provides a convenient way to run a test suite using the
+ testtools result object: python -m testtools.run testspec [testspec...].
+
+* Testtools now works on Python 3, thanks to Benjamin Peterson.
+
+* Test execution now uses a separate class, testtools.RunTest to run single
+ tests. This can be customised and extended in a more consistent fashion than
+ the previous run method idiom. See pydoc for more information.
+
+* The test doubles that testtools itself uses are now available as part of
+  the testtools API in testtools.testresult.doubles.
+
+* TracebackContent now sets utf8 as the charset encoding, rather than not
+ setting one and encoding with the default encoder.
+
+* With python2.7 testtools.TestSkipped will be the unittest.case.SkipTest
+ exception class making skips compatible with code that manually raises the
+ standard library exception. (bug #490109)
+
+Changes
+-------
+
+* TestCase.getUniqueInteger is now implemented using itertools.count. Thanks
+ to Benjamin Peterson for the patch. (bug #490111)
+
+
+0.9.1
+~~~~~
+
+The new matcher API introduced in 0.9.0 had a small flaw where the matchee
+would be evaluated twice to get a description of the mismatch. This could lead
+to bugs if the act of matching caused side effects to occur in the matchee.
+Since having such side effects isn't desirable, we have changed the API now
+before it has become widespread.
+
+Changes
+-------
+
+* Matcher API changed to avoid evaluating matchee twice. Please consult
+ the API documentation.
+
+* TestCase.getUniqueString now uses the test id, not the test method name,
+ which works nicer with parameterised tests.
+
+Improvements
+------------
+
+* Python2.4 is now supported again.
+
+
+0.9.0
+~~~~~
+
+This release of testtools is perhaps the most interesting and exciting one
+it's ever had. We've continued in bringing together the best practices of unit
+testing from across a raft of different Python projects, but we've also
+extended our mission to incorporating unit testing concepts from other
+languages and from our own research, led by Robert Collins.
+
+We now support skipping and expected failures. We'll make sure that you
+up-call setUp and tearDown, avoiding unexpected testing weirdnesses. We're
+now compatible with Python 2.5, 2.6 and 2.7 unittest library.
+
+All in all, if you are serious about unit testing and want to get the best
+thinking from the whole Python community, you should get this release.
+
+Improvements
+------------
+
+* A new TestResult API has been added for attaching details to test outcomes.
+ This API is currently experimental, but is being prepared with the intent
+ of becoming an upstream Python API. For more details see pydoc
+ testtools.TestResult and the TestCase addDetail / getDetails methods.
+
+* assertThat has been added to TestCase. This new assertion supports
+ a hamcrest-inspired matching protocol. See pydoc testtools.Matcher for
+ details about writing matchers, and testtools.matchers for the included
+ matchers. See http://code.google.com/p/hamcrest/.
+
+* Compatible with Python 2.6 and Python 2.7
+
+* Failing to upcall in setUp or tearDown will now cause a test failure.
+ While the base methods do nothing, failing to upcall is usually a problem
+ in deeper hierarchies, and checking that the root method is called is a
+ simple way to catch this common bug.
+
+* New TestResult decorator ExtendedToOriginalDecorator which handles
+ downgrading extended API calls like addSkip to older result objects that
+ do not support them. This is used internally to make testtools simpler but
+ can also be used to simplify other code built on or for use with testtools.
+
+* New TextTestResult supporting the extended APIs that testtools provides.
+
+* Nose will no longer find 'runTest' tests in classes derived from
+ testtools.testcase.TestCase (bug #312257).
+
+* Supports the Python 2.7/3.1 addUnexpectedSuccess and addExpectedFailure
+ TestResult methods, with a support function 'knownFailure' to let tests
+ trigger these outcomes.
+
+* When using the skip feature with TestResult objects that do not support it
+ a test success will now be reported. Previously an error was reported but
+ production experience has shown that this is too disruptive for projects that
+ are using skips: they cannot get a clean run on down-level result objects.
diff --git a/lib/testtools/README b/lib/testtools/README
new file mode 100644
index 0000000000..83120f01e4
--- /dev/null
+++ b/lib/testtools/README
@@ -0,0 +1,72 @@
+=========
+testtools
+=========
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework.
+
+These extensions have been derived from years of experience with unit testing
+in Python and come from many different sources.
+
+Licensing
+---------
+
+This project is distributed under the MIT license and copyright is owned by
+Jonathan M. Lange. See LICENSE for details.
+
+Some code in testtools/run.py is taken from Python's unittest module, and
+is copyright Steve Purcell and the Python Software Foundation, it is
+distributed under the same license as Python, see LICENSE for details.
+
+
+Required Dependencies
+---------------------
+
+ * Python 2.4+ or 3.0+
+
+Optional Dependencies
+---------------------
+
+If you would like to use our undocumented, unsupported Twisted support, then
+you will need Twisted.
+
+If you want to use ``fixtures`` then you can either install fixtures (e.g. from
+https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures)
+or alternatively just make sure your fixture objects obey the same protocol.
+
+
+Bug reports and patches
+-----------------------
+
+Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
+Patches can also be submitted via Launchpad, or mailed to the author. You can
+mail the author directly at jml@mumak.net.
+
+There's no mailing list for this project yet, however the testing-in-python
+mailing list may be a useful resource:
+
+ * Address: testing-in-python@lists.idyll.org
+ * Subscription link: http://lists.idyll.org/listinfo/testing-in-python
+
+
+History
+-------
+
+testtools used to be called 'pyunit3k'. The name was changed to avoid
+conflating the library with the Python 3.0 release (commonly referred to as
+'py3k').
+
+
+Thanks
+------
+
+ * Canonical Ltd
+ * Bazaar
+ * Twisted Matrix Labs
+ * Robert Collins
+ * Andrew Bennetts
+ * Benjamin Peterson
+ * Jamu Kakar
+ * James Westby
+ * Martin [gz]
+ * Michael Hudson-Doyle
diff --git a/lib/testtools/setup.py b/lib/testtools/setup.py
new file mode 100755
index 0000000000..59e5804f05
--- /dev/null
+++ b/lib/testtools/setup.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+"""Distutils installer for testtools."""
+
+from distutils.core import setup
+import email
+import os
+
+import testtools
+
+
+def get_revno():
+ import bzrlib.workingtree
+ t = bzrlib.workingtree.WorkingTree.open_containing(__file__)[0]
+ return t.branch.revno()
+
+
+def get_version_from_pkg_info():
+ """Get the version from PKG-INFO file if we can."""
+ pkg_info_path = os.path.join(os.path.dirname(__file__), 'PKG-INFO')
+ try:
+ pkg_info_file = open(pkg_info_path, 'r')
+ except (IOError, OSError):
+ return None
+ try:
+ pkg_info = email.message_from_file(pkg_info_file)
+ except email.MessageError:
+ return None
+ return pkg_info.get('Version', None)
+
+
+def get_version():
+ """Return the version of testtools that we are building."""
+ version = '.'.join(
+ str(component) for component in testtools.__version__[0:3])
+ phase = testtools.__version__[3]
+ if phase == 'final':
+ return version
+ pkg_info_version = get_version_from_pkg_info()
+ if pkg_info_version:
+ return pkg_info_version
+ revno = get_revno()
+ if phase == 'alpha':
+ # No idea what the next version will be
+ return 'next-r%s' % revno
+ else:
+ # Preserve the version number but give it a revno prefix
+ return version + '-r%s' % revno
+
+
+def get_long_description():
+ manual_path = os.path.join(os.path.dirname(__file__), 'MANUAL')
+ return open(manual_path).read()
+
+
+setup(name='testtools',
+ author='Jonathan M. Lange',
+ author_email='jml+testtools@mumak.net',
+ url='https://launchpad.net/testtools',
+ description=('Extensions to the Python standard library unit testing '
+ 'framework'),
+ long_description=get_long_description(),
+ version=get_version(),
+ classifiers=["License :: OSI Approved :: MIT License"],
+ packages=['testtools', 'testtools.testresult', 'testtools.tests'])
diff --git a/lib/testtools/testtools/__init__.py b/lib/testtools/testtools/__init__.py
new file mode 100644
index 0000000000..48fa335694
--- /dev/null
+++ b/lib/testtools/testtools/__init__.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2008, 2009, 2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Extensions to the standard Python unittest library."""
+
+__all__ = [
+ 'clone_test_with_new_id',
+ 'ConcurrentTestSuite',
+ 'ErrorHolder',
+ 'ExtendedToOriginalDecorator',
+ 'iterate_tests',
+ 'MultipleExceptions',
+ 'MultiTestResult',
+ 'PlaceHolder',
+ 'run_test_with',
+ 'TestCase',
+ 'TestResult',
+ 'TextTestResult',
+ 'RunTest',
+ 'skip',
+ 'skipIf',
+ 'skipUnless',
+ 'ThreadsafeForwardingResult',
+ 'try_import',
+ 'try_imports',
+ ]
+
+from testtools.helpers import (
+ try_import,
+ try_imports,
+ )
+from testtools.matchers import (
+ Matcher,
+ )
+from testtools.runtest import (
+ MultipleExceptions,
+ RunTest,
+ )
+from testtools.testcase import (
+ ErrorHolder,
+ PlaceHolder,
+ TestCase,
+ clone_test_with_new_id,
+ run_test_with,
+ skip,
+ skipIf,
+ skipUnless,
+ )
+from testtools.testresult import (
+ ExtendedToOriginalDecorator,
+ MultiTestResult,
+ TestResult,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ )
+from testtools.testsuite import (
+ ConcurrentTestSuite,
+ iterate_tests,
+ )
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 9, 9, 'dev', 0)
diff --git a/lib/testtools/testtools/_spinner.py b/lib/testtools/testtools/_spinner.py
new file mode 100644
index 0000000000..98b51a6565
--- /dev/null
+++ b/lib/testtools/testtools/_spinner.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Evil reactor-spinning logic for running Twisted tests.
+
+This code is highly experimental, liable to change and not to be trusted. If
+you couldn't write this yourself, you should not be using it.
+"""
+
+__all__ = [
+ 'DeferredNotFired',
+ 'extract_result',
+ 'NoResultError',
+ 'not_reentrant',
+ 'ReentryError',
+ 'Spinner',
+ 'StaleJunkError',
+ 'TimeoutError',
+ 'trap_unhandled_errors',
+ ]
+
+import signal
+
+from testtools.monkey import MonkeyPatcher
+
+from twisted.internet import defer
+from twisted.internet.base import DelayedCall
+from twisted.internet.interfaces import IReactorThreads
+from twisted.python.failure import Failure
+from twisted.python.util import mergeFunctionMetadata
+
+
+class ReentryError(Exception):
+ """Raised when we try to re-enter a function that forbids it."""
+
+ def __init__(self, function):
+ Exception.__init__(self,
+ "%r in not re-entrant but was called within a call to itself."
+ % (function,))
+
+
+def not_reentrant(function, _calls={}):
+ """Decorates a function as not being re-entrant.
+
+ The decorated function will raise an error if called from within itself.
+ """
+ def decorated(*args, **kwargs):
+ if _calls.get(function, False):
+ raise ReentryError(function)
+ _calls[function] = True
+ try:
+ return function(*args, **kwargs)
+ finally:
+ _calls[function] = False
+ return mergeFunctionMetadata(function, decorated)
+
+
+class DeferredNotFired(Exception):
+ """Raised when we extract a result from a Deferred that's not fired yet."""
+
+
+def extract_result(deferred):
+ """Extract the result from a fired deferred.
+
+ It can happen that you have an API that returns Deferreds for
+ compatibility with Twisted code, but is in fact synchronous, i.e. the
+ Deferreds it returns have always fired by the time it returns. In this
+ case, you can use this function to convert the result back into the usual
+ form for a synchronous API, i.e. the result itself or a raised exception.
+
+ It would be very bad form to use this as some way of checking if a
+ Deferred has fired.
+ """
+ failures = []
+ successes = []
+ deferred.addCallbacks(successes.append, failures.append)
+ if len(failures) == 1:
+ failures[0].raiseException()
+ elif len(successes) == 1:
+ return successes[0]
+ else:
+ raise DeferredNotFired("%r has not fired yet." % (deferred,))
+
+
+def trap_unhandled_errors(function, *args, **kwargs):
+ """Run a function, trapping any unhandled errors in Deferreds.
+
+ Assumes that 'function' will have handled any errors in Deferreds by the
+ time it is complete. This is almost never true of any Twisted code, since
+ you can never tell when someone has added an errback to a Deferred.
+
+ If 'function' raises, then don't bother doing any unhandled error
+ jiggery-pokery, since something horrible has probably happened anyway.
+
+ :return: A tuple of '(result, error)', where 'result' is the value returned
+ by 'function' and 'error' is a list of `defer.DebugInfo` objects that
+ have unhandled errors in Deferreds.
+ """
+ real_DebugInfo = defer.DebugInfo
+ debug_infos = []
+ def DebugInfo():
+ info = real_DebugInfo()
+ debug_infos.append(info)
+ return info
+ defer.DebugInfo = DebugInfo
+ try:
+ result = function(*args, **kwargs)
+ finally:
+ defer.DebugInfo = real_DebugInfo
+ errors = []
+ for info in debug_infos:
+ if info.failResult is not None:
+ errors.append(info)
+ # Disable the destructor that logs to error. We are already
+ # catching the error here.
+ info.__del__ = lambda: None
+ return result, errors
+
+
+class TimeoutError(Exception):
+ """Raised when run_in_reactor takes too long to run a function."""
+
+ def __init__(self, function, timeout):
+ Exception.__init__(self,
+ "%r took longer than %s seconds" % (function, timeout))
+
+
+class NoResultError(Exception):
+ """Raised when the reactor has stopped but we don't have any result."""
+
+ def __init__(self):
+ Exception.__init__(self,
+ "Tried to get test's result from Deferred when no result is "
+ "available. Probably means we received SIGINT or similar.")
+
+
+class StaleJunkError(Exception):
+ """Raised when there's junk in the spinner from a previous run."""
+
+ def __init__(self, junk):
+ Exception.__init__(self,
+ "There was junk in the spinner from a previous run. "
+ "Use clear_junk() to clear it out: %r" % (junk,))
+
+
+class Spinner(object):
+ """Spin the reactor until a function is done.
+
+ This class emulates the behaviour of twisted.trial in that it grotesquely
+ and horribly spins the Twisted reactor while a function is running, and
+ then kills the reactor when that function is complete and all the
+ callbacks in its chains are done.
+ """
+
+ _UNSET = object()
+
+ # Signals that we save and restore for each spin.
+ _PRESERVED_SIGNALS = [
+ 'SIGINT',
+ 'SIGTERM',
+ 'SIGCHLD',
+ ]
+
+ # There are many APIs within Twisted itself where a Deferred fires but
+ # leaves cleanup work scheduled for the reactor to do. Arguably, many of
+ # these are bugs. As such, we provide a facility to iterate the reactor
+ # event loop a number of times after every call, in order to shake out
+ # these buggy-but-commonplace events. The default is 0, because that is
+ # the ideal, and it actually works for many cases.
+ _OBLIGATORY_REACTOR_ITERATIONS = 0
+
+ def __init__(self, reactor, debug=False):
+ """Construct a Spinner.
+
+ :param reactor: A Twisted reactor.
+ :param debug: Whether or not to enable Twisted's debugging. Defaults
+ to False.
+ """
+ self._reactor = reactor
+ self._timeout_call = None
+ self._success = self._UNSET
+ self._failure = self._UNSET
+ self._saved_signals = []
+ self._junk = []
+ self._debug = debug
+
+ def _cancel_timeout(self):
+ if self._timeout_call:
+ self._timeout_call.cancel()
+
+ def _get_result(self):
+ if self._failure is not self._UNSET:
+ self._failure.raiseException()
+ if self._success is not self._UNSET:
+ return self._success
+ raise NoResultError()
+
+ def _got_failure(self, result):
+ self._cancel_timeout()
+ self._failure = result
+
+ def _got_success(self, result):
+ self._cancel_timeout()
+ self._success = result
+
+ def _stop_reactor(self, ignored=None):
+ """Stop the reactor!"""
+ self._reactor.crash()
+
+ def _timed_out(self, function, timeout):
+ e = TimeoutError(function, timeout)
+ self._failure = Failure(e)
+ self._stop_reactor()
+
+ def _clean(self):
+ """Clean up any junk in the reactor.
+
+ Will always iterate the reactor a number of times equal to
+ `_OBLIGATORY_REACTOR_ITERATIONS`. This is to work around bugs in
+ various Twisted APIs where a Deferred fires but still leaves work
+ (e.g. cancelling a call, actually closing a connection) for the
+ reactor to do.
+ """
+ for i in range(self._OBLIGATORY_REACTOR_ITERATIONS):
+ self._reactor.iterate(0)
+ junk = []
+ for delayed_call in self._reactor.getDelayedCalls():
+ delayed_call.cancel()
+ junk.append(delayed_call)
+ for selectable in self._reactor.removeAll():
+ # Twisted sends a 'KILL' signal to selectables that provide
+ # IProcessTransport. Since only _dumbwin32proc processes do this,
+ # we aren't going to bother.
+ junk.append(selectable)
+ if IReactorThreads.providedBy(self._reactor):
+ if self._reactor.threadpool is not None:
+ self._reactor._stopThreadPool()
+ self._junk.extend(junk)
+ return junk
+
+ def clear_junk(self):
+ """Clear out our recorded junk.
+
+ :return: Whatever junk was there before.
+ """
+ junk = self._junk
+ self._junk = []
+ return junk
+
+ def get_junk(self):
+ """Return any junk that has been found on the reactor."""
+ return self._junk
+
+ def _save_signals(self):
+ available_signals = [
+ getattr(signal, name, None) for name in self._PRESERVED_SIGNALS]
+ self._saved_signals = [
+ (sig, signal.getsignal(sig)) for sig in available_signals if sig]
+
+ def _restore_signals(self):
+ for sig, hdlr in self._saved_signals:
+ signal.signal(sig, hdlr)
+ self._saved_signals = []
+
+ @not_reentrant
+ def run(self, timeout, function, *args, **kwargs):
+ """Run 'function' in a reactor.
+
+ If 'function' returns a Deferred, the reactor will keep spinning until
+ the Deferred fires and its chain completes or until the timeout is
+ reached -- whichever comes first.
+
+ :raise TimeoutError: If 'timeout' is reached before the `Deferred`
+ returned by 'function' has completed its callback chain.
+ :raise NoResultError: If the reactor is somehow interrupted before
+ the `Deferred` returned by 'function' has completed its callback
+ chain.
+ :raise StaleJunkError: If there's junk in the spinner from a previous
+ run.
+ :return: Whatever is at the end of the function's callback chain. If
+ it's an error, then raise that.
+ """
+ debug = MonkeyPatcher()
+ if self._debug:
+ debug.add_patch(defer.Deferred, 'debug', True)
+ debug.add_patch(DelayedCall, 'debug', True)
+ debug.patch()
+ try:
+ junk = self.get_junk()
+ if junk:
+ raise StaleJunkError(junk)
+ self._save_signals()
+ self._timeout_call = self._reactor.callLater(
+ timeout, self._timed_out, function, timeout)
+ # Calling 'stop' on the reactor will make it impossible to
+ # re-start the reactor. Since the default signal handlers for
+ # TERM, BREAK and INT all call reactor.stop(), we'll patch it over
+ # with crash. XXX: It might be a better idea to either install
+ # custom signal handlers or to override the methods that are
+ # Twisted's signal handlers.
+ stop, self._reactor.stop = self._reactor.stop, self._reactor.crash
+ def run_function():
+ d = defer.maybeDeferred(function, *args, **kwargs)
+ d.addCallbacks(self._got_success, self._got_failure)
+ d.addBoth(self._stop_reactor)
+ try:
+ self._reactor.callWhenRunning(run_function)
+ self._reactor.run()
+ finally:
+ self._reactor.stop = stop
+ self._restore_signals()
+ try:
+ return self._get_result()
+ finally:
+ self._clean()
+ finally:
+ debug.restore()
diff --git a/lib/testtools/testtools/compat.py b/lib/testtools/testtools/compat.py
new file mode 100644
index 0000000000..ecbfb42d9a
--- /dev/null
+++ b/lib/testtools/testtools/compat.py
@@ -0,0 +1,279 @@
+# Copyright (c) 2008-2010 testtools developers. See LICENSE for details.
+
+"""Compatibility support for python 2 and 3."""
+
+
+import codecs
+import linecache
+import locale
+import os
+import re
+import sys
+import traceback
+
+__metaclass__ = type
+__all__ = [
+ '_b',
+ '_u',
+ 'advance_iterator',
+ 'str_is_unicode',
+ 'unicode_output_stream',
+ ]
+
+
+__u_doc = """A function version of the 'u' prefix.
+
+This is needed because the u prefix is not usable in Python 3 but is required
+in Python 2 to get a unicode object.
+
+To migrate code that was written as u'\u1234' in Python 2 to 2+3 change
+it to be _u('\u1234'). The Python 3 interpreter will decode it
+appropriately and the no-op _u for Python 3 lets it through, in Python
+2 we then call unicode-escape in the _u function.
+"""
+
+if sys.version_info > (3, 0):
+ def _u(s):
+ return s
+ _r = ascii
+ def _b(s):
+ """A byte literal."""
+ return s.encode("latin-1")
+ advance_iterator = next
+ def istext(x):
+ return isinstance(x, str)
+ def classtypes():
+ return (type,)
+ str_is_unicode = True
+else:
+ def _u(s):
+ # The double replace mangling going on prepares the string for
+ # unicode-escape - \foo is preserved, \u and \U are decoded.
+ return (s.replace("\\", "\\\\").replace("\\\\u", "\\u")
+ .replace("\\\\U", "\\U").decode("unicode-escape"))
+ _r = repr
+ def _b(s):
+ return s
+ advance_iterator = lambda it: it.next()
+ def istext(x):
+ return isinstance(x, basestring)
+ def classtypes():
+ import types
+ return (type, types.ClassType)
+ str_is_unicode = sys.platform == "cli"
+
+_u.__doc__ = __u_doc
+
+
+if sys.version_info > (2, 5):
+ all = all
+ _error_repr = BaseException.__repr__
+ def isbaseexception(exception):
+ """Return whether exception inherits from BaseException only"""
+ return (isinstance(exception, BaseException)
+ and not isinstance(exception, Exception))
+else:
+ def all(iterable):
+ """If contents of iterable all evaluate as boolean True"""
+ for obj in iterable:
+ if not obj:
+ return False
+ return True
+ def _error_repr(exception):
+ """Format an exception instance as Python 2.5 and later do"""
+ return exception.__class__.__name__ + repr(exception.args)
+ def isbaseexception(exception):
+ """Return whether exception would inherit from BaseException only
+
+ This approximates the hierarchy in Python 2.5 and later, compare the
+ difference between the diagrams at the bottom of the pages:
+ <http://docs.python.org/release/2.4.4/lib/module-exceptions.html>
+ <http://docs.python.org/release/2.5.4/lib/module-exceptions.html>
+ """
+ return isinstance(exception, (KeyboardInterrupt, SystemExit))
+
+
+def unicode_output_stream(stream):
+ """Get wrapper for given stream that writes any unicode without exception
+
+ Characters that can't be coerced to the encoding of the stream, or 'ascii'
+ if valid encoding is not found, will be replaced. The original stream may
+ be returned in situations where a wrapper is determined unneeded.
+
+ The wrapper only allows unicode to be written, not non-ascii bytestrings,
+ which is a good thing to ensure sanity and sanitation.
+ """
+ if sys.platform == "cli":
+ # Best to never encode before writing in IronPython
+ return stream
+ try:
+ writer = codecs.getwriter(stream.encoding or "")
+ except (AttributeError, LookupError):
+ # GZ 2010-06-16: Python 3 StringIO ends up here, but probably needs
+ # different handling as it doesn't want bytestrings
+ return codecs.getwriter("ascii")(stream, "replace")
+ if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
+ # The current stream has a unicode encoding so no error handler is needed
+ return stream
+ if sys.version_info > (3, 0):
+ # Python 3 doesn't seem to make this easy, handle a common case
+ try:
+ return stream.__class__(stream.buffer, stream.encoding, "replace",
+ stream.newlines, stream.line_buffering)
+ except AttributeError:
+ pass
+ return writer(stream, "replace")
+
+
+# The default source encoding is actually "iso-8859-1" until Python 2.5 but
+# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to
+# treat all versions the same way
+_default_source_encoding = "ascii"
+
+# Pattern specified in <http://www.python.org/dev/peps/pep-0263/>
+_cookie_search=re.compile("coding[:=]\s*([-\w.]+)").search
+
+def _detect_encoding(lines):
+ """Get the encoding of a Python source file from a list of lines as bytes
+
+ This function does less than tokenize.detect_encoding added in Python 3 as
+ it does not attempt to raise a SyntaxError when the interpreter would, it
+ just wants the encoding of a source file Python has already compiled and
+ determined is valid.
+ """
+ if not lines:
+ return _default_source_encoding
+ if lines[0].startswith("\xef\xbb\xbf"):
+ # Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError
+ return "utf-8"
+ # Only the first two lines of the source file are examined
+ magic = _cookie_search("".join(lines[:2]))
+ if magic is None:
+ return _default_source_encoding
+ encoding = magic.group(1)
+ try:
+ codecs.lookup(encoding)
+ except LookupError:
+ # Some codecs raise something other than LookupError if they don't
+ # support the given error handler, but not the text ones that could
+ # actually be used for Python source code
+ return _default_source_encoding
+ return encoding
+
+
+class _EncodingTuple(tuple):
+ """A tuple type that can have an encoding attribute smuggled on"""
+
+
+def _get_source_encoding(filename):
+ """Detect, cache and return the encoding of Python source at filename"""
+ try:
+ return linecache.cache[filename].encoding
+ except (AttributeError, KeyError):
+ encoding = _detect_encoding(linecache.getlines(filename))
+ if filename in linecache.cache:
+ newtuple = _EncodingTuple(linecache.cache[filename])
+ newtuple.encoding = encoding
+ linecache.cache[filename] = newtuple
+ return encoding
+
+
+def _get_exception_encoding():
+ """Return the encoding we expect messages from the OS to be encoded in"""
+ if os.name == "nt":
+ # GZ 2010-05-24: Really want the codepage number instead, the error
+ # handling of standard codecs is more deterministic
+ return "mbcs"
+ # GZ 2010-05-23: We need this call to be after initialisation, but there's
+ # no benefit in asking more than once as it's a global
+ # setting that can change after the message is formatted.
+ return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii"
+
+
+def _exception_to_text(evalue):
+ """Try hard to get a sensible text value out of an exception instance"""
+ try:
+ return unicode(evalue)
+ except KeyboardInterrupt:
+ raise
+ except:
+ # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
+ pass
+ try:
+ return str(evalue).decode(_get_exception_encoding(), "replace")
+ except KeyboardInterrupt:
+ raise
+ except:
+ # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
+ pass
+ # Okay, out of ideas, let higher level handle it
+ return None
+
+
+# GZ 2010-05-23: This function is huge and horrible and I welcome suggestions
+# on the best way to break it up
+_TB_HEADER = _u('Traceback (most recent call last):\n')
+def _format_exc_info(eclass, evalue, tb, limit=None):
+ """Format a stack trace and the exception information as unicode
+
+ Compatibility function for Python 2 which ensures each component of a
+ traceback is correctly decoded according to its origins.
+
+ Based on traceback.format_exception and related functions.
+ """
+ fs_enc = sys.getfilesystemencoding()
+ if tb:
+ list = [_TB_HEADER]
+ extracted_list = []
+ for filename, lineno, name, line in traceback.extract_tb(tb, limit):
+ extracted_list.append((
+ filename.decode(fs_enc, "replace"),
+ lineno,
+ name.decode("ascii", "replace"),
+ line and line.decode(
+ _get_source_encoding(filename), "replace")))
+ list.extend(traceback.format_list(extracted_list))
+ else:
+ list = []
+ if evalue is None:
+ # Is a (deprecated) string exception
+ list.append((eclass + "\n").decode("ascii", "replace"))
+ return list
+ if isinstance(evalue, SyntaxError):
+ # Avoid duplicating the special formatting for SyntaxError here,
+ # instead create a new instance with unicode filename and line
+ # Potentially gives duff spacing, but that's a pre-existing issue
+ try:
+ msg, (filename, lineno, offset, line) = evalue
+ except (TypeError, ValueError):
+ pass # Strange exception instance, fall through to generic code
+ else:
+ # Errors during parsing give the line from buffer encoded as
+ # latin-1 or utf-8 or the encoding of the file depending on the
+ # coding and whether the patch for issue #1031213 is applied, so
+ # give up on trying to decode it and just read the file again
+ if line:
+ bytestr = linecache.getline(filename, lineno)
+ if bytestr:
+ if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"):
+ bytestr = bytestr[3:]
+ line = bytestr.decode(
+ _get_source_encoding(filename), "replace")
+ del linecache.cache[filename]
+ else:
+ line = line.decode("ascii", "replace")
+ if filename:
+ filename = filename.decode(fs_enc, "replace")
+ evalue = eclass(msg, (filename, lineno, offset, line))
+ list.extend(traceback.format_exception_only(eclass, evalue))
+ return list
+ sclass = eclass.__name__
+ svalue = _exception_to_text(evalue)
+ if svalue:
+ list.append("%s: %s\n" % (sclass, svalue))
+ elif svalue is None:
+ # GZ 2010-05-24: Not a great fallback message, but keep for the moment
+ list.append("%s: <unprintable %s object>\n" % (sclass, sclass))
+ else:
+ list.append("%s\n" % sclass)
+ return list
diff --git a/lib/testtools/testtools/content.py b/lib/testtools/testtools/content.py
new file mode 100644
index 0000000000..86df09fc6e
--- /dev/null
+++ b/lib/testtools/testtools/content.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""Content - a MIME-like Content object."""
+
+import codecs
+
+from testtools.compat import _b
+from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.testresult import TestResult
+
+
+_join_b = _b("").join
+
+
+class Content(object):
+ """A MIME-like Content object.
+
+ Content objects can be serialised to bytes using the iter_bytes method.
+ If the Content-Type is recognised by other code, they are welcome to
+ look for richer contents that mere byte serialisation - for example in
+ memory object graphs etc. However, such code MUST be prepared to receive
+ a generic Content object that has been reconstructed from a byte stream.
+
+ :ivar content_type: The content type of this Content.
+ """
+
+ def __init__(self, content_type, get_bytes):
+ """Create a ContentType."""
+ if None in (content_type, get_bytes):
+ raise ValueError("None not permitted in %r, %r" % (
+ content_type, get_bytes))
+ self.content_type = content_type
+ self._get_bytes = get_bytes
+
+ def __eq__(self, other):
+ return (self.content_type == other.content_type and
+ _join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))
+
+ def iter_bytes(self):
+ """Iterate over bytestrings of the serialised content."""
+ return self._get_bytes()
+
+ def iter_text(self):
+ """Iterate over the text of the serialised content.
+
+ This is only valid for text MIME types, and will use ISO-8859-1 if
+ no charset parameter is present in the MIME type. (This is somewhat
+ arbitrary, but consistent with RFC2617 3.7.1).
+
+ :raises ValueError: If the content type is not text/\*.
+ """
+ if self.content_type.type != "text":
+ raise ValueError("Not a text type %r" % self.content_type)
+ return self._iter_text()
+
+ def _iter_text(self):
+ """Worker for iter_text - does the decoding."""
+ encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
+ try:
+ # 2.5+
+ decoder = codecs.getincrementaldecoder(encoding)()
+ for bytes in self.iter_bytes():
+ yield decoder.decode(bytes)
+ final = decoder.decode(_b(''), True)
+ if final:
+ yield final
+ except AttributeError:
+ # < 2.5
+ bytes = ''.join(self.iter_bytes())
+ yield bytes.decode(encoding)
+
+ def __repr__(self):
+ return "<Content type=%r, value=%r>" % (
+ self.content_type, _join_b(self.iter_bytes()))
+
+
+class TracebackContent(Content):
+ """Content object for tracebacks.
+
+ This adapts an exc_info tuple to the Content interface.
+ text/x-traceback;language=python is used for the mime type, in order to
+ provide room for other languages to format their tracebacks differently.
+ """
+
+ def __init__(self, err, test):
+ """Create a TracebackContent for err."""
+ if err is None:
+ raise ValueError("err may not be None")
+ content_type = ContentType('text', 'x-traceback',
+ {"language": "python", "charset": "utf8"})
+ self._result = TestResult()
+ value = self._result._exc_info_to_unicode(err, test)
+ super(TracebackContent, self).__init__(
+ content_type, lambda: [value.encode("utf8")])
+
+
+def text_content(text):
+ """Create a `Content` object from some text.
+
+ This is useful for adding details which are short strings.
+ """
+ return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
diff --git a/lib/testtools/testtools/content_type.py b/lib/testtools/testtools/content_type.py
new file mode 100644
index 0000000000..a936506e48
--- /dev/null
+++ b/lib/testtools/testtools/content_type.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""ContentType - a MIME Content Type."""
+
+
+class ContentType(object):
+ """A content type from http://www.iana.org/assignments/media-types/
+
+ :ivar type: The primary type, e.g. "text" or "application"
+ :ivar subtype: The subtype, e.g. "plain" or "octet-stream"
+ :ivar parameters: A dict of additional parameters specific to the
+ content type.
+ """
+
+ def __init__(self, primary_type, sub_type, parameters=None):
+ """Create a ContentType."""
+ if None in (primary_type, sub_type):
+ raise ValueError("None not permitted in %r, %r" % (
+ primary_type, sub_type))
+ self.type = primary_type
+ self.subtype = sub_type
+ self.parameters = parameters or {}
+
+ def __eq__(self, other):
+ if type(other) != ContentType:
+ return False
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ return "%s/%s params=%s" % (self.type, self.subtype, self.parameters)
+
+
+UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'})
diff --git a/lib/testtools/testtools/deferredruntest.py b/lib/testtools/testtools/deferredruntest.py
new file mode 100644
index 0000000000..50153bee4f
--- /dev/null
+++ b/lib/testtools/testtools/deferredruntest.py
@@ -0,0 +1,336 @@
+# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Individual test case execution for tests that return Deferreds.
+
+This module is highly experimental and is liable to change in ways that cause
+subtle failures in tests. Use at your own peril.
+"""
+
+__all__ = [
+ 'assert_fails_with',
+ 'AsynchronousDeferredRunTest',
+ 'AsynchronousDeferredRunTestForBrokenTwisted',
+ 'SynchronousDeferredRunTest',
+ ]
+
+import sys
+
+from testtools import try_imports
+from testtools.content import (
+ Content,
+ text_content,
+ )
+from testtools.content_type import UTF8_TEXT
+from testtools.runtest import RunTest
+from testtools._spinner import (
+ extract_result,
+ NoResultError,
+ Spinner,
+ TimeoutError,
+ trap_unhandled_errors,
+ )
+
+from twisted.internet import defer
+from twisted.python import log
+from twisted.trial.unittest import _LogObserver
+
+StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
+
+
+class _DeferredRunTest(RunTest):
+ """Base for tests that return Deferreds."""
+
+ def _got_user_failure(self, failure, tb_label='traceback'):
+ """We got a failure from user code."""
+ return self._got_user_exception(
+ (failure.type, failure.value, failure.getTracebackObject()),
+ tb_label=tb_label)
+
+
+class SynchronousDeferredRunTest(_DeferredRunTest):
+ """Runner for tests that return synchronous Deferreds."""
+
+ def _run_user(self, function, *args):
+ d = defer.maybeDeferred(function, *args)
+ d.addErrback(self._got_user_failure)
+ result = extract_result(d)
+ return result
+
+
+def run_with_log_observers(observers, function, *args, **kwargs):
+ """Run 'function' with the given Twisted log observers."""
+ real_observers = log.theLogPublisher.observers
+ for observer in real_observers:
+ log.theLogPublisher.removeObserver(observer)
+ for observer in observers:
+ log.theLogPublisher.addObserver(observer)
+ try:
+ return function(*args, **kwargs)
+ finally:
+ for observer in observers:
+ log.theLogPublisher.removeObserver(observer)
+ for observer in real_observers:
+ log.theLogPublisher.addObserver(observer)
+
+
+# Observer of the Twisted log that we install during tests.
+_log_observer = _LogObserver()
+
+
+
+class AsynchronousDeferredRunTest(_DeferredRunTest):
+ """Runner for tests that return Deferreds that fire asynchronously.
+
+ That is, this test runner assumes that the Deferreds will only fire if the
+ reactor is left to spin for a while.
+
+ Do not rely too heavily on the nuances of the behaviour of this class.
+ What it does to the reactor is black magic, and if we can find nicer ways
+ of doing it we will gladly break backwards compatibility.
+
+ This is highly experimental code. Use at your own risk.
+ """
+
+ def __init__(self, case, handlers=None, reactor=None, timeout=0.005,
+ debug=False):
+ """Construct an `AsynchronousDeferredRunTest`.
+
+ :param case: The `testtools.TestCase` to run.
+ :param handlers: A list of exception handlers (ExceptionType, handler)
+ where 'handler' is a callable that takes a `TestCase`, a
+ `TestResult` and the exception raised.
+ :param reactor: The Twisted reactor to use. If not given, we use the
+ default reactor.
+ :param timeout: The maximum time allowed for running a test. The
+ default is 0.005s.
+ :param debug: Whether or not to enable Twisted's debugging. Use this
+ to get information about unhandled Deferreds and left-over
+ DelayedCalls. Defaults to False.
+ """
+ super(AsynchronousDeferredRunTest, self).__init__(case, handlers)
+ if reactor is None:
+ from twisted.internet import reactor
+ self._reactor = reactor
+ self._timeout = timeout
+ self._debug = debug
+
+ @classmethod
+ def make_factory(cls, reactor=None, timeout=0.005, debug=False):
+ """Make a factory that conforms to the RunTest factory interface."""
+ # This is horrible, but it means that the return value of the method
+ # will be able to be assigned to a class variable *and* also be
+ # invoked directly.
+ class AsynchronousDeferredRunTestFactory:
+ def __call__(self, case, handlers=None):
+ return cls(case, handlers, reactor, timeout, debug)
+ return AsynchronousDeferredRunTestFactory()
+
+ @defer.deferredGenerator
+ def _run_cleanups(self):
+ """Run the cleanups on the test case.
+
+ We expect that the cleanups on the test case can also return
+ asynchronous Deferreds. As such, we take the responsibility for
+ running the cleanups, rather than letting TestCase do it.
+ """
+ while self.case._cleanups:
+ f, args, kwargs = self.case._cleanups.pop()
+ d = defer.maybeDeferred(f, *args, **kwargs)
+ thing = defer.waitForDeferred(d)
+ yield thing
+ try:
+ thing.getResult()
+ except Exception:
+ exc_info = sys.exc_info()
+ self.case._report_traceback(exc_info)
+ last_exception = exc_info[1]
+ yield last_exception
+
+ def _make_spinner(self):
+ """Make the `Spinner` to be used to run the tests."""
+ return Spinner(self._reactor, debug=self._debug)
+
+ def _run_deferred(self):
+ """Run the test, assuming everything in it is Deferred-returning.
+
+ This should return a Deferred that fires with True if the test was
+ successful and False if the test was not successful. It should *not*
+ call addSuccess on the result, because there's reactor clean up that
+ we needs to be done afterwards.
+ """
+ fails = []
+
+ def fail_if_exception_caught(exception_caught):
+ if self.exception_caught == exception_caught:
+ fails.append(None)
+
+ def clean_up(ignored=None):
+ """Run the cleanups."""
+ d = self._run_cleanups()
+ def clean_up_done(result):
+ if result is not None:
+ self._exceptions.append(result)
+ fails.append(None)
+ return d.addCallback(clean_up_done)
+
+ def set_up_done(exception_caught):
+ """Set up is done, either clean up or run the test."""
+ if self.exception_caught == exception_caught:
+ fails.append(None)
+ return clean_up()
+ else:
+ d = self._run_user(self.case._run_test_method, self.result)
+ d.addCallback(fail_if_exception_caught)
+ d.addBoth(tear_down)
+ return d
+
+ def tear_down(ignored):
+ d = self._run_user(self.case._run_teardown, self.result)
+ d.addCallback(fail_if_exception_caught)
+ d.addBoth(clean_up)
+ return d
+
+ d = self._run_user(self.case._run_setup, self.result)
+ d.addCallback(set_up_done)
+ d.addBoth(lambda ignored: len(fails) == 0)
+ return d
+
+ def _log_user_exception(self, e):
+ """Raise 'e' and report it as a user exception."""
+ try:
+ raise e
+ except e.__class__:
+ self._got_user_exception(sys.exc_info())
+
+ def _blocking_run_deferred(self, spinner):
+ try:
+ return trap_unhandled_errors(
+ spinner.run, self._timeout, self._run_deferred)
+ except NoResultError:
+ # We didn't get a result at all! This could be for any number of
+ # reasons, but most likely someone hit Ctrl-C during the test.
+ raise KeyboardInterrupt
+ except TimeoutError:
+ # The function took too long to run.
+ self._log_user_exception(TimeoutError(self.case, self._timeout))
+ return False, []
+
+ def _run_core(self):
+ # Add an observer to trap all logged errors.
+ error_observer = _log_observer
+ full_log = StringIO()
+ full_observer = log.FileLogObserver(full_log)
+ spinner = self._make_spinner()
+ successful, unhandled = run_with_log_observers(
+ [error_observer.gotEvent, full_observer.emit],
+ self._blocking_run_deferred, spinner)
+
+ self.case.addDetail(
+ 'twisted-log', Content(UTF8_TEXT, full_log.readlines))
+
+ logged_errors = error_observer.flushErrors()
+ for logged_error in logged_errors:
+ successful = False
+ self._got_user_failure(logged_error, tb_label='logged-error')
+
+ if unhandled:
+ successful = False
+ for debug_info in unhandled:
+ f = debug_info.failResult
+ info = debug_info._getDebugTracebacks()
+ if info:
+ self.case.addDetail(
+ 'unhandled-error-in-deferred-debug',
+ text_content(info))
+ self._got_user_failure(f, 'unhandled-error-in-deferred')
+
+ junk = spinner.clear_junk()
+ if junk:
+ successful = False
+ self._log_user_exception(UncleanReactorError(junk))
+
+ if successful:
+ self.result.addSuccess(self.case, details=self.case.getDetails())
+
+ def _run_user(self, function, *args):
+ """Run a user-supplied function.
+
+ This just makes sure that it returns a Deferred, regardless of how the
+ user wrote it.
+ """
+ d = defer.maybeDeferred(function, *args)
+ return d.addErrback(self._got_user_failure)
+
+
+class AsynchronousDeferredRunTestForBrokenTwisted(AsynchronousDeferredRunTest):
+ """Test runner that works around Twisted brokenness re reactor junk.
+
+ There are many APIs within Twisted itself where a Deferred fires but
+ leaves cleanup work scheduled for the reactor to do. Arguably, many of
+ these are bugs. This runner iterates the reactor event loop a number of
+ times after every test, in order to shake out these buggy-but-commonplace
+ events.
+ """
+
+ def _make_spinner(self):
+ spinner = super(
+ AsynchronousDeferredRunTestForBrokenTwisted, self)._make_spinner()
+ spinner._OBLIGATORY_REACTOR_ITERATIONS = 2
+ return spinner
+
+
+def assert_fails_with(d, *exc_types, **kwargs):
+ """Assert that 'd' will fail with one of 'exc_types'.
+
+ The normal way to use this is to return the result of 'assert_fails_with'
+ from your unit test.
+
+ Note that this function is experimental and unstable. Use at your own
+ peril; expect the API to change.
+
+ :param d: A Deferred that is expected to fail.
+ :param *exc_types: The exception types that the Deferred is expected to
+ fail with.
+ :param failureException: An optional keyword argument. If provided, will
+ raise that exception instead of `testtools.TestCase.failureException`.
+ :return: A Deferred that will fail with an `AssertionError` if 'd' does
+ not fail with one of the exception types.
+ """
+ failureException = kwargs.pop('failureException', None)
+ if failureException is None:
+ # Avoid circular imports.
+ from testtools import TestCase
+ failureException = TestCase.failureException
+ expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types)
+ def got_success(result):
+ raise failureException(
+ "%s not raised (%r returned)" % (expected_names, result))
+ def got_failure(failure):
+ if failure.check(*exc_types):
+ return failure.value
+ raise failureException("%s raised instead of %s:\n %s" % (
+ failure.type.__name__, expected_names, failure.getTraceback()))
+ return d.addCallbacks(got_success, got_failure)
+
+
+def flush_logged_errors(*error_types):
+ return _log_observer.flushErrors(*error_types)
+
+
+class UncleanReactorError(Exception):
+ """Raised when the reactor has junk in it."""
+
+ def __init__(self, junk):
+ Exception.__init__(self,
+ "The reactor still thinks it needs to do things. Close all "
+ "connections, kill all processes and make sure all delayed "
+ "calls have either fired or been cancelled:\n%s"
+ % ''.join(map(self._get_junk_info, junk)))
+
+ def _get_junk_info(self, junk):
+ from twisted.internet.base import DelayedCall
+ if isinstance(junk, DelayedCall):
+ ret = str(junk)
+ else:
+ ret = repr(junk)
+ return ' %s\n' % (ret,)
diff --git a/lib/testtools/testtools/helpers.py b/lib/testtools/testtools/helpers.py
new file mode 100644
index 0000000000..0f489c73f6
--- /dev/null
+++ b/lib/testtools/testtools/helpers.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
+
+__all__ = [
+ 'try_import',
+ 'try_imports',
+ ]
+
+
+def try_import(name, alternative=None):
+ """Attempt to import `name`. If it fails, return `alternative`.
+
+ When supporting multiple versions of Python or optional dependencies, it
+ is useful to be able to try to import a module.
+
+ :param name: The name of the object to import, e.g. 'os.path' or
+ 'os.path.join'.
+ :param alternative: The value to return if no module can be imported.
+ Defaults to None.
+ """
+ module_segments = name.split('.')
+ while module_segments:
+ module_name = '.'.join(module_segments)
+ try:
+ module = __import__(module_name)
+ except ImportError:
+ module_segments.pop()
+ continue
+ else:
+ break
+ else:
+ return alternative
+ nonexistent = object()
+ for segment in name.split('.')[1:]:
+ module = getattr(module, segment, nonexistent)
+ if module is nonexistent:
+ return alternative
+ return module
+
+
+_RAISE_EXCEPTION = object()
+def try_imports(module_names, alternative=_RAISE_EXCEPTION):
+ """Attempt to import modules.
+
+ Tries to import the first module in `module_names`. If it can be
+ imported, we return it. If not, we go on to the second module and try
+ that. The process continues until we run out of modules to try. If none
+ of the modules can be imported, either raise an exception or return the
+ provided `alternative` value.
+
+ :param module_names: A sequence of module names to try to import.
+ :param alternative: The value to return if no module can be imported.
+ If unspecified, we raise an ImportError.
+ :raises ImportError: If none of the modules can be imported and no
+ alternative value was specified.
+ """
+ module_names = list(module_names)
+ for module_name in module_names:
+ module = try_import(module_name)
+ if module:
+ return module
+ if alternative is _RAISE_EXCEPTION:
+ raise ImportError(
+ "Could not import any of: %s" % ', '.join(module_names))
+ return alternative
diff --git a/lib/testtools/testtools/matchers.py b/lib/testtools/testtools/matchers.py
new file mode 100644
index 0000000000..06b348c6d9
--- /dev/null
+++ b/lib/testtools/testtools/matchers.py
@@ -0,0 +1,530 @@
+# Copyright (c) 2009-2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Matchers, a way to express complex assertions outside the testcase.
+
+Inspired by 'hamcrest'.
+
+Matcher provides the abstract API that all matchers need to implement.
+
+Bundled matchers are listed in __all__: a list can be obtained by running
+$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
+"""
+
+__metaclass__ = type
+__all__ = [
+ 'Annotate',
+ 'DocTestMatches',
+ 'Equals',
+ 'Is',
+ 'LessThan',
+ 'MatchesAll',
+ 'MatchesAny',
+ 'MatchesException',
+ 'NotEquals',
+ 'Not',
+ 'Raises',
+ 'raises',
+ 'StartsWith',
+ ]
+
+import doctest
+import operator
+from pprint import pformat
+import sys
+
+from testtools.compat import classtypes, _error_repr, isbaseexception
+
+
+class Matcher(object):
+ """A pattern matcher.
+
+ A Matcher must implement match and __str__ to be used by
+ testtools.TestCase.assertThat. Matcher.match(thing) returns None when
+ thing is completely matched, and a Mismatch object otherwise.
+
+ Matchers can be useful outside of test cases, as they are simply a
+ pattern matching language expressed as objects.
+
+ testtools.matchers is inspired by hamcrest, but is pythonic rather than
+ a Java transcription.
+ """
+
+ def match(self, something):
+ """Return None if this matcher matches something, a Mismatch otherwise.
+ """
+ raise NotImplementedError(self.match)
+
+ def __str__(self):
+ """Get a sensible human representation of the matcher.
+
+ This should include the parameters given to the matcher and any
+ state that would affect the matches operation.
+ """
+ raise NotImplementedError(self.__str__)
+
+
+class Mismatch(object):
+ """An object describing a mismatch detected by a Matcher."""
+
+ def __init__(self, description=None, details=None):
+ """Construct a `Mismatch`.
+
+ :param description: A description to use. If not provided,
+ `Mismatch.describe` must be implemented.
+ :param details: Extra details about the mismatch. Defaults
+ to the empty dict.
+ """
+ if description:
+ self._description = description
+ if details is None:
+ details = {}
+ self._details = details
+
+ def describe(self):
+ """Describe the mismatch.
+
+ This should be either a human-readable string or castable to a string.
+ """
+ try:
+ return self._description
+ except AttributeError:
+ raise NotImplementedError(self.describe)
+
+ def get_details(self):
+ """Get extra details about the mismatch.
+
+ This allows the mismatch to provide extra information beyond the basic
+ description, including large text or binary files, or debugging internals
+ without having to force it to fit in the output of 'describe'.
+
+ The testtools assertion assertThat will query get_details and attach
+ all its values to the test, permitting them to be reported in whatever
+ manner the test environment chooses.
+
+ :return: a dict mapping names to Content objects. name is a string to
+ name the detail, and the Content object is the detail to add
+ to the result. For more information see the API to which items from
+ this dict are passed testtools.TestCase.addDetail.
+ """
+ return getattr(self, '_details', {})
+
+ def __repr__(self):
+ return "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
+ id(self), self.__dict__)
+
+
+class DocTestMatches(object):
+ """See if a string matches a doctest example."""
+
+ def __init__(self, example, flags=0):
+ """Create a DocTestMatches to match example.
+
+ :param example: The example to match e.g. 'foo bar baz'
+ :param flags: doctest comparison flags to match on. e.g.
+ doctest.ELLIPSIS.
+ """
+ if not example.endswith('\n'):
+ example += '\n'
+ self.want = example # required variable name by doctest.
+ self.flags = flags
+ self._checker = doctest.OutputChecker()
+
+ def __str__(self):
+ if self.flags:
+ flagstr = ", flags=%d" % self.flags
+ else:
+ flagstr = ""
+ return 'DocTestMatches(%r%s)' % (self.want, flagstr)
+
+ def _with_nl(self, actual):
+ result = str(actual)
+ if not result.endswith('\n'):
+ result += '\n'
+ return result
+
+ def match(self, actual):
+ with_nl = self._with_nl(actual)
+ if self._checker.check_output(self.want, with_nl, self.flags):
+ return None
+ return DocTestMismatch(self, with_nl)
+
+ def _describe_difference(self, with_nl):
+ return self._checker.output_difference(self, with_nl, self.flags)
+
+
+class DocTestMismatch(Mismatch):
+ """Mismatch object for DocTestMatches."""
+
+ def __init__(self, matcher, with_nl):
+ self.matcher = matcher
+ self.with_nl = with_nl
+
+ def describe(self):
+ return self.matcher._describe_difference(self.with_nl)
+
+
+class DoesNotStartWith(Mismatch):
+
+ def __init__(self, matchee, expected):
+ """Create a DoesNotStartWith Mismatch.
+
+ :param matchee: the string that did not match.
+ :param expected: the string that `matchee` was expected to start
+ with.
+ """
+ self.matchee = matchee
+ self.expected = expected
+
+ def describe(self):
+ return "'%s' does not start with '%s'." % (
+ self.matchee, self.expected)
+
+
+class DoesNotEndWith(Mismatch):
+
+ def __init__(self, matchee, expected):
+ """Create a DoesNotEndWith Mismatch.
+
+ :param matchee: the string that did not match.
+ :param expected: the string that `matchee` was expected to end with.
+ """
+ self.matchee = matchee
+ self.expected = expected
+
+ def describe(self):
+ return "'%s' does not end with '%s'." % (
+ self.matchee, self.expected)
+
+
+class _BinaryComparison(object):
+ """Matcher that compares an object to another object."""
+
+ def __init__(self, expected):
+ self.expected = expected
+
+ def __str__(self):
+ return "%s(%r)" % (self.__class__.__name__, self.expected)
+
+ def match(self, other):
+ if self.comparator(other, self.expected):
+ return None
+ return _BinaryMismatch(self.expected, self.mismatch_string, other)
+
+ def comparator(self, expected, other):
+ raise NotImplementedError(self.comparator)
+
+
+class _BinaryMismatch(Mismatch):
+ """Two things did not match."""
+
+ def __init__(self, expected, mismatch_string, other):
+ self.expected = expected
+ self._mismatch_string = mismatch_string
+ self.other = other
+
+ def describe(self):
+ left = repr(self.expected)
+ right = repr(self.other)
+ if len(left) + len(right) > 70:
+ return "%s:\nreference = %s\nactual = %s\n" % (
+ self._mismatch_string, pformat(self.expected),
+ pformat(self.other))
+ else:
+ return "%s %s %s" % (left, self._mismatch_string,right)
+
+
+class Equals(_BinaryComparison):
+ """Matches if the items are equal."""
+
+ comparator = operator.eq
+ mismatch_string = '!='
+
+
+class NotEquals(_BinaryComparison):
+ """Matches if the items are not equal.
+
+ In most cases, this is equivalent to `Not(Equals(foo))`. The difference
+ only matters when testing `__ne__` implementations.
+ """
+
+ comparator = operator.ne
+ mismatch_string = '=='
+
+
+class Is(_BinaryComparison):
+ """Matches if the items are identical."""
+
+ comparator = operator.is_
+ mismatch_string = 'is not'
+
+
+class LessThan(_BinaryComparison):
+ """Matches if the item is less than the matchers reference object."""
+
+ comparator = operator.__lt__
+ mismatch_string = 'is >='
+
+
+class MatchesAny(object):
+ """Matches if any of the matchers it is created with match."""
+
+ def __init__(self, *matchers):
+ self.matchers = matchers
+
+ def match(self, matchee):
+ results = []
+ for matcher in self.matchers:
+ mismatch = matcher.match(matchee)
+ if mismatch is None:
+ return None
+ results.append(mismatch)
+ return MismatchesAll(results)
+
+ def __str__(self):
+ return "MatchesAny(%s)" % ', '.join([
+ str(matcher) for matcher in self.matchers])
+
+
+class MatchesAll(object):
+ """Matches if all of the matchers it is created with match."""
+
+ def __init__(self, *matchers):
+ self.matchers = matchers
+
+ def __str__(self):
+ return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers))
+
+ def match(self, matchee):
+ results = []
+ for matcher in self.matchers:
+ mismatch = matcher.match(matchee)
+ if mismatch is not None:
+ results.append(mismatch)
+ if results:
+ return MismatchesAll(results)
+ else:
+ return None
+
+
+class MismatchesAll(Mismatch):
+ """A mismatch with many child mismatches."""
+
+ def __init__(self, mismatches):
+ self.mismatches = mismatches
+
+ def describe(self):
+ descriptions = ["Differences: ["]
+ for mismatch in self.mismatches:
+ descriptions.append(mismatch.describe())
+ descriptions.append("]")
+ return '\n'.join(descriptions)
+
+
+class Not(object):
+ """Inverts a matcher."""
+
+ def __init__(self, matcher):
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'Not(%s)' % (self.matcher,)
+
+ def match(self, other):
+ mismatch = self.matcher.match(other)
+ if mismatch is None:
+ return MatchedUnexpectedly(self.matcher, other)
+ else:
+ return None
+
+
+class MatchedUnexpectedly(Mismatch):
+ """A thing matched when it wasn't supposed to."""
+
+ def __init__(self, matcher, other):
+ self.matcher = matcher
+ self.other = other
+
+ def describe(self):
+ return "%r matches %s" % (self.other, self.matcher)
+
+
+class MatchesException(Matcher):
+ """Match an exc_info tuple against an exception instance or type."""
+
+ def __init__(self, exception):
+ """Create a MatchesException that will match exc_info's for exception.
+
+ :param exception: Either an exception instance or type.
+ If an instance is given, the type and arguments of the exception
+ are checked. If a type is given only the type of the exception is
+ checked.
+ """
+ Matcher.__init__(self)
+ self.expected = exception
+ self._is_instance = type(self.expected) not in classtypes()
+
+ def match(self, other):
+ if type(other) != tuple:
+ return Mismatch('%r is not an exc_info tuple' % other)
+ expected_class = self.expected
+ if self._is_instance:
+ expected_class = expected_class.__class__
+ if not issubclass(other[0], expected_class):
+ return Mismatch('%r is not a %r' % (other[0], expected_class))
+ if self._is_instance and other[1].args != self.expected.args:
+ return Mismatch('%s has different arguments to %s.' % (
+ _error_repr(other[1]), _error_repr(self.expected)))
+
+ def __str__(self):
+ if self._is_instance:
+ return "MatchesException(%s)" % _error_repr(self.expected)
+ return "MatchesException(%s)" % repr(self.expected)
+
+
+class StartsWith(Matcher):
+ """Checks whether one string starts with another."""
+
+ def __init__(self, expected):
+ """Create a StartsWith Matcher.
+
+ :param expected: the string that matchees should start with.
+ """
+ self.expected = expected
+
+ def __str__(self):
+ return "Starts with '%s'." % self.expected
+
+ def match(self, matchee):
+ if not matchee.startswith(self.expected):
+ return DoesNotStartWith(matchee, self.expected)
+ return None
+
+
+class EndsWith(Matcher):
+ """Checks whether one string starts with another."""
+
+ def __init__(self, expected):
+ """Create a EndsWith Matcher.
+
+ :param expected: the string that matchees should end with.
+ """
+ self.expected = expected
+
+ def __str__(self):
+ return "Ends with '%s'." % self.expected
+
+ def match(self, matchee):
+ if not matchee.endswith(self.expected):
+ return DoesNotEndWith(matchee, self.expected)
+ return None
+
+
+class KeysEqual(Matcher):
+ """Checks whether a dict has particular keys."""
+
+ def __init__(self, *expected):
+ """Create a `KeysEqual` Matcher.
+
+ :param *expected: The keys the dict is expected to have. If a dict,
+ then we use the keys of that dict, if a collection, we assume it
+ is a collection of expected keys.
+ """
+ try:
+ self.expected = expected.keys()
+ except AttributeError:
+ self.expected = list(expected)
+
+ def __str__(self):
+ return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))
+
+ def match(self, matchee):
+ expected = sorted(self.expected)
+ matched = Equals(expected).match(sorted(matchee.keys()))
+ if matched:
+ return AnnotatedMismatch(
+ 'Keys not equal',
+ _BinaryMismatch(expected, 'does not match', matchee))
+ return None
+
+
+class Annotate(object):
+ """Annotates a matcher with a descriptive string.
+
+ Mismatches are then described as '<mismatch>: <annotation>'.
+ """
+
+ def __init__(self, annotation, matcher):
+ self.annotation = annotation
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'Annotate(%r, %s)' % (self.annotation, self.matcher)
+
+ def match(self, other):
+ mismatch = self.matcher.match(other)
+ if mismatch is not None:
+ return AnnotatedMismatch(self.annotation, mismatch)
+
+
+class AnnotatedMismatch(Mismatch):
+ """A mismatch annotated with a descriptive string."""
+
+ def __init__(self, annotation, mismatch):
+ self.annotation = annotation
+ self.mismatch = mismatch
+
+ def describe(self):
+ return '%s: %s' % (self.mismatch.describe(), self.annotation)
+
+
+class Raises(Matcher):
+ """Match if the matchee raises an exception when called.
+
+ Exceptions which are not subclasses of Exception propogate out of the
+ Raises.match call unless they are explicitly matched.
+ """
+
+ def __init__(self, exception_matcher=None):
+ """Create a Raises matcher.
+
+ :param exception_matcher: Optional validator for the exception raised
+ by matchee. If supplied the exc_info tuple for the exception raised
+ is passed into that matcher. If no exception_matcher is supplied
+ then the simple fact of raising an exception is considered enough
+ to match on.
+ """
+ self.exception_matcher = exception_matcher
+
+ def match(self, matchee):
+ try:
+ result = matchee()
+ return Mismatch('%r returned %r' % (matchee, result))
+ # Catch all exceptions: Raises() should be able to match a
+ # KeyboardInterrupt or SystemExit.
+ except:
+ if self.exception_matcher:
+ mismatch = self.exception_matcher.match(sys.exc_info())
+ if not mismatch:
+ return
+ else:
+ mismatch = None
+ # The exception did not match, or no explicit matching logic was
+ # performed. If the exception is a non-user exception (that is, not
+ # a subclass of Exception on Python 2.5+) then propogate it.
+ if isbaseexception(sys.exc_info()[1]):
+ raise
+ return mismatch
+
+ def __str__(self):
+ return 'Raises()'
+
+
+def raises(exception):
+ """Make a matcher that checks that a callable raises an exception.
+
+ This is a convenience function, exactly equivalent to::
+ return Raises(MatchesException(exception))
+
+ See `Raises` and `MatchesException` for more information.
+ """
+ return Raises(MatchesException(exception))
diff --git a/lib/testtools/testtools/monkey.py b/lib/testtools/testtools/monkey.py
new file mode 100644
index 0000000000..bb24764cb7
--- /dev/null
+++ b/lib/testtools/testtools/monkey.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Helpers for monkey-patching Python code."""
+
+__all__ = [
+ 'MonkeyPatcher',
+ 'patch',
+ ]
+
+
class MonkeyPatcher(object):
    """A set of monkey-patches that can be applied and removed all together.

    Use this to cover up attributes with new objects. Particularly useful for
    testing difficult code.
    """

    # Sentinel recording that an attribute did not exist on the object
    # before we patched it (so restore() knows to delete it).
    _NO_SUCH_ATTRIBUTE = object()

    def __init__(self, *patches):
        """Construct a `MonkeyPatcher`.

        :param *patches: The patches to apply, each should be (obj, name,
            new_value). Providing patches here is equivalent to calling
            `add_patch`.
        """
        # Pending patches, each stored as an (obj, name, value) triple.
        self._patches_to_apply = []
        # Saved pre-patch state, also (obj, name, value) triples.
        self._originals = []
        for patch_spec in patches:
            self.add_patch(*patch_spec)

    def add_patch(self, obj, name, value):
        """Record a patch to overwrite 'name' on 'obj' with 'value'.

        The attribute is actually assigned when `patch` is called or during
        `run_with_patches`; a call to restore() undoes it.
        """
        self._patches_to_apply.append((obj, name, value))

    def patch(self):
        """Apply every patch that has been registered with `add_patch`.

        Reverse this operation using `restore`.
        """
        for target, attr_name, new_value in self._patches_to_apply:
            previous = getattr(target, attr_name, self._NO_SUCH_ATTRIBUTE)
            self._originals.append((target, attr_name, previous))
            setattr(target, attr_name, new_value)

    def restore(self):
        """Restore all original values to any patched objects.

        Attributes that did not exist before patching are deleted again so
        each object returns to its original state.
        """
        while self._originals:
            target, attr_name, previous = self._originals.pop()
            if previous is self._NO_SUCH_ATTRIBUTE:
                delattr(target, attr_name)
            else:
                setattr(target, attr_name, previous)

    def run_with_patches(self, f, *args, **kw):
        """Call f(*args, **kw) with all patches applied, then restore.

        All objects are returned to their original state when finished,
        even if 'f' raises.
        """
        self.patch()
        try:
            return f(*args, **kw)
        finally:
            self.restore()
+
+
def patch(obj, attribute, value):
    """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'.

    If 'attribute' is not set on 'obj' already, then the returned callable
    will delete the attribute when called.

    :param obj: An object to monkey-patch.
    :param attribute: The name of the attribute to patch.
    :param value: The value to set 'obj.attribute' to.
    :return: A nullary callable that, when run, will restore 'obj' to its
        original state.
    """
    monkey_patcher = MonkeyPatcher((obj, attribute, value))
    monkey_patcher.patch()
    return monkey_patcher.restore
diff --git a/lib/testtools/testtools/run.py b/lib/testtools/testtools/run.py
new file mode 100755
index 0000000000..272992cd05
--- /dev/null
+++ b/lib/testtools/testtools/run.py
@@ -0,0 +1,332 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""python -m testtools.run testspec [testspec...]
+
+Run some tests with the testtools extended API.
+
+For instance, to run the testtools test suite.
+ $ python -m testtools.run testtools.tests.test_suite
+"""
+
+import os
+import unittest
+import sys
+
+from testtools import TextTestResult
+from testtools.compat import classtypes, istext, unicode_output_stream
+from testtools.testsuite import iterate_tests
+
+
defaultTestLoader = unittest.defaultTestLoader
defaultTestLoaderCls = unittest.TestLoader

# Prefer the stdlib loader's discovery support (Python 2.7+).  On older
# Pythons fall back to the third-party 'discover' package if installed;
# have_discover records whether any discovery mechanism is available.
if getattr(defaultTestLoader, 'discover', None) is None:
    try:
        import discover
        defaultTestLoader = discover.DiscoveringTestLoader()
        defaultTestLoaderCls = discover.DiscoveringTestLoader
        have_discover = True
    except ImportError:
        have_discover = False
else:
    have_discover = True
+
+
class TestToolsTestRunner(object):
    """ A thunk object to support unittest.TestProgram."""

    def __init__(self, stdout):
        # Stream that the TextTestResult will write to.
        self.stdout = stdout

    def run(self, test):
        "Run the given test case or test suite."
        result = TextTestResult(unicode_output_stream(self.stdout))
        result.startTestRun()
        try:
            return test.run(result)
        finally:
            # Always close out the run, even if test.run raised.
            result.stopTestRun()
+
+
####################
# Taken from python 2.7 and slightly modified for compatibility with
# older versions. Delete when 2.7 is the oldest supported version.
# Modifications:
#  - Use have_discover to raise an error if the user tries to use
#    discovery on an old version and doesn't have discover installed.
#  - If --catch is given check that installHandler is available, as
#    it won't be on old python versions.
#  - print calls have been made single-source python3 compatible.
#  - exception handling likewise.
#  - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE
#    removed.
#  - A tweak has been added to detect 'python -m *.run' and use a
#    better progName in that case.
#  - self.module is more comprehensively set to None when being invoked from
#    the commandline - __name__ is used as a sentinel value.
#  - --list has been added which can list tests (should be upstreamed).
#  - --load-list has been added which can reduce the tests used (should be
#    upstreamed).
#  - The limitation of using getopt is declared to the user.

FAILFAST = "  -f, --failfast   Stop on first failure\n"
CATCHBREAK = "  -c, --catch      Catch control-C and display results\n"
BUFFEROUTPUT = "  -b, --buffer     Buffer stdout and stderr during test runs\n"

USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]

Options:
  -h, --help       Show this message
  -v, --verbose    Verbose output
  -q, --quiet      Minimal output
  -l, --list       List tests rather than executing them.
  --load-list      Specifies a file containing test ids, only tests matching
                   those ids are executed.
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
  %(progName)s test_module               - run tests from test_module
  %(progName)s module.TestClass          - run tests from module.TestClass
  %(progName)s module.Class.test_method  - run specified test method

All options must come before [tests].  [tests] can be a list of any number of
test modules, classes and test methods.

Alternative Usage: %(progName)s discover [options]

Options:
  -v, --verbose    Verbose output
%(failfast)s%(catchbreak)s%(buffer)s  -s directory     Directory to start discovery ('.' default)
  -p pattern       Pattern to match test files ('test*.py' default)
  -t directory     Top level directory of project (default to
                   start directory)
  -l, --list       List tests rather than executing them.
  --load-list      Specifies a file containing test ids, only tests matching
                   those ids are executed.

For test discovery all test modules must be importable from the top
level directory of the project.
"""
+
+
class TestProgram(object):
    """A command-line program that runs a set of tests; this is primarily
    for making test modules conveniently executable.
    """
    USAGE = USAGE_AS_MAIN

    # defaults for testing
    failfast = catchbreak = buffer = progName = None

    def __init__(self, module=__name__, defaultTest=None, argv=None,
                 testRunner=None, testLoader=defaultTestLoader,
                 exit=True, verbosity=1, failfast=None, catchbreak=None,
                 buffer=None, stdout=None):
        """Create the program and immediately parse args and run the tests.

        :param module: Module (object or dotted name) to load tests from.
            __name__ is used as a sentinel meaning "no module" when invoked
            from the command line.
        :param argv: Command-line arguments; defaults to sys.argv.
        :param testRunner: Runner class or instance; see runTests().
        :param stdout: Stream used for --list output; defaults to sys.stdout.
        """
        if module == __name__:
            self.module = None
        elif istext(module):
            self.module = __import__(module)
            for part in module.split('.')[1:]:
                self.module = getattr(self.module, part)
        else:
            self.module = module
        if argv is None:
            argv = sys.argv
        if stdout is None:
            stdout = sys.stdout

        self.exit = exit
        self.failfast = failfast
        self.catchbreak = catchbreak
        self.verbosity = verbosity
        self.buffer = buffer
        self.defaultTest = defaultTest
        self.listtests = False
        self.load_list = None
        self.testRunner = testRunner
        self.testLoader = testLoader
        progName = argv[0]
        # Detect 'python -m package.run' and report 'package.run' rather
        # than a bare script name.
        if progName.endswith('%srun.py' % os.path.sep):
            elements = progName.split(os.path.sep)
            progName = '%s.run' % elements[-2]
        else:
            progName = os.path.basename(argv[0])
        self.progName = progName
        self.parseArgs(argv)
        if self.load_list:
            # TODO: preserve existing suites (like testresources does in
            # OptimisingTestSuite.add, but with a standard protocol).
            # This is needed because the load_tests hook allows arbitrary
            # suites, even if that is rarely used.
            # BUGFIX: the original used the Python-2-only file() builtin in
            # binary mode; use open() in text mode so the stripped lines
            # compare equal to test.id() strings on Python 3 as well.
            source = open(self.load_list)
            try:
                lines = source.readlines()
            finally:
                source.close()
            test_ids = set(line.strip() for line in lines)
            filtered = unittest.TestSuite()
            for test in iterate_tests(self.test):
                if test.id() in test_ids:
                    filtered.addTest(test)
            self.test = filtered
        if not self.listtests:
            self.runTests()
        else:
            # --list: print test ids instead of executing anything.
            for test in iterate_tests(self.test):
                stdout.write('%s\n' % test.id())

    def usageExit(self, msg=None):
        """Print an optional message plus usage help, then exit(2)."""
        if msg:
            print(msg)
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        # Only advertise options that have not been explicitly disabled
        # (an explicit False hides the option from the help text).
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        print(self.USAGE % usage)
        sys.exit(2)

    def parseArgs(self, argv):
        """Parse command-line options and build self.test.

        'discover' as the first argument switches to discovery mode.
        Note the declared limitation: all options must precede the test
        names because getopt stops at the first positional argument.
        """
        if len(argv) > 1 and argv[1].lower() == 'discover':
            self._do_discovery(argv[2:])
            return

        import getopt
        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer',
                     'list', 'load-list=']
        try:
            options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts)
            for opt, value in options:
                if opt in ('-h', '-H', '--help'):
                    self.usageExit()
                if opt in ('-q', '--quiet'):
                    self.verbosity = 0
                if opt in ('-v', '--verbose'):
                    self.verbosity = 2
                if opt in ('-f', '--failfast'):
                    if self.failfast is None:
                        self.failfast = True
                    # Should this raise an exception if -f is not valid?
                if opt in ('-c', '--catch'):
                    if self.catchbreak is None:
                        self.catchbreak = True
                    # Should this raise an exception if -c is not valid?
                if opt in ('-b', '--buffer'):
                    if self.buffer is None:
                        self.buffer = True
                    # Should this raise an exception if -b is not valid?
                if opt in ('-l', '--list'):
                    self.listtests = True
                if opt == '--load-list':
                    self.load_list = value
            if len(args) == 0 and self.defaultTest is None:
                # createTests will load tests from self.module
                self.testNames = None
            elif len(args) > 0:
                self.testNames = args
            else:
                self.testNames = (self.defaultTest,)
            self.createTests()
        except getopt.error:
            self.usageExit(sys.exc_info()[1])

    def createTests(self):
        """Load self.test from self.module or the parsed test names."""
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames,
                                                           self.module)

    def _do_discovery(self, argv, Loader=defaultTestLoaderCls):
        """Handle command line args for test discovery.

        :raises AssertionError: if no discovery implementation is available
            (pre-2.7 Python without the 'discover' package installed).
        """
        if not have_discover:
            raise AssertionError("Unable to use discovery, must use python 2.7 "
                                 "or greater, or install the discover package.")
        self.progName = '%s discover' % self.progName
        import optparse
        parser = optparse.OptionParser()
        parser.prog = self.progName
        parser.add_option('-v', '--verbose', dest='verbose', default=False,
                          help='Verbose output', action='store_true')
        if self.failfast != False:
            parser.add_option('-f', '--failfast', dest='failfast', default=False,
                              help='Stop on first fail or error',
                              action='store_true')
        if self.catchbreak != False:
            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                              help='Catch ctrl-C and display results so far',
                              action='store_true')
        if self.buffer != False:
            parser.add_option('-b', '--buffer', dest='buffer', default=False,
                              help='Buffer stdout and stderr during tests',
                              action='store_true')
        parser.add_option('-s', '--start-directory', dest='start', default='.',
                          help="Directory to start discovery ('.' default)")
        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                          help="Pattern to match tests ('test*.py' default)")
        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                          help='Top level directory of project (defaults to start directory)')
        parser.add_option('-l', '--list', dest='listtests', default=False,
                          help='List tests rather than running them.')
        parser.add_option('--load-list', dest='load_list', default=None,
                          help='Specify a filename containing the test ids to use.')

        options, args = parser.parse_args(argv)
        if len(args) > 3:
            self.usageExit()

        # Positional arguments override -s/-p/-t in that order.
        for name, value in zip(('start', 'pattern', 'top'), args):
            setattr(options, name, value)

        # only set options from the parsing here
        # if they weren't set explicitly in the constructor
        if self.failfast is None:
            self.failfast = options.failfast
        if self.catchbreak is None:
            self.catchbreak = options.catchbreak
        if self.buffer is None:
            self.buffer = options.buffer
        self.listtests = options.listtests
        self.load_list = options.load_list

        if options.verbose:
            self.verbosity = 2

        start_dir = options.start
        pattern = options.pattern
        top_level_dir = options.top

        loader = Loader()
        self.test = loader.discover(start_dir, pattern, top_level_dir)

    def runTests(self):
        """Run self.test with the configured runner and optionally exit."""
        if (self.catchbreak
            and getattr(unittest, 'installHandler', None) is not None):
            unittest.installHandler()
        if self.testRunner is None:
            # BUGFIX: the original referenced 'runner.TextTestRunner', but no
            # 'runner' name exists in this module, so reaching this branch
            # always raised NameError.  Default to this module's runner.
            self.testRunner = TestToolsTestRunner(sys.stdout)
        if isinstance(self.testRunner, classtypes()):
            try:
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer)
            except TypeError:
                # didn't accept the verbosity, buffer or failfast arguments
                testRunner = self.testRunner()
        else:
            # it is assumed to be a TestRunner instance
            testRunner = self.testRunner
        self.result = testRunner.run(self.test)
        if self.exit:
            sys.exit(not self.result.wasSuccessful())
################

def main(argv, stdout):
    # Command-line entry point: wire a TestToolsTestRunner to a TestProgram.
    # TestProgram runs the tests (and may sys.exit) from its constructor.
    runner = TestToolsTestRunner(stdout)
    program = TestProgram(argv=argv, testRunner=runner, stdout=stdout)

if __name__ == '__main__':
    main(sys.argv, sys.stdout)
diff --git a/lib/testtools/testtools/runtest.py b/lib/testtools/testtools/runtest.py
new file mode 100644
index 0000000000..eb5801a4c6
--- /dev/null
+++ b/lib/testtools/testtools/runtest.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2009-2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Individual test case execution."""
+
+__all__ = [
+ 'MultipleExceptions',
+ 'RunTest',
+ ]
+
+import sys
+
+from testtools.testresult import ExtendedToOriginalDecorator
+
+
class MultipleExceptions(Exception):
    """Represents many exceptions raised from some operation.

    :ivar args: The sys.exc_info() tuples for each exception.
    """
+
+
class RunTest(object):
    """An object to run a test.

    RunTest objects are used to implement the internal logic involved in
    running a test. TestCase.__init__ stores _RunTest as the class of RunTest
    to execute.  Passing the runTest= parameter to TestCase.__init__ allows a
    different RunTest class to be used to execute the test.

    Subclassing or replacing RunTest can be useful to add functionality to the
    way that tests are run in a given project.

    :ivar case: The test case that is to be run.
    :ivar result: The result object a case is reporting to.
    :ivar handlers: A list of (ExceptionClass, handler_function) for
        exceptions that should be caught if raised from the user
        code. Exceptions that are caught are checked against this list in
        first to last order.  There is a catch-all of `Exception` at the end
        of the list, so to add a new exception to the list, insert it at the
        front (which ensures that it will be checked before any existing base
        classes in the list). If you add multiple exceptions some of which are
        subclasses of each other, add the most specific exceptions last (so
        they come before their parent classes in the list).
    :ivar exception_caught: An object returned when _run_user catches an
        exception.
    :ivar _exceptions: A list of caught exceptions, used to do the single
        reporting of error/failure/skip etc.
    """

    def __init__(self, case, handlers=None):
        """Create a RunTest to run a case.

        :param case: A testtools.TestCase test case object.
        :param handlers: Exception handlers for this RunTest. These are stored
            in self.handlers and can be modified later if needed.
        """
        self.case = case
        self.handlers = handlers or []
        # Unique sentinel object: _run_user returns it to mean "an exception
        # was caught", and it cannot collide with any real return value.
        self.exception_caught = object()
        self._exceptions = []

    def run(self, result=None):
        """Run self.case reporting activity to result.

        :param result: Optional testtools.TestResult to report activity to.
        :return: The result object the test was run against.
        """
        if result is None:
            # No result supplied: make a default one and bracket the run
            # with startTestRun/stopTestRun ourselves.
            actual_result = self.case.defaultTestResult()
            actual_result.startTestRun()
        else:
            actual_result = result
        try:
            return self._run_one(actual_result)
        finally:
            if result is None:
                actual_result.stopTestRun()

    def _run_one(self, result):
        """Run one test reporting to result.

        :param result: A testtools.TestResult to report activity to.
            This result object is decorated with an ExtendedToOriginalDecorator
            to ensure that the latest TestResult API can be used with
            confidence by client code.
        :return: The result object the test was run against.
        """
        return self._run_prepared_result(ExtendedToOriginalDecorator(result))

    def _run_prepared_result(self, result):
        """Run one test reporting to result.

        :param result: A testtools.TestResult to report activity to.
        :return: The result object the test was run against.
        """
        result.startTest(self.case)
        self.result = result
        try:
            self._exceptions = []
            self._run_core()
            if self._exceptions:
                # One or more caught exceptions, now trigger the test's
                # reporting method for just one.
                e = self._exceptions.pop()
                for exc_class, handler in self.handlers:
                    if isinstance(e, exc_class):
                        handler(self.case, self.result, e)
                        break
        finally:
            result.stopTest(self.case)
        return result

    def _run_core(self):
        """Run the user supplied test code."""
        if self.exception_caught == self._run_user(self.case._run_setup,
            self.result):
            # Don't run the test method if we failed getting here.
            self._run_cleanups(self.result)
            return
        # Run everything from here on in. If any of the methods raise an
        # exception we'll have failed.
        failed = False
        try:
            if self.exception_caught == self._run_user(
                self.case._run_test_method, self.result):
                failed = True
        finally:
            # Teardown and cleanups always run, even after a test failure;
            # the nested try/finally chain ensures each stage executes.
            try:
                if self.exception_caught == self._run_user(
                    self.case._run_teardown, self.result):
                    failed = True
            finally:
                try:
                    if self.exception_caught == self._run_user(
                        self._run_cleanups, self.result):
                        failed = True
                finally:
                    if not failed:
                        self.result.addSuccess(self.case,
                            details=self.case.getDetails())

    def _run_cleanups(self, result):
        """Run the cleanups that have been added with addCleanup.

        See the docstring for addCleanup for more information.

        :return: None if all cleanups ran without error,
            `self.exception_caught` if there was an error.
        """
        failing = False
        while self.case._cleanups:
            function, arguments, keywordArguments = self.case._cleanups.pop()
            got_exception = self._run_user(
                function, *arguments, **keywordArguments)
            if got_exception == self.exception_caught:
                failing = True
        if failing:
            return self.exception_caught

    def _run_user(self, fn, *args, **kwargs):
        """Run a user supplied function.

        Exceptions are processed by `_got_user_exception`.

        :return: Either whatever 'fn' returns or `self.exception_caught` if
            'fn' raised an exception.
        """
        try:
            return fn(*args, **kwargs)
        except KeyboardInterrupt:
            raise
        except:
            return self._got_user_exception(sys.exc_info())

    def _got_user_exception(self, exc_info, tb_label='traceback'):
        """Called when user code raises an exception.

        If 'exc_info' is a `MultipleExceptions`, then we recurse into it
        unpacking the errors that it's made up from.

        :param exc_info: A sys.exc_info() tuple for the user error.
        :param tb_label: An optional string label for the error.  If
            not specified, will default to 'traceback'.
        :return: `exception_caught` if we catch one of the exceptions that
            have handlers in `self.handlers`, otherwise raise the error.
        """
        if exc_info[0] is MultipleExceptions:
            for sub_exc_info in exc_info[1].args:
                self._got_user_exception(sub_exc_info, tb_label)
            return self.exception_caught
        try:
            e = exc_info[1]
            self.case.onException(exc_info, tb_label=tb_label)
        finally:
            # Break the traceback reference cycle promptly.
            del exc_info
        for exc_class, handler in self.handlers:
            if isinstance(e, exc_class):
                self._exceptions.append(e)
                return self.exception_caught
        raise e
diff --git a/lib/testtools/testtools/testcase.py b/lib/testtools/testtools/testcase.py
new file mode 100644
index 0000000000..804684adb8
--- /dev/null
+++ b/lib/testtools/testtools/testcase.py
@@ -0,0 +1,677 @@
+# Copyright (c) 2008-2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Test case related stuff."""
+
+__metaclass__ = type
+__all__ = [
+ 'clone_test_with_new_id',
+ 'run_test_with',
+ 'skip',
+ 'skipIf',
+ 'skipUnless',
+ 'TestCase',
+ ]
+
+import copy
+import itertools
+import sys
+import types
+import unittest
+
+from testtools import (
+ content,
+ try_import,
+ )
+from testtools.compat import advance_iterator
+from testtools.matchers import (
+ Annotate,
+ Equals,
+ )
+from testtools.monkey import patch
+from testtools.runtest import RunTest
+from testtools.testresult import TestResult
+
+wraps = try_import('functools.wraps')
+
class TestSkipped(Exception):
    """Raised within TestCase.run() when a test is skipped."""
# Prefer the stdlib's SkipTest (Python 2.7+) when it is available.
TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
+
+
class _UnexpectedSuccess(Exception):
    """An unexpected success was raised.

    Note that this exception is private plumbing in testtools' testcase
    module.
    """
# Prefer the stdlib's private equivalent when it exists, for interop.
_UnexpectedSuccess = try_import(
    'unittest.case._UnexpectedSuccess', _UnexpectedSuccess)
+
class _ExpectedFailure(Exception):
    """An expected failure occurred.

    Note that this exception is private plumbing in testtools' testcase
    module.
    """
# Prefer the stdlib's private equivalent when it exists, for interop.
_ExpectedFailure = try_import(
    'unittest.case._ExpectedFailure', _ExpectedFailure)
+
+
+def run_test_with(test_runner, **kwargs):
+ """Decorate a test as using a specific `RunTest`.
+
+ e.g.
+ @run_test_with(CustomRunner, timeout=42)
+ def test_foo(self):
+ self.assertTrue(True)
+
+ The returned decorator works by setting an attribute on the decorated
+ function. `TestCase.__init__` looks for this attribute when deciding
+ on a `RunTest` factory. If you wish to use multiple decorators on a test
+ method, then you must either make this one the top-most decorator, or
+ you must write your decorators so that they update the wrapping function
+ with the attributes of the wrapped function. The latter is recommended
+ style anyway. `functools.wraps`, `functools.wrapper` and
+ `twisted.python.util.mergeFunctionMetadata` can help you do this.
+
+ :param test_runner: A `RunTest` factory that takes a test case and an
+ optional list of exception handlers. See `RunTest`.
+ :param **kwargs: Keyword arguments to pass on as extra arguments to
+ `test_runner`.
+ :return: A decorator to be used for marking a test as needing a special
+ runner.
+ """
+ def decorator(function):
+ # Set an attribute on 'function' which will inform TestCase how to
+ # make the runner.
+ function._run_test_with = (
+ lambda case, handlers=None:
+ test_runner(case, handlers=handlers, **kwargs))
+ return function
+ return decorator
+
+
class TestCase(unittest.TestCase):
    """Extensions to the basic TestCase.

    :ivar exception_handlers: Exceptions to catch from setUp, runTest and
        tearDown. This list is able to be modified at any time and consists of
        (exception_class, handler(case, result, exception_value)) pairs.
    :cvar run_tests_with: A factory to make the `RunTest` to run tests with.
        Defaults to `RunTest`. The factory is expected to take a test case
        and an optional list of exception handlers.
    """

    # Exception type raised by skipTest(); SkipTest on Python 2.7+.
    skipException = TestSkipped

    run_tests_with = RunTest
+
    def __init__(self, *args, **kwargs):
        """Construct a TestCase.

        :param testMethod: The name of the method to run.
        :param runTest: Optional class to use to execute the test. If not
            supplied `testtools.runtest.RunTest` is used. The instance to be
            used is created when run() is invoked, so will be fresh each time.
            Overrides `run_tests_with` if given.
        """
        runTest = kwargs.pop('runTest', None)
        unittest.TestCase.__init__(self, *args, **kwargs)
        self._cleanups = []
        self._unique_id_gen = itertools.count(1)
        # Generators to ensure unique traceback ids. Maps traceback label to
        # iterators.
        self._traceback_id_gens = {}
        self.__setup_called = False
        self.__teardown_called = False
        # __details is lazy-initialized so that a constructed-but-not-run
        # TestCase is safe to use with clone_test_with_new_id.
        self.__details = None
        test_method = self._get_test_method()
        # A @run_test_with decoration on the test method wins over the
        # class-level run_tests_with factory, unless runTest= was passed.
        if runTest is None:
            runTest = getattr(
                test_method, '_run_test_with', self.run_tests_with)
        self.__RunTest = runTest
        self.__exception_handlers = []
        # Checked in order by RunTest; the Exception catch-all stays last.
        self.exception_handlers = [
            (self.skipException, self._report_skip),
            (self.failureException, self._report_failure),
            (_ExpectedFailure, self._report_expected_failure),
            (_UnexpectedSuccess, self._report_unexpected_success),
            (Exception, self._report_error),
            ]
        if sys.version_info < (2, 6):
            # Catch old-style string exceptions with None as the instance
            self.exception_handlers.append((type(None), self._report_error))
+
    def __eq__(self, other):
        # Defer to the base class's __eq__ when it defines one, then
        # require identical instance state.
        # NOTE(review): assumes 'other' has a __dict__; comparing against an
        # arbitrary object may raise AttributeError — confirm intent. Also no
        # matching __hash__ is defined here.
        eq = getattr(unittest.TestCase, '__eq__', None)
        if eq is not None and not unittest.TestCase.__eq__(self, other):
            return False
        return self.__dict__ == other.__dict__
+
    def __repr__(self):
        # We add id to the repr because it makes testing testtools easier.
        return "<%s id=0x%0x>" % (self.id(), id(self))
+
    def addDetail(self, name, content_object):
        """Add a detail to be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.

        :param name: The name to give this detail.
        :param content_object: The content object for this detail. See
            testtools.content for more detail.
        """
        # Lazily create the details dict (see __init__).
        if self.__details is None:
            self.__details = {}
        self.__details[name] = content_object
+
    def getDetails(self):
        """Get the details dict that will be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.
        """
        # Lazily create the details dict (see __init__).
        if self.__details is None:
            self.__details = {}
        return self.__details
+
    def patch(self, obj, attribute, value):
        """Monkey-patch 'obj.attribute' to 'value' while the test is running.

        If 'obj' has no attribute, then the monkey-patch will still go ahead,
        and the attribute will be deleted instead of restored to its original
        value.

        :param obj: The object to patch. Can be anything.
        :param attribute: The attribute on 'obj' to patch.
        :param value: The value to set 'obj.attribute' to.
        """
        # monkey.patch returns the restore callable; run it at cleanup time.
        self.addCleanup(patch(obj, attribute, value))
+
    def shortDescription(self):
        # Use the test id rather than the first docstring line (the
        # unittest default) so output is unambiguous.
        return self.id()
+
    def skipTest(self, reason):
        """Cause this test to be skipped.

        This raises self.skipException(reason). skipException is raised
        to permit a skip to be triggered at any point (during setUp or the
        testMethod itself). The run() method catches skipException and
        translates that into a call to the result objects addSkip method.

        :param reason: The reason why the test is being skipped. This must
            support being cast into a unicode string for reporting.
        """
        raise self.skipException(reason)

    # skipTest is how python2.7 spells this. Sometime in the future
    # This should be given a deprecation decorator - RBC 20100611.
    skip = skipTest
+
+ def _formatTypes(self, classOrIterable):
+ """Format a class or a bunch of classes for display in an error."""
+ className = getattr(classOrIterable, '__name__', None)
+ if className is None:
+ className = ', '.join(klass.__name__ for klass in classOrIterable)
+ return className
+
    def addCleanup(self, function, *arguments, **keywordArguments):
        """Add a cleanup function to be called after tearDown.

        Functions added with addCleanup will be called in reverse order of
        adding after tearDown, or after setUp if setUp raises an exception.

        If a function added with addCleanup raises an exception, the error
        will be recorded as a test error, and the next cleanup will then be
        run.

        Cleanup functions are always called before a test finishes running,
        even if setUp is aborted by an exception.
        """
        # RunTest._run_cleanups pops these off in LIFO order.
        self._cleanups.append((function, arguments, keywordArguments))
+
    def addOnException(self, handler):
        """Add a handler to be called when an exception occurs in test code.

        This handler cannot affect what result methods are called, and is
        called before any outcome is called on the result object. An example
        use for it is to add some diagnostic state to the test details dict
        which is expensive to calculate and not interesting for reporting in
        the success case.

        Handlers are called before the outcome (such as addFailure) that
        the exception has caused.

        Handlers are called in first-added, first-called order, and if they
        raise an exception, that will propagate out of the test running
        machinery, halting test processing. As a result, do not call code that
        may unreasonably fail.
        """
        self.__exception_handlers.append(handler)
+
    def _add_reason(self, reason):
        # Record a 'reason' detail (e.g. for skips and expected failures)
        # as UTF-8 encoded text/plain content.
        self.addDetail('reason', content.Content(
            content.ContentType('text', 'plain'),
            lambda: [reason.encode('utf8')]))
+
    def assertEqual(self, expected, observed, message=''):
        """Assert that 'expected' is equal to 'observed'.

        :param expected: The expected value.
        :param observed: The observed value.
        :param message: An optional message to include in the error.
        """
        # Implemented via the matcher framework so failures carry details.
        matcher = Equals(expected)
        if message:
            matcher = Annotate(message, matcher)
        self.assertThat(observed, matcher)

    # Backwards-compatible spellings of assertEqual.
    failUnlessEqual = assertEquals = assertEqual
+
+ def assertIn(self, needle, haystack):
+ """Assert that needle is in haystack."""
+ self.assertTrue(
+ needle in haystack, '%r not in %r' % (needle, haystack))
+
    def assertIs(self, expected, observed, message=''):
        """Assert that 'expected' is 'observed'.

        :param expected: The expected value.
        :param observed: The observed value.
        :param message: An optional message describing the error.
        """
        if message:
            message = ': ' + message
        self.assertTrue(
            expected is observed,
            '%r is not %r%s' % (expected, observed, message))
+
    def assertIsNot(self, expected, observed, message=''):
        """Assert that 'expected' is not 'observed'."""
        if message:
            message = ': ' + message
        self.assertTrue(
            expected is not observed,
            '%r is %r%s' % (expected, observed, message))
+
+ def assertNotIn(self, needle, haystack):
+ """Assert that needle is not in haystack."""
+ self.assertTrue(
+ needle not in haystack, '%r in %r' % (needle, haystack))
+
    def assertIsInstance(self, obj, klass, msg=None):
        """Assert that obj is an instance of klass (or tuple of klasses)."""
        if msg is None:
            msg = '%r is not an instance of %s' % (
                obj, self._formatTypes(klass))
        self.assertTrue(isinstance(obj, klass), msg)
+
    def assertRaises(self, excClass, callableObj, *args, **kwargs):
        """Fail unless an exception of class excClass is thrown
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        thrown, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.
        """
        try:
            ret = callableObj(*args, **kwargs)
        except excClass:
            # Return the caught exception so callers can inspect it.
            return sys.exc_info()[1]
        else:
            excName = self._formatTypes(excClass)
            self.fail("%s not raised, %r returned instead." % (excName, ret))

    # Backwards-compatible spelling of assertRaises.
    failUnlessRaises = assertRaises
+
    def assertThat(self, matchee, matcher):
        """Assert that matchee is matched by matcher.

        :param matchee: An object to match with matcher.
        :param matcher: An object meeting the testtools.Matcher protocol.
        :raises self.failureException: When matcher does not match thing.
        """
        mismatch = matcher.match(matchee)
        if not mismatch:
            return
        existing_details = self.getDetails()
        # Copy the mismatch's details onto this test, uniquifying names that
        # already exist with a numeric suffix.
        # NOTE: the loop variable 'content' shadows the module-level
        # 'content' import within this method.
        for (name, content) in mismatch.get_details().items():
            full_name = name
            suffix = 1
            while full_name in existing_details:
                full_name = "%s-%d" % (name, suffix)
                suffix += 1
            self.addDetail(full_name, content)
        self.fail('Match failed. Matchee: "%s"\nMatcher: %s\nDifference: %s\n'
            % (matchee, matcher, mismatch.describe()))
+
+ def defaultTestResult(self):
+ return TestResult()
+
+ def expectFailure(self, reason, predicate, *args, **kwargs):
+ """Check that a test fails in a particular way.
+
+ If the test fails in the expected way, a KnownFailure is caused. If it
+ succeeds, an UnexpectedSuccess is caused.
+
+ The expected use of expectFailure is as a barrier at the point in a
+ test where the test would fail. For example:
+ >>> def test_foo(self):
+ >>> self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0)
+ >>> self.assertEqual(1, 0)
+
+ If in the future 1 were to equal 0, the expectFailure call can simply
+ be removed. This separation preserves the original intent of the test
+ while it is in the expectFailure mode.
+ """
+ self._add_reason(reason)
+ try:
+ predicate(*args, **kwargs)
+ except self.failureException:
+ # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new
+ # unittest _ExpectedFailure wants old traceback
+ exc_info = sys.exc_info()
+ try:
+ self._report_traceback(exc_info)
+ raise _ExpectedFailure(exc_info)
+ finally:
+ del exc_info
+ else:
+ raise _UnexpectedSuccess(reason)
+
+ def getUniqueInteger(self):
+ """Get an integer unique to this test.
+
+ Returns an integer that is guaranteed to be unique to this instance.
+ Use this when you need an arbitrary integer in your test, or as a
+ helper for custom anonymous factory methods.
+ """
+ return advance_iterator(self._unique_id_gen)
+
+ def getUniqueString(self, prefix=None):
+ """Get a string unique to this test.
+
+ Returns a string that is guaranteed to be unique to this instance. Use
+ this when you need an arbitrary string in your test, or as a helper
+ for custom anonymous factory methods.
+
+ :param prefix: The prefix of the string. If not provided, defaults
+ to the id of the tests.
+ :return: A bytestring of '<prefix>-<unique_int>'.
+ """
+ if prefix is None:
+ prefix = self.id()
+ return '%s-%d' % (prefix, self.getUniqueInteger())
+
+ def onException(self, exc_info, tb_label='traceback'):
+ """Called when an exception propagates from test code.
+
+ :seealso addOnException:
+ """
+ if exc_info[0] not in [
+ TestSkipped, _UnexpectedSuccess, _ExpectedFailure]:
+ self._report_traceback(exc_info, tb_label=tb_label)
+ for handler in self.__exception_handlers:
+ handler(exc_info)
+
+ @staticmethod
+ def _report_error(self, result, err):
+ result.addError(self, details=self.getDetails())
+
+ @staticmethod
+ def _report_expected_failure(self, result, err):
+ result.addExpectedFailure(self, details=self.getDetails())
+
+ @staticmethod
+ def _report_failure(self, result, err):
+ result.addFailure(self, details=self.getDetails())
+
+ @staticmethod
+ def _report_skip(self, result, err):
+ if err.args:
+ reason = err.args[0]
+ else:
+ reason = "no reason given."
+ self._add_reason(reason)
+ result.addSkip(self, details=self.getDetails())
+
+ def _report_traceback(self, exc_info, tb_label='traceback'):
+ id_gen = self._traceback_id_gens.setdefault(
+ tb_label, itertools.count(0))
+ tb_id = advance_iterator(id_gen)
+ if tb_id:
+ tb_label = '%s-%d' % (tb_label, tb_id)
+ self.addDetail(tb_label, content.TracebackContent(exc_info, self))
+
+ @staticmethod
+ def _report_unexpected_success(self, result, err):
+ result.addUnexpectedSuccess(self, details=self.getDetails())
+
+ def run(self, result=None):
+ return self.__RunTest(self, self.exception_handlers).run(result)
+
+ def _run_setup(self, result):
+ """Run the setUp function for this test.
+
+ :param result: A testtools.TestResult to report activity to.
+ :raises ValueError: If the base class setUp is not called, a
+ ValueError is raised.
+ """
+ ret = self.setUp()
+ if not self.__setup_called:
+ raise ValueError(
+ "TestCase.setUp was not called. Have you upcalled all the "
+ "way up the hierarchy from your setUp? e.g. Call "
+ "super(%s, self).setUp() from your setUp()."
+ % self.__class__.__name__)
+ return ret
+
+ def _run_teardown(self, result):
+ """Run the tearDown function for this test.
+
+ :param result: A testtools.TestResult to report activity to.
+ :raises ValueError: If the base class tearDown is not called, a
+ ValueError is raised.
+ """
+ ret = self.tearDown()
+ if not self.__teardown_called:
+ raise ValueError(
+ "TestCase.tearDown was not called. Have you upcalled all the "
+ "way up the hierarchy from your tearDown? e.g. Call "
+ "super(%s, self).tearDown() from your tearDown()."
+ % self.__class__.__name__)
+ return ret
+
+ def _get_test_method(self):
+ absent_attr = object()
+ # Python 2.5+
+ method_name = getattr(self, '_testMethodName', absent_attr)
+ if method_name is absent_attr:
+ # Python 2.4
+ method_name = getattr(self, '_TestCase__testMethodName')
+ return getattr(self, method_name)
+
+ def _run_test_method(self, result):
+ """Run the test method for this test.
+
+ :param result: A testtools.TestResult to report activity to.
+ :return: None.
+ """
+ return self._get_test_method()()
+
+ def useFixture(self, fixture):
+ """Use fixture in a test case.
+
+ The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.
+
+ :param fixture: The fixture to use.
+ :return: The fixture, after setting it up and scheduling a cleanup for
+ it.
+ """
+ fixture.setUp()
+ self.addCleanup(fixture.cleanUp)
+ self.addCleanup(self._gather_details, fixture.getDetails)
+ return fixture
+
+ def _gather_details(self, getDetails):
+ """Merge the details from getDetails() into self.getDetails()."""
+ details = getDetails()
+ my_details = self.getDetails()
+ for name, content_object in details.items():
+ new_name = name
+ disambiguator = itertools.count(1)
+ while new_name in my_details:
+ new_name = '%s-%d' % (name, advance_iterator(disambiguator))
+ name = new_name
+ content_bytes = list(content_object.iter_bytes())
+ content_callback = lambda:content_bytes
+ self.addDetail(name,
+ content.Content(content_object.content_type, content_callback))
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.__setup_called = True
+
+ def tearDown(self):
+ unittest.TestCase.tearDown(self)
+ self.__teardown_called = True
+
+
+class PlaceHolder(object):
+ """A placeholder test.
+
+ `PlaceHolder` implements much of the same interface as `TestCase` and is
+ particularly suitable for being added to `TestResult`s.
+ """
+
+ def __init__(self, test_id, short_description=None):
+ """Construct a `PlaceHolder`.
+
+ :param test_id: The id of the placeholder test.
+ :param short_description: The short description of the place holder
+ test. If not provided, the id will be used instead.
+ """
+ self._test_id = test_id
+ self._short_description = short_description
+
+ def __call__(self, result=None):
+ return self.run(result=result)
+
+ def __repr__(self):
+ internal = [self._test_id]
+ if self._short_description is not None:
+ internal.append(self._short_description)
+ return "<%s.%s(%s)>" % (
+ self.__class__.__module__,
+ self.__class__.__name__,
+ ", ".join(map(repr, internal)))
+
+ def __str__(self):
+ return self.id()
+
+ def countTestCases(self):
+ return 1
+
+ def debug(self):
+ pass
+
+ def id(self):
+ return self._test_id
+
+ def run(self, result=None):
+ if result is None:
+ result = TestResult()
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+
+ def shortDescription(self):
+ if self._short_description is None:
+ return self.id()
+ else:
+ return self._short_description
+
+
+class ErrorHolder(PlaceHolder):
+ """A placeholder test that will error out when run."""
+
+ failureException = None
+
+ def __init__(self, test_id, error, short_description=None):
+ """Construct an `ErrorHolder`.
+
+ :param test_id: The id of the test.
+ :param error: The exc info tuple that will be used as the test's error.
+ :param short_description: An optional short description of the test.
+ """
+ super(ErrorHolder, self).__init__(
+ test_id, short_description=short_description)
+ self._error = error
+
+ def __repr__(self):
+ internal = [self._test_id, self._error]
+ if self._short_description is not None:
+ internal.append(self._short_description)
+ return "<%s.%s(%s)>" % (
+ self.__class__.__module__,
+ self.__class__.__name__,
+ ", ".join(map(repr, internal)))
+
+ def run(self, result=None):
+ if result is None:
+ result = TestResult()
+ result.startTest(self)
+ result.addError(self, self._error)
+ result.stopTest(self)
+
+
+# Python 2.4 did not know how to copy functions.
+if types.FunctionType not in copy._copy_dispatch:
+ copy._copy_dispatch[types.FunctionType] = copy._copy_immutable
+
+
+def clone_test_with_new_id(test, new_id):
+ """Copy a TestCase, and give the copied test a new id.
+
+ This is only expected to be used on tests that have been constructed but
+ not executed.
+ """
+ newTest = copy.copy(test)
+ newTest.id = lambda: new_id
+ return newTest
+
+
+def skip(reason):
+ """A decorator to skip unit tests.
+
+ This is just syntactic sugar so users don't have to change any of their
+ unit tests in order to migrate to python 2.7, which provides the
+ @unittest.skip decorator.
+ """
+ def decorator(test_item):
+ if wraps is not None:
+ @wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise TestCase.skipException(reason)
+ else:
+ def skip_wrapper(test_item):
+ test_item.skip(reason)
+ return skip_wrapper
+ return decorator
+
+
+def skipIf(condition, reason):
+ """Skip a test if the condition is true."""
+ if condition:
+ return skip(reason)
+ def _id(obj):
+ return obj
+ return _id
+
+
+def skipUnless(condition, reason):
+ """Skip a test unless the condition is true."""
+ if not condition:
+ return skip(reason)
+ def _id(obj):
+ return obj
+ return _id
diff --git a/lib/testtools/testtools/testresult/__init__.py b/lib/testtools/testtools/testresult/__init__.py
new file mode 100644
index 0000000000..1f779419d2
--- /dev/null
+++ b/lib/testtools/testtools/testresult/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""Test result objects."""
+
+__all__ = [
+ 'ExtendedToOriginalDecorator',
+ 'MultiTestResult',
+ 'TestResult',
+ 'TextTestResult',
+ 'ThreadsafeForwardingResult',
+ ]
+
+from testtools.testresult.real import (
+ ExtendedToOriginalDecorator,
+ MultiTestResult,
+ TestResult,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ )
diff --git a/lib/testtools/testtools/testresult/doubles.py b/lib/testtools/testtools/testresult/doubles.py
new file mode 100644
index 0000000000..7e4a2c9b41
--- /dev/null
+++ b/lib/testtools/testtools/testresult/doubles.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2009-2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Doubles of test result objects, useful for testing unittest code."""
+
+__all__ = [
+ 'Python26TestResult',
+ 'Python27TestResult',
+ 'ExtendedTestResult',
+ ]
+
+
+class LoggingBase(object):
+ """Basic support for logging of results."""
+
+ def __init__(self):
+ self._events = []
+ self.shouldStop = False
+ self._was_successful = True
+
+
+class Python26TestResult(LoggingBase):
+ """A precisely python 2.6 like test result, that logs."""
+
+ def addError(self, test, err):
+ self._was_successful = False
+ self._events.append(('addError', test, err))
+
+ def addFailure(self, test, err):
+ self._was_successful = False
+ self._events.append(('addFailure', test, err))
+
+ def addSuccess(self, test):
+ self._events.append(('addSuccess', test))
+
+ def startTest(self, test):
+ self._events.append(('startTest', test))
+
+ def stop(self):
+ self.shouldStop = True
+
+ def stopTest(self, test):
+ self._events.append(('stopTest', test))
+
+ def wasSuccessful(self):
+ return self._was_successful
+
+
+class Python27TestResult(Python26TestResult):
+ """A precisely python 2.7 like test result, that logs."""
+
+ def addExpectedFailure(self, test, err):
+ self._events.append(('addExpectedFailure', test, err))
+
+ def addSkip(self, test, reason):
+ self._events.append(('addSkip', test, reason))
+
+ def addUnexpectedSuccess(self, test):
+ self._events.append(('addUnexpectedSuccess', test))
+
+ def startTestRun(self):
+ self._events.append(('startTestRun',))
+
+ def stopTestRun(self):
+ self._events.append(('stopTestRun',))
+
+
+class ExtendedTestResult(Python27TestResult):
+ """A test result like the proposed extended unittest result API."""
+
+ def addError(self, test, err=None, details=None):
+ self._was_successful = False
+ self._events.append(('addError', test, err or details))
+
+ def addFailure(self, test, err=None, details=None):
+ self._was_successful = False
+ self._events.append(('addFailure', test, err or details))
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._events.append(('addExpectedFailure', test, err or details))
+
+ def addSkip(self, test, reason=None, details=None):
+ self._events.append(('addSkip', test, reason or details))
+
+ def addSuccess(self, test, details=None):
+ if details:
+ self._events.append(('addSuccess', test, details))
+ else:
+ self._events.append(('addSuccess', test))
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._was_successful = False
+ if details is not None:
+ self._events.append(('addUnexpectedSuccess', test, details))
+ else:
+ self._events.append(('addUnexpectedSuccess', test))
+
+ def progress(self, offset, whence):
+ self._events.append(('progress', offset, whence))
+
+ def startTestRun(self):
+ super(ExtendedTestResult, self).startTestRun()
+ self._was_successful = True
+
+ def tags(self, new_tags, gone_tags):
+ self._events.append(('tags', new_tags, gone_tags))
+
+ def time(self, time):
+ self._events.append(('time', time))
+
+ def wasSuccessful(self):
+ return self._was_successful
diff --git a/lib/testtools/testtools/testresult/real.py b/lib/testtools/testtools/testresult/real.py
new file mode 100644
index 0000000000..b521251f46
--- /dev/null
+++ b/lib/testtools/testtools/testresult/real.py
@@ -0,0 +1,620 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+"""Test results and related things."""
+
+__metaclass__ = type
+__all__ = [
+ 'ExtendedToOriginalDecorator',
+ 'MultiTestResult',
+ 'TestResult',
+ 'ThreadsafeForwardingResult',
+ ]
+
+import datetime
+import sys
+import unittest
+
+from testtools.compat import all, _format_exc_info, str_is_unicode, _u
+
+# From http://docs.python.org/library/datetime.html
+_ZERO = datetime.timedelta(0)
+
+# A UTC class.
+
+class UTC(datetime.tzinfo):
+ """UTC"""
+
+ def utcoffset(self, dt):
+ return _ZERO
+
+ def tzname(self, dt):
+ return "UTC"
+
+ def dst(self, dt):
+ return _ZERO
+
+utc = UTC()
+
+
+class TestResult(unittest.TestResult):
+ """Subclass of unittest.TestResult extending the protocol for flexibility.
+
+ This test result supports an experimental protocol for providing additional
+ data in test outcomes. All the outcome methods take an optional dict
+ 'details'. If supplied any other detail parameters like 'err' or 'reason'
+ should not be provided. The details dict is a mapping from names to
+ MIME content objects (see testtools.content). This permits attaching
+ tracebacks, log files, or even large objects like databases that were
+ part of the test fixture. Until this API is accepted into upstream
+ Python it is considered experimental: it may be replaced at any point
+ by a newer version more in line with upstream Python. Compatibility would
+ be aimed for in this case, but may not be possible.
+
+ :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
+ """
+
+ def __init__(self):
+ # startTestRun resets all attributes, and older clients don't know to
+ # call startTestRun, so it is called once here.
+ # Because subclasses may reasonably not expect this, we call the
+ # specific version we want to run.
+ TestResult.startTestRun(self)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ """Called when a test has failed in an expected manner.
+
+ Like with addSuccess and addError, testStopped should still be called.
+
+ :param test: The test that has been skipped.
+ :param err: The exc_info of the error that was raised.
+ :return: None
+ """
+ # This is the python 2.7 implementation
+ self.expectedFailures.append(
+ (test, self._err_details_to_string(test, err, details)))
+
+ def addError(self, test, err=None, details=None):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ """
+ self.errors.append((test,
+ self._err_details_to_string(test, err, details)))
+
+ def addFailure(self, test, err=None, details=None):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ """
+ self.failures.append((test,
+ self._err_details_to_string(test, err, details)))
+
+ def addSkip(self, test, reason=None, details=None):
+ """Called when a test has been skipped rather than running.
+
+ Like with addSuccess and addError, testStopped should still be called.
+
+ This must be called by the TestCase. 'addError' and 'addFailure' will
+ not call addSkip, since they have no assumptions about the kind of
+ errors that a test can raise.
+
+ :param test: The test that has been skipped.
+ :param reason: The reason for the test being skipped. For instance,
+ u"pyGL is not available".
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ :return: None
+ """
+ if reason is None:
+ reason = details.get('reason')
+ if reason is None:
+ reason = 'No reason given'
+ else:
+ reason = ''.join(reason.iter_text())
+ skip_list = self.skip_reasons.setdefault(reason, [])
+ skip_list.append(test)
+
+ def addSuccess(self, test, details=None):
+ """Called when a test succeeded."""
+
+ def addUnexpectedSuccess(self, test, details=None):
+ """Called when a test was expected to fail, but succeeded."""
+ self.unexpectedSuccesses.append(test)
+
+ def wasSuccessful(self):
+ """Has this result been successful so far?
+
+ If there have been any errors, failures or unexpected successes,
+ return False. Otherwise, return True.
+
+ Note: This differs from standard unittest in that we consider
+ unexpected successes to be equivalent to failures, rather than
+ successes.
+ """
+ return not (self.errors or self.failures or self.unexpectedSuccesses)
+
+ if str_is_unicode:
+ # Python 3 and IronPython strings are unicode, use parent class method
+ _exc_info_to_unicode = unittest.TestResult._exc_info_to_string
+ else:
+ # For Python 2, need to decode components of traceback according to
+ # their source, so can't use traceback.format_exception
+ # Here follows a little deep magic to copy the existing method and
+ # replace the formatter with one that returns unicode instead
+ from types import FunctionType as __F, ModuleType as __M
+ __f = unittest.TestResult._exc_info_to_string.im_func
+ __g = dict(__f.func_globals)
+ __m = __M("__fake_traceback")
+ __m.format_exception = _format_exc_info
+ __g["traceback"] = __m
+ _exc_info_to_unicode = __F(__f.func_code, __g, "_exc_info_to_unicode")
+ del __F, __M, __f, __g, __m
+
+ def _err_details_to_string(self, test, err=None, details=None):
+ """Convert an error in exc_info form or a contents dict to a string."""
+ if err is not None:
+ return self._exc_info_to_unicode(err, test)
+ return _details_to_str(details)
+
+ def _now(self):
+ """Return the current 'test time'.
+
+ If the time() method has not been called, this is equivalent to
+ datetime.now(), otherwise it's the last supplied datestamp given to the
+ time() method.
+ """
+ if self.__now is None:
+ return datetime.datetime.now(utc)
+ else:
+ return self.__now
+
+ def startTestRun(self):
+ """Called before a test run starts.
+
+ New in python 2.7. The testtools version resets the result to a
+ pristine condition ready for use in another test run.
+ """
+ super(TestResult, self).__init__()
+ self.skip_reasons = {}
+ self.__now = None
+ # -- Start: As per python 2.7 --
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+ # -- End: As per python 2.7 --
+
+ def stopTestRun(self):
+ """Called after a test run completes
+
+ New in python 2.7
+ """
+
+ def time(self, a_datetime):
+ """Provide a timestamp to represent the current time.
+
+ This is useful when test activity is time delayed, or happening
+ concurrently and getting the system time between API calls will not
+ accurately represent the duration of tests (or the whole run).
+
+ Calling time() sets the datetime used by the TestResult object.
+ Time is permitted to go backwards when using this call.
+
+ :param a_datetime: A datetime.datetime object with TZ information or
+ None to reset the TestResult to gathering time from the system.
+ """
+ self.__now = a_datetime
+
+ def done(self):
+ """Called when the test runner is done.
+
+ deprecated in favour of stopTestRun.
+ """
+
+
+class MultiTestResult(TestResult):
+ """A test result that dispatches to many test results."""
+
+ def __init__(self, *results):
+ TestResult.__init__(self)
+ self._results = list(map(ExtendedToOriginalDecorator, results))
+
+ def _dispatch(self, message, *args, **kwargs):
+ return tuple(
+ getattr(result, message)(*args, **kwargs)
+ for result in self._results)
+
+ def startTest(self, test):
+ return self._dispatch('startTest', test)
+
+ def stopTest(self, test):
+ return self._dispatch('stopTest', test)
+
+ def addError(self, test, error=None, details=None):
+ return self._dispatch('addError', test, error, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self._dispatch(
+ 'addExpectedFailure', test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self._dispatch('addFailure', test, err, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self._dispatch('addSkip', test, reason, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self._dispatch('addSuccess', test, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self._dispatch('addUnexpectedSuccess', test, details=details)
+
+ def startTestRun(self):
+ return self._dispatch('startTestRun')
+
+ def stopTestRun(self):
+ return self._dispatch('stopTestRun')
+
+ def time(self, a_datetime):
+ return self._dispatch('time', a_datetime)
+
+ def done(self):
+ return self._dispatch('done')
+
+ def wasSuccessful(self):
+ """Was this result successful?
+
+ Only returns True if every constituent result was successful.
+ """
+ return all(self._dispatch('wasSuccessful'))
+
+
+class TextTestResult(TestResult):
+ """A TestResult which outputs activity to a text stream."""
+
+ def __init__(self, stream):
+ """Construct a TextTestResult writing to stream."""
+ super(TextTestResult, self).__init__()
+ self.stream = stream
+ self.sep1 = '=' * 70 + '\n'
+ self.sep2 = '-' * 70 + '\n'
+
+ def _delta_to_float(self, a_timedelta):
+ return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
+ a_timedelta.microseconds / 1000000.0)
+
+ def _show_list(self, label, error_list):
+ for test, output in error_list:
+ self.stream.write(self.sep1)
+ self.stream.write("%s: %s\n" % (label, test.id()))
+ self.stream.write(self.sep2)
+ self.stream.write(output)
+
+ def startTestRun(self):
+ super(TextTestResult, self).startTestRun()
+ self.__start = self._now()
+ self.stream.write("Tests running...\n")
+
+ def stopTestRun(self):
+ if self.testsRun != 1:
+ plural = 's'
+ else:
+ plural = ''
+ stop = self._now()
+ self._show_list('ERROR', self.errors)
+ self._show_list('FAIL', self.failures)
+ for test in self.unexpectedSuccesses:
+ self.stream.write(
+ "%sUNEXPECTED SUCCESS: %s\n%s" % (
+ self.sep1, test.id(), self.sep2))
+ self.stream.write("Ran %d test%s in %.3fs\n\n" %
+ (self.testsRun, plural,
+ self._delta_to_float(stop - self.__start)))
+ if self.wasSuccessful():
+ self.stream.write("OK\n")
+ else:
+ self.stream.write("FAILED (")
+ details = []
+ details.append("failures=%d" % (
+ sum(map(len, (
+ self.failures, self.errors, self.unexpectedSuccesses)))))
+ self.stream.write(", ".join(details))
+ self.stream.write(")\n")
+ super(TextTestResult, self).stopTestRun()
+
+
+class ThreadsafeForwardingResult(TestResult):
+ """A TestResult which ensures the target does not receive mixed up calls.
+
+ This is used when receiving test results from multiple sources, and batches
+ up all the activity for a single test into a thread-safe batch where all
+ other ThreadsafeForwardingResult objects sharing the same semaphore will be
+ locked out.
+
+ Typical use of ThreadsafeForwardingResult involves creating one
+ ThreadsafeForwardingResult per thread in a ConcurrentTestSuite. These
+ forward to the TestResult that the ConcurrentTestSuite run method was
+ called with.
+
+ target.done() is called once for each ThreadsafeForwardingResult that
+ forwards to the same target. If the target's done() takes special action,
+ care should be taken to accommodate this.
+ """
+
+ def __init__(self, target, semaphore):
+ """Create a ThreadsafeForwardingResult forwarding to target.
+
+ :param target: A TestResult.
+ :param semaphore: A threading.Semaphore with limit 1.
+ """
+ TestResult.__init__(self)
+ self.result = ExtendedToOriginalDecorator(target)
+ self.semaphore = semaphore
+
+ def _add_result_with_semaphore(self, method, test, *args, **kwargs):
+ self.semaphore.acquire()
+ try:
+ self.result.time(self._test_start)
+ self.result.startTest(test)
+ self.result.time(self._now())
+ try:
+ method(test, *args, **kwargs)
+ finally:
+ self.result.stopTest(test)
+ finally:
+ self.semaphore.release()
+
+ def addError(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addError,
+ test, err, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addExpectedFailure,
+ test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addFailure,
+ test, err, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ self._add_result_with_semaphore(self.result.addSkip,
+ test, reason, details=details)
+
+ def addSuccess(self, test, details=None):
+ self._add_result_with_semaphore(self.result.addSuccess,
+ test, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
+ test, details=details)
+
+ def startTestRun(self):
+ self.semaphore.acquire()
+ try:
+ self.result.startTestRun()
+ finally:
+ self.semaphore.release()
+
+ def stopTestRun(self):
+ self.semaphore.acquire()
+ try:
+ self.result.stopTestRun()
+ finally:
+ self.semaphore.release()
+
+ def done(self):
+ self.semaphore.acquire()
+ try:
+ self.result.done()
+ finally:
+ self.semaphore.release()
+
+ def startTest(self, test):
+ self._test_start = self._now()
+ super(ThreadsafeForwardingResult, self).startTest(test)
+
+ def wasSuccessful(self):
+ return self.result.wasSuccessful()
+
+
+class ExtendedToOriginalDecorator(object):
+ """Permit new TestResult API code to degrade gracefully with old results.
+
+ This decorates an existing TestResult and converts missing outcomes
+ such as addSkip to older outcomes such as addSuccess. It also supports
+ the extended details protocol. In all cases the most recent protocol
+ is attempted first, and fallbacks only occur when the decorated result
+ does not support the newer style of calling.
+ """
+
+ def __init__(self, decorated):
+ self.decorated = decorated
+
+ def __getattr__(self, name):
+ return getattr(self.decorated, name)
+
+ def addError(self, test, err=None, details=None):
+ self._check_args(err, details)
+ if details is not None:
+ try:
+ return self.decorated.addError(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return self.decorated.addError(test, err)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._check_args(err, details)
+ addExpectedFailure = getattr(
+ self.decorated, 'addExpectedFailure', None)
+ if addExpectedFailure is None:
+ return self.addSuccess(test)
+ if details is not None:
+ try:
+ return addExpectedFailure(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return addExpectedFailure(test, err)
+
+ def addFailure(self, test, err=None, details=None):
+ self._check_args(err, details)
+ if details is not None:
+ try:
+ return self.decorated.addFailure(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return self.decorated.addFailure(test, err)
+
+ def addSkip(self, test, reason=None, details=None):
+ self._check_args(reason, details)
+ addSkip = getattr(self.decorated, 'addSkip', None)
+ if addSkip is None:
+ return self.decorated.addSuccess(test)
+ if details is not None:
+ try:
+ return addSkip(test, details=details)
+ except TypeError:
+ # extract the reason if it's available
+ try:
+ reason = ''.join(details['reason'].iter_text())
+ except KeyError:
+ reason = _details_to_str(details)
+ return addSkip(test, reason)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
+ if outcome is None:
+ try:
+ test.fail("")
+ except test.failureException:
+ return self.addFailure(test, sys.exc_info())
+ if details is not None:
+ try:
+ return outcome(test, details=details)
+ except TypeError:
+ pass
+ return outcome(test)
+
+ def addSuccess(self, test, details=None):
+ if details is not None:
+ try:
+ return self.decorated.addSuccess(test, details=details)
+ except TypeError:
+ pass
+ return self.decorated.addSuccess(test)
+
+ def _check_args(self, err, details):
+ param_count = 0
+ if err is not None:
+ param_count += 1
+ if details is not None:
+ param_count += 1
+ if param_count != 1:
+ raise ValueError("Must pass only one of err '%s' and details '%s"
+ % (err, details))
+
+ def _details_to_exc_info(self, details):
+ """Convert a details dict to an exc_info tuple."""
+ return (_StringException,
+ _StringException(_details_to_str(details)), None)
+
+ def done(self):
+ try:
+ return self.decorated.done()
+ except AttributeError:
+ return
+
+ def progress(self, offset, whence):
+ method = getattr(self.decorated, 'progress', None)
+ if method is None:
+ return
+ return method(offset, whence)
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def startTest(self, test):
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ try:
+ return self.decorated.startTestRun()
+ except AttributeError:
+ return
+
+ def stop(self):
+ return self.decorated.stop()
+
+ def stopTest(self, test):
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ try:
+ return self.decorated.stopTestRun()
+ except AttributeError:
+ return
+
+ def tags(self, new_tags, gone_tags):
+ method = getattr(self.decorated, 'tags', None)
+ if method is None:
+ return
+ return method(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ method = getattr(self.decorated, 'time', None)
+ if method is None:
+ return
+ return method(a_datetime)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+
+class _StringException(Exception):
+ """An exception made from an arbitrary string."""
+
+ if not str_is_unicode:
+ def __init__(self, string):
+ if type(string) is not unicode:
+ raise TypeError("_StringException expects unicode, got %r" %
+ (string,))
+ Exception.__init__(self, string)
+
+ def __str__(self):
+ return self.args[0].encode("utf-8")
+
+ def __unicode__(self):
+ return self.args[0]
+ # For 3.0 and above the default __str__ is fine, so we don't define one.
+
+ def __hash__(self):
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return self.args == other.args
+ except AttributeError:
+ return False
+
+
+def _details_to_str(details):
+ """Convert a details dict to a string."""
+ chars = []
+ # sorted is for testing, may want to remove that and use a dict
+ # subclass with defined order for items instead.
+ for key, content in sorted(details.items()):
+ if content.content_type.type != 'text':
+ chars.append('Binary content: %s\n' % key)
+ continue
+ chars.append('Text attachment: %s\n' % key)
+ chars.append('------------\n')
+ chars.extend(content.iter_text())
+ if not chars[-1].endswith('\n'):
+ chars.append('\n')
+ chars.append('------------\n')
+ return _u('').join(chars)
diff --git a/lib/testtools/testtools/tests/__init__.py b/lib/testtools/testtools/tests/__init__.py
new file mode 100644
index 0000000000..ac3c218de9
--- /dev/null
+++ b/lib/testtools/testtools/tests/__init__.py
@@ -0,0 +1,41 @@
+"""Tests for testtools itself."""
+
+# See README for copyright and licensing details.
+
+import unittest
+
+
+def test_suite():
+ from testtools.tests import (
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_fixturesupport,
+ test_helpers,
+ test_matchers,
+ test_monkey,
+ test_run,
+ test_runtest,
+ test_spinner,
+ test_testtools,
+ test_testresult,
+ test_testsuite,
+ )
+ modules = [
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_fixturesupport,
+ test_helpers,
+ test_matchers,
+ test_monkey,
+ test_run,
+ test_spinner,
+ test_testresult,
+ test_testsuite,
+ test_testtools,
+ ]
+ suites = map(lambda x:x.test_suite(), modules)
+ return unittest.TestSuite(suites)
diff --git a/lib/testtools/testtools/tests/helpers.py b/lib/testtools/testtools/tests/helpers.py
new file mode 100644
index 0000000000..5f3187db29
--- /dev/null
+++ b/lib/testtools/testtools/tests/helpers.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+"""Helpers for tests."""
+
+import sys
+
+__metaclass__ = type
+__all__ = [
+ 'LoggingResult',
+ ]
+
+from testtools import TestResult
+
+
+# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
+try:
+ raise Exception
+except Exception:
+ an_exc_info = sys.exc_info()
+
+# Deprecated: This class's attributes are somewhat non-deterministic, which
+# leads to hard-to-predict tests (because Python upstream keeps changing things).
+class LoggingResult(TestResult):
+ """TestResult that logs its event to a list."""
+
+ def __init__(self, log):
+ self._events = log
+ super(LoggingResult, self).__init__()
+
+ def startTest(self, test):
+ self._events.append(('startTest', test))
+ super(LoggingResult, self).startTest(test)
+
+ def stopTest(self, test):
+ self._events.append(('stopTest', test))
+ super(LoggingResult, self).stopTest(test)
+
+ def addFailure(self, test, error):
+ self._events.append(('addFailure', test, error))
+ super(LoggingResult, self).addFailure(test, error)
+
+ def addError(self, test, error):
+ self._events.append(('addError', test, error))
+ super(LoggingResult, self).addError(test, error)
+
+ def addSkip(self, test, reason):
+ self._events.append(('addSkip', test, reason))
+ super(LoggingResult, self).addSkip(test, reason)
+
+ def addSuccess(self, test):
+ self._events.append(('addSuccess', test))
+ super(LoggingResult, self).addSuccess(test)
+
+ def startTestRun(self):
+ self._events.append('startTestRun')
+ super(LoggingResult, self).startTestRun()
+
+ def stopTestRun(self):
+ self._events.append('stopTestRun')
+ super(LoggingResult, self).stopTestRun()
+
+ def done(self):
+ self._events.append('done')
+ super(LoggingResult, self).done()
+
+ def time(self, a_datetime):
+ self._events.append(('time', a_datetime))
+ super(LoggingResult, self).time(a_datetime)
+
+# Note: the following three classes differ from LoggingResult by
+# being fully defined exact matches rather than supersets.
+from testtools.testresult.doubles import *
diff --git a/lib/testtools/testtools/tests/test_compat.py b/lib/testtools/testtools/tests/test_compat.py
new file mode 100644
index 0000000000..856953896a
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_compat.py
@@ -0,0 +1,257 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for miscellaneous compatibility functions"""
+
+import linecache
+import os
+import sys
+import tempfile
+import traceback
+
+import testtools
+
+from testtools.compat import (
+ _b,
+ _detect_encoding,
+ _get_source_encoding,
+ _u,
+ unicode_output_stream,
+ )
+from testtools.matchers import (
+ MatchesException,
+ Not,
+ Raises,
+ )
+
+
+class TestDetectEncoding(testtools.TestCase):
+ """Test detection of Python source encodings"""
+
+ def _check_encoding(self, expected, lines, possibly_invalid=False):
+ """Check lines are valid Python and encoding is as expected"""
+ if not possibly_invalid:
+ compile(_b("".join(lines)), "<str>", "exec")
+ encoding = _detect_encoding(lines)
+ self.assertEqual(expected, encoding,
+ "Encoding %r expected but got %r from lines %r" %
+ (expected, encoding, lines))
+
+ def test_examples_from_pep(self):
+ """Check the examples given in PEP 263 all work as specified
+
+ See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
+ """
+ # With interpreter binary and using Emacs style file encoding comment:
+ self._check_encoding("latin-1", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: latin-1 -*-\n",
+ "import os, sys\n"))
+ self._check_encoding("iso-8859-15", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: iso-8859-15 -*-\n",
+ "import os, sys\n"))
+ self._check_encoding("ascii", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: ascii -*-\n",
+ "import os, sys\n"))
+ # Without interpreter line, using plain text:
+ self._check_encoding("utf-8", (
+ "# This Python file uses the following encoding: utf-8\n",
+ "import os, sys\n"))
+ # Text editors might have different ways of defining the file's
+ # encoding, e.g.
+ self._check_encoding("latin-1", (
+ "#!/usr/local/bin/python\n",
+ "# coding: latin-1\n",
+ "import os, sys\n"))
+ # Without encoding comment, Python's parser will assume ASCII text:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "import os, sys\n"))
+ # Encoding comments which don't work:
+ # Missing "coding:" prefix:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "# latin-1\n",
+ "import os, sys\n"))
+ # Encoding comment not on line 1 or 2:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "#\n",
+ "# -*- coding: latin-1 -*-\n",
+ "import os, sys\n"))
+ # Unsupported encoding:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "# -*- coding: utf-42 -*-\n",
+ "import os, sys\n"),
+ possibly_invalid=True)
+
+ def test_bom(self):
+ """Test the UTF-8 BOM counts as an encoding declaration"""
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbfimport sys\n",
+ ))
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbf# File encoding: UTF-8\n",
+ ))
+ self._check_encoding("utf-8", (
+ '\xef\xbb\xbf"""Module docstring\n',
+ '\xef\xbb\xbfThat should just be a ZWNB"""\n'))
+ self._check_encoding("latin-1", (
+ '"""Is this coding: latin-1 or coding: utf-8 instead?\n',
+ '\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
+ '"""Module docstring say \xe2\x98\x86"""\n'))
+
+ def test_multiple_coding_comments(self):
+ """Test only the first of multiple coding declarations counts"""
+ self._check_encoding("iso-8859-1", (
+ "# Is the coding: iso-8859-1\n",
+ "# Or is it coding: iso-8859-2\n"),
+ possibly_invalid=True)
+ self._check_encoding("iso-8859-1", (
+ "#!/usr/bin/python\n",
+ "# Is the coding: iso-8859-1\n",
+ "# Or is it coding: iso-8859-2\n"))
+ self._check_encoding("iso-8859-1", (
+ "# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
+ "# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
+ possibly_invalid=True)
+ self._check_encoding("iso-8859-2", (
+ "# Is the coding iso-8859-1 or coding: iso-8859-2\n",
+ "# Spot the missing colon above\n"))
+
+
+class TestGetSourceEncoding(testtools.TestCase):
+ """Test reading and caching the encodings of source files"""
+
+ def setUp(self):
+ testtools.TestCase.setUp(self)
+ dir = tempfile.mkdtemp()
+ self.addCleanup(os.rmdir, dir)
+ self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
+ self._written = False
+
+ def put_source(self, text):
+ f = open(self.filename, "w")
+ try:
+ f.write(text)
+ finally:
+ f.close()
+ if not self._written:
+ self._written = True
+ self.addCleanup(os.remove, self.filename)
+ self.addCleanup(linecache.cache.pop, self.filename, None)
+
+ def test_nonexistant_file_as_ascii(self):
+ """When file can't be found, the encoding should default to ascii"""
+ self.assertEquals("ascii", _get_source_encoding(self.filename))
+
+ def test_encoding_is_cached(self):
+ """The encoding should stay the same if the cache isn't invalidated"""
+ self.put_source(
+ "# coding: iso-8859-13\n"
+ "import os\n")
+ self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+ self.put_source(
+ "# coding: rot-13\n"
+ "vzcbeg bf\n")
+ self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+
+ def test_traceback_rechecks_encoding(self):
+ """A traceback function checks the cache and resets the encoding"""
+ self.put_source(
+ "# coding: iso-8859-8\n"
+ "import os\n")
+ self.assertEquals("iso-8859-8", _get_source_encoding(self.filename))
+ self.put_source(
+ "# coding: utf-8\n"
+ "import os\n")
+ try:
+ exec (compile("raise RuntimeError\n", self.filename, "exec"))
+ except RuntimeError:
+ traceback.extract_tb(sys.exc_info()[2])
+ else:
+ self.fail("RuntimeError not raised")
+ self.assertEquals("utf-8", _get_source_encoding(self.filename))
+
+
+class _FakeOutputStream(object):
+ """A simple file-like object for testing"""
+
+ def __init__(self):
+ self.writelog = []
+
+ def write(self, obj):
+ self.writelog.append(obj)
+
+
+class TestUnicodeOutputStream(testtools.TestCase):
+ """Test wrapping output streams so they work with arbitrary unicode"""
+
+ uni = _u("pa\u026a\u03b8\u0259n")
+
+ def setUp(self):
+ super(TestUnicodeOutputStream, self).setUp()
+ if sys.platform == "cli":
+ self.skip("IronPython shouldn't wrap streams to do encoding")
+
+ def test_no_encoding_becomes_ascii(self):
+ """A stream with no encoding attribute gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_encoding_as_none_becomes_ascii(self):
+ """A stream with encoding value of None gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ sout.encoding = None
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_bogus_encoding_becomes_ascii(self):
+ """A stream with a bogus encoding gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ sout.encoding = "bogus"
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_partial_encoding_replace(self):
+ """A string which can be partly encoded correctly should be"""
+ sout = _FakeOutputStream()
+ sout.encoding = "iso-8859-7"
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa?\xe8?n")], sout.writelog)
+
+ def test_unicode_encodings_not_wrapped(self):
+ """A unicode encoding is left unwrapped as needs no error handler"""
+ sout = _FakeOutputStream()
+ sout.encoding = "utf-8"
+ self.assertIs(unicode_output_stream(sout), sout)
+ sout = _FakeOutputStream()
+ sout.encoding = "utf-16-be"
+ self.assertIs(unicode_output_stream(sout), sout)
+
+ def test_stringio(self):
+ """A StringIO object should maybe get an ascii native str type"""
+ try:
+ from cStringIO import StringIO
+ newio = False
+ except ImportError:
+ from io import StringIO
+ newio = True
+ sout = StringIO()
+ soutwrapper = unicode_output_stream(sout)
+ if newio:
+ self.expectFailure("Python 3 StringIO expects text not bytes",
+ self.assertThat, lambda: soutwrapper.write(self.uni),
+ Not(Raises(MatchesException(TypeError))))
+ soutwrapper.write(self.uni)
+ self.assertEqual("pa???n", sout.getvalue())
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_content.py b/lib/testtools/testtools/tests/test_content.py
new file mode 100644
index 0000000000..eaf50c7f37
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_content.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2008-2010 Jonathan M. Lange. See LICENSE for details.
+
+import unittest
+from testtools import TestCase
+from testtools.compat import _b, _u
+from testtools.content import Content, TracebackContent, text_content
+from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.matchers import MatchesException, Raises
+from testtools.tests.helpers import an_exc_info
+
+
+raises_value_error = Raises(MatchesException(ValueError))
+
+
+class TestContent(TestCase):
+
+ def test___init___None_errors(self):
+ self.assertThat(lambda:Content(None, None), raises_value_error)
+ self.assertThat(lambda:Content(None, lambda: ["traceback"]),
+ raises_value_error)
+ self.assertThat(lambda:Content(ContentType("text", "traceback"), None),
+ raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertEqual(content_type, content.content_type)
+ self.assertEqual(["bytes"], list(content.iter_bytes()))
+
+ def test___eq__(self):
+ content_type = ContentType("foo", "bar")
+ one_chunk = lambda: [_b("bytes")]
+ two_chunk = lambda: [_b("by"), _b("tes")]
+ content1 = Content(content_type, one_chunk)
+ content2 = Content(content_type, one_chunk)
+ content3 = Content(content_type, two_chunk)
+ content4 = Content(content_type, lambda: [_b("by"), _b("te")])
+ content5 = Content(ContentType("f", "b"), two_chunk)
+ self.assertEqual(content1, content2)
+ self.assertEqual(content1, content3)
+ self.assertNotEqual(content1, content4)
+ self.assertNotEqual(content1, content5)
+
+ def test___repr__(self):
+ content = Content(ContentType("application", "octet-stream"),
+ lambda: [_b("\x00bin"), _b("ary\xff")])
+ self.assertIn("\\x00binary\\xff", repr(content))
+
+ def test_iter_text_not_text_errors(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertThat(content.iter_text, raises_value_error)
+
+ def test_iter_text_decodes(self):
+ content_type = ContentType("text", "strange", {"charset": "utf8"})
+ content = Content(
+ content_type, lambda: [_u("bytes\xea").encode("utf8")])
+ self.assertEqual([_u("bytes\xea")], list(content.iter_text()))
+
+ def test_iter_text_default_charset_iso_8859_1(self):
+ content_type = ContentType("text", "strange")
+ text = _u("bytes\xea")
+ iso_version = text.encode("ISO-8859-1")
+ content = Content(content_type, lambda: [iso_version])
+ self.assertEqual([text], list(content.iter_text()))
+
+
+class TestTracebackContent(TestCase):
+
+ def test___init___None_errors(self):
+ self.assertThat(lambda:TracebackContent(None, None),
+ raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content = TracebackContent(an_exc_info, self)
+ content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+ self.assertEqual(content_type, content.content_type)
+ result = unittest.TestResult()
+ expected = result._exc_info_to_string(an_exc_info, self)
+ self.assertEqual(expected, ''.join(list(content.iter_text())))
+
+
+class TestBytesContent(TestCase):
+
+ def test_bytes(self):
+ data = _u("some data")
+ expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
+ self.assertEqual(expected, text_content(data))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_content_type.py b/lib/testtools/testtools/tests/test_content_type.py
new file mode 100644
index 0000000000..52f4afac05
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_content_type.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import Equals, MatchesException, Raises
+from testtools.content_type import ContentType, UTF8_TEXT
+
+
+class TestContentType(TestCase):
+
+ def test___init___None_errors(self):
+ raises_value_error = Raises(MatchesException(ValueError))
+ self.assertThat(lambda:ContentType(None, None), raises_value_error)
+ self.assertThat(lambda:ContentType(None, "traceback"),
+ raises_value_error)
+ self.assertThat(lambda:ContentType("text", None), raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ self.assertEqual("foo", content_type.type)
+ self.assertEqual("bar", content_type.subtype)
+ self.assertEqual({}, content_type.parameters)
+
+ def test___init___with_parameters(self):
+ content_type = ContentType("foo", "bar", {"quux": "thing"})
+ self.assertEqual({"quux": "thing"}, content_type.parameters)
+
+ def test___eq__(self):
+ content_type1 = ContentType("foo", "bar", {"quux": "thing"})
+ content_type2 = ContentType("foo", "bar", {"quux": "thing"})
+ content_type3 = ContentType("foo", "bar", {"quux": "thing2"})
+ self.assertTrue(content_type1.__eq__(content_type2))
+ self.assertFalse(content_type1.__eq__(content_type3))
+
+
+class TestBuiltinContentTypes(TestCase):
+
+ def test_plain_text(self):
+ # The UTF8_TEXT content type represents UTF-8 encoded text/plain.
+ self.assertThat(UTF8_TEXT.type, Equals('text'))
+ self.assertThat(UTF8_TEXT.subtype, Equals('plain'))
+ self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'}))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_deferredruntest.py b/lib/testtools/testtools/tests/test_deferredruntest.py
new file mode 100644
index 0000000000..04614df77f
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_deferredruntest.py
@@ -0,0 +1,738 @@
+# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for the DeferredRunTest single test execution logic."""
+
+import os
+import signal
+
+from testtools import (
+ skipIf,
+ TestCase,
+ )
+from testtools.content import (
+ text_content,
+ )
+from testtools.helpers import try_import
+from testtools.tests.helpers import ExtendedTestResult
+from testtools.matchers import (
+ Equals,
+ KeysEqual,
+ MatchesException,
+ Raises,
+ )
+from testtools.runtest import RunTest
+from testtools.tests.test_spinner import NeedsTwistedTestCase
+
+assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
+AsynchronousDeferredRunTest = try_import(
+ 'testtools.deferredruntest.AsynchronousDeferredRunTest')
+flush_logged_errors = try_import(
+ 'testtools.deferredruntest.flush_logged_errors')
+SynchronousDeferredRunTest = try_import(
+ 'testtools.deferredruntest.SynchronousDeferredRunTest')
+
+defer = try_import('twisted.internet.defer')
+failure = try_import('twisted.python.failure')
+log = try_import('twisted.python.log')
+DelayedCall = try_import('twisted.internet.base.DelayedCall')
+
+
+class X(object):
+ """Tests that we run as part of our tests, nested to avoid discovery."""
+
+ class Base(TestCase):
+ def setUp(self):
+ super(X.Base, self).setUp()
+ self.calls = ['setUp']
+ self.addCleanup(self.calls.append, 'clean-up')
+ def test_something(self):
+ self.calls.append('test')
+ def tearDown(self):
+ self.calls.append('tearDown')
+ super(X.Base, self).tearDown()
+
+ class ErrorInSetup(Base):
+ expected_calls = ['setUp', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def setUp(self):
+ super(X.ErrorInSetup, self).setUp()
+ raise RuntimeError("Error in setUp")
+
+ class ErrorInTest(Base):
+ expected_calls = ['setUp', 'tearDown', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def test_something(self):
+ raise RuntimeError("Error in test")
+
+ class FailureInTest(Base):
+ expected_calls = ['setUp', 'tearDown', 'clean-up']
+ expected_results = [('addFailure', AssertionError)]
+ def test_something(self):
+ self.fail("test failed")
+
+ class ErrorInTearDown(Base):
+ expected_calls = ['setUp', 'test', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def tearDown(self):
+ raise RuntimeError("Error in tearDown")
+
+ class ErrorInCleanup(Base):
+ expected_calls = ['setUp', 'test', 'tearDown', 'clean-up']
+ expected_results = [('addError', ZeroDivisionError)]
+ def test_something(self):
+ self.calls.append('test')
+ self.addCleanup(lambda: 1/0)
+
+ class TestIntegration(NeedsTwistedTestCase):
+
+ def assertResultsMatch(self, test, result):
+ events = list(result._events)
+ self.assertEqual(('startTest', test), events.pop(0))
+ for expected_result in test.expected_results:
+ result = events.pop(0)
+ if len(expected_result) == 1:
+ self.assertEqual((expected_result[0], test), result)
+ else:
+ self.assertEqual((expected_result[0], test), result[:2])
+ error_type = expected_result[1]
+ self.assertIn(error_type.__name__, str(result[2]))
+ self.assertEqual([('stopTest', test)], events)
+
+ def test_runner(self):
+ result = ExtendedTestResult()
+ test = self.test_factory('test_something', runTest=self.runner)
+ test.run(result)
+ self.assertEqual(test.calls, self.test_factory.expected_calls)
+ self.assertResultsMatch(test, result)
+
+
+def make_integration_tests():
+ from unittest import TestSuite
+ from testtools import clone_test_with_new_id
+ runners = [
+ ('RunTest', RunTest),
+ ('SynchronousDeferredRunTest', SynchronousDeferredRunTest),
+ ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest),
+ ]
+
+ tests = [
+ X.ErrorInSetup,
+ X.ErrorInTest,
+ X.ErrorInTearDown,
+ X.FailureInTest,
+ X.ErrorInCleanup,
+ ]
+ base_test = X.TestIntegration('test_runner')
+ integration_tests = []
+ for runner_name, runner in runners:
+ for test in tests:
+ new_test = clone_test_with_new_id(
+ base_test, '%s(%s, %s)' % (
+ base_test.id(),
+ runner_name,
+ test.__name__))
+ new_test.test_factory = test
+ new_test.runner = runner
+ integration_tests.append(new_test)
+ return TestSuite(integration_tests)
+
+
+class TestSynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+ def make_result(self):
+ return ExtendedTestResult()
+
+ def make_runner(self, test):
+ return SynchronousDeferredRunTest(test, test.exception_handlers)
+
+ def test_success(self):
+ class SomeCase(TestCase):
+ def test_success(self):
+ return defer.succeed(None)
+ test = SomeCase('test_success')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ result._events, Equals([
+ ('startTest', test),
+ ('addSuccess', test),
+ ('stopTest', test)]))
+
+ def test_failure(self):
+ class SomeCase(TestCase):
+ def test_failure(self):
+ return defer.maybeDeferred(self.fail, "Egads!")
+ test = SomeCase('test_failure')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events], Equals([
+ ('startTest', test),
+ ('addFailure', test),
+ ('stopTest', test)]))
+
+ def test_setUp_followed_by_test(self):
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ return defer.succeed(None)
+ def test_failure(self):
+ return defer.maybeDeferred(self.fail, "Egads!")
+ test = SomeCase('test_failure')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events], Equals([
+ ('startTest', test),
+ ('addFailure', test),
+ ('stopTest', test)]))
+
+
+class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+ def make_reactor(self):
+ from twisted.internet import reactor
+ return reactor
+
+ def make_result(self):
+ return ExtendedTestResult()
+
+ def make_runner(self, test, timeout=None):
+ if timeout is None:
+ timeout = self.make_timeout()
+ return AsynchronousDeferredRunTest(
+ test, test.exception_handlers, timeout=timeout)
+
+ def make_timeout(self):
+ return 0.005
+
+ def test_setUp_returns_deferred_that_fires_later(self):
+ # setUp can return a Deferred that might fire at any time.
+ # AsynchronousDeferredRunTest will not go on to running the test until
+ # the Deferred returned by setUp actually fires.
+ call_log = []
+ marker = object()
+ d = defer.Deferred().addCallback(call_log.append)
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ call_log.append('setUp')
+ return d
+ def test_something(self):
+ call_log.append('test')
+ def fire_deferred():
+ self.assertThat(call_log, Equals(['setUp']))
+ d.callback(marker)
+ test = SomeCase('test_something')
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout=timeout)
+ result = self.make_result()
+ reactor = self.make_reactor()
+ reactor.callLater(timeout, fire_deferred)
+ runner.run(result)
+ self.assertThat(call_log, Equals(['setUp', marker, 'test']))
+
+ def test_calls_setUp_test_tearDown_in_sequence(self):
+ # setUp, the test method and tearDown can all return
+ # Deferreds. AsynchronousDeferredRunTest will make sure that each of
+ # these are run in turn, only going on to the next stage once the
+ # Deferred from the previous stage has fired.
+ call_log = []
+ a = defer.Deferred()
+ a.addCallback(lambda x: call_log.append('a'))
+ b = defer.Deferred()
+ b.addCallback(lambda x: call_log.append('b'))
+ c = defer.Deferred()
+ c.addCallback(lambda x: call_log.append('c'))
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ call_log.append('setUp')
+ return a
+ def test_success(self):
+ call_log.append('test')
+ return b
+ def tearDown(self):
+ super(SomeCase, self).tearDown()
+ call_log.append('tearDown')
+ return c
+ test = SomeCase('test_success')
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ reactor = self.make_reactor()
+ def fire_a():
+ self.assertThat(call_log, Equals(['setUp']))
+ a.callback(None)
+ def fire_b():
+ self.assertThat(call_log, Equals(['setUp', 'a', 'test']))
+ b.callback(None)
+ def fire_c():
+ self.assertThat(
+ call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown']))
+ c.callback(None)
+ reactor.callLater(timeout * 0.25, fire_a)
+ reactor.callLater(timeout * 0.5, fire_b)
+ reactor.callLater(timeout * 0.75, fire_c)
+ runner.run(result)
+ self.assertThat(
+ call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c']))
+
+ def test_async_cleanups(self):
+ # Cleanups added with addCleanup can return
+ # Deferreds. AsynchronousDeferredRunTest will run each of them in
+ # turn.
+ class SomeCase(TestCase):
+ def test_whatever(self):
+ pass
+ test = SomeCase('test_whatever')
+ call_log = []
+ a = defer.Deferred().addCallback(lambda x: call_log.append('a'))
+ b = defer.Deferred().addCallback(lambda x: call_log.append('b'))
+ c = defer.Deferred().addCallback(lambda x: call_log.append('c'))
+ test.addCleanup(lambda: a)
+ test.addCleanup(lambda: b)
+ test.addCleanup(lambda: c)
+ def fire_a():
+ self.assertThat(call_log, Equals([]))
+ a.callback(None)
+ def fire_b():
+ self.assertThat(call_log, Equals(['a']))
+ b.callback(None)
+ def fire_c():
+ self.assertThat(call_log, Equals(['a', 'b']))
+ c.callback(None)
+ timeout = self.make_timeout()
+ reactor = self.make_reactor()
+ reactor.callLater(timeout * 0.25, fire_a)
+ reactor.callLater(timeout * 0.5, fire_b)
+ reactor.callLater(timeout * 0.75, fire_c)
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(call_log, Equals(['a', 'b', 'c']))
+
+ def test_clean_reactor(self):
+ # If there's cruft left over in the reactor, the test fails.
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ reactor.callLater(timeout * 10.0, lambda: None)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals(
+ [('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_unhandled_error_from_deferred(self):
+ # If there's a Deferred with an unhandled error, the test fails. Each
+ # unhandled error is reported with a separate traceback.
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ # Note we aren't returning the Deferred so that the error will
+ # be unhandled.
+ defer.maybeDeferred(lambda: 1/0)
+ defer.maybeDeferred(lambda: 2/0)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ result._events[1] = ('addError', test, None)
+ self.assertThat(result._events, Equals(
+ [('startTest', test),
+ ('addError', test, None),
+ ('stopTest', test)]))
+ self.assertThat(
+ error, KeysEqual(
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ 'unhandled-error-in-deferred-1',
+ ))
+
+ def test_unhandled_error_from_deferred_combined_with_error(self):
+ # If there's a Deferred with an unhandled error, the test fails. Each
+ # unhandled error is reported with a separate traceback, and the error
+ # is still reported.
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ # Note we aren't returning the Deferred so that the error will
+ # be unhandled.
+ defer.maybeDeferred(lambda: 1/0)
+ 2 / 0
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ result._events[1] = ('addError', test, None)
+ self.assertThat(result._events, Equals(
+ [('startTest', test),
+ ('addError', test, None),
+ ('stopTest', test)]))
+ self.assertThat(
+ error, KeysEqual(
+ 'traceback',
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ ))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_keyboard_interrupt_stops_test_run(self):
+ # If we get a SIGINT during a test run, the test stops and no more
+ # tests run.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ raise self.skipTest("SIGINT unavailable")
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout * 5)
+ result = self.make_result()
+ reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:runner.run(result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_keyboard_interrupt_stops_test_run(self):
+ # If we get a SIGINT during a test run, the test stops and no more
+ # tests run.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ raise self.skipTest("SIGINT unavailable")
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout * 5)
+ result = self.make_result()
+ reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:runner.run(result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ def test_timeout_causes_test_error(self):
+ # If a test times out, it reports itself as having failed with a
+ # TimeoutError.
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ self.assertThat(
+ [event[:2] for event in result._events], Equals(
+ [('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ self.assertIn('TimeoutError', str(error['traceback']))
+
+ def test_convenient_construction(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ reactor = object()
+ timeout = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout)
+ runner = factory(self, [handler])
+ self.assertIs(reactor, runner._reactor)
+ self.assertIs(timeout, runner._timeout)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_use_convenient_factory(self):
+ # Make sure that the factory can actually be used.
+ factory = AsynchronousDeferredRunTest.make_factory()
+ class SomeCase(TestCase):
+ run_tests_with = factory
+ def test_something(self):
+ pass
+ case = SomeCase('test_something')
+ case.run()
+
+ def test_convenient_construction_default_reactor(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ reactor = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor)
+ runner = factory(self, [handler])
+ self.assertIs(reactor, runner._reactor)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_convenient_construction_default_timeout(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ timeout = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout)
+ runner = factory(self, [handler])
+ self.assertIs(timeout, runner._timeout)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_convenient_construction_default_debugging(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(debug=True)
+ runner = factory(self, [handler])
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+ self.assertEqual(True, runner._debug)
+
+ def test_deferred_error(self):
+ class SomeTest(TestCase):
+ def test_something(self):
+ return defer.maybeDeferred(lambda: 1/0)
+ test = SomeTest('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_only_addError_once(self):
+ # Even if the reactor is unclean and the test raises an error and the
+ # cleanups raise errors, we only called addError once per test.
+ reactor = self.make_reactor()
+ class WhenItRains(TestCase):
+ def it_pours(self):
+ # Add a dirty cleanup.
+ self.addCleanup(lambda: 3 / 0)
+ # Dirty the reactor.
+ from twisted.internet.protocol import ServerFactory
+ reactor.listenTCP(0, ServerFactory())
+ # Unhandled error.
+ defer.maybeDeferred(lambda: 2 / 0)
+ # Actual error.
+ raise RuntimeError("Excess precipitation")
+ test = WhenItRains('it_pours')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(
+ error, KeysEqual(
+ 'traceback',
+ 'traceback-1',
+ 'traceback-2',
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ ))
+
+ def test_log_err_is_error(self):
+ # An error logged during the test run is recorded as an error in the
+ # tests.
+ class LogAnError(TestCase):
+ def test_something(self):
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ log.err(f)
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('logged-error', 'twisted-log'))
+
+ def test_log_err_flushed_is_success(self):
+        # An error logged during the test run and then flushed with
+        # flush_logged_errors does not fail the test.
+ class LogAnError(TestCase):
+ def test_something(self):
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ log.err(f)
+ flush_logged_errors(ZeroDivisionError)
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ result._events,
+ Equals([
+ ('startTest', test),
+ ('addSuccess', test, {'twisted-log': text_content('')}),
+ ('stopTest', test)]))
+
+ def test_log_in_details(self):
+ class LogAnError(TestCase):
+ def test_something(self):
+ log.msg("foo")
+ 1/0
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_debugging_unchanged_during_test_by_default(self):
+ debugging = [(defer.Deferred.debug, DelayedCall.debug)]
+ class SomeCase(TestCase):
+ def test_debugging_enabled(self):
+ debugging.append((defer.Deferred.debug, DelayedCall.debug))
+ test = SomeCase('test_debugging_enabled')
+ runner = AsynchronousDeferredRunTest(
+ test, handlers=test.exception_handlers,
+ reactor=self.make_reactor(), timeout=self.make_timeout())
+ runner.run(self.make_result())
+ self.assertEqual(debugging[0], debugging[1])
+
+ def test_debugging_enabled_during_test_with_debug_flag(self):
+ self.patch(defer.Deferred, 'debug', False)
+ self.patch(DelayedCall, 'debug', False)
+ debugging = []
+ class SomeCase(TestCase):
+ def test_debugging_enabled(self):
+ debugging.append((defer.Deferred.debug, DelayedCall.debug))
+ test = SomeCase('test_debugging_enabled')
+ runner = AsynchronousDeferredRunTest(
+ test, handlers=test.exception_handlers,
+ reactor=self.make_reactor(), timeout=self.make_timeout(),
+ debug=True)
+ runner.run(self.make_result())
+ self.assertEqual([(True, True)], debugging)
+ self.assertEqual(False, defer.Deferred.debug)
+ self.assertEqual(False, defer.Deferred.debug)
+
+
+class TestAssertFailsWith(NeedsTwistedTestCase):
+ """Tests for `assert_fails_with`."""
+
+ if SynchronousDeferredRunTest is not None:
+ run_tests_with = SynchronousDeferredRunTest
+
+ def test_assert_fails_with_success(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+ # succeeds.
+ marker = object()
+ d = assert_fails_with(defer.succeed(marker), RuntimeError)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError not raised (%r returned)" % (marker,)))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_success_multiple_types(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+ # succeeds.
+ marker = object()
+ d = assert_fails_with(
+ defer.succeed(marker), RuntimeError, ZeroDivisionError)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError, ZeroDivisionError not raised "
+ "(%r returned)" % (marker,)))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_wrong_exception(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+        # fails with an exception of an unexpected type.
+ d = assert_fails_with(
+ defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ lines = str(failure.value).splitlines()
+ self.assertThat(
+ lines[:2],
+ Equals([
+ ("ZeroDivisionError raised instead of RuntimeError, "
+ "KeyboardInterrupt:"),
+ " Traceback (most recent call last):",
+ ]))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_expected_exception(self):
+ # assert_fails_with calls back with the value of the failure if it's
+ # one of the expected types of failures.
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ d = assert_fails_with(defer.fail(f), ZeroDivisionError)
+ return d.addCallback(self.assertThat, Equals(f.value))
+
+ def test_custom_failure_exception(self):
+ # If assert_fails_with is passed a 'failureException' keyword
+ # argument, then it will raise that instead of `AssertionError`.
+ class CustomException(Exception):
+ pass
+ marker = object()
+ d = assert_fails_with(
+ defer.succeed(marker), RuntimeError,
+ failureException=CustomException)
+ def check_result(failure):
+ failure.trap(CustomException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError not raised (%r returned)" % (marker,)))
+ return d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+
+
+def test_suite():
+ from unittest import TestLoader, TestSuite
+ return TestSuite(
+ [TestLoader().loadTestsFromName(__name__),
+ make_integration_tests()])
diff --git a/lib/testtools/testtools/tests/test_fixturesupport.py b/lib/testtools/testtools/tests/test_fixturesupport.py
new file mode 100644
index 0000000000..ebdd0373e2
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_fixturesupport.py
@@ -0,0 +1,77 @@
+import unittest
+
+from testtools import (
+ TestCase,
+ content,
+ content_type,
+ )
+from testtools.helpers import try_import
+from testtools.tests.helpers import (
+ ExtendedTestResult,
+ )
+
+fixtures = try_import('fixtures')
+LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture')
+
+
+class TestFixtureSupport(TestCase):
+
+ def setUp(self):
+ super(TestFixtureSupport, self).setUp()
+ if fixtures is None or LoggingFixture is None:
+ self.skipTest("Need fixtures")
+
+ def test_useFixture(self):
+ fixture = LoggingFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = unittest.TestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(['setUp', 'cleanUp'], fixture.calls)
+
+ def test_useFixture_cleanups_raise_caught(self):
+ calls = []
+ def raiser(ignored):
+ calls.append('called')
+ raise Exception('foo')
+ fixture = fixtures.FunctionFixture(lambda:None, raiser)
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = unittest.TestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertFalse(result.wasSuccessful())
+ self.assertEqual(['called'], calls)
+
+ def test_useFixture_details_captured(self):
+ class DetailsFixture(fixtures.Fixture):
+ def setUp(self):
+ fixtures.Fixture.setUp(self)
+ self.addCleanup(delattr, self, 'content')
+ self.content = ['content available until cleanUp']
+ self.addDetail('content',
+ content.Content(content_type.UTF8_TEXT, self.get_content))
+ def get_content(self):
+ return self.content
+ fixture = DetailsFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ # Add a colliding detail (both should show up)
+ self.addDetail('content',
+ content.Content(content_type.UTF8_TEXT, lambda:['foo']))
+ result = ExtendedTestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertEqual('addSuccess', result._events[-2][0])
+ details = result._events[-2][2]
+ self.assertEqual(['content', 'content-1'], sorted(details.keys()))
+ self.assertEqual('foo', ''.join(details['content'].iter_text()))
+ self.assertEqual('content available until cleanUp',
+ ''.join(details['content-1'].iter_text()))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_helpers.py b/lib/testtools/testtools/tests/test_helpers.py
new file mode 100644
index 0000000000..f1894a4613
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_helpers.py
@@ -0,0 +1,106 @@
+# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.helpers import (
+ try_import,
+ try_imports,
+ )
+from testtools.matchers import (
+ Equals,
+ Is,
+ )
+
+
+class TestTryImport(TestCase):
+
+ def test_doesnt_exist(self):
+ # try_import('thing', foo) returns foo if 'thing' doesn't exist.
+ marker = object()
+ result = try_import('doesntexist', marker)
+ self.assertThat(result, Is(marker))
+
+ def test_None_is_default_alternative(self):
+ # try_import('thing') returns None if 'thing' doesn't exist.
+ result = try_import('doesntexist')
+ self.assertThat(result, Is(None))
+
+ def test_existing_module(self):
+ # try_import('thing', foo) imports 'thing' and returns it if it's a
+ # module that exists.
+ result = try_import('os', object())
+ import os
+ self.assertThat(result, Is(os))
+
+ def test_existing_submodule(self):
+ # try_import('thing.another', foo) imports 'thing' and returns it if
+ # it's a module that exists.
+ result = try_import('os.path', object())
+ import os
+ self.assertThat(result, Is(os.path))
+
+ def test_nonexistent_submodule(self):
+ # try_import('thing.another', foo) imports 'thing' and returns foo if
+ # 'another' doesn't exist.
+ marker = object()
+ result = try_import('os.doesntexist', marker)
+ self.assertThat(result, Is(marker))
+
+ def test_object_from_module(self):
+ # try_import('thing.object') imports 'thing' and returns
+ # 'thing.object' if 'thing' is a module and 'object' is not.
+ result = try_import('os.path.join')
+ import os
+ self.assertThat(result, Is(os.path.join))
+
+
+class TestTryImports(TestCase):
+
+ def test_doesnt_exist(self):
+ # try_imports('thing', foo) returns foo if 'thing' doesn't exist.
+ marker = object()
+ result = try_imports(['doesntexist'], marker)
+ self.assertThat(result, Is(marker))
+
+ def test_fallback(self):
+ result = try_imports(['doesntexist', 'os'])
+ import os
+ self.assertThat(result, Is(os))
+
+ def test_None_is_default_alternative(self):
+ # try_imports('thing') returns None if 'thing' doesn't exist.
+ e = self.assertRaises(
+ ImportError, try_imports, ['doesntexist', 'noreally'])
+ self.assertThat(
+ str(e),
+ Equals("Could not import any of: doesntexist, noreally"))
+
+ def test_existing_module(self):
+ # try_imports('thing', foo) imports 'thing' and returns it if it's a
+ # module that exists.
+ result = try_imports(['os'], object())
+ import os
+ self.assertThat(result, Is(os))
+
+ def test_existing_submodule(self):
+ # try_imports('thing.another', foo) imports 'thing' and returns it if
+ # it's a module that exists.
+ result = try_imports(['os.path'], object())
+ import os
+ self.assertThat(result, Is(os.path))
+
+ def test_nonexistent_submodule(self):
+ # try_imports('thing.another', foo) imports 'thing' and returns foo if
+ # 'another' doesn't exist.
+ marker = object()
+ result = try_imports(['os.doesntexist'], marker)
+ self.assertThat(result, Is(marker))
+
+ def test_fallback_submodule(self):
+ result = try_imports(['os.doesntexist', 'os.path'])
+ import os
+ self.assertThat(result, Is(os.path))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_matchers.py b/lib/testtools/testtools/tests/test_matchers.py
new file mode 100644
index 0000000000..bbcd87eff8
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_matchers.py
@@ -0,0 +1,451 @@
+# Copyright (c) 2008-2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for matchers."""
+
+import doctest
+import sys
+
+from testtools import (
+ Matcher, # check that Matcher is exposed at the top level for docs.
+ TestCase,
+ )
+from testtools.matchers import (
+ Annotate,
+ Equals,
+ DocTestMatches,
+ DoesNotEndWith,
+ DoesNotStartWith,
+ EndsWith,
+ KeysEqual,
+ Is,
+ LessThan,
+ MatchesAny,
+ MatchesAll,
+ MatchesException,
+ Mismatch,
+ Not,
+ NotEquals,
+ Raises,
+ raises,
+ StartsWith,
+ )
+
+# Silence pyflakes.
+Matcher
+
+
+class TestMismatch(TestCase):
+
+ def test_constructor_arguments(self):
+ mismatch = Mismatch("some description", {'detail': "things"})
+ self.assertEqual("some description", mismatch.describe())
+ self.assertEqual({'detail': "things"}, mismatch.get_details())
+
+ def test_constructor_no_arguments(self):
+ mismatch = Mismatch()
+ self.assertThat(mismatch.describe,
+ Raises(MatchesException(NotImplementedError)))
+ self.assertEqual({}, mismatch.get_details())
+
+
+class TestMatchersInterface(object):
+
+ def test_matches_match(self):
+ matcher = self.matches_matcher
+ matches = self.matches_matches
+ mismatches = self.matches_mismatches
+ for candidate in matches:
+ self.assertEqual(None, matcher.match(candidate))
+ for candidate in mismatches:
+ mismatch = matcher.match(candidate)
+ self.assertNotEqual(None, mismatch)
+ self.assertNotEqual(None, getattr(mismatch, 'describe', None))
+
+ def test__str__(self):
+ # [(expected, object to __str__)].
+ examples = self.str_examples
+ for expected, matcher in examples:
+ self.assertThat(matcher, DocTestMatches(expected))
+
+ def test_describe_difference(self):
+ # [(expected, matchee, matcher), ...]
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ self.assertEqual(difference, mismatch.describe())
+
+ def test_mismatch_details(self):
+ # The mismatch object must provide get_details, which must return a
+ # dictionary mapping names to Content objects.
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ details = mismatch.get_details()
+ self.assertEqual(dict(details), details)
+
+
+class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
+ matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
+ matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
+
+ str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
+ DocTestMatches("Ran 1 test in ...s")),
+ ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
+ ]
+
+ describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n'
+ ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
+ DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesSpecific(TestCase):
+
+ def test___init__simple(self):
+ matcher = DocTestMatches("foo")
+ self.assertEqual("foo\n", matcher.want)
+
+ def test___init__flags(self):
+ matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
+ self.assertEqual("bar\n", matcher.want)
+ self.assertEqual(doctest.ELLIPSIS, matcher.flags)
+
+
+class TestEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Equals(1)
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
+
+ describe_examples = [("1 != 2", 2, Equals(1))]
+
+
+class TestNotEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = NotEquals(1)
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
+
+ describe_examples = [("1 == 1", 1, NotEquals(1))]
+
+
+class TestIsInterface(TestCase, TestMatchersInterface):
+
+ foo = object()
+ bar = object()
+
+ matches_matcher = Is(foo)
+ matches_matches = [foo]
+ matches_mismatches = [bar, 1]
+
+ str_examples = [("Is(2)", Is(2))]
+
+ describe_examples = [("1 is not 2", 2, Is(1))]
+
+
+class TestLessThanInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = LessThan(4)
+ matches_matches = [-5, 3]
+ matches_mismatches = [4, 5, 5000]
+
+ str_examples = [
+ ("LessThan(12)", LessThan(12)),
+ ]
+
+ describe_examples = [('4 is >= 4', 4, LessThan(4))]
+
+
+def make_error(type, *args, **kwargs):
+ try:
+ raise type(*args, **kwargs)
+ except type:
+ return sys.exc_info()
+
+
+class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError("foo"))
+ error_foo = make_error(ValueError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo]
+ matches_mismatches = [error_bar, error_base_foo]
+
+ str_examples = [
+ ("MatchesException(Exception('foo',))",
+ MatchesException(Exception('foo')))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError("foo"))),
+ ("ValueError('bar',) has different arguments to ValueError('foo',).",
+ error_bar,
+ MatchesException(ValueError("foo"))),
+ ]
+
+
+class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError)
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_base_foo]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError)),
+ ]
+
+
+class TestNotInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Not(Equals(1))
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("Not(Equals(1))", Not(Equals(1))),
+ ("Not(Equals('1'))", Not(Equals('1')))]
+
+ describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
+
+
+class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
+ matches_matches = ["1", "2"]
+ matches_mismatches = ["3"]
+
+ str_examples = [(
+ "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
+ MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
+ ]
+
+ describe_examples = [("""Differences: [
+Expected:
+ 1
+Got:
+ 3
+
+Expected:
+ 2
+Got:
+ 3
+
+]""",
+ "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
+
+
+class TestMatchesAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAll(NotEquals(1), NotEquals(2))",
+ MatchesAll(NotEquals(1), NotEquals(2)))]
+
+ describe_examples = [("""Differences: [
+1 == 1
+]""",
+ 1, MatchesAll(NotEquals(1), NotEquals(2)))]
+
+
+class TestKeysEqual(TestCase, TestMatchersInterface):
+
+ matches_matcher = KeysEqual('foo', 'bar')
+ matches_matches = [
+ {'foo': 0, 'bar': 1},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 0},
+ {'bar': 1},
+ {'foo': 0, 'bar': 1, 'baz': 2},
+ {'a': None, 'b': None, 'c': None},
+ ]
+
+ str_examples = [
+ ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
+ ]
+
+ describe_examples = [
+ ("['bar', 'foo'] does not match {'baz': 2, 'foo': 0, 'bar': 1}: "
+ "Keys not equal",
+ {'foo': 0, 'bar': 1, 'baz': 2}, KeysEqual('foo', 'bar')),
+ ]
+
+
+class TestAnnotate(TestCase, TestMatchersInterface):
+
+ matches_matcher = Annotate("foo", Equals(1))
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
+
+ describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
+
+
+class TestRaisesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises()
+ def boom():
+ raise Exception('foo')
+ matches_matches = [boom]
+ matches_mismatches = [lambda:None]
+
+ # Tricky to get function objects to render constantly, and the interfaces
+ # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises(
+ exception_matcher=MatchesException(Exception('foo')))
+ def boom_bar():
+ raise Exception('bar')
+ def boom_foo():
+ raise Exception('foo')
+ matches_matches = [boom_foo]
+ matches_mismatches = [lambda:None, boom_bar]
+
+ # Tricky to get function objects to render constantly, and the interfaces
+ # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesBaseTypes(TestCase):
+
+ def raiser(self):
+ raise KeyboardInterrupt('foo')
+
+ def test_KeyboardInterrupt_matched(self):
+ # When KeyboardInterrupt is matched, it is swallowed.
+ matcher = Raises(MatchesException(KeyboardInterrupt))
+ self.assertThat(self.raiser, matcher)
+
+ def test_KeyboardInterrupt_propogates(self):
+        # The default 'it raised' propagates KeyboardInterrupt.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ matcher = Raises()
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+ def test_KeyboardInterrupt_match_Exception_propogates(self):
+ # If the raised exception isn't matched, and it is not a subclass of
+        # Exception, it is propagated.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ if sys.version_info > (2, 5):
+ matcher = Raises(MatchesException(Exception))
+ else:
+ # On Python 2.4 KeyboardInterrupt is a StandardError subclass
+                # but should propagate from less generic exception matchers
+ matcher = Raises(MatchesException(EnvironmentError))
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+
+class TestRaisesConvenience(TestCase):
+
+ def test_exc_type(self):
+ self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+ def test_exc_value(self):
+ e = RuntimeError("You lose!")
+ def raiser():
+ raise e
+ self.assertThat(raiser, raises(e))
+
+
+class DoesNotStartWithTests(TestCase):
+
+ def test_describe(self):
+ mismatch = DoesNotStartWith("fo", "bo")
+ self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
+
+
+class StartsWithTests(TestCase):
+
+ def test_str(self):
+ matcher = StartsWith("bar")
+ self.assertEqual("Starts with 'bar'.", str(matcher))
+
+ def test_match(self):
+ matcher = StartsWith("bar")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_start_with(self):
+ matcher = StartsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+class DoesNotEndWithTests(TestCase):
+
+ def test_describe(self):
+ mismatch = DoesNotEndWith("fo", "bo")
+ self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
+
+
+class EndsWithTests(TestCase):
+
+ def test_str(self):
+ matcher = EndsWith("bar")
+ self.assertEqual("Ends with 'bar'.", str(matcher))
+
+ def test_match(self):
+ matcher = EndsWith("arf")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_end_with(self):
+ matcher = EndsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_monkey.py b/lib/testtools/testtools/tests/test_monkey.py
new file mode 100644
index 0000000000..540a2ee909
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_monkey.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2010 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""Tests for testtools.monkey."""
+
+from testtools import TestCase
+from testtools.matchers import MatchesException, Raises
+from testtools.monkey import MonkeyPatcher, patch
+
+
+class TestObj:
+
+ def __init__(self):
+ self.foo = 'foo value'
+ self.bar = 'bar value'
+ self.baz = 'baz value'
+
+
+class MonkeyPatcherTest(TestCase):
+ """
+ Tests for 'MonkeyPatcher' monkey-patching class.
+ """
+
+ def setUp(self):
+ super(MonkeyPatcherTest, self).setUp()
+ self.test_object = TestObj()
+ self.original_object = TestObj()
+ self.monkey_patcher = MonkeyPatcher()
+
+ def test_empty(self):
+ # A monkey patcher without patches doesn't change a thing.
+ self.monkey_patcher.patch()
+
+ # We can't assert that all state is unchanged, but at least we can
+ # check our test object.
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+ self.assertEquals(self.original_object.bar, self.test_object.bar)
+ self.assertEquals(self.original_object.baz, self.test_object.baz)
+
+ def test_construct_with_patches(self):
+ # Constructing a 'MonkeyPatcher' with patches adds all of the given
+ # patches to the patch list.
+ patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'),
+ (self.test_object, 'bar', 'hehe'))
+ patcher.patch()
+ self.assertEquals('haha', self.test_object.foo)
+ self.assertEquals('hehe', self.test_object.bar)
+ self.assertEquals(self.original_object.baz, self.test_object.baz)
+
+ def test_patch_existing(self):
+ # Patching an attribute that exists sets it to the value defined in the
+ # patch.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.foo, 'haha')
+
+ def test_patch_non_existing(self):
+ # Patching a non-existing attribute sets it to the value defined in
+ # the patch.
+ self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.doesntexist, 'value')
+
+ def test_restore_non_existing(self):
+ # Restoring a value that didn't exist before the patch deletes the
+ # value.
+ self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+ self.monkey_patcher.patch()
+ self.monkey_patcher.restore()
+ marker = object()
+ self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker))
+
+ def test_patch_already_patched(self):
+ # Adding a patch for an object and attribute that already have a patch
+ # overrides the existing patch.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.foo, 'BLAH')
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+
+ def test_restore_twice_is_a_no_op(self):
+ # Restoring an already-restored monkey patch is a no-op.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+ self.monkey_patcher.patch()
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+
+ def test_run_with_patches_decoration(self):
+ # run_with_patches runs the given callable, passing in all arguments
+ # and keyword arguments, and returns the return value of the callable.
+ log = []
+
+ def f(a, b, c=None):
+ log.append((a, b, c))
+ return 'foo'
+
+ result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10)
+ self.assertEquals('foo', result)
+ self.assertEquals([(1, 2, 10)], log)
+
+ def test_repeated_run_with_patches(self):
+ # We can call the same function with run_with_patches more than
+ # once. All patches apply for each call.
+ def f():
+ return (self.test_object.foo, self.test_object.bar,
+ self.test_object.baz)
+
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ result = self.monkey_patcher.run_with_patches(f)
+ self.assertEquals(
+ ('haha', self.original_object.bar, self.original_object.baz),
+ result)
+ result = self.monkey_patcher.run_with_patches(f)
+ self.assertEquals(
+ ('haha', self.original_object.bar, self.original_object.baz),
+ result)
+
+ def test_run_with_patches_restores(self):
+ # run_with_patches restores the original values after the function has
+ # executed.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+ self.monkey_patcher.run_with_patches(lambda: None)
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+
+ def test_run_with_patches_restores_on_exception(self):
+ # run_with_patches restores the original values even when the function
+ # raises an exception.
+ def _():
+ self.assertEquals(self.test_object.foo, 'haha')
+ self.assertEquals(self.test_object.bar, 'blahblah')
+ raise RuntimeError("Something went wrong!")
+
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah')
+
+ self.assertThat(lambda:self.monkey_patcher.run_with_patches(_),
+ Raises(MatchesException(RuntimeError("Something went wrong!"))))
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+ self.assertEquals(self.test_object.bar, self.original_object.bar)
+
+
+class TestPatchHelper(TestCase):
+
+ def test_patch_patches(self):
+ # patch(obj, name, value) sets obj.name to value.
+ test_object = TestObj()
+ patch(test_object, 'foo', 42)
+ self.assertEqual(42, test_object.foo)
+
+ def test_patch_returns_cleanup(self):
+ # patch(obj, name, value) returns a nullary callable that restores obj
+ # to its original state when run.
+ test_object = TestObj()
+ original = test_object.foo
+ cleanup = patch(test_object, 'foo', 42)
+ cleanup()
+ self.assertEqual(original, test_object.foo)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_run.py b/lib/testtools/testtools/tests/test_run.py
new file mode 100644
index 0000000000..8f88fb62ec
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_run.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2010 Testtools authors. See LICENSE for details.
+
+"""Tests for the test runner logic."""
+
+from testtools.helpers import try_import, try_imports
+fixtures = try_import('fixtures')
+StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
+
+import testtools
+from testtools import TestCase, run
+
+
+if fixtures:
+ class SampleTestFixture(fixtures.Fixture):
+ """Creates testtools.runexample temporarily."""
+
+ def __init__(self):
+ self.package = fixtures.PythonPackage(
+ 'runexample', [('__init__.py', """
+from testtools import TestCase
+
+class TestFoo(TestCase):
+ def test_bar(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+""")])
+
+ def setUp(self):
+ super(SampleTestFixture, self).setUp()
+ self.useFixture(self.package)
+ testtools.__path__.append(self.package.base)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+
+
+class TestRun(TestCase):
+
+ def test_run_list(self):
+ if fixtures is None:
+ self.skipTest("Need fixtures")
+ package = self.useFixture(SampleTestFixture())
+ out = StringIO()
+ run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+testtools.runexample.TestFoo.test_quux
+""", out.getvalue())
+
+ def test_run_load_list(self):
+ if fixtures is None:
+ self.skipTest("Need fixtures")
+ package = self.useFixture(SampleTestFixture())
+ out = StringIO()
+ # We load two tests - one that exists and one that doesn't, and we
+ # should get the one that exists and neither the one that doesn't nor
+ # the unmentioned one that does.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+""")
+ finally:
+ f.close()
+ run.main(['prog', '-l', '--load-list', tempname,
+ 'testtools.runexample.test_suite'], out)
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_runtest.py b/lib/testtools/testtools/tests/test_runtest.py
new file mode 100644
index 0000000000..02863ac6fd
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_runtest.py
@@ -0,0 +1,300 @@
+# Copyright (c) 2009-2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for the RunTest single test execution logic."""
+
+from testtools import (
+ ExtendedToOriginalDecorator,
+ run_test_with,
+ RunTest,
+ TestCase,
+ TestResult,
+ )
+from testtools.matchers import MatchesException, Is, Raises
+from testtools.tests.helpers import ExtendedTestResult
+
+
+class TestRunTest(TestCase):
+
+ def make_case(self):
+ class Case(TestCase):
+ def test(self):
+ pass
+ return Case('test')
+
+ def test___init___short(self):
+ run = RunTest("bar")
+ self.assertEqual("bar", run.case)
+ self.assertEqual([], run.handlers)
+
+ def test__init____handlers(self):
+ handlers = [("quux", "baz")]
+ run = RunTest("bar", handlers)
+ self.assertEqual(handlers, run.handlers)
+
+ def test_run_with_result(self):
+ # test.run passes result down to _run_test_method.
+ log = []
+ class Case(TestCase):
+ def _run_test_method(self, result):
+ log.append(result)
+ case = Case('_run_test_method')
+ run = RunTest(case, lambda x: log.append(x))
+ result = TestResult()
+ run.run(result)
+ self.assertEqual(1, len(log))
+ self.assertEqual(result, log[0].decorated)
+
+ def test_run_no_result_manages_new_result(self):
+ log = []
+ run = RunTest(self.make_case(), lambda x: log.append(x) or x)
+ result = run.run()
+ self.assertIsInstance(result.decorated, TestResult)
+
+ def test__run_core_called(self):
+ case = self.make_case()
+ log = []
+ run = RunTest(case, lambda x: x)
+ run._run_core = lambda: log.append('foo')
+ run.run()
+ self.assertEqual(['foo'], log)
+
+ def test__run_user_does_not_catch_keyboard(self):
+ case = self.make_case()
+ def raises():
+ raise KeyboardInterrupt("yo")
+ run = RunTest(case, None)
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(KeyboardInterrupt)))
+ self.assertEqual([], run.result._events)
+
+ def test__run_user_calls_onException(self):
+ case = self.make_case()
+ log = []
+ def handler(exc_info):
+ log.append("got it")
+ self.assertEqual(3, len(exc_info))
+ self.assertIsInstance(exc_info[1], KeyError)
+ self.assertIs(KeyError, exc_info[0])
+ case.addOnException(handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ run = RunTest(case, [(KeyError, None)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual(["got it"], log)
+
+ def test__run_user_can_catch_Exception(self):
+ case = self.make_case()
+ e = Exception('Yo')
+ def raises():
+ raise e
+ log = []
+ run = RunTest(case, [(Exception, None)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_uncaught_Exception_raised(self):
+ case = self.make_case()
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(KeyError)))
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
+ case = self.make_case()
+ def broken_handler(exc_info):
+            # ValueError because that's what we know how to catch - and must
+ # not.
+ raise ValueError('boo')
+ case.addOnException(broken_handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(ValueError)))
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_returns_result(self):
+ case = self.make_case()
+ def returns():
+ return 1
+ run = RunTest(case)
+ run.result = ExtendedTestResult()
+ self.assertEqual(1, run._run_user(returns))
+ self.assertEqual([], run.result._events)
+
+ def test__run_one_decorates_result(self):
+ log = []
+ class Run(RunTest):
+ def _run_prepared_result(self, result):
+ log.append(result)
+ return result
+ run = Run(self.make_case(), lambda x: x)
+ result = run._run_one('foo')
+ self.assertEqual([result], log)
+ self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
+ self.assertEqual('foo', result.decorated)
+
+ def test__run_prepared_result_calls_start_and_stop_test(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ run = RunTest(case, lambda x: x)
+ run.run(result)
+ self.assertEqual([
+ ('startTest', case),
+ ('addSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test__run_prepared_result_calls_stop_test_always(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ def inner():
+ raise Exception("foo")
+ run = RunTest(case, lambda x: x)
+ run._run_core = inner
+ self.assertThat(lambda: run.run(result),
+ Raises(MatchesException(Exception("foo"))))
+ self.assertEqual([
+ ('startTest', case),
+ ('stopTest', case),
+ ], result._events)
+
+
+class CustomRunTest(RunTest):
+
+ marker = object()
+
+ def run(self, result=None):
+ return self.marker
+
+
+class TestTestCaseSupportForRunTest(TestCase):
+
+ def test_pass_custom_run_test(self):
+ class SomeCase(TestCase):
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=CustomRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_default_is_runTest_class_variable(self):
+ class SomeCase(TestCase):
+ run_tests_with = CustomRunTest
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_constructor_argument_overrides_class_variable(self):
+ # If a 'runTest' argument is passed to the test's constructor, that
+ # overrides the class variable.
+ marker = object()
+ class DifferentRunTest(RunTest):
+ def run(self, result=None):
+ return marker
+ class SomeCase(TestCase):
+ run_tests_with = CustomRunTest
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=DifferentRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+ def test_decorator_for_run_test(self):
+ # Individual test methods can be marked as needing a special runner.
+ class SomeCase(TestCase):
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_extended_decorator_for_run_test(self):
+ # Individual test methods can be marked as needing a special runner.
+ # Extra arguments can be passed to the decorator which will then be
+ # passed on to the RunTest object.
+ marker = object()
+ class FooRunTest(RunTest):
+ def __init__(self, case, handlers=None, bar=None):
+ super(FooRunTest, self).__init__(case, handlers)
+ self.bar = bar
+ def run(self, result=None):
+ return self.bar
+ class SomeCase(TestCase):
+ @run_test_with(FooRunTest, bar=marker)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+ def test_works_as_inner_decorator(self):
+ # Even if run_test_with is the innermost decorator, it will be
+ # respected.
+ def wrapped(function):
+ """Silly, trivial decorator."""
+ def decorated(*args, **kwargs):
+ return function(*args, **kwargs)
+ decorated.__name__ = function.__name__
+ decorated.__dict__.update(function.__dict__)
+ return decorated
+ class SomeCase(TestCase):
+ @wrapped
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_constructor_overrides_decorator(self):
+ # If a 'runTest' argument is passed to the test's constructor, that
+ # overrides the decorator.
+ marker = object()
+ class DifferentRunTest(RunTest):
+ def run(self, result=None):
+ return marker
+ class SomeCase(TestCase):
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=DifferentRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_spinner.py b/lib/testtools/testtools/tests/test_spinner.py
new file mode 100644
index 0000000000..5c6139d0e9
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_spinner.py
@@ -0,0 +1,332 @@
+# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for the evil Twisted reactor-spinning we do."""
+
+import os
+import signal
+
+from testtools import (
+ skipIf,
+ TestCase,
+ )
+from testtools.helpers import try_import
+from testtools.matchers import (
+ Equals,
+ Is,
+ MatchesException,
+ Raises,
+ )
+
+_spinner = try_import('testtools._spinner')
+
+defer = try_import('twisted.internet.defer')
+Failure = try_import('twisted.python.failure.Failure')
+
+
+class NeedsTwistedTestCase(TestCase):
+
+ def setUp(self):
+ super(NeedsTwistedTestCase, self).setUp()
+ if defer is None or Failure is None:
+ self.skipTest("Need Twisted to run")
+
+
+class TestNotReentrant(NeedsTwistedTestCase):
+
+ def test_not_reentrant(self):
+ # A function decorated as not being re-entrant will raise a
+ # _spinner.ReentryError if it is called while it is running.
+ calls = []
+ @_spinner.not_reentrant
+ def log_something():
+ calls.append(None)
+ if len(calls) < 5:
+ log_something()
+ self.assertThat(
+ log_something, Raises(MatchesException(_spinner.ReentryError)))
+ self.assertEqual(1, len(calls))
+
+ def test_deeper_stack(self):
+ calls = []
+ @_spinner.not_reentrant
+ def g():
+ calls.append(None)
+ if len(calls) < 5:
+ f()
+ @_spinner.not_reentrant
+ def f():
+ calls.append(None)
+ if len(calls) < 5:
+ g()
+ self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
+ self.assertEqual(2, len(calls))
+
+
+class TestExtractResult(NeedsTwistedTestCase):
+
+ def test_not_fired(self):
+ # _spinner.extract_result raises _spinner.DeferredNotFired if it's
+ # given a Deferred that has not fired.
+ self.assertThat(lambda:_spinner.extract_result(defer.Deferred()),
+ Raises(MatchesException(_spinner.DeferredNotFired)))
+
+ def test_success(self):
+ # _spinner.extract_result returns the value of the Deferred if it has
+ # fired successfully.
+ marker = object()
+ d = defer.succeed(marker)
+ self.assertThat(_spinner.extract_result(d), Equals(marker))
+
+ def test_failure(self):
+ # _spinner.extract_result raises the failure's exception if it's given
+ # a Deferred that is failing.
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = Failure()
+ d = defer.fail(f)
+ self.assertThat(lambda:_spinner.extract_result(d),
+ Raises(MatchesException(ZeroDivisionError)))
+
+
+class TestTrapUnhandledErrors(NeedsTwistedTestCase):
+
+ def test_no_deferreds(self):
+ marker = object()
+ result, errors = _spinner.trap_unhandled_errors(lambda: marker)
+ self.assertEqual([], errors)
+ self.assertIs(marker, result)
+
+ def test_unhandled_error(self):
+ failures = []
+ def make_deferred_but_dont_handle():
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = Failure()
+ failures.append(f)
+ defer.fail(f)
+ result, errors = _spinner.trap_unhandled_errors(
+ make_deferred_but_dont_handle)
+ self.assertIs(None, result)
+ self.assertEqual(failures, [error.failResult for error in errors])
+
+
+class TestRunInReactor(NeedsTwistedTestCase):
+
+ def make_reactor(self):
+ from twisted.internet import reactor
+ return reactor
+
+ def make_spinner(self, reactor=None):
+ if reactor is None:
+ reactor = self.make_reactor()
+ return _spinner.Spinner(reactor)
+
+ def make_timeout(self):
+ return 0.01
+
+ def test_function_called(self):
+ # run_in_reactor actually calls the function given to it.
+ calls = []
+ marker = object()
+ self.make_spinner().run(self.make_timeout(), calls.append, marker)
+ self.assertThat(calls, Equals([marker]))
+
+ def test_return_value_returned(self):
+ # run_in_reactor returns the value returned by the function given to
+ # it.
+ marker = object()
+ result = self.make_spinner().run(self.make_timeout(), lambda: marker)
+ self.assertThat(result, Is(marker))
+
+ def test_exception_reraised(self):
+ # If the given function raises an error, run_in_reactor re-raises that
+ # error.
+ self.assertThat(
+ lambda:self.make_spinner().run(self.make_timeout(), lambda: 1/0),
+ Raises(MatchesException(ZeroDivisionError)))
+
+ def test_keyword_arguments(self):
+ # run_in_reactor passes keyword arguments on.
+ calls = []
+ function = lambda *a, **kw: calls.extend([a, kw])
+ self.make_spinner().run(self.make_timeout(), function, foo=42)
+ self.assertThat(calls, Equals([(), {'foo': 42}]))
+
+ def test_not_reentrant(self):
+ # run_in_reactor raises an error if it is called inside another call
+ # to run_in_reactor.
+ spinner = self.make_spinner()
+ self.assertThat(lambda: spinner.run(
+ self.make_timeout(), spinner.run, self.make_timeout(),
+ lambda: None), Raises(MatchesException(_spinner.ReentryError)))
+
+ def test_deferred_value_returned(self):
+ # If the given function returns a Deferred, run_in_reactor returns the
+ # value in the Deferred at the end of the callback chain.
+ marker = object()
+ result = self.make_spinner().run(
+ self.make_timeout(), lambda: defer.succeed(marker))
+ self.assertThat(result, Is(marker))
+
+ def test_preserve_signal_handler(self):
+ signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
+ signals = filter(
+ None, (getattr(signal, name, None) for name in signals))
+ for sig in signals:
+ self.addCleanup(signal.signal, sig, signal.getsignal(sig))
+ new_hdlrs = list(lambda *a: None for _ in signals)
+ for sig, hdlr in zip(signals, new_hdlrs):
+ signal.signal(sig, hdlr)
+ spinner = self.make_spinner()
+ spinner.run(self.make_timeout(), lambda: None)
+ self.assertEqual(new_hdlrs, map(signal.getsignal, signals))
+
+ def test_timeout(self):
+ # If the function takes too long to run, we raise a
+ # _spinner.TimeoutError.
+ timeout = self.make_timeout()
+ self.assertThat(
+ lambda:self.make_spinner().run(timeout, lambda: defer.Deferred()),
+ Raises(MatchesException(_spinner.TimeoutError)))
+
+ def test_no_junk_by_default(self):
+ # If the reactor hasn't spun yet, then there cannot be any junk.
+ spinner = self.make_spinner()
+ self.assertThat(spinner.get_junk(), Equals([]))
+
+ def test_clean_do_nothing(self):
+ # If there's nothing going on in the reactor, then clean does nothing
+ # and returns an empty list.
+ spinner = self.make_spinner()
+ result = spinner._clean()
+ self.assertThat(result, Equals([]))
+
+ def test_clean_delayed_call(self):
+ # If there's a delayed call in the reactor, then clean cancels it and
+ # returns an empty list.
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ call = reactor.callLater(10, lambda: None)
+ results = spinner._clean()
+ self.assertThat(results, Equals([call]))
+ self.assertThat(call.active(), Equals(False))
+
+ def test_clean_delayed_call_cancelled(self):
+ # If there's a delayed call that's just been cancelled, then it's no
+ # longer there.
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ call = reactor.callLater(10, lambda: None)
+ call.cancel()
+ results = spinner._clean()
+ self.assertThat(results, Equals([]))
+
+ def test_clean_selectables(self):
+ # If there's still a selectable (e.g. a listening socket), then
+ # clean() removes it from the reactor's registry.
+ #
+ # Note that the socket is left open. This emulates a bug in trial.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ port = reactor.listenTCP(0, ServerFactory())
+ spinner.run(self.make_timeout(), lambda: None)
+ results = spinner.get_junk()
+ self.assertThat(results, Equals([port]))
+
+ def test_clean_running_threads(self):
+ import threading
+ import time
+ current_threads = list(threading.enumerate())
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ spinner = self.make_spinner(reactor)
+ spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
+ # Python before 2.5 has a race condition with thread handling where
+ # join() does not remove threads from enumerate before returning - the
+ # thread being joined does the removal. This was fixed in Python 2.5
+        # but we still support 2.4, so we have to work around the issue.
+ # http://bugs.python.org/issue1703448.
+ self.assertThat(
+ [thread for thread in threading.enumerate() if thread.isAlive()],
+ Equals(current_threads))
+
+ def test_leftover_junk_available(self):
+ # If 'run' is given a function that leaves the reactor dirty in some
+ # way, 'run' will clean up the reactor and then store information
+ # about the junk. This information can be got using get_junk.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ port = spinner.run(
+ self.make_timeout(), reactor.listenTCP, 0, ServerFactory())
+ self.assertThat(spinner.get_junk(), Equals([port]))
+
+ def test_will_not_run_with_previous_junk(self):
+ # If 'run' is called and there's still junk in the spinner's junk
+ # list, then the spinner will refuse to run.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
+ self.assertThat(lambda: spinner.run(timeout, lambda: None),
+ Raises(MatchesException(_spinner.StaleJunkError)))
+
+ def test_clear_junk_clears_previous_junk(self):
+        # clear_junk returns the junk accumulated so far and resets the
+        # spinner's junk list to empty, allowing 'run' to be called again.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
+ junk = spinner.clear_junk()
+ self.assertThat(junk, Equals([port]))
+ self.assertThat(spinner.get_junk(), Equals([]))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_sigint_raises_no_result_error(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ self.skipTest("SIGINT not available")
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
+ Raises(MatchesException(_spinner.NoResultError)))
+ self.assertEqual([], spinner._clean())
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_sigint_raises_no_result_error_second_time(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ # This test is exactly the same as test_sigint_raises_no_result_error,
+ # and exists to make sure we haven't futzed with state.
+ self.test_sigint_raises_no_result_error()
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_sigint_raises_no_result_error(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ self.skipTest("SIGINT not available")
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
+ Raises(MatchesException(_spinner.NoResultError)))
+ self.assertEqual([], spinner._clean())
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_sigint_raises_no_result_error_second_time(self):
+ self.test_fast_sigint_raises_no_result_error()
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_testresult.py b/lib/testtools/testtools/tests/test_testresult.py
new file mode 100644
index 0000000000..57c3293c09
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_testresult.py
@@ -0,0 +1,1374 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+"""Test TestResults and related things."""
+
+__metaclass__ = type
+
+import codecs
+import datetime
+import doctest
+import os
+import shutil
+import sys
+import tempfile
+import threading
+import warnings
+
+from testtools import (
+ ExtendedToOriginalDecorator,
+ MultiTestResult,
+ TestCase,
+ TestResult,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ testresult,
+ try_imports,
+ )
+from testtools.compat import (
+ _b,
+ _get_exception_encoding,
+ _r,
+ _u,
+ str_is_unicode,
+ )
+from testtools.content import Content
+from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.matchers import (
+ DocTestMatches,
+ MatchesException,
+ Raises,
+ )
+from testtools.tests.helpers import (
+ LoggingResult,
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ an_exc_info
+ )
+from testtools.testresult.real import utc
+
+StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
+
+
+class Python26Contract(object):
+
+ def test_fresh_result_is_successful(self):
+ # A result is considered successful before any tests are run.
+ result = self.makeResult()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addError_is_failure(self):
+ # addError fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addFailure_is_failure(self):
+ # addFailure fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addSuccess_is_success(self):
+ # addSuccess does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+
+class Python27Contract(Python26Contract):
+
+ def test_addExpectedFailure(self):
+ # Calling addExpectedFailure(test, exc_info) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+
+ def test_addExpectedFailure_is_success(self):
+ # addExpectedFailure does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addSkipped(self):
+ # Calling addSkip(test, reason) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+
+ def test_addSkip_is_success(self):
+ # addSkip does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addUnexpectedSuccess(self):
+ # Calling addUnexpectedSuccess(test) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+
+ def test_addUnexpectedSuccess_was_successful(self):
+ # addUnexpectedSuccess does not fail the test run in Python 2.7.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startStopTestRun(self):
+        # Calling startTestRun and stopTestRun completes ok.
+ result = self.makeResult()
+ result.startTestRun()
+ result.stopTestRun()
+
+
+class DetailsContract(Python27Contract):
+ """Tests for the contract of TestResults."""
+
+ def test_addExpectedFailure_details(self):
+ # Calling addExpectedFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, details={})
+
+ def test_addError_details(self):
+ # Calling addError(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, details={})
+
+ def test_addFailure_details(self):
+ # Calling addFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, details={})
+
+ def test_addSkipped_details(self):
+        # Calling addSkip(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, details={})
+
+ def test_addUnexpectedSuccess_details(self):
+        # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self, details={})
+
+ def test_addSuccess_details(self):
+        # Calling addSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self, details={})
+
+
+class FallbackContract(DetailsContract):
+ """When we fallback we take our policy choice to map calls.
+
+ For instance, we map unexpectedSuccess to an error code, not to success.
+ """
+
+ def test_addUnexpectedSuccess_was_successful(self):
+ # addUnexpectedSuccess fails test run in testtools.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+
+class StartTestRunContract(FallbackContract):
+ """Defines the contract for testtools policy choices.
+
+ That is things which are not simply extensions to unittest but choices we
+ have made differently.
+ """
+
+ def test_startTestRun_resets_unexpected_success(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_failure(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_errors(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+
+class TestTestResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ return TestResult()
+
+
+class TestMultiTestResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ return MultiTestResult(TestResult(), TestResult())
+
+
+class TestTextTestResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ return TextTestResult(StringIO())
+
+
+class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ result_semaphore = threading.Semaphore(1)
+ target = TestResult()
+ return ThreadsafeForwardingResult(target, result_semaphore)
+
+
+class TestExtendedTestResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ return ExtendedTestResult()
+
+
+class TestPython26TestResultContract(TestCase, Python26Contract):
+
+ def makeResult(self):
+ return Python26TestResult()
+
+
+class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python26TestResult())
+
+
+class TestPython27TestResultContract(TestCase, Python27Contract):
+
+ def makeResult(self):
+ return Python27TestResult()
+
+
+class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python27TestResult())
+
+
+class TestTestResult(TestCase):
+ """Tests for `TestResult`."""
+
+ def makeResult(self):
+ """Make an arbitrary result for testing."""
+ return TestResult()
+
+ def test_addSkipped(self):
+ # Calling addSkip on a TestResult records the test that was skipped in
+ # its skip_reasons dict.
+ result = self.makeResult()
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for another reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self],
+ _u("Skipped for another reason"):[self]},
+ result.skip_reasons)
+
+ def test_now_datetime_now(self):
+ result = self.makeResult()
+ olddatetime = testresult.real.datetime
+ def restore():
+ testresult.real.datetime = olddatetime
+ self.addCleanup(restore)
+ class Module:
+ pass
+ now = datetime.datetime.now(utc)
+ stubdatetime = Module()
+ stubdatetime.datetime = Module()
+ stubdatetime.datetime.now = lambda tz: now
+ testresult.real.datetime = stubdatetime
+ # Calling _now() looks up the time.
+ self.assertEqual(now, result._now())
+ then = now + datetime.timedelta(0, 1)
+ # Set an explicit datetime, which gets returned from then on.
+ result.time(then)
+ self.assertNotEqual(now, result._now())
+ self.assertEqual(then, result._now())
+ # go back to looking it up.
+ result.time(None)
+ self.assertEqual(now, result._now())
+
+ def test_now_datetime_time(self):
+ result = self.makeResult()
+ now = datetime.datetime.now(utc)
+ result.time(now)
+ self.assertEqual(now, result._now())
+
+
+class TestWithFakeExceptions(TestCase):
+
+ def makeExceptionInfo(self, exceptionFactory, *args, **kwargs):
+ try:
+ raise exceptionFactory(*args, **kwargs)
+ except:
+ return sys.exc_info()
+
+
+class TestMultiTestResult(TestWithFakeExceptions):
+ """Tests for `MultiTestResult`."""
+
+ def setUp(self):
+ TestWithFakeExceptions.setUp(self)
+ self.result1 = LoggingResult([])
+ self.result2 = LoggingResult([])
+ self.multiResult = MultiTestResult(self.result1, self.result2)
+
+ def assertResultLogsEqual(self, expectedEvents):
+ """Assert that our test results have received the expected events."""
+ self.assertEqual(expectedEvents, self.result1._events)
+ self.assertEqual(expectedEvents, self.result2._events)
+
+ def test_empty(self):
+ # Initializing a `MultiTestResult` doesn't do anything to its
+ # `TestResult`s.
+ self.assertResultLogsEqual([])
+
+ def test_startTest(self):
+ # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
+ # its `TestResult`s.
+ self.multiResult.startTest(self)
+ self.assertResultLogsEqual([('startTest', self)])
+
+ def test_stopTest(self):
+ # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
+ # its `TestResult`s.
+ self.multiResult.stopTest(self)
+ self.assertResultLogsEqual([('stopTest', self)])
+
+ def test_addSkipped(self):
+ # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
+ # results.
+ reason = _u("Skipped for some reason")
+ self.multiResult.addSkip(self, reason)
+ self.assertResultLogsEqual([('addSkip', self, reason)])
+
+ def test_addSuccess(self):
+ # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
+ # all its `TestResult`s.
+ self.multiResult.addSuccess(self)
+ self.assertResultLogsEqual([('addSuccess', self)])
+
+ def test_done(self):
+ # Calling `done` on a `MultiTestResult` calls `done` on all its
+ # `TestResult`s.
+ self.multiResult.done()
+ self.assertResultLogsEqual([('done')])
+
+ def test_addFailure(self):
+ # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
+ # all its `TestResult`s.
+ exc_info = self.makeExceptionInfo(AssertionError, 'failure')
+ self.multiResult.addFailure(self, exc_info)
+ self.assertResultLogsEqual([('addFailure', self, exc_info)])
+
+ def test_addError(self):
+ # Calling `addError` on a `MultiTestResult` calls `addError` on all
+ # its `TestResult`s.
+ exc_info = self.makeExceptionInfo(RuntimeError, 'error')
+ self.multiResult.addError(self, exc_info)
+ self.assertResultLogsEqual([('addError', self, exc_info)])
+
+ def test_startTestRun(self):
+ # Calling `startTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.startTestRun()
+ self.assertResultLogsEqual([('startTestRun')])
+
+ def test_stopTestRun(self):
+ # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.stopTestRun()
+ self.assertResultLogsEqual([('stopTestRun')])
+
+ def test_stopTestRun_returns_results(self):
+ # `MultiTestResult.stopTestRun` returns a tuple of all of the return
+        # values of the `stopTestRun`s that it forwards to.
+ class Result(LoggingResult):
+ def stopTestRun(self):
+ super(Result, self).stopTestRun()
+ return 'foo'
+ multi_result = MultiTestResult(Result([]), Result([]))
+ result = multi_result.stopTestRun()
+ self.assertEqual(('foo', 'foo'), result)
+
+ def test_time(self):
+ # the time call is dispatched, not eaten by the base class
+ self.multiResult.time('foo')
+ self.assertResultLogsEqual([('time', 'foo')])
+
+
+class TestTextTestResult(TestCase):
+ """Tests for `TextTestResult`."""
+
+ def setUp(self):
+ super(TestTextTestResult, self).setUp()
+ self.result = TextTestResult(StringIO())
+
+ def make_erroring_test(self):
+ class Test(TestCase):
+ def error(self):
+ 1/0
+ return Test("error")
+
+ def make_failing_test(self):
+ class Test(TestCase):
+ def failed(self):
+ self.fail("yo!")
+ return Test("failed")
+
+ def make_unexpectedly_successful_test(self):
+ class Test(TestCase):
+ def succeeded(self):
+ self.expectFailure("yo!", lambda: None)
+ return Test("succeeded")
+
+ def make_test(self):
+ class Test(TestCase):
+ def test(self):
+ pass
+ return Test("test")
+
+ def getvalue(self):
+ return self.result.stream.getvalue()
+
+ def test__init_sets_stream(self):
+ result = TextTestResult("fp")
+ self.assertEqual("fp", result.stream)
+
+ def reset_output(self):
+ self.result.stream = StringIO()
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertEqual("Tests running...\n", self.getvalue())
+
+ def test_stopTestRun_count_many(self):
+ test = self.make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.stream = StringIO()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("Ran 2 tests in ...s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_single(self):
+ test = self.make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("Ran 1 test in ...s\n\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_zero(self):
+ self.result.startTestRun()
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("Ran 0 tests in ...s\n\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_current_time(self):
+ test = self.make_test()
+ now = datetime.datetime.now(utc)
+ self.result.time(now)
+ self.result.startTestRun()
+ self.result.startTest(test)
+ now = now + datetime.timedelta(0, 0, 0, 1)
+ self.result.time(now)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_successful(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\n\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_failure(self):
+ test = self.make_failing_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_error(self):
+ test = self.make_erroring_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_unexpected_success(self):
+ test = self.make_unexpectedly_successful_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_shows_details(self):
+ self.result.startTestRun()
+ self.make_erroring_test().run(self.result)
+ self.make_unexpectedly_successful_test().run(self.result)
+ self.make_failing_test().run(self.result)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("""...======================================================================
+ERROR: testtools.tests.test_testresult.Test.error
+----------------------------------------------------------------------
+Text attachment: traceback
+------------
+Traceback (most recent call last):
+ File "...testtools...runtest.py", line ..., in _run_user...
+ return fn(*args, **kwargs)
+ File "...testtools...testcase.py", line ..., in _run_test_method
+ return self._get_test_method()()
+ File "...testtools...tests...test_testresult.py", line ..., in error
+ 1/0
+ZeroDivisionError:... divi... by zero...
+------------
+======================================================================
+FAIL: testtools.tests.test_testresult.Test.failed
+----------------------------------------------------------------------
+Text attachment: traceback
+------------
+Traceback (most recent call last):
+ File "...testtools...runtest.py", line ..., in _run_user...
+ return fn(*args, **kwargs)
+ File "...testtools...testcase.py", line ..., in _run_test_method
+ return self._get_test_method()()
+ File "...testtools...tests...test_testresult.py", line ..., in failed
+ self.fail("yo!")
+AssertionError: yo!
+------------
+======================================================================
+UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
+----------------------------------------------------------------------
+...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
+
+
+class TestThreadSafeForwardingResult(TestWithFakeExceptions):
+ """Tests for `TestThreadSafeForwardingResult`."""
+
+ def setUp(self):
+ TestWithFakeExceptions.setUp(self)
+ self.result_semaphore = threading.Semaphore(1)
+ self.target = LoggingResult([])
+ self.result1 = ThreadsafeForwardingResult(self.target,
+ self.result_semaphore)
+
+ def test_nonforwarding_methods(self):
+ # startTest and stopTest are not forwarded because they need to be
+ # batched.
+ self.result1.startTest(self)
+ self.result1.stopTest(self)
+ self.assertEqual([], self.target._events)
+
+ def test_startTestRun(self):
+ self.result1.startTestRun()
+ self.result2 = ThreadsafeForwardingResult(self.target,
+ self.result_semaphore)
+ self.result2.startTestRun()
+ self.assertEqual(["startTestRun", "startTestRun"], self.target._events)
+
+ def test_stopTestRun(self):
+ self.result1.stopTestRun()
+ self.result2 = ThreadsafeForwardingResult(self.target,
+ self.result_semaphore)
+ self.result2.stopTestRun()
+ self.assertEqual(["stopTestRun", "stopTestRun"], self.target._events)
+
+ def test_forwarding_methods(self):
+ # error, failure, skip and success are forwarded in batches.
+ exc_info1 = self.makeExceptionInfo(RuntimeError, 'error')
+ starttime1 = datetime.datetime.utcfromtimestamp(1.489)
+ endtime1 = datetime.datetime.utcfromtimestamp(51.476)
+ self.result1.time(starttime1)
+ self.result1.startTest(self)
+ self.result1.time(endtime1)
+ self.result1.addError(self, exc_info1)
+ exc_info2 = self.makeExceptionInfo(AssertionError, 'failure')
+ starttime2 = datetime.datetime.utcfromtimestamp(2.489)
+ endtime2 = datetime.datetime.utcfromtimestamp(3.476)
+ self.result1.time(starttime2)
+ self.result1.startTest(self)
+ self.result1.time(endtime2)
+ self.result1.addFailure(self, exc_info2)
+ reason = _u("Skipped for some reason")
+ starttime3 = datetime.datetime.utcfromtimestamp(4.489)
+ endtime3 = datetime.datetime.utcfromtimestamp(5.476)
+ self.result1.time(starttime3)
+ self.result1.startTest(self)
+ self.result1.time(endtime3)
+ self.result1.addSkip(self, reason)
+ starttime4 = datetime.datetime.utcfromtimestamp(6.489)
+ endtime4 = datetime.datetime.utcfromtimestamp(7.476)
+ self.result1.time(starttime4)
+ self.result1.startTest(self)
+ self.result1.time(endtime4)
+ self.result1.addSuccess(self)
+ self.assertEqual([
+ ('time', starttime1),
+ ('startTest', self),
+ ('time', endtime1),
+ ('addError', self, exc_info1),
+ ('stopTest', self),
+ ('time', starttime2),
+ ('startTest', self),
+ ('time', endtime2),
+ ('addFailure', self, exc_info2),
+ ('stopTest', self),
+ ('time', starttime3),
+ ('startTest', self),
+ ('time', endtime3),
+ ('addSkip', self, reason),
+ ('stopTest', self),
+ ('time', starttime4),
+ ('startTest', self),
+ ('time', endtime4),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], self.target._events)
+
+
+class TestExtendedToOriginalResultDecoratorBase(TestCase):
+
+ def make_26_result(self):
+ self.result = Python26TestResult()
+ self.make_converter()
+
+ def make_27_result(self):
+ self.result = Python27TestResult()
+ self.make_converter()
+
+ def make_converter(self):
+ self.converter = ExtendedToOriginalDecorator(self.result)
+
+ def make_extended_result(self):
+ self.result = ExtendedTestResult()
+ self.make_converter()
+
+ def check_outcome_details(self, outcome):
+ """Call an outcome with a details dict to be passed through."""
+        # This dict is /not/ convertible - that's deliberate, as it should
+ # not hit the conversion code path.
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, details)], self.result._events)
+
+ def get_details_and_string(self):
+ """Get a details dict and expected string."""
+ text1 = lambda: [_b("1\n2\n")]
+ text2 = lambda: [_b("3\n4\n")]
+ bin1 = lambda: [_b("5\n")]
+ details = {'text 1': Content(ContentType('text', 'plain'), text1),
+ 'text 2': Content(ContentType('text', 'strange'), text2),
+ 'bin 1': Content(ContentType('application', 'binary'), bin1)}
+ return (details, "Binary content: bin 1\n"
+ "Text attachment: text 1\n------------\n1\n2\n"
+ "------------\nText attachment: text 2\n------------\n"
+ "3\n4\n------------\n")
+
+ def check_outcome_details_to_exec_info(self, outcome, expected=None):
+ """Call an outcome with a details dict to be made into exc_info."""
+ # The conversion is a done using RemoteError and the string contents
+ # of the text types in the details dict.
+ if not expected:
+ expected = outcome
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ err = self.converter._details_to_exc_info(details)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_details_to_nothing(self, outcome, expected=None):
+ """Call an outcome with a details dict to be swallowed."""
+ if not expected:
+ expected = outcome
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_details_to_string(self, outcome):
+ """Call an outcome with a details dict to be stringified."""
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, err_str)], self.result._events)
+
+ def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
+ """Call an outcome with a details dict to have an arg extracted."""
+ details, _ = self.get_details_and_string()
+ if extra_detail:
+ details.update(extra_detail)
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, arg)], self.result._events)
+
+ def check_outcome_exc_info(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome on a fallback works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ if not expected:
+ expected = outcome
+ getattr(self.converter, outcome)(self)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string_nothing(self, outcome, expected):
+ """Check that calling outcome with a string calls expected."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string(self, outcome):
+ """Check that calling outcome with a string works."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(outcome, self, "foo")], self.result._events)
+
+
+class TestExtendedToOriginalResultDecorator(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_progress_py26(self):
+ self.make_26_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_py27(self):
+ self.make_27_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_pyextended(self):
+ self.make_extended_result()
+ self.converter.progress(1, 2)
+ self.assertEqual([('progress', 1, 2)], self.result._events)
+
+ def test_shouldStop(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.shouldStop)
+ self.converter.decorated.stop()
+ self.assertEqual(True, self.converter.shouldStop)
+
+ def test_startTest_py26(self):
+ self.make_26_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_py27(self):
+ self.make_27_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTestRun_py26(self):
+ self.make_26_result()
+ self.converter.startTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_startTestRun_py27(self):
+ self.make_27_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_startTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_stopTest_py26(self):
+ self.make_26_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_py27(self):
+ self.make_27_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTestRun_py26(self):
+ self.make_26_result()
+ self.converter.stopTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_stopTestRun_py27(self):
+ self.make_27_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_stopTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_tags_py26(self):
+ self.make_26_result()
+ self.converter.tags(1, 2)
+
+ def test_tags_py27(self):
+ self.make_27_result()
+ self.converter.tags(1, 2)
+
+ def test_tags_pyextended(self):
+ self.make_extended_result()
+ self.converter.tags(1, 2)
+ self.assertEqual([('tags', 1, 2)], self.result._events)
+
+ def test_time_py26(self):
+ self.make_26_result()
+ self.converter.time(1)
+
+ def test_time_py27(self):
+ self.make_27_result()
+ self.converter.time(1)
+
+ def test_time_pyextended(self):
+ self.make_extended_result()
+ self.converter.time(1)
+ self.assertEqual([('time', 1)], self.result._events)
+
+
+class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addError'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addFailure'
+
+
+class TestExtendedToOriginalAddExpectedFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addExpectedFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
+
+
+
+class TestExtendedToOriginalAddSkip(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSkip'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py27_no_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_string(self.outcome)
+
+ def test_outcome_Extended_py27_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_arg(self.outcome, 'foo',
+ {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSuccess'
+ expected = 'addSuccess'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_nothing(self.outcome, self.expected)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, self.expected)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalAddUnexpectedSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addUnexpectedSuccess'
+ expected = 'addFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalResultOtherAttributes(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_other_attribute(self):
+ class OtherExtendedResult:
+ def foo(self):
+ return 2
+ bar = 1
+ self.result = OtherExtendedResult()
+ self.make_converter()
+ self.assertEqual(1, self.converter.bar)
+ self.assertEqual(2, self.converter.foo())
+
+
+class TestNonAsciiResults(TestCase):
+ """Test all kinds of tracebacks are cleanly interpreted as unicode
+
+ Currently only uses weak "contains" assertions, would be good to be much
+ stricter about the expected output. This would add a few failures for the
+ current release of IronPython for instance, which gets some traceback
+ lines muddled.
+ """
+
+ _sample_texts = (
+ _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
+ _u("\u5357\u7121"), # In ISO 2022 encodings
+ _u("\xa7\xa7\xa7"), # In ISO 8859 encodings
+ )
+ # Everything but Jython shows syntax errors on the current character
+ _error_on_character = os.name != "java"
+
+ def _run(self, stream, test):
+ """Run the test, the same as in testtools.run but not to stdout"""
+ result = TextTestResult(stream)
+ result.startTestRun()
+ try:
+ return test.run(result)
+ finally:
+ result.stopTestRun()
+
+ def _write_module(self, name, encoding, contents):
+ """Create Python module on disk with contents in given encoding"""
+ try:
+ # Need to pre-check that the coding is valid or codecs.open drops
+ # the file without closing it which breaks non-refcounted pythons
+ codecs.lookup(encoding)
+ except LookupError:
+ self.skip("Encoding unsupported by implementation: %r" % encoding)
+ f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
+ try:
+ f.write(contents)
+ finally:
+ f.close()
+
+ def _test_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+        """Create and run a test case in a separate module"""
+ self._setup_external_case(testline, coding, modulelevel, suffix)
+ return self._run_external_case()
+
+ def _setup_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+        """Create a test case in a separate module"""
+ _, prefix, self.modname = self.id().rsplit(".", 2)
+ self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
+ self.addCleanup(shutil.rmtree, self.dir)
+ self._write_module(self.modname, coding,
+ # Older Python 2 versions don't see a coding declaration in a
+ # docstring so it has to be in a comment, but then we can't
+ # workaround bug: <http://ironpython.codeplex.com/workitem/26940>
+ "# coding: %s\n"
+ "import testtools\n"
+ "%s\n"
+ "class Test(testtools.TestCase):\n"
+ " def runTest(self):\n"
+ " %s\n" % (coding, modulelevel, testline))
+
+ def _run_external_case(self):
+        """Run the prepared test case in a separate module"""
+ sys.path.insert(0, self.dir)
+ self.addCleanup(sys.path.remove, self.dir)
+ module = __import__(self.modname)
+ self.addCleanup(sys.modules.pop, self.modname)
+ stream = StringIO()
+ self._run(stream, module.Test())
+ return stream.getvalue()
+
+ def _silence_deprecation_warnings(self):
+ """Shut up DeprecationWarning for this test only"""
+ warnings.simplefilter("ignore", DeprecationWarning)
+ self.addCleanup(warnings.filters.remove, warnings.filters[0])
+
+ def _get_sample_text(self, encoding="unicode_internal"):
+ if encoding is None and str_is_unicode:
+ encoding = "unicode_internal"
+ for u in self._sample_texts:
+ try:
+ b = u.encode(encoding)
+ if u == b.decode(encoding):
+ if str_is_unicode:
+ return u, u
+ return u, b
+ except (LookupError, UnicodeError):
+ pass
+ self.skip("Could not find a sample text for encoding: %r" % encoding)
+
+ def _as_output(self, text):
+ return text
+
+ def test_non_ascii_failure_string(self):
+ """Assertion contents can be non-ascii and should get decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_non_ascii_failure_string_via_exec(self):
+ """Assertion via exec can be non-ascii and still gets decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case(
+ testline='exec ("self.fail(%s)")' % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_control_characters_in_failure_string(self):
+ """Control characters in assertions should be escaped"""
+ textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
+ self.expectFailure("Defense against the beeping horror unimplemented",
+ self.assertNotIn, self._as_output("\a\a\a"), textoutput)
+ self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
+
+ def test_os_error(self):
+ """Locale error messages from the OS shouldn't break anything"""
+ textoutput = self._test_external_case(
+ modulelevel="import os",
+ testline="os.mkdir('/')")
+ if os.name != "nt" or sys.version_info < (2, 5):
+ self.assertIn(self._as_output("OSError: "), textoutput)
+ else:
+ self.assertIn(self._as_output("WindowsError: "), textoutput)
+
+ def test_assertion_text_shift_jis(self):
+ """A terminal raw backslash in an encoded string is weird but fine"""
+ example_text = _u("\u5341")
+ textoutput = self._test_external_case(
+ coding="shift_jis",
+ testline="self.fail('%s')" % example_text)
+ if str_is_unicode:
+ output_text = example_text
+ else:
+ output_text = example_text.encode("shift_jis").decode(
+ _get_exception_encoding(), "replace")
+ self.assertIn(self._as_output("AssertionError: %s" % output_text),
+ textoutput)
+
+ def test_file_comment_iso2022_jp(self):
+ """Control character escapes must be preserved if valid encoding"""
+ example_text, _ = self._get_sample_text("iso2022_jp")
+ textoutput = self._test_external_case(
+ coding="iso2022_jp",
+ testline="self.fail('Simple') # %s" % example_text)
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unicode_exception(self):
+        """Exceptions that can be formatted losslessly as unicode should be"""
+ example_text, _ = self._get_sample_text()
+ exception_class = (
+ "class FancyError(Exception):\n"
+ # A __unicode__ method does nothing on py3k but the default works
+ " def __unicode__(self):\n"
+ " return self.args[0]\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise FancyError(%s)" % _r(example_text))
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unprintable_exception(self):
+ """A totally useless exception instance still prints something"""
+ exception_class = (
+ "class UnprintableError(Exception):\n"
+ " def __str__(self):\n"
+ " raise RuntimeError\n"
+ " def __unicode__(self):\n"
+ " raise RuntimeError\n"
+ " def __repr__(self):\n"
+ " raise RuntimeError\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise UnprintableError")
+ self.assertIn(self._as_output(
+ "UnprintableError: <unprintable UnprintableError object>\n"),
+ textoutput)
+
+ def test_string_exception(self):
+ """Raise a string rather than an exception instance if supported"""
+ if sys.version_info > (2, 6):
+ self.skip("No string exceptions in Python 2.6 or later")
+ elif sys.version_info > (2, 5):
+ self._silence_deprecation_warnings()
+ textoutput = self._test_external_case(testline="raise 'plain str'")
+ self.assertIn(self._as_output("\nplain str\n"), textoutput)
+
+ def test_non_ascii_dirname(self):
+ """Script paths in the traceback can be non-ascii"""
+ text, raw = self._get_sample_text(sys.getfilesystemencoding())
+ textoutput = self._test_external_case(
+ # Avoid bug in Python 3 by giving a unicode source encoding rather
+ # than just ascii which raises a SyntaxError with no other details
+ coding="utf-8",
+ testline="self.fail('Simple')",
+ suffix=raw)
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_syntax_error(self):
+ """Syntax errors should still have fancy special-case formatting"""
+ textoutput = self._test_external_case("exec ('f(a, b c)')")
+ self.assertIn(self._as_output(
+ ' File "<string>", line 1\n'
+ ' f(a, b c)\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: '
+ ), textoutput)
+
+ def test_syntax_error_malformed(self):
+        """Syntax errors with bogus parameters shouldn't break anything"""
+ textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
+ self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
+
+ def test_syntax_error_import_binary(self):
+ """Importing a binary file shouldn't break SyntaxError formatting"""
+ if sys.version_info < (2, 5):
+ # Python 2.4 assumes the file is latin-1 and tells you off
+ self._silence_deprecation_warnings()
+ self._setup_external_case("import bad")
+ f = open(os.path.join(self.dir, "bad.py"), "wb")
+ try:
+ f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
+ finally:
+ f.close()
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
+
+ def test_syntax_error_line_iso_8859_1(self):
+ """Syntax error on a latin-1 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-1")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-1",
+ "# coding: iso-8859-1\n! = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' ! = 0 # %s\n'
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_iso_8859_5(self):
+ """Syntax error on a iso-8859-5 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-5")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-5",
+ "# coding: iso-8859-5\n%% = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' %% = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_euc_jp(self):
+ """Syntax error on a euc_jp line shows the line decoded"""
+ text, raw = self._get_sample_text("euc_jp")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "euc_jp",
+ "# coding: euc_jp\n$ = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' $ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_utf_8(self):
+ """Syntax error on a utf-8 line shows the line decoded"""
+ text, raw = self._get_sample_text("utf-8")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ 'bad.py", line 1\n'
+ ' ^ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ text), textoutput)
+
+
+class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
+ """Test that running under unittest produces clean ascii strings"""
+
+ def _run(self, stream, test):
+ from unittest import TextTestRunner as _Runner
+ return _Runner(stream).run(test)
+
+ def _as_output(self, text):
+ if str_is_unicode:
+ return text
+ return text.encode("utf-8")
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_testsuite.py b/lib/testtools/testtools/tests/test_testsuite.py
new file mode 100644
index 0000000000..eeb8fd2811
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_testsuite.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""Test ConcurrentTestSuite and related things."""
+
+__metaclass__ = type
+
+import datetime
+import unittest
+
+from testtools import (
+ ConcurrentTestSuite,
+ iterate_tests,
+ TestCase,
+ )
+from testtools.matchers import (
+ Equals,
+ )
+from testtools.tests.helpers import LoggingResult
+
+
+class TestConcurrentTestSuiteRun(TestCase):
+
+ def test_trivial(self):
+ log = []
+ result = LoggingResult(log)
+ class Sample(TestCase):
+ def __hash__(self):
+ return id(self)
+
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(result)
+ # 0 is the timestamp for the first test starting.
+ test1 = log[1][1]
+ test2 = log[-1][1]
+ self.assertIsInstance(test1, Sample)
+ self.assertIsInstance(test2, Sample)
+ self.assertNotEqual(test1.id(), test2.id())
+
+ def split_suite(self, suite):
+ tests = list(iterate_tests(suite))
+ return tests[0], tests[1]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_testtools.py b/lib/testtools/testtools/tests/test_testtools.py
new file mode 100644
index 0000000000..2e722e919d
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_testtools.py
@@ -0,0 +1,1135 @@
+# Copyright (c) 2008-2010 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for extensions to the base test library."""
+
+from pprint import pformat
+import sys
+import unittest
+
+from testtools import (
+ ErrorHolder,
+ MultipleExceptions,
+ PlaceHolder,
+ TestCase,
+ clone_test_with_new_id,
+ content,
+ skip,
+ skipIf,
+ skipUnless,
+ testcase,
+ )
+from testtools.matchers import (
+ Equals,
+ MatchesException,
+ Raises,
+ )
+from testtools.tests.helpers import (
+ an_exc_info,
+ LoggingResult,
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+
+
+class TestPlaceHolder(TestCase):
+
+ def makePlaceHolder(self, test_id="foo", short_description=None):
+ return PlaceHolder(test_id, short_description)
+
+ def test_id_comes_from_constructor(self):
+ # The id() of a PlaceHolder is whatever you pass into the constructor.
+ test = PlaceHolder("test id")
+ self.assertEqual("test id", test.id())
+
+ def test_shortDescription_is_id(self):
+ # The shortDescription() of a PlaceHolder is the id, by default.
+ test = PlaceHolder("test id")
+ self.assertEqual(test.id(), test.shortDescription())
+
+ def test_shortDescription_specified(self):
+ # If a shortDescription is provided to the constructor, then
+ # shortDescription() returns that instead.
+ test = PlaceHolder("test id", "description")
+ self.assertEqual("description", test.shortDescription())
+
+ def test_repr_just_id(self):
+ # repr(placeholder) shows you how the object was constructed.
+ test = PlaceHolder("test id")
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder(%s)>" % repr(test.id()),
+ repr(test))
+
+ def test_repr_with_description(self):
+ # repr(placeholder) shows you how the object was constructed.
+ test = PlaceHolder("test id", "description")
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder(%r, %r)>" % (
+ test.id(), test.shortDescription()),
+ repr(test))
+
+ def test_counts_as_one_test(self):
+ # A placeholder test counts as one test.
+ test = self.makePlaceHolder()
+ self.assertEqual(1, test.countTestCases())
+
+ def test_str_is_id(self):
+ # str(placeholder) is always the id(). We are not barbarians.
+ test = self.makePlaceHolder()
+ self.assertEqual(test.id(), str(test))
+
+ def test_runs_as_success(self):
+ # When run, a PlaceHolder test records a success.
+ test = self.makePlaceHolder()
+ log = []
+ test.run(LoggingResult(log))
+ self.assertEqual(
+ [('startTest', test), ('addSuccess', test), ('stopTest', test)],
+ log)
+
+ def test_call_is_run(self):
+ # A PlaceHolder can be called, in which case it behaves like run.
+ test = self.makePlaceHolder()
+ run_log = []
+ test.run(LoggingResult(run_log))
+ call_log = []
+ test(LoggingResult(call_log))
+ self.assertEqual(run_log, call_log)
+
+ def test_runs_without_result(self):
+ # A PlaceHolder can be run without a result, in which case there's no
+ # way to actually get at the result.
+ self.makePlaceHolder().run()
+
+ def test_debug(self):
+ # A PlaceHolder can be debugged.
+ self.makePlaceHolder().debug()
+
+
+class TestErrorHolder(TestCase):
+
+ def makeException(self):
+ try:
+ raise RuntimeError("danger danger")
+ except:
+ return sys.exc_info()
+
+ def makePlaceHolder(self, test_id="foo", error=None,
+ short_description=None):
+ if error is None:
+ error = self.makeException()
+ return ErrorHolder(test_id, error, short_description)
+
+ def test_id_comes_from_constructor(self):
+        # The id() of an ErrorHolder is whatever you pass into the constructor.
+ test = ErrorHolder("test id", self.makeException())
+ self.assertEqual("test id", test.id())
+
+ def test_shortDescription_is_id(self):
+        # The shortDescription() of an ErrorHolder is the id, by default.
+ test = ErrorHolder("test id", self.makeException())
+ self.assertEqual(test.id(), test.shortDescription())
+
+ def test_shortDescription_specified(self):
+ # If a shortDescription is provided to the constructor, then
+ # shortDescription() returns that instead.
+ test = ErrorHolder("test id", self.makeException(), "description")
+ self.assertEqual("description", test.shortDescription())
+
+ def test_repr_just_id(self):
+ # repr(placeholder) shows you how the object was constructed.
+ error = self.makeException()
+ test = ErrorHolder("test id", error)
+ self.assertEqual(
+ "<testtools.testcase.ErrorHolder(%r, %r)>" % (test.id(), error),
+ repr(test))
+
+ def test_repr_with_description(self):
+ # repr(placeholder) shows you how the object was constructed.
+ error = self.makeException()
+ test = ErrorHolder("test id", error, "description")
+ self.assertEqual(
+ "<testtools.testcase.ErrorHolder(%r, %r, %r)>" % (
+ test.id(), error, test.shortDescription()),
+ repr(test))
+
+ def test_counts_as_one_test(self):
+ # A placeholder test counts as one test.
+ test = self.makePlaceHolder()
+ self.assertEqual(1, test.countTestCases())
+
+ def test_str_is_id(self):
+ # str(placeholder) is always the id(). We are not barbarians.
+ test = self.makePlaceHolder()
+ self.assertEqual(test.id(), str(test))
+
+ def test_runs_as_error(self):
+        # When run, an ErrorHolder test records an error.
+ error = self.makeException()
+ test = self.makePlaceHolder(error=error)
+ log = []
+ test.run(LoggingResult(log))
+ self.assertEqual(
+ [('startTest', test),
+ ('addError', test, error),
+ ('stopTest', test)], log)
+
+ def test_call_is_run(self):
+ # A PlaceHolder can be called, in which case it behaves like run.
+ test = self.makePlaceHolder()
+ run_log = []
+ test.run(LoggingResult(run_log))
+ call_log = []
+ test(LoggingResult(call_log))
+ self.assertEqual(run_log, call_log)
+
+ def test_runs_without_result(self):
+ # A PlaceHolder can be run without a result, in which case there's no
+ # way to actually get at the result.
+ self.makePlaceHolder().run()
+
+ def test_debug(self):
+ # A PlaceHolder can be debugged.
+ self.makePlaceHolder().debug()
+
+
+class TestEquality(TestCase):
+ """Test `TestCase`'s equality implementation."""
+
+ def test_identicalIsEqual(self):
+ # TestCase's are equal if they are identical.
+ self.assertEqual(self, self)
+
+ def test_nonIdenticalInUnequal(self):
+ # TestCase's are not equal if they are not identical.
+ self.assertNotEqual(TestCase(methodName='run'),
+ TestCase(methodName='skip'))
+
+
+class TestAssertions(TestCase):
+ """Test assertions in TestCase."""
+
+ def raiseError(self, exceptionFactory, *args, **kwargs):
+ raise exceptionFactory(*args, **kwargs)
+
+ def test_formatTypes_single(self):
+ # Given a single class, _formatTypes returns the name.
+ class Foo(object):
+ pass
+ self.assertEqual('Foo', self._formatTypes(Foo))
+
+ def test_formatTypes_multiple(self):
+ # Given multiple types, _formatTypes returns the names joined by
+ # commas.
+ class Foo(object):
+ pass
+ class Bar(object):
+ pass
+ self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
+
+ def test_assertRaises(self):
+ # assertRaises asserts that a callable raises a particular exception.
+ self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
+
+ def test_assertRaises_fails_when_no_error_raised(self):
+ # assertRaises raises self.failureException when it's passed a
+ # callable that raises no error.
+ ret = ('orange', 42)
+ try:
+ self.assertRaises(RuntimeError, lambda: ret)
+ except self.failureException:
+ # We expected assertRaises to raise this exception.
+ e = sys.exc_info()[1]
+ self.assertEqual(
+ '%s not raised, %r returned instead.'
+ % (self._formatTypes(RuntimeError), ret), str(e))
+ else:
+ self.fail('Expected assertRaises to fail, but it did not.')
+
+ def test_assertRaises_fails_when_different_error_raised(self):
+ # assertRaises re-raises an exception that it didn't expect.
+ self.assertThat(lambda: self.assertRaises(RuntimeError,
+ self.raiseError, ZeroDivisionError),
+ Raises(MatchesException(ZeroDivisionError)))
+
+ def test_assertRaises_returns_the_raised_exception(self):
+ # assertRaises returns the exception object that was raised. This is
+ # useful for testing that exceptions have the right message.
+
+ # This contraption stores the raised exception, so we can compare it
+ # to the return value of assertRaises.
+ raisedExceptions = []
+ def raiseError():
+ try:
+ raise RuntimeError('Deliberate error')
+ except RuntimeError:
+ raisedExceptions.append(sys.exc_info()[1])
+ raise
+
+ exception = self.assertRaises(RuntimeError, raiseError)
+ self.assertEqual(1, len(raisedExceptions))
+ self.assertTrue(
+ exception is raisedExceptions[0],
+ "%r is not %r" % (exception, raisedExceptions[0]))
+
+ def test_assertRaises_with_multiple_exceptions(self):
+ # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
+        # function raises one of ExceptionOne or ExceptionTwo.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[0])
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[1])
+
+ def test_assertRaises_with_multiple_exceptions_failure_mode(self):
+ # If assertRaises is called expecting one of a group of exceptions and
+ # a callable that doesn't raise an exception, then fail with an
+ # appropriate error message.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ failure = self.assertRaises(
+ self.failureException,
+ self.assertRaises, expectedExceptions, lambda: None)
+ self.assertEqual(
+ '%s not raised, None returned instead.'
+ % self._formatTypes(expectedExceptions), str(failure))
+
+ def assertFails(self, message, function, *args, **kwargs):
+ """Assert that function raises a failure with the given message."""
+ failure = self.assertRaises(
+ self.failureException, function, *args, **kwargs)
+ self.assertEqual(message, str(failure))
+
+ def test_assertIn_success(self):
+ # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
+ self.assertIn(3, range(10))
+ self.assertIn('foo', 'foo bar baz')
+ self.assertIn('foo', 'foo bar baz'.split())
+
+ def test_assertIn_failure(self):
+ # assertIn(needle, haystack) fails the test when 'needle' is not in
+ # 'haystack'.
+ self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
+ self.assertFails(
+ '%r not in %r' % ('qux', 'foo bar baz'),
+ self.assertIn, 'qux', 'foo bar baz')
+
+ def test_assertNotIn_success(self):
+ # assertNotIn(needle, haystack) asserts that 'needle' is not in
+ # 'haystack'.
+ self.assertNotIn(3, [0, 1, 2])
+ self.assertNotIn('qux', 'foo bar baz')
+
+ def test_assertNotIn_failure(self):
+ # assertNotIn(needle, haystack) fails the test when 'needle' is in
+ # 'haystack'.
+ self.assertFails('3 in [1, 2, 3]', self.assertNotIn, 3, [1, 2, 3])
+ self.assertFails(
+ '%r in %r' % ('foo', 'foo bar baz'),
+ self.assertNotIn, 'foo', 'foo bar baz')
+
+ def test_assertIsInstance(self):
+ # assertIsInstance asserts that an object is an instance of a class.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, Foo)
+
+ def test_assertIsInstance_multiple_classes(self):
+ # assertIsInstance asserts that an object is an instance of one of a
+ # group of classes.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ class Bar(object):
+ """Another simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, (Foo, Bar))
+ self.assertIsInstance(Bar(), (Foo, Bar))
+
+ def test_assertIsInstance_failure(self):
+ # assertIsInstance(obj, klass) fails the test when obj is not an
+ # instance of klass.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ '42 is not an instance of %s' % self._formatTypes(Foo),
+ self.assertIsInstance, 42, Foo)
+
+ def test_assertIsInstance_failure_multiple_classes(self):
+ # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
+ # not an instance of klass1 or klass2.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ class Bar(object):
+ """Another simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ '42 is not an instance of %s' % self._formatTypes([Foo, Bar]),
+ self.assertIsInstance, 42, (Foo, Bar))
+
+ def test_assertIsInstance_overridden_message(self):
+ # assertIsInstance(obj, klass, msg) permits a custom message.
+ self.assertFails("foo", self.assertIsInstance, 42, str, "foo")
+
+ def test_assertIs(self):
+ # assertIs asserts that an object is identical to another object.
+ self.assertIs(None, None)
+ some_list = [42]
+ self.assertIs(some_list, some_list)
+ some_object = object()
+ self.assertIs(some_object, some_object)
+
+ def test_assertIs_fails(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another.
+ self.assertFails('None is not 42', self.assertIs, None, 42)
+ self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
+
+ def test_assertIs_fails_with_message(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another, and includes a user-supplied message, if it's provided.
+ self.assertFails(
+ 'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
+
+ def test_assertIsNot(self):
+ # assertIsNot asserts that an object is not identical to another
+ # object.
+ self.assertIsNot(None, 42)
+ self.assertIsNot([42], [42])
+ self.assertIsNot(object(), object())
+
+ def test_assertIsNot_fails(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another.
+ self.assertFails('None is None', self.assertIsNot, None, None)
+ some_list = [42]
+ self.assertFails(
+ '[42] is [42]', self.assertIsNot, some_list, some_list)
+
+ def test_assertIsNot_fails_with_message(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another, and includes a user-supplied message if it's provided.
+ self.assertFails(
+ 'None is None: foo bar', self.assertIsNot, None, None, "foo bar")
+
+ def test_assertThat_matches_clean(self):
+ class Matcher(object):
+ def match(self, foo):
+ return None
+ self.assertThat("foo", Matcher())
+
+ def test_assertThat_mismatch_raises_description(self):
+ calls = []
+ class Mismatch(object):
+ def __init__(self, thing):
+ self.thing = thing
+ def describe(self):
+ calls.append(('describe_diff', self.thing))
+ return "object is not a thing"
+ def get_details(self):
+ return {}
+ class Matcher(object):
+ def match(self, thing):
+ calls.append(('match', thing))
+ return Mismatch(thing)
+ def __str__(self):
+ calls.append(('__str__',))
+ return "a description"
+ class Test(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ result = Test("test").run()
+ self.assertEqual([
+ ('match', "foo"),
+ ('describe_diff', "foo"),
+ ('__str__',),
+ ], calls)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_assertEqual_nice_formatting(self):
+ message = "These things ought not be equal."
+ a = ['apple', 'banana', 'cherry']
+ b = {'Thatcher': 'One who mends roofs of straw',
+ 'Major': 'A military officer, ranked below colonel',
+ 'Blair': 'To shout loudly',
+ 'Brown': 'The colour of healthy human faeces'}
+ expected_error = '\n'.join(
+ [message,
+ 'not equal:',
+ 'a = %s' % pformat(a),
+ 'b = %s' % pformat(b),
+ ''])
+ expected_error = '\n'.join([
+ 'Match failed. Matchee: "%r"' % b,
+ 'Matcher: Annotate(%r, Equals(%r))' % (message, a),
+ 'Difference: !=:',
+ 'reference = %s' % pformat(a),
+ 'actual = %s' % pformat(b),
+ ': ' + message,
+ ''
+ ])
+ self.assertFails(expected_error, self.assertEqual, a, b, message)
+ self.assertFails(expected_error, self.assertEquals, a, b, message)
+ self.assertFails(expected_error, self.failUnlessEqual, a, b, message)
+
+ def test_assertEqual_formatting_no_message(self):
+ a = "cat"
+ b = "dog"
+ expected_error = '\n'.join([
+ 'Match failed. Matchee: "dog"',
+ 'Matcher: Equals(\'cat\')',
+ 'Difference: \'cat\' != \'dog\'',
+ ''
+ ])
+ self.assertFails(expected_error, self.assertEqual, a, b)
+ self.assertFails(expected_error, self.assertEquals, a, b)
+ self.assertFails(expected_error, self.failUnlessEqual, a, b)
+
+
+class TestAddCleanup(TestCase):
+ """Tests for TestCase.addCleanup."""
+
+ class LoggingTest(TestCase):
+ """A test that logs calls to setUp, runTest and tearDown."""
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._calls = ['setUp']
+
+ def brokenSetUp(self):
+            # A setUp that deliberately fails.
+ self._calls = ['brokenSetUp']
+ raise RuntimeError('Deliberate Failure')
+
+ def runTest(self):
+ self._calls.append('runTest')
+
+ def brokenTest(self):
+ raise RuntimeError('Deliberate broken test')
+
+ def tearDown(self):
+ self._calls.append('tearDown')
+ TestCase.tearDown(self)
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._result_calls = []
+ self.test = TestAddCleanup.LoggingTest('runTest')
+ self.logging_result = LoggingResult(self._result_calls)
+
+ def assertErrorLogEqual(self, messages):
+ self.assertEqual(messages, [call[0] for call in self._result_calls])
+
+ def assertTestLogEqual(self, messages):
+ """Assert that the call log equals 'messages'."""
+ case = self._result_calls[0][1]
+ self.assertEqual(messages, case._calls)
+
+ def logAppender(self, message):
+ """A cleanup that appends 'message' to the tests log.
+
+ Cleanups are callables that are added to a test by addCleanup. To
+ verify that our cleanups run in the right order, we add strings to a
+ list that acts as a log. This method returns a cleanup that will add
+ the given message to that log when run.
+ """
+ self.test._calls.append(message)
+
+ def test_fixture(self):
+ # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
+ # This test doesn't test addCleanup itself, it just sanity checks the
+ # fixture.
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanup_run_before_tearDown(self):
+ # Cleanup functions added with 'addCleanup' are called before tearDown
+ # runs.
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
+
+ def test_add_cleanup_called_if_setUp_fails(self):
+ # Cleanup functions added with 'addCleanup' are called even if setUp
+ # fails. Note that tearDown has a different behavior: it is only
+ # called when setUp succeeds.
+ self.test.setUp = self.test.brokenSetUp
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
+
+ def test_addCleanup_called_in_reverse_order(self):
+ # Cleanup functions added with 'addCleanup' are called in reverse
+ # order.
+ #
+ # One of the main uses of addCleanup is to dynamically create
+ # resources that need some sort of explicit tearDown. Often one
+ # resource will be created in terms of another, e.g.,
+ # self.first = self.makeFirst()
+ # self.second = self.makeSecond(self.first)
+ #
+ # When this happens, we generally want to clean up the second resource
+ # before the first one, since the second depends on the first.
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_tearDown_runs_after_cleanup_failure(self):
+ # tearDown runs even if a cleanup function fails.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanups_continue_running_after_error(self):
+ # All cleanups are always run, even if one or two of them fail.
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_error_in_cleanups_are_captured(self):
+        # If a cleanup raises an error, we want to record it and fail the
+ # test, even though we go on to run other cleanups.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
+
+ def test_keyboard_interrupt_not_caught(self):
+ # If a cleanup raises KeyboardInterrupt, it gets reraised.
+ def raiseKeyboardInterrupt():
+ raise KeyboardInterrupt()
+ self.test.addCleanup(raiseKeyboardInterrupt)
+ self.assertThat(lambda:self.test.run(self.logging_result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ def test_all_errors_from_MultipleExceptions_reported(self):
+ # When a MultipleExceptions exception is caught, all the errors are
+ # reported.
+ def raiseMany():
+ try:
+ 1/0
+ except Exception:
+ exc_info1 = sys.exc_info()
+ try:
+ 1/0
+ except Exception:
+ exc_info2 = sys.exc_info()
+ raise MultipleExceptions(exc_info1, exc_info2)
+ self.test.addCleanup(raiseMany)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(self.logging_result._events[1][2].keys()))
+
+ def test_multipleCleanupErrorsReported(self):
+ # Errors from all failing cleanups are reported as separate backtraces.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(lambda: 1/0)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(self.logging_result._events[1][2].keys()))
+
+ def test_multipleErrorsCoreAndCleanupReported(self):
+ # Errors from all failing cleanups are reported, with stopTest,
+ # startTest inserted.
+ self.test = TestAddCleanup.LoggingTest('brokenTest')
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(lambda: 1/0)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']),
+ set(self.logging_result._events[1][2].keys()))
+
+
+class TestWithDetails(TestCase):
+
+ def assertDetailsProvided(self, case, expected_outcome, expected_keys):
+ """Assert that when case is run, details are provided to the result.
+
+ :param case: A TestCase to run.
+ :param expected_outcome: The call that should be made.
+ :param expected_keys: The keys to look for.
+ """
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ expected = [
+ ('startTest', case),
+ (expected_outcome, case),
+ ('stopTest', case),
+ ]
+ self.assertEqual(3, len(result._events))
+ self.assertEqual(expected[0], result._events[0])
+ self.assertEqual(expected[1], result._events[1][0:2])
+ # Checking the TB is right is rather tricky. doctest line matching
+ # would help, but 'meh'.
+ self.assertEqual(sorted(expected_keys),
+ sorted(result._events[1][2].keys()))
+ self.assertEqual(expected[-1], result._events[-1])
+
+ def get_content(self):
+ return content.Content(
+ content.ContentType("text", "foo"), lambda: ['foo'])
+
+
+class TestExpectedFailure(TestWithDetails):
+ """Tests for expected failures and unexpected successess."""
+
+ def make_unexpected_case(self):
+ class Case(TestCase):
+ def test(self):
+ raise testcase._UnexpectedSuccess
+ case = Case('test')
+ return case
+
+ def test_raising__UnexpectedSuccess_py27(self):
+ case = self.make_unexpected_case()
+ result = Python27TestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test_raising__UnexpectedSuccess_extended(self):
+ case = self.make_unexpected_case()
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case, {}),
+ ('stopTest', case),
+ ], result._events)
+
+ def make_xfail_case_xfails(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 0)
+ case = Case('test')
+ return case
+
+ def make_xfail_case_succeeds(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 1)
+ case = Case('test')
+ return case
+
+ def test_expectFailure_KnownFailure_extended(self):
+ case = self.make_xfail_case_xfails()
+ self.assertDetailsProvided(case, "addExpectedFailure",
+ ["foo", "traceback", "reason"])
+
+ def test_expectFailure_KnownFailure_unexpected_success(self):
+ case = self.make_xfail_case_succeeds()
+ self.assertDetailsProvided(case, "addUnexpectedSuccess",
+ ["foo", "reason"])
+
+
+class TestUniqueFactories(TestCase):
+ """Tests for getUniqueString and getUniqueInteger."""
+
+ def test_getUniqueInteger(self):
+ # getUniqueInteger returns an integer that increments each time you
+ # call it.
+ one = self.getUniqueInteger()
+ self.assertEqual(1, one)
+ two = self.getUniqueInteger()
+ self.assertEqual(2, two)
+
+ def test_getUniqueString(self):
+ # getUniqueString returns the current test id followed by a unique
+ # integer.
+ name_one = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 1), name_one)
+ name_two = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 2), name_two)
+
+ def test_getUniqueString_prefix(self):
+ # If getUniqueString is given an argument, it uses that argument as
+ # the prefix of the unique string, rather than the test id.
+ name_one = self.getUniqueString('foo')
+ self.assertThat(name_one, Equals('foo-1'))
+ name_two = self.getUniqueString('bar')
+ self.assertThat(name_two, Equals('bar-2'))
+
+
+class TestCloneTestWithNewId(TestCase):
+ """Tests for clone_test_with_new_id."""
+
+ def test_clone_test_with_new_id(self):
+ class FooTestCase(TestCase):
+ def test_foo(self):
+ pass
+ test = FooTestCase('test_foo')
+ oldName = test.id()
+ newName = self.getUniqueString()
+ newTest = clone_test_with_new_id(test, newName)
+ self.assertEqual(newName, newTest.id())
+ self.assertEqual(oldName, test.id(),
+ "the original test instance should be unchanged.")
+
+ def test_cloned_testcase_does_not_share_details(self):
+ """A cloned TestCase does not share the details dict."""
+ class Test(TestCase):
+ def test_foo(self):
+ self.addDetail(
+ 'foo', content.Content('text/plain', lambda: 'foo'))
+ orig_test = Test('test_foo')
+ cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString())
+ orig_test.run(unittest.TestResult())
+ self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes())
+ self.assertEqual(None, cloned_test.getDetails().get('foo'))
+
+
+class TestDetailsProvided(TestWithDetails):
+
+ def test_addDetail(self):
+ mycontent = self.get_content()
+ self.addDetail("foo", mycontent)
+ details = self.getDetails()
+ self.assertEqual({"foo": mycontent}, details)
+
+ def test_addError(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ 1/0
+ self.assertDetailsProvided(Case("test"), "addError",
+ ["foo", "traceback"])
+
+ def test_addFailure(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.fail('yo')
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "traceback"])
+
+ def test_addSkip(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.skip('yo')
+ self.assertDetailsProvided(Case("test"), "addSkip",
+ ["foo", "reason"])
+
+ def test_addSucccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.assertDetailsProvided(Case("test"), "addSuccess",
+ ["foo"])
+
+ def test_addUnexpectedSuccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ raise testcase._UnexpectedSuccess()
+ self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
+ ["foo"])
+
+ def test_addDetails_from_Mismatch(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "traceback"])
+
+ def test_multiple_addDetails_from_Mismatch(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content, "bar": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["bar", "foo", "traceback"])
+
+ def test_addDetails_with_same_name_as_key_from_get_details(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "foo-1", "traceback"])
+
+
+class TestSetupTearDown(TestCase):
+
+ def test_setUpNotCalled(self):
+ class DoesnotcallsetUp(TestCase):
+ def setUp(self):
+ pass
+ def test_method(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcallsetUp('test_method').run(result)
+ self.assertEqual(1, len(result.errors))
+
+ def test_tearDownNotCalled(self):
+ class DoesnotcalltearDown(TestCase):
+ def test_method(self):
+ pass
+ def tearDown(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcalltearDown('test_method').run(result)
+ self.assertEqual(1, len(result.errors))
+
+
+class TestSkipping(TestCase):
+ """Tests for skipping of tests functionality."""
+
+ def test_skip_causes_skipException(self):
+ self.assertThat(lambda:self.skip("Skip this test"),
+ Raises(MatchesException(self.skipException)))
+
+ def test_can_use_skipTest(self):
+ self.assertThat(lambda:self.skipTest("Skip this test"),
+ Raises(MatchesException(self.skipException)))
+
+ def test_skip_without_reason_works(self):
+ class Test(TestCase):
+ def test(self):
+ raise self.skipException()
+ case = Test("test")
+ result = ExtendedTestResult()
+ case.run(result)
+ self.assertEqual('addSkip', result._events[1][0])
+ self.assertEqual('no reason given.',
+ ''.join(result._events[1][2]['reason'].iter_text()))
+
+ def test_skipException_in_setup_calls_result_addSkip(self):
+ class TestThatRaisesInSetUp(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ self.skip("skipping this test")
+ def test_that_passes(self):
+ pass
+ calls = []
+ result = LoggingResult(calls)
+ test = TestThatRaisesInSetUp("test_that_passes")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "skipping this test"), ('stopTest', case)],
+ calls)
+
+ def test_skipException_in_test_method_calls_result_addSkip(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ self.skip("skipping this test")
+ result = Python27TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "skipping this test"), ('stopTest', case)],
+ result._events)
+
+ def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
+ class SkippingTest(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ raise self.skipException("skipping this test")
+ def test_that_raises_skipException(self):
+ pass
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skip_with_old_result_object_calls_addError(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ raise self.skipException("skipping this test")
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skip_decorator(self):
+ class SkippingTest(TestCase):
+ @skip("skipping this test")
+ def test_that_is_decorated_with_skip(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skip")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipIf_decorator(self):
+ class SkippingTest(TestCase):
+ @skipIf(True, "skipping this test")
+ def test_that_is_decorated_with_skipIf(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipIf")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipUnless_decorator(self):
+ class SkippingTest(TestCase):
+ @skipUnless(False, "skipping this test")
+ def test_that_is_decorated_with_skipUnless(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipUnless")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+
+class TestOnException(TestCase):
+
+ def test_default_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.onException(an_exc_info)
+ events.append(True)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([True]))
+
+ def test_added_handler_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.append)
+ self.onException(an_exc_info)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([an_exc_info]))
+
+ def test_handler_that_raises_is_not_caught(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.index)
+ self.assertThat(lambda: self.onException(an_exc_info),
+ Raises(MatchesException(ValueError)))
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([]))
+
+
+class TestPatchSupport(TestCase):
+
+ class Case(TestCase):
+ def test(self):
+ pass
+
+ def test_patch(self):
+ # TestCase.patch masks obj.attribute with the new value.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ self.assertEqual('patched', self.foo)
+
+ def test_patch_restored_after_run(self):
+ # TestCase.patch masks obj.attribute with the new value, but restores
+ # the original value after the test is finished.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.run()
+ self.assertEqual('original', self.foo)
+
+ def test_successive_patches_apply(self):
+ # TestCase.patch can be called multiple times per test. Each time you
+ # call it, it overrides the original value.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.patch(self, 'foo', 'second')
+ self.assertEqual('second', self.foo)
+
+ def test_successive_patches_restored_after_run(self):
+ # TestCase.patch restores the original value, no matter how many times
+ # it was called.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.patch(self, 'foo', 'second')
+ test.run()
+ self.assertEqual('original', self.foo)
+
+ def test_patch_nonexistent_attribute(self):
+ # TestCase.patch can be used to patch a non-existent attribute.
+ test = self.Case('test')
+ test.patch(self, 'doesntexist', 'patched')
+ self.assertEqual('patched', self.doesntexist)
+
+ def test_restore_nonexistent_attribute(self):
+ # TestCase.patch can be used to patch a non-existent attribute, after
+ # the test run, the attribute is then removed from the object.
+ test = self.Case('test')
+ test.patch(self, 'doesntexist', 'patched')
+ test.run()
+ marker = object()
+ value = getattr(self, 'doesntexist', marker)
+ self.assertIs(marker, value)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/testsuite.py b/lib/testtools/testtools/testsuite.py
new file mode 100644
index 0000000000..fd802621e3
--- /dev/null
+++ b/lib/testtools/testtools/testsuite.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""Test suites and related things."""
+
+__metaclass__ = type
+__all__ = [
+ 'ConcurrentTestSuite',
+ 'iterate_tests',
+ ]
+
+try:
+ from Queue import Queue
+except ImportError:
+ from queue import Queue
+import threading
+import unittest
+
+import testtools
+
+
+def iterate_tests(test_suite_or_case):
+ """Iterate through all of the test cases in 'test_suite_or_case'."""
+ try:
+ suite = iter(test_suite_or_case)
+ except TypeError:
+ yield test_suite_or_case
+ else:
+ for test in suite:
+ for subtest in iterate_tests(test):
+ yield subtest
+
+
+class ConcurrentTestSuite(unittest.TestSuite):
+ """A TestSuite whose run() calls out to a concurrency strategy."""
+
+ def __init__(self, suite, make_tests):
+ """Create a ConcurrentTestSuite to execute suite.
+
+ :param suite: A suite to run concurrently.
+ :param make_tests: A helper function to split the tests in the
+ ConcurrentTestSuite into some number of concurrently executing
+ sub-suites. make_tests must take a suite, and return an iterable
+ of TestCase-like object, each of which must have a run(result)
+ method.
+ """
+ super(ConcurrentTestSuite, self).__init__([suite])
+ self.make_tests = make_tests
+
+ def run(self, result):
+ """Run the tests concurrently.
+
+ This calls out to the provided make_tests helper, and then serialises
+ the results so that result only sees activity from one TestCase at
+ a time.
+
+ ConcurrentTestSuite provides no special mechanism to stop the tests
+ returned by make_tests, it is up to the make_tests to honour the
+ shouldStop attribute on the result object they are run with, which will
+ be set if an exception is raised in the thread which
+ ConcurrentTestSuite.run is called in.
+ """
+ tests = self.make_tests(self)
+ try:
+ threads = {}
+ queue = Queue()
+ result_semaphore = threading.Semaphore(1)
+ for test in tests:
+ process_result = testtools.ThreadsafeForwardingResult(result,
+ result_semaphore)
+ reader_thread = threading.Thread(
+ target=self._run_test, args=(test, process_result, queue))
+ threads[test] = reader_thread, process_result
+ reader_thread.start()
+ while threads:
+ finished_test = queue.get()
+ threads[finished_test][0].join()
+ del threads[finished_test]
+ except:
+ for thread, process_result in threads.values():
+ process_result.stop()
+ raise
+
+ def _run_test(self, test, process_result, queue):
+ try:
+ test.run(process_result)
+ finally:
+ queue.put(test)
diff --git a/lib/testtools/testtools/utils.py b/lib/testtools/testtools/utils.py
new file mode 100644
index 0000000000..0f39d8f5b6
--- /dev/null
+++ b/lib/testtools/testtools/utils.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2008-2010 testtools developers. See LICENSE for details.
+
+"""Utilities for dealing with stuff in unittest.
+
+Legacy - deprecated - use testtools.testsuite.iterate_tests
+"""
+
+import warnings
+warnings.warn("Please import iterate_tests from testtools.testsuite - "
+ "testtools.utils is deprecated.", DeprecationWarning, stacklevel=2)
+
+from testtools.testsuite import iterate_tests
+
diff --git a/lib/tevent/ABI/tevent-0.9.10.sigs b/lib/tevent/ABI/tevent-0.9.10.sigs
new file mode 100644
index 0000000000..9adaba579b
--- /dev/null
+++ b/lib/tevent/ABI/tevent-0.9.10.sigs
@@ -0,0 +1,73 @@
+_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
+_tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
+_tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
+_tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *)
+_tevent_loop_once: int (struct tevent_context *, const char *)
+_tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *)
+_tevent_loop_wait: int (struct tevent_context *, const char *)
+_tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *)
+_tevent_req_callback_data: void *(struct tevent_req *)
+_tevent_req_cancel: bool (struct tevent_req *, const char *)
+_tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *)
+_tevent_req_data: void *(struct tevent_req *)
+_tevent_req_done: void (struct tevent_req *, const char *)
+_tevent_req_error: bool (struct tevent_req *, uint64_t, const char *)
+_tevent_req_nomem: bool (const void *, struct tevent_req *, const char *)
+_tevent_req_notify_callback: void (struct tevent_req *, const char *)
+_tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
+tevent_backend_list: const char **(TALLOC_CTX *)
+tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *)
+tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
+tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
+tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
+tevent_common_check_signal: int (struct tevent_context *)
+tevent_common_context_destructor: int (struct tevent_context *)
+tevent_common_fd_destructor: int (struct tevent_fd *)
+tevent_common_fd_get_flags: uint16_t (struct tevent_fd *)
+tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
+tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t)
+tevent_common_loop_immediate: bool (struct tevent_context *)
+tevent_common_loop_timer_delay: struct timeval (struct tevent_context *)
+tevent_common_loop_wait: int (struct tevent_context *, const char *)
+tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
+tevent_context_init: struct tevent_context *(TALLOC_CTX *)
+tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *)
+tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...)
+tevent_fd_get_flags: uint16_t (struct tevent_fd *)
+tevent_fd_set_auto_close: void (struct tevent_fd *)
+tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
+tevent_fd_set_flags: void (struct tevent_fd *, uint16_t)
+tevent_loop_allow_nesting: void (struct tevent_context *)
+tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *)
+tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *)
+tevent_queue_length: size_t (struct tevent_queue *)
+tevent_queue_start: void (struct tevent_queue *)
+tevent_queue_stop: void (struct tevent_queue *)
+tevent_re_initialise: int (struct tevent_context *)
+tevent_register_backend: bool (const char *, const struct tevent_ops *)
+tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *)
+tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *)
+tevent_req_is_in_progress: bool (struct tevent_req *)
+tevent_req_poll: bool (struct tevent_req *, struct tevent_context *)
+tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *)
+tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *)
+tevent_req_received: void (struct tevent_req *)
+tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *)
+tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn)
+tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval)
+tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn)
+tevent_set_abort_fn: void (void (*)(const char *))
+tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *)
+tevent_set_debug_stderr: int (struct tevent_context *)
+tevent_set_default_backend: void (const char *)
+tevent_signal_support: bool (struct tevent_context *)
+tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t)
+tevent_timeval_compare: int (const struct timeval *, const struct timeval *)
+tevent_timeval_current: struct timeval (void)
+tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t)
+tevent_timeval_is_zero: bool (const struct timeval *)
+tevent_timeval_set: struct timeval (uint32_t, uint32_t)
+tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *)
+tevent_timeval_zero: struct timeval (void)
+tevent_wakeup_recv: bool (struct tevent_req *)
+tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)
diff --git a/lib/tevent/ABI/tevent-0.9.11.sigs b/lib/tevent/ABI/tevent-0.9.11.sigs
new file mode 100644
index 0000000000..9adaba579b
--- /dev/null
+++ b/lib/tevent/ABI/tevent-0.9.11.sigs
@@ -0,0 +1,73 @@
+_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
+_tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
+_tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
+_tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *)
+_tevent_loop_once: int (struct tevent_context *, const char *)
+_tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *)
+_tevent_loop_wait: int (struct tevent_context *, const char *)
+_tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *)
+_tevent_req_callback_data: void *(struct tevent_req *)
+_tevent_req_cancel: bool (struct tevent_req *, const char *)
+_tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *)
+_tevent_req_data: void *(struct tevent_req *)
+_tevent_req_done: void (struct tevent_req *, const char *)
+_tevent_req_error: bool (struct tevent_req *, uint64_t, const char *)
+_tevent_req_nomem: bool (const void *, struct tevent_req *, const char *)
+_tevent_req_notify_callback: void (struct tevent_req *, const char *)
+_tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
+tevent_backend_list: const char **(TALLOC_CTX *)
+tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *)
+tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
+tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
+tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
+tevent_common_check_signal: int (struct tevent_context *)
+tevent_common_context_destructor: int (struct tevent_context *)
+tevent_common_fd_destructor: int (struct tevent_fd *)
+tevent_common_fd_get_flags: uint16_t (struct tevent_fd *)
+tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
+tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t)
+tevent_common_loop_immediate: bool (struct tevent_context *)
+tevent_common_loop_timer_delay: struct timeval (struct tevent_context *)
+tevent_common_loop_wait: int (struct tevent_context *, const char *)
+tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
+tevent_context_init: struct tevent_context *(TALLOC_CTX *)
+tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *)
+tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...)
+tevent_fd_get_flags: uint16_t (struct tevent_fd *)
+tevent_fd_set_auto_close: void (struct tevent_fd *)
+tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
+tevent_fd_set_flags: void (struct tevent_fd *, uint16_t)
+tevent_loop_allow_nesting: void (struct tevent_context *)
+tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *)
+tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *)
+tevent_queue_length: size_t (struct tevent_queue *)
+tevent_queue_start: void (struct tevent_queue *)
+tevent_queue_stop: void (struct tevent_queue *)
+tevent_re_initialise: int (struct tevent_context *)
+tevent_register_backend: bool (const char *, const struct tevent_ops *)
+tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *)
+tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *)
+tevent_req_is_in_progress: bool (struct tevent_req *)
+tevent_req_poll: bool (struct tevent_req *, struct tevent_context *)
+tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *)
+tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *)
+tevent_req_received: void (struct tevent_req *)
+tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *)
+tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn)
+tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval)
+tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn)
+tevent_set_abort_fn: void (void (*)(const char *))
+tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *)
+tevent_set_debug_stderr: int (struct tevent_context *)
+tevent_set_default_backend: void (const char *)
+tevent_signal_support: bool (struct tevent_context *)
+tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t)
+tevent_timeval_compare: int (const struct timeval *, const struct timeval *)
+tevent_timeval_current: struct timeval (void)
+tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t)
+tevent_timeval_is_zero: bool (const struct timeval *)
+tevent_timeval_set: struct timeval (uint32_t, uint32_t)
+tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *)
+tevent_timeval_zero: struct timeval (void)
+tevent_wakeup_recv: bool (struct tevent_req *)
+tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)
diff --git a/lib/tevent/ABI/tevent-0.9.9.sigs b/lib/tevent/ABI/tevent-0.9.9.sigs
new file mode 100644
index 0000000000..9adaba579b
--- /dev/null
+++ b/lib/tevent/ABI/tevent-0.9.9.sigs
@@ -0,0 +1,73 @@
+_tevent_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
+_tevent_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
+_tevent_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
+_tevent_create_immediate: struct tevent_immediate *(TALLOC_CTX *, const char *)
+_tevent_loop_once: int (struct tevent_context *, const char *)
+_tevent_loop_until: int (struct tevent_context *, bool (*)(void *), void *, const char *)
+_tevent_loop_wait: int (struct tevent_context *, const char *)
+_tevent_queue_create: struct tevent_queue *(TALLOC_CTX *, const char *, const char *)
+_tevent_req_callback_data: void *(struct tevent_req *)
+_tevent_req_cancel: bool (struct tevent_req *, const char *)
+_tevent_req_create: struct tevent_req *(TALLOC_CTX *, void *, size_t, const char *, const char *)
+_tevent_req_data: void *(struct tevent_req *)
+_tevent_req_done: void (struct tevent_req *, const char *)
+_tevent_req_error: bool (struct tevent_req *, uint64_t, const char *)
+_tevent_req_nomem: bool (const void *, struct tevent_req *, const char *)
+_tevent_req_notify_callback: void (struct tevent_req *, const char *)
+_tevent_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
+tevent_backend_list: const char **(TALLOC_CTX *)
+tevent_cleanup_pending_signal_handlers: void (struct tevent_signal *)
+tevent_common_add_fd: struct tevent_fd *(struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *)
+tevent_common_add_signal: struct tevent_signal *(struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *)
+tevent_common_add_timer: struct tevent_timer *(struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *)
+tevent_common_check_signal: int (struct tevent_context *)
+tevent_common_context_destructor: int (struct tevent_context *)
+tevent_common_fd_destructor: int (struct tevent_fd *)
+tevent_common_fd_get_flags: uint16_t (struct tevent_fd *)
+tevent_common_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
+tevent_common_fd_set_flags: void (struct tevent_fd *, uint16_t)
+tevent_common_loop_immediate: bool (struct tevent_context *)
+tevent_common_loop_timer_delay: struct timeval (struct tevent_context *)
+tevent_common_loop_wait: int (struct tevent_context *, const char *)
+tevent_common_schedule_immediate: void (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *)
+tevent_context_init: struct tevent_context *(TALLOC_CTX *)
+tevent_context_init_byname: struct tevent_context *(TALLOC_CTX *, const char *)
+tevent_debug: void (struct tevent_context *, enum tevent_debug_level, const char *, ...)
+tevent_fd_get_flags: uint16_t (struct tevent_fd *)
+tevent_fd_set_auto_close: void (struct tevent_fd *)
+tevent_fd_set_close_fn: void (struct tevent_fd *, tevent_fd_close_fn_t)
+tevent_fd_set_flags: void (struct tevent_fd *, uint16_t)
+tevent_loop_allow_nesting: void (struct tevent_context *)
+tevent_loop_set_nesting_hook: void (struct tevent_context *, tevent_nesting_hook, void *)
+tevent_queue_add: bool (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *)
+tevent_queue_length: size_t (struct tevent_queue *)
+tevent_queue_start: void (struct tevent_queue *)
+tevent_queue_stop: void (struct tevent_queue *)
+tevent_re_initialise: int (struct tevent_context *)
+tevent_register_backend: bool (const char *, const struct tevent_ops *)
+tevent_req_default_print: char *(struct tevent_req *, TALLOC_CTX *)
+tevent_req_is_error: bool (struct tevent_req *, enum tevent_req_state *, uint64_t *)
+tevent_req_is_in_progress: bool (struct tevent_req *)
+tevent_req_poll: bool (struct tevent_req *, struct tevent_context *)
+tevent_req_post: struct tevent_req *(struct tevent_req *, struct tevent_context *)
+tevent_req_print: char *(TALLOC_CTX *, struct tevent_req *)
+tevent_req_received: void (struct tevent_req *)
+tevent_req_set_callback: void (struct tevent_req *, tevent_req_fn, void *)
+tevent_req_set_cancel_fn: void (struct tevent_req *, tevent_req_cancel_fn)
+tevent_req_set_endtime: bool (struct tevent_req *, struct tevent_context *, struct timeval)
+tevent_req_set_print_fn: void (struct tevent_req *, tevent_req_print_fn)
+tevent_set_abort_fn: void (void (*)(const char *))
+tevent_set_debug: int (struct tevent_context *, void (*)(void *, enum tevent_debug_level, const char *, va_list), void *)
+tevent_set_debug_stderr: int (struct tevent_context *)
+tevent_set_default_backend: void (const char *)
+tevent_signal_support: bool (struct tevent_context *)
+tevent_timeval_add: struct timeval (const struct timeval *, uint32_t, uint32_t)
+tevent_timeval_compare: int (const struct timeval *, const struct timeval *)
+tevent_timeval_current: struct timeval (void)
+tevent_timeval_current_ofs: struct timeval (uint32_t, uint32_t)
+tevent_timeval_is_zero: bool (const struct timeval *)
+tevent_timeval_set: struct timeval (uint32_t, uint32_t)
+tevent_timeval_until: struct timeval (const struct timeval *, const struct timeval *)
+tevent_timeval_zero: struct timeval (void)
+tevent_wakeup_recv: bool (struct tevent_req *)
+tevent_wakeup_send: struct tevent_req *(TALLOC_CTX *, struct tevent_context *, struct timeval)
diff --git a/lib/tevent/Makefile b/lib/tevent/Makefile
new file mode 100644
index 0000000000..5acfcbce58
--- /dev/null
+++ b/lib/tevent/Makefile
@@ -0,0 +1,51 @@
+# simple makefile wrapper to run waf
+
+WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf
+
+all:
+ $(WAF) build
+
+install:
+ $(WAF) install
+
+uninstall:
+ $(WAF) uninstall
+
+test:
+ $(WAF) test $(TEST_OPTIONS)
+
+dist:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) dist
+
+distcheck:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) distcheck
+
+clean:
+ $(WAF) clean
+
+distclean:
+ $(WAF) distclean
+
+reconfigure: configure
+ $(WAF) reconfigure
+
+show_waf_options:
+ $(WAF) --help
+
+# some compatibility make targets
+everything: all
+
+testsuite: all
+
+check: test
+
+# this should do an install as well, once install is finished
+installcheck: test
+
+etags:
+ $(WAF) etags
+
+ctags:
+ $(WAF) ctags
diff --git a/lib/tevent/Makefile.in b/lib/tevent/Makefile.in
deleted file mode 100644
index f3deb9df45..0000000000
--- a/lib/tevent/Makefile.in
+++ /dev/null
@@ -1,79 +0,0 @@
-#!gmake
-#
-# Makefile for tdb directory
-#
-
-CC = @CC@
-prefix = @prefix@
-exec_prefix = @exec_prefix@
-bindir = @bindir@
-includedir = @includedir@
-libdir = @libdir@
-VPATH = @srcdir@:@libreplacedir@
-srcdir = @srcdir@
-builddir = @builddir@
-sharedbuilddir = @sharedbuilddir@
-INSTALLCMD = @INSTALL@
-CPPFLAGS = @CPPFLAGS@ -I$(srcdir)/include -Iinclude -I.
-LDFLAGS = @LDFLAGS@
-EXEEXT = @EXEEXT@
-SHLD = @SHLD@
-SHLD_FLAGS = @SHLD_FLAGS@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PICFLAG = @PICFLAG@
-SHLIBEXT = @SHLIBEXT@
-LIB_PATH_VAR = @LIB_PATH_VAR@
-teventdir = @teventdir@
-
-TALLOC_CFLAGS = @TALLOC_CFLAGS@
-TALLOC_LDFLAGS = @TALLOC_CFLAGS@
-TALLOC_LIBS = @TALLOC_LIBS@
-
-TEVENT_CFLAGS = @TEVENT_CFLAGS@
-TEVENT_LDFLAGS = @TEVENT_CFLAGS@
-TEVENT_LIBS = @TEVENT_LIBS@
-
-CFLAGS = $(CPPFLAGS) $(TALLOC_CFLAGS) $(TEVENT_CFLAGS) @CFLAGS@
-LDFLAGS = $(TALLOC_LDFLAGS) $(TEVENT_LDFLAGS) @LDFLAGS@
-LIBS = $(TALLOC_LIBS) $(TEVENT_LIBS) @LIBS@
-
-TEVENT_OBJ = @TEVENT_OBJ@ @LIBREPLACEOBJ@
-
-SONAMEFLAG = @SONAMEFLAG@
-VERSIONSCRIPT = @VERSIONSCRIPT@
-EXPORTSFILE = @EXPORTSFILE@
-
-default: all
-
-include $(teventdir)/tevent.mk
-include $(teventdir)/rules.mk
-
-all:: showflags dirs $(PROGS) $(TEVENT_SOLIB) libtevent.a
-
-install:: all
-$(TEVENT_SOLIB): $(TEVENT_OBJ)
- $(SHLD) $(SHLD_FLAGS) $(LDFLAGS) $(LIBS) -o $@ $(TEVENT_OBJ) $(VERSIONSCRIPT) $(EXPORTSFILE) $(SONAMEFLAG)$(TEVENT_SONAME)
-
-shared-build: all
- ${INSTALLCMD} -d $(sharedbuilddir)/lib
- ${INSTALLCMD} -m 644 libtevent.a $(sharedbuilddir)/lib
- ${INSTALLCMD} -m 755 $(TEVENT_SOLIB) $(sharedbuilddir)/lib
- ln -sf $(TEVENT_SOLIB) $(sharedbuilddir)/lib/$(TEVENT_SONAME)
- ln -sf $(TEVENT_SOLIB) $(sharedbuilddir)/lib/libtevent.so
- ${INSTALLCMD} -d $(sharedbuilddir)/include
- ${INSTALLCMD} -m 644 $(srcdir)/tevent.h $(sharedbuilddir)/include
-
-check: test
-
-installcheck:: test install
-
-clean::
- rm -f *.o *.a */*.o
- rm -fr abi
-
-distclean:: clean
- rm -f config.log config.status config.h config.cache
- rm -f Makefile
-
-realdistclean:: distclean
- rm -f configure config.h.in
diff --git a/lib/tevent/autogen.sh b/lib/tevent/autogen.sh
deleted file mode 100755
index 7a1e7ab990..0000000000
--- a/lib/tevent/autogen.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-rm -rf autom4te.cache
-rm -f configure config.h.in
-
-IPATHS="-I libreplace -I lib/replace -I ../libreplace -I ../replace -I ../../../lib/replace"
-autoconf $IPATHS || exit 1
-autoheader $IPATHS || exit 1
-
-rm -rf autom4te.cache
-
-echo "Now run ./configure and then make."
-exit 0
-
diff --git a/lib/tevent/bindings.py b/lib/tevent/bindings.py
new file mode 100644
index 0000000000..1060caf28d
--- /dev/null
+++ b/lib/tevent/bindings.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+#
+# Python integration for tevent - tests
+#
+# Copyright (C) Jelmer Vernooij 2010
+#
+# ** NOTE! The following LGPL license applies to the tevent
+# ** library. This does NOT imply that all of Samba is released
+# ** under the LGPL
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 3 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+import signal
+import _tevent
+from unittest import TestCase
+
+class BackendListTests(TestCase):
+
+ def test_backend_list(self):
+ self.assertTrue(isinstance(_tevent.backend_list(), list))
+
+
+class CreateContextTests(TestCase):
+
+ def test_by_name(self):
+ ctx = _tevent.Context(_tevent.backend_list()[0])
+ self.assertTrue(ctx is not None)
+
+ def test_no_name(self):
+ ctx = _tevent.Context()
+ self.assertTrue(ctx is not None)
+
+
+class ContextTests(TestCase):
+
+ def setUp(self):
+ super(ContextTests, self).setUp()
+ self.ctx = _tevent.Context()
+
+ def test_signal_support(self):
+ self.assertTrue(type(self.ctx.signal_support) is bool)
+
+ def test_reinitialise(self):
+ self.ctx.reinitialise()
+
+ def test_loop_wait(self):
+ self.ctx.loop_wait()
+
+ def test_add_signal(self):
+ sig = self.ctx.add_signal(signal.SIGINT, 0, lambda callback: None)
+ self.assertTrue(isinstance(sig, _tevent.Signal))
diff --git a/lib/tevent/build_macros.m4 b/lib/tevent/build_macros.m4
deleted file mode 100644
index bb7fad8f7a..0000000000
--- a/lib/tevent/build_macros.m4
+++ /dev/null
@@ -1,15 +0,0 @@
-AC_DEFUN(BUILD_WITH_SHARED_BUILD_DIR,
- [ AC_ARG_WITH([shared-build-dir],
- [AC_HELP_STRING([--with-shared-build-dir=DIR],
- [temporary build directory where libraries are installed [$srcdir/sharedbuild]])])
-
- sharedbuilddir="$srcdir/sharedbuild"
- if test x"$with_shared_build_dir" != x; then
- sharedbuilddir=$with_shared_build_dir
- CFLAGS="$CFLAGS -I$with_shared_build_dir/include"
- CPPFLAGS="$CPPFLAGS -I$with_shared_build_dir/include"
- LDFLAGS="$LDFLAGS -L$with_shared_build_dir/lib"
- fi
- AC_SUBST(sharedbuilddir)
- ])
-
diff --git a/lib/tevent/config.guess b/lib/tevent/config.guess
deleted file mode 100755
index da83314608..0000000000
--- a/lib/tevent/config.guess
+++ /dev/null
@@ -1,1561 +0,0 @@
-#! /bin/sh
-# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
-# Free Software Foundation, Inc.
-
-timestamp='2009-04-27'
-
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner <per@bothner.com>.
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
-#
-# The plan is that this can be called by configure scripts if you
-# don't specify an explicit build system type.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION]
-
-Output the configuration name of the system \`$me' is run on.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.guess ($timestamp)
-
-Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help" >&2
- exit 1 ;;
- * )
- break ;;
- esac
-done
-
-if test $# != 0; then
- echo "$me: too many arguments$help" >&2
- exit 1
-fi
-
-trap 'exit 1' 1 2 15
-
-# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
-# compiler to aid in system detection is discouraged as it requires
-# temporary files to be created and, as you can see below, it is a
-# headache to deal with in a portable fashion.
-
-# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
-# use `HOST_CC' if defined, but it is deprecated.
-
-# Portable tmp directory creation inspired by the Autoconf team.
-
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,) echo "int x;" > $dummy.c ;
- for c in cc gcc c89 c99 ; do
- if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
- CC_FOR_BUILD="$c"; break ;
- fi ;
- done ;
- if test x"$CC_FOR_BUILD" = x ; then
- CC_FOR_BUILD=no_compiler_found ;
- fi
- ;;
- ,,*) CC_FOR_BUILD=$CC ;;
- ,*,*) CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
-
-# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
-# (ghazi@noc.rutgers.edu 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
- PATH=$PATH:/.attbin ; export PATH
-fi
-
-UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
-UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
-UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
-UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
-
-# Note: order is significant - the case branches are not exclusive.
-
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
- *:NetBSD:*:*)
- # NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
- # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
- # switched to ELF, *-*-netbsd* would select the old
- # object file format. This provides both forward
- # compatibility and a consistent mechanism for selecting the
- # object file format.
- #
- # Note: NetBSD doesn't particularly care about the vendor
- # portion of the name. We always set it to "unknown".
- sysctl="sysctl -n hw.machine_arch"
- UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
- case "${UNAME_MACHINE_ARCH}" in
- armeb) machine=armeb-unknown ;;
- arm*) machine=arm-unknown ;;
- sh3el) machine=shl-unknown ;;
- sh3eb) machine=sh-unknown ;;
- sh5el) machine=sh5le-unknown ;;
- *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
- esac
- # The Operating System including object format, if it has switched
- # to ELF recently, or will in the future.
- case "${UNAME_MACHINE_ARCH}" in
- arm*|i386|m68k|ns32k|sh3*|sparc|vax)
- eval $set_cc_for_build
- if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
- then
- # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
- # Return netbsd for either. FIX?
- os=netbsd
- else
- os=netbsdelf
- fi
- ;;
- *)
- os=netbsd
- ;;
- esac
- # The OS release
- # Debian GNU/NetBSD machines have a different userland, and
- # thus, need a distinct triplet. However, they do not need
- # kernel version information, so it can be replaced with a
- # suitable tag, in the style of linux-gnu.
- case "${UNAME_VERSION}" in
- Debian*)
- release='-gnu'
- ;;
- *)
- release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
- ;;
- esac
- # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
- # contains redundant information, the shorter form:
- # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}"
- exit ;;
- *:OpenBSD:*:*)
- UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
- exit ;;
- *:ekkoBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
- exit ;;
- *:SolidBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
- exit ;;
- macppc:MirBSD:*:*)
- echo powerpc-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- *:MirBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- alpha:OSF1:*:*)
- case $UNAME_RELEASE in
- *4.0)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
- ;;
- *5.*)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
- ;;
- esac
- # According to Compaq, /usr/sbin/psrinfo has been available on
- # OSF/1 and Tru64 systems produced since 1995. I hope that
- # covers most systems running today. This code pipes the CPU
- # types through head -n 1, so we only detect the type of CPU 0.
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
- case "$ALPHA_CPU_TYPE" in
- "EV4 (21064)")
- UNAME_MACHINE="alpha" ;;
- "EV4.5 (21064)")
- UNAME_MACHINE="alpha" ;;
- "LCA4 (21066/21068)")
- UNAME_MACHINE="alpha" ;;
- "EV5 (21164)")
- UNAME_MACHINE="alphaev5" ;;
- "EV5.6 (21164A)")
- UNAME_MACHINE="alphaev56" ;;
- "EV5.6 (21164PC)")
- UNAME_MACHINE="alphapca56" ;;
- "EV5.7 (21164PC)")
- UNAME_MACHINE="alphapca57" ;;
- "EV6 (21264)")
- UNAME_MACHINE="alphaev6" ;;
- "EV6.7 (21264A)")
- UNAME_MACHINE="alphaev67" ;;
- "EV6.8CB (21264C)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8AL (21264B)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8CX (21264D)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.9A (21264/EV69A)")
- UNAME_MACHINE="alphaev69" ;;
- "EV7 (21364)")
- UNAME_MACHINE="alphaev7" ;;
- "EV7.9 (21364A)")
- UNAME_MACHINE="alphaev79" ;;
- esac
- # A Pn.n version is a patched version.
- # A Vn.n version is a released version.
- # A Tn.n version is a released field test version.
- # A Xn.n version is an unreleased experimental baselevel.
- # 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- exit ;;
- Alpha\ *:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # Should we change UNAME_MACHINE based on the output of uname instead
- # of the specific Alpha model?
- echo alpha-pc-interix
- exit ;;
- 21064:Windows_NT:50:3)
- echo alpha-dec-winnt3.5
- exit ;;
- Amiga*:UNIX_System_V:4.0:*)
- echo m68k-unknown-sysv4
- exit ;;
- *:[Aa]miga[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-amigaos
- exit ;;
- *:[Mm]orph[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-morphos
- exit ;;
- *:OS/390:*:*)
- echo i370-ibm-openedition
- exit ;;
- *:z/VM:*:*)
- echo s390-ibm-zvmoe
- exit ;;
- *:OS400:*:*)
- echo powerpc-ibm-os400
- exit ;;
- arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
- echo arm-acorn-riscix${UNAME_RELEASE}
- exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
- echo arm-unknown-riscos
- exit ;;
- SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
- echo hppa1.1-hitachi-hiuxmpp
- exit ;;
- Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
- # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
- if test "`(/bin/universe) 2>/dev/null`" = att ; then
- echo pyramid-pyramid-sysv3
- else
- echo pyramid-pyramid-bsd
- fi
- exit ;;
- NILE*:*:*:dcosx)
- echo pyramid-pyramid-svr4
- exit ;;
- DRS?6000:unix:4.0:6*)
- echo sparc-icl-nx6
- exit ;;
- DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
- case `/usr/bin/uname -p` in
- sparc) echo sparc-icl-nx7; exit ;;
- esac ;;
- s390x:SunOS:*:*)
- echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4H:SunOS:5.*:*)
- echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
- echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
- eval $set_cc_for_build
- SUN_ARCH="i386"
- # If there is a compiler, see if it is configured for 64-bit objects.
- # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
- # This test works for both compilers.
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
- if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
- grep IS_64BIT_ARCH >/dev/null
- then
- SUN_ARCH="x86_64"
- fi
- fi
- echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:6*:*)
- # According to config.sub, this is the proper way to canonicalize
- # SunOS6. Hard to guess exactly what SunOS6 will be like, but
- # it's likely to be more like Solaris than SunOS4.
- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:*:*)
- case "`/usr/bin/arch -k`" in
- Series*|S4*)
- UNAME_RELEASE=`uname -v`
- ;;
- esac
- # Japanese Language versions have a version number like `4.1.3-JL'.
- echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
- exit ;;
- sun3*:SunOS:*:*)
- echo m68k-sun-sunos${UNAME_RELEASE}
- exit ;;
- sun*:*:4.2BSD:*)
- UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
- test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
- case "`/bin/arch`" in
- sun3)
- echo m68k-sun-sunos${UNAME_RELEASE}
- ;;
- sun4)
- echo sparc-sun-sunos${UNAME_RELEASE}
- ;;
- esac
- exit ;;
- aushp:SunOS:*:*)
- echo sparc-auspex-sunos${UNAME_RELEASE}
- exit ;;
- # The situation for MiNT is a little confusing. The machine name
- # can be virtually everything (everything which is not
- # "atarist" or "atariste" at least should have a processor
- # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
- # to the lowercase version "mint" (or "freemint"). Finally
- # the system name "TOS" denotes a system which is actually not
- # MiNT. But MiNT is downward compatible to TOS, so this should
- # be no problem.
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
- exit ;;
- hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
- exit ;;
- *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
- exit ;;
- m68k:machten:*:*)
- echo m68k-apple-machten${UNAME_RELEASE}
- exit ;;
- powerpc:machten:*:*)
- echo powerpc-apple-machten${UNAME_RELEASE}
- exit ;;
- RISC*:Mach:*:*)
- echo mips-dec-mach_bsd4.3
- exit ;;
- RISC*:ULTRIX:*:*)
- echo mips-dec-ultrix${UNAME_RELEASE}
- exit ;;
- VAX*:ULTRIX*:*:*)
- echo vax-dec-ultrix${UNAME_RELEASE}
- exit ;;
- 2020:CLIX:*:* | 2430:CLIX:*:*)
- echo clipper-intergraph-clix${UNAME_RELEASE}
- exit ;;
- mips:*:*:UMIPS | mips:*:*:RISCos)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-#ifdef __cplusplus
-#include <stdio.h> /* for printf() prototype */
- int main (int argc, char *argv[]) {
-#else
- int main (argc, argv) int argc; char *argv[]; {
-#endif
- #if defined (host_mips) && defined (MIPSEB)
- #if defined (SYSTYPE_SYSV)
- printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_SVR4)
- printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
- printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
- #endif
- #endif
- exit (-1);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c &&
- dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
- SYSTEM_NAME=`$dummy $dummyarg` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo mips-mips-riscos${UNAME_RELEASE}
- exit ;;
- Motorola:PowerMAX_OS:*:*)
- echo powerpc-motorola-powermax
- exit ;;
- Motorola:*:4.3:PL8-*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:Power_UNIX:*:*)
- echo powerpc-harris-powerunix
- exit ;;
- m88k:CX/UX:7*:*)
- echo m88k-harris-cxux7
- exit ;;
- m88k:*:4*:R4*)
- echo m88k-motorola-sysv4
- exit ;;
- m88k:*:3*:R3*)
- echo m88k-motorola-sysv3
- exit ;;
- AViiON:dgux:*:*)
- # DG/UX returns AViiON for all architectures
- UNAME_PROCESSOR=`/usr/bin/uname -p`
- if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
- then
- if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
- [ ${TARGET_BINARY_INTERFACE}x = x ]
- then
- echo m88k-dg-dgux${UNAME_RELEASE}
- else
- echo m88k-dg-dguxbcs${UNAME_RELEASE}
- fi
- else
- echo i586-dg-dgux${UNAME_RELEASE}
- fi
- exit ;;
- M88*:DolphinOS:*:*) # DolphinOS (SVR3)
- echo m88k-dolphin-sysv3
- exit ;;
- M88*:*:R3*:*)
- # Delta 88k system running SVR3
- echo m88k-motorola-sysv3
- exit ;;
- XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
- echo m88k-tektronix-sysv3
- exit ;;
- Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
- echo m68k-tektronix-bsd
- exit ;;
- *:IRIX*:*:*)
- echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
- exit ;;
- ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
- exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
- i*86:AIX:*:*)
- echo i386-ibm-aix
- exit ;;
- ia64:AIX:*:*)
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:2:3)
- if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <sys/systemcfg.h>
-
- main()
- {
- if (!__power_pc())
- exit(1);
- puts("powerpc-ibm-aix3.2.5");
- exit(0);
- }
-EOF
- if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
- then
- echo "$SYSTEM_NAME"
- else
- echo rs6000-ibm-aix3.2.5
- fi
- elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
- echo rs6000-ibm-aix3.2.4
- else
- echo rs6000-ibm-aix3.2
- fi
- exit ;;
- *:AIX:*:[456])
- IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
- if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
- IBM_ARCH=rs6000
- else
- IBM_ARCH=powerpc
- fi
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${IBM_ARCH}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:*:*)
- echo rs6000-ibm-aix
- exit ;;
- ibmrt:4.4BSD:*|romp-ibm:BSD:*)
- echo romp-ibm-bsd4.4
- exit ;;
- ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
- echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
- exit ;; # report: romp-ibm BSD 4.3
- *:BOSX:*:*)
- echo rs6000-bull-bosx
- exit ;;
- DPX/2?00:B.O.S.:*:*)
- echo m68k-bull-sysv3
- exit ;;
- 9000/[34]??:4.3bsd:1.*:*)
- echo m68k-hp-bsd
- exit ;;
- hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
- echo m68k-hp-bsd4.4
- exit ;;
- 9000/[34678]??:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- case "${UNAME_MACHINE}" in
- 9000/31? ) HP_ARCH=m68000 ;;
- 9000/[34]?? ) HP_ARCH=m68k ;;
- 9000/[678][0-9][0-9])
- if [ -x /usr/bin/getconf ]; then
- sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
- 532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
- 32) HP_ARCH="hppa2.0n" ;;
- 64) HP_ARCH="hppa2.0w" ;;
- '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
- esac ;;
- esac
- fi
- if [ "${HP_ARCH}" = "" ]; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-
- #define _HPUX_SOURCE
- #include <stdlib.h>
- #include <unistd.h>
-
- int main ()
- {
- #if defined(_SC_KERNEL_BITS)
- long bits = sysconf(_SC_KERNEL_BITS);
- #endif
- long cpu = sysconf (_SC_CPU_VERSION);
-
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
- case CPU_PA_RISC2_0:
- #if defined(_SC_KERNEL_BITS)
- switch (bits)
- {
- case 64: puts ("hppa2.0w"); break;
- case 32: puts ("hppa2.0n"); break;
- default: puts ("hppa2.0"); break;
- } break;
- #else /* !defined(_SC_KERNEL_BITS) */
- puts ("hppa2.0"); break;
- #endif
- default: puts ("hppa1.0"); break;
- }
- exit (0);
- }
-EOF
- (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
- test -z "$HP_ARCH" && HP_ARCH=hppa
- fi ;;
- esac
- if [ ${HP_ARCH} = "hppa2.0w" ]
- then
- eval $set_cc_for_build
-
- # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
- # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
- # generating 64-bit code. GNU and HP use different nomenclature:
- #
- # $ CC_FOR_BUILD=cc ./config.guess
- # => hppa2.0w-hp-hpux11.23
- # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
- # => hppa64-hp-hpux11.23
-
- if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
- grep __LP64__ >/dev/null
- then
- HP_ARCH="hppa2.0w"
- else
- HP_ARCH="hppa64"
- fi
- fi
- echo ${HP_ARCH}-hp-hpux${HPUX_REV}
- exit ;;
- ia64:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- echo ia64-hp-hpux${HPUX_REV}
- exit ;;
- 3050*:HI-UX:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <unistd.h>
- int
- main ()
- {
- long cpu = sysconf (_SC_CPU_VERSION);
- /* The order matters, because CPU_IS_HP_MC68K erroneously returns
- true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
- results, however. */
- if (CPU_IS_PA_RISC (cpu))
- {
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
- default: puts ("hppa-hitachi-hiuxwe2"); break;
- }
- }
- else if (CPU_IS_HP_MC68K (cpu))
- puts ("m68k-hitachi-hiuxwe2");
- else puts ("unknown-hitachi-hiuxwe2");
- exit (0);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo unknown-hitachi-hiuxwe2
- exit ;;
- 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
- echo hppa1.1-hp-bsd
- exit ;;
- 9000/8??:4.3bsd:*:*)
- echo hppa1.0-hp-bsd
- exit ;;
- *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
- echo hppa1.0-hp-mpeix
- exit ;;
- hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
- echo hppa1.1-hp-osf
- exit ;;
- hp8??:OSF1:*:*)
- echo hppa1.0-hp-osf
- exit ;;
- i*86:OSF1:*:*)
- if [ -x /usr/sbin/sysversion ] ; then
- echo ${UNAME_MACHINE}-unknown-osf1mk
- else
- echo ${UNAME_MACHINE}-unknown-osf1
- fi
- exit ;;
- parisc*:Lites*:*:*)
- echo hppa1.1-hp-lites
- exit ;;
- C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
- echo c1-convex-bsd
- exit ;;
- C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
- echo c34-convex-bsd
- exit ;;
- C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
- echo c38-convex-bsd
- exit ;;
- C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
- echo c4-convex-bsd
- exit ;;
- CRAY*Y-MP:*:*:*)
- echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*[A-Z]90:*:*:*)
- echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
- | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
- -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
- -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*TS:*:*:*)
- echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*T3E:*:*:*)
- echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*SV1:*:*:*)
- echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- *:UNICOS/mp:*:*)
- echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
- FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- 5000:UNIX_System_V:4.*:*)
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
- echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
- echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
- exit ;;
- sparc*:BSD/OS:*:*)
- echo sparc-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:BSD/OS:*:*)
- echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:FreeBSD:*:*)
- case ${UNAME_MACHINE} in
- pc98)
- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- amd64)
- echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- *)
- echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- esac
- exit ;;
- i*:CYGWIN*:*)
- echo ${UNAME_MACHINE}-pc-cygwin
- exit ;;
- *:MINGW*:*)
- echo ${UNAME_MACHINE}-pc-mingw32
- exit ;;
- i*:windows32*:*)
- # uname -m includes "-pc" on this system.
- echo ${UNAME_MACHINE}-mingw32
- exit ;;
- i*:PW*:*)
- echo ${UNAME_MACHINE}-pc-pw32
- exit ;;
- *:Interix*:[3456]*)
- case ${UNAME_MACHINE} in
- x86)
- echo i586-pc-interix${UNAME_RELEASE}
- exit ;;
- EM64T | authenticamd | genuineintel)
- echo x86_64-unknown-interix${UNAME_RELEASE}
- exit ;;
- IA64)
- echo ia64-unknown-interix${UNAME_RELEASE}
- exit ;;
- esac ;;
- [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
- echo i${UNAME_MACHINE}-pc-mks
- exit ;;
- i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
- # UNAME_MACHINE based on the output of uname instead of i386?
- echo i586-pc-interix
- exit ;;
- i*:UWIN*:*)
- echo ${UNAME_MACHINE}-pc-uwin
- exit ;;
- amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
- echo x86_64-unknown-cygwin
- exit ;;
- p*:CYGWIN*:*)
- echo powerpcle-unknown-cygwin
- exit ;;
- prep*:SunOS:5.*:*)
- echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- *:GNU:*:*)
- # the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
- exit ;;
- *:GNU/*:*:*)
- # other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
- exit ;;
- i*86:Minix:*:*)
- echo ${UNAME_MACHINE}-pc-minix
- exit ;;
- arm*:Linux:*:*)
- eval $set_cc_for_build
- if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep -q __ARM_EABI__
- then
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- else
- echo ${UNAME_MACHINE}-unknown-linux-gnueabi
- fi
- exit ;;
- avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- cris:Linux:*:*)
- echo cris-axis-linux-gnu
- exit ;;
- crisv32:Linux:*:*)
- echo crisv32-axis-linux-gnu
- exit ;;
- frv:Linux:*:*)
- echo frv-unknown-linux-gnu
- exit ;;
- ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- mips:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips
- #undef mipsel
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- mips64:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips64
- #undef mips64el
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- or32:Linux:*:*)
- echo or32-unknown-linux-gnu
- exit ;;
- ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
- exit ;;
- ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
- exit ;;
- alpha:Linux:*:*)
- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
- EV5) UNAME_MACHINE=alphaev5 ;;
- EV56) UNAME_MACHINE=alphaev56 ;;
- PCA56) UNAME_MACHINE=alphapca56 ;;
- PCA57) UNAME_MACHINE=alphapca56 ;;
- EV6) UNAME_MACHINE=alphaev6 ;;
- EV67) UNAME_MACHINE=alphaev67 ;;
- EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
- exit ;;
- padre:Linux:*:*)
- echo sparc-unknown-linux-gnu
- exit ;;
- parisc:Linux:*:* | hppa:Linux:*:*)
- # Look for CPU level
- case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
- esac
- exit ;;
- parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
- exit ;;
- s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
- exit ;;
- sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-gnu
- exit ;;
- x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
- exit ;;
- xtensa*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- i*86:Linux:*:*)
- # The BFD linker knows what the default object file format is, so
- # first see if it will tell us. cd to the root directory to prevent
- # problems with other programs or directories called `ld' in the path.
- # Set LC_ALL=C to ensure ld outputs messages in English.
- ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
- | sed -ne '/supported targets:/!d
- s/[ ][ ]*/ /g
- s/.*supported targets: *//
- s/ .*//
- p'`
- case "$ld_supported_targets" in
- elf32-i386)
- TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
- ;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit ;;
- esac
- # Determine whether the default compiler is a.out or elf
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #ifdef __ELF__
- # ifdef __GLIBC__
- # if __GLIBC__ >= 2
- LIBC=gnu
- # else
- LIBC=gnulibc1
- # endif
- # else
- LIBC=gnulibc1
- # endif
- #else
- #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- LIBC=gnu
- #else
- LIBC=gnuaout
- #endif
- #endif
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^LIBC/{
- s: ::g
- p
- }'`"
- test x"${LIBC}" != x && {
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
- exit
- }
- test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
- ;;
- i*86:DYNIX/ptx:4*:*)
- # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
- # earlier versions are messed up and put the nodename in both
- # sysname and nodename.
- echo i386-sequent-sysv4
- exit ;;
- i*86:UNIX_SV:4.2MP:2.*)
- # Unixware is an offshoot of SVR4, but it has its own version
- # number series starting with 2...
- # I am not positive that other SVR4 systems won't match this,
- # I just have to hope. -- rms.
- # Use sysv4.2uw... so that sysv4* matches it.
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
- exit ;;
- i*86:OS/2:*:*)
- # If we were able to find `uname', then EMX Unix compatibility
- # is probably installed.
- echo ${UNAME_MACHINE}-pc-os2-emx
- exit ;;
- i*86:XTS-300:*:STOP)
- echo ${UNAME_MACHINE}-unknown-stop
- exit ;;
- i*86:atheos:*:*)
- echo ${UNAME_MACHINE}-unknown-atheos
- exit ;;
- i*86:syllable:*:*)
- echo ${UNAME_MACHINE}-pc-syllable
- exit ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
- echo i386-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- i*86:*DOS:*:*)
- echo ${UNAME_MACHINE}-pc-msdosdjgpp
- exit ;;
- i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
- UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
- if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
- echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
- else
- echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
- fi
- exit ;;
- i*86:*:5:[678]*)
- # UnixWare 7.x, OpenUNIX and OpenServer 6.
- case `/bin/uname -X | grep "^Machine"` in
- *486*) UNAME_MACHINE=i486 ;;
- *Pentium) UNAME_MACHINE=i586 ;;
- *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
- esac
- echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
- exit ;;
- i*86:*:3.2:*)
- if test -f /usr/options/cb.name; then
- UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
- echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
- elif /bin/uname -X 2>/dev/null >/dev/null ; then
- UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
- (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
- (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
- && UNAME_MACHINE=i586
- (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
- && UNAME_MACHINE=i686
- (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
- && UNAME_MACHINE=i686
- echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
- else
- echo ${UNAME_MACHINE}-pc-sysv32
- fi
- exit ;;
- pc:*:*:*)
- # Left here for compatibility:
- # uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i586.
- # Note: whatever this is, it MUST be the same as what config.sub
- # prints for the "djgpp" host, or else GDB configury will decide that
- # this is a cross-build.
- echo i586-pc-msdosdjgpp
- exit ;;
- Intel:Mach:3*:*)
- echo i386-pc-mach3
- exit ;;
- paragon:*:*:*)
- echo i860-intel-osf1
- exit ;;
- i860:*:4.*:*) # i860-SVR4
- if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
- echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
- else # Add other i860-SVR4 vendors below as they are discovered.
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
- fi
- exit ;;
- mini*:CTIX:SYS*5:*)
- # "miniframe"
- echo m68010-convergent-sysv
- exit ;;
- mc68k:UNIX:SYSTEM5:3.51m)
- echo m68k-convergent-sysv
- exit ;;
- M680?0:D-NIX:5.3:*)
- echo m68k-diab-dnix
- exit ;;
- M68*:*:R3V[5678]*:*)
- test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
- 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
- OS_REL=''
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4; exit; } ;;
- NCR*:*:4.2:* | MPRAS*:*:4.2:*)
- OS_REL='.3'
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
- echo m68k-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- mc68030:UNIX_System_V:4.*:*)
- echo m68k-atari-sysv4
- exit ;;
- TSUNAMI:LynxOS:2.*:*)
- echo sparc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- rs6000:LynxOS:2.*:*)
- echo rs6000-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
- echo powerpc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- SM[BE]S:UNIX_SV:*:*)
- echo mips-dde-sysv${UNAME_RELEASE}
- exit ;;
- RM*:ReliantUNIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- RM*:SINIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- *:SINIX-*:*:*)
- if uname -p 2>/dev/null >/dev/null ; then
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- echo ${UNAME_MACHINE}-sni-sysv4
- else
- echo ns32k-sni-sysv
- fi
- exit ;;
- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
- # says <Richard.M.Bartel@ccMail.Census.GOV>
- echo i586-unisys-sysv4
- exit ;;
- *:UNIX_System_V:4*:FTX*)
- # From Gerald Hewes <hewes@openmarket.com>.
- # How about differentiating between stratus architectures? -djm
- echo hppa1.1-stratus-sysv4
- exit ;;
- *:*:*:FTX*)
- # From seanf@swdc.stratus.com.
- echo i860-stratus-sysv4
- exit ;;
- i*86:VOS:*:*)
- # From Paul.Green@stratus.com.
- echo ${UNAME_MACHINE}-stratus-vos
- exit ;;
- *:VOS:*:*)
- # From Paul.Green@stratus.com.
- echo hppa1.1-stratus-vos
- exit ;;
- mc68*:A/UX:*:*)
- echo m68k-apple-aux${UNAME_RELEASE}
- exit ;;
- news*:NEWS-OS:6*:*)
- echo mips-sony-newsos6
- exit ;;
- R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
- if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
- else
- echo mips-unknown-sysv${UNAME_RELEASE}
- fi
- exit ;;
- BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
- echo powerpc-be-beos
- exit ;;
- BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
- echo powerpc-apple-beos
- exit ;;
- BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
- echo i586-pc-beos
- exit ;;
- BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
- echo i586-pc-haiku
- exit ;;
- SX-4:SUPER-UX:*:*)
- echo sx4-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-5:SUPER-UX:*:*)
- echo sx5-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-6:SUPER-UX:*:*)
- echo sx6-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-7:SUPER-UX:*:*)
- echo sx7-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8:SUPER-UX:*:*)
- echo sx8-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8R:SUPER-UX:*:*)
- echo sx8r-nec-superux${UNAME_RELEASE}
- exit ;;
- Power*:Rhapsody:*:*)
- echo powerpc-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Rhapsody:*:*)
- echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Darwin:*:*)
- UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- case $UNAME_PROCESSOR in
- unknown) UNAME_PROCESSOR=powerpc ;;
- esac
- echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
- exit ;;
- *:procnto*:*:* | *:QNX:[0123456789]*:*)
- UNAME_PROCESSOR=`uname -p`
- if test "$UNAME_PROCESSOR" = "x86"; then
- UNAME_PROCESSOR=i386
- UNAME_MACHINE=pc
- fi
- echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
- exit ;;
- *:QNX:*:4*)
- echo i386-pc-qnx
- exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
- echo nse-tandem-nsk${UNAME_RELEASE}
- exit ;;
- NSR-?:NONSTOP_KERNEL:*:*)
- echo nsr-tandem-nsk${UNAME_RELEASE}
- exit ;;
- *:NonStop-UX:*:*)
- echo mips-compaq-nonstopux
- exit ;;
- BS2000:POSIX*:*:*)
- echo bs2000-siemens-sysv
- exit ;;
- DS/*:UNIX_System_V:*:*)
- echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
- exit ;;
- *:Plan9:*:*)
- # "uname -m" is not consistent, so use $cputype instead. 386
- # is converted to i386 for consistency with other x86
- # operating systems.
- if test "$cputype" = "386"; then
- UNAME_MACHINE=i386
- else
- UNAME_MACHINE="$cputype"
- fi
- echo ${UNAME_MACHINE}-unknown-plan9
- exit ;;
- *:TOPS-10:*:*)
- echo pdp10-unknown-tops10
- exit ;;
- *:TENEX:*:*)
- echo pdp10-unknown-tenex
- exit ;;
- KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
- echo pdp10-dec-tops20
- exit ;;
- XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
- echo pdp10-xkl-tops20
- exit ;;
- *:TOPS-20:*:*)
- echo pdp10-unknown-tops20
- exit ;;
- *:ITS:*:*)
- echo pdp10-unknown-its
- exit ;;
- SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
- exit ;;
- *:DragonFly:*:*)
- echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
- exit ;;
- *:*VMS:*:*)
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- case "${UNAME_MACHINE}" in
- A*) echo alpha-dec-vms ; exit ;;
- I*) echo ia64-dec-vms ; exit ;;
- V*) echo vax-dec-vms ; exit ;;
- esac ;;
- *:XENIX:*:SysV)
- echo i386-pc-xenix
- exit ;;
- i*86:skyos:*:*)
- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
- exit ;;
- i*86:rdos:*:*)
- echo ${UNAME_MACHINE}-pc-rdos
- exit ;;
- i*86:AROS:*:*)
- echo ${UNAME_MACHINE}-pc-aros
- exit ;;
-esac
-
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
- I don't know.... */
- printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
- printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
- "4"
-#else
- ""
-#endif
- ); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
- printf ("arm-acorn-riscix\n"); exit (0);
-#endif
-
-#if defined (hp300) && !defined (hpux)
- printf ("m68k-hp-bsd\n"); exit (0);
-#endif
-
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
- int version;
- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
- if (version < 4)
- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
- else
- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
- exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
- printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
- printf ("ns32k-encore-mach\n"); exit (0);
-#else
- printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
- printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
- printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
- printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
- struct utsname un;
-
- uname(&un);
-
- if (strncmp(un.version, "V2", 2) == 0) {
- printf ("i386-sequent-ptx2\n"); exit (0);
- }
- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
- printf ("i386-sequent-ptx1\n"); exit (0);
- }
- printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-# include <sys/param.h>
-# if defined (BSD)
-# if BSD == 43
- printf ("vax-dec-bsd4.3\n"); exit (0);
-# else
-# if BSD == 199006
- printf ("vax-dec-bsd4.3reno\n"); exit (0);
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# endif
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# else
- printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
- printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
- exit (1);
-}
-EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
- case `getsysinfo -f cpu_type` in
- c1*)
- echo c1-convex-bsd
- exit ;;
- c2*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- c34*)
- echo c34-convex-bsd
- exit ;;
- c38*)
- echo c38-convex-bsd
- exit ;;
- c4*)
- echo c4-convex-bsd
- exit ;;
- esac
-fi
-
-cat >&2 <<EOF
-$0: unable to guess system type
-
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
-
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
-and
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
-
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
-
-config.guess timestamp = $timestamp
-
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
-/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
-
-hostinfo = `(hostinfo) 2>/dev/null`
-/bin/universe = `(/bin/universe) 2>/dev/null`
-/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
-/bin/arch = `(/bin/arch) 2>/dev/null`
-/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
-
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
-EOF
-
-exit 1
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/tevent/config.sub b/lib/tevent/config.sub
deleted file mode 100755
index a39437d015..0000000000
--- a/lib/tevent/config.sub
+++ /dev/null
@@ -1,1686 +0,0 @@
-#! /bin/sh
-# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
-# Free Software Foundation, Inc.
-
-timestamp='2009-04-17'
-
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# Configuration subroutine to validate and canonicalize a configuration type.
-# Supply the specified configuration type as an argument.
-# If it is invalid, we print an error message on stderr and exit with code 1.
-# Otherwise, we print the canonical config type on stdout and succeed.
-
-# This file is supposed to be the same for all GNU packages
-# and recognize all the CPU types, system types and aliases
-# that are meaningful with *any* GNU software.
-# Each package is responsible for reporting which valid configurations
-# it does not support. The user should be able to distinguish
-# a failure to support a valid configuration from a meaningless
-# configuration.
-
-# The goal of this file is to map all the various variations of a given
-# machine specification into a single specification in the form:
-# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
-# or in some cases, the newer four-part form:
-# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
-# It is wrong to echo any other type of specification.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
- $0 [OPTION] ALIAS
-
-Canonicalize a configuration name.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.sub ($timestamp)
-
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help"
- exit 1 ;;
-
- *local*)
- # First pass through any local machine types.
- echo $1
- exit ;;
-
- * )
- break ;;
- esac
-done
-
-case $# in
- 0) echo "$me: missing argument$help" >&2
- exit 1;;
- 1) ;;
- *) echo "$me: too many arguments$help" >&2
- exit 1;;
-esac
-
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
- nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
- uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
- kopensolaris*-gnu* | \
- storm-chaos* | os2-emx* | rtmk-nova*)
- os=-$maybe_os
- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
- ;;
- *)
- basic_machine=`echo $1 | sed 's/-[^-]*$//'`
- if [ $basic_machine != $1 ]
- then os=`echo $1 | sed 's/.*-/-/'`
- else os=; fi
- ;;
-esac
-
-### Let's recognize common machines as not being operating systems so
-### that things like config.sub decstation-3100 work. We also
-### recognize some manufacturers as not being operating systems, so we
-### can provide default operating systems below.
-case $os in
- -sun*os*)
- # Prevent following clause from handling this invalid input.
- ;;
- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
- -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
- -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
- -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
- -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
- -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray)
- os=
- basic_machine=$1
- ;;
- -sim | -cisco | -oki | -wec | -winbond)
- os=
- basic_machine=$1
- ;;
- -scout)
- ;;
- -wrs)
- os=-vxworks
- basic_machine=$1
- ;;
- -chorusos*)
- os=-chorusos
- basic_machine=$1
- ;;
- -chorusrdb)
- os=-chorusrdb
- basic_machine=$1
- ;;
- -hiux*)
- os=-hiuxwe2
- ;;
- -sco6)
- os=-sco5v6
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5)
- os=-sco3.2v5
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco4)
- os=-sco3.2v4
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2.[4-9]*)
- os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2v[4-9]*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5v6*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco*)
- os=-sco3.2v2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -udk*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -isc)
- os=-isc2.2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -clix*)
- basic_machine=clipper-intergraph
- ;;
- -isc*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -lynx*)
- os=-lynxos
- ;;
- -ptx*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
- ;;
- -windowsnt*)
- os=`echo $os | sed -e 's/windowsnt/winnt/'`
- ;;
- -psos*)
- os=-psos
- ;;
- -mint | -mint[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
-esac
-
-# Decode aliases for certain CPU-COMPANY combinations.
-case $basic_machine in
- # Recognize the basic CPU types without company name.
- # Some are omitted here because they have special meanings below.
- 1750a | 580 \
- | a29k \
- | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
- | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
- | am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
- | bfin \
- | c4x | clipper \
- | d10v | d30v | dlx | dsp16xx \
- | fido | fr30 | frv \
- | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
- | i370 | i860 | i960 | ia64 \
- | ip2k | iq2000 \
- | lm32 \
- | m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep | metag \
- | mips | mipsbe | mipseb | mipsel | mipsle \
- | mips16 \
- | mips64 | mips64el \
- | mips64octeon | mips64octeonel \
- | mips64orion | mips64orionel \
- | mips64r5900 | mips64r5900el \
- | mips64vr | mips64vrel \
- | mips64vr4100 | mips64vr4100el \
- | mips64vr4300 | mips64vr4300el \
- | mips64vr5000 | mips64vr5000el \
- | mips64vr5900 | mips64vr5900el \
- | mipsisa32 | mipsisa32el \
- | mipsisa32r2 | mipsisa32r2el \
- | mipsisa64 | mipsisa64el \
- | mipsisa64r2 | mipsisa64r2el \
- | mipsisa64sb1 | mipsisa64sb1el \
- | mipsisa64sr71k | mipsisa64sr71kel \
- | mipstx39 | mipstx39el \
- | mn10200 | mn10300 \
- | moxie \
- | mt \
- | msp430 \
- | nios | nios2 \
- | ns16k | ns32k \
- | or32 \
- | pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
- | pyramid \
- | score \
- | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
- | sh64 | sh64le \
- | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
- | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
- | spu | strongarm \
- | tahoe | thumb | tic4x | tic80 | tron \
- | v850 | v850e \
- | we32k \
- | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
- | z8k | z80)
- basic_machine=$basic_machine-unknown
- ;;
- m6811 | m68hc11 | m6812 | m68hc12)
- # Motorola 68HC11/12.
- basic_machine=$basic_machine-unknown
- os=-none
- ;;
- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
- ;;
- ms1)
- basic_machine=mt-unknown
- ;;
-
- # We use `pc' rather than `unknown'
- # because (1) that's what they normally are, and
- # (2) the word "unknown" tends to confuse beginning users.
- i*86 | x86_64)
- basic_machine=$basic_machine-pc
- ;;
- # Object if more than one company name word.
- *-*-*)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
- # Recognize the basic CPU types with company name.
- 580-* \
- | a29k-* \
- | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
- | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
- | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
- | avr-* | avr32-* \
- | bfin-* | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
- | clipper-* | craynv-* | cydra-* \
- | d10v-* | d30v-* | dlx-* \
- | elxsi-* \
- | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
- | h8300-* | h8500-* \
- | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
- | i*86-* | i860-* | i960-* | ia64-* \
- | ip2k-* | iq2000-* \
- | lm32-* \
- | m32c-* | m32r-* | m32rle-* \
- | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
- | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
- | mips16-* \
- | mips64-* | mips64el-* \
- | mips64octeon-* | mips64octeonel-* \
- | mips64orion-* | mips64orionel-* \
- | mips64r5900-* | mips64r5900el-* \
- | mips64vr-* | mips64vrel-* \
- | mips64vr4100-* | mips64vr4100el-* \
- | mips64vr4300-* | mips64vr4300el-* \
- | mips64vr5000-* | mips64vr5000el-* \
- | mips64vr5900-* | mips64vr5900el-* \
- | mipsisa32-* | mipsisa32el-* \
- | mipsisa32r2-* | mipsisa32r2el-* \
- | mipsisa64-* | mipsisa64el-* \
- | mipsisa64r2-* | mipsisa64r2el-* \
- | mipsisa64sb1-* | mipsisa64sb1el-* \
- | mipsisa64sr71k-* | mipsisa64sr71kel-* \
- | mipstx39-* | mipstx39el-* \
- | mmix-* \
- | mt-* \
- | msp430-* \
- | nios-* | nios2-* \
- | none-* | np1-* | ns16k-* | ns32k-* \
- | orion-* \
- | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
- | pyramid-* \
- | romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
- | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
- | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
- | sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
- | tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
- | tron-* \
- | v850-* | v850e-* | vax-* \
- | we32k-* \
- | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
- | xstormy16-* | xtensa*-* \
- | ymp-* \
- | z8k-* | z80-*)
- ;;
- # Recognize the basic CPU types without company name, with glob match.
- xtensa*)
- basic_machine=$basic_machine-unknown
- ;;
- # Recognize the various machine names and aliases which stand
- # for a CPU type and a company and sometimes even an OS.
- 386bsd)
- basic_machine=i386-unknown
- os=-bsd
- ;;
- 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
- basic_machine=m68000-att
- ;;
- 3b*)
- basic_machine=we32k-att
- ;;
- a29khif)
- basic_machine=a29k-amd
- os=-udi
- ;;
- abacus)
- basic_machine=abacus-unknown
- ;;
- adobe68k)
- basic_machine=m68010-adobe
- os=-scout
- ;;
- alliant | fx80)
- basic_machine=fx80-alliant
- ;;
- altos | altos3068)
- basic_machine=m68k-altos
- ;;
- am29k)
- basic_machine=a29k-none
- os=-bsd
- ;;
- amd64)
- basic_machine=x86_64-pc
- ;;
- amd64-*)
- basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- amdahl)
- basic_machine=580-amdahl
- os=-sysv
- ;;
- amiga | amiga-*)
- basic_machine=m68k-unknown
- ;;
- amigaos | amigados)
- basic_machine=m68k-unknown
- os=-amigaos
- ;;
- amigaunix | amix)
- basic_machine=m68k-unknown
- os=-sysv4
- ;;
- apollo68)
- basic_machine=m68k-apollo
- os=-sysv
- ;;
- apollo68bsd)
- basic_machine=m68k-apollo
- os=-bsd
- ;;
- aros)
- basic_machine=i386-pc
- os=-aros
- ;;
- aux)
- basic_machine=m68k-apple
- os=-aux
- ;;
- balance)
- basic_machine=ns32k-sequent
- os=-dynix
- ;;
- blackfin)
- basic_machine=bfin-unknown
- os=-linux
- ;;
- blackfin-*)
- basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- c90)
- basic_machine=c90-cray
- os=-unicos
- ;;
- cegcc)
- basic_machine=arm-unknown
- os=-cegcc
- ;;
- convex-c1)
- basic_machine=c1-convex
- os=-bsd
- ;;
- convex-c2)
- basic_machine=c2-convex
- os=-bsd
- ;;
- convex-c32)
- basic_machine=c32-convex
- os=-bsd
- ;;
- convex-c34)
- basic_machine=c34-convex
- os=-bsd
- ;;
- convex-c38)
- basic_machine=c38-convex
- os=-bsd
- ;;
- cray | j90)
- basic_machine=j90-cray
- os=-unicos
- ;;
- craynv)
- basic_machine=craynv-cray
- os=-unicosmp
- ;;
- cr16)
- basic_machine=cr16-unknown
- os=-elf
- ;;
- crds | unos)
- basic_machine=m68k-crds
- ;;
- crisv32 | crisv32-* | etraxfs*)
- basic_machine=crisv32-axis
- ;;
- cris | cris-* | etrax*)
- basic_machine=cris-axis
- ;;
- crx)
- basic_machine=crx-unknown
- os=-elf
- ;;
- da30 | da30-*)
- basic_machine=m68k-da30
- ;;
- decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
- basic_machine=mips-dec
- ;;
- decsystem10* | dec10*)
- basic_machine=pdp10-dec
- os=-tops10
- ;;
- decsystem20* | dec20*)
- basic_machine=pdp10-dec
- os=-tops20
- ;;
- delta | 3300 | motorola-3300 | motorola-delta \
- | 3300-motorola | delta-motorola)
- basic_machine=m68k-motorola
- ;;
- delta88)
- basic_machine=m88k-motorola
- os=-sysv3
- ;;
- dicos)
- basic_machine=i686-pc
- os=-dicos
- ;;
- djgpp)
- basic_machine=i586-pc
- os=-msdosdjgpp
- ;;
- dpx20 | dpx20-*)
- basic_machine=rs6000-bull
- os=-bosx
- ;;
- dpx2* | dpx2*-bull)
- basic_machine=m68k-bull
- os=-sysv3
- ;;
- ebmon29k)
- basic_machine=a29k-amd
- os=-ebmon
- ;;
- elxsi)
- basic_machine=elxsi-elxsi
- os=-bsd
- ;;
- encore | umax | mmax)
- basic_machine=ns32k-encore
- ;;
- es1800 | OSE68k | ose68k | ose | OSE)
- basic_machine=m68k-ericsson
- os=-ose
- ;;
- fx2800)
- basic_machine=i860-alliant
- ;;
- genix)
- basic_machine=ns32k-ns
- ;;
- gmicro)
- basic_machine=tron-gmicro
- os=-sysv
- ;;
- go32)
- basic_machine=i386-pc
- os=-go32
- ;;
- h3050r* | hiux*)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- h8300hms)
- basic_machine=h8300-hitachi
- os=-hms
- ;;
- h8300xray)
- basic_machine=h8300-hitachi
- os=-xray
- ;;
- h8500hms)
- basic_machine=h8500-hitachi
- os=-hms
- ;;
- harris)
- basic_machine=m88k-harris
- os=-sysv3
- ;;
- hp300-*)
- basic_machine=m68k-hp
- ;;
- hp300bsd)
- basic_machine=m68k-hp
- os=-bsd
- ;;
- hp300hpux)
- basic_machine=m68k-hp
- os=-hpux
- ;;
- hp3k9[0-9][0-9] | hp9[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k2[0-9][0-9] | hp9k31[0-9])
- basic_machine=m68000-hp
- ;;
- hp9k3[2-9][0-9])
- basic_machine=m68k-hp
- ;;
- hp9k6[0-9][0-9] | hp6[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k7[0-79][0-9] | hp7[0-79][0-9])
- basic_machine=hppa1.1-hp
- ;;
- hp9k78[0-9] | hp78[0-9])
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][13679] | hp8[0-9][13679])
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][0-9] | hp8[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hppa-next)
- os=-nextstep3
- ;;
- hppaosf)
- basic_machine=hppa1.1-hp
- os=-osf
- ;;
- hppro)
- basic_machine=hppa1.1-hp
- os=-proelf
- ;;
- i370-ibm* | ibm*)
- basic_machine=i370-ibm
- ;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
- i*86v32)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv32
- ;;
- i*86v4*)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv4
- ;;
- i*86v)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv
- ;;
- i*86sol2)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-solaris2
- ;;
- i386mach)
- basic_machine=i386-mach
- os=-mach
- ;;
- i386-vsta | vsta)
- basic_machine=i386-unknown
- os=-vsta
- ;;
- iris | iris4d)
- basic_machine=mips-sgi
- case $os in
- -irix*)
- ;;
- *)
- os=-irix4
- ;;
- esac
- ;;
- isi68 | isi)
- basic_machine=m68k-isi
- os=-sysv
- ;;
- m68knommu)
- basic_machine=m68k-unknown
- os=-linux
- ;;
- m68knommu-*)
- basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- m88k-omron*)
- basic_machine=m88k-omron
- ;;
- magnum | m3230)
- basic_machine=mips-mips
- os=-sysv
- ;;
- merlin)
- basic_machine=ns32k-utek
- os=-sysv
- ;;
- mingw32)
- basic_machine=i386-pc
- os=-mingw32
- ;;
- mingw32ce)
- basic_machine=arm-unknown
- os=-mingw32ce
- ;;
- miniframe)
- basic_machine=m68000-convergent
- ;;
- *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
- mips3*-*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
- ;;
- mips3*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
- ;;
- monitor)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- morphos)
- basic_machine=powerpc-unknown
- os=-morphos
- ;;
- msdos)
- basic_machine=i386-pc
- os=-msdos
- ;;
- ms1-*)
- basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
- ;;
- mvs)
- basic_machine=i370-ibm
- os=-mvs
- ;;
- ncr3000)
- basic_machine=i486-ncr
- os=-sysv4
- ;;
- netbsd386)
- basic_machine=i386-unknown
- os=-netbsd
- ;;
- netwinder)
- basic_machine=armv4l-rebel
- os=-linux
- ;;
- news | news700 | news800 | news900)
- basic_machine=m68k-sony
- os=-newsos
- ;;
- news1000)
- basic_machine=m68030-sony
- os=-newsos
- ;;
- news-3600 | risc-news)
- basic_machine=mips-sony
- os=-newsos
- ;;
- necv70)
- basic_machine=v70-nec
- os=-sysv
- ;;
- next | m*-next )
- basic_machine=m68k-next
- case $os in
- -nextstep* )
- ;;
- -ns2*)
- os=-nextstep2
- ;;
- *)
- os=-nextstep3
- ;;
- esac
- ;;
- nh3000)
- basic_machine=m68k-harris
- os=-cxux
- ;;
- nh[45]000)
- basic_machine=m88k-harris
- os=-cxux
- ;;
- nindy960)
- basic_machine=i960-intel
- os=-nindy
- ;;
- mon960)
- basic_machine=i960-intel
- os=-mon960
- ;;
- nonstopux)
- basic_machine=mips-compaq
- os=-nonstopux
- ;;
- np1)
- basic_machine=np1-gould
- ;;
- nsr-tandem)
- basic_machine=nsr-tandem
- ;;
- op50n-* | op60c-*)
- basic_machine=hppa1.1-oki
- os=-proelf
- ;;
- openrisc | openrisc-*)
- basic_machine=or32-unknown
- ;;
- os400)
- basic_machine=powerpc-ibm
- os=-os400
- ;;
- OSE68000 | ose68000)
- basic_machine=m68000-ericsson
- os=-ose
- ;;
- os68k)
- basic_machine=m68k-none
- os=-os68k
- ;;
- pa-hitachi)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- paragon)
- basic_machine=i860-intel
- os=-osf
- ;;
- parisc)
- basic_machine=hppa-unknown
- os=-linux
- ;;
- parisc-*)
- basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=-linux
- ;;
- pbd)
- basic_machine=sparc-tti
- ;;
- pbb)
- basic_machine=m68k-tti
- ;;
- pc532 | pc532-*)
- basic_machine=ns32k-pc532
- ;;
- pc98)
- basic_machine=i386-pc
- ;;
- pc98-*)
- basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium | p5 | k5 | k6 | nexgen | viac3)
- basic_machine=i586-pc
- ;;
- pentiumpro | p6 | 6x86 | athlon | athlon_*)
- basic_machine=i686-pc
- ;;
- pentiumii | pentium2 | pentiumiii | pentium3)
- basic_machine=i686-pc
- ;;
- pentium4)
- basic_machine=i786-pc
- ;;
- pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
- basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumpro-* | p6-* | 6x86-* | athlon-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium4-*)
- basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pn)
- basic_machine=pn-gould
- ;;
- power) basic_machine=power-ibm
- ;;
- ppc) basic_machine=powerpc-unknown
- ;;
- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppcle | powerpclittle | ppc-le | powerpc-little)
- basic_machine=powerpcle-unknown
- ;;
- ppcle-* | powerpclittle-*)
- basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64) basic_machine=powerpc64-unknown
- ;;
- ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64le | powerpc64little | ppc64-le | powerpc64-little)
- basic_machine=powerpc64le-unknown
- ;;
- ppc64le-* | powerpc64little-*)
- basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ps2)
- basic_machine=i386-ibm
- ;;
- pw32)
- basic_machine=i586-unknown
- os=-pw32
- ;;
- rdos)
- basic_machine=i386-pc
- os=-rdos
- ;;
- rom68k)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- rm[46]00)
- basic_machine=mips-siemens
- ;;
- rtpc | rtpc-*)
- basic_machine=romp-ibm
- ;;
- s390 | s390-*)
- basic_machine=s390-ibm
- ;;
- s390x | s390x-*)
- basic_machine=s390x-ibm
- ;;
- sa29200)
- basic_machine=a29k-amd
- os=-udi
- ;;
- sb1)
- basic_machine=mipsisa64sb1-unknown
- ;;
- sb1el)
- basic_machine=mipsisa64sb1el-unknown
- ;;
- sde)
- basic_machine=mipsisa32-sde
- os=-elf
- ;;
- sei)
- basic_machine=mips-sei
- os=-seiux
- ;;
- sequent)
- basic_machine=i386-sequent
- ;;
- sh)
- basic_machine=sh-hitachi
- os=-hms
- ;;
- sh5el)
- basic_machine=sh5le-unknown
- ;;
- sh64)
- basic_machine=sh64-unknown
- ;;
- sparclite-wrs | simso-wrs)
- basic_machine=sparclite-wrs
- os=-vxworks
- ;;
- sps7)
- basic_machine=m68k-bull
- os=-sysv2
- ;;
- spur)
- basic_machine=spur-unknown
- ;;
- st2000)
- basic_machine=m68k-tandem
- ;;
- stratus)
- basic_machine=i860-stratus
- os=-sysv4
- ;;
- sun2)
- basic_machine=m68000-sun
- ;;
- sun2os3)
- basic_machine=m68000-sun
- os=-sunos3
- ;;
- sun2os4)
- basic_machine=m68000-sun
- os=-sunos4
- ;;
- sun3os3)
- basic_machine=m68k-sun
- os=-sunos3
- ;;
- sun3os4)
- basic_machine=m68k-sun
- os=-sunos4
- ;;
- sun4os3)
- basic_machine=sparc-sun
- os=-sunos3
- ;;
- sun4os4)
- basic_machine=sparc-sun
- os=-sunos4
- ;;
- sun4sol2)
- basic_machine=sparc-sun
- os=-solaris2
- ;;
- sun3 | sun3-*)
- basic_machine=m68k-sun
- ;;
- sun4)
- basic_machine=sparc-sun
- ;;
- sun386 | sun386i | roadrunner)
- basic_machine=i386-sun
- ;;
- sv1)
- basic_machine=sv1-cray
- os=-unicos
- ;;
- symmetry)
- basic_machine=i386-sequent
- os=-dynix
- ;;
- t3e)
- basic_machine=alphaev5-cray
- os=-unicos
- ;;
- t90)
- basic_machine=t90-cray
- os=-unicos
- ;;
- tic54x | c54x*)
- basic_machine=tic54x-unknown
- os=-coff
- ;;
- tic55x | c55x*)
- basic_machine=tic55x-unknown
- os=-coff
- ;;
- tic6x | c6x*)
- basic_machine=tic6x-unknown
- os=-coff
- ;;
- tile*)
- basic_machine=tile-unknown
- os=-linux-gnu
- ;;
- tx39)
- basic_machine=mipstx39-unknown
- ;;
- tx39el)
- basic_machine=mipstx39el-unknown
- ;;
- toad1)
- basic_machine=pdp10-xkl
- os=-tops20
- ;;
- tower | tower-32)
- basic_machine=m68k-ncr
- ;;
- tpf)
- basic_machine=s390x-ibm
- os=-tpf
- ;;
- udi29k)
- basic_machine=a29k-amd
- os=-udi
- ;;
- ultra3)
- basic_machine=a29k-nyu
- os=-sym1
- ;;
- v810 | necv810)
- basic_machine=v810-nec
- os=-none
- ;;
- vaxv)
- basic_machine=vax-dec
- os=-sysv
- ;;
- vms)
- basic_machine=vax-dec
- os=-vms
- ;;
- vpp*|vx|vx-*)
- basic_machine=f301-fujitsu
- ;;
- vxworks960)
- basic_machine=i960-wrs
- os=-vxworks
- ;;
- vxworks68)
- basic_machine=m68k-wrs
- os=-vxworks
- ;;
- vxworks29k)
- basic_machine=a29k-wrs
- os=-vxworks
- ;;
- w65*)
- basic_machine=w65-wdc
- os=-none
- ;;
- w89k-*)
- basic_machine=hppa1.1-winbond
- os=-proelf
- ;;
- xbox)
- basic_machine=i686-pc
- os=-mingw32
- ;;
- xps | xps100)
- basic_machine=xps100-honeywell
- ;;
- ymp)
- basic_machine=ymp-cray
- os=-unicos
- ;;
- z8k-*-coff)
- basic_machine=z8k-unknown
- os=-sim
- ;;
- z80-*-coff)
- basic_machine=z80-unknown
- os=-sim
- ;;
- none)
- basic_machine=none-none
- os=-none
- ;;
-
-# Here we handle the default manufacturer of certain CPU types. It is in
-# some cases the only manufacturer, in others, it is the most popular.
- w89k)
- basic_machine=hppa1.1-winbond
- ;;
- op50n)
- basic_machine=hppa1.1-oki
- ;;
- op60c)
- basic_machine=hppa1.1-oki
- ;;
- romp)
- basic_machine=romp-ibm
- ;;
- mmix)
- basic_machine=mmix-knuth
- ;;
- rs6000)
- basic_machine=rs6000-ibm
- ;;
- vax)
- basic_machine=vax-dec
- ;;
- pdp10)
- # there are many clones, so DEC is not a safe bet
- basic_machine=pdp10-unknown
- ;;
- pdp11)
- basic_machine=pdp11-dec
- ;;
- we32k)
- basic_machine=we32k-att
- ;;
- sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
- basic_machine=sh-unknown
- ;;
- sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
- basic_machine=sparc-sun
- ;;
- cydra)
- basic_machine=cydra-cydrome
- ;;
- orion)
- basic_machine=orion-highlevel
- ;;
- orion105)
- basic_machine=clipper-highlevel
- ;;
- mac | mpw | mac-mpw)
- basic_machine=m68k-apple
- ;;
- pmac | pmac-mpw)
- basic_machine=powerpc-apple
- ;;
- *-unknown)
- # Make sure to match an already-canonicalized machine name.
- ;;
- *)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
-esac
-
-# Here we canonicalize certain aliases for manufacturers.
-case $basic_machine in
- *-digital*)
- basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
- ;;
- *-commodore*)
- basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
- ;;
- *)
- ;;
-esac
-
-# Decode manufacturer-specific aliases for certain operating systems.
-
-if [ x"$os" != x"" ]
-then
-case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
- # -solaris* is a basic system type, with this one exception.
- -solaris1 | -solaris1.*)
- os=`echo $os | sed -e 's|solaris1|sunos4|'`
- ;;
- -solaris)
- os=-solaris2
- ;;
- -svr4*)
- os=-sysv4
- ;;
- -unixware*)
- os=-sysv4.2uw
- ;;
- -gnu/linux*)
- os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
- ;;
- # First accept the basic system types.
- # The portable systems comes first.
- # Each alternative MUST END IN A *, to match a version number.
- # -sysv* is not here because it comes later, after sysvr4.
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
- | -kopensolaris* \
- | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* | -aros* \
- | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
- | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
- | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
- | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
- | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
- | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
- | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* | -cegcc* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
- | -uxpv* | -beos* | -mpeix* | -udk* \
- | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
- | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
- | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
- | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
- | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
- # Remember, each alternative MUST END IN *, to match a version number.
- ;;
- -qnx*)
- case $basic_machine in
- x86-* | i*86-*)
- ;;
- *)
- os=-nto$os
- ;;
- esac
- ;;
- -nto-qnx*)
- ;;
- -nto*)
- os=`echo $os | sed -e 's|nto|nto-qnx|'`
- ;;
- -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
- | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
- | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
- ;;
- -mac*)
- os=`echo $os | sed -e 's|mac|macos|'`
- ;;
- -linux-dietlibc)
- os=-linux-dietlibc
- ;;
- -linux*)
- os=`echo $os | sed -e 's|linux|linux-gnu|'`
- ;;
- -sunos5*)
- os=`echo $os | sed -e 's|sunos5|solaris2|'`
- ;;
- -sunos6*)
- os=`echo $os | sed -e 's|sunos6|solaris3|'`
- ;;
- -opened*)
- os=-openedition
- ;;
- -os400*)
- os=-os400
- ;;
- -wince*)
- os=-wince
- ;;
- -osfrose*)
- os=-osfrose
- ;;
- -osf*)
- os=-osf
- ;;
- -utek*)
- os=-bsd
- ;;
- -dynix*)
- os=-bsd
- ;;
- -acis*)
- os=-aos
- ;;
- -atheos*)
- os=-atheos
- ;;
- -syllable*)
- os=-syllable
- ;;
- -386bsd)
- os=-bsd
- ;;
- -ctix* | -uts*)
- os=-sysv
- ;;
- -nova*)
- os=-rtmk-nova
- ;;
- -ns2 )
- os=-nextstep2
- ;;
- -nsk*)
- os=-nsk
- ;;
- # Preserve the version number of sinix5.
- -sinix5.*)
- os=`echo $os | sed -e 's|sinix|sysv|'`
- ;;
- -sinix*)
- os=-sysv4
- ;;
- -tpf*)
- os=-tpf
- ;;
- -triton*)
- os=-sysv3
- ;;
- -oss*)
- os=-sysv3
- ;;
- -svr4)
- os=-sysv4
- ;;
- -svr3)
- os=-sysv3
- ;;
- -sysvr4)
- os=-sysv4
- ;;
- # This must come after -sysvr4.
- -sysv*)
- ;;
- -ose*)
- os=-ose
- ;;
- -es1800*)
- os=-ose
- ;;
- -xenix)
- os=-xenix
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- os=-mint
- ;;
- -aros*)
- os=-aros
- ;;
- -kaos*)
- os=-kaos
- ;;
- -zvmoe)
- os=-zvmoe
- ;;
- -dicos*)
- os=-dicos
- ;;
- -none)
- ;;
- *)
- # Get rid of the `-' at the beginning of $os.
- os=`echo $os | sed 's/[^-]*-//'`
- echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
- exit 1
- ;;
-esac
-else
-
-# Here we handle the default operating systems that come with various machines.
-# The value should be what the vendor currently ships out the door with their
-# machine or put another way, the most popular os provided with the machine.
-
-# Note that if you're going to try to match "-MANUFACTURER" here (say,
-# "-sun"), then you have to tell the case statement up towards the top
-# that MANUFACTURER isn't an operating system. Otherwise, code above
-# will signal an error saying that MANUFACTURER isn't an operating
-# system, and we'll never get to this point.
-
-case $basic_machine in
- score-*)
- os=-elf
- ;;
- spu-*)
- os=-elf
- ;;
- *-acorn)
- os=-riscix1.2
- ;;
- arm*-rebel)
- os=-linux
- ;;
- arm*-semi)
- os=-aout
- ;;
- c4x-* | tic4x-*)
- os=-coff
- ;;
- # This must come before the *-dec entry.
- pdp10-*)
- os=-tops20
- ;;
- pdp11-*)
- os=-none
- ;;
- *-dec | vax-*)
- os=-ultrix4.2
- ;;
- m68*-apollo)
- os=-domain
- ;;
- i386-sun)
- os=-sunos4.0.2
- ;;
- m68000-sun)
- os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
- ;;
- m68*-cisco)
- os=-aout
- ;;
- mep-*)
- os=-elf
- ;;
- mips*-cisco)
- os=-elf
- ;;
- mips*-*)
- os=-elf
- ;;
- or32-*)
- os=-coff
- ;;
- *-tti) # must be before sparc entry or we get the wrong os.
- os=-sysv3
- ;;
- sparc-* | *-sun)
- os=-sunos4.1.1
- ;;
- *-be)
- os=-beos
- ;;
- *-haiku)
- os=-haiku
- ;;
- *-ibm)
- os=-aix
- ;;
- *-knuth)
- os=-mmixware
- ;;
- *-wec)
- os=-proelf
- ;;
- *-winbond)
- os=-proelf
- ;;
- *-oki)
- os=-proelf
- ;;
- *-hp)
- os=-hpux
- ;;
- *-hitachi)
- os=-hiux
- ;;
- i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
- os=-sysv
- ;;
- *-cbm)
- os=-amigaos
- ;;
- *-dg)
- os=-dgux
- ;;
- *-dolphin)
- os=-sysv3
- ;;
- m68k-ccur)
- os=-rtu
- ;;
- m88k-omron*)
- os=-luna
- ;;
- *-next )
- os=-nextstep
- ;;
- *-sequent)
- os=-ptx
- ;;
- *-crds)
- os=-unos
- ;;
- *-ns)
- os=-genix
- ;;
- i370-*)
- os=-mvs
- ;;
- *-next)
- os=-nextstep3
- ;;
- *-gould)
- os=-sysv
- ;;
- *-highlevel)
- os=-bsd
- ;;
- *-encore)
- os=-bsd
- ;;
- *-sgi)
- os=-irix
- ;;
- *-siemens)
- os=-sysv4
- ;;
- *-masscomp)
- os=-rtu
- ;;
- f30[01]-fujitsu | f700-fujitsu)
- os=-uxpv
- ;;
- *-rom68k)
- os=-coff
- ;;
- *-*bug)
- os=-coff
- ;;
- *-apple)
- os=-macos
- ;;
- *-atari*)
- os=-mint
- ;;
- *)
- os=-none
- ;;
-esac
-fi
-
-# Here we handle the case where we know the os, and the CPU type, but not the
-# manufacturer. We pick the logical manufacturer.
-vendor=unknown
-case $basic_machine in
- *-unknown)
- case $os in
- -riscix*)
- vendor=acorn
- ;;
- -sunos*)
- vendor=sun
- ;;
- -aix*)
- vendor=ibm
- ;;
- -beos*)
- vendor=be
- ;;
- -hpux*)
- vendor=hp
- ;;
- -mpeix*)
- vendor=hp
- ;;
- -hiux*)
- vendor=hitachi
- ;;
- -unos*)
- vendor=crds
- ;;
- -dgux*)
- vendor=dg
- ;;
- -luna*)
- vendor=omron
- ;;
- -genix*)
- vendor=ns
- ;;
- -mvs* | -opened*)
- vendor=ibm
- ;;
- -os400*)
- vendor=ibm
- ;;
- -ptx*)
- vendor=sequent
- ;;
- -tpf*)
- vendor=ibm
- ;;
- -vxsim* | -vxworks* | -windiss*)
- vendor=wrs
- ;;
- -aux*)
- vendor=apple
- ;;
- -hms*)
- vendor=hitachi
- ;;
- -mpw* | -macos*)
- vendor=apple
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- vendor=atari
- ;;
- -vos*)
- vendor=stratus
- ;;
- esac
- basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
- ;;
-esac
-
-echo $basic_machine$os
-exit
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/tevent/configure b/lib/tevent/configure
new file mode 100755
index 0000000000..15ad1a57f1
--- /dev/null
+++ b/lib/tevent/configure
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+PREVPATH=`dirname $0`
+
+if [ -f $PREVPATH/../../buildtools/bin/waf ]; then
+ WAF=../../buildtools/bin/waf
+elif [ -f $PREVPATH/buildtools/bin/waf ]; then
+ WAF=./buildtools/bin/waf
+else
+ echo "tevent: Unable to find waf"
+ exit 1
+fi
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+cd . || exit 1
+$WAF configure "$@" || exit 1
+cd $PREVPATH
diff --git a/lib/tevent/configure.ac b/lib/tevent/configure.ac
deleted file mode 100644
index c759b83fab..0000000000
--- a/lib/tevent/configure.ac
+++ /dev/null
@@ -1,25 +0,0 @@
-AC_PREREQ(2.50)
-AC_INIT(tevent, 0.9.8)
-AC_CONFIG_SRCDIR([tevent.c])
-AC_CONFIG_HEADER(config.h)
-
-AC_LIBREPLACE_ALL_CHECKS
-
-AC_LD_EXPORT_DYNAMIC
-AC_LD_SONAMEFLAG
-AC_LD_VERSIONSCRIPT
-AC_LD_PICFLAG
-AC_LD_SHLIBEXT
-AC_LIBREPLACE_SHLD
-AC_LIBREPLACE_SHLD_FLAGS
-AC_LIBREPLACE_RUNTIME_LIB_PATH_VAR
-
-m4_include(build_macros.m4)
-BUILD_WITH_SHARED_BUILD_DIR
-
-m4_include(pkg.m4)
-m4_include(libtalloc.m4)
-
-m4_include(libtevent.m4)
-
-AC_OUTPUT(Makefile tevent.pc)
diff --git a/lib/tevent/doc/mainpage.dox b/lib/tevent/doc/mainpage.dox
new file mode 100644
index 0000000000..e2f986e1ef
--- /dev/null
+++ b/lib/tevent/doc/mainpage.dox
@@ -0,0 +1,42 @@
+/**
+ * @mainpage
+ *
+ * Tevent is an event system based on the talloc memory management library. It
+ * is the core event system used in Samba.
+ *
+ * The low level tevent has support for many event types, including timers,
+ * signals, and the classic file descriptor events.
+ *
+ * Tevent also provides helpers to deal with asynchronous code providing the
+ * tevent_req (tevent request) functions.
+ *
+ * @section tevent_download Download
+ *
+ * You can download the latest releases of tevent from the
+ * <a href="http://samba.org/ftp/tevent" target="_blank">tevent directory</a>
+ * on the samba public source archive.
+ *
+ * @section tevent_bugs Discussion and bug reports
+ *
+ * tevent does not currently have its own mailing list or bug tracking system.
+ * For now, please use the
+ * <a href="https://lists.samba.org/mailman/listinfo/samba-technical" target="_blank">samba-technical</a>
+ * mailing list, and the
+ * <a href="http://bugzilla.samba.org/" target="_blank">Samba bugzilla</a>
+ * bug tracking system.
+ *
+ * @section tevent_devel Development
+ * You can download the latest code either via git or rsync.
+ *
+ * To fetch via git see the following guide:
+ *
+ * <a href="http://wiki.samba.org/index.php/Using_Git_for_Samba_Development" target="_blank">Using Git for Samba Development</a>
+ *
+ * Once you have cloned the tree switch to the master branch and cd into the
+ * lib/tevent directory.
+ *
+ * To fetch via rsync use this command:
+ *
+ * rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/tevent .
+ *
+ */
diff --git a/lib/tevent/doc/tutorials.dox b/lib/tevent/doc/tutorials.dox
new file mode 100644
index 0000000000..e8beed7dc0
--- /dev/null
+++ b/lib/tevent/doc/tutorials.dox
@@ -0,0 +1,43 @@
+/**
+ * @page tevent_queue_tutorial The tevent_queue tutorial
+ *
+ * @section Introduction
+ *
+ * A tevent_queue is used to queue up async requests that must be
+ * serialized. For example writing buffers into a socket must be
+ * serialized. Writing a large lump of data into a socket can require
+ * multiple write(2) or send(2) system calls. If more than one async
+ * request is outstanding to write large buffers into a socket, every
+ * request must individually be completed before the next one begins,
+ * even if multiple syscalls are required.
+ *
+ * To do this, every socket gets assigned a tevent_queue struct.
+ *
+ * Creating a serialized async request follows the usual convention to
+ * return a tevent_req structure with an embedded state structure. To
+ * serialize the work the request is about to do, instead of directly
+ * starting or doing that work, tevent_queue_add must be called. When it
+ * is time for the serialized async request to do its work, the trigger
+ * callback function tevent_queue_add was given is called. In the example
+ * of writing to a socket, the trigger is called when the write request
+ * can begin accessing the socket.
+ *
+ * How does this engine work behind the scenes? When the queue is empty,
+ * tevent_queue_add schedules an immediate call to the trigger
+ * callback. The trigger callback starts its work, likely by starting
+ * other async subrequests. While these async subrequests are working,
+ * more requests can accumulate in the queue by tevent_queue_add. While
+ * there is no function to explicitly trigger the next waiter in line, it
+ * still works: When the active request in the queue is done, it will be
+ * destroyed by talloc_free. Talloc_free of a serialized async request
+ * that had been added to a queue will trigger the next request in the
+ * queue via a talloc destructor attached to a child of the serialized
+ * request. This way the queue will be kept busy when an async request
+ * finishes.
+ *
+ * @section Example
+ *
+ * @code
+ * Metze: Please add a code example here.
+ * @endcode
+ */
diff --git a/lib/tevent/doxy.config b/lib/tevent/doxy.config
new file mode 100644
index 0000000000..578ecafc77
--- /dev/null
+++ b/lib/tevent/doxy.config
@@ -0,0 +1,1538 @@
+# Doxyfile 1.6.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = tevent
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 0.9.8
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it parses.
+# With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this tag.
+# The format is ext=language, where ext is a file extension, and language is one of
+# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP,
+# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat
+# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen to replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = NO
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespace are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = YES
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = YES
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by
+# doxygen. The layout file controls the global structure of the generated output files
+# in an output format independent way. To create the layout file that represents
+# doxygen's defaults, run doxygen with the -l option. You can optionally specify a
+# file name after the option, if omitted DoxygenLayout.xml will be used as the name
+# of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = . doc
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS = *.cpp \
+ *.cc \
+ *.c \
+ *.h \
+ *.hh \
+ *.hpp \
+ *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = */.git/* \
+ */.svn/* \
+ */cmake/* \
+ */build/*
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# If the HTML_FOOTER_DESCRIPTION tag is set to YES, Doxygen will
+# add generated date, project name and doxygen version to HTML footer.
+
+HTML_FOOTER_DESCRIPTION= NO
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER
+# are set, an additional index file will be generated that can be used as input for
+# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated
+# HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add.
+# For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NONE
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP)
+# there is already a search function so this one should typically
+# be disabled.
+
+SEARCHENGINE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = YES
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be search if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = DOXYGEN PRINTF_ATTRIBUTE(x,y)=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a differently looking font) you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = YES
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/lib/tevent/install-sh b/lib/tevent/install-sh
deleted file mode 100755
index 58719246f0..0000000000
--- a/lib/tevent/install-sh
+++ /dev/null
@@ -1,238 +0,0 @@
-#! /bin/sh
-#
-# install - install a program, script, or datafile
-# This comes from X11R5.
-#
-# Calling this script install-sh is preferred over install.sh, to prevent
-# `make' implicit rules from creating a file called install from it
-# when there is no Makefile.
-#
-# This script is compatible with the BSD install script, but was written
-# from scratch.
-#
-
-
-# set DOITPROG to echo to test this script
-
-# Don't use :- since 4.3BSD and earlier shells don't like it.
-doit="${DOITPROG-}"
-
-
-# put in absolute paths if you don't have them in your path; or use env. vars.
-
-mvprog="${MVPROG-mv}"
-cpprog="${CPPROG-cp}"
-chmodprog="${CHMODPROG-chmod}"
-chownprog="${CHOWNPROG-chown}"
-chgrpprog="${CHGRPPROG-chgrp}"
-stripprog="${STRIPPROG-strip}"
-rmprog="${RMPROG-rm}"
-mkdirprog="${MKDIRPROG-mkdir}"
-
-transformbasename=""
-transform_arg=""
-instcmd="$mvprog"
-chmodcmd="$chmodprog 0755"
-chowncmd=""
-chgrpcmd=""
-stripcmd=""
-rmcmd="$rmprog -f"
-mvcmd="$mvprog"
-src=""
-dst=""
-dir_arg=""
-
-while [ x"$1" != x ]; do
- case $1 in
- -c) instcmd="$cpprog"
- shift
- continue;;
-
- -d) dir_arg=true
- shift
- continue;;
-
- -m) chmodcmd="$chmodprog $2"
- shift
- shift
- continue;;
-
- -o) chowncmd="$chownprog $2"
- shift
- shift
- continue;;
-
- -g) chgrpcmd="$chgrpprog $2"
- shift
- shift
- continue;;
-
- -s) stripcmd="$stripprog"
- shift
- continue;;
-
- -t=*) transformarg=`echo $1 | sed 's/-t=//'`
- shift
- continue;;
-
- -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
- shift
- continue;;
-
- *) if [ x"$src" = x ]
- then
- src=$1
- else
- # this colon is to work around a 386BSD /bin/sh bug
- :
- dst=$1
- fi
- shift
- continue;;
- esac
-done
-
-if [ x"$src" = x ]
-then
- echo "install: no input file specified"
- exit 1
-else
- true
-fi
-
-if [ x"$dir_arg" != x ]; then
- dst=$src
- src=""
-
- if [ -d $dst ]; then
- instcmd=:
- else
- instcmd=mkdir
- fi
-else
-
-# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
-# might cause directories to be created, which would be especially bad
-# if $src (and thus $dsttmp) contains '*'.
-
- if [ -f $src -o -d $src ]
- then
- true
- else
- echo "install: $src does not exist"
- exit 1
- fi
-
- if [ x"$dst" = x ]
- then
- echo "install: no destination specified"
- exit 1
- else
- true
- fi
-
-# If destination is a directory, append the input filename; if your system
-# does not like double slashes in filenames, you may need to add some logic
-
- if [ -d $dst ]
- then
- dst="$dst"/`basename $src`
- else
- true
- fi
-fi
-
-## this sed command emulates the dirname command
-dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
-
-# Make sure that the destination directory exists.
-# this part is taken from Noah Friedman's mkinstalldirs script
-
-# Skip lots of stat calls in the usual case.
-if [ ! -d "$dstdir" ]; then
-defaultIFS='
-'
-IFS="${IFS-${defaultIFS}}"
-
-oIFS="${IFS}"
-# Some sh's can't handle IFS=/ for some reason.
-IFS='%'
-set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
-IFS="${oIFS}"
-
-pathcomp=''
-
-while [ $# -ne 0 ] ; do
- pathcomp="${pathcomp}${1}"
- shift
-
- if [ ! -d "${pathcomp}" ] ;
- then
- $mkdirprog "${pathcomp}"
- else
- true
- fi
-
- pathcomp="${pathcomp}/"
-done
-fi
-
-if [ x"$dir_arg" != x ]
-then
- $doit $instcmd $dst &&
-
- if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
- if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
- if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
- if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
-else
-
-# If we're going to rename the final executable, determine the name now.
-
- if [ x"$transformarg" = x ]
- then
- dstfile=`basename $dst`
- else
- dstfile=`basename $dst $transformbasename |
- sed $transformarg`$transformbasename
- fi
-
-# don't allow the sed command to completely eliminate the filename
-
- if [ x"$dstfile" = x ]
- then
- dstfile=`basename $dst`
- else
- true
- fi
-
-# Make a temp file name in the proper directory.
-
- dsttmp=$dstdir/#inst.$$#
-
-# Move or copy the file name to the temp name
-
- $doit $instcmd $src $dsttmp &&
-
- trap "rm -f ${dsttmp}" 0 &&
-
-# and set any options; do chmod last to preserve setuid bits
-
-# If any of these fail, we abort the whole thing. If we want to
-# ignore errors from any of these, just make sure not to ignore
-# errors from the above "$doit $instcmd $src $dsttmp" command.
-
- if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
- if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
- if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
- if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
-
-# Now rename the file to the real destination.
-
- $doit $rmcmd -f $dstdir/$dstfile &&
- $doit $mvcmd $dsttmp $dstdir/$dstfile
-
-fi &&
-
-
-exit 0
diff --git a/lib/tevent/libtalloc.m4 b/lib/tevent/libtalloc.m4
deleted file mode 100644
index a4c5b8a9d9..0000000000
--- a/lib/tevent/libtalloc.m4
+++ /dev/null
@@ -1,7 +0,0 @@
-AC_SUBST(TALLOC_OBJ)
-AC_SUBST(TALLOC_CFLAGS)
-AC_SUBST(TALLOC_LIBS)
-
-AC_CHECK_HEADER(talloc.h,
- [AC_CHECK_LIB(talloc, talloc_init, [TALLOC_LIBS="-ltalloc"]) ],
- [PKG_CHECK_MODULES(TALLOC, talloc)])
diff --git a/lib/tevent/libtevent.m4 b/lib/tevent/libtevent.m4
index 4162ba30f9..5c5969b0df 100644
--- a/lib/tevent/libtevent.m4
+++ b/lib/tevent/libtevent.m4
@@ -30,6 +30,7 @@ TEVENT_OBJ="tevent.o tevent_debug.o tevent_util.o"
TEVENT_OBJ="$TEVENT_OBJ tevent_fd.o tevent_timed.o tevent_immediate.o tevent_signal.o"
TEVENT_OBJ="$TEVENT_OBJ tevent_req.o tevent_wakeup.o tevent_queue.o"
TEVENT_OBJ="$TEVENT_OBJ tevent_standard.o tevent_select.o"
+TEVENT_OBJ="$TEVENT_OBJ tevent_poll.o"
AC_CHECK_HEADERS(sys/epoll.h)
AC_CHECK_FUNCS(epoll_create)
diff --git a/lib/tevent/pkg.m4 b/lib/tevent/pkg.m4
deleted file mode 100644
index a8b3d06c81..0000000000
--- a/lib/tevent/pkg.m4
+++ /dev/null
@@ -1,156 +0,0 @@
-# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
-#
-# Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-# PKG_PROG_PKG_CONFIG([MIN-VERSION])
-# ----------------------------------
-AC_DEFUN([PKG_PROG_PKG_CONFIG],
-[m4_pattern_forbid([^_?PKG_[A-Z_]+$])
-m4_pattern_allow([^PKG_CONFIG(_PATH)?$])
-AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])dnl
-if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then
- AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
-fi
-if test -n "$PKG_CONFIG"; then
- _pkg_min_version=m4_default([$1], [0.9.0])
- AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version])
- if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then
- AC_MSG_RESULT([yes])
- else
- AC_MSG_RESULT([no])
- PKG_CONFIG=""
- fi
-
-fi[]dnl
-])# PKG_PROG_PKG_CONFIG
-
-# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
-#
-# Check to see whether a particular set of modules exists. Similar
-# to PKG_CHECK_MODULES(), but does not set variables or print errors.
-#
-#
-# Similar to PKG_CHECK_MODULES, make sure that the first instance of
-# this or PKG_CHECK_MODULES is called, or make sure to call
-# PKG_CHECK_EXISTS manually
-# --------------------------------------------------------------
-AC_DEFUN([PKG_CHECK_EXISTS],
-[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
-if test -n "$PKG_CONFIG" && \
- AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then
- m4_ifval([$2], [$2], [:])
-m4_ifvaln([$3], [else
- $3])dnl
-fi])
-
-
-# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
-# ---------------------------------------------
-m4_define([_PKG_CONFIG],
-[if test -n "$PKG_CONFIG"; then
- if test -n "$$1"; then
- pkg_cv_[]$1="$$1"
- else
- PKG_CHECK_EXISTS([$3],
- [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`],
- [pkg_failed=yes])
- fi
-else
- pkg_failed=untried
-fi[]dnl
-])# _PKG_CONFIG
-
-# _PKG_SHORT_ERRORS_SUPPORTED
-# -----------------------------
-AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED],
-[AC_REQUIRE([PKG_PROG_PKG_CONFIG])
-if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
- _pkg_short_errors_supported=yes
-else
- _pkg_short_errors_supported=no
-fi[]dnl
-])# _PKG_SHORT_ERRORS_SUPPORTED
-
-
-# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
-# [ACTION-IF-NOT-FOUND])
-#
-#
-# Note that if there is a possibility the first call to
-# PKG_CHECK_MODULES might not happen, you should be sure to include an
-# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
-#
-#
-# --------------------------------------------------------------
-AC_DEFUN([PKG_CHECK_MODULES],
-[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
-AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl
-AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl
-
-pkg_failed=no
-AC_MSG_CHECKING([for $1])
-
-_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2])
-_PKG_CONFIG([$1][_LIBS], [libs], [$2])
-
-m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS
-and $1[]_LIBS to avoid the need to call pkg-config.
-See the pkg-config man page for more details.])
-
-if test $pkg_failed = yes; then
- _PKG_SHORT_ERRORS_SUPPORTED
- if test $_pkg_short_errors_supported = yes; then
- $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --errors-to-stdout --print-errors "$2"`
- else
- $1[]_PKG_ERRORS=`$PKG_CONFIG --errors-to-stdout --print-errors "$2"`
- fi
- # Put the nasty error message in config.log where it belongs
- echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD
-
- ifelse([$4], , [AC_MSG_ERROR(dnl
-[Package requirements ($2) were not met:
-
-$$1_PKG_ERRORS
-
-Consider adjusting the PKG_CONFIG_PATH environment variable if you
-installed software in a non-standard prefix.
-
-_PKG_TEXT
-])],
- [AC_MSG_RESULT([no])
- $4])
-elif test $pkg_failed = untried; then
- ifelse([$4], , [AC_MSG_FAILURE(dnl
-[The pkg-config script could not be found or is too old. Make sure it
-is in your PATH or set the PKG_CONFIG environment variable to the full
-path to pkg-config.
-
-_PKG_TEXT
-
-To get pkg-config, see <http://www.freedesktop.org/software/pkgconfig>.])],
- [$4])
-else
- $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS
- $1[]_LIBS=$pkg_cv_[]$1[]_LIBS
- AC_MSG_RESULT([yes])
- ifelse([$3], , :, [$3])
-fi[]dnl
-])# PKG_CHECK_MODULES
diff --git a/lib/tevent/pytevent.c b/lib/tevent/pytevent.c
new file mode 100644
index 0000000000..22541bb624
--- /dev/null
+++ b/lib/tevent/pytevent.c
@@ -0,0 +1,762 @@
+/*
+ Unix SMB/CIFS implementation.
+ Python bindings for tevent
+
+ Copyright (C) Jelmer Vernooij 2010
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <Python.h>
+#include <tevent.h>
+
+typedef struct {
+ PyObject_HEAD
+ struct tevent_context *ev;
+} TeventContext_Object;
+
+typedef struct {
+ PyObject_HEAD
+ struct tevent_queue *queue;
+} TeventQueue_Object;
+
+typedef struct {
+ PyObject_HEAD
+ struct tevent_req *req;
+} TeventReq_Object;
+
+typedef struct {
+ PyObject_HEAD
+ struct tevent_signal *signal;
+} TeventSignal_Object;
+
+typedef struct {
+ PyObject_HEAD
+ struct tevent_timer *timer;
+} TeventTimer_Object;
+
+typedef struct {
+ PyObject_HEAD
+ struct tevent_fd *fd;
+} TeventFd_Object;
+
+staticforward PyTypeObject TeventContext_Type;
+staticforward PyTypeObject TeventReq_Type;
+staticforward PyTypeObject TeventQueue_Type;
+staticforward PyTypeObject TeventSignal_Type;
+staticforward PyTypeObject TeventTimer_Type;
+staticforward PyTypeObject TeventFd_Type;
+
+static int py_context_init(struct tevent_context *ev)
+{
+ /* FIXME */
+ return 0;
+}
+
+static struct tevent_fd *py_add_fd(struct tevent_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ tevent_fd_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location)
+{
+ /* FIXME */
+ return NULL;
+}
+
+static void py_set_fd_close_fn(struct tevent_fd *fde,
+ tevent_fd_close_fn_t close_fn)
+{
+ /* FIXME */
+}
+
+uint16_t py_get_fd_flags(struct tevent_fd *fde)
+{
+ /* FIXME */
+ return 0;
+}
+
+static void py_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
+{
+ /* FIXME */
+}
+
+/* timed_event functions */
+static struct tevent_timer *py_add_timer(struct tevent_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct timeval next_event,
+ tevent_timer_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location)
+{
+ /* FIXME */
+ return NULL;
+}
+
+/* immediate event functions */
+static void py_schedule_immediate(struct tevent_immediate *im,
+ struct tevent_context *ev,
+ tevent_immediate_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location)
+{
+ /* FIXME */
+}
+
+/* signal functions */
+static struct tevent_signal *py_add_signal(struct tevent_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int signum, int sa_flags,
+ tevent_signal_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location)
+{
+ /* FIXME */
+ return NULL;
+}
+
+/* loop functions */
+static int py_loop_once(struct tevent_context *ev, const char *location)
+{
+ /* FIXME */
+ return 0;
+}
+
+static int py_loop_wait(struct tevent_context *ev, const char *location)
+{
+ /* FIXME */
+ return 0;
+}
+
+const static struct tevent_ops py_tevent_ops = {
+ .context_init = py_context_init,
+ .add_fd = py_add_fd,
+ .set_fd_close_fn = py_set_fd_close_fn,
+ .get_fd_flags = py_get_fd_flags,
+ .set_fd_flags = py_set_fd_flags,
+ .add_timer = py_add_timer,
+ .schedule_immediate = py_schedule_immediate,
+ .add_signal = py_add_signal,
+ .loop_wait = py_loop_wait,
+ .loop_once = py_loop_once,
+};
+
+static PyObject *py_register_backend(PyObject *self, PyObject *args)
+{
+ PyObject *name, *py_backend;
+
+ if (!PyArg_ParseTuple(args, "O", &py_backend))
+ return NULL;
+
+ name = PyObject_GetAttrString(py_backend, "name");
+ if (name == NULL) {
+ PyErr_SetNone(PyExc_AttributeError);
+ return NULL;
+ }
+
+ if (!PyString_Check(name)) {
+ PyErr_SetNone(PyExc_TypeError);
+ return NULL;
+ }
+
+ if (!tevent_register_backend(PyString_AsString(name), &py_tevent_ops)) { /* FIXME: What to do with backend */
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_context_reinitialise(TeventContext_Object *self)
+{
+ int ret = tevent_re_initialise(self->ev);
+ if (ret != 0) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_queue_stop(TeventQueue_Object *self)
+{
+ tevent_queue_stop(self->queue);
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_queue_start(TeventQueue_Object *self)
+{
+ tevent_queue_start(self->queue);
+ Py_RETURN_NONE;
+}
+
+static void py_queue_trigger(struct tevent_req *req, void *private_data)
+{
+ PyObject *callback = private_data, *ret;
+
+ ret = PyObject_CallFunction(callback, "");
+ Py_XDECREF(ret);
+}
+
+static PyObject *py_tevent_queue_add(TeventQueue_Object *self, PyObject *args)
+{
+ TeventContext_Object *py_ev;
+ TeventReq_Object *py_req;
+ PyObject *trigger;
+ bool ret;
+
+ if (!PyArg_ParseTuple(args, "O!O!O",
+ &TeventContext_Type, &py_ev,
+ &TeventReq_Type, &py_req,
+ &trigger))
+ return NULL;
+
+ Py_INCREF(trigger);
+
+ ret = tevent_queue_add(self->queue, py_ev->ev, py_req->req,
+ py_queue_trigger, trigger);
+ if (!ret) {
+ PyErr_SetString(PyExc_RuntimeError, "queue add failed");
+ Py_DECREF(trigger);
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static PyMethodDef py_tevent_queue_methods[] = {
+ { "stop", (PyCFunction)py_tevent_queue_stop, METH_NOARGS,
+ "S.stop()" },
+ { "start", (PyCFunction)py_tevent_queue_start, METH_NOARGS,
+ "S.start()" },
+ { "add", (PyCFunction)py_tevent_queue_add, METH_VARARGS,
+ "S.add(ctx, req, trigger, baton)" },
+ { NULL },
+};
+
+static PyObject *py_tevent_context_wakeup_send(PyObject *self, PyObject *args)
+{
+ /* FIXME */
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_context_loop_wait(TeventContext_Object *self)
+{
+ if (tevent_loop_wait(self->ev) != 0) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_context_loop_once(TeventContext_Object *self)
+{
+ if (tevent_loop_once(self->ev) != 0) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+#ifdef TEVENT_DEPRECATED
+static bool py_tevent_finished(PyObject *callback)
+{
+ PyObject *py_ret;
+ bool ret;
+
+ py_ret = PyObject_CallFunction(callback, "");
+ if (py_ret == NULL)
+ return true;
+ ret = PyObject_IsTrue(py_ret);
+ Py_DECREF(py_ret);
+ return ret;
+}
+
+static PyObject *py_tevent_context_loop_until(TeventContext_Object *self, PyObject *args)
+{
+ PyObject *callback;
+ if (!PyArg_ParseTuple(args, "O", &callback))
+ return NULL;
+
+ if (tevent_loop_until(self->ev, py_tevent_finished, callback) != 0) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+
+ if (PyErr_Occurred())
+ return NULL;
+
+ Py_RETURN_NONE;
+}
+#endif
+
+static void py_tevent_signal_handler(struct tevent_context *ev,
+ struct tevent_signal *se,
+ int signum,
+ int count,
+ void *siginfo,
+ void *private_data)
+{
+ PyObject *callback = (PyObject *)private_data, *ret;
+
+ ret = PyObject_CallFunction(callback, "ii", signum, count);
+ Py_XDECREF(ret);
+}
+
+static void py_tevent_signal_dealloc(TeventSignal_Object *self)
+{
+ talloc_free(self->signal);
+ PyObject_Del(self);
+}
+
+static PyTypeObject TeventSignal_Type = {
+ .tp_name = "Signal",
+ .tp_basicsize = sizeof(TeventSignal_Object),
+ .tp_dealloc = (destructor)py_tevent_signal_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+};
+
+static PyObject *py_tevent_context_add_signal(TeventContext_Object *self, PyObject *args)
+{
+ int signum, sa_flags;
+ PyObject *handler;
+ struct tevent_signal *sig;
+ TeventSignal_Object *ret;
+
+ if (!PyArg_ParseTuple(args, "iiO", &signum, &sa_flags, &handler))
+ return NULL;
+
+ Py_INCREF(handler);
+ sig = tevent_add_signal(self->ev, NULL, signum, sa_flags,
+ py_tevent_signal_handler, handler);
+
+ ret = PyObject_New(TeventSignal_Object, &TeventSignal_Type);
+ if (ret == NULL) {
+ PyErr_NoMemory();
+ talloc_free(sig);
+ return NULL;
+ }
+
+ ret->signal = sig;
+
+ return (PyObject *)ret;
+}
+
+static void py_timer_handler(struct tevent_context *ev,
+ struct tevent_timer *te,
+ struct timeval current_time,
+ void *private_data)
+{
+ PyObject *callback = private_data, *ret;
+ ret = PyObject_CallFunction(callback, "l", te);
+ Py_XDECREF(ret);
+}
+
+static PyObject *py_tevent_context_add_timer(TeventContext_Object *self, PyObject *args)
+{
+ TeventTimer_Object *ret;
+ struct timeval next_event;
+ struct tevent_timer *timer;
+ PyObject *handler;
+ if (!PyArg_ParseTuple(args, "lO", &next_event, &handler))
+ return NULL;
+
+ timer = tevent_add_timer(self->ev, NULL, next_event, py_timer_handler,
+ handler);
+ if (timer == NULL) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+
+ ret = PyObject_New(TeventTimer_Object, &TeventTimer_Type);
+ if (ret == NULL) {
+ PyErr_NoMemory();
+ talloc_free(timer);
+ return NULL;
+ }
+ ret->timer = timer;
+
+ return (PyObject *)ret;
+}
+
+static void py_fd_handler(struct tevent_context *ev,
+ struct tevent_fd *fde,
+ uint16_t flags,
+ void *private_data)
+{
+ PyObject *callback = private_data, *ret;
+
+ ret = PyObject_CallFunction(callback, "i", flags);
+ Py_XDECREF(ret);
+}
+
+static PyObject *py_tevent_context_add_fd(TeventContext_Object *self, PyObject *args)
+{
+ int fd, flags;
+ PyObject *handler;
+ struct tevent_fd *tfd;
+ TeventFd_Object *ret;
+
+ if (!PyArg_ParseTuple(args, "iiO", &fd, &flags, &handler))
+ return NULL;
+
+ tfd = tevent_add_fd(self->ev, NULL, fd, flags, py_fd_handler, handler);
+ if (tfd == NULL) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+
+ ret = PyObject_New(TeventFd_Object, &TeventFd_Type);
+ if (ret == NULL) {
+ talloc_free(tfd);
+ return NULL;
+ }
+ ret->fd = tfd;
+
+ return (PyObject *)ret;
+}
+
+#ifdef TEVENT_DEPRECATED
+static PyObject *py_tevent_context_set_allow_nesting(TeventContext_Object *self)
+{
+ tevent_loop_allow_nesting(self->ev);
+ Py_RETURN_NONE;
+}
+#endif
+
+static PyMethodDef py_tevent_context_methods[] = {
+ { "reinitialise", (PyCFunction)py_tevent_context_reinitialise, METH_NOARGS,
+ "S.reinitialise()" },
+ { "wakeup_send", (PyCFunction)py_tevent_context_wakeup_send,
+ METH_VARARGS, "S.wakeup_send(wakeup_time) -> req" },
+ { "loop_wait", (PyCFunction)py_tevent_context_loop_wait,
+ METH_NOARGS, "S.loop_wait()" },
+ { "loop_once", (PyCFunction)py_tevent_context_loop_once,
+ METH_NOARGS, "S.loop_once()" },
+#ifdef TEVENT_DEPRECATED
+ { "loop_until", (PyCFunction)py_tevent_context_loop_until,
+ METH_VARARGS, "S.loop_until(callback)" },
+#endif
+ { "add_signal", (PyCFunction)py_tevent_context_add_signal,
+ METH_VARARGS, "S.add_signal(signum, sa_flags, handler) -> signal" },
+ { "add_timer", (PyCFunction)py_tevent_context_add_timer,
+ METH_VARARGS, "S.add_timer(next_event, handler) -> timer" },
+ { "add_fd", (PyCFunction)py_tevent_context_add_fd,
+ METH_VARARGS, "S.add_fd(fd, flags, handler) -> fd" },
+#ifdef TEVENT_DEPRECATED
+ { "allow_nesting", (PyCFunction)py_tevent_context_set_allow_nesting,
+ METH_NOARGS, "Whether to allow nested tevent loops." },
+#endif
+ { NULL },
+};
+
+static PyObject *py_tevent_req_wakeup_recv(PyObject *self)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_received(PyObject *self)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_is_error(PyObject *self)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_poll(PyObject *self)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_is_in_progress(PyObject *self)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyGetSetDef py_tevent_req_getsetters[] = {
+ { "in_progress", (getter)py_tevent_req_is_in_progress, NULL,
+ "Whether the request is in progress" },
+ { NULL }
+};
+
+static PyObject *py_tevent_req_post(PyObject *self, PyObject *args)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_set_error(PyObject *self, PyObject *args)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_done(PyObject *self)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_notify_callback(PyObject *self)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_set_endtime(PyObject *self, PyObject *args)
+{
+ /* FIXME */
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_tevent_req_cancel(TeventReq_Object *self)
+{
+ if (!tevent_req_cancel(self->req)) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyMethodDef py_tevent_req_methods[] = {
+ { "wakeup_recv", (PyCFunction)py_tevent_req_wakeup_recv, METH_NOARGS,
+ "Wakeup received" },
+ { "received", (PyCFunction)py_tevent_req_received, METH_NOARGS,
+ "Receive finished" },
+ { "is_error", (PyCFunction)py_tevent_req_is_error, METH_NOARGS,
+ "is_error() -> (error, state)" },
+ { "poll", (PyCFunction)py_tevent_req_poll, METH_VARARGS,
+ "poll(ctx)" },
+ { "post", (PyCFunction)py_tevent_req_post, METH_VARARGS,
+ "post(ctx) -> req" },
+ { "set_error", (PyCFunction)py_tevent_req_set_error, METH_VARARGS,
+ "set_error(error)" },
+ { "done", (PyCFunction)py_tevent_req_done, METH_NOARGS,
+ "done()" },
+ { "notify_callback", (PyCFunction)py_tevent_req_notify_callback,
+ METH_NOARGS, "notify_callback()" },
+ { "set_endtime", (PyCFunction)py_tevent_req_set_endtime,
+ METH_VARARGS, "set_endtime(ctx, endtime)" },
+ { "cancel", (PyCFunction)py_tevent_req_cancel,
+ METH_NOARGS, "cancel()" },
+ { NULL }
+};
+
+static void py_tevent_req_dealloc(TeventReq_Object *self)
+{
+ talloc_free(self->req);
+ PyObject_DEL(self);
+}
+
+static PyTypeObject TeventReq_Type = {
+ .tp_name = "tevent.Request",
+ .tp_basicsize = sizeof(TeventReq_Object),
+ .tp_methods = py_tevent_req_methods,
+ .tp_dealloc = (destructor)py_tevent_req_dealloc,
+ .tp_getset = py_tevent_req_getsetters,
+ /* FIXME: .tp_new = py_tevent_req_new, */
+};
+
+static PyObject *py_tevent_queue_get_length(TeventQueue_Object *self)
+{
+ return PyInt_FromLong(tevent_queue_length(self->queue));
+}
+
+static PyGetSetDef py_tevent_queue_getsetters[] = {
+ { "length", (getter)py_tevent_queue_get_length,
+ NULL, "The number of elements in the queue." },
+ { NULL },
+};
+
+static void py_tevent_queue_dealloc(TeventQueue_Object *self)
+{
+ talloc_free(self->queue);
+ PyObject_Del(self);
+}
+
+static PyTypeObject TeventQueue_Type = {
+ .tp_name = "tevent.Queue",
+ .tp_basicsize = sizeof(TeventQueue_Object),
+ .tp_dealloc = (destructor)py_tevent_queue_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_getset = py_tevent_queue_getsetters,
+ .tp_methods = py_tevent_queue_methods,
+};
+
+static PyObject *py_tevent_context_signal_support(PyObject *_self)
+{
+ TeventContext_Object *self = (TeventContext_Object *)_self;
+ return PyBool_FromLong(tevent_signal_support(self->ev));
+}
+
+static PyGetSetDef py_tevent_context_getsetters[] = {
+ { "signal_support", (getter)py_tevent_context_signal_support,
+ NULL, "if this platform and tevent context support signal handling" },
+ { NULL }
+};
+
+static void py_tevent_context_dealloc(TeventContext_Object *self)
+{
+ talloc_free(self->ev);
+ PyObject_Del(self);
+}
+
+static PyObject *py_tevent_context_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
+{
+ const char * const kwnames[] = { "name", NULL };
+ char *name = NULL;
+ struct tevent_context *ev;
+ TeventContext_Object *ret;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", kwnames, &name))
+ return NULL;
+
+ if (name == NULL) {
+ ev = tevent_context_init(NULL);
+ } else {
+ ev = tevent_context_init_byname(NULL, name);
+ }
+
+ if (ev == NULL) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ return NULL;
+ }
+
+ ret = PyObject_New(TeventContext_Object, type);
+ if (ret == NULL) {
+ PyErr_NoMemory();
+ talloc_free(ev);
+ return NULL;
+ }
+
+ ret->ev = ev;
+ return (PyObject *)ret;
+}
+
+static PyTypeObject TeventContext_Type = {
+ .tp_name = "_tevent.Context",
+ .tp_new = py_tevent_context_new,
+ .tp_basicsize = sizeof(TeventContext_Object),
+ .tp_dealloc = (destructor)py_tevent_context_dealloc,
+ .tp_methods = py_tevent_context_methods,
+ .tp_getset = py_tevent_context_getsetters,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+};
+
+static PyObject *py_set_default_backend(PyObject *self, PyObject *args)
+{
+ char *backend_name;
+ if (!PyArg_ParseTuple(args, "s", &backend_name))
+ return NULL;
+
+ tevent_set_default_backend(backend_name);
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_backend_list(PyObject *self)
+{
+ PyObject *ret;
+ int i;
+ const char **backends;
+
+ ret = PyList_New(0);
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ backends = tevent_backend_list(NULL);
+ if (backends == NULL) {
+ PyErr_SetNone(PyExc_RuntimeError);
+ Py_DECREF(ret);
+ return NULL;
+ }
+ for (i = 0; backends[i]; i++) {
+ PyList_Append(ret, PyString_FromString(backends[i]));
+ }
+
+ talloc_free(backends);
+
+ return ret;
+}
+
+static PyMethodDef tevent_methods[] = {
+ { "register_backend", (PyCFunction)py_register_backend, METH_VARARGS,
+ "register_backend(backend)" },
+ { "set_default_backend", (PyCFunction)py_set_default_backend,
+ METH_VARARGS, "set_default_backend(backend)" },
+ { "backend_list", (PyCFunction)py_backend_list,
+ METH_NOARGS, "backend_list() -> list" },
+ { NULL },
+};
+
+void init_tevent(void)
+{
+ PyObject *m;
+
+ if (PyType_Ready(&TeventContext_Type) < 0)
+ return;
+
+ if (PyType_Ready(&TeventQueue_Type) < 0)
+ return;
+
+ if (PyType_Ready(&TeventReq_Type) < 0)
+ return;
+
+ if (PyType_Ready(&TeventSignal_Type) < 0)
+ return;
+
+ if (PyType_Ready(&TeventTimer_Type) < 0)
+ return;
+
+ if (PyType_Ready(&TeventFd_Type) < 0)
+ return;
+
+ m = Py_InitModule3("_tevent", tevent_methods, "Tevent integration for twisted.");
+ if (m == NULL)
+ return;
+
+ Py_INCREF(&TeventContext_Type);
+ PyModule_AddObject(m, "Context", (PyObject *)&TeventContext_Type);
+
+ Py_INCREF(&TeventQueue_Type);
+ PyModule_AddObject(m, "Queue", (PyObject *)&TeventQueue_Type);
+
+ Py_INCREF(&TeventReq_Type);
+ PyModule_AddObject(m, "Request", (PyObject *)&TeventReq_Type);
+
+ Py_INCREF(&TeventSignal_Type);
+ PyModule_AddObject(m, "Signal", (PyObject *)&TeventSignal_Type);
+
+ Py_INCREF(&TeventTimer_Type);
+ PyModule_AddObject(m, "Timer", (PyObject *)&TeventTimer_Type);
+
+ Py_INCREF(&TeventFd_Type);
+ PyModule_AddObject(m, "Fd", (PyObject *)&TeventFd_Type);
+}
diff --git a/lib/tevent/rules.mk b/lib/tevent/rules.mk
deleted file mode 100644
index c197e930a3..0000000000
--- a/lib/tevent/rules.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-.SUFFIXES: .i _wrap.c
-
-showflags::
- @echo 'libtevent will be compiled with flags:'
- @echo ' CFLAGS = $(CFLAGS)'
- @echo ' CPPFLAGS = $(CPPFLAGS)'
- @echo ' LDFLAGS = $(LDFLAGS)'
- @echo ' LIBS = $(LIBS)'
-
-.SUFFIXES: .c .o
-
-.c.o:
- @echo Compiling $*.c
- @mkdir -p `dirname $@`
- @$(CC) $(PICFLAG) $(CFLAGS) $(ABI_CHECK) -c $< -o $@
-
-distclean::
- rm -f *~ */*~
diff --git a/lib/tevent/samba.m4 b/lib/tevent/samba.m4
deleted file mode 100644
index 549f39d210..0000000000
--- a/lib/tevent/samba.m4
+++ /dev/null
@@ -1,11 +0,0 @@
-
-teventdir="\$(libteventsrcdir)"
-m4_include(../lib/tevent/libtevent.m4)
-
-SMB_EXT_LIB(LIBTEVENT_EXT, [${TEVENT_LIBS}])
-SMB_ENABLE(LIBTEVENT_EXT)
-
-SMB_SUBSYSTEM(LIBTEVENT,
- [\$(addprefix \$(libteventsrcdir)/, ${TEVENT_OBJ})],
- [LIBTEVENT_EXT],
- [${TEVENT_CFLAGS}])
diff --git a/lib/tevent/testsuite.c b/lib/tevent/testsuite.c
index f9aca91aa1..991e161733 100644
--- a/lib/tevent/testsuite.c
+++ b/lib/tevent/testsuite.c
@@ -146,7 +146,7 @@ static bool test_event_context(struct torture_context *test,
struct torture_suite *torture_local_event(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "EVENT");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "event");
const char **list = event_backend_list(suite);
int i;
diff --git a/lib/tevent/tevent.c b/lib/tevent/tevent.c
index a0ee208663..87e5aff034 100644
--- a/lib/tevent/tevent.c
+++ b/lib/tevent/tevent.c
@@ -88,7 +88,7 @@ bool tevent_register_backend(const char *name, const struct tevent_ops *ops)
}
}
- e = talloc(talloc_autofree_context(), struct tevent_ops_list);
+ e = talloc(NULL, struct tevent_ops_list);
if (e == NULL) return false;
e->name = name;
@@ -104,8 +104,7 @@ bool tevent_register_backend(const char *name, const struct tevent_ops *ops)
void tevent_set_default_backend(const char *backend)
{
talloc_free(tevent_default_backend);
- tevent_default_backend = talloc_strdup(talloc_autofree_context(),
- backend);
+ tevent_default_backend = talloc_strdup(NULL, backend);
}
/*
@@ -114,6 +113,7 @@ void tevent_set_default_backend(const char *backend)
static void tevent_backend_init(void)
{
tevent_select_init();
+ tevent_poll_init();
tevent_standard_init();
#ifdef HAVE_EPOLL
tevent_epoll_init();
@@ -262,9 +262,6 @@ struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx)
/*
add a fd based event
return NULL on failure (memory allocation error)
-
- if flags contains TEVENT_FD_AUTOCLOSE then the fd will be closed when
- the returned fd_event context is freed
*/
struct tevent_fd *_tevent_add_fd(struct tevent_context *ev,
TALLOC_CTX *mem_ctx,
@@ -616,3 +613,18 @@ int _tevent_loop_wait(struct tevent_context *ev, const char *location)
{
return ev->ops->loop_wait(ev, location);
}
+
+
+/*
+ re-initialise a tevent context. This leaves you with the same
+ event context, but all events are wiped and the structure is
+ re-initialised. This is most useful after a fork()
+
+ zero is returned on success, non-zero on failure
+*/
+int tevent_re_initialise(struct tevent_context *ev)
+{
+ tevent_common_context_destructor(ev);
+
+ return ev->ops->context_init(ev);
+}
diff --git a/lib/tevent/tevent.exports b/lib/tevent/tevent.exports
deleted file mode 100644
index 01d547ad85..0000000000
--- a/lib/tevent/tevent.exports
+++ /dev/null
@@ -1,62 +0,0 @@
-{
- global:
- _tevent_add_fd;
- _tevent_add_signal;
- _tevent_add_timer;
- tevent_backend_list;
- tevent_context_init;
- tevent_context_init_byname;
- _tevent_create_immediate;
- tevent_fd_get_flags;
- tevent_fd_set_auto_close;
- tevent_fd_set_close_fn;
- tevent_fd_set_flags;
- tevent_loop_allow_nesting;
- _tevent_loop_once;
- tevent_loop_set_nesting_hook;
- _tevent_loop_until;
- _tevent_loop_wait;
- tevent_queue_add;
- _tevent_queue_create;
- tevent_queue_length;
- tevent_queue_start;
- tevent_queue_stop;
- tevent_register_backend;
- _tevent_req_callback_data;
- _tevent_req_create;
- _tevent_req_data;
- tevent_req_default_print;
- _tevent_req_done;
- _tevent_req_error;
- tevent_req_is_error;
- tevent_req_is_in_progress;
- _tevent_req_nomem;
- _tevent_req_notify_callback;
- tevent_req_poll;
- tevent_req_post;
- tevent_req_print;
- tevent_req_received;
- tevent_req_set_callback;
- tevent_req_set_endtime;
- tevent_req_set_print_fn;
- _tevent_schedule_immediate;
- tevent_set_abort_fn;
- tevent_set_debug;
- tevent_set_debug_stderr;
- tevent_set_default_backend;
- tevent_signal_support;
- tevent_timeval_add;
- tevent_timeval_compare;
- tevent_timeval_current;
- tevent_timeval_current_ofs;
- tevent_timeval_is_zero;
- tevent_timeval_set;
- tevent_timeval_until;
- tevent_timeval_zero;
- tevent_wakeup_recv;
- tevent_wakeup_send;
- _tevent_req_cancel;
- tevent_req_set_cancel_fn;
-
- local: *;
-};
diff --git a/lib/tevent/tevent.h b/lib/tevent/tevent.h
index d3556053ac..665c491ebb 100644
--- a/lib/tevent/tevent.h
+++ b/lib/tevent/tevent.h
@@ -40,22 +40,54 @@ struct tevent_timer;
struct tevent_immediate;
struct tevent_signal;
+/**
+ * @defgroup tevent The tevent API
+ *
+ * The tevent low-level API
+ *
+ * This API provides the public interface to manage events in the tevent
+ * mainloop. Functions are provided for managing low-level events such
+ * as timer events, fd events and signal handling.
+ *
+ * @{
+ */
+
/* event handler types */
+/**
+ * Called when a file descriptor monitored by tevent has
+ * data to be read or written on it.
+ */
typedef void (*tevent_fd_handler_t)(struct tevent_context *ev,
struct tevent_fd *fde,
uint16_t flags,
void *private_data);
+
+/**
+ * Called when tevent is ceasing the monitoring of a file descriptor.
+ */
typedef void (*tevent_fd_close_fn_t)(struct tevent_context *ev,
struct tevent_fd *fde,
int fd,
void *private_data);
+
+/**
+ * Called when a tevent timer has fired.
+ */
typedef void (*tevent_timer_handler_t)(struct tevent_context *ev,
struct tevent_timer *te,
struct timeval current_time,
void *private_data);
+
+/**
+ * Called when a tevent immediate event is invoked.
+ */
typedef void (*tevent_immediate_handler_t)(struct tevent_context *ctx,
struct tevent_immediate *im,
void *private_data);
+
+/**
+ * Called after tevent detects the specified signal.
+ */
typedef void (*tevent_signal_handler_t)(struct tevent_context *ev,
struct tevent_signal *se,
int signum,
@@ -63,11 +95,81 @@ typedef void (*tevent_signal_handler_t)(struct tevent_context *ev,
void *siginfo,
void *private_data);
+/**
+ * @brief Create an event_context structure.
+ *
+ * This must be the first events call, and all subsequent calls pass this
+ * event_context as the first element. Event handlers also receive this as
+ * their first argument.
+ *
+ * @param[in] mem_ctx The memory context to use.
+ *
+ * @return An allocated tevent context, NULL on error.
+ *
+ * @see tevent_context_init()
+ */
struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx);
+
+/**
+ * @brief Create an event_context structure and name it.
+ *
+ * This must be the first events call, and all subsequent calls pass this
+ * event_context as the first element. Event handlers also receive this as
+ * their first argument.
+ *
+ * @param[in] mem_ctx The memory context to use.
+ *
+ * @param[in] name The name for the tevent context.
+ *
+ * @return An allocated tevent context, NULL on error.
+ */
struct tevent_context *tevent_context_init_byname(TALLOC_CTX *mem_ctx, const char *name);
+
+/**
+ * @brief List available backends.
+ *
+ * @param[in] mem_ctx The memory context to use.
+ *
+ * @return A string vector with a terminating NULL element, NULL
+ * on error.
+ */
const char **tevent_backend_list(TALLOC_CTX *mem_ctx);
+
+/**
+ * @brief Set the default tevent backend.
+ *
+ * @param[in] backend The name of the backend to set.
+ */
void tevent_set_default_backend(const char *backend);
+#ifdef DOXYGEN
+/**
+ * @brief Add a file descriptor based event.
+ *
+ * @param[in] ev The event context to work on.
+ *
+ * @param[in] mem_ctx The talloc memory context to use.
+ *
+ * @param[in] fd The file descriptor to base the event on.
+ *
+ * @param[in] flags #TEVENT_FD_READ or #TEVENT_FD_WRITE
+ *
+ * @param[in] handler The callback handler for the event.
+ *
+ * @param[in] private_data The private data passed to the callback handler.
+ *
+ * @return The file descriptor based event, NULL on error.
+ *
+ * @note To cancel the monitoring of a file descriptor, call talloc_free()
+ * on the object returned by this function.
+ */
+struct tevent_fd *tevent_add_fd(struct tevent_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int fd,
+ uint16_t flags,
+ tevent_fd_handler_t handler,
+ void *private_data);
+#else
struct tevent_fd *_tevent_add_fd(struct tevent_context *ev,
TALLOC_CTX *mem_ctx,
int fd,
@@ -79,7 +181,44 @@ struct tevent_fd *_tevent_add_fd(struct tevent_context *ev,
#define tevent_add_fd(ev, mem_ctx, fd, flags, handler, private_data) \
_tevent_add_fd(ev, mem_ctx, fd, flags, handler, private_data, \
#handler, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Add a timed event
+ *
+ * @param[in] ev The event context to work on.
+ *
+ * @param[in] mem_ctx The talloc memory context to use.
+ *
+ * @param[in] next_event Timeval specifying the absolute time to fire this
+ * event. This is not an offset.
+ *
+ * @param[in] handler The callback handler for the event.
+ *
+ * @param[in] private_data The private data passed to the callback handler.
+ *
+ * @return The newly-created timer event, or NULL on error.
+ *
+ * @note To cancel a timer event before it fires, call talloc_free() on the
+ * event returned from this function. This event is automatically
+ * talloc_free()-ed after its event handler fires, if it hasn't been freed yet.
+ *
+ * @note Unlike some mainloops, tevent timers are one-time events. To set up
+ * a recurring event, it is necessary to call tevent_add_timer() again during
+ * the handler processing.
+ *
+ * @note Due to the internal mainloop processing, a timer set to run
+ * immediately will do so after any other pending timers fire, but before
+ * any further file descriptor or signal handling events fire. Callers should
+ * not rely on this behavior!
+ */
+struct tevent_timer *tevent_add_timer(struct tevent_context *ev,
+ TALLOC_CTX *mem_ctx,
+ struct timeval next_event,
+ tevent_timer_handler_t handler,
+ void *private_data);
+#else
struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
TALLOC_CTX *mem_ctx,
struct timeval next_event,
@@ -90,12 +229,47 @@ struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
#define tevent_add_timer(ev, mem_ctx, next_event, handler, private_data) \
_tevent_add_timer(ev, mem_ctx, next_event, handler, private_data, \
#handler, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * Initialize an immediate event object
+ *
+ * This object can be used to trigger an event to occur immediately after
+ * returning from the current event (before any other event occurs)
+ *
+ * @param[in] mem_ctx The talloc memory context to use as the parent
+ *
+ * @return An empty tevent_immediate object. Use tevent_schedule_immediate
+ * to populate and use it.
+ *
+ * @note Available as of tevent 0.9.8
+ */
+struct tevent_immediate *tevent_create_immediate(TALLOC_CTX *mem_ctx);
+#else
struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx,
const char *location);
#define tevent_create_immediate(mem_ctx) \
_tevent_create_immediate(mem_ctx, __location__)
+#endif
+#ifdef DOXYGEN
+
+/**
+ * Schedule an event for immediate execution. This event will occur
+ * immediately after returning from the current event (before any other
+ * event occurs)
+ *
+ * @param[in] im The tevent_immediate object to populate and use
+ * @param[in] ctx The tevent_context to run this event
+ * @param[in] handler The event handler to run when this event fires
+ * @param[in] private_data Data to pass to the event handler
+ */
+void tevent_schedule_immediate(struct tevent_immediate *im,
+ struct tevent_context *ctx,
+ tevent_immediate_handler_t handler,
+ void *private_data);
+#else
void _tevent_schedule_immediate(struct tevent_immediate *im,
struct tevent_context *ctx,
tevent_immediate_handler_t handler,
@@ -105,7 +279,40 @@ void _tevent_schedule_immediate(struct tevent_immediate *im,
#define tevent_schedule_immediate(im, ctx, handler, private_data) \
_tevent_schedule_immediate(im, ctx, handler, private_data, \
#handler, __location__);
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Add a tevent signal handler
+ *
+ * tevent_add_signal() creates a new event for handling a signal the next
+ * time through the mainloop. It implements a very simple traditional signal
+ * handler whose only purpose is to add the handler event into the mainloop.
+ *
+ * @param[in] ev The event context to work on.
+ *
+ * @param[in] mem_ctx The talloc memory context to use.
+ *
+ * @param[in] signum The signal to trap
+ *
+ * @param[in] handler The callback handler for the signal.
+ *
+ * @param[in] sa_flags sigaction flags for this signal handler.
+ *
+ * @param[in] private_data The private data passed to the callback handler.
+ *
+ * @return The newly-created signal handler event, or NULL on error.
+ *
+ * @note To cancel a signal handler, call talloc_free() on the event returned
+ * from this function.
+ */
+struct tevent_signal *tevent_add_signal(struct tevent_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int signum,
+ int sa_flags,
+ tevent_signal_handler_t handler,
+ void *private_data);
+#else
struct tevent_signal *_tevent_add_signal(struct tevent_context *ev,
TALLOC_CTX *mem_ctx,
int signum,
@@ -117,40 +324,134 @@ struct tevent_signal *_tevent_add_signal(struct tevent_context *ev,
#define tevent_add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data) \
_tevent_add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data, \
#handler, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Pass a single time through the mainloop
+ *
+ * This will process any appropriate signal, immediate, fd and timer events
+ *
+ * @param[in] ev The event context to process
+ *
+ * @return Zero on success, nonzero if an internal error occurred
+ */
+int tevent_loop_once(struct tevent_context *ev);
+#else
int _tevent_loop_once(struct tevent_context *ev, const char *location);
#define tevent_loop_once(ev) \
- _tevent_loop_once(ev, __location__) \
+ _tevent_loop_once(ev, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Run the mainloop
+ *
+ * The mainloop will run until there are no events remaining to be processed
+ *
+ * @param[in] ev The event context to process
+ *
+ * @return Zero if all events have been processed. Nonzero if an internal
+ * error occurred.
+ */
+int tevent_loop_wait(struct tevent_context *ev);
+#else
int _tevent_loop_wait(struct tevent_context *ev, const char *location);
#define tevent_loop_wait(ev) \
- _tevent_loop_wait(ev, __location__) \
+ _tevent_loop_wait(ev, __location__)
+#endif
+
+/**
+ * Assign a function to run when a tevent_fd is freed
+ *
+ * This function is a destructor for the tevent_fd. It does not automatically
+ * close the file descriptor. If this is the desired behavior, then it must be
+ * performed by the close_fn.
+ *
+ * @param[in] fde File descriptor event on which to set the destructor
+ * @param[in] close_fn Destructor to execute when fde is freed
+ */
void tevent_fd_set_close_fn(struct tevent_fd *fde,
tevent_fd_close_fn_t close_fn);
+
+/**
+ * Automatically close the file descriptor when the tevent_fd is freed
+ *
+ * This function calls close(fd) internally.
+ *
+ * @param[in] fde File descriptor event to auto-close
+ */
void tevent_fd_set_auto_close(struct tevent_fd *fde);
+
+/**
+ * Return the flags set on this file descriptor event
+ *
+ * @param[in] fde File descriptor event to query
+ *
+ * @return The flags set on the event. See #TEVENT_FD_READ and
+ * #TEVENT_FD_WRITE
+ */
uint16_t tevent_fd_get_flags(struct tevent_fd *fde);
+
+/**
+ * Set flags on a file descriptor event
+ *
+ * @param[in] fde File descriptor event to set
+ * @param[in] flags Flags to set on the event. See #TEVENT_FD_READ and
+ * #TEVENT_FD_WRITE
+ */
void tevent_fd_set_flags(struct tevent_fd *fde, uint16_t flags);
+/**
+ * Query whether tevent supports signal handling
+ *
+ * @param[in] ev An initialized tevent context
+ *
+ * @return True if this platform and tevent context support signal handling
+ */
bool tevent_signal_support(struct tevent_context *ev);
void tevent_set_abort_fn(void (*abort_fn)(const char *reason));
/* bits for file descriptor event flags */
+
+/**
+ * Monitor a file descriptor for data to be read
+ */
+#define TEVENT_FD_READ 1
+/**
+ * Monitor a file descriptor for write availability
+ */
+#define TEVENT_FD_WRITE 2
+/**
+ * Convenience function for declaring a tevent_fd writable
+ */
#define TEVENT_FD_WRITEABLE(fde) \
tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) | TEVENT_FD_WRITE)
+
+/**
+ * Convenience function for declaring a tevent_fd readable
+ */
#define TEVENT_FD_READABLE(fde) \
tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) | TEVENT_FD_READ)
+/**
+ * Convenience function for declaring a tevent_fd non-writable
+ */
#define TEVENT_FD_NOT_WRITEABLE(fde) \
tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) & ~TEVENT_FD_WRITE)
+
+/**
+ * Convenience function for declaring a tevent_fd non-readable
+ */
#define TEVENT_FD_NOT_READABLE(fde) \
tevent_fd_set_flags(fde, tevent_fd_get_flags(fde) & ~TEVENT_FD_READ)
-/* DEBUG */
+/**
+ * Debug level of tevent
+ */
enum tevent_debug_level {
TEVENT_DEBUG_FATAL,
TEVENT_DEBUG_ERROR,
@@ -158,94 +459,414 @@ enum tevent_debug_level {
TEVENT_DEBUG_TRACE
};
+/**
+ * @brief The tevent debug callback.
+ *
+ * @param[in] context The memory context to use.
+ *
+ * @param[in] level The debug level.
+ *
+ * @param[in] fmt The format string.
+ *
+ * @param[in] ap The arguments for the format string.
+ */
+typedef void (*tevent_debug_fn)(void *context,
+ enum tevent_debug_level level,
+ const char *fmt,
+ va_list ap) PRINTF_ATTRIBUTE(3,0);
+
+/**
+ * Set destination for tevent debug messages
+ *
+ * @param[in] ev Event context to debug
+ * @param[in] debug Function to handle output printing
+ * @param[in] context The context to pass to the debug function.
+ *
+ * @return Always returns 0 as of version 0.9.8
+ *
+ * @note Default is to emit no debug messages
+ */
int tevent_set_debug(struct tevent_context *ev,
- void (*debug)(void *context,
- enum tevent_debug_level level,
- const char *fmt,
- va_list ap) PRINTF_ATTRIBUTE(3,0),
+ tevent_debug_fn debug,
void *context);
+
+/**
+ * Designate stderr for debug message output
+ *
+ * @param[in] ev Event context to debug
+ *
+ * @note This function will only output TEVENT_DEBUG_FATAL, TEVENT_DEBUG_ERROR
+ * and TEVENT_DEBUG_WARNING messages. For TEVENT_DEBUG_TRACE, please define a
+ * function for tevent_set_debug()
+ */
int tevent_set_debug_stderr(struct tevent_context *ev);
/**
- * An async request moves between the following 4 states:
+ * @}
+ */
+
+/**
+ * @defgroup tevent_request The tevent request functions.
+ * @ingroup tevent
+ *
+ * A tevent_req represents an asynchronous computation.
+ *
+ * The tevent_req group of API calls is the recommended way of
+ * programming async computations within tevent. In particular the
+ * file descriptor (tevent_add_fd) and timer (tevent_add_timed) events
+ * are considered too low-level to be used in larger computations. To
+ * read and write from and to sockets, Samba provides two calls on top
+ * of tevent_add_fd: read_packet_send/recv and writev_send/recv. These
+ * requests are much easier to compose than the low-level event
+ * handlers called from tevent_add_fd.
+ *
+ * A lot of the simplicity tevent_req has brought to the notoriously
+ * hairy async programming came via a set of conventions that every
+ * async computation programmed should follow. One central piece of
+ * these conventions is the naming of routines and variables.
+ *
+ * Every async computation needs a name (sensibly called "computation"
+ * down from here). From this name quite a few naming conventions are
+ * derived.
+ *
+ * Every computation that requires local state needs a
+ * @code
+ * struct computation_state {
+ * int local_var;
+ * };
+ * @endcode
+ * Even if no local variables are required, such a state struct should
+ * be created containing a dummy variable. Quite a few helper
+ * functions and macros (for example tevent_req_create()) assume such
+ * a state struct.
+ *
+ * An async computation is started by a computation_send
+ * function. When it is finished, its result can be received by a
+ * computation_recv function. For an example how to set up an async
+ * computation, see the code example in the documentation for
+ * tevent_req_create() and tevent_req_post(). The prototypes for _send
+ * and _recv functions should follow some conventions:
+ *
+ * @code
+ * struct tevent_req *computation_send(TALLOC_CTX *mem_ctx,
+ * struct tevent_context *ev,
+ * ... further args);
+ * int computation_recv(struct tevent_req *req, ... further output args);
+ * @endcode
+ *
+ * The "int" result of computation_recv() depends on the result the
+ * sync version of the function would have, "int" is just an example
+ * here.
+ *
+ * Another important piece of the conventions is that the program flow
+ * is interrupted as little as possible. Because a blocking
+ * sub-computation requires that the flow needs to continue in a
+ * separate function that is the logical sequel of some computation,
+ * it should lexically follow sending off the blocking
+ * sub-computation. Setting the callback function via
+ * tevent_req_set_callback() requires referencing a function lexically
+ * below the call to tevent_req_set_callback(), forward declarations
+ * are required. A lot of the async computations thus begin with a
+ * sequence of declarations such as
+ *
+ * @code
+ * static void computation_step1_done(struct tevent_req *subreq);
+ * static void computation_step2_done(struct tevent_req *subreq);
+ * static void computation_step3_done(struct tevent_req *subreq);
+ * @endcode
+ *
+ * It really helps readability a lot to do these forward declarations,
+ * because the lexically sequential program flow makes the async
+ * computations almost as clear to read as a normal, sync program
+ * flow.
+ *
+ * It is up to the user of the async computation to talloc_free it
+ * after it has finished. If an async computation should be aborted,
+ * the tevent_req structure can be talloc_free'ed. After it has
+ * finished, it should be talloc_free'ed by the API user.
+ *
+ * @{
+ */
+
+/**
+ * An async request moves from TEVENT_REQ_INIT to
+ * TEVENT_REQ_IN_PROGRESS. All other states are valid after a request
+ * has finished.
*/
enum tevent_req_state {
/**
- * we are creating the request
+ * We are creating the request
*/
TEVENT_REQ_INIT,
/**
- * we are waiting the request to complete
+ * We are waiting the request to complete
*/
TEVENT_REQ_IN_PROGRESS,
/**
- * the request is finished
+ * The request is finished successfully
*/
TEVENT_REQ_DONE,
/**
- * A user error has occured
+ * A user error has occurred. The user error has been
+ * indicated by tevent_req_error(), it can be retrieved via
+ * tevent_req_is_error().
*/
TEVENT_REQ_USER_ERROR,
/**
- * Request timed out
+ * Request timed out after the timeout set by tevent_req_set_endtime.
*/
TEVENT_REQ_TIMED_OUT,
/**
- * No memory in between
+ * An internal allocation has failed, or tevent_req_nomem has
+ * been given a NULL pointer as the first argument.
*/
TEVENT_REQ_NO_MEMORY,
/**
- * the request is already received by the caller
+ * The request has been received by the caller. No further
+ * action is valid.
*/
TEVENT_REQ_RECEIVED
};
/**
* @brief An async request
- *
- * This represents an async request being processed by callbacks via an event
- * context. A user can issue for example a write request to a socket, giving
- * an implementation function the fd, the buffer and the number of bytes to
- * transfer. The function issuing the request will immediately return without
- * blocking most likely without having sent anything. The API user then fills
- * in req->async.fn and req->async.private_data, functions that are called
- * when the request is finished.
- *
- * It is up to the user of the async request to talloc_free it after it has
- * finished. This can happen while the completion function is called.
*/
-
struct tevent_req;
-typedef void (*tevent_req_fn)(struct tevent_req *);
+/**
+ * @brief A tevent request callback function.
+ *
+ * @param[in] req The tevent async request which executed this callback.
+ */
+typedef void (*tevent_req_fn)(struct tevent_req *req);
+/**
+ * @brief Set an async request callback.
+ *
+ * See the documentation of tevent_req_post() for an example how this
+ * is supposed to be used.
+ *
+ * @param[in] req The async request to set the callback.
+ *
+ * @param[in] fn The callback function to set.
+ *
+ * @param[in] pvt A pointer to private data to pass to the async request
+ * callback.
+ */
void tevent_req_set_callback(struct tevent_req *req, tevent_req_fn fn, void *pvt);
-void *_tevent_req_callback_data(struct tevent_req *req);
-void *_tevent_req_data(struct tevent_req *req);
+#ifdef DOXYGEN
+/**
+ * @brief Get the private data cast to the given type for a callback from
+ * a tevent request structure.
+ *
+ * @code
+ * static void computation_done(struct tevent_req *subreq) {
+ * struct tevent_req *req = tevent_req_callback_data(subreq, struct tevent_req);
+ * struct computation_state *state = tevent_req_data(req, struct computation_state);
+ * .... more things, eventually maybe call tevent_req_done(req);
+ * }
+ * @endcode
+ *
+ * @param[in] req The structure to get the callback data from.
+ *
+ * @param[in] type The type of the private callback data to get.
+ *
+ * @return The type-casted private data, or NULL if not set.
+ */
+void *tevent_req_callback_data(struct tevent_req *req, #type);
+#else
+void *_tevent_req_callback_data(struct tevent_req *req);
#define tevent_req_callback_data(_req, _type) \
talloc_get_type_abort(_tevent_req_callback_data(_req), _type)
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Get the private data for a callback from a tevent request structure.
+ *
+ * @param[in] req The structure to get the callback data from.
+ *
+ * @return The private data or NULL if not set.
+ */
+void *tevent_req_callback_data_void(struct tevent_req *req);
+#else
#define tevent_req_callback_data_void(_req) \
_tevent_req_callback_data(_req)
+#endif
+
+#ifdef DOXYGEN
+/**
+ * @brief Get the private data from a tevent request structure.
+ *
+ * When the tevent_req has been created by tevent_req_create, the
+ * result of tevent_req_data() is the state variable created by
+ * tevent_req_create() as a child of the req.
+ *
+ * @param[in] req The structure to get the private data from.
+ *
+ * @param[in] type The type of the private data
+ *
+ * @return The private data or NULL if not set.
+ */
+void *tevent_req_data(struct tevent_req *req, #type);
+#else
+void *_tevent_req_data(struct tevent_req *req);
#define tevent_req_data(_req, _type) \
talloc_get_type_abort(_tevent_req_data(_req), _type)
+#endif
-typedef char *(*tevent_req_print_fn)(struct tevent_req *, TALLOC_CTX *);
+/**
+ * @brief The print function which can be set for a tevent async request.
+ *
+ * @param[in] req The tevent async request.
+ *
+ * @param[in] ctx A talloc memory context which can be used to allocate
+ * memory.
+ *
+ * @return An allocated string buffer to print.
+ *
+ * Example:
+ * @code
+ * static char *my_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
+ * {
+ * struct my_data *data = tevent_req_data(req, struct my_data);
+ * char *result;
+ *
+ * result = tevent_req_default_print(mem_ctx, req);
+ * if (result == NULL) {
+ * return NULL;
+ * }
+ *
+ * return talloc_asprintf_append_buffer(result, "foo=%d, bar=%d",
+ * data->foo, data->bar);
+ * }
+ * @endcode
+ */
+typedef char *(*tevent_req_print_fn)(struct tevent_req *req, TALLOC_CTX *ctx);
+/**
+ * @brief This function sets a print function for the given request.
+ *
+ * This function can be used to setup a print function for the given request.
+ * This will be triggered if the tevent_req_print() function was
+ * called on the given request.
+ *
+ * @param[in] req The request to use.
+ *
+ * @param[in] fn A pointer to the print function
+ *
+ * @note This function should only be used for debugging.
+ */
void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn);
+/**
+ * @brief The default print function for creating debug messages.
+ *
+ * The function should not be used by users of the async API,
+ * but custom print function can use it and append custom text
+ * to the string.
+ *
+ * @param[in] req The request to be printed.
+ *
+ * @param[in] mem_ctx The memory context for the result.
+ *
+ * @return Text representation of request.
+ *
+ */
char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx);
+/**
+ * @brief Print a tevent_req structure in debug messages.
+ *
+ * This function should be used by callers of the async API.
+ *
+ * @param[in] mem_ctx The memory context for the result.
+ *
+ * @param[in] req The request to be printed.
+ *
+ * @return Text representation of request.
+ */
char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req);
-typedef bool (*tevent_req_cancel_fn)(struct tevent_req *);
+/**
+ * @brief A typedef for a cancel function for a tevent request.
+ *
+ * @param[in] req The tevent request calling this function.
+ *
+ * @return True if the request could be canceled, false if not.
+ */
+typedef bool (*tevent_req_cancel_fn)(struct tevent_req *req);
+/**
+ * @brief This function sets a cancel function for the given tevent request.
+ *
+ * This function can be used to setup a cancel function for the given request.
+ * This will be triggered if the tevent_req_cancel() function was
+ * called on the given request.
+ *
+ * @param[in] req The request to use.
+ *
+ * @param[in] fn A pointer to the cancel function.
+ */
void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn);
+#ifdef DOXYGEN
+/**
+ * @brief Try to cancel the given tevent request.
+ *
+ * This function can be used to cancel the given request.
+ *
+ * It is only possible to cancel a request when the implementation
+ * has registered a cancel function via the tevent_req_set_cancel_fn().
+ *
+ * @param[in] req The request to use.
+ *
+ * @return This function returns true if the request is cancelable,
+ * otherwise false is returned.
+ *
+ * @note Even if the function returns true, the caller needs to wait
+ * for the function to complete normally.
+ * Only the _recv() function of the given request indicates
+ * if the request was really canceled.
+ */
+bool tevent_req_cancel(struct tevent_req *req);
+#else
bool _tevent_req_cancel(struct tevent_req *req, const char *location);
#define tevent_req_cancel(req) \
_tevent_req_cancel(req, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Create an async tevent request.
+ *
+ * The new async request will be initialized in state TEVENT_REQ_IN_PROGRESS.
+ *
+ * @code
+ * struct tevent_req *req;
+ * struct computation_state *state;
+ * req = tevent_req_create(mem_ctx, &state, struct computation_state);
+ * @endcode
+ *
+ * Tevent_req_create() creates the state variable as a talloc child of
+ * its result. The state variable should be used as the talloc parent
+ * for all temporary variables that are allocated during the async
+ * computation. This way, when the user of the async computation frees
+ * the request, the state as a talloc child will be free'd along with
+ * all the temporary variables hanging off the state.
+ *
+ * @param[in] mem_ctx The memory context for the result.
+ * @param[in] pstate Pointer to the private request state.
+ * @param[in] type The name of the request.
+ *
+ * @return A new async request. NULL on error.
+ */
+struct tevent_req *tevent_req_create(TALLOC_CTX *mem_ctx,
+ void **pstate, #type);
+#else
struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
void *pstate,
size_t state_size,
@@ -255,89 +876,507 @@ struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
#define tevent_req_create(_mem_ctx, _pstate, _type) \
_tevent_req_create((_mem_ctx), (_pstate), sizeof(_type), \
#_type, __location__)
+#endif
+/**
+ * @brief Set a timeout for an async request.
+ *
+ * @param[in] req The request to set the timeout for.
+ *
+ * @param[in] ev The event context to use for the timer.
+ *
+ * @param[in] endtime The endtime of the request.
+ *
+ * @return True if succeeded, false if not.
+ */
bool tevent_req_set_endtime(struct tevent_req *req,
struct tevent_context *ev,
struct timeval endtime);
+#ifdef DOXYGEN
+/**
+ * @brief Call the notify callback of the given tevent request manually.
+ *
+ * @param[in] req The tevent request to call the notify function from.
+ *
+ * @see tevent_req_set_callback()
+ */
+void tevent_req_notify_callback(struct tevent_req *req);
+#else
void _tevent_req_notify_callback(struct tevent_req *req, const char *location);
#define tevent_req_notify_callback(req) \
_tevent_req_notify_callback(req, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief An async request has successfully finished.
+ *
+ * This function is to be used by implementors of async requests. When a
+ * request is successfully finished, this function calls the user's completion
+ * function.
+ *
+ * @param[in] req The finished request.
+ */
+void tevent_req_done(struct tevent_req *req);
+#else
void _tevent_req_done(struct tevent_req *req,
const char *location);
#define tevent_req_done(req) \
_tevent_req_done(req, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief An async request has seen an error.
+ *
+ * This function is to be used by implementors of async requests. When a
+ * request can not successfully completed, the implementation should call this
+ * function with the appropriate status code.
+ *
+ * If error is 0 the function returns false and does nothing more.
+ *
+ * @param[in] req The request with an error.
+ *
+ * @param[in] error The error code.
+ *
+ * @return On success true is returned, false if error is 0.
+ *
+ * @code
+ * int error = first_function();
+ * if (tevent_req_error(req, error)) {
+ * return;
+ * }
+ *
+ * error = second_function();
+ * if (tevent_req_error(req, error)) {
+ * return;
+ * }
+ *
+ * tevent_req_done(req);
+ * return;
+ * @endcode
+ */
+bool tevent_req_error(struct tevent_req *req,
+ uint64_t error);
+#else
bool _tevent_req_error(struct tevent_req *req,
uint64_t error,
const char *location);
#define tevent_req_error(req, error) \
_tevent_req_error(req, error, __location__)
+#endif
+#ifdef DOXYGEN
+/**
+ * @brief Helper function for nomem check.
+ *
+ * Convenience helper to easily check alloc failure within a callback
+ * implementing the next step of an async request.
+ *
+ * @param[in] p The pointer to be checked.
+ *
+ * @param[in] req The request being processed.
+ *
+ * @code
+ * p = talloc(mem_ctx, bla);
+ * if (tevent_req_nomem(p, req)) {
+ * return;
+ * }
+ * @endcode
+ */
+bool tevent_req_nomem(const void *p,
+ struct tevent_req *req);
+#else
bool _tevent_req_nomem(const void *p,
struct tevent_req *req,
const char *location);
#define tevent_req_nomem(p, req) \
_tevent_req_nomem(p, req, __location__)
+#endif
+/**
+ * @brief Finish a request before the caller had the chance to set the callback.
+ *
+ * An implementation of an async request might find that it can either finish
+ * the request without waiting for an external event, or it can not even start
+ * the engine. To present the illusion of a callback to the user of the API,
+ * the implementation can call this helper function which triggers an
+ * immediate timed event. This way the caller can use the same calling
+ * conventions, independent of whether the request was actually deferred.
+ *
+ * @code
+ * struct tevent_req *computation_send(TALLOC_CTX *mem_ctx,
+ * struct tevent_context *ev)
+ * {
+ * struct tevent_req *req, *subreq;
+ * struct computation_state *state;
+ * req = tevent_req_create(mem_ctx, &state, struct computation_state);
+ * if (req == NULL) {
+ * return NULL;
+ * }
+ * subreq = subcomputation_send(state, ev);
+ * if (tevent_req_nomem(subreq, req)) {
+ * return tevent_req_post(req, ev);
+ * }
+ * tevent_req_set_callback(subreq, computation_done, req);
+ * return req;
+ * }
+ * @endcode
+ *
+ * @param[in] req The finished request.
+ *
+ * @param[in] ev The tevent_context for the timed event.
+ *
+ * @return The given request will be returned.
+ */
struct tevent_req *tevent_req_post(struct tevent_req *req,
struct tevent_context *ev);
+/**
+ * @brief Check if the given request is still in progress.
+ *
+ * It is typically used by sync wrapper functions.
+ *
+ * @param[in] req The request to poll.
+ *
+ * @return The boolean form of "is in progress".
+ */
bool tevent_req_is_in_progress(struct tevent_req *req);
+/**
+ * @brief Actively poll for the given request to finish.
+ *
+ * This function is typically used by sync wrapper functions.
+ *
+ * @param[in] req The request to poll.
+ *
+ * @param[in] ev The tevent_context to be used.
+ *
+ * @return On success true is returned. If a critical error has
+ * happened in the tevent loop layer false is returned.
+ * This is not the return value of the given request!
+ *
+ * @note This should only be used if the given tevent context was created by the
+ * caller, to avoid event loop nesting.
+ *
+ * @code
+ * req = tstream_writev_queue_send(mem_ctx,
+ * ev_ctx,
+ * tstream,
+ * send_queue,
+ * iov, 2);
+ * ok = tevent_req_poll(req, tctx->ev);
+ * rc = tstream_writev_queue_recv(req, &sys_errno);
+ * TALLOC_FREE(req);
+ * @endcode
+ */
bool tevent_req_poll(struct tevent_req *req,
struct tevent_context *ev);
+/**
+ * @brief Get the tevent request state and the actual error set by
+ * tevent_req_error.
+ *
+ * @code
+ * int computation_recv(struct tevent_req *req, uint64_t *perr)
+ * {
+ * enum tevent_req_state state;
+ * uint64_t err;
+ * if (tevent_req_is_error(req, &state, &err)) {
+ * *perr = err;
+ * return -1;
+ * }
+ * return 0;
+ * }
+ * @endcode
+ *
+ * @param[in] req The tevent request to get the error from.
+ *
+ * @param[out] state A pointer to store the tevent request error state.
+ *
+ * @param[out] error A pointer to store the error set by tevent_req_error().
+ *
+ * @return True if the function could set error and state, false
+ * otherwise.
+ *
+ * @see tevent_req_error()
+ */
bool tevent_req_is_error(struct tevent_req *req,
enum tevent_req_state *state,
uint64_t *error);
+/**
+ * @brief Use as the last action of a _recv() function.
+ *
+ * This function destroys the attached private data.
+ *
+ * @param[in] req The finished request.
+ */
void tevent_req_received(struct tevent_req *req);
+/**
+ * @brief Create a tevent subrequest at a given time.
+ *
+ * The idea is to always use the same syntax for tevent requests.
+ *
+ * @param[in] mem_ctx The talloc memory context to use.
+ *
+ * @param[in] ev The event handle to setup the request.
+ *
+ * @param[in] wakeup_time The time to wakeup and execute the request.
+ *
+ * @return The new subrequest, NULL on error.
+ *
+ * Example:
+ * @code
+ * static void my_callback_wakeup_done(struct tevent_req *subreq)
+ * {
+ * struct tevent_req *req = tevent_req_callback_data(subreq,
+ * struct tevent_req);
+ * bool ok;
+ *
+ * ok = tevent_wakeup_recv(subreq);
+ * TALLOC_FREE(subreq);
+ * if (!ok) {
+ * tevent_req_error(req, -1);
+ * return;
+ * }
+ * ...
+ * }
+ * @endcode
+ *
+ * @code
+ * subreq = tevent_wakeup_send(mem_ctx, ev, wakeup_time);
+ * if (tevent_req_nomem(subreq, req)) {
+ * return false;
+ * }
+ * tevent_req_set_callback(subreq, my_callback_wakeup_done, req);
+ * @endcode
+ *
+ * @see tevent_wakeup_recv()
+ */
struct tevent_req *tevent_wakeup_send(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
struct timeval wakeup_time);
+
+/**
+ * @brief Check if the wakeup has been correctly executed.
+ *
+ * This function needs to be called in the callback function set after calling
+ * tevent_wakeup_send().
+ *
+ * @param[in] req The tevent request to check.
+ *
+ * @return True on success, false otherwise.
+ *
+ * @see tevent_wakeup_send()
+ */
bool tevent_wakeup_recv(struct tevent_req *req);
+/* @} */
+
+/**
+ * @defgroup tevent_helpers The tevent helper functions
+ * @ingroup tevent
+ *
+ * @todo description
+ *
+ * @{
+ */
+
+/**
+ * @brief Compare two timeval values.
+ *
+ * @param[in] tv1 The first timeval value to compare.
+ *
+ * @param[in] tv2 The second timeval value to compare.
+ *
+ * @return 0 if they are equal.
+ * 1 if the first time is greater than the second.
+ * -1 if the first time is smaller than the second.
+ */
int tevent_timeval_compare(const struct timeval *tv1,
const struct timeval *tv2);
+/**
+ * @brief Get a zero timeval value.
+ *
+ * @return A zero timeval value.
+ */
struct timeval tevent_timeval_zero(void);
+/**
+ * @brief Get a timeval value for the current time.
+ *
+ * @return A timeval value with the current time.
+ */
struct timeval tevent_timeval_current(void);
+/**
+ * @brief Get a timeval structure with the given values.
+ *
+ * @param[in] secs The seconds to set.
+ *
+ * @param[in] usecs The microseconds to set.
+ *
+ * @return A timeval structure with the given values.
+ */
struct timeval tevent_timeval_set(uint32_t secs, uint32_t usecs);
+/**
+ * @brief Get the difference between two timeval values.
+ *
+ * @param[in] tv1 The first timeval.
+ *
+ * @param[in] tv2 The second timeval.
+ *
+ * @return A timeval structure with the difference between the
+ * first and the second value.
+ */
struct timeval tevent_timeval_until(const struct timeval *tv1,
const struct timeval *tv2);
+/**
+ * @brief Check if a given timeval structure is zero.
+ *
+ * @param[in] tv The timeval to check if it is zero.
+ *
+ * @return True if it is zero, false otherwise.
+ */
bool tevent_timeval_is_zero(const struct timeval *tv);
+/**
+ * @brief Add the given amount of time to a timeval structure.
+ *
+ * @param[in] tv The timeval structure to add the time.
+ *
+ * @param[in] secs The seconds to add to the timeval.
+ *
+ * @param[in] usecs The microseconds to add to the timeval.
+ *
+ * @return The timeval structure with the new time.
+ */
struct timeval tevent_timeval_add(const struct timeval *tv, uint32_t secs,
uint32_t usecs);
+/**
+ * @brief Get a timeval in the future with a specified offset from now.
+ *
+ * @param[in] secs The seconds of the offset from now.
+ *
+ * @param[in] usecs The microseconds of the offset from now.
+ *
+ * @return A timeval with the given offset in the future.
+ */
struct timeval tevent_timeval_current_ofs(uint32_t secs, uint32_t usecs);
+/* @} */
+
+
+/**
+ * @defgroup tevent_queue The tevent queue functions
+ * @ingroup tevent
+ *
+ * A tevent_queue is used to queue up async requests that must be
+ * serialized. For example writing buffers into a socket must be
+ * serialized. Writing a large lump of data into a socket can require
+ * multiple write(2) or send(2) system calls. If more than one async
+ * request is outstanding to write large buffers into a socket, every
+ * request must individually be completed before the next one begins,
+ * even if multiple syscalls are required.
+ *
+ * Take a look at @ref tevent_queue_tutorial for more details.
+ * @{
+ */
+
struct tevent_queue;
+#ifdef DOXYGEN
+/**
+ * @brief Create and start a tevent queue.
+ *
+ * @param[in] mem_ctx The talloc memory context to allocate the queue.
+ *
+ * @param[in] name The name to use to identify the queue.
+ *
+ * @return An allocated tevent queue on success, NULL on error.
+ *
+ * @see tevent_queue_start()
+ * @see tevent_queue_stop()
+ */
+struct tevent_queue *tevent_queue_create(TALLOC_CTX *mem_ctx,
+ const char *name);
+#else
struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
const char *name,
const char *location);
#define tevent_queue_create(_mem_ctx, _name) \
_tevent_queue_create((_mem_ctx), (_name), __location__)
+#endif
+/**
+ * @brief A callback trigger function run by the queue.
+ *
+ * @param[in] req The tevent request the trigger function is executed on.
+ *
+ * @param[in] private_data The private data pointer specified by
+ * tevent_queue_add().
+ *
+ * @see tevent_queue_add()
+ */
typedef void (*tevent_queue_trigger_fn_t)(struct tevent_req *req,
void *private_data);
+
+/**
+ * @brief Add a tevent request to the queue.
+ *
+ * @param[in] queue The queue to add the request.
+ *
+ * @param[in] ev The event handle to use for the request.
+ *
+ * @param[in] req The tevent request to add to the queue.
+ *
+ * @param[in] trigger The function triggered by the queue when the request
+ * is called.
+ *
+ * @param[in] private_data The private data passed to the trigger function.
+ *
+ * @return True if the request has been successfully added, false
+ * otherwise.
+ */
bool tevent_queue_add(struct tevent_queue *queue,
struct tevent_context *ev,
struct tevent_req *req,
tevent_queue_trigger_fn_t trigger,
void *private_data);
+
+/**
+ * @brief Start a tevent queue.
+ *
+ * The queue is started by default.
+ *
+ * @param[in] queue The queue to start.
+ */
void tevent_queue_start(struct tevent_queue *queue);
+
+/**
+ * @brief Stop a tevent queue.
+ *
+ * The queue is started by default.
+ *
+ * @param[in] queue The queue to stop.
+ */
void tevent_queue_stop(struct tevent_queue *queue);
+/**
+ * @brief Get the length of the queue.
+ *
+ * @param[in] queue The queue to get the length from.
+ *
+ * @return The number of elements.
+ */
size_t tevent_queue_length(struct tevent_queue *queue);
typedef int (*tevent_nesting_hook)(struct tevent_context *ev,
@@ -366,11 +1405,18 @@ int _tevent_loop_until(struct tevent_context *ev,
_tevent_loop_until(ev, finished, private_data, __location__)
#endif
+int tevent_re_initialise(struct tevent_context *ev);
+
+/* @} */
/**
+ * @defgroup tevent_ops The tevent operation functions
+ * @ingroup tevent
+ *
* The following structure and registration functions are exclusively
* needed for people writing and pluggin a different event engine.
* There is nothing useful for normal tevent user in here.
+ * @{
*/
struct tevent_ops {
@@ -423,11 +1469,19 @@ struct tevent_ops {
bool tevent_register_backend(const char *name, const struct tevent_ops *ops);
+/* @} */
/**
+ * @defgroup tevent_compat The tevent compatibility functions
+ * @ingroup tevent
+ *
* The following definitions are usueful only for compatibility with the
* implementation originally developed within the samba4 code and will be
* soon removed. Please NEVER use in new code.
+ *
+ * @todo Ignore it?
+ *
+ * @{
*/
#ifdef TEVENT_COMPAT_DEFINES
@@ -504,4 +1558,6 @@ bool tevent_register_backend(const char *name, const struct tevent_ops *ops);
#endif /* TEVENT_COMPAT_DEFINES */
+/* @} */
+
#endif /* __TEVENT_H__ */
diff --git a/lib/tevent/tevent.mk b/lib/tevent/tevent.mk
deleted file mode 100644
index 57bfd81222..0000000000
--- a/lib/tevent/tevent.mk
+++ /dev/null
@@ -1,46 +0,0 @@
-TEVENT_SOBASE = libtevent.$(SHLIBEXT)
-TEVENT_SONAME = $(TEVENT_SOBASE).0
-TEVENT_SOLIB = $(TEVENT_SOBASE).$(PACKAGE_VERSION)
-TEVENT_STLIB = libtevent.a
-
-$(TEVENT_STLIB): $(TEVENT_OBJ)
- ar -rv $(TEVENT_STLIB) $(TEVENT_OBJ)
-
-$(TEVENT_SOBASE): $(TEVENT_SOLIB)
- ln -fs $< $@
-
-$(TEVENT_SONAME): $(TEVENT_SOLIB)
- ln -fs $< $@
-
-dirs::
- @mkdir -p lib
-
-installdirs::
- mkdir -p $(DESTDIR)$(includedir)
- mkdir -p $(DESTDIR)$(libdir)
- mkdir -p $(DESTDIR)$(libdir)/pkgconfig
-
-installheaders:: installdirs
- cp $(srcdir)/tevent.h $(DESTDIR)$(includedir)
-
-installlibs:: installdirs
- cp tevent.pc $(DESTDIR)$(libdir)/pkgconfig
- cp $(TEVENT_STLIB) $(TEVENT_SOLIB) $(DESTDIR)$(libdir)
- rm -f $(DESTDIR)$(libdir)/$(TEVENT_SONAME)
- ln -s $(TEVENT_SOLIB) $(DESTDIR)$(libdir)/$(TEVENT_SONAME)
- rm -f $(DESTDIR)$(libdir)/$(TEVENT_SOBASE)
- ln -s $(TEVENT_SOLIB) $(DESTDIR)$(libdir)/$(TEVENT_SOBASE)
-
-install:: all installdirs installheaders installlibs $(PYTHON_INSTALL_TARGET)
-
-abi_checks::
- @echo ABI checks:
- @./script/abi_checks.sh tevent tevent.h
-
-test:: abi_checks
-
-clean::
- rm -f $(TEVENT_SOBASE) $(TEVENT_SONAME) $(TEVENT_SOLIB) $(TEVENT_STLIB)
- rm -f tevent.pc
- rm -f tevent.exports.sort tevent.exports.check tevent.exports.check.sort
- rm -f tevent.signatures.sort tevent.signatures.check tevent.signatures.check.sort
diff --git a/lib/tevent/tevent.pc.in b/lib/tevent/tevent.pc.in
index b1dd439785..1091ff00f1 100644
--- a/lib/tevent/tevent.pc.in
+++ b/lib/tevent/tevent.pc.in
@@ -8,5 +8,5 @@ Description: An event system library
Version: @PACKAGE_VERSION@
Requires: talloc
Libs: -L${libdir} -ltevent
-Cflags: -I${includedir}
+Cflags: @LIB_RPATH@ -I${includedir}
URL: http://samba.org/
diff --git a/lib/tevent/tevent.signatures b/lib/tevent/tevent.signatures
deleted file mode 100644
index c752b9e933..0000000000
--- a/lib/tevent/tevent.signatures
+++ /dev/null
@@ -1,57 +0,0 @@
-_Bool tevent_queue_add (struct tevent_queue *, struct tevent_context *, struct tevent_req *, tevent_queue_trigger_fn_t, void *);
-_Bool tevent_register_backend (const char *, const struct tevent_ops *);
-_Bool _tevent_req_error (struct tevent_req *, uint64_t, const char *);
-_Bool tevent_req_is_error (struct tevent_req *, enum tevent_req_state *, uint64_t *);
-_Bool tevent_req_is_in_progress (struct tevent_req *);
-_Bool _tevent_req_nomem (const void *, struct tevent_req *, const char *);
-_Bool tevent_req_poll (struct tevent_req *, struct tevent_context *);
-_Bool tevent_req_set_endtime (struct tevent_req *, struct tevent_context *, struct timeval);
-_Bool tevent_signal_support (struct tevent_context *);
-_Bool tevent_timeval_is_zero (const struct timeval *);
-_Bool tevent_wakeup_recv (struct tevent_req *);
-char *tevent_req_default_print (struct tevent_req *, TALLOC_CTX *);
-char *tevent_req_print (TALLOC_CTX *, struct tevent_req *);
-const char **tevent_backend_list (TALLOC_CTX *);
-int _tevent_loop_once (struct tevent_context *, const char *);
-int _tevent_loop_until (struct tevent_context *, _Bool (*) (void *), void *, const char *);
-int _tevent_loop_wait (struct tevent_context *, const char *);
-int tevent_set_debug_stderr (struct tevent_context *);
-int tevent_set_debug (struct tevent_context *, void (*) (void *, enum tevent_debug_level, const char *, va_list), void *);
-int tevent_timeval_compare (const struct timeval *, const struct timeval *);
-size_t tevent_queue_length (struct tevent_queue *);
-struct tevent_context *tevent_context_init_byname (TALLOC_CTX *, const char *);
-struct tevent_context *tevent_context_init (TALLOC_CTX *);
-struct tevent_fd *_tevent_add_fd (struct tevent_context *, TALLOC_CTX *, int, uint16_t, tevent_fd_handler_t, void *, const char *, const char *);
-struct tevent_immediate *_tevent_create_immediate (TALLOC_CTX *, const char *);
-struct tevent_queue *_tevent_queue_create (TALLOC_CTX *, const char *, const char *);
-struct tevent_req *_tevent_req_create (TALLOC_CTX *, void *, size_t, const char *, const char *);
-struct tevent_req *tevent_req_post (struct tevent_req *, struct tevent_context *);
-struct tevent_req *tevent_wakeup_send (TALLOC_CTX *, struct tevent_context *, struct timeval);
-struct tevent_signal *_tevent_add_signal (struct tevent_context *, TALLOC_CTX *, int, int, tevent_signal_handler_t, void *, const char *, const char *);
-struct tevent_timer *_tevent_add_timer (struct tevent_context *, TALLOC_CTX *, struct timeval, tevent_timer_handler_t, void *, const char *, const char *);
-struct timeval tevent_timeval_add (const struct timeval *, uint32_t, uint32_t);
-struct timeval tevent_timeval_current_ofs (uint32_t, uint32_t);
-struct timeval tevent_timeval_current (void);
-struct timeval tevent_timeval_set (uint32_t, uint32_t);
-struct timeval tevent_timeval_until (const struct timeval *, const struct timeval *);
-struct timeval tevent_timeval_zero (void);
-uint16_t tevent_fd_get_flags (struct tevent_fd *);
-void tevent_fd_set_auto_close (struct tevent_fd *);
-void tevent_fd_set_close_fn (struct tevent_fd *, tevent_fd_close_fn_t);
-void tevent_fd_set_flags (struct tevent_fd *, uint16_t);
-void tevent_loop_allow_nesting (struct tevent_context *);
-void tevent_loop_set_nesting_hook (struct tevent_context *, tevent_nesting_hook, void *);
-void tevent_queue_start (struct tevent_queue *);
-void tevent_queue_stop (struct tevent_queue *);
-void *_tevent_req_callback_data (struct tevent_req *);
-void *_tevent_req_data (struct tevent_req *);
-void _tevent_req_done (struct tevent_req *, const char *);
-void _tevent_req_notify_callback (struct tevent_req *, const char *);
-void tevent_req_received (struct tevent_req *);
-void tevent_req_set_callback (struct tevent_req *, tevent_req_fn, void *);
-void tevent_req_set_print_fn (struct tevent_req *, tevent_req_print_fn);
-void _tevent_schedule_immediate (struct tevent_immediate *, struct tevent_context *, tevent_immediate_handler_t, void *, const char *, const char *);
-void tevent_set_abort_fn (void (*) (const char *));
-void tevent_set_default_backend (const char *);
-_Bool _tevent_req_cancel (struct tevent_req *, const char *);
-void tevent_req_set_cancel_fn (struct tevent_req *, tevent_req_cancel_fn);
diff --git a/lib/tevent/tevent_epoll.c b/lib/tevent/tevent_epoll.c
index 7c7f389d5b..3ab8283955 100644
--- a/lib/tevent/tevent_epoll.c
+++ b/lib/tevent/tevent_epoll.c
@@ -42,8 +42,7 @@ struct epoll_event_context {
};
/*
- called when a epoll call fails, and we should fallback
- to using select
+ called when a epoll call fails
*/
static void epoll_panic(struct epoll_event_context *epoll_ev, const char *reason)
{
@@ -437,7 +436,7 @@ static const struct tevent_ops epoll_event_ops = {
.loop_wait = tevent_common_loop_wait,
};
-bool tevent_epoll_init(void)
+_PRIVATE_ bool tevent_epoll_init(void)
{
return tevent_register_backend("epoll", &epoll_event_ops);
}
diff --git a/lib/tevent/tevent_fd.c b/lib/tevent/tevent_fd.c
index c58e8e1ab1..455961b67c 100644
--- a/lib/tevent/tevent_fd.c
+++ b/lib/tevent/tevent_fd.c
@@ -51,6 +51,12 @@ struct tevent_fd *tevent_common_add_fd(struct tevent_context *ev, TALLOC_CTX *me
{
struct tevent_fd *fde;
+ /* tevent will crash later on select() if we save
+ * a negative file descriptor. Better to fail here
+ * so that consumers will be able to debug it
+ */
+ if (fd < 0) return NULL;
+
fde = talloc(mem_ctx?mem_ctx:ev, struct tevent_fd);
if (!fde) return NULL;
diff --git a/lib/tevent/tevent_internal.h b/lib/tevent/tevent_internal.h
index 7f5fd64854..9227f90315 100644
--- a/lib/tevent/tevent_internal.h
+++ b/lib/tevent/tevent_internal.h
@@ -162,7 +162,7 @@ struct tevent_fd {
const char *handler_name;
const char *location;
/* this is private for the events_ops implementation */
- uint16_t additional_flags;
+ uint64_t additional_flags;
void *additional_data;
};
@@ -303,6 +303,7 @@ void tevent_cleanup_pending_signal_handlers(struct tevent_signal *se);
bool tevent_standard_init(void);
bool tevent_select_init(void);
+bool tevent_poll_init(void);
#ifdef HAVE_EPOLL
bool tevent_epoll_init(void);
#endif
diff --git a/lib/tevent/tevent_liboop.c b/lib/tevent/tevent_liboop.c
index c8503e72b2..3d36b0e2a1 100644
--- a/lib/tevent/tevent_liboop.c
+++ b/lib/tevent/tevent_liboop.c
@@ -31,8 +31,8 @@
/*
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- NOTE: this code compiles fine, but is completly *UNTESTED*
- and is only commited as example
+ NOTE: this code compiles fine, but is completely *UNTESTED*
+ and is only committed as an example
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
*/
diff --git a/lib/tevent/tevent_poll.c b/lib/tevent/tevent_poll.c
new file mode 100644
index 0000000000..0b782e99bb
--- /dev/null
+++ b/lib/tevent/tevent_poll.c
@@ -0,0 +1,307 @@
+/*
+ Unix SMB/CIFS implementation.
+ main poll loop and event handling
+ Copyright (C) Andrew Tridgell 2003-2005
+ Copyright (C) Stefan Metzmacher 2005-2009
+
+ ** NOTE! The following LGPL license applies to the tevent
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/filesys.h"
+#include "system/select.h"
+#include "tevent.h"
+#include "tevent_util.h"
+#include "tevent_internal.h"
+
+struct poll_event_context {
+ /*
+ * These two arrays are maintained together.
+ */
+ struct pollfd *fds;
+ struct tevent_fd **fd_events;
+ uint64_t num_fds;
+
+ /* information for exiting from the event loop */
+ int exit_code;
+};
+
+/*
+ create a poll_event_context structure.
+*/
+static int poll_event_context_init(struct tevent_context *ev)
+{
+ struct poll_event_context *poll_ev;
+
+ poll_ev = talloc_zero(ev, struct poll_event_context);
+ if (poll_ev == NULL) {
+ return -1;
+ }
+ ev->additional_data = poll_ev;
+ return 0;
+}
+
+/*
+ destroy an fd_event
+*/
+static int poll_event_fd_destructor(struct tevent_fd *fde)
+{
+ struct tevent_context *ev = fde->event_ctx;
+ struct poll_event_context *poll_ev = NULL;
+ struct tevent_fd *moved_fde;
+ uint64_t del_idx = fde->additional_flags;
+
+ if (ev == NULL) {
+ goto done;
+ }
+
+ poll_ev = talloc_get_type_abort(
+ ev->additional_data, struct poll_event_context);
+
+ moved_fde = poll_ev->fd_events[poll_ev->num_fds-1];
+ poll_ev->fd_events[del_idx] = moved_fde;
+ poll_ev->fds[del_idx] = poll_ev->fds[poll_ev->num_fds-1];
+ moved_fde->additional_flags = del_idx;
+
+ poll_ev->num_fds -= 1;
+done:
+ return tevent_common_fd_destructor(fde);
+}
+
+/*
+ add a fd based event
+ return NULL on failure (memory allocation error)
+*/
+static struct tevent_fd *poll_event_add_fd(struct tevent_context *ev,
+ TALLOC_CTX *mem_ctx,
+ int fd, uint16_t flags,
+ tevent_fd_handler_t handler,
+ void *private_data,
+ const char *handler_name,
+ const char *location)
+{
+ struct poll_event_context *poll_ev = talloc_get_type_abort(
+ ev->additional_data, struct poll_event_context);
+ struct pollfd *pfd;
+ struct tevent_fd *fde;
+
+ fde = tevent_common_add_fd(ev, mem_ctx, fd, flags,
+ handler, private_data,
+ handler_name, location);
+ if (fde == NULL) {
+ return NULL;
+ }
+
+ /* we allocate 16 slots to avoid a lot of reallocations */
+ if (talloc_array_length(poll_ev->fds) == poll_ev->num_fds) {
+ struct pollfd *tmp_fds;
+ struct tevent_fd **tmp_fd_events;
+ tmp_fds = talloc_realloc(
+ poll_ev, poll_ev->fds, struct pollfd,
+ poll_ev->num_fds + 16);
+ if (tmp_fds == NULL) {
+ TALLOC_FREE(fde);
+ return NULL;
+ }
+ poll_ev->fds = tmp_fds;
+
+ tmp_fd_events = talloc_realloc(
+ poll_ev, poll_ev->fd_events, struct tevent_fd *,
+ poll_ev->num_fds + 16);
+ if (tmp_fd_events == NULL) {
+ TALLOC_FREE(fde);
+ return NULL;
+ }
+ poll_ev->fd_events = tmp_fd_events;
+ }
+
+ pfd = &poll_ev->fds[poll_ev->num_fds];
+
+ pfd->fd = fd;
+
+ pfd->events = 0;
+ pfd->revents = 0;
+
+ if (flags & TEVENT_FD_READ) {
+ pfd->events |= (POLLIN|POLLHUP);
+ }
+ if (flags & TEVENT_FD_WRITE) {
+ pfd->events |= (POLLOUT);
+ }
+
+ fde->additional_flags = poll_ev->num_fds;
+ poll_ev->fd_events[poll_ev->num_fds] = fde;
+
+ poll_ev->num_fds += 1;
+
+ talloc_set_destructor(fde, poll_event_fd_destructor);
+
+ return fde;
+}
+
+/*
+ set the fd event flags
+*/
+static void poll_event_set_fd_flags(struct tevent_fd *fde, uint16_t flags)
+{
+ struct poll_event_context *poll_ev = talloc_get_type_abort(
+ fde->event_ctx->additional_data, struct poll_event_context);
+ uint64_t idx = fde->additional_flags;
+ uint16_t pollflags = 0;
+
+ if (flags & TEVENT_FD_READ) {
+ pollflags |= (POLLIN|POLLHUP);
+ }
+ if (flags & TEVENT_FD_WRITE) {
+ pollflags |= (POLLOUT);
+ }
+
+ poll_ev->fds[idx].events = pollflags;
+
+ fde->flags = flags;
+}
+
+/*
+ event loop handling using poll()
+*/
+static int poll_event_loop_poll(struct tevent_context *ev,
+ struct timeval *tvalp)
+{
+ struct poll_event_context *poll_ev = talloc_get_type_abort(
+ ev->additional_data, struct poll_event_context);
+ struct tevent_fd *fde;
+ int pollrtn;
+ int timeout = -1;
+
+ if (ev->signal_events && tevent_common_check_signal(ev)) {
+ return 0;
+ }
+
+ if (tvalp != NULL) {
+ timeout = tvalp->tv_sec * 1000;
+ timeout += (tvalp->tv_usec + 999) / 1000;
+ }
+
+ pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
+
+ if (pollrtn == -1 && errno == EINTR && ev->signal_events) {
+ tevent_common_check_signal(ev);
+ return 0;
+ }
+
+ if (pollrtn == -1 && errno == EBADF) {
+ /* the socket is dead! this should never
+ happen as the socket should have first been
+ made readable and that should have removed
+ the event, so this must be a bug. This is a
+ fatal error. */
+ tevent_debug(ev, TEVENT_DEBUG_FATAL,
+ "ERROR: EBADF on poll_event_loop_once\n");
+ poll_ev->exit_code = EBADF;
+ return -1;
+ }
+
+ if (pollrtn == 0 && tvalp) {
+ /* we don't care about a possible delay here */
+ tevent_common_loop_timer_delay(ev);
+ return 0;
+ }
+
+ if (pollrtn > 0) {
+ /* at least one file descriptor is ready - check
+ which ones and call the handler, being careful to allow
+ the handler to remove itself when called */
+ for (fde = ev->fd_events; fde; fde = fde->next) {
+ struct pollfd *pfd;
+ uint64_t pfd_idx = fde->additional_flags;
+ uint16_t flags = 0;
+
+ pfd = &poll_ev->fds[pfd_idx];
+
+ if (pfd->revents & (POLLHUP|POLLERR)) {
+ /* If we only wait for TEVENT_FD_WRITE, we
+ should not tell the event handler about it,
+ and remove the writable flag, as we only
+ report errors when waiting for read events
+ to match the select behavior. */
+ if (!(fde->flags & TEVENT_FD_READ)) {
+ TEVENT_FD_NOT_WRITEABLE(fde);
+ continue;
+ }
+ flags |= TEVENT_FD_READ;
+ }
+ if (pfd->revents & POLLIN) {
+ flags |= TEVENT_FD_READ;
+ }
+ if (pfd->revents & POLLOUT) {
+ flags |= TEVENT_FD_WRITE;
+ }
+ if (flags != 0) {
+ fde->handler(ev, fde, flags,
+ fde->private_data);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ do a single event loop using the events defined in ev
+*/
+static int poll_event_loop_once(struct tevent_context *ev,
+ const char *location)
+{
+ struct timeval tval;
+
+ if (ev->signal_events &&
+ tevent_common_check_signal(ev)) {
+ return 0;
+ }
+
+ if (ev->immediate_events &&
+ tevent_common_loop_immediate(ev)) {
+ return 0;
+ }
+
+ tval = tevent_common_loop_timer_delay(ev);
+ if (tevent_timeval_is_zero(&tval)) {
+ return 0;
+ }
+
+ return poll_event_loop_poll(ev, &tval);
+}
+
+static const struct tevent_ops poll_event_ops = {
+ .context_init = poll_event_context_init,
+ .add_fd = poll_event_add_fd,
+ .set_fd_close_fn = tevent_common_fd_set_close_fn,
+ .get_fd_flags = tevent_common_fd_get_flags,
+ .set_fd_flags = poll_event_set_fd_flags,
+ .add_timer = tevent_common_add_timer,
+ .schedule_immediate = tevent_common_schedule_immediate,
+ .add_signal = tevent_common_add_signal,
+ .loop_once = poll_event_loop_once,
+ .loop_wait = tevent_common_loop_wait,
+};
+
+_PRIVATE_ bool tevent_poll_init(void)
+{
+ return tevent_register_backend("poll", &poll_event_ops);
+}
diff --git a/lib/tevent/tevent_req.c b/lib/tevent/tevent_req.c
index 345a2fdcd1..b0c9c57dde 100644
--- a/lib/tevent/tevent_req.c
+++ b/lib/tevent/tevent_req.c
@@ -27,17 +27,6 @@
#include "tevent_internal.h"
#include "tevent_util.h"
-/**
- * @brief The default print function for creating debug messages
- * @param[in] req The request to be printed
- * @param[in] mem_ctx The memory context for the result
- * @retval Text representation of req
- *
- * The function should not be used by users of the asynx API,
- * but custom print function can use it and append custom text
- * to the string.
- */
-
char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
{
return talloc_asprintf(mem_ctx,
@@ -53,15 +42,6 @@ char *tevent_req_default_print(struct tevent_req *req, TALLOC_CTX *mem_ctx)
);
}
-/**
- * @brief Print an tevent_req structure in debug messages
- * @param[in] mem_ctx The memory context for the result
- * @param[in] req The request to be printed
- * @retval Text representation of req
- *
- * This function should be used by callers of the async API
- */
-
char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
{
if (!req->private_print) {
@@ -71,15 +51,6 @@ char *tevent_req_print(TALLOC_CTX *mem_ctx, struct tevent_req *req)
return req->private_print(req, mem_ctx);
}
-/**
- * @brief Create an async request
- * @param[in] mem_ctx The memory context for the result
- * @param[in] ev The event context this async request will be driven by
- * @retval A new async request
- *
- * The new async request will be initialized in state ASYNC_REQ_IN_PROGRESS
- */
-
struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
void *pdata,
size_t data_size,
@@ -133,49 +104,12 @@ static void tevent_req_finish(struct tevent_req *req,
_tevent_req_notify_callback(req, location);
}
-/**
- * @brief An async request has successfully finished
- * @param[in] req The finished request
- *
- * tevent_req_done is to be used by implementors of async requests. When a
- * request is successfully finished, this function calls the user's completion
- * function.
- */
-
void _tevent_req_done(struct tevent_req *req,
const char *location)
{
tevent_req_finish(req, TEVENT_REQ_DONE, location);
}
-/**
- * @brief An async request has seen an error
- * @param[in] req The request with an error
- * @param[in] error The error code
- *
- * tevent_req_done is to be used by implementors of async requests. When a
- * request can not successfully completed, the implementation should call this
- * function with the appropriate status code.
- *
- * If error is 0 the function returns false and does nothing more.
- *
- * Call pattern would be
- * \code
- * int error = first_function();
- * if (tevent_req_error(req, error)) {
- * return;
- * }
- *
- * error = second_function();
- * if (tevent_req_error(req, error)) {
- * return;
- * }
- *
- * tevent_req_done(req);
- * return;
- * \endcode
- */
-
bool _tevent_req_error(struct tevent_req *req,
uint64_t error,
const char *location)
@@ -189,23 +123,6 @@ bool _tevent_req_error(struct tevent_req *req,
return true;
}
-/**
- * @brief Helper function for nomem check
- * @param[in] p The pointer to be checked
- * @param[in] req The request being processed
- *
- * Convenience helper to easily check alloc failure within a callback
- * implementing the next step of an async request.
- *
- * Call pattern would be
- * \code
- * p = talloc(mem_ctx, bla);
- * if (tevent_req_nomem(p, req)) {
- * return;
- * }
- * \endcode
- */
-
bool _tevent_req_nomem(const void *p,
struct tevent_req *req,
const char *location)
@@ -218,10 +135,15 @@ bool _tevent_req_nomem(const void *p,
}
/**
- * @brief Immediate event callback
- * @param[in] ev Event context
- * @param[in] im The immediate event
- * @param[in] priv The async request to be finished
+ * @internal
+ *
+ * @brief Immediate event callback.
+ *
+ * @param[in] ev The event context to use.
+ *
+ * @param[in] im The immediate event.
+ *
+ * @param[in] priv The async request to be finished.
*/
static void tevent_req_trigger(struct tevent_context *ev,
struct tevent_immediate *im,
@@ -234,20 +156,6 @@ static void tevent_req_trigger(struct tevent_context *ev,
req->internal.finish_location);
}
-/**
- * @brief Finish a request before the caller had the change to set the callback
- * @param[in] req The finished request
- * @param[in] ev The tevent_context for the timed event
- * @retval req will be returned
- *
- * An implementation of an async request might find that it can either finish
- * the request without waiting for an external event, or it can't even start
- * the engine. To present the illusion of a callback to the user of the API,
- * the implementation can call this helper function which triggers an
- * immediate timed event. This way the caller can use the same calling
- * conventions, independent of whether the request was actually deferred.
- */
-
struct tevent_req *tevent_req_post(struct tevent_req *req,
struct tevent_context *ev)
{
@@ -256,16 +164,6 @@ struct tevent_req *tevent_req_post(struct tevent_req *req,
return req;
}
-/**
- * @brief This function destroys the attached private data
- * @param[in] req The request to poll
- * @retval The boolean form of "is in progress".
- *
- * This function can be used to ask if the given request
- * is still in progress.
- *
- * This function is typically used by sync wrapper functions.
- */
bool tevent_req_is_in_progress(struct tevent_req *req)
{
if (req->internal.state == TEVENT_REQ_IN_PROGRESS) {
@@ -275,13 +173,6 @@ bool tevent_req_is_in_progress(struct tevent_req *req)
return false;
}
-/**
- * @brief This function destroys the attached private data
- * @param[in] req The finished request
- *
- * This function can be called as last action of a _recv()
- * function, it destroys the data attached to the tevent_req.
- */
void tevent_req_received(struct tevent_req *req)
{
TALLOC_FREE(req->data);
@@ -293,23 +184,6 @@ void tevent_req_received(struct tevent_req *req)
req->internal.state = TEVENT_REQ_RECEIVED;
}
-/**
- * @brief This function destroys the attached private data
- * @param[in] req The request to poll
- * @param[in] ev The tevent_context to be used
- * @retval If a critical error has happened in the
- * tevent loop layer false is returned.
- * Otherwise true is returned.
- * This is not the return value of the given request!
- *
- * This function can be used to actively poll for the
- * given request to finish.
- *
- * Note: this should only be used if the given tevent context
- * was created by the caller, to avoid event loop nesting.
- *
- * This function is typically used by sync wrapper functions.
- */
bool tevent_req_poll(struct tevent_req *req,
struct tevent_context *ev)
{
@@ -383,56 +257,16 @@ void *_tevent_req_data(struct tevent_req *req)
return req->data;
}
-/**
- * @brief This function sets a print function for the given request
- * @param[in] req The given request
- * @param[in] fn A pointer to the print function
- *
- * This function can be used to setup a print function for the given request.
- * This will be triggered if the tevent_req_print() function was
- * called on the given request.
- *
- * Note: this function should only be used for debugging.
- */
void tevent_req_set_print_fn(struct tevent_req *req, tevent_req_print_fn fn)
{
req->private_print = fn;
}
-/**
- * @brief This function sets a cancel function for the given request
- * @param[in] req The given request
- * @param[in] fn A pointer to the cancel function
- *
- * This function can be used to setup a cancel function for the given request.
- * This will be triggered if the tevent_req_cancel() function was
- * called on the given request.
- *
- */
void tevent_req_set_cancel_fn(struct tevent_req *req, tevent_req_cancel_fn fn)
{
req->private_cancel = fn;
}
-/**
- * @brief This function tries to cancel the given request
- * @param[in] req The given request
- * @param[in] location Automaticly filled with the __location__ macro
- * via the tevent_req_cancel() macro. This is for debugging
- * only!
- * @retval This function returns true is the request is cancelable.
- * Otherwise false is returned.
- *
- * This function can be used to cancel the given request.
- *
- * It is only possible to cancel a request when the implementation
- * has registered a cancel function via the tevent_req_set_cancel_fn().
- *
- * Note: Even if the function returns true, the caller need to wait
- * for the function to complete normally.
- * Only the _recv() function of the given request indicates
- * if the request was really canceled.
- */
bool _tevent_req_cancel(struct tevent_req *req, const char *location)
{
if (req->private_cancel == NULL) {
diff --git a/lib/tevent/tevent_select.c b/lib/tevent/tevent_select.c
index 890e0311c6..94faa86593 100644
--- a/lib/tevent/tevent_select.c
+++ b/lib/tevent/tevent_select.c
@@ -121,7 +121,8 @@ static struct tevent_fd *select_event_add_fd(struct tevent_context *ev, TALLOC_C
handler_name, location);
if (!fde) return NULL;
- if (fde->fd > select_ev->maxfd) {
+ if ((select_ev->maxfd != EVENT_INVALID_MAXFD)
+ && (fde->fd > select_ev->maxfd)) {
select_ev->maxfd = fde->fd;
}
talloc_set_destructor(fde, select_event_fd_destructor);
@@ -251,7 +252,7 @@ static const struct tevent_ops select_event_ops = {
.loop_wait = tevent_common_loop_wait,
};
-bool tevent_select_init(void)
+_PRIVATE_ bool tevent_select_init(void)
{
return tevent_register_backend("select", &select_event_ops);
}
diff --git a/lib/tevent/tevent_signal.c b/lib/tevent/tevent_signal.c
index 45f65cf6dd..dbab8a8ada 100644
--- a/lib/tevent/tevent_signal.c
+++ b/lib/tevent/tevent_signal.c
@@ -210,7 +210,7 @@ struct tevent_signal *tevent_common_add_signal(struct tevent_context *ev,
/* the sig_state needs to be on a global context as it can last across
multiple event contexts */
if (sig_state == NULL) {
- sig_state = talloc_zero(talloc_autofree_context(), struct tevent_sig_state);
+ sig_state = talloc_zero(NULL, struct tevent_sig_state);
if (sig_state == NULL) {
return NULL;
}
diff --git a/lib/tevent/tevent_standard.c b/lib/tevent/tevent_standard.c
index d4a00cc062..35f7ded9b7 100644
--- a/lib/tevent/tevent_standard.c
+++ b/lib/tevent/tevent_standard.c
@@ -461,7 +461,6 @@ static int std_event_loop_select(struct std_event_context *std_ev, struct timeva
std_ev->exit_code = EBADF;
return -1;
}
-
if (fde->flags & TEVENT_FD_READ) {
FD_SET(fde->fd, &r_fds);
}
@@ -510,7 +509,7 @@ static int std_event_loop_select(struct std_event_context *std_ev, struct timeva
if (FD_ISSET(fde->fd, &r_fds)) flags |= TEVENT_FD_READ;
if (FD_ISSET(fde->fd, &w_fds)) flags |= TEVENT_FD_WRITE;
- if (flags) {
+ if (flags & fde->flags) {
fde->handler(std_ev->ev, fde, flags, fde->private_data);
break;
}
@@ -567,7 +566,7 @@ static const struct tevent_ops std_event_ops = {
};
-bool tevent_standard_init(void)
+_PRIVATE_ bool tevent_standard_init(void)
{
return tevent_register_backend("standard", &std_event_ops);
}
diff --git a/lib/tevent/tevent_timed.c b/lib/tevent/tevent_timed.c
index cc51bf60d5..f7c39697d9 100644
--- a/lib/tevent/tevent_timed.c
+++ b/lib/tevent/tevent_timed.c
@@ -197,7 +197,7 @@ struct tevent_timer *tevent_common_add_timer(struct tevent_context *ev, TALLOC_C
/*
do a single event loop using the events defined in ev
- return the delay untill the next timed event,
+ return the delay until the next timed event,
or zero if a timed event was triggered
*/
struct timeval tevent_common_loop_timer_delay(struct tevent_context *ev)
@@ -208,7 +208,7 @@ struct timeval tevent_common_loop_timer_delay(struct tevent_context *ev)
if (!te) {
/* have a default tick time of 30 seconds. This guarantees
that code that uses its own timeout checking will be
- able to proceeed eventually */
+ able to proceed eventually */
return tevent_timeval_set(30, 0);
}
diff --git a/lib/tevent/tevent_util.h b/lib/tevent/tevent_util.h
index 829cbc2f6e..46a4506dac 100644
--- a/lib/tevent/tevent_util.h
+++ b/lib/tevent/tevent_util.h
@@ -1,7 +1,7 @@
/*
Unix SMB/CIFS implementation.
- Copyright (C) Andrew Tridgell 1998-2005
+ Copyright (C) Andrew Tridgell 1998-2010
Copyright (C) Jelmer Vernooij 2005
This program is free software; you can redistribute it and/or modify
@@ -24,55 +24,94 @@
#ifndef _DLINKLIST_H
#define _DLINKLIST_H
+/*
+ February 2010 - changed list format to have a prev pointer from the
+ list head. This makes DLIST_ADD_END() O(1) even though we only have
+ one list pointer.
+
+ The scheme is as follows:
+
+ 1) with no entries in the list:
+ list_head == NULL
+
+ 2) with 1 entry in the list:
+ list_head->next == NULL
+ list_head->prev == list_head
+
+ 3) with 2 entries in the list:
+ list_head->next == element2
+ list_head->prev == element2
+ element2->prev == list_head
+ element2->next == NULL
+
+ 4) with N entries in the list:
+ list_head->next == element2
+ list_head->prev == elementN
+ elementN->prev == element{N-1}
+ elementN->next == NULL
+
+ This allows us to find the tail of the list by using
+ list_head->prev, which means we can add to the end of the list in
+ O(1) time
-/* hook into the front of the list */
+
+ Note that the 'type' arguments below are no longer needed, but
+ are kept for now to prevent an incompatible argument change
+ */
+
+
+/*
+ add an element at the front of a list
+*/
#define DLIST_ADD(list, p) \
do { \
if (!(list)) { \
- (list) = (p); \
- (p)->next = (p)->prev = NULL; \
+ (p)->prev = (list) = (p); \
+ (p)->next = NULL; \
} else { \
+ (p)->prev = (list)->prev; \
(list)->prev = (p); \
(p)->next = (list); \
- (p)->prev = NULL; \
(list) = (p); \
- }\
+ } \
} while (0)
-/* remove an element from a list - element doesn't have to be in list. */
+/*
+ remove an element from a list
+ Note that the element doesn't have to be in the list. If it
+ isn't then this is a no-op
+*/
#define DLIST_REMOVE(list, p) \
do { \
if ((p) == (list)) { \
+ if ((p)->next) (p)->next->prev = (p)->prev; \
(list) = (p)->next; \
- if (list) (list)->prev = NULL; \
+ } else if ((list) && (p) == (list)->prev) { \
+ (p)->prev->next = NULL; \
+ (list)->prev = (p)->prev; \
} else { \
if ((p)->prev) (p)->prev->next = (p)->next; \
if ((p)->next) (p)->next->prev = (p)->prev; \
} \
- if ((p) != (list)) (p)->next = (p)->prev = NULL; \
+ if ((p) != (list)) (p)->next = (p)->prev = NULL; \
} while (0)
-/* promote an element to the top of the list */
-#define DLIST_PROMOTE(list, p) \
+/*
+ find the head of the list given any element in it.
+ Note that this costs O(N), so you should avoid this macro
+ if at all possible!
+*/
+#define DLIST_HEAD(p, result_head) \
do { \
- DLIST_REMOVE(list, p); \
- DLIST_ADD(list, p); \
-} while (0)
+ (result_head) = (p); \
+ while (DLIST_PREV(result_head)) (result_head) = (result_head)->prev; \
+} while(0)
-/* hook into the end of the list - needs a tmp pointer */
-#define DLIST_ADD_END(list, p, type) \
-do { \
- if (!(list)) { \
- (list) = (p); \
- (p)->next = (p)->prev = NULL; \
- } else { \
- type tmp; \
- for (tmp = (list); tmp->next; tmp = tmp->next) ; \
- tmp->next = (p); \
- (p)->next = NULL; \
- (p)->prev = tmp; \
- } \
-} while (0)
+/* return the last element in the list */
+#define DLIST_TAIL(list) ((list)?(list)->prev:NULL)
+
+/* return the previous element in the list. */
+#define DLIST_PREV(p) (((p)->prev && (p)->prev->next != NULL)?(p)->prev:NULL)
/* insert 'p' after the given element 'el' in a list. If el is NULL then
this is the same as a DLIST_ADD() */
@@ -81,34 +120,62 @@ do { \
if (!(list) || !(el)) { \
DLIST_ADD(list, p); \
} else { \
- p->prev = el; \
- p->next = el->next; \
- el->next = p; \
- if (p->next) p->next->prev = p; \
+ (p)->prev = (el); \
+ (p)->next = (el)->next; \
+ (el)->next = (p); \
+ if ((p)->next) (p)->next->prev = (p); \
+ if ((list)->prev == (el)) (list)->prev = (p); \
}\
} while (0)
-/* demote an element to the end of the list, needs a tmp pointer */
-#define DLIST_DEMOTE(list, p, tmp) \
+
+/*
+ add to the end of a list.
+ Note that 'type' is ignored
+*/
+#define DLIST_ADD_END(list, p, type) \
+do { \
+ if (!(list)) { \
+ DLIST_ADD(list, p); \
+ } else { \
+ DLIST_ADD_AFTER(list, p, (list)->prev); \
+ } \
+} while (0)
+
+/* promote an element to the front of a list */
+#define DLIST_PROMOTE(list, p) \
+do { \
+ DLIST_REMOVE(list, p); \
+ DLIST_ADD(list, p); \
+} while (0)
+
+/*
+ demote an element to the end of a list.
+ Note that 'type' is ignored
+*/
+#define DLIST_DEMOTE(list, p, type) \
do { \
- DLIST_REMOVE(list, p); \
- DLIST_ADD_END(list, p, tmp); \
+ DLIST_REMOVE(list, p); \
+ DLIST_ADD_END(list, p, NULL); \
} while (0)
-/* concatenate two lists - putting all elements of the 2nd list at the
- end of the first list */
-#define DLIST_CONCATENATE(list1, list2, type) \
+/*
+ concatenate two lists - putting all elements of the 2nd list at the
+ end of the first list.
+ Note that 'type' is ignored
+*/
+#define DLIST_CONCATENATE(list1, list2, type) \
do { \
- if (!(list1)) { \
- (list1) = (list2); \
- } else { \
- type tmp; \
- for (tmp = (list1); tmp->next; tmp = tmp->next) ; \
- tmp->next = (list2); \
- if (list2) { \
- (list2)->prev = tmp; \
- } \
+ if (!(list1)) { \
+ (list1) = (list2); \
+ } else { \
+ (list1)->prev->next = (list2); \
+ if (list2) { \
+ void *_tmplist = (void *)(list1)->prev; \
+ (list1)->prev = (list2)->prev; \
+ (list2)->prev = _tmplist; \
} \
+ } \
} while (0)
#endif /* _DLINKLIST_H */
diff --git a/lib/tevent/wscript b/lib/tevent/wscript
new file mode 100644
index 0000000000..de245d92e7
--- /dev/null
+++ b/lib/tevent/wscript
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+APPNAME = 'tevent'
+VERSION = '0.9.11'
+
+blddir = 'bin'
+
+import sys, os
+
+# find the buildtools directory
+srcdir = '.'
+while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5:
+ srcdir = '../' + srcdir
+sys.path.insert(0, srcdir + '/buildtools/wafsamba')
+
+import wafsamba, samba_dist, Options, Logs
+
+samba_dist.DIST_DIRS('lib/tevent:. lib/replace:lib/replace lib/talloc:lib/talloc buildtools:buildtools')
+
+def set_options(opt):
+ opt.BUILTIN_DEFAULT('replace')
+ opt.PRIVATE_EXTENSION_DEFAULT('tevent', noextension='tevent')
+ opt.RECURSE('lib/replace')
+ opt.RECURSE('lib/talloc')
+ if opt.IN_LAUNCH_DIR():
+ opt.add_option('--disable-python',
+ help=("disable the pytevent module"),
+ action="store_true", dest='disable_python', default=False)
+
+
+def configure(conf):
+ conf.RECURSE('lib/replace')
+ conf.RECURSE('lib/talloc')
+
+ conf.env.standalone_tevent = conf.IN_LAUNCH_DIR()
+
+ if not conf.env.standalone_tevent:
+ if conf.CHECK_BUNDLED_SYSTEM('tevent', minversion=VERSION,
+ onlyif='talloc', implied_deps='replace talloc'):
+ conf.define('USING_SYSTEM_TEVENT', 1)
+
+ if conf.CHECK_FUNCS('epoll_create', headers='sys/epoll.h'):
+ conf.DEFINE('HAVE_EPOLL', 1)
+
+ conf.env.disable_python = getattr(Options.options, 'disable_python', False)
+
+ if not conf.env.disable_python:
+ # also disable if we don't have the python libs installed
+ conf.check_tool('python')
+ conf.check_python_version((2,4,2))
+ conf.SAMBA_CHECK_PYTHON_HEADERS(mandatory=False)
+ if not conf.env.HAVE_PYTHON_H:
+ Logs.warn('Disabling pytevent as python devel libs not found')
+ conf.env.disable_python = True
+
+ conf.SAMBA_CONFIG_H()
+
+def build(bld):
+ bld.RECURSE('lib/replace')
+ bld.RECURSE('lib/talloc')
+
+ SRC = '''tevent.c tevent_debug.c tevent_fd.c tevent_immediate.c
+ tevent_queue.c tevent_req.c tevent_select.c
+ tevent_poll.c
+ tevent_signal.c tevent_standard.c tevent_timed.c tevent_util.c tevent_wakeup.c'''
+
+ if bld.CONFIG_SET('HAVE_EPOLL'):
+ SRC += ' tevent_epoll.c'
+
+ if bld.env.standalone_tevent:
+ bld.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig'
+ bld.PKG_CONFIG_FILES('tevent.pc', vnum=VERSION)
+ private_library = False
+ else:
+ private_library = True
+
+ if not bld.CONFIG_SET('USING_SYSTEM_TEVENT'):
+ bld.SAMBA_LIBRARY('tevent',
+ SRC,
+ deps='replace talloc',
+ enabled= not bld.CONFIG_SET('USING_SYSTEM_TEVENT'),
+ includes='.',
+ abi_directory='ABI',
+ abi_match='tevent_* _tevent_*',
+ vnum=VERSION,
+ public_headers='tevent.h',
+ public_headers_install=not private_library,
+ private_library=private_library)
+
+ bld.SAMBA_PYTHON('pytevent',
+ 'pytevent.c',
+ deps='tevent',
+ enabled=True,
+ realname='_tevent.so')
+
+
+def test(ctx):
+ '''test tevent'''
+ print("The tevent testsuite is part of smbtorture in samba4")
+
+
+def dist():
+ '''makes a tarball for distribution'''
+ samba_dist.dist()
+
+def reconfigure(ctx):
+ '''reconfigure if config scripts have changed'''
+ import samba_utils
+ samba_utils.reconfigure(ctx)
diff --git a/lib/torture/config.mk b/lib/torture/config.mk
deleted file mode 100644
index b4ad9ae17f..0000000000
--- a/lib/torture/config.mk
+++ /dev/null
@@ -1,17 +0,0 @@
-# TORTURE subsystem
-[LIBRARY::torture]
-PUBLIC_DEPENDENCIES = \
- LIBSAMBA-HOSTCONFIG \
- LIBSAMBA-UTIL \
- LIBSAMBA-ERRORS \
- LIBTALLOC \
- LIBTEVENT
-CFLAGS = -I$(libtorturesrcdir) -I$(libtorturesrcdir)/../
-
-torture_VERSION = 0.0.1
-torture_SOVERSION = 0
-
-PC_FILES += $(libtorturesrcdir)/torture.pc
-torture_OBJ_FILES = $(addprefix $(libtorturesrcdir)/, torture.o subunit.o)
-
-PUBLIC_HEADERS += $(libtorturesrcdir)/torture.h
diff --git a/lib/torture/subunit.c b/lib/torture/subunit.c
index 832f11fafc..a190975a08 100644
--- a/lib/torture/subunit.c
+++ b/lib/torture/subunit.c
@@ -19,72 +19,119 @@
#include "includes.h"
#include "lib/torture/torture.h"
+#include <subunit/child.h>
-static void subunit_suite_start(struct torture_context *ctx,
+static void torture_subunit_suite_start(struct torture_context *ctx,
struct torture_suite *suite)
{
}
-static void subunit_print_testname(struct torture_context *ctx,
+static char *torture_subunit_test_name(struct torture_context *ctx,
struct torture_tcase *tcase,
struct torture_test *test)
{
if (!strcmp(tcase->name, test->name)) {
- printf("%s", test->name);
+ return talloc_strdup(ctx, test->name);
} else {
- printf("%s.%s", tcase->name, test->name);
+ return talloc_asprintf(ctx, "%s.%s", tcase->name, test->name);
}
}
-static void subunit_test_start(struct torture_context *ctx,
+static void torture_subunit_report_time(struct torture_context *tctx)
+{
+ struct timespec tp;
+ struct tm *tmp;
+ char timestr[200];
+ if (clock_gettime(CLOCK_REALTIME, &tp) != 0) {
+ perror("clock_gettime");
+ return;
+ }
+
+ tmp = localtime(&tp.tv_sec);
+ if (!tmp) {
+ perror("localtime");
+ return;
+ }
+
+ if (strftime(timestr, sizeof(timestr), "%Y-%m-%d %H:%M:%S", tmp) <= 0) {
+ perror("strftime");
+ return;
+ }
+
+ printf("time: %s.%06ld\n", timestr, tp.tv_nsec / 1000);
+}
+
+static void torture_subunit_test_start(struct torture_context *context,
struct torture_tcase *tcase,
struct torture_test *test)
{
- printf("test: ");
- subunit_print_testname(ctx, tcase, test);
- printf("\n");
+ char *fullname = torture_subunit_test_name(context, context->active_tcase, context->active_test);
+ subunit_test_start(fullname);
+ torture_subunit_report_time(context);
+ talloc_free(fullname);
}
-static void subunit_test_result(struct torture_context *context,
+static void torture_subunit_test_result(struct torture_context *context,
enum torture_result res, const char *reason)
{
+ char *fullname = torture_subunit_test_name(context, context->active_tcase, context->active_test);
+ torture_subunit_report_time(context);
switch (res) {
case TORTURE_OK:
- printf("success: ");
+ subunit_test_pass(fullname);
break;
case TORTURE_FAIL:
- printf("failure: ");
+ subunit_test_fail(fullname, reason);
break;
case TORTURE_ERROR:
- printf("error: ");
+ subunit_test_error(fullname, reason);
break;
case TORTURE_SKIP:
- printf("skip: ");
+ subunit_test_skip(fullname, reason);
break;
}
- subunit_print_testname(context, context->active_tcase, context->active_test);
-
- if (reason)
- printf(" [\n%s\n]", reason);
- printf("\n");
+ talloc_free(fullname);
}
-static void subunit_comment(struct torture_context *test,
+static void torture_subunit_comment(struct torture_context *test,
const char *comment)
{
fprintf(stderr, "%s", comment);
}
-static void subunit_warning(struct torture_context *test,
+static void torture_subunit_warning(struct torture_context *test,
const char *comment)
{
fprintf(stderr, "WARNING!: %s\n", comment);
}
+static void torture_subunit_progress(struct torture_context *tctx, int offset, enum torture_progress_whence whence)
+{
+ switch (whence) {
+ case TORTURE_PROGRESS_SET:
+ printf("progress: %d\n", offset);
+ break;
+ case TORTURE_PROGRESS_CUR:
+ printf("progress: %+-d\n", offset);
+ break;
+ case TORTURE_PROGRESS_POP:
+ printf("progress: pop\n");
+ break;
+ case TORTURE_PROGRESS_PUSH:
+ printf("progress: push\n");
+ break;
+ default:
+ fprintf(stderr, "Invalid call to progress()\n");
+ break;
+ }
+}
+
const struct torture_ui_ops torture_subunit_ui_ops = {
- .comment = subunit_comment,
- .warning = subunit_warning,
- .test_start = subunit_test_start,
- .test_result = subunit_test_result,
- .suite_start = subunit_suite_start
+ .comment = torture_subunit_comment,
+ .warning = torture_subunit_warning,
+ .test_start = torture_subunit_test_start,
+ .test_result = torture_subunit_test_result,
+ .suite_start = torture_subunit_suite_start,
+ .progress = torture_subunit_progress,
+ .report_time = torture_subunit_report_time,
};
diff --git a/lib/torture/torture.c b/lib/torture/torture.c
index 392cb0ad4c..a0b35bfe7c 100644
--- a/lib/torture/torture.c
+++ b/lib/torture/torture.c
@@ -1,19 +1,19 @@
-/*
+/*
Unix SMB/CIFS implementation.
SMB torture UI functions
Copyright (C) Jelmer Vernooij 2006-2008
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -23,6 +23,8 @@
#include "../lib/util/dlinklist.h"
#include "param/param.h"
#include "system/filesys.h"
+#include "system/dir.h"
+
struct torture_results *torture_results_init(TALLOC_CTX *mem_ctx, const struct torture_ui_ops *ui_ops)
{
@@ -71,18 +73,17 @@ struct torture_context *torture_context_child(struct torture_context *parent)
subtorture->results = talloc_reference(subtorture, parent->results);
return subtorture;
-}
+}
/**
- create a temporary directory.
+ create a temporary directory under the output dir
*/
-_PUBLIC_ NTSTATUS torture_temp_dir(struct torture_context *tctx,
- const char *prefix,
- char **tempdir)
+_PUBLIC_ NTSTATUS torture_temp_dir(struct torture_context *tctx,
+ const char *prefix, char **tempdir)
{
SMB_ASSERT(tctx->outputdir != NULL);
- *tempdir = talloc_asprintf(tctx, "%s/%s.XXXXXX", tctx->outputdir,
+ *tempdir = talloc_asprintf(tctx, "%s/%s.XXXXXX", tctx->outputdir,
prefix);
NT_STATUS_HAVE_NO_MEMORY(*tempdir);
@@ -93,6 +94,73 @@ _PUBLIC_ NTSTATUS torture_temp_dir(struct torture_context *tctx,
return NT_STATUS_OK;
}
+static int local_deltree(const char *path)
+{
+ int ret = 0;
+ struct dirent *dirent;
+ DIR *dir = opendir(path);
+ if (!dir) {
+ char *error = talloc_asprintf(NULL, "Could not open directory %s", path);
+ perror(error);
+ talloc_free(error);
+ return -1;
+ }
+ while ((dirent = readdir(dir))) {
+ char *name;
+ if ((strcmp(dirent->d_name, ".") == 0) || (strcmp(dirent->d_name, "..") == 0)) {
+ continue;
+ }
+ name = talloc_asprintf(NULL, "%s/%s", path,
+ dirent->d_name);
+ if (name == NULL) {
+ closedir(dir);
+ return -1;
+ }
+ DEBUG(0, ("About to remove %s\n", name));
+ ret = remove(name);
+ if (ret == 0) {
+ talloc_free(name);
+ continue;
+ }
+
+ if (errno == ENOTEMPTY) {
+ ret = local_deltree(name);
+ if (ret == 0) {
+ ret = remove(name);
+ }
+ }
+ talloc_free(name);
+ if (ret != 0) {
+ char *error = talloc_asprintf(NULL, "Could not remove %s", path);
+ perror(error);
+ talloc_free(error);
+ break;
+ }
+ }
+ closedir(dir);
+ rmdir(path);
+ return ret;
+}
+
+_PUBLIC_ NTSTATUS torture_deltree_outputdir(struct torture_context *tctx)
+{
+ if (tctx->outputdir == NULL) {
+ return NT_STATUS_OK;
+ }
+ if ((strcmp(tctx->outputdir, "/") == 0)
+ || (strcmp(tctx->outputdir, "") == 0)) {
+ return NT_STATUS_INVALID_PARAMETER;
+ }
+
+ if (local_deltree(tctx->outputdir) == -1) {
+ if (errno != 0) {
+ return map_nt_error_from_unix(errno);
+ }
+ return NT_STATUS_UNSUCCESSFUL;
+ }
+ return NT_STATUS_OK;
+}
+
/**
* Comment on the status/progress of a test
*/
@@ -107,9 +175,9 @@ void torture_comment(struct torture_context *context, const char *comment, ...)
va_start(ap, comment);
tmp = talloc_vasprintf(context, comment, ap);
va_end(ap);
-
+
context->results->ui_ops->comment(context, tmp);
-
+
talloc_free(tmp);
}
@@ -245,38 +313,56 @@ struct torture_tcase *torture_suite_add_tcase(struct torture_suite *suite,
return tcase;
}
+int torture_suite_children_count(const struct torture_suite *suite)
+{
+ int ret = 0;
+ struct torture_tcase *tcase;
+ struct torture_test *test;
+ struct torture_suite *tsuite;
+ for (tcase = suite->testcases; tcase; tcase = tcase->next) {
+ for (test = tcase->tests; test; test = test->next) {
+ ret++;
+ }
+ }
+ for (tsuite = suite->children; tsuite; tsuite = tsuite->next) {
+ ret ++;
+ }
+ return ret;
+}
+
/**
* Run a torture test suite.
*/
bool torture_run_suite(struct torture_context *context,
struct torture_suite *suite)
{
+ return torture_run_suite_restricted(context, suite, NULL);
+}
+
+bool torture_run_suite_restricted(struct torture_context *context,
+ struct torture_suite *suite, const char **restricted)
+{
bool ret = true;
struct torture_tcase *tcase;
struct torture_suite *tsuite;
- char *old_testname;
if (context->results->ui_ops->suite_start)
context->results->ui_ops->suite_start(context, suite);
- old_testname = context->active_testname;
- if (old_testname != NULL)
- context->active_testname = talloc_asprintf(context, "%s-%s",
- old_testname, suite->name);
- else
- context->active_testname = talloc_strdup(context, suite->name);
+ /* FIXME: Adjust torture_suite_children_count if restricted != NULL */
+ context->results->ui_ops->progress(context,
+ torture_suite_children_count(suite), TORTURE_PROGRESS_SET);
for (tcase = suite->testcases; tcase; tcase = tcase->next) {
- ret &= torture_run_tcase(context, tcase);
+ ret &= torture_run_tcase_restricted(context, tcase, restricted);
}
for (tsuite = suite->children; tsuite; tsuite = tsuite->next) {
- ret &= torture_run_suite(context, tsuite);
+ context->results->ui_ops->progress(context, 0, TORTURE_PROGRESS_PUSH);
+ ret &= torture_run_suite_restricted(context, tsuite, restricted);
+ context->results->ui_ops->progress(context, 0, TORTURE_PROGRESS_POP);
}
- talloc_free(context->active_testname);
- context->active_testname = old_testname;
-
if (context->results->ui_ops->suite_finish)
context->results->ui_ops->suite_finish(context, suite);
@@ -302,19 +388,36 @@ void torture_ui_test_result(struct torture_context *context,
context->results->returncode = false;
}
+static bool test_needs_running(const char *name, const char **restricted)
+{
+ int i;
+ if (restricted == NULL)
+ return true;
+ for (i = 0; restricted[i]; i++) {
+ if (!strcmp(name, restricted[i]))
+ return true;
+ }
+ return false;
+}
+
static bool internal_torture_run_test(struct torture_context *context,
struct torture_tcase *tcase,
struct torture_test *test,
- bool already_setup)
+ bool already_setup,
+ const char **restricted)
{
bool success;
- char *old_testname = NULL;
+ char *subunit_testname = NULL;
if (tcase == NULL || strcmp(test->name, tcase->name) != 0) {
- old_testname = context->active_testname;
- context->active_testname = talloc_asprintf(context, "%s-%s", old_testname, test->name);
+ subunit_testname = talloc_asprintf(context, "%s.%s", tcase->name, test->name);
+ } else {
+ subunit_testname = talloc_strdup(context, test->name);
}
+ if (!test_needs_running(subunit_testname, restricted))
+ return true;
+
context->active_tcase = tcase;
context->active_test = test;
@@ -357,84 +460,111 @@ static bool internal_torture_run_test(struct torture_context *context,
talloc_free(context->last_reason);
- if (tcase == NULL || strcmp(test->name, tcase->name) != 0) {
- talloc_free(context->active_testname);
- context->active_testname = old_testname;
- }
context->active_test = NULL;
context->active_tcase = NULL;
return success;
}
-bool torture_run_tcase(struct torture_context *context,
+bool torture_run_tcase(struct torture_context *context,
struct torture_tcase *tcase)
{
+ return torture_run_tcase_restricted(context, tcase, NULL);
+}
+
+bool torture_run_tcase_restricted(struct torture_context *context,
+ struct torture_tcase *tcase, const char **restricted)
+{
bool ret = true;
- char *old_testname;
struct torture_test *test;
+ bool setup_succeeded = true;
+ const char * setup_reason = "Setup failed";
context->active_tcase = tcase;
if (context->results->ui_ops->tcase_start)
context->results->ui_ops->tcase_start(context, tcase);
- if (tcase->fixture_persistent && tcase->setup
- && !tcase->setup(context, &tcase->data)) {
- /* FIXME: Use torture ui ops for reporting this error */
- fprintf(stderr, "Setup failed: ");
- if (context->last_reason != NULL)
- fprintf(stderr, "%s", context->last_reason);
- fprintf(stderr, "\n");
- ret = false;
- goto done;
+ if (tcase->fixture_persistent && tcase->setup) {
+ setup_succeeded = tcase->setup(context, &tcase->data);
+ }
+
+ if (!setup_succeeded) {
+ /* Uh-oh. The setup failed, so we can't run any of the tests
+ * in this testcase. The subunit format doesn't specify what
+ * to do here, so we keep the failure reason, and manually
+ * use it to fail every test.
+ */
+ if (context->last_reason != NULL) {
+ setup_reason = talloc_asprintf(context,
+ "Setup failed: %s", context->last_reason);
+ }
}
- old_testname = context->active_testname;
- context->active_testname = talloc_asprintf(context, "%s-%s",
- old_testname, tcase->name);
for (test = tcase->tests; test; test = test->next) {
- ret &= internal_torture_run_test(context, tcase, test,
- tcase->fixture_persistent);
+ if (setup_succeeded) {
+ ret &= internal_torture_run_test(context, tcase, test,
+ tcase->fixture_persistent, restricted);
+ } else {
+ context->active_tcase = tcase;
+ context->active_test = test;
+ torture_ui_test_start(context, tcase, test);
+ torture_ui_test_result(context, TORTURE_FAIL, setup_reason);
+ }
}
- talloc_free(context->active_testname);
- context->active_testname = old_testname;
- if (tcase->fixture_persistent && tcase->teardown &&
- !tcase->teardown(context, tcase->data))
+ if (setup_succeeded && tcase->fixture_persistent && tcase->teardown &&
+ !tcase->teardown(context, tcase->data)) {
ret = false;
+ }
-done:
context->active_tcase = NULL;
+ context->active_test = NULL;
if (context->results->ui_ops->tcase_finish)
context->results->ui_ops->tcase_finish(context, tcase);
- return ret;
+ return (!setup_succeeded) ? false : ret;
}
bool torture_run_test(struct torture_context *context,
struct torture_tcase *tcase,
struct torture_test *test)
{
- return internal_torture_run_test(context, tcase, test, false);
+ return internal_torture_run_test(context, tcase, test, false, NULL);
+}
+
+bool torture_run_test_restricted(struct torture_context *context,
+ struct torture_tcase *tcase,
+ struct torture_test *test,
+ const char **restricted)
+{
+ return internal_torture_run_test(context, tcase, test, false, restricted);
}
int torture_setting_int(struct torture_context *test, const char *name,
int default_value)
{
- return lp_parm_int(test->lp_ctx, NULL, "torture", name, default_value);
+ return lpcfg_parm_int(test->lp_ctx, NULL, "torture", name, default_value);
+}
+
+unsigned long torture_setting_ulong(struct torture_context *test,
+ const char *name,
+ unsigned long default_value)
+{
+ return lpcfg_parm_ulong(test->lp_ctx, NULL, "torture", name,
+ default_value);
}
double torture_setting_double(struct torture_context *test, const char *name,
double default_value)
{
- return lp_parm_double(test->lp_ctx, NULL, "torture", name, default_value);
+ return lpcfg_parm_double(test->lp_ctx, NULL, "torture", name, default_value);
}
bool torture_setting_bool(struct torture_context *test, const char *name,
bool default_value)
{
- return lp_parm_bool(test->lp_ctx, NULL, "torture", name, default_value);
+ return lpcfg_parm_bool(test->lp_ctx, NULL, "torture", name, default_value);
}
const char *torture_setting_string(struct torture_context *test,
@@ -446,7 +576,7 @@ const char *torture_setting_string(struct torture_context *test,
SMB_ASSERT(test != NULL);
SMB_ASSERT(test->lp_ctx != NULL);
- ret = lp_parm_string(test->lp_ctx, NULL, "torture", name);
+ ret = lpcfg_parm_string(test->lp_ctx, NULL, "torture", name);
if (ret == NULL)
return default_value;
@@ -619,3 +749,9 @@ struct torture_test *torture_tcase_add_simple_test(struct torture_tcase *tcase,
return test;
}
+
+void torture_ui_report_time(struct torture_context *context)
+{
+ if (context->results->ui_ops->report_time)
+ context->results->ui_ops->report_time(context);
+}
diff --git a/lib/torture/torture.h b/lib/torture/torture.h
index 7f387cc1f2..00de5e8efc 100644
--- a/lib/torture/torture.h
+++ b/lib/torture/torture.h
@@ -34,6 +34,13 @@ enum torture_result {
TORTURE_SKIP=3
};
+enum torture_progress_whence {
+ TORTURE_PROGRESS_SET,
+ TORTURE_PROGRESS_CUR,
+ TORTURE_PROGRESS_POP,
+ TORTURE_PROGRESS_PUSH,
+};
+
/*
* These callbacks should be implemented by any backend that wishes
* to listen to reports from the torture tests.
@@ -52,6 +59,8 @@ struct torture_ui_ops
struct torture_test *);
void (*test_result) (struct torture_context *,
enum torture_result, const char *reason);
+ void (*progress) (struct torture_context *, int offset, enum torture_progress_whence whence);
+ void (*report_time) (struct torture_context *);
};
void torture_ui_test_start(struct torture_context *context,
@@ -62,6 +71,8 @@ void torture_ui_test_result(struct torture_context *context,
enum torture_result result,
const char *comment);
+void torture_ui_report_time(struct torture_context *context);
+
/*
* Holds information about a specific run of the testsuite.
* The data in this structure should be considered private to
@@ -76,7 +87,6 @@ struct torture_context
{
struct torture_results *results;
- char *active_testname;
struct torture_test *active_test;
struct torture_tcase *active_tcase;
@@ -85,7 +95,7 @@ struct torture_context
/** Directory used for temporary test data */
const char *outputdir;
-
+
/** Event context */
struct tevent_context *ev;
@@ -210,15 +220,28 @@ bool torture_suite_add_suite(struct torture_suite *suite,
bool torture_run_suite(struct torture_context *context,
struct torture_suite *suite);
+/* Run the specified testsuite recursively, but only the specified
+ * tests */
+bool torture_run_suite_restricted(struct torture_context *context,
+ struct torture_suite *suite, const char **restricted);
+
/* Run the specified testcase */
bool torture_run_tcase(struct torture_context *context,
struct torture_tcase *tcase);
+bool torture_run_tcase_restricted(struct torture_context *context,
+ struct torture_tcase *tcase, const char **restricted);
+
/* Run the specified test */
bool torture_run_test(struct torture_context *context,
struct torture_tcase *tcase,
struct torture_test *test);
+bool torture_run_test_restricted(struct torture_context *context,
+ struct torture_tcase *tcase,
+ struct torture_test *test,
+ const char **restricted);
+
void torture_comment(struct torture_context *test, const char *comment, ...) PRINTF_ATTRIBUTE(2,3);
void torture_warning(struct torture_context *test, const char *comment, ...) PRINTF_ATTRIBUTE(2,3);
void torture_result(struct torture_context *test,
@@ -230,6 +253,13 @@ void torture_result(struct torture_context *test,
return false; \
}
+#define torture_assert_goto(torture_ctx,expr,ret,label,cmt) \
+ if (!(expr)) { \
+ torture_result(torture_ctx, TORTURE_FAIL, __location__": Expression `%s' failed: %s", __STRING(expr), cmt); \
+ ret = false; \
+ goto label; \
+ }
+
#define torture_assert_werr_equal(torture_ctx, got, expected, cmt) \
do { WERROR __got = got, __expected = expected; \
if (!W_ERROR_EQUAL(__got, __expected)) { \
@@ -281,6 +311,16 @@ void torture_result(struct torture_context *test,
} \
} while(0)
+#define torture_assert_strn_equal(torture_ctx,got,expected,len,cmt)\
+ do { const char *__got = (got), *__expected = (expected); \
+ if (strncmp(__got, __expected, len) != 0) { \
+ torture_result(torture_ctx, TORTURE_FAIL, \
+ __location__": "#got" %s of len %d did not match "#expected" %s: %s", \
+ __got, (int)len, __expected, cmt); \
+ return false; \
+ } \
+ } while(0)
+
#define torture_assert_str_equal_goto(torture_ctx,got,expected,ret,label,cmt)\
do { const char *__got = (got), *__expected = (expected); \
if (strcmp_safe(__got, __expected) != 0) { \
@@ -357,8 +397,8 @@ void torture_result(struct torture_context *test,
do { int __got = (got), __expected = (expected); \
if (__got != __expected) { \
torture_result(torture_ctx, TORTURE_FAIL, \
- __location__": "#got" was %d, expected %d: %s", \
- __got, __expected, cmt); \
+ __location__": "#got" was %d (0x%X), expected %d (0x%X): %s", \
+ __got, __got, __expected, __expected, cmt); \
return false; \
} \
} while(0)
@@ -367,8 +407,8 @@ void torture_result(struct torture_context *test,
do { int __got = (got), __expected = (expected); \
if (__got != __expected) { \
torture_result(torture_ctx, TORTURE_FAIL, \
- __location__": "#got" was %d, expected %d: %s", \
- __got, __expected, cmt); \
+ __location__": "#got" was %d (0x%X), expected %d (0x%X): %s", \
+ __got, __got, __expected, __expected, cmt); \
ret = false; \
goto label; \
} \
@@ -378,12 +418,27 @@ void torture_result(struct torture_context *test,
do { uint64_t __got = (got), __expected = (expected); \
if (__got != __expected) { \
torture_result(torture_ctx, TORTURE_FAIL, \
- __location__": "#got" was %llu, expected %llu: %s", \
- (unsigned long long)__got, (unsigned long long)__expected, cmt); \
+ __location__": "#got" was %llu (0x%llX), expected %llu (0x%llX): %s", \
+ (unsigned long long)__got, (unsigned long long)__got, \
+ (unsigned long long)__expected, (unsigned long long)__expected, \
+ cmt); \
return false; \
} \
} while(0)
+#define torture_assert_u64_equal_goto(torture_ctx,got,expected,ret,label,cmt)\
+ do { uint64_t __got = (got), __expected = (expected); \
+ if (__got != __expected) { \
+ torture_result(torture_ctx, TORTURE_FAIL, \
+ __location__": "#got" was %llu (0x%llX), expected %llu (0x%llX): %s", \
+ (unsigned long long)__got, (unsigned long long)__got, \
+ (unsigned long long)__expected, (unsigned long long)__expected, \
+ cmt); \
+ ret = false; \
+ goto label; \
+ } \
+ } while(0)
+
#define torture_assert_errno_equal(torture_ctx,expected,cmt)\
do { int __expected = (expected); \
if (errno != __expected) { \
@@ -395,7 +450,13 @@ void torture_result(struct torture_context *test,
} \
} while(0)
-
+#define torture_assert_nttime_equal(torture_ctx,got,expected,cmt) \
+ do { NTTIME __got = got, __expected = expected; \
+ if (!nt_time_equal(&__got, &__expected)) { \
+ torture_result(torture_ctx, TORTURE_FAIL, __location__": "#got" was %s, expected %s: %s", nt_time_string(tctx, __got), nt_time_string(tctx, __expected), cmt); \
+ return false; \
+ }\
+ } while(0)
#define torture_skip(torture_ctx,cmt) do {\
torture_result(torture_ctx, TORTURE_SKIP, __location__": %s", cmt);\
@@ -449,9 +510,14 @@ bool torture_setting_bool(struct torture_context *test,
struct torture_suite *torture_find_suite(struct torture_suite *parent,
const char *name);
+unsigned long torture_setting_ulong(struct torture_context *test,
+ const char *name,
+ unsigned long default_value);
+
NTSTATUS torture_temp_dir(struct torture_context *tctx,
const char *prefix,
char **tempdir);
+NTSTATUS torture_deltree_outputdir(struct torture_context *tctx);
struct torture_test *torture_tcase_add_simple_test(struct torture_tcase *tcase,
const char *name,
@@ -461,6 +527,7 @@ struct torture_test *torture_tcase_add_simple_test(struct torture_tcase *tcase,
bool torture_suite_init_tcase(struct torture_suite *suite,
struct torture_tcase *tcase,
const char *name);
+int torture_suite_children_count(const struct torture_suite *suite);
struct torture_context *torture_context_init(struct tevent_context *event_ctx, struct torture_results *results);
diff --git a/lib/torture/torture.pc.in b/lib/torture/torture.pc.in
index 6582816cb5..459b35c582 100644
--- a/lib/torture/torture.pc.in
+++ b/lib/torture/torture.pc.in
@@ -8,5 +8,5 @@ Name: torture
Description: Samba torture (test) suite
Requires: talloc
Version: 0.0.1
-Libs: -L${libdir} -ltorture
+Libs: @LIB_RPATH@ -L${libdir} -ltorture
Cflags: -I${includedir} -DHAVE_IMMEDIATE_STRUCTURES=1
diff --git a/lib/torture/wscript_build b/lib/torture/wscript_build
new file mode 100644
index 0000000000..a68707a3a0
--- /dev/null
+++ b/lib/torture/wscript_build
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+bld.SAMBA_LIBRARY('torture',
+ source='torture.c subunit.c',
+ vnum='0.0.1',
+ pc_files='torture.pc',
+ public_deps='samba-hostconfig samba-util errors talloc tevent subunit',
+ public_headers='torture.h'
+ )
diff --git a/lib/tsocket/config.mk b/lib/tsocket/config.mk
deleted file mode 100644
index ca2978a426..0000000000
--- a/lib/tsocket/config.mk
+++ /dev/null
@@ -1,13 +0,0 @@
-[SUBSYSTEM::LIBTSOCKET]
-PRIVATE_DEPENDENCIES = LIBREPLACE_NETWORK
-PUBLIC_DEPENDENCIES = LIBTALLOC LIBTEVENT
-
-LIBTSOCKET_OBJ_FILES = $(addprefix ../lib/tsocket/, \
- tsocket.o \
- tsocket_helpers.o \
- tsocket_bsd.o)
-
-PUBLIC_HEADERS += $(addprefix ../lib/tsocket/, \
- tsocket.h\
- tsocket_internal.h)
-
diff --git a/lib/tsocket/tsocket.c b/lib/tsocket/tsocket.c
index b8dd6c8936..674858de0a 100644
--- a/lib/tsocket/tsocket.c
+++ b/lib/tsocket/tsocket.c
@@ -46,8 +46,7 @@ int tsocket_simple_int_recv(struct tevent_req *req, int *perrno)
*perrno = (int)error;
return -1;
default:
- *perrno = EIO;
- return -1;
+ break;
}
*perrno = EIO;
diff --git a/lib/tsocket/tsocket.h b/lib/tsocket/tsocket.h
index d983325c45..3aca536124 100644
--- a/lib/tsocket/tsocket.h
+++ b/lib/tsocket/tsocket.h
@@ -83,7 +83,7 @@ struct iovec;
*/
/**
- * @brief Get a string representaion of the endpoint.
+ * @brief Get a string representation of the endpoint.
*
* This function creates a string representation of the endpoint for debugging.
* The output will look as followed:
@@ -101,6 +101,7 @@ struct iovec;
*
* @return The address as a string representation, NULL on error.
*
+ * @see tsocket_address_is_inet()
* @see tsocket_address_inet_addr_string()
* @see tsocket_address_inet_port()
*/
@@ -486,6 +487,20 @@ int tstream_disconnect_recv(struct tevent_req *req,
* @{
*/
+/**
+ * @brief Find out if the tsocket_address represents an ipv4 or ipv6 endpoint.
+ *
+ * @param[in] addr The tsocket_address pointer
+ *
+ * @param[in] fam The family can be can be "ipv4", "ipv6" or "ip". With
+ * "ip" is autodetects "ipv4" or "ipv6" based on the
+ * addr.
+ *
+ * @return true if addr represents an address of the given family,
+ * otherwise false.
+ */
+bool tsocket_address_is_inet(const struct tsocket_address *addr, const char *fam);
+
#if DOXYGEN
/**
* @brief Create a tsocket_address for ipv4 and ipv6 endpoint addresses.
@@ -533,6 +548,8 @@ int _tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx,
*
* @return A newly allocated string of the address, NULL on error
* with errno set.
+ *
+ * @see tsocket_address_is_inet()
*/
char *tsocket_address_inet_addr_string(const struct tsocket_address *addr,
TALLOC_CTX *mem_ctx);
@@ -558,6 +575,16 @@ uint16_t tsocket_address_inet_port(const struct tsocket_address *addr);
int tsocket_address_inet_set_port(struct tsocket_address *addr,
uint16_t port);
+/**
+ * @brief Find out if the tsocket_address represents an unix domain endpoint.
+ *
+ * @param[in] addr The tsocket_address pointer
+ *
+ * @return true if addr represents an unix domain endpoint,
+ * otherwise false.
+ */
+bool tsocket_address_is_unix(const struct tsocket_address *addr);
+
#ifdef DOXYGEN
/**
* @brief Create a tsocket_address for a unix domain endpoint addresses.
@@ -569,6 +596,8 @@ int tsocket_address_inet_set_port(struct tsocket_address *addr,
* @param[in] _addr The tsocket_address pointer to store the information.
*
* @return 0 on success, -1 on error with errno set.
+ *
+ * @see tsocket_address_is_unix()
*/
int tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx,
const char *path,
@@ -695,23 +724,28 @@ struct tevent_req *tstream_inet_tcp_connect_send(TALLOC_CTX *mem_ctx,
*
* @param[in] mem_ctx The talloc memory context to use.
*
- * @param[in] stream A tstream_context pointer to setup the tcp communication
+ * @param[out] stream A tstream_context pointer to setup the tcp communication
* on. This function will allocate the memory.
*
+ * @param[out] local The real 'inet' tsocket_address of the local endpoint.
+ * This parameter is optional and can be NULL.
+ *
* @return 0 on success, -1 on error with perrno set.
*/
int tstream_inet_tcp_connect_recv(struct tevent_req *req,
int *perrno,
TALLOC_CTX *mem_ctx,
- struct tstream_context **stream);
+ struct tstream_context **stream,
+ struct tsocket_address **local)
#else
int _tstream_inet_tcp_connect_recv(struct tevent_req *req,
int *perrno,
TALLOC_CTX *mem_ctx,
struct tstream_context **stream,
+ struct tsocket_address **local,
const char *location);
-#define tstream_inet_tcp_connect_recv(req, perrno, mem_ctx, stream) \
- _tstream_inet_tcp_connect_recv(req, perrno, mem_ctx, stream, \
+#define tstream_inet_tcp_connect_recv(req, perrno, mem_ctx, stream, local) \
+ _tstream_inet_tcp_connect_recv(req, perrno, mem_ctx, stream, local, \
__location__)
#endif
@@ -873,16 +907,26 @@ ssize_t tsocket_address_bsd_sockaddr(const struct tsocket_address *addr,
* for anything else. The file descriptor will be closed when the stream gets
* freed. If you still want to use the fd you have have to create a duplicate.
*
- * @param[in] mem_ctx The talloc memory context to use.
+ * @param[in] mem_ctx The talloc memory context to use.
*
- * @param[in] fd The non blocking fd to use!
+ * @param[in] fd The non blocking fd to use!
*
- * @param[in] stream The filed tstream_context you allocated before.
+ * @param[out] stream A pointer to store an allocated tstream_context.
*
- * @return 0 on success, -1 on error with errno set.
+ * @return 0 on success, -1 on error.
+ *
+ * Example:
+ * @code
+ * fd2 = dup(fd);
+ * rc = tstream_bsd_existing_socket(mem_ctx, fd2, &tstream);
+ * if (rc < 0) {
+ * stream_terminate_connection(conn, "named_pipe_accept: out of memory");
+ * return;
+ * }
+ * @endcode
*
- * @warning You should read the tsocket_bsd.c code and unterstand it in order
- * use this function.
+ * @warning This is an internal function. You should read the code to fully
+ * understand it if you plan to use it.
*/
int tstream_bsd_existing_socket(TALLOC_CTX *mem_ctx,
int fd,
diff --git a/lib/tsocket/tsocket_bsd.c b/lib/tsocket/tsocket_bsd.c
index 43defb30c8..9e80065227 100644
--- a/lib/tsocket/tsocket_bsd.c
+++ b/lib/tsocket/tsocket_bsd.c
@@ -263,6 +263,9 @@ int _tsocket_address_bsd_from_sockaddr(TALLOC_CTX *mem_ctx,
memcpy(&bsda->u.ss, sa, sa_socklen);
bsda->sa_socklen = sa_socklen;
+#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ bsda->u.sa.sa_len = bsda->sa_socklen;
+#endif
*_addr = addr;
return 0;
@@ -291,9 +294,49 @@ ssize_t tsocket_address_bsd_sockaddr(const struct tsocket_address *addr,
}
memcpy(sa, &bsda->u.ss, sa_socklen);
+#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ sa->sa_len = sa_socklen;
+#endif
return sa_socklen;
}
+bool tsocket_address_is_inet(const struct tsocket_address *addr, const char *fam)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+
+ if (!bsda) {
+ return false;
+ }
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_INET:
+ if (strcasecmp(fam, "ip") == 0) {
+ return true;
+ }
+
+ if (strcasecmp(fam, "ipv4") == 0) {
+ return true;
+ }
+
+ return false;
+#ifdef HAVE_IPV6
+ case AF_INET6:
+ if (strcasecmp(fam, "ip") == 0) {
+ return true;
+ }
+
+ if (strcasecmp(fam, "ipv6") == 0) {
+ return true;
+ }
+
+ return false;
+#endif
+ }
+
+ return false;
+}
+
int _tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx,
const char *fam,
const char *addr,
@@ -466,6 +509,23 @@ int tsocket_address_inet_set_port(struct tsocket_address *addr,
return 0;
}
+bool tsocket_address_is_unix(const struct tsocket_address *addr)
+{
+ struct tsocket_address_bsd *bsda = talloc_get_type(addr->private_data,
+ struct tsocket_address_bsd);
+
+ if (!bsda) {
+ return false;
+ }
+
+ switch (bsda->u.sa.sa_family) {
+ case AF_UNIX:
+ return true;
+ }
+
+ return false;
+}
+
int _tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx,
const char *path,
struct tsocket_address **_addr,
@@ -846,6 +906,9 @@ static void tdgram_bsd_recvfrom_handler(void *private_data)
ZERO_STRUCTP(bsda);
bsda->sa_socklen = sizeof(bsda->u.ss);
+#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ bsda->u.sa.sa_len = bsda->sa_socklen;
+#endif
ret = recvfrom(bsds->fd, state->buf, state->len, 0,
&bsda->u.sa, &bsda->sa_socklen);
@@ -1053,6 +1116,7 @@ static struct tevent_req *tdgram_bsd_disconnect_send(TALLOC_CTX *mem_ctx,
goto post;
}
+ TALLOC_FREE(bsds->fde);
ret = close(bsds->fd);
bsds->fd = -1;
err = tsocket_bsd_error_from_errno(ret, errno, &dummy);
@@ -1183,12 +1247,12 @@ static int tdgram_bsd_dgram_socket(const struct tsocket_address *local,
fd = socket(sa_fam, SOCK_DGRAM, 0);
if (fd < 0) {
- return fd;
+ return -1;
}
fd = tsocket_bsd_common_prepare_fd(fd, true);
if (fd < 0) {
- return fd;
+ return -1;
}
dgram = tdgram_context_create(mem_ctx,
@@ -1216,7 +1280,7 @@ static int tdgram_bsd_dgram_socket(const struct tsocket_address *local,
int saved_errno = errno;
talloc_free(dgram);
errno = saved_errno;
- return ret;
+ return -1;
}
}
#endif
@@ -1230,7 +1294,7 @@ static int tdgram_bsd_dgram_socket(const struct tsocket_address *local,
int saved_errno = errno;
talloc_free(dgram);
errno = saved_errno;
- return ret;
+ return -1;
}
}
@@ -1243,7 +1307,7 @@ static int tdgram_bsd_dgram_socket(const struct tsocket_address *local,
int saved_errno = errno;
talloc_free(dgram);
errno = saved_errno;
- return ret;
+ return -1;
}
}
@@ -1253,7 +1317,7 @@ static int tdgram_bsd_dgram_socket(const struct tsocket_address *local,
int saved_errno = errno;
talloc_free(dgram);
errno = saved_errno;
- return ret;
+ return -1;
}
}
@@ -1269,7 +1333,7 @@ static int tdgram_bsd_dgram_socket(const struct tsocket_address *local,
int saved_errno = errno;
talloc_free(dgram);
errno = saved_errno;
- return ret;
+ return -1;
}
}
@@ -1609,7 +1673,7 @@ static void tstream_bsd_readv_handler(void *private_data)
uint8_t *base;
base = (uint8_t *)state->vector[0].iov_base;
base += ret;
- state->vector[0].iov_base = base;
+ state->vector[0].iov_base = (void *)base;
state->vector[0].iov_len -= ret;
break;
}
@@ -1769,7 +1833,7 @@ static void tstream_bsd_writev_handler(void *private_data)
uint8_t *base;
base = (uint8_t *)state->vector[0].iov_base;
base += ret;
- state->vector[0].iov_base = base;
+ state->vector[0].iov_base = (void *)base;
state->vector[0].iov_len -= ret;
break;
}
@@ -1840,6 +1904,7 @@ static struct tevent_req *tstream_bsd_disconnect_send(TALLOC_CTX *mem_ctx,
goto post;
}
+ TALLOC_FREE(bsds->fde);
ret = close(bsds->fd);
bsds->fd = -1;
err = tsocket_bsd_error_from_errno(ret, errno, &dummy);
@@ -1917,6 +1982,7 @@ struct tstream_bsd_connect_state {
int fd;
struct tevent_fd *fde;
struct tstream_conext *stream;
+ struct tsocket_address *local;
};
static int tstream_bsd_connect_destructor(struct tstream_bsd_connect_state *state)
@@ -1935,7 +2001,7 @@ static void tstream_bsd_connect_fde_handler(struct tevent_context *ev,
uint16_t flags,
void *private_data);
-static struct tevent_req * tstream_bsd_connect_send(TALLOC_CTX *mem_ctx,
+static struct tevent_req *tstream_bsd_connect_send(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
int sys_errno,
const struct tsocket_address *local,
@@ -1946,6 +2012,7 @@ static struct tevent_req * tstream_bsd_connect_send(TALLOC_CTX *mem_ctx,
struct tsocket_address_bsd *lbsda =
talloc_get_type_abort(local->private_data,
struct tsocket_address_bsd);
+ struct tsocket_address_bsd *lrbsda = NULL;
struct tsocket_address_bsd *rbsda =
talloc_get_type_abort(remote->private_data,
struct tsocket_address_bsd);
@@ -2025,6 +2092,23 @@ static struct tevent_req * tstream_bsd_connect_send(TALLOC_CTX *mem_ctx,
}
}
+ if (is_inet) {
+ state->local = tsocket_address_create(state,
+ &tsocket_address_bsd_ops,
+ &lrbsda,
+ struct tsocket_address_bsd,
+ __location__ "bsd_connect");
+ if (tevent_req_nomem(state->local, req)) {
+ goto post;
+ }
+
+ ZERO_STRUCTP(lrbsda);
+ lrbsda->sa_socklen = sizeof(lrbsda->u.ss);
+#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ lrbsda->u.sa.sa_len = lrbsda->sa_socklen;
+#endif
+ }
+
state->fd = socket(sa_fam, SOCK_STREAM, 0);
if (state->fd == -1) {
tevent_req_error(req, errno);
@@ -2084,6 +2168,17 @@ static struct tevent_req * tstream_bsd_connect_send(TALLOC_CTX *mem_ctx,
goto post;
}
+ if (!state->local) {
+ tevent_req_done(req);
+ goto post;
+ }
+
+ ret = getsockname(state->fd, &lrbsda->u.sa, &lrbsda->sa_socklen);
+ if (ret == -1) {
+ tevent_req_error(req, errno);
+ goto post;
+ }
+
tevent_req_done(req);
goto post;
@@ -2113,6 +2208,7 @@ static void tstream_bsd_connect_fde_handler(struct tevent_context *ev,
struct tevent_req);
struct tstream_bsd_connect_state *state = tevent_req_data(req,
struct tstream_bsd_connect_state);
+ struct tsocket_address_bsd *lrbsda = NULL;
int ret;
int error=0;
socklen_t len = sizeof(error);
@@ -2135,6 +2231,20 @@ static void tstream_bsd_connect_fde_handler(struct tevent_context *ev,
return;
}
+ if (!state->local) {
+ tevent_req_done(req);
+ return;
+ }
+
+ lrbsda = talloc_get_type_abort(state->local->private_data,
+ struct tsocket_address_bsd);
+
+ ret = getsockname(state->fd, &lrbsda->u.sa, &lrbsda->sa_socklen);
+ if (ret == -1) {
+ tevent_req_error(req, errno);
+ return;
+ }
+
tevent_req_done(req);
}
@@ -2142,6 +2252,7 @@ static int tstream_bsd_connect_recv(struct tevent_req *req,
int *perrno,
TALLOC_CTX *mem_ctx,
struct tstream_context **stream,
+ struct tsocket_address **local,
const char *location)
{
struct tstream_bsd_connect_state *state = tevent_req_data(req,
@@ -2160,6 +2271,10 @@ static int tstream_bsd_connect_recv(struct tevent_req *req,
}
TALLOC_FREE(state->fde);
state->fd = -1;
+
+ if (local) {
+ *local = talloc_move(mem_ctx, &state->local);
+ }
}
done:
@@ -2199,9 +2314,12 @@ int _tstream_inet_tcp_connect_recv(struct tevent_req *req,
int *perrno,
TALLOC_CTX *mem_ctx,
struct tstream_context **stream,
+ struct tsocket_address **local,
const char *location)
{
- return tstream_bsd_connect_recv(req, perrno, mem_ctx, stream, location);
+ return tstream_bsd_connect_recv(req, perrno,
+ mem_ctx, stream, local,
+ location);
}
struct tevent_req * tstream_unix_connect_send(TALLOC_CTX *mem_ctx,
@@ -2234,7 +2352,9 @@ int _tstream_unix_connect_recv(struct tevent_req *req,
struct tstream_context **stream,
const char *location)
{
- return tstream_bsd_connect_recv(req, perrno, mem_ctx, stream, location);
+ return tstream_bsd_connect_recv(req, perrno,
+ mem_ctx, stream, NULL,
+ location);
}
int _tstream_unix_socketpair(TALLOC_CTX *mem_ctx1,
diff --git a/lib/tsocket/tsocket_guide.txt b/lib/tsocket/tsocket_guide.txt
index dfe2dd44e1..f937385a82 100644
--- a/lib/tsocket/tsocket_guide.txt
+++ b/lib/tsocket/tsocket_guide.txt
@@ -23,20 +23,20 @@ for all abstracted methods that need to be async.
The tsocket_address abstraction
===============================
-The tsocket_address represents an socket endpoint genericly.
-As it's like an abstract class it has no specific constructor.
-The specific constructors are descripted later sections.
+A tsocket_address represents a generic socket endpoint.
+It behaves like an abstract class, therefore it has no direct constructor.
+Constructors are described in later sections of this document.
-There's a function get the string representation of the
-endpoint for debugging. Callers should not try to parse
-the string! The should use additional methods of the specific
-tsocket_address implemention to get more details.
+A function get the string representation of an endpoint for debugging is
+available but callers SHOULD NOT try to parse this string. To get more
+details callers should use getter methods of the specific tsocket_address
+implemention.
char *tsocket_address_string(const struct tsocket_address *addr,
TALLOC_CTX *mem_ctx);
-There's a function to create a copy of the tsocket_address.
-This is useful when before doing modifications to a socket
+A function to create a copy of the tsocket_address is also avilable.
+This is useful before doing modifications to a socket
via additional methods of the specific tsocket_address implementation.
struct tsocket_address *tsocket_address_copy(const struct tsocket_address *addr,
@@ -47,14 +47,13 @@ The tdgram_context abstraction
The tdgram_context is like an abstract class for datagram
based sockets. The interface provides async 'tevent_req' based
-functions on top functionality is similar to the
-recvfrom(2)/sendto(2)/close(2) syscalls.
+functions similar to recvfrom(2)/sendto(2)/close(2) syscalls.
The tdgram_recvfrom_send() method can be called to ask for the
-next available datagram on the abstracted tdgram_context.
+next available datagram from the abstracted tdgram_context.
It returns a 'tevent_req' handle, where the caller can register
a callback with tevent_req_set_callback(). The callback is triggered
-when a datagram is available or an error happened.
+when a datagram is available or an error occurs.
The callback is then supposed to get the result by calling
tdgram_recvfrom_recv() on the 'tevent_req'. It returns -1
@@ -122,20 +121,18 @@ of the tdgram_context on a fatal error.
The tstream_context abstraction
===============================
-The tstream_context is like an abstract class for stream
+A tstream_context is like an abstract class for stream
based sockets. The interface provides async 'tevent_req' based
-functions on top functionality is similar to the
-readv(2)/writev(2)/close(2) syscalls.
+functions similar to the readv(2)/writev(2)/close(2) syscalls.
-The tstream_pending_bytes() function is able to report
-how much bytes of the incoming stream have arrived
-but not consumed yet. It returns -1 and sets 'errno' on failure.
-Otherwise it returns the number of uncomsumed bytes
-(it can return 0!).
+The tstream_pending_bytes() function is able to report how many bytes of
+the incoming stream have been received but have not been consumed yet.
+It returns -1 and sets 'errno' on failure.
+Otherwise it returns the number of uncomsumed bytes (it can return 0!).
ssize_t tstream_pending_bytes(struct tstream_context *stream);
-The tstream_readv_send() method can be called to read for a
+The tstream_readv_send() method can be called to read a
specific amount of bytes from the stream into the buffers
of the given iovec vector. The caller has to preallocate the buffers
in the iovec vector. The caller might need to use
@@ -143,13 +140,12 @@ tstream_pending_bytes() if the protocol doesn't have a fixed pdu header
containing the pdu size. tstream_readv_send() returns a 'tevent_req' handle,
where the caller can register a callback with tevent_req_set_callback().
The callback is triggered when all iovec buffers are completely
-filled with bytes from the socket or an error happened.
+filled with bytes from the socket or an error occurs.
The callback is then supposed to get the result by calling
tstream_readv_recv() on the 'tevent_req'. It returns -1
and sets '*perrno' to the actual 'errno' on failure.
-Otherwise it returns the length of the datagram
-(0 is never returned!).
+Otherwise it returns the length of the datagram (0 is never returned!).
The caller can only have one outstanding tstream_readv_send()
at a time otherwise the caller will get *perrno = EBUSY.
@@ -165,7 +161,7 @@ at a time otherwise the caller will get *perrno = EBUSY.
The tstream_writev_send() method can be called to write
buffers in the given iovec vector into the stream socket.
-It's invalid to pass an empty vector.
+It is invalid to pass an empty vector.
tstream_writev_send() returns a 'tevent_req' handle,
where the caller can register a callback with tevent_req_set_callback().
The callback is triggered when the specific implementation (thinks it)
@@ -189,7 +185,7 @@ at a time otherwise the caller will get '*perrno = EBUSY'.
int tstream_writev_recv(struct tevent_req *req,
int *perrno);
-The tstream_disconnect_send() method should be used to normally
+The tstream_disconnect_send() method should normally be used to
shutdown/close the abstracted socket.
The caller should make sure there're no outstanding tstream_readv_send()
@@ -208,21 +204,21 @@ of the tstream_context on a fatal error.
PDU receive helper functions
============================
-In order to make the live easier for callers which want to implement
+In order to simplify the job, for callers that want to implement
a function to receive a full PDU with a single async function pair,
-there're some helper functions.
+some helper functions are provided.
The caller can use the tstream_readv_pdu_send() function
to ask for the next available PDU on the abstracted tstream_context.
The caller needs to provide a "next_vector" function and a private
state for this function. The tstream_readv_pdu engine will ask
-the next_vector function for the next iovec vetor to be filled.
+the next_vector function for the next iovec vector to be used.
There's a tstream_readv_send/recv pair for each vector returned
by the next_vector function. If the next_vector function detects
it received a full pdu, it returns an empty vector. The the callback
of the tevent_req (returned by tstream_readv_pdu_send()) is triggered.
Note: the buffer allocation is completely up to the next_vector function
-and it's private state.
+and its private state.
See the 'dcerpc_read_ncacn_packet_send/recv' functions in Samba as an
example.
@@ -244,14 +240,14 @@ example.
Async 'tevent_queue' based helper functions
===========================================
-There're some cases where the caller wants doesn't care about the
-order of doing IO on the abstracted sockets.
+In some cases the caller doesn't care about the IO ordering on the
+abstracted socket.
(Remember at the low level there's always only one IO in a specific
direction allowed, only one tdgram_sendto_send() at a time).
-There're some helpers using 'tevent_queue' to make it easier
-for callers. The functions just get a 'queue' argument
-and serialize the operations.
+Some helpers that use 'tevent_queue' are avilable to simplify handling
+multiple IO requests. The functions just get a 'queue' argument and
+internally serialize all operations.
struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
@@ -295,7 +291,7 @@ With "ip" is autodetects "ipv4" or "ipv6" based on the
ip address string based on the selected family
(dns names are not allowed!). But it's valid to pass NULL,
which gets mapped to "0.0.0.0" or "::".
-It return -1 and set errno on error. Otherwise it returns 0.
+It returns -1 and sets errno on error. Otherwise it returns 0.
int tsocket_address_inet_from_strings(TALLOC_CTX *mem_ctx,
const char *family,
@@ -342,7 +338,7 @@ On success it returns 0.
To get the path of an 'unix' tsocket_address
you can use the tsocket_address_unix_path() function.
It will return NULL and set errno to EINVAL if the tsocket_address
-doesn't represent an unix domain endpoint path.
+doesn't represent a unix domain endpoint path.
char *tsocket_address_unix_path(const struct tsocket_address *addr,
TALLOC_CTX *mem_ctx);
@@ -371,7 +367,7 @@ and it returns 0 on success.
TALLOC_CTX *mem_ctx,
struct tdgram_context **dgram);
-You can use tstream_inet_tcp_connect_send to async
+You can use tstream_inet_tcp_connect_send to asynchronously
connect to a remote ipv4 or ipv6 TCP endpoint and create a
tstream_context for the stream based communication. "local_address" has to be
an 'inet' tsocket_address and it has to represent the local
@@ -397,7 +393,7 @@ in '*stream'.
TALLOC_CTX *mem_ctx,
struct tstream_context **stream);
-You can use tstream_unix_connect_send to async
+You can use tstream_unix_connect_send to asynchronously
connect to a unix domain endpoint and create a
tstream_context for the stream based communication.
"local_address" has to be an 'unix' tsocket_address and
@@ -438,7 +434,7 @@ In some situations it's needed to create a tsocket_address from
a given 'struct sockaddr'. You can use tsocket_address_bsd_from_sockaddr()
for that. This should only be used if really needed, because of
already existing fixed APIs. Only AF_INET, AF_INET6 and AF_UNIX
-sockets are allowed. The function returns -1 and set errno on error.
+sockets are allowed. The function returns -1 and sets errno on error.
Otherwise it returns 0.
int tsocket_address_bsd_from_sockaddr(TALLOC_CTX *mem_ctx,
diff --git a/lib/tsocket/wscript_build b/lib/tsocket/wscript_build
new file mode 100644
index 0000000000..5fa05f8c50
--- /dev/null
+++ b/lib/tsocket/wscript_build
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+
+bld.SAMBA_SUBSYSTEM('LIBTSOCKET',
+ source='tsocket.c tsocket_helpers.c tsocket_bsd.c',
+ public_deps='talloc tevent',
+ public_headers='tsocket.h tsocket_internal.h',
+ )
+
diff --git a/lib/uid_wrapper/config.mk b/lib/uid_wrapper/config.mk
deleted file mode 100644
index 1bebc68118..0000000000
--- a/lib/uid_wrapper/config.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-##############################
-# Start SUBSYSTEM UID_WRAPPER
-[SUBSYSTEM::UID_WRAPPER]
-PRIVATE_DEPENDENCIES = LIBTALLOC
-# End SUBSYSTEM UID_WRAPPER
-##############################
-
-UID_WRAPPER_OBJ_FILES = $(uidwrappersrcdir)/uid_wrapper.o
-
diff --git a/lib/uid_wrapper/uid_wrapper.c b/lib/uid_wrapper/uid_wrapper.c
index f7f04316bf..c67679777c 100644
--- a/lib/uid_wrapper/uid_wrapper.c
+++ b/lib/uid_wrapper/uid_wrapper.c
@@ -15,10 +15,18 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#ifdef _SAMBA_BUILD_
+
#define UID_WRAPPER_NOT_REPLACE
-#include "includes.h"
+#include "../replace/replace.h"
+#include <talloc.h>
#include "system/passwd.h"
-#include "system/filesys.h"
+
+#else /* _SAMBA_BUILD_ */
+
+#error uid_wrapper_only_supported_in_samba_yet
+
+#endif
#ifndef _PUBLIC_
#define _PUBLIC_
@@ -44,7 +52,7 @@ static void uwrap_init(void)
uwrap.enabled = true;
/* put us in one group */
uwrap.ngroups = 1;
- uwrap.groups = talloc_array(talloc_autofree_context(), gid_t, 1);
+ uwrap.groups = talloc_array(NULL, gid_t, 1);
uwrap.groups[0] = 0;
}
}
@@ -108,7 +116,7 @@ _PUBLIC_ int uwrap_setgroups(size_t size, const gid_t *list)
uwrap.groups = NULL;
if (size != 0) {
- uwrap.groups = talloc_array(talloc_autofree_context(), gid_t, size);
+ uwrap.groups = talloc_array(NULL, gid_t, size);
if (uwrap.groups == NULL) {
errno = ENOMEM;
return -1;
diff --git a/lib/uid_wrapper/uid_wrapper.h b/lib/uid_wrapper/uid_wrapper.h
index 5d7c99d2b2..b3b11d954a 100644
--- a/lib/uid_wrapper/uid_wrapper.h
+++ b/lib/uid_wrapper/uid_wrapper.h
@@ -17,6 +17,17 @@
#ifndef __UID_WRAPPER_H__
#define __UID_WRAPPER_H__
+#ifndef uwrap_enabled
+
+int uwrap_enabled(void);
+int uwrap_seteuid(uid_t euid);
+uid_t uwrap_geteuid(void);
+int uwrap_setegid(gid_t egid);
+uid_t uwrap_getegid(void);
+int uwrap_setgroups(size_t size, const gid_t *list);
+int uwrap_getgroups(int size, gid_t *list);
+uid_t uwrap_getuid(void);
+gid_t uwrap_getgid(void);
#ifdef seteuid
#undef seteuid
@@ -58,6 +69,5 @@
#endif
#define getgid uwrap_getgid
-int uwrap_enabled(void);
-
+#endif
#endif /* __UID_WRAPPER_H__ */
diff --git a/lib/uid_wrapper/wscript b/lib/uid_wrapper/wscript
new file mode 100644
index 0000000000..1501d0e5ce
--- /dev/null
+++ b/lib/uid_wrapper/wscript
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+import Options
+
+def set_options(opt):
+ gr = opt.option_group('developer options')
+ gr.add_option('--enable-uid-wrapper',
+ help=("Turn on uid wrapper library (default=no)"),
+ action="store_true", dest='enable_uid_wrapper', default=False)
+
+def configure(conf):
+ if (Options.options.enable_uid_wrapper or
+ Options.options.developer or
+ Options.options.enable_selftest):
+ conf.DEFINE('UID_WRAPPER', 1)
+ conf.ADD_GLOBAL_DEPENDENCY('uid_wrapper')
+
diff --git a/lib/uid_wrapper/wscript_build b/lib/uid_wrapper/wscript_build
new file mode 100644
index 0000000000..54e5b80f43
--- /dev/null
+++ b/lib/uid_wrapper/wscript_build
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+
+bld.SAMBA_LIBRARY('uid_wrapper',
+ source='uid_wrapper.c',
+ deps='talloc',
+ private_library=True,
+ enabled=bld.CONFIG_SET("UID_WRAPPER"),
+ )
+
diff --git a/lib/update-external.sh b/lib/update-external.sh
new file mode 100755
index 0000000000..0a854406b5
--- /dev/null
+++ b/lib/update-external.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Pull in a new snapshot of external projects that are included in
+# our source tree for users that don't have them installed on their system
+
+TARGETDIR="`dirname $0`"
+WORKDIR="`mktemp -d`"
+
+echo "Updating subunit..."
+bzr export "$WORKDIR/subunit" lp:subunit
+# Preserve wscript file
+cp "$TARGETDIR/subunit/c/wscript" "$WORKDIR/subunit/c/wscript"
+rsync -avz --delete "$WORKDIR/subunit/" "$TARGETDIR/subunit/"
+
+echo "Updating testtools..."
+bzr export "$WORKDIR/testtools" lp:testtools
+rsync -avz --delete "$WORKDIR/testtools/" "$TARGETDIR/testtools/"
+
+echo "Updating dnspython..."
+git clone git://www.dnspython.org/dnspython.git "$WORKDIR/dnspython"
+rm -rf "$WORKDIR/dnspython/.git"
+rsync -avz --delete "$WORKDIR/dnspython/" "$TARGETDIR/dnspython/"
+
+rm -rf "$WORKDIR"
diff --git a/lib/util/asn1.c b/lib/util/asn1.c
index 70c2c57450..c23bf65b8d 100644
--- a/lib/util/asn1.c
+++ b/lib/util/asn1.c
@@ -214,32 +214,37 @@ bool asn1_write_BitString(struct asn1_data *data, const void *p, size_t length,
return asn1_pop_tag(data);
}
-bool ber_write_OID_String(DATA_BLOB *blob, const char *OID)
+bool ber_write_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB *blob, const char *OID)
{
- uint_t v, v2;
+ unsigned int v, v2;
const char *p = (const char *)OID;
char *newp;
int i;
+ if (!isdigit(*p)) return false;
v = strtoul(p, &newp, 10);
if (newp[0] != '.') return false;
p = newp + 1;
+ if (!isdigit(*p)) return false;
v2 = strtoul(p, &newp, 10);
if (newp[0] != '.') return false;
p = newp + 1;
/*the ber representation can't use more space then the string one */
- *blob = data_blob(NULL, strlen(OID));
+ *blob = data_blob_talloc(mem_ctx, NULL, strlen(OID));
if (!blob->data) return false;
blob->data[0] = 40*v + v2;
i = 1;
while (*p) {
+ if (!isdigit(*p)) return false;
v = strtoul(p, &newp, 10);
if (newp[0] == '.') {
p = newp + 1;
+ /* check for empty last component */
+ if (!*p) return false;
} else if (newp[0] == '\0') {
p = newp;
} else {
@@ -258,6 +263,45 @@ bool ber_write_OID_String(DATA_BLOB *blob, const char *OID)
return true;
}
+/**
+ * Serialize partial OID string.
+ * Partial OIDs are in the form:
+ * 1:2.5.6:0x81
+ * 1:2.5.6:0x8182
+ */
+bool ber_write_partial_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB *blob, const char *partial_oid)
+{
+ TALLOC_CTX *tmp_ctx = talloc_new(mem_ctx);
+ char *oid = talloc_strdup(tmp_ctx, partial_oid);
+ char *p;
+
+ /* truncate partial part so ber_write_OID_String() works */
+ p = strchr(oid, ':');
+ if (p) {
+ *p = '\0';
+ p++;
+ }
+
+ if (!ber_write_OID_String(mem_ctx, blob, oid)) {
+ talloc_free(tmp_ctx);
+ return false;
+ }
+
+ /* Add partially encoded sub-identifier */
+ if (p) {
+ DATA_BLOB tmp_blob = strhex_to_data_blob(tmp_ctx, p);
+ if (!data_blob_append(mem_ctx, blob, tmp_blob.data,
+ tmp_blob.length)) {
+ talloc_free(tmp_ctx);
+ return false;
+ }
+ }
+
+ talloc_free(tmp_ctx);
+
+ return true;
+}
+
/* write an object ID to a ASN1 buffer */
bool asn1_write_OID(struct asn1_data *data, const char *OID)
{
@@ -265,7 +309,7 @@ bool asn1_write_OID(struct asn1_data *data, const char *OID)
if (!asn1_push_tag(data, ASN1_OID)) return false;
- if (!ber_write_OID_String(&blob, OID)) {
+ if (!ber_write_OID_String(NULL, &blob, OID)) {
data->has_error = true;
return false;
}
@@ -454,6 +498,77 @@ bool asn1_peek_tag(struct asn1_data *data, uint8_t tag)
return (b == tag);
}
+/*
+ * just get the needed size the tag would consume
+ */
+bool asn1_peek_tag_needed_size(struct asn1_data *data, uint8_t tag, size_t *size)
+{
+ off_t start_ofs = data->ofs;
+ uint8_t b;
+ size_t taglen = 0;
+
+ if (data->has_error) {
+ return false;
+ }
+
+ if (!asn1_read_uint8(data, &b)) {
+ data->ofs = start_ofs;
+ data->has_error = false;
+ return false;
+ }
+
+ if (b != tag) {
+ data->ofs = start_ofs;
+ data->has_error = false;
+ return false;
+ }
+
+ if (!asn1_read_uint8(data, &b)) {
+ data->ofs = start_ofs;
+ data->has_error = false;
+ return false;
+ }
+
+ if (b & 0x80) {
+ int n = b & 0x7f;
+ if (!asn1_read_uint8(data, &b)) {
+ data->ofs = start_ofs;
+ data->has_error = false;
+ return false;
+ }
+ if (n > 4) {
+ /*
+ * We should not allow more than 4 bytes
+ * for the encoding of the tag length.
+ *
+ * Otherwise we'd overflow the taglen
+ * variable on 32 bit systems.
+ */
+ data->ofs = start_ofs;
+ data->has_error = false;
+ return false;
+ }
+ taglen = b;
+ while (n > 1) {
+ if (!asn1_read_uint8(data, &b)) {
+ data->ofs = start_ofs;
+ data->has_error = false;
+ return false;
+ }
+ taglen = (taglen << 8) | b;
+ n--;
+ }
+ } else {
+ taglen = b;
+ }
+
+ *size = (data->ofs - start_ofs) + taglen;
+
+ data->ofs = start_ofs;
+ data->has_error = false;
+ return true;
+}
+
/* start reading a nested asn1 structure */
bool asn1_start_tag(struct asn1_data *data, uint8_t tag)
{
@@ -543,12 +658,17 @@ int asn1_tag_remaining(struct asn1_data *data)
return remaining;
}
-/* read an object ID from a data blob */
-bool ber_read_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB blob, const char **OID)
+/**
+ * Internal implementation for reading binary OIDs
+ * Reading continues through the buffer for as long as the OID is valid,
+ * stopping when the buffer ends or an invalid sub-identifier is found.
+ */
+static bool _ber_read_OID_String_impl(TALLOC_CTX *mem_ctx, DATA_BLOB blob,
+ char **OID, size_t *bytes_eaten)
{
int i;
uint8_t *b;
- uint_t v;
+ unsigned int v;
char *tmp_oid = NULL;
if (blob.length < 2) return false;
@@ -560,29 +680,78 @@ bool ber_read_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB blob, const char **OID)
tmp_oid = talloc_asprintf_append_buffer(tmp_oid, ".%u", b[0]%40);
if (!tmp_oid) goto nomem;
+ if (bytes_eaten != NULL) {
+ *bytes_eaten = 0;
+ }
+
for(i = 1, v = 0; i < blob.length; i++) {
v = (v<<7) | (b[i]&0x7f);
if ( ! (b[i] & 0x80)) {
tmp_oid = talloc_asprintf_append_buffer(tmp_oid, ".%u", v);
v = 0;
+ if (bytes_eaten)
+ *bytes_eaten = i+1;
}
if (!tmp_oid) goto nomem;
}
- if (v != 0) {
- talloc_free(tmp_oid);
+ *OID = tmp_oid;
+ return true;
+
+nomem:
+ return false;
+}
+
+/* read an object ID from a data blob */
+bool ber_read_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB blob, char **OID)
+{
+ size_t bytes_eaten;
+
+ if (!_ber_read_OID_String_impl(mem_ctx, blob, OID, &bytes_eaten))
+ return false;
+
+ return (bytes_eaten == blob.length);
+}
+
+/**
+ * Deserialize partial OID string.
+ * Partial OIDs are in the form:
+ * 1.2.5.6:0x81
+ * 1.2.5.6:0x8182
+ */
+bool ber_read_partial_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB blob,
+ char **partial_oid)
+{
+ size_t bytes_left;
+ size_t bytes_eaten;
+ char *identifier = NULL;
+ char *tmp_oid = NULL;
+
+ if (!_ber_read_OID_String_impl(mem_ctx, blob, &tmp_oid, &bytes_eaten))
return false;
+
+ if (bytes_eaten < blob.length) {
+ bytes_left = blob.length - bytes_eaten;
+ identifier = hex_encode_talloc(mem_ctx, &blob.data[bytes_eaten], bytes_left);
+ if (!identifier) goto nomem;
+
+ *partial_oid = talloc_asprintf_append_buffer(tmp_oid, ":0x%s", identifier);
+ if (!*partial_oid) goto nomem;
+ TALLOC_FREE(identifier);
+ } else {
+ *partial_oid = tmp_oid;
}
- *OID = tmp_oid;
return true;
-nomem:
+nomem:
+ TALLOC_FREE(identifier);
+ TALLOC_FREE(tmp_oid);
return false;
}
/* read an object ID from a ASN1 buffer */
-bool asn1_read_OID(struct asn1_data *data, TALLOC_CTX *mem_ctx, const char **OID)
+bool asn1_read_OID(struct asn1_data *data, TALLOC_CTX *mem_ctx, char **OID)
{
DATA_BLOB blob;
int len;
@@ -621,16 +790,16 @@ bool asn1_read_OID(struct asn1_data *data, TALLOC_CTX *mem_ctx, const char **OID
/* check that the next object ID is correct */
bool asn1_check_OID(struct asn1_data *data, const char *OID)
{
- const char *id;
+ char *id;
if (!asn1_read_OID(data, data, &id)) return false;
if (strcmp(id, OID) != 0) {
- talloc_free(discard_const(id));
+ talloc_free(id);
data->has_error = true;
return false;
}
- talloc_free(discard_const(id));
+ talloc_free(id);
return true;
}
@@ -716,10 +885,19 @@ bool asn1_read_ContextSimple(struct asn1_data *data, uint8_t num, DATA_BLOB *blo
bool asn1_read_implicit_Integer(struct asn1_data *data, int *i)
{
uint8_t b;
+ bool first_byte = true;
*i = 0;
while (!data->has_error && asn1_tag_remaining(data)>0) {
if (!asn1_read_uint8(data, &b)) return false;
+ if (first_byte) {
+ if (b & 0x80) {
+ /* Number is negative.
+ Set i to -1 for sign extend. */
+ *i = -1;
+ }
+ first_byte = false;
+ }
*i = (*i << 8) + b;
}
return !data->has_error;
@@ -855,6 +1033,30 @@ NTSTATUS asn1_full_tag(DATA_BLOB blob, uint8_t tag, size_t *packet_size)
if (size > blob.length) {
return STATUS_MORE_ENTRIES;
+ }
+
+ *packet_size = size;
+ return NT_STATUS_OK;
+}
+
+NTSTATUS asn1_peek_full_tag(DATA_BLOB blob, uint8_t tag, size_t *packet_size)
+{
+ struct asn1_data asn1;
+ size_t size;
+ bool ok;
+
+ ZERO_STRUCT(asn1);
+ asn1.data = blob.data;
+ asn1.length = blob.length;
+
+ ok = asn1_peek_tag_needed_size(&asn1, tag, &size);
+ if (!ok) {
+ return NT_STATUS_INVALID_BUFFER_SIZE;
+ }
+
+ if (size > blob.length) {
+ *packet_size = size;
+ return STATUS_MORE_ENTRIES;
}
*packet_size = size;
diff --git a/lib/util/asn1.h b/lib/util/asn1.h
index 9abae50d64..568b4e4cc8 100644
--- a/lib/util/asn1.h
+++ b/lib/util/asn1.h
@@ -61,7 +61,8 @@ bool asn1_pop_tag(struct asn1_data *data);
bool asn1_write_implicit_Integer(struct asn1_data *data, int i);
bool asn1_write_Integer(struct asn1_data *data, int i);
bool asn1_write_BitString(struct asn1_data *data, const void *p, size_t length, uint8_t padding);
-bool ber_write_OID_String(DATA_BLOB *blob, const char *OID);
+bool ber_write_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB *blob, const char *OID);
+bool ber_write_partial_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB *blob, const char *partial_oid);
bool asn1_write_OID(struct asn1_data *data, const char *OID);
bool asn1_write_OctetString(struct asn1_data *data, const void *p, size_t length);
bool asn1_write_LDAPString(struct asn1_data *data, const char *s);
@@ -78,12 +79,14 @@ bool asn1_peek(struct asn1_data *data, void *p, int len);
bool asn1_read(struct asn1_data *data, void *p, int len);
bool asn1_read_uint8(struct asn1_data *data, uint8_t *v);
bool asn1_peek_uint8(struct asn1_data *data, uint8_t *v);
+bool asn1_peek_tag_needed_size(struct asn1_data *data, uint8_t tag, size_t *size);
bool asn1_peek_tag(struct asn1_data *data, uint8_t tag);
bool asn1_start_tag(struct asn1_data *data, uint8_t tag);
bool asn1_end_tag(struct asn1_data *data);
int asn1_tag_remaining(struct asn1_data *data);
-bool ber_read_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB blob, const char **OID);
-bool asn1_read_OID(struct asn1_data *data, TALLOC_CTX *mem_ctx, const char **OID);
+bool ber_read_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB blob, char **OID);
+bool ber_read_partial_OID_String(TALLOC_CTX *mem_ctx, DATA_BLOB blob, char **partial_oid);
+bool asn1_read_OID(struct asn1_data *data, TALLOC_CTX *mem_ctx, char **OID);
bool asn1_check_OID(struct asn1_data *data, const char *OID);
bool asn1_read_LDAPString(struct asn1_data *data, TALLOC_CTX *mem_ctx, char **s);
bool asn1_read_GeneralString(struct asn1_data *data, TALLOC_CTX *mem_ctx, char **s);
@@ -98,5 +101,6 @@ bool asn1_write_enumerated(struct asn1_data *data, uint8_t v);
bool asn1_blob(const struct asn1_data *asn1, DATA_BLOB *blob);
void asn1_load_nocopy(struct asn1_data *data, uint8_t *buf, size_t len);
NTSTATUS asn1_full_tag(DATA_BLOB blob, uint8_t tag, size_t *packet_size);
+NTSTATUS asn1_peek_full_tag(DATA_BLOB blob, uint8_t tag, size_t *packet_size);
#endif /* _ASN_1_H */
diff --git a/lib/util/attr.h b/lib/util/attr.h
index f64b272a67..a2690613fb 100644
--- a/lib/util/attr.h
+++ b/lib/util/attr.h
@@ -29,12 +29,6 @@
/** Feel free to add definitions for other compilers here. */
#endif
-#ifdef HAVE_VISIBILITY_ATTR
-# define _PUBLIC_ __attribute__((visibility("default")))
-#else
-# define _PUBLIC_
-#endif
-
#ifndef _DEPRECATED_
#if (__GNUC__ >= 3) && (__GNUC_MINOR__ >= 1 )
#define _DEPRECATED_ __attribute__ ((deprecated))
@@ -87,4 +81,16 @@
#endif
#endif
+#ifndef FORMAT_ATTRIBUTE
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+/** Use gcc attribute to check printf fns. a1 is argument to format()
+ * in the above macro. This is needed to support Heimdal's printf
+ * decorations. Note that some gcc 2.x versions don't handle this
+ * properly, and as such I've used the same minimum from heimdal: GCC 3.1 **/
+#define FORMAT_ATTRIBUTE(a) __attribute__ ((format a))
+#else
+#define FORMAT_ATTRIBUTE(a)
+#endif
+#endif
+
#endif /* __UTIL_ATTR_H__ */
diff --git a/lib/util/become_daemon.c b/lib/util/become_daemon.c
index 3d06a4363d..2af16316b5 100644
--- a/lib/util/become_daemon.c
+++ b/lib/util/become_daemon.c
@@ -66,7 +66,7 @@ _PUBLIC_ void close_low_fds(bool stderr_too)
Become a daemon, discarding the controlling terminal.
****************************************************************************/
-_PUBLIC_ void become_daemon(bool do_fork, bool no_process_group)
+_PUBLIC_ void become_daemon(bool do_fork, bool no_process_group, bool log_stdout)
{
if (do_fork) {
if (sys_fork()) {
@@ -87,7 +87,9 @@ _PUBLIC_ void become_daemon(bool do_fork, bool no_process_group)
}
#endif /* HAVE_SETSID */
- /* Close fd's 0,1,2. Needed if started by rsh */
- close_low_fds(false); /* Don't close stderr, let the debug system
- attach it to the logfile */
+ if (!log_stdout) {
+ /* Close fd's 0,1,2. Needed if started by rsh */
+ close_low_fds(false); /* Don't close stderr, let the debug system
+ attach it to the logfile */
+ }
}
diff --git a/lib/util/binsearch.h b/lib/util/binsearch.h
index ac83990072..f85d11694e 100644
--- a/lib/util/binsearch.h
+++ b/lib/util/binsearch.h
@@ -65,4 +65,20 @@
if (_r < 0) _e = _i - 1; else _b = _i + 1; \
}} } while (0)
+/*
+ like BINARY_ARRAY_SEARCH_P, but assumes that the array is an array
+ of elements, rather than pointers to structures
+
+ result points to the found structure, or NULL
+ */
+#define BINARY_ARRAY_SEARCH_V(array, array_size, target, comparison_fn, result) do { \
+ int32_t _b, _e; \
+ (result) = NULL; \
+ if (array_size) { for (_b = 0, _e = (array_size)-1; _b <= _e; ) { \
+ int32_t _i = (_b+_e)/2; \
+ int _r = comparison_fn(target, array[_i]); \
+ if (_r == 0) { (result) = &array[_i]; break; } \
+ if (_r < 0) _e = _i - 1; else _b = _i + 1; \
+ }} } while (0)
+
#endif
diff --git a/lib/util/byteorder.h b/lib/util/byteorder.h
index b860dea791..6bcf71e83b 100644
--- a/lib/util/byteorder.h
+++ b/lib/util/byteorder.h
@@ -54,7 +54,7 @@ that don't have any int types that are 2 bytes long)
You do this:
#define CVAL(buf,pos) (((uint8_t *)(buf))[pos])
-#define PVAL(buf,pos) ((uint_t)CVAL(buf,pos))
+#define PVAL(buf,pos) ((unsigned int)CVAL(buf,pos))
#define SVAL(buf,pos) (PVAL(buf,pos)|PVAL(buf,(pos)+1)<<8)
then to extract a uint16_t value at offset 25 in a buffer you do this:
@@ -144,7 +144,7 @@ static __inline__ void st_le32(uint32_t *addr, const uint32_t val)
#define CAREFUL_ALIGNMENT 1
#endif
-#define CVAL(buf,pos) ((uint_t)(((const uint8_t *)(buf))[pos]))
+#define CVAL(buf,pos) ((unsigned int)(((const uint8_t *)(buf))[pos]))
#define CVAL_NC(buf,pos) (((uint8_t *)(buf))[pos]) /* Non-const version of CVAL */
#define PVAL(buf,pos) (CVAL(buf,pos))
#define SCVAL(buf,pos,val) (CVAL_NC(buf,pos) = (val))
@@ -185,13 +185,13 @@ static __inline__ void st_le32(uint32_t *addr, const uint32_t val)
/* get single value from an SMB buffer */
#define SVAL(buf,pos) (*(const uint16_t *)((const char *)(buf) + (pos)))
-#define SVAL_NC(buf,pos) (*(uint16_t *)((char *)(buf) + (pos))) /* Non const version of above. */
+#define SVAL_NC(buf,pos) (*(uint16_t *)((void *)((char *)(buf) + (pos)))) /* Non const version of above. */
#define IVAL(buf,pos) (*(const uint32_t *)((const char *)(buf) + (pos)))
-#define IVAL_NC(buf,pos) (*(uint32_t *)((char *)(buf) + (pos))) /* Non const version of above. */
+#define IVAL_NC(buf,pos) (*(uint32_t *)((void *)((char *)(buf) + (pos)))) /* Non const version of above. */
#define SVALS(buf,pos) (*(const int16_t *)((const char *)(buf) + (pos)))
-#define SVALS_NC(buf,pos) (*(int16_t *)((char *)(buf) + (pos))) /* Non const version of above. */
+#define SVALS_NC(buf,pos) (*(int16_t *)((void *)((char *)(buf) + (pos)))) /* Non const version of above. */
#define IVALS(buf,pos) (*(const int32_t *)((const char *)(buf) + (pos)))
-#define IVALS_NC(buf,pos) (*(int32_t *)((char *)(buf) + (pos))) /* Non const version of above. */
+#define IVALS_NC(buf,pos) (*(int32_t *)((void *)((char *)(buf) + (pos)))) /* Non const version of above. */
/* store single value in an SMB buffer */
#define SSVAL(buf,pos,val) SVAL_NC(buf,pos)=((uint16_t)(val))
@@ -201,18 +201,29 @@ static __inline__ void st_le32(uint32_t *addr, const uint32_t val)
#endif /* not CAREFUL_ALIGNMENT */
+/* 64 bit macros */
+#define BVAL(p, ofs) (IVAL(p,ofs) | (((uint64_t)IVAL(p,(ofs)+4)) << 32))
+#define BVALS(p, ofs) ((int64_t)BVAL(p,ofs))
+#define SBVAL(p, ofs, v) (SIVAL(p,ofs,(v)&0xFFFFFFFF), SIVAL(p,(ofs)+4,((uint64_t)(v))>>32))
+#define SBVALS(p, ofs, v) (SBVAL(p,ofs,(uint64_t)v))
+
/* now the reverse routines - these are used in nmb packets (mostly) */
#define SREV(x) ((((x)&0xFF)<<8) | (((x)>>8)&0xFF))
#define IREV(x) ((SREV(x)<<16) | (SREV((x)>>16)))
+#define BREV(x) ((IREV(x)<<32) | (IREV((x)>>32)))
#define RSVAL(buf,pos) SREV(SVAL(buf,pos))
#define RSVALS(buf,pos) SREV(SVALS(buf,pos))
#define RIVAL(buf,pos) IREV(IVAL(buf,pos))
#define RIVALS(buf,pos) IREV(IVALS(buf,pos))
+#define RBVAL(buf,pos) BREV(BVAL(buf,pos))
+#define RBVALS(buf,pos) BREV(BVALS(buf,pos))
#define RSSVAL(buf,pos,val) SSVAL(buf,pos,SREV(val))
#define RSSVALS(buf,pos,val) SSVALS(buf,pos,SREV(val))
#define RSIVAL(buf,pos,val) SIVAL(buf,pos,IREV(val))
#define RSIVALS(buf,pos,val) SIVALS(buf,pos,IREV(val))
+#define RSBVAL(buf,pos,val) SBVAL(buf,pos,BREV(val))
+#define RSBVALS(buf,pos,val) SBVALS(buf,pos,BREV(val))
/* Alignment macros. */
#define ALIGN4(p,base) ((p) + ((4 - (PTR_DIFF((p), (base)) & 3)) & 3))
@@ -222,10 +233,4 @@ static __inline__ void st_le32(uint32_t *addr, const uint32_t val)
/* macros for accessing SMB protocol elements */
#define VWV(vwv) ((vwv)*2)
-/* 64 bit macros */
-#define BVAL(p, ofs) (IVAL(p,ofs) | (((uint64_t)IVAL(p,(ofs)+4)) << 32))
-#define BVALS(p, ofs) ((int64_t)BVAL(p,ofs))
-#define SBVAL(p, ofs, v) (SIVAL(p,ofs,(v)&0xFFFFFFFF), SIVAL(p,(ofs)+4,((uint64_t)(v))>>32))
-#define SBVALS(p, ofs, v) (SBVAL(p,ofs,(uint64_t)v))
-
#endif /* _BYTEORDER_H */
diff --git a/lib/util/capability.m4 b/lib/util/capability.m4
deleted file mode 100644
index 2a95a607d5..0000000000
--- a/lib/util/capability.m4
+++ /dev/null
@@ -1,17 +0,0 @@
-AC_CACHE_CHECK([for irix specific capabilities],samba_cv_HAVE_IRIX_SPECIFIC_CAPABILITIES,[
-AC_TRY_RUN([#include <sys/types.h>
-#include <sys/capability.h>
-main() {
- cap_t cap;
- if ((cap = cap_get_proc()) == NULL)
- exit(1);
- cap->cap_effective |= CAP_NETWORK_MGT;
- cap->cap_inheritable |= CAP_NETWORK_MGT;
- cap_set_proc(cap);
- exit(0);
-}
-],
-samba_cv_HAVE_IRIX_SPECIFIC_CAPABILITIES=yes,samba_cv_HAVE_IRIX_SPECIFIC_CAPABILITIES=no,samba_cv_HAVE_IRIX_SPECIFIC_CAPABILITIES=cross)])
-if test x"$samba_cv_HAVE_IRIX_SPECIFIC_CAPABILITIES" = x"yes"; then
- AC_DEFINE(HAVE_IRIX_SPECIFIC_CAPABILITIES,1,[Whether IRIX specific capabilities are available])
-fi
diff --git a/lib/util/charset/charcnv.c b/lib/util/charset/charcnv.c
index a479f44426..dd2c725125 100644
--- a/lib/util/charset/charcnv.c
+++ b/lib/util/charset/charcnv.c
@@ -38,123 +38,6 @@
* @sa lib/iconv.c
*/
-struct smb_iconv_convenience {
- const char *unix_charset;
- const char *dos_charset;
- bool native_iconv;
- smb_iconv_t conv_handles[NUM_CHARSETS][NUM_CHARSETS];
-};
-
-
-/**
- * Return the name of a charset to give to iconv().
- **/
-static const char *charset_name(struct smb_iconv_convenience *ic, charset_t ch)
-{
- switch (ch) {
- case CH_UTF16: return "UTF-16LE";
- case CH_UNIX: return ic->unix_charset;
- case CH_DOS: return ic->dos_charset;
- case CH_UTF8: return "UTF8";
- case CH_UTF16BE: return "UTF-16BE";
- case CH_UTF16MUNGED: return "UTF16_MUNGED";
- default:
- return "ASCII";
- }
-}
-
-/**
- re-initialize iconv conversion descriptors
-**/
-static int close_iconv_convenience(struct smb_iconv_convenience *data)
-{
- unsigned c1, c2;
- for (c1=0;c1<NUM_CHARSETS;c1++) {
- for (c2=0;c2<NUM_CHARSETS;c2++) {
- if (data->conv_handles[c1][c2] != NULL) {
- if (data->conv_handles[c1][c2] != (smb_iconv_t)-1) {
- smb_iconv_close(data->conv_handles[c1][c2]);
- }
- data->conv_handles[c1][c2] = NULL;
- }
- }
- }
-
- return 0;
-}
-
-_PUBLIC_ struct smb_iconv_convenience *smb_iconv_convenience_init(TALLOC_CTX *mem_ctx,
- const char *dos_charset,
- const char *unix_charset,
- bool native_iconv)
-{
- struct smb_iconv_convenience *ret = talloc_zero(mem_ctx,
- struct smb_iconv_convenience);
-
- if (ret == NULL) {
- return NULL;
- }
-
- talloc_set_destructor(ret, close_iconv_convenience);
-
- ret->dos_charset = talloc_strdup(ret, dos_charset);
- ret->unix_charset = talloc_strdup(ret, unix_charset);
- ret->native_iconv = native_iconv;
-
- return ret;
-}
-
-/*
- on-demand initialisation of conversion handles
-*/
-static smb_iconv_t get_conv_handle(struct smb_iconv_convenience *ic,
- charset_t from, charset_t to)
-{
- const char *n1, *n2;
- static bool initialised;
-
- if (initialised == false) {
- initialised = true;
-
-#ifdef LC_ALL
- /* we set back the locale to C to get ASCII-compatible
- toupper/lower functions. For now we do not need
- any other POSIX localisations anyway. When we
- should really need localized string functions one
- day we need to write our own ascii_tolower etc.
- */
- setlocale(LC_ALL, "C");
-#endif
- }
-
- if (ic->conv_handles[from][to]) {
- return ic->conv_handles[from][to];
- }
-
- n1 = charset_name(ic, from);
- n2 = charset_name(ic, to);
-
- ic->conv_handles[from][to] = smb_iconv_open_ex(ic, n2, n1,
- ic->native_iconv);
-
- if (ic->conv_handles[from][to] == (smb_iconv_t)-1) {
- if ((from == CH_DOS || to == CH_DOS) &&
- strcasecmp(charset_name(ic, CH_DOS), "ASCII") != 0) {
- DEBUG(0,("dos charset '%s' unavailable - using ASCII\n",
- charset_name(ic, CH_DOS)));
- ic->dos_charset = "ASCII";
-
- n1 = charset_name(ic, from);
- n2 = charset_name(ic, to);
-
- ic->conv_handles[from][to] =
- smb_iconv_open_ex(ic, n2, n1, ic->native_iconv);
- }
- }
-
- return ic->conv_handles[from][to];
-}
-
/**
* Convert string from one encoding to another, making error checking etc
*
@@ -213,7 +96,8 @@ convert:
reason="Illegal multibyte sequence";
break;
}
- DEBUG(0,("Conversion error: %s(%s)\n",reason,inbuf));
+ DEBUG(0,("Conversion error: %s - ",reason));
+ dump_data(0, (const uint8_t *) inbuf, i_len);
talloc_free(ob);
return (size_t)-1;
}
@@ -348,135 +232,3 @@ _PUBLIC_ bool convert_string_talloc_convenience(TALLOC_CTX *ctx,
return true;
}
-/*
- return the unicode codepoint for the next multi-byte CH_UNIX character
- in the string
-
- also return the number of bytes consumed (which tells the caller
- how many bytes to skip to get to the next CH_UNIX character)
-
- return INVALID_CODEPOINT if the next character cannot be converted
-*/
-_PUBLIC_ codepoint_t next_codepoint_convenience(struct smb_iconv_convenience *ic,
- const char *str, size_t *size)
-{
- /* it cannot occupy more than 4 bytes in UTF16 format */
- uint8_t buf[4];
- smb_iconv_t descriptor;
- size_t ilen_orig;
- size_t ilen;
- size_t olen;
- char *outbuf;
-
- if ((str[0] & 0x80) == 0) {
- *size = 1;
- return (codepoint_t)str[0];
- }
-
- /* we assume that no multi-byte character can take
- more than 5 bytes. This is OK as we only
- support codepoints up to 1M */
- ilen_orig = strnlen(str, 5);
- ilen = ilen_orig;
-
- descriptor = get_conv_handle(ic, CH_UNIX, CH_UTF16);
- if (descriptor == (smb_iconv_t)-1) {
- *size = 1;
- return INVALID_CODEPOINT;
- }
-
- /* this looks a little strange, but it is needed to cope
- with codepoints above 64k */
- olen = 2;
- outbuf = (char *)buf;
- smb_iconv(descriptor, &str, &ilen, &outbuf, &olen);
- if (olen == 2) {
- olen = 4;
- outbuf = (char *)buf;
- smb_iconv(descriptor, &str, &ilen, &outbuf, &olen);
- if (olen == 4) {
- /* we didn't convert any bytes */
- *size = 1;
- return INVALID_CODEPOINT;
- }
- olen = 4 - olen;
- } else {
- olen = 2 - olen;
- }
-
- *size = ilen_orig - ilen;
-
- if (olen == 2) {
- return (codepoint_t)SVAL(buf, 0);
- }
- if (olen == 4) {
- /* decode a 4 byte UTF16 character manually */
- return (codepoint_t)0x10000 +
- (buf[2] | ((buf[3] & 0x3)<<8) |
- (buf[0]<<10) | ((buf[1] & 0x3)<<18));
- }
-
- /* no other length is valid */
- return INVALID_CODEPOINT;
-}
-
-/*
- push a single codepoint into a CH_UNIX string the target string must
- be able to hold the full character, which is guaranteed if it is at
- least 5 bytes in size. The caller may pass less than 5 bytes if they
- are sure the character will fit (for example, you can assume that
- uppercase/lowercase of a character will not add more than 1 byte)
-
- return the number of bytes occupied by the CH_UNIX character, or
- -1 on failure
-*/
-_PUBLIC_ ssize_t push_codepoint_convenience(struct smb_iconv_convenience *ic,
- char *str, codepoint_t c)
-{
- smb_iconv_t descriptor;
- uint8_t buf[4];
- size_t ilen, olen;
- const char *inbuf;
-
- if (c < 128) {
- *str = c;
- return 1;
- }
-
- descriptor = get_conv_handle(ic,
- CH_UTF16, CH_UNIX);
- if (descriptor == (smb_iconv_t)-1) {
- return -1;
- }
-
- if (c < 0x10000) {
- ilen = 2;
- olen = 5;
- inbuf = (char *)buf;
- SSVAL(buf, 0, c);
- smb_iconv(descriptor, &inbuf, &ilen, &str, &olen);
- if (ilen != 0) {
- return -1;
- }
- return 5 - olen;
- }
-
- c -= 0x10000;
-
- buf[0] = (c>>10) & 0xFF;
- buf[1] = (c>>18) | 0xd8;
- buf[2] = c & 0xFF;
- buf[3] = ((c>>8) & 0x3) | 0xdc;
-
- ilen = 4;
- olen = 5;
- inbuf = (char *)buf;
-
- smb_iconv(descriptor, &inbuf, &ilen, &str, &olen);
- if (ilen != 0) {
- return -1;
- }
- return 5 - olen;
-}
-
-
diff --git a/lib/util/charset/charset.h b/lib/util/charset/charset.h
index c9425ef730..474d77e54e 100644
--- a/lib/util/charset/charset.h
+++ b/lib/util/charset/charset.h
@@ -39,6 +39,24 @@ typedef enum {CH_UTF16LE=0, CH_UTF16=0, CH_UNIX, CH_DISPLAY, CH_DOS, CH_UTF8, CH
typedef uint16_t smb_ucs2_t;
+#ifdef WORDS_BIGENDIAN
+#define UCS2_SHIFT 8
+#else
+#define UCS2_SHIFT 0
+#endif
+
+/* turn a 7 bit character into a ucs2 character */
+#define UCS2_CHAR(c) ((c) << UCS2_SHIFT)
+
+/* return an ascii version of a ucs2 character */
+#define UCS2_TO_CHAR(c) (((c) >> UCS2_SHIFT) & 0xff)
+
+/* Copy into a smb_ucs2_t from a possibly unaligned buffer. Return the copied smb_ucs2_t */
+#define COPY_UCS2_CHAR(dest,src) (((unsigned char *)(dest))[0] = ((unsigned char *)(src))[0],\
+ ((unsigned char *)(dest))[1] = ((unsigned char *)(src))[1], (dest))
+
+
+
/*
* for each charset we have a function that pulls from that charset to
* a ucs2 buffer, and a function that pushes to a ucs2 buffer
@@ -102,6 +120,9 @@ struct smb_iconv_convenience;
#define strupper(s) strupper_m(s)
char *strchr_m(const char *s, char c);
+size_t strlen_m_ext(const char *s, charset_t src_charset, charset_t dst_charset);
+size_t strlen_m_ext_term(const char *s, charset_t src_charset,
+ charset_t dst_charset);
size_t strlen_m_term(const char *s);
size_t strlen_m_term_null(const char *s);
size_t strlen_m(const char *s);
@@ -149,24 +170,38 @@ ssize_t iconv_talloc(TALLOC_CTX *mem_ctx,
void *dest);
extern struct smb_iconv_convenience *global_iconv_convenience;
+struct smb_iconv_convenience *get_iconv_convenience(void);
+smb_iconv_t get_conv_handle(struct smb_iconv_convenience *ic,
+ charset_t from, charset_t to);
+const char *charset_name(struct smb_iconv_convenience *ic, charset_t ch);
+codepoint_t next_codepoint_ext(const char *str, charset_t src_charset,
+ size_t *size);
codepoint_t next_codepoint(const char *str, size_t *size);
ssize_t push_codepoint(char *str, codepoint_t c);
/* codepoints */
+codepoint_t next_codepoint_convenience_ext(struct smb_iconv_convenience *ic,
+ const char *str, charset_t src_charset,
+ size_t *size);
codepoint_t next_codepoint_convenience(struct smb_iconv_convenience *ic,
const char *str, size_t *size);
ssize_t push_codepoint_convenience(struct smb_iconv_convenience *ic,
char *str, codepoint_t c);
+
codepoint_t toupper_m(codepoint_t val);
codepoint_t tolower_m(codepoint_t val);
+bool islower_m(codepoint_t val);
+bool isupper_m(codepoint_t val);
int codepoint_cmpi(codepoint_t c1, codepoint_t c2);
/* Iconv convenience functions */
-struct smb_iconv_convenience *smb_iconv_convenience_init(TALLOC_CTX *mem_ctx,
- const char *dos_charset,
- const char *unix_charset,
- bool native_iconv);
+struct smb_iconv_convenience *smb_iconv_convenience_reinit(TALLOC_CTX *mem_ctx,
+ const char *dos_charset,
+ const char *unix_charset,
+ const char *display_charset,
+ bool native_iconv,
+ struct smb_iconv_convenience *old_ic);
bool convert_string_convenience(struct smb_iconv_convenience *ic,
charset_t from, charset_t to,
@@ -188,7 +223,8 @@ smb_iconv_t smb_iconv_open_ex(TALLOC_CTX *mem_ctx, const char *tocode,
const char *fromcode, bool native_iconv);
void load_case_tables(void);
-bool charset_register_backend(const void *_funcs);
+void load_case_tables_library(void);
+bool smb_register_charset(const struct charset_functions *funcs_in);
/*
* Define stub for charset module which implements 8-bit encoding with gaps.
@@ -263,8 +299,11 @@ struct charset_functions CHARSETNAME ## _functions = \
NTSTATUS charset_ ## CHARSETNAME ## _init(void); \
NTSTATUS charset_ ## CHARSETNAME ## _init(void) \
{ \
- return smb_register_charset(& CHARSETNAME ## _functions); \
-} \
+ if (!smb_register_charset(& CHARSETNAME ## _functions)) { \
+ return NT_STATUS_INTERNAL_ERROR; \
+ } \
+ return NT_STATUS_OK; \
+} \
#endif /* __CHARSET_H__ */
diff --git a/lib/util/charset/codepoints.c b/lib/util/charset/codepoints.c
index a940c1baf0..5ee95a8af5 100644
--- a/lib/util/charset/codepoints.c
+++ b/lib/util/charset/codepoints.c
@@ -1,8 +1,10 @@
/*
Unix SMB/CIFS implementation.
- Samba utility functions
- Copyright (C) Andrew Tridgell 1992-2001
+ Character set conversion Extensions
+ Copyright (C) Igor Vergeichik <iverg@mail.ru> 2001
+ Copyright (C) Andrew Tridgell 2001
Copyright (C) Simo Sorce 2001
+ Copyright (C) Jelmer Vernooij 2007
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,11 +18,16 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
+*/
#include "includes.h"
+#include "lib/util/charset/charset.h"
#include "system/locale.h"
-#include "dynconfig/dynconfig.h"
+#include "dynconfig.h"
+
+#ifdef strcasecmp
+#undef strcasecmp
+#endif
/**
* @file
@@ -35,8 +42,10 @@ static void *lowcase_table;
/*******************************************************************
load the case handling tables
+
+This is the function that should be called from library code.
********************************************************************/
-void load_case_tables(void)
+void load_case_tables_library(void)
{
TALLOC_CTX *mem_ctx;
@@ -44,25 +53,37 @@ void load_case_tables(void)
if (!mem_ctx) {
smb_panic("No memory for case_tables");
}
- upcase_table = map_file(talloc_asprintf(mem_ctx, "%s/upcase.dat", dyn_DATADIR), 0x20000);
- lowcase_table = map_file(talloc_asprintf(mem_ctx, "%s/lowcase.dat", dyn_DATADIR), 0x20000);
+ upcase_table = map_file(talloc_asprintf(mem_ctx, "%s/upcase.dat", get_dyn_CODEPAGEDIR()), 0x20000);
+ lowcase_table = map_file(talloc_asprintf(mem_ctx, "%s/lowcase.dat", get_dyn_CODEPAGEDIR()), 0x20000);
talloc_free(mem_ctx);
if (upcase_table == NULL) {
- /* try also under codepages for testing purposes */
- upcase_table = map_file("codepages/upcase.dat", 0x20000);
- if (upcase_table == NULL) {
- upcase_table = (void *)-1;
- }
+ DEBUG(1, ("Failed to load upcase.dat, will use lame ASCII-only case sensitivity rules\n"));
+ upcase_table = (void *)-1;
}
if (lowcase_table == NULL) {
- /* try also under codepages for testing purposes */
- lowcase_table = map_file("codepages/lowcase.dat", 0x20000);
- if (lowcase_table == NULL) {
- lowcase_table = (void *)-1;
- }
+ DEBUG(1, ("Failed to load lowcase.dat, will use lame ASCII-only case sensitivity rules\n"));
+ lowcase_table = (void *)-1;
}
}
+/*******************************************************************
+load the case handling tables
+
+This MUST only be called from main() in application code, never from a
+library. We don't know if the calling program has already done
+setlocale() to another value, and can't tell if they have.
+********************************************************************/
+void load_case_tables(void)
+{
+ /* This is a useful global hook where we can ensure that the
+ * locale is set from the environment. This is needed so that
+ * we can use LOCALE as a codepage */
+#ifdef HAVE_SETLOCALE
+ setlocale(LC_ALL, "");
+#endif
+ load_case_tables_library();
+}
+
/**
Convert a codepoint_t to upper case.
**/
@@ -72,7 +93,7 @@ _PUBLIC_ codepoint_t toupper_m(codepoint_t val)
return toupper(val);
}
if (upcase_table == NULL) {
- load_case_tables();
+ load_case_tables_library();
}
if (upcase_table == (void *)-1) {
return val;
@@ -92,7 +113,7 @@ _PUBLIC_ codepoint_t tolower_m(codepoint_t val)
return tolower(val);
}
if (lowcase_table == NULL) {
- load_case_tables();
+ load_case_tables_library();
}
if (lowcase_table == (void *)-1) {
return val;
@@ -104,6 +125,22 @@ _PUBLIC_ codepoint_t tolower_m(codepoint_t val)
}
/**
+ If we upper cased this character, would we get the same character?
+**/
+_PUBLIC_ bool islower_m(codepoint_t val)
+{
+ return (toupper_m(val) != val);
+}
+
+/**
+ If we lower cased this character, would we get the same character?
+**/
+_PUBLIC_ bool isupper_m(codepoint_t val)
+{
+ return (tolower_m(val) != val);
+}
+
+/**
compare two codepoints case insensitively
*/
_PUBLIC_ int codepoint_cmpi(codepoint_t c1, codepoint_t c2)
@@ -116,3 +153,352 @@ _PUBLIC_ int codepoint_cmpi(codepoint_t c1, codepoint_t c2)
}
+struct smb_iconv_convenience {
+ TALLOC_CTX *child_ctx;
+ const char *unix_charset;
+ const char *dos_charset;
+ const char *display_charset;
+ bool native_iconv;
+ smb_iconv_t conv_handles[NUM_CHARSETS][NUM_CHARSETS];
+};
+
+struct smb_iconv_convenience *global_iconv_convenience = NULL;
+
+struct smb_iconv_convenience *get_iconv_convenience(void)
+{
+ if (global_iconv_convenience == NULL)
+ global_iconv_convenience = smb_iconv_convenience_reinit(talloc_autofree_context(),
+ "ASCII", "UTF-8", "ASCII", true, NULL);
+ return global_iconv_convenience;
+}
+
+/**
+ * Return the name of a charset to give to iconv().
+ **/
+const char *charset_name(struct smb_iconv_convenience *ic, charset_t ch)
+{
+ switch (ch) {
+ case CH_UTF16: return "UTF-16LE";
+ case CH_UNIX: return ic->unix_charset;
+ case CH_DOS: return ic->dos_charset;
+ case CH_DISPLAY: return ic->display_charset;
+ case CH_UTF8: return "UTF8";
+ case CH_UTF16BE: return "UTF-16BE";
+ case CH_UTF16MUNGED: return "UTF16_MUNGED";
+ default:
+ return "ASCII";
+ }
+}
+
+/**
+ re-initialize iconv conversion descriptors
+**/
+static int close_iconv_convenience(struct smb_iconv_convenience *data)
+{
+ unsigned c1, c2;
+ for (c1=0;c1<NUM_CHARSETS;c1++) {
+ for (c2=0;c2<NUM_CHARSETS;c2++) {
+ if (data->conv_handles[c1][c2] != NULL) {
+ if (data->conv_handles[c1][c2] != (smb_iconv_t)-1) {
+ smb_iconv_close(data->conv_handles[c1][c2]);
+ }
+ data->conv_handles[c1][c2] = NULL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const char *map_locale(const char *charset)
+{
+ if (strcmp(charset, "LOCALE") != 0) {
+ return charset;
+ }
+#if defined(HAVE_NL_LANGINFO) && defined(CODESET)
+ {
+ const char *ln;
+ smb_iconv_t handle;
+
+ ln = nl_langinfo(CODESET);
+ if (ln == NULL) {
+ DEBUG(1,("Unable to determine charset for LOCALE - using ASCII\n"));
+ return "ASCII";
+ }
+ /* Check whether the charset name is supported
+ by iconv */
+ handle = smb_iconv_open(ln, "UCS-2LE");
+ if (handle == (smb_iconv_t) -1) {
+ DEBUG(5,("Locale charset '%s' unsupported, using ASCII instead\n", ln));
+ return "ASCII";
+ } else {
+ DEBUG(5,("Substituting charset '%s' for LOCALE\n", ln));
+ smb_iconv_close(handle);
+ }
+ return ln;
+ }
+#endif
+ return "ASCII";
+}
+
+/*
+ the old_ic is passed in here as the smb_iconv_convenience structure
+ is used as a global pointer in some places (eg. python modules). We
+ don't want to invalidate those global pointers, but we do want to
+ update them with the right charset information when loadparm
+ runs. To do that we need to re-use the structure pointer, but
+ re-fill the elements in the structure with the updated values
+ */
+_PUBLIC_ struct smb_iconv_convenience *smb_iconv_convenience_reinit(TALLOC_CTX *mem_ctx,
+ const char *dos_charset,
+ const char *unix_charset,
+ const char *display_charset,
+ bool native_iconv,
+ struct smb_iconv_convenience *old_ic)
+{
+ struct smb_iconv_convenience *ret;
+
+ display_charset = map_locale(display_charset);
+
+ if (old_ic != NULL) {
+ ret = old_ic;
+ close_iconv_convenience(ret);
+ talloc_free(ret->child_ctx);
+ ZERO_STRUCTP(ret);
+ } else {
+ ret = talloc_zero(mem_ctx, struct smb_iconv_convenience);
+ }
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ /* we use a child context to allow us to free all ptrs without
+ freeing the structure itself */
+ ret->child_ctx = talloc_new(ret);
+ if (ret->child_ctx == NULL) {
+ return NULL;
+ }
+
+ talloc_set_destructor(ret, close_iconv_convenience);
+
+ ret->dos_charset = talloc_strdup(ret->child_ctx, dos_charset);
+ ret->unix_charset = talloc_strdup(ret->child_ctx, unix_charset);
+ ret->display_charset = talloc_strdup(ret->child_ctx, display_charset);
+ ret->native_iconv = native_iconv;
+
+ return ret;
+}
+
+/*
+ on-demand initialisation of conversion handles
+*/
+smb_iconv_t get_conv_handle(struct smb_iconv_convenience *ic,
+ charset_t from, charset_t to)
+{
+ const char *n1, *n2;
+ static bool initialised;
+
+ if (initialised == false) {
+ initialised = true;
+ }
+
+ if (ic->conv_handles[from][to]) {
+ return ic->conv_handles[from][to];
+ }
+
+ n1 = charset_name(ic, from);
+ n2 = charset_name(ic, to);
+
+ ic->conv_handles[from][to] = smb_iconv_open_ex(ic, n2, n1,
+ ic->native_iconv);
+
+ if (ic->conv_handles[from][to] == (smb_iconv_t)-1) {
+ if ((from == CH_DOS || to == CH_DOS) &&
+ strcasecmp(charset_name(ic, CH_DOS), "ASCII") != 0) {
+ DEBUG(0,("dos charset '%s' unavailable - using ASCII\n",
+ charset_name(ic, CH_DOS)));
+ ic->dos_charset = "ASCII";
+
+ n1 = charset_name(ic, from);
+ n2 = charset_name(ic, to);
+
+ ic->conv_handles[from][to] =
+ smb_iconv_open_ex(ic, n2, n1, ic->native_iconv);
+ }
+ }
+
+ return ic->conv_handles[from][to];
+}
+
+/**
+ * Return the unicode codepoint for the next character in the input
+ * string in the given src_charset.
+ * The unicode codepoint (codepoint_t) is an unsigned 32 bit value.
+ *
+ * Also return the number of bytes consumed (which tells the caller
+ * how many bytes to skip to get to the next src_charset-character).
+ *
+ * This is implemented (in the non-ascii-case) by first converting the
+ * next character in the input string to UTF16_LE and then calculating
+ * the unicode codepoint from that.
+ *
+ * Return INVALID_CODEPOINT if the next character cannot be converted.
+ */
+_PUBLIC_ codepoint_t next_codepoint_convenience_ext(
+ struct smb_iconv_convenience *ic,
+ const char *str, charset_t src_charset,
+ size_t *bytes_consumed)
+{
+ /* it cannot occupy more than 4 bytes in UTF16 format */
+ uint8_t buf[4];
+ smb_iconv_t descriptor;
+ size_t ilen_orig;
+ size_t ilen;
+ size_t olen;
+ char *outbuf;
+
+ if ((str[0] & 0x80) == 0) {
+ *bytes_consumed = 1;
+ return (codepoint_t)str[0];
+ }
+
+ /*
+ * we assume that no multi-byte character can take more than 5 bytes.
+ * This is OK as we only support codepoints up to 1M (U+100000)
+ */
+ ilen_orig = strnlen(str, 5);
+ ilen = ilen_orig;
+
+ descriptor = get_conv_handle(ic, src_charset, CH_UTF16);
+ if (descriptor == (smb_iconv_t)-1) {
+ *bytes_consumed = 1;
+ return INVALID_CODEPOINT;
+ }
+
+ /*
+ * this looks a little strange, but it is needed to cope with
+ * codepoints above 64k (U+10000) which are encoded as per RFC2781.
+ */
+ olen = 2;
+ outbuf = (char *)buf;
+ smb_iconv(descriptor, &str, &ilen, &outbuf, &olen);
+ if (olen == 2) {
+ olen = 4;
+ outbuf = (char *)buf;
+ smb_iconv(descriptor, &str, &ilen, &outbuf, &olen);
+ if (olen == 4) {
+ /* we didn't convert any bytes */
+ *bytes_consumed = 1;
+ return INVALID_CODEPOINT;
+ }
+ olen = 4 - olen;
+ } else {
+ olen = 2 - olen;
+ }
+
+ *bytes_consumed = ilen_orig - ilen;
+
+ if (olen == 2) {
+ return (codepoint_t)SVAL(buf, 0);
+ }
+ if (olen == 4) {
+ /* decode a 4 byte UTF16 character manually */
+ return (codepoint_t)0x10000 +
+ (buf[2] | ((buf[3] & 0x3)<<8) |
+ (buf[0]<<10) | ((buf[1] & 0x3)<<18));
+ }
+
+ /* no other length is valid */
+ return INVALID_CODEPOINT;
+}
+
+/*
+ return the unicode codepoint for the next multi-byte CH_UNIX character
+ in the string
+
+ also return the number of bytes consumed (which tells the caller
+ how many bytes to skip to get to the next CH_UNIX character)
+
+ return INVALID_CODEPOINT if the next character cannot be converted
+*/
+_PUBLIC_ codepoint_t next_codepoint_convenience(struct smb_iconv_convenience *ic,
+ const char *str, size_t *size)
+{
+ return next_codepoint_convenience_ext(ic, str, CH_UNIX, size);
+}
+
+/*
+ push a single codepoint into a CH_UNIX string the target string must
+ be able to hold the full character, which is guaranteed if it is at
+ least 5 bytes in size. The caller may pass less than 5 bytes if they
+ are sure the character will fit (for example, you can assume that
+ uppercase/lowercase of a character will not add more than 1 byte)
+
+ return the number of bytes occupied by the CH_UNIX character, or
+ -1 on failure
+*/
+_PUBLIC_ ssize_t push_codepoint_convenience(struct smb_iconv_convenience *ic,
+ char *str, codepoint_t c)
+{
+ smb_iconv_t descriptor;
+ uint8_t buf[4];
+ size_t ilen, olen;
+ const char *inbuf;
+
+ if (c < 128) {
+ *str = c;
+ return 1;
+ }
+
+ descriptor = get_conv_handle(ic,
+ CH_UTF16, CH_UNIX);
+ if (descriptor == (smb_iconv_t)-1) {
+ return -1;
+ }
+
+ if (c < 0x10000) {
+ ilen = 2;
+ olen = 5;
+ inbuf = (char *)buf;
+ SSVAL(buf, 0, c);
+ smb_iconv(descriptor, &inbuf, &ilen, &str, &olen);
+ if (ilen != 0) {
+ return -1;
+ }
+ return 5 - olen;
+ }
+
+ c -= 0x10000;
+
+ buf[0] = (c>>10) & 0xFF;
+ buf[1] = (c>>18) | 0xd8;
+ buf[2] = c & 0xFF;
+ buf[3] = ((c>>8) & 0x3) | 0xdc;
+
+ ilen = 4;
+ olen = 5;
+ inbuf = (char *)buf;
+
+ smb_iconv(descriptor, &inbuf, &ilen, &str, &olen);
+ if (ilen != 0) {
+ return -1;
+ }
+ return 5 - olen;
+}
+
+_PUBLIC_ codepoint_t next_codepoint_ext(const char *str, charset_t src_charset,
+ size_t *size)
+{
+ return next_codepoint_convenience_ext(get_iconv_convenience(), str,
+ src_charset, size);
+}
+
+_PUBLIC_ codepoint_t next_codepoint(const char *str, size_t *size)
+{
+ return next_codepoint_convenience(get_iconv_convenience(), str, size);
+}
+
+_PUBLIC_ ssize_t push_codepoint(char *str, codepoint_t c)
+{
+ return push_codepoint_convenience(get_iconv_convenience(), str, c);
+}
diff --git a/lib/util/charset/config.m4 b/lib/util/charset/config.m4
deleted file mode 100644
index 453de9fe26..0000000000
--- a/lib/util/charset/config.m4
+++ /dev/null
@@ -1,86 +0,0 @@
-dnl SMB_CHECK_ICONV(hdr, msg, action-if-found,action-if-not-found)
-AC_DEFUN(SMB_CHECK_ICONV,[
- AC_MSG_CHECKING($2)
- AC_TRY_RUN([#include <stdlib.h>
-#include <$1>
-
-int main()
-{
- iconv_t cd = iconv_open("ASCII","UCS-2LE");
- if (cd == 0 || cd == (iconv_t)-1) return -1;
- return 0;
-}
- ],
- [AC_MSG_RESULT(yes); $3],
- [AC_MSG_RESULT(no); $4],
- [AC_MSG_RESULT(cross); $4])
-])
-
-dnl SMB_CHECK_ICONV_DIR(dir,action-if-found,action-if-not-found)
-AC_DEFUN(SMB_CHECK_ICONV_DIR,
-[
- save_CPPFLAGS="$CPPFLAGS"
- save_LDFLAGS="$LDFLAGS"
- save_LIBS="$LIBS"
- CPPFLAGS="-I$1/include"
- LDFLAGS="-L$1/lib"
- LIBS=-liconv
-
- SMB_CHECK_ICONV(iconv.h,Whether iconv.h is present,[ AC_DEFINE(HAVE_ICONV_H,1,[Whether iconv.h is present]) $2 ], [
- LIBS=-lgiconv
- SMB_CHECK_ICONV(giconv.h,Whether giconv.h is present, [AC_DEFINE(HAVE_GICONV_H,1,[Whether giconv.h is present]) $2],[$3])
- ])
-
- CPPFLAGS="$save_CPPFLAGS"
- LDFLAGS="$save_LDFLAGS"
- LIBS="$save_LIBS"
-])
-
-ICONV_FOUND=no
-LOOK_DIRS="/usr /usr/local /sw"
-AC_ARG_WITH(libiconv,
-[ --with-libiconv=BASEDIR Use libiconv in BASEDIR/lib and BASEDIR/include (default=auto) ],
-[
- if test "$withval" = "no" ; then
- AC_MSG_ERROR(I won't take no for an answer)
- else
- if test "$withval" != "yes" ; then
- SMB_CHECK_ICONV_DIR($withval, [
- ICONV_FOUND=yes;
- ICONV_CPPFLAGS="$CPPFLAGS"
- ICONV_LIBS="$LIBS"
- ICONV_LDFLAGS="$LDFLAGS"
- ], [AC_MSG_ERROR([No iconv library found in $withval])])
- fi
- fi
-])
-
-if test x$ICONV_FOUND = xno; then
- SMB_CHECK_ICONV(iconv.h,
- [Whether iconv.h is present],
- [AC_DEFINE(HAVE_ICONV_H,1,[Whether iconv.h is present]) ICONV_FOUND=yes])
-fi
-
-for i in $LOOK_DIRS ; do
- if test x$ICONV_FOUND = xyes; then
- break
- fi
-
- SMB_CHECK_ICONV_DIR($i, [
- ICONV_FOUND=yes
- ICONV_CPPFLAGS="$CPPFLAGS"
- ICONV_LIBS="$LIBS"
- ICONV_LDFLAGS="$LDFLAGS"
- ], [])
-done
-
-if test x"$ICONV_FOUND" = x"no"; then
- AC_MSG_WARN([Sufficient support for iconv function was not found.
- Install libiconv from http://www.gnu.org/software/libiconv/ for better charset compatibility!])
- SMB_ENABLE(ICONV,NO)
-else
- AC_DEFINE(HAVE_NATIVE_ICONV,1,[Whether external iconv is available])
- SMB_ENABLE(ICONV,YES)
-fi
-
-SMB_EXT_LIB(ICONV,[${ICONV_LIBS}],[${ICONV_CFLAGS}],[${ICONV_CPPFLAGS}],[${ICONV_LDFLAGS}])
diff --git a/lib/util/charset/config.mk b/lib/util/charset/config.mk
deleted file mode 100644
index 952c13a84d..0000000000
--- a/lib/util/charset/config.mk
+++ /dev/null
@@ -1,11 +0,0 @@
-################################################
-# Start SUBSYSTEM CHARSET
-[SUBSYSTEM::CHARSET]
-PUBLIC_DEPENDENCIES = ICONV
-PRIVATE_DEPENDENCIES = DYNCONFIG
-# End SUBSYSTEM CHARSET
-################################################
-
-CHARSET_OBJ_FILES = $(addprefix $(libcharsetsrcdir)/, iconv.o charcnv.o util_unistr.o codepoints.o)
-
-PUBLIC_HEADERS += $(libcharsetsrcdir)/charset.h
diff --git a/lib/util/charset/iconv.c b/lib/util/charset/iconv.c
index 8256dc665c..cee2d26aa4 100644
--- a/lib/util/charset/iconv.c
+++ b/lib/util/charset/iconv.c
@@ -23,6 +23,13 @@
#include "system/iconv.h"
#include "system/filesys.h"
+#ifdef strcasecmp
+#undef strcasecmp
+#endif
+
+#ifdef static_decl_charset
+static_decl_charset;
+#endif
/**
* @file
@@ -49,6 +56,7 @@
static size_t ascii_pull (void *,const char **, size_t *, char **, size_t *);
static size_t ascii_push (void *,const char **, size_t *, char **, size_t *);
+static size_t latin1_push(void *,const char **, size_t *, char **, size_t *);
static size_t utf8_pull (void *,const char **, size_t *, char **, size_t *);
static size_t utf8_push (void *,const char **, size_t *, char **, size_t *);
static size_t utf16_munged_pull(void *,const char **, size_t *, char **, size_t *);
@@ -72,29 +80,64 @@ static const struct charset_functions builtin_functions[] = {
{"UTF16_MUNGED", utf16_munged_pull, iconv_copy},
{"ASCII", ascii_pull, ascii_push},
+ {"646", ascii_pull, ascii_push},
+ {"ISO-8859-1", ascii_pull, latin1_push},
{"UCS2-HEX", ucs2hex_pull, ucs2hex_push}
};
static struct charset_functions *charsets = NULL;
-bool charset_register_backend(const void *_funcs)
+static struct charset_functions *find_charset_functions(const char *name)
{
- struct charset_functions *funcs = (struct charset_functions *)memdup(_funcs,sizeof(struct charset_functions));
struct charset_functions *c;
/* Check whether we already have this charset... */
for (c = charsets; c != NULL; c = c->next) {
- if(!strcasecmp(c->name, funcs->name)) {
- DEBUG(2, ("Duplicate charset %s, not registering\n", funcs->name));
- return false;
+ if(strcasecmp(c->name, name) == 0) {
+ return c;
}
+ c = c->next;
+ }
+
+ return NULL;
+}
+
+bool smb_register_charset(const struct charset_functions *funcs_in)
+{
+ struct charset_functions *funcs;
+
+ DEBUG(5, ("Attempting to register new charset %s\n", funcs_in->name));
+ /* Check whether we already have this charset... */
+ if (find_charset_functions(funcs_in->name)) {
+ DEBUG(0, ("Duplicate charset %s, not registering\n", funcs_in->name));
+ return false;
+ }
+
+ funcs = talloc(NULL, struct charset_functions);
+ if (!funcs) {
+ DEBUG(0, ("Out of memory duplicating charset %s\n", funcs_in->name));
+ return false;
}
+ *funcs = *funcs_in;
funcs->next = funcs->prev = NULL;
+ DEBUG(5, ("Registered charset %s\n", funcs->name));
DLIST_ADD(charsets, funcs);
return true;
}
+static void lazy_initialize_iconv(void)
+{
+ static bool initialized;
+
+#ifdef static_init_charset
+ if (!initialized) {
+ static_init_charset;
+ initialized = true;
+ }
+#endif
+}
+
#ifdef HAVE_NATIVE_ICONV
/* if there was an error then reset the internal state,
this ensures that we don't have a shift state remaining for
@@ -158,8 +201,8 @@ static bool is_utf16(const char *name)
strcasecmp(name, "UTF-16LE") == 0;
}
-int smb_iconv_t_destructor(smb_iconv_t hwd)
-{
+static int smb_iconv_t_destructor(smb_iconv_t hwd)
+{
#ifdef HAVE_NATIVE_ICONV
if (hwd->cd_pull != NULL && hwd->cd_pull != (iconv_t)-1)
iconv_close(hwd->cd_pull);
@@ -179,6 +222,8 @@ _PUBLIC_ smb_iconv_t smb_iconv_open_ex(TALLOC_CTX *mem_ctx, const char *tocode,
const struct charset_functions *from=NULL, *to=NULL;
int i;
+ lazy_initialize_iconv();
+
ret = (smb_iconv_t)talloc_named(mem_ctx,
sizeof(*ret),
"iconv(%s,%s)", tocode, fromcode);
@@ -260,9 +305,6 @@ _PUBLIC_ smb_iconv_t smb_iconv_open_ex(TALLOC_CTX *mem_ctx, const char *tocode,
}
if (is_utf16(tocode)) {
ret->direct = sys_iconv;
- /* could be set just above - so we need to close iconv */
- if (ret->cd_direct != NULL && ret->cd_direct != (iconv_t)-1)
- iconv_close(ret->cd_direct);
ret->cd_direct = ret->cd_pull;
ret->cd_pull = NULL;
return ret;
@@ -285,7 +327,7 @@ failed:
*/
_PUBLIC_ smb_iconv_t smb_iconv_open(const char *tocode, const char *fromcode)
{
- return smb_iconv_open_ex(talloc_autofree_context(), tocode, fromcode, true);
+ return smb_iconv_open_ex(NULL, tocode, fromcode, true);
}
/*
@@ -350,12 +392,38 @@ static size_t ascii_push(void *cd, const char **inbuf, size_t *inbytesleft,
return ir_count;
}
+static size_t latin1_push(void *cd, const char **inbuf, size_t *inbytesleft,
+ char **outbuf, size_t *outbytesleft)
+{
+ int ir_count=0;
+
+ while (*inbytesleft >= 2 && *outbytesleft >= 1) {
+ (*outbuf)[0] = (*inbuf)[0];
+ if ((*inbuf)[1]) ir_count++;
+ (*inbytesleft) -= 2;
+ (*outbytesleft) -= 1;
+ (*inbuf) += 2;
+ (*outbuf) += 1;
+ }
+
+ if (*inbytesleft == 1) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (*inbytesleft > 1) {
+ errno = E2BIG;
+ return -1;
+ }
+
+ return ir_count;
+}
static size_t ucs2hex_pull(void *cd, const char **inbuf, size_t *inbytesleft,
char **outbuf, size_t *outbytesleft)
{
while (*inbytesleft >= 1 && *outbytesleft >= 2) {
- uint_t v;
+ unsigned int v;
if ((*inbuf)[0] != '@') {
/* seven bit ascii case */
diff --git a/lib/util/charset/tests/charset.c b/lib/util/charset/tests/charset.c
index 06acda80ab..72fd11b128 100644
--- a/lib/util/charset/tests/charset.c
+++ b/lib/util/charset/tests/charset.c
@@ -246,7 +246,7 @@ static bool test_count_chars_m(struct torture_context *tctx)
struct torture_suite *torture_local_charset(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "CHARSET");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "charset");
torture_suite_add_simple_test(suite, "toupper_m", test_toupper_m);
torture_suite_add_simple_test(suite, "tolower_m", test_tolower_m);
diff --git a/lib/util/charset/tests/iconv.c b/lib/util/charset/tests/iconv.c
index 3e2546dc01..a1a0d97a7e 100644
--- a/lib/util/charset/tests/iconv.c
+++ b/lib/util/charset/tests/iconv.c
@@ -35,7 +35,7 @@ static bool iconv_untestable(struct torture_context *tctx)
{
iconv_t cd;
- if (!lp_parm_bool(tctx->lp_ctx, NULL, "iconv", "native", true))
+ if (!lpcfg_parm_bool(tctx->lp_ctx, NULL, "iconv", "native", true))
torture_skip(tctx, "system iconv disabled - skipping test");
cd = iconv_open("UTF-16LE", "UCS-4LE");
@@ -134,7 +134,7 @@ static bool test_buffer(struct torture_context *test,
{
uint8_t buf1[1000], buf2[1000], buf3[1000];
size_t outsize1, outsize2, outsize3;
- const char *ptr_in;
+ char *ptr_in;
char *ptr_out;
size_t size_in1, size_in2, size_in3;
size_t ret1, ret2, ret3, len1, len2;
@@ -158,31 +158,31 @@ static bool test_buffer(struct torture_context *test,
"failed to open %s to UTF-16LE",
charset));
}
- cd2 = smb_iconv_open_ex(test, charset, "UTF-16LE", lp_parm_bool(test->lp_ctx, NULL, "iconv", "native", true));
- cd3 = smb_iconv_open_ex(test, "UTF-16LE", charset, lp_parm_bool(test->lp_ctx, NULL, "iconv", "native", true));
+ cd2 = smb_iconv_open_ex(test, charset, "UTF-16LE", lpcfg_parm_bool(test->lp_ctx, NULL, "iconv", "native", true));
+ cd3 = smb_iconv_open_ex(test, "UTF-16LE", charset, lpcfg_parm_bool(test->lp_ctx, NULL, "iconv", "native", true));
last_charset = charset;
}
/* internal convert to charset - placing result in buf1 */
- ptr_in = (const char *)inbuf;
+ ptr_in = (char *)inbuf;
ptr_out = (char *)buf1;
size_in1 = size;
outsize1 = sizeof(buf1);
memset(ptr_out, 0, outsize1);
errno = 0;
- ret1 = smb_iconv(cd2, &ptr_in, &size_in1, &ptr_out, &outsize1);
+ ret1 = smb_iconv(cd2, (const char **) &ptr_in, &size_in1, &ptr_out, &outsize1);
errno1 = errno;
/* system convert to charset - placing result in buf2 */
- ptr_in = (const char *)inbuf;
+ ptr_in = (char *)inbuf;
ptr_out = (char *)buf2;
size_in2 = size;
outsize2 = sizeof(buf2);
memset(ptr_out, 0, outsize2);
errno = 0;
- ret2 = iconv(cd, discard_const_p(char *, &ptr_in), &size_in2, &ptr_out, &outsize2);
+ ret2 = iconv(cd, &ptr_in, &size_in2, &ptr_out, &outsize2);
errno2 = errno;
len1 = sizeof(buf1) - outsize1;
@@ -236,13 +236,13 @@ static bool test_buffer(struct torture_context *test,
/* convert back to UTF-16, putting result in buf3 */
size = size - size_in1;
- ptr_in = (const char *)buf1;
+ ptr_in = (char *)buf1;
ptr_out = (char *)buf3;
size_in3 = len1;
outsize3 = sizeof(buf3);
memset(ptr_out, 0, outsize3);
- ret3 = smb_iconv(cd3, &ptr_in, &size_in3, &ptr_out, &outsize3);
+ ret3 = smb_iconv(cd3, (const char **) &ptr_in, &size_in3, &ptr_out, &outsize3);
/* we only internally support the first 1M codepoints */
if (outsize3 != sizeof(buf3) - size &&
@@ -289,7 +289,7 @@ static bool test_codepoint(struct torture_context *tctx, unsigned int codepoint)
size_t size, size2;
codepoint_t c;
- size = push_codepoint_convenience(lp_iconv_convenience(tctx->lp_ctx), (char *)buf, codepoint);
+ size = push_codepoint_convenience(lpcfg_iconv_convenience(tctx->lp_ctx), (char *)buf, codepoint);
torture_assert(tctx, size != -1 || (codepoint >= 0xd800 && codepoint <= 0x10000),
"Invalid Codepoint range");
@@ -300,7 +300,7 @@ static bool test_codepoint(struct torture_context *tctx, unsigned int codepoint)
buf[size+2] = random();
buf[size+3] = random();
- c = next_codepoint_convenience(lp_iconv_convenience(tctx->lp_ctx), (char *)buf, &size2);
+ c = next_codepoint_convenience(lpcfg_iconv_convenience(tctx->lp_ctx), (char *)buf, &size2);
torture_assert(tctx, c == codepoint,
talloc_asprintf(tctx,
@@ -451,7 +451,7 @@ static bool test_string2key(struct torture_context *tctx)
struct torture_suite *torture_local_iconv(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "ICONV");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "iconv");
torture_suite_add_simple_test(suite, "string2key",
test_string2key);
diff --git a/lib/util/charset/util_unistr.c b/lib/util/charset/util_unistr.c
index 045aa4a3e3..760be7769a 100644
--- a/lib/util/charset/util_unistr.c
+++ b/lib/util/charset/util_unistr.c
@@ -21,15 +21,6 @@
#include "includes.h"
#include "system/locale.h"
-struct smb_iconv_convenience *global_iconv_convenience = NULL;
-
-static inline struct smb_iconv_convenience *get_iconv_convenience(void)
-{
- if (global_iconv_convenience == NULL)
- global_iconv_convenience = smb_iconv_convenience_init(talloc_autofree_context(), "ASCII", "UTF-8", true);
- return global_iconv_convenience;
-}
-
/**
Case insensitive string compararison
**/
@@ -70,52 +61,6 @@ _PUBLIC_ int strcasecmp_m(const char *s1, const char *s2)
}
/**
- * Get the next token from a string, return False if none found.
- * Handles double-quotes.
- *
- * Based on a routine by GJC@VILLAGE.COM.
- * Extensively modified by Andrew.Tridgell@anu.edu.au
- **/
-_PUBLIC_ bool next_token(const char **ptr,char *buff, const char *sep, size_t bufsize)
-{
- const char *s;
- bool quoted;
- size_t len=1;
-
- if (!ptr)
- return false;
-
- s = *ptr;
-
- /* default to simple separators */
- if (!sep)
- sep = " \t\n\r";
-
- /* find the first non sep char */
- while (*s && strchr_m(sep,*s))
- s++;
-
- /* nothing left? */
- if (!*s)
- return false;
-
- /* copy over the token */
- for (quoted = false; len < bufsize && *s && (quoted || !strchr_m(sep,*s)); s++) {
- if (*s == '\"') {
- quoted = !quoted;
- } else {
- len++;
- *buff++ = *s;
- }
- }
-
- *ptr = (*s) ? s+1 : s;
- *buff = 0;
-
- return true;
-}
-
-/**
Case insensitive string compararison, length limited
**/
_PUBLIC_ int strncasecmp_m(const char *s1, const char *s2, size_t n)
@@ -248,11 +193,12 @@ _PUBLIC_ char *alpha_strcpy(char *dest, const char *src, const char *other_safe_
}
/**
- Count the number of UCS2 characters in a string. Normally this will
- be the same as the number of bytes in a string for single byte strings,
- but will be different for multibyte.
-**/
-_PUBLIC_ size_t strlen_m(const char *s)
+ * Calculate the number of units (8 or 16-bit, depending on the
+ * destination charset), that would be needed to convert the input
+ * string which is expected to be in src_charset encoding to the
+ * destination charset (which should be a unicode charset).
+ */
+_PUBLIC_ size_t strlen_m_ext(const char *s, charset_t src_charset, charset_t dst_charset)
{
size_t count = 0;
struct smb_iconv_convenience *ic = get_iconv_convenience();
@@ -272,18 +218,68 @@ _PUBLIC_ size_t strlen_m(const char *s)
while (*s) {
size_t c_size;
- codepoint_t c = next_codepoint_convenience(ic, s, &c_size);
- if (c < 0x10000) {
+ codepoint_t c = next_codepoint_convenience_ext(ic, s, src_charset, &c_size);
+ s += c_size;
+
+ switch (dst_charset) {
+ case CH_UTF16LE:
+ case CH_UTF16BE:
+ case CH_UTF16MUNGED:
+ if (c < 0x10000) {
+ count += 1;
+ } else {
+ count += 2;
+ }
+ break;
+ case CH_UTF8:
+ /*
+ * this only checks ranges, and does not
+ * check for invalid codepoints
+ */
+ if (c < 0x80) {
+ count += 1;
+ } else if (c < 0x800) {
+ count += 2;
+ } else if (c < 0x1000) {
+ count += 3;
+ } else {
+ count += 4;
+ }
+ break;
+ default:
+ /*
+ * non-unicode encoding:
+ * assume that each codepoint fits into
+ * one unit in the destination encoding.
+ */
count += 1;
- } else {
- count += 2;
}
- s += c_size;
}
return count;
}
+_PUBLIC_ size_t strlen_m_ext_term(const char *s, const charset_t src_charset,
+ const charset_t dst_charset)
+{
+ if (!s) {
+ return 0;
+ }
+ return strlen_m_ext(s, src_charset, dst_charset) + 1;
+}
+
+/**
+ * Calculate the number of 16-bit units that would be needed to convert
+ * the input string which is expected to be in CH_UNIX encoding to UTF16.
+ *
+ * This will be the same as the number of bytes in a string for single
+ * byte strings, but will be different for multibyte.
+ */
+_PUBLIC_ size_t strlen_m(const char *s)
+{
+ return strlen_m_ext(s, CH_UNIX, CH_UTF16LE);
+}
+
/**
Work out the number of multibyte chars in a string, including the NULL
terminator.
@@ -430,6 +426,10 @@ _PUBLIC_ char *strlower_talloc(TALLOC_CTX *ctx, const char *src)
char *dest;
struct smb_iconv_convenience *iconv_convenience = get_iconv_convenience();
+ if(src == NULL) {
+ return NULL;
+ }
+
/* this takes advantage of the fact that upper/lower can't
change the length of a character by more than 1 byte */
dest = talloc_array(ctx, char, 2*(strlen(src))+1);
@@ -987,13 +987,3 @@ _PUBLIC_ bool convert_string_talloc(TALLOC_CTX *ctx,
allow_badcharcnv);
}
-
-_PUBLIC_ codepoint_t next_codepoint(const char *str, size_t *size)
-{
- return next_codepoint_convenience(get_iconv_convenience(), str, size);
-}
-
-_PUBLIC_ ssize_t push_codepoint(char *str, codepoint_t c)
-{
- return push_codepoint_convenience(get_iconv_convenience(), str, c);
-}
diff --git a/lib/util/charset/wscript_build b/lib/util/charset/wscript_build
new file mode 100644
index 0000000000..ab7cfc412d
--- /dev/null
+++ b/lib/util/charset/wscript_build
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+
+if bld.env._SAMBA_BUILD_ == 4:
+ bld.SAMBA_SUBSYSTEM('CHARSET',
+ source='charcnv.c util_unistr.c',
+ public_deps='CODEPOINTS',
+ public_headers='charset.h',
+ )
+
+bld.SAMBA_SUBSYSTEM('ICONV_WRAPPER',
+ source='iconv.c',
+ public_deps='iconv replace talloc')
+
+bld.SAMBA_SUBSYSTEM('CODEPOINTS',
+ source='codepoints.c',
+ deps='DYNCONFIG ICONV_WRAPPER'
+ )
diff --git a/lib/util/charset/wscript_configure b/lib/util/charset/wscript_configure
new file mode 100644
index 0000000000..98756fee2b
--- /dev/null
+++ b/lib/util/charset/wscript_configure
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# rather strangely, we need to look for libiconv before checking libc
+# as the external libiconv can use a macro to override iconv_open to libiconv_open
+# and then we may find the wrong iconv.h later due to other packages looking
+# in /usr/local
+# We check for the lib iconv when building a shared lib has some compiler/linker
+# managed to link when specifying -liconv a executable even if there is no
+# libiconv.so or libiconv.a
+
+conf.CHECK_LIB(libs="iconv", shlib=True)
+
+if (conf.CHECK_FUNCS_IN('iconv_open', 'iconv', checklibc=False, headers='iconv.h') or
+ conf.CHECK_FUNCS('iconv_open', headers='iconv.h')):
+ if conf.env['HAVE_LIBICONV']:
+ if conf.CHECK_FUNCS('mbrtowc', headers='wchar.h'):
+ conf.DEFINE('HAVE_NATIVE_ICONV', 1)
+ elif conf.env.LIB_ICONV:
+ del conf.env['LIB_ICONV']
+ else:
+ conf.DEFINE('HAVE_NATIVE_ICONV', 1)
diff --git a/lib/util/config.mk b/lib/util/config.mk
deleted file mode 100644
index b6125563fb..0000000000
--- a/lib/util/config.mk
+++ /dev/null
@@ -1,90 +0,0 @@
-[LIBRARY::LIBSAMBA-UTIL]
-PUBLIC_DEPENDENCIES = \
- LIBTALLOC LIBCRYPTO \
- SOCKET_WRAPPER LIBREPLACE_NETWORK \
- CHARSET EXECINFO UID_WRAPPER
-
-LIBSAMBA-UTIL_VERSION = 0.0.1
-LIBSAMBA-UTIL_SOVERSION = 0
-
-LIBSAMBA-UTIL_OBJ_FILES = $(addprefix $(libutilsrcdir)/, \
- xfile.o \
- debug.o \
- fault.o \
- signal.o \
- system.o \
- time.o \
- genrand.o \
- dprintf.o \
- util_str.o \
- rfc1738.o \
- substitute.o \
- util_strlist.o \
- util_file.o \
- data_blob.o \
- util.o \
- blocking.o \
- util_net.o \
- fsusage.o \
- ms_fnmatch.o \
- mutex.o \
- idtree.o \
- become_daemon.o \
- rbtree.o \
- talloc_stack.o \
- smb_threads.o \
- params.o \
- parmlist.o \
- util_id.o)
-
-PUBLIC_HEADERS += $(addprefix $(libutilsrcdir)/, util.h \
- dlinklist.h \
- attr.h \
- byteorder.h \
- data_blob.h \
- debug.h \
- memory.h \
- mutex.h \
- safe_string.h \
- time.h \
- util_ldb.h \
- talloc_stack.h \
- xfile.h)
-
-[SUBSYSTEM::ASN1_UTIL]
-
-ASN1_UTIL_OBJ_FILES = $(libutilsrcdir)/asn1.o
-
-[SUBSYSTEM::UNIX_PRIVS]
-PRIVATE_DEPENDENCIES = UID_WRAPPER
-
-UNIX_PRIVS_OBJ_FILES = $(libutilsrcdir)/unix_privs.o
-
-$(eval $(call proto_header_template,$(libutilsrcdir)/unix_privs.h,$(UNIX_PRIVS_OBJ_FILES:.o=.c)))
-
-################################################
-# Start SUBSYSTEM WRAP_XATTR
-[SUBSYSTEM::WRAP_XATTR]
-PUBLIC_DEPENDENCIES = XATTR
-#
-# End SUBSYSTEM WRAP_XATTR
-################################################
-
-WRAP_XATTR_OBJ_FILES = $(libutilsrcdir)/wrap_xattr.o
-
-[SUBSYSTEM::UTIL_TDB]
-PUBLIC_DEPENDENCIES = LIBTDB
-
-UTIL_TDB_OBJ_FILES = $(libutilsrcdir)/util_tdb.o
-
-[SUBSYSTEM::UTIL_TEVENT]
-PUBLIC_DEPENDENCIES = LIBTEVENT
-
-UTIL_TEVENT_OBJ_FILES = $(addprefix $(libutilsrcdir)/, \
- tevent_unix.o \
- tevent_ntstatus.o)
-
-[SUBSYSTEM::UTIL_LDB]
-PUBLIC_DEPENDENCIES = LIBLDB
-
-UTIL_LDB_OBJ_FILES = $(libutilsrcdir)/util_ldb.o
diff --git a/lib/util/data_blob.c b/lib/util/data_blob.c
index 825d8cf88c..10864a025b 100644
--- a/lib/util/data_blob.c
+++ b/lib/util/data_blob.c
@@ -33,6 +33,14 @@ const DATA_BLOB data_blob_null = { NULL, 0 };
**/
_PUBLIC_ DATA_BLOB data_blob_named(const void *p, size_t length, const char *name)
{
+ return data_blob_talloc_named(NULL, p, length, name);
+}
+
+/**
+ construct a data blob, using supplied TALLOC_CTX
+**/
+_PUBLIC_ DATA_BLOB data_blob_talloc_named(TALLOC_CTX *mem_ctx, const void *p, size_t length, const char *name)
+{
DATA_BLOB ret;
if (p == NULL && length == 0) {
@@ -41,9 +49,9 @@ _PUBLIC_ DATA_BLOB data_blob_named(const void *p, size_t length, const char *nam
}
if (p) {
- ret.data = (uint8_t *)talloc_memdup(NULL, p, length);
+ ret.data = (uint8_t *)talloc_memdup(mem_ctx, p, length);
} else {
- ret.data = talloc_array(NULL, uint8_t, length);
+ ret.data = talloc_array(mem_ctx, uint8_t, length);
}
if (ret.data == NULL) {
ret.length = 0;
@@ -55,36 +63,6 @@ _PUBLIC_ DATA_BLOB data_blob_named(const void *p, size_t length, const char *nam
}
/**
- construct a data blob, using supplied TALLOC_CTX
-**/
-_PUBLIC_ DATA_BLOB data_blob_talloc_named(TALLOC_CTX *mem_ctx, const void *p, size_t length, const char *name)
-{
- DATA_BLOB ret = data_blob_named(p, length, name);
-
- if (ret.data) {
- talloc_steal(mem_ctx, ret.data);
- }
- return ret;
-}
-
-
-/**
- reference a data blob, to the supplied TALLOC_CTX.
- Returns a NULL DATA_BLOB on failure
-**/
-_PUBLIC_ DATA_BLOB data_blob_talloc_reference(TALLOC_CTX *mem_ctx, DATA_BLOB *blob)
-{
- DATA_BLOB ret = *blob;
-
- ret.data = talloc_reference(mem_ctx, blob->data);
-
- if (!ret.data) {
- return data_blob(NULL, 0);
- }
- return ret;
-}
-
-/**
construct a zero data blob, using supplied TALLOC_CTX.
use this sparingly as it initialises data - better to initialise
yourself if you want specific data in the blob
@@ -153,7 +131,7 @@ _PUBLIC_ int data_blob_cmp(const DATA_BLOB *d1, const DATA_BLOB *d2)
/**
print the data_blob as hex string
**/
-_PUBLIC_ char *data_blob_hex_string(TALLOC_CTX *mem_ctx, const DATA_BLOB *blob)
+_PUBLIC_ char *data_blob_hex_string_lower(TALLOC_CTX *mem_ctx, const DATA_BLOB *blob)
{
int i;
char *hex_string;
@@ -173,6 +151,23 @@ _PUBLIC_ char *data_blob_hex_string(TALLOC_CTX *mem_ctx, const DATA_BLOB *blob)
return hex_string;
}
+_PUBLIC_ char *data_blob_hex_string_upper(TALLOC_CTX *mem_ctx, const DATA_BLOB *blob)
+{
+ int i;
+ char *hex_string;
+
+ hex_string = talloc_array(mem_ctx, char, (blob->length*2)+1);
+ if (!hex_string) {
+ return NULL;
+ }
+
+ for (i = 0; i < blob->length; i++)
+ slprintf(&hex_string[i*2], 3, "%02X", blob->data[i]);
+
+ hex_string[(blob->length*2)] = '\0';
+ return hex_string;
+}
+
/**
useful for constructing data blobs in test suites, while
avoiding const warnings
diff --git a/lib/util/data_blob.h b/lib/util/data_blob.h
index ffde51cf33..83e6cd5f09 100644
--- a/lib/util/data_blob.h
+++ b/lib/util/data_blob.h
@@ -61,12 +61,6 @@ _PUBLIC_ DATA_BLOB data_blob_named(const void *p, size_t length, const char *nam
_PUBLIC_ DATA_BLOB data_blob_talloc_named(TALLOC_CTX *mem_ctx, const void *p, size_t length, const char *name);
/**
- reference a data blob, to the supplied TALLOC_CTX.
- Returns a NULL DATA_BLOB on failure
-**/
-_PUBLIC_ DATA_BLOB data_blob_talloc_reference(TALLOC_CTX *mem_ctx, DATA_BLOB *blob);
-
-/**
construct a zero data blob, using supplied TALLOC_CTX.
use this sparingly as it initialises data - better to initialise
yourself if you want specific data in the blob
@@ -96,7 +90,12 @@ _PUBLIC_ int data_blob_cmp(const DATA_BLOB *d1, const DATA_BLOB *d2);
/**
print the data_blob as hex string
**/
-_PUBLIC_ char *data_blob_hex_string(TALLOC_CTX *mem_ctx, const DATA_BLOB *blob);
+_PUBLIC_ char *data_blob_hex_string_upper(TALLOC_CTX *mem_ctx, const DATA_BLOB *blob);
+
+/**
+print the data_blob as hex string
+**/
+_PUBLIC_ char *data_blob_hex_string_lower(TALLOC_CTX *mem_ctx, const DATA_BLOB *blob);
/**
useful for constructing data blobs in test suites, while
diff --git a/lib/util/debug.c b/lib/util/debug.c
index 996efdff7e..2ff7cb2c58 100644
--- a/lib/util/debug.c
+++ b/lib/util/debug.c
@@ -1,8 +1,9 @@
/*
Unix SMB/CIFS implementation.
- Samba debug functions
- Copyright (C) Andrew Tridgell 2003
- Copyright (C) James J Myers 2003
+ Samba utility functions
+ Copyright (C) Andrew Tridgell 1992-1998
+ Copyright (C) Elrond 2002
+ Copyright (C) Simo Sorce 2002
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -20,204 +21,1003 @@
#include "includes.h"
#include "system/filesys.h"
-#include "system/time.h"
-#include "dynconfig/dynconfig.h"
+#include "system/syslog.h"
+#include "lib/util/time.h"
-/**
- * @file
- * @brief Debug logging
- **/
+/* define what facility to use for syslog */
+#ifndef SYSLOG_FACILITY
+#define SYSLOG_FACILITY LOG_DAEMON
+#endif
-/**
- * this global variable determines what messages are printed
+/* -------------------------------------------------------------------------- **
+ * Defines...
+ *
+ * FORMAT_BUFR_MAX - Index of the last byte of the format buffer;
+ * format_bufr[FORMAT_BUFR_MAX] should always be reserved
+ * for a terminating null byte.
*/
-int _debug_level = 0;
-_PUBLIC_ int *debug_level = &_debug_level;
-static int debug_all_class_hack = 1;
-int *DEBUGLEVEL_CLASS = &debug_all_class_hack; /* For samba 3 */
-static bool debug_all_class_isset_hack = true;
-bool *DEBUGLEVEL_CLASS_ISSET = &debug_all_class_isset_hack; /* For samba 3 */
-XFILE *dbf = NULL; /* For Samba 3*/
-/* the registered mutex handlers */
-static struct {
- const char *name;
- struct debug_ops ops;
-} debug_handlers;
+#define FORMAT_BUFR_SIZE 1024
+#define FORMAT_BUFR_MAX (FORMAT_BUFR_SIZE - 1)
+
+/* -------------------------------------------------------------------------- **
+ * This module implements Samba's debugging utility.
+ *
+ * The syntax of a debugging log file is represented as:
+ *
+ * <debugfile> :== { <debugmsg> }
+ *
+ * <debugmsg> :== <debughdr> '\n' <debugtext>
+ *
+ * <debughdr> :== '[' TIME ',' LEVEL ']' [ [FILENAME ':'] [FUNCTION '()'] ]
+ *
+ * <debugtext> :== { <debugline> }
+ *
+ * <debugline> :== TEXT '\n'
+ *
+ * TEXT is a string of characters excluding the newline character.
+ * LEVEL is the DEBUG level of the message (an integer in the range 0..10).
+ * TIME is a timestamp.
+ * FILENAME is the name of the file from which the debug message was generated.
+ * FUNCTION is the function from which the debug message was generated.
+ *
+ * Basically, what that all means is:
+ *
+ * - A debugging log file is made up of debug messages.
+ *
+ * - Each debug message is made up of a header and text. The header is
+ * separated from the text by a newline.
+ *
+ * - The header begins with the timestamp and debug level of the message
+ * enclosed in brackets. The filename and function from which the
+ * message was generated may follow. The filename is terminated by a
+ * colon, and the function name is terminated by parenthesis.
+ *
+ * - The message text is made up of zero or more lines, each terminated by
+ * a newline.
+ */
/* state variables for the debug system */
static struct {
- int fd;
- enum debug_logtype logtype;
+ bool initialized;
+ int fd; /* The log file handle */
+ enum debug_logtype logtype; /* The type of logging we are doing: eg stdout, file, stderr */
const char *prog_name;
bool reopening_logs;
-} state;
+ bool schedule_reopen_logs;
+
+ struct debug_settings settings;
+ char *debugf;
+} state = {
+ .settings = {
+ .timestamp_logs = true
+ }
+};
+
+/* -------------------------------------------------------------------------- **
+ * External variables.
+ *
+ * debugf - Debug file name.
+ * DEBUGLEVEL - System-wide debug message limit. Messages with message-
+ * levels higher than DEBUGLEVEL will not be processed.
+ */
+
+/*
+ used to check if the user specified a
+ logfile on the command line
+*/
+bool override_logfile;
+
+/*
+ * This is to allow reading of DEBUGLEVEL_CLASS before the debug
+ * system has been initialized.
+ */
+static const int debug_class_list_initial[DBGC_MAX_FIXED + 1];
-static bool reopen_logs_scheduled;
-static bool check_reopen_logs(void)
+static int debug_num_classes = 0;
+int *DEBUGLEVEL_CLASS = discard_const_p(int, debug_class_list_initial);
+
+
+/* -------------------------------------------------------------------------- **
+ * Internal variables.
+ *
+ * debug_count - Number of debug messages that have been output.
+ * Used to check log size.
+ *
+ * syslog_level - Internal copy of the message debug level. Written by
+ * dbghdr() and read by Debug1().
+ *
+ * format_bufr - Used to format debug messages. The dbgtext() function
+ * prints debug messages to a string, and then passes the
+ * string to format_debug_text(), which uses format_bufr
+ * to build the formatted output.
+ *
+ * format_pos - Marks the first free byte of the format_bufr.
+ *
+ *
+ * log_overflow - When this variable is true, never attempt to check the
+ * size of the log. This is a hack, so that we can write
+ * a message using DEBUG, from open_logs() when we
+ * are unable to open a new log file for some reason.
+ */
+
+static int debug_count = 0;
+#ifdef WITH_SYSLOG
+static int syslog_level = 0;
+#endif
+static char *format_bufr = NULL;
+static size_t format_pos = 0;
+static bool log_overflow = false;
+
+/*
+ * Define all the debug class selection names here. Names *MUST NOT* contain
+ * white space. There must be one name for each DBGC_<class name>, and they
+ * must be in the table in the order of DBGC_<class name>..
+ */
+static const char *default_classname_table[] = {
+ "all", /* DBGC_ALL; index refs traditional DEBUGLEVEL */
+ "tdb", /* DBGC_TDB */
+ "printdrivers", /* DBGC_PRINTDRIVERS */
+ "lanman", /* DBGC_LANMAN */
+ "smb", /* DBGC_SMB */
+ "rpc_parse", /* DBGC_RPC_PARSE */
+ "rpc_srv", /* DBGC_RPC_SRV */
+ "rpc_cli", /* DBGC_RPC_CLI */
+ "passdb", /* DBGC_PASSDB */
+ "sam", /* DBGC_SAM */
+ "auth", /* DBGC_AUTH */
+ "winbind", /* DBGC_WINBIND */
+ "vfs", /* DBGC_VFS */
+ "idmap", /* DBGC_IDMAP */
+ "quota", /* DBGC_QUOTA */
+ "acls", /* DBGC_ACLS */
+ "locking", /* DBGC_LOCKING */
+ "msdfs", /* DBGC_MSDFS */
+ "dmapi", /* DBGC_DMAPI */
+ "registry", /* DBGC_REGISTRY */
+ NULL
+};
+
+static char **classname_table = NULL;
+
+
+/* -------------------------------------------------------------------------- **
+ * Functions...
+ */
+
+static void debug_init(void);
+
+/***************************************************************************
+ Free memory pointed to by global pointers.
+****************************************************************************/
+
+void gfree_debugsyms(void)
{
- if (state.fd == 0 || reopen_logs_scheduled) {
- reopen_logs_scheduled = false;
- reopen_logs();
+ TALLOC_FREE(classname_table);
+
+ if ( DEBUGLEVEL_CLASS != debug_class_list_initial ) {
+ TALLOC_FREE( DEBUGLEVEL_CLASS );
+ DEBUGLEVEL_CLASS = discard_const_p(int, debug_class_list_initial);
}
- if (state.fd <= 0)
- return false;
+ TALLOC_FREE(format_bufr);
- return true;
+ debug_num_classes = DBGC_MAX_FIXED;
+
+ state.initialized = false;
}
-_PUBLIC_ void debug_schedule_reopen_logs(void)
+/****************************************************************************
+utility lists registered debug class names's
+****************************************************************************/
+
+char *debug_list_class_names_and_levels(void)
+{
+ char *buf = NULL;
+ unsigned int i;
+ /* prepare strings */
+ for (i = 0; i < debug_num_classes; i++) {
+ buf = talloc_asprintf_append(buf,
+ "%s:%d%s",
+ classname_table[i],
+ DEBUGLEVEL_CLASS[i],
+ i == (debug_num_classes - 1) ? "\n" : " ");
+ if (buf == NULL) {
+ return NULL;
+ }
+ }
+ return buf;
+}
+
+/****************************************************************************
+ Utility to translate names to debug class index's (internal version).
+****************************************************************************/
+
+static int debug_lookup_classname_int(const char* classname)
{
- reopen_logs_scheduled = true;
+ int i;
+
+ if (!classname) return -1;
+
+ for (i=0; i < debug_num_classes; i++) {
+ if (strcmp(classname, classname_table[i])==0)
+ return i;
+ }
+ return -1;
}
-static void log_timestring(int level, const char *location, const char *func)
+/****************************************************************************
+ Add a new debug class to the system.
+****************************************************************************/
+
+int debug_add_class(const char *classname)
{
- char *t = NULL;
- char *s = NULL;
+ int ndx;
+ int *new_class_list;
+ char **new_name_list;
+ int default_level;
+
+ if (!classname)
+ return -1;
+
+ /* check the init has yet been called */
+ debug_init();
+
+ ndx = debug_lookup_classname_int(classname);
+ if (ndx >= 0)
+ return ndx;
+ ndx = debug_num_classes;
+
+ if (DEBUGLEVEL_CLASS == debug_class_list_initial) {
+ /* Initial loading... */
+ new_class_list = NULL;
+ } else {
+ new_class_list = DEBUGLEVEL_CLASS;
+ }
- if (!check_reopen_logs()) return;
+ default_level = DEBUGLEVEL_CLASS[DBGC_ALL];
- if (state.logtype != DEBUG_FILE) return;
+ new_class_list = talloc_realloc(NULL, new_class_list, int, ndx + 1);
+ if (!new_class_list)
+ return -1;
+ DEBUGLEVEL_CLASS = new_class_list;
- t = timestring(NULL, time(NULL));
- if (!t) return;
+ DEBUGLEVEL_CLASS[ndx] = default_level;
- asprintf(&s, "[%s, %d %s:%s()]\n", t, level, location, func);
- talloc_free(t);
- if (!s) return;
+ new_name_list = talloc_realloc(NULL, classname_table, char *, ndx + 1);
+ if (!new_name_list)
+ return -1;
+ classname_table = new_name_list;
- write(state.fd, s, strlen(s));
- free(s);
+ classname_table[ndx] = talloc_strdup(classname_table, classname);
+ if (! classname_table[ndx])
+ return -1;
+
+ debug_num_classes = ndx + 1;
+
+ return ndx;
}
-/**
- the backend for debug messages. Note that the DEBUG() macro has already
- ensured that the log level has been met before this is called
-*/
-_PUBLIC_ void dbghdr(int level, const char *location, const char *func)
+/****************************************************************************
+ Utility to translate names to debug class index's (public version).
+****************************************************************************/
+
+int debug_lookup_classname(const char *classname)
{
- log_timestring(level, location, func);
- log_task_id();
+ int ndx;
+
+ if (!classname || !*classname)
+ return -1;
+
+ ndx = debug_lookup_classname_int(classname);
+
+ if (ndx != -1)
+ return ndx;
+
+ DEBUG(0, ("debug_lookup_classname(%s): Unknown class\n",
+ classname));
+ return debug_add_class(classname);
}
+/****************************************************************************
+ Dump the current registered debug levels.
+****************************************************************************/
-_PUBLIC_ void dbghdrclass(int level, int dclass, const char *location, const char *func)
+static void debug_dump_status(int level)
{
- /* Simple wrapper, Samba 4 doesn't do debug classes */
- dbghdr(level, location, func);
+ int q;
+
+ DEBUG(level, ("INFO: Current debug levels:\n"));
+ for (q = 0; q < debug_num_classes; q++) {
+ const char *classname = classname_table[q];
+ DEBUGADD(level, (" %s: %d\n",
+ classname,
+ DEBUGLEVEL_CLASS[q]));
+ }
+}
+
+/****************************************************************************
+ parse the debug levels from smbcontrol. Example debug level parameter:
+ printdrivers:7
+****************************************************************************/
+
+static bool debug_parse_params(char **params)
+{
+ int i, ndx;
+ char *class_name;
+ char *class_level;
+
+ if (!params)
+ return false;
+
+ /* Allow DBGC_ALL to be specified w/o requiring its class name e.g."10"
+ * v.s. "all:10", this is the traditional way to set DEBUGLEVEL
+ */
+ if (isdigit((int)params[0][0])) {
+ DEBUGLEVEL_CLASS[DBGC_ALL] = atoi(params[0]);
+ i = 1; /* start processing at the next params */
+ } else {
+ DEBUGLEVEL_CLASS[DBGC_ALL] = 0;
+ i = 0; /* DBGC_ALL not specified OR class name was included */
+ }
+
+ /* Array is debug_num_classes long */
+ for (ndx = DBGC_ALL; ndx < debug_num_classes; ndx++) {
+ DEBUGLEVEL_CLASS[ndx] = DEBUGLEVEL_CLASS[DBGC_ALL];
+ }
+
+ /* Fill in new debug class levels */
+ for (; i < debug_num_classes && params[i]; i++) {
+ char *saveptr;
+ if ((class_name = strtok_r(params[i],":", &saveptr)) &&
+ (class_level = strtok_r(NULL, "\0", &saveptr)) &&
+ ((ndx = debug_lookup_classname(class_name)) != -1)) {
+ DEBUGLEVEL_CLASS[ndx] = atoi(class_level);
+ } else {
+ DEBUG(0,("debug_parse_params: unrecognized debug class name or format [%s]\n", params[i]));
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/****************************************************************************
+ Parse the debug levels from smb.conf. Example debug level string:
+ 3 tdb:5 printdrivers:7
+ Note: the 1st param has no "name:" preceeding it.
+****************************************************************************/
+
+bool debug_parse_levels(const char *params_str)
+{
+ char **params;
+
+ /* Just in case */
+ debug_init();
+
+ params = str_list_make(NULL, params_str, NULL);
+
+ if (debug_parse_params(params)) {
+ debug_dump_status(5);
+ TALLOC_FREE(params);
+ return true;
+ } else {
+ TALLOC_FREE(params);
+ return false;
+ }
+}
+
+/* setup for logging of talloc warnings */
+static void talloc_log_fn(const char *msg)
+{
+ DEBUG(0,("%s", msg));
+}
+
+void debug_setup_talloc_log(void)
+{
+ talloc_set_log_fn(talloc_log_fn);
+}
+
+
+/****************************************************************************
+Init debugging (one time stuff)
+****************************************************************************/
+
+static void debug_init(void)
+{
+ const char **p;
+
+ if (state.initialized)
+ return;
+
+ state.initialized = true;
+
+ debug_setup_talloc_log();
+
+ for(p = default_classname_table; *p; p++) {
+ debug_add_class(*p);
+ }
+ format_bufr = talloc_array(NULL, char, FORMAT_BUFR_SIZE);
+ if (!format_bufr) {
+ smb_panic("debug_init: unable to create buffer");
+ }
+}
+
+/* This forces in some smb.conf derived values into the debug system.
+ * There are no pointers in this structure, so we can just
+ * structure-assign it in */
+void debug_set_settings(struct debug_settings *settings)
+{
+ state.settings = *settings;
}
/**
- the backend for debug messages. Note that the DEBUG() macro has already
- ensured that the log level has been met before this is called
+ control the name of the logfile and whether logging will be to stdout, stderr
+ or a file, and set up syslog
- @note You should never have to call this function directly. Call the DEBUG()
- macro instead.
+ new_log indicates the destination for the debug log (an enum in
+ order of precedence - once set to DEBUG_FILE, it is not possible to
+ reset to DEBUG_STDOUT for example. This makes it easy to override
+ for debug to stderr on the command line, as the smb.conf cannot
+ reset it back to file-based logging
*/
-_PUBLIC_ void dbgtext(const char *format, ...)
+void setup_logging(const char *prog_name, enum debug_logtype new_logtype)
{
- va_list ap;
- char *s = NULL;
+ debug_init();
+ if (state.logtype < new_logtype) {
+ state.logtype = new_logtype;
+ }
+ if (prog_name) {
+ state.prog_name = prog_name;
+ }
+ reopen_logs_internal();
+
+ if (state.logtype == DEBUG_FILE) {
+#ifdef WITH_SYSLOG
+ const char *p = strrchr_m( prog_name,'/' );
+ if (p)
+ prog_name = p + 1;
+#ifdef LOG_DAEMON
+ openlog( prog_name, LOG_PID, SYSLOG_FACILITY );
+#else
+ /* for old systems that have no facility codes. */
+ openlog( prog_name, LOG_PID );
+#endif
+#endif
+ }
+}
- if (!check_reopen_logs()) return;
+/***************************************************************************
+ Set the logfile name.
+**************************************************************************/
- va_start(ap, format);
- vasprintf(&s, format, ap);
- va_end(ap);
+void debug_set_logfile(const char *name)
+{
+ if (name == NULL || *name == 0) {
+ /* this copes with calls when smb.conf is not loaded yet */
+ return;
+ }
+ TALLOC_FREE(state.debugf);
+ state.debugf = talloc_strdup(NULL, name);
+}
- write(state.fd, s, strlen(s));
- free(s);
+static void debug_close_fd(int fd)
+{
+ if (fd > 2) {
+ close(fd);
+ }
}
-_PUBLIC_ const char *logfile = NULL;
+bool debug_get_output_is_stderr(void)
+{
+ return (state.logtype == DEBUG_DEFAULT_STDERR) || (state.logtype == DEBUG_STDERR);
+}
+
+/**************************************************************************
+ reopen the log files
+ note that we now do this unconditionally
+ We attempt to open the new debug fp before closing the old. This means
+ if we run out of fd's we just keep using the old fd rather than aborting.
+ Fix from dgibson@linuxcare.com.
+**************************************************************************/
/**
reopen the log file (usually called because the log file name might have changed)
*/
-_PUBLIC_ void reopen_logs(void)
+bool reopen_logs_internal(void)
{
+ mode_t oldumask;
+ int new_fd = 0;
+ int old_fd = 0;
+ bool ret = true;
+
char *fname = NULL;
- int old_fd = state.fd;
if (state.reopening_logs) {
- return;
+ return true;
}
+ /* Now clear the SIGHUP induced flag */
+ state.schedule_reopen_logs = false;
+
switch (state.logtype) {
case DEBUG_STDOUT:
+ debug_close_fd(state.fd);
state.fd = 1;
- break;
+ return true;
+ case DEBUG_DEFAULT_STDERR:
case DEBUG_STDERR:
+ debug_close_fd(state.fd);
state.fd = 2;
- break;
+ return true;
case DEBUG_FILE:
- state.reopening_logs = true;
- if (logfile && (*logfile) == '/') {
- fname = strdup(logfile);
+ break;
+ }
+
+ oldumask = umask( 022 );
+
+ fname = state.debugf;
+ if (!fname) {
+ return false;
+ }
+
+ state.reopening_logs = true;
+
+ new_fd = open( state.debugf, O_WRONLY|O_APPEND|O_CREAT, 0644);
+
+ if (new_fd == -1) {
+ log_overflow = true;
+ DEBUG(0, ("Unable to open new log file '%s': %s\n", state.debugf, strerror(errno)));
+ log_overflow = false;
+ ret = false;
+ } else {
+ old_fd = state.fd;
+ state.fd = new_fd;
+ debug_close_fd(old_fd);
+ }
+
+ /* Fix from klausr@ITAP.Physik.Uni-Stuttgart.De
+ * to fix problem where smbd's that generate less
+ * than 100 messages keep growing the log.
+ */
+ force_check_log_size();
+ (void)umask(oldumask);
+
+ /* Take over stderr to catch output into logs */
+ if (state.fd > 0 && dup2(state.fd, 2) == -1) {
+ close_low_fds(true); /* Close stderr too, if dup2 can't point it
+ at the logfile */
+ }
+
+ state.reopening_logs = false;
+
+ return ret;
+}
+
+/**************************************************************************
+ Force a check of the log size.
+ ***************************************************************************/
+
+void force_check_log_size( void )
+{
+ debug_count = 100;
+}
+
+_PUBLIC_ void debug_schedule_reopen_logs(void)
+{
+ state.schedule_reopen_logs = true;
+}
+
+
+/***************************************************************************
+ Check to see if there is any need to check if the logfile has grown too big.
+**************************************************************************/
+
+bool need_to_check_log_size( void )
+{
+ int maxlog;
+
+ if( debug_count < 100)
+ return( false );
+
+ maxlog = state.settings.max_log_size * 1024;
+ if ( state.fd <=2 || maxlog <= 0 ) {
+ debug_count = 0;
+ return(false);
+ }
+ return( true );
+}
+
+/**************************************************************************
+ Check to see if the log has grown to be too big.
+ **************************************************************************/
+
+void check_log_size( void )
+{
+ int maxlog;
+ struct stat st;
+
+ /*
+ * We need to be root to check/change log-file, skip this and let the main
+ * loop check do a new check as root.
+ */
+
+ if( geteuid() != 0) {
+ /* We don't check sec_initial_uid() here as it isn't
+ * available in common code and we don't generally
+ * want to rotate and the possibly lose logs in
+ * make test or the build farm */
+ return;
+ }
+
+ if(log_overflow || (!state.schedule_reopen_logs && !need_to_check_log_size())) {
+ return;
+ }
+
+ maxlog = state.settings.max_log_size * 1024;
+
+ if (state.schedule_reopen_logs ||
+ (fstat(state.fd, &st) == 0
+ && st.st_size > maxlog )) {
+ (void)reopen_logs_internal();
+ if (state.fd > 0 && fstat(state.fd, &st) == 0) {
+ if (st.st_size > maxlog) {
+ char *name = NULL;
+
+ if (asprintf(&name, "%s.old", state.debugf ) < 0) {
+ return;
+ }
+ (void)rename(state.debugf, name);
+
+ if (!reopen_logs_internal()) {
+ /* We failed to reopen a log - continue using the old name. */
+ (void)rename(name, state.debugf);
+ }
+ SAFE_FREE(name);
+ }
+ }
+ }
+
+ /*
+ * Here's where we need to panic if state.fd == 0 or -1 (invalid values)
+ */
+
+ if (state.fd <= 0) {
+ /* This code should only be reached in very strange
+ * circumstances. If we merely fail to open the new log we
+ * should stick with the old one. ergo this should only be
+ * reached when opening the logs for the first time: at
+ * startup or when the log level is increased from zero.
+ * -dwg 6 June 2000
+ */
+ int fd = open( "/dev/console", O_WRONLY, 0);
+ if (fd != -1) {
+ state.fd = fd;
+ DEBUG(0,("check_log_size: open of debug file %s failed - using console.\n",
+ state.debugf ));
} else {
- asprintf(&fname, "%s/%s.log", dyn_LOGFILEBASE, state.prog_name);
+ /*
+ * We cannot continue without a debug file handle.
+ */
+ abort();
}
- if (fname) {
- int newfd = open(fname, O_CREAT|O_APPEND|O_WRONLY, 0600);
- if (newfd == -1) {
- DEBUG(1, ("Failed to open new logfile: %s\n", fname));
- old_fd = -1;
- } else {
- state.fd = newfd;
+ }
+ debug_count = 0;
+}
+
+/*************************************************************************
+ Write an debug message on the debugfile.
+ This is called by dbghdr() and format_debug_text().
+************************************************************************/
+
+ int Debug1( const char *format_str, ... )
+{
+ va_list ap;
+ int old_errno = errno;
+
+ debug_count++;
+
+ if ( state.logtype != DEBUG_FILE ) {
+ va_start( ap, format_str );
+ if (state.fd > 0)
+ (void)vdprintf( state.fd, format_str, ap );
+ va_end( ap );
+ errno = old_errno;
+ goto done;
+ }
+
+#ifdef WITH_SYSLOG
+ if( !state.settings.syslog_only)
+#endif
+ {
+ if( state.fd <= 0 ) {
+ mode_t oldumask = umask( 022 );
+ int fd = open( state.debugf, O_WRONLY|O_APPEND|O_CREAT, 0644 );
+ (void)umask( oldumask );
+ if(fd == -1) {
+ errno = old_errno;
+ goto done;
}
- free(fname);
- } else {
- DEBUG(1, ("Failed to find name for file-based logfile!\n"));
+ state.fd = fd;
}
- state.reopening_logs = false;
+ }
- break;
+#ifdef WITH_SYSLOG
+ if( syslog_level < state.settings.syslog ) {
+ /* map debug levels to syslog() priorities
+ * note that not all DEBUG(0, ...) calls are
+ * necessarily errors */
+ static const int priority_map[4] = {
+ LOG_ERR, /* 0 */
+ LOG_WARNING, /* 1 */
+ LOG_NOTICE, /* 2 */
+ LOG_INFO, /* 3 */
+ };
+ int priority;
+ char *msgbuf = NULL;
+ int ret;
+
+ if( syslog_level >= ARRAY_SIZE(priority_map) || syslog_level < 0)
+ priority = LOG_DEBUG;
+ else
+ priority = priority_map[syslog_level];
+
+ /*
+ * Specify the facility to interoperate with other syslog
+ * callers (vfs_full_audit for example).
+ */
+ priority |= SYSLOG_FACILITY;
+
+ va_start(ap, format_str);
+ ret = vasprintf(&msgbuf, format_str, ap);
+ va_end(ap);
+
+ if (ret != -1) {
+ syslog(priority, "%s", msgbuf);
+ }
+ SAFE_FREE(msgbuf);
}
+#endif
+
+ check_log_size();
- if (old_fd > 2) {
- close(old_fd);
+#ifdef WITH_SYSLOG
+ if( !state.settings.syslog_only)
+#endif
+ {
+ va_start( ap, format_str );
+ if (state.fd > 0)
+ (void)vdprintf( state.fd, format_str, ap );
+ va_end( ap );
}
+
+ done:
+ errno = old_errno;
+
+ return( 0 );
}
-/**
- control the name of the logfile and whether logging will be to stdout, stderr
- or a file
-*/
-_PUBLIC_ void setup_logging(const char *prog_name, enum debug_logtype new_logtype)
+
+/**************************************************************************
+ Print the buffer content via Debug1(), then reset the buffer.
+ Input: none
+ Output: none
+****************************************************************************/
+
+static void bufr_print( void )
{
- if (state.logtype < new_logtype) {
- state.logtype = new_logtype;
+ format_bufr[format_pos] = '\0';
+ (void)Debug1( "%s", format_bufr );
+ format_pos = 0;
+}
+
+/***************************************************************************
+ Format the debug message text.
+
+ Input: msg - Text to be added to the "current" debug message text.
+
+ Output: none.
+
+ Notes: The purpose of this is two-fold. First, each call to syslog()
+ (used by Debug1(), see above) generates a new line of syslog
+ output. This is fixed by storing the partial lines until the
+ newline character is encountered. Second, printing the debug
+ message lines when a newline is encountered allows us to add
+ spaces, thus indenting the body of the message and making it
+ more readable.
+**************************************************************************/
+
+static void format_debug_text( const char *msg )
+{
+ size_t i;
+ bool timestamp = (state.logtype == DEBUG_FILE && (state.settings.timestamp_logs));
+
+ if (!format_bufr) {
+ debug_init();
}
- if (prog_name) {
- state.prog_name = prog_name;
+
+ for( i = 0; msg[i]; i++ ) {
+ /* Indent two spaces at each new line. */
+ if(timestamp && 0 == format_pos) {
+ format_bufr[0] = format_bufr[1] = ' ';
+ format_pos = 2;
+ }
+
+ /* If there's room, copy the character to the format buffer. */
+ if( format_pos < FORMAT_BUFR_MAX )
+ format_bufr[format_pos++] = msg[i];
+
+ /* If a newline is encountered, print & restart. */
+ if( '\n' == msg[i] )
+ bufr_print();
+
+ /* If the buffer is full dump it out, reset it, and put out a line
+ * continuation indicator.
+ */
+ if( format_pos >= FORMAT_BUFR_MAX ) {
+ bufr_print();
+ (void)Debug1( " +>\n" );
+ }
}
- reopen_logs();
+
+ /* Just to be safe... */
+ format_bufr[format_pos] = '\0';
}
-/**
- Just run logging to stdout for this program
-*/
-_PUBLIC_ void setup_logging_stdout(void)
+/***************************************************************************
+ Flush debug output, including the format buffer content.
+
+ Input: none
+ Output: none
+***************************************************************************/
+
+void dbgflush( void )
{
- setup_logging(NULL, DEBUG_STDOUT);
+ bufr_print();
}
-/**
- return a string constant containing n tabs
- no more than 10 tabs are returned
-*/
-_PUBLIC_ const char *do_debug_tab(int n)
+/***************************************************************************
+ Print a Debug Header.
+
+ Input: level - Debug level of the message (not the system-wide debug
+ level. )
+ cls - Debuglevel class of the calling module.
+ file - Pointer to a string containing the name of the file
+ from which this function was called, or an empty string
+ if the __FILE__ macro is not implemented.
+ func - Pointer to a string containing the name of the function
+ from which this function was called, or an empty string
+ if the __FUNCTION__ macro is not implemented.
+ line - line number of the call to dbghdr, assuming __LINE__
+ works.
+
+ Output: Always true. This makes it easy to fudge a call to dbghdr()
+ in a macro, since the function can be called as part of a test.
+ Eg: ( (level <= DEBUGLEVEL) && (dbghdr(level,"",line)) )
+
+ Notes: This function takes care of setting syslog_level.
+
+****************************************************************************/
+
+bool dbghdrclass(int level, int cls, const char *location, const char *func)
{
- const char *tabs[] = {"", "\t", "\t\t", "\t\t\t", "\t\t\t\t", "\t\t\t\t\t",
- "\t\t\t\t\t\t", "\t\t\t\t\t\t\t", "\t\t\t\t\t\t\t\t",
- "\t\t\t\t\t\t\t\t\t", "\t\t\t\t\t\t\t\t\t\t"};
- return tabs[MIN(n, 10)];
+ /* Ensure we don't lose any real errno value. */
+ int old_errno = errno;
+
+ if( format_pos ) {
+ /* This is a fudge. If there is stuff sitting in the format_bufr, then
+ * the *right* thing to do is to call
+ * format_debug_text( "\n" );
+ * to write the remainder, and then proceed with the new header.
+ * Unfortunately, there are several places in the code at which
+ * the DEBUG() macro is used to build partial lines. That in mind,
+ * we'll work under the assumption that an incomplete line indicates
+ * that a new header is *not* desired.
+ */
+ return( true );
+ }
+
+#ifdef WITH_SYSLOG
+ /* Set syslog_level. */
+ syslog_level = level;
+#endif
+
+ /* Don't print a header if we're logging to stdout. */
+ if ( state.logtype != DEBUG_FILE ) {
+ return( true );
+ }
+
+ /* Print the header if timestamps are turned on. If parameters are
+ * not yet loaded, then default to timestamps on.
+ */
+ if( state.settings.timestamp_logs || state.settings.debug_prefix_timestamp) {
+ char header_str[200];
+
+ header_str[0] = '\0';
+
+ if( state.settings.debug_pid)
+ slprintf(header_str,sizeof(header_str)-1,", pid=%u",(unsigned int)getpid());
+
+ if( state.settings.debug_uid) {
+ size_t hs_len = strlen(header_str);
+ slprintf(header_str + hs_len,
+ sizeof(header_str) - 1 - hs_len,
+ ", effective(%u, %u), real(%u, %u)",
+ (unsigned int)geteuid(), (unsigned int)getegid(),
+ (unsigned int)getuid(), (unsigned int)getgid());
+ }
+
+ if (state.settings.debug_class && (cls != DBGC_ALL)) {
+ size_t hs_len = strlen(header_str);
+ slprintf(header_str + hs_len,
+ sizeof(header_str) -1 - hs_len,
+ ", class=%s",
+ default_classname_table[cls]);
+ }
+
+ /* Print it all out at once to prevent split syslog output. */
+ if( state.settings.debug_prefix_timestamp ) {
+ char *time_str = current_timestring(NULL,
+ state.settings.debug_hires_timestamp);
+ (void)Debug1( "[%s, %2d%s] ",
+ time_str,
+ level, header_str);
+ talloc_free(time_str);
+ } else {
+ char *time_str = current_timestring(NULL,
+ state.settings.debug_hires_timestamp);
+ (void)Debug1( "[%s, %2d%s] %s(%s)\n",
+ time_str,
+ level, header_str, location, func );
+ talloc_free(time_str);
+ }
+ }
+
+ errno = old_errno;
+ return( true );
+}
+
+/***************************************************************************
+ Add text to the body of the "current" debug message via the format buffer.
+
+ Input: format_str - Format string, as used in printf(), et. al.
+ ... - Variable argument list.
+
+ ..or.. va_alist - Old style variable parameter list starting point.
+
+ Output: Always true. See dbghdr() for more info, though this is not
+ likely to be used in the same way.
+
+***************************************************************************/
+
+ bool dbgtext( const char *format_str, ... )
+{
+ va_list ap;
+ char *msgbuf = NULL;
+ bool ret = true;
+ int res;
+
+ va_start(ap, format_str);
+ res = vasprintf(&msgbuf, format_str, ap);
+ va_end(ap);
+
+ if (res != -1) {
+ format_debug_text(msgbuf);
+ } else {
+ ret = false;
+ }
+ SAFE_FREE(msgbuf);
+ return ret;
}
+/* the registered mutex handlers */
+static struct {
+ const char *name;
+ struct debug_ops ops;
+} debug_handlers;
+
/**
log suspicious usage - print comments and backtrace
*/
@@ -251,7 +1051,7 @@ _PUBLIC_ void log_task_id(void)
{
if (!debug_handlers.ops.log_task_id) return;
- if (!check_reopen_logs()) return;
+ if (!reopen_logs_internal()) return;
debug_handlers.ops.log_task_id(state.fd);
}
diff --git a/lib/util/debug.h b/lib/util/debug.h
index f0d16952a9..c01fa928b5 100644
--- a/lib/util/debug.h
+++ b/lib/util/debug.h
@@ -1,120 +1,236 @@
/*
Unix SMB/CIFS implementation.
- Samba debug defines
- Copyright (C) Andrew Tridgell 2003
+ SMB debug stuff
+ Copyright (C) Andrew Tridgell 1992-1998
+ Copyright (C) John H Terpstra 1996-1998
+ Copyright (C) Luke Kenneth Casson Leighton 1996-1998
+ Copyright (C) Paul Ashton 1998
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _SAMBA_DEBUG_H_
-#define _SAMBA_DEBUG_H_
+#ifndef _DEBUG_H
+#define _DEBUG_H
-/**
- * @file
- * @brief Debugging macros
+/* -------------------------------------------------------------------------- **
+ * Debugging code. See also debug.c
*/
-/* the debug operations structure - contains function pointers to
- various debug implementations of each operation */
-struct debug_ops {
- /* function to log (using DEBUG) suspicious usage of data structure */
- void (*log_suspicious_usage)(const char* from, const char* info);
-
- /* function to log (using printf) suspicious usage of data structure.
- * To be used in circumstances when using DEBUG would cause loop. */
- void (*print_suspicious_usage)(const char* from, const char* info);
-
- /* function to return process/thread id */
- uint32_t (*get_task_id)(void);
-
- /* function to log process/thread id */
- void (*log_task_id)(int fd);
-};
+/* the maximum debug level to compile into the code. This assumes a good
+ optimising compiler that can remove unused code
+ for embedded or low-memory systems set this to a value like 2 to get
+ only important messages. This gives *much* smaller binaries
+*/
+#ifndef MAX_DEBUG_LEVEL
+#define MAX_DEBUG_LEVEL 1000
+#endif
+
+int Debug1( const char *, ... ) PRINTF_ATTRIBUTE(1,2);
+bool dbgtext( const char *, ... ) PRINTF_ATTRIBUTE(1,2);
+bool dbghdrclass( int level, int cls, const char *location, const char *func);
+bool dbghdr( int level, const char *location, const char *func);
-#define DEBUGLEVEL *debug_level
-extern int DEBUGLEVEL;
-
-#define debug_ctx() (_debug_ctx?_debug_ctx:(_debug_ctx=talloc_new(NULL)))
-
-#define DEBUGLVL(level) ((level) <= DEBUGLEVEL)
-#define _DEBUG(level, body, header) do { \
- if (DEBUGLVL(level)) { \
- void* _debug_ctx=NULL; \
- if (header) { \
- dbghdr(level, __location__, __FUNCTION__); \
- } \
- dbgtext body; \
- talloc_free(_debug_ctx); \
- } \
-} while (0)
-/**
- * Write to the debug log.
+/*
+ * Redefine DEBUGLEVEL because so we don't have to change every source file
+ * that *unnecessarily* references it.
*/
-#define DEBUG(level, body) _DEBUG(level, body, true)
-/**
- * Add data to an existing debug log entry.
+#define DEBUGLEVEL DEBUGLEVEL_CLASS[DBGC_ALL]
+
+/*
+ * Define all new debug classes here. A class is represented by an entry in
+ * the DEBUGLEVEL_CLASS array. Index zero of this arrray is equivalent to the
+ * old DEBUGLEVEL. Any source file that does NOT add the following lines:
+ *
+ * #undef DBGC_CLASS
+ * #define DBGC_CLASS DBGC_<your class name here>
+ *
+ * at the start of the file (after #include "includes.h") will default to
+ * using index zero, so it will behaive just like it always has.
*/
-#define DEBUGADD(level, body) _DEBUG(level, body, false)
+#define DBGC_ALL 0 /* index equivalent to DEBUGLEVEL */
-/**
- * Obtain indentation string for the debug log.
+#define DBGC_TDB 1
+#define DBGC_PRINTDRIVERS 2
+#define DBGC_LANMAN 3
+#define DBGC_SMB 4
+#define DBGC_RPC_PARSE 5
+#define DBGC_RPC_SRV 6
+#define DBGC_RPC_CLI 7
+#define DBGC_PASSDB 8
+#define DBGC_SAM 9
+#define DBGC_AUTH 10
+#define DBGC_WINBIND 11
+#define DBGC_VFS 12
+#define DBGC_IDMAP 13
+#define DBGC_QUOTA 14
+#define DBGC_ACLS 15
+#define DBGC_LOCKING 16
+#define DBGC_MSDFS 17
+#define DBGC_DMAPI 18
+#define DBGC_REGISTRY 19
+
+/* Always ensure this is updated when new fixed classes area added, to ensure the array in debug.c is the right size */
+#define DBGC_MAX_FIXED 19
+
+/* So you can define DBGC_CLASS before including debug.h */
+#ifndef DBGC_CLASS
+#define DBGC_CLASS 0 /* override as shown above */
+#endif
+
+extern int *DEBUGLEVEL_CLASS;
+
+/* Debugging macros
+ *
+ * DEBUGLVL()
+ * If the 'file specific' debug class level >= level OR the system-wide
+ * DEBUGLEVEL (synomym for DEBUGLEVEL_CLASS[ DBGC_ALL ]) >= level then
+ * generate a header using the default macros for file, line, and
+ * function name. Returns True if the debug level was <= DEBUGLEVEL.
+ *
+ * Example: if( DEBUGLVL( 2 ) ) dbgtext( "Some text.\n" );
+ *
+ * DEBUG()
+ * If the 'file specific' debug class level >= level OR the system-wide
+ * DEBUGLEVEL (synomym for DEBUGLEVEL_CLASS[ DBGC_ALL ]) >= level then
+ * generate a header using the default macros for file, line, and
+ * function name. Each call to DEBUG() generates a new header *unless* the
+ * previous debug output was unterminated (i.e. no '\n').
+ * See debug.c:dbghdr() for more info.
+ *
+ * Example: DEBUG( 2, ("Some text and a value %d.\n", value) );
+ *
+ * DEBUGC()
+ * If the 'macro specified' debug class level >= level OR the system-wide
+ * DEBUGLEVEL (synomym for DEBUGLEVEL_CLASS[ DBGC_ALL ]) >= level then
+ * generate a header using the default macros for file, line, and
+ * function name. Each call to DEBUG() generates a new header *unless* the
+ * previous debug output was unterminated (i.e. no '\n').
+ * See debug.c:dbghdr() for more info.
+ *
+ * Example: DEBUGC( DBGC_TDB, 2, ("Some text and a value %d.\n", value) );
*
- * Level specified by n.
+ * DEBUGADD(), DEBUGADDC()
+ * Same as DEBUG() and DEBUGC() except the text is appended to the previous
+ * DEBUG(), DEBUGC(), DEBUGADD(), DEBUGADDC() with out another interviening
+ * header.
+ *
+ * Example: DEBUGADD( 2, ("Some text and a value %d.\n", value) );
+ * DEBUGADDC( DBGC_TDB, 2, ("Some text and a value %d.\n", value) );
+ *
+ * Note: If the debug class has not be redeined (see above) then the optimizer
+ * will remove the extra conditional test.
+ */
+
+/*
+ * From talloc.c:
*/
-#define DEBUGTAB(n) do_debug_tab(n)
+
+/* these macros gain us a few percent of speed on gcc */
+#if (__GNUC__ >= 3)
+/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
+ as its first argument */
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+#else
+#ifndef likely
+#define likely(x) (x)
+#endif
+#ifndef unlikely
+#define unlikely(x) (x)
+#endif
+#endif
+
+#define CHECK_DEBUGLVL( level ) \
+ ( ((level) <= MAX_DEBUG_LEVEL) && \
+ unlikely(DEBUGLEVEL_CLASS[ DBGC_CLASS ] >= (level)))
+
+#define DEBUGLVL( level ) \
+ ( CHECK_DEBUGLVL(level) \
+ && dbghdrclass( level, DBGC_CLASS, __location__, __FUNCTION__ ) )
+
+
+#define DEBUG( level, body ) \
+ (void)( ((level) <= MAX_DEBUG_LEVEL) && \
+ unlikely(DEBUGLEVEL_CLASS[ DBGC_CLASS ] >= (level)) \
+ && (dbghdrclass( level, DBGC_CLASS, __location__, __FUNCTION__ )) \
+ && (dbgtext body) )
+
+#define DEBUGC( dbgc_class, level, body ) \
+ (void)( ((level) <= MAX_DEBUG_LEVEL) && \
+ unlikely(DEBUGLEVEL_CLASS[ dbgc_class ] >= (level)) \
+ && (dbghdrclass( level, DBGC_CLASS, __location__, __FUNCTION__ )) \
+ && (dbgtext body) )
+
+#define DEBUGADD( level, body ) \
+ (void)( ((level) <= MAX_DEBUG_LEVEL) && \
+ unlikely(DEBUGLEVEL_CLASS[ DBGC_CLASS ] >= (level)) \
+ && (dbgtext body) )
+
+#define DEBUGADDC( dbgc_class, level, body ) \
+ (void)( ((level) <= MAX_DEBUG_LEVEL) && \
+ unlikely((DEBUGLEVEL_CLASS[ dbgc_class ] >= (level))) \
+ && (dbgtext body) )
+
+/* Print a separator to the debug log. */
+#define DEBUGSEP(level)\
+ DEBUG((level),("===============================================================\n"))
+
+/* The following definitions come from lib/debug.c */
/** Possible destinations for the debug log (in order of precedence -
* once set to DEBUG_FILE, it is not possible to reset to DEBUG_STDOUT
* for example. This makes it easy to override for debug to stderr on
* the command line, as the smb.conf cannot reset it back to
* file-based logging */
-enum debug_logtype {DEBUG_STDOUT = 0, DEBUG_FILE = 1, DEBUG_STDERR = 2};
-
-/**
- the backend for debug messages. Note that the DEBUG() macro has already
- ensured that the log level has been met before this is called
-*/
-_PUBLIC_ void dbghdr(int level, const char *location, const char *func);
-
-_PUBLIC_ void dbghdrclass(int level, int cls, const char *location, const char *func);
+enum debug_logtype {DEBUG_DEFAULT_STDERR = 0, DEBUG_STDOUT = 1, DEBUG_FILE = 2, DEBUG_STDERR = 3};
-/**
- reopen the log file (usually called because the log file name might have changed)
-*/
-_PUBLIC_ void reopen_logs(void);
+struct debug_settings {
+ size_t max_log_size;
+ bool syslog;
+ bool syslog_only;
+ bool timestamp_logs;
+ bool debug_prefix_timestamp;
+ bool debug_hires_timestamp;
+ bool debug_pid;
+ bool debug_uid;
+ bool debug_class;
+};
-/**
- * this global variable determines what messages are printed
- */
-_PUBLIC_ void debug_schedule_reopen_logs(void);
+void setup_logging(const char *prog_name, enum debug_logtype new_logtype);
-/**
- control the name of the logfile and whether logging will be to stdout, stderr
- or a file
-*/
-_PUBLIC_ void setup_logging(const char *prog_name, enum debug_logtype new_logtype);
-
-/**
- Just run logging to stdout for this program
-*/
-_PUBLIC_ void setup_logging_stdout(void);
-
-/**
- return a string constant containing n tabs
- no more than 10 tabs are returned
-*/
-_PUBLIC_ const char *do_debug_tab(int n);
+void debug_close_dbf(void);
+void gfree_debugsyms(void);
+int debug_add_class(const char *classname);
+int debug_lookup_classname(const char *classname);
+bool debug_parse_levels(const char *params_str);
+void debug_setup_talloc_log(void);
+void debug_set_logfile(const char *name);
+void debug_set_settings(struct debug_settings *settings);
+bool reopen_logs_internal( void );
+void force_check_log_size( void );
+bool need_to_check_log_size( void );
+void check_log_size( void );
+void dbgflush( void );
+bool dbghdrclass(int level, int cls, const char *location, const char *func);
+bool dbghdr(int level, const char *location, const char *func);
+bool debug_get_output_is_stderr(void);
+void debug_schedule_reopen_logs(void);
+char *debug_list_class_names_and_levels(void);
/**
log suspicious usage - print comments and backtrace
@@ -128,21 +244,26 @@ _PUBLIC_ void print_suspicious_usage(const char* from, const char* info);
_PUBLIC_ uint32_t get_task_id(void);
_PUBLIC_ void log_task_id(void);
+/* the debug operations structure - contains function pointers to
+ various debug implementations of each operation */
+struct debug_ops {
+ /* function to log (using DEBUG) suspicious usage of data structure */
+ void (*log_suspicious_usage)(const char* from, const char* info);
+
+ /* function to log (using printf) suspicious usage of data structure.
+ * To be used in circumstances when using DEBUG would cause loop. */
+ void (*print_suspicious_usage)(const char* from, const char* info);
+
+ /* function to return process/thread id */
+ uint32_t (*get_task_id)(void);
+
+ /* function to log process/thread id */
+ void (*log_task_id)(int fd);
+};
+
/**
register a set of debug handlers.
*/
_PUBLIC_ void register_debug_handlers(const char *name, struct debug_ops *ops);
-/**
- the backend for debug messages. Note that the DEBUG() macro has already
- ensured that the log level has been met before this is called
-
- @note You should never have to call this function directly. Call the DEBUG()
- macro instead.
-*/
-_PUBLIC_ void dbgtext(const char *format, ...) PRINTF_ATTRIBUTE(1,2);
-
-struct _XFILE;
-extern struct _XFILE *dbf;
-
#endif
diff --git a/lib/util/debug_s3.c b/lib/util/debug_s3.c
new file mode 100644
index 0000000000..cfb675583c
--- /dev/null
+++ b/lib/util/debug_s3.c
@@ -0,0 +1,106 @@
+/*
+ Unix SMB/CIFS implementation.
+ Samba utility functions
+ Copyright (C) Andrew Bartlett 2011
+ Copyright (C) Andrew Tridgell 1992-2002
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "librpc/gen_ndr/messaging.h"
+#include "messages.h"
+
+/* This is the Samba3-specific implementation of reopen_logs(), which
+ * calls out to the s3 loadparm code, and means that we don't depend
+ * on loadparm directly. */
+
+bool reopen_logs(void)
+{
+ if (lp_loaded()) {
+ struct debug_settings settings;
+ debug_set_logfile(lp_logfile());
+
+ ZERO_STRUCT(settings);
+ settings.max_log_size = lp_max_log_size();
+ settings.syslog = lp_syslog();
+ settings.syslog_only = lp_syslog_only();
+ settings.timestamp_logs = lp_timestamp_logs();
+ settings.debug_prefix_timestamp = lp_debug_prefix_timestamp();
+ settings.debug_hires_timestamp = lp_debug_hires_timestamp();
+ settings.debug_pid = lp_debug_pid();
+ settings.debug_uid = lp_debug_uid();
+ settings.debug_class = lp_debug_class();
+ debug_set_settings(&settings);
+ }
+ return reopen_logs_internal();
+}
+
+/****************************************************************************
+ Receive a "set debug level" message.
+****************************************************************************/
+
+void debug_message(struct messaging_context *msg_ctx,
+ void *private_data,
+ uint32_t msg_type,
+ struct server_id src,
+ DATA_BLOB *data)
+{
+ const char *params_str = (const char *)data->data;
+
+ /* Check, it's a proper string! */
+ if (params_str[(data->length)-1] != '\0') {
+ DEBUG(1, ("Invalid debug message from pid %u to pid %u\n",
+ (unsigned int)procid_to_pid(&src),
+ (unsigned int)getpid()));
+ return;
+ }
+
+ DEBUG(3, ("INFO: Remote set of debug to `%s' (pid %u from pid %u)\n",
+ params_str, (unsigned int)getpid(),
+ (unsigned int)procid_to_pid(&src)));
+
+ debug_parse_levels(params_str);
+}
+
+/****************************************************************************
+ Return current debug level.
+****************************************************************************/
+
+static void debuglevel_message(struct messaging_context *msg_ctx,
+ void *private_data,
+ uint32_t msg_type,
+ struct server_id src,
+ DATA_BLOB *data)
+{
+ char *message = debug_list_class_names_and_levels();
+
+ if (!message) {
+ DEBUG(0,("debuglevel_message - debug_list_class_names_and_levels returned NULL\n"));
+ return;
+ }
+
+ DEBUG(1,("INFO: Received REQ_DEBUGLEVEL message from PID %s\n",
+ procid_str_static(&src)));
+ messaging_send_buf(msg_ctx, src, MSG_DEBUGLEVEL,
+ (uint8 *)message, strlen(message) + 1);
+
+ TALLOC_FREE(message);
+}
+void debug_register_msgs(struct messaging_context *msg_ctx)
+{
+ messaging_register(msg_ctx, NULL, MSG_DEBUG, debug_message);
+ messaging_register(msg_ctx, NULL, MSG_REQ_DEBUGLEVEL,
+ debuglevel_message);
+}
diff --git a/lib/util/debug_s3.h b/lib/util/debug_s3.h
new file mode 100644
index 0000000000..96b8ed74d9
--- /dev/null
+++ b/lib/util/debug_s3.h
@@ -0,0 +1,24 @@
+/*
+ Unix SMB/CIFS implementation.
+ SMB debug stuff
+ Copyright (C) Andrew Tridgell 2002
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+struct messaging_context;
+struct server_id;
+void debug_message(struct messaging_context *msg_ctx, void *private_data, uint32_t msg_type, struct server_id src, DATA_BLOB *data);
+void debug_register_msgs(struct messaging_context *msg_ctx);
+bool reopen_logs( void );
diff --git a/lib/util/dlinklist.h b/lib/util/dlinklist.h
index 693b43dd27..6d525f9036 100644
--- a/lib/util/dlinklist.h
+++ b/lib/util/dlinklist.h
@@ -1,7 +1,8 @@
/*
Unix SMB/CIFS implementation.
some simple double linked list macros
- Copyright (C) Andrew Tridgell 1998
+
+ Copyright (C) Andrew Tridgell 1998-2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -23,55 +24,94 @@
#ifndef _DLINKLIST_H
#define _DLINKLIST_H
+/*
+ February 2010 - changed list format to have a prev pointer from the
+ list head. This makes DLIST_ADD_END() O(1) even though we only have
+ one list pointer.
+
+ The scheme is as follows:
+
+ 1) with no entries in the list:
+ list_head == NULL
+
+ 2) with 1 entry in the list:
+ list_head->next == NULL
+ list_head->prev == list_head
+
+ 3) with 2 entries in the list:
+ list_head->next == element2
+ list_head->prev == element2
+ element2->prev == list_head
+ element2->next == NULL
+
+ 4) with N entries in the list:
+ list_head->next == element2
+ list_head->prev == elementN
+ elementN->prev == element{N-1}
+ elementN->next == NULL
-/* hook into the front of the list */
+ This allows us to find the tail of the list by using
+ list_head->prev, which means we can add to the end of the list in
+ O(1) time
+
+
+ Note that the 'type' arguments below are no longer needed, but
+ are kept for now to prevent an incompatible argument change
+ */
+
+
+/*
+ add an element at the front of a list
+*/
#define DLIST_ADD(list, p) \
do { \
if (!(list)) { \
- (list) = (p); \
- (p)->next = (p)->prev = NULL; \
+ (p)->prev = (list) = (p); \
+ (p)->next = NULL; \
} else { \
+ (p)->prev = (list)->prev; \
(list)->prev = (p); \
(p)->next = (list); \
- (p)->prev = NULL; \
(list) = (p); \
- }\
+ } \
} while (0)
-/* remove an element from a list - element doesn't have to be in list. */
+/*
+ remove an element from a list
+ Note that the element doesn't have to be in the list. If it
+ isn't then this is a no-op
+*/
#define DLIST_REMOVE(list, p) \
do { \
if ((p) == (list)) { \
+ if ((p)->next) (p)->next->prev = (p)->prev; \
(list) = (p)->next; \
- if (list) (list)->prev = NULL; \
+ } else if ((list) && (p) == (list)->prev) { \
+ (p)->prev->next = NULL; \
+ (list)->prev = (p)->prev; \
} else { \
if ((p)->prev) (p)->prev->next = (p)->next; \
if ((p)->next) (p)->next->prev = (p)->prev; \
} \
- if ((p) != (list)) (p)->next = (p)->prev = NULL; \
+ if ((p) != (list)) (p)->next = (p)->prev = NULL; \
} while (0)
-/* promote an element to the top of the list */
-#define DLIST_PROMOTE(list, p) \
+/*
+ find the head of the list given any element in it.
+ Note that this costs O(N), so you should avoid this macro
+ if at all possible!
+*/
+#define DLIST_HEAD(p, result_head) \
do { \
- DLIST_REMOVE(list, p); \
- DLIST_ADD(list, p); \
-} while (0)
+ (result_head) = (p); \
+ while (DLIST_PREV(result_head)) (result_head) = (result_head)->prev; \
+} while(0)
-/* hook into the end of the list - needs the entry type */
-#define DLIST_ADD_END(list, p, type) \
-do { \
- if (!(list)) { \
- (list) = (p); \
- (p)->next = (p)->prev = NULL; \
- } else { \
- type tmp; \
- for (tmp = (list); tmp->next; tmp = tmp->next) ; \
- tmp->next = (p); \
- (p)->next = NULL; \
- (p)->prev = tmp; \
- } \
-} while (0)
+/* return the last element in the list */
+#define DLIST_TAIL(list) ((list)?(list)->prev:NULL)
+
+/* return the previous element in the list. */
+#define DLIST_PREV(p) (((p)->prev && (p)->prev->next != NULL)?(p)->prev:NULL)
/* insert 'p' after the given element 'el' in a list. If el is NULL then
this is the same as a DLIST_ADD() */
@@ -80,34 +120,62 @@ do { \
if (!(list) || !(el)) { \
DLIST_ADD(list, p); \
} else { \
- p->prev = el; \
- p->next = el->next; \
- el->next = p; \
- if (p->next) p->next->prev = p; \
+ (p)->prev = (el); \
+ (p)->next = (el)->next; \
+ (el)->next = (p); \
+ if ((p)->next) (p)->next->prev = (p); \
+ if ((list)->prev == (el)) (list)->prev = (p); \
}\
} while (0)
-/* demote an element to the end of the list, needs the entry type */
-#define DLIST_DEMOTE(list, p, type) \
+
+/*
+ add to the end of a list.
+ Note that 'type' is ignored
+*/
+#define DLIST_ADD_END(list, p, type) \
do { \
- DLIST_REMOVE(list, p); \
- DLIST_ADD_END(list, p, type); \
+ if (!(list)) { \
+ DLIST_ADD(list, p); \
+ } else { \
+ DLIST_ADD_AFTER(list, p, (list)->prev); \
+ } \
} while (0)
-/* concatenate two lists - putting all elements of the 2nd list at the
- end of the first list */
-#define DLIST_CONCATENATE(list1, list2, type) \
+/* promote an element to the from of a list */
+#define DLIST_PROMOTE(list, p) \
do { \
- if (!(list1)) { \
- (list1) = (list2); \
- } else { \
- type tmp; \
- for (tmp = (list1); tmp->next; tmp = tmp->next) ; \
- tmp->next = (list2); \
- if (list2) { \
- (list2)->prev = tmp; \
- } \
+ DLIST_REMOVE(list, p); \
+ DLIST_ADD(list, p); \
+} while (0)
+
+/*
+ demote an element to the end of a list.
+ Note that 'type' is ignored
+*/
+#define DLIST_DEMOTE(list, p, type) \
+do { \
+ DLIST_REMOVE(list, p); \
+ DLIST_ADD_END(list, p, NULL); \
+} while (0)
+
+/*
+ concatenate two lists - putting all elements of the 2nd list at the
+ end of the first list.
+ Note that 'type' is ignored
+*/
+#define DLIST_CONCATENATE(list1, list2, type) \
+do { \
+ if (!(list1)) { \
+ (list1) = (list2); \
+ } else { \
+ (list1)->prev->next = (list2); \
+ if (list2) { \
+ void *_tmplist = (void *)(list1)->prev; \
+ (list1)->prev = (list2)->prev; \
+ (list2)->prev = _tmplist; \
} \
+ } \
} while (0)
#endif /* _DLINKLIST_H */
diff --git a/lib/util/fault.c b/lib/util/fault.c
index cb51cbd859..29b45ee147 100644
--- a/lib/util/fault.c
+++ b/lib/util/fault.c
@@ -51,7 +51,7 @@ _PUBLIC_ void call_backtrace(void)
#define BACKTRACE_STACK_SIZE 64
#endif
void *backtrace_stack[BACKTRACE_STACK_SIZE];
- size_t backtrace_size;
+ int backtrace_size;
char **backtrace_strings;
/* get the backtrace (stack frames) */
@@ -125,7 +125,7 @@ _PUBLIC_ _NORETURN_ void smb_panic(const char *why)
char pidstr[20];
char cmdstring[200];
safe_strcpy(cmdstring, panic_action, sizeof(cmdstring));
- snprintf(pidstr, sizeof(pidstr), "%u", getpid());
+ snprintf(pidstr, sizeof(pidstr), "%d", (int) getpid());
all_string_sub(cmdstring, "%PID%", pidstr, sizeof(cmdstring));
if (progname) {
all_string_sub(cmdstring, "%PROG%", progname, sizeof(cmdstring));
@@ -145,7 +145,7 @@ _PUBLIC_ _NORETURN_ void smb_panic(const char *why)
call_backtrace();
#ifdef SIGABRT
- CatchSignal(SIGABRT,SIGNAL_CAST SIG_DFL);
+ CatchSignal(SIGABRT, SIG_DFL);
#endif
abort();
}
@@ -187,24 +187,34 @@ setup our fault handlers
**/
_PUBLIC_ void fault_setup(const char *pname)
{
- if (progname == NULL) {
- progname = pname;
+ if (progname != NULL) {
+ return;
}
+ progname = pname;
#ifdef SIGSEGV
- CatchSignal(SIGSEGV,SIGNAL_CAST sig_fault);
+ CatchSignal(SIGSEGV, sig_fault);
#endif
#ifdef SIGBUS
- CatchSignal(SIGBUS,SIGNAL_CAST sig_fault);
+ CatchSignal(SIGBUS, sig_fault);
#endif
#ifdef SIGABRT
- CatchSignal(SIGABRT,SIGNAL_CAST sig_fault);
+ CatchSignal(SIGABRT, sig_fault);
#endif
#ifdef SIGFPE
- CatchSignal(SIGFPE,SIGNAL_CAST sig_fault);
+ CatchSignal(SIGFPE, sig_fault);
#endif
}
/**
+ disable setting up fault handlers
+**/
+_PUBLIC_ void fault_setup_disable(void)
+{
+ progname = "fault disabled";
+}
+
+
+/**
register a fault handler.
Should only be called once in the execution of smbd.
*/
diff --git a/lib/util/fault.m4 b/lib/util/fault.m4
deleted file mode 100644
index c22976998e..0000000000
--- a/lib/util/fault.m4
+++ /dev/null
@@ -1,18 +0,0 @@
-AC_CHECK_HEADERS(execinfo.h)
-AC_SEARCH_LIBS_EXT(backtrace, [execinfo], EXECINFO_LIBS)
-AC_CHECK_FUNC_EXT(backtrace, $EXECINFO_LIBS)
-
-
-if test x"$ac_cv_header_execinfo_h" = x"yes" -a x"$ac_cv_func_ext_backtrace" = x"yes";then
- SMB_ENABLE(EXECINFO, YES)
- EXECINFO_CFLAGS="$CFLAGS"
- EXECINFO_CPPFLAGS="$CPPFLAGS"
- EXECINFO_LDFLAGS="$LDFLAGS"
- LIB_REMOVE_USR_LIB(EXECINFO_LDFLAGS)
- CFLAGS_REMOVE_USR_INCLUDE(EXECINFO_CFLAGS)
- CFLAGS_REMOVE_USR_INCLUDE(EXECINFO_CPPFLAGS)
-else
- SMB_ENABLE(EXECINFO,NO)
-fi
-
-SMB_EXT_LIB(EXECINFO, [${EXECINFO_LIBS}], [${EXECINFO_CFLAGS}], [${EXECINFO_CPPFLAGS}], [${EXECINFO_LDFLAGS}])
diff --git a/lib/util/fsusage.m4 b/lib/util/fsusage.m4
deleted file mode 100644
index 843965041f..0000000000
--- a/lib/util/fsusage.m4
+++ /dev/null
@@ -1,200 +0,0 @@
-#################################################
-# these tests are taken from the GNU fileutils package
-AC_CHECKING(how to get filesystem space usage)
-AC_CHECK_HEADERS(sys/statfs.h sys/statvfs.h sys/vfs.h)
-
-AC_CHECK_HEADERS(sys/mount.h, , , [AC_INCLUDES_DEFAULT
-#ifdef HAVE_SYS_PARAM_H
-#include <sys/param.h>
-#endif])
-
-space=no
-
-# Test for statvfs64.
-if test $space = no; then
- # SVR4
- AC_CACHE_CHECK([statvfs64 function (SVR4)], fu_cv_sys_stat_statvfs64,
- [AC_TRY_RUN([
-#if defined(HAVE_UNISTD_H)
-#include <unistd.h>
-#endif
-#include <sys/types.h>
-#include <sys/statvfs.h>
- main ()
- {
- struct statvfs64 fsd;
- exit (statvfs64 (".", &fsd));
- }],
- fu_cv_sys_stat_statvfs64=yes,
- fu_cv_sys_stat_statvfs64=no,
- fu_cv_sys_stat_statvfs64=cross)])
- if test $fu_cv_sys_stat_statvfs64 = yes; then
- space=yes
- AC_DEFINE(STAT_STATVFS64,1,[Whether statvfs64() is available])
- fi
-fi
-
-# Perform only the link test since it seems there are no variants of the
-# statvfs function. This check is more than just AC_CHECK_FUNCS(statvfs)
-# because that got a false positive on SCO OSR5. Adding the declaration
-# of a `struct statvfs' causes this test to fail (as it should) on such
-# systems. That system is reported to work fine with STAT_STATFS4 which
-# is what it gets when this test fails.
-if test $space = no; then
- # SVR4
- AC_CACHE_CHECK([statvfs function (SVR4)], fu_cv_sys_stat_statvfs,
- [AC_TRY_LINK([#include <sys/types.h>
-#include <sys/statvfs.h>],
- [struct statvfs fsd; statvfs (0, &fsd);],
- fu_cv_sys_stat_statvfs=yes,
- fu_cv_sys_stat_statvfs=no)])
- if test $fu_cv_sys_stat_statvfs = yes; then
- space=yes
- AC_DEFINE(STAT_STATVFS,1,[Whether statvfs() is available])
- fi
-fi
-
-# fsusage.c assumes that statvfs has an f_frsize entry. Some weird
-# systems use f_bsize.
-AC_CACHE_CHECK([that statvfs.f_frsize works],samba_cv_frsize, [
- AC_TRY_COMPILE([#include <sys/types.h>
-#include <sys/statvfs.h>],[struct statvfs buf; buf.f_frsize = 0],
- samba_cv_frsize=yes,samba_cv_frsize=no)])
-if test x"$samba_cv_frsize" = x"yes"; then
- AC_DEFINE(HAVE_FRSIZE, 1, [Whether statvfs.f_frsize exists])
-fi
-
-if test $space = no; then
- # DEC Alpha running OSF/1
- AC_MSG_CHECKING([for 3-argument statfs function (DEC OSF/1)])
- AC_CACHE_VAL(fu_cv_sys_stat_statfs3_osf1,
- [AC_TRY_RUN([
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/mount.h>
- main ()
- {
- struct statfs fsd;
- fsd.f_fsize = 0;
- exit (statfs (".", &fsd, sizeof (struct statfs)));
- }],
- fu_cv_sys_stat_statfs3_osf1=yes,
- fu_cv_sys_stat_statfs3_osf1=no,
- fu_cv_sys_stat_statfs3_osf1=no)])
- AC_MSG_RESULT($fu_cv_sys_stat_statfs3_osf1)
- if test $fu_cv_sys_stat_statfs3_osf1 = yes; then
- space=yes
- AC_DEFINE(STAT_STATFS3_OSF1,1,[Whether statfs requires 3 arguments])
- fi
-fi
-
-if test $space = no; then
-# AIX
- AC_MSG_CHECKING([for two-argument statfs with statfs.bsize dnl
-member (AIX, 4.3BSD)])
- AC_CACHE_VAL(fu_cv_sys_stat_statfs2_bsize,
- [AC_TRY_RUN([
-#ifdef HAVE_SYS_PARAM_H
-#include <sys/param.h>
-#endif
-#ifdef HAVE_SYS_MOUNT_H
-#include <sys/mount.h>
-#endif
-#ifdef HAVE_SYS_VFS_H
-#include <sys/vfs.h>
-#endif
- main ()
- {
- struct statfs fsd;
- fsd.f_bsize = 0;
- exit (statfs (".", &fsd));
- }],
- fu_cv_sys_stat_statfs2_bsize=yes,
- fu_cv_sys_stat_statfs2_bsize=no,
- fu_cv_sys_stat_statfs2_bsize=no)])
- AC_MSG_RESULT($fu_cv_sys_stat_statfs2_bsize)
- if test $fu_cv_sys_stat_statfs2_bsize = yes; then
- space=yes
- AC_DEFINE(STAT_STATFS2_BSIZE,1,[Whether statfs requires two arguments and struct statfs has bsize property])
- fi
-fi
-
-if test $space = no; then
-# SVR3
- AC_MSG_CHECKING([for four-argument statfs (AIX-3.2.5, SVR3)])
- AC_CACHE_VAL(fu_cv_sys_stat_statfs4,
- [AC_TRY_RUN([#include <sys/types.h>
-#include <sys/statfs.h>
- main ()
- {
- struct statfs fsd;
- exit (statfs (".", &fsd, sizeof fsd, 0));
- }],
- fu_cv_sys_stat_statfs4=yes,
- fu_cv_sys_stat_statfs4=no,
- fu_cv_sys_stat_statfs4=no)])
- AC_MSG_RESULT($fu_cv_sys_stat_statfs4)
- if test $fu_cv_sys_stat_statfs4 = yes; then
- space=yes
- AC_DEFINE(STAT_STATFS4,1,[Whether statfs requires 4 arguments])
- fi
-fi
-
-if test $space = no; then
-# 4.4BSD and NetBSD
- AC_MSG_CHECKING([for two-argument statfs with statfs.fsize dnl
-member (4.4BSD and NetBSD)])
- AC_CACHE_VAL(fu_cv_sys_stat_statfs2_fsize,
- [AC_TRY_RUN([#include <sys/types.h>
-#ifdef HAVE_SYS_PARAM_H
-#include <sys/param.h>
-#endif
-#ifdef HAVE_SYS_MOUNT_H
-#include <sys/mount.h>
-#endif
- main ()
- {
- struct statfs fsd;
- fsd.f_fsize = 0;
- exit (statfs (".", &fsd));
- }],
- fu_cv_sys_stat_statfs2_fsize=yes,
- fu_cv_sys_stat_statfs2_fsize=no,
- fu_cv_sys_stat_statfs2_fsize=no)])
- AC_MSG_RESULT($fu_cv_sys_stat_statfs2_fsize)
- if test $fu_cv_sys_stat_statfs2_fsize = yes; then
- space=yes
- AC_DEFINE(STAT_STATFS2_FSIZE,1,[Whether statfs requires 2 arguments and struct statfs has fsize])
- fi
-fi
-
-if test $space = no; then
- # Ultrix
- AC_MSG_CHECKING([for two-argument statfs with struct fs_data (Ultrix)])
- AC_CACHE_VAL(fu_cv_sys_stat_fs_data,
- [AC_TRY_RUN([#include <sys/types.h>
-#ifdef HAVE_SYS_PARAM_H
-#include <sys/param.h>
-#endif
-#ifdef HAVE_SYS_MOUNT_H
-#include <sys/mount.h>
-#endif
-#ifdef HAVE_SYS_FS_TYPES_H
-#include <sys/fs_types.h>
-#endif
- main ()
- {
- struct fs_data fsd;
- /* Ultrix's statfs returns 1 for success,
- 0 for not mounted, -1 for failure. */
- exit (statfs (".", &fsd) != 1);
- }],
- fu_cv_sys_stat_fs_data=yes,
- fu_cv_sys_stat_fs_data=no,
- fu_cv_sys_stat_fs_data=no)])
- AC_MSG_RESULT($fu_cv_sys_stat_fs_data)
- if test $fu_cv_sys_stat_fs_data = yes; then
- space=yes
- AC_DEFINE(STAT_STATFS2_FS_DATA,1,[Whether statfs requires 2 arguments and struct fs_data is available])
- fi
-fi
diff --git a/lib/util/genrand.c b/lib/util/genrand.c
index f0544023f1..7fe55f345e 100644
--- a/lib/util/genrand.c
+++ b/lib/util/genrand.c
@@ -362,6 +362,54 @@ again:
}
/**
+ * Generate a random text password.
+ */
+
+_PUBLIC_ char *generate_random_password(TALLOC_CTX *mem_ctx, size_t min, size_t max)
+{
+ char *retstr;
+ /* This list does not include { or } because they cause
+ * problems for our provision (it can create a substring
+ * ${...}, and for Fedora DS (which treats {...} at the start
+ * of a stored password as special
+ * -- Andrew Bartlett 2010-03-11
+ */
+ const char *c_list = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+_-#.,@$%&!?:;<=>()[]~";
+ size_t len = max;
+ size_t diff;
+
+ if (min > max) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ diff = max - min;
+
+ if (diff > 0 ) {
+ size_t tmp;
+
+ generate_random_buffer((uint8_t *)&tmp, sizeof(tmp));
+
+ tmp %= diff;
+
+ len = min + tmp;
+ }
+
+again:
+ retstr = generate_random_str_list(mem_ctx, len, c_list);
+ if (!retstr) return NULL;
+
+ /* we need to make sure the random string passes basic quality tests
+ or it might be rejected by windows as a password */
+ if (len >= 7 && !check_password_quality(retstr)) {
+ talloc_free(retstr);
+ goto again;
+ }
+
+ return retstr;
+}
+
+/**
* Generate an array of unique text strings all of the same length.
* The returned string will be allocated.
* Returns NULL if the number of unique combinations cannot be created.
diff --git a/lib/util/idtree.c b/lib/util/idtree.c
index 0af93a229d..3648761069 100644
--- a/lib/util/idtree.c
+++ b/lib/util/idtree.c
@@ -104,8 +104,8 @@ static int sub_alloc(struct idr_context *idp, void *ptr, int *starting_id)
{
int n, m, sh;
struct idr_layer *p, *pn;
- struct idr_layer *pa[MAX_LEVEL];
- int l, id, oid;
+ struct idr_layer *pa[MAX_LEVEL+1];
+ unsigned int l, id, oid;
uint32_t bm;
memset(pa, 0, sizeof(pa));
@@ -240,7 +240,7 @@ build_up:
static int sub_remove(struct idr_context *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_LEVEL];
+ struct idr_layer **pa[1+MAX_LEVEL];
struct idr_layer ***paa = &pa[0];
int n;
@@ -280,8 +280,10 @@ static void *_idr_find(struct idr_context *idp, int id)
* This tests to see if bits outside the current tree are
* present. If so, tain't one of ours!
*/
- if ((id & ~(~0 << MAX_ID_SHIFT)) >> (n + IDR_BITS))
- return NULL;
+ if (n + IDR_BITS < 31 &&
+ ((id & ~(~0 << MAX_ID_SHIFT)) >> (n + IDR_BITS))) {
+ return NULL;
+ }
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
diff --git a/lib/util/memory.h b/lib/util/memory.h
index cfc13ab836..099dfbb694 100644
--- a/lib/util/memory.h
+++ b/lib/util/memory.h
@@ -28,7 +28,7 @@
* @note You are explicitly allowed to pass NULL pointers -- they will
* always be ignored.
**/
-#define SAFE_FREE(x) do { if ((x) != NULL) {free(discard_const_p(void *, (x))); (x)=NULL;} } while(0)
+#define SAFE_FREE(x) do { if ((x) != NULL) {free(x); (x)=NULL;} } while(0)
#endif
/**
diff --git a/lib/util/mutex.c b/lib/util/mutex.c
deleted file mode 100644
index 4d0df68eed..0000000000
--- a/lib/util/mutex.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- Unix SMB/CIFS implementation.
- Samba mutex/lock functions
- Copyright (C) Andrew Tridgell 2003
- Copyright (C) James J Myers 2003
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-#include "includes.h"
-#include "mutex.h"
-
-/**
- * @file
- * @brief Mutex utility functions
- */
-
-/* the registered mutex handlers */
-static struct {
- const char *name;
- struct mutex_ops ops;
-} mutex_handlers;
-
-/* read/write lock routines */
-
-
-/**
- register a set of mutex/rwlock handlers.
- Should only be called once in the execution of smbd.
-*/
-_PUBLIC_ bool register_mutex_handlers(const char *name, struct mutex_ops *ops)
-{
- if (mutex_handlers.name != NULL) {
- /* it's already registered! */
- DEBUG(2,("mutex handler '%s' already registered - failed '%s'\n",
- mutex_handlers.name, name));
- return false;
- }
-
- mutex_handlers.name = name;
- mutex_handlers.ops = *ops;
-
- DEBUG(2,("mutex handler '%s' registered\n", name));
- return true;
-}
-
diff --git a/lib/util/mutex.h b/lib/util/mutex.h
deleted file mode 100644
index bf845906f2..0000000000
--- a/lib/util/mutex.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _MUTEX_H_
-#define _MUTEX_H_
-/*
- Unix SMB/CIFS implementation.
- Samba mutex functions
- Copyright (C) Andrew Tridgell 2003
- Copyright (C) James J Myers 2003
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-/**
- * @file
- * @brief Mutex operations
- */
-
-struct mutex_ops;
-
-/* To add a new read/write lock, add it to enum rwlock_id
- */
-enum rwlock_id { RWLOCK_SMBD, /* global smbd lock */
-
- RWLOCK_MAX /* this MUST be kept last */
-};
-
-#define MUTEX_LOCK_BY_ID(mutex_index) smb_mutex_lock_by_id(mutex_index, #mutex_index)
-#define MUTEX_UNLOCK_BY_ID(mutex_index) smb_mutex_unlock_by_id(mutex_index, #mutex_index)
-#define MUTEX_INIT(mutex, name) smb_mutex_init(mutex, #name)
-#define MUTEX_DESTROY(mutex, name) smb_mutex_destroy(mutex, #name)
-#define MUTEX_LOCK(mutex, name) smb_mutex_lock(mutex, #name)
-#define MUTEX_UNLOCK(mutex, name) smb_mutex_unlock(mutex, #name)
-
-#define RWLOCK_INIT(rwlock, name) smb_rwlock_init(rwlock, #name)
-#define RWLOCK_DESTROY(rwlock, name) smb_rwlock_destroy(rwlock, #name)
-#define RWLOCK_LOCK_WRITE(rwlock, name) smb_rwlock_lock_write(rwlock, #name)
-#define RWLOCK_LOCK_READ(rwlock, name) smb_rwlock_lock_read(rwlock, #name)
-#define RWLOCK_UNLOCK(rwlock, name) smb_rwlock_unlock(rwlock, #name)
-
-
-
-/* this null typedef ensures we get the types right and avoids the
- pitfalls of void* */
-typedef struct smb_mutex {
- void *mutex;
-} smb_mutex_t;
-typedef struct {
- void *rwlock;
-} smb_rwlock_t;
-
-/* the mutex model operations structure - contains function pointers to
- the model-specific implementations of each operation */
-struct mutex_ops {
- int (*mutex_init)(smb_mutex_t *mutex, const char *name);
- int (*mutex_lock)(smb_mutex_t *mutex, const char *name);
- int (*mutex_unlock)(smb_mutex_t *mutex, const char *name);
- int (*mutex_destroy)(smb_mutex_t *mutex, const char *name);
- int (*rwlock_init)(smb_rwlock_t *rwlock, const char *name);
- int (*rwlock_lock_write)(smb_rwlock_t *rwlock, const char *name);
- int (*rwlock_lock_read)(smb_rwlock_t *rwlock, const char *name);
- int (*rwlock_unlock)(smb_rwlock_t *rwlock, const char *name);
- int (*rwlock_destroy)(smb_rwlock_t *rwlock, const char *name);
-};
-
-#endif /* endif _MUTEX_H_ */
diff --git a/lib/util/params.c b/lib/util/params.c
index 376fed4078..45fcd5bdc8 100644
--- a/lib/util/params.c
+++ b/lib/util/params.c
@@ -488,7 +488,7 @@ static bool Parse( myFILE *InFile,
return( true );
} /* Parse */
-static myFILE *OpenConfFile( const char *FileName )
+static myFILE *OpenConfFile(TALLOC_CTX *mem_ctx, const char *FileName )
/* ------------------------------------------------------------------------ **
* Open a configuration file.
*
@@ -502,7 +502,7 @@ static myFILE *OpenConfFile( const char *FileName )
const char *func = "params.c:OpenConfFile() -";
myFILE *ret;
- ret = talloc(talloc_autofree_context(), myFILE);
+ ret = talloc(mem_ctx, myFILE);
if (!ret) return NULL;
ret->buf = file_load(FileName, &ret->size, 0, ret);
@@ -543,7 +543,7 @@ bool pm_process( const char *FileName,
myFILE *InFile;
const char *func = "params.c:pm_process() -";
- InFile = OpenConfFile( FileName ); /* Open the config file. */
+ InFile = OpenConfFile(NULL, FileName); /* Open the config file. */
if( NULL == InFile )
return( false );
diff --git a/lib/util/samba-util.pc.in b/lib/util/samba-util.pc.in
new file mode 100644
index 0000000000..a49c1d0290
--- /dev/null
+++ b/lib/util/samba-util.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: samba-util
+Description: Samba utility functions
+Requires: talloc tevent
+Version: 0.0.1
+Libs: @LIB_RPATH@ -L${libdir} -lsamba-util
+Cflags: -I${includedir} -DHAVE_IMMEDIATE_STRUCTURES=1
diff --git a/lib/util/select.c b/lib/util/select.c
new file mode 100644
index 0000000000..b9326ef901
--- /dev/null
+++ b/lib/util/select.c
@@ -0,0 +1,158 @@
+/*
+ Unix SMB/Netbios implementation.
+ Version 3.0
+ Samba select/poll implementation
+ Copyright (C) Andrew Tridgell 1992-1998
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "system/select.h"
+#include "lib/util/select.h"
+
+/* This is here because it allows us to avoid a nasty race in signal handling.
+ We need to guarantee that when we get a signal we get out of a select immediately
+ but doing that involves a race condition. We can avoid the race by getting the
+ signal handler to write to a pipe that is in the select/poll list
+
+ This means all Samba signal handlers should call sys_select_signal().
+*/
+
+static pid_t initialised;
+static int select_pipe[2];
+static volatile unsigned pipe_written, pipe_read;
+
+/*******************************************************************
+ Call this from all Samba signal handlers if you want to avoid a
+ nasty signal race condition.
+********************************************************************/
+
+void sys_select_signal(char c)
+{
+ int saved_errno = errno;
+
+ if (!initialised) return;
+
+ if (pipe_written > pipe_read+256) return;
+
+ if (write(select_pipe[1], &c, 1) == 1) pipe_written++;
+
+ errno = saved_errno;
+}
+
+/*
+ * sys_poll expects pollfd's to be a talloc'ed array.
+ *
+ * It expects the talloc_array_length(fds) >= num_fds+1 to give space
+ * to the signal pipe.
+ */
+
+int sys_poll(struct pollfd *fds, int num_fds, int timeout)
+{
+ int ret;
+
+ if (talloc_array_length(fds) < num_fds+1) {
+ errno = ENOSPC;
+ return -1;
+ }
+
+ if (initialised != sys_getpid()) {
+ if (pipe(select_pipe) == -1)
+ {
+ int saved_errno = errno;
+ DEBUG(0, ("sys_poll: pipe failed (%s)\n",
+ strerror(errno)));
+ errno = saved_errno;
+ return -1;
+ }
+
+ /*
+ * These next two lines seem to fix a bug with the Linux
+ * 2.0.x kernel (and probably other UNIXes as well) where
+ * the one byte read below can block even though the
+ * select returned that there is data in the pipe and
+ * the pipe_written variable was incremented. Thanks to
+ * HP for finding this one. JRA.
+ */
+
+ if(set_blocking(select_pipe[0],0)==-1)
+ smb_panic("select_pipe[0]: O_NONBLOCK failed");
+ if(set_blocking(select_pipe[1],0)==-1)
+ smb_panic("select_pipe[1]: O_NONBLOCK failed");
+
+ initialised = sys_getpid();
+ }
+
+ ZERO_STRUCT(fds[num_fds]);
+ fds[num_fds].fd = select_pipe[0];
+ fds[num_fds].events = POLLIN|POLLHUP;
+
+ errno = 0;
+ ret = poll(fds, num_fds+1, timeout);
+
+ if ((ret >= 0) && (fds[num_fds].revents & (POLLIN|POLLHUP|POLLERR))) {
+ char c;
+ int saved_errno = errno;
+
+ if (read(select_pipe[0], &c, 1) == 1) {
+ pipe_read += 1;
+
+ /* Mark Weaver <mark-clist@npsl.co.uk> pointed out a critical
+ fix to ensure we don't lose signals. We must always
+ return -1 when the select pipe is set, otherwise if another
+ fd is also ready (so ret == 2) then we used to eat the
+ byte in the pipe and lose the signal. JRA.
+ */
+ ret = -1;
+#if 0
+ /* JRA - we can use this to debug the signal messaging... */
+ DEBUG(0,("select got %u signal\n", (unsigned int)c));
+#endif
+ errno = EINTR;
+ } else {
+ ret -= 1;
+ errno = saved_errno;
+ }
+ }
+
+ return ret;
+}
+
+int sys_poll_intr(struct pollfd *fds, int num_fds, int timeout)
+{
+ int orig_timeout = timeout;
+ struct timespec start;
+ int ret;
+
+ clock_gettime_mono(&start);
+
+ while (true) {
+ struct timespec now;
+ int64_t elapsed;
+
+ ret = poll(fds, num_fds, timeout);
+ if (ret != -1) {
+ break;
+ }
+ if (errno != EINTR) {
+ break;
+ }
+ clock_gettime_mono(&now);
+ elapsed = nsec_time_diff(&now, &start);
+ timeout = (orig_timeout - elapsed) / 1000000;
+ };
+ return ret;
+}
diff --git a/lib/util/select.h b/lib/util/select.h
new file mode 100644
index 0000000000..36efa6e83b
--- /dev/null
+++ b/lib/util/select.h
@@ -0,0 +1,31 @@
+/*
+ Unix SMB/Netbios implementation.
+ Samba select/poll implementation
+ Copyright (C) Andrew Tridgell 1992-1998
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _select_h_
+#define _select_h_
+
+#include "system/select.h"
+
+/* The following definitions come from lib/util/select.c */
+
+void sys_select_signal(char c);
+int sys_poll(struct pollfd *fds, int num_fds, int timeout);
+int sys_poll_intr(struct pollfd *fds, int num_fds, int timeout);
+
+#endif
diff --git a/lib/util/signal.m4 b/lib/util/signal.m4
deleted file mode 100644
index c6d7f72f68..0000000000
--- a/lib/util/signal.m4
+++ /dev/null
@@ -1 +0,0 @@
-AC_CHECK_FUNCS(sigprocmask sigblock sigaction)
diff --git a/lib/util/system.c b/lib/util/system.c
index 9bf5de1a83..17c0553102 100644
--- a/lib/util/system.c
+++ b/lib/util/system.c
@@ -117,3 +117,75 @@ _PUBLIC_ pid_t sys_getpid(void)
return mypid;
}
+
+
+_PUBLIC_ int sys_getpeereid( int s, uid_t *uid)
+{
+#if defined(HAVE_PEERCRED)
+ struct ucred cred;
+ socklen_t cred_len = sizeof(struct ucred);
+ int ret;
+
+ ret = getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void *)&cred, &cred_len);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (cred_len != sizeof(struct ucred)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ *uid = cred.uid;
+ return 0;
+#else
+#if defined(HAVE_GETPEEREID)
+ gid_t gid;
+ return getpeereid(s, uid, &gid);
+#endif
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+_PUBLIC_ int sys_getnameinfo(const struct sockaddr *psa,
+ int salen,
+ char *host,
+ size_t hostlen,
+ char *service,
+ size_t servlen,
+ int flags)
+{
+ /*
+ * For Solaris we must make sure salen is the
+ * correct length for the incoming sa_family.
+ */
+
+ if (salen == sizeof(struct sockaddr_storage)) {
+ salen = sizeof(struct sockaddr_in);
+#if defined(HAVE_IPV6)
+ if (psa->sa_family == AF_INET6) {
+ salen = sizeof(struct sockaddr_in6);
+ }
+#endif
+ }
+ return getnameinfo(psa, salen, host, hostlen, service, servlen, flags);
+}
+
+_PUBLIC_ int sys_connect(int fd, const struct sockaddr * addr)
+{
+ socklen_t salen = (socklen_t)-1;
+
+ if (addr->sa_family == AF_INET) {
+ salen = sizeof(struct sockaddr_in);
+ } else if (addr->sa_family == AF_UNIX) {
+ salen = sizeof(struct sockaddr_un);
+ }
+#if defined(HAVE_IPV6)
+ else if (addr->sa_family == AF_INET6) {
+ salen = sizeof(struct sockaddr_in6);
+ }
+#endif
+
+ return connect(fd, addr, salen);
+}
diff --git a/lib/util/talloc_stack.c b/lib/util/talloc_stack.c
index f34d495294..8e559cc20f 100644
--- a/lib/util/talloc_stack.c
+++ b/lib/util/talloc_stack.c
@@ -68,22 +68,20 @@ static void talloc_stackframe_init(void * unused)
static struct talloc_stackframe *talloc_stackframe_create(void)
{
#if defined(PARANOID_MALLOC_CHECKER)
-#ifdef malloc
-#undef malloc
+#ifdef calloc
+#undef calloc
#endif
#endif
- struct talloc_stackframe *ts =
- (struct talloc_stackframe *)malloc(sizeof(struct talloc_stackframe));
+ struct talloc_stackframe *ts = (struct talloc_stackframe *)calloc(
+ 1, sizeof(struct talloc_stackframe));
#if defined(PARANOID_MALLOC_CHECKER)
-#define malloc(s) __ERROR_DONT_USE_MALLOC_DIRECTLY
+#define calloc(n, s) __ERROR_DONT_USE_MALLOC_DIRECTLY
#endif
if (!ts) {
smb_panic("talloc_stackframe_init malloc failed");
}
- ZERO_STRUCTP(ts);
-
SMB_THREAD_ONCE(&ts_initialized, talloc_stackframe_init, NULL);
if (SMB_THREAD_SET_TLS(global_ts, ts)) {
@@ -102,8 +100,7 @@ static int talloc_pop(TALLOC_CTX *frame)
if (frame == ts->talloc_stack[i]) {
break;
}
- talloc_free(ts->talloc_stack[i]);
- ts->talloc_stack[i] = NULL;
+ TALLOC_FREE(ts->talloc_stack[i]);
}
ts->talloc_stack[i] = NULL;
diff --git a/lib/util/talloc_stack.h b/lib/util/talloc_stack.h
index 777671164d..0e8fab3759 100644
--- a/lib/util/talloc_stack.h
+++ b/lib/util/talloc_stack.h
@@ -35,7 +35,7 @@
#ifndef _TALLOC_STACK_H
#define _TALLOC_STACK_H
-#include "talloc.h"
+#include <talloc.h>
/*
* Create a new talloc stack frame.
diff --git a/lib/util/tdb_wrap.c b/lib/util/tdb_wrap.c
new file mode 100644
index 0000000000..c9562c6939
--- /dev/null
+++ b/lib/util/tdb_wrap.c
@@ -0,0 +1,194 @@
+/*
+ Unix SMB/CIFS implementation.
+ TDB wrap functions
+
+ Copyright (C) Andrew Tridgell 2004
+ Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include <tdb.h>
+#include "lib/util/dlinklist.h"
+#include "lib/util/tdb_wrap.h"
+#include <tdb.h>
+
+/*
+ Log tdb messages via DEBUG().
+*/
+static void tdb_wrap_log(TDB_CONTEXT *tdb, enum tdb_debug_level level,
+ const char *format, ...) PRINTF_ATTRIBUTE(3,4);
+
+static void tdb_wrap_log(TDB_CONTEXT *tdb, enum tdb_debug_level level,
+ const char *format, ...)
+{
+ va_list ap;
+ char *ptr = NULL;
+ int debuglevel = 0;
+ int ret;
+
+ switch (level) {
+ case TDB_DEBUG_FATAL:
+ debuglevel = 0;
+ break;
+ case TDB_DEBUG_ERROR:
+ debuglevel = 1;
+ break;
+ case TDB_DEBUG_WARNING:
+ debuglevel = 2;
+ break;
+ case TDB_DEBUG_TRACE:
+ debuglevel = 5;
+ break;
+ default:
+ debuglevel = 0;
+ }
+
+ va_start(ap, format);
+ ret = vasprintf(&ptr, format, ap);
+ va_end(ap);
+
+ if (ret != -1) {
+ const char *name = tdb_name(tdb);
+ DEBUG(debuglevel, ("tdb(%s): %s", name ? name : "unnamed", ptr));
+ free(ptr);
+ }
+}
+
+struct tdb_wrap_private {
+ struct tdb_context *tdb;
+ const char *name;
+ struct tdb_wrap_private *next, *prev;
+};
+
+static struct tdb_wrap_private *tdb_list;
+
+/* destroy the last connection to a tdb */
+static int tdb_wrap_private_destructor(struct tdb_wrap_private *w)
+{
+ tdb_close(w->tdb);
+ DLIST_REMOVE(tdb_list, w);
+ return 0;
+}
+
+static struct tdb_wrap_private *tdb_wrap_private_open(TALLOC_CTX *mem_ctx,
+ const char *name,
+ int hash_size,
+ int tdb_flags,
+ int open_flags,
+ mode_t mode)
+{
+ struct tdb_wrap_private *result;
+ struct tdb_logging_context log_ctx;
+
+ result = talloc(mem_ctx, struct tdb_wrap_private);
+ if (result == NULL) {
+ return NULL;
+ }
+ result->name = talloc_strdup(result, name);
+ if (result->name == NULL) {
+ goto fail;
+ }
+
+ log_ctx.log_fn = tdb_wrap_log;
+
+#if _SAMBA_BUILD_ == 3
+ /* This #if _SAMBA_BUILD == 3 is very unfortunate, as it means
+ * that in the top level build, these options are not
+ * available for these databases. However, having two
+ * different tdb_wrap lists is a worse fate, so this will do
+ * for now */
+
+ if (!lp_use_mmap()) {
+ tdb_flags |= TDB_NOMMAP;
+ }
+
+ if ((hash_size == 0) && (name != NULL)) {
+ const char *base;
+ base = strrchr_m(name, '/');
+
+ if (base != NULL) {
+ base += 1;
+ } else {
+ base = name;
+ }
+ hash_size = lp_parm_int(-1, "tdb_hashsize", base, 0);
+ }
+#endif
+
+ result->tdb = tdb_open_ex(name, hash_size, tdb_flags,
+ open_flags, mode, &log_ctx, NULL);
+ if (result->tdb == NULL) {
+ goto fail;
+ }
+ talloc_set_destructor(result, tdb_wrap_private_destructor);
+ DLIST_ADD(tdb_list, result);
+ return result;
+
+fail:
+ TALLOC_FREE(result);
+ return NULL;
+}
+
+/*
+ wrapped connection to a tdb database
+ to close just talloc_free() the tdb_wrap pointer
+ */
+struct tdb_wrap *tdb_wrap_open(TALLOC_CTX *mem_ctx,
+ const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode)
+{
+ struct tdb_wrap *result;
+ struct tdb_wrap_private *w;
+
+ result = talloc(mem_ctx, struct tdb_wrap);
+ if (result == NULL) {
+ return NULL;
+ }
+
+ for (w=tdb_list;w;w=w->next) {
+ if (strcmp(name, w->name) == 0) {
+ break;
+ }
+ }
+
+ if (w == NULL) {
+ w = tdb_wrap_private_open(result, name, hash_size, tdb_flags,
+ open_flags, mode);
+ } else {
+ /*
+ * Correctly use talloc_reference: The tdb will be
+ * closed when "w" is being freed. The caller never
+ * sees "w", so an incorrect use of talloc_free(w)
+ * instead of calling talloc_unlink is not possible.
+ * To avoid having to refcount ourselves, "w" will
+ * have multiple parents that hang off all the
+ * tdb_wrap's being returned from here. Those parents
+ * can be freed without problem.
+ */
+ if (talloc_reference(result, w) == NULL) {
+ goto fail;
+ }
+ }
+ if (w == NULL) {
+ goto fail;
+ }
+ result->tdb = w->tdb;
+ return result;
+fail:
+ TALLOC_FREE(result);
+ return NULL;
+}
+
diff --git a/lib/util/tdb_wrap.h b/lib/util/tdb_wrap.h
new file mode 100644
index 0000000000..1be2bb059d
--- /dev/null
+++ b/lib/util/tdb_wrap.h
@@ -0,0 +1,42 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ database wrap headers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* IMPORTANT: tdb_wrap should be always preferred over tdb_context for end consumer functions
+ it's because if the code will be running inside smbd, then we must use the linked list
+ of open tdb files, to determine if the tdb we desire is already open
+ as otherwise, when you close the tdb (even on a different file descriptor),
+ ALL LOCKS are lost (due to a real screwup in the POSIX specification that nobody has been able to get fixed)
+*/
+
+#ifndef _TDB_WRAP_H_
+#define _TDB_WRAP_H_
+
+#include <tdb.h>
+
+struct tdb_wrap {
+ struct tdb_context *tdb;
+};
+
+struct tdb_wrap *tdb_wrap_open(TALLOC_CTX *mem_ctx,
+ const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode);
+
+#endif /* _TDB_WRAP_H_ */
diff --git a/lib/util/tests/anonymous_shared.c b/lib/util/tests/anonymous_shared.c
new file mode 100644
index 0000000000..512a53f82d
--- /dev/null
+++ b/lib/util/tests/anonymous_shared.c
@@ -0,0 +1,70 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ anonymous_shared testing
+
+ Copyright (C) Stefan Metzmacher 2011
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "torture/torture.h"
+#include "torture/local/proto.h"
+
+static bool test_anonymous_shared_simple(struct torture_context *tctx)
+{
+ void *ptr;
+ size_t len;
+
+ torture_comment(tctx, "anonymous_shared_free(NULL)\n");
+ anonymous_shared_free(NULL);
+
+ len = 500;
+ torture_comment(tctx, "anonymous_shared_allocate(%llu)\n",
+ (unsigned long long)len);
+ ptr = anonymous_shared_allocate(len);
+ torture_assert(tctx, ptr, "valid pointer");
+ memset(ptr, 0xfe, len);
+ torture_comment(tctx, "anonymous_shared_free(ptr)\n");
+ anonymous_shared_free(ptr);
+
+ len = 50000;
+ torture_comment(tctx, "anonymous_shared_allocate(%llu)\n",
+ (unsigned long long)len);
+ ptr = anonymous_shared_allocate(len);
+ torture_assert(tctx, ptr, "valid pointer");
+ memset(ptr, 0xfe, len);
+ torture_comment(tctx, "anonymous_shared_free(ptr)\n");
+ anonymous_shared_free(ptr);
+
+ memset(&len, 0xFF, sizeof(len));
+ torture_comment(tctx, "anonymous_shared_allocate(%llu)\n",
+ (unsigned long long)len);
+ ptr = anonymous_shared_allocate(len);
+ torture_assert(tctx, ptr == NULL, "null pointer");
+
+ return true;
+}
+
+/* local.anonymous_shared test suite creation */
+struct torture_suite *torture_local_util_anonymous_shared(TALLOC_CTX *mem_ctx)
+{
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "anonymous_shared");
+
+ torture_suite_add_simple_test(suite, "simple",
+ test_anonymous_shared_simple);
+
+ return suite;
+}
diff --git a/lib/util/tests/asn1_tests.c b/lib/util/tests/asn1_tests.c
new file mode 100644
index 0000000000..ac8ca538f8
--- /dev/null
+++ b/lib/util/tests/asn1_tests.c
@@ -0,0 +1,282 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ util_asn1 testing
+
+ Copyright (C) Kamen Mazdrashki <kamen.mazdrashki@postpath.com> 2009
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "torture/torture.h"
+#include "../asn1.h"
+
+struct oid_data {
+ const char *oid; /* String OID */
+ const char *bin_oid; /* Binary OID represented as string */
+};
+
+/* Data for successful OIDs conversions */
+static const struct oid_data oid_data_ok[] = {
+ {
+ .oid = "2.5.4.0",
+ .bin_oid = "550400"
+ },
+ {
+ .oid = "2.5.4.1",
+ .bin_oid = "550401"
+ },
+ {
+ .oid = "2.5.4.130",
+ .bin_oid = "55048102"
+ },
+ {
+ .oid = "2.5.130.4",
+ .bin_oid = "55810204"
+ },
+ {
+ .oid = "2.5.4.16387",
+ .bin_oid = "5504818003"
+ },
+ {
+ .oid = "2.5.16387.4",
+ .bin_oid = "5581800304"
+ },
+ {
+ .oid = "2.5.2097155.4",
+ .bin_oid = "558180800304"
+ },
+ {
+ .oid = "2.5.4.130.16387.2097155.268435459",
+ .bin_oid = "55048102818003818080038180808003"
+ },
+};
+
+/* Data for successful OIDs conversions */
+static const char *oid_data_err[] = {
+ "", /* empty OID */
+ ".2.5.4.130", /* first sub-identifier is empty */
+ "2.5.4.130.", /* last sub-identifier is empty */
+ "2..5.4.130", /* second sub-identifier is empty */
+ "2.5..4.130", /* third sub-identifier is empty */
+ "2.abc.4.130", /* invalid sub-identifier */
+ "2.5abc.4.130", /* invalid sub-identifier (alpha-numeric)*/
+};
+
+/* Data for successful Partial OIDs conversions */
+static const struct oid_data partial_oid_data_ok[] = {
+ {
+ .oid = "2.5.4.130:0x81",
+ .bin_oid = "5504810281"
+ },
+ {
+ .oid = "2.5.4.16387:0x8180",
+ .bin_oid = "55048180038180"
+ },
+ {
+ .oid = "2.5.4.16387:0x81",
+ .bin_oid = "550481800381"
+ },
+ {
+ .oid = "2.5.2097155.4:0x818080",
+ .bin_oid = "558180800304818080"
+ },
+ {
+ .oid = "2.5.2097155.4:0x8180",
+ .bin_oid = "5581808003048180"
+ },
+ {
+ .oid = "2.5.2097155.4:0x81",
+ .bin_oid = "55818080030481"
+ },
+};
+
+
+/* Testing ber_write_OID_String() function */
+static bool test_ber_write_OID_String(struct torture_context *tctx)
+{
+ int i;
+ char *hex_str;
+ DATA_BLOB blob;
+ TALLOC_CTX *mem_ctx;
+ const struct oid_data *data = oid_data_ok;
+
+ mem_ctx = talloc_new(tctx);
+
+ /* check for valid OIDs */
+ for (i = 0; i < ARRAY_SIZE(oid_data_ok); i++) {
+ torture_assert(tctx, ber_write_OID_String(mem_ctx, &blob, data[i].oid),
+ "ber_write_OID_String failed");
+
+ hex_str = hex_encode_talloc(mem_ctx, blob.data, blob.length);
+ torture_assert(tctx, hex_str, "No memory!");
+
+ torture_assert(tctx, strequal(data[i].bin_oid, hex_str),
+ talloc_asprintf(mem_ctx,
+ "Failed: oid=%s, bin_oid:%s",
+ data[i].oid, data[i].bin_oid));
+ }
+
+ /* check for invalid OIDs */
+ for (i = 0; i < ARRAY_SIZE(oid_data_err); i++) {
+ torture_assert(tctx,
+ !ber_write_OID_String(mem_ctx, &blob, oid_data_err[i]),
+ talloc_asprintf(mem_ctx,
+ "Should fail for [%s] -> %s",
+ oid_data_err[i],
+ hex_encode_talloc(mem_ctx, blob.data, blob.length)));
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/* Testing ber_read_OID_String() function */
+static bool test_ber_read_OID_String(struct torture_context *tctx)
+{
+ int i;
+ char *oid;
+ DATA_BLOB oid_blob;
+ TALLOC_CTX *mem_ctx;
+ const struct oid_data *data = oid_data_ok;
+
+ mem_ctx = talloc_new(tctx);
+
+ for (i = 0; i < ARRAY_SIZE(oid_data_ok); i++) {
+ oid_blob = strhex_to_data_blob(mem_ctx, data[i].bin_oid);
+
+ torture_assert(tctx, ber_read_OID_String(mem_ctx, oid_blob, &oid),
+ "ber_read_OID_String failed");
+
+ torture_assert(tctx, strequal(data[i].oid, oid),
+ talloc_asprintf(mem_ctx,
+ "Failed: oid=%s, bin_oid:%s",
+ data[i].oid, data[i].bin_oid));
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/* Testing ber_write_partial_OID_String() function */
+static bool test_ber_write_partial_OID_String(struct torture_context *tctx)
+{
+ int i;
+ char *hex_str;
+ DATA_BLOB blob;
+ TALLOC_CTX *mem_ctx;
+ const struct oid_data *data = oid_data_ok;
+
+ mem_ctx = talloc_new(tctx);
+
+ /* ber_write_partial_OID_String() should work with not partial OIDs also */
+ for (i = 0; i < ARRAY_SIZE(oid_data_ok); i++) {
+ torture_assert(tctx, ber_write_partial_OID_String(mem_ctx, &blob, data[i].oid),
+ "ber_write_partial_OID_String failed");
+
+ hex_str = hex_encode_talloc(mem_ctx, blob.data, blob.length);
+ torture_assert(tctx, hex_str, "No memory!");
+
+ torture_assert(tctx, strequal(data[i].bin_oid, hex_str),
+ talloc_asprintf(mem_ctx,
+ "Failed: oid=%s, bin_oid:%s",
+ data[i].oid, data[i].bin_oid));
+ }
+
+ /* ber_write_partial_OID_String() test with partial OIDs */
+ data = partial_oid_data_ok;
+ for (i = 0; i < ARRAY_SIZE(partial_oid_data_ok); i++) {
+ torture_assert(tctx, ber_write_partial_OID_String(mem_ctx, &blob, data[i].oid),
+ "ber_write_partial_OID_String failed");
+
+ hex_str = hex_encode_talloc(mem_ctx, blob.data, blob.length);
+ torture_assert(tctx, hex_str, "No memory!");
+
+ torture_assert(tctx, strequal(data[i].bin_oid, hex_str),
+ talloc_asprintf(mem_ctx,
+ "Failed: oid=%s, bin_oid:%s",
+ data[i].oid, data[i].bin_oid));
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/* Testing ber_read_partial_OID_String() function */
+static bool test_ber_read_partial_OID_String(struct torture_context *tctx)
+{
+ int i;
+ char *oid;
+ DATA_BLOB oid_blob;
+ TALLOC_CTX *mem_ctx;
+ const struct oid_data *data = oid_data_ok;
+
+ mem_ctx = talloc_new(tctx);
+
+ /* ber_read_partial_OID_String() should work with not partial OIDs also */
+ for (i = 0; i < ARRAY_SIZE(oid_data_ok); i++) {
+ oid_blob = strhex_to_data_blob(mem_ctx, data[i].bin_oid);
+
+ torture_assert(tctx, ber_read_partial_OID_String(mem_ctx, oid_blob, &oid),
+ "ber_read_partial_OID_String failed");
+
+ torture_assert(tctx, strequal(data[i].oid, oid),
+ talloc_asprintf(mem_ctx,
+ "Failed: oid=%s, bin_oid:%s",
+ data[i].oid, data[i].bin_oid));
+ }
+
+ /* ber_read_partial_OID_String() test with partial OIDs */
+ data = partial_oid_data_ok;
+ for (i = 0; i < ARRAY_SIZE(partial_oid_data_ok); i++) {
+ oid_blob = strhex_to_data_blob(mem_ctx, data[i].bin_oid);
+
+ torture_assert(tctx, ber_read_partial_OID_String(mem_ctx, oid_blob, &oid),
+ "ber_read_partial_OID_String failed");
+
+ torture_assert(tctx, strequal(data[i].oid, oid),
+ talloc_asprintf(mem_ctx,
+ "Failed: oid=%s, bin_oid:%s",
+ data[i].oid, data[i].bin_oid));
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+
+/* LOCAL-ASN1 test suite creation */
+struct torture_suite *torture_local_util_asn1(TALLOC_CTX *mem_ctx)
+{
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "asn1");
+
+ torture_suite_add_simple_test(suite, "ber_write_OID_String",
+ test_ber_write_OID_String);
+
+ torture_suite_add_simple_test(suite, "ber_read_OID_String",
+ test_ber_read_OID_String);
+
+ torture_suite_add_simple_test(suite, "ber_write_partial_OID_String",
+ test_ber_write_partial_OID_String);
+
+ torture_suite_add_simple_test(suite, "ber_read_partial_OID_String",
+ test_ber_read_partial_OID_String);
+
+ return suite;
+}
diff --git a/lib/util/tests/data_blob.c b/lib/util/tests/data_blob.c
index 875e5fdef8..bdccef2ce8 100644
--- a/lib/util/tests/data_blob.c
+++ b/lib/util/tests/data_blob.c
@@ -78,13 +78,14 @@ static bool test_cmp(struct torture_context *tctx)
static bool test_hex_string(struct torture_context *tctx)
{
DATA_BLOB a = data_blob_string_const("\xC\xA\xF\xE");
- torture_assert_str_equal(tctx, data_blob_hex_string(tctx, &a), "0C0A0F0E", "hex string");
+ torture_assert_str_equal(tctx, data_blob_hex_string_lower(tctx, &a), "0c0a0f0e", "hex string");
+ torture_assert_str_equal(tctx, data_blob_hex_string_upper(tctx, &a), "0C0A0F0E", "hex string");
return true;
}
struct torture_suite *torture_local_util_data_blob(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "DATABLOB");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "datablob");
torture_suite_add_simple_test(suite, "string", test_string);
torture_suite_add_simple_test(suite, "string_null", test_string_null);
diff --git a/lib/util/tests/dlinklist.c b/lib/util/tests/dlinklist.c
new file mode 100644
index 0000000000..8db0a02b88
--- /dev/null
+++ b/lib/util/tests/dlinklist.c
@@ -0,0 +1,130 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ local testing of DLIST_*() macros
+
+ Copyright (C) Andrew Tridgell 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "torture/torture.h"
+#include "lib/util/dlinklist.h"
+
+struct listel {
+ struct listel *next, *prev;
+};
+
+static bool torture_local_dlinklist_simple(struct torture_context *tctx)
+{
+ TALLOC_CTX *mem_ctx = talloc_new(tctx);
+ struct listel *l1 = NULL, *l2 = NULL, *el, *el2;
+ int i;
+
+ torture_comment(tctx, "add 5 elements at front\n");
+ for (i=0; i<5; i++) {
+ el = talloc(mem_ctx, struct listel);
+ DLIST_ADD(l1, el);
+ }
+
+ torture_comment(tctx, "add 5 elements at end\n");
+ for (i=0; i<5; i++) {
+ el = talloc(mem_ctx, struct listel);
+ DLIST_ADD_END(l1, el, NULL);
+ }
+
+ torture_comment(tctx, "delete 3 from front\n");
+ for (i=0; i < 3; i++) {
+ el = l1;
+ DLIST_REMOVE(l1, l1);
+ DLIST_ADD(l2, el);
+ }
+
+ torture_comment(tctx, "delete 3 from back\n");
+ for (i=0; i < 3; i++) {
+ el = DLIST_TAIL(l1);
+ DLIST_REMOVE(l1, el);
+ DLIST_ADD_END(l2, el, NULL);
+ }
+
+ torture_comment(tctx, "count forward\n");
+ for (i=0,el=l1; el; el=el->next) i++;
+ torture_assert_int_equal(tctx, i, 4, "should have 4 elements");
+
+ torture_comment(tctx, "count backwards\n");
+ for (i=0,el=DLIST_TAIL(l1); el; el=DLIST_PREV(el)) i++;
+ torture_assert_int_equal(tctx, i, 4, "should have 4 elements");
+
+ torture_comment(tctx, "check DLIST_HEAD\n");
+ el = DLIST_TAIL(l1);
+ DLIST_HEAD(el, el2);
+ torture_assert(tctx, el2 == l1, "should find head");
+
+ torture_comment(tctx, "check DLIST_ADD_AFTER\n");
+ el = talloc(mem_ctx, struct listel);
+ el2 = talloc(mem_ctx, struct listel);
+ DLIST_ADD_AFTER(l1, el, l1);
+ DLIST_ADD_AFTER(l1, el2, el);
+ torture_assert(tctx, l1->next == el, "2nd in list");
+ torture_assert(tctx, el->next == el2, "3rd in list");
+
+ torture_comment(tctx, "check DLIST_PROMOTE\n");
+ DLIST_PROMOTE(l1, el2);
+ torture_assert(tctx, el2==l1, "1st in list");
+ torture_assert(tctx, el2->next->next == el, "3rd in list");
+
+ torture_comment(tctx, "check DLIST_DEMOTE\n");
+ DLIST_DEMOTE(l1, el, NULL);
+ torture_assert(tctx, el->next == NULL, "last in list");
+ torture_assert(tctx, el2->prev == el, "backlink from head");
+
+ torture_comment(tctx, "count forward\n");
+ for (i=0,el=l1; el; el=el->next) i++;
+ torture_assert_int_equal(tctx, i, 6, "should have 6 elements");
+
+ torture_comment(tctx, "count backwards\n");
+ for (i=0,el=DLIST_TAIL(l1); el; el=DLIST_PREV(el)) i++;
+ torture_assert_int_equal(tctx, i, 6, "should have 6 elements");
+
+ torture_comment(tctx, "check DLIST_CONCATENATE\n");
+ DLIST_CONCATENATE(l1, l2, NULL);
+ torture_comment(tctx, "count forward\n");
+ for (i=0,el=l1; el; el=el->next) i++;
+ torture_assert_int_equal(tctx, i, 12, "should have 12 elements");
+
+ torture_comment(tctx, "count backwards\n");
+ for (i=0,el=DLIST_TAIL(l1); el; el=DLIST_PREV(el)) i++;
+ torture_assert_int_equal(tctx, i, 12, "should have 12 elements");
+
+ torture_comment(tctx, "free forwards\n");
+ for (el=l1; el; el=el2) {
+ el2 = el->next;
+ DLIST_REMOVE(l1, el);
+ talloc_free(el);
+ }
+
+ torture_assert(tctx, l1 == NULL, "list empty");
+ torture_assert_int_equal(tctx, talloc_total_blocks(mem_ctx), 1, "1 block");
+
+ talloc_free(mem_ctx);
+ return true;
+}
+
+struct torture_suite *torture_local_dlinklist(TALLOC_CTX *mem_ctx)
+{
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "dlinklist");
+ torture_suite_add_simple_test(suite, "dlinklist", torture_local_dlinklist_simple);
+ return suite;
+}
diff --git a/lib/util/tests/file.c b/lib/util/tests/file.c
index 4aff0e9afd..c29e09868e 100644
--- a/lib/util/tests/file.c
+++ b/lib/util/tests/file.c
@@ -91,13 +91,12 @@ static bool test_afdgets(struct torture_context *tctx)
struct torture_suite *torture_local_util_file(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "FILE");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "file");
torture_suite_add_simple_test(suite, "file_load_save",
test_file_load_save);
- torture_suite_add_simple_test(suite, "afdgets",
- test_afdgets);
+ torture_suite_add_simple_test(suite, "afdgets", test_afdgets);
return suite;
}
diff --git a/lib/util/tests/genrand.c b/lib/util/tests/genrand.c
index 20a20ac7fa..50d77bb03a 100644
--- a/lib/util/tests/genrand.c
+++ b/lib/util/tests/genrand.c
@@ -59,7 +59,7 @@ static bool test_generate_random_str(struct torture_context *tctx)
struct torture_suite *torture_local_genrand(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "GENRAND");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "genrand");
torture_suite_add_simple_test(suite, "reseed_callback", test_reseed_callback);
torture_suite_add_simple_test(suite, "check_password_quality", test_check_password_quality);
torture_suite_add_simple_test(suite, "generate_random_str", test_generate_random_str);
diff --git a/lib/util/tests/idtree.c b/lib/util/tests/idtree.c
index d89fb8c489..ba7b2e7337 100644
--- a/lib/util/tests/idtree.c
+++ b/lib/util/tests/idtree.c
@@ -115,7 +115,7 @@ static bool torture_local_idtree_simple(struct torture_context *tctx)
struct torture_suite *torture_local_idtree(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "IDTREE");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "idtree");
torture_suite_add_simple_test(suite, "idtree", torture_local_idtree_simple);
return suite;
}
diff --git a/lib/util/tests/parmlist.c b/lib/util/tests/parmlist.c
index 4b1d875715..45e7ec3efa 100644
--- a/lib/util/tests/parmlist.c
+++ b/lib/util/tests/parmlist.c
@@ -59,8 +59,8 @@ static bool test_get(struct torture_context *tctx)
torture_assert_str_equal(tctx, e->key, "bar", "key");
torture_assert_str_equal(tctx, e->value, "mystring", "value");
- e = parmlist_get(pctx, "nonexistant");
- torture_assert(tctx, e == NULL, "nonexistant");
+ e = parmlist_get(pctx, "non-existent");
+ torture_assert(tctx, e == NULL, "non-existent");
return true;
}
@@ -87,14 +87,14 @@ static bool test_get_string_list(struct torture_context *tctx)
torture_assert_int_equal(tctx, str_list_length(ret), 2, "length");
torture_assert_str_equal(tctx, "true", ret[0], "ret[0]");
torture_assert_str_equal(tctx, "false", ret[1], "ret[1]");
- torture_assert(tctx, NULL == parmlist_get_string_list(pctx, "nonexistant", NULL), "nonexistant");
+ torture_assert(tctx, NULL == parmlist_get_string_list(pctx, "non-existent", NULL), "non-existent");
return true;
}
struct torture_suite *torture_local_util_parmlist(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "PARMLIST");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "parmlist");
torture_suite_add_simple_test(suite, "get_int", test_get_int);
torture_suite_add_simple_test(suite, "get_string", test_get_string);
diff --git a/lib/util/tests/str.c b/lib/util/tests/str.c
index 3bd6a02fdc..6b38feaf43 100644
--- a/lib/util/tests/str.c
+++ b/lib/util/tests/str.c
@@ -94,7 +94,7 @@ static bool test_string_sub_talloc_multiple(struct torture_context *tctx)
struct torture_suite *torture_local_util_str(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "STR");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "str");
torture_suite_add_simple_test(suite, "string_sub_simple",
test_string_sub_simple);
diff --git a/lib/util/tests/strlist.c b/lib/util/tests/strlist.c
index a974f58184..21b7b1dd13 100644
--- a/lib/util/tests/strlist.c
+++ b/lib/util/tests/strlist.c
@@ -26,11 +26,11 @@
struct test_list_element {
const char *list_as_string;
- const char *seperators;
+ const char *separators;
const char *list[5];
};
-struct test_list_element test_lists_strings[] = {
+const struct test_list_element test_lists_strings[] = {
{
.list_as_string = "",
.list = { NULL }
@@ -46,7 +46,7 @@ struct test_list_element test_lists_strings[] = {
{
.list_as_string = "foo bar",
.list = { "foo bar", NULL },
- .seperators = ";"
+ .separators = ";"
},
{
.list_as_string = "\"foo bar\"",
@@ -59,11 +59,11 @@ struct test_list_element test_lists_strings[] = {
{
.list_as_string = "\"foo bar\",comma;semicolon",
.list = { "\"foo bar\",comma", "semicolon", NULL },
- .seperators = ";"
+ .separators = ";"
}
};
-struct test_list_element test_lists_shell_strings[] = {
+const struct test_list_element test_lists_shell_strings[] = {
{
.list_as_string = "",
.list = { NULL }
@@ -79,7 +79,7 @@ struct test_list_element test_lists_shell_strings[] = {
{
.list_as_string = "foo bar",
.list = { "foo bar", NULL },
- .seperators = ";"
+ .separators = ";"
},
{
.list_as_string = "\"foo bar\"",
@@ -99,19 +99,19 @@ struct test_list_element test_lists_shell_strings[] = {
}
};
-static bool test_lists_shell(struct torture_context *tctx,
- const void *data)
+static bool test_lists_shell(struct torture_context *tctx, const void *data)
{
const struct test_list_element *element = data;
- const char **ret1, **ret2, *tmp;
+
+ char **ret1, **ret2, *tmp;
bool match = true;
TALLOC_CTX *mem_ctx = tctx;
- ret1 = str_list_make_shell(mem_ctx, element->list_as_string, element->seperators);
+ ret1 = str_list_make_shell(mem_ctx, element->list_as_string, element->separators);
torture_assert(tctx, ret1, "str_list_make_shell() must not return NULL");
- tmp = str_list_join_shell(mem_ctx, ret1, element->seperators ? *element->seperators : ' ');
- ret2 = str_list_make_shell(mem_ctx, tmp, element->seperators);
+ tmp = str_list_join_shell(mem_ctx, (const char **) ret1, element->separators ? *element->separators : ' ');
+ ret2 = str_list_make_shell(mem_ctx, tmp, element->separators);
if ((ret1 == NULL || ret2 == NULL) && ret2 != ret1) {
match = false;
@@ -130,7 +130,8 @@ static bool test_lists_shell(struct torture_context *tctx,
torture_assert(tctx, match, talloc_asprintf(tctx,
"str_list_{make,join}_shell: Error double parsing, first run:\n%s\nSecond run: \n%s", element->list_as_string, tmp));
- torture_assert(tctx, str_list_equal(ret1, element->list),
+ torture_assert(tctx, str_list_equal((const char * const *) ret1,
+ element->list),
talloc_asprintf(tctx,
"str_list_make_shell(%s) failed to create correct list",
element->list_as_string));
@@ -141,10 +142,12 @@ static bool test_lists_shell(struct torture_context *tctx,
static bool test_list_make(struct torture_context *tctx, const void *data)
{
const struct test_list_element *element = data;
+
char **result;
- result = str_list_make(tctx, element->list_as_string, element->seperators);
+ result = str_list_make(tctx, element->list_as_string, element->separators);
torture_assert(tctx, result, "str_list_make() must not return NULL");
- torture_assert(tctx, str_list_equal((const char **)result, element->list),
+ torture_assert(tctx, str_list_equal((const char * const *) result,
+ element->list),
talloc_asprintf(tctx,
"str_list_make(%s) failed to create correct list",
element->list_as_string));
@@ -250,7 +253,7 @@ static bool test_list_length(struct torture_context *tctx)
static bool test_list_add(struct torture_context *tctx)
{
- char **result, **result2;
+ const char **result, **result2;
const char *list[] = {
"element_0",
"element_1",
@@ -258,9 +261,9 @@ static bool test_list_add(struct torture_context *tctx)
"element_3",
NULL
};
- result = str_list_make(tctx, "element_0, element_1, element_2", NULL);
+ result = (const char **) str_list_make(tctx, "element_0, element_1, element_2", NULL);
torture_assert(tctx, result, "str_list_make() must not return NULL");
- result2 = str_list_add(result, "element_3");
+ result2 = str_list_add((const char **) result, "element_3");
torture_assert(tctx, result2, "str_list_add() must not return NULL");
torture_assert(tctx, str_list_equal(result2, list),
"str_list_add() failed");
@@ -270,7 +273,7 @@ static bool test_list_add(struct torture_context *tctx)
static bool test_list_add_const(struct torture_context *tctx)
{
- char **result, **result2;
+ const char **result, **result2;
const char *list[] = {
"element_0",
"element_1",
@@ -278,7 +281,7 @@ static bool test_list_add_const(struct torture_context *tctx)
"element_3",
NULL
};
- result = str_list_make(tctx, "element_0, element_1, element_2", NULL);
+ result = (const char **) str_list_make(tctx, "element_0, element_1, element_2", NULL);
torture_assert(tctx, result, "str_list_make() must not return NULL");
result2 = str_list_add_const(result, "element_3");
torture_assert(tctx, result2, "str_list_add_const() must not return NULL");
@@ -290,14 +293,14 @@ static bool test_list_add_const(struct torture_context *tctx)
static bool test_list_remove(struct torture_context *tctx)
{
- char **result;
+ const char **result;
const char *list[] = {
"element_0",
"element_1",
"element_3",
NULL
};
- result = str_list_make(tctx, "element_0, element_1, element_2, element_3", NULL);
+ result = (const char **) str_list_make(tctx, "element_0, element_1, element_2, element_3", NULL);
torture_assert(tctx, result, "str_list_make() must not return NULL");
str_list_remove(result, "element_2");
torture_assert(tctx, str_list_equal(result, list),
@@ -336,7 +339,7 @@ static bool test_list_check_ci(struct torture_context *tctx)
static bool test_list_unique(struct torture_context *tctx)
{
- char **result;
+ const char **result;
const char *list[] = {
"element_0",
"element_1",
@@ -354,7 +357,7 @@ static bool test_list_unique(struct torture_context *tctx)
"element_2",
NULL
};
- result = str_list_copy(tctx, list_dup);
+ result = (const char **) str_list_copy(tctx, list_dup);
/* We must copy the list, as str_list_unique does a talloc_realloc() on it's parameter */
result = str_list_unique(result);
torture_assert(tctx, result, "str_list_unique() must not return NULL");
@@ -373,8 +376,8 @@ static bool test_list_unique_2(struct torture_context *tctx)
const char **list = (const char **)str_list_make_empty(tctx);
const char **list_dup = (const char **)str_list_make_empty(tctx);
- count = lp_parm_int(tctx->lp_ctx, NULL, "list_unique", "count", 9);
- num_dups = lp_parm_int(tctx->lp_ctx, NULL, "list_unique", "dups", 7);
+ count = lpcfg_parm_int(tctx->lp_ctx, NULL, "list_unique", "count", 9);
+ num_dups = lpcfg_parm_int(tctx->lp_ctx, NULL, "list_unique", "dups", 7);
torture_comment(tctx, "test_list_unique_2() with %d elements and %d dups\n", count, num_dups);
for (i = 0; i < count; i++) {
@@ -398,7 +401,7 @@ static bool test_list_unique_2(struct torture_context *tctx)
static bool test_list_append(struct torture_context *tctx)
{
- char **result;
+ const char **result;
const char *list[] = {
"element_0",
"element_1",
@@ -420,7 +423,7 @@ static bool test_list_append(struct torture_context *tctx)
"element_5",
NULL
};
- result = str_list_copy(tctx, list);
+ result = (const char **) str_list_copy(tctx, list);
torture_assert(tctx, result, "str_list_copy() must not return NULL");
result = str_list_append(result, list2);
torture_assert(tctx, result, "str_list_append() must not return NULL");
@@ -432,7 +435,7 @@ static bool test_list_append(struct torture_context *tctx)
static bool test_list_append_const(struct torture_context *tctx)
{
- char **result;
+ const char **result;
const char *list[] = {
"element_0",
"element_1",
@@ -454,7 +457,7 @@ static bool test_list_append_const(struct torture_context *tctx)
"element_5",
NULL
};
- result = str_list_copy(tctx, list);
+ result = (const char **) str_list_copy(tctx, list);
torture_assert(tctx, result, "str_list_copy() must not return NULL");
result = str_list_append_const(result, list2);
torture_assert(tctx, result, "str_list_append_const() must not return NULL");
@@ -466,17 +469,23 @@ static bool test_list_append_const(struct torture_context *tctx)
struct torture_suite *torture_local_util_strlist(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "STRLIST");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "strlist");
int i;
for (i = 0; i < ARRAY_SIZE(test_lists_shell_strings); i++) {
- torture_suite_add_simple_tcase_const(suite, "lists_shell",
- test_lists_shell, &test_lists_shell_strings[i]);
+ char *name;
+ name = talloc_asprintf(suite, "lists_shell(%s)",
+ test_lists_shell_strings[i].list_as_string);
+ torture_suite_add_simple_tcase_const(suite, name,
+ test_lists_shell, &test_lists_shell_strings[i]);
}
for (i = 0; i < ARRAY_SIZE(test_lists_strings); i++) {
- torture_suite_add_simple_tcase_const(suite, "lists",
- test_list_make, &test_lists_strings[i]);
+ char *name;
+ name = talloc_asprintf(suite, "list_make(%s)",
+ test_lists_strings[i].list_as_string);
+ torture_suite_add_simple_tcase_const(suite, name,
+ test_list_make, &test_lists_strings[i]);
}
torture_suite_add_simple_test(suite, "list_copy", test_list_copy);
diff --git a/lib/util/tests/time.c b/lib/util/tests/time.c
index d08a4e79d1..592f88f88b 100644
--- a/lib/util/tests/time.c
+++ b/lib/util/tests/time.c
@@ -77,8 +77,7 @@ static bool test_timestring(struct torture_context *tctx)
time_t utc_offset = mktime(&local) - mktime(&gmt);
result = timestring(tctx, 42 - (utc_offset < 0 ? utc_offset : 0));
- torture_assert(tctx, !strncmp(start, result, strlen(start)),
- result);
+ torture_assert(tctx, !strncmp(start, result, strlen(start)), result);
return true;
}
@@ -101,7 +100,7 @@ static bool test_get_time_zone(struct torture_context *tctx)
struct torture_suite *torture_local_util_time(TALLOC_CTX *mem_ctx)
{
- struct torture_suite *suite = torture_suite_create(mem_ctx, "TIME");
+ struct torture_suite *suite = torture_suite_create(mem_ctx, "time");
torture_suite_add_simple_test(suite, "null_time", test_null_time);
torture_suite_add_simple_test(suite, "get_time_zone", test_get_time_zone);
diff --git a/lib/util/tevent_ntstatus.c b/lib/util/tevent_ntstatus.c
index d6cb0affd9..764d251b59 100644
--- a/lib/util/tevent_ntstatus.c
+++ b/lib/util/tevent_ntstatus.c
@@ -20,9 +20,29 @@
#include "../replace/replace.h"
#include "tevent_ntstatus.h"
-bool tevent_req_nterror(struct tevent_req *req, NTSTATUS status)
+#define TEVENT_NTERROR_MAGIC (0x917b5acd)
+
+bool _tevent_req_nterror(struct tevent_req *req,
+ NTSTATUS status,
+ const char *location)
{
- return tevent_req_error(req, NT_STATUS_V(status));
+ uint64_t err;
+
+ if (NT_STATUS_IS_OK(status)) {
+ return false;
+ }
+
+ /*
+ * I've put this variable here, because I'm not 100% certain
+ * how to correctly assign a 64-bit constant and left-shift it
+ * by 32 bits in a single expression. If anyone knows, feel
+ * free :-)
+ */
+ err = TEVENT_NTERROR_MAGIC;
+ err <<= 32;
+ err |= NT_STATUS_V(status);
+
+ return _tevent_req_error(req, err, location);
}
bool tevent_req_is_nterror(struct tevent_req *req, NTSTATUS *status)
@@ -41,7 +61,10 @@ bool tevent_req_is_nterror(struct tevent_req *req, NTSTATUS *status)
*status = NT_STATUS_NO_MEMORY;
break;
case TEVENT_REQ_USER_ERROR:
- *status = NT_STATUS(err);
+ if ((err >> 32) != TEVENT_NTERROR_MAGIC) {
+ abort();
+ }
+ *status = NT_STATUS(err & 0xffffffff);
break;
default:
*status = NT_STATUS_INTERNAL_ERROR;
@@ -59,3 +82,18 @@ NTSTATUS tevent_req_simple_recv_ntstatus(struct tevent_req *req)
}
return NT_STATUS_OK;
}
+
+void tevent_req_simple_finish_ntstatus(struct tevent_req *subreq,
+ NTSTATUS subreq_status)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+
+ TALLOC_FREE(subreq);
+
+ if (!NT_STATUS_IS_OK(subreq_status)) {
+ tevent_req_nterror(req, subreq_status);
+ return;
+ }
+ tevent_req_done(req);
+}
diff --git a/lib/util/tevent_ntstatus.h b/lib/util/tevent_ntstatus.h
index 22fe9188d0..4ac9243320 100644
--- a/lib/util/tevent_ntstatus.h
+++ b/lib/util/tevent_ntstatus.h
@@ -23,10 +23,21 @@
#include <stdint.h>
#include <stdbool.h>
#include "../libcli/util/ntstatus.h"
-#include "../tevent/tevent.h"
+#include <tevent.h>
-bool tevent_req_nterror(struct tevent_req *req, NTSTATUS status);
+bool _tevent_req_nterror(struct tevent_req *req,
+ NTSTATUS status,
+ const char *location);
+#define tevent_req_nterror(req, status) \
+ _tevent_req_nterror(req, status, __location__)
bool tevent_req_is_nterror(struct tevent_req *req, NTSTATUS *pstatus);
NTSTATUS tevent_req_simple_recv_ntstatus(struct tevent_req *req);
+/*
+ * Helper routine to pass the subreq_ntstatus to the req embedded in
+ * tevent_req_callback_data(subreq), which will be freed.
+ */
+void tevent_req_simple_finish_ntstatus(struct tevent_req *subreq,
+ NTSTATUS subreq_status);
+
#endif
diff --git a/lib/util/tevent_unix.c b/lib/util/tevent_unix.c
index 0a8c4c6b30..e4c960e4d3 100644
--- a/lib/util/tevent_unix.c
+++ b/lib/util/tevent_unix.c
@@ -21,8 +21,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "tevent_unix.h"
#include "../replace/replace.h"
+#include "tevent_unix.h"
bool tevent_req_is_unix_error(struct tevent_req *req, int *perrno)
{
diff --git a/lib/util/tevent_unix.h b/lib/util/tevent_unix.h
index bc2cea9199..377e976c39 100644
--- a/lib/util/tevent_unix.h
+++ b/lib/util/tevent_unix.h
@@ -24,7 +24,7 @@
#ifndef _TEVENT_UNIX_H
#define _TEVENT_UNIX_H
-#include "../tevent/tevent.h"
+#include <tevent.h>
bool tevent_req_is_unix_error(struct tevent_req *req, int *perrno);
diff --git a/lib/util/tevent_werror.c b/lib/util/tevent_werror.c
new file mode 100644
index 0000000000..d8956b398f
--- /dev/null
+++ b/lib/util/tevent_werror.c
@@ -0,0 +1,81 @@
+/*
+ Unix SMB/CIFS implementation.
+ Wrap win32 errors around tevent_req
+ Copyright (C) Kai Blin 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "../replace/replace.h"
+#include "tevent_werror.h"
+
+bool _tevent_req_werror(struct tevent_req *req,
+ WERROR werror,
+ const char *location)
+{
+ return _tevent_req_error(req, W_ERROR_V(werror),
+ location);
+}
+
+bool tevent_req_is_werror(struct tevent_req *req, WERROR *error)
+{
+ enum tevent_req_state state;
+ uint64_t err;
+
+ if (!tevent_req_is_error(req, &state, &err)) {
+ return false;
+ }
+ switch (state) {
+ case TEVENT_REQ_TIMED_OUT:
+ *error = WERR_TIMEOUT;
+ break;
+ case TEVENT_REQ_NO_MEMORY:
+ *error = WERR_NOMEM;
+ break;
+ case TEVENT_REQ_USER_ERROR:
+ *error = W_ERROR(err);
+ break;
+ default:
+ *error = WERR_INTERNAL_ERROR;
+ break;
+ }
+ return true;
+}
+
+WERROR tevent_req_simple_recv_werror(struct tevent_req *req)
+{
+ WERROR werror;
+
+ if (tevent_req_is_werror(req, &werror)) {
+ tevent_req_received(req);
+ return werror;
+ }
+ tevent_req_received(req);
+ return WERR_OK;
+}
+
+void tevent_req_simple_finish_werror(struct tevent_req *subreq,
+ WERROR subreq_error)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+
+ TALLOC_FREE(subreq);
+
+ if (!W_ERROR_IS_OK(subreq_error)) {
+ tevent_req_werror(req, subreq_error);
+ return;
+ }
+ tevent_req_done(req);
+}
diff --git a/lib/util/tevent_werror.h b/lib/util/tevent_werror.h
new file mode 100644
index 0000000000..0e243825b6
--- /dev/null
+++ b/lib/util/tevent_werror.h
@@ -0,0 +1,43 @@
+/*
+ Unix SMB/CIFS implementation.
+ Wrap win32 errors around tevent_req
+ Copyright (C) Kai Blin 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _TEVENT_WERROR_H
+#define _TEVENT_WERROR_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "../libcli/util/werror.h"
+#include <tevent.h>
+
+bool _tevent_req_werror(struct tevent_req *req,
+ WERROR werror,
+ const char *location);
+#define tevent_req_werror(req, werror) \
+ _tevent_req_werror(req, werror, __location__)
+bool tevent_req_is_werror(struct tevent_req *req, WERROR *error);
+WERROR tevent_req_simple_recv_werror(struct tevent_req *req);
+
+/*
+ * Helper routine to pass the subreq_werror to the req embedded in
+ * tevent_req_callback_data(subreq), which will be freed.
+ */
+void tevent_req_simple_finish_werror(struct tevent_req *subreq,
+ WERROR subreq_error);
+
+#endif
diff --git a/lib/util/time.c b/lib/util/time.c
index 571219b810..4843fc9697 100644
--- a/lib/util/time.c
+++ b/lib/util/time.c
@@ -4,6 +4,8 @@
Copyright (C) Andrew Tridgell 1992-2004
Copyright (C) Stefan (metze) Metzmacher 2002
+ Copyright (C) Jeremy Allison 2007
+ Copyright (C) Andrew Bartlett 2011
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -55,13 +57,51 @@ _PUBLIC_ void GetTimeOfDay(struct timeval *tval)
#endif
}
+/**
+a wrapper to preferably get the monotonic time
+**/
+_PUBLIC_ void clock_gettime_mono(struct timespec *tp)
+{
+ if (clock_gettime(CUSTOM_CLOCK_MONOTONIC,tp) != 0) {
+ clock_gettime(CLOCK_REALTIME,tp);
+ }
+}
+
+/**
+a wrapper to preferably get the monotonic time in seconds;
+as this needs only second resolution, we can use the cached
+(and much faster) COARSE clock variant
+**/
+_PUBLIC_ time_t time_mono(time_t *t)
+{
+ struct timespec tp;
+ int rc = -1;
+#ifdef CLOCK_MONOTONIC_COARSE
+ rc = clock_gettime(CLOCK_MONOTONIC_COARSE,&tp);
+#endif
+ if (rc != 0) {
+ clock_gettime_mono(&tp);
+ }
+ if (t != NULL) {
+ *t = tp.tv_sec;
+ }
+ return tp.tv_sec;
+}
+
#define TIME_FIXUP_CONSTANT 11644473600LL
time_t convert_timespec_to_time_t(struct timespec ts)
{
+ /* Ensure tv_nsec is less than 1sec. */
+ while (ts.tv_nsec > 1000000000) {
+ ts.tv_sec += 1;
+ ts.tv_nsec -= 1000000000;
+ }
+
/* 1 ns == 1,000,000,000 - one thousand millionths of a second.
increment if it's greater than 500 millionth of a second. */
+
if (ts.tv_nsec > 500000000) {
return ts.tv_sec + 1;
}
@@ -299,6 +339,63 @@ _PUBLIC_ time_t pull_dos_date3(const uint8_t *date_ptr, int zone_offset)
}
+/****************************************************************************
+ Return the date and time as a string
+****************************************************************************/
+
+char *timeval_string(TALLOC_CTX *ctx, const struct timeval *tp, bool hires)
+{
+ time_t t;
+ struct tm *tm;
+
+ t = (time_t)tp->tv_sec;
+ tm = localtime(&t);
+ if (!tm) {
+ if (hires) {
+ return talloc_asprintf(ctx,
+ "%ld.%06ld seconds since the Epoch",
+ (long)tp->tv_sec,
+ (long)tp->tv_usec);
+ } else {
+ return talloc_asprintf(ctx,
+ "%ld seconds since the Epoch",
+ (long)t);
+ }
+ } else {
+#ifdef HAVE_STRFTIME
+ char TimeBuf[60];
+ if (hires) {
+ strftime(TimeBuf,sizeof(TimeBuf)-1,"%Y/%m/%d %H:%M:%S",tm);
+ return talloc_asprintf(ctx,
+ "%s.%06ld", TimeBuf,
+ (long)tp->tv_usec);
+ } else {
+ strftime(TimeBuf,sizeof(TimeBuf)-1,"%Y/%m/%d %H:%M:%S",tm);
+ return talloc_strdup(ctx, TimeBuf);
+ }
+#else
+ if (hires) {
+ const char *asct = asctime(tm);
+ return talloc_asprintf(ctx, "%s.%06ld",
+ asct ? asct : "unknown",
+ (long)tp->tv_usec);
+ } else {
+ const char *asct = asctime(tm);
+ return talloc_asprintf(ctx, asct ? asct : "unknown");
+ }
+#endif
+ }
+}
+
+char *current_timestring(TALLOC_CTX *ctx, bool hires)
+{
+ struct timeval tv;
+
+ GetTimeOfDay(&tv);
+ return timeval_string(ctx, &tv, hires);
+}
+
+
/**
return a HTTP/1.0 time string
**/
@@ -346,11 +443,10 @@ _PUBLIC_ char *timestring(TALLOC_CTX *mem_ctx, time_t t)
}
#ifdef HAVE_STRFTIME
- /* some versions of gcc complain about using %c. This is a bug
- in the gcc warning, not a bug in this code. See a recent
- strftime() manual page for details.
- */
- strftime(tempTime,sizeof(tempTime)-1,"%c %Z",tm);
+ /* Some versions of gcc complain about using some special format
+ * specifiers. This is a bug in gcc, not a bug in this code. See a
+ * recent strftime() manual page for details. */
+ strftime(tempTime,sizeof(tempTime)-1,"%a %b %e %X %Y %Z",tm);
TimeBuf = talloc_strdup(mem_ctx, tempTime);
#else
TimeBuf = talloc_strdup(mem_ctx, asctime(tm));
@@ -399,6 +495,15 @@ _PUBLIC_ int64_t usec_time_diff(const struct timeval *tv1, const struct timeval
return (sec_diff * 1000000) + (int64_t)(tv1->tv_usec - tv2->tv_usec);
}
+/**
+ return (tp1 - tp2) in nanoseconds
+*/
+_PUBLIC_ int64_t nsec_time_diff(const struct timespec *tp1, const struct timespec *tp2)
+{
+ int64_t sec_diff = tp1->tv_sec - tp2->tv_sec;
+ return (sec_diff * 1000000000) + (int64_t)(tp1->tv_nsec - tp2->tv_nsec);
+}
+
/**
return a zero timeval
diff --git a/lib/util/time.h b/lib/util/time.h
index cf6dc1caa7..3a406340f4 100644
--- a/lib/util/time.h
+++ b/lib/util/time.h
@@ -51,6 +51,16 @@ a gettimeofday wrapper
_PUBLIC_ void GetTimeOfDay(struct timeval *tval);
/**
+a wrapper to preferably get the monotonic time
+**/
+_PUBLIC_ void clock_gettime_mono(struct timespec *tp);
+
+/**
+a wrapper to preferably get the monotonic time in seconds
+**/
+_PUBLIC_ time_t time_mono(time_t *t);
+
+/**
interpret an 8 byte "filetime" structure to a time_t
It's originally in "100ns units since jan 1st 1601"
**/
@@ -109,12 +119,29 @@ _PUBLIC_ time_t pull_dos_date2(const uint8_t *date_ptr, int zone_offset);
_PUBLIC_ time_t pull_dos_date3(const uint8_t *date_ptr, int zone_offset);
/**
+ Return a date and time as a string (optionally with microseconds)
+
+ format is %Y/%m/%d %H:%M:%S if strftime is available
+**/
+
+char *timeval_string(TALLOC_CTX *ctx, const struct timeval *tp, bool hires);
+
+/**
+ Return the current date and time as a string (optionally with microseconds)
+
+ format is %Y/%m/%d %H:%M:%S if strftime is available
+**/
+char *current_timestring(TALLOC_CTX *ctx, bool hires);
+
+/**
return a HTTP/1.0 time string
**/
_PUBLIC_ char *http_timestring(TALLOC_CTX *mem_ctx, time_t t);
/**
Return the date and time as a string
+
+ format is %a %b %e %X %Y %Z
**/
_PUBLIC_ char *timestring(TALLOC_CTX *mem_ctx, time_t t);
@@ -144,6 +171,11 @@ _PUBLIC_ NTTIME nttime_from_string(const char *s);
_PUBLIC_ int64_t usec_time_diff(const struct timeval *tv1, const struct timeval *tv2);
/**
+ return (tp1 - tp2) in nanoseconds
+*/
+_PUBLIC_ int64_t nsec_time_diff(const struct timespec *tp1, const struct timespec *tp2);
+
+/**
return a zero timeval
*/
_PUBLIC_ struct timeval timeval_zero(void);
diff --git a/lib/util/time.m4 b/lib/util/time.m4
deleted file mode 100644
index 675e20129f..0000000000
--- a/lib/util/time.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-AC_CACHE_CHECK([if gettimeofday takes tz argument],samba_cv_HAVE_GETTIMEOFDAY_TZ,[
-AC_TRY_RUN([
-#include <sys/time.h>
-#include <unistd.h>
-main() { struct timeval tv; exit(gettimeofday(&tv, NULL));}],
- samba_cv_HAVE_GETTIMEOFDAY_TZ=yes,samba_cv_HAVE_GETTIMEOFDAY_TZ=no,samba_cv_HAVE_GETTIMEOFDAY_TZ=yes)])
-if test x"$samba_cv_HAVE_GETTIMEOFDAY_TZ" = x"yes"; then
- AC_DEFINE(HAVE_GETTIMEOFDAY_TZ,1,[Whether gettimeofday() is available])
-fi
diff --git a/lib/util/tsort.h b/lib/util/tsort.h
new file mode 100644
index 0000000000..811d6cd2f7
--- /dev/null
+++ b/lib/util/tsort.h
@@ -0,0 +1,40 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ typesafe qsort
+
+ Copyright (C) Andrew Tridgell 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _TSORT_H
+#define _TSORT_H
+#include <assert.h>
+
+/*
+ a wrapper around qsort() that ensures the comparison function is
+ type safe.
+ */
+#ifndef TYPESAFE_QSORT
+#define TYPESAFE_QSORT(base, numel, comparison) \
+do { \
+ if (numel > 1) { \
+ qsort(base, numel, sizeof((base)[0]), (int (*)(const void *, const void *))comparison); \
+ assert(comparison(&((base)[0]), &((base)[1])) <= 0); \
+ } \
+} while (0)
+#endif
+
+#endif
diff --git a/lib/util/unix_privs.c b/lib/util/unix_privs.c
index f55e739a9b..9aa9a45918 100644
--- a/lib/util/unix_privs.c
+++ b/lib/util/unix_privs.c
@@ -20,9 +20,18 @@
*/
#include "includes.h"
-#include "system/filesys.h"
+#include "system/passwd.h"
#include "../lib/util/unix_privs.h"
+#if defined(UID_WRAPPER)
+#if !defined(UID_WRAPPER_REPLACE) && !defined(UID_WRAPPER_NOT_REPLACE)
+#define UID_WRAPPER_REPLACE
+#include "../uid_wrapper/uid_wrapper.h"
+#endif
+#else
+#define uwrap_enabled() 0
+#endif
+
/**
* @file
* @brief Gaining/losing root privileges
@@ -76,3 +85,9 @@ void *root_privileges(void)
talloc_set_destructor(s, privileges_destructor);
return s;
}
+
+uid_t root_privileges_original_uid(void *s)
+{
+ struct saved_state *saved = talloc_get_type_abort(s, struct saved_state);
+ return saved->uid;
+}
diff --git a/lib/util/util.c b/lib/util/util.c
index d1297a09dd..d4a936fae9 100644
--- a/lib/util/util.c
+++ b/lib/util/util.c
@@ -25,12 +25,23 @@
#include "system/network.h"
#include "system/filesys.h"
#include "system/locale.h"
+#include "system/shmem.h"
+
#undef malloc
#undef strcasecmp
#undef strncasecmp
#undef strdup
#undef realloc
+#if defined(UID_WRAPPER)
+#if !defined(UID_WRAPPER_REPLACE) && !defined(UID_WRAPPER_NOT_REPLACE)
+#define UID_WRAPPER_REPLACE
+#include "../uid_wrapper/uid_wrapper.h"
+#endif
+#else
+#define uwrap_enabled() 0
+#endif
+
/**
* @file
* @brief Misc utility functions
@@ -154,15 +165,50 @@ _PUBLIC_ bool directory_create_or_exist(const char *dname, uid_t uid,
Sleep for a specified number of milliseconds.
**/
-_PUBLIC_ void msleep(unsigned int t)
+_PUBLIC_ void smb_msleep(unsigned int t)
{
- struct timeval tval;
+#if defined(HAVE_NANOSLEEP)
+ struct timespec ts;
+ int ret;
+
+ ts.tv_sec = t/1000;
+ ts.tv_nsec = 1000000*(t%1000);
+
+ do {
+ errno = 0;
+ ret = nanosleep(&ts, &ts);
+ } while (ret < 0 && errno == EINTR && (ts.tv_sec > 0 || ts.tv_nsec > 0));
+#else
+ unsigned int tdiff=0;
+ struct timeval tval,t1,t2;
+ fd_set fds;
+
+ GetTimeOfDay(&t1);
+ t2 = t1;
+
+ while (tdiff < t) {
+ tval.tv_sec = (t-tdiff)/1000;
+ tval.tv_usec = 1000*((t-tdiff)%1000);
+
+ /* Never wait for more than 1 sec. */
+ if (tval.tv_sec > 1) {
+ tval.tv_sec = 1;
+ tval.tv_usec = 0;
+ }
+
+ FD_ZERO(&fds);
+ errno = 0;
+ select(0,&fds,NULL,NULL,&tval);
+
+ GetTimeOfDay(&t2);
+ if (t2.tv_sec < t1.tv_sec) {
+ /* Someone adjusted time... */
+ t1 = t2;
+ }
- tval.tv_sec = t/1000;
- tval.tv_usec = 1000*(t%1000);
- /* this should be the real select - do NOT replace
- with sys_select() */
- select(0,NULL,NULL,NULL,&tval);
+ tdiff = usec_time_diff(&t2,&t1)/1000;
+ }
+#endif
}
/**
@@ -254,29 +300,46 @@ _PUBLIC_ bool fcntl_lock(int fd, int op, off_t offset, off_t count, int type)
return true;
}
-void print_asc(int level, const uint8_t *buf,int len)
+static void debugadd_cb(const char *buf, void *private_data)
+{
+ int *plevel = (int *)private_data;
+ DEBUGADD(*plevel, ("%s", buf));
+}
+
+void print_asc_cb(const uint8_t *buf, int len,
+ void (*cb)(const char *buf, void *private_data),
+ void *private_data)
{
int i;
- for (i=0;i<len;i++)
- DEBUGADD(level,("%c", isprint(buf[i])?buf[i]:'.'));
+ char s[2];
+ s[1] = 0;
+
+ for (i=0; i<len; i++) {
+ s[0] = isprint(buf[i]) ? buf[i] : '.';
+ cb(s, private_data);
+ }
+}
+
+void print_asc(int level, const uint8_t *buf,int len)
+{
+ print_asc_cb(buf, len, debugadd_cb, &level);
}
/**
- * Write dump of binary data to the log file.
- *
- * The data is only written if the log level is at least level.
+ * Write dump of binary data to a callback
*/
-static void _dump_data(int level, const uint8_t *buf, int len,
- bool omit_zero_bytes)
+void dump_data_cb(const uint8_t *buf, int len,
+ bool omit_zero_bytes,
+ void (*cb)(const char *buf, void *private_data),
+ void *private_data)
{
int i=0;
static const uint8_t empty[16] = { 0, };
bool skipped = false;
+ char tmp[16];
if (len<=0) return;
- if (!DEBUGLVL(level)) return;
-
for (i=0;i<len;) {
if (i%16 == 0) {
@@ -290,23 +353,30 @@ static void _dump_data(int level, const uint8_t *buf, int len,
}
if (i<len) {
- DEBUGADD(level,("[%04X] ",i));
+ snprintf(tmp, sizeof(tmp), "[%04X] ", i);
+ cb(tmp, private_data);
}
}
- DEBUGADD(level,("%02X ",(int)buf[i]));
+ snprintf(tmp, sizeof(tmp), "%02X ", (int)buf[i]);
+ cb(tmp, private_data);
i++;
- if (i%8 == 0) DEBUGADD(level,(" "));
+ if (i%8 == 0) {
+ cb(" ", private_data);
+ }
if (i%16 == 0) {
- print_asc(level,&buf[i-16],8); DEBUGADD(level,(" "));
- print_asc(level,&buf[i-8],8); DEBUGADD(level,("\n"));
+ print_asc_cb(&buf[i-16], 8, cb, private_data);
+ cb(" ", private_data);
+ print_asc_cb(&buf[i-8], 8, cb, private_data);
+ cb("\n", private_data);
if ((omit_zero_bytes == true) &&
(len > i+16) &&
(memcmp(&buf[i], &empty, 16) == 0)) {
if (!skipped) {
- DEBUGADD(level,("skipping zero buffer bytes\n"));
+ cb("skipping zero buffer bytes\n",
+ private_data);
skipped = true;
}
}
@@ -316,14 +386,21 @@ static void _dump_data(int level, const uint8_t *buf, int len,
if (i%16) {
int n;
n = 16 - (i%16);
- DEBUGADD(level,(" "));
- if (n>8) DEBUGADD(level,(" "));
- while (n--) DEBUGADD(level,(" "));
+ cb(" ", private_data);
+ if (n>8) {
+ cb(" ", private_data);
+ }
+ while (n--) {
+ cb(" ", private_data);
+ }
n = MIN(8,i%16);
- print_asc(level,&buf[i-(i%16)],n); DEBUGADD(level,( " " ));
+ print_asc_cb(&buf[i-(i%16)], n, cb, private_data);
+ cb(" ", private_data);
n = (i%16) - n;
- if (n>0) print_asc(level,&buf[i-n],n);
- DEBUGADD(level,("\n"));
+ if (n>0) {
+ print_asc_cb(&buf[i-n], n, cb, private_data);
+ }
+ cb("\n", private_data);
}
}
@@ -335,18 +412,24 @@ static void _dump_data(int level, const uint8_t *buf, int len,
*/
_PUBLIC_ void dump_data(int level, const uint8_t *buf, int len)
{
- _dump_data(level, buf, len, false);
+ if (!DEBUGLVL(level)) {
+ return;
+ }
+ dump_data_cb(buf, len, false, debugadd_cb, &level);
}
/**
* Write dump of binary data to the log file.
*
* The data is only written if the log level is at least level.
- * 16 zero bytes in a row are ommited
+ * 16 zero bytes in a row are omitted
*/
_PUBLIC_ void dump_data_skip_zeros(int level, const uint8_t *buf, int len)
{
- _dump_data(level, buf, len, true);
+ if (!DEBUGLVL(level)) {
+ return;
+ }
+ dump_data_cb(buf, len, true, debugadd_cb, &level);
}
@@ -579,18 +662,18 @@ _PUBLIC_ _PURE_ size_t count_chars(const char *s, char c)
**/
_PUBLIC_ size_t strhex_to_str(char *p, size_t p_len, const char *strhex, size_t strhex_len)
{
- size_t i;
+ size_t i = 0;
size_t num_chars = 0;
uint8_t lonybble, hinybble;
const char *hexchars = "0123456789ABCDEF";
char *p1 = NULL, *p2 = NULL;
- for (i = 0; i < strhex_len && strhex[i] != 0; i++) {
- if (strncasecmp(hexchars, "0x", 2) == 0) {
- i++; /* skip two chars */
- continue;
- }
+ /* skip leading 0x prefix */
+ if (strncasecmp(strhex, "0x", 2) == 0) {
+ i += 2; /* skip two chars */
+ }
+ for (; i < strhex_len && strhex[i] != 0; i++) {
if (!(p1 = strchr(hexchars, toupper((unsigned char)strhex[i]))))
break;
@@ -764,8 +847,8 @@ static bool next_token_internal_talloc(TALLOC_CTX *ctx,
const char *sep,
bool ltrim)
{
- char *s;
- char *saved_s;
+ const char *s;
+ const char *saved_s;
char *pbuf;
bool quoted;
size_t len=1;
@@ -775,7 +858,7 @@ static bool next_token_internal_talloc(TALLOC_CTX *ctx,
return(false);
}
- s = (char *)*ptr;
+ s = *ptr;
/* default to simple separators */
if (!sep) {
@@ -853,4 +936,136 @@ bool next_token_no_ltrim_talloc(TALLOC_CTX *ctx,
return next_token_internal_talloc(ctx, ptr, pp_buff, sep, false);
}
+/**
+ * Get the next token from a string, return False if none found.
+ * Handles double-quotes.
+ *
+ * Based on a routine by GJC@VILLAGE.COM.
+ * Extensively modified by Andrew.Tridgell@anu.edu.au
+ **/
+_PUBLIC_ bool next_token(const char **ptr,char *buff, const char *sep, size_t bufsize)
+{
+ const char *s;
+ bool quoted;
+ size_t len=1;
+
+ if (!ptr)
+ return false;
+
+ s = *ptr;
+
+ /* default to simple separators */
+ if (!sep)
+ sep = " \t\n\r";
+
+ /* find the first non sep char */
+ while (*s && strchr_m(sep,*s))
+ s++;
+ /* nothing left? */
+ if (!*s)
+ return false;
+
+ /* copy over the token */
+ for (quoted = false; len < bufsize && *s && (quoted || !strchr_m(sep,*s)); s++) {
+ if (*s == '\"') {
+ quoted = !quoted;
+ } else {
+ len++;
+ *buff++ = *s;
+ }
+ }
+
+ *ptr = (*s) ? s+1 : s;
+ *buff = 0;
+
+ return true;
+}
+
+struct anonymous_shared_header {
+ union {
+ size_t length;
+ uint8_t pad[16];
+ } u;
+};
+
+/* Map a shared memory buffer of at least nelem counters. */
+void *anonymous_shared_allocate(size_t orig_bufsz)
+{
+ void *ptr;
+ void *buf;
+ size_t pagesz = getpagesize();
+ size_t pagecnt;
+ size_t bufsz = orig_bufsz;
+ struct anonymous_shared_header *hdr;
+
+ bufsz += sizeof(*hdr);
+
+ /* round up to full pages */
+ pagecnt = bufsz / pagesz;
+ if (bufsz % pagesz) {
+ pagecnt += 1;
+ }
+ bufsz = pagesz * pagecnt;
+
+ if (orig_bufsz >= bufsz) {
+ /* integer wrap */
+ errno = ENOMEM;
+ return NULL;
+ }
+
+#ifdef MAP_ANON
+ /* BSD */
+ buf = mmap(NULL, bufsz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED,
+ -1 /* fd */, 0 /* offset */);
+#else
+ buf = mmap(NULL, bufsz, PROT_READ|PROT_WRITE, MAP_FILE|MAP_SHARED,
+ open("/dev/zero", O_RDWR), 0 /* offset */);
+#endif
+
+ if (buf == MAP_FAILED) {
+ return NULL;
+ }
+
+ hdr = (struct anonymous_shared_header *)buf;
+ hdr->u.length = bufsz;
+
+ ptr = (void *)(&hdr[1]);
+
+ return ptr;
+}
+
+void anonymous_shared_free(void *ptr)
+{
+ struct anonymous_shared_header *hdr;
+
+ if (ptr == NULL) {
+ return;
+ }
+
+ hdr = (struct anonymous_shared_header *)ptr;
+
+ hdr--;
+
+ munmap(hdr, hdr->u.length);
+}
+
+#ifdef DEVELOPER
+/* used when you want a debugger started at a particular point in the
+ code. Mostly useful in code that runs as a child process, where
+ normal gdb attach is harder to organise.
+*/
+void samba_start_debugger(void)
+{
+ char *cmd = NULL;
+ if (asprintf(&cmd, "xterm -e \"gdb --pid %u\"&", getpid()) == -1) {
+ return;
+ }
+ if (system(cmd) == -1) {
+ free(cmd);
+ return;
+ }
+ free(cmd);
+ sleep(2);
+}
+#endif
diff --git a/lib/util/util.h b/lib/util/util.h
index 159f812d98..81289b81de 100644
--- a/lib/util/util.h
+++ b/lib/util/util.h
@@ -21,10 +21,8 @@
#ifndef _SAMBA_UTIL_H_
#define _SAMBA_UTIL_H_
-#if _SAMBA_BUILD_ == 4
-#include "../lib/util/charset/charset.h"
-#endif
-#include "../lib/util/attr.h"
+#include "lib/util/charset/charset.h"
+#include "lib/util/attr.h"
/* for TALLOC_CTX */
#include <talloc.h>
@@ -39,12 +37,11 @@ struct smbsrv_tcon;
extern const char *logfile;
extern const char *panic_action;
-#include "../lib/util/time.h"
-#include "../lib/util/data_blob.h"
-#include "../lib/util/xfile.h"
-#include "../lib/util/mutex.h"
-#include "../lib/util/byteorder.h"
-#include "../lib/util/talloc_stack.h"
+#include "lib/util/time.h"
+#include "lib/util/data_blob.h"
+#include "lib/util/xfile.h"
+#include "lib/util/byteorder.h"
+#include "lib/util/talloc_stack.h"
/**
* assert macros
@@ -60,18 +57,11 @@ extern const char *panic_action;
__FILE__, __LINE__, #b)); }} while (0)
#endif
-#if _SAMBA_BUILD_ == 4
-#ifdef VALGRIND
-#define strlen(x) valgrind_strlen(x)
-size_t valgrind_strlen(const char *s);
-#endif
-#endif
-
#ifndef ABS
#define ABS(a) ((a)>0?(a):(-(a)))
#endif
-#include "../lib/util/memory.h"
+#include "lib/util/memory.h"
/**
* Write backtrace to debug log
@@ -88,6 +78,7 @@ _PUBLIC_ _NORETURN_ void smb_panic(const char *why);
setup our fault handlers
**/
_PUBLIC_ void fault_setup(const char *pname);
+_PUBLIC_ void fault_setup_disable(void);
#endif
/**
@@ -142,8 +133,20 @@ _PUBLIC_ pid_t sys_fork(void);
**/
_PUBLIC_ pid_t sys_getpid(void);
-/* The following definitions come from lib/util/genrand.c */
+_PUBLIC_ int sys_getpeereid( int s, uid_t *uid);
+struct sockaddr;
+
+_PUBLIC_ int sys_getnameinfo(const struct sockaddr *psa,
+ int salen,
+ char *host,
+ size_t hostlen,
+ char *service,
+ size_t servlen,
+ int flags);
+_PUBLIC_ int sys_connect(int fd, const struct sockaddr * addr);
+
+/* The following definitions come from lib/util/genrand.c */
/**
Copy any user given reseed data.
**/
@@ -178,6 +181,11 @@ _PUBLIC_ uint32_t generate_random(void);
_PUBLIC_ bool check_password_quality(const char *s);
/**
+ * Generate a random text password.
+ */
+_PUBLIC_ char *generate_random_password(TALLOC_CTX *mem_ctx, size_t min, size_t max);
+
+/**
Use the random number generator to generate a random string.
**/
_PUBLIC_ char *generate_random_str_list(TALLOC_CTX *mem_ctx, size_t len, const char *list);
@@ -450,7 +458,7 @@ _PUBLIC_ char **str_list_make(TALLOC_CTX *mem_ctx, const char *string,
/**
* build a null terminated list of strings from an argv-like input string
- * Entries are seperated by spaces and can be enclosed by quotes.
+ * Entries are separated by spaces and can be enclosed by quotes.
* Does NOT support escaping
*/
_PUBLIC_ char **str_list_make_shell(TALLOC_CTX *mem_ctx, const char *string, const char *sep);
@@ -458,10 +466,10 @@ _PUBLIC_ char **str_list_make_shell(TALLOC_CTX *mem_ctx, const char *string, con
/**
* join a list back to one string
*/
-_PUBLIC_ char *str_list_join(TALLOC_CTX *mem_ctx, const char **list, char seperator);
+_PUBLIC_ char *str_list_join(TALLOC_CTX *mem_ctx, const char **list, char separator);
/** join a list back to one (shell-like) string; entries
- * seperated by spaces, using quotes where necessary */
+ * separated by spaces, using quotes where necessary */
_PUBLIC_ char *str_list_join_shell(TALLOC_CTX *mem_ctx, const char **list, char sep);
/**
@@ -477,7 +485,7 @@ _PUBLIC_ char **str_list_copy(TALLOC_CTX *mem_ctx, const char **list);
/**
Return true if all the elements of the list match exactly.
*/
-_PUBLIC_ bool str_list_equal(const char **list1, const char **list2);
+_PUBLIC_ bool str_list_equal(const char * const *list1, const char * const *list2);
/**
add an entry to a string list
@@ -535,6 +543,11 @@ _PUBLIC_ const char **str_list_add_const(const char **list, const char *s);
_PUBLIC_ const char **str_list_copy_const(TALLOC_CTX *mem_ctx,
const char **list);
+/**
+ * Needed for making an "unconst" list "const"
+ */
+_PUBLIC_ const char **const_str_list(char **list);
+
/* The following definitions come from lib/util/util_file.c */
@@ -596,6 +609,11 @@ _PUBLIC_ int vfdprintf(int fd, const char *format, va_list ap) PRINTF_ATTRIBUTE(
_PUBLIC_ int fdprintf(int fd, const char *format, ...) PRINTF_ATTRIBUTE(2,3);
_PUBLIC_ bool large_file_support(const char *path);
+/*
+ compare two files, return true if the two files have the same content
+ */
+bool file_compare(const char *path1, const char *path2);
+
/* The following definitions come from lib/util/util.c */
@@ -640,7 +658,7 @@ _PUBLIC_ int set_blocking(int fd, bool set);
/**
Sleep for a specified number of milliseconds.
**/
-_PUBLIC_ void msleep(unsigned int t);
+_PUBLIC_ void smb_msleep(unsigned int t);
/**
Get my own name, return in talloc'ed storage.
@@ -648,33 +666,6 @@ _PUBLIC_ void msleep(unsigned int t);
_PUBLIC_ char* get_myname(TALLOC_CTX *mem_ctx);
/**
- Return true if a string could be a pure IP address.
-**/
-_PUBLIC_ bool is_ipaddress(const char *str);
-
-/**
- Interpret an internet address or name into an IP address in 4 byte form.
-**/
-_PUBLIC_ uint32_t interpret_addr(const char *str);
-
-/**
- A convenient addition to interpret_addr().
-**/
-_PUBLIC_ struct in_addr interpret_addr2(const char *str);
-
-/**
- Check if an IP is the 0.0.0.0.
-**/
-_PUBLIC_ bool is_zero_ip_v4(struct in_addr ip);
-
-/**
- Are two IPs on the same subnet?
-**/
-_PUBLIC_ bool same_net_v4(struct in_addr ip1,struct in_addr ip2,struct in_addr mask);
-
-_PUBLIC_ bool is_ipaddress_v4(const char *str);
-
-/**
Check if a process exists. Does this work on all unixes?
**/
_PUBLIC_ bool process_exists_by_pid(pid_t pid);
@@ -686,6 +677,14 @@ _PUBLIC_ bool process_exists_by_pid(pid_t pid);
_PUBLIC_ bool fcntl_lock(int fd, int op, off_t offset, off_t count, int type);
/**
+ * Write dump of binary data to a callback
+ */
+void dump_data_cb(const uint8_t *buf, int len,
+ bool omit_zero_bytes,
+ void (*cb)(const char *buf, void *private_data),
+ void *private_data);
+
+/**
* Write dump of binary data to the log file.
*
* The data is only written if the log level is at least level.
@@ -696,7 +695,7 @@ _PUBLIC_ void dump_data(int level, const uint8_t *buf,int len);
* Write dump of binary data to the log file.
*
* The data is only written if the log level is at least level.
- * 16 zero bytes in a row are ommited
+ * 16 zero bytes in a row are omitted
*/
_PUBLIC_ void dump_data_skip_zeros(int level, const uint8_t *buf, int len);
@@ -783,15 +782,6 @@ int ms_fnmatch(const char *pattern, const char *string, enum protocol_types prot
int gen_fnmatch(const char *pattern, const char *string);
#endif
-/* The following definitions come from lib/util/mutex.c */
-
-
-/**
- register a set of mutex/rwlock handlers.
- Should only be called once in the execution of smbd.
-*/
-_PUBLIC_ bool register_mutex_handlers(const char *name, struct mutex_ops *ops);
-
/* The following definitions come from lib/util/idtree.c */
@@ -839,7 +829,7 @@ _PUBLIC_ void close_low_fds(bool stderr_too);
/**
Become a daemon, discarding the controlling terminal.
**/
-_PUBLIC_ void become_daemon(bool do_fork, bool no_process_group);
+_PUBLIC_ void become_daemon(bool do_fork, bool no_process_group, bool log_stdout);
/**
* Load a ini-style file.
@@ -852,6 +842,9 @@ bool pm_process( const char *fileName,
bool unmap_file(void *start, size_t size);
void print_asc(int level, const uint8_t *buf,int len);
+void print_asc_cb(const uint8_t *buf, int len,
+ void (*cb)(const char *buf, void *private_data),
+ void *private_data);
/**
* Add an id to an array of ids.
@@ -861,9 +854,37 @@ void print_asc(int level, const uint8_t *buf,int len);
*/
bool add_uid_to_array_unique(TALLOC_CTX *mem_ctx, uid_t uid,
- uid_t **uids, size_t *num_uids);
+ uid_t **uids, uint32_t *num_uids);
bool add_gid_to_array_unique(TALLOC_CTX *mem_ctx, gid_t gid,
- gid_t **gids, size_t *num_gids);
+ gid_t **gids, uint32_t *num_gids);
+/**
+ * Allocate anonymous shared memory of the given size
+ */
+void *anonymous_shared_allocate(size_t bufsz);
+void anonymous_shared_free(void *ptr);
+
+/*
+ run a command as a child process, with a timeout.
+
+ any stdout/stderr from the child will appear in the Samba logs with
+ the specified log levels
+
+ If callback is set then the callback is called on completion
+ with the return code from the command
+ */
+struct tevent_context;
+struct tevent_req;
+struct tevent_req *samba_runcmd_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct timeval endtime,
+ int stdout_log_level,
+ int stderr_log_level,
+ const char * const *argv0, ...);
+int samba_runcmd_recv(struct tevent_req *req, int *perrno);
+
+#ifdef DEVELOPER
+void samba_start_debugger(void);
+#endif
#endif /* _SAMBA_UTIL_H_ */
diff --git a/lib/util/util.m4 b/lib/util/util.m4
deleted file mode 100644
index 9e362954cd..0000000000
--- a/lib/util/util.m4
+++ /dev/null
@@ -1 +0,0 @@
-AC_CHECK_FUNCS(setsid)
diff --git a/lib/util/util_file.c b/lib/util/util_file.c
index 7466004e5c..7c00dd8b57 100644
--- a/lib/util/util_file.c
+++ b/lib/util/util_file.c
@@ -235,7 +235,7 @@ _PUBLIC_ void *map_file(const char *fname, size_t size)
}
#endif
if (!p) {
- p = file_load(fname, &s2, 0, talloc_autofree_context());
+ p = file_load(fname, &s2, 0, NULL);
if (!p) return NULL;
if (s2 != size) {
DEBUG(1,("incorrect size for %s - got %d expected %d\n",
@@ -435,3 +435,25 @@ _PUBLIC_ bool large_file_support(const char *path)
}
+/*
+ compare two files, return true if the two files have the same content
+ */
+bool file_compare(const char *path1, const char *path2)
+{
+ size_t size1, size2;
+ char *p1, *p2;
+ TALLOC_CTX *mem_ctx = talloc_new(NULL);
+
+ p1 = file_load(path1, &size1, 0, mem_ctx);
+ p2 = file_load(path2, &size2, 0, mem_ctx);
+ if (!p1 || !p2 || size1 != size2) {
+ talloc_free(mem_ctx);
+ return false;
+ }
+ if (memcmp(p1, p2, size1) != 0) {
+ talloc_free(mem_ctx);
+ return false;
+ }
+ talloc_free(mem_ctx);
+ return true;
+}
diff --git a/lib/util/util_id.c b/lib/util/util_id.c
index 8744ce4e4e..d5a8c66f5e 100644
--- a/lib/util/util_id.c
+++ b/lib/util/util_id.c
@@ -26,7 +26,7 @@
****************************************************************************/
bool add_gid_to_array_unique(TALLOC_CTX *mem_ctx, gid_t gid,
- gid_t **gids, size_t *num_gids)
+ gid_t **gids, uint32_t *num_gids)
{
int i;
@@ -59,7 +59,7 @@ bool add_gid_to_array_unique(TALLOC_CTX *mem_ctx, gid_t gid,
****************************************************************************/
bool add_uid_to_array_unique(TALLOC_CTX *mem_ctx, uid_t uid,
- uid_t **uids, size_t *num_uids)
+ uid_t **uids, uint32_t *num_uids)
{
int i;
diff --git a/lib/util/util_ldb.c b/lib/util/util_ldb.c
index ac1e11566e..25ae510689 100644
--- a/lib/util/util_ldb.c
+++ b/lib/util/util_ldb.c
@@ -21,18 +21,19 @@
*/
#include "includes.h"
-#include "lib/ldb/include/ldb.h"
+#include <ldb.h>
#include "../lib/util/util_ldb.h"
+
/*
- search the sam for the specified attributes - va_list variant
-*/
+ * search the LDB for the specified attributes - va_list variant
+ */
int gendb_search_v(struct ldb_context *ldb,
TALLOC_CTX *mem_ctx,
struct ldb_dn *basedn,
struct ldb_message ***msgs,
const char * const *attrs,
const char *format,
- va_list ap)
+ va_list ap)
{
enum ldb_scope scope = LDB_SCOPE_SUBTREE;
struct ldb_result *res;
@@ -54,22 +55,23 @@ int gendb_search_v(struct ldb_context *ldb,
expr?"%s":NULL, expr);
if (ret == LDB_SUCCESS) {
- talloc_steal(mem_ctx, res->msgs);
-
DEBUG(6,("gendb_search_v: %s %s -> %d\n",
basedn?ldb_dn_get_linearized(basedn):"NULL",
expr?expr:"NULL", res->count));
ret = res->count;
- *msgs = res->msgs;
+ if (msgs != NULL) {
+ *msgs = talloc_steal(mem_ctx, res->msgs);
+ }
talloc_free(res);
} else if (scope == LDB_SCOPE_BASE && ret == LDB_ERR_NO_SUCH_OBJECT) {
ret = 0;
- *msgs = NULL;
+ if (msgs != NULL) *msgs = NULL;
} else {
DEBUG(4,("gendb_search_v: search failed: %s\n",
ldb_errstring(ldb)));
ret = -1;
+ if (msgs != NULL) *msgs = NULL;
}
talloc_free(expr);
@@ -78,14 +80,14 @@ int gendb_search_v(struct ldb_context *ldb,
}
/*
- search the LDB for the specified attributes - varargs variant
-*/
+ * search the LDB for the specified attributes - varargs variant
+ */
int gendb_search(struct ldb_context *ldb,
TALLOC_CTX *mem_ctx,
struct ldb_dn *basedn,
struct ldb_message ***res,
const char * const *attrs,
- const char *format, ...)
+ const char *format, ...)
{
va_list ap;
int count;
@@ -98,9 +100,8 @@ int gendb_search(struct ldb_context *ldb,
}
/*
- search the LDB for a specified record (by DN)
-*/
-
+ * search the LDB for a specified record (by DN)
+ */
int gendb_search_dn(struct ldb_context *ldb,
TALLOC_CTX *mem_ctx,
struct ldb_dn *dn,
@@ -110,119 +111,3 @@ int gendb_search_dn(struct ldb_context *ldb,
return gendb_search(ldb, mem_ctx, dn, res, attrs, NULL);
}
-/*
- setup some initial ldif in a ldb
-*/
-int gendb_add_ldif(struct ldb_context *ldb, const char *ldif_string)
-{
- struct ldb_ldif *ldif;
- int ret;
- ldif = ldb_ldif_read_string(ldb, &ldif_string);
- if (ldif == NULL) return -1;
- ret = ldb_add(ldb, ldif->msg);
- talloc_free(ldif);
- return ret;
-}
-
-char *wrap_casefold(void *context, void *mem_ctx, const char *s, size_t n)
-{
- return strupper_talloc_n(mem_ctx, s, n);
-}
-
-
-
-/*
- search the LDB for a single record, with the extended_dn control
- return LDB_SUCCESS on success, or an ldb error code on error
-
- if the search returns 0 entries, return LDB_ERR_NO_SUCH_OBJECT
- if the search returns more than 1 entry, return LDB_ERR_CONSTRAINT_VIOLATION
-*/
-int gendb_search_single_extended_dn(struct ldb_context *ldb,
- TALLOC_CTX *mem_ctx,
- struct ldb_dn *basedn,
- enum ldb_scope scope,
- struct ldb_message **msg,
- const char * const *attrs,
- const char *format, ...)
-{
- va_list ap;
- int ret;
- struct ldb_request *req;
- char *filter;
- TALLOC_CTX *tmp_ctx;
- struct ldb_result *res;
- struct ldb_extended_dn_control *ctrl;
-
- tmp_ctx = talloc_new(mem_ctx);
-
- res = talloc_zero(tmp_ctx, struct ldb_result);
- if (!res) {
- return LDB_ERR_OPERATIONS_ERROR;
- }
-
- va_start(ap, format);
- filter = talloc_vasprintf(tmp_ctx, format, ap);
- va_end(ap);
-
- if (filter == NULL) {
- talloc_free(tmp_ctx);
- return LDB_ERR_OPERATIONS_ERROR;
- }
-
- ret = ldb_build_search_req(&req, ldb, tmp_ctx,
- basedn,
- scope,
- filter,
- attrs,
- NULL,
- res,
- ldb_search_default_callback,
- NULL);
- if (ret != LDB_SUCCESS) {
- talloc_free(tmp_ctx);
- return ret;
- }
-
- ctrl = talloc(tmp_ctx, struct ldb_extended_dn_control);
- if (ctrl == NULL) {
- talloc_free(tmp_ctx);
- return LDB_ERR_OPERATIONS_ERROR;
- }
-
- ctrl->type = 1;
-
- ret = ldb_request_add_control(req, LDB_CONTROL_EXTENDED_DN_OID, true, ctrl);
- if (ret != LDB_SUCCESS) {
- return ret;
- }
-
- ret = ldb_request(ldb, req);
- if (ret == LDB_SUCCESS) {
- ret = ldb_wait(req->handle, LDB_WAIT_ALL);
- }
-
- if (ret != LDB_SUCCESS) {
- talloc_free(tmp_ctx);
- return ret;
- }
-
- if (res->count == 0) {
- talloc_free(tmp_ctx);
- return LDB_ERR_NO_SUCH_OBJECT;
- }
-
- if (res->count > 1) {
- /* the function is only supposed to return a single entry */
- DEBUG(0,(__location__ ": More than one return for baseDN %s filter %s\n",
- ldb_dn_get_linearized(basedn), filter));
- talloc_free(tmp_ctx);
- return LDB_ERR_CONSTRAINT_VIOLATION;
- }
-
- *msg = talloc_steal(mem_ctx, res->msgs[0]);
-
- talloc_free(tmp_ctx);
-
- return LDB_SUCCESS;
-}
diff --git a/lib/util/util_ldb.h b/lib/util/util_ldb.h
index 4575c6565a..d2bc3b0ff7 100644
--- a/lib/util/util_ldb.h
+++ b/lib/util/util_ldb.h
@@ -23,15 +23,5 @@ int gendb_search_dn(struct ldb_context *ldb,
struct ldb_dn *dn,
struct ldb_message ***res,
const char * const *attrs);
-int gendb_add_ldif(struct ldb_context *ldb, const char *ldif_string);
-char *wrap_casefold(void *context, void *mem_ctx, const char *s, size_t n);
-
-int gendb_search_single_extended_dn(struct ldb_context *ldb,
- TALLOC_CTX *mem_ctx,
- struct ldb_dn *basedn,
- enum ldb_scope scope,
- struct ldb_message **msg,
- const char * const *attrs,
- const char *format, ...) PRINTF_ATTRIBUTE(7,8);
#endif /* __LIB_UTIL_UTIL_LDB_H__ */
diff --git a/lib/util/util_net.c b/lib/util/util_net.c
index 0ce495e57c..e80447128f 100644
--- a/lib/util/util_net.c
+++ b/lib/util/util_net.c
@@ -27,6 +27,7 @@
#include "system/network.h"
#include "system/locale.h"
#include "system/filesys.h"
+#include "lib/util/util_net.h"
#undef strcasecmp
/*******************************************************************
@@ -35,7 +36,7 @@
void zero_sockaddr(struct sockaddr_storage *pss)
{
- memset(pss, '\0', sizeof(*pss));
+ ZERO_STRUCTP(pss);
/* Ensure we're at least a valid sockaddr-storage. */
pss->ss_family = AF_INET;
}
@@ -49,13 +50,14 @@ bool interpret_string_addr_internal(struct addrinfo **ppres,
int ret;
struct addrinfo hints;
- memset(&hints, '\0', sizeof(hints));
+ ZERO_STRUCT(hints);
+
/* By default make sure it supports TCP. */
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = flags;
- /* Linux man page on getaddinfo() says port will be
- uninitialized when service string in NULL */
+ /* Linux man page on getaddrinfo() says port will be
+ uninitialized when service string is NULL */
ret = getaddrinfo(str, NULL,
&hints,
@@ -382,16 +384,16 @@ bool is_loopback_addr(const struct sockaddr *pss)
/**
* Check if a struct sockaddr has an unspecified address.
*/
-bool is_zero_addr(const struct sockaddr *pss)
+bool is_zero_addr(const struct sockaddr_storage *pss)
{
#if defined(HAVE_IPV6)
- if (pss->sa_family == AF_INET6) {
+ if (pss->ss_family == AF_INET6) {
const struct in6_addr *pin6 =
&((const struct sockaddr_in6 *)pss)->sin6_addr;
return IN6_IS_ADDR_UNSPECIFIED(pin6);
}
#endif
- if (pss->sa_family == AF_INET) {
+ if (pss->ss_family == AF_INET) {
const struct in_addr *pin = &((const struct sockaddr_in *)pss)->sin_addr;
return is_zero_ip_v4(*pin);
}
@@ -538,3 +540,158 @@ void set_sockaddr_port(struct sockaddr *psa, uint16_t port)
}
+/****************************************************************************
+ Get a port number in host byte order from a sockaddr_storage.
+****************************************************************************/
+
+uint16_t get_sockaddr_port(const struct sockaddr_storage *pss)
+{
+ uint16_t port = 0;
+
+ if (pss->ss_family != AF_INET) {
+#if defined(HAVE_IPV6)
+ /* IPv6 */
+ const struct sockaddr_in6 *sa6 =
+ (const struct sockaddr_in6 *)pss;
+ port = ntohs(sa6->sin6_port);
+#endif
+ } else {
+ const struct sockaddr_in *sa =
+ (const struct sockaddr_in *)pss;
+ port = ntohs(sa->sin_port);
+ }
+ return port;
+}
+
+/****************************************************************************
+ Print out an IPv4 or IPv6 address from a struct sockaddr_storage.
+****************************************************************************/
+
+char *print_sockaddr_len(char *dest,
+ size_t destlen,
+ const struct sockaddr *psa,
+ socklen_t psalen)
+{
+ if (destlen > 0) {
+ dest[0] = '\0';
+ }
+ (void)sys_getnameinfo(psa,
+ psalen,
+ dest, destlen,
+ NULL, 0,
+ NI_NUMERICHOST);
+ return dest;
+}
+
+/****************************************************************************
+ Print out an IPv4 or IPv6 address from a struct sockaddr_storage.
+****************************************************************************/
+
+char *print_sockaddr(char *dest,
+ size_t destlen,
+ const struct sockaddr_storage *psa)
+{
+ return print_sockaddr_len(dest, destlen, (struct sockaddr *)psa,
+ sizeof(struct sockaddr_storage));
+}
+
+/****************************************************************************
+ Print out a canonical IPv4 or IPv6 address from a struct sockaddr_storage.
+****************************************************************************/
+
+char *print_canonical_sockaddr(TALLOC_CTX *ctx,
+ const struct sockaddr_storage *pss)
+{
+ char addr[INET6_ADDRSTRLEN];
+ char *dest = NULL;
+ int ret;
+
+ /* Linux getnameinfo() man pages says port is unitialized if
+ service name is NULL. */
+
+ ret = sys_getnameinfo((const struct sockaddr *)pss,
+ sizeof(struct sockaddr_storage),
+ addr, sizeof(addr),
+ NULL, 0,
+ NI_NUMERICHOST);
+ if (ret != 0) {
+ return NULL;
+ }
+
+ if (pss->ss_family != AF_INET) {
+#if defined(HAVE_IPV6)
+ dest = talloc_asprintf(ctx, "[%s]", addr);
+#else
+ return NULL;
+#endif
+ } else {
+ dest = talloc_asprintf(ctx, "%s", addr);
+ }
+
+ return dest;
+}
+
+/****************************************************************************
+ Return the port number we've bound to on a socket.
+****************************************************************************/
+
+int get_socket_port(int fd)
+{
+ struct sockaddr_storage sa;
+ socklen_t length = sizeof(sa);
+
+ if (fd == -1) {
+ return -1;
+ }
+
+ if (getsockname(fd, (struct sockaddr *)&sa, &length) < 0) {
+ int level = (errno == ENOTCONN) ? 2 : 0;
+ DEBUG(level, ("getsockname failed. Error was %s\n",
+ strerror(errno)));
+ return -1;
+ }
+
+#if defined(HAVE_IPV6)
+ if (sa.ss_family == AF_INET6) {
+ return ntohs(((struct sockaddr_in6 *)&sa)->sin6_port);
+ }
+#endif
+ if (sa.ss_family == AF_INET) {
+ return ntohs(((struct sockaddr_in *)&sa)->sin_port);
+ }
+ return -1;
+}
+
+/****************************************************************************
+ Return the string of an IP address (IPv4 or IPv6).
+****************************************************************************/
+
+static const char *get_socket_addr(int fd, char *addr_buf, size_t addr_len)
+{
+ struct sockaddr_storage sa;
+ socklen_t length = sizeof(sa);
+
+ /* Ok, returning a hard coded IPv4 address
+ * is bogus, but it's just as bogus as a
+ * zero IPv6 address. No good choice here.
+ */
+
+ strlcpy(addr_buf, "0.0.0.0", addr_len);
+
+ if (fd == -1) {
+ return addr_buf;
+ }
+
+ if (getsockname(fd, (struct sockaddr *)&sa, &length) < 0) {
+ DEBUG(0,("getsockname failed. Error was %s\n",
+ strerror(errno) ));
+ return addr_buf;
+ }
+
+ return print_sockaddr_len(addr_buf, addr_len, (struct sockaddr *)&sa, length);
+}
+
+const char *client_socket_addr(int fd, char *addr, size_t addr_len)
+{
+ return get_socket_addr(fd, addr, addr_len);
+}
diff --git a/lib/util/util_net.h b/lib/util/util_net.h
index 6eacfc395f..71d1d1a6bd 100644
--- a/lib/util/util_net.h
+++ b/lib/util/util_net.h
@@ -43,4 +43,69 @@ bool interpret_string_addr_prefer_ipv4(struct sockaddr_storage *pss,
const char *str,
int flags);
+void set_sockaddr_port(struct sockaddr *psa, uint16_t port);
+
+/**
+ Check if an IP is the 0.0.0.0.
+**/
+_PUBLIC_ bool is_zero_ip_v4(struct in_addr ip);
+
+void in_addr_to_sockaddr_storage(struct sockaddr_storage *ss,
+ struct in_addr ip);
+#if defined(HAVE_IPV6)
+/**
+ * Convert an IPv6 struct in_addr to a struct sockaddr_storage.
+ */
+void in6_addr_to_sockaddr_storage(struct sockaddr_storage *ss,
+ struct in6_addr ip);
+#endif
+/**
+ Are two IPs on the same subnet?
+**/
+_PUBLIC_ bool same_net_v4(struct in_addr ip1,struct in_addr ip2,struct in_addr mask);
+
+/**
+ Return true if a string could be a pure IP address.
+**/
+_PUBLIC_ bool is_ipaddress(const char *str);
+
+bool is_broadcast_addr(const struct sockaddr *pss);
+bool is_loopback_ip_v4(struct in_addr ip);
+bool is_loopback_addr(const struct sockaddr *pss);
+bool is_zero_addr(const struct sockaddr_storage *pss);
+void zero_ip_v4(struct in_addr *ip);
+/**
+ Interpret an internet address or name into an IP address in 4 byte form.
+**/
+_PUBLIC_ uint32_t interpret_addr(const char *str);
+
+/**
+ A convenient addition to interpret_addr().
+**/
+_PUBLIC_ struct in_addr interpret_addr2(const char *str);
+
+_PUBLIC_ bool is_ipaddress_v4(const char *str);
+
+bool is_address_any(const struct sockaddr *psa);
+bool same_net(const struct sockaddr *ip1,
+ const struct sockaddr *ip2,
+ const struct sockaddr *mask);
+bool sockaddr_equal(const struct sockaddr *ip1,
+ const struct sockaddr *ip2);
+
+bool is_address_any(const struct sockaddr *psa);
+uint16_t get_sockaddr_port(const struct sockaddr_storage *pss);
+char *print_sockaddr_len(char *dest,
+ size_t destlen,
+ const struct sockaddr *psa,
+ socklen_t psalen);
+char *print_sockaddr(char *dest,
+ size_t destlen,
+ const struct sockaddr_storage *psa);
+char *print_canonical_sockaddr(TALLOC_CTX *ctx,
+ const struct sockaddr_storage *pss);
+const char *client_name(int fd);
+int get_socket_port(int fd);
+const char *client_socket_addr(int fd, char *addr, size_t addr_len);
+
#endif /* _SAMBA_UTIL_NET_H_ */
diff --git a/lib/util/util_pw.c b/lib/util/util_pw.c
index 11e46ec4e3..c6e4680ec3 100644
--- a/lib/util/util_pw.c
+++ b/lib/util/util_pw.c
@@ -3,7 +3,12 @@
Safe versions of getpw* calls
+ Copyright (C) Andrew Tridgell 1992-1998
+ Copyright (C) Jeremy Allison 1998-2005
Copyright (C) Andrew Bartlett 2002
+ Copyright (C) Timur Bakeyev 2005
+ Copyright (C) Bjoern Jacke 2006-2007
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -20,9 +25,54 @@
*/
#include "includes.h"
+#include "system/passwd.h"
+#include "lib/util/util_pw.h"
+
+/**************************************************************************
+ Wrappers for setpwent(), getpwent() and endpwent()
+****************************************************************************/
-static struct passwd *alloc_copy_passwd(TALLOC_CTX *mem_ctx,
- const struct passwd *from)
+void sys_setpwent(void)
+{
+ setpwent();
+}
+
+struct passwd *sys_getpwent(void)
+{
+ return getpwent();
+}
+
+void sys_endpwent(void)
+{
+ endpwent();
+}
+
+/**************************************************************************
+ Wrappers for getpwnam(), getpwuid(), getgrnam(), getgrgid()
+****************************************************************************/
+
+struct passwd *sys_getpwnam(const char *name)
+{
+ return getpwnam(name);
+}
+
+struct passwd *sys_getpwuid(uid_t uid)
+{
+ return getpwuid(uid);
+}
+
+struct group *sys_getgrnam(const char *name)
+{
+ return getgrnam(name);
+}
+
+struct group *sys_getgrgid(gid_t gid)
+{
+ return getgrgid(gid);
+}
+
+struct passwd *tcopy_passwd(TALLOC_CTX *mem_ctx,
+ const struct passwd *from)
{
struct passwd *ret = talloc_zero(mem_ctx, struct passwd);
@@ -40,7 +90,7 @@ static struct passwd *alloc_copy_passwd(TALLOC_CTX *mem_ctx,
return ret;
}
-struct passwd *getpwnam_alloc(TALLOC_CTX *mem_ctx, const char *name)
+struct passwd *getpwnam_alloc(TALLOC_CTX *mem_ctx, const char *name)
{
struct passwd *temp;
@@ -55,10 +105,14 @@ struct passwd *getpwnam_alloc(TALLOC_CTX *mem_ctx, const char *name)
return NULL;
}
- return alloc_copy_passwd(mem_ctx, temp);
+ return tcopy_passwd(mem_ctx, temp);
}
-struct passwd *getpwuid_alloc(TALLOC_CTX *mem_ctx, uid_t uid)
+/****************************************************************************
+ talloc'ed version of getpwuid.
+****************************************************************************/
+
+struct passwd *getpwuid_alloc(TALLOC_CTX *mem_ctx, uid_t uid)
{
struct passwd *temp;
@@ -73,5 +127,5 @@ struct passwd *getpwuid_alloc(TALLOC_CTX *mem_ctx, uid_t uid)
return NULL;
}
- return alloc_copy_passwd(mem_ctx, temp);
+ return tcopy_passwd(mem_ctx, temp);
}
diff --git a/lib/util/util_pw.h b/lib/util/util_pw.h
new file mode 100644
index 0000000000..2967963459
--- /dev/null
+++ b/lib/util/util_pw.h
@@ -0,0 +1,39 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Safe versions of getpw* calls
+
+ Copyright (C) Andrew Tridgell 1992-1998
+ Copyright (C) Jeremy Allison 1997-2001.
+ Copyright (C) Andrew Bartlett 2002
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __LIB_UTIL_UTIL_PW_H__
+#define __LIB_UTIL_UTIL_PW_H__
+
+void sys_setpwent(void);
+struct passwd *sys_getpwent(void);
+void sys_endpwent(void);
+struct passwd *sys_getpwnam(const char *name);
+struct passwd *sys_getpwuid(uid_t uid);
+struct group *sys_getgrnam(const char *name);
+struct group *sys_getgrgid(gid_t gid);
+struct passwd *tcopy_passwd(TALLOC_CTX *mem_ctx,
+ const struct passwd *from);
+struct passwd *getpwnam_alloc(TALLOC_CTX *mem_ctx, const char *name);
+struct passwd *getpwuid_alloc(TALLOC_CTX *mem_ctx, uid_t uid);
+
+#endif /* __LIB_UTIL_UTIL_PW_H__ */
diff --git a/lib/util/util_runcmd.c b/lib/util/util_runcmd.c
new file mode 100644
index 0000000000..d617254432
--- /dev/null
+++ b/lib/util/util_runcmd.c
@@ -0,0 +1,312 @@
+/*
+ Unix SMB/CIFS mplementation.
+
+ run a child command
+
+ Copyright (C) Andrew Tridgell 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+/*
+ this runs a child command with stdout and stderr going to the Samba
+ log
+ */
+
+#include "includes.h"
+#include "system/filesys.h"
+#include <tevent.h>
+#include "../lib/util/tevent_unix.h"
+
+struct samba_runcmd_state {
+ int stdout_log_level;
+ int stderr_log_level;
+ struct tevent_fd *fde_stdout;
+ struct tevent_fd *fde_stderr;
+ int fd_stdout, fd_stderr;
+ char *arg0;
+ pid_t pid;
+ char buf[1024];
+ uint16_t buf_used;
+};
+
+static int samba_runcmd_state_destructor(struct samba_runcmd_state *state)
+{
+ if (state->pid > 0) {
+ kill(state->pid, SIGKILL);
+ waitpid(state->pid, NULL, 0);
+ state->pid = -1;
+ }
+ return 0;
+}
+
+static void samba_runcmd_io_handler(struct tevent_context *ev,
+ struct tevent_fd *fde,
+ uint16_t flags,
+ void *private_data);
+
+/*
+ run a command as a child process, with a timeout.
+
+ any stdout/stderr from the child will appear in the Samba logs with
+ the specified log levels
+ */
+struct tevent_req *samba_runcmd_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct timeval endtime,
+ int stdout_log_level,
+ int stderr_log_level,
+ const char * const *argv0, ...)
+{
+ struct tevent_req *req;
+ struct samba_runcmd_state *state;
+ int p1[2], p2[2];
+ char **argv;
+ int ret;
+ va_list ap;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct samba_runcmd_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ state->stdout_log_level = stdout_log_level;
+ state->stderr_log_level = stderr_log_level;
+
+ state->arg0 = talloc_strdup(state, argv0[0]);
+ if (tevent_req_nomem(state->arg0, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ if (pipe(p1) != 0) {
+ tevent_req_error(req, errno);
+ return tevent_req_post(req, ev);
+ }
+ if (pipe(p2) != 0) {
+ close(p1[0]);
+ close(p1[1]);
+ tevent_req_error(req, errno);
+ return tevent_req_post(req, ev);
+ }
+
+ state->pid = fork();
+ if (state->pid == (pid_t)-1) {
+ close(p1[0]);
+ close(p1[1]);
+ close(p2[0]);
+ close(p2[1]);
+ tevent_req_error(req, errno);
+ return tevent_req_post(req, ev);
+ }
+
+ if (state->pid != 0) {
+ /* the parent */
+ close(p1[1]);
+ close(p2[1]);
+ state->fd_stdout = p1[0];
+ state->fd_stderr = p2[0];
+ set_blocking(state->fd_stdout, false);
+ set_blocking(state->fd_stderr, false);
+
+ talloc_set_destructor(state, samba_runcmd_state_destructor);
+
+ state->fde_stdout = tevent_add_fd(ev, state,
+ state->fd_stdout,
+ TEVENT_FD_READ,
+ samba_runcmd_io_handler,
+ req);
+ if (tevent_req_nomem(state->fde_stdout, req)) {
+ close(p1[0]);
+ close(p2[0]);
+ return tevent_req_post(req, ev);
+ }
+ tevent_fd_set_auto_close(state->fde_stdout);
+
+ state->fde_stderr = tevent_add_fd(ev, state,
+ state->fd_stderr,
+ TEVENT_FD_READ,
+ samba_runcmd_io_handler,
+ req);
+ if (tevent_req_nomem(state->fde_stdout, req)) {
+ close(p2[0]);
+ return tevent_req_post(req, ev);
+ }
+ tevent_fd_set_auto_close(state->fde_stderr);
+
+ if (!timeval_is_zero(&endtime)) {
+ tevent_req_set_endtime(req, ev, endtime);
+ }
+
+ return req;
+ }
+
+ /* the child */
+ close(p1[0]);
+ close(p2[0]);
+ close(0);
+ close(1);
+ close(2);
+
+ /* we want to ensure that all of the network sockets we had
+ open are closed */
+ tevent_re_initialise(ev);
+
+ /* setup for logging to go to the parents debug log */
+ open("/dev/null", O_RDONLY); /* for stdin */
+ dup2(p1[1], 1);
+ dup2(p2[1], 2);
+
+ argv = str_list_copy(state, discard_const_p(const char *, argv0));
+ if (!argv) {
+ fprintf(stderr, "Out of memory in child\n");
+ _exit(255);
+ }
+
+ va_start(ap, argv0);
+ while (1) {
+ char *arg = va_arg(ap, char *);
+ if (arg == NULL) break;
+ argv = discard_const_p(char *, str_list_add((const char **)argv, arg));
+ if (!argv) {
+ fprintf(stderr, "Out of memory in child\n");
+ _exit(255);
+ }
+ }
+ va_end(ap);
+
+ ret = execvp(state->arg0, argv);
+ fprintf(stderr, "Failed to exec child - %s\n", strerror(errno));
+ _exit(255);
+ return NULL;
+}
+
+/*
+ handle stdout/stderr from the child
+ */
+static void samba_runcmd_io_handler(struct tevent_context *ev,
+ struct tevent_fd *fde,
+ uint16_t flags,
+ void *private_data)
+{
+ struct tevent_req *req = talloc_get_type_abort(private_data,
+ struct tevent_req);
+ struct samba_runcmd_state *state = tevent_req_data(req,
+ struct samba_runcmd_state);
+ int level;
+ char *p;
+ int n, fd;
+
+ if (fde == state->fde_stdout) {
+ level = state->stdout_log_level;
+ fd = state->fd_stdout;
+ } else {
+ level = state->stderr_log_level;
+ fd = state->fd_stderr;
+ }
+
+ if (!(flags & TEVENT_FD_READ)) {
+ return;
+ }
+
+ n = read(fd, &state->buf[state->buf_used],
+ sizeof(state->buf) - state->buf_used);
+ if (n > 0) {
+ state->buf_used += n;
+ } else if (n == 0) {
+ if (fde == state->fde_stdout) {
+ talloc_free(fde);
+ state->fde_stdout = NULL;
+ }
+ if (fde == state->fde_stderr) {
+ talloc_free(fde);
+ state->fde_stderr = NULL;
+ }
+ if (state->fde_stdout == NULL &&
+ state->fde_stderr == NULL) {
+ int status;
+ /* the child has closed both stdout and
+ * stderr, assume its dead */
+ pid_t pid = waitpid(state->pid, &status, 0);
+ if (pid != state->pid) {
+ if (errno == ECHILD) {
+ /* this happens when the
+ parent has set SIGCHLD to
+ SIG_IGN. In that case we
+ can only get error
+ information for the child
+ via its logging. We should
+ stop using SIG_IGN on
+ SIGCHLD in the standard
+ process model.
+ */
+ tevent_req_done(req);
+ return;
+ }
+ DEBUG(0,("Error in waitpid() for child %s - %s \n",
+ state->arg0, strerror(errno)));
+ if (errno == 0) {
+ errno = ECHILD;
+ }
+ tevent_req_error(req, errno);
+ return;
+ }
+ status = WEXITSTATUS(status);
+ DEBUG(3,("Child %s exited with status %d - %s\n",
+ state->arg0, status, strerror(status)));
+ if (status != 0) {
+ tevent_req_error(req, status);
+ return;
+ }
+
+ tevent_req_done(req);
+ return;
+ }
+ return;
+ }
+
+ while (state->buf_used > 0 &&
+ (p = (char *)memchr(state->buf, '\n', state->buf_used)) != NULL) {
+ int n1 = (p - state->buf)+1;
+ int n2 = n1 - 1;
+ /* swallow \r from child processes */
+ if (n2 > 0 && state->buf[n2-1] == '\r') {
+ n2--;
+ }
+ DEBUG(level,("%s: %*.*s\n", state->arg0, n2, n2, state->buf));
+ memmove(state->buf, p+1, sizeof(state->buf) - n1);
+ state->buf_used -= n1;
+ }
+
+ /* the buffer could have completely filled - unfortunately we have
+ no choice but to dump it out straight away */
+ if (state->buf_used == sizeof(state->buf)) {
+ DEBUG(level,("%s: %*.*s\n",
+ state->arg0, state->buf_used,
+ state->buf_used, state->buf));
+ state->buf_used = 0;
+ }
+}
+
+int samba_runcmd_recv(struct tevent_req *req, int *perrno)
+{
+ if (tevent_req_is_unix_error(req, perrno)) {
+ tevent_req_received(req);
+ return -1;
+ }
+
+ tevent_req_received(req);
+ return 0;
+}
diff --git a/lib/util/util_str.c b/lib/util/util_str.c
index a2c50fd38f..8695266655 100644
--- a/lib/util/util_str.c
+++ b/lib/util/util_str.c
@@ -64,7 +64,7 @@ _PUBLIC_ char *safe_strcpy(char *dest,const char *src, size_t maxlength)
if (len > maxlength) {
DEBUG(0,("ERROR: string overflow by %u (%u - %u) in safe_strcpy [%.50s]\n",
- (uint_t)(len-maxlength), (unsigned)len, (unsigned)maxlength, src));
+ (unsigned int)(len-maxlength), (unsigned)len, (unsigned)maxlength, src));
len = maxlength;
}
@@ -112,17 +112,6 @@ _PUBLIC_ char *safe_strcat(char *dest, const char *src, size_t maxlength)
return dest;
}
-#ifdef VALGRIND
-size_t valgrind_strlen(const char *s)
-{
- size_t count;
- for(count = 0; *s++; count++)
- ;
- return count;
-}
-#endif
-
-
/**
format a string into length-prefixed dotted domain format, as used in NBT
and in some ADS structures
diff --git a/lib/util/util_strlist.c b/lib/util/util_strlist.c
index 8d69eef233..953862da85 100644
--- a/lib/util/util_strlist.c
+++ b/lib/util/util_strlist.c
@@ -20,6 +20,7 @@
#include "includes.h"
#include "system/locale.h"
+#include "lib/util/tsort.h"
#undef strcasecmp
@@ -120,7 +121,7 @@ _PUBLIC_ char **str_list_make(TALLOC_CTX *mem_ctx, const char *string, const cha
/**
* build a null terminated list of strings from an argv-like input string
- * Entries are seperated by spaces and can be enclosed by quotes.
+ * Entries are separated by spaces and can be enclosed by quotes.
* Does NOT support escaping
*/
_PUBLIC_ char **str_list_make_shell(TALLOC_CTX *mem_ctx, const char *string, const char *sep)
@@ -182,7 +183,7 @@ _PUBLIC_ char **str_list_make_shell(TALLOC_CTX *mem_ctx, const char *string, con
/**
* join a list back to one string
*/
-_PUBLIC_ char *str_list_join(TALLOC_CTX *mem_ctx, const char **list, char seperator)
+_PUBLIC_ char *str_list_join(TALLOC_CTX *mem_ctx, const char **list, char separator)
{
char *ret = NULL;
int i;
@@ -193,14 +194,14 @@ _PUBLIC_ char *str_list_join(TALLOC_CTX *mem_ctx, const char **list, char sepera
ret = talloc_strdup(mem_ctx, list[0]);
for (i = 1; list[i]; i++) {
- ret = talloc_asprintf_append_buffer(ret, "%c%s", seperator, list[i]);
+ ret = talloc_asprintf_append_buffer(ret, "%c%s", separator, list[i]);
}
return ret;
}
/** join a list back to one (shell-like) string; entries
- * seperated by spaces, using quotes where necessary */
+ * separated by spaces, using quotes where necessary */
_PUBLIC_ char *str_list_join_shell(TALLOC_CTX *mem_ctx, const char **list, char sep)
{
char *ret = NULL;
@@ -264,7 +265,8 @@ _PUBLIC_ char **str_list_copy(TALLOC_CTX *mem_ctx, const char **list)
/**
Return true if all the elements of the list match exactly.
*/
-_PUBLIC_ bool str_list_equal(const char **list1, const char **list2)
+_PUBLIC_ bool str_list_equal(const char * const *list1,
+ const char * const *list2)
{
int i;
@@ -392,7 +394,7 @@ _PUBLIC_ const char **str_list_unique(const char **list)
}
list2 = (const char **)talloc_memdup(list, list,
sizeof(list[0])*(len+1));
- qsort(list2, len, sizeof(list2[0]), QSORT_CAST list_cmp);
+ TYPESAFE_QSORT(list2, len, list_cmp);
list[0] = list2[0];
for (i=j=1;i<len;i++) {
if (strcmp(list2[i], list[j-1]) != 0) {
@@ -485,3 +487,12 @@ _PUBLIC_ const char **str_list_copy_const(TALLOC_CTX *mem_ctx,
ret[i] = NULL;
return ret;
}
+
+/**
+ * Needed for making an "unconst" list "const"
+ */
+_PUBLIC_ const char **const_str_list(char **list)
+{
+ return (const char **)list;
+}
+
diff --git a/lib/util/util_tdb.c b/lib/util/util_tdb.c
index 46dbf6d324..4a81678808 100644
--- a/lib/util/util_tdb.c
+++ b/lib/util/util_tdb.c
@@ -20,7 +20,7 @@
*/
#include "includes.h"
-#include "tdb.h"
+#include <tdb.h>
#include "../lib/util/util_tdb.h"
/* these are little tdb utility functions that are meant to make
@@ -133,7 +133,7 @@ int32_t tdb_fetch_int32(struct tdb_context *tdb, const char *keystr)
}
/****************************************************************************
- Store a int32_t value by an arbitary blob key, return 0 on success, -1 on failure.
+ Store a int32_t value by an arbitrary blob key, return 0 on success, -1 on failure.
Input is int32_t in native byte order. Output in tdb is in little-endian.
****************************************************************************/
@@ -190,7 +190,7 @@ bool tdb_fetch_uint32(struct tdb_context *tdb, const char *keystr, uint32_t *val
}
/****************************************************************************
- Store a uint32_t value by an arbitary blob key, return 0 on success, -1 on failure.
+ Store a uint32_t value by an arbitrary blob key, return 0 on success, -1 on failure.
Input is uint32_t in native byte order. Output in tdb is in little-endian.
****************************************************************************/
diff --git a/lib/util/util_tdb.h b/lib/util/util_tdb.h
index 79c46714f9..c11a347e07 100644
--- a/lib/util/util_tdb.h
+++ b/lib/util/util_tdb.h
@@ -1,7 +1,6 @@
#ifndef _____LIB_UTIL_UTIL_TDB_H__
#define _____LIB_UTIL_UTIL_TDB_H__
-
/***************************************************************
Make a TDB_DATA and keep the const warning in one place
****************************************************************/
@@ -43,7 +42,7 @@ int32_t tdb_fetch_int32_byblob(struct tdb_context *tdb, TDB_DATA key);
int32_t tdb_fetch_int32(struct tdb_context *tdb, const char *keystr);
/****************************************************************************
- Store a int32_t value by an arbitary blob key, return 0 on success, -1 on failure.
+ Store a int32_t value by an arbitrary blob key, return 0 on success, -1 on failure.
Input is int32_t in native byte order. Output in tdb is in little-endian.
****************************************************************************/
int tdb_store_int32_byblob(struct tdb_context *tdb, TDB_DATA key, int32_t v);
@@ -67,7 +66,7 @@ bool tdb_fetch_uint32_byblob(struct tdb_context *tdb, TDB_DATA key, uint32_t *va
bool tdb_fetch_uint32(struct tdb_context *tdb, const char *keystr, uint32_t *value);
/****************************************************************************
- Store a uint32_t value by an arbitary blob key, return 0 on success, -1 on failure.
+ Store a uint32_t value by an arbitrary blob key, return 0 on success, -1 on failure.
Input is uint32_t in native byte order. Output in tdb is in little-endian.
****************************************************************************/
bool tdb_store_uint32_byblob(struct tdb_context *tdb, TDB_DATA key, uint32_t value);
diff --git a/lib/util/wscript_build b/lib/util/wscript_build
new file mode 100755
index 0000000000..b68791f88f
--- /dev/null
+++ b/lib/util/wscript_build
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+# as we move files into common between samba-util and samba-util3, move them here.
+# Both samba-util and samba-util3 depend on this private library
+bld.SAMBA_LIBRARY('samba-util-common',
+ source='''talloc_stack.c smb_threads.c xfile.c
+ util_file.c time.c rbtree.c rfc1738.c select.c
+ genrand.c fsusage.c blocking.c become_daemon.c
+ signal.c system.c params.c util.c util_id.c util_net.c
+ util_strlist.c idtree.c debug.c''',
+ public_deps='talloc pthread LIBCRYPTO',
+ # until we get all the dependencies in this library in common
+ # we need to allow this library to be built with unresolved symbols
+ allow_undefined_symbols=True,
+ local_include=False,
+ public_headers='debug.h',
+ header_path= [('*', 'util') ],
+ private_library=True
+ )
+
+if bld.env._SAMBA_BUILD_ == 4:
+ bld.SAMBA_LIBRARY('samba-util',
+ source='''dprintf.c fault.c data_blob.c
+ ms_fnmatch.c parmlist.c substitute.c util_str.c
+ ''',
+ deps='samba-util-common',
+ public_deps='talloc CHARSET execinfo uid_wrapper',
+ public_headers='attr.h byteorder.h data_blob.h memory.h safe_string.h time.h talloc_stack.h xfile.h dlinklist.h util.h',
+ header_path= [ ('dlinklist.h util.h', '.'), ('*', 'util') ],
+ local_include=False,
+ vnum='0.0.1',
+ pc_files='samba-util.pc'
+ )
+
+
+bld.SAMBA_SUBSYSTEM('ASN1_UTIL',
+ source='asn1.c',
+ deps='talloc',
+ local_include=False)
+
+
+bld.SAMBA_SUBSYSTEM('UNIX_PRIVS',
+ source='unix_privs.c',
+ autoproto='unix_privs.h',
+ deps='replace talloc',
+ local_include=False,
+ )
+
+
+bld.SAMBA_LIBRARY('wrap_xattr',
+ source='wrap_xattr.c',
+ public_deps='attr',
+ deps='talloc',
+ local_include=False,
+ private_library=True
+ )
+
+
+bld.SAMBA_SUBSYSTEM('UTIL_TDB',
+ source='util_tdb.c',
+ local_include=False,
+ public_deps='tdb talloc'
+ )
+
+bld.SAMBA_SUBSYSTEM('UTIL_TEVENT',
+ source='tevent_unix.c tevent_ntstatus.c tevent_werror.c',
+ local_include=False,
+ public_deps='tevent',
+ public_headers='tevent_ntstatus.h tevent_unix.h tevent_werror.h',
+ header_path=[ ('*', 'util') ],
+ )
+
+
+if bld.env._SAMBA_BUILD_ == 4:
+ bld.SAMBA_SUBSYSTEM('UTIL_LDB',
+ source='util_ldb.c',
+ local_include=False,
+ public_deps='ldb',
+ public_headers='util_ldb.h'
+ )
+
+
+bld.SAMBA_SUBSYSTEM('UTIL_RUNCMD',
+ source='util_runcmd.c',
+ local_include=False,
+ public_deps='tevent'
+ )
+
+bld.SAMBA_SUBSYSTEM('UTIL_PW',
+ source='util_pw.c',
+ local_include=False,
+ public_deps='talloc'
+ )
+
+
+bld.SAMBA_LIBRARY('tdb-wrap',
+ source='tdb_wrap.c',
+ deps='tdb talloc samba-util',
+ public_headers='tdb_wrap.h',
+ private_library=True,
+ local_include=False
+ )
+
diff --git a/lib/util/wscript_configure b/lib/util/wscript_configure
new file mode 100644
index 0000000000..fea8ddf7ce
--- /dev/null
+++ b/lib/util/wscript_configure
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+
+# backtrace could be in libexecinfo or in libc
+conf.CHECK_FUNCS_IN('backtrace', 'execinfo', checklibc=True, headers='execinfo.h')
+
+conf.CHECK_FUNCS('sigprocmask sigblock sigaction')
+
+xattr_headers='sys/attributes.h attr/xattr.h sys/xattr.h'
+conf.CHECK_FUNCS_IN('flistxattr', 'attr', checklibc=True, headers=xattr_headers)
+
+conf.CHECK_CODE('getxattr(NULL, NULL, NULL, 0, 0, 0)',
+ headers=xattr_headers, local_include=False,
+ define='XATTR_ADDITIONAL_OPTIONS',
+ msg='Checking for darwin xattr api')
+
+if conf.CONFIG_SET('HAVE_FLISTXATTR'):
+ conf.DEFINE('HAVE_XATTR_SUPPORT', 1)
+
+
+
+conf.CHECK_STRUCTURE_MEMBER('struct statvfs', 'f_frsize', define='HAVE_FRSIZE', headers='sys/statvfs.h')
+
+# all the different ways of doing statfs
+statfs_types = [
+ ( 'STAT_STATVFS64',
+ 'Linux statvfs64',
+ 'struct statvfs64 fsd; exit(statvfs64 (".", &fsd))',
+ 'sys/statvfs.h' ),
+
+ ( 'STAT_STATVFS',
+ 'statvfs (SVR4)',
+ 'struct statvfs fsd; exit(statvfs(0, &fsd))',
+ 'sys/statvfs.h' ),
+
+ ( 'STAT_STATFS3_OSF1',
+ '3-argument statfs function (DEC OSF/1)',
+ 'struct statfs fsd; fsd.f_fsize = 0; exit(statfs(".", &fsd, sizeof(struct statfs)))'
+ 'sys/param.h sys/mount.h' ),
+
+ ( 'STAT_STATFS2_BSIZE',
+ 'two-argument statfs with statfs.bsize',
+ 'struct statfs fsd; fsd.f_bsize = 0; exit(statfs(".", &fsd))',
+ 'sys/param.h sys/mount.h sys/vfs.h' ),
+
+ ( 'STAT_STATFS4',
+ 'four-argument statfs (AIX-3.2.5, SVR3)',
+ 'struct statfs fsd; exit(statfs(".", &fsd, sizeof fsd, 0))',
+ 'sys/statfs.h' ),
+
+ ( 'STAT_STATFS2_FSIZE',
+ 'two-argument statfs with statfs.fsize',
+ 'struct statfs fsd; fsd.f_fsize = 0; exit(statfs(".", &fsd))'
+ 'sys/param.h sys/mount.h' ),
+
+ ( 'STAT_STATFS2_FS_DATA',
+ 'two-argument statfs with struct fs_data (Ultrix)',
+ 'struct fs_data fsd; exit(statfs(".", &fsd) != 1)',
+ 'sys/param.h sys/mount.h sys/fs_types.h' )
+]
+
+found_statfs=False
+for (define, msg, code, headers) in statfs_types:
+ if conf.CHECK_CODE(code,
+ define=define,
+ headers=headers,
+ msg='Checking for %s' % msg,
+ local_include=False):
+ found_statfs=True
+ break
+
+if not found_statfs:
+ print("FATAL: Failed to find a statfs method")
+ raise
+
+
+conf.CHECK_CODE('struct statvfs buf; buf.f_fsid = 0',
+ define='HAVE_FSID_INT',
+ msg='Checking if f_fsid is an integer',
+ execute=False,
+ local_include=False,
+ headers='sys/statvfs.h')
+
+# fsusage.c assumes that statvfs has an f_frsize entry. Some weird
+# systems use f_bsize.
+conf.CHECK_CODE('struct statvfs buf; buf.f_frsize = 0',
+ define='HAVE_FRSIZE',
+ msg='Checking that statvfs.f_frsize works',
+ headers='sys/statvfs.h',
+ execute=False,
+ local_include=False)
+
+# Some systems use f_flag in struct statvfs while others use f_flags
+conf.CHECK_CODE('struct statvfs buf; buf.f_flag = 0',
+ define='HAVE_STATVFS_F_FLAG',
+ msg='Checking whether statvfs.f_flag exists',
+ headers='sys/statvfs.h',
+ local_include=False,
+ execute=False)
+
+conf.CHECK_CODE('struct statvfs buf; buf.f_flags = 0',
+ define='HAVE_STATVFS_F_FLAGS',
+ msg='Checking whether statvfs.f_flags exists',
+ headers='sys/statvfs.h',
+ local_include=False,
+ execute=False)
diff --git a/lib/util/xattr.m4 b/lib/util/xattr.m4
deleted file mode 100644
index 497809a47a..0000000000
--- a/lib/util/xattr.m4
+++ /dev/null
@@ -1,32 +0,0 @@
-dnl ############################################
-dnl use flistxattr as the key function for having
-dnl sufficient xattr support for posix xattr backend
-AC_CHECK_HEADERS(sys/attributes.h attr/xattr.h sys/xattr.h)
-AC_SEARCH_LIBS_EXT(flistxattr, [attr], XATTR_LIBS)
-AC_CHECK_FUNC_EXT(flistxattr, $XATTR_LIBS)
-SMB_EXT_LIB(XATTR,[${XATTR_LIBS}],[${XATTR_CFLAGS}],[${XATTR_CPPFLAGS}],[${XATTR_LDFLAGS}])
-if test x"$ac_cv_func_ext_flistxattr" = x"yes"; then
- AC_CACHE_CHECK([whether xattr interface takes additional options], smb_attr_cv_xattr_add_opt,
- [old_LIBS=$LIBS
- LIBS="$LIBS $XATTRLIBS"
- AC_TRY_COMPILE([
- #include <sys/types.h>
- #if HAVE_ATTR_XATTR_H
- #include <attr/xattr.h>
- #elif HAVE_SYS_XATTR_H
- #include <sys/xattr.h>
- #endif
- #ifndef NULL
- #define NULL ((void *)0)
- #endif
- ],[
- getxattr(NULL, NULL, NULL, 0, 0, 0);
- ],smb_attr_cv_xattr_add_opt=yes,smb_attr_cv_xattr_add_opt=no)
- LIBS=$old_LIBS])
- if test x"$smb_attr_cv_xattr_add_opt" = x"yes"; then
- AC_DEFINE(XATTR_ADDITIONAL_OPTIONS, 1, [xattr functions have additional options])
- fi
- AC_DEFINE(HAVE_XATTR_SUPPORT,1,[Whether we have xattr support])
- SMB_ENABLE(XATTR,YES)
-fi
-
diff --git a/lib/wscript_build b/lib/wscript_build
new file mode 100644
index 0000000000..9ce832a2fd
--- /dev/null
+++ b/lib/wscript_build
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+import os, Options
+
+# work out what python external libraries we need to install
+external_libs = {
+ "dns.resolver": "dnspython/dns",
+ "subunit": "subunit/python/subunit",
+ "testtools": "testtools/testtools"}
+
+list = []
+
+for module, package in external_libs.items():
+ try:
+ __import__(module)
+ except ImportError:
+ list.append(package)
+
+for e in list:
+ bld.INSTALL_WILDCARD('${PYTHONARCHDIR}/samba/external', e + '/**/*', flat=False,
+ exclude='*.pyc', trim_path=os.path.dirname(e))
+
+bld.SAMBA_GENERATOR('external_init_py',
+ rule='touch ${TGT}',
+ target='empty_file')
+
+bld.INSTALL_FILES('${PYTHONARCHDIR}/samba/external', 'empty_file', destname='__init__.py')
+
+# a grouping library for event and socket related subsystems
+bld.SAMBA_LIBRARY('samba-sockets',
+ source=[],
+ private_library=True,
+ grouping_library=True,
+ deps='LIBTSOCKET samba_socket UTIL_TEVENT')
diff --git a/lib/zlib/contrib/ada/zlib-streams.ads b/lib/zlib/contrib/ada/zlib-streams.ads
index f0193c6bae..f3352d3d23 100644
--- a/lib/zlib/contrib/ada/zlib-streams.ads
+++ b/lib/zlib/contrib/ada/zlib-streams.ads
@@ -31,7 +31,7 @@ package ZLib.Streams is
Mode : in Flush_Mode := Sync_Flush);
-- Flush the written data to the back stream,
-- all data placed to the compressor is flushing to the Back stream.
- -- Should not be used untill necessary, becouse it is decreasing
+ -- Should not be used untill necessary, because it is decreasing
-- compression.
function Read_Total_In (Stream : in Stream_Type) return Count;
@@ -97,13 +97,13 @@ private
Rest_Last : Stream_Element_Offset;
-- Buffer for Read operation.
-- We need to have this buffer in the record
- -- becouse not all read data from back stream
+ -- because not all read data from back stream
-- could be processed during the read operation.
Buffer_Size : Stream_Element_Offset;
-- Buffer size for write operation.
-- We do not need to have this buffer
- -- in the record becouse all data could be
+ -- in the record because all data could be
-- processed in the write operation.
Back : Stream_Access;
diff --git a/lib/zlib/contrib/ada/zlib-thin.ads b/lib/zlib/contrib/ada/zlib-thin.ads
index d4407eb800..810173cff8 100644
--- a/lib/zlib/contrib/ada/zlib-thin.ads
+++ b/lib/zlib/contrib/ada/zlib-thin.ads
@@ -436,7 +436,7 @@ private
pragma Import (C, inflateBackInit, "inflateBackInit_");
- -- I stopped binding the inflateBack routines, becouse realize that
+ -- I stopped binding the inflateBack routines, because realize that
-- it does not support zlib and gzip headers for now, and have no
-- symmetric deflateBack routines.
-- ZLib-Ada is symmetric regarding deflate/inflate data transformation
diff --git a/lib/zlib/contrib/minizip/miniunz.c b/lib/zlib/contrib/minizip/miniunz.c
index f599938884..cf9a03b050 100644
--- a/lib/zlib/contrib/minizip/miniunz.c
+++ b/lib/zlib/contrib/minizip/miniunz.c
@@ -536,7 +536,7 @@ int main(argc,argv)
# endif
strncpy(filename_try, zipfilename,MAXFILENAME-1);
- /* strncpy doesnt append the trailing NULL, of the string is too long. */
+ /* strncpy doesn't append the trailing NULL, if the string is too long. */
filename_try[ MAXFILENAME ] = '\0';
# ifdef USEWIN32IOAPI
diff --git a/lib/zlib/contrib/minizip/minizip.c b/lib/zlib/contrib/minizip/minizip.c
index f2dfecd8b1..2aae64c0a0 100644
--- a/lib/zlib/contrib/minizip/minizip.c
+++ b/lib/zlib/contrib/minizip/minizip.c
@@ -77,7 +77,7 @@ uLong filetime(f, tmzip, dt)
len = MAXFILENAME;
strncpy(name, f,MAXFILENAME-1);
- /* strncpy doesnt append the trailing NULL, of the string is too long. */
+ /* strncpy doesn't append the trailing NULL, if the string is too long. */
name[ MAXFILENAME ] = '\0';
if (name[len - 1] == '/')
@@ -253,7 +253,7 @@ int main(argc,argv)
zipok = 1 ;
strncpy(filename_try, argv[zipfilenamearg],MAXFILENAME-1);
- /* strncpy doesnt append the trailing NULL, of the string is too long. */
+ /* strncpy doesn't append the trailing NULL, if the string is too long. */
filename_try[ MAXFILENAME ] = '\0';
len=(int)strlen(filename_try);
diff --git a/lib/zlib/contrib/minizip/unzip.c b/lib/zlib/contrib/minizip/unzip.c
index 9ad4766d8d..fe2f1f0a96 100644
--- a/lib/zlib/contrib/minizip/unzip.c
+++ b/lib/zlib/contrib/minizip/unzip.c
@@ -159,7 +159,7 @@ typedef struct
/* ===========================================================================
Read a byte from a gz_stream; update next_in and avail_in. Return EOF
for end of file.
- IN assertion: the stream s has been sucessfully opened for reading.
+ IN assertion: the stream s has been successfully opened for reading.
*/
@@ -295,10 +295,10 @@ local int strcmpcasenosensitive_internal (fileName1,fileName2)
/*
Compare two filename (fileName1,fileName2).
- If iCaseSenisivity = 1, comparision is case sensitivity (like strcmp)
- If iCaseSenisivity = 2, comparision is not case sensitivity (like strcmpi
+ If iCaseSensitivity = 1, comparison is case sensitive (like strcmp)
+ If iCaseSensitivity = 2, comparison is not case sensitive (like strcmpi
or strcasecmp)
- If iCaseSenisivity = 0, case sensitivity is defaut of your operating system
+ If iCaseSensitivity = 0, case sensitivity is default of your operating system
(like 1 on Unix, 2 on Windows)
*/
diff --git a/lib/zlib/contrib/minizip/unzip.h b/lib/zlib/contrib/minizip/unzip.h
index b247937c80..6a7e155d66 100644
--- a/lib/zlib/contrib/minizip/unzip.h
+++ b/lib/zlib/contrib/minizip/unzip.h
@@ -124,10 +124,10 @@ extern int ZEXPORT unzStringFileNameCompare OF ((const char* fileName1,
int iCaseSensitivity));
/*
Compare two filename (fileName1,fileName2).
- If iCaseSenisivity = 1, comparision is case sensitivity (like strcmp)
- If iCaseSenisivity = 2, comparision is not case sensitivity (like strcmpi
+ If iCaseSensitivity = 1, comparison is case sensitive (like strcmp)
+ If iCaseSensitivity = 2, comparison is not case sensitive (like strcmpi
or strcasecmp)
- If iCaseSenisivity = 0, case sensitivity is defaut of your operating system
+ If iCaseSensitivity = 0, case sensitivity is default of your operating system
(like 1 on Unix, 2 on Windows)
*/
diff --git a/lib/zlib/contrib/minizip/zip.c b/lib/zlib/contrib/minizip/zip.c
index 7fbe002743..bb678ee133 100644
--- a/lib/zlib/contrib/minizip/zip.c
+++ b/lib/zlib/contrib/minizip/zip.c
@@ -99,7 +99,7 @@ typedef struct linkedlist_datablock_internal_s
struct linkedlist_datablock_internal_s* next_datablock;
uLong avail_in_this_block;
uLong filled_in_this_block;
- uLong unused; /* for future use and alignement */
+ uLong unused; /* for future use and alignment */
unsigned char data[SIZEDATA_INDATABLOCK];
} linkedlist_datablock_internal;
@@ -117,12 +117,12 @@ typedef struct
uInt pos_in_buffered_data; /* last written byte in buffered_data */
uLong pos_local_header; /* offset of the local header of the file
- currenty writing */
+ currently writing */
char* central_header; /* central header data for the current file */
uLong size_centralheader; /* size of the central header for cur file */
uLong flag; /* flag of the file currently writing */
- int method; /* compression method of file currenty wr.*/
+ int method; /* compression method of file currently wr.*/
int raw; /* 1 for directly writing raw data */
Byte buffered_data[Z_BUFSIZE];/* buffer contain compressed data to be writ*/
uLong dosDate;
diff --git a/lib/zlib/contrib/puff/puff.c b/lib/zlib/contrib/puff/puff.c
index ce0cc405e3..1fbcc18f29 100644
--- a/lib/zlib/contrib/puff/puff.c
+++ b/lib/zlib/contrib/puff/puff.c
@@ -585,7 +585,7 @@ local int fixed(struct state *s)
* are themselves compressed using Huffman codes and run-length encoding. In
* the list of code lengths, a 0 symbol means no code, a 1..15 symbol means
* that length, and the symbols 16, 17, and 18 are run-length instructions.
- * Each of 16, 17, and 18 are follwed by extra bits to define the length of
+ * Each of 16, 17, and 18 are followed by extra bits to define the length of
* the run. 16 copies the last length 3 to 6 times. 17 represents 3 to 10
* zero lengths, and 18 represents 11 to 138 zero lengths. Unused symbols
* are common, hence the special coding for zero lengths.
diff --git a/lib/zlib/deflate.h b/lib/zlib/deflate.h
index 05a5ab3a2c..b16f7a7728 100644
--- a/lib/zlib/deflate.h
+++ b/lib/zlib/deflate.h
@@ -188,7 +188,7 @@ typedef struct internal_state {
int nice_match; /* Stop searching when current match exceeds this */
/* used by trees.c: */
- /* Didn't use ct_data typedef below to supress compiler warning */
+ /* Didn't use ct_data typedef below to suppress compiler warning */
struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
diff --git a/lib/zlib/examples/fitblk.c b/lib/zlib/examples/fitblk.c
index c61de5c996..966702dea9 100644
--- a/lib/zlib/examples/fitblk.c
+++ b/lib/zlib/examples/fitblk.c
@@ -17,7 +17,7 @@
data in order to determine how much of that input will compress to
nearly the requested output block size. The first pass generates
enough deflate blocks to produce output to fill the requested
- output size plus a specfied excess amount (see the EXCESS define
+ output size plus a specified excess amount (see the EXCESS define
below). The last deflate block may go quite a bit past that, but
is discarded. The second pass decompresses and recompresses just
the compressed data that fit in the requested plus excess sized
diff --git a/lib/zlib/examples/gun.c b/lib/zlib/examples/gun.c
index bfec590a00..dce01aef0f 100644
--- a/lib/zlib/examples/gun.c
+++ b/lib/zlib/examples/gun.c
@@ -42,7 +42,7 @@
end-of-file, they cannot be concantenated. If a Unix compress stream is
encountered in an input file, it is the last stream in that file.
- Like gunzip and uncompress, the file attributes of the orignal compressed
+ Like gunzip and uncompress, the file attributes of the original compressed
file are maintained in the final uncompressed file, to the extent that the
user permissions allow it.
diff --git a/lib/zlib/gzio.c b/lib/zlib/gzio.c
index 0b51297936..55521a32ef 100644
--- a/lib/zlib/gzio.c
+++ b/lib/zlib/gzio.c
@@ -254,7 +254,7 @@ int ZEXPORT gzsetparams (file, level, strategy)
/* ===========================================================================
Read a byte from a gz_stream; update next_in and avail_in. Return EOF
for end of file.
- IN assertion: the stream s has been sucessfully opened for reading.
+ IN assertion: the stream s has been successfully opened for reading.
*/
local int get_byte(s)
gz_stream *s;
@@ -279,7 +279,7 @@ local int get_byte(s)
mode to transparent if the gzip magic header is not present; set s->err
to Z_DATA_ERROR if the magic header is present but the rest of the header
is incorrect.
- IN assertion: the stream s has already been created sucessfully;
+ IN assertion: the stream s has already been created successfully;
s->stream.avail_in is zero for the first time, but may be non-zero
for concatenated .gz files.
*/
diff --git a/lib/zlib/wscript b/lib/zlib/wscript
new file mode 100644
index 0000000000..a091de6b5f
--- /dev/null
+++ b/lib/zlib/wscript
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+def configure(conf):
+ conf.CHECK_HEADERS('zlib.h')
+ conf.CHECK_FUNCS_IN('zlibVersion', 'z')
+
+ conf.CHECK_CODE('''
+ #if (ZLIB_VERNUM >= 0x1230)
+ #else
+ #error "ZLIB_VERNUM < 0x1230"
+ #endif
+ ''',
+ headers='zlib.h',
+ local_include=False,
+ msg='Checking for ZLIB_VERNUM >= 0x1230',
+ define='HAVE_ZLIB')
+
+ # If we don't do this then we will receive an error that lib 'z'
+ # is already declared as a system lib (for the cases where zlibVersion
+ # is defined
+ if not conf.env['HAVE_ZLIB']:
+ conf.LOCAL_CACHE_SET('TARGET_TYPE', 'z', 'EMPTY')
+
+def build(bld):
+ if not bld.CONFIG_SET('HAVE_ZLIB'):
+ bld.SAMBA_LIBRARY('z',
+ private_library=True,
+ deps='replace',
+ source='''adler32.c compress.c crc32.c gzio.c
+ uncompr.c deflate.c trees.c zutil.c
+ inflate.c infback.c inftrees.c inffast.c''')
diff --git a/lib/zlib/zlib.h b/lib/zlib/zlib.h
index a660031e94..aa7f0825f4 100644
--- a/lib/zlib/zlib.h
+++ b/lib/zlib/zlib.h
@@ -885,7 +885,7 @@ ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits,
See inflateBack() for the usage of these routines.
inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of
- the paramaters are invalid, Z_MEM_ERROR if the internal state could not
+ the parameters are invalid, Z_MEM_ERROR if the internal state could not
be allocated, or Z_VERSION_ERROR if the version of the library does not
match the version of the header file.
*/