BIND 10 trac510, updated. 8cb640551a6fc4a101bdc4c6469630d0cbeb7076 [510] whitespace style fix
BIND 10 source code commits
bind10-changes at lists.isc.org
Wed Dec 14 09:34:31 UTC 2011
The branch, trac510 has been updated
via 8cb640551a6fc4a101bdc4c6469630d0cbeb7076 (commit)
via ff81c97c2ad006e5e28f33e67d3a8bde1dccc523 (commit)
via afddaf4c5718c2a0cc31f2eee79c4e0cc625499f (commit)
via 2f4433fb15900481e4fb976a93692e68084c3925 (commit)
via 497d53517cb412c6d5b90779bd06070e58f90df1 (commit)
via 1ae5ff8616b9a2c97bfe8c3a9f5f7c65fcd7fc00 (commit)
via 1655bed624866a766311a01214597db01b4c7cec (commit)
via 1615a521d61a0f47a627f92cc4f95d982a04f8d1 (commit)
via e26901a19766452353b43e81ddc993b26b2b8518 (commit)
via eaa492e0bfa0855b01ec86ed0885d90166d32c7e (commit)
via 95b7c29f155b8fa21cf85c6d3afbe3f510db83d1 (commit)
via a677ae91c9d7a5bd2ef8574f84e9f33f90c45e44 (commit)
via 9b2b249d23576c999a65d8c338e008cabe45f0c9 (commit)
via 45274924f3f25b70547809eeda5dbcbe230029b5 (commit)
via a6a35e9970e4937924c1e33c01f6bd7eaf1ed994 (commit)
via c8dc421c5cad0f3296174b44f8deccfb69dec43f (commit)
via 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3 (commit)
via 37a11387baa321daec8311fc66d5d83e567886bd (commit)
via cee6a7af5f4f116ea89ecfde9a235dfe727bf207 (commit)
via a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2 (commit)
via 7f5229256540479ef63d707a14d194c80099fab3 (commit)
via 697341a7b1e73b508d8142b8afc07767ea46f3f3 (commit)
via c0be7a6c0e12c78a7e02a2a3b3b259a3382b52bf (commit)
via e82cf73f21e06b1a301ff120fb5f73203d343111 (commit)
via 777d6f3037738200a8a8426a0b957b18011d460a (commit)
via b705708aafbe566facc578a02b2f1cce44dff86f (commit)
via a56e72ce1bbc9d016a7ebd83eaba0aadbf2b41aa (commit)
via 318dceaa39aa30ee9d394e1e096d4891f3bee490 (commit)
via 25ac24557f8789f516ac7ffa1db831701ebf3c37 (commit)
via 2a6b5e55caf422997b893e319db83855fe1709b1 (commit)
via 83ecce54ed1ef5215f722e8339ae4a43f50ada5c (commit)
via 0e3736c7c3e882ba3f0616b9d0877792edd73317 (commit)
via c37c596c844d35dc25eb729a99666948c6af8a6b (commit)
via 0c8c0f808a9ddc3a27a87f55964a965fb30f18ef (commit)
via df44752c7d69b865fa346215c1ec428153d5a3ca (commit)
via e30dfe31fbc6b290e63ffe4666dc39ebbd0d23aa (commit)
via 52802deb18632b028815d25f19976d0576d76e1f (commit)
via 4220ef5ac9c8fdd4b506b3579f0e5eec98e3f3d8 (commit)
via 56be59fbcdc0ba54ccea0d09d49ef28dace3d65d (commit)
via 1341209064bc7afd8e720e3b12060239c368bcdd (commit)
via 86a4ce45115dab4d3978c36dd2dbe07edcac02ac (commit)
via 14a64484a3159a142f1b83a9830ac389a52f6a35 (commit)
via 146203239c50d2a00069986944d4ec168f17b31f (commit)
via 7515af0e50af796b0b936e9a966eea5bff82dfe9 (commit)
via 292db243f0d4e4036b265da8b9e5c01db2929f58 (commit)
via 982fc000a3064515ac30f5457b71802577fec90d (commit)
via eb08c5acb5deafa28ae37032910cce9a385d2030 (commit)
via b2186bf05d8d9858b0b58cf9dca5b215afe447f5 (commit)
via 88f94cf8e025558b14091af5050e2ce424237ea0 (commit)
via 4db174c8f096e2b54b3a5d384a6cffc25b9d9024 (commit)
via 3bb1cc7d961930edc38d9f8b34d0cccd3d69dd96 (commit)
via 6583a47dde1b851aee99de3c38c6331a22ede260 (commit)
via 99033305fa90310135d37118a0d47df3f2223770 (commit)
via b7bbe25fdf0d0c168c24c904c82c7e04fc269bba (commit)
via 89601ee181490433adf058011d920befc5e38061 (commit)
via 0ae049de728692224b087e95a645f45f4a69cb68 (commit)
via 9f792ee32ba42a44291277d0577196e03a929738 (commit)
via d873ad02831294bdc9c23ebf3178fa0532f8b8c0 (commit)
via 660cf410c0fb41587b977df992879f5dff934c19 (commit)
via 2e58ef22ff8fb2b2ef21d481205b4cf197aa1092 (commit)
via b09fdcc6b45d4580b138cc9f59bfc051bd6ad360 (commit)
via 4d97ef5cdb4833a7a36b6679c16338505b07d4e3 (commit)
via 424f32864efcd2c647c6e5303125b6a8afb421ea (commit)
via f27e984224d7dbb033c09205c2dd8e1e6579408f (commit)
via e1a683babf04b28b75599c797a00b8a277191844 (commit)
via 40cfd32c280020af33a28c1501380a17ce604175 (commit)
via e7019de8a8ec9ff562557c9fc2a0bd28a4c64829 (commit)
via a60c96464c0b959492a13b10767a7d9352be060e (commit)
via 0c0e8938a3ece603eddd70e3ebba94b03eeeeb92 (commit)
via 7715c727d25d6430cbdbd82e40bdb7b3fa2ea843 (commit)
via 073cae7e8f0c72040eef17f49cce5593023237bc (commit)
via a26b979adb54baabdf939ed1a7852b2ee9b8b93c (commit)
via eb703a7e5b3749ca95a43c7582c9cccde564f123 (commit)
via cbe600decbef4db82cb3b070e03b5702540af4aa (commit)
via c44075a40764cbb5dc37e9dd3666ce46bb8c7955 (commit)
via 7ac21664665acee54a2a57331a2afc2a0c0a3530 (commit)
via 96a32f0ab063cbcd98fae0d5a87bc286bb8a7498 (commit)
via 7019db2a44f39897486eea618f4447c37dbabcf8 (commit)
via 024808d2a40b695f6c7191398c5a3d2c39c26736 (commit)
via 9df50bec4e691dc8cb724547659fb71caad656ab (commit)
via 3a206ab523d4612676362274ae8c30def53ac15e (commit)
via 15dffb02f179974c6726f16aff586c49eec8c7ca (commit)
via ad90525811869e2ff6fb5e24d38bf19e5743767e (commit)
via 936d5cad35355e1785550f7150f90e688166f448 (commit)
via 0737908f9e7cb615f80354131dca4df1a8c0bff6 (commit)
via d6d7a352b0b0df685f285cd413568b0e475339da (commit)
via 82fdeb65eba233a63b4425c7c5528a6257b91703 (commit)
via 5832af30821d4d4d077946b063b8f53056fa7e60 (commit)
via 1d24818f927edb1840f673f1ba67d7a45d9ef1c2 (commit)
via d6d90c1976110dcfb94cba2c56086960054cdeae (commit)
via 20a6000a9f69476797477ca7af5fd83b8e236909 (commit)
via 1485c897a9e2c71ed2a33c8972c116a5f7e8e078 (commit)
via b7ac17da5405582098e98ed22bf122fe87658923 (commit)
via b8d14d2e45ee719e4e33adbecddafb4ae3aa4df1 (commit)
via 567260cde6e21499ad4bf47789d538a929df5552 (commit)
via b092df6f17e5d8f8f07e726fc4006e346417d49f (commit)
via d9b851b96c9fb3f56c4fe3a626f5c2b05bbb7a5f (commit)
via 614e0ed92f8e6fb5f66277c7fbec8af6149cfa39 (commit)
via 9300ad5a1030e50ab76ff8a6f87b4d91d2d2b124 (commit)
via afee8bc035223c87c385a6855ab210b4e55cc161 (commit)
via 34de4dab534c2ccc735f6c815aa0459553aa1153 (commit)
via 717946a088b5c3fa287258e1ebc3fa6dd9093702 (commit)
via 936511f6e114f26bf86497466a7f61ef467bf5ad (commit)
via 7f573f432cfca90d2f9409829f14b3645083b9af (commit)
via b586771730eb1d22330e3a4f46c6c596d6ab57da (commit)
via 137abb738558ae9602f834890f477a924b520001 (commit)
via 14c51c664a98beb4867728d528190aff335e6f27 (commit)
via 6a4afc2165e4e6e692e71cb6795201c9df5afee2 (commit)
via 047ea7f6cfa2677865dcf441726dcc3e082608a9 (commit)
via b8e895092634bc661baf7fa043fffdba511f8256 (commit)
via 1bad76a6ab0ece059d8a587870f1da84510eccc5 (commit)
via 51f3cb54492ef02e4951afb15a9c40ba0cdff4ce (commit)
via 51c9278d000daee776c5e12456d8c4ea60ff5f21 (commit)
via 0dedcdb128646fdbf37be96f91076adda2f37c95 (commit)
via fc6a79af0d625ca18a2cdc3df91e86e8c1e02f9c (commit)
via b4471621912e7518088b106d829a8431a6c4ea97 (commit)
via c05dc7099e4ed686ad1af573e6795a751d020025 (commit)
via 1beaacf4d924392323bd08a0c7aed65e9324e092 (commit)
via 84d0d090f5452410a58d8f8503c61d81ec85f2f4 (commit)
via 35015af5525965fdb421a856ffb01fb1ab8a7ad4 (commit)
via 25b7595f3f1b158bd6278cea3c4dd0d6eeca8a2f (commit)
via cf8596b58bd57f4ebfff7d83d24294eaed38f7bf (commit)
via b77f5d1f891daf4c24024b44db6a7502e2728d2a (commit)
via 0337c552ff717ee890ae784451668ce3d789650f (commit)
via 63f318aa4405840d77c5e7afcf7c3437c5af241b (commit)
via 2e12dd60da03170462efad07173036f973813bd8 (commit)
via 5a2d958780a4a671cd8df9080d99ff95dd16772d (commit)
via 075e3787986676c7491f157931b6f7da1773db0a (commit)
via 7d2f07481169780071bf564223a20a219b550385 (commit)
via d5e189cf1573446503a4fafa3e909db60eb04623 (commit)
via 0b6937d0e075e1192c41891ae138532f2c733b47 (commit)
via 5371b694b6cc564c3f1899a935769dd024f38e56 (commit)
via 837002896937febe208c141912fc4f8c3beaa2ab (commit)
from b14866797ef758fd4e3a920b8ca9eab50053e120 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email, so we list those
revisions in full below.
- Log -----------------------------------------------------------------
commit 8cb640551a6fc4a101bdc4c6469630d0cbeb7076
Author: Yoshitaka Aharen <aharen at jprs.co.jp>
Date: Wed Dec 14 18:29:28 2011 +0900
[510] whitespace style fix
commit ff81c97c2ad006e5e28f33e67d3a8bde1dccc523
Author: Yoshitaka Aharen <aharen at jprs.co.jp>
Date: Wed Dec 14 18:11:27 2011 +0900
[510] forgot to merge
commit afddaf4c5718c2a0cc31f2eee79c4e0cc625499f
Merge: b14866797ef758fd4e3a920b8ca9eab50053e120 2f4433fb15900481e4fb976a93692e68084c3925
Author: Yoshitaka Aharen <aharen at jprs.co.jp>
Date: Wed Dec 14 18:00:35 2011 +0900
Merge branch 'master' into trac510
Conflicts:
src/lib/statistics/counter_dict.cc
src/lib/statistics/counter_dict.h
src/lib/statistics/tests/counter_dict_unittest.cc
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 41 +-
configure.ac | 106 ++-
src/bin/auth/benchmarks/Makefile.am | 2 +-
src/bin/auth/statistics.cc | 4 +-
src/bin/auth/tests/Makefile.am | 2 +-
src/bin/bind10/bind10_messages.mes | 11 +
src/bin/bind10/bind10_src.py.in | 308 +++++-
src/bin/bind10/tests/bind10_test.py.in | 463 ++++++++
src/bin/dhcp6/.gitignore | 1 +
src/bin/dhcp6/dhcp6_srv.cc | 30 +-
src/bin/dhcp6/dhcp6_srv.h | 11 +-
src/bin/dhcp6/iface_mgr.cc | 437 ++++++---
src/bin/dhcp6/iface_mgr.h | 269 ++++-
src/bin/dhcp6/tests/Makefile.am | 4 +-
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 23 +-
src/bin/dhcp6/tests/iface_mgr_unittest.cc | 265 ++++-
src/bin/resolver/tests/Makefile.am | 2 +-
src/lib/asiodns/io_fetch.cc | 4 -
src/lib/cryptolink/Makefile.am | 3 +-
src/lib/cryptolink/tests/Makefile.am | 4 +-
src/lib/datasrc/database.cc | 657 +++++++-----
src/lib/datasrc/database.h | 1122 +++++++++++---------
src/lib/datasrc/datasrc_messages.mes | 109 ++-
src/lib/datasrc/zone.h | 15 +-
src/lib/dhcp/pkt4.cc | 47 +-
src/lib/dhcp/pkt4.h | 24 +-
src/lib/dhcp/tests/option_unittest.cc | 2 +
src/lib/dhcp/tests/pkt4_unittest.cc | 13 +
src/lib/dns/tests/Makefile.am | 6 +-
src/lib/log/Makefile.am | 3 +-
src/lib/log/tests/Makefile.am | 10 +-
src/lib/nsas/nameserver_entry.cc | 24 +-
src/lib/nsas/nsas_messages.mes | 29 +-
src/lib/python/isc/bind10/Makefile.am | 3 +-
src/lib/python/isc/bind10/socket_cache.py | 302 ++++++
src/lib/python/isc/bind10/special_component.py | 1 +
src/lib/python/isc/bind10/tests/Makefile.am | 2 +-
.../python/isc/bind10/tests/sockcreator_test.py | 3 -
.../python/isc/bind10/tests/socket_cache_test.py | 396 +++++++
src/lib/python/isc/datasrc/Makefile.am | 1 +
src/lib/resolve/recursive_query.cc | 155 ++-
src/lib/resolve/resolve_messages.mes | 97 ++-
src/lib/resolve/response_classifier.h | 4 +-
src/lib/xfr/Makefile.am | 2 +
src/lib/{cryptolink => xfr}/tests/Makefile.am | 14 +-
.../tempdir.h.in => xfr/tests/client_test.cc} | 26 +-
src/lib/{acl => xfr}/tests/run_unittests.cc | 0
src/lib/xfr/xfrout_client.cc | 9 +-
tests/lettuce/README.tutorial | 6 +-
.../configurations/ixfr-out/testset1-config.db | 1 +
tests/lettuce/data/ixfr-out/zones.slite3 | Bin 0 -> 246784 bytes
tests/lettuce/features/ixfr_out_bind10.feature | 195 ++++
tests/lettuce/features/terrain/bind10_control.py | 14 +
tests/lettuce/features/terrain/querying.py | 2 +-
tests/lettuce/features/terrain/transfer.py | 138 +++
tools/reorder_message_file.py | 196 ++++
56 files changed, 4371 insertions(+), 1247 deletions(-)
create mode 100644 src/lib/python/isc/bind10/socket_cache.py
create mode 100644 src/lib/python/isc/bind10/tests/socket_cache_test.py
copy src/lib/{cryptolink => xfr}/tests/Makefile.am (54%)
copy src/lib/{log/tests/tempdir.h.in => xfr/tests/client_test.cc} (61%)
copy src/lib/{acl => xfr}/tests/run_unittests.cc (100%)
create mode 100644 tests/lettuce/configurations/ixfr-out/testset1-config.db
create mode 100644 tests/lettuce/data/ixfr-out/zones.slite3
create mode 100644 tests/lettuce/features/ixfr_out_bind10.feature
create mode 100644 tests/lettuce/features/terrain/transfer.py
create mode 100644 tools/reorder_message_file.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 434bd61..7a2a121 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,15 +1,44 @@
-339. [func] y-aharen
+344. [func] y-aharen
src/lib/statistics: Added statistics counter library for entire server
items and per zone items. Also, modified b10-auth to use it. It is
also intended to use in the other modules such as b10-resolver.
(Trac #510, git TBD)
+343. [func] jelte
+ Added IXFR-out system tests, based on the first two test sets of
+ http://bind10.isc.org/wiki/IxfrSystemTests.
+ (Trac #1314, git 1655bed624866a766311a01214597db01b4c7cec)
+
+342. [bug] stephen
+ In the resolver, a FORMERR received from an upstream nameserver
+ now results in a SERVFAIL being returned as a response to the original
+ query. Additional debug messages added to distinguish between
+ different errors in packets received from upstream nameservers.
+ (Trac #1383, git 9b2b249d23576c999a65d8c338e008cabe45f0c9)
+
+341. [func] tomek
+ libdhcp++: Support for handling both IPv4 and IPv6 added.
+ Also added support for binding IPv4 sockets.
+ (Trac #1238, git 86a4ce45115dab4d3978c36dd2dbe07edcac02ac)
+
+340. [build] jelte
+ Fixed several linker issues related to recent gcc versions, botan
+ and gtest.
+ (Trac #1442, git 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3)
+
+339. [bug] jinmei
+ libxfr, used by b10-auth to share TCP sockets with b10-xfrout,
+ incorrectly propagated ASIO specific exceptions to the application
+ if the given file name was too long. This could lead to
+ unexpected shut down of b10-auth.
+ (Trac #1387, git a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2)
+
338. [bug] jinmei
b10-xfrin didn't check SOA serials of SOA and IXFR responses,
which resulted in unnecessary transfer or unexpected IXFR
timeouts (these issues were not overlooked but deferred to be
fixed until #1278 was completed). Validation on responses to SOA
- queries were tighten, too.
+ queries were tightened, too.
(Trac #1299, git 6ff03bb9d631023175df99248e8cc0cda586c30a)
337. [func] tomek
@@ -51,12 +80,12 @@
potential problems and were fixed.
(Trac #1389, git 3fdce88046bdad392bd89ea656ec4ac3c858ca2f)
-333. [bug] dvv
- Solaris needs "-z now" to force non-lazy binding and prevent g++ static
- initialization code from deadlocking.
+333. [bug] dvv
+ Solaris needs "-z now" to force non-lazy binding and prevent
+ g++ static initialization code from deadlocking.
(Trac #1439, git c789138250b33b6b08262425a08a2a0469d90433)
-332. [bug] vorner
+332. [bug] vorner
C++ exceptions in the isc.dns.Rdata wrapper are now converted
to python ones instead of just aborting the interpretter.
(Trac #1407, git 5b64e839be2906b8950f5b1e42a3fadd72fca033)
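
ChangeLog entry 344 above describes a counter library with whole-server items
plus per-zone items (the C++ sources live under src/lib/statistics; see the
counter_dict.* files in the merge conflict list earlier in this mail). Purely
for orientation, a hypothetical Python sketch of that counter model follows;
the names are made up and the real C++ interface may differ:

    # Hypothetical sketch of the per-server / per-zone counter idea from
    # ChangeLog entry 344; not the actual src/lib/statistics API.
    class Counters:
        """A fixed set of named counters, e.g. whole-server query stats."""
        def __init__(self, item_names):
            self._counters = dict.fromkeys(item_names, 0)

        def inc(self, name):
            # Unknown item names are rejected rather than silently created.
            if name not in self._counters:
                raise KeyError("unknown counter item: " + name)
            self._counters[name] += 1

        def get(self, name):
            return self._counters[name]

    class CounterDictionary:
        """Per-zone counters: one Counters instance per zone name."""
        def __init__(self, item_names):
            self._item_names = item_names
            self._zones = {}

        def __getitem__(self, zone_name):
            # Create the per-zone counter set lazily on first use.
            if zone_name not in self._zones:
                self._zones[zone_name] = Counters(self._item_names)
            return self._zones[zone_name]

    # Usage:
    #   server = Counters(['queries.udp', 'queries.tcp'])
    #   per_zone = CounterDictionary(['queries.udp', 'queries.tcp'])
    #   server.inc('queries.udp')
    #   per_zone['example.org.'].inc('queries.udp')
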
diff --git a/configure.ac b/configure.ac
index a5f8e87..4c1d9cf 100644
--- a/configure.ac
+++ b/configure.ac
@@ -480,23 +480,33 @@ else
fi
fi
-BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_LIBS=`${BOTAN_CONFIG} --libs`
BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
# We expect botan-config --libs to contain -L<path_to_libbotan>, but
# this is not always the case. As a heuristics workaround we add
-# -L`botan-config --prefix/lib` in this case. Same for BOTAN_INCLUDES
-# (but using include instead of lib) below.
+# -L`botan-config --prefix/lib` in this case (if not present already).
+# Same for BOTAN_INCLUDES (but using include instead of lib) below.
if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
- echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
- BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
+ echo ${BOTAN_LIBS} | grep -- -L > /dev/null || \
+ BOTAN_LIBS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LIBS}"
echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
fi
+
+# botan-config script (and the way we call pkg-config) returns -L and -l
+# as one string, but we need them in separate values
+BOTAN_LDFLAGS=
+BOTAN_NEWLIBS=
+for flag in ${BOTAN_LIBS}; do
+ BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | sed -ne '/^\(\-L\)/p'`"
+ BOTAN_LIBS="${BOTAN_LIBS} `echo $flag | sed -ne '/^\(\-l\)/p'`"
+done
+
# See python_rpath for some info on why we do this
if test $rpath_available = yes; then
BOTAN_RPATH=
- for flag in ${BOTAN_LDFLAGS}; do
+ for flag in ${BOTAN_LIBS}; do
BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
done
AC_SUBST(BOTAN_RPATH)
@@ -512,13 +522,13 @@ AC_SUBST(BOTAN_RPATH)
fi
AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_LIBS)
AC_SUBST(BOTAN_INCLUDES)
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$BOTAN_LDFLAGS $LDFLAGS"
-
+LIBS_SAVED="$LIBS"
+LIBS="$LIBS $BOTAN_LIBS"
AC_CHECK_HEADERS([botan/botan.h],,AC_MSG_ERROR([Missing required header files.]))
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([#include <botan/botan.h>
@@ -533,7 +543,7 @@ AC_LINK_IFELSE(
AC_MSG_ERROR([Needs Botan library 1.8 or higher])]
)
CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
# Check for log4cplus
log4cplus_path="yes"
@@ -545,7 +555,7 @@ if test "${log4cplus_path}" = "no" ; then
AC_MSG_ERROR([Need log4cplus])
elif test "${log4cplus_path}" != "yes" ; then
LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
- LOG4CPLUS_LDFLAGS="-L${log4cplus_path}/lib"
+ LOG4CPLUS_LIBS="-L${log4cplus_path}/lib"
else
# If not specified, try some common paths.
log4cplusdirs="/usr/local /usr/pkg /opt /opt/local"
@@ -553,21 +563,21 @@ else
do
if test -f $d/include/log4cplus/logger.h; then
LOG4CPLUS_INCLUDES="-I$d/include"
- LOG4CPLUS_LDFLAGS="-L$d/lib"
+ LOG4CPLUS_LIBS="-L$d/lib"
break
fi
done
fi
-LOG4CPLUS_LDFLAGS="$LOG4CPLUS_LDFLAGS -llog4cplus $MULTITHREADING_FLAG"
+LOG4CPLUS_LIBS="$LOG4CPLUS_LIBS -llog4cplus $MULTITHREADING_FLAG"
-AC_SUBST(LOG4CPLUS_LDFLAGS)
+AC_SUBST(LOG4CPLUS_LIBS)
AC_SUBST(LOG4CPLUS_INCLUDES)
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$LOG4CPLUS_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$LOG4CPLUS_LDFLAGS $LDFLAGS"
+LIBS_SAVED="$LIBS"
+LIBS="$LOG4CPLUS_LIBS $LIBS"
AC_CHECK_HEADERS([log4cplus/logger.h],,AC_MSG_ERROR([Missing required header files.]))
AC_LINK_IFELSE(
@@ -582,7 +592,7 @@ AC_LINK_IFELSE(
)
CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
#
# Configure Boost header path
@@ -675,6 +685,13 @@ else
AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test "${use_boost_threads}" = "yes")
fi
+# I can't get some of the #include <asio.hpp> right without this
+# TODO: find the real cause of asio/boost wanting pthreads
+# (this currently only occurs for src/lib/cc/session_unittests)
+PTHREAD_LDFLAGS=
+AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
+AC_SUBST(PTHREAD_LDFLAGS)
+AC_SUBST(MULTITHREADING_FLAG)
#
# Check availability of gtest, which will be used for unit tests.
@@ -711,6 +728,48 @@ then
GTEST_LDFLAGS="-L$dir/lib"
GTEST_LDADD="-lgtest"
GTEST_FOUND="true"
+ # There is no gtest-config script on this
+ # system, which is supposed to inform us
+ # whether we need pthreads as well (a
+ # gtest compile-time option). So we still
+ # need to test that manually.
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $GTEST_INCLUDES"
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $GTEST_LDFLAGS"
+ LIBS_SAVED=$LIBS
+ LIBS="$LIBS $GTEST_LDADD"
+ AC_MSG_CHECKING([Checking whether gtest tests need pthreads])
+ # First try to compile without pthreads
+ AC_TRY_LINK([
+ #include <gtest/gtest.h>
+ ],[
+ int i = 0;
+ char* c = NULL;
+ ::testing::InitGoogleTest(&i, &c);
+ return (0);
+ ],
+ [ AC_MSG_RESULT(no) ],
+ [
+ LIBS="$SAVED_LIBS $GTEST_LDADD $PTHREAD_LDFLAGS"
+ # Now try to compile with pthreads
+ AC_TRY_LINK([
+ #include <gtest/gtest.h>
+ ],[
+ int i = 0;
+ char* c = NULL;
+ ::testing::InitGoogleTest(&i, &c);
+ return (0);
+ ],
+ [ AC_MSG_RESULT(yes)
+ GTEST_LDADD="$GTEST_LDADD $PTHREAD_LDFLAGS"
+ ],
+ # Apparently we can't compile it at all
+ [ AC_MSG_ERROR(unable to compile with gtest) ])
+ ])
+ CPPFLAGS=$CPPFLAGS_SAVED
+ LDFLAGS=$LDFLAGS_SAVED
+ LIBS=$LIBS_SAVED
break
fi
done
@@ -737,15 +796,6 @@ if test "x$HAVE_PKG_CONFIG" = "xno" ; then
fi
PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9, enable_features="$enable_features SQLite3")
-# I can't get some of the #include <asio.hpp> right without this
-# TODO: find the real cause of asio/boost wanting pthreads
-# (this currently only occurs for src/lib/cc/session_unittests)
-PTHREAD_LDFLAGS=
-AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
-AC_SUBST(PTHREAD_LDFLAGS)
-
-AC_SUBST(MULTITHREADING_FLAG)
-
#
# ASIO: we extensively use it as the C++ event management module.
#
@@ -912,6 +962,7 @@ AC_CONFIG_FILES([Makefile
src/lib/datasrc/tests/Makefile
src/lib/datasrc/tests/testdata/Makefile
src/lib/xfr/Makefile
+ src/lib/xfr/tests/Makefile
src/lib/log/Makefile
src/lib/log/compiler/Makefile
src/lib/log/tests/Makefile
@@ -1089,8 +1140,9 @@ dnl includes too
Boost: ${BOOST_INCLUDES}
Botan: ${BOTAN_INCLUDES}
${BOTAN_LDFLAGS}
+ ${BOTAN_LIBS}
Log4cplus: ${LOG4CPLUS_INCLUDES}
- ${LOG4CPLUS_LDFLAGS}
+ ${LOG4CPLUS_LIBS}
SQLite: $SQLITE_CFLAGS
$SQLITE_LIBS
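
The configure.ac hunks above split the single string returned by
botan-config --libs into -L (library search path) flags, kept in
BOTAN_LDFLAGS, and -l (library) flags, which are fed into LIBS for the link
tests. Purely as an illustration of that separation, and not part of the
build system, the same idea in Python:

    # Illustration of the -L / -l split performed by the shell loop in
    # configure.ac above; the real build logic stays in configure.ac.
    def split_linker_flags(libs_output):
        """Split a 'botan-config --libs' style string into search-path
        flags (-L...) and library flags (-l...)."""
        ldflags = []
        libs = []
        for flag in libs_output.split():
            if flag.startswith("-L"):
                ldflags.append(flag)
            elif flag.startswith("-l"):
                libs.append(flag)
        return ldflags, libs

    # Example:
    #   split_linker_flags("-L/usr/local/lib -lbotan -lm")
    #   returns (['-L/usr/local/lib'], ['-lbotan', '-lm'])
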
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index fb348bb..da6a5c8 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -32,9 +32,9 @@ query_bench_LDADD += $(top_builddir)/src/lib/cc/libcc.la
query_bench_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
query_bench_LDADD += $(top_builddir)/src/lib/log/liblog.la
query_bench_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
query_bench_LDADD += $(SQLITE_LIBS)
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 0e49fd5..7397a50 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -30,10 +30,10 @@ using namespace isc::auth;
using namespace isc::statistics;
// TODO: We need a namespace ("auth_server"?) to hold
-// AuthSrv and AuthCounters.
+// AuthSrv and AuthCounters.
// TODO: Make use of wrappers like isc::dns::Opcode
-// for counter item type.
+// for counter item type.
class AuthCountersImpl : boost::noncopyable {
public:
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index e9527a4..b5b96d7 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -64,8 +64,8 @@ run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-run_unittests_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/statistics/libstatistics.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index d850e47..79635fd 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -99,6 +99,12 @@ The boss module is sending a kill signal to process with the given name,
as part of the process of killing all started processes during a failed
startup, as described for BIND10_KILLING_ALL_PROCESSES
+% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
+A connection from one of the applications which requested a socket was
+closed. This means the application has terminated, so all the sockets it was
+using are now closed and the bind10 process can release them as well, unless
+the same sockets are used by yet another application.
+
% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
There already appears to be a message bus daemon running. Either an
old process was not shut down correctly, and needs to be killed, or
@@ -110,6 +116,11 @@ While listening on the message bus channel for messages, it suddenly
disappeared. The msgq daemon may have died. This might lead to an
inconsistent state of the system, and BIND 10 will now shut down.
+% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
+An error occurred when the bind10 process was asked to send a socket file
+descriptor. The error is mentioned; the most common reason is that the request
+is invalid and may not come from a bind10 process at all.
+
% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
This indicates a process started previously terminated. The process id
and component owning the process are indicated, as well as the exit code.
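
The two messages added above belong to the socket-sharing protocol
implemented in the bind10_src.py.in changes below: an application connects
to a unix-domain socket published by the boss, writes the token it obtained
from the get_socket command followed by a newline, and the boss replies with
"1\n" plus the file descriptor, or "0\n" if the socket cannot be provided.
A hypothetical consumer-side sketch of that exchange follows; it is not the
actual BIND 10 client code (real components would use the project's own
fd-passing helpers, and socket.recvmsg needs Python 3.3 or newer):

    # Hypothetical client for the boss socket transfer protocol; shown for
    # illustration only, not part of BIND 10.
    import array
    import socket

    def request_socket(path, token):
        """Fetch the descriptor identified by 'token' from the boss
        listening on the unix-domain socket at 'path'."""
        conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        conn.connect(path)
        conn.sendall(token.encode() + b"\n")
        status = conn.recv(2)        # b"1\n" on success, b"0\n" on failure
        if status != b"1\n":
            raise RuntimeError("boss could not provide socket for " + token)
        # The descriptor arrives as SCM_RIGHTS ancillary data.
        fds = array.array("i")
        msg, anc, flags, addr = conn.recvmsg(1, socket.CMSG_LEN(fds.itemsize))
        for level, ctype, data in anc:
            if level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS:
                fds.frombytes(data[:fds.itemsize])
        return fds[0]
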
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index f9816fb..00858d8 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -72,6 +72,9 @@ import isc.log
from isc.log_messages.bind10_messages import *
import isc.bind10.component
import isc.bind10.special_component
+import isc.bind10.socket_cache
+import libutil_io_python
+import tempfile
isc.log.init("b10-boss")
logger = isc.log.Logger("boss")
@@ -81,6 +84,10 @@ logger = isc.log.Logger("boss")
DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+# Messages sent over the unix domain socket to indicate if it is followed by a real socket
+CREATOR_SOCKET_OK = "1\n"
+CREATOR_SOCKET_UNAVAILABLE = "0\n"
+
# Assign this process some longer name
isc.util.process.rename(sys.argv[0])
@@ -241,6 +248,12 @@ class BoB:
# If -v was set, enable full debug logging.
if self.verbose:
logger.set_severity("DEBUG", 99)
+ # This is set in init_socket_srv
+ self._socket_path = None
+ self._socket_cache = None
+ self._tmpdir = None
+ self._srv_socket = None
+ self._unix_sockets = {}
def __propagate_component_config(self, config):
comps = dict(config)
@@ -315,6 +328,18 @@ class BoB:
elif command == "show_processes":
answer = isc.config.ccsession. \
create_answer(0, self.get_processes())
+ elif command == "get_socket":
+ answer = self._get_socket(args)
+ elif command == "drop_socket":
+ if "token" not in args:
+ answer = isc.config.ccsession. \
+ create_answer(1, "Missing token parameter")
+ else:
+ try:
+ self._socket_cache.drop_socket(args["token"])
+ answer = isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ answer = isc.config.ccsession.create_answer(1, str(e))
else:
answer = isc.config.ccsession.create_answer(1,
"Unknown command")
@@ -769,6 +794,209 @@ class BoB:
return next_restart_time
+ def _get_socket(self, args):
+ """
+ Implementation of the get_socket CC command. It asks the cache
+ to provide the token and sends the information back.
+ """
+ try:
+ try:
+ addr = isc.net.parse.addr_parse(args['address'])
+ port = isc.net.parse.port_parse(args['port'])
+ protocol = args['protocol']
+ if protocol not in ['UDP', 'TCP']:
+ raise ValueError("Protocol must be either UDP or TCP")
+ share_mode = args['share_mode']
+ if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
+ raise ValueError("Share mode must be one of ANY, SAMEAPP" +
+ " or NO")
+ share_name = args['share_name']
+ except KeyError as ke:
+ return \
+ isc.config.ccsession.create_answer(1,
+ "Missing parameter " +
+ str(ke))
+
+ # FIXME: This call contains blocking IPC. It is expected to be
+ # short, but if it turns out to be problem, we'll need to do
+ # something about it.
+ token = self._socket_cache.get_token(protocol, addr, port,
+ share_mode, share_name)
+ return isc.config.ccsession.create_answer(0, {
+ 'token': token,
+ 'path': self._socket_path
+ })
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
+
+ def socket_request_handler(self, token, unix_socket):
+ """
+ This function handles a token that comes over a unix_domain socket.
+ The function looks into the _socket_cache and sends the socket
+ identified by the token back over the unix_socket.
+ """
+ try:
+ fd = self._socket_cache.get_socket(token, unix_socket.fileno())
+ # FIXME: These two calls are blocking in their nature. An OS-level
+ # buffer is likely to be large enough to hold all this data, but
+ # if it weren't and the remote application got stuck, we would have
+ # a problem. If such problems appear, we should do something
+ # about it.
+ unix_socket.sendall(CREATOR_SOCKET_OK)
+ libutil_io_python.send_fd(unix_socket.fileno(), fd)
+ except Exception as e:
+ logger.info(BIND10_NO_SOCKET, token, e)
+ unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
+
+ def socket_consumer_dead(self, unix_socket):
+ """
+ This function handles when a unix_socket closes. This means all
+ sockets sent to it are to be considered closed. This function signals
+ so to the _socket_cache.
+ """
+ logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
+ try:
+ self._socket_cache.drop_application(unix_socket.fileno())
+ except ValueError:
+ # This means the application holds no sockets. It's harmless, as it
+ # can happen in real life - for example, it requests a socket, but
+ # get_socket doesn't find it, so the application dies. It should be
+ # rare, though.
+ pass
+
+ def set_creator(self, creator):
+ """
+ Registers a socket creator into the boss. The socket creator is not
+ used directly, but through a cache. The cache is created in this
+ method.
+
+ If called more than once, it raises a ValueError.
+ """
+ if self._socket_cache is not None:
+ raise ValueError("A creator was inserted previously")
+ self._socket_cache = isc.bind10.socket_cache.Cache(creator)
+
+ def init_socket_srv(self):
+ """
+ Creates and listens on a unix-domain socket to be able to send out
+ the sockets.
+
+ This method should be called after switching user, or the switched
+ applications won't be able to access the socket.
+ """
+ self._srv_socket = socket.socket(socket.AF_UNIX)
+ # We create a temporary directory somewhere safe and unique, to avoid
+ # the need to find the place ourselves or bother users. Also, this
+ # secures the socket on some platforms, as it creates a private
+ # directory.
+ self._tmpdir = tempfile.mkdtemp()
+ # Get the name
+ self._socket_path = os.path.join(self._tmpdir, "sockcreator")
+ # And bind the socket to the name
+ self._srv_socket.bind(self._socket_path)
+ self._srv_socket.listen(5)
+
+ def remove_socket_srv(self):
+ """
+ Closes and removes the listening socket and the directory where it
+ lives, as we created both.
+
+ It does nothing if the _srv_socket is not set (e.g. it was not yet
+ initialized).
+ """
+ if self._srv_socket is not None:
+ self._srv_socket.close()
+ os.remove(self._socket_path)
+ os.rmdir(self._tmpdir)
+
+ def _srv_accept(self):
+ """
+ Accept a socket from the unix domain socket server and put it to the
+ others we care about.
+ """
+ socket = self._srv_socket.accept()
+ self._unix_sockets[socket.fileno()] = (socket, b'')
+
+ def _socket_data(self, socket_fileno):
+ """
+ This is called when a socket identified by the socket_fileno needs
+ attention. We try to read data from there. If it is closed, we remove
+ it.
+ """
+ (sock, previous) = self._unix_sockets[socket_fileno]
+ while True:
+ try:
+ data = sock.recv(1, socket.MSG_DONTWAIT)
+ except socket.error as se:
+ # These two might be different on some systems
+ if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
+ # No more data now. Oh, well, just store what we have.
+ self._unix_sockets[socket_fileno] = (sock, previous)
+ return
+ else:
+ data = b'' # Pretend it got closed
+ if len(data) == 0: # The socket got to its end
+ del self._unix_sockets[socket_fileno]
+ self.socket_consumer_dead(sock)
+ sock.close()
+ return
+ else:
+ if data == b"\n":
+ # Handle this token and clear it
+ self.socket_request_handler(previous, sock)
+ previous = b''
+ else:
+ previous += data
+
+ def run(self, wakeup_fd):
+ """
+ The main loop, waiting for sockets, commands and dead processes.
+ Runs as long as the runnable is true.
+
+ The wakeup_fd descriptor is the read end of pipe where CHLD signal
+ handler writes.
+ """
+ ccs_fd = self.ccs.get_socket().fileno()
+ while self.runnable:
+ # clean up any processes that exited
+ self.reap_children()
+ next_restart = self.restart_processes()
+ if next_restart is None:
+ wait_time = None
+ else:
+ wait_time = max(next_restart - time.time(), 0)
+
+ # select() can raise EINTR when a signal arrives,
+ # even if they are resumable, so we have to catch
+ # the exception
+ try:
+ (rlist, wlist, xlist) = \
+ select.select([wakeup_fd, ccs_fd,
+ self._srv_socket.fileno()] +
+ list(self._unix_sockets.keys()), [], [],
+ wait_time)
+ except select.error as err:
+ if err.args[0] == errno.EINTR:
+ (rlist, wlist, xlist) = ([], [], [])
+ else:
+ logger.fatal(BIND10_SELECT_ERROR, err)
+ break
+
+ for fd in rlist + xlist:
+ if fd == ccs_fd:
+ try:
+ self.ccs.check_command()
+ except isc.cc.session.ProtocolError:
+ logger.fatal(BIND10_MSGQ_DISAPPEARED)
+ self.runnable = False
+ break
+ elif fd == wakeup_fd:
+ os.read(wakeup_fd, 32)
+ elif fd == self._srv_socket.fileno():
+ self._srv_accept()
+ elif fd in self._unix_sockets:
+ self._socket_data(fd)
+
# global variables, needed for signal handlers
options = None
boss_of_bind = None
@@ -931,60 +1159,32 @@ def main():
# Block SIGPIPE, as we don't want it to end this process
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.nocache, options.verbose,
- setuid, username, options.cmdctl_port,
- options.wait_time)
- startup_result = boss_of_bind.startup()
- if startup_result:
- logger.fatal(BIND10_STARTUP_ERROR, startup_result)
- sys.exit(1)
- logger.info(BIND10_STARTUP_COMPLETE)
- dump_pid(options.pid_file)
-
- # In our main loop, we check for dead processes or messages
- # on the c-channel.
- wakeup_fd = wakeup_pipe[0]
- ccs_fd = boss_of_bind.ccs.get_socket().fileno()
- while boss_of_bind.runnable:
- # clean up any processes that exited
- boss_of_bind.reap_children()
- next_restart = boss_of_bind.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- logger.fatal(BIND10_SELECT_ERROR, err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- boss_of_bind.ccs.check_command()
- except isc.cc.session.ProtocolError:
- logger.fatal(BIND10_MSGQ_DISAPPEARED)
- self.runnable = False
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- unlink_pid_file(options.pid_file)
- sys.exit(0)
+ try:
+ # Go bob!
+ boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
+ options.config_file, options.nocache,
+ options.verbose, setuid, username,
+ options.cmdctl_port, options.wait_time)
+ startup_result = boss_of_bind.startup()
+ if startup_result:
+ logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+ sys.exit(1)
+ boss_of_bind.init_socket_srv()
+ logger.info(BIND10_STARTUP_COMPLETE)
+ dump_pid(options.pid_file)
+
+ # Let it run
+ boss_of_bind.run(wakeup_pipe[0])
+
+ # shutdown
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ boss_of_bind.shutdown()
+ finally:
+ # Clean up the filesystem
+ unlink_pid_file(options.pid_file)
+ if boss_of_bind is not None:
+ boss_of_bind.remove_socket_srv()
+ sys.exit(boss_of_bind.exitcode)
if __name__ == "__main__":
main()
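
The socket_request_handler() added above hands the descriptor over with
libutil_io_python.send_fd(), which is a C helper. As a plain-Python sketch
of what that call does (illustration only; this version takes a socket
object rather than the raw descriptor numbers the real helper uses, and
socket.sendmsg needs Python 3.3 or newer):

    # Illustrative stand-in for the descriptor passing done by
    # libutil_io_python.send_fd(): send an open file descriptor across a
    # connected AF_UNIX socket as SCM_RIGHTS ancillary data.
    import array
    import socket

    def send_fd(sock, fd):
        # The control message has to accompany at least one byte of
        # ordinary data, hence the single dummy byte.
        sock.sendmsg([b"F"],
                     [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                       array.array("i", [fd]))])
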
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index c917d33..f9537fd 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,11 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# Most of the time, we omit the "bind10_src" for brevity. Sometimes,
+# we want to be explicit about what we do, like when hijacking a library
+# call used by the bind10_src.
from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+import bind10_src
# XXX: environment tests are currently disabled, due to the preprocessor
# setup that we have now complicating the environment
@@ -28,6 +32,8 @@ from isc.net.addr import IPAddr
import time
import isc
import isc.log
+import isc.bind10.socket_cache
+import errno
from isc.testutils.parse_args import TestOptParser, OptsError
@@ -97,6 +103,232 @@ class TestProcessInfo(unittest.TestCase):
self.assertTrue(type(pi.pid) is int)
self.assertNotEqual(pi.pid, old_pid)
+class TestCacheCommands(unittest.TestCase):
+ """
+ Test methods of boss related to the socket cache and socket handling.
+ """
+ def setUp(self):
+ """
+ Prepare the boss for some tests.
+
+ Also prepare some variables we need.
+ """
+ self.__boss = BoB()
+ # Fake the cache here so we can pretend it is us and hijack the
+ # calls to its methods.
+ self.__boss._socket_cache = self
+ self.__boss._socket_path = '/socket/path'
+ self.__raise_exception = None
+ self.__socket_args = {
+ "port": 53,
+ "address": "::",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # What was and wasn't called.
+ self.__drop_app_called = None
+ self.__get_socket_called = None
+ self.__send_fd_called = None
+ self.__get_token_called = None
+ self.__drop_socket_called = None
+ bind10_src.libutil_io_python.send_fd = self.__send_fd
+
+ def __send_fd(self, to, socket):
+ """
+ A function to hook the send_fd in the bind10_src.
+ """
+ self.__send_fd_called = (to, socket)
+
+ class FalseSocket:
+ """
+ A socket where we can fake methods we need instead of having a real
+ socket.
+ """
+ def __init__(self):
+ self.send = ""
+ def fileno(self):
+ """
+ The file number. Used for identifying the remote application.
+ """
+ return 42
+
+ def sendall(self, data):
+ """
+ Adds data to the self.send.
+ """
+ self.send += data
+
+ def drop_application(self, application):
+ """
+ Part of pretending to be the cache. Logs the parameter to
+ self.__drop_app_called.
+
+ In the case self.__raise_exception is set, the exception there
+ is raised instead.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_app_called = application
+
+ def test_consumer_dead(self):
+ """
+ Test that it calls the drop_application method of the cache.
+ """
+ self.__boss.socket_consumer_dead(self.FalseSocket())
+ self.assertEqual(42, self.__drop_app_called)
+
+ def test_consumer_dead_invalid(self):
+ """
+ Test that the boss doesn't crash in case the application is not known
+ to the cache, as this actually can happen in practice.
+ """
+ self.__raise_exception = ValueError("This application is unknown")
+ # This doesn't crash
+ self.__boss.socket_consumer_dead(self.FalseSocket())
+
+ def get_socket(self, token, application):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the call is logged
+ into __get_socket_called and a number is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_socket_called = (token, application)
+ return 13
+
+ def test_request_handler(self):
+ """
+ Test that a request for a socket is forwarded and the socket is sent
+ back, if the cache returns one.
+ """
+ socket = self.FalseSocket()
+ # An exception from the cache
+ self.__raise_exception = ValueError("Test value error")
+ self.__boss.socket_request_handler("token", socket)
+ # It was called, but it threw, so it is not noted here
+ self.assertIsNone(self.__get_socket_called)
+ self.assertEqual("0\n", socket.send)
+ # It should not have sent any socket.
+ self.assertIsNone(self.__send_fd_called)
+ # Now prepare a valid scenario
+ self.__raise_exception = None
+ socket.send = ""
+ self.__boss.socket_request_handler("token", socket)
+ self.assertEqual("1\n", socket.send)
+ self.assertEqual((42, 13), self.__send_fd_called)
+ self.assertEqual(("token", 42), self.__get_socket_called)
+
+ def get_token(self, protocol, address, port, share_mode, share_name):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameters are
+ logged into __get_token_called and a token is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_token_called = (protocol, address, port, share_mode,
+ share_name)
+ return "token"
+
+ def test_get_socket_ok(self):
+ """
+ Test the successful scenario of getting a socket.
+ """
+ result = self.__boss._get_socket(self.__socket_args)
+ [code, answer] = result['result']
+ self.assertEqual(0, code)
+ self.assertEqual({
+ 'token': 'token',
+ 'path': '/socket/path'
+ }, answer)
+ addr = self.__get_token_called[1]
+ self.assertTrue(isinstance(addr, IPAddr))
+ self.assertEqual("::", str(addr))
+ self.assertEqual(("UDP", addr, 53, "ANY", "app"),
+ self.__get_token_called)
+
+ def test_get_socket_error(self):
+ """
+ Test that bad inputs are handled correctly, etc.
+ """
+ def check_code(code, args):
+ """
+ Pass the args there and check if it returns success or not.
+
+ The rest is not tested, as it is already checked in the
+ test_get_socket_ok.
+ """
+ [rcode, ranswer] = self.__boss._get_socket(args)['result']
+ self.assertEqual(code, rcode)
+ if code == 1:
+ # This should be an error message. The exact formatting
+ # is unknown, but we check it is string at least
+ self.assertTrue(isinstance(ranswer, str))
+ def mod_args(name, value):
+ """
+ Override a parameter in the args.
+ """
+ result = dict(self.__socket_args)
+ result[name] = value
+ return result
+
+ # Port too large
+ check_code(1, mod_args('port', 65536))
+ # Not numeric address
+ check_code(1, mod_args('address', 'example.org.'))
+ # Some bad values of enum-like params
+ check_code(1, mod_args('protocol', 'BAD PROTO'))
+ check_code(1, mod_args('share_mode', 'BAD SHARE'))
+ # Check missing parameters
+ for param in self.__socket_args.keys():
+ args = dict(self.__socket_args)
+ del args[param]
+ check_code(1, args)
+ # These are OK values for the enum-like parameters
+ # The ones from test_get_socket_ok are not tested here
+ check_code(0, mod_args('protocol', 'TCP'))
+ check_code(0, mod_args('share_mode', 'SAMEAPP'))
+ check_code(0, mod_args('share_mode', 'NO'))
+ # If an exception is raised from within the cache, it is converted
+ # to an error, not propagated
+ self.__raise_exception = Exception("Test exception")
+ check_code(1, self.__socket_args)
+
+ def drop_socket(self, token):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameter is stored
+ in __drop_socket_called.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_socket_called = token
+
+ def test_drop_socket(self):
+ """
+ Check the drop_socket command. It should directly call the method
+ on the cache. Exceptions should be translated to error messages.
+ """
+ # This should be OK and just propagated to the call.
+ self.assertEqual({"result": [0]},
+ self.__boss.command_handler("drop_socket",
+ {"token": "token"}))
+ self.assertEqual("token", self.__drop_socket_called)
+ self.__drop_socket_called = None
+ # Missing parameter
+ self.assertEqual({"result": [1, "Missing token parameter"]},
+ self.__boss.command_handler("drop_socket", {}))
+ self.assertIsNone(self.__drop_socket_called)
+ # An exception is raised from within the cache
+ self.__raise_exception = ValueError("Test error")
+ self.assertEqual({"result": [1, "Test error"]},
+ self.__boss.command_handler("drop_socket",
+ {"token": "token"}))
+
+
class TestBoB(unittest.TestCase):
def test_init(self):
bob = BoB()
@@ -109,6 +341,22 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.uid, None)
self.assertEqual(bob.username, None)
self.assertEqual(bob.nocache, False)
+ self.assertIsNone(bob._socket_cache)
+
+ def test_set_creator(self):
+ """
+ Test the call to set_creator. First time, the cache is created
+ with the passed creator. The next time, it throws an exception.
+ """
+ bob = BoB()
+ # The cache doesn't use it at start, so just create an empty class
+ class Creator: pass
+ creator = Creator()
+ bob.set_creator(creator)
+ self.assertTrue(isinstance(bob._socket_cache,
+ isc.bind10.socket_cache.Cache))
+ self.assertEqual(creator, bob._socket_cache._creator)
+ self.assertRaises(ValueError, bob.set_creator, creator)
def test_init_alternate_socket(self):
bob = BoB("alt_socket_file")
@@ -183,6 +431,26 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.command_handler("__UNKNOWN__", None),
isc.config.ccsession.create_answer(1, "Unknown command"))
+ # Fake the get_token of cache and test the command works
+ bob._socket_path = '/socket/path'
+ class cache:
+ def get_token(self, protocol, addr, port, share_mode, share_name):
+ return str(addr) + ':' + str(port)
+ bob._socket_cache = cache()
+ args = {
+ "port": 53,
+ "address": "0.0.0.0",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # at all and this is the easiest way to check.
+ self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
+ 'path': '/socket/path'}]},
+ bob.command_handler("get_socket", args))
+ # The drop_socket is not tested here, but in TestCacheCommands.
+ # It needs the cache mocks to be in place and they are there.
+
# Class for testing the BoB without actually starting processes.
# This is used for testing the start/stop components routines and
# the BoB commands.
@@ -931,6 +1199,201 @@ class TestBossComponents(unittest.TestCase):
bob.start_all_components()
self.__check_extended(self.__param)
+class SocketSrvTest(unittest.TestCase):
+ """
+ This tests some methods of boss related to the unix domain sockets used
+ to transfer other sockets to applications.
+ """
+ def setUp(self):
+ """
+ Create the boss to test, testdata and backup some functions.
+ """
+ self.__boss = BoB()
+ self.__select_backup = bind10_src.select.select
+ self.__select_called = None
+ self.__socket_data_called = None
+ self.__consumer_dead_called = None
+ self.__socket_request_handler_called = None
+
+ def tearDown(self):
+ """
+ Restore functions.
+ """
+ bind10_src.select.select = self.__select_backup
+
+ class __FalseSocket:
+ """
+ A mock socket for the select and accept and stuff like that.
+ """
+ def __init__(self, owner, fileno=42):
+ self.__owner = owner
+ self.__fileno = fileno
+ self.data = None
+ self.closed = False
+
+ def fileno(self):
+ return self.__fileno
+
+ def accept(self):
+ return self.__class__(self.__owner, 13)
+
+ def recv(self, bufsize, flags=0):
+ self.__owner.assertEqual(1, bufsize)
+ self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
+ if isinstance(self.data, socket.error):
+ raise self.data
+ elif self.data is not None:
+ if len(self.data):
+ result = self.data[0:1]
+ self.data = self.data[1:]
+ return result
+ else:
+ raise socket.error(errno.EAGAIN, "Would block")
+ else:
+ return b''
+
+ def close(self):
+ self.closed = True
+
+ class __CCS:
+ """
+ A mock CCS, just to provide the socket file number.
+ """
+ class __Socket:
+ def fileno(self):
+ return 1
+ def get_socket(self):
+ return self.__Socket()
+
+ def __select_accept(self, r, w, x, t):
+ self.__select_called = (r, w, x, t)
+ return ([42], [], [])
+
+ def __select_data(self, r, w, x, t):
+ self.__select_called = (r, w, x, t)
+ return ([13], [], [])
+
+ def __accept(self):
+ """
+ Hijack the accept method of the boss.
+
+ Notes down that it was called and stops the boss.
+ """
+ self.__accept_called = True
+ self.__boss.runnable = False
+
+ def test_srv_accept_called(self):
+ """
+ Test that the _srv_accept method of boss is called when the listening
+ socket is readable.
+ """
+ self.__boss.runnable = True
+ self.__boss._srv_socket = self.__FalseSocket(self)
+ self.__boss._srv_accept = self.__accept
+ self.__boss.ccs = self.__CCS()
+ bind10_src.select.select = self.__select_accept
+ self.__boss.run(2)
+ # It called the accept
+ self.assertTrue(self.__accept_called)
+ # And the select had the right parameters
+ self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
+
+ def test_srv_accept(self):
+ """
+ Test how the _srv_accept method works.
+ """
+ self.__boss._srv_socket = self.__FalseSocket(self)
+ self.__boss._srv_accept()
+ # After we accepted, a new socket is added there
+ socket = self.__boss._unix_sockets[13][0]
+ # The socket is properly stored there
+ self.assertTrue(isinstance(socket, self.__FalseSocket))
+ # And the buffer (yet empty) is there
+ self.assertEqual({13: (socket, b'')}, self.__boss._unix_sockets)
+
+ def __socket_data(self, socket):
+ self.__boss.runnable = False
+ self.__socket_data_called = socket
+
+ def test_socket_data(self):
+ """
+ Test that a socket that wants attention gets it.
+ """
+ self.__boss._srv_socket = self.__FalseSocket(self)
+ self.__boss._socket_data = self.__socket_data
+ self.__boss.ccs = self.__CCS()
+ self.__boss._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
+ self.__boss.runnable = True
+ bind10_src.select.select = self.__select_data
+ self.__boss.run(2)
+ self.assertEqual(13, self.__socket_data_called)
+ self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
+
+ def __prepare_data(self, data):
+ socket = self.__FalseSocket(self, 13)
+ self.__boss._unix_sockets = {13: (socket, b'')}
+ socket.data = data
+ self.__boss.socket_consumer_dead = self.__consumer_dead
+ self.__boss.socket_request_handler = self.__socket_request_handler
+ return socket
+
+ def __consumer_dead(self, socket):
+ self.__consumer_dead_called = socket
+
+ def __socket_request_handler(self, token, socket):
+ self.__socket_request_handler_called = (token, socket)
+
+ def test_socket_closed(self):
+ """
+ Test that a socket is removed and the socket_consumer_dead is called
+ when it is closed.
+ """
+ socket = self.__prepare_data(None)
+ self.__boss._socket_data(13)
+ self.assertEqual(socket, self.__consumer_dead_called)
+ self.assertEqual({}, self.__boss._unix_sockets)
+ self.assertTrue(socket.closed)
+
+ def test_socket_short(self):
+ """
+ Test that if there's not enough data to get the whole token, it is
+ kept there, but nothing is called.
+ """
+ socket = self.__prepare_data(b'tok')
+ self.__boss._socket_data(13)
+ self.assertEqual({13: (socket, b'tok')}, self.__boss._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertIsNone(self.__socket_request_handler_called)
+
+ def test_socket_continue(self):
+ """
+ Test that we call the token handling function when the whole token
+ comes. This test pretends to continue reading where the previous one
+ stopped.
+ """
+ socket = self.__prepare_data(b"en\nanothe")
+ # The data to finish
+ self.__boss._unix_sockets[13] = (socket, b'tok')
+ self.__boss._socket_data(13)
+ self.assertEqual({13: (socket, b'anothe')}, self.__boss._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertEqual((b'token', socket),
+ self.__socket_request_handler_called)
+
+ def test_broken_socket(self):
+ """
+ If the socket raises an exception during the read other than EAGAIN,
+ it is broken and we remove it.
+ """
+ sock = self.__prepare_data(socket.error(errno.ENOMEM,
+ "There's more memory available, but not for you"))
+ self.__boss._socket_data(13)
+ self.assertEqual(sock, self.__consumer_dead_called)
+ self.assertEqual({}, self.__boss._unix_sockets)
+ self.assertTrue(sock.closed)
+
if __name__ == '__main__':
# store os.environ for test_unchanged_environment
original_os_environ = copy.deepcopy(os.environ)
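
For reference, the shape of the get_socket and drop_socket exchanges that
the tests above exercise, written out as Python literals. The token and
path values simply mirror the test fixtures (the mocked cache builds a
token from the address and port); real tokens are opaque strings handed
out by the socket cache:

    # Command arguments and answers as seen in the tests above; the values
    # are test fixtures, not defaults.
    get_socket_args = {
        "address": "0.0.0.0",
        "port": 53,
        "protocol": "UDP",        # "UDP" or "TCP"
        "share_mode": "ANY",      # "ANY", "SAMEAPP" or "NO"
        "share_name": "app",
    }
    get_socket_ok = {"result": [0, {"token": "0.0.0.0:53",
                                    "path": "/socket/path"}]}
    get_socket_error = {"result": [1, "some error description"]}

    drop_socket_args = {"token": "token"}
    drop_socket_ok = {"result": [0]}
    drop_socket_missing = {"result": [1, "Missing token parameter"]}
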
diff --git a/src/bin/dhcp6/.gitignore b/src/bin/dhcp6/.gitignore
index 6a6060b..e4e8f2d 100644
--- a/src/bin/dhcp6/.gitignore
+++ b/src/bin/dhcp6/.gitignore
@@ -7,3 +7,4 @@ Makefile.in
b10-dhcp6
spec_config.h
spec_config.h.pre
+tests/dhcp6_unittests
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
index ba5afec..d5a969f 100644
--- a/src/bin/dhcp6/dhcp6_srv.cc
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -12,26 +12,32 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include "dhcp/dhcp6.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
-#include "dhcp6/dhcp6_srv.h"
-#include "dhcp/option6_ia.h"
-#include "dhcp/option6_iaaddr.h"
-#include "asiolink/io_address.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp6/dhcp6_srv.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+#include <asiolink/io_address.h>
+#include <exceptions/exceptions.h>
using namespace std;
using namespace isc;
using namespace isc::dhcp;
using namespace isc::asiolink;
-Dhcpv6Srv::Dhcpv6Srv() {
+Dhcpv6Srv::Dhcpv6Srv(uint16_t port) {
+
+//void Dhcpv6Srv::Dhcpv6Srv_impl(uint16_t port) {
cout << "Initialization" << endl;
- // first call to instance() will create IfaceMgr (it's a singleton)
- // it may throw something if things go wrong
+ // First call to instance() will create IfaceMgr (it's a singleton).
+ // It may throw something if things go wrong.
IfaceMgr::instance();
+ // Now try to open IPv6 sockets on detected interfaces.
+ IfaceMgr::instance().openSockets(port);
+
/// @todo: instantiate LeaseMgr here once it is implemented.
setServerID();
@@ -41,6 +47,8 @@ Dhcpv6Srv::Dhcpv6Srv() {
Dhcpv6Srv::~Dhcpv6Srv() {
cout << "DHCPv6 Srv shutdown." << endl;
+
+ IfaceMgr::instance().closeSockets();
}
bool
@@ -49,7 +57,7 @@ Dhcpv6Srv::run() {
boost::shared_ptr<Pkt6> query; // client's message
boost::shared_ptr<Pkt6> rsp; // server's response
- query = IfaceMgr::instance().receive();
+ query = IfaceMgr::instance().receive6();
if (query) {
if (!query->unpack()) {
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
index 4daef3a..bcc7818 100644
--- a/src/bin/dhcp6/dhcp6_srv.h
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -17,8 +17,9 @@
#include <boost/shared_ptr.hpp>
#include <boost/noncopyable.hpp>
-#include "dhcp/pkt6.h"
-#include "dhcp/option.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/option.h>
#include <iostream>
namespace isc {
@@ -41,10 +42,12 @@ public:
/// In particular, creates IfaceMgr that will be responsible for
/// network interaction. Will instantiate lease manager, and load
/// old or create new DUID.
- Dhcpv6Srv();
+ ///
+ /// @param port port on which all sockets will listen
+ Dhcpv6Srv(uint16_t port = DHCP6_SERVER_PORT);
/// @brief Destructor. Used during DHCPv6 service shutdown.
- ~Dhcpv6Srv();
+ virtual ~Dhcpv6Srv();
/// @brief Returns server-identifier option
///
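
The port parameter documented above is passed to
IfaceMgr::instance().openSockets(port), which, as the iface_mgr.cc diff
below shows, binds a socket for each detected interface address and joins
the All_DHCP_Relay_Agents_and_Servers multicast group. A rough,
self-contained Python illustration of that kind of DHCPv6 server socket
setup follows; it is not the IfaceMgr code, and the wildcard bind and
constant handling are simplifications:

    # Rough illustration of DHCPv6 server socket setup: bind the server
    # port (547) and join ff02::1:2 (All_DHCP_Relay_Agents_and_Servers)
    # on one interface. Not the actual IfaceMgr implementation.
    import socket
    import struct

    DHCP6_SERVER_PORT = 547
    ALL_DHCP_RELAY_AGENTS_AND_SERVERS = "ff02::1:2"

    def open_dhcp6_socket(ifname, port=DHCP6_SERVER_PORT):
        sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # The real code binds each detected address separately; binding
        # the wildcard address keeps this sketch short.
        sock.bind(("::", port))
        # struct ipv6_mreq: 16-byte group address plus interface index.
        mreq = socket.inet_pton(socket.AF_INET6,
                                ALL_DHCP_RELAY_AGENTS_AND_SERVERS)
        mreq += struct.pack("@I", socket.if_nametoindex(ifname))
        # Linux names this option IPV6_JOIN_GROUP; some platforms use
        # IPV6_ADD_MEMBERSHIP instead.
        sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
        return sock
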
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
index a96db07..de2b93c 100644
--- a/src/bin/dhcp6/iface_mgr.cc
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -18,9 +18,9 @@
#include <netinet/in.h>
#include <arpa/inet.h>
-#include "dhcp/dhcp6.h"
-#include "dhcp6/iface_mgr.h"
-#include "exceptions/exceptions.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp6/iface_mgr.h>
+#include <exceptions/exceptions.h>
using namespace std;
using namespace isc;
@@ -79,6 +79,30 @@ IfaceMgr::Iface::getPlainMac() const {
return (tmp.str());
}
+bool IfaceMgr::Iface::delAddress(const isc::asiolink::IOAddress& addr) {
+
+ // Let's delete all addresses that match. It really shouldn't matter
+ // whether we delete the first match or all of them, as the OS should allow
+ // a given address to be added to an interface only once. If the OS allows
+ // multiple instances of the same address, we are in deep trouble anyway.
+ size_t size = addrs_.size();
+ addrs_.erase(remove(addrs_.begin(), addrs_.end(), addr), addrs_.end());
+ return (addrs_.size() < size);
+}
+
+bool IfaceMgr::Iface::delSocket(uint16_t sockfd) {
+ list<SocketInfo>::iterator sock = sockets_.begin();
+ while (sock!=sockets_.end()) {
+ if (sock->sockfd_ == sockfd) {
+ close(sockfd);
+ sockets_.erase(sock);
+ return (true); //socket found
+ }
+ ++sock;
+ }
+ return (false); // socket not found
+}
+
IfaceMgr::IfaceMgr()
:control_buf_len_(CMSG_SPACE(sizeof(struct in6_pktinfo))),
control_buf_(new char[control_buf_len_])
@@ -95,9 +119,6 @@ IfaceMgr::IfaceMgr()
detectIfaces();
- if (!openSockets()) {
- isc_throw(Unexpected, "Failed to open/bind sockets.");
- }
} catch (const std::exception& ex) {
cout << "IfaceMgr creation failed:" << ex.what() << endl;
@@ -109,7 +130,23 @@ IfaceMgr::IfaceMgr()
}
}
+void IfaceMgr::closeSockets() {
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
+
+ for (SocketCollection::iterator sock = iface->sockets_.begin();
+ sock != iface->sockets_.end(); ++sock) {
+ cout << "Closing socket " << sock->sockfd_ << endl;
+ close(sock->sockfd_);
+ }
+ iface->sockets_.clear();
+ }
+
+}
+
IfaceMgr::~IfaceMgr() {
+ closeSockets();
+
// control_buf_ is deleted automatically (scoped_ptr)
control_buf_len_ = 0;
}
@@ -139,8 +176,8 @@ IfaceMgr::detectIfaces() {
Iface iface(ifaceName, if_nametoindex( ifaceName.c_str() ) );
IOAddress addr(linkLocal);
- iface.addrs_.push_back(addr);
- ifaces_.push_back(iface);
+ iface.addAddress(addr);
+ addInterface(iface);
interfaces.close();
} catch (const std::exception& ex) {
// TODO: deallocate whatever memory we used
@@ -154,51 +191,55 @@ IfaceMgr::detectIfaces() {
}
}
-bool
-IfaceMgr::openSockets() {
- int sock;
+void
+IfaceMgr::openSockets(uint16_t port) {
+ int sock1, sock2;
+
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
- for (IfaceLst::iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
+ AddressCollection addrs = iface->getAddresses();
- for (Addr6Lst::iterator addr=iface->addrs_.begin();
- addr!=iface->addrs_.end();
+ for (AddressCollection::iterator addr = addrs.begin();
+ addr != addrs.end();
++addr) {
- sock = openSocket(iface->name_, *addr,
- DHCP6_SERVER_PORT);
- if (sock<0) {
- cout << "Failed to open unicast socket." << endl;
- return (false);
+ sock1 = openSocket(iface->getName(), *addr, port);
+ if (sock1 < 0) {
+ isc_throw(Unexpected, "Failed to open unicast socket on "
+ << " interface " << iface->getFullName());
}
- sendsock_ = sock;
-
- sock = openSocket(iface->name_,
- IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
- DHCP6_SERVER_PORT);
- if (sock<0) {
- cout << "Failed to open multicast socket." << endl;
- close(sendsock_);
- return (false);
+
+ if ( !joinMcast(sock1, iface->getName(),
+ string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+ close(sock1);
+ isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+ << " multicast group.");
+ }
+
+ // this doesn't work too well on NetBSD
+ sock2 = openSocket(iface->getName(),
+ IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+ port);
+ if (sock2 < 0) {
+ iface->delSocket(sock1); // close the unicast socket opened above
+ isc_throw(Unexpected, "Failed to open multicast socket on interface "
+ << iface->getFullName());
}
- recvsock_ = sock;
}
}
-
- return (true);
}
void
IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
- for (IfaceLst::const_iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
+ for (IfaceCollection::const_iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
out << "Detected interface " << iface->getFullName() << endl;
- out << " " << iface->addrs_.size() << " addr(s):" << endl;
- for (Addr6Lst::const_iterator addr=iface->addrs_.begin();
- addr != iface->addrs_.end();
- ++addr) {
+ out << " " << iface->getAddresses().size() << " addr(s):" << endl;
+ const AddressCollection addrs = iface->getAddresses();
+
+ for (AddressCollection::const_iterator addr = addrs.begin();
+ addr != addrs.end(); ++addr) {
out << " " << addr->toText() << endl;
}
out << " mac: " << iface->getPlainMac() << endl;
@@ -207,11 +248,11 @@ IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
IfaceMgr::Iface*
IfaceMgr::getIface(int ifindex) {
- for (IfaceLst::iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
- if (iface->ifindex_ == ifindex)
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
+ if (iface->getIndex() == ifindex) {
return (&(*iface));
+ }
}
return (NULL); // not found
@@ -219,29 +260,88 @@ IfaceMgr::getIface(int ifindex) {
IfaceMgr::Iface*
IfaceMgr::getIface(const std::string& ifname) {
- for (IfaceLst::iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
- if (iface->name_ == ifname)
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
+ if (iface->getName() == ifname) {
return (&(*iface));
+ }
}
return (NULL); // not found
}
int
-IfaceMgr::openSocket(const std::string& ifname,
- const IOAddress& addr,
+IfaceMgr::openSocket(const std::string& ifname, const IOAddress& addr,
int port) {
- struct sockaddr_in6 addr6;
+ Iface* iface = getIface(ifname);
+ if (!iface) {
+ isc_throw(BadValue, "There is no " << ifname << " interface present.");
+ }
+ switch (addr.getFamily()) {
+ case AF_INET:
+ return (openSocket4(*iface, addr, port));
+ case AF_INET6:
+ return (openSocket6(*iface, addr, port));
+ default:
+ isc_throw(BadValue, "Failed to detect family of address: "
+ << addr.toText());
+ }
+}
+
+int
+IfaceMgr::openSocket4(Iface& iface, const IOAddress& addr, int port) {
+
+ cout << "Creating UDP4 socket on " << iface.getFullName()
+ << " " << addr.toText() << "/port=" << port << endl;
+
+ struct sockaddr_in addr4;
+ memset(&addr4, 0, sizeof(addr4));
+ addr4.sin_family = AF_INET;
+ addr4.sin_port = htons(port);
+ memcpy(&addr4.sin_addr, addr.getAddress().to_v4().to_bytes().data(),
+ sizeof(addr4.sin_addr));
+
+ int sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ isc_throw(Unexpected, "Failed to create UDP6 socket.");
+ }
+
+ if (bind(sock, (struct sockaddr *)&addr4, sizeof(addr4)) < 0) {
+ close(sock);
+ isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+ << "/port=" << port);
+ }
+
+ // If there is no support for IP_PKTINFO, we are really out of luck.
+ // It will be difficult to understand where this packet came from.
+#if defined(IP_PKTINFO)
+ int flag = 1;
+ if (setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &flag, sizeof(flag)) != 0) {
+ close(sock);
+ isc_throw(Unexpected, "setsockopt: IP_PKTINFO: failed.");
+ }
+#endif
+
+ cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
+ addr.toText() << "/port=" << port << endl;
- cout << "Creating socket on " << ifname << "/" << addr.toText()
- << "/port=" << port << endl;
+ iface.addSocket(SocketInfo(sock, addr, port));
+ return (sock);
+}
+
+int
+IfaceMgr::openSocket6(Iface& iface, const IOAddress& addr, int port) {
+
+ cout << "Creating UDP6 socket on " << iface.getFullName()
+ << " " << addr.toText() << "/port=" << port << endl;
+
+ struct sockaddr_in6 addr6;
memset(&addr6, 0, sizeof(addr6));
addr6.sin6_family = AF_INET6;
addr6.sin6_port = htons(port);
- addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
+ if (addr.toText() != "::1")
+ addr6.sin6_scope_id = if_nametoindex(iface.getName().c_str());
memcpy(&addr6.sin6_addr,
addr.getAddress().to_v6().to_bytes().data(),
@@ -255,61 +355,58 @@ IfaceMgr::openSocket(const std::string& ifname,
// make a socket
int sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (sock < 0) {
- cout << "Failed to create UDP6 socket." << endl;
- return (-1);
+ isc_throw(Unexpected, "Failed to create UDP6 socket.");
}
- /* Set the REUSEADDR option so that we don't fail to start if
- we're being restarted. */
+ // Set the REUSEADDR option so that we don't fail to start if
+ // we're being restarted.
int flag = 1;
if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(char *)&flag, sizeof(flag)) < 0) {
- cout << "Can't set SO_REUSEADDR option on dhcpv6 socket." << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "Can't set SO_REUSEADDR option on dhcpv6 socket.");
}
if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
- cout << "Failed to bind socket " << sock << " to " << addr.toText()
- << "/port=" << port << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+ << "/port=" << port);
}
#ifdef IPV6_RECVPKTINFO
- /* RFC3542 - a new way */
+ // RFC3542 - a new way
if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
&flag, sizeof(flag)) != 0) {
- cout << "setsockopt: IPV6_RECVPKTINFO failed." << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "setsockopt: IPV6_RECVPKTINFO failed.");
}
#else
- /* RFC2292 - an old way */
+ // RFC2292 - an old way
if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
&flag, sizeof(flag)) != 0) {
- cout << "setsockopt: IPV6_PKTINFO: failed." << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "setsockopt: IPV6_PKTINFO: failed.");
}
#endif
// multicast stuff
-
if (addr.getAddress().to_v6().is_multicast()) {
// both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
// are link and site-scoped, so there is no sense to join those groups
// with global addresses.
- if ( !joinMcast( sock, ifname,
+ if ( !joinMcast( sock, iface.getName(),
string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
close(sock);
- return (-1);
+ isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+ << " multicast group.");
}
}
- cout << "Created socket " << sock << " on " << ifname << "/" <<
+ cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
addr.toText() << "/port=" << port << endl;
+ iface.addSocket(SocketInfo(sock, addr, port));
+
return (sock);
}
@@ -345,16 +442,19 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
int result;
struct in6_pktinfo *pktinfo;
struct cmsghdr *cmsg;
+
+ Iface* iface = getIface(pkt->iface_);
+ if (!iface) {
+ isc_throw(BadValue, "Unable to send Pkt6. Invalid interface ("
+ << pkt->iface_ << ") specified.");
+ }
+
memset(&control_buf_[0], 0, control_buf_len_);
- /*
- * Initialize our message header structure.
- */
+ // Initialize our message header structure.
memset(&m, 0, sizeof(m));
- /*
- * Set the target address we're sending to.
- */
+ // Set the target address we're sending to.
sockaddr_in6 to;
memset(&to, 0, sizeof(to));
to.sin6_family = AF_INET6;
@@ -367,24 +467,20 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
m.msg_name = &to;
m.msg_namelen = sizeof(to);
- /*
- * Set the data buffer we're sending. (Using this wacky
- * "scatter-gather" stuff... we only have a single chunk
- * of data to send, so we declare a single vector entry.)
- */
+ // Set the data buffer we're sending. (Using this wacky
+ // "scatter-gather" stuff... we only have a single chunk
+ // of data to send, so we declare a single vector entry.)
v.iov_base = (char *) &pkt->data_[0];
v.iov_len = pkt->data_len_;
m.msg_iov = &v;
m.msg_iovlen = 1;
- /*
- * Setting the interface is a bit more involved.
- *
- * We have to create a "control message", and set that to
- * define the IPv6 packet information. We could set the
- * source address if we wanted, but we can safely let the
- * kernel decide what that should be.
- */
+ // Setting the interface is a bit more involved.
+ //
+ // We have to create a "control message", and set that to
+ // define the IPv6 packet information. We could set the
+ // source address if we wanted, but we can safely let the
+ // kernel decide what that should be.
m.msg_control = &control_buf_[0];
m.msg_controllen = control_buf_len_;
cmsg = CMSG_FIRSTHDR(&m);
@@ -396,14 +492,12 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
pktinfo->ipi6_ifindex = pkt->ifindex_;
m.msg_controllen = cmsg->cmsg_len;
- result = sendmsg(sendsock_, &m, 0);
+ result = sendmsg(getSocket(*pkt), &m, 0);
if (result < 0) {
cout << "Send packet failed." << endl;
}
- cout << "Sent " << result << " bytes." << endl;
-
- cout << "Sent " << pkt->data_len_ << " bytes over "
- << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+ cout << "Sent " << pkt->data_len_ << " bytes over socket " << getSocket(*pkt)
+ << " on " << iface->getFullName() << " interface: "
<< " dst=" << pkt->remote_addr_.toText()
<< ", src=" << pkt->local_addr_.toText()
<< endl;
@@ -411,8 +505,24 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
return (result);
}
+bool
+IfaceMgr::send(boost::shared_ptr<Pkt4>& )
+{
+ /// TODO: Implement this (ticket #1240)
+ isc_throw(NotImplemented, "Pkt4 send not implemented yet.");
+}
+
+
+boost::shared_ptr<Pkt4>
+IfaceMgr::receive4() {
+ isc_throw(NotImplemented, "Pkt4 reception not implemented yet.");
+
+ // TODO: To be implemented (ticket #1239)
+ return (boost::shared_ptr<Pkt4>()); // NULL
+}
+
boost::shared_ptr<Pkt6>
-IfaceMgr::receive() {
+IfaceMgr::receive6() {
struct msghdr m;
struct iovec v;
int result;
@@ -442,49 +552,66 @@ IfaceMgr::receive() {
memset(&from, 0, sizeof(from));
memset(&to_addr, 0, sizeof(to_addr));
- /*
- * Initialize our message header structure.
- */
+ // Initialize our message header structure.
memset(&m, 0, sizeof(m));
- /*
- * Point so we can get the from address.
- */
+ // Point so we can get the from address.
m.msg_name = &from;
m.msg_namelen = sizeof(from);
- /*
- * Set the data buffer we're receiving. (Using this wacky
- * "scatter-gather" stuff... but we that doesn't really make
- * sense for us, so we use a single vector entry.)
- */
+ // Set the data buffer we're receiving. (Using this wacky
+ // "scatter-gather" stuff... but we that doesn't really make
+ // sense for us, so we use a single vector entry.)
v.iov_base = (void*)&pkt->data_[0];
v.iov_len = pkt->data_len_;
m.msg_iov = &v;
m.msg_iovlen = 1;
- /*
- * Getting the interface is a bit more involved.
- *
- * We set up some space for a "control message". We have
- * previously asked the kernel to give us packet
- * information (when we initialized the interface), so we
- * should get the destination address from that.
- */
+ // Getting the interface is a bit more involved.
+ //
+ // We set up some space for a "control message". We have
+ // previously asked the kernel to give us packet
+ // information (when we initialized the interface), so we
+ // should get the destination address from that.
m.msg_control = &control_buf_[0];
m.msg_controllen = control_buf_len_;
- result = recvmsg(recvsock_, &m, 0);
+ /// TODO: Need to move to select() and poll over
+ /// all available sockets. For now, we just take the
+ /// first interface and use the first socket from it.
+ IfaceCollection::const_iterator iface = ifaces_.begin();
+ if (iface == ifaces_.end()) {
+ isc_throw(Unexpected, "No interfaces detected. Can't receive anything.");
+ }
+ SocketCollection::const_iterator s = iface->sockets_.begin();
+ const SocketInfo* candidate = 0;
+ while (s != iface->sockets_.end()) {
+ if (s->addr_.getAddress().to_v6().is_multicast()) {
+ candidate = &(*s);
+ break;
+ }
+ if (!candidate) {
+ candidate = &(*s); // it's not multicast, but it's better than none
+ }
+ ++s;
+ }
+ if (!candidate) {
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any sockets open.");
+ }
+
+ cout << "Trying to receive over socket " << candidate->sockfd_ << " bound to "
+ << candidate->addr_.toText() << "/port=" << candidate->port_ << " on "
+ << iface->getFullName() << endl;
+ result = recvmsg(candidate->sockfd_, &m, 0);
if (result >= 0) {
- /*
- * If we did read successfully, then we need to loop
- * through the control messages we received and
- * find the one with our destination address.
- *
- * We also keep a flag to see if we found it. If we
- * didn't, then we consider this to be an error.
- */
+ // If we did read successfully, then we need to loop
+ // through the control messages we received and
+ // find the one with our destination address.
+ //
+ // We also keep a flag to see if we found it. If we
+ // didn't, then we consider this to be an error.
int found_pktinfo = 0;
cmsg = CMSG_FIRSTHDR(&m);
while (cmsg != NULL) {
@@ -520,7 +647,7 @@ IfaceMgr::receive() {
Iface* received = getIface(pkt->ifindex_);
if (received) {
- pkt->iface_ = received->name_;
+ pkt->iface_ = received->getName();
} else {
cout << "Received packet over unknown interface (ifindex="
<< pkt->ifindex_ << ")." << endl;
@@ -539,4 +666,60 @@ IfaceMgr::receive() {
return (pkt);
}
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt6 const& pkt) {
+ Iface* iface = getIface(pkt.iface_);
+ if (!iface) {
+ isc_throw(BadValue, "Tried to find socket for non-existent interface "
+ << pkt.iface_);
+ }
+
+ SocketCollection::const_iterator s;
+ for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+ if (s->family_ != AF_INET6) {
+ // don't use IPv4 sockets
+ continue;
+ }
+ if (s->addr_.getAddress().to_v6().is_multicast()) {
+ // don't use IPv6 sockets bound to multicast address
+ continue;
+ }
+ /// TODO: Add more checks here later. If remote address is
+ /// not link-local, we can't use link local bound socket
+ /// to send data.
+
+ return (s->sockfd_);
+ }
+
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any suitable IPv6 sockets open.");
+}
+
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt4 const& pkt) {
+ Iface* iface = getIface(pkt.getIface());
+ if (!iface) {
+ isc_throw(BadValue, "Tried to find socket for non-existent interface "
+ << pkt.getIface());
+ }
+
+ SocketCollection::const_iterator s;
+ for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+ if (s->family_ != AF_INET) {
+ // don't use IPv6 sockets
+ continue;
+ }
+ /// TODO: Add more checks here later. If remote address is
+ /// not link-local, we can't use link local bound socket
+ /// to send data.
+
+ return (s->sockfd_);
+ }
+
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any suitable IPv4 sockets open.");
+}
+
+
+
}
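To make the new send path easier to review, here is a minimal, hypothetical caller sketch (not part of the patch). The interface name "eth0", the index 2, the peer address, and the port value are made-up illustrations; send() resolves the outgoing socket via getSocket(*pkt) as implemented above.

// Sketch only -- assumed caller code. IfaceMgr::openSockets() must have been called first.
using namespace isc::dhcp;
using isc::asiolink::IOAddress;

boost::shared_ptr<Pkt6> pkt(new Pkt6(128));    // 128-byte dummy payload, as in the unit tests
pkt->iface_ = "eth0";                          // send() looks up the Iface by this name
pkt->ifindex_ = 2;                             // copied into the IPV6_PKTINFO control message
pkt->remote_addr_ = IOAddress("fe80::1");      // destination of the sendmsg() call
pkt->remote_port_ = 547;                       // assumed field; the unit tests below read it
IfaceMgr::instance().send(pkt);                // transmits over getSocket(*pkt)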
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
index 249c7ef..0aa2592 100644
--- a/src/bin/dhcp6/iface_mgr.h
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -19,8 +19,9 @@
#include <boost/shared_ptr.hpp>
#include <boost/scoped_array.hpp>
#include <boost/noncopyable.hpp>
-#include "asiolink/io_address.h"
-#include "dhcp/pkt6.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/pkt6.h>
namespace isc {
@@ -34,26 +35,119 @@ namespace dhcp {
class IfaceMgr : public boost::noncopyable {
public:
/// type that defines list of addresses
- typedef std::list<isc::asiolink::IOAddress> Addr6Lst;
+ typedef std::vector<isc::asiolink::IOAddress> AddressCollection;
/// maximum MAC address length (Infiniband uses 20 bytes)
static const unsigned int MAX_MAC_LEN = 20;
+ /// Holds information about a socket.
+ struct SocketInfo {
+ uint16_t sockfd_; ///< socket descriptor
+ isc::asiolink::IOAddress addr_; ///< bound address
+ uint16_t port_; ///< socket port
+ uint16_t family_; ///< IPv4 or IPv6
+
+ /// @brief SocketInfo constructor.
+ ///
+ /// @param sockfd socket descriptor
+ /// @param addr an address the socket is bound to
+ /// @param port a port the socket is bound to
+ SocketInfo(uint16_t sockfd, const isc::asiolink::IOAddress& addr,
+ uint16_t port)
+ :sockfd_(sockfd), addr_(addr), port_(port), family_(addr.getFamily()) { }
+ };
+
+ /// type that holds a list of SocketInfo structures
+ typedef std::list<SocketInfo> SocketCollection;
+
/// @brief represents a single network interface
///
/// Iface structure represents network interface with all useful
/// information, like name, interface index, MAC address and
/// list of assigned addresses
- struct Iface {
- /// constructor
+ class Iface {
+ public:
+ /// @brief Iface constructor.
+ ///
+ /// Creates Iface object that represents network interface.
+ ///
+ /// @param name name of the interface
+ /// @param ifindex interface index (unique integer identifier)
Iface(const std::string& name, int ifindex);
- /// returns full interface name in format ifname/ifindex
+ /// @brief Returns full interface name as "ifname/ifindex" string.
+ ///
+ /// @return string with interface name
std::string getFullName() const;
- /// returns link-layer address a plain text
+ /// @brief Returns link-layer address as plain text.
+ ///
+ /// @return MAC address as a plain text (string)
std::string getPlainMac() const;
+ /// @brief Returns interface index.
+ ///
+ /// @return interface index
+ uint16_t getIndex() const { return ifindex_; }
+
+ /// @brief Returns interface name.
+ ///
+ /// @return interface name
+ std::string getName() const { return name_; }
+
+ /// @brief Returns all addresses configured on an interface.
+ ///
+ /// Care should be taken not to use this collection after the Iface object
+ /// ceases to exist. That is easy in most cases, as Iface objects are
+ /// created by IfaceMgr, which is a singleton and is expected to be
+ /// available at all times. We may revisit this if we ever decide to
+ /// implement dynamic interface detection, but such a fancy feature would
+ /// mostly be useful for clients with wifi/vpn/virtual interfaces.
+ ///
+ /// @return collection of addresses
+ const AddressCollection& getAddresses() const { return addrs_; }
+
+ /// @brief Adds an address to an interface.
+ ///
+ /// This only adds an address to the collection; it does not physically
+ /// configure the address on the actual network interface.
+ ///
+ /// @param addr address to be added
+ void addAddress(const isc::asiolink::IOAddress& addr) {
+ addrs_.push_back(addr);
+ }
+
+ /// @brief Deletes an address from an interface.
+ ///
+ /// This only deletes the address from the collection; it does not physically
+ /// remove the address configuration from the actual network interface.
+ ///
+ /// @param addr address to be removed.
+ ///
+ /// @return true if removal was successful (address was in collection),
+ /// false otherwise
+ bool delAddress(const isc::asiolink::IOAddress& addr);
+
+ /// @brief Adds socket descriptor to an interface.
+ ///
+ /// @param sock SocketInfo structure that describes the socket.
+ void addSocket(const SocketInfo& sock)
+ { sockets_.push_back(sock); }
+
+ /// @brief Closes socket.
+ ///
+ /// Closes socket and removes corresponding SocketInfo structure
+ /// from an interface.
+ ///
+ /// @param sockfd socket descriptor to be closed/removed.
+ /// @return true if there was such a socket, false otherwise
+ bool delSocket(uint16_t sockfd);
+
+ /// collection of sockets open on this interface
+ /// TODO: this should be protected
+ SocketCollection sockets_;
+
+ protected:
/// network interface name
std::string name_;
@@ -61,19 +155,13 @@ public:
int ifindex_;
/// list of assigned addresses
- Addr6Lst addrs_;
+ AddressCollection addrs_;
/// link-layer address
uint8_t mac_[MAX_MAC_LEN];
/// length of link-layer address (usually 6)
int mac_len_;
-
- /// socket used to sending data
- int sendsock_;
-
- /// socket used for receiving data
- int recvsock_;
};
// TODO performance improvement: we may change this into
@@ -81,7 +169,7 @@ public:
// also hide it (make it public make tests easier for now)
/// type that holds a list of interfaces
- typedef std::list<Iface> IfaceLst;
+ typedef std::list<Iface> IfaceCollection;
/// IfaceMgr is a singleton class. This method returns reference
/// to its sole instance.
@@ -109,27 +197,63 @@ public:
Iface*
getIface(const std::string& ifname);
+ /// @brief Return most suitable socket for transmitting specified IPv6 packet.
+ ///
+ /// This method takes Pkt6 (see overloaded implementation that takes
+ /// Pkt4) and chooses appropriate socket to send it. This method
+ /// may throw BadValue if specified packet does not have outbound
+ /// interface specified, no such interface exists, or specified
+ /// interface does not have any appropriate sockets open.
+ ///
+ /// @param pkt a packet to be transmitted
+ ///
+ /// @return a socket descriptor
+ uint16_t getSocket(const isc::dhcp::Pkt6& pkt);
+
+ /// @brief Return most suitable socket for transmitting specified IPv4 packet.
+ ///
+ /// This method takes Pkt4 (see overloaded implementation that takes
+ /// Pkt6) and chooses appropriate socket to send it. This method
+ /// may throw BadValue if specified packet does not have outbound
+ /// interface specified, no such interface exists, or specified
+ /// interface does not have any appropriate sockets open.
+ ///
+ /// @param pkt a packet to be transmitted
+ ///
+ /// @return a socket descriptor
+ uint16_t getSocket(const isc::dhcp::Pkt4& pkt);
+
/// debugging method that prints out all available interfaces
///
/// @param out specifies stream to print list of interfaces to
void
printIfaces(std::ostream& out = std::cout);
- /// @brief Sends a packet.
+ /// @brief Sends an IPv6 packet.
///
- /// Sends a packet. All parameters for actual transmission are specified in
+ /// Sends an IPv6 packet. All parameters for actual transmission are specified in
/// Pkt6 structure itself. That includes destination address, src/dst port
/// and interface over which data will be sent.
///
/// @param pkt packet to be sent
///
/// @return true if sending was successful
- bool
- send(boost::shared_ptr<Pkt6>& pkt);
+ bool send(boost::shared_ptr<Pkt6>& pkt);
- /// @brief Tries to receive packet over open sockets.
+ /// @brief Sends an IPv4 packet.
///
- /// Attempts to receive a single packet of any of the open sockets.
+ /// Sends an IPv4 packet. All parameters for actual transmission are specified
+ /// in Pkt4 structure itself. That includes destination address, src/dst
+ /// port and interface over which data will be sent.
+ ///
+ /// @param pkt a packet to be sent
+ ///
+ /// @return true if sending was successful
+ bool send(boost::shared_ptr<Pkt4>& pkt);
+
+ /// @brief Tries to receive IPv6 packet over open IPv6 sockets.
+ ///
+ /// Attempts to receive a single IPv6 packet over any of the open IPv6 sockets.
/// If reception is successful and all information about its sender
/// are obtained, Pkt6 object is created and returned.
///
@@ -138,7 +262,49 @@ public:
/// (e.g. remove expired leases)
///
/// @return Pkt6 object representing received packet (or NULL)
- boost::shared_ptr<Pkt6> receive();
+ boost::shared_ptr<Pkt6> receive6();
+
+ /// @brief Tries to receive IPv4 packet over open IPv4 sockets.
+ ///
+ /// Attempts to receive a single IPv4 packet over any of the open IPv4 sockets.
+ /// If reception is successful and all information about its sender
+ /// are obtained, Pkt4 object is created and returned.
+ ///
+ /// TODO Start using select() and add timeout to be able
+ /// to not wait infinitely, but rather do something useful
+ /// (e.g. remove expired leases)
+ ///
+ /// @return Pkt4 object representing received packet (or NULL)
+ boost::shared_ptr<Pkt4> receive4();
+
+ /// Opens UDP/IP socket and binds it to address, interface and port.
+ ///
+ /// Specific type of socket (UDP/IPv4 or UDP/IPv6) depends on passed addr
+ /// family.
+ ///
+ /// @param ifname name of the interface
+ /// @param addr address to be bound.
+ /// @param port UDP port.
+ ///
+ /// Method will throw if socket creation, socket binding or multicast
+ /// join fails.
+ ///
+ /// @return socket descriptor, if socket creation, binding and multicast
+ /// group join were all successful.
+ int openSocket(const std::string& ifname,
+ const isc::asiolink::IOAddress& addr, int port);
+
+ /// Opens IPv6 sockets on detected interfaces.
+ ///
+ /// Will throw exception if socket creation fails.
+ ///
+ /// @param port specifies port number (usually DHCP6_SERVER_PORT)
+ void openSockets(uint16_t port);
+
+
+ /// @brief Closes all open sockets.
+ ///
+ /// Used in the destructor, but also by the DHCPv4 and DHCPv6 server classes.
+ void closeSockets();
// don't use private, we need derived classes in tests
protected:
@@ -146,11 +312,44 @@ protected:
/// @brief Protected constructor.
///
/// Protected constructor. This is a singleton class. We don't want
- /// anyone to create instances of IfaceMgr. Use instance() method
+ /// anyone to create instances of IfaceMgr. Use instance() method instead.
IfaceMgr();
~IfaceMgr();
+ /// @brief Opens IPv4 socket.
+ ///
+ /// Please do not use this method directly. Use openSocket instead.
+ ///
+ /// This method may throw exception if socket creation fails.
+ ///
+ /// @param iface reference to interface structure.
+ /// @param addr an address the created socket should be bound to
+ /// @param port a port that created socket should be bound to
+ ///
+ /// @return socket descriptor
+ int openSocket4(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+ /// @brief Opens IPv6 socket.
+ ///
+ /// Please do not use this method directly. Use openSocket instead.
+ ///
+ /// This method may throw exception if socket creation fails.
+ ///
+ /// @param iface reference to interface structure.
+ /// @param addr an address the created socket should be bound to
+ /// @param port a port that created socket should be bound to
+ ///
+ /// @return socket descriptor
+ int openSocket6(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+ /// @brief Adds an interface to list of known interfaces.
+ ///
+ /// @param iface reference to Iface object.
+ void addInterface(const Iface& iface) {
+ ifaces_.push_back(iface);
+ }
+
/// @brief Detects network interfaces.
///
/// This method will eventually detect available interfaces. For now
@@ -159,24 +358,11 @@ protected:
void
detectIfaces();
- ///
- /// Opens UDP/IPv6 socket and binds it to address, interface and port.
- ///
- /// @param ifname name of the interface
- /// @param addr address to be bound.
- /// @param port UDP port.
- ///
- /// @return socket descriptor, if socket creation, binding and multicast
- /// group join were all successful. -1 otherwise.
- int openSocket(const std::string& ifname,
- const isc::asiolink::IOAddress& addr,
- int port);
-
// TODO: having 2 maps (ifindex->iface and ifname->iface would)
// probably be better for performance reasons
/// List of available interfaces
- IfaceLst ifaces_;
+ IfaceCollection ifaces_;
/// a pointer to a sole instance of this class (a singleton)
static IfaceMgr * instance_;
@@ -184,8 +370,9 @@ protected:
// TODO: Also keep this interface on Iface once interface detection
// is implemented. We may need it e.g. to close all sockets on
// specific interface
- int recvsock_; // TODO: should be fd_set eventually, but we have only
- int sendsock_; // 2 sockets for now. Will do for until next release
+ //int recvsock_; // TODO: should be fd_set eventually, but we have only
+ //int sendsock_; // 2 sockets for now. Will do for until next release
+
// we can't use the same socket, as receiving socket
// is bound to multicast address. And we all know what happens
// to people who try to use multicast as source address.
@@ -197,9 +384,6 @@ protected:
boost::scoped_array<char> control_buf_;
private:
- /// Opens sockets on detected interfaces.
- bool
- openSockets();
/// creates a single instance of this class (a singleton implementation)
static void
@@ -221,6 +405,7 @@ private:
bool
joinMcast(int sock, const std::string& ifname,
const std::string& mcast);
+
};
}; // namespace isc::dhcp
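Putting the declarations above together, a receive-side sketch (hypothetical, not part of the patch) of how a server is expected to drive the manager. The port offset is arbitrary, error handling is trimmed, and the commented send step only names fields that appear in the diff above.

// Sketch only -- assumed caller code built on the interface declared above.
// DHCP6_SERVER_PORT is defined in <dhcp/dhcp6.h>.
using namespace isc::dhcp;

IfaceMgr& mgr = IfaceMgr::instance();
mgr.openSockets(DHCP6_SERVER_PORT + 10000);     // throws on bind or multicast-join failure

boost::shared_ptr<Pkt6> query = mgr.receive6(); // blocking; first socket only for now (see TODO)
if (query && query->unpack()) {
    // ... build a response Pkt6, set iface_/ifindex_/remote_addr_, then mgr.send(rsp) ...
}

mgr.closeSockets();                             // also done automatically in ~IfaceMgr()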
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index 985368e..f37194c 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -25,8 +25,6 @@ check-local:
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
AM_CPPFLAGS += -I$(top_srcdir)/src/bin
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
@@ -57,8 +55,8 @@ dhcp6_unittests_LDADD = $(GTEST_LDADD)
dhcp6_unittests_LDADD += $(SQLITE_LIBS)
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index 72e48e4..50f37af 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -34,7 +34,7 @@ namespace test {
class NakedDhcpv6Srv: public Dhcpv6Srv {
// "naked" Interface Manager, exposes internal fields
public:
- NakedDhcpv6Srv() { }
+ NakedDhcpv6Srv():Dhcpv6Srv(DHCP6_SERVER_PORT + 10000) { }
boost::shared_ptr<Pkt6>
processSolicit(boost::shared_ptr<Pkt6>& request) {
@@ -53,30 +53,27 @@ public:
};
TEST_F(Dhcpv6SrvTest, basic) {
- // there's almost no code now. What's there provides echo capability
- // that is just a proof of concept and will be removed soon
- // No need to thoroughly test it
-
// srv has stubbed interface detection. It will read
// interfaces.txt instead. It will pretend to have detected
// fe80::1234 link-local address on eth0 interface. Obviously
// an attempt to bind this socket will fail.
- EXPECT_NO_THROW( {
- Dhcpv6Srv * srv = new Dhcpv6Srv();
-
- delete srv;
- });
+ Dhcpv6Srv* srv = 0;
+ ASSERT_NO_THROW( {
+ // open an unprivileged port
+ srv = new Dhcpv6Srv(DHCP6_SERVER_PORT + 10000);
+ });
+ delete srv;
}
TEST_F(Dhcpv6SrvTest, Solicit_basic) {
NakedDhcpv6Srv * srv = 0;
- EXPECT_NO_THROW( srv = new NakedDhcpv6Srv(); );
+ ASSERT_NO_THROW( srv = new NakedDhcpv6Srv(); );
// a dummy content for client-id
boost::shared_array<uint8_t> clntDuid(new uint8_t[32]);
- for (int i=0; i<32; i++)
- clntDuid[i] = 100+i;
+ for (int i = 0; i < 32; i++)
+ clntDuid[i] = 100 + i;
boost::shared_ptr<Pkt6> sol =
boost::shared_ptr<Pkt6>(new Pkt6(DHCPV6_SOLICIT,
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
index f126e6a..0c54780 100644
--- a/src/bin/dhcp6/tests/iface_mgr_unittest.cc
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -20,9 +20,10 @@
#include <arpa/inet.h>
#include <gtest/gtest.h>
-#include "io_address.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp/dhcp4.h>
using namespace std;
using namespace isc;
@@ -39,16 +40,7 @@ class NakedIfaceMgr: public IfaceMgr {
// "naked" Interface Manager, exposes internal fields
public:
NakedIfaceMgr() { }
- IfaceLst & getIfacesLst() { return ifaces_; }
- void setSendSock(int sock) { sendsock_ = sock; }
- void setRecvSock(int sock) { recvsock_ = sock; }
-
- int openSocket(const std::string& ifname,
- const isc::asiolink::IOAddress& addr,
- int port) {
- return IfaceMgr::openSocket(ifname, addr, port);
- }
-
+ IfaceCollection & getIfacesLst() { return ifaces_; }
};
// dummy class for now, but this will be expanded when needed
@@ -56,6 +48,13 @@ class IfaceMgrTest : public ::testing::Test {
public:
IfaceMgrTest() {
}
+
+ void createLoInterfacesTxt() {
+ unlink(INTERFACE_FILE);
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+ fakeifaces << LOOPBACK << " ::1";
+ fakeifaces.close();
+ }
};
// We need some known interface to work reliably. Loopback interface
@@ -109,6 +108,7 @@ TEST_F(IfaceMgrTest, dhcp6Sniffer) {
while (true) {
pkt = ifacemgr->receive();
+ cout << "// this code is autogenerated. Do NOT edit." << endl;
cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
cout << " Pkt6* pkt;" << endl;
@@ -183,10 +183,10 @@ TEST_F(IfaceMgrTest, getIface) {
cout << "There are " << ifacemgr->getIfacesLst().size()
<< " interfaces." << endl;
- for (IfaceMgr::IfaceLst::iterator iface=ifacemgr->getIfacesLst().begin();
+ for (IfaceMgr::IfaceCollection::iterator iface=ifacemgr->getIfacesLst().begin();
iface != ifacemgr->getIfacesLst().end();
++iface) {
- cout << " " << iface->name_ << "/" << iface->ifindex_ << endl;
+ cout << " " << iface->getFullName() << endl;
}
@@ -195,15 +195,15 @@ TEST_F(IfaceMgrTest, getIface) {
// ASSERT_NE(NULL, tmp); is not supported. hmmmm.
ASSERT_TRUE( tmp != NULL );
- EXPECT_STREQ( "en3", tmp->name_.c_str() );
- EXPECT_EQ(5, tmp->ifindex_);
+ EXPECT_EQ( "en3", tmp->getName() );
+ EXPECT_EQ(5, tmp->getIndex());
// check that interface can be retrieved by name
tmp = ifacemgr->getIface("lo1");
ASSERT_TRUE( tmp != NULL );
- EXPECT_STREQ( "lo1", tmp->name_.c_str() );
- EXPECT_EQ(1, tmp->ifindex_);
+ EXPECT_EQ( "lo1", tmp->getName() );
+ EXPECT_EQ(1, tmp->getIndex());
// check that non-existing interfaces are not returned
EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi0") );
@@ -231,58 +231,51 @@ TEST_F(IfaceMgrTest, detectIfaces) {
IfaceMgr::Iface * eth0 = ifacemgr->getIface("eth0");
// there should be one address
- EXPECT_EQ(1, eth0->addrs_.size());
+ IfaceMgr::AddressCollection addrs = eth0->getAddresses();
+ ASSERT_EQ(1, addrs.size());
- IOAddress * addr = &(*eth0->addrs_.begin());
- ASSERT_TRUE( addr != NULL );
+ IOAddress addr = *addrs.begin();
- EXPECT_STREQ( "fe80::1234", addr->toText().c_str() );
+ EXPECT_STREQ( "fe80::1234", addr.toText().c_str() );
delete ifacemgr;
}
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sockets) {
+TEST_F(IfaceMgrTest, sockets6) {
// testing socket operation in a portable way is tricky
// without interface detection implemented
+ createLoInterfacesTxt();
+
NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
IOAddress loAddr("::1");
+ Pkt6 pkt6(128);
+ pkt6.iface_ = LOOPBACK;
+
// bind multicast socket to port 10547
int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
EXPECT_GT(socket1, 0); // socket > 0
+ EXPECT_EQ(socket1, ifacemgr->getSocket(pkt6));
+
// bind unicast socket to port 10548
int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
EXPECT_GT(socket2, 0);
- // expect success. This address/port is already bound, but
- // we are using SO_REUSEADDR, so we can bind it twice
- int socket3 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
-
- // rebinding succeeds on Linux, fails on BSD
- // TODO: add OS-specific defines here (or modify code to
- // behave the same way on all OSes, but that may not be
- // possible
- // EXPECT_GT(socket3, 0); // socket > 0
-
- // we now have 3 sockets open at the same time. Looks good.
+ // removed code for binding socket twice to the same address/port
+ // as it caused problems on some platforms (e.g. Mac OS X)
close(socket1);
close(socket2);
- close(socket3);
delete ifacemgr;
}
// TODO: disabled due to other naming on various systems
// (lo in Linux, lo0 in BSD systems)
-TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
+TEST_F(IfaceMgrTest, DISABLED_sockets6Mcast) {
// testing socket operation in a portable way is tricky
// without interface detection implemented
@@ -311,27 +304,24 @@ TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
delete ifacemgr;
}
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
+TEST_F(IfaceMgrTest, sendReceive6) {
+
// testing socket operation in a portable way is tricky
// without interface detection implemented
+ createLoInterfacesTxt();
- fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
- fakeifaces << LOOPBACK << " ::1";
- fakeifaces.close();
-
- NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
// let's assume that every supported OS have lo interface
IOAddress loAddr("::1");
- int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
- int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+ int socket1 = 0, socket2 = 0;
+ EXPECT_NO_THROW(
+ socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+ socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+ );
- ifacemgr->setSendSock(socket2);
- ifacemgr->setRecvSock(socket1);
+ EXPECT_GT(socket1, 0);
+ EXPECT_GT(socket2, 0);
boost::shared_ptr<Pkt6> sendPkt(new Pkt6(128) );
@@ -349,7 +339,7 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
EXPECT_EQ(true, ifacemgr->send(sendPkt));
- rcvPkt = ifacemgr->receive();
+ rcvPkt = ifacemgr->receive6();
ASSERT_TRUE( rcvPkt ); // received our own packet
@@ -359,7 +349,168 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
rcvPkt->data_len_) );
EXPECT_EQ(sendPkt->remote_addr_.toText(), rcvPkt->remote_addr_.toText());
- EXPECT_EQ(rcvPkt->remote_port_, 10546);
+
+ // Since we opened 2 sockets on the same interface and neither of them is
+ // multicast, neither is preferred over the other for sending data, so we
+ // really should not assume that one or the other will always be chosen for
+ // sending data. Therefore we should accept both values as source ports.
+ EXPECT_TRUE( (rcvPkt->remote_port_ == 10546) || (rcvPkt->remote_port_ == 10547) );
+
+ delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, socket4) {
+
+ createLoInterfacesTxt();
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ // Let's assume that every supported OS has a lo interface.
+ IOAddress loAddr("127.0.0.1");
+ // Use unprivileged port (it's convenient for running tests as non-root).
+ int socket1 = 0;
+
+ EXPECT_NO_THROW(
+ socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, DHCP4_SERVER_PORT + 10000);
+ );
+
+ EXPECT_GT(socket1, 0);
+
+ Pkt4 pkt(DHCPDISCOVER, 1234);
+ pkt.setIface(LOOPBACK);
+
+ // Expect that we get the socket that we just opened.
+ EXPECT_EQ(socket1, ifacemgr->getSocket(pkt));
+
+ close(socket1);
+
+ delete ifacemgr;
+}
+
+// Test the Iface structure itself
+TEST_F(IfaceMgrTest, iface) {
+ IfaceMgr::Iface* iface = 0;
+ EXPECT_NO_THROW(
+ iface = new IfaceMgr::Iface("eth0",1);
+ );
+
+ EXPECT_EQ("eth0", iface->getName());
+ EXPECT_EQ(1, iface->getIndex());
+ EXPECT_EQ("eth0/1", iface->getFullName());
+
+ // Let's make a copy of this address collection.
+ IfaceMgr::AddressCollection addrs = iface->getAddresses();
+
+ EXPECT_EQ(0, addrs.size());
+
+ IOAddress addr1("192.0.2.6");
+ iface->addAddress(addr1);
+
+ addrs = iface->getAddresses();
+ ASSERT_EQ(1, addrs.size());
+ EXPECT_EQ("192.0.2.6", addrs.at(0).toText());
+
+ // No such address, should return false.
+ EXPECT_FALSE(iface->delAddress(IOAddress("192.0.8.9")));
+
+ // This address is present, delete it!
+ EXPECT_TRUE(iface->delAddress(IOAddress("192.0.2.6")));
+
+ // Not really necessary, previous reference still points to the same
+ // collection. Let's do it anyway, as test code may serve as example
+ // usage code as well.
+ addrs = iface->getAddresses();
+
+ EXPECT_EQ(0, addrs.size());
+
+ EXPECT_NO_THROW(
+ delete iface;
+ );
+}
+
+TEST_F(IfaceMgrTest, socketInfo) {
+
+ // check that socketinfo for IPv4 socket is functional
+ IfaceMgr::SocketInfo sock1(7, IOAddress("192.0.2.56"), DHCP4_SERVER_PORT + 7);
+ EXPECT_EQ(7, sock1.sockfd_);
+ EXPECT_EQ("192.0.2.56", sock1.addr_.toText());
+ EXPECT_EQ(AF_INET, sock1.family_);
+ EXPECT_EQ(DHCP4_SERVER_PORT + 7, sock1.port_);
+
+ // check that socketinfo for IPv6 socket is functional
+ IfaceMgr::SocketInfo sock2(9, IOAddress("2001:db8:1::56"), DHCP4_SERVER_PORT + 9);
+ EXPECT_EQ(9, sock2.sockfd_);
+ EXPECT_EQ("2001:db8:1::56", sock2.addr_.toText());
+ EXPECT_EQ(AF_INET6, sock2.family_);
+ EXPECT_EQ(DHCP4_SERVER_PORT + 9, sock2.port_);
+
+ // now let's test if IfaceMgr handles socket info properly
+ createLoInterfacesTxt();
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+ IfaceMgr::Iface* loopback = ifacemgr->getIface(LOOPBACK);
+ ASSERT_TRUE(loopback);
+ loopback->addSocket(sock1);
+ loopback->addSocket(sock2);
+
+ Pkt6 pkt6(100);
+
+ // pkt6 does not have interface set yet
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ BadValue
+ );
+
+ // try to send over non-existing interface
+ pkt6.iface_ = "nosuchinterface45";
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ BadValue
+ );
+
+ // this will work
+ pkt6.iface_ = LOOPBACK;
+ EXPECT_EQ(9, ifacemgr->getSocket(pkt6));
+
+ bool deleted = false;
+ EXPECT_NO_THROW(
+ deleted = ifacemgr->getIface(LOOPBACK)->delSocket(9);
+ );
+ EXPECT_TRUE(deleted);
+
+ // it should throw again, there's no usable socket anymore
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ Unexpected
+ );
+
+ // repeat for pkt4
+ Pkt4 pkt4(DHCPDISCOVER, 1);
+
+ // pkt4 does not have interface set yet.
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ BadValue
+ );
+
+ // Try to send over non-existing interface.
+ pkt4.setIface("nosuchinterface45");
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ BadValue
+ );
+
+ // Socket info is set, packet has well defined interface. It should work.
+ pkt4.setIface(LOOPBACK);
+ EXPECT_EQ(7, ifacemgr->getSocket(pkt4));
+
+ EXPECT_NO_THROW(
+ ifacemgr->getIface(LOOPBACK)->delSocket(7);
+ );
+
+ // It should throw again, there's no usable socket anymore.
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ Unexpected
+ );
delete ifacemgr;
}
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 12ddab3..4d407bb 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -45,9 +45,9 @@ run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 466be3e..8a91982 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -355,10 +355,6 @@ IOFetch::stop(Result result) {
// variable should be done inside a mutex (and the stopped_ variable
// declared as "volatile").
//
- // The numeric arguments indicate the debug level, with the lower
- // numbers indicating the most important information. The relative
- // values are somewhat arbitrary.
- //
// TODO: Update testing of stopped_ if threads are used.
data_->stopped = true;
switch (result) {
diff --git a/src/lib/cryptolink/Makefile.am b/src/lib/cryptolink/Makefile.am
index 93f3443..fc12fae 100644
--- a/src/lib/cryptolink/Makefile.am
+++ b/src/lib/cryptolink/Makefile.am
@@ -11,4 +11,5 @@ lib_LTLIBRARIES = libcryptolink.la
libcryptolink_la_SOURCES = cryptolink.h cryptolink.cc
libcryptolink_la_SOURCES += crypto_hmac.h crypto_hmac.cc
-libcryptolink_la_LIBADD = ${BOTAN_LDFLAGS} ${BOTAN_RPATH}
+libcryptolink_la_LDFLAGS = ${BOTAN_LDFLAGS}
+libcryptolink_la_LIBADD = ${BOTAN_LIBS} ${BOTAN_RPATH}
diff --git a/src/lib/cryptolink/tests/Makefile.am b/src/lib/cryptolink/tests/Makefile.am
index fbdd13f..6ac6fdf 100644
--- a/src/lib/cryptolink/tests/Makefile.am
+++ b/src/lib/cryptolink/tests/Makefile.am
@@ -16,8 +16,8 @@ TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += crypto_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = ${BOTAN_LDFLAGS} $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(GTEST_LDADD) $(BOTAN_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libcryptolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 45ce0c2..6e0b02b 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -352,7 +352,7 @@ FINAL_TYPES() {
}
-RRsetPtr
+ConstRRsetPtr
DatabaseClient::Finder::findNSECCover(const Name& name) {
try {
// Which one should contain the NSEC record?
@@ -387,69 +387,99 @@ DatabaseClient::Finder::findNSECCover(const Name& name) {
arg(accessor_->getDBName()).arg(name);
}
// We didn't find it, return nothing
- return (RRsetPtr());
+ return (ConstRRsetPtr());
}
-ZoneFinder::FindResult
-DatabaseClient::Finder::find(const isc::dns::Name& name,
- const isc::dns::RRType& type,
- isc::dns::RRsetList*,
- const FindOptions options)
+DatabaseClient::Finder::DelegationSearchResult
+DatabaseClient::Finder::findDelegationPoint(const isc::dns::Name& name,
+ const FindOptions options)
{
- // This variable is used to determine the difference between
- // NXDOMAIN and NXRRSET
- bool records_found = false;
- bool glue_ok((options & FIND_GLUE_OK) != 0);
- const bool dnssec_data((options & FIND_DNSSEC) != 0);
- bool get_cover(false);
- isc::dns::RRsetPtr result_rrset;
+ // Result of search
+ isc::dns::ConstRRsetPtr result_rrset;
ZoneFinder::Result result_status = SUCCESS;
- FoundRRsets found;
- logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
- .arg(accessor_->getDBName()).arg(name).arg(type);
- // In case we are in GLUE_OK mode and start matching wildcards,
- // we can't do it under NS, so we store it here to check
- isc::dns::RRsetPtr first_ns;
-
- // First, do we have any kind of delegation (NS/DNAME) here?
- const Name origin(getOrigin());
- const size_t origin_label_count(origin.getLabelCount());
- // Number of labels in the last known non-empty domain
- size_t last_known(origin_label_count);
- const size_t current_label_count(name.getLabelCount());
- // This is how many labels we remove to get origin
- const size_t remove_labels(current_label_count - origin_label_count);
-
- // Now go trough all superdomains from origin down
- for (int i(remove_labels); i > 0; --i) {
- Name superdomain(name.split(i));
- // Look if there's NS or DNAME (but ignore the NS in origin)
- found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
- i != remove_labels);
- if (found.first) {
- // It contains some RRs, so it exists.
- last_known = superdomain.getLabelCount();
+ // Are we searching for glue?
+ const bool glue_ok = ((options & FIND_GLUE_OK) != 0);
+
+ // This next declaration is an optimisation. When we search the database
+ // for glue records, we generally ignore delegations. (This allows for
+ // the case where e.g. the delegation to zone example.com refers to
+ // nameservers within the zone, e.g. ns1.example.com. When conducting the
+ // search for ns1.example.com, we have to search past the NS records at
+ // example.com.)
+ //
+ // The one case where this is forbidden is when we search past the zone
+ // cut but the match we find for the glue is a wildcard match. In that
+ // case, we return the delegation instead (see RFC 1034, section 4.3.3).
+ // To save a new search, we record the location of the delegation cut when
+ // we encounter it here.
+ isc::dns::ConstRRsetPtr first_ns;
+
+ // We want to search from the apex down. We are given the full domain
+ // name so we have to do some manipulation to ensure that when we start
+ // checking superdomains, we start from the domain name of the zone
+ // (e.g. if the name is b.a.example.com. and we are in the example.com.
+ // zone, we check example.com., a.example.com. and b.a.example.com. We
+ // don't need to check com. or .).
+ //
+ // Set the number of labels in the origin (i.e. apex of the zone) and in
+ // the last known non-empty domain (which, at this point, is the origin).
+ const size_t origin_label_count = getOrigin().getLabelCount();
+ size_t last_known = origin_label_count;
+
+ // Set how many labels we remove to get origin: this is the number of
+ // labels we have to process in our search.
+ const size_t remove_labels = name.getLabelCount() - origin_label_count;
+
+ // Go through all superdomains from the origin down searching for nodes
+ // that indicate a delegation (i.e. NS or DNAME).
+ for (int i = remove_labels; i > 0; --i) {
+ const Name superdomain(name.split(i));
+
+ // Note if this is the origin. (We don't count NS records at the origin
+ // as a delegation so this controls whether NS RRs are included in
+ // the results of some searches.)
+ const bool not_origin = (i != remove_labels);
+
+ // Look if there's NS or DNAME at this point of the tree, but ignore
+ // the NS RRs at the apex of the zone.
+ const FoundRRsets found = getRRsets(superdomain.toText(),
+ DELEGATION_TYPES(), not_origin);
+ if (found.first) {
+ // This node contains either NS or DNAME RRs so it does exist.
const FoundIterator nsi(found.second.find(RRType::NS()));
const FoundIterator dni(found.second.find(RRType::DNAME()));
- // In case we are in GLUE_OK mode, we want to store the
- // highest encountered NS (but not apex)
- if (glue_ok && !first_ns && i != remove_labels &&
- nsi != found.second.end()) {
+
+ // An optimisation. We know that there is an exact match for
+ // something at this point in the tree so remember it. If we have
+ // to do a wildcard search, as we search upwards through the tree
+ // we don't need to pass this point, which is an exact match for
+ // the domain name.
+ last_known = superdomain.getLabelCount();
+
+ if (glue_ok && !first_ns && not_origin &&
+ nsi != found.second.end()) {
+ // If we are searching for glue ("glue OK" mode), store the
+ // highest NS record that we find that is not the apex. This
+ // is another optimisation for later, where we need the
+ // information if the domain we are looking for matches through
+ // a wildcard.
first_ns = nsi->second;
- } else if (!glue_ok && i != remove_labels &&
- nsi != found.second.end()) {
- // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
- // delegation in apex
+
+ } else if (!glue_ok && not_origin && nsi != found.second.end()) {
+ // Not searching for glue and we have found an NS RRset that is
+ // not at the apex. We have found a delegation - return that
+ // fact, there is no need to search further down the tree.
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_DELEGATION).
arg(accessor_->getDBName()).arg(superdomain);
result_rrset = nsi->second;
result_status = DELEGATION;
- // No need to go lower, found
break;
+
} else if (dni != found.second.end()) {
- // Very similar with DNAME
+ // We have found a DNAME so again stop searching down the tree
+ // and return the information.
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_DNAME).
arg(accessor_->getDBName()).arg(superdomain);
@@ -464,202 +494,344 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
}
}
}
+ return (DelegationSearchResult(result_status, result_rrset, first_ns,
+ last_known));
+}
- if (!result_rrset) { // Only if we didn't find a redirect already
- // Try getting the final result and extract it
- // It is special if there's a CNAME or NS, DNAME is ignored here
- // And we don't consider the NS in origin
-
- WantedTypes final_types(FINAL_TYPES());
- final_types.insert(type);
- found = getRRsets(name.toText(), final_types, name != origin);
- records_found = found.first;
-
- // NS records, CNAME record and Wanted Type records
- const FoundIterator nsi(found.second.find(RRType::NS()));
- const FoundIterator cni(found.second.find(RRType::CNAME()));
- const FoundIterator wti(found.second.find(type));
- if (name != origin && !glue_ok && nsi != found.second.end()) {
- // There's a delegation at the exact node.
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
- arg(accessor_->getDBName()).arg(name);
- result_status = DELEGATION;
- result_rrset = nsi->second;
- } else if (type != isc::dns::RRType::CNAME() &&
- cni != found.second.end()) {
- // A CNAME here
- result_status = CNAME;
- result_rrset = cni->second;
- if (result_rrset->getRdataCount() != 1) {
- isc_throw(DataSourceError, "CNAME with " <<
- result_rrset->getRdataCount() <<
- " rdata at " << name << ", expected 1");
- }
- } else if (wti != found.second.end()) {
- // Just get the answer
- result_rrset = wti->second;
- } else if (!records_found) {
- // Nothing lives here.
- // But check if something lives below this
- // domain and if so, pretend something is here as well.
- if (hasSubdomains(name.toText())) {
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
- arg(accessor_->getDBName()).arg(name);
- records_found = true;
- get_cover = dnssec_data;
- } else if ((options & NO_WILDCARD) != 0) {
- // If wildcard check is disabled, the search will ultimately
- // terminate with NXDOMAIN. If DNSSEC is enabled, flag that
- // we need to get the NSEC records to prove this.
- if (dnssec_data) {
- get_cover = true;
- }
- } else {
- // It's not empty non-terminal. So check for wildcards.
- // We remove labels one by one and look for the wildcard there.
- // Go up to first non-empty domain.
- for (size_t i(1); i + last_known <= current_label_count; ++i) {
- // Construct the name with *
- const Name superdomain(name.split(i));
- const string wildcard("*." + superdomain.toText());
- const string construct_name(name.toText());
- // TODO What do we do about DNAME here?
- // The types are the same as with original query
- found = getRRsets(wildcard, final_types, true,
+// This method is called when we have not found an exact match and when we
+// know that the name is not an empty non-terminal. So the only way that
+// the name can match something in the zone is through a wildcard match.
+//
+// During an earlier stage in the search for this name, we made a record of
+// the lowest superdomain for which we know an RR exists. (Note the "we
+// know" qualification - there may be lower superdomains (ones with more
+// labels) that hold an RR, but as we weren't searching for them, we don't
+// know about them.)
+//
+// In the search for a wildcard match (which starts at the given domain
+// name and goes up the tree to successive superdomains), this is the level
+// at which we can stop - there can't be a wildcard at or beyond that
+// point.
+//
+// At each level, until the search stops, we should consider several cases:
+//
+// - If we found a wildcard match for a glue record below a
+// delegation point, we don't return the match; instead we return
+// the delegation. (Note that if we hadn't found a wildcard match
+// at all, we would return NXDOMAIN, not the delegation.)
+//
+// - If we found a wildcard match and we are sure that the match
+// is not an empty non-terminal, return the result, taking into account
+// a possible CNAME, a match on a zone cut, and NXRRSET.
+// (E.g. searching for a match
+// for c.b.a.example.com, we found that b.a.example.com did
+// not exist but that *.a.example.com. did. Checking
+// b.a.example.com revealed no subdomains, so we can use the
+// wildcard match we found.)
+//
+// - If we found a more specific match, the wildcard search
+// is canceled, resulting in NXDOMAIN. (E.g. searching for a match
+// for c.b.a.example.com, we found that b.a.example.com did
+// not exist but that *.a.example.com. did. Checking
+// b.a.example.com found subdomains. So b.a.example.com is
+// an empty non-terminal and so should not be returned in
+// the wildcard matching process. In other words,
+// b.a.example.com does exist in the DNS space; it just doesn't
+// have any RRs associated with it.)
+//
+// - If we found a match, but it is an empty non-terminal asterisk (e.g.
+// subdomain.*.example.com. is present, but there is nothing at
+// *.example.com.), return an NXRRSET indication;
+// the wildcard exists in the DNS space, there's just nothing
+// associated with it. If DNSSEC data is required, return the
+// covering NSEC record.
+//
+// If none of the above applies at any level, the search fails with NXDOMAIN.
+ZoneFinder::FindResult
+DatabaseClient::Finder::findWildcardMatch(
+ const isc::dns::Name& name, const isc::dns::RRType& type,
+ const FindOptions options, const DelegationSearchResult& dresult)
+{
+ // Note that we are going to search not only for the requested type,
+ // but also for types that indicate a delegation - NS and DNAME.
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+
+ for (size_t i = 1; i <= (name.getLabelCount() - dresult.last_known); ++i) {
+
+ // Strip off the left-most label(s) in the name and replace with a "*".
+ const Name superdomain(name.split(i));
+ const string wildcard("*." + superdomain.toText());
+ const string construct_name(name.toText());
+
+ // TODO Add a check for DNAME, as DNAME wildcards are discouraged (see
+ // RFC 4592 section 4.4).
+ // Search for a match. The types are the same as with original query.
+ FoundRRsets found = getRRsets(wildcard, final_types, true,
&construct_name);
- if (found.first) {
- if (first_ns) {
- // In case we are under NS, we don't
- // wildcard-match, but return delegation
- result_rrset = first_ns;
- result_status = DELEGATION;
- records_found = true;
- // We pretend to switch to non-glue_ok mode
- glue_ok = false;
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD_CANCEL_NS).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(first_ns->getName());
- } else if (!hasSubdomains(name.split(i - 1).toText()))
- {
- // Nothing we added as part of the * can exist
- // directly, as we go up only to first existing
- // domain, but it could be empty non-terminal. In
- // that case, we need to cancel the match.
- records_found = true;
- const FoundIterator
- cni(found.second.find(RRType::CNAME()));
- const FoundIterator
- nsi(found.second.find(RRType::NS()));
- const FoundIterator
- nci(found.second.find(RRType::NSEC()));
- const FoundIterator wti(found.second.find(type));
- if (cni != found.second.end() &&
- type != RRType::CNAME()) {
- result_rrset = cni->second;
- result_status = WILDCARD_CNAME;
- } else if (nsi != found.second.end()) {
- result_rrset = nsi->second;
- result_status = DELEGATION;
- } else if (wti != found.second.end()) {
- result_rrset = wti->second;
- result_status = WILDCARD;
- } else {
- // NXRRSET case in the wildcard
- result_status = WILDCARD_NXRRSET;
- if (dnssec_data &&
- nci != found.second.end()) {
- // User wants a proof the wildcard doesn't
- // contain it
- //
- // However, we need to get the RRset in the
- // name of the wildcard, not the constructed
- // one, so we walk it again
- found = getRRsets(wildcard, NSEC_TYPES(),
- true);
- result_rrset =
- found.second.find(RRType::NSEC())->
- second;
- }
- }
-
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(name);
- } else {
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(name).arg(superdomain);
- }
- break;
- } else if (hasSubdomains(wildcard)) {
- // Empty non-terminal asterisk
- records_found = true;
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD_EMPTY).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(name);
- if (dnssec_data) {
- result_rrset = findNSECCover(Name(wildcard));
- if (result_rrset) {
- result_status = WILDCARD_NXRRSET;
- }
- }
- break;
- }
- }
- // This is the NXDOMAIN case (nothing found anywhere). If
- // they want DNSSEC data, try getting the NSEC record
- if (dnssec_data && !records_found) {
- get_cover = true;
+ if (found.first) {
+ // Found something - but what?
+
+ if (dresult.first_ns) {
+ // About to use first_ns. The only way this can be set is if
+ // we are searching for glue, so do a sanity check.
+ if ((options & FIND_GLUE_OK) == 0) {
+ isc_throw(Unexpected, "Inconsistent conditions during "
+ "cancel of wilcard search for " <<
+ name.toText() << ": find_ns non-null when not "
+ "processing glue request");
}
+
+ // Wildcard match for a glue below a delegation point
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_NS).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(dresult.first_ns->getName());
+ return (ZoneFinder::FindResult(DELEGATION, dresult.first_ns));
+
+ } else if (!hasSubdomains(name.split(i - 1).toText())) {
+ // The wildcard match is the best one; find the final result
+ // at it. Note that a wildcard should never be the zone origin.
+ return (findOnNameResult(name, type, options, false,
+ found, &wildcard));
+ } else {
+
+ // More specific match found; cancel the wildcard match.
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name).arg(superdomain);
+ return (ZoneFinder::FindResult(NXDOMAIN, ConstRRsetPtr()));
}
- } else if (dnssec_data) {
- // This is the "usual" NXRRSET case
- // So in case they want DNSSEC, provide the NSEC
- // (which should be available already here)
- result_status = NXRRSET;
- const FoundIterator nci(found.second.find(RRType::NSEC()));
- if (nci != found.second.end()) {
- result_rrset = nci->second;
+
+ } else if (hasSubdomains(wildcard)) {
+ // an empty non-terminal asterisk
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_EMPTY).
+ arg(accessor_->getDBName()).arg(wildcard).arg(name);
+ if ((options & FIND_DNSSEC) != 0) {
+ ConstRRsetPtr nsec = findNSECCover(Name(wildcard));
+ if (nsec) {
+ return (ZoneFinder::FindResult(WILDCARD_NXRRSET, nsec));
+ }
}
+ return (ZoneFinder::FindResult(NXRRSET, ConstRRsetPtr()));
}
}
- if (!result_rrset) {
- if (result_status == SUCCESS) {
- // Should we look for NSEC covering the name?
- if (get_cover) {
- result_rrset = findNSECCover(name);
- if (result_rrset) {
- result_status = NXDOMAIN;
- }
+ // Nothing found at any level.
+ return (ZoneFinder::FindResult(NXDOMAIN, ConstRRsetPtr()));
+}
+
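
For illustration, the label-stripping loop above can be mimicked with a minimal standalone sketch (plain C++, separate from the patch). It generates the wildcard candidates in the order the loop probes them; the query name, the stop level and the label counting are simplified stand-ins for Name::split() and dresult.last_known.

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Generate the wildcard names that would be probed for 'qname', stopping
// at the lowest superdomain already known to exist (given here by its
// label count, playing the role of dresult.last_known).
std::vector<std::string> wildcardCandidates(const std::string& qname,
                                            size_t last_known_labels) {
    std::vector<std::string> candidates;
    std::string rest = qname;
    for (;;) {
        const std::string::size_type dot = rest.find('.');
        if (dot == std::string::npos) {
            break;                          // no more labels to strip
        }
        rest = rest.substr(dot + 1);        // superdomain after stripping
        const size_t labels =
            std::count(rest.begin(), rest.end(), '.') + 1;
        if (labels < last_known_labels) {
            break;                          // below the known level - stop
        }
        candidates.push_back("*." + rest);
    }
    return (candidates);
}

int main() {
    // Querying c.b.a.example.com when example.com (2 labels) is the lowest
    // superdomain known to exist:
    const std::vector<std::string> w =
        wildcardCandidates("c.b.a.example.com", 2);
    for (size_t i = 0; i < w.size(); ++i) {
        std::cout << w[i] << "\n";
        // prints *.b.a.example.com, *.a.example.com, *.example.com
    }
    return (0);
}
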
+ZoneFinder::FindResult
+DatabaseClient::Finder::logAndCreateResult(
+ const Name& name, const string* wildname, const RRType& type,
+ ZoneFinder::Result code, ConstRRsetPtr rrset,
+ const isc::log::MessageID& log_id) const
+{
+ if (rrset) {
+ if (wildname == NULL) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass()).arg(*rrset);
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass()).arg(*wildname).arg(*rrset);
+ }
+ } else {
+ if (wildname == NULL) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass());
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass()).arg(*wildname);
+ }
+ }
+ return (ZoneFinder::FindResult(code, rrset));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::findOnNameResult(const Name& name,
+ const RRType& type,
+ const FindOptions options,
+ const bool is_origin,
+ const FoundRRsets& found,
+ const string* wildname)
+{
+ const bool wild = (wildname != NULL);
+
+ // Get iterators for the different types of records we are interested in -
+ // CNAME, NS and Wanted types.
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator cni(found.second.find(RRType::CNAME()));
+ const FoundIterator wti(found.second.find(type));
+
+ if (!is_origin && ((options & FIND_GLUE_OK) == 0) &&
+ nsi != found.second.end()) {
+ // An NS RRset was found at the domain we were searching for. As it is
+ // not at the origin of the zone, it is a delegation and indicates that
+ // this zone is not authoritative for the data. Just return the
+ // delegation information.
+ return (logAndCreateResult(name, wildname, type, DELEGATION,
+ nsi->second,
+ wild ? DATASRC_DATABASE_WILDCARD_NS :
+ DATASRC_DATABASE_FOUND_DELEGATION_EXACT));
+
+ } else if (type != RRType::CNAME() && cni != found.second.end()) {
+ // We are not searching for a CNAME but nevertheless we have found one
+ // at the name we are searching so we return it. (The caller may
+ // want to continue the lookup by replacing the query name with the
+ // canonical name and the original RR type.) First though, do a sanity
+ // check to ensure that there is only one RR in the CNAME RRset.
+ if (cni->second->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "CNAME with " <<
+ cni->second->getRdataCount() << " rdata at " << name <<
+ ", expected 1");
+ }
+ return (logAndCreateResult(name, wildname, type,
+ wild ? WILDCARD_CNAME : CNAME, cni->second,
+ wild ? DATASRC_DATABASE_WILDCARD_CNAME :
+ DATASRC_DATABASE_FOUND_CNAME));
+
+ } else if (wti != found.second.end()) {
+ // Found an RR matching the query, so return it. (Note that this
+ // includes the case where we were explicitly querying for a CNAME and
+ // found it. It also includes the case where we were querying for an
+ // NS RRset and found it at the apex of the zone.)
+ return (logAndCreateResult(name, wildname, type,
+ wild ? WILDCARD : SUCCESS, wti->second,
+ wild ? DATASRC_DATABASE_WILDCARD_MATCH :
+ DATASRC_DATABASE_FOUND_RRSET));
+ }
+
+ // If we get here, we have found something at the requested name but not
+ // one of the RR types we were interested in. This is the NXRRSET case so
+ // return the appropriate status. If DNSSEC information was requested,
+ // provide the NSEC records. If it's for a wildcard, we need to get the
+ // NSEC records at the name of the wildcard, not the substituted one,
+ // so we need to search the tree again.
+ ConstRRsetPtr nsec_rrset; // possibly used with DNSSEC, otherwise NULL
+ if ((options & FIND_DNSSEC) != 0) {
+ if (wild) {
+ const FoundRRsets wfound = getRRsets(*wildname, NSEC_TYPES(),
+ true);
+ const FoundIterator nci = wfound.second.find(RRType::NSEC());
+ if (nci != wfound.second.end()) {
+ nsec_rrset = nci->second;
}
- // Something is not here and we didn't decide yet what
- if (records_found) {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXRRSET)
- .arg(accessor_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXRRSET;
- } else {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXDOMAIN)
- .arg(accessor_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXDOMAIN;
+ } else {
+ const FoundIterator nci = found.second.find(RRType::NSEC());
+ if (nci != found.second.end()) {
+ nsec_rrset = nci->second;
}
}
+ }
+ if (nsec_rrset) {
+ // This log message covers both normal and wildcard cases, so we pass
+ // NULL for 'wildname'.
+ return (logAndCreateResult(name, NULL, type,
+ wild ? WILDCARD_NXRRSET : NXRRSET,
+ nsec_rrset,
+ DATASRC_DATABASE_FOUND_NXRRSET_NSEC));
+ }
+ return (logAndCreateResult(name, wildname, type,
+ wild ? WILDCARD_NXRRSET : NXRRSET, nsec_rrset,
+ wild ? DATASRC_DATABASE_WILDCARD_NXRRSET :
+ DATASRC_DATABASE_FOUND_NXRRSET));
+}
+
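
To make the "NSEC at the name of the wildcard" point above concrete, here is a hedged usage sketch, illustration only and not part of the patch; the zone contents, the header locations and RRType::TXT() are assumptions.

#include <datasrc/zone.h>   // assumed location of ZoneFinder
#include <dns/name.h>
#include <dns/rrtype.h>

using namespace isc::dns;
using namespace isc::datasrc;

// 'finder' is any ZoneFinder backed by a hypothetical zone containing:
//   *.example.com.  MX    10 mail.example.com.
//   *.example.com.  NSEC  host.example.com. MX RRSIG NSEC
ZoneFinder::FindResult
wildcardNxrrsetExample(ZoneFinder& finder) {
    // Ask for a type the wildcard does not own, requesting DNSSEC proof.
    return (finder.find(Name("a.example.com"), RRType::TXT(), NULL,
                        ZoneFinder::FIND_DNSSEC));
    // Expected: code == WILDCARD_NXRRSET and the returned RRset is the
    // NSEC owned by *.example.com (not the substituted query name), which
    // is why findOnNameResult() re-queries getRRsets() with the wildcard
    // name in its DNSSEC branch above.
}
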
+ZoneFinder::FindResult
+DatabaseClient::Finder::findNoNameResult(const Name& name, const RRType& type,
+ FindOptions options,
+ const DelegationSearchResult& dresult)
+{
+ const bool dnssec_data = ((options & FIND_DNSSEC) != 0);
+
+ // On entry to this method, we know that the database doesn't have any
+ // entry for this name. Before returning NXDOMAIN, we need to check
+ // for special cases.
+
+ if (hasSubdomains(name.toText())) {
+ // Does the domain have a subdomain (i.e. it is an empty non-terminal)?
+ // If so, return NXRRSET instead of NXDOMAIN (as although the name does
+ // not exist in the database, it does exist in the DNS tree).
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
+ arg(accessor_->getDBName()).arg(name);
+ return (FindResult(NXRRSET, dnssec_data ? findNSECCover(name) :
+ ConstRRsetPtr()));
+
+ } else if ((options & NO_WILDCARD) == 0) {
+ // It's not an empty non-terminal and wildcard matching is not
+ // disabled, so check for wildcards. If there is a wildcard match
+ // (i.e. all results except NXDOMAIN) return it; otherwise fall
+ // through to the NXDOMAIN case below.
+ const ZoneFinder::FindResult wresult =
+ findWildcardMatch(name, type, options, dresult);
+ if (wresult.code != NXDOMAIN) {
+ return (FindResult(wresult.code, wresult.rrset));
+ }
+ }
+
+ // All avenues to find a match are now exhausted, return NXDOMAIN (plus
+ // NSEC records if requested).
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_NO_MATCH).
+ arg(accessor_->getDBName()).arg(name).arg(type).arg(getClass());
+ return (FindResult(NXDOMAIN, dnssec_data ? findNSECCover(name) :
+ ConstRRsetPtr()));
+}
+
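
As a worked example of the empty non-terminal branch above (zone contents invented, header locations assumed; illustration only, not part of the patch):

#include <datasrc/zone.h>   // assumed location of ZoneFinder
#include <dns/name.h>
#include <dns/rrtype.h>

using namespace isc::dns;
using namespace isc::datasrc;

// 'finder' is any ZoneFinder backed by a hypothetical zone example.com
// whose only non-apex record is:
//   c.b.a.example.com.  A  192.0.2.1
ZoneFinder::FindResult
emptyNonTerminalExample(ZoneFinder& finder) {
    // b.a.example.com has no records of its own, but something exists
    // below it, so it is part of the DNS tree: the expected result is
    // NXRRSET (with a covering NSEC if FIND_DNSSEC had been requested),
    // not NXDOMAIN.
    return (finder.find(Name("b.a.example.com"), RRType::A()));
}
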
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList*,
+ const FindOptions options)
+{
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+ .arg(accessor_->getDBName()).arg(name).arg(type).arg(getClass());
+
+ // First, go through all superdomains from the origin down, searching for
+ // nodes that indicate a delegation (i.e. NS or DNAME, ignoring NS records
+ // at the apex). If one is found, the search stops there.
+ //
+ // (In fact there could be RRs in the database corresponding to subdomains
+ // of the delegation. The reason we do the search for the delegations
+ // first is because the delegation means that another zone is authoritative
+ // for the data and so should be consulted to retrieve it. RRs below
+ // this delegation point can be found in a search for glue but not
+ // otherwise; in the latter case they are said to be occluded by the
+ // presence of the delegation.)
+ const DelegationSearchResult dresult = findDelegationPoint(name, options);
+ if (dresult.rrset) {
+ return (FindResult(dresult.code, dresult.rrset));
+ }
+
+ // If there is no delegation, look for the exact match to the request
+ // name/type/class. However, there are special cases:
+ // - Requested name has a singleton CNAME record associated with it
+ // - Requested name is a delegation point (NS only but not at the zone
+ // apex - DNAME is ignored here as it redirects DNS names subordinate to
+ // the owner name - the owner name itself is not redirected.)
+ const bool is_origin = (name == getOrigin());
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+ const FoundRRsets found = getRRsets(name.toText(), final_types,
+ !is_origin);
+
+ if (found.first) {
+ // Something found at the domain name. Look into it further to get
+ // the final result.
+ return (findOnNameResult(name, type, options, is_origin, found, NULL));
} else {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_RRSET)
- .arg(accessor_->getDBName()).arg(*result_rrset);
+ // Did not find anything at all at the domain name, so check for
+ // subdomains or wildcards.
+ return (findNoNameResult(name, type, options, dresult));
}
- return (FindResult(result_status, result_rrset));
}
Name
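
The occlusion remark above can be illustrated with a small hedged sketch (zone contents invented, header locations and option scoping assumed; illustration only, not part of the patch):

#include <datasrc/zone.h>   // assumed location of ZoneFinder
#include <dns/name.h>
#include <dns/rrtype.h>

using namespace isc::dns;
using namespace isc::datasrc;

// 'finder' is any ZoneFinder backed by a hypothetical zone example.com
// containing:
//   sub.example.com.     NS  ns.sub.example.com.
//   ns.sub.example.com.  A   192.0.2.1     ; glue below the zone cut
void occlusionExample(ZoneFinder& finder) {
    const Name glue_name("ns.sub.example.com");

    // Ordinary lookup: the delegation occludes the address record, so the
    // expected result code is DELEGATION carrying the sub.example.com NS.
    ZoneFinder::FindResult plain = finder.find(glue_name, RRType::A());

    // Glue-OK lookup: the search may continue below the zone cut, so the
    // expected result code is SUCCESS carrying the A RRset itself.
    ZoneFinder::FindResult glue =
        finder.find(glue_name, RRType::A(), NULL, ZoneFinder::FIND_GLUE_OK);

    (void)plain;
    (void)glue;
}
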
@@ -669,10 +841,9 @@ DatabaseClient::Finder::findPreviousName(const Name& name) const {
try {
return (Name(str));
}
- /*
- * To avoid having the same code many times, we just catch all the
- * exceptions and handle them in a common code below
- */
+
+ // To avoid repeating the same code many times, we just catch all the
+ // exceptions and handle them in common code below.
catch (const isc::dns::EmptyLabel&) {}
catch (const isc::dns::TooLongLabel&) {}
catch (const isc::dns::BadLabelType&) {}
@@ -695,14 +866,12 @@ DatabaseClient::Finder::getClass() const {
namespace {
-/*
- * This needs, beside of converting all data from textual representation, group
- * together rdata of the same RRsets. To do this, we hold one row of data ahead
- * of iteration. When we get a request to provide data, we create it from this
- * data and load a new one. If it is to be put to the same rrset, we add it.
- * Otherwise we just return what we have and keep the row as the one ahead
- * for next time.
- */
+/// Besides converting all data from its textual representation, this needs to
+/// group together rdata of the same RRsets. To do this, we hold one row of
+/// data ahead of the iteration. When we get a request to provide data, we
+/// create it from this row and load a new one. If the new row belongs to the
+/// same RRset, we add it. Otherwise we just return what we have and keep the
+/// row as the one ahead for next time.
class DatabaseIterator : public ZoneIterator {
public:
DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index 81e6241..c1b71cd 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -18,14 +18,16 @@
#include <string>
#include <boost/scoped_ptr.hpp>
+#include <boost/tuple/tuple.hpp>
#include <dns/rrclass.h>
-#include <dns/rrclass.h>
#include <dns/rrset.h>
+#include <dns/rrtype.h>
#include <datasrc/data_source.h>
#include <datasrc/client.h>
#include <datasrc/client.h>
+#include <datasrc/logger.h>
#include <dns/name.h>
#include <exceptions/exceptions.h>
@@ -36,46 +38,41 @@
namespace isc {
namespace datasrc {
-/**
- * \brief Abstraction of lowlevel database with DNS data
- *
- * This class is defines interface to databases. Each supported database
- * will provide methods for accessing the data stored there in a generic
- * manner. The methods are meant to be low-level, without much or any knowledge
- * about DNS and should be possible to translate directly to queries.
- *
- * On the other hand, how the communication with database is done and in what
- * schema (in case of relational/SQL database) is up to the concrete classes.
- *
- * This class is non-copyable, as copying connections to database makes little
- * sense and will not be needed.
- *
- * \todo Is it true this does not need to be copied? For example the zone
- * iterator might need it's own copy. But a virtual clone() method might
- * be better for that than copy constructor.
- *
- * \note The same application may create multiple connections to the same
- * database, having multiple instances of this class. If the database
- * allows having multiple open queries at one connection, the connection
- * class may share it.
- */
+/// \brief Abstraction of lowlevel database with DNS data
+///
+/// This class defines the interface to databases. Each supported database
+/// will provide methods for accessing the data stored there in a generic
+/// manner. The methods are meant to be low-level, without much or any
+/// knowledge about DNS, and it should be possible to translate them directly
+/// to queries.
+///
+/// On the other hand, how the communication with database is done and in what
+/// schema (in case of relational/SQL database) is up to the concrete classes.
+///
+/// This class is non-copyable, as copying connections to database makes little
+/// sense and will not be needed.
+///
+/// \todo Is it true this does not need to be copied? For example the zone
+/// iterator might need its own copy. But a virtual clone() method might
+/// be better for that than copy constructor.
+///
+/// \note The same application may create multiple connections to the same
+/// database, having multiple instances of this class. If the database
+/// allows having multiple open queries at one connection, the connection
+/// class may share it.
class DatabaseAccessor : boost::noncopyable {
public:
- /**
- * Definitions of the fields as they are required to be filled in
- * by IteratorContext::getNext()
- *
- * When implementing getNext(), the columns array should
- * be filled with the values as described in this enumeration,
- * in this order, i.e. TYPE_COLUMN should be the first element
- * (index 0) of the array, TTL_COLUMN should be the second element
- * (index 1), etc.
- */
+ /// \brief Data columns to be filled in by IteratorContext::getNext()
+ ///
+ /// When implementing getNext(), the columns array should be filled with
+ /// the values as described in this enumeration, in this order, i.e.
+ /// - TYPE_COLUMN should be the first element (index 0) of the array,
+ /// - TTL_COLUMN should be the second element (index 1),
+ /// - etc.
enum RecordColumns {
TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
TTL_COLUMN = 1, ///< The TTL of the record (a
- SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
- ///< the RRSIG covers. In the current implementation,
+ SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers. In the current implementation,
///< this field is ignored.
RDATA_COLUMN = 3, ///< Full text representation of the record's RDATA
NAME_COLUMN = 4, ///< The domain name of this RR
@@ -83,31 +80,26 @@ public:
///< the largest other element in this enum plus 1.
};
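
As a hedged illustration of the ordering above, an implementation of getNext() would populate the array along these lines (the record values are invented; illustration only, not part of the patch):

#include <string>
#include <datasrc/database.h>   // the header being modified here

using isc::datasrc::DatabaseAccessor;

// Fill one row in the order defined by RecordColumns.
void fillExampleRow(std::string (&columns)[DatabaseAccessor::COLUMN_COUNT]) {
    columns[DatabaseAccessor::TYPE_COLUMN]    = "A";                // RR type
    columns[DatabaseAccessor::TTL_COLUMN]     = "3600";             // TTL
    columns[DatabaseAccessor::SIGTYPE_COLUMN] = "";                 // RRSIGs only
    columns[DatabaseAccessor::RDATA_COLUMN]   = "192.0.2.1";        // RDATA text
    columns[DatabaseAccessor::NAME_COLUMN]    = "www.example.com."; // owner name
}
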
- /**
- * Definitions of the fields to be passed to addRecordToZone().
- *
- * Each derived implementation of addRecordToZone() should expect
- * the "columns" array to be filled with the values as described in this
- * enumeration, in this order.
- */
+ /// \brief Definitions of the fields to be passed to addRecordToZone()
+ ///
+ /// Each derived implementation of addRecordToZone() should expect
+ /// the "columns" array to be filled with the values as described in this
+ /// enumeration, in this order.
enum AddRecordColumns {
- ADD_NAME = 0, ///< The owner name of the record (a domain name)
- ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
- ADD_TTL = 2, ///< The TTL of the record (in numeric form)
- ADD_TYPE = 3, ///< The RRType of the record (A/NS/TXT etc.)
- ADD_SIGTYPE = 4, ///< For RRSIG records, this contains the RRTYPE
- ///< the RRSIG covers.
- ADD_RDATA = 5, ///< Full text representation of the record's RDATA
+ ADD_NAME = 0, ///< The owner name of the record (a domain name)
+ ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
+ ADD_TTL = 2, ///< The TTL of the record (in numeric form)
+ ADD_TYPE = 3, ///< The RRType of the record (A/NS/TXT etc.)
+ ADD_SIGTYPE = 4, ///< RRSIGs only: the RRTYPE the RRSIG covers.
+ ADD_RDATA = 5, ///< Full text representation of the record's RDATA
ADD_COLUMN_COUNT = 6 ///< Number of columns
};
- /**
- * Definitions of the fields to be passed to deleteRecordInZone().
- *
- * Each derived implementation of deleteRecordInZone() should expect
- * the "params" array to be filled with the values as described in this
- * enumeration, in this order.
- */
+ /// \brief Definitions of the fields to be passed to deleteRecordInZone()
+ ///
+ /// Each derived implementation of deleteRecordInZone() should expect
+ /// the "params" array to be filled with the values as described in this
+ /// enumeration, in this order.
enum DeleteRecordParams {
DEL_NAME = 0, ///< The owner name of the record (a domain name)
DEL_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
@@ -115,218 +107,199 @@ public:
DEL_PARAM_COUNT = 3 ///< Number of parameters
};
- /**
- * Operation mode when adding a record diff.
- *
- * This is used as the "operation" parameter value of addRecordDiff().
- */
+ /// \brief Operation mode when adding a record diff.
+ ///
+ /// This is used as the "operation" parameter value of addRecordDiff().
enum DiffOperation {
DIFF_ADD = 0, ///< This diff is for adding an RR
DIFF_DELETE = 1 ///< This diff is for deleting an RR
};
- /**
- * Definitions of the fields to be passed to addRecordDiff().
- *
- * Each derived implementation of addRecordDiff() should expect
- * the "params" array to be filled with the values as described in this
- * enumeration, in this order.
- */
+ /// \brief Definitions of the fields to be passed to addRecordDiff().
+ ///
+ /// Each derived implementation of addRecordDiff() should expect
+ /// the "params" array to be filled with the values as described in this
+ /// enumeration, in this order.
enum DiffRecordParams {
- DIFF_NAME = 0, ///< The owner name of the record (a domain name)
- DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
- DIFF_TTL = 2, ///< The TTL of the record (in numeric form)
- DIFF_RDATA = 3, ///< Full text representation of the record's RDATA
+ DIFF_NAME = 0, ///< Owner name of the record (a domain name)
+ DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DIFF_TTL = 2, ///< The TTL of the record (in numeric form)
+ DIFF_RDATA = 3, ///< Full text representation of record's RDATA
DIFF_PARAM_COUNT = 4 ///< Number of parameters
};
- /**
- * \brief Destructor
- *
- * It is empty, but needs a virtual one, since we will use the derived
- * classes in polymorphic way.
- */
+ /// \brief Destructor
+ ///
+ /// It is empty, but needs a virtual one, since we will use the derived
+ /// classes in polymorphic way.
virtual ~DatabaseAccessor() { }
- /**
- * \brief Retrieve a zone identifier
- *
- * This method looks up a zone for the given name in the database. It
- * should match only exact zone name (eg. name is equal to the zone's
- * apex), as the DatabaseClient will loop trough the labels itself and
- * find the most suitable zone.
- *
- * It is not specified if and what implementation of this method may throw,
- * so code should expect anything.
- *
- * \param name The (fully qualified) domain name of the zone's apex to be
- * looked up.
- * \return The first part of the result indicates if a matching zone
- * was found. In case it was, the second part is internal zone ID.
- * This one will be passed to methods finding data in the zone.
- * It is not required to keep them, in which case whatever might
- * be returned - the ID is only passed back to the database as
- * an opaque handle.
- */
+ /// \brief Retrieve a zone identifier
+ ///
+ /// This method looks up a zone for the given name in the database. It
+ /// should match only the exact zone name (e.g. the name is equal to the zone's
+ /// apex), as the DatabaseClient will loop through the labels itself and
+ /// find the most suitable zone.
+ ///
+ /// It is not specified if and what implementation of this method may throw,
+ /// so code should expect anything.
+ ///
+ /// \param name The (fully qualified) domain name of the zone's apex to be
+ /// looked up.
+ /// \return The first part of the result indicates if a matching zone
+ /// was found. In case it was, the second part is internal zone ID.
+ /// This one will be passed to methods finding data in the zone.
+ /// It is not required to keep them, in which case whatever might
+ /// be returned - the ID is only passed back to the database as
+ /// an opaque handle.
virtual std::pair<bool, int> getZone(const std::string& name) const = 0;
- /**
- * \brief This holds the internal context of ZoneIterator for databases
- *
- * While the ZoneIterator implementation from DatabaseClient does all the
- * translation from strings to DNS classes and validation, this class
- * holds the pointer to where the database is at reading the data.
- *
- * It can either hold shared pointer to the connection which created it
- * and have some kind of statement inside (in case single database
- * connection can handle multiple concurrent SQL statements) or it can
- * create a new connection (or, if it is more convenient, the connection
- * itself can inherit both from DatabaseConnection and IteratorContext
- * and just clone itself).
- */
+ /// \brief This holds the internal context of ZoneIterator for databases
+ ///
+ /// While the ZoneIterator implementation from DatabaseClient does all the
+ /// translation from strings to DNS classes and validation, this class
+ /// holds the pointer to where the database is at reading the data.
+ ///
+ /// It can either hold shared pointer to the connection which created it
+ /// and have some kind of statement inside (in case single database
+ /// connection can handle multiple concurrent SQL statements) or it can
+ /// create a new connection (or, if it is more convenient, the connection
+ /// itself can inherit both from DatabaseConnection and IteratorContext
+ /// and just clone itself).
class IteratorContext : public boost::noncopyable {
public:
- /**
- * \brief Destructor
- *
- * Virtual destructor, so any descendand class is destroyed correctly.
- */
+ /// \brief Destructor
+ ///
+ /// Virtual destructor, so any descendant class is destroyed correctly.
virtual ~IteratorContext() { }
- /**
- * \brief Function to provide next resource record
- *
- * This function should provide data about the next resource record
- * from the data that is searched. The data is not converted yet.
- *
- * Depending on how the iterator was constructed, there is a difference
- * in behaviour; for a 'full zone iterator', created with
- * getAllRecords(), all COLUMN_COUNT elements of the array are
- * overwritten.
- * For a 'name iterator', created with getRecords(), the column
- * NAME_COLUMN is untouched, since what would be added here is by
- * definition already known to the caller (it already passes it as
- * an argument to getRecords()).
- *
- * Once this function returns false, any subsequent call to it should
- * result in false. The implementation of a derived class must ensure
- * it doesn't cause any disruption due to that such as a crash or
- * exception.
- *
- * \note The order of RRs is not strictly set, but the RRs for single
- * RRset must not be interleaved with any other RRs (eg. RRsets must be
- * "together").
- *
- * \param columns The data will be returned through here. The order
- * is specified by the RecordColumns enum, and the size must be
- * COLUMN_COUNT
- * \todo Do we consider databases where it is stored in binary blob
- * format?
- * \throw DataSourceError if there's database-related error. If the
- * exception (or any other in case of derived class) is thrown,
- * the iterator can't be safely used any more.
- * \return true if a record was found, and the columns array was
- * updated. false if there was no more data, in which case
- * the columns array is untouched.
- */
+ /// \brief Function to provide next resource record
+ ///
+ /// This function should provide data about the next resource record
+ /// from the data that is searched. The data is not converted yet.
+ ///
+ /// Depending on how the iterator was constructed, there is a difference
+ /// in behaviour; for a 'full zone iterator', created with
+ /// getAllRecords(), all COLUMN_COUNT elements of the array are
+ /// overwritten.
+ /// For a 'name iterator', created with getRecords(), the column
+ /// NAME_COLUMN is untouched, since what would be added here is by
+ /// definition already known to the caller (it already passes it as
+ /// an argument to getRecords()).
+ ///
+ /// Once this function returns false, any subsequent call to it should
+ /// result in false. The implementation of a derived class must ensure
+ /// it doesn't cause any disruption due to that such as a crash or
+ /// exception.
+ ///
+ /// \note The order of RRs is not strictly set, but the RRs for single
+ /// RRset must not be interleaved with any other RRs (eg. RRsets must be
+ /// "together").
+ ///
+ /// \param columns The data will be returned through here. The order
+ /// is specified by the RecordColumns enum, and the size must be
+ /// COLUMN_COUNT
+ /// \todo Do we consider databases where it is stored in binary blob
+ /// format?
+ /// \throw DataSourceError if there's database-related error. If the
+ /// exception (or any other in case of derived class) is thrown,
+ /// the iterator can't be safely used any more.
+ /// \return true if a record was found, and the columns array was
+ /// updated. false if there was no more data, in which case
+ /// the columns array is untouched.
virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) = 0;
};
typedef boost::shared_ptr<IteratorContext> IteratorContextPtr;
- /**
- * \brief Creates an iterator context for a specific name.
- *
- * Returns an IteratorContextPtr that contains all records of the
- * given name from the given zone.
- *
- * The implementation of the iterator that is returned may leave the
- * NAME_COLUMN column of the array passed to getNext() untouched, as that
- * data is already known (it is the same as the name argument here)
- *
- * \exception any Since any implementation can be used, the caller should
- * expect any exception to be thrown.
- *
- * \param name The name to search for. This should be a FQDN.
- * \param id The ID of the zone, returned from getZone().
- * \param subdomains If set to true, match subdomains of name instead
- * of name itself. It is used to find empty domains and match
- * wildcards.
- * \return Newly created iterator context. Must not be NULL.
- */
+ /// \brief Creates an iterator context for a specific name.
+ ///
+ /// Returns an IteratorContextPtr that contains all records of the
+ /// given name from the given zone.
+ ///
+ /// The implementation of the iterator that is returned may leave the
+ /// NAME_COLUMN column of the array passed to getNext() untouched, as that
+ /// data is already known (it is the same as the name argument here)
+ ///
+ /// \exception any Since any implementation can be used, the caller should
+ /// expect any exception to be thrown.
+ ///
+ /// \param name The name to search for. This should be a FQDN.
+ /// \param id The ID of the zone, returned from getZone().
+ /// \param subdomains If set to true, match subdomains of name instead
+ /// of name itself. It is used to find empty domains and match
+ /// wildcards.
+ /// \return Newly created iterator context. Must not be NULL.
virtual IteratorContextPtr getRecords(const std::string& name,
int id,
bool subdomains = false) const = 0;
- /**
- * \brief Creates an iterator context for the whole zone.
- *
- * Returns an IteratorContextPtr that contains all records of the
- * zone with the given zone id.
- *
- * Each call to getNext() on the returned iterator should copy all
- * column fields of the array that is passed, as defined in the
- * RecordColumns enum.
- *
- * \exception any Since any implementation can be used, the caller should
- * expect any exception to be thrown.
- *
- * \param id The ID of the zone, returned from getZone().
- * \return Newly created iterator context. Must not be NULL.
- */
+ /// \brief Creates an iterator context for the whole zone.
+ ///
+ /// Returns an IteratorContextPtr that contains all records of the
+ /// zone with the given zone id.
+ ///
+ /// Each call to getNext() on the returned iterator should copy all
+ /// column fields of the array that is passed, as defined in the
+ /// RecordColumns enum.
+ ///
+ /// \exception any Since any implementation can be used, the caller should
+ /// expect any exception to be thrown.
+ ///
+ /// \param id The ID of the zone, returned from getZone().
+ /// \return Newly created iterator context. Must not be NULL.
virtual IteratorContextPtr getAllRecords(int id) const = 0;
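
Because a full-zone iterator returns the rows of one RRset contiguously, a consumer (such as the DatabaseIterator in database.cc above) can group them with a simple look-ahead. A standalone sketch of that grouping, using an invented Row stand-in for the columns array (plain C++, not part of the patch):

#include <iostream>
#include <string>
#include <vector>

// Stand-in for one row returned by IteratorContext::getNext().
struct Row {
    std::string name;
    std::string type;
    std::string rdata;
};

int main() {
    // Rows as a full-zone iterator might return them (RRsets contiguous).
    const std::vector<Row> rows = {
        {"example.com.", "NS", "ns1.example.com."},
        {"example.com.", "NS", "ns2.example.com."},
        {"www.example.com.", "A", "192.0.2.1"}
    };
    size_t i = 0;
    while (i < rows.size()) {
        // Collect all consecutive rows sharing the leading name and type;
        // together they form one RRset.
        const size_t first = i;
        while (i < rows.size() && rows[i].name == rows[first].name &&
               rows[i].type == rows[first].type) {
            ++i;
        }
        std::cout << rows[first].name << " " << rows[first].type
                  << " (" << (i - first) << " RRs)\n";
    }
    return (0);
}
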
- /**
- * \brief Creates an iterator context for a set of differences.
- *
- * Returns an IteratorContextPtr that contains all difference records for
- * the given zone between two versions of a zone.
- *
- * The difference records are the set of records that would appear in an
- * IXFR serving a request for the difference between two versions of a zone.
- * The records are returned in the same order as they would be in the IXFR.
- * This means that if the the difference between versions of a zone with SOA
- * serial numbers of "start" and "end" is required, and the zone contains
- * the differences between serial number "start" to serial number
- * "intermediate" and from serial number "intermediate" to serial number
- * "end", the returned records will be (in order):
- *
- * \li SOA for serial "start"
- * \li Records removed from the zone between versions "start" and
- * "intermediate" of the zone. The order of these is not guaranteed.
- * \li SOA for serial "intermediate"
- * \li Records added to the zone between versions "start" and
- * "intermediate" of the zone. The order of these is not guaranteed.
- * \li SOA for serial "intermediate"
- * \li Records removed from the zone between versions "intermediate" and
- * "end" of the zone. The order of these is not guaranteed.
- * \li SOA for serial "end"
- * \li Records added to the zone between versions "intermediate" and "end"
- * of the zone. The order of these is not guaranteed.
- *
- * Note that there is no requirement that "start" be less than "end". Owing
- * to serial number arithmetic, it is entirely possible that a later version
- * of a zone will have a smaller SOA serial number than an earlier version.
- *
- * Each call to getNext() on the returned iterator should copy all
- * column fields of the array that is passed, as defined in the
- * RecordColumns enum.
- *
- * \exception any Since any implementation can be used, the caller should
- * expect any exception to be thrown.
- *
- * \param id The ID of the zone, returned from getZone().
- * \param start The SOA serial number of the version of the zone from
- * which the difference sequence should start.
- * \param end The SOA serial number of the version of the zone at which
- * the difference sequence should end.
- *
- * \return Newly created iterator context. Must not be NULL.
- */
+ /// \brief Creates an iterator context for a set of differences.
+ ///
+ /// Returns an IteratorContextPtr that contains all difference records for
+ /// the given zone between two versions of a zone.
+ ///
+ /// The difference records are the set of records that would appear in an
+ /// IXFR serving a request for the difference between two versions of a
+ /// zone. The records are returned in the same order as they would be in
+ /// the IXFR. This means that if the difference between versions of a
+ /// zone with SOA serial numbers of "start" and "end" is required, and the
+ /// zone contains the differences between serial number "start" to serial
+ /// number "intermediate" and from serial number "intermediate" to serial
+ /// number "end", the returned records will be (in order):
+ ///
+ /// \li SOA for serial "start"
+ /// \li Records removed from the zone between versions "start" and
+ /// "intermediate" of the zone. The order of these is not guaranteed.
+ /// \li SOA for serial "intermediate"
+ /// \li Records added to the zone between versions "start" and
+ /// "intermediate" of the zone. The order of these is not guaranteed.
+ /// \li SOA for serial "intermediate"
+ /// \li Records removed from the zone between versions "intermediate" and
+ /// "end" of the zone. The order of these is not guaranteed.
+ /// \li SOA for serial "end"
+ /// \li Records added to the zone between versions "intermediate" and "end"
+ /// of the zone. The order of these is not guaranteed.
+ ///
+ /// Note that there is no requirement that "start" be less than "end".
+ /// Owing to serial number arithmetic, it is entirely possible that a later
+ /// version of a zone will have a smaller SOA serial number than an earlier
+ /// version.
+ ///
+ /// Each call to getNext() on the returned iterator should copy all column
+ /// fields of the array that is passed, as defined in the RecordColumns
+ /// enum.
+ ///
+ /// \exception any Since any implementation can be used, the caller should
+ /// expect any exception to be thrown.
+ ///
+ /// \param id The ID of the zone, returned from getZone().
+ /// \param start The SOA serial number of the version of the zone from
+ /// which the difference sequence should start.
+ /// \param end The SOA serial number of the version of the zone at which
+ /// the difference sequence should end.
+ ///
+ /// \return Newly created iterator context. Must not be NULL.
virtual IteratorContextPtr
getDiffs(int id, uint32_t start, uint32_t end) const = 0;
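
For illustration, a hedged sketch of draining the iterator returned by getDiffs(); the accessor, zone id and serials are hypothetical, and the column names come from the RecordColumns enum above (illustration only, not part of the patch):

#include <stdint.h>
#include <iostream>
#include <string>
#include <datasrc/database.h>   // the header being modified here

using isc::datasrc::DatabaseAccessor;

// Print every diff record between the 'start' and 'end' SOA serials.
// The rows come back already in IXFR order: SOA(start), deletions,
// SOA(next), additions, ..., SOA(end), additions.
void dumpDiffs(const DatabaseAccessor& accessor, int zone_id,
               uint32_t start, uint32_t end) {
    DatabaseAccessor::IteratorContextPtr ctx =
        accessor.getDiffs(zone_id, start, end);
    std::string columns[DatabaseAccessor::COLUMN_COUNT];
    while (ctx->getNext(columns)) {
        std::cout << columns[DatabaseAccessor::NAME_COLUMN] << " "
                  << columns[DatabaseAccessor::TTL_COLUMN] << " "
                  << columns[DatabaseAccessor::TYPE_COLUMN] << " "
                  << columns[DatabaseAccessor::RDATA_COLUMN] << "\n";
    }
}
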
- /// Start a transaction for updating a zone.
+ /// \brief Start a transaction for updating a zone.
///
/// Each derived class version of this method starts a database
/// transaction to make updates to the given name of zone (whose class was
@@ -385,7 +358,7 @@ public:
virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
bool replace) = 0;
- /// Add a single record to the zone to be updated.
+ /// \brief Add a single record to the zone to be updated.
///
/// This method provides a simple interface to insert a new record
/// (a database "row") to the zone in the update context started by
@@ -424,7 +397,7 @@ public:
virtual void addRecordToZone(
const std::string (&columns)[ADD_COLUMN_COUNT]) = 0;
- /// Delete a single record from the zone to be updated.
+ /// \brief Delete a single record from the zone to be updated.
///
/// This method provides a simple interface to delete a record
/// (a database "row") from the zone in the update context started by
@@ -461,7 +434,7 @@ public:
virtual void deleteRecordInZone(
const std::string (¶ms)[DEL_PARAM_COUNT]) = 0;
- /// Start a general transaction.
+ /// \brief Start a general transaction.
///
/// Each derived class version of this method starts a database
/// transaction in a way specific to the database details. Any subsequent
@@ -481,7 +454,7 @@ public:
/// internal database related error.
virtual void startTransaction() = 0;
- /// Commit a transaction.
+ /// \brief Commit a transaction.
///
/// This method completes a transaction started by \c startTransaction
/// or \c startUpdateZone.
@@ -504,7 +477,7 @@ public:
/// to the method or internal database error.
virtual void commit() = 0;
- /// Rollback any changes in a transaction made so far.
+ /// \brief Rollback any changes in a transaction made so far.
///
/// This method rollbacks a transaction started by \c startTransaction or
/// \c startUpdateZone. When it succeeds (it normally should, but see
@@ -530,7 +503,7 @@ public:
/// to the method or internal database error.
virtual void rollback() = 0;
- /// Install a single RR diff in difference sequences for zone update.
+ /// \brief Install a single RR diff in difference sequences for zone update.
///
/// This method inserts parameters of an update operation for a single RR
/// (either adding or deleting one) in the underlying database.
@@ -604,7 +577,7 @@ public:
int zone_id, uint32_t serial, DiffOperation operation,
const std::string (¶ms)[DIFF_PARAM_COUNT]) = 0;
- /// Clone the accessor with the same configuration.
+ /// \brief Clone the accessor with the same configuration.
///
/// Each derived class implementation of this method will create a new
/// accessor of the same derived class with the same configuration
@@ -633,187 +606,169 @@ public:
/// \return A shared pointer to the cloned accessor.
virtual boost::shared_ptr<DatabaseAccessor> clone() = 0;
- /**
- * \brief Returns a string identifying this dabase backend
- *
- * The returned string is mainly intended to be used for
- * debugging/logging purposes.
- *
- * Any implementation is free to choose the exact string content,
- * but it is advisable to make it a name that is distinguishable
- * from the others.
- *
- * \return the name of the database
- */
+ /// \brief Returns a string identifying this database backend
+ ///
+ /// The returned string is mainly intended to be used for
+ /// debugging/logging purposes.
+ ///
+ /// Any implementation is free to choose the exact string content,
+ /// but it is advisable to make it a name that is distinguishable
+ /// from the others.
+ ///
+ /// \return the name of the database
virtual const std::string& getDBName() const = 0;
- /**
- * \brief It returns the previous name in DNSSEC order.
- *
- * This is used in DatabaseClient::findPreviousName and does more
- * or less the real work, except for working on strings.
- *
- * \param rname The name to ask for previous of, in reversed form.
- * We use the reversed form (see isc::dns::Name::reverse),
- * because then the case insensitive order of string representation
- * and the DNSSEC order correspond (eg. org.example.a is followed
- * by org.example.a.b which is followed by org.example.b, etc).
- * \param zone_id The zone to look through.
- * \return The previous name.
- * \note This function must return previous name even in case
- * the queried rname does not exist in the zone.
- * \note This method must skip under-the-zone-cut data (glue data).
- * This might be implemented by looking for NSEC records (as glue
- * data don't have them) in the zone or in some other way.
- *
- * \throw DataSourceError if there's a problem with the database.
- * \throw NotImplemented if this database doesn't support DNSSEC
- * or there's no previous name for the queried one (the NSECs
- * might be missing or the queried name is less or equal the
- * apex of the zone).
- */
+ /// \brief It returns the previous name in DNSSEC order.
+ ///
+ /// This is used in DatabaseClient::findPreviousName and does more
+ /// or less the real work, except for working on strings.
+ ///
+ /// \param rname The name to ask for previous of, in reversed form.
+ /// We use the reversed form (see isc::dns::Name::reverse),
+ /// because then the case insensitive order of string representation
+ /// and the DNSSEC order correspond (eg. org.example.a is followed
+ /// by org.example.a.b which is followed by org.example.b, etc).
+ /// \param zone_id The zone to look through.
+ /// \return The previous name.
+ /// \note This function must return previous name even in case
+ /// the queried rname does not exist in the zone.
+ /// \note This method must skip under-the-zone-cut data (glue data).
+ /// This might be implemented by looking for NSEC records (as glue
+ /// data don't have them) in the zone or in some other way.
+ ///
+ /// \throw DataSourceError if there's a problem with the database.
+ /// \throw NotImplemented if this database doesn't support DNSSEC
+ /// or there's no previous name for the queried one (the NSECs
+ /// might be missing or the queried name is less than or equal to the
+ /// apex of the zone).
virtual std::string findPreviousName(int zone_id,
const std::string& rname) const = 0;
};
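
The reversed-name ordering described for findPreviousName() can be demonstrated with a small standalone sketch (plain C++, separate from the patch); it mirrors what isc::dns::Name::reverse is described to do, using plain strings:

#include <iostream>
#include <string>
#include <vector>

// Reverse the label order of a dotted name, so that case-insensitive
// string comparison of the results matches DNSSEC ordering.
std::string reverseName(const std::string& name) {
    std::vector<std::string> labels;
    std::string::size_type start = 0;
    while (start <= name.size()) {
        const std::string::size_type dot = name.find('.', start);
        if (dot == std::string::npos) {
            labels.push_back(name.substr(start));
            break;
        }
        labels.push_back(name.substr(start, dot - start));
        start = dot + 1;
    }
    std::string reversed;
    for (std::vector<std::string>::const_reverse_iterator it =
             labels.rbegin(); it != labels.rend(); ++it) {
        reversed += *it + ".";
    }
    return (reversed);
}

int main() {
    // In DNSSEC order: a.example.org < b.a.example.org < b.example.org,
    // and the reversed forms sort the same way as plain strings.
    std::cout << reverseName("a.example.org") << "\n";    // org.example.a.
    std::cout << reverseName("b.a.example.org") << "\n";  // org.example.a.b.
    std::cout << reverseName("b.example.org") << "\n";    // org.example.b.
    return (0);
}
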
-/**
- * \brief Concrete data source client oriented at database backends.
- *
- * This class (together with corresponding versions of ZoneFinder,
- * ZoneIterator, etc.) translates high-level data source queries to
- * low-level calls on DatabaseAccessor. It calls multiple queries
- * if necessary and validates data from the database, allowing the
- * DatabaseAccessor to be just simple translation to SQL/other
- * queries to database.
- *
- * While it is possible to subclass it for specific database in case
- * of special needs, it is not expected to be needed. This should just
- * work as it is with whatever DatabaseAccessor.
- */
+/// \brief Concrete data source client oriented at database backends.
+///
+/// This class (together with corresponding versions of ZoneFinder,
+/// ZoneIterator, etc.) translates high-level data source queries to
+/// low-level calls on DatabaseAccessor. It calls multiple queries
+/// if necessary and validates data from the database, allowing the
+/// DatabaseAccessor to be just a simple translation to SQL/other
+/// database queries.
+///
+/// While it is possible to subclass it for a specific database in case
+/// of special needs, it is not expected to be needed. This should just
+/// work as it is with whatever DatabaseAccessor is used.
class DatabaseClient : public DataSourceClient {
public:
- /**
- * \brief Constructor
- *
- * It initializes the client with a database via the given accessor.
- *
- * \exception isc::InvalidParameter if accessor is NULL. It might throw
- * standard allocation exception as well, but doesn't throw anything else.
- *
- * \param rrclass The RR class of the zones that this client will handle.
- * \param accessor The accessor to the database to use to get data.
- * As the parameter suggests, the client takes ownership of the accessor
- * and will delete it when itself deleted.
- */
+ /// \brief Constructor
+ ///
+ /// It initializes the client with a database via the given accessor.
+ ///
+ /// \exception isc::InvalidParameter if accessor is NULL. It might throw
+ /// standard allocation exception as well, but doesn't throw anything else.
+ ///
+ /// \param rrclass The RR class of the zones that this client will handle.
+ /// \param accessor The accessor to the database to use to get data.
+ /// As the parameter suggests, the client takes ownership of the accessor
+ /// and will delete it when itself deleted.
DatabaseClient(isc::dns::RRClass rrclass,
boost::shared_ptr<DatabaseAccessor> accessor);
- /**
- * \brief Corresponding ZoneFinder implementation
- *
- * The zone finder implementation for database data sources. Similarly
- * to the DatabaseClient, it translates the queries to methods of the
- * database.
- *
- * Application should not come directly in contact with this class
- * (it should handle it trough generic ZoneFinder pointer), therefore
- * it could be completely hidden in the .cc file. But it is provided
- * to allow testing and for rare cases when a database needs slightly
- * different handling, so it can be subclassed.
- *
- * Methods directly corresponds to the ones in ZoneFinder.
- */
+ /// \brief Corresponding ZoneFinder implementation
+ ///
+ /// The zone finder implementation for database data sources. Similarly
+ /// to the DatabaseClient, it translates the queries to methods of the
+ /// database.
+ ///
+ /// Application should not come directly in contact with this class
+ /// (it should handle it through a generic ZoneFinder pointer), therefore
+ /// it could be completely hidden in the .cc file. But it is provided
+ /// to allow testing and for rare cases when a database needs slightly
+ /// different handling, so it can be subclassed.
+ ///
+ /// Methods directly correspond to the ones in ZoneFinder.
class Finder : public ZoneFinder {
public:
- /**
- * \brief Constructor
- *
- * \param database The database (shared with DatabaseClient) to
- * be used for queries (the one asked for ID before).
- * \param zone_id The zone ID which was returned from
- * DatabaseAccessor::getZone and which will be passed to further
- * calls to the database.
- * \param origin The name of the origin of this zone. It could query
- * it from database, but as the DatabaseClient just searched for
- * the zone using the name, it should have it.
- */
+ /// \brief Constructor
+ ///
+ /// \param database The database (shared with DatabaseClient) to
+ /// be used for queries (the one asked for ID before).
+ /// \param zone_id The zone ID which was returned from
+ /// DatabaseAccessor::getZone and which will be passed to further
+ /// calls to the database.
+ /// \param origin The name of the origin of this zone. It could query
+ /// it from database, but as the DatabaseClient just searched for
+ /// the zone using the name, it should have it.
Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
const isc::dns::Name& origin);
+
// The following three methods are just implementations of inherited
// ZoneFinder's pure virtual methods.
virtual isc::dns::Name getOrigin() const;
virtual isc::dns::RRClass getClass() const;
- /**
- * \brief Find an RRset in the datasource
- *
- * Searches the datasource for an RRset of the given name and
- * type. If there is a CNAME at the given name, the CNAME rrset
- * is returned.
- * (this implementation is not complete, and currently only
- * does full matches, CNAMES, and the signatures for matches and
- * CNAMEs)
- * \note target was used in the original design to handle ANY
- * queries. This is not implemented yet, and may use
- * target again for that, but it might also use something
- * different. It is left in for compatibility at the moment.
- * \note options are ignored at this moment
- *
- * \note Maybe counter intuitively, this method is not a const member
- * function. This is intentional; some of the underlying implementations
- * are expected to use a database backend, and would internally contain
- * some abstraction of "database connection". In the most strict sense
- * any (even read only) operation might change the internal state of
- * such a connection, and in that sense the operation cannot be considered
- * "const". In order to avoid giving a false sense of safety to the
- * caller, we indicate a call to this method may have a surprising
- * side effect. That said, this view may be too strict and it may
- * make sense to say the internal database connection doesn't affect
- * external behavior in terms of the interface of this method. As
- * we gain more experiences with various kinds of backends we may
- * revisit the constness.
- *
- * \exception DataSourceError when there is a problem reading
- * the data from the dabase backend.
- * This can be a connection, code, or
- * data (parse) error.
- *
- * \param name The name to find
- * \param type The RRType to find
- * \param target Unused at this moment
- * \param options Options about how to search.
- * See ZoneFinder::FindOptions.
- */
+ /// \brief Find an RRset in the datasource
+ ///
+ /// Searches the datasource for an RRset of the given name and
+ /// type. If there is a CNAME at the given name, the CNAME rrset
+ /// is returned.
+ /// (this implementation is not complete, and currently only
+ /// does full matches, CNAMES, and the signatures for matches and
+ /// CNAMEs)
+ /// \note target was used in the original design to handle ANY
+ /// queries. This is not implemented yet, and may use
+ /// target again for that, but it might also use something
+ /// different. It is left in for compatibility at the moment.
+ /// \note options are ignored at this moment
+ ///
+ /// \note Maybe counter intuitively, this method is not a const member
+ /// function. This is intentional; some of the underlying
+ /// implementations are expected to use a database backend, and would
+ /// internally contain some abstraction of "database connection". In
+ /// the most strict sense any (even read only) operation might change
+ /// the internal state of such a connection, and in that sense the
+ /// operation cannot be considered "const". In order to avoid giving a
+ /// false sense of safety to the caller, we indicate a call to this
+ /// method may have a surprising side effect. That said, this view may
+ /// be too strict and it may make sense to say the internal database
+ /// connection doesn't affect external behavior in terms of the
+ /// interface of this method. As we gain more experiences with various
+ /// kinds of backends we may revisit the constness.
+ ///
+ /// \exception DataSourceError when there is a problem reading
+ /// the data from the database backend.
+ /// This can be a connection, code, or
+ /// data (parse) error.
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param target Unused at this moment
+ /// \param options Options about how to search.
+ /// See ZoneFinder::FindOptions.
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
const FindOptions options = FIND_DEFAULT);
- /**
- * \brief Implementation of ZoneFinder::findPreviousName method.
- */
+ /// \brief Implementation of ZoneFinder::findPreviousName method.
virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
const;
- /**
- * \brief The zone ID
- *
- * This function provides the stored zone ID as passed to the
- * constructor. This is meant for testing purposes and normal
- * applications shouldn't need it.
- */
+ /// \brief The zone ID
+ ///
+ /// This function provides the stored zone ID as passed to the
+ /// constructor. This is meant for testing purposes and normal
+ /// applications shouldn't need it.
int zone_id() const { return (zone_id_); }
- /**
- * \brief The database accessor.
- *
- * This function provides the database accessor stored inside as
- * passed to the constructor. This is meant for testing purposes and
- * normal applications shouldn't need it.
- */
+ /// \brief The database accessor.
+ ///
+ /// This function provides the database accessor stored inside as
+ /// passed to the constructor. This is meant for testing purposes and
+ /// normal applications shouldn't need it.
const DatabaseAccessor& getAccessor() const {
return (*accessor_);
}
+
private:
boost::shared_ptr<DatabaseAccessor> accessor_;
const int zone_id_;
@@ -824,103 +779,308 @@ public:
FoundRRsets;
/// \brief Just shortcut for set of types
typedef std::set<dns::RRType> WantedTypes;
- /**
- * \brief Searches database for RRsets of one domain.
- *
- * This method scans RRs of single domain specified by name and
- * extracts any RRsets found and requested by parameters.
- *
- * It is used internally by find(), because it is called multiple
- * times (usually with different domains).
- *
- * \param name Which domain name should be scanned.
- * \param types List of types the caller is interested in.
- * \param check_ns If this is set to true, it checks nothing lives
- * together with NS record (with few little exceptions, like RRSIG
- * or NSEC). This check is meant for non-apex NS records.
- * \param construct_name If this is NULL, the resulting RRsets have
- * their name set to name. If it is not NULL, it overrides the name
- * and uses this one (this can be used for wildcard synthesized
- * records).
- * \return A pair, where the first element indicates if the domain
- * contains any RRs at all (not only the requested, it may happen
- * this is set to true, but the second part is empty). The second
- * part is map from RRtypes to RRsets of the corresponding types.
- * If the RRset is not present in DB, the RRtype is not there at
- * all (so you'll not find NULL pointer in the result).
- * \throw DataSourceError If there's a low-level error with the
- * database or the database contains bad data.
- */
+
+ /// \brief Search result of \c findDelegationPoint().
+ ///
+ /// This is a tuple combining the result of the search - a status code
+ /// and a pointer to the RRset found - together with additional
+ /// information needed for subsequent processing, an indication of
+ /// the first NS RRset found in the search and the number of labels
+ /// in the last non-empty domain encountered in the search. It is
+ /// used by \c findDelegationPoint().
+ ///
+ /// The last two items arise naturally during the search and, although
+ /// not strictly part of the result, they are passed back to avoid
+ /// another (duplicate) search later in the processing.
+ ///
+ /// Note that the code and rrset elements are the same as those in
+ /// the \c ZoneFinder::FindResult struct: this structure could be
+ /// derived from that one, but as it is used just once in the code and
+ /// will never be treated as a \c FindResult, the obscurity involved in
+ /// deriving it from a parent class was deemed not worthwhile.
+ struct DelegationSearchResult {
+ DelegationSearchResult(const ZoneFinder::Result param_code,
+ const isc::dns::ConstRRsetPtr param_rrset,
+ const isc::dns::ConstRRsetPtr param_ns,
+ size_t param_last_known) :
+ code(param_code), rrset(param_rrset),
+ first_ns(param_ns),
+ last_known(param_last_known)
+ {}
+ const ZoneFinder::Result code; ///< Result code
+ const isc::dns::ConstRRsetPtr rrset; ///< RRset found
+ const isc::dns::ConstRRsetPtr first_ns; ///< First NS found
+ const size_t last_known; ///< No. labels in last non-empty domain
+ };
+
+ /// \brief Searches database for RRsets of one domain.
+ ///
+ /// This method scans RRs of a single domain specified by name and
+ /// extracts any RRsets found and requested by parameters.
+ ///
+ /// It is used internally by find(), because it is called multiple
+ /// times (usually with different domains).
+ ///
+ /// \param name Which domain name should be scanned.
+ /// \param types List of types the caller is interested in.
+ /// \param check_ns If this is set to true, it checks that nothing lives
+ /// together with the NS record (with a few exceptions, such as RRSIG
+ /// or NSEC). This check is meant for non-apex NS records.
+ /// \param construct_name If this is NULL, the resulting RRsets have
+ /// their name set to name. If it is not NULL, it overrides the name
+ /// and uses this one (this can be used for wildcard synthesized
+ /// records).
+ /// \return A pair, where the first element indicates if the domain
+ /// contains any RRs at all (not only the requested ones; it may
+ /// happen that this is true while the second part is empty). The
+ /// second part is a map from RRtypes to RRsets of the corresponding
+ /// types. If an RRset is not present in the DB, its RRtype is not
+ /// there at all (so you won't find a NULL pointer in the result).
+ /// \throw DataSourceError If there's a low-level error with the
+ /// database or the database contains bad data.
FoundRRsets getRRsets(const std::string& name,
const WantedTypes& types, bool check_ns,
const std::string* construct_name = NULL);
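
Since the return value is a (bool, map) pair, the way find() consumes it may
be easier to see in a short sketch, written as if inside a Finder member
function. This is an illustration against the declaration above, not code
from the implementation; 'name' is assumed to be an isc::dns::Name already
in scope, and the choice of requested types is arbitrary.

    // Sketch of consuming the getRRsets() result.
    WantedTypes types;
    types.insert(isc::dns::RRType::A());
    types.insert(isc::dns::RRType::CNAME());
    const FoundRRsets found = getRRsets(name.toText(), types, true);
    if (!found.first) {
        // No RRs of any type at this name: either NXDOMAIN or an
        // empty non-terminal.
    } else if (found.second.find(isc::dns::RRType::CNAME()) !=
               found.second.end()) {
        // A CNAME exists at the name; the mapped value is its RRsetPtr.
    }
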
- /**
- * \brief Checks if something lives below this domain.
- *
- * This looks if there's any subdomain of the given name. It can be
- * used to test if domain is empty non-terminal.
- *
- * \param name The domain to check.
- */
+
+ /// \brief Find delegation point
+ ///
+ /// Given a name, searches through the superdomains from the origin
+ /// down, searching for a point that indicates a delegation (i.e. an
+ /// NS record or a DNAME).
+ ///
+ /// The method operates in two modes, non-glue-ok and glue-ok modes:
+ ///
+ /// In non-glue-ok mode, the search is made purely for the NS or DNAME
+ /// RR. The zone is searched from the origin down looking for one
+ /// of these RRTypes (and ignoring the NS records at the zone origin).
+ /// A status is returned indicating what is found: DNAME, DELEGATION
+ /// or SUCCESS, the last indicating that nothing was found, together
+ /// with a pointer to the relevant RR.
+ ///
+ /// In glue-ok mode, the first NS encountered in the search (apart from
+ /// the NS at the zone apex) is remembered but otherwise NS records are
+ /// ignored and the search attempts to find a DNAME. The result is
+ /// returned in the same format, along with a pointer to the first non-
+ /// apex NS (if found).
+ ///
+ /// \param name The name to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ ///
+ /// \return Tuple holding the result of the search - the RRset of the
+ /// delegation point and the type of the point (DELEGATION or
+ /// DNAME) - and associated information. This latter item
+ /// comprises two pieces of data: a pointer to the highest
+ /// encountered NS, and the number of labels in the last known
+ /// non-empty domain. The associated information is found as
+ /// a natural part of the search for the delegation point and
+ /// is used later in the find() processing; it is passed back
+ /// to avoid the need to perform a second search to obtain it.
+ DelegationSearchResult
+ findDelegationPoint(const isc::dns::Name& name,
+ const FindOptions options);
+
+ /// \brief Find wildcard match
+ ///
+ /// Having found that the name is not an empty non-terminal, this
+ /// searches the zone for wildcards that match the name.
+ ///
+ /// It searches superdomains of the name from the zone origin down
+ /// looking for a wildcard in the zone that matches the name. There
+ /// are several cases to consider:
+ ///
+ /// - If the previous search for a delegation point has found that
+ /// there is an NS at the superdomain of the point at which the
+ /// wildcard is found, the delegation is returned.
+ /// - If there is a match to the name, an appropriate status is
+ /// returned (match on requested type, delegation, cname, or just
+ /// the indication of a match but no RRs relevant to the query).
+ /// - If the match is to a non-empty non-terminal wildcard, a
+ /// wildcard NXRRSET is returned.
+ ///
+ /// Note that if DNSSEC is enabled for the search and the zone uses
+ /// NSEC for authenticated denial of existence, the search may
+ /// return NSEC records.
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ /// \param dresult Result of the search through the zone for a
+ /// delegation.
+ ///
+ /// \return Tuple holding the result of the search - the RRset of the
+ /// wildcard records matching the name, together with a status
+ /// indicating the match type (e.g. CNAME at the wildcard
+ /// match, no RRs of the requested type at the wildcard,
+ /// success due to an exact match). If there is no match, an
+ /// indication as to whether the failure was an NXDOMAIN or an
+ /// NXRRSET is also returned.
+ FindResult findWildcardMatch(
+ const isc::dns::Name& name,
+ const isc::dns::RRType& type, const FindOptions options,
+ const DelegationSearchResult& dresult);
+
+ /// \brief Handle matching results for name
+ ///
+ /// This is called when something is found in the underlying database
+ /// whose domain name is an exact match of the name to be searched for.
+ /// It explores four possible cases to decide the final lookup result:
+ /// - The name is a zone cut due to an NS RR.
+ /// - CNAME is found (while the requested RR type is not CNAME).
+ /// In this case multiple CNAMEs are checked and rejected with
+ /// a \c DataSourceError exception.
+ /// - Requested type is not found at that name.
+ /// - A record of the requested type is found.
+ /// and returns a corresponding find result.
+ ///
+ /// This method is commonly used for normal (non wildcard) and wildcard
+ /// matches.
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ /// \param is_origin If name is the zone's origin name.
+ /// \param found A set of found RRsets in the search for the name
+ /// and type. It could contain one or more of the requested
+ /// type, CNAME, NS, and NSEC RRsets of the name.
+ /// \param wildname If non NULL, the method is called on a wildcard
+ /// match, and points to a string object representing
+ /// a textual form of the matched wildcard name;
+ /// it's NULL in the case of non wildcard match.
+ ///
+ /// \return Tuple holding the result of the search - the RRset
+ /// found (if any), together with a status indicating the
+ /// match type (corresponding to each of the above 4 cases).
+ /// The return value is intended to be usable as the return
+ /// value of the caller of this helper method.
+ FindResult findOnNameResult(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const FindOptions options,
+ const bool is_origin,
+ const FoundRRsets& found,
+ const std::string* wildname);
+
+ /// \brief Handle no match for name
+ ///
+ /// This is called when it is known that there is no delegation and
+ /// there is no exact match for the name (regardless of RR types
+ /// requested). Before returning NXDOMAIN, we need to check two
+ /// cases:
+ /// - Empty non-terminal: if the name has subdomains in the database,
+ /// flag the fact. An NXRRSET will be returned (along with the
+ /// NSEC record covering the requested domain name if DNSSEC data
+ /// is being returned).
+ /// - Wildcard: is there a wildcard record in the zone that matches
+ /// the requested name? If so, return it. If not, return the relevant
+ /// NSEC records (if requested).
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ /// \param dresult Result of the search through the zone for a
+ /// delegation.
+ ///
+ /// \return Tuple holding the result of the search - the RRset of the
+ /// wildcard records matching the name, together with a status
+ /// indicating the match type (e.g. CNAME at the wildcard
+ /// match, no RRs of the requested type at the wildcard,
+ /// success due to an exact match).
+ FindResult findNoNameResult(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ FindOptions options,
+ const DelegationSearchResult& dresult);
+
+ /// \brief Logs condition and creates result
+ ///
+ /// A convenience function used by findOnNameResult(): it both creates
+ /// the FindResult object that find() will return to its caller and logs
+ /// a debug message for the information being returned.
+ ///
+ /// \param name Domain name of the RR that was being sought.
+ /// \param wildname Domain name string of a matched wildcard name or
+ /// NULL for non wildcard match.
+ /// \param type Type of RR being sought.
+ /// \param code Result of the find operation
+ /// \param rrset RRset found as a result of the find (which may be
+ /// null).
+ /// \param log_id ID of the message being logged. Up to five
+ /// parameters are available to the message: data source name,
+ /// requested domain name, requested class, requested type
+ /// and (but only if the search was successful and returned
+ /// an RRset) details of the RRset found.
+ ///
+ /// \return FindResult object constructed from the code and rrset
+ /// arguments.
+ FindResult logAndCreateResult(const isc::dns::Name& name,
+ const std::string* wildname,
+ const isc::dns::RRType& type,
+ ZoneFinder::Result code,
+ isc::dns::ConstRRsetPtr rrset,
+ const isc::log::MessageID& log_id) const;
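
Taken together, the helpers above suggest the overall shape of find(); the
following sketch shows one way they could be chained. It is written against
the declarations above as if inside Finder::find(), and is an illustration
only, not the actual body: details such as the set of types requested, the
origin check and the check_ns choice are assumptions.

    // Illustrative flow only, as if inside Finder::find().
    const DelegationSearchResult dresult =
        findDelegationPoint(name, options);
    if (dresult.code != SUCCESS) {
        // A DNAME or a delegation was found above the name.
        return (FindResult(dresult.code, dresult.rrset));
    }

    // Ask the database for the types we may need at the exact name.
    WantedTypes types;
    types.insert(type);
    types.insert(isc::dns::RRType::CNAME());
    types.insert(isc::dns::RRType::NS());
    const bool is_origin = (name == getOrigin());
    const FoundRRsets found = getRRsets(name.toText(), types, !is_origin);

    if (found.first) {
        // Exact match of the name: NS cut, CNAME, NXRRSET or SUCCESS.
        return (findOnNameResult(name, type, options, is_origin, found,
                                 NULL));
    }
    // No exact match: empty non-terminal or wildcard processing.
    return (findNoNameResult(name, type, options, dresult));
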
+
+ /// \brief Checks if something lives below this domain.
+ ///
+ /// This checks whether there's any subdomain of the given name. It can
+ /// be used to test if a domain is an empty non-terminal.
+ ///
+ /// \param name The domain to check.
+ ///
+ /// \return true if the name has subdomains, false if not.
bool hasSubdomains(const std::string& name);
- /**
- * \brief Get the NSEC covering a name.
- *
- * This one calls findPreviousName on the given name and extracts an NSEC
- * record on the result. It handles various error cases. The method exists
- * to share code present at more than one location.
- */
- dns::RRsetPtr findNSECCover(const dns::Name& name);
-
- /**
- * \brief Convenience type shortcut.
- *
- * To find stuff in the result of getRRsets.
- */
+ /// \brief Get the NSEC covering a name.
+ ///
+ /// This one calls findPreviousName on the given name and extracts an
+ /// NSEC record from the result. It handles various error cases. The
+ /// method exists to share code used in more than one location.
+ dns::ConstRRsetPtr findNSECCover(const dns::Name& name);
+
+ /// \brief Convenience type shortcut.
+ ///
+ /// To find stuff in the result of getRRsets.
typedef std::map<dns::RRType, dns::RRsetPtr>::const_iterator
FoundIterator;
};
- /**
- * \brief Find a zone in the database
- *
- * This queries database's getZone to find the best matching zone.
- * It will propagate whatever exceptions are thrown from that method
- * (which is not restricted in any way).
- *
- * \param name Name of the zone or data contained there.
- * \return FindResult containing the code and an instance of Finder, if
- * anything is found. However, application should not rely on the
- * ZoneFinder being instance of Finder (possible subclass of this class
- * may return something else and it may change in future versions), it
- * should use it as a ZoneFinder only.
- */
+ /// \brief Find a zone in the database
+ ///
+ /// This queries the database's getZone to find the best matching zone.
+ /// It will propagate whatever exceptions are thrown from that method
+ /// (which is not restricted in any way).
+ ///
+ /// \param name Name of the zone or data contained there.
+ /// \return FindResult containing the code and an instance of Finder, if
+ /// anything is found. However, the application should not rely on the
+ /// ZoneFinder being an instance of Finder (a possible subclass of this
+ /// class may return something else, and this may change in future
+ /// versions); it should use it as a ZoneFinder only.
virtual FindResult findZone(const isc::dns::Name& name) const;
- /**
- * \brief Get the zone iterator
- *
- * The iterator allows going through the whole zone content. If the
- * underlying DatabaseConnection is implemented correctly, it should
- * be possible to have multiple ZoneIterators at once and query data
- * at the same time.
- *
- * \exception DataSourceError if the zone doesn't exist.
- * \exception isc::NotImplemented if the underlying DatabaseConnection
- * doesn't implement iteration. But in case it is not implemented
- * and the zone doesn't exist, DataSourceError is thrown.
- * \exception Anything else the underlying DatabaseConnection might
- * want to throw.
- * \param name The origin of the zone to iterate.
- * \param separate_rrs If true, the iterator will return each RR as a
- * new RRset object. If false, the iterator will
- * combine consecutive RRs with the name and type
- * into 1 RRset. The capitalization of the RRset will
- * be that of the first RR read, and TTLs will be
- * adjusted to the lowest one found.
- * \return Shared pointer to the iterator (it will never be NULL)
- */
+ /// \brief Get the zone iterator
+ ///
+ /// The iterator allows going through the whole zone content. If the
+ /// underlying DatabaseConnection is implemented correctly, it should
+ /// be possible to have multiple ZoneIterators at once and query data
+ /// at the same time.
+ ///
+ /// \exception DataSourceError if the zone doesn't exist.
+ /// \exception isc::NotImplemented if the underlying DatabaseConnection
+ /// doesn't implement iteration. But in case it is not implemented
+ /// and the zone doesn't exist, DataSourceError is thrown.
+ /// \exception Anything else the underlying DatabaseConnection might
+ /// want to throw.
+ /// \param name The origin of the zone to iterate.
+ /// \param separate_rrs If true, the iterator will return each RR as a
+ /// new RRset object. If false, the iterator will
+ /// combine consecutive RRs with the same name and type
+ /// into one RRset. The capitalization of the RRset will
+ /// be that of the first RR read, and TTLs will be
+ /// adjusted to the lowest one found.
+ /// \return Shared pointer to the iterator (it will never be NULL)
virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
bool separate_rrs = false) const;
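
For completeness, here is a small sketch of iterating a zone through this
interface. The header names are assumptions, as is getNextRRset() returning
a null pointer at the end of the zone; the function and zone name are
invented for the example.

    // Illustrative sketch only.
    #include <datasrc/database.h>   // assumed header for DatabaseClient
    #include <dns/name.h>
    #include <dns/rrset.h>
    #include <iostream>

    void dumpZone(isc::datasrc::DatabaseClient& client) {
        using namespace isc::dns;
        // separate_rrs = false: consecutive RRs of the same name and type
        // are combined into one RRset, with the TTL adjusted to the
        // lowest value found.
        isc::datasrc::ZoneIteratorPtr it =
            client.getIterator(Name("example.org"), false);
        for (ConstRRsetPtr rrset = it->getNextRRset(); rrset;
             rrset = it->getNextRRset()) {
            std::cout << rrset->toText();
        }
    }
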
@@ -953,7 +1113,3 @@ private:
}
#endif // __DATABASE_DATASRC_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index b4d0df7..01fb082 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -68,7 +68,7 @@ The datasource tried to provide an NSEC proof that the named domain does not
exist, but the database backend doesn't support DNSSEC. No proof is included
in the answer as a result.
-% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3/%4
Debug information. The database data source is looking up records with the given
name and type in the database.
@@ -78,11 +78,17 @@ different TTL values. This isn't allowed on the wire and is considered
an error, so we set it to the lowest value we found (but we don't modify the
database). The data in database should be checked and fixed.
+% DATASRC_DATABASE_FOUND_CNAME search in datasource %1 for %2/%3/%4 found CNAME, resulting in %5
+When searching the domain for a name, a CNAME was found at that name.
+Even though it was not the RR type being sought, it is returned. (The
+caller may want to continue the lookup by replacing the query name with
+the canonical name and restarting the query with the original RR type.)
+
% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
When searching for a domain, the program met a delegation to a different zone
at the given domain name. It will return that one instead.
-% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT search in datasource %1 for %2/%3/%4 found delegation at %5
The program found the domain requested, but it is a delegation point to a
different zone, therefore it is not authoritative for this domain name.
It will return the NS record instead.
@@ -93,19 +99,25 @@ place in the domain space at the given domain name. It will return that one
instead.
% DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1
-The domain name doesn't have any RRs, so it doesn't exist in the database.
-However, it has a subdomain, so it exists in the DNS address space. So we
-return NXRRSET instead of NXDOMAIN.
+The domain name does not have any RRs associated with it, so it doesn't
+exist in the database. However, it has a subdomain, so it does exist
+in the DNS address space. This type of domain is known as an "empty
+non-terminal" and so we return NXRRSET instead of NXDOMAIN.
% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
The data returned by the database backend did not contain any data for the given
domain name, class and type.
-% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 for %2/%3/%4 resulted in NXRRSET
The data returned by the database backend contained data for the given domain
name and class, but not for the given type.
-% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+% DATASRC_DATABASE_FOUND_NXRRSET_NSEC search in datasource %1 for %2/%3/%4 resulted in RRset %5
+A search in the database for RRs for the specified name, type and class has
+located RRs that match the name and class but not the type. DNSSEC information
+has been requested and returned.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %5
The data returned by the database backend contained data for the given domain
name, and it either matches the type or has a relevant type. The RRset that is
returned is printed.
@@ -127,11 +139,46 @@ were found to be different. This isn't allowed on the wire and is considered
an error, so we set it to the lowest value we found (but we don't modify the
database). The data in database should be checked and fixed.
-% DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1
-The database doesn't contain directly matching domain, but it does contain a
-wildcard one which is being used to synthesize the answer.
+% DATASRC_DATABASE_NO_MATCH no match for %2/%3/%4 in %1
+No match (not even a wildcard) was found in the named data source for the
+given name/type/class.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
+
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information. A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates rolled back for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but it may also be a bug in the application that forgot to commit
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
-% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or a
+software bug in the underlying data source implementation. In either
+case, if this message is logged, the administrator should carefully
+examine the underlying data source to see what exactly happened and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %3 because %2 contains NS (data source %1)
The database was queried to provide glue data and it didn't find direct match.
It could create it from given wildcard, but matching wildcards is forbidden
under a zone cut, which was found. Therefore the delegation will be returned
@@ -143,11 +190,31 @@ exists, therefore this name is something like empty non-terminal (actually,
from the protocol point of view, it is empty non-terminal, but the code
discovers it differently).
-% DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1
-The given wildcard exists implicitly in the domainspace, as empty nonterminal
-(eg. there's something like subdomain.*.example.org, so *.example.org exists
-implicitly, but is empty). This will produce NXRRSET, because the constructed
-domain is empty as well as the wildcard.
+% DATASRC_DATABASE_WILDCARD_CNAME search in datasource %1 for %2/%3/%4 found wildcard CNAME at %5, resulting in %6
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, a CNAME RR was found at a wildcard record
+matching the name. This is returned as the result of the search.
+
+% DATASRC_DATABASE_WILDCARD_EMPTY found subdomains of %2 which is a wildcard match for %3 in %1
+The given wildcard matches the name being sought but it is an empty
+nonterminal (e.g. there's nothing at *.example.org but something like
+subdomain.*.example.org does exist: so *.example.org exists in the
+namespace but has no RRs associated with it). This will produce NXRRSET.
+
+% DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %5 with RRset %6
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, a wildcard record matching the name and type of
+the query was found. The data at this point is returned.
+
+% DATASRC_DATABASE_WILDCARD_NS search in datasource %1 for %2/%3/%4 found wildcard delegation at %5, resulting in %6
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, an NS RR was found at a wildcard record matching
+the name. This is returned as the result of the search.
+
+% DATASRC_DATABASE_WILDCARD_NXRRSET search in datasource %1 for %2/%3/%4 resulted in wildcard NXRRSET at %5
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, a matching wildcard entry was found but it did
+not contain RRs of the requested type. An NXRRSET indication is returned.
% DATASRC_DO_QUERY handling query for '%1/%2'
A debug message indicating that a query for the given name and RR type is being
@@ -259,7 +326,7 @@ Debug information. The requested record was found.
% DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
Debug information. The search stopped at a superdomain of the requested
-domain. The domain is a empty nonterminal, therefore it is treated as NXRRSET
+domain. The domain is an empty nonterminal, therefore it is treated as NXRRSET
case (eg. the domain exists, but it doesn't have the requested record type).
% DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
@@ -487,12 +554,12 @@ enough information for it. The code is 1 for error, 2 for not implemented.
% DATASRC_SQLITE_CLOSE closing SQLite database
Debug information. The SQLite data source is closing the database file.
-% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
-The database file is being opened so it can start providing data.
-
% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
The database file is no longer needed and is being closed.
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
% DATASRC_SQLITE_CREATE SQLite data source created
Debug information. An instance of SQLite data source is being created.
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 9fcd289..e028bea 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -264,12 +264,15 @@ public:
/// proof of the non existence of any matching wildcard or non existence
/// of an exact match when a wildcard match is found.
///
- /// A derived version of this method may involve internal resource
- /// allocation, especially for constructing the resulting RRset, and may
- /// throw an exception if it fails.
- /// It throws DuplicateRRset exception if there are duplicate rrsets under
- /// the same domain.
- /// It should not throw other types of exceptions.
+ /// \exception std::bad_alloc Memory allocation such as for constructing
+ /// the resulting RRset fails
+ /// \exception DataSourceError Derived class specific exception, e.g.
+ /// when encountering a bad zone configuration or database connection
+ /// failure. Although these are considered rare, exceptional events,
+ /// it can happen under relatively usual conditions (unlike memory
+ /// allocation failure). So, in general, the application is expected
+ /// to catch this exception, either specifically or as a result of
+ /// catching a base exception class, and handle it gracefully.
///
/// \param name The domain name to be searched for.
/// \param type The RR type to be searched for.
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index 1f68527..bea93fc 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -47,7 +47,6 @@ Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
yiaddr_(DEFAULT_ADDRESS),
siaddr_(DEFAULT_ADDRESS),
giaddr_(DEFAULT_ADDRESS),
- bufferIn_(NULL, 0), // not used, this is TX packet
bufferOut_(DHCPV4_PKT_HDR_LEN),
msg_type_(msg_type)
{
@@ -71,7 +70,6 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
yiaddr_(DEFAULT_ADDRESS),
siaddr_(DEFAULT_ADDRESS),
giaddr_(DEFAULT_ADDRESS),
- bufferIn_(data, len),
bufferOut_(0), // not used, this is RX packet
msg_type_(DHCPDISCOVER)
{
@@ -80,6 +78,9 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
<< " received, at least " << DHCPV4_PKT_HDR_LEN
<< "is expected");
}
+
+ data_.resize(len);
+ memcpy(&data_[0], data, len);
}
size_t
@@ -123,31 +124,35 @@ Pkt4::pack() {
}
bool
Pkt4::unpack() {
- if (bufferIn_.getLength()<DHCPV4_PKT_HDR_LEN) {
+
+ // input buffer (used during message reception)
+ isc::util::InputBuffer bufferIn(&data_[0], data_.size());
+
+ if (bufferIn.getLength() < DHCPV4_PKT_HDR_LEN) {
isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
- << bufferIn_.getLength() << " received, at least "
+ << bufferIn.getLength() << " received, at least "
<< DHCPV4_PKT_HDR_LEN << "is expected");
}
- op_ = bufferIn_.readUint8();
- htype_ = bufferIn_.readUint8();
- hlen_ = bufferIn_.readUint8();
- hops_ = bufferIn_.readUint8();
- transid_ = bufferIn_.readUint32();
- secs_ = bufferIn_.readUint16();
- flags_ = bufferIn_.readUint16();
- ciaddr_ = IOAddress(bufferIn_.readUint32());
- yiaddr_ = IOAddress(bufferIn_.readUint32());
- siaddr_ = IOAddress(bufferIn_.readUint32());
- giaddr_ = IOAddress(bufferIn_.readUint32());
- bufferIn_.readData(chaddr_, MAX_CHADDR_LEN);
- bufferIn_.readData(sname_, MAX_SNAME_LEN);
- bufferIn_.readData(file_, MAX_FILE_LEN);
-
- size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+ op_ = bufferIn.readUint8();
+ htype_ = bufferIn.readUint8();
+ hlen_ = bufferIn.readUint8();
+ hops_ = bufferIn.readUint8();
+ transid_ = bufferIn.readUint32();
+ secs_ = bufferIn.readUint16();
+ flags_ = bufferIn.readUint16();
+ ciaddr_ = IOAddress(bufferIn.readUint32());
+ yiaddr_ = IOAddress(bufferIn.readUint32());
+ siaddr_ = IOAddress(bufferIn.readUint32());
+ giaddr_ = IOAddress(bufferIn.readUint32());
+ bufferIn.readData(chaddr_, MAX_CHADDR_LEN);
+ bufferIn.readData(sname_, MAX_SNAME_LEN);
+ bufferIn.readData(file_, MAX_FILE_LEN);
+
+ size_t opts_len = bufferIn.getLength() - bufferIn.getPosition();
vector<uint8_t> optsBuffer;
// fist use of readVector
- bufferIn_.readVector(optsBuffer, opts_len);
+ bufferIn.readVector(optsBuffer, opts_len);
LibDHCP::unpackOptions4(optsBuffer, options_);
return (true);
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index 8517091..189d95d 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -299,10 +299,21 @@ public:
///
/// @return returns option of requested type (or NULL)
/// if no such option is present
-
boost::shared_ptr<Option>
getOption(uint8_t opt_type);
+
+ /// @brief set interface over which packet should be sent
+ ///
+ /// @param interface defines outbound interface
+ void setIface(const std::string& interface) { iface_ = interface; }
+
+ /// @brief gets interface over which packet was received or
+ /// will be transmitted
+ ///
+ /// @return name of the interface
+ std::string getIface() const { return iface_; }
+
protected:
/// converts DHCP message type to BOOTP op type
@@ -385,14 +396,15 @@ protected:
// end of real DHCPv4 fields
- /// input buffer (used during message reception)
- /// Note that it must be modifiable as hooks can modify incoming buffer),
- /// thus OutputBuffer, not InputBuffer
- isc::util::InputBuffer bufferIn_;
-
/// output buffer (used during message
isc::util::OutputBuffer bufferOut_;
+ // This is the data of the input buffer used in an RX packet. Note that
+ // InputBuffer does not store the data itself, but just expects that the
+ // data will be valid for the whole life of the InputBuffer. Therefore we
+ // need to keep the data around.
+ std::vector<uint8_t> data_;
+
/// message type (e.g. 1=DHCPDISCOVER)
/// TODO: this will eventually be replaced with DHCP Message Type
/// option (option 53)
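
The data_ comment above is the heart of this change: InputBuffer only wraps
externally owned memory, so the packet now keeps the received bytes itself
and builds a short-lived InputBuffer over them during parsing. A stand-alone
sketch of the same pattern follows, using only the InputBuffer calls already
visible in this diff; the header path, struct and function names are
invented for the example.

    // Sketch: the vector owns the wire data; the InputBuffer is only a
    // temporary view over it and must not outlive the vector.
    #include <util/buffer.h>   // assumed location of isc::util::InputBuffer
    #include <stdint.h>
    #include <stdexcept>
    #include <vector>

    struct MiniHeader {
        uint8_t op;
        uint32_t transid;
    };

    MiniHeader parseStoredData(const std::vector<uint8_t>& data) {
        if (data.size() < 8) {
            throw std::runtime_error("truncated packet");
        }
        isc::util::InputBuffer buffer(&data[0], data.size());
        MiniHeader header;
        header.op = buffer.readUint8();
        buffer.readUint8();   // htype, skipped in this sketch
        buffer.readUint8();   // hlen
        buffer.readUint8();   // hops
        header.transid = buffer.readUint32();
        return (header);
    }
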
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index db3ee3b..66dce8f 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -402,6 +402,8 @@ TEST_F(OptionTest, v6_addgetdel) {
// let's try to delete - should fail
EXPECT_TRUE(false == parent->delOption(2));
+
+ delete parent;
}
}
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index 0f70442..091bfac 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -561,4 +561,17 @@ TEST(Pkt4Test, unpackOptions) {
EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
}
+// This test verifies methods that are used for manipulating meta fields
+// i.e. fields that are not part of DHCPv4 (e.g. interface name).
+TEST(Pkt4Test, metaFields) {
+ Pkt4 pkt(DHCPDISCOVER, 1234);
+
+ pkt.setIface("lo0");
+
+ EXPECT_EQ("lo0", pkt.getIface());
+
+ /// TODO: Expand this test once additional getters/setters are
+ /// implemented.
+}
+
} // end of anonymous namespace
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index fc6c87c..cfd1286 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -62,12 +62,12 @@ run_unittests_SOURCES += tsigrecord_unittest.cc
run_unittests_SOURCES += character_string_unittest.cc
run_unittests_SOURCES += run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-# We shouldn't need to include BOTAN_LDFLAGS here, but there
+# We shouldn't need to include BOTAN_LIBS here, but there
# is one test system where the path for GTEST_LDFLAGS contains
# an older version of botan, and somehow that version gets
# linked if we don't
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(BOTAN_LIBS) $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 957d350..286e9fd 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -46,5 +46,4 @@ if USE_CLANGPP
liblog_la_CXXFLAGS += -Wno-error
endif
liblog_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
-liblog_la_LDFLAGS = $(LOG4CPLUS_LDFLAGS)
-liblog_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
+liblog_la_LIBADD = $(LOG4CPLUS_LIBS) $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index a5f793c..53e97a1 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -48,16 +48,18 @@ endif
noinst_PROGRAMS = logger_example
logger_example_SOURCES = logger_example.cc
logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-logger_example_LDADD = $(top_builddir)/src/lib/log/liblog.la
+logger_example_LDFLAGS = $(AM_LDFLAGS)
+logger_example_LDADD = $(LOG4CPLUS_LIBS)
+logger_example_LDADD += $(top_builddir)/src/lib/log/liblog.la
logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
noinst_PROGRAMS += init_logger_test
init_logger_test_SOURCES = init_logger_test.cc
init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-init_logger_test_LDADD = $(top_builddir)/src/lib/log/liblog.la
+init_logger_test_LDFLAGS = $(AM_LDFLAGS)
+init_logger_test_LDADD = $(LOG4CPLUS_LIBS)
+init_logger_test_LDADD += $(top_builddir)/src/lib/log/liblog.la
init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/nsas/nameserver_entry.cc b/src/lib/nsas/nameserver_entry.cc
index 553c35d..bca8f73 100644
--- a/src/lib/nsas/nameserver_entry.cc
+++ b/src/lib/nsas/nameserver_entry.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -223,7 +223,8 @@ class NameserverEntry::ResolverCallback :
* \short We received the address successfully.
*
* This extracts the addresses out from the response and puts them
- * inside the entry. It tries to reuse the address entries from before (if there were any), to keep their RTTs.
+ * inside the entry. It tries to reuse the address entries from before
+ * (if there were any), to keep their RTTs.
*/
virtual void success(MessagePtr response_message) {
time_t now = time(NULL);
@@ -231,10 +232,21 @@ class NameserverEntry::ResolverCallback :
Lock lock(entry_->mutex_);
// TODO: find the correct RRset, not simply the first
- if (!response_message ||
- response_message->getRcode() != isc::dns::Rcode::NOERROR() ||
+ if (!response_message) {
+ LOG_ERROR(nsas_logger, NSAS_NULL_RESPONSE).arg(entry_->getName());
+ failureInternal(lock);
+ return;
+
+ } else if (response_message->getRcode() != isc::dns::Rcode::NOERROR()) {
+ LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_ERROR_RESPONSE).
+ arg(response_message->getRcode()).arg(entry_->getName());
+ failureInternal(lock);
+ return;
+
+ } else if (
response_message->getRRCount(isc::dns::Message::SECTION_ANSWER) == 0) {
- LOG_ERROR(nsas_logger, NSAS_INVALID_RESPONSE).arg(entry_->getName());
+ LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_EMPTY_RESPONSE).
+ arg(entry_->getName());
failureInternal(lock);
return;
}
@@ -371,7 +383,7 @@ class NameserverEntry::ResolverCallback :
}
}
- // Handle a failure to optain data. Dispatches callbacks and leaves
+ // Handle a failure to obtain data. Dispatches callbacks and leaves
// lock unlocked
void failureInternal(Lock &lock) {
// Set state of the addresses
diff --git a/src/lib/nsas/nsas_messages.mes b/src/lib/nsas/nsas_messages.mes
index 512fcd5..6c35172 100644
--- a/src/lib/nsas/nsas_messages.mes
+++ b/src/lib/nsas/nsas_messages.mes
@@ -14,6 +14,16 @@
$NAMESPACE isc::nsas
+% NSAS_EMPTY_RESPONSE response to query for %1 returned an empty answer section
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed. The query completed successfully but the
+answer section in the response was empty.
+
+% NSAS_ERROR_RESPONSE error response of %1 returned in query for %2
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed. The query completed successfully but the
+RCODE in the response was something other than NOERROR.
+
% NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1
A debug message issued when the NSAS (nameserver address store - part
of the resolver) is making a callback into the resolver to retrieve the
@@ -24,17 +34,6 @@ A debug message issued when the NSAS (nameserver address store - part
of the resolver) has retrieved the given address for the specified
nameserver through an external query.
-% NSAS_INVALID_RESPONSE queried for %1 but got invalid response
-The NSAS (nameserver address store - part of the resolver) made a query
-for a RR for the specified nameserver but received an invalid response.
-Either the success function was called without a DNS message or the
-message was invalid on some way. (In the latter case, the error should
-have been picked up elsewhere in the processing logic, hence the raising
-of the error here.)
-
-This message indicates an internal error in the NSAS. Please raise a
-bug report.
-
% NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled
A debug message issued when an NSAS (nameserver address store - part of
the resolver) lookup for a zone has been canceled.
@@ -46,6 +45,14 @@ for the specified nameserver. This is not necessarily a problem - the
nameserver may be unreachable, in which case the NSAS will try other
nameservers in the zone.
+% NSAS_NULL_RESPONSE got null message in success callback for query for %1
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed. The query completed successfully, but the
+message passed to the callback was null.
+
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+
% NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1
A debug message output when a call is made to the NSAS (nameserver
address store - part of the resolver) to obtain the nameservers for
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index c0f1e32..aa5d0ab 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -1,4 +1,5 @@
SUBDIRS = . tests
-python_PYTHON = __init__.py sockcreator.py component.py special_component.py
+python_PYTHON = __init__.py sockcreator.py component.py special_component.py \
+ socket_cache.py
pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/socket_cache.py b/src/lib/python/isc/bind10/socket_cache.py
new file mode 100644
index 0000000..26e87d2
--- /dev/null
+++ b/src/lib/python/isc/bind10/socket_cache.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Here's the cache for sockets from socket creator.
+"""
+
+import os
+import random
+import isc.bind10.sockcreator
+from copy import copy
+
+class SocketError(Exception):
+ """
+ Exception raised when the socket creator is unable to create requested
+ socket. Possible reasons might be the address it should be bound to
+ is already taken, the permissions are insufficient, the address family
+ is not supported on this computer and many more.
+
+ The errno, if not None, is passed from the socket creator.
+ """
+ def __init__(self, message, errno):
+ Exception.__init__(self, message)
+ self.errno = errno
+
+class ShareError(Exception):
+ """
+ The requested socket is already taken by another component and the sharing
+ parameters don't allow sharing with the new request.
+ """
+ pass
+
+class Socket:
+ """
+ This represents one socket cached by the Cache. This should never
+ be used directly by a user; it is used internally by the Cache. Therefore
+ many member variables are used directly instead of by an accessor method.
+
+ Be warned that this object implements the __del__ method. It closes the
+ socket held inside in it. But this poses various problems with garbage
+ collector. In short, do not make reference cycles with this and generally
+ leave this class alone to live peacefully.
+ """
+ def __init__(self, protocol, address, port, fileno):
+ """
+ Creates the socket.
+
+ The protocol, address and port are stored for informational purposes.
+ """
+ self.protocol = protocol
+ self.address = address
+ self.port = port
+ self.fileno = fileno
+ # Mapping from token -> application
+ self.active_tokens = {}
+ # The tokens which were not yet picked up
+ self.waiting_tokens = set()
+ # Share modes and names by the tokens (token -> (mode, name))
+ self.shares = {}
+
+ def __del__(self):
+ """
+ Closes the file descriptor.
+ """
+ os.close(self.fileno)
+
+ def share_compatible(self, mode, name):
+ """
+ Checks if the given share mode and name are compatible with the ones
+ already installed here.
+
+ The allowed values for mode are listed in the Cache.get_token
+ function.
+ """
+ if mode not in ['NO', 'SAMEAPP', 'ANY']:
+ raise ValueError("Mode " + mode + " is invalid")
+
+ # Go through the existing ones
+ for (emode, ename) in self.shares.values():
+ if emode == 'NO' or mode == 'NO':
+ # One of them can't live together with anything
+ return False
+ if (emode == 'SAMEAPP' or mode == 'SAMEAPP') and \
+ ename != name:
+ # One of them can't live together with someone of different
+ # name
+ return False
+ # else both are ANY or SAMEAPP with the same name, which is OK
+ # No problem found, so we consider it OK
+ return True
+
+class Cache:
+ """
+ This is the cache for sockets from socket creator. The purpose of cache
+ is to hold the sockets that were requested, until they are no longer
+ needed. One reason is, the socket is created before it is sent over the
+ unix domain socket in boss, so we need to keep it somewhere for a while.
+
+ The other reason is, a single socket might be requested multiple times.
+ So we keep it here in case someone else might ask for it.
+
+ Each socket kept here has a reference count and when it drops to zero,
+ it is removed from cache and closed.
+
+ This is expected to be part of Boss, it is not a general utility class.
+
+ It is not expected to be subclassed. The methods and members are named
+ as protected so that tests can access them more easily.
+ """
+ def __init__(self, creator):
+ """
+ Initialization. The creator is the socket creator object
+ (isc.bind10.sockcreator.Creator) which will be used to create yet
+ uncached sockets.
+ """
+ self._creator = creator
+ # The sockets we have live here, these dicts are various ways how
+ # to get them. Each of them contains the Socket objects somehow
+
+ # This one is dict of token: socket for the ones that were not yet
+ # picked up by an application.
+ self._waiting_tokens = {}
+ # This format is the same as above, but for the tokens that were
+ # already picked up by the application and not yet released.
+ self._active_tokens = {}
+ # This is a dict from applications to set of tokens used by the
+ # application, for the sockets already picked up by an application
+ self._active_apps = {}
+ # The sockets live here to be indexed by protocol, address and
+ # subsequently by port
+ self._sockets = {}
+ # These are just the tokens actually in use, so we don't generate
+ # dupes. If one is dropped, it can be potentially reclaimed.
+ self._live_tokens = set()
+
+ def get_token(self, protocol, address, port, share_mode, share_name):
+ """
+ This requests a token representing a socket. The socket is either
+ found in the cache already or requested from the creator at this time
+ (and cached for later time).
+
+ The parameters are:
+ - protocol: either 'UDP' or 'TCP'
+ - address: the IPAddr object representing the address to bind to
+ - port: integer saying which port to bind to
+ - share_mode: either 'NO', 'SAMEAPP' or 'ANY', specifying how the
+ socket can be shared with others. See bin/bind10/creatorapi.txt
+ for details.
+ - share_name: the name of application, in case of 'SAMEAPP' share
+ mode. Only requests with the same name can share the socket.
+
+ If the call is successful, it returns a string token which can be
+ used to pick up the socket later. The socket is created with reference
+ count zero and if it isn't picked up soon enough (the timeout has yet to
+ be set), it will be removed and the token becomes invalid.
+
+ It can fail in various ways. Explicitly listed exceptions are:
+ - SocketError: this one is thrown if the socket creator couldn't provide
+ the socket and it is not yet cached (it belongs to another application,
+ for example).
+ - ShareError: the socket is already in the cache, but it can't be
+ shared due to the share_mode and share_name combination (the restrictions
+ of both the request and of all copies of the socket handed out are considered,
+ so it can be raised even if you call it with share_mode 'ANY').
+ - isc.bind10.sockcreator.CreatorError: fatal creator errors are
+ propagated. They should cause the boss to exit if ever encountered.
+
+ Note that it isn't guaranteed the tokens would be unique and they
+ should be used as an opaque handle only.
+ """
+ addr_str = str(address)
+ try:
+ socket = self._sockets[protocol][addr_str][port]
+ except KeyError:
+ # Something in the dicts is not there, so socket is to be
+ # created
+ try:
+ fileno = self._creator.get_socket(address, port, protocol)
+ except isc.bind10.sockcreator.CreatorError as ce:
+ if ce.fatal:
+ raise
+ else:
+ raise SocketError(str(ce), ce.errno)
+ socket = Socket(protocol, address, port, fileno)
+ # And cache it
+ if protocol not in self._sockets:
+ self._sockets[protocol] = {}
+ if addr_str not in self._sockets[protocol]:
+ self._sockets[protocol][addr_str] = {}
+ self._sockets[protocol][addr_str][port] = socket
+ # Now we get the token, check it is compatible
+ if not socket.share_compatible(share_mode, share_name):
+ raise ShareError("Cached socket not compatible with mode " +
+ share_mode + " and name " + share_name)
+ # Grab yet unused token
+ token = 't' + str(random.randint(0, 2**32 - 1))
+ while token in self._live_tokens:
+ token = 't' + str(random.randint(0, 2**32 - 1))
+ self._waiting_tokens[token] = socket
+ self._live_tokens.add(token)
+ socket.shares[token] = (share_mode, share_name)
+ socket.waiting_tokens.add(token)
+ return token
+
+ def get_socket(self, token, application):
+ """
+ This returns the socket created by get_token. The token should be the
+ one returned from previous call from get_token. The token can be used
+ only once to receive the socket.
+
+ The application is a token representing the application that requested
+ it. Currently, boss uses the file descriptor of connection from the
+ application, but anything which can be a key in a dict is OK from the
+ cache's point of view. You just need to use the same thing in
+ drop_application.
+
+ In case the token is considered invalid (it doesn't come from the
+ get_token, it was already used, the socket wasn't picked up soon
+ enough, ...), it raises ValueError.
+ """
+ try:
+ socket = self._waiting_tokens[token]
+ except KeyError:
+ raise ValueError("Token " + token +
+ " isn't waiting to be picked up")
+ del self._waiting_tokens[token]
+ self._active_tokens[token] = socket
+ if application not in self._active_apps:
+ self._active_apps[application] = set()
+ self._active_apps[application].add(token)
+ socket.waiting_tokens.remove(token)
+ socket.active_tokens[token] = application
+ return socket.fileno
+
+ def drop_socket(self, token):
+ """
+ This signals the application no longer uses the socket which was
+ requested by the given token. It decreases the reference count for
+ the socket and closes and removes the cached copy if it was the last
+ one.
+
+ It raises ValueError if the token doesn't exist.
+ """
+ try:
+ socket = self._active_tokens[token]
+ except KeyError:
+ raise ValueError("Token " + token + " doesn't represent an " +
+ "active socket")
+ # Now, remove everything from the bookkeeping
+ del socket.shares[token]
+ app = socket.active_tokens[token]
+ del socket.active_tokens[token]
+ del self._active_tokens[token]
+ self._active_apps[app].remove(token)
+ if len(self._active_apps[app]) == 0:
+ del self._active_apps[app]
+ self._live_tokens.remove(token)
+ # The socket is not used by anything now, so remove it
+ if len(socket.active_tokens) == 0 and len(socket.waiting_tokens) == 0:
+ addr = str(socket.address)
+ port = socket.port
+ proto = socket.protocol
+ del self._sockets[proto][addr][port]
+ # Clean up empty branches of the structure
+ if len(self._sockets[proto][addr]) == 0:
+ del self._sockets[proto][addr]
+ if len(self._sockets[proto]) == 0:
+ del self._sockets[proto]
+
+ def drop_application(self, application):
+ """
+ This signals the application terminated and all sockets it picked up
+ should be considered unused by it now. It effectively calls drop_socket
+ on each of the sockets the application picked up and didn't drop yet.
+
+ If the application is invalid (no get_socket was successful with this
+ value of application), it raises ValueError.
+ """
+ try:
+ # Get a copy. Who knows how iteration works through sets if we
+ # delete from it during the time, so we'll just have our own copy
+ # to iterate
+ to_drop = copy(self._active_apps[application])
+ except KeyError:
+ raise ValueError("Application " + str(application) +
+ " doesn't hold any sockets")
+ for token in to_drop:
+ self.drop_socket(token)
+ # We don't call del now. The last drop_socket should have
+ # removed the application key as well.
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index dad10bb..c9c7683 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -42,6 +42,7 @@ class SockCreator(BaseComponent):
self.__creator = isc.bind10.sockcreator.Creator(LIBEXECDIR + ':' +
os.environ['PATH'])
self._boss.register_process(self.pid(), self)
+ self._boss.set_creator(self.__creator)
self._boss.log_started(self.pid())
def _stop_internal(self):
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index df625b2..658db1e 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
-PYTESTS = sockcreator_test.py component_test.py
+PYTESTS = sockcreator_test.py component_test.py socket_cache_test.py
EXTRA_DIST = $(PYTESTS)
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
index 4453184..d97d21b 100644
--- a/src/lib/python/isc/bind10/tests/sockcreator_test.py
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -13,9 +13,6 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# This test file is generated .py.in -> .py just to be in the build dir,
-# same as the rest of the tests. Saves a lot of stuff in makefile.
-
"""
Tests for the bind10.sockcreator module.
"""
diff --git a/src/lib/python/isc/bind10/tests/socket_cache_test.py b/src/lib/python/isc/bind10/tests/socket_cache_test.py
new file mode 100644
index 0000000..bbbf776
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/socket_cache_test.py
@@ -0,0 +1,396 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import isc.log
+import isc.bind10.socket_cache
+import isc.bind10.sockcreator
+from isc.net.addr import IPAddr
+import os
+
+class Test(unittest.TestCase):
+ """
+ Base for the tests here. It replaces the os.close method.
+ """
+ def setUp(self):
+ self._closes = []
+ isc.bind10.socket_cache.os.close = self.__close
+
+ def tearDown(self):
+ # This is not a very clean solution. But once the test ceases
+ # to exist, the method must no longer be used to destroy the
+ # object. And we can't restore the real os.close here, as we
+ # never work with real sockets in these tests.
+ isc.bind10.socket_cache.os.close = lambda fd: None
+
+ def __close(self, fd):
+ """
+ Just record that a close was called.
+ """
+ self._closes.append(fd)
+
+class SocketTest(Test):
+ """
+ Test for the Socket class.
+ """
+ def setUp(self):
+ """
+ Creates the socket to be tested.
+
+ It also creates other useful test variables.
+ """
+ Test.setUp(self)
+ self.__address = IPAddr("192.0.2.1")
+ self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+ 1024, 42)
+
+ def test_init(self):
+ """
+ Checks the internals of the socket just after creation.
+ """
+ self.assertEqual('UDP', self.__socket.protocol)
+ self.assertEqual(self.__address, self.__socket.address)
+ self.assertEqual(1024, self.__socket.port)
+ self.assertEqual(42, self.__socket.fileno)
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual({}, self.__socket.shares)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+
+ def test_del(self):
+ """
+ Check it closes the socket when removed.
+ """
+ # This should make the refcount 0 and call the destructor
+ # right away
+ self.__socket = None
+ self.assertEqual([42], self._closes)
+
+ def test_share_modes(self):
+ """
+ Test the share mode compatibility check function.
+ """
+ modes = ['NO', 'SAMEAPP', 'ANY']
+ # If there are no shares, it is compatible with everything.
+ for mode in modes:
+ self.assertTrue(self.__socket.share_compatible(mode, 'anything'))
+
+ # There's a 'NO' already, so it is incompatible with everything.
+ self.__socket.shares = {'token': ('NO', 'anything')}
+ for mode in modes:
+ self.assertFalse(self.__socket.share_compatible(mode, 'anything'))
+
+ # If there's SAMEAPP, it is compatible with ANY and SAMEAPP with the
+ # same name.
+ self.__socket.shares = {'token': ('SAMEAPP', 'app')}
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+ self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+
+ # If there's ANY, then ANY and SAMEAPP with the same name are compatible
+ self.__socket.shares = {'token': ('ANY', 'app')}
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'something'))
+
+ # In case there are multiple already inside
+ self.__socket.shares = {
+ 'token': ('ANY', 'app'),
+ 'another': ('SAMEAPP', 'app')
+ }
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+
+ # Invalid inputs are rejected
+ self.assertRaises(ValueError, self.__socket.share_compatible, 'bad',
+ 'bad')
+
+class SocketCacheTest(Test):
+ """
+ Some tests for the isc.bind10.socket_cache.Cache.
+
+ This class, as well as being the testcase, pretends to be the
+ socket creator so it can hijack all the requests for sockets.
+ """
+ def setUp(self):
+ """
+ Creates the cache for tests with us being the socket creator.
+
+ Also creates some more variables for testing.
+ """
+ Test.setUp(self)
+ self.__cache = isc.bind10.socket_cache.Cache(self)
+ self.__address = IPAddr("192.0.2.1")
+ self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+ 1024, 42)
+ self.__get_socket_called = False
+
+ def test_init(self):
+ """
+ Checks the internals of the cache just after the creation.
+ """
+ self.assertEqual(self, self.__cache._creator)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(), self.__cache._live_tokens)
+
+ def get_socket(self, address, port, socktype):
+ """
+ Pretend to be a socket creator.
+
+ This expects to be called with the _address, port 1024 and 'UDP'.
+
+ Returns 42 and notes down that it was called.
+ """
+ self.assertEqual(self.__address, address)
+ self.assertEqual(1024, port)
+ self.assertEqual('UDP', socktype)
+ self.__get_socket_called = True
+ return 42
+
+ def test_get_token_cached(self):
+ """
+ Check the behaviour of get_token when the requested socket is already
+ cached inside.
+ """
+ self.__cache._sockets = {
+ 'UDP': {'192.0.2.1': {1024: self.__socket}}
+ }
+ token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+ 'test')
+ # It didn't call get_socket
+ self.assertFalse(self.__get_socket_called)
+ # It returned something
+ self.assertIsNotNone(token)
+ # The token is both in the waiting sockets and the live tokens
+ self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ # The token got the new share to block any relevant queries
+ self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+ # The socket knows the token is waiting in it
+ self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+ # If we request one more, with incompatible share, it is rejected
+ self.assertRaises(isc.bind10.socket_cache.ShareError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+ # The internals are not changed, so the same checks
+ self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+ self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+ def test_get_token_uncached(self):
+ """
+ Check a new socket is created when a corresponding one is missing.
+ """
+ token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+ 'test')
+ # The get_socket was called
+ self.assertTrue(self.__get_socket_called)
+ # It returned something
+ self.assertIsNotNone(token)
+ # Get the socket and check it looks OK
+ socket = self.__cache._waiting_tokens[token]
+ self.assertEqual(self.__address, socket.address)
+ self.assertEqual(1024, socket.port)
+ self.assertEqual(42, socket.fileno)
+ self.assertEqual('UDP', socket.protocol)
+ # The socket is properly cached
+ self.assertEqual({
+ 'UDP': {'192.0.2.1': {1024: socket}}
+ }, self.__cache._sockets)
+ # The token is both in the waiting sockets and the live tokens
+ self.assertEqual({token: socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ # The token got the new share to block any relevant queries
+ self.assertEqual({token: ('ANY', 'test')}, socket.shares)
+ # The socket knows the token is waiting in it
+ self.assertEqual(set([token]), socket.waiting_tokens)
+
+ def test_get_token_excs(self):
+ """
+ Test that exceptions raised by the socket creator are handled properly.
+ """
+ def raiseCreatorError(fatal):
+ raise isc.bind10.sockcreator.CreatorError('test error', fatal)
+ # First, fatal socket creator errors are passed through
+ self.get_socket = lambda addr, port, proto: raiseCreatorError(True)
+ self.assertRaises(isc.bind10.sockcreator.CreatorError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+ # And nonfatal are converted to SocketError
+ self.get_socket = lambda addr, port, proto: raiseCreatorError(False)
+ self.assertRaises(isc.bind10.socket_cache.SocketError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+
+ def test_get_socket(self):
+ """
+ Test that we can pick up a socket if we know a token.
+ """
+ token = "token"
+ app = 13
+ # No socket prepared there
+ self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+ # Not changed
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(), self.__cache._live_tokens)
+ # Prepare a token there
+ self.__socket.waiting_tokens = set([token])
+ self.__socket.shares = {token: ('ANY', 'app')}
+ self.__cache._waiting_tokens = {token: self.__socket}
+ self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+ self.__cache._live_tokens = set([token])
+ socket = self.__cache.get_socket(token, app)
+ # Received the fileno
+ self.assertEqual(42, socket)
+ # It moved from waiting to active ones
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({token: self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({13: set([token])}, self.__cache._active_apps)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({token: 13}, self.__socket.active_tokens)
+ # Trying to get it again fails
+ self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+
+ def test_drop_application(self):
+ """
+ Test that drop_application calls drop_socket on all the sockets
+ held by the application.
+ """
+ sockets = set()
+ def drop_socket(token):
+ sockets.add(token)
+ # Mock the drop_socket so we know it is called
+ self.__cache.drop_socket = drop_socket
+ self.assertRaises(ValueError, self.__cache.drop_application,
+ 13)
+ self.assertEqual(set(), sockets)
+ # Put the tokens into active_apps. Nothing else should be touched
+ # by this call, so leave it alone.
+ self.__cache._active_apps = {
+ 1: set(['t1', 't2']),
+ 2: set(['t3'])
+ }
+ self.__cache.drop_application(1)
+ # We don't check _active_apps, as it would normally be cleaned by
+ # drop_socket, which we have replaced with a mock here.
+ self.assertEqual(set(['t1', 't2']), sockets)
+
+ def test_drop_socket(self):
+ """
+ Test the drop_socket call. It tests:
+ * That a socket that still has something to keep it alive is left alive
+ (both waiting and active).
+ * If not, it is deleted.
+ * That all the surrounding bookkeeping data is properly removed.
+ * And, of course, the exception for an unknown token.
+ """
+ self.assertRaises(ValueError, self.__cache.drop_socket, "bad token")
+ self.__socket.active_tokens = {'t1': 1}
+ self.__socket.waiting_tokens = set(['t2'])
+ self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+ self.__cache._waiting_tokens = {'t2': self.__socket}
+ self.__cache._active_tokens = {'t1': self.__socket}
+ self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+ self.__cache._live_tokens = set(['t1', 't2'])
+ self.__cache._active_apps = {1: set(['t1'])}
+ # We can't drop what wasn't picked up yet
+ self.assertRaises(ValueError, self.__cache.drop_socket, 't2')
+ self.assertEqual({'t1': 1}, self.__socket.active_tokens)
+ self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+ self.assertEqual({'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')},
+ self.__socket.shares)
+ self.assertEqual({'t2': self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual({'t1': self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t1', 't2']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t1'])}, self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # If we drop this, it survives because it waits for being picked up
+ self.__cache.drop_socket('t1')
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+ self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t2']), self.__cache._live_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # Fill it again, now two applications having the same socket
+ self.__socket.active_tokens = {'t1': 1, 't2': 2}
+ self.__socket.waiting_tokens = set()
+ self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+ self.__cache._waiting_tokens = {}
+ self.__cache._active_tokens = {
+ 't1': self.__socket,
+ 't2': self.__socket
+ }
+ self.__cache._live_tokens = set(['t1', 't2', 't3'])
+ self.assertEqual([], self._closes)
+ # We cheat here a little bit: t3 doesn't exist anywhere else, but
+ # we need to check that the app isn't removed too soon. It shouldn't
+ # matter anywhere else, so we just avoid the tiresome filling in.
+ self.__cache._active_apps = {1: set(['t1', 't3']), 2: set(['t2'])}
+ # Drop it as t1. It should still live.
+ self.__cache.drop_socket('t1')
+ self.assertEqual({'t2': 2}, self.__socket.active_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({'t2': self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t3', 't2']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t3']), 2: set(['t2'])},
+ self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # Drop it again, from the other application. It should get removed
+ # and closed.
+ self.__cache.drop_socket('t2')
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(['t3']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t3'])}, self.__cache._active_apps)
+ # The cache doesn't hold the socket any more. So when we remove it
+ # ourselves, it should get closed.
+ self.__socket = None
+ self.assertEqual([42], self._closes)
+
+if __name__ == '__main__':
+ isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
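The test_share_modes checks above encode the sharing rules for 'NO', 'SAMEAPP' and 'ANY'. As a compact restatement of those rules, here is a sketch only (the committed implementation lives in Socket.share_compatible, which the test exercises); this stand-alone function passes exactly the cases the test asserts:

def share_compatible(shares, mode, name):
    # Sketch of the rules exercised by test_share_modes above; 'shares'
    # maps token -> (mode, name), as in the Socket class.
    if mode not in ('NO', 'SAMEAPP', 'ANY'):
        raise ValueError("Unknown share mode: " + mode)
    for (other_mode, other_name) in shares.values():
        # An existing or requested 'NO' forbids any further sharing.
        if other_mode == 'NO' or mode == 'NO':
            return False
        # 'SAMEAPP' on either side requires matching application names.
        if (other_mode == 'SAMEAPP' or mode == 'SAMEAPP') and \
                other_name != name:
            return False
    return True

With an empty shares dictionary every mode is accepted, which matches the first loop in the test.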
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index fb6d151..47f3dbc 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -24,6 +24,7 @@ datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
datasrc_la_LDFLAGS += -module
datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
datasrc_la_LIBADD += $(PYTHON_LIB)
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index 0d3fb4c..930b593 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -28,9 +28,9 @@
#include <dns/opcode.h>
#include <dns/exceptions.h>
#include <dns/rdataclass.h>
-
#include <resolve/resolve.h>
#include <resolve/resolve_log.h>
+#include <resolve/resolve_messages.h>
#include <cache/resolver_cache.h>
#include <nsas/address_request_callback.h>
#include <nsas/nameserver_address.h>
@@ -39,6 +39,7 @@
#include <asiodns/dns_service.h>
#include <asiodns/io_fetch.h>
#include <asiolink/io_service.h>
+#include <resolve/response_classifier.h>
#include <resolve/recursive_query.h>
using namespace isc::dns;
@@ -430,7 +431,7 @@ private:
.arg(questionText(question_));
isc::resolve::copyResponseMessage(incoming, answer_message_);
cache_.update(*answer_message_);
- return true;
+ return (true);
break;
case isc::resolve::ResponseClassifier::CNAME:
@@ -444,7 +445,7 @@ private:
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_LONG_CHAIN)
.arg(questionText(question_));
makeSERVFAIL();
- return true;
+ return (true);
}
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_CNAME)
@@ -460,7 +461,7 @@ private:
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_FOLLOW_CNAME)
.arg(questionText(question_));
doLookup();
- return false;
+ return (false);
break;
case isc::resolve::ResponseClassifier::NXDOMAIN:
@@ -471,7 +472,7 @@ private:
isc::resolve::copyResponseMessage(incoming, answer_message_);
// no negcache yet
//cache_.update(*answer_message_);
- return true;
+ return (true);
break;
case isc::resolve::ResponseClassifier::REFERRAL:
@@ -520,7 +521,7 @@ private:
nsas_callback_out_ = true;
nsas_.lookup(cur_zone_, question_.getClass(),
nsas_callback_, ANY_OK, glue_hints);
- return false;
+ return (false);
} else {
// Referral was received but did not contain an NS RRset.
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_NO_NS_RRSET)
@@ -528,48 +529,125 @@ private:
// TODO this will result in answering with the delegation. oh well
isc::resolve::copyResponseMessage(incoming, answer_message_);
- return true;
+ return (true);
}
break;
+
case isc::resolve::ResponseClassifier::TRUNCATED:
// Truncated packet. If the protocol we used for the last one is
// UDP, re-query using TCP. Otherwise regard it as an error.
if (protocol_ == IOFetch::UDP) {
- LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_TRUNCATED)
- .arg(questionText(question_));
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS,
+ RESLIB_TRUNCATED).arg(questionText(question_));
send(IOFetch::TCP);
- return false;
+ return (false);
+ }
+
+ // Was a TCP query so we have received a packet over TCP with the
+ // TC bit set: report an error by dropping down to the common
+ // error code.
+
+ default:
+ // Some error in the received packet. Report it and return SERVFAIL
+ // to the caller.
+ if (logger.isDebugEnabled()) {
+ reportResponseClassifierError(category, incoming.getRcode());
}
- // Was a TCP query so we have received a packet over TCP with the TC
- // bit set: drop through to common error processing.
- // TODO: Can we use what we have received instead of discarding it?
-
- case isc::resolve::ResponseClassifier::EMPTY:
- case isc::resolve::ResponseClassifier::EXTRADATA:
- case isc::resolve::ResponseClassifier::INVNAMCLASS:
- case isc::resolve::ResponseClassifier::INVTYPE:
- case isc::resolve::ResponseClassifier::MISMATQUEST:
- case isc::resolve::ResponseClassifier::MULTICLASS:
- case isc::resolve::ResponseClassifier::NOTONEQUEST:
- case isc::resolve::ResponseClassifier::NOTRESPONSE:
- case isc::resolve::ResponseClassifier::NOTSINGLE:
- case isc::resolve::ResponseClassifier::OPCODE:
- case isc::resolve::ResponseClassifier::RCODE:
- LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERR)
- .arg(questionText(question_));
- // Should we try a different server rather than SERVFAIL?
makeSERVFAIL();
- return true;
- break;
+ return (true);
}
- // Since we do not have a default in the switch above,
- // the compiler should have errored on any missing case
- // statements.
+ // If we get here, there is some serious logic error (or a missing
+ // "return").
assert(false);
- return true;
+ return (true); // To keep the compiler happy
}
-
+
+ /// \brief Report classification-detected error
+ ///
+ /// When the response classifier has detected an error in the response from
+ /// an upstream query, this method is called to log a debug message giving
+ /// information about the problem.
+ ///
+ /// \param category Classification code for the packet
+ /// \param rcode RCODE value in the packet
+ void reportResponseClassifierError(ResponseClassifier::Category category,
+ const Rcode& rcode)
+ {
+ // We could set up a table of response classifications to message
+ // IDs here and index into that table. But given that (a) C++ does
+ // not have C's named initializers, (b) the codes for the
+ // response classifier are in another module and (c) not all messages
+ // have the same number of arguments, the setup of the table would be
+ // almost as long as the code here: it would need to include a number
+ // of assertions to ensure that any change to the response
+ // classifier codes was detected, and the checking logic would need to
+ // check that the numeric value of the code lay within the defined
+ // limits of the table.
+
+ if (category == ResponseClassifier::RCODE) {
+
+ // Special case as this message takes two arguments.
+ LOG_DEBUG(logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERROR).
+ arg(questionText(question_)).arg(rcode);
+
+ } else {
+
+ isc::log::MessageID message_id;
+ switch (category) {
+ case ResponseClassifier::TRUNCATED:
+ message_id = RESLIB_TCP_TRUNCATED;
+ break;
+
+ case ResponseClassifier::EMPTY:
+ message_id = RESLIB_EMPTY_RESPONSE;
+ break;
+
+ case ResponseClassifier::EXTRADATA:
+ message_id = RESLIB_EXTRADATA_RESPONSE;
+ break;
+
+ case ResponseClassifier::INVNAMCLASS:
+ message_id = RESLIB_INVALID_NAMECLASS_RESPONSE;
+ break;
+
+ case ResponseClassifier::INVTYPE:
+ message_id = RESLIB_INVALID_TYPE_RESPONSE;
+ break;
+
+ case ResponseClassifier::MISMATQUEST:
+ message_id = RESLIB_INVALID_QNAME_RESPONSE;
+ break;
+
+ case ResponseClassifier::MULTICLASS:
+ message_id = RESLIB_MULTIPLE_CLASS_RESPONSE;
+ break;
+
+ case ResponseClassifier::NOTONEQUEST:
+ message_id = RESLIB_NOT_ONE_QNAME_RESPONSE;
+ break;
+
+ case ResponseClassifier::NOTRESPONSE:
+ message_id = RESLIB_NOT_RESPONSE;
+ break;
+
+ case ResponseClassifier::NOTSINGLE:
+ message_id = RESLIB_NOTSINGLE_RESPONSE;
+ break;
+
+ case ResponseClassifier::OPCODE:
+ message_id = RESLIB_OPCODE_RESPONSE;
+ break;
+
+ default:
+ message_id = RESLIB_ERROR_RESPONSE;
+ break;
+ }
+ LOG_DEBUG(logger, RESLIB_DBG_RESULTS, message_id).
+ arg(questionText(question_));
+ }
+ }
+
public:
RunningQuery(IOService& io,
const Question& question,
@@ -734,12 +812,7 @@ public:
incoming.fromWire(ibuf);
buffer_->clear();
- if (incoming.getRcode() == Rcode::NOERROR()) {
- done_ = handleRecursiveAnswer(incoming);
- } else {
- isc::resolve::copyResponseMessage(incoming, answer_message_);
- done_ = true;
- }
+ done_ = handleRecursiveAnswer(incoming);
if (done_) {
callCallback(true);
stop();
diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes
index f702d9b..b59fd8c 100644
--- a/src/lib/resolve/resolve_messages.mes
+++ b/src/lib/resolve/resolve_messages.mes
@@ -15,22 +15,61 @@
$NAMESPACE isc::resolve
% RESLIB_ANSWER answer received in response to query for <%1>
-A debug message recording that an answer has been received to an upstream
-query for the specified question. Previous debug messages will have indicated
-the server to which the question was sent.
+A debug message reporting that an answer has been received to an upstream
+query for the specified question. Previous debug messages will have
+indicated the server to which the question was sent.
% RESLIB_CNAME CNAME received in response to query for <%1>
-A debug message recording that CNAME response has been received to an upstream
-query for the specified question. Previous debug messages will have indicated
-the server to which the question was sent.
+A debug message recording that a CNAME response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
% RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2
-A debug message, a cache lookup did not find the specified <name, class,
-type> tuple in the cache; instead, the deepest delegation found is indicated.
+A debug message, a cache lookup did not find the specified <name,
+class, type> tuple in the cache; instead, the deepest delegation found
+is indicated.
+
+% RESLIB_EMPTY_RESPONSE empty response received to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver did not contain anything in the answer or authority sections,
+although in all other respects it was a valid response. A SERVFAIL will
+be returned to the system making the original query.
+
+% RESLIB_ERROR_RESPONSE unspecified error received in response to query for <%1>
+A debug message, the response to the specified query to an upstream
+nameserver indicated that the response was classified as an erroneous
+response, but that the nature of the error cannot be identified.
+A SERVFAIL will be returned to the system making the original query.
+
+% RESLIB_EXTRADATA_RESPONSE extra data in response to query for <%1>
+A debug message indicating that the response to the specified query
+from an upstream nameserver contained too much data. This can happen if
+an ANY query was sent and the answer section in the response contained
+multiple RRs with different names. A SERVFAIL will be returned to the
+system making the original query.
% RESLIB_FOLLOW_CNAME following CNAME chain to <%1>
-A debug message, a CNAME response was received and another query is being issued
-for the <name, class, type> tuple.
+A debug message, a CNAME response was received and another query is
+being issued for the <name, class, type> tuple.
+
+% RESLIB_INVALID_NAMECLASS_RESPONSE invalid name or class in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained either
+an answer not matching the query name or an answer having a different
+class to that queried for. A SERVFAIL will be returned to the system
+making the original query.
+
+% RESLIB_INVALID_QNAME_RESPONSE invalid question name in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained a name
+in the question section that did not match that of the query. A SERVFAIL
+will be returned to the system making the original query.
+
+% RESLIB_INVALID_TYPE_RESPONSE invalid type in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained an
+invalid type field. A SERVFAIL will be returned to the system making
+the original query.
% RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded
A debug message recording that a CNAME response has been received to an upstream
@@ -39,16 +78,47 @@ the server to which the question was sent). However, receipt of this CNAME
has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
is where one CNAME points to another) and so an error is being returned.
+% RESLIB_MULTIPLE_CLASS_RESPONSE response to query for <%1> contained multiple RRsets with different classes
+A debug message reporting that the response to an upstream query for
+the specified name contained multiple RRsets in the answer and not all
+were of the same class. This is a violation of the standard and so a
+SERVFAIL will be returned.
+
% RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1>
A debug message, this indicates that a response was received for the specified
query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
+% RESLIB_NOT_ONE_QNAME_RESPONSE not one question in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) did not contain
+one name in the question section as required by the standard. A SERVFAIL
+will be returned to the system making the original query.
+
+% RESLIB_NOT_RESPONSE response to query for <%1> was not a response
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) did not have the QR
+bit set (thus indicating that the packet was a query, not a response).
+A SERVFAIL will be returned to the system making the original query.
+
+% RESLIB_NOTSINGLE_RESPONSE CNAME response to query for <%1> contained multiple RRs
+A debug message, the response to the specified query from an upstream
+nameserver was a CNAME that had multiple RRs in the RRset. This is
+an invalid response according to the standards so a SERVFAIL will be
+returned to the system making the original query.
+
% RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
+% RESLIB_OPCODE_RESPONSE response to query for <%1> did not have query opcode
+A debug message, the response to the specified query from an upstream
+nameserver was a response that did not have the opcode set to that of
+a query. According to the standards, this is an invalid response to
+the query that was made, so a SERVFAIL will be returned to the system
+making the original query.
+
% RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
@@ -63,7 +133,7 @@ A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-% RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1>
+% RESLIB_RCODE_ERROR response to query for <%1> returns RCODE of %2
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
@@ -122,6 +192,11 @@ A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
to the specified nameserver.
+% RESLIB_TCP_TRUNCATED TCP response to query for %1 was truncated
+This is a debug message logged when the response received over TCP to the
+specified query to an upstream nameserver had the TC (truncation) bit set.
+This is treated as an error by the code.
+
% RESLIB_TEST_SERVER setting test server to %1(%2)
This is a warning message only generated in unit tests. It indicates
that all upstream queries from the resolver are being routed to the
diff --git a/src/lib/resolve/response_classifier.h b/src/lib/resolve/response_classifier.h
index 3821560..a027bd0 100644
--- a/src/lib/resolve/response_classifier.h
+++ b/src/lib/resolve/response_classifier.h
@@ -151,7 +151,7 @@ private:
size_t size);
};
-#endif // __RESPONSE_CLASSIFIER_H
-
} // namespace resolve
} // namespace isc
+
+#endif // __RESPONSE_CLASSIFIER_H
diff --git a/src/lib/xfr/Makefile.am b/src/lib/xfr/Makefile.am
index d714990..3d7f60f 100644
--- a/src/lib/xfr/Makefile.am
+++ b/src/lib/xfr/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = . tests
+
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/dns -I$(top_builddir)/src/lib/dns
AM_CPPFLAGS += $(BOOST_INCLUDES)
diff --git a/src/lib/xfr/tests/Makefile.am b/src/lib/xfr/tests/Makefile.am
new file mode 100644
index 0000000..4abb456
--- /dev/null
+++ b/src/lib/xfr/tests/Makefile.am
@@ -0,0 +1,25 @@
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS =
+if HAVE_GTEST
+TESTS += run_unittests
+run_unittests_SOURCES = run_unittests.cc client_test.cc
+
+run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/xfr/tests/client_test.cc b/src/lib/xfr/tests/client_test.cc
new file mode 100644
index 0000000..6c9f4ad
--- /dev/null
+++ b/src/lib/xfr/tests/client_test.cc
@@ -0,0 +1,37 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <sys/un.h>
+#include <string>
+
+#include <xfr/xfrout_client.h>
+
+using namespace std;
+using namespace isc::xfr;
+
+namespace {
+
+TEST(ClientTest, connectFile) {
+ // File path is too long
+ struct sockaddr_un s; // can't be const; some compiler complains
+ EXPECT_THROW(XfroutClient(string(sizeof(s.sun_path), 'x')).connect(),
+ XfroutError);
+
+ // File doesn't exist (we assume the file "no_such_file" doesn't exist)
+ EXPECT_THROW(XfroutClient("no_such_file").connect(), XfroutError);
+}
+
+}
diff --git a/src/lib/xfr/tests/run_unittests.cc b/src/lib/xfr/tests/run_unittests.cc
new file mode 100644
index 0000000..8dc59a2
--- /dev/null
+++ b/src/lib/xfr/tests/run_unittests.cc
@@ -0,0 +1,24 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
+
+int
+main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
+ return (isc::util::unittests::run_all());
+}
diff --git a/src/lib/xfr/xfrout_client.cc b/src/lib/xfr/xfrout_client.cc
index 6ab905b..227ffc4 100644
--- a/src/lib/xfr/xfrout_client.cc
+++ b/src/lib/xfr/xfrout_client.cc
@@ -52,10 +52,11 @@ XfroutClient::~XfroutClient() {
void
XfroutClient::connect() {
- asio::error_code err;
- impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_), err);
- if (err) {
- isc_throw(XfroutError, "socket connect failed: " << err.message());
+ try {
+ impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_));
+ } catch (const asio::system_error& err) {
+ isc_throw(XfroutError, "socket connect failed for " <<
+ impl_->file_path_ << ": " << err.what());
}
}
diff --git a/tests/lettuce/README.tutorial b/tests/lettuce/README.tutorial
index 18c94cf..c7d3cd7 100644
--- a/tests/lettuce/README.tutorial
+++ b/tests/lettuce/README.tutorial
@@ -50,7 +50,7 @@ will need to expand these, but we will look at them shortly.
This file defines a feature, just under the feature name we can
provide a description of the feature.
-The one scenario we have no has no steps, so if we run it we should
+The one scenario we have has no steps, so if we run it we should
see something like:
-- output
@@ -84,7 +84,7 @@ So let's add a step that starts bind10.
When I start bind10 with configuration example.org.config
--
-This is not good enough; it will fire of the process, but setting up
+This is not good enough; it will start the process, but setting up
b10-auth may take a few moments, so we need to add a step to wait for
it to be started before we continue.
@@ -121,7 +121,7 @@ regular expression itself), so if the step is defined with "do foo bar", the
scenario can add words for readability "When I do foo bar".
Each captured group will be passed as an argument to the function we define.
-For bind10, i defined a configuration file, a cmdctl port, and a process
+For bind10, I defined a configuration file, a cmdctl port, and a process
name. The first two should be self-evident, and the process name is an
optional name we give it, should we want to address it in the rest of the
tests. This is most useful if we want to start multiple instances. In the
diff --git a/tests/lettuce/configurations/ixfr-out/testset1-config.db b/tests/lettuce/configurations/ixfr-out/testset1-config.db
new file mode 100644
index 0000000..c5fc165
--- /dev/null
+++ b/tests/lettuce/configurations/ixfr-out/testset1-config.db
@@ -0,0 +1 @@
+{"Xfrin": {"zones": [{"use_ixfr": true, "class": "IN", "name": "example.com.", "master_addr": "178.18.82.80"}]}, "version": 2, "Logging": {"loggers": [{"debuglevel": 99, "severity": "DEBUG", "output_options": [{"output": "stderr", "flush": true}], "name": "*"}]}, "Auth": {"database_file": "data/ixfr-out/zones.slite3", "listen_on": [{"port": 47806, "address": "::"}, {"port": 47806, "address": "0.0.0.0"}]}}
diff --git a/tests/lettuce/data/ixfr-out/zones.slite3 b/tests/lettuce/data/ixfr-out/zones.slite3
new file mode 100644
index 0000000..a2b2dbd
Binary files /dev/null and b/tests/lettuce/data/ixfr-out/zones.slite3 differ
diff --git a/tests/lettuce/features/ixfr_out_bind10.feature b/tests/lettuce/features/ixfr_out_bind10.feature
new file mode 100644
index 0000000..e84ad8c
--- /dev/null
+++ b/tests/lettuce/features/ixfr_out_bind10.feature
@@ -0,0 +1,195 @@
+Feature: IXFR out
+ Tests for IXFR-out, specific to BIND 10 behaviour.
+ These are (part of) the tests as described on
+ http://bind10.isc.org/wiki/IxfrSystemTests
+
+ # A lot of these tests test specific UDP behaviour.
+ #
+ # Where possible, we use the TCP equivalent. Some of the behaviour
+ # tested is UDP-specific though. In either case, a comment above
+ # the test shows how and why it differs from the test specification,
+ # or why it is commented out for now.
+ # When we do implement UDP IXFR, we should probably keep the TCP
+ # tests, and add them to the test specification, so we still have a
+ # 1-to-1 mapping between these tests and the specification document.
+ #
+ # These tests use a zone with just a few records, the first serial
+ # is 2, and it is incremented in steps of 2, up to serial 22.
+ # Each update either deletes or adds the www.example.com A record.
+ # Version 2 has the record, then the update to version 4 deletes it,
+ # the update to 6 adds it again, and so on, until version 22 (where
+ # the last update has added it again)
+ #
+ # Some of the tests (scenario 1 tests 3 and 4, and scenario 2 tests 1 and
+ # 2) may still not work if we replicate BIND 9's behaviour; it always
+ # responds to UDP IXFR requests with just the SOA, and it does not do
+ # AXFR-style IXFR if the number of changes exceeds the size of the zone)
+ #
+ # So in effect, there is only one test that is currently active (scenario
+ # 1 test 7)
+
+
+ Scenario: Test Set 1
+ Given I have bind10 running with configuration ixfr-out/testset1-config.db
+ Then wait for bind10 xfrout to start
+ The SOA serial for example.com should be 22
+
+ #
+ # Test 1
+ #
+ # We don't support UDP yet, and for TCP we currently return full zone,
+ # so this test is currently skipped
+ #
+ #When I do an IXFR transfer of example.com 123 over udp
+ #The transfer result should have 1 RRs
+ #The full result of the last transfer should be
+ #"""
+ #example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ #"""
+
+ #
+ # Test 2
+ #
+ # Original test specification was for UDP, using TCP for now
+ #
+ #When I do an IXFR transfer of example.com 22 over udp
+ When I do an IXFR transfer of example.com 22 over tcp
+ The transfer result should have 1 RRs
+ The full result of the last transfer should be
+ """
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ """
+
+ #
+ # Test 3
+ #
+ # Original test specification was for UDP, using TCP for now
+ #
+ #When I do an IXFR transfer of example.com 20 over udp
+ When I do an IXFR transfer of example.com 20 over tcp
+ The transfer result should have 5 RRs
+ The full result of the last transfer should be
+ """
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 20 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ """
+
+ #
+ # Test 4
+ #
+ # Original test specification was for UDP, using TCP for now
+ #
+ #When I do an IXFR transfer of example.com 18 over udp
+ When I do an IXFR transfer of example.com 18 over tcp
+ The transfer result should have 8 RRs
+ The full result of the last transfer should be
+ """
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 18 28800 7200 604800 18000
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 20 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 20 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ """
+
+ #
+ # Test 5
+ #
+ # This test does not have a TCP equivalent, so it is skipped.
+ #
+ #When I do an IXFR transfer of example.com 2 over udp
+ #The transfer result should have 1 RRs
+ #The full result of the last transfer should be
+ #"""
+ #example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ #"""
+
+ #
+ # Test 6
+ #
+ # This test does not have a TCP equivalent, so it is skipped.
+ #
+ #When I do an IXFR transfer of example.com 5 over udp
+ #The transfer result should have 1 RRs
+ #The full result of the last transfer should be
+ #"""
+ #example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ #"""
+
+ #
+ # Test 7
+ #
+ When I do an IXFR transfer of example.com 14 over tcp
+ The transfer result should have 14 RRs
+ The full result of the last transfer should be
+ """
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 14 28800 7200 604800 18000
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 16 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 16 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 18 28800 7200 604800 18000
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 18 28800 7200 604800 18000
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 20 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 20 28800 7200 604800 18000
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ """
+
+ Scenario: Test Set 2
+ Given I have bind10 running with configuration ixfr-out/testset1-config.db
+ Then wait for bind10 xfrout to start
+ The SOA serial for example.com should be 22
+
+ #
+ # Test 1
+ #
+ # Original test specification was for UDP, using TCP for now
+ #
+ #When I do an IXFR transfer of example.com 19 over udp
+ When I do an IXFR transfer of example.com 19 over tcp
+ The transfer result should have 5 RRs
+ The full result of the last transfer should be
+ """
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ example.com. 3600 IN NS ns.example.com.
+ ns.example.com. 3600 IN A 192.0.2.1
+ www.example.com. 3600 IN A 192.0.2.1
+ example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ """
+
+ #
+ # Test 2
+ #
+ # This test has no TCP equivalent
+ #
+ #When I do an IXFR transfer of example.com 6 over udp
+ #The transfer result should have 5 RRs
+ #The full result of the last transfer should be
+ #"""
+ #example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ #example.com. 3600 IN NS ns.example.com.
+ #ns.example.com. 3600 IN A 192.0.2.1
+ #www.example.com. 3600 IN A 192.0.2.1
+ #example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ #"""
+
+ #
+ # Test 3
+ #
+ # This test has no TCP equivalent
+ #
+ #When I do an IXFR transfer of example.com 2 over udp
+ #The transfer result should have 1 RRs
+ #The full result of the last transfer should be
+ #"""
+ #example.com. 3600 IN SOA ns.example.com. admin.example.com. 22 28800 7200 604800 18000
+ #"""
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
index 5248316..fdc419b 100644
--- a/tests/lettuce/features/terrain/bind10_control.py
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -79,6 +79,20 @@ def wait_for_auth(step, process_name):
world.processes.wait_for_stderr_str(process_name, ['AUTH_SERVER_STARTED'],
False)
+@step('wait for bind10 xfrout (?:of (\w+) )?to start')
+def wait_for_xfrout(step, process_name):
+ """Wait for b10-xfrout to run. This is done by blocking until the message
+ XFROUT_NEW_CONFIG_DONE is logged.
+ Parameters:
+ process_name ('of <name>', optional): The name of the BIND 10 instance
+ to wait for. Defaults to 'bind10'.
+ """
+ if process_name is None:
+ process_name = "bind10"
+ world.processes.wait_for_stderr_str(process_name,
+ ['XFROUT_NEW_CONFIG_DONE'],
+ False)
+
@step('have bind10 running(?: with configuration ([\S]+))?' +\
'(?: with cmdctl port (\d+))?' +\
'(?: as ([\S]+))?')
diff --git a/tests/lettuce/features/terrain/querying.py b/tests/lettuce/features/terrain/querying.py
index ea89b18..b132512 100644
--- a/tests/lettuce/features/terrain/querying.py
+++ b/tests/lettuce/features/terrain/querying.py
@@ -179,7 +179,7 @@ class QueryResult(object):
"""
pass
-@step('A query for ([\w.]+) (?:type ([A-Z]+) )?(?:class ([A-Z]+) )?' +
+@step('A query for ([\w.]+) (?:type ([A-Z0-9]+) )?(?:class ([A-Z]+) )?' +
'(?:to ([^:]+)(?::([0-9]+))? )?should have rcode ([\w.]+)')
def query(step, query_name, qtype, qclass, addr, port, rcode):
"""
diff --git a/tests/lettuce/features/terrain/transfer.py b/tests/lettuce/features/terrain/transfer.py
new file mode 100644
index 0000000..305e677
--- /dev/null
+++ b/tests/lettuce/features/terrain/transfer.py
@@ -0,0 +1,138 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This script provides transfer (ixfr/axfr) test functionality
+# It provides steps to perform the client side of a transfer,
+# and inspect the results.
+#
+# Like querying.py, it uses dig to do the transfers, and
+# places its output in a result structure
+#
+# This is done in a different file with different steps than
+# querying, because the format of dig's output is
+# very different than that of normal queries
+
+from lettuce import *
+import subprocess
+import re
+
+class TransferResult(object):
+ """This object stores transfer results, which is essentially simply
+ a list of RR strings. These are stored, as read from dig's output,
+ in the list 'records'. So for an IXFR transfer it contains
+ the exact result as returned by the server.
+ If this list is empty, the transfer failed for some reason (dig
+ does not really show error results well, unfortunately).
+ We may add some smarter inspection functionality to this class
+ later.
+ """
+ def __init__(self, args):
+ """Perform the transfer by calling dig, and store the results.
+ args is the array of arguments to pass to Popen(), this
+ is passed as is since for IXFR and AXFR there can be very
+ different options"""
+ self.records = []
+
+ # Technically, using a pipe here can fail; since we don't expect
+ # large output right now, this works, but should we get a test
+ # where we do have a lot of output, this could block, and we will
+ # need to read the output in a different way.
+ dig_process = subprocess.Popen(args, 1, None, None, subprocess.PIPE,
+ None)
+ result = dig_process.wait()
+ assert result == 0
+ for l in dig_process.stdout:
+ line = l.strip()
+ if len(line) > 0 and line[0] != ';':
+ self.records.append(line)
+
+@step('An AXFR transfer of ([\w.]+)(?: from ([^:]+)(?::([0-9]+))?)?')
+def perform_axfr(step, zone_name, address, port):
+ """
+ Perform an AXFR transfer, and store the result as an instance of
+ TransferResult in world.transfer_result.
+
+ Step definition:
+ An AXFR transfer of <zone_name> [from <address>:<port>]
+
+ Address defaults to 127.0.0.1
+ Port defaults to 47806
+ """
+ if address is None:
+ address = "127.0.0.1"
+ if port is None:
+ port = 47806
+ args = [ 'dig', 'AXFR', '@' + str(address), '-p', str(port), zone_name ]
+ world.transfer_result = TransferResult(args)
+
+@step('An IXFR transfer of ([\w.]+) (\d+)(?: from ([^:]+)(?::([0-9]+))?)?(?: over (tcp|udp))?')
+def perform_ixfr(step, zone_name, serial, address, port, protocol):
+ """
+ Perform an IXFR transfer, and store the result as an instance of
+ TransferResult in world.transfer_result.
+
+ Step definition:
+ An IXFR transfer of <zone_name> <serial> [from <address>:<port>] [over <tcp|udp>]
+
+ Address defaults to 127.0.0.1
+ Port defaults to 47806
+ If either tcp or udp is specified, only this protocol will be used.
+ """
+ if address is None:
+ address = "127.0.0.1"
+ if port is None:
+ port = 47806
+ args = [ 'dig', 'IXFR=' + str(serial), '@' + str(address), '-p', str(port), zone_name ]
+ if protocol is not None:
+ assert protocol == 'tcp' or protocol == 'udp', "Unknown protocol: " + protocol
+ if protocol == 'tcp':
+ args.append('+tcp')
+ elif protocol == 'udp':
+ args.append('+notcp')
+ world.transfer_result = TransferResult(args)
+
+@step('transfer result should have (\d+) rrs?')
+def check_transfer_result_count(step, number_of_rrs):
+ """
+ Check the number of rrs in the transfer result object created by
+ the AXFR transfer or IXFR transfer step.
+
+ Step definition:
+ transfer result should have <number> rr[s]
+
+ Fails if the number of RRs is not equal to number
+ """
+ assert int(number_of_rrs) == len(world.transfer_result.records),\
+ "Got " + str(len(world.transfer_result.records)) +\
+ " records, expected " + str(number_of_rrs)
+
+@step('full result of the last transfer should be')
+def check_full_transfer_result(step):
+ """
+ Check the complete output from the last transfer call.
+
+ Step definition:
+ full result of the last transfer should be <multiline value>
+
+ Whitespace is normalized in both the multiline value and the
+ output, but the order of the output is not.
+ Fails if there is any difference between the two. Prints
+ full output and expected value upon failure.
+ """
+ records_string = "\n".join(world.transfer_result.records)
+ records_string = re.sub("[ \t]+", " ", records_string)
+ expect = re.sub("[ \t]+", " ", step.multiline)
+ assert records_string.strip() == expect.strip(),\
+ "Got:\n'" + records_string + "'\nExpected:\n'" + expect + "'"
diff --git a/tools/reorder_message_file.py b/tools/reorder_message_file.py
new file mode 100644
index 0000000..31f4941
--- /dev/null
+++ b/tools/reorder_message_file.py
@@ -0,0 +1,196 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Reorder Message File
+#
+# Reads a message file into memory, then outputs it with the messages and
+# associated descriptions in alphabetical order.
+#
+# Invocation:
+# The code is invoked using the command line:
+#
+# python reorder.py message_file
+#
+# Output is written to stdout.
+
+import sys
+
+def remove_empty_leading_trailing(lines):
+ """
+ Removes leading and trailing empty lines.
+
+ A list of strings is passed as argument, some of which may be empty.
+ This function removes from the start and end of the list a contiguous
+ sequence of empty lines and returns the result. Embedded sequences of
+ empty lines are not touched.
+
+ Parameters:
+ lines List of strings to be modified.
+
+ Return:
+ Input list of strings with leading/trailing blank line sequences
+ removed.
+ """
+
+ retlines = []
+
+ # Dispose of degenerate case of empty array
+ if len(lines) == 0:
+ return retlines
+
+ # Search for first non-blank line
+ start = 0
+ while start < len(lines):
+ if len(lines[start]) > 0:
+ break
+ start = start + 1
+
+ # Handle case when entire list is empty
+ if start >= len(lines):
+ return retlines
+
+ # Search for last non-blank line
+ finish = len(lines) - 1
+ while finish >= 0:
+ if len(lines[finish]) > 0:
+ break
+ finish = finish - 1
+
+ retlines = lines[start:finish + 1]
+ return retlines
+
+
+def canonicalise_message_line(line):
+ """
+ Given a line known to start with the '%' character (i.e. a line
+ introducing a message), canonicalise it by ensuring that the result
+ is of the form '%<single-space>MESSAGE_IDENTIFIER<single-space>text'.
+
+ Parameters:
+ line - input line. Known to start with a '%' and to have leading
+ and trailing spaces removed.
+
+ Return:
+ Canonicalised line.
+ """
+ # Cope with degenerate case of a single "%"
+ if len(line) == 1:
+ return line
+
+ # Get the rest of the line
+ line = line[1:].lstrip()
+
+ # Extract the first word (the message ID)
+ words = line.split()
+ message_line = "% " + words[0]
+
+ # ... and now the rest of the line
+ if len(line) > len(words[0]):
+ message_line = message_line + " " + line[len(words[0]):].lstrip()
+
+ return message_line
+
+
+def make_dict(lines):
+ """
+ Split the lines into segments starting with the message definition and
+ place into a dictionary.
+
+ Parameters:
+ lines - list of lines containing the text of the message file (less the
+ header).
+
+ Returns:
+ dictionary - map of the messages, keyed by the line that holds the message
+ ID.
+ """
+
+ dictionary = {}
+
+ message_key = canonicalise_message_line(lines[0])
+ message_lines = [message_key]
+ index = 1
+ while index < len(lines):
+ if lines[index].startswith("%"):
+ # Start of new message
+ dictionary[message_key] = remove_empty_leading_trailing(message_lines)
+ message_key = canonicalise_message_line(lines[index])
+ message_lines = [message_key]
+ else:
+ message_lines.append(lines[index])
+
+ index = index + 1
+
+ dictionary[message_key] = remove_empty_leading_trailing(message_lines)
+
+ return dictionary
+
+
+def print_dict(dictionary):
+ """
+ Prints the dictionary with a blank line between entries.
+
+ Parameters:
+ dictionary - Map holding the message dictionary
+ """
+ count = 0
+ for msgid in sorted(dictionary):
+
+ # Blank line before all entries but the first
+ if count > 0:
+ print("")
+ count = count + 1
+
+ # ... and the entry itself.
+ for l in dictionary[msgid]:
+ print(l.strip())
+
+
+def process_file(filename):
+ """
+ Processes a file by reading it and searching for the first line starting
+ with the '%' sign. Everything before that line is treated as the file
+ header and is copied to the output with leading and trailing spaces removed.
+ After that, each message block is read and stored for later sorting.
+
+ Parameters:
+ filename Name of the message file to process
+ """
+ lines = open(filename).read().splitlines()
+
+ # Search for the first line starting with the percent character. Everything
+ # before it is considered the file header and is copied to the output with
+ # leading and trailing spaces removed.
+ index = 0
+ while index < len(lines):
+ if lines[index].startswith("%"):
+ break
+ print(lines[index].strip())
+ index = index + 1
+
+ # Now put the remaining lines into the message dictionary
+ dictionary = make_dict(lines[index:])
+
+ # ...and print it
+ print_dict(dictionary)
+
+
+# Main program
+if __name__ == "__main__":
+
+ # Read the files and load the data
+ if len(sys.argv) != 2:
+ print "Usage: python reorder.py message_file"
+ else:
+ process_file(sys.argv[1])
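To make the helper semantics above concrete, a small usage sketch (assuming the script can be imported as a module named reorder_message_file from the tools directory; normally it is run standalone as described in its header comment):

from reorder_message_file import (canonicalise_message_line,
                                  remove_empty_leading_trailing)

# '%' lines are normalised to '% ID text' with single spaces.
print(canonicalise_message_line("%   RESLIB_ANSWER   answer received"))
# -> % RESLIB_ANSWER answer received

# Leading/trailing blank lines go away, embedded ones are kept.
print(remove_empty_leading_trailing(["", "text", "", "more", ""]))
# -> ['text', '', 'more']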