BIND 10 trac2513, updated. e19573d76a7f11827ee1f2dec889f3edcf6385b9 [2513] Merge branch 'master' into trac2513
BIND 10 source code commits
bind10-changes at lists.isc.org
Thu Dec 6 12:16:08 UTC 2012
The branch, trac2513 has been updated
via e19573d76a7f11827ee1f2dec889f3edcf6385b9 (commit)
via 4e8325e1b309f1d388a3055ec1e1df98c377f383 (commit)
via a6865a7686a02c7026150be1304ee682e8ddbb51 (commit)
via 238b343266ec6c5e7c610781021d235b7fcfb22d (commit)
via e0c2fad3471896467ad0ae3576f93b39bcb76a7d (commit)
via 501a5e337373d10c40c05d0838f5711fbb961ee2 (commit)
via dbf7313f6bb3fd3ace97b50509a4208864c4c4ea (commit)
via e66472386a716a31089a2306b9e5d51f7618feeb (commit)
via 90641997b368ea093686c97d839b2794b927180c (commit)
via 816eacf47e4168d336c2f64b1b621f13f8ca6cb4 (commit)
via 09d70952ca89ef967f8652046ed6fe869625b42e (commit)
via 6944e7758d326b69482bcfd7d3f501df5113b394 (commit)
via 970ee33b88d3268780fc4a7d8950ba2a4a8202fd (commit)
via 2f554072bc7b82a18d898073802f545305a90110 (commit)
via 191d96a58702a5a3059993a1287136833ec94a4e (commit)
via 5d239b6aed71f987330add0bbcfc7baed670e804 (commit)
via 9440d65d7e71ec30e0d268f5f9f5b1980d288b98 (commit)
via dfd0dd105b4fc8054e3b9cfd1bc5547302f54578 (commit)
via e441d6b05a022f3c0a7140ae43fe42d5fd12cce3 (commit)
via 0ad163bb379066067551f1f3623afe43cf5bd327 (commit)
via f7f3060b97886bea4fbc4408a5c432e81096e3a9 (commit)
via b9f1eefe7e7040db4841e75a38b6a9cd1511eb59 (commit)
via 03dc2386b14131c0eb35408952e3ddc8ca017831 (commit)
via 3ae13a89a2f238602d5dd7e3dc12da963de21ec8 (commit)
via f73f27474fe73abacded88f4dad74867331cb402 (commit)
via ca8fc9f4147a8f332280fde6cfad99b963ebc40a (commit)
via b48ecd894a0ee52c132e041f686938bdb230d743 (commit)
via 638a2d7fbb2a0548e002d57f662f51621175a3a4 (commit)
via cb52403a8ad934f8dc32c6404b5c97b65057d63d (commit)
via a9a6007e022bdc61bcec8d01b51562f9f33f8fd3 (commit)
via 3e964a48bd208e8a7dc04ef1ff8de5cf1da5a514 (commit)
via 8e32eb96aad5d079931b5c7afac55a02f332f3cf (commit)
via 6226330df851a092d9df353081fe1d9c10743d36 (commit)
via d4b9c33282199c912e6f795f6bbfddde12510795 (commit)
via d27729277c9e4c9bb4cf121700d67181d6c0aadc (commit)
via 09cd4b9b32415144a2542171558b56226322bd7e (commit)
via 597ed99f869f9d170c93467e8abcc2b03959f75f (commit)
via ae3a1e5d17d5c8ab8c9dcf20cf60a68744f5cca0 (commit)
via d52186367e1f8b49dcb5a3b4825841a95251412f (commit)
via f70d131fd20085a472de32f86299a77647f18fd5 (commit)
via b991213a389213fe42ef0c87bde56e462c025229 (commit)
via c6c7cdee4088d6be3728ffaf07a25b920b0e8b5c (commit)
via ebea1d79bcd55536f89e250ce1be13a8369171f7 (commit)
via 610720a5bef22ba7193c9ad6e3cbe318eebdbf14 (commit)
from 1d4751ed52516da9036c61a16946ec74ea1f1ba5 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit e19573d76a7f11827ee1f2dec889f3edcf6385b9
Merge: 1d4751e 4e8325e
Author: Stephen Morris <stephen at isc.org>
Date: Thu Dec 6 11:26:03 2012 +0000
[2513] Merge branch 'master' into trac2513
-----------------------------------------------------------------------
Summary of changes:
configure.ac | 2 +-
src/lib/dns/gen-rdatacode.py.in | 29 +-
src/lib/dns/master_lexer.cc | 120 +++--
src/lib/dns/master_lexer.h | 541 ++++++++++++--------
src/lib/dns/master_lexer_state.h | 10 +-
src/lib/dns/rdata.cc | 114 ++++-
src/lib/dns/rdata.h | 41 +-
src/lib/dns/rdata/in_1/aaaa_28.cc | 27 +-
src/lib/dns/rdata/template.cc | 5 +
src/lib/dns/rrparamregistry-placeholder.cc | 7 +-
src/lib/dns/rrparamregistry.h | 35 +-
src/lib/dns/tests/master_lexer_state_unittest.cc | 14 +-
src/lib/dns/tests/master_lexer_token_unittest.cc | 90 ++--
src/lib/dns/tests/master_lexer_unittest.cc | 201 +++++++-
src/lib/dns/tests/rdata_unittest.cc | 133 +++++
src/lib/dns/tests/rdata_unittest.h | 2 +
tests/tools/perfdhcp/Makefile.am | 2 +-
tests/tools/perfdhcp/tests/Makefile.am | 3 +-
.../tools/perfdhcp/tests/test_control_unittest.cc | 34 +-
.../{templates => tests/testdata}/.gitignore | 0
.../{templates => tests/testdata}/Makefile.am | 2 -
.../testdata}/discover-example.hex | 0
.../testdata}/request4-example.hex | 0
.../testdata}/request6-example.hex | 0
.../testdata}/solicit-example.hex | 0
25 files changed, 1016 insertions(+), 396 deletions(-)
rename tests/tools/perfdhcp/{templates => tests/testdata}/.gitignore (100%)
rename tests/tools/perfdhcp/{templates => tests/testdata}/Makefile.am (90%)
rename tests/tools/perfdhcp/{templates => tests/testdata}/discover-example.hex (100%)
rename tests/tools/perfdhcp/{templates => tests/testdata}/request4-example.hex (100%)
rename tests/tools/perfdhcp/{templates => tests/testdata}/request6-example.hex (100%)
rename tests/tools/perfdhcp/{templates => tests/testdata}/solicit-example.hex (100%)
-----------------------------------------------------------------------
diff --git a/configure.ac b/configure.ac
index 636f8aa..feeb9a9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1308,7 +1308,7 @@ AC_CONFIG_FILES([Makefile
tests/tools/badpacket/tests/Makefile
tests/tools/perfdhcp/Makefile
tests/tools/perfdhcp/tests/Makefile
- tests/tools/perfdhcp/templates/Makefile
+ tests/tools/perfdhcp/tests/testdata/Makefile
dns++.pc
])
AC_OUTPUT([doc/version.ent
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index 5f0f2ef..72b7674 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -32,7 +32,7 @@ import sys
#
# Example:
# new_rdata_factory_users = [('a', 'in'), ('a', 'ch'), ('soa', 'generic')]
-new_rdata_factory_users = []
+new_rdata_factory_users = [('aaaa', 'in')]
re_typecode = re.compile('([\da-z]+)_(\d+)')
classcode2txt = {}
@@ -126,6 +126,9 @@ class AbstractMessageRenderer;\n\n'''
explicit ''' + type_utxt + '''(const std::string& type_str);
''' + type_utxt + '''(isc::util::InputBuffer& buffer, size_t rdata_len);
''' + type_utxt + '''(const ''' + type_utxt + '''& other);
+ ''' + type_utxt + '''(
+ MasterLexer& lexer, const Name* name,
+ MasterLoader::Options options, MasterLoaderCallbacks& callbacks);
virtual std::string toText() const;
virtual void toWire(isc::util::OutputBuffer& buffer) const;
virtual void toWire(AbstractMessageRenderer& renderer) const;
@@ -213,17 +216,33 @@ def generate_rdatadef(file, basemtime):
rdata_deffile.write(class_definitions)
rdata_deffile.close()
-def generate_rdatahdr(file, declarations, basemtime):
+def generate_rdatahdr(file, heading, declarations, basemtime):
if not need_generate(file, basemtime):
print('skip generating ' + file);
return
+ heading += '''
+#ifndef DNS_RDATACLASS_H
+#define DNS_RDATACLASS_H 1
+
+#include <dns/master_loader.h>
+
+namespace isc {
+namespace dns {
+class Name;
+class MasterLexer;
+class MasterLoaderCallbacks;
+}
+}
+'''
declarations += '''
+#endif // DNS_RDATACLASS_H
+
// Local Variables:
// mode: c++
// End:
'''
rdata_header = open(file, 'w')
- rdata_header.write(heading_txt)
+ rdata_header.write(heading)
rdata_header.write(declarations)
rdata_header.close()
@@ -320,8 +339,8 @@ if __name__ == "__main__":
try:
import_definitions(classcode2txt, typecode2txt, typeandclass)
generate_rdatadef('@builddir@/rdataclass.cc', rdatadef_mtime)
- generate_rdatahdr('@builddir@/rdataclass.h', rdata_declarations,
- rdatahdr_mtime)
+ generate_rdatahdr('@builddir@/rdataclass.h', heading_txt,
+ rdata_declarations, rdatahdr_mtime)
generate_typeclasscode('rrtype', rdatahdr_mtime, typecode2txt, 'Type')
generate_typeclasscode('rrclass', classdir_mtime,
classcode2txt, 'Class')
diff --git a/src/lib/dns/master_lexer.cc b/src/lib/dns/master_lexer.cc
index 2bf0254..b3b78c0 100644
--- a/src/lib/dns/master_lexer.cc
+++ b/src/lib/dns/master_lexer.cc
@@ -36,7 +36,7 @@ using namespace master_lexer_internal;
struct MasterLexer::MasterLexerImpl {
- MasterLexerImpl() : source_(NULL), token_(Token::NOT_STARTED),
+ MasterLexerImpl() : source_(NULL), token_(MasterToken::NOT_STARTED),
paren_count_(0), last_was_eol_(false),
has_previous_(false),
previous_paren_count_(0),
@@ -82,7 +82,7 @@ struct MasterLexer::MasterLexerImpl {
std::vector<InputSourcePtr> sources_;
InputSource* source_; // current source (NULL if sources_ is empty)
- Token token_; // currently recognized token (set by a state)
+ MasterToken token_; // currently recognized token (set by a state)
std::vector<char> data_; // placeholder for string data
// These are used in states, and defined here only as a placeholder.
@@ -165,9 +165,8 @@ MasterLexer::getSourceLine() const {
return (impl_->sources_.back()->getCurrentLine());
}
-const MasterLexer::Token&
+const MasterToken&
MasterLexer::getNextToken(Options options) {
- // If the source is not available
if (impl_->source_ == NULL) {
isc_throw(isc::InvalidOperation, "No source to read tokens from");
}
@@ -178,7 +177,7 @@ MasterLexer::getNextToken(Options options) {
impl_->has_previous_ = true;
// Reset the token now. This is to check a token was actually produced.
// This is debugging aid.
- impl_->token_ = Token(Token::NO_TOKEN_PRODUCED);
+ impl_->token_ = MasterToken(MasterToken::NO_TOKEN_PRODUCED);
// And get the token
// This actually handles EOF internally too.
@@ -188,8 +187,62 @@ MasterLexer::getNextToken(Options options) {
}
// Make sure a token was produced. Since this Can Not Happen, we assert
// here instead of throwing.
- assert(impl_->token_.getType() != Token::ERROR ||
- impl_->token_.getErrorCode() != Token::NO_TOKEN_PRODUCED);
+ assert(impl_->token_.getType() != MasterToken::ERROR ||
+ impl_->token_.getErrorCode() != MasterToken::NO_TOKEN_PRODUCED);
+ return (impl_->token_);
+}
+
+namespace {
+inline MasterLexer::Options
+optionsForTokenType(MasterToken::Type expect) {
+ switch (expect) {
+ case MasterToken::STRING:
+ return (MasterLexer::NONE);
+ case MasterToken::QSTRING:
+ return (MasterLexer::QSTRING);
+ case MasterToken::NUMBER:
+ return (MasterLexer::NUMBER);
+ default:
+ isc_throw(InvalidParameter,
+ "expected type for getNextToken not supported: " << expect);
+ }
+}
+}
+
+const MasterToken&
+MasterLexer::getNextToken(MasterToken::Type expect, bool eol_ok) {
+ // Get the next token, specifying an appropriate option corresponding to
+ // the expected type. The result should be set in impl_->token_.
+ getNextToken(optionsForTokenType(expect));
+
+ if (impl_->token_.getType() == MasterToken::ERROR) {
+ if (impl_->token_.getErrorCode() == MasterToken::NUMBER_OUT_OF_RANGE) {
+ ungetToken();
+ }
+ throw LexerError(__FILE__, __LINE__, impl_->token_);
+ }
+
+ const bool is_eol_like =
+ (impl_->token_.getType() == MasterToken::END_OF_LINE ||
+ impl_->token_.getType() == MasterToken::END_OF_FILE);
+ if (eol_ok && is_eol_like) {
+ return (impl_->token_);
+ }
+ if (impl_->token_.getType() == MasterToken::STRING &&
+ expect == MasterToken::QSTRING) {
+ return (impl_->token_);
+ }
+ if (impl_->token_.getType() != expect) {
+ ungetToken();
+ if (is_eol_like) {
+ throw LexerError(__FILE__, __LINE__,
+ MasterToken(MasterToken::UNEXPECTED_END));
+ }
+ assert(expect == MasterToken::NUMBER);
+ throw LexerError(__FILE__, __LINE__,
+ MasterToken(MasterToken::BAD_NUMBER));
+ }
+
return (impl_->token_);
}
@@ -212,16 +265,17 @@ const char* const error_text[] = {
"unexpected end of input", // UNEXPECTED_END
"unbalanced quotes", // UNBALANCED_QUOTES
"no token produced", // NO_TOKEN_PRODUCED
- "number out of range" // NUMBER_OUT_OF_RANGE
+ "number out of range", // NUMBER_OUT_OF_RANGE
+ "not a valid number" // BAD_NUMBER
};
const size_t error_text_max_count = sizeof(error_text) / sizeof(error_text[0]);
} // end unnamed namespace
std::string
-MasterLexer::Token::getErrorText() const {
+MasterToken::getErrorText() const {
if (type_ != ERROR) {
isc_throw(InvalidOperation,
- "Token::getErrorText() for non error type");
+ "MasterToken::getErrorText() for non error type");
}
// The class integrity ensures the following:
@@ -234,14 +288,12 @@ namespace master_lexer_internal {
// Note that these need to be defined here so that they can refer to
// the details of MasterLexerImpl.
-typedef MasterLexer::Token Token; // convenience shortcut
-
bool
State::wasLastEOL(const MasterLexer& lexer) const {
return (lexer.impl_->last_was_eol_);
}
-const MasterLexer::Token&
+const MasterToken&
State::getToken(const MasterLexer& lexer) const {
return (lexer.impl_->token_);
}
@@ -271,7 +323,7 @@ public:
if (c != '\n') {
getLexerImpl(lexer)->source_->ungetChar();
}
- getLexerImpl(lexer)->token_ = Token(Token::END_OF_LINE);
+ getLexerImpl(lexer)->token_ = MasterToken(MasterToken::END_OF_LINE);
getLexerImpl(lexer)->last_was_eol_ = true;
}
};
@@ -342,24 +394,24 @@ State::start(MasterLexer& lexer, MasterLexer::Options options) {
if (c == InputSource::END_OF_STREAM) {
lexerimpl.last_was_eol_ = false;
if (paren_count != 0) {
- lexerimpl.token_ = Token(Token::UNBALANCED_PAREN);
+ lexerimpl.token_ = MasterToken(MasterToken::UNBALANCED_PAREN);
paren_count = 0; // reset to 0; this helps in lenient mode.
return (NULL);
}
- lexerimpl.token_ = Token(Token::END_OF_FILE);
+ lexerimpl.token_ = MasterToken(MasterToken::END_OF_FILE);
return (NULL);
} else if (c == ' ' || c == '\t') {
// If requested and we are not in (), recognize the initial space.
if (lexerimpl.last_was_eol_ && paren_count == 0 &&
(options & MasterLexer::INITIAL_WS) != 0) {
lexerimpl.last_was_eol_ = false;
- lexerimpl.token_ = Token(Token::INITIAL_WS);
+ lexerimpl.token_ = MasterToken(MasterToken::INITIAL_WS);
return (NULL);
}
} else if (c == '\n') {
lexerimpl.last_was_eol_ = true;
if (paren_count == 0) { // we don't recognize EOL if we are in ()
- lexerimpl.token_ = Token(Token::END_OF_LINE);
+ lexerimpl.token_ = MasterToken(MasterToken::END_OF_LINE);
return (NULL);
}
} else if (c == '\r') {
@@ -375,7 +427,7 @@ State::start(MasterLexer& lexer, MasterLexer::Options options) {
} else if (c == ')') {
lexerimpl.last_was_eol_ = false;
if (paren_count == 0) {
- lexerimpl.token_ = Token(Token::UNBALANCED_PAREN);
+ lexerimpl.token_ = MasterToken(MasterToken::UNBALANCED_PAREN);
return (NULL);
}
--paren_count;
@@ -406,8 +458,11 @@ String::handle(MasterLexer& lexer) const {
if (getLexerImpl(lexer)->isTokenEnd(c, escaped)) {
getLexerImpl(lexer)->source_->ungetChar();
+ // make sure it is nul-terminated as a C string (excluded from the
+ // token data).
+ data.push_back('\0');
getLexerImpl(lexer)->token_ =
- MasterLexer::Token(&data.at(0), data.size());
+ MasterToken(&data.at(0), data.size() - 1);
return;
}
escaped = (c == '\\' && !escaped);
@@ -417,7 +472,7 @@ String::handle(MasterLexer& lexer) const {
void
QString::handle(MasterLexer& lexer) const {
- MasterLexer::Token& token = getLexerImpl(lexer)->token_;
+ MasterToken& token = getLexerImpl(lexer)->token_;
std::vector<char>& data = getLexerImpl(lexer)->data_;
data.clear();
@@ -425,7 +480,7 @@ QString::handle(MasterLexer& lexer) const {
while (true) {
const int c = getLexerImpl(lexer)->source_->getChar();
if (c == InputSource::END_OF_STREAM) {
- token = Token(Token::UNEXPECTED_END);
+ token = MasterToken(MasterToken::UNEXPECTED_END);
return;
} else if (c == '"') {
if (escaped) {
@@ -434,12 +489,15 @@ QString::handle(MasterLexer& lexer) const {
escaped = false;
data.back() = '"';
} else {
- token = MasterLexer::Token(&data.at(0), data.size(), true);
+ // make sure it is nul-terminated as a C string (excluded from the
+ // token data). This also simplifies the case of an empty string.
+ data.push_back('\0');
+ token = MasterToken(&data.at(0), data.size() - 1, true);
return;
}
} else if (c == '\n' && !escaped) {
getLexerImpl(lexer)->source_->ungetChar();
- token = Token(Token::UNBALANCED_QUOTES);
+ token = MasterToken(MasterToken::UNBALANCED_QUOTES);
return;
} else {
escaped = (c == '\\' && !escaped);
@@ -450,7 +508,7 @@ QString::handle(MasterLexer& lexer) const {
void
Number::handle(MasterLexer& lexer) const {
- MasterLexer::Token& token = getLexerImpl(lexer)->token_;
+ MasterToken& token = getLexerImpl(lexer)->token_;
// It may yet turn out to be a string, so we first
// collect all the data
@@ -464,21 +522,21 @@ Number::handle(MasterLexer& lexer) const {
getLexerImpl(lexer)->source_->getChar(), escaped);
if (getLexerImpl(lexer)->isTokenEnd(c, escaped)) {
getLexerImpl(lexer)->source_->ungetChar();
+ // We need to close the string whether it's digits-only (for
+ // lexical_cast) or not (see String::handle()).
+ data.push_back('\0');
if (digits_only) {
- // Close the string for lexical_cast
- data.push_back('\0');
try {
const uint32_t number32 =
boost::lexical_cast<uint32_t, const char*>(&data[0]);
- token = MasterLexer::Token(number32);
+ token = MasterToken(number32);
} catch (const boost::bad_lexical_cast&) {
// Since we already know we have only digits,
// range should be the only possible problem.
- token = Token(Token::NUMBER_OUT_OF_RANGE);
+ token = MasterToken(MasterToken::NUMBER_OUT_OF_RANGE);
}
} else {
- token = MasterLexer::Token(&data.at(0),
- data.size());
+ token = MasterToken(&data.at(0), data.size() - 1);
}
return;
}
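
The push_back('\0') changes above mean that the buffer behind a STRING or
QSTRING token is now nul-terminated, with the terminator excluded from the
reported length. A minimal caller-side sketch of that guarantee, assuming a
lexer fed from a string stream (the snippet is illustrative only and is not
part of this branch):

    #include <dns/master_lexer.h>

    #include <cstring>
    #include <iostream>
    #include <sstream>

    using namespace isc::dns;

    int main() {
        std::istringstream ss("example.org 3600");
        MasterLexer lexer;
        lexer.pushSource(ss);

        // With the nul terminator appended by the lexer (and not counted
        // in len), the region data can be handed to C APIs directly.
        const MasterToken& token = lexer.getNextToken();
        if (token.getType() == MasterToken::STRING) {
            const MasterToken::StringRegion& region = token.getStringRegion();
            std::cout << region.beg << " (len=" << region.len
                      << ", strlen=" << std::strlen(region.beg) << ")\n";
        }
        return (0);
    }
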
diff --git a/src/lib/dns/master_lexer.h b/src/lib/dns/master_lexer.h
index 4a861fc..35586fe 100644
--- a/src/lib/dns/master_lexer.h
+++ b/src/lib/dns/master_lexer.h
@@ -28,225 +28,6 @@ namespace master_lexer_internal {
class State;
}
-/// \brief Tokenizer for parsing DNS master files.
-///
-/// The \c MasterLexer class provides tokenize interfaces for parsing DNS
-/// master files. It understands some special rules of master files as
-/// defined in RFC 1035, such as comments, character escaping, and multi-line
-/// data, and provides the user application with the actual data in a
-/// more convenient form such as a std::string object.
-///
-/// In order to support the $INCLUDE notation, this class is designed to be
-/// able to operate on multiple files or input streams in the nested way.
-/// The \c pushSource() and \c popSource() methods correspond to the push
-/// and pop operations.
-///
-/// While this class is public, it is less likely to be used by normal
-/// applications; it's mainly expected to be used within this library,
-/// specifically by the \c MasterLoader class and \c Rdata implementation
-/// classes.
-///
-/// \note The error handling policy of this class is slightly different from
-/// that of other classes of this library. We generally throw an exception
-/// for an invalid input, whether it's more likely to be a program error or
-/// a "user error", which means an invalid input that comes from outside of
-/// the library. But, this class returns an error code for some certain
-/// types of user errors instead of throwing an exception. Such cases include
-/// a syntax error identified by the lexer or a misspelled file name that
-/// causes a system error at the time of open. This is based on the assumption
-/// that the main user of this class is a parser of master files, where
-/// we want to give an option to ignore some non fatal errors and continue
-/// the parsing. This will be useful if it just performs overall error
-/// checks on a master file. When the (immediate) caller needs to do explicit
-/// error handling, exceptions are not that a useful tool for error reporting
-/// because we cannot separate the normal and error cases anyway, which would
-/// be one major advantage when we use exceptions. And, exceptions are
-/// generally more expensive, either when it happens or just by being able
-/// to handle with \c try and \c catch (depending on the underlying
-/// implementation of the exception handling). For these reasons, some of
-/// this class does not throw for an error that would be reported as an
-/// exception in other classes.
-class MasterLexer {
- friend class master_lexer_internal::State;
-public:
- /// \brief Exception thrown when we fail to read from the input
- /// stream or file.
- struct ReadError : public Unexpected {
- ReadError(const char* file, size_t line, const char* what) :
- Unexpected(file, line, what)
- {}
- };
-
- class Token; // we define it separately for better readability
-
- /// \brief Options for getNextToken.
- ///
- /// A compound option, indicating multiple options are set, can be
- /// specified using the logical OR operator (operator|()).
- enum Options {
- NONE = 0, ///< No option
- INITIAL_WS = 1, ///< recognize begin-of-line spaces after an
- ///< end-of-line
- QSTRING = 2, ///< recognize quoted string
- NUMBER = 4 ///< recognize numeric text as integer
- };
-
- /// \brief The constructor.
- ///
- /// \throw std::bad_alloc Internal resource allocation fails (rare case).
- MasterLexer();
-
- /// \brief The destructor.
- ///
- /// It internally closes any remaining input sources.
- ~MasterLexer();
-
- /// \brief Open a file and make it the current input source of MasterLexer.
- ///
- /// The opened file can be explicitly closed by the \c popSource() method;
- /// if \c popSource() is not called within the lifetime of the
- /// \c MasterLexer, it will be closed in the destructor.
- ///
- /// In the case possible system errors in opening the file (most likely
- /// because of specifying a non-existent or unreadable file), it returns
- /// false, and if the optional \c error parameter is non NULL, it will be
- /// set to a description of the error (any existing content of the string
- /// will be discarded). If opening the file succeeds, the given
- /// \c error parameter will be intact.
- ///
- /// Note that this method has two styles of error reporting: one by
- /// returning \c false (and setting \c error optionally) and the other
- /// by throwing an exception. See the note for the class description
- /// about the distinction.
- ///
- /// \throw InvalidParameter filename is NULL
- /// \param filename A non NULL string specifying a master file
- /// \param error If non null, a placeholder to set error description in
- /// case of failure.
- ///
- /// \return true if pushing the file succeeds; false otherwise.
- bool pushSource(const char* filename, std::string* error = NULL);
-
- /// \brief Make the given stream the current input source of MasterLexer.
- ///
- /// The caller still holds the ownership of the passed stream; it's the
- /// caller's responsibility to keep it valid as long as it's used in
- /// \c MasterLexer or to release any resource for the stream after that.
- /// The caller can explicitly tell \c MasterLexer to stop using the
- /// stream by calling the \c popSource() method.
- ///
- /// \param input An input stream object that produces textual
- /// representation of DNS RRs.
- void pushSource(std::istream& input);
-
- /// \brief Stop using the most recently opened input source (file or
- /// stream).
- ///
- /// If it's a file, the previously opened file will be closed internally.
- /// If it's a stream, \c MasterLexer will simply stop using
- /// the stream; the caller can assume it will be never used in
- /// \c MasterLexer thereafter.
- ///
- /// This method must not be called when there is no source pushed for
- /// \c MasterLexer. This method is otherwise exception free.
- ///
- /// \throw isc::InvalidOperation Called with no pushed source.
- void popSource();
-
- /// \brief Return the name of the current input source name.
- ///
- /// If it's a file, it will be the C string given at the corresponding
- /// \c pushSource() call, that is, its filename. If it's a stream, it will
- /// be formatted as \c "stream-%p" where \c %p is hex representation
- /// of the address of the stream object.
- ///
- /// If there is no opened source at the time of the call, this method
- /// returns an empty string.
- ///
- /// \throw std::bad_alloc Resource allocation failed for string
- /// construction (rare case)
- ///
- /// \return A string representation of the current source (see the
- /// description)
- std::string getSourceName() const;
-
- /// \brief Return the input source line number.
- ///
- /// If there is an opened source, the return value will be a non-0
- /// integer indicating the line number of the current source where
- /// the \c MasterLexer is currently working. The expected usage of
- /// this value is to print a helpful error message when parsing fails
- /// by specifically identifying the position of the error.
- ///
- /// If there is no opened source at the time of the call, this method
- /// returns 0.
- ///
- /// \throw None
- ///
- /// \return The current line number of the source (see the description)
- size_t getSourceLine() const;
-
- /// \brief Parse and return another token from the input.
- ///
- /// It reads a bit of the last opened source and produces another token
- /// found in it.
- ///
- /// This method does not provide the strong exception guarantee. Generally,
- /// if it throws, the object should not be used any more and should be
- /// discarded. It was decided all the exceptions thrown from here are
- /// serious enough that aborting the loading process is the only reasonable
- /// recovery anyway, so the strong exception guarantee is not needed.
- ///
- /// \param options The options can be used to modify the tokenization.
- /// The method can be made reporting things which are usually ignored
- /// by this parameter. Multiple options can be passed at once by
- /// bitwise or (eg. option1 | option 2). See description of available
- /// options.
- /// \return Next token found in the input. Note that the token refers to
- /// some internal data in the lexer. It is valid only until
- /// getNextToken or ungetToken is called. Also, the token becomes
- /// invalid when the lexer is destroyed.
- /// \throw isc::InvalidOperation in case the source is not available. This
- /// may mean the pushSource() has not been called yet, or that the
- /// current source has been read past the end.
- /// \throw ReadError in case there's problem reading from the underlying
- /// source (eg. I/O error in the file on the disk).
- /// \throw std::bad_alloc in case allocation of some internal resources
- /// or the token fail.
- const Token& getNextToken(Options options = NONE);
-
- /// \brief Return the last token back to the lexer.
- ///
- /// The method undoes the lasts call to getNextToken(). If you call the
- /// getNextToken() again with the same options, it'll return the same
- /// token. If the options are different, it may return a different token,
- /// but it acts as if the previous getNextToken() was never called.
- ///
- /// It is possible to return only one token back in time (you can't call
- /// ungetToken() twice in a row without calling getNextToken() in between
- /// successfully).
- ///
- /// It does not work after change of source (by pushSource or popSource).
- ///
- /// \throw isc::InvalidOperation If called second time in a row or if
- /// getNextToken() was not called since the last change of the source.
- void ungetToken();
-
-private:
- struct MasterLexerImpl;
- MasterLexerImpl* impl_;
-};
-
-/// \brief Operator to combine \c MasterLexer options
-///
-/// This is a trivial shortcut so that compound options can be specified
-/// in an intuitive way.
-inline MasterLexer::Options
-operator|(MasterLexer::Options o1, MasterLexer::Options o2) {
- return (static_cast<MasterLexer::Options>(
- static_cast<unsigned>(o1) | static_cast<unsigned>(o2)));
-}
-
/// \brief Tokens for \c MasterLexer
///
/// This is a simple value-class encapsulating a type of a lexer token and
@@ -261,7 +42,7 @@ operator|(MasterLexer::Options o1, MasterLexer::Options o2) {
/// (using the default version of copy constructor and assignment operator),
/// but it's mainly for internal implementation convenience. Applications will
/// simply refer to Token object as a reference via the \c MasterLexer class.
-class MasterLexer::Token {
+class MasterToken {
public:
/// \brief Enumeration for token types
///
@@ -293,6 +74,7 @@ public:
NO_TOKEN_PRODUCED, ///< No token was produced. This means programmer
/// error and should never get out of the lexer.
NUMBER_OUT_OF_RANGE, ///< Number was out of range
+ BAD_NUMBER, ///< Number is expected but not recognized
MAX_ERROR_CODE ///< Max integer corresponding to valid error codes.
/// (excluding this one). Mainly for internal use.
};
@@ -308,6 +90,13 @@ public:
/// the region. On the other hand, it is not ensured that the string
/// is nul-terminated. So the usual string manipulation API may not work
/// as expected.
+ ///
+ /// The `MasterLexer` implementation ensures that there are at least
+ /// len + 1 bytes of valid memory region starting from beg, and that
+ /// beg[len] is \0. This means the application can use the bytes as a
+ /// validly nul-terminated C string if there is no intermediate nul
+ /// character. Note also that due to this property beg is always non
+ /// NULL; for an empty string len will be set to 0 and beg[0] is \0.
struct StringRegion {
const char* beg; ///< The start address of the string
size_t len; ///< The length of the string in bytes
@@ -318,7 +107,7 @@ public:
/// \throw InvalidParameter A value type token is specified.
/// \param type The type of the token. It must indicate a non-value
/// type (not larger than \c NOVALUE_TYPE_MAX).
- explicit Token(Type type) : type_(type) {
+ explicit MasterToken(Type type) : type_(type) {
if (type > NOVALUE_TYPE_MAX) {
isc_throw(InvalidParameter, "Token per-type constructor "
"called with invalid type: " << type);
@@ -340,7 +129,7 @@ public:
/// \param str_beg The start address of the string
/// \param str_len The size of the string in bytes
/// \param quoted true if it's a quoted string; false otherwise.
- Token(const char* str_beg, size_t str_len, bool quoted = false) :
+ MasterToken(const char* str_beg, size_t str_len, bool quoted = false) :
type_(quoted ? QSTRING : STRING)
{
val_.str_region_.beg = str_beg;
@@ -351,7 +140,7 @@ public:
///
/// \brief number An unsigned 32-bit integer corresponding to the token
/// value.
- explicit Token(uint32_t number) : type_(NUMBER) {
+ explicit MasterToken(uint32_t number) : type_(NUMBER) {
val_.number_ = number;
}
@@ -359,7 +148,7 @@ public:
///
/// \throw InvalidParameter Invalid error code value is specified.
/// \brief error_code A pre-defined constant of \c ErrorCode.
- explicit Token(ErrorCode error_code) : type_(ERROR) {
+ explicit MasterToken(ErrorCode error_code) : type_(ERROR) {
if (!(error_code < MAX_ERROR_CODE)) {
isc_throw(InvalidParameter, "Invalid master lexer error code: "
<< error_code);
@@ -476,6 +265,310 @@ private:
} val_;
};
+/// \brief Tokenizer for parsing DNS master files.
+///
+/// The \c MasterLexer class provides tokenize interfaces for parsing DNS
+/// master files. It understands some special rules of master files as
+/// defined in RFC 1035, such as comments, character escaping, and multi-line
+/// data, and provides the user application with the actual data in a
+/// more convenient form such as a std::string object.
+///
+/// In order to support the $INCLUDE notation, this class is designed to be
+/// able to operate on multiple files or input streams in the nested way.
+/// The \c pushSource() and \c popSource() methods correspond to the push
+/// and pop operations.
+///
+/// While this class is public, it is less likely to be used by normal
+/// applications; it's mainly expected to be used within this library,
+/// specifically by the \c MasterLoader class and \c Rdata implementation
+/// classes.
+///
+/// \note The error handling policy of this class is slightly different from
+/// that of other classes of this library. We generally throw an exception
+/// for an invalid input, whether it's more likely to be a program error or
+/// a "user error", which means an invalid input that comes from outside of
+/// the library. But, this class returns an error code for some certain
+/// types of user errors instead of throwing an exception. Such cases include
+/// a syntax error identified by the lexer or a misspelled file name that
+/// causes a system error at the time of open. This is based on the assumption
+/// that the main user of this class is a parser of master files, where
+/// we want to give an option to ignore some non fatal errors and continue
+/// the parsing. This will be useful if it just performs overall error
+/// checks on a master file. When the (immediate) caller needs to do explicit
+ /// error handling, exceptions are not that useful a tool for error reporting
+/// because we cannot separate the normal and error cases anyway, which would
+/// be one major advantage when we use exceptions. And, exceptions are
+/// generally more expensive, either when it happens or just by being able
+/// to handle with \c try and \c catch (depending on the underlying
+/// implementation of the exception handling). For these reasons, some of
+/// this class does not throw for an error that would be reported as an
+/// exception in other classes.
+class MasterLexer {
+ friend class master_lexer_internal::State;
+public:
+ /// \brief Exception thrown when we fail to read from the input
+ /// stream or file.
+ class ReadError : public Unexpected {
+ public:
+ ReadError(const char* file, size_t line, const char* what) :
+ Unexpected(file, line, what)
+ {}
+ };
+
+ /// \brief Exception thrown from a wrapper version of
+ /// \c MasterLexer::getNextToken() for non fatal errors.
+ ///
+ /// See the method description for more details.
+ ///
+ /// The \c token_ member variable (read-only) is set to a \c MasterToken
+ /// object of type ERROR indicating the reason for the error.
+ class LexerError : public Exception {
+ public:
+ LexerError(const char* file, size_t line, MasterToken error_token) :
+ Exception(file, line, error_token.getErrorText().c_str()),
+ token_(error_token)
+ {}
+ const MasterToken token_;
+ };
+
+ /// \brief Options for getNextToken.
+ ///
+ /// A compound option, indicating multiple options are set, can be
+ /// specified using the logical OR operator (operator|()).
+ enum Options {
+ NONE = 0, ///< No option
+ INITIAL_WS = 1, ///< recognize begin-of-line spaces after an
+ ///< end-of-line
+ QSTRING = 2, ///< recognize quoted string
+ NUMBER = 4 ///< recognize numeric text as integer
+ };
+
+ /// \brief The constructor.
+ ///
+ /// \throw std::bad_alloc Internal resource allocation fails (rare case).
+ MasterLexer();
+
+ /// \brief The destructor.
+ ///
+ /// It internally closes any remaining input sources.
+ ~MasterLexer();
+
+ /// \brief Open a file and make it the current input source of MasterLexer.
+ ///
+ /// The opened file can be explicitly closed by the \c popSource() method;
+ /// if \c popSource() is not called within the lifetime of the
+ /// \c MasterLexer, it will be closed in the destructor.
+ ///
+ /// In the case of possible system errors in opening the file (most likely
+ /// because of specifying a non-existent or unreadable file), it returns
+ /// false, and if the optional \c error parameter is non NULL, it will be
+ /// set to a description of the error (any existing content of the string
+ /// will be discarded). If opening the file succeeds, the given
+ /// \c error parameter will be intact.
+ ///
+ /// Note that this method has two styles of error reporting: one by
+ /// returning \c false (and setting \c error optionally) and the other
+ /// by throwing an exception. See the note for the class description
+ /// about the distinction.
+ ///
+ /// \throw InvalidParameter filename is NULL
+ /// \param filename A non NULL string specifying a master file
+ /// \param error If non null, a placeholder to set error description in
+ /// case of failure.
+ ///
+ /// \return true if pushing the file succeeds; false otherwise.
+ bool pushSource(const char* filename, std::string* error = NULL);
+
+ /// \brief Make the given stream the current input source of MasterLexer.
+ ///
+ /// The caller still holds the ownership of the passed stream; it's the
+ /// caller's responsibility to keep it valid as long as it's used in
+ /// \c MasterLexer or to release any resource for the stream after that.
+ /// The caller can explicitly tell \c MasterLexer to stop using the
+ /// stream by calling the \c popSource() method.
+ ///
+ /// \param input An input stream object that produces textual
+ /// representation of DNS RRs.
+ void pushSource(std::istream& input);
+
+ /// \brief Stop using the most recently opened input source (file or
+ /// stream).
+ ///
+ /// If it's a file, the previously opened file will be closed internally.
+ /// If it's a stream, \c MasterLexer will simply stop using
+ /// the stream; the caller can assume it will be never used in
+ /// \c MasterLexer thereafter.
+ ///
+ /// This method must not be called when there is no source pushed for
+ /// \c MasterLexer. This method is otherwise exception free.
+ ///
+ /// \throw isc::InvalidOperation Called with no pushed source.
+ void popSource();
+
+ /// \brief Return the name of the current input source name.
+ ///
+ /// If it's a file, it will be the C string given at the corresponding
+ /// \c pushSource() call, that is, its filename. If it's a stream, it will
+ /// be formatted as \c "stream-%p" where \c %p is hex representation
+ /// of the address of the stream object.
+ ///
+ /// If there is no opened source at the time of the call, this method
+ /// returns an empty string.
+ ///
+ /// \throw std::bad_alloc Resource allocation failed for string
+ /// construction (rare case)
+ ///
+ /// \return A string representation of the current source (see the
+ /// description)
+ std::string getSourceName() const;
+
+ /// \brief Return the input source line number.
+ ///
+ /// If there is an opened source, the return value will be a non-0
+ /// integer indicating the line number of the current source where
+ /// the \c MasterLexer is currently working. The expected usage of
+ /// this value is to print a helpful error message when parsing fails
+ /// by specifically identifying the position of the error.
+ ///
+ /// If there is no opened source at the time of the call, this method
+ /// returns 0.
+ ///
+ /// \throw None
+ ///
+ /// \return The current line number of the source (see the description)
+ size_t getSourceLine() const;
+
+ /// \brief Parse and return another token from the input.
+ ///
+ /// It reads a bit of the last opened source and produces another token
+ /// found in it.
+ ///
+ /// This method does not provide the strong exception guarantee. Generally,
+ /// if it throws, the object should not be used any more and should be
+ /// discarded. It was decided all the exceptions thrown from here are
+ /// serious enough that aborting the loading process is the only reasonable
+ /// recovery anyway, so the strong exception guarantee is not needed.
+ ///
+ /// \param options The options can be used to modify the tokenization.
+ /// The method can be made reporting things which are usually ignored
+ /// by this parameter. Multiple options can be passed at once by
+ /// bitwise or (eg. option1 | option 2). See description of available
+ /// options.
+ /// \return Next token found in the input. Note that the token refers to
+ /// some internal data in the lexer. It is valid only until
+ /// getNextToken or ungetToken is called. Also, the token becomes
+ /// invalid when the lexer is destroyed.
+ /// \throw isc::InvalidOperation in case the source is not available. This
+ /// may mean the pushSource() has not been called yet, or that the
+ /// current source has been read past the end.
+ /// \throw ReadError in case there's a problem reading from the underlying
+ /// source (eg. I/O error in the file on the disk).
+ /// \throw std::bad_alloc in case allocation of some internal resources
+ /// or of the token fails.
+ const MasterToken& getNextToken(Options options = NONE);
+
+ /// \brief Parse the input for the expected type of token.
+ ///
+ /// This method is a wrapper of the other version, customized for the case
+ /// where a particular type of token is expected as the next one.
+ /// More specifically, it's intended to be used to get tokens for RDATA
+ /// fields. Since most RDATA types are of fixed format, the token type is
+ /// often predictable and the method interface can be simplified.
+ ///
+ /// This method basically works as follows: it gets the type of the
+ /// expected token, calls the other version of \c getNextToken(Options),
+ /// and returns the token if it's of the expected type (due to the usage
+ /// assumption this should normally be the case). There are some non
+ /// trivial details though:
+ ///
+ /// - If the expected type is MasterToken::QSTRING, both quoted and
+ /// unquoted strings are recognized and returned.
+ /// - If the optional \c eol_ok parameter is \c true (very rare case),
+ /// MasterToken::END_OF_LINE and MasterToken::END_OF_FILE are recognized
+ /// and returned if they are found instead of the expected type of
+ /// token.
+ /// - If the next token is not of the expected type (including the case
+ /// a number is expected but it's out of range), ungetToken() is
+ /// internally called so the caller can re-read that token.
+ /// - If other types or errors (such as unbalanced parentheses) are
+ /// detected, the erroneous part isn't "ungotten"; the caller can
+ /// continue parsing after that part.
+ ///
+ /// In some very rare cases where the RDATA has an optional trailing field,
+ /// the \c eol_ok parameter would be set to \c true. This way the caller
+ /// can handle both cases (the field does or does not exist) by a single
+ /// call to this method. In all other cases \c eol_ok should be set to
+ /// \c false, and that is the default and can be omitted.
+ ///
+ /// Unlike the other version of \c getNextToken(Options), this method
+ /// throws an exception of type \c LexerError for non fatal errors such as
+ /// broken syntax or encountering an unexpected type of token. This way
+ /// the caller can write RDATA parser code without bothering to handle
+ /// errors for each field. For example, pseudo parser code for MX RDATA
+ /// would look like this:
+ /// \code
+ /// const uint32_t pref =
+ /// lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ /// // check if pref is the uint16_t range; no other check is needed.
+ /// const Name mx(lexer.getNextToken(MasterToken::STRING).getString());
+ /// \endcode
+ ///
+ /// In the case where \c LexerError exception is thrown, it's expected
+ /// to be handled comprehensively for the parser of the RDATA or at a
+ /// higher layer. The \c token_ member variable of the corresponding
+ /// \c LexerError exception object stores a token of type
+ /// \c MasterToken::ERROR that indicates the reason for the error.
+ ///
+ /// Due to the specific intended usage of this method, only a subset
+ /// of \c MasterToken::Type values are acceptable for the \c expect
+ /// parameter: \c MasterToken::STRING, \c MasterToken::QSTRING, and
+ /// \c MasterToken::NUMBER. Specifying other values will result in
+ /// an \c InvalidParameter exception.
+ ///
+ /// \throw InvalidParameter The expected token type is not allowed for
+ /// this method.
+ /// \throw LexerError The lexer finds a non fatal error or it finds an
+ /// unexpected type of token.
+ /// \throw other Anything the other version of getNextToken() can throw.
+ ///
+ /// \param expect Expected type of token. Must be either STRING, QSTRING,
+ /// or NUMBER.
+ /// \param eol_ok \c true iff END_OF_LINE or END_OF_FILE is acceptable.
+ /// \return The expected type of token.
+ const MasterToken& getNextToken(MasterToken::Type expect,
+ bool eol_ok = false);
+
+ /// \brief Return the last token back to the lexer.
+ ///
+ /// The method undoes the lasts call to getNextToken(). If you call the
+ /// getNextToken() again with the same options, it'll return the same
+ /// token. If the options are different, it may return a different token,
+ /// but it acts as if the previous getNextToken() was never called.
+ ///
+ /// It is possible to return only one token back in time (you can't call
+ /// ungetToken() twice in a row without calling getNextToken() in between
+ /// successfully).
+ ///
+ /// It does not work after change of source (by pushSource or popSource).
+ ///
+ /// \throw isc::InvalidOperation If called second time in a row or if
+ /// getNextToken() was not called since the last change of the source.
+ void ungetToken();
+
+private:
+ struct MasterLexerImpl;
+ MasterLexerImpl* impl_;
+};
+
+/// \brief Operator to combine \c MasterLexer options
+///
+/// This is a trivial shortcut so that compound options can be specified
+/// in an intuitive way.
+inline MasterLexer::Options
+operator|(MasterLexer::Options o1, MasterLexer::Options o2) {
+ return (static_cast<MasterLexer::Options>(
+ static_cast<unsigned>(o1) | static_cast<unsigned>(o2)));
+}
+
} // namespace dns
} // namespace isc
#endif // MASTER_LEXER_H
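
To make the new getNextToken(expect, eol_ok) contract concrete, the MX pseudo
code from the comment above can be fleshed out as follows. This is a sketch
only: it assumes the lexer already has a source pushed, and the parseMXLike
helper is hypothetical, not code from this branch:

    #include <dns/master_lexer.h>
    #include <dns/name.h>

    #include <iostream>

    using namespace isc::dns;

    // Parse "<preference> <exchange-name>" (MX-style RDATA) from the lexer.
    // The try/catch makes the LexerError behavior visible: BAD_NUMBER,
    // NUMBER_OUT_OF_RANGE and UNEXPECTED_END all surface here.
    void parseMXLike(MasterLexer& lexer) {
        try {
            const uint32_t pref =
                lexer.getNextToken(MasterToken::NUMBER).getNumber();
            if (pref > 0xffff) {
                std::cerr << "preference out of uint16_t range\n";
                return;
            }
            const Name mx(lexer.getNextToken(MasterToken::STRING).getString());
            std::cout << "MX " << pref << " " << mx << "\n";
        } catch (const MasterLexer::LexerError& error) {
            // error.token_ is a MasterToken of type ERROR describing the
            // reason; the offending token has been "ungotten" where the
            // documentation above says so.
            std::cerr << "parse error: " << error.token_.getErrorText()
                      << "\n";
        }
    }
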
diff --git a/src/lib/dns/master_lexer_state.h b/src/lib/dns/master_lexer_state.h
index 2a64c9d..f296c1c 100644
--- a/src/lib/dns/master_lexer_state.h
+++ b/src/lib/dns/master_lexer_state.h
@@ -43,10 +43,10 @@ namespace master_lexer_internal {
/// state, so it makes more sense to separate the interface for the transition
/// from the initial state.
///
-/// When an object of a specific state class completes the session, it
-/// normally sets the identified token in the lexer, and returns NULL;
-/// if more transition is necessary, it returns a pointer to the next state
-/// object.
+/// If the whole lexer transition is completed within start(), it sets the
+/// identified token and returns NULL; otherwise it returns a pointer to
+/// an object of a specific state class that completes the session
+/// on the call of handle().
///
/// As is usual in the state design pattern, the \c State class is made
/// a friend class of \c MasterLexer and can refer to its internal details.
@@ -119,7 +119,7 @@ public:
/// purposes.
///@{
bool wasLastEOL(const MasterLexer& lexer) const;
- const MasterLexer::Token& getToken(const MasterLexer& lexer) const;
+ const MasterToken& getToken(const MasterLexer& lexer) const;
size_t getParenCount(const MasterLexer& lexer) const;
///@}
diff --git a/src/lib/dns/rdata.cc b/src/lib/dns/rdata.cc
index f8deec6..081f855 100644
--- a/src/lib/dns/rdata.cc
+++ b/src/lib/dns/rdata.cc
@@ -12,6 +12,20 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/master_lexer.h>
+#include <dns/rdata.h>
+#include <dns/rrparamregistry.h>
+#include <dns/rrtype.h>
+
+#include <boost/lexical_cast.hpp>
+#include <boost/shared_ptr.hpp>
+
#include <algorithm>
#include <cctype>
#include <string>
@@ -24,16 +38,6 @@
#include <stdint.h>
#include <string.h>
-#include <boost/lexical_cast.hpp>
-#include <boost/shared_ptr.hpp>
-
-#include <util/buffer.h>
-#include <dns/name.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rrparamregistry.h>
-#include <dns/rrtype.h>
-
using namespace std;
using boost::lexical_cast;
using namespace isc::util;
@@ -81,23 +85,92 @@ createRdata(const RRType& rrtype, const RRClass& rrclass, const Rdata& source)
source));
}
+namespace {
+void
+fromtextError(bool& error_issued, const MasterLexer& lexer,
+ MasterLoaderCallbacks& callbacks,
+ const MasterToken* token, const char* reason)
+{
+ // Don't be too noisy if there are many issues for single RDATA
+ if (error_issued) {
+ return;
+ }
+ error_issued = true;
+
+ if (token == NULL) {
+ callbacks.error(lexer.getSourceName(), lexer.getSourceLine(),
+ "createRdata from text failed: " + string(reason));
+ return;
+ }
+
+ switch (token->getType()) {
+ case MasterToken::STRING:
+ case MasterToken::QSTRING:
+ callbacks.error(lexer.getSourceName(), lexer.getSourceLine(),
+ "createRdata from text failed near '" +
+ token->getString() + "': " + string(reason));
+ break;
+ case MasterToken::ERROR:
+ callbacks.error(lexer.getSourceName(), lexer.getSourceLine(),
+ "createRdata from text failed: " +
+ token->getErrorText());
+ break;
+ default:
+ // This case shouldn't happen based on how we use MasterLexer in
+ // createRdata(), so we could assert() that here. But since it
+ // depends on detailed behavior of other classes, we treat the case
+ // in a bit less harsh way.
+ isc_throw(Unexpected, "bug: createRdata() saw unexpected token type");
+ }
+}
+}
+
RdataPtr
createRdata(const RRType& rrtype, const RRClass& rrclass,
MasterLexer& lexer, const Name* origin,
MasterLoader::Options options,
MasterLoaderCallbacks& callbacks)
{
- RdataPtr ret;
+ RdataPtr rdata;
+ bool error_issued = false;
try {
- ret = RRParamRegistry::getRegistry().createRdata(rrtype, rrclass,
- lexer, origin,
- options, callbacks);
- } catch (...) {
- // ret is NULL here.
+ rdata = RRParamRegistry::getRegistry().createRdata(
+ rrtype, rrclass, lexer, origin, options, callbacks);
+ } catch (const MasterLexer::LexerError& error) {
+ fromtextError(error_issued, lexer, callbacks, &error.token_, "");
+ } catch (const Exception& ex) {
+ // Catching all isc::Exception is too broad, but right now we don't
+ // have better granularity. When we complete #2518 we can make this
+ // finer.
+ fromtextError(error_issued, lexer, callbacks, NULL, ex.what());
}
+ // Other exceptions mean a serious implementation bug or fatal system
+ // error; it doesn't make sense to catch and try to recover from them
+ // here. Just propagate.
+
+ // Consume to end of line / file.
+ // Call callback via fromtextError once if there was an error.
+ do {
+ const MasterToken& token = lexer.getNextToken();
+ switch (token.getType()) {
+ case MasterToken::END_OF_LINE:
+ return (rdata);
+ case MasterToken::END_OF_FILE:
+ callbacks.warning(lexer.getSourceName(), lexer.getSourceLine(),
+ "file does not end with newline");
+ return (rdata);
+ default:
+ rdata.reset(); // we'll return NULL
+ fromtextError(error_issued, lexer, callbacks, &token,
+ "extra input text");
+ // Continue until we see EOL or EOF
+ }
+ } while (true);
- return (ret);
+ // We shouldn't reach here
+ assert(false);
+ return (RdataPtr()); // add explicit return to silence some compilers
}
int
@@ -211,9 +284,10 @@ Generic::Generic(MasterLexer& lexer, const Name*,
std::string s;
while (true) {
- const MasterLexer::Token& token = lexer.getNextToken();
- if ((token.getType() == MasterLexer::Token::END_OF_FILE) ||
- (token.getType() == MasterLexer::Token::END_OF_LINE)) {
+ const MasterToken& token = lexer.getNextToken();
+ if ((token.getType() == MasterToken::END_OF_FILE) ||
+ (token.getType() == MasterToken::END_OF_LINE)) {
+ lexer.ungetToken(); // let the upper layer handle the end-of-line/file token
break;
}
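
A caller-side sketch of the contract implemented above: createRdata() either
returns the new Rdata or reports through the callbacks and returns a NULL
pointer, and in both cases it leaves the lexer just past the end of the line.
The parseOneAAAA helper below is hypothetical; the options and callbacks
values are taken as parameters because their construction is not shown in
this diff:

    #include <dns/master_lexer.h>
    #include <dns/master_loader.h>
    #include <dns/rdata.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>

    using namespace isc::dns;

    // Parse one AAAA RDATA starting at the lexer's current position.  On a
    // syntax problem createRdata() reports through 'callbacks' and returns
    // a NULL pointer; either way the lexer ends up just past the end of the
    // RDATA line, so the caller can simply continue with the next RR.
    rdata::RdataPtr
    parseOneAAAA(MasterLexer& lexer, MasterLoader::Options options,
                 MasterLoaderCallbacks& callbacks) {
        return (rdata::createRdata(RRType::AAAA(), RRClass::IN(), lexer,
                                   NULL, options, callbacks));
    }
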
diff --git a/src/lib/dns/rdata.h b/src/lib/dns/rdata.h
index e7811c9..4cd63cc 100644
--- a/src/lib/dns/rdata.h
+++ b/src/lib/dns/rdata.h
@@ -485,8 +485,47 @@ RdataPtr createRdata(const RRType& rrtype, const RRClass& rrclass,
RdataPtr createRdata(const RRType& rrtype, const RRClass& rrclass,
const Rdata& source);
-/// \brief Create RDATA of a given pair of RR type and class from the
+/// \brief Create RDATA of a given pair of RR type and class using the
/// master lexer.
+///
+/// This is a more generic form of factory from textual RDATA, and is mainly
+/// intended to be used internally by the master file parser (\c MasterLoader)
+/// of this library.
+///
+/// The \c lexer is expected to be at the beginning of textual RDATA of the
+/// specified type and class. This function (and its underlying Rdata
+/// implementations) extracts necessary tokens from the lexer and constructs
+/// the RDATA from them.
+///
+/// Due to the intended usage of this version, this function handles error
+/// cases quite differently from other versions. It internally catches
+ /// most syntax and semantic errors of the input (reported as exceptions),
+/// calls the corresponding callback specified by the \c callbacks parameters,
+/// and returns a NULL smart pointer. If the caller rather wants to get
+/// an exception in these cases, it can pass a callback that internally
+/// throws on error. Some critical exceptions such as \c std::bad_alloc are
+/// still propagated to the upper layer as it doesn't make sense to try
+/// recovery from such a situation within this function.
+///
+/// Whether or not the creation succeeds, this function updates the lexer
+/// until it reaches either the end of line or file, starting from the end of
+/// the RDATA text (or the point of failure if the parsing fails in the
+/// middle of it). The caller can therefore assume it's ready for reading
+/// the next data (which is normally a subsequent RR in the zone file) on
+/// return, whether or not this function succeeds.
+///
+/// \param rrtype An \c RRType object specifying the type/class pair.
+/// \param rrclass An \c RRClass object specifying the type/class pair.
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of any domain name fields
+/// of the RDATA that are non absolute.
+/// \param options Master loader options controlling how to deal with errors
+/// or non critical issues in the parsed RDATA.
+/// \param callbacks Callback to be called when an error or non critical issue
+/// is found.
+/// \return An \c RdataPtr object pointing to the created
+/// \c Rdata object. Will be NULL if parsing fails.
RdataPtr createRdata(const RRType& rrtype, const RRClass& rrclass,
MasterLexer& lexer, const Name* origin,
MasterLoader::Options options,
diff --git a/src/lib/dns/rdata/in_1/aaaa_28.cc b/src/lib/dns/rdata/in_1/aaaa_28.cc
index ce49a04..0466f1a 100644
--- a/src/lib/dns/rdata/in_1/aaaa_28.cc
+++ b/src/lib/dns/rdata/in_1/aaaa_28.cc
@@ -12,6 +12,15 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/master_lexer.h>
+#include <dns/master_loader.h>
+
#include <stdint.h>
#include <string.h>
@@ -20,14 +29,6 @@
#include <arpa/inet.h> // XXX: for inet_pton/ntop(), not exist in C++ standards
#include <sys/socket.h> // for AF_INET/AF_INET6
-#include <exceptions/exceptions.h>
-
-#include <util/buffer.h>
-#include <dns/exceptions.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-
using namespace std;
using namespace isc::util;
@@ -42,6 +43,16 @@ AAAA::AAAA(const std::string& addrstr) {
}
}
+AAAA::AAAA(MasterLexer& lexer, const Name*,
+ MasterLoader::Options, MasterLoaderCallbacks&)
+{
+ const MasterToken& token = lexer.getNextToken(MasterToken::STRING);
+ if (inet_pton(AF_INET6, token.getStringRegion().beg, &addr_) != 1) {
+ isc_throw(InvalidRdataText, "Failed to convert '"
+ << token.getString() << "' to IN/AAAA RDATA");
+ }
+}
+
AAAA::AAAA(InputBuffer& buffer, size_t rdata_len) {
if (rdata_len != sizeof(addr_)) {
isc_throw(DNSMessageFORMERR,
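
For illustration, the constructor added above can be driven directly from
text by pushing a string stream into a lexer. The aaaaFromText helper below
is hypothetical; options and callbacks are passed in by the caller since
their construction is outside this diff (the new AAAA constructor does not
use them):

    #include <dns/master_lexer.h>
    #include <dns/master_loader.h>
    #include <dns/rdataclass.h>

    #include <sstream>
    #include <string>

    using namespace isc::dns;

    // Build an in::AAAA directly from its master-file text using the
    // lexer-based constructor added in this branch.
    rdata::in::AAAA
    aaaaFromText(const std::string& text, MasterLoader::Options options,
                 MasterLoaderCallbacks& callbacks) {
        std::istringstream ss(text);       // e.g. "2001:db8::1\n"
        MasterLexer lexer;
        lexer.pushSource(ss);
        return (rdata::in::AAAA(lexer, NULL, options, callbacks));
    }
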
diff --git a/src/lib/dns/rdata/template.cc b/src/lib/dns/rdata/template.cc
index ee1097e..6486e6a 100644
--- a/src/lib/dns/rdata/template.cc
+++ b/src/lib/dns/rdata/template.cc
@@ -34,6 +34,11 @@ using namespace isc::util;
// If you added member functions specific to this derived class, you'll need
// to implement them here, of course.
+MyType::MyType(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options options, MasterLoaderCallbacks& callbacks)
+{
+}
+
MyType::MyType(const string& type_str) {
}
diff --git a/src/lib/dns/rrparamregistry-placeholder.cc b/src/lib/dns/rrparamregistry-placeholder.cc
index ed59f5d..16ec23c 100644
--- a/src/lib/dns/rrparamregistry-placeholder.cc
+++ b/src/lib/dns/rrparamregistry-placeholder.cc
@@ -51,9 +51,10 @@ AbstractRdataFactory::create(MasterLexer& lexer, const Name*,
std::string s;
while (true) {
- const MasterLexer::Token& token = lexer.getNextToken();
- if ((token.getType() == MasterLexer::Token::END_OF_FILE) ||
- (token.getType() == MasterLexer::Token::END_OF_LINE)) {
+ const MasterToken& token = lexer.getNextToken();
+ if ((token.getType() == MasterToken::END_OF_FILE) ||
+ (token.getType() == MasterToken::END_OF_LINE)) {
+ lexer.ungetToken(); // let the upper layer handle the end-of-line/file token
break;
}
diff --git a/src/lib/dns/rrparamregistry.h b/src/lib/dns/rrparamregistry.h
index e156dc9..56ae981 100644
--- a/src/lib/dns/rrparamregistry.h
+++ b/src/lib/dns/rrparamregistry.h
@@ -119,10 +119,22 @@ public:
/// \return An \c RdataPtr object pointing to the created \c Rdata object.
virtual RdataPtr create(const rdata::Rdata& source) const = 0;
- /// \brief Create RDATA from MasterLexer
- virtual RdataPtr create(MasterLexer& lexer, const Name*,
- MasterLoader::Options,
- MasterLoaderCallbacks&) const;
+ /// \brief Create RDATA using MasterLexer.
+ ///
+ /// This version of the method defines the entry point of factory
+ /// of a specific RR type and class for \c RRParamRegistry::createRdata()
+ /// that uses \c MasterLexer. See its description for the expected
+ /// behavior and meaning of the parameters.
+ ///
+ /// \note Right now this is not defined as a pure virtual method and
+ /// provides the default implementation. This is an intermediate
+ /// workaround until we implement the underlying constructor for all
+ /// supported \c Rdata classes; once it's completed the workaround
+ /// default implementation should be removed and this method should become
+ /// pure virtual.
+ virtual RdataPtr create(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options options,
+ MasterLoaderCallbacks& callbacks) const;
//@}
};
@@ -504,9 +516,20 @@ public:
rdata::RdataPtr createRdata(const RRType& rrtype, const RRClass& rrclass,
const rdata::Rdata& source);
- /// \brief Create RDATA from MasterLexer
+ /// \brief Create RDATA using MasterLexer
+ ///
+ /// This method is expected to be used as the underlying implementation
+ /// of the \c rdata::createRdata() variant with the same signature. One
+ /// main difference is that this method is only responsible for
+ /// constructing the Rdata; it neither advances the lexer to the end of
+ /// the line or file nor cares whether an extra (garbage) token follows
+ /// the textual RDATA representation. Another difference is that this
+ /// method throws on error and never returns a NULL pointer.
+ ///
+ /// For other details and parameters, see the description of
+ /// \c rdata::createRdata().
rdata::RdataPtr createRdata(const RRType& rrtype, const RRClass& rrclass,
- MasterLexer& lexer, const Name* name,
+ MasterLexer& lexer, const Name* origin,
MasterLoader::Options options,
MasterLoaderCallbacks& callbacks);
//@}
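To make the contract described in the comments above concrete, a caller of the lexer-based RRParamRegistry::createRdata() might look roughly like the sketch below: the registry call only constructs the Rdata and throws on error, while the caller itself rejects trailing garbage and leaves the end-of-line/file token for its own caller. This is only an illustration of the documented behavior, not the actual implementation in rdata.cc; the helper name createRdataFromLexer and the exact error strings are assumptions.

// Sketch of caller-side handling around RRParamRegistry::createRdata().
RdataPtr
createRdataFromLexer(const RRType& rrtype, const RRClass& rrclass,
                     MasterLexer& lexer, const Name* origin,
                     MasterLoader::Options options,
                     MasterLoaderCallbacks& callbacks)
{
    RdataPtr rdata;
    try {
        // Only constructs the Rdata; does not consume the rest of the line.
        rdata = RRParamRegistry::getRegistry().createRdata(
            rrtype, rrclass, lexer, origin, options, callbacks);
    } catch (const isc::Exception& ex) {
        callbacks.error(lexer.getSourceName(), lexer.getSourceLine(),
                        std::string("createRdata from text failed: ") +
                        ex.what());
        return (RdataPtr());
    }

    // The caller is responsible for rejecting extra (garbage) tokens and
    // for positioning the lexer at the end-of-line/file token.
    const MasterToken& token = lexer.getNextToken();
    if (token.getType() != MasterToken::END_OF_LINE &&
        token.getType() != MasterToken::END_OF_FILE) {
        callbacks.error(lexer.getSourceName(), lexer.getSourceLine(),
                        "extra input text");
        return (RdataPtr());
    }
    lexer.ungetToken();         // let the upper layer see the EOL/EOF token
    return (rdata);
}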
diff --git a/src/lib/dns/tests/master_lexer_state_unittest.cc b/src/lib/dns/tests/master_lexer_state_unittest.cc
index d8a6b66..846c4c2 100644
--- a/src/lib/dns/tests/master_lexer_state_unittest.cc
+++ b/src/lib/dns/tests/master_lexer_state_unittest.cc
@@ -24,7 +24,7 @@ using namespace isc::dns;
using namespace master_lexer_internal;
namespace {
-typedef MasterLexer::Token Token; // shortcut
+typedef MasterToken Token; // shortcut
class MasterLexerStateTest : public ::testing::Test {
protected:
@@ -260,7 +260,7 @@ TEST_F(MasterLexerStateTest, crlf) {
// Commonly used check for string related test cases, checking if the given
// token has expected values.
void
-stringTokenCheck(const std::string& expected, const MasterLexer::Token& token,
+stringTokenCheck(const std::string& expected, const MasterToken& token,
bool quoted = false)
{
EXPECT_EQ(quoted ? Token::QSTRING : Token::STRING, token.getType());
@@ -269,6 +269,10 @@ stringTokenCheck(const std::string& expected, const MasterLexer::Token& token,
token.getStringRegion().beg +
token.getStringRegion().len);
EXPECT_EQ(expected, actual);
+
+ // There should be a "hidden" nul-terminator after the string data.
+ ASSERT_NE(static_cast<const char*>(NULL), token.getStringRegion().beg);
+ EXPECT_EQ(0, *(token.getStringRegion().beg + token.getStringRegion().len));
}
TEST_F(MasterLexerStateTest, string) {
@@ -365,6 +369,7 @@ TEST_F(MasterLexerStateTest, stringEscape) {
TEST_F(MasterLexerStateTest, quotedString) {
ss << "\"ignore-quotes\"\n";
ss << "\"quoted string\" "; // space is part of the qstring
+ ss << "\"\" "; // empty quoted string
// also check other separator characters. note that \r doesn't cause
// UNBALANCED_QUOTES. Not sure if it's intentional, but that's how the
// BIND 9 version works, so we follow it (it should be too minor to matter
@@ -391,6 +396,11 @@ TEST_F(MasterLexerStateTest, quotedString) {
s_qstring.handle(lexer);
stringTokenCheck("quoted string", s_string.getToken(lexer), true);
+ // Empty string is okay as qstring
+ EXPECT_EQ(&s_qstring, State::start(lexer, options));
+ s_qstring.handle(lexer);
+ stringTokenCheck("", s_string.getToken(lexer), true);
+
// Also checks other separator characters within a qstring
EXPECT_EQ(&s_qstring, State::start(lexer, options));
s_qstring.handle(lexer);
diff --git a/src/lib/dns/tests/master_lexer_token_unittest.cc b/src/lib/dns/tests/master_lexer_token_unittest.cc
index 1f022df..89a4f9c 100644
--- a/src/lib/dns/tests/master_lexer_token_unittest.cc
+++ b/src/lib/dns/tests/master_lexer_token_unittest.cc
@@ -31,27 +31,27 @@ const size_t TEST_STRING_LEN = sizeof(TEST_STRING) - 1;
class MasterLexerTokenTest : public ::testing::Test {
protected:
MasterLexerTokenTest() :
- token_eof(MasterLexer::Token::END_OF_FILE),
+ token_eof(MasterToken::END_OF_FILE),
token_str(TEST_STRING, TEST_STRING_LEN),
token_num(42),
- token_err(MasterLexer::Token::UNEXPECTED_END)
+ token_err(MasterToken::UNEXPECTED_END)
{}
- const MasterLexer::Token token_eof; // an example of non-value type token
- const MasterLexer::Token token_str;
- const MasterLexer::Token token_num;
- const MasterLexer::Token token_err;
+ const MasterToken token_eof; // an example of non-value type token
+ const MasterToken token_str;
+ const MasterToken token_num;
+ const MasterToken token_err;
};
TEST_F(MasterLexerTokenTest, strings) {
// basic construction and getter checks
- EXPECT_EQ(MasterLexer::Token::STRING, token_str.getType());
+ EXPECT_EQ(MasterToken::STRING, token_str.getType());
EXPECT_EQ(std::string("string token"), token_str.getString());
std::string strval = "dummy"; // this should be replaced
token_str.getString(strval);
EXPECT_EQ(std::string("string token"), strval);
- const MasterLexer::Token::StringRegion str_region =
+ const MasterToken::StringRegion str_region =
token_str.getStringRegion();
EXPECT_EQ(TEST_STRING, str_region.beg);
EXPECT_EQ(TEST_STRING_LEN, str_region.len);
@@ -62,17 +62,17 @@ TEST_F(MasterLexerTokenTest, strings) {
std::string expected_str("string token");
expected_str.push_back('\0');
EXPECT_EQ(expected_str,
- MasterLexer::Token(TEST_STRING, TEST_STRING_LEN + 1).getString());
- MasterLexer::Token(TEST_STRING, TEST_STRING_LEN + 1).getString(strval);
+ MasterToken(TEST_STRING, TEST_STRING_LEN + 1).getString());
+ MasterToken(TEST_STRING, TEST_STRING_LEN + 1).getString(strval);
EXPECT_EQ(expected_str, strval);
// Construct type of qstring
- EXPECT_EQ(MasterLexer::Token::QSTRING,
- MasterLexer::Token(TEST_STRING, sizeof(TEST_STRING), true).
+ EXPECT_EQ(MasterToken::QSTRING,
+ MasterToken(TEST_STRING, sizeof(TEST_STRING), true).
getType());
// if we explicitly set 'quoted' to false, it should be normal string
- EXPECT_EQ(MasterLexer::Token::STRING,
- MasterLexer::Token(TEST_STRING, sizeof(TEST_STRING), false).
+ EXPECT_EQ(MasterToken::STRING,
+ MasterToken(TEST_STRING, sizeof(TEST_STRING), false).
getType());
// getString/StringRegion() aren't allowed for non string(-variant) types
@@ -86,23 +86,23 @@ TEST_F(MasterLexerTokenTest, strings) {
TEST_F(MasterLexerTokenTest, numbers) {
EXPECT_EQ(42, token_num.getNumber());
- EXPECT_EQ(MasterLexer::Token::NUMBER, token_num.getType());
+ EXPECT_EQ(MasterToken::NUMBER, token_num.getType());
// It's copyable and assignable.
- MasterLexer::Token token(token_num);
+ MasterToken token(token_num);
EXPECT_EQ(42, token.getNumber());
- EXPECT_EQ(MasterLexer::Token::NUMBER, token.getType());
+ EXPECT_EQ(MasterToken::NUMBER, token.getType());
token = token_num;
EXPECT_EQ(42, token.getNumber());
- EXPECT_EQ(MasterLexer::Token::NUMBER, token.getType());
+ EXPECT_EQ(MasterToken::NUMBER, token.getType());
// it's okay to replace it with a different type of token
token = token_eof;
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, token.getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, token.getType());
// Possible max value
- token = MasterLexer::Token(0xffffffff);
+ token = MasterToken(0xffffffff);
EXPECT_EQ(4294967295u, token.getNumber());
// getNumber() isn't allowed for non number types
@@ -112,58 +112,52 @@ TEST_F(MasterLexerTokenTest, numbers) {
TEST_F(MasterLexerTokenTest, novalues) {
// Just checking we can construct them and getType() returns correct value.
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, token_eof.getType());
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE,
- MasterLexer::Token(MasterLexer::Token::END_OF_LINE).getType());
- EXPECT_EQ(MasterLexer::Token::INITIAL_WS,
- MasterLexer::Token(MasterLexer::Token::INITIAL_WS).getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, token_eof.getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE,
+ MasterToken(MasterToken::END_OF_LINE).getType());
+ EXPECT_EQ(MasterToken::INITIAL_WS,
+ MasterToken(MasterToken::INITIAL_WS).getType());
// Special types of tokens cannot have value-based types
- EXPECT_THROW(MasterLexer::Token t(MasterLexer::Token::STRING),
- isc::InvalidParameter);
- EXPECT_THROW(MasterLexer::Token t(MasterLexer::Token::QSTRING),
- isc::InvalidParameter);
- EXPECT_THROW(MasterLexer::Token t(MasterLexer::Token::NUMBER),
- isc::InvalidParameter);
- EXPECT_THROW(MasterLexer::Token t(MasterLexer::Token::ERROR),
- isc::InvalidParameter);
+ EXPECT_THROW(MasterToken t(MasterToken::STRING), isc::InvalidParameter);
+ EXPECT_THROW(MasterToken t(MasterToken::QSTRING), isc::InvalidParameter);
+ EXPECT_THROW(MasterToken t(MasterToken::NUMBER), isc::InvalidParameter);
+ EXPECT_THROW(MasterToken t(MasterToken::ERROR), isc::InvalidParameter);
}
TEST_F(MasterLexerTokenTest, errors) {
- EXPECT_EQ(MasterLexer::Token::ERROR, token_err.getType());
- EXPECT_EQ(MasterLexer::Token::UNEXPECTED_END, token_err.getErrorCode());
+ EXPECT_EQ(MasterToken::ERROR, token_err.getType());
+ EXPECT_EQ(MasterToken::UNEXPECTED_END, token_err.getErrorCode());
EXPECT_EQ("unexpected end of input", token_err.getErrorText());
- EXPECT_EQ("lexer not started",
- MasterLexer::Token(MasterLexer::Token::NOT_STARTED).
+ EXPECT_EQ("lexer not started", MasterToken(MasterToken::NOT_STARTED).
getErrorText());
EXPECT_EQ("unbalanced parentheses",
- MasterLexer::Token(MasterLexer::Token::UNBALANCED_PAREN).
+ MasterToken(MasterToken::UNBALANCED_PAREN).
getErrorText());
- EXPECT_EQ("unbalanced quotes",
- MasterLexer::Token(MasterLexer::Token::UNBALANCED_QUOTES).
+ EXPECT_EQ("unbalanced quotes", MasterToken(MasterToken::UNBALANCED_QUOTES).
getErrorText());
- EXPECT_EQ("no token produced",
- MasterLexer::Token(MasterLexer::Token::NO_TOKEN_PRODUCED).
+ EXPECT_EQ("no token produced", MasterToken(MasterToken::NO_TOKEN_PRODUCED).
getErrorText());
EXPECT_EQ("number out of range",
- MasterLexer::Token(MasterLexer::Token::NUMBER_OUT_OF_RANGE).
+ MasterToken(MasterToken::NUMBER_OUT_OF_RANGE).
getErrorText());
+ EXPECT_EQ("not a valid number",
+ MasterToken(MasterToken::BAD_NUMBER).getErrorText());
// getErrorCode/Text() isn't allowed for non error types
EXPECT_THROW(token_num.getErrorCode(), isc::InvalidOperation);
EXPECT_THROW(token_num.getErrorText(), isc::InvalidOperation);
- // Only the pre-defined error code is accepted. Hardcoding '6' (max code
+ // Only the pre-defined error code is accepted. Hardcoding '7' (max code
// + 1) is intentional; it'd be actually better if we notice it when we
// update the enum list (which shouldn't happen too often).
- EXPECT_THROW(MasterLexer::Token(MasterLexer::Token::ErrorCode(6)),
+ EXPECT_THROW(MasterToken(MasterToken::ErrorCode(7)),
isc::InvalidParameter);
// Check the coexistence of "from number" and "from error-code"
// constructors won't cause confusion.
- EXPECT_EQ(MasterLexer::Token::NUMBER,
- MasterLexer::Token(static_cast<uint32_t>(
- MasterLexer::Token::NOT_STARTED)).
+ EXPECT_EQ(MasterToken::NUMBER,
+ MasterToken(static_cast<uint32_t>(MasterToken::NOT_STARTED)).
getType());
}
}
diff --git a/src/lib/dns/tests/master_lexer_unittest.cc b/src/lib/dns/tests/master_lexer_unittest.cc
index eca6a73..b2415da 100644
--- a/src/lib/dns/tests/master_lexer_unittest.cc
+++ b/src/lib/dns/tests/master_lexer_unittest.cc
@@ -141,19 +141,19 @@ TEST_F(MasterLexerTest, getNextToken) {
lexer.pushSource(ss);
// First, the newline should get out.
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// Then the whitespace, if we specify the option.
- EXPECT_EQ(MasterLexer::Token::INITIAL_WS,
+ EXPECT_EQ(MasterToken::INITIAL_WS,
lexer.getNextToken(MasterLexer::INITIAL_WS).getType());
// The newline
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// The (quoted) string
- EXPECT_EQ(MasterLexer::Token::QSTRING,
+ EXPECT_EQ(MasterToken::QSTRING,
lexer.getNextToken(MasterLexer::QSTRING).getType());
// And the end of line and file
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, lexer.getNextToken().getType());
}
// Test we correctly find end of file.
@@ -162,12 +162,12 @@ TEST_F(MasterLexerTest, eof) {
lexer.pushSource(ss);
// The first one is found to be EOF
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, lexer.getNextToken().getType());
// And it stays on EOF for any following attempts
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, lexer.getNextToken().getType());
// And we can step back one token, but that is the EOF too.
lexer.ungetToken();
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, lexer.getNextToken().getType());
}
// Check we properly return error when there's an opened parentheses and no
@@ -177,12 +177,12 @@ TEST_F(MasterLexerTest, getUnbalancedParen) {
lexer.pushSource(ss);
// The string gets out first
- EXPECT_EQ(MasterLexer::Token::STRING, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::STRING, lexer.getNextToken().getType());
// Then an unbalanced parenthesis
- EXPECT_EQ(MasterLexer::Token::UNBALANCED_PAREN,
+ EXPECT_EQ(MasterToken::UNBALANCED_PAREN,
lexer.getNextToken().getErrorCode());
// And then EOF
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, lexer.getNextToken().getType());
}
// Check we properly return error when there's an opened quoted string and no
@@ -192,10 +192,10 @@ TEST_F(MasterLexerTest, getUnbalancedString) {
lexer.pushSource(ss);
// Then an unbalanced qstring (reported as an unexpected end)
- EXPECT_EQ(MasterLexer::Token::UNEXPECTED_END,
+ EXPECT_EQ(MasterToken::UNEXPECTED_END,
lexer.getNextToken(MasterLexer::QSTRING).getErrorCode());
// And then EOF
- EXPECT_EQ(MasterLexer::Token::END_OF_FILE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_FILE, lexer.getNextToken().getType());
}
// Test ungetting tokens works
@@ -204,28 +204,28 @@ TEST_F(MasterLexerTest, ungetToken) {
lexer.pushSource(ss);
// Try getting the newline
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// Return it and get again
lexer.ungetToken();
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// Get the string and return it back
- EXPECT_EQ(MasterLexer::Token::QSTRING,
+ EXPECT_EQ(MasterToken::QSTRING,
lexer.getNextToken(MasterLexer::QSTRING).getType());
lexer.ungetToken();
// But if we change the options, it honors them
- EXPECT_EQ(MasterLexer::Token::INITIAL_WS,
+ EXPECT_EQ(MasterToken::INITIAL_WS,
lexer.getNextToken(MasterLexer::QSTRING |
MasterLexer::INITIAL_WS).getType());
// Get to the "more" string
- EXPECT_EQ(MasterLexer::Token::QSTRING,
+ EXPECT_EQ(MasterToken::QSTRING,
lexer.getNextToken(MasterLexer::QSTRING).getType());
- EXPECT_EQ(MasterLexer::Token::STRING,
+ EXPECT_EQ(MasterToken::STRING,
lexer.getNextToken(MasterLexer::QSTRING).getType());
// Return it back. It should get inside the parentheses.
// Upon next attempt to get it again, the newline inside the parentheses
// should be still ignored.
lexer.ungetToken();
- EXPECT_EQ(MasterLexer::Token::STRING,
+ EXPECT_EQ(MasterToken::STRING,
lexer.getNextToken(MasterLexer::QSTRING).getType());
}
@@ -235,16 +235,16 @@ TEST_F(MasterLexerTest, ungetRealOptions) {
ss << "\n \n";
lexer.pushSource(ss);
// Skip the first newline
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// If we call it the usual way, it skips up to the newline and returns
// it
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// Now we return it. If we call it again, but with different options,
// we get the initial whitespace.
lexer.ungetToken();
- EXPECT_EQ(MasterLexer::Token::INITIAL_WS,
+ EXPECT_EQ(MasterToken::INITIAL_WS,
lexer.getNextToken(MasterLexer::INITIAL_WS).getType());
}
@@ -253,7 +253,7 @@ TEST_F(MasterLexerTest, ungetTwice) {
ss << "\n";
lexer.pushSource(ss);
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// Unget the token. It can be done once
lexer.ungetToken();
// But not twice
@@ -271,17 +271,164 @@ TEST_F(MasterLexerTest, ungetBeforeGet) {
TEST_F(MasterLexerTest, ungetAfterSwitch) {
ss << "\n\n";
lexer.pushSource(ss);
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// Switch the source
std::stringstream ss2;
ss2 << "\n\n";
lexer.pushSource(ss2);
EXPECT_THROW(lexer.ungetToken(), isc::InvalidOperation);
// We can get from the new source
- EXPECT_EQ(MasterLexer::Token::END_OF_LINE, lexer.getNextToken().getType());
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
// And when we drop the current source, we can't unget again
lexer.popSource();
EXPECT_THROW(lexer.ungetToken(), isc::InvalidOperation);
}
+// Common checks for the case when getNextToken() should result in LexerError
+void
+lexerErrorCheck(MasterLexer& lexer, MasterToken::Type expect,
+ MasterToken::ErrorCode expected_error)
+{
+ bool thrown = false;
+ try {
+ lexer.getNextToken(expect);
+ } catch (const MasterLexer::LexerError& error) {
+ EXPECT_EQ(expected_error, error.token_.getErrorCode());
+ thrown = true;
+ }
+ EXPECT_TRUE(thrown);
+}
+
+// Common checks regarding expected/unexpected end-of-line
+//
+// The 'lexer' should be at a position before two consecutive '\n's.
+// The first one will be recognized, and the second one will be considered an
+// unexpected token. Then this helper consumes the second '\n', so the caller
+// can continue the test after these '\n's.
+void
+eolCheck(MasterLexer& lexer, MasterToken::Type expect) {
+ // If EOL is found and eol_ok is true, we get it.
+ EXPECT_EQ(MasterToken::END_OF_LINE,
+ lexer.getNextToken(expect, true).getType());
+ // We'll see the second '\n'; by default it will fail.
+ EXPECT_THROW(lexer.getNextToken(expect), MasterLexer::LexerError);
+ // Same if eol_ok is explicitly set to false. This also checks the
+ // offending '\n' was "ungotten".
+ EXPECT_THROW(lexer.getNextToken(expect, false), MasterLexer::LexerError);
+
+ // And also check the error token set in the exception object.
+ lexerErrorCheck(lexer, expect, MasterToken::UNEXPECTED_END);
+
+ // Then skip the 2nd '\n'
+ EXPECT_EQ(MasterToken::END_OF_LINE, lexer.getNextToken().getType());
+}
+
+// Common checks regarding expected/unexpected end-of-file
+//
+// The 'lexer' should be at a position just before an end-of-file.
+void
+eofCheck(MasterLexer& lexer, MasterToken::Type expect) {
+ EXPECT_EQ(MasterToken::END_OF_FILE,
+ lexer.getNextToken(expect, true).getType());
+ EXPECT_THROW(lexer.getNextToken(expect), MasterLexer::LexerError);
+ EXPECT_THROW(lexer.getNextToken(expect, false), MasterLexer::LexerError);
+}
+
+TEST_F(MasterLexerTest, getNextTokenString) {
+ ss << "normal-string\n";
+ ss << "\n";
+ ss << "another-string";
+ lexer.pushSource(ss);
+
+ // Normal successful case: Expecting a string and get one.
+ EXPECT_EQ("normal-string",
+ lexer.getNextToken(MasterToken::STRING).getString());
+ eolCheck(lexer, MasterToken::STRING);
+
+ // Same set of tests but for end-of-file
+ EXPECT_EQ("another-string",
+ lexer.getNextToken(MasterToken::STRING, true).getString());
+ eofCheck(lexer, MasterToken::STRING);
+}
+
+TEST_F(MasterLexerTest, getNextTokenQString) {
+ ss << "\"quoted-string\"\n";
+ ss << "\n";
+ ss << "normal-string";
+ lexer.pushSource(ss);
+
+ // Expecting a quoted string and get one.
+ EXPECT_EQ("quoted-string",
+ lexer.getNextToken(MasterToken::QSTRING).getString());
+ eolCheck(lexer, MasterToken::QSTRING);
+
+ // Expecting a quoted string but see a normal string. It's okay.
+ EXPECT_EQ("normal-string",
+ lexer.getNextToken(MasterToken::QSTRING).getString());
+ eofCheck(lexer, MasterToken::QSTRING);
+}
+
+TEST_F(MasterLexerTest, getNextTokenNumber) {
+ ss << "3600\n";
+ ss << "\n";
+ ss << "4294967296 "; // =2^32, out of range
+ ss << "not-a-number ";
+ ss << "123abc "; // starting with digits, but resulting in a string
+ ss << "86400";
+ lexer.pushSource(ss);
+
+ // Expecting a number string and get one.
+ EXPECT_EQ(3600,
+ lexer.getNextToken(MasterToken::NUMBER).getNumber());
+ eolCheck(lexer, MasterToken::NUMBER);
+
+ // Expecting a number, but it's too big for uint32.
+ lexerErrorCheck(lexer, MasterToken::NUMBER,
+ MasterToken::NUMBER_OUT_OF_RANGE);
+ // The token should have been "ungotten". Re-read and skip it.
+ EXPECT_EQ(MasterToken::STRING, lexer.getNextToken().getType());
+
+ // Expecting a number, but see a string.
+ lexerErrorCheck(lexer, MasterToken::NUMBER, MasterToken::BAD_NUMBER);
+ // The unexpected string should have been "ungotten". Re-read and skip it.
+ EXPECT_EQ(MasterToken::STRING, lexer.getNextToken().getType());
+
+ // Expecting a number, but see a string.
+ lexerErrorCheck(lexer, MasterToken::NUMBER, MasterToken::BAD_NUMBER);
+ // The unexpected string should have been "ungotten". Re-read and skip it.
+ EXPECT_EQ(MasterToken::STRING, lexer.getNextToken().getType());
+
+ // Unless we specify NUMBER, a decimal number string should be recognized
+ // as a string.
+ EXPECT_EQ("86400",
+ lexer.getNextToken(MasterToken::STRING).getString());
+ eofCheck(lexer, MasterToken::NUMBER);
+}
+
+TEST_F(MasterLexerTest, getNextTokenErrors) {
+ // Check miscellaneous error cases
+
+ ss << ") "; // unbalanced parenthesis
+ ss << "string-after-error ";
+ lexer.pushSource(ss);
+
+ // Only string/qstring/number can be "expected".
+ EXPECT_THROW(lexer.getNextToken(MasterToken::END_OF_LINE),
+ isc::InvalidParameter);
+ EXPECT_THROW(lexer.getNextToken(MasterToken::END_OF_FILE),
+ isc::InvalidParameter);
+ EXPECT_THROW(lexer.getNextToken(MasterToken::INITIAL_WS),
+ isc::InvalidParameter);
+ EXPECT_THROW(lexer.getNextToken(MasterToken::ERROR),
+ isc::InvalidParameter);
+
+ // If it encounters a syntax error, it results in LexerError exception.
+ lexerErrorCheck(lexer, MasterToken::STRING, MasterToken::UNBALANCED_PAREN);
+
+ // Unlike the NUMBER_OUT_OF_RANGE case, the error part has been skipped
+ // within getNextToken(). We should be able to get the next token.
+ EXPECT_EQ("string-after-error",
+ lexer.getNextToken(MasterToken::STRING).getString());
+}
+
}
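As a quick illustration of the API these tests exercise, application code (for example an RDATA text parser) might use the typed getNextToken() overload roughly as follows. This is a sketch only; readTtlField() is a made-up helper, not something in the tree.

// Sketch: reading one numeric field with the typed getNextToken() overload.
uint32_t
readTtlField(MasterLexer& lexer) {
    try {
        // Throws MasterLexer::LexerError if the next token is not a number
        // (BAD_NUMBER), does not fit in uint32_t (NUMBER_OUT_OF_RANGE), or
        // the line/file ends unexpectedly (UNEXPECTED_END).
        return (lexer.getNextToken(MasterToken::NUMBER).getNumber());
    } catch (const MasterLexer::LexerError& ex) {
        // The offending token is available via the exception object.
        isc_throw(InvalidRdataText,
                  "expected a number: " << ex.token_.getErrorText());
    }
}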
diff --git a/src/lib/dns/tests/rdata_unittest.cc b/src/lib/dns/tests/rdata_unittest.cc
index bc91f7a..7f0dd65 100644
--- a/src/lib/dns/tests/rdata_unittest.cc
+++ b/src/lib/dns/tests/rdata_unittest.cc
@@ -29,6 +29,7 @@
#include <dns/tests/rdata_unittest.h>
#include <boost/bind.hpp>
+#include <boost/lexical_cast.hpp>
using isc::UnitTestUtil;
using namespace std;
@@ -82,6 +83,138 @@ createRdataUsingLexer(const RRType& rrtype, const RRClass& rrclass,
} // end of namespace isc::dns::rdata::test
+// A mock class to check parameters passed via loader callbacks. Its callback
+// records the passed parameters, allowing the test to check them later via
+// the check() method.
+class CreateRdataCallback {
+public:
+ enum CallbackType { NONE, ERROR, WARN };
+ CreateRdataCallback() : type_(NONE), line_(0) {}
+ void callback(CallbackType type, const string& source, size_t line,
+ const string& reason_txt) {
+ type_ = type;
+ source_ = source;
+ line_ = line;
+ reason_txt_ = reason_txt;
+ }
+
+ void clear() {
+ type_ = NONE;
+ source_.clear();
+ line_ = 0;
+ reason_txt_.clear();
+ }
+
+ // Return whether the callback has been called since the previous call
+ // to clear().
+ bool isCalled() const { return (type_ != NONE); }
+
+ void check(const string& expected_srcname, size_t expected_line,
+ CallbackType expected_type, const string& expected_reason)
+ const
+ {
+ EXPECT_EQ(expected_srcname, source_);
+ EXPECT_EQ(expected_line, line_);
+ EXPECT_EQ(expected_type, type_);
+ EXPECT_EQ(expected_reason, reason_txt_);
+ }
+
+private:
+ CallbackType type_;
+ string source_;
+ size_t line_;
+ string reason_txt_;
+};
+
+// Test class/type-independent behavior of createRdata().
+TEST_F(RdataTest, createRdataWithLexer) {
+ const in::AAAA aaaa_rdata("2001:db8::1");
+
+ stringstream ss;
+ const string src_name = "stream-" + boost::lexical_cast<string>(&ss);
+ ss << aaaa_rdata.toText() << "\n"; // valid case
+ ss << aaaa_rdata.toText() << "; comment, should be ignored\n";
+ ss << aaaa_rdata.toText() << " extra-token\n"; // extra token
+ ss << aaaa_rdata.toText() << " extra token\n"; // 2 extra tokens
+ ss << ")\n"; // causing lexer error in parsing the RDATA text
+ ss << "192.0.2.1\n"; // semantics error: IPv4 address is given for AAAA
+ ss << aaaa_rdata.toText(); // valid, but end with EOF, not EOL
+ lexer.pushSource(ss);
+
+ CreateRdataCallback callback;
+ MasterLoaderCallbacks callbacks(
+ boost::bind(&CreateRdataCallback::callback, &callback,
+ CreateRdataCallback::ERROR, _1, _2, _3),
+ boost::bind(&CreateRdataCallback::callback, &callback,
+ CreateRdataCallback::WARN, _1, _2, _3));
+
+ size_t line = 0;
+
+ // Valid case.
+ ++line;
+ ConstRdataPtr rdata = createRdata(RRType::AAAA(), RRClass::IN(), lexer,
+ NULL, MasterLoader::MANY_ERRORS,
+ callbacks);
+ EXPECT_EQ(0, aaaa_rdata.compare(*rdata));
+ EXPECT_FALSE(callback.isCalled());
+
+ // Similar to the previous case, but the RDATA is followed by a comment.
+ // It shouldn't cause any confusion.
+ ++line;
+ callback.clear();
+ rdata = createRdata(RRType::AAAA(), RRClass::IN(), lexer, NULL,
+ MasterLoader::MANY_ERRORS, callbacks);
+ EXPECT_EQ(0, aaaa_rdata.compare(*rdata));
+ EXPECT_FALSE(callback.isCalled());
+
+ // Broken RDATA text: extra token. createRdata() returns NULL, and the
+ // error callback is called.
+ ++line;
+ callback.clear();
+ EXPECT_FALSE(createRdata(RRType::AAAA(), RRClass::IN(), lexer, NULL,
+ MasterLoader::MANY_ERRORS, callbacks));
+ callback.check(src_name, line, CreateRdataCallback::ERROR,
+ "createRdata from text failed near 'extra-token': "
+ "extra input text");
+
+ // Similar to the previous case, but only the first extra token triggers
+ // the callback.
+ ++line;
+ callback.clear();
+ EXPECT_FALSE(createRdata(RRType::AAAA(), RRClass::IN(), lexer, NULL,
+ MasterLoader::MANY_ERRORS, callbacks));
+ callback.check(src_name, line, CreateRdataCallback::ERROR,
+ "createRdata from text failed near 'extra': "
+ "extra input text");
+
+ // A lexer error will happen, and the corresponding error callback will
+ // be triggered.
+ ++line;
+ callback.clear();
+ EXPECT_FALSE(createRdata(RRType::AAAA(), RRClass::IN(), lexer, NULL,
+ MasterLoader::MANY_ERRORS, callbacks));
+ callback.check(src_name, line, CreateRdataCallback::ERROR,
+ "createRdata from text failed: unbalanced parentheses");
+
+ // A semantics-level error will happen, and the corresponding error
+ // callback will be triggered.
+ ++line;
+ callback.clear();
+ EXPECT_FALSE(createRdata(RRType::AAAA(), RRClass::IN(), lexer, NULL,
+ MasterLoader::MANY_ERRORS, callbacks));
+ callback.check(src_name, line, CreateRdataCallback::ERROR,
+ "createRdata from text failed: Failed to convert "
+ "'192.0.2.1' to IN/AAAA RDATA");
+
+ // The input is valid and parsing will succeed, but with a warning that
+ // the file does not end with a newline.
+ ++line;
+ callback.clear();
+ rdata = createRdata(RRType::AAAA(), RRClass::IN(), lexer, NULL,
+ MasterLoader::MANY_ERRORS, callbacks);
+ EXPECT_EQ(0, aaaa_rdata.compare(*rdata));
+ callback.check(src_name, line, CreateRdataCallback::WARN,
+ "file does not end with newline");
+}
+
}
}
}
diff --git a/src/lib/dns/tests/rdata_unittest.h b/src/lib/dns/tests/rdata_unittest.h
index 3efb5d8..af19311 100644
--- a/src/lib/dns/tests/rdata_unittest.h
+++ b/src/lib/dns/tests/rdata_unittest.h
@@ -20,6 +20,7 @@
#include <dns/rrclass.h>
#include <dns/rrtype.h>
#include <dns/rdata.h>
+#include <dns/master_lexer.h>
#include <gtest/gtest.h>
@@ -40,6 +41,7 @@ protected:
/// This is an RDATA object of some "unknown" RR type so that it can be
/// used to test the compare() method against a well-known RR type.
RdataPtr rdata_nomatch;
+ MasterLexer lexer;
};
namespace test {
diff --git a/tests/tools/perfdhcp/Makefile.am b/tests/tools/perfdhcp/Makefile.am
index 08a21a4..c4b82b5 100644
--- a/tests/tools/perfdhcp/Makefile.am
+++ b/tests/tools/perfdhcp/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = . tests templates
+SUBDIRS = . tests
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
diff --git a/tests/tools/perfdhcp/templates/.gitignore b/tests/tools/perfdhcp/templates/.gitignore
deleted file mode 100644
index 6f865da..0000000
--- a/tests/tools/perfdhcp/templates/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-/test1.hex
-/test2.hex
-/test3.hex
-/test4.hex
-/test5.hex
diff --git a/tests/tools/perfdhcp/templates/Makefile.am b/tests/tools/perfdhcp/templates/Makefile.am
deleted file mode 100644
index c22787f..0000000
--- a/tests/tools/perfdhcp/templates/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-SUBDIRS = .
-
-# The test[1-5].hex are created by the TestControl.PacketTemplates
-# unit tests and have to be removed.
-CLEANFILES = test1.hex test2.hex test3.hex test4.hex test5.hex
-
-perfdhcpdir = $(pkgdatadir)
-
-EXTRA_DIST = discover-example.hex request4-example.hex
-EXTRA_DIST += solicit-example.hex request6-example.hex
diff --git a/tests/tools/perfdhcp/templates/discover-example.hex b/tests/tools/perfdhcp/templates/discover-example.hex
deleted file mode 100644
index 9a6e5ea..0000000
--- a/tests/tools/perfdhcp/templates/discover-example.hex
+++ /dev/null
@@ -1 +0,0 @@
-01010601008b45d200000000000000000000000000000000ac100102000c0102030400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000638253633501013707011c02030f060cff
\ No newline at end of file
diff --git a/tests/tools/perfdhcp/templates/request4-example.hex b/tests/tools/perfdhcp/templates/request4-example.hex
deleted file mode 100644
index 32447d6..0000000
--- a/tests/tools/perfdhcp/templates/request4-example.hex
+++ /dev/null
@@ -1 +0,0 @@
-01010601007b23f800000000000000000000000000000000ac100102000c0102030400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000638253633204ac1001813501033604ac1001013707011c02030f060cff
\ No newline at end of file
diff --git a/tests/tools/perfdhcp/templates/request6-example.hex b/tests/tools/perfdhcp/templates/request6-example.hex
deleted file mode 100644
index 1e3e76f..0000000
--- a/tests/tools/perfdhcp/templates/request6-example.hex
+++ /dev/null
@@ -1 +0,0 @@
-03da30c60001000e0001000117cf8e76000c010203060002000e0001000117cf8a5c080027a87b3400030028000000010000000a0000000e0005001820010db800010000000000000001b568000000be000000c8000800020000
\ No newline at end of file
diff --git a/tests/tools/perfdhcp/templates/solicit-example.hex b/tests/tools/perfdhcp/templates/solicit-example.hex
deleted file mode 100644
index 41c5ad3..0000000
--- a/tests/tools/perfdhcp/templates/solicit-example.hex
+++ /dev/null
@@ -1 +0,0 @@
-015f4e650001000e0001000117cf8e76000c010203040003000c0000000100000e01000015180006000400170018000800020000
\ No newline at end of file
diff --git a/tests/tools/perfdhcp/tests/Makefile.am b/tests/tools/perfdhcp/tests/Makefile.am
index 54602af..be67481 100644
--- a/tests/tools/perfdhcp/tests/Makefile.am
+++ b/tests/tools/perfdhcp/tests/Makefile.am
@@ -1,6 +1,7 @@
-SUBDIRS = .
+SUBDIRS = . testdata
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_srcdir)/testdata\"
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
diff --git a/tests/tools/perfdhcp/tests/test_control_unittest.cc b/tests/tools/perfdhcp/tests/test_control_unittest.cc
index abe8282..176644c 100644
--- a/tests/tools/perfdhcp/tests/test_control_unittest.cc
+++ b/tests/tools/perfdhcp/tests/test_control_unittest.cc
@@ -185,6 +185,18 @@ public:
return ("");
}
+ /// \brief Get the full path to a file in the testdata directory.
+ ///
+ /// \param filename name of the file to be appended to the absolute
+ /// path of the testdata directory
+ ///
+ /// \return full path to the file in the testdata directory.
+ std::string getFullPath(const std::string& filename) const {
+ std::ostringstream stream;
+ stream << TEST_DATA_DIR << "/" << filename;
+ return (stream.str());
+ }
+
/// \brief Match requested options in the buffer with given list.
///
/// This method iterates through options provided in the buffer
@@ -896,7 +908,7 @@ TEST_F(TestControlTest, Packet6) {
}
}
-TEST_F(TestControlTest, DISABLED_Packet4Exchange) {
+TEST_F(TestControlTest, Packet4Exchange) {
// Get the local loopback interface to open a socket on it and test
// packet exchanges. We don't want to fail the test if the interface
// is not available.
@@ -925,8 +937,8 @@ TEST_F(TestControlTest, DISABLED_Packet4Exchange) {
// Use templates for this test.
processCmdLine("perfdhcp -l " + loopback_iface
+ " -r 100 -R 20 -n 20 -D 10% -L 10547"
- + " -T ../templates/discover-example.hex"
- + " -T ../templates/request4-example.hex"
+ + " -T " + getFullPath("discover-example.hex")
+ + " -T " + getFullPath("request4-example.hex")
+ " 127.0.0.1");
// The number of iterations is restricted by the percentage of
// dropped packets (-D 10%). We also have to bump up the number
@@ -939,7 +951,7 @@ TEST_F(TestControlTest, DISABLED_Packet4Exchange) {
EXPECT_EQ(12, iterations_performed);
}
-TEST_F(TestControlTest, DISABLED_Packet6Exchange) {
+TEST_F(TestControlTest, Packet6Exchange) {
// Get the local loopback interface to open a socket on it and test
// packet exchanges. We don't want to fail the test if the interface
// is not available.
@@ -967,8 +979,8 @@ TEST_F(TestControlTest, DISABLED_Packet6Exchange) {
use_templates = true;
processCmdLine("perfdhcp -l " + loopback_iface
+ " -6 -r 100 -n 10 -R 20 -D 3 -L 10547"
- + " -T ../templates/solicit-example.hex"
- + " -T ../templates/request6-example.hex ::1");
+ + " -T " + getFullPath("solicit-example.hex")
+ + " -T " + getFullPath("request6-example.hex ::1"));
// For the first 3 packets we are simulating responses from the server.
// For the other packets we don't, so packets 4, 5 and 6 will be dropped,
// the test should be interrupted, and the actual number of iterations will
@@ -981,9 +993,9 @@ TEST_F(TestControlTest, DISABLED_Packet6Exchange) {
TEST_F(TestControlTest, PacketTemplates) {
std::vector<uint8_t> template1(256);
- std::string file1("../templates/test1.hex");
+ std::string file1(getFullPath("test1.hex"));
std::vector<uint8_t> template2(233);
- std::string file2("../templates/test2.hex");
+ std::string file2(getFullPath("test2.hex"));
for (int i = 0; i < template1.size(); ++i) {
template1[i] = static_cast<uint8_t>(random() % 256);
}
@@ -1011,7 +1023,7 @@ TEST_F(TestControlTest, PacketTemplates) {
EXPECT_TRUE(std::equal(template2.begin(), template2.end(), buf2.begin()));
// Try to read template file with odd number of digits.
- std::string file3("../templates/test3.hex");
+ std::string file3(getFullPath("test3.hex"));
// The size of the file is twice the binary data size and is always an
// even number. Subtracting 1 makes the file size odd.
ASSERT_TRUE(createTemplateFile(file3, template1, template1.size() * 2 - 1));
@@ -1021,7 +1033,7 @@ TEST_F(TestControlTest, PacketTemplates) {
EXPECT_THROW(tc.initPacketTemplates(), isc::OutOfRange);
// Try to read empty file.
- std::string file4("../templates/test4.hex");
+ std::string file4(getFullPath("test4.hex"));
ASSERT_TRUE(createTemplateFile(file4, template2, 0));
ASSERT_NO_THROW(
processCmdLine("perfdhcp -l 127.0.0.1 -T " + file4 + " all")
@@ -1029,7 +1041,7 @@ TEST_F(TestControlTest, PacketTemplates) {
EXPECT_THROW(tc.initPacketTemplates(), isc::OutOfRange);
// Try reading file with non hexadecimal characters.
- std::string file5("../templates/test5.hex");
+ std::string file5(getFullPath("test5.hex"));
ASSERT_TRUE(createTemplateFile(file5, template1, template1.size() * 2, true));
ASSERT_NO_THROW(
processCmdLine("perfdhcp -l 127.0.0.1 -T " + file5 + " all")
diff --git a/tests/tools/perfdhcp/tests/testdata/.gitignore b/tests/tools/perfdhcp/tests/testdata/.gitignore
new file mode 100644
index 0000000..6f865da
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/testdata/.gitignore
@@ -0,0 +1,5 @@
+/test1.hex
+/test2.hex
+/test3.hex
+/test4.hex
+/test5.hex
diff --git a/tests/tools/perfdhcp/tests/testdata/Makefile.am b/tests/tools/perfdhcp/tests/testdata/Makefile.am
new file mode 100644
index 0000000..bbd9a73
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/testdata/Makefile.am
@@ -0,0 +1,8 @@
+SUBDIRS = .
+
+# The test[1-5].hex are created by the TestControl.PacketTemplates
+# unit tests and have to be removed.
+CLEANFILES = test1.hex test2.hex test3.hex test4.hex test5.hex
+
+EXTRA_DIST = discover-example.hex request4-example.hex
+EXTRA_DIST += solicit-example.hex request6-example.hex
diff --git a/tests/tools/perfdhcp/tests/testdata/discover-example.hex b/tests/tools/perfdhcp/tests/testdata/discover-example.hex
new file mode 100644
index 0000000..9a6e5ea
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/testdata/discover-example.hex
@@ -0,0 +1 @@
+01010601008b45d200000000000000000000000000000000ac100102000c0102030400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000638253633501013707011c02030f060cff
\ No newline at end of file
diff --git a/tests/tools/perfdhcp/tests/testdata/request4-example.hex b/tests/tools/perfdhcp/tests/testdata/request4-example.hex
new file mode 100644
index 0000000..32447d6
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/testdata/request4-example.hex
@@ -0,0 +1 @@
+01010601007b23f800000000000000000000000000000000ac100102000c0102030400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000638253633204ac1001813501033604ac1001013707011c02030f060cff
\ No newline at end of file
diff --git a/tests/tools/perfdhcp/tests/testdata/request6-example.hex b/tests/tools/perfdhcp/tests/testdata/request6-example.hex
new file mode 100644
index 0000000..1e3e76f
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/testdata/request6-example.hex
@@ -0,0 +1 @@
+03da30c60001000e0001000117cf8e76000c010203060002000e0001000117cf8a5c080027a87b3400030028000000010000000a0000000e0005001820010db800010000000000000001b568000000be000000c8000800020000
\ No newline at end of file
diff --git a/tests/tools/perfdhcp/tests/testdata/solicit-example.hex b/tests/tools/perfdhcp/tests/testdata/solicit-example.hex
new file mode 100644
index 0000000..41c5ad3
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/testdata/solicit-example.hex
@@ -0,0 +1 @@
+015f4e650001000e0001000117cf8e76000c010203040003000c0000000100000e01000015180006000400170018000800020000
\ No newline at end of file