BIND 10 exp/res-research, updated. e9d3fd9a3d5bb8d6df9143a25d590cd301db86f1 [res-research] supported serialize/deserialize cache content in binary form.
BIND 10 source code commits
bind10-changes at lists.isc.org
Tue Jul 10 17:37:55 UTC 2012
The branch, exp/res-research has been updated
via e9d3fd9a3d5bb8d6df9143a25d590cd301db86f1 (commit)
via ebffb70040f991e88636d8c8992be064eea0d835 (commit)
from f6032431b32f7ffabc26216edfe46c5f1e8c5dd1 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit e9d3fd9a3d5bb8d6df9143a25d590cd301db86f1
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Tue Jul 10 10:31:52 2012 -0700
[res-research] supported serialize/deserialize cache content in binary form.
commit ebffb70040f991e88636d8c8992be064eea0d835
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Mon Jul 9 23:51:59 2012 -0700
[res-research] cache auth/additional sections of auth answers.
also, keep all trust-levels of data of the same (name, type).
-----------------------------------------------------------------------
Summary of changes:
exp/res-research/analysis/dns_cache.py | 200 ++++++++++++++++++++++++----
exp/res-research/analysis/mini_resolver.py | 38 +++++-
2 files changed, 210 insertions(+), 28 deletions(-)
-----------------------------------------------------------------------
diff --git a/exp/res-research/analysis/dns_cache.py b/exp/res-research/analysis/dns_cache.py
index 92c7400..afc961f 100755
--- a/exp/res-research/analysis/dns_cache.py
+++ b/exp/res-research/analysis/dns_cache.py
@@ -16,6 +16,7 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from isc.dns import *
+import struct
# "root hint"
ROOT_SERVERS = [pfx + '.root-servers.net' for pfx in 'abcdefghijklm']
@@ -73,6 +74,13 @@ class CacheEntry:
self.msglen = msglen
self.rcode = rcode.get_code()
+ def copy(self, other):
+ self.ttl = other.ttl
+ self.rdata_list = other.rdata_list
+ self.trust = other.trust
+ self.msglen = other.msglen
+ self.rcode = other.rcode
+
# Don't worry about cache expire; just record the RRs
class SimpleDNSCache:
'''A simplified DNS cache database.
@@ -80,20 +88,24 @@ class SimpleDNSCache:
It's a dict from (isc.dns.Name, isc.dns.RRClass) to an entry.
Each entry can be of either of the following:
- CacheEntry: in case the specified name doesn't exist (NXDOMAIN).
- - dict from RRType to CacheEntry: this gives a cache entry for the
- (name, class, type).
- dict from RRType to list of CacheEntry: this gives cache entries for
+ the (name, class, type) sorted by the trust levels (more trustworthy
+ ones appear sooner)
'''
# simplified trust levels for cached records
TRUST_LOCAL = 0 # specific to this implementation, never expires
TRUST_ANSWER = 1 # authoritative answer
- TRUST_GLUE = 2 # referral or glue
+ TRUST_AUTHAUTHORITY = 2 # authority section records in auth answer
+ TRUST_GLUE = 3 # referral or glue
+ TRUST_AUTHADDITIONAL = 4 # additional section records in auth answer
+
# Search options, can be logically OR'ed.
FIND_DEFAULT = 0
FIND_ALLOW_NEGATIVE = 1
- FIND_ALLOW_GLUE = 2
+ FIND_ALLOW_NOANSWER = 2
FIND_ALLOW_CNAME = 4
def __init__(self):
@@ -118,9 +130,11 @@ class SimpleDNSCache:
search_types.append(RRType.CNAME())
for type in search_types:
if rdata_map is not None and type in rdata_map:
- entry = rdata_map[type]
- if (options & self.FIND_ALLOW_GLUE) == 0 and \
- entry.trust > self.TRUST_ANSWER:
+ entries = rdata_map[type]
+ entry = entries[0]
+ if (options & self.FIND_ALLOW_NOANSWER) == 0:
+ entry = self.__find_cache_entry(entries, self.TRUST_ANSWER)
+ if entry is None:
return None
(ttl, rdata_list) = (entry.ttl, entry.rdata_list)
rrset = RRset(name, rrclass, type, RRTTL(ttl))
@@ -146,25 +160,53 @@ class SimpleDNSCache:
new_entry = CacheEntry(rrset.get_ttl().get_value(), rrset.get_rdata(),
trust, msglen, rcode)
if not key in self.__table:
- self.__table[key] = {rrset.get_type(): new_entry}
+ self.__table[key] = {rrset.get_type(): [new_entry]}
else:
table_ent = self.__table[key]
- cur_entry = table_ent.get(rrset.get_type())
- if cur_entry is None or cur_entry.trust >= trust:
- table_ent[rrset.get_type()] = new_entry
-
- def dump(self, dump_file):
- with open(dump_file, 'w') as f:
- for key, entry in self.__table.items():
- name = key[0]
- rrclass = key[1]
- if isinstance(entry, CacheEntry):
- f.write(';; [%s, TTL=%d, msglen=%d] %s/%s\n' %
- (str(Rcode(entry.rcode)), entry.ttl, entry.msglen,
- str(name), str(rrclass)))
- continue
- rdata_map = entry
- for rrtype, entry in rdata_map.items():
+ cur_entries = table_ent.get(rrset.get_type())
+ if cur_entries is None:
+ table_ent[rrset.get_type()] = [new_entry]
+ else:
+ self.__insert_cache_entry(cur_entries, new_entry)
+
+ def __insert_cache_entry(self, entries, new_entry):
+ old = self.__find_cache_entry(entries, new_entry.trust, True)
+ if old is not None and old.trust == new_entry.trust:
+ old.copy(new_entry)
+ else:
+ entries.append(new_entry)
+ entries.sort(key=lambda x: x.trust)
+
+ def __find_cache_entry(self, entries, trust, exact=False):
+ for entry in entries:
+ if entry.trust == trust or (not exact and entry.trust < trust):
+ return entry
+ return None
+
+ def dump(self, dump_file, serialize=False):
+ if serialize:
+ with open(dump_file, 'bw') as f:
+ self.__serialize(f)
+ else:
+ with open(dump_file, 'w') as f:
+ self.__dump_text(f)
+
+ def load(self, db_file):
+ with open(db_file, 'br') as f:
+ self.__deserialize(f)
+
+ def __dump_text(self, f):
+ for key, entry in self.__table.items():
+ name = key[0]
+ rrclass = key[1]
+ if isinstance(entry, CacheEntry):
+ f.write(';; [%s, TTL=%d, msglen=%d] %s/%s\n' %
+ (str(Rcode(entry.rcode)), entry.ttl, entry.msglen,
+ str(name), str(rrclass)))
+ continue
+ rdata_map = entry
+ for rrtype, entries in rdata_map.items():
+ for entry in entries:
if len(entry.rdata_list) == 0:
f.write(';; [%s, TTL=%d, msglen=%d] %s/%s/%s\n' %
(str(Rcode(entry.rcode)), entry.ttl,
@@ -177,3 +219,113 @@ class SimpleDNSCache:
for rdata in entry.rdata_list:
rrset.add_rdata(rdata)
f.write(rrset.to_text())
+
+ def __serialize(self, f):
+ '''Dump cache database content to a file in serialized binary format.
+
+ The serialized format is as follows:
+ Common header part:
+ <name length, 1 byte>
+ <domain name (wire)>
+ <RR class (numeric, wire)>
+ <# of cache entries, 2 bytes>
+ If #-of-entries is 0:
+ <Rcode value, 1 byte><TTL value, 4 bytes><msglen, 2 bytes>
+ <trust, 1 byte>
+ Else: sequence of serialized cache entries. Each of which is:
+ <RR type value, wire>
+ <# of cache entries of the type, 1 byte>
+ sequence of cache entries of the type, each of which is:
+ <RCODE value, 1 byte>
+ <TTL, 4 bytes>
+ <msglen, 2 bytes>
+ <trust, 1 byte>
+ <# of RDATAs, 2 bytes>
+ sequence of RDATA, each of which is:
+ <RDATA length, 2 bytes>
+ <RDATA, wire>
+
+ '''
+ for key, entry in self.__table.items():
+ name = key[0]
+ rrclass = key[1]
+ f.write(struct.pack('B', name.get_length()))
+ f.write(name.to_wire(b''))
+ f.write(rrclass.to_wire(b''))
+
+ if isinstance(entry, CacheEntry):
+ data = struct.pack('H', 0) # #-of-entries is 0
+ data += struct.pack('B', entry.rcode)
+ data += struct.pack('I', entry.ttl)
+ data += struct.pack('H', entry.msglen)
+ data += struct.pack('B', entry.trust)
+ f.write(data)
+ continue
+
+ rdata_map = entry
+ data = struct.pack('H', len(rdata_map)) # #-of-cache entries
+ for rrtype, entries in rdata_map.items():
+ data += rrtype.to_wire(b'')
+ data += struct.pack('B', len(entries))
+
+ for entry in entries:
+ data += struct.pack('B', entry.rcode)
+ data += struct.pack('I', entry.ttl)
+ data += struct.pack('H', entry.msglen)
+ data += struct.pack('B', entry.trust)
+ data += struct.pack('H', len(entry.rdata_list))
+ for rdata in entry.rdata_list:
+ rdata_data = rdata.to_wire(b'')
+ data += struct.pack('H', len(rdata_data))
+ data += rdata_data
+ f.write(data)
+
+ def __deserialize(self, f):
+ '''Load serialized cache DB to memory.
+
+ See __serialize for the format. Validation is generally omitted
+ for simplicity.
+
+ '''
+ while True:
+ initial_byte = f.read(1)
+ if len(initial_byte) == 0:
+ break
+ ndata = f.read(struct.unpack('B', initial_byte)[0])
+ name = Name(ndata)
+ rrclass = RRClass(f.read(2))
+ key = (name, rrclass)
+ n_types = struct.unpack('H', f.read(2))[0]
+ if n_types == 0:
+ rcode = struct.unpack('B', f.read(1))[0]
+ ttl = struct.unpack('I', f.read(4))[0]
+ msglen = struct.unpack('H', f.read(2))[0]
+ trust = struct.unpack('B', f.read(1))[0]
+ entry = CacheEntry(ttl, [], trust, msglen, Rcode(rcode))
+ self.__table[key] = entry
+ continue
+
+ self.__table[key] = {}
+ while n_types > 0:
+ n_types -= 1
+ rrtype = RRType(f.read(2))
+ n_entries = struct.unpack('B', f.read(1))[0]
+ entries = []
+ while n_entries > 0:
+ n_entries -= 1
+ rcode = struct.unpack('B', f.read(1))[0]
+ ttl = struct.unpack('I', f.read(4))[0]
+ msglen = struct.unpack('H', f.read(2))[0]
+ trust = struct.unpack('B', f.read(1))[0]
+ n_rdata = struct.unpack('H', f.read(2))[0]
+ rdata_list = []
+ while n_rdata > 0:
+ n_rdata -= 1
+ rdata_len = struct.unpack('H', f.read(2))[0]
+ rdata_list.append(Rdata(rrtype, rrclass,
+ f.read(rdata_len)))
+ entry = CacheEntry(ttl, rdata_list, trust, msglen,
+ Rcode(rcode))
+ entries.append(entry)
+ entries.sort(key=lambda x: x.trust)
+ self.__table[key][rrtype] = entries
diff --git a/exp/res-research/analysis/mini_resolver.py b/exp/res-research/analysis/mini_resolver.py
index c4ca51b..74daed1 100755
--- a/exp/res-research/analysis/mini_resolver.py
+++ b/exp/res-research/analysis/mini_resolver.py
@@ -175,6 +175,7 @@ class ResolverContext:
# Look into the response
if resp_msg.get_header_flag(Message.HEADERFLAG_AA):
next_qry = self.__handle_auth_answer(resp_msg, msglen)
+ self.__handle_auth_othersections(resp_msg)
elif resp_msg.get_rcode() == Rcode.NOERROR() and \
(not resp_msg.get_header_flag(Message.HEADERFLAG_AA)):
authorities = resp_msg.get_section(Message.SECTION_AUTHORITY)
@@ -264,6 +265,28 @@ class ResolverContext:
raise InternalLame('unexpected answer rcode=' +
str(resp_msg.get_rcode()))
+ def __handle_auth_othersections(self, resp_msg):
+ ns_names = []
+ for auth_rrset in resp_msg.get_section(Message.SECTION_AUTHORITY):
+ if auth_rrset.get_type() == RRType.NS():
+ ns_owner = auth_rrset.get_name()
+ cmp_reln = ns_owner.compare(self.__cur_zone).get_relation()
+ if cmp_reln == NameComparisonResult.SUBDOMAIN or \
+ cmp_reln == NameComparisonResult.EQUAL:
+ self.__cache.add(auth_rrset,
+ SimpleDNSCache.TRUST_AUTHAUTHORITY, 0)
+ for ns_rdata in auth_rrset.get_rdata():
+ ns_names.append(Name(ns_rdata.to_text()))
+ for ad_rrset in resp_msg.get_section(Message.SECTION_ADDITIONAL):
+ if ad_rrset.get_type() == RRType.A() or \
+ ad_rrset.get_type() == RRType.AAAA():
+ for ns_name in ns_names:
+ if ad_rrset.get_name() == ns_name:
+ self.__cache.add(ad_rrset,
+ SimpleDNSCache.TRUST_AUTHADDITIONAL,
+ 0)
+ break
+
def __handle_negative_answer(self, resp_msg, msglen):
rcode = resp_msg.get_rcode()
if rcode == Rcode.NOERROR():
@@ -362,7 +385,7 @@ class ResolverContext:
for l in range(0, zname.get_labelcount()):
zname = qname.split(l)
ns_rrset = self.__cache.find(zname, self.__qclass, RRType.NS(),
- SimpleDNSCache.FIND_ALLOW_GLUE)
+ SimpleDNSCache.FIND_ALLOW_NOANSWER)
if ns_rrset is not None:
return zname, ns_rrset
raise MiniResolverException('no name server found for ' + str(qname))
@@ -376,12 +399,12 @@ class ResolverContext:
ns_name = Name(ns.to_text())
ns_names.append(ns_name)
rrset4 = self.__cache.find(ns_name, ns_class, RRType.A(),
- SimpleDNSCache.FIND_ALLOW_GLUE)
+ SimpleDNSCache.FIND_ALLOW_NOANSWER)
if rrset4 is not None:
for rdata in rrset4.get_rdata():
v4_addrs.append((rdata.to_text(), DNS_PORT))
rrset6 = self.__cache.find(ns_name, ns_class, RRType.AAAA(),
- SimpleDNSCache.FIND_ALLOW_GLUE)
+ SimpleDNSCache.FIND_ALLOW_NOANSWER)
if rrset6 is not None:
for rdata in rrset6.get_rdata():
# specify 0 for flowinfo and scopeid unconditionally
@@ -523,6 +546,7 @@ class FileResolver:
self.__qfile = open(query_file, 'r')
self.__max_ctxts = int(options.max_query)
self.__dump_file = options.dump_file
+ self.__serialize_file = options.serialize_file
ResQuery.QUERY_TIMEOUT = int(options.query_timeo)
@@ -591,6 +615,8 @@ class FileResolver:
def done(self):
if self.__dump_file is not None:
self.__cache.dump(self.__dump_file)
+ if self.__serialize_file is not None:
+ self.__cache.dump(self.__serialize_file, True)
def __handle(self, s):
pkt, remote = s.recvfrom(4096)
@@ -656,7 +682,11 @@ def get_option_parser():
parser.add_option("-f", "--dump-file", dest="dump_file", action="store",
default=None,
help="if specified, file name to dump the resulting " + \
- "cache")
+ "cache in text format")
+ parser.add_option("-s", "--serialize", dest="serialize_file",
+ action="store", default=None,
+ help="if specified, file name to dump the resulting " + \
+ "cache in the serialized binary format")
parser.add_option("-n", "--max-query", dest="max_query", action="store",
default="10",
help="specify the max # of queries in parallel")
More information about the bind10-changes
mailing list