diff --git a/API.md b/API.md
index f0aa5ee5..b0e77e2b 100644
--- a/API.md
+++ b/API.md
@@ -1705,6 +1705,37 @@ Returns:
```
+### get_whois_lookup
+Get the connection info for an IP address.
+
+```
+Required parameters:
+ ip_address
+
+Optional parameters:
+ None
+
+Returns:
+ json:
+ {"host": "google-public-dns-a.google.com",
+ "nets": [{"description": "Google Inc.",
+ "address": "1600 Amphitheatre Parkway",
+ "city": "Mountain View",
+ "state": "CA",
+ "postal_code": "94043",
+ "country": "United States",
+ ...
+ },
+ {...}
+                  ]
+         }
+ json:
+ {"host": "Not available",
+ "nets": [],
+ "error": "IPv4 address 127.0.0.1 is already defined as Loopback via RFC 1122, Section 3.2.1.3."
+ }
+```
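+
+A minimal usage sketch, assuming the standard `/api/v2` endpoint on PlexPy's default port 8181 (the `apikey` value below is a placeholder, not a real key):
+
+```
+import requests
+
+params = {'apikey': 'XXXXXXXXXXXXXXXX',   # placeholder API key
+          'cmd': 'get_whois_lookup',
+          'ip_address': '8.8.8.8'}
+r = requests.get('http://localhost:8181/api/v2', params=params)
+print(r.json())
+```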
+
+
### import_database
Import a PlexWatch or Plexivity database into PlexPy.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 35c0dd52..4a75500e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## v1.4.9 (2016-08-14)
+
+* New: Option to include current activity in the history tables.
+* New: ISP lookup info in the IP address modal.
+* New: Option to disable web page previews for Telegram notifications.
+* Fix: Send correct JSON header for Slack/Mattermost notifications.
+* Fix: Twitter and Facebook test notifications incorrectly showing as "failed".
+* Fix: Current activity progress bars extending past 100%.
+* Fix: Typo in the setup wizard. (Thanks @wopian)
+* Fix: Update PMS server version before checking for a new update.
+* Change: Compare distro and build when checking for server updates.
+* Change: Nicer y-axis intervals when viewing "Play Duration" graphs.
+
+
## v1.4.8 (2016-07-16)
* New: Setting to specify PlexPy backup interval.
diff --git a/README.md b/README.md
index c83a4610..98ae9414 100644
--- a/README.md
+++ b/README.md
@@ -2,11 +2,13 @@
[](https://gitter.im/drzoidberg33/plexpy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-A python based web application for monitoring, analytics and notifications for Plex Media Server (www.plex.tv).
+A Python-based web application for monitoring, analytics and notifications for [Plex Media Server](https://plex.tv).
This project is based on code from [Headphones](https://github.com/rembo10/headphones) and [PlexWatchWeb](https://github.com/ecleese/plexWatchWeb).
-* PlexPy [forum thread](https://forums.plex.tv/discussion/169591/plexpy-another-plex-monitoring-program)
+* [Plex forum thread](https://forums.plex.tv/discussion/169591/plexpy-another-plex-monitoring-program)
+* [Gitter chat](https://gitter.im/drzoidberg33/plexpy)
+* [Discord server](https://discord.gg/011TFFWSuNFI02EKr) | [PlexPy Discord server](https://discord.gg/36ggawe)
## Features
@@ -25,6 +27,12 @@ This project is based on code from [Headphones](https://github.com/rembo10/headp
* Full sync list data on all users syncing items from your library.
* And many more!!
+## Preview
+
+* [Full preview gallery on Imgur](https://imgur.com/a/RwQPM)
+
+
+
## Installation and Support
* [Installation Guides](https://github.com/drzoidberg33/plexpy/wiki/Installation) shows you how to install PlexPy.
diff --git a/data/interfaces/default/css/plexpy.css b/data/interfaces/default/css/plexpy.css
index 5ddbff61..76441f73 100644
--- a/data/interfaces/default/css/plexpy.css
+++ b/data/interfaces/default/css/plexpy.css
@@ -598,6 +598,7 @@ a .users-poster-face:hover {
}
.dashboard-instance.hover .bar {
height: 14px;
+ max-width: 100%;
transform-origin: top;
transition: all .2s ease;
border-radius: 0px 0px 3px 3px;
@@ -608,6 +609,7 @@ a .users-poster-face:hover {
}
.dashboard-instance.hover .bufferbar {
height: 14px;
+ max-width: 100%;
transform-origin: top;
transition: all .2s ease;
border-radius: 0px 0px 3px 3px;
@@ -836,6 +838,7 @@ a .users-poster-face:hover {
background-color: #444;
position: absolute;
height: 6px;
+ max-width: 100%;
overflow: hidden;
}
.dashboard-activity-progress .bar {
@@ -853,6 +856,7 @@ a .users-poster-face:hover {
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffbb450', endColorstr='#fff89406', GradientType=0);
position: absolute;
height: 6px;
+ max-width: 100%;
overflow: hidden;
}
.dashboard-activity-metadata-wrapper {
@@ -2706,6 +2710,13 @@ div[id^='media_info_child'] div[id^='media_info_child'] div.dataTables_scrollHea
.dataTables_scrollBody {
-webkit-overflow-scrolling: touch;
}
+.current-activity-row {
+ background-color: rgba(255,255,255,.1) !important;
+}
+.current-activity-row:hover {
+ background-color: rgba(255,255,255,0.125) !important;
+}
+
#search_form {
width: 300px;
padding: 8px 15px;
@@ -3005,8 +3016,10 @@ a:hover .overlay-refresh-image {
a:hover .overlay-refresh-image:hover {
opacity: .9;
}
-#ip_error {
+#ip_error, #isp_error {
color: #aaa;
display: none;
text-align: center;
-}
\ No newline at end of file
+ padding-top: 10px;
+ padding-bottom: 10px;
+}
diff --git a/data/interfaces/default/graphs.html b/data/interfaces/default/graphs.html
index 6c83dfd3..37b85f93 100644
--- a/data/interfaces/default/graphs.html
+++ b/data/interfaces/default/graphs.html
@@ -341,6 +341,14 @@
var music_visible = (${config['music_logging_enable']} == 1 ? true : false);
+
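+    // Convert each series' values from seconds to hours so the "Play Duration" graphs use hour-based y-axis intervals.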
+ function dataSecondsToHours(data) {
+ $.each(data.series, function (i, series) {
+ series.data = $.map(series.data, function (value) {
+ return value / 60 / 60;
+ });
+ });
+ }
function loadGraphsTab1(time_range, yaxis) {
$('#days-selection').show();
@@ -354,18 +362,19 @@
dataType: "json",
success: function(data) {
var dateArray = [];
- for (var i = 0; i < data.categories.length; i++) {
- dateArray.push(moment(data.categories[i], 'YYYY-MM-DD').valueOf());
+ $.each(data.categories, function (i, day) {
+ dateArray.push(moment(day, 'YYYY-MM-DD').valueOf());
// Highlight the weekend
- if ((moment(data.categories[i], 'YYYY-MM-DD').format('ddd') == 'Sat') ||
- (moment(data.categories[i], 'YYYY-MM-DD').format('ddd') == 'Sun')) {
+ if ((moment(day, 'YYYY-MM-DD').format('ddd') == 'Sat') ||
+ (moment(day, 'YYYY-MM-DD').format('ddd') == 'Sun')) {
hc_plays_by_day_options.xAxis.plotBands.push({
from: i-0.5,
to: i+0.5,
color: 'rgba(80,80,80,0.3)'
});
}
- }
+ });
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_day_options.yAxis.min = 0;
hc_plays_by_day_options.xAxis.categories = dateArray;
hc_plays_by_day_options.series = data.series;
@@ -380,6 +389,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_dayofweek_options.xAxis.categories = data.categories;
hc_plays_by_dayofweek_options.series = data.series;
hc_plays_by_dayofweek_options.series[2].visible = music_visible;
@@ -393,6 +403,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_hourofday_options.xAxis.categories = data.categories;
hc_plays_by_hourofday_options.series = data.series;
hc_plays_by_hourofday_options.series[2].visible = music_visible;
@@ -406,6 +417,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_platform_options.xAxis.categories = data.categories;
hc_plays_by_platform_options.series = data.series;
hc_plays_by_platform_options.series[2].visible = music_visible;
@@ -419,6 +431,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_user_options.xAxis.categories = data.categories;
hc_plays_by_user_options.series = data.series;
hc_plays_by_user_options.series[2].visible = music_visible;
@@ -439,18 +452,19 @@
dataType: "json",
success: function(data) {
var dateArray = [];
- for (var i = 0; i < data.categories.length; i++) {
- dateArray.push(moment(data.categories[i], 'YYYY-MM-DD').valueOf());
+ $.each(data.categories, function (i, day) {
+ dateArray.push(moment(day, 'YYYY-MM-DD').valueOf());
// Highlight the weekend
- if ((moment(data.categories[i], 'YYYY-MM-DD').format('ddd') == 'Sat') ||
- (moment(data.categories[i], 'YYYY-MM-DD').format('ddd') == 'Sun')) {
- hc_plays_by_stream_type_options.xAxis.plotBands.push({
+ if ((moment(day, 'YYYY-MM-DD').format('ddd') == 'Sat') ||
+ (moment(day, 'YYYY-MM-DD').format('ddd') == 'Sun')) {
+                            hc_plays_by_stream_type_options.xAxis.plotBands.push({
from: i-0.5,
to: i+0.5,
color: 'rgba(80,80,80,0.3)'
});
}
- }
+ });
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_stream_type_options.yAxis.min = 0;
hc_plays_by_stream_type_options.xAxis.categories = dateArray;
hc_plays_by_stream_type_options.series = data.series;
@@ -464,6 +478,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_source_resolution_options.xAxis.categories = data.categories;
hc_plays_by_source_resolution_options.series = data.series;
var hc_plays_by_source_resolution = new Highcharts.Chart(hc_plays_by_source_resolution_options);
@@ -476,6 +491,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_stream_resolution_options.xAxis.categories = data.categories;
hc_plays_by_stream_resolution_options.series = data.series;
var hc_plays_by_stream_resolution = new Highcharts.Chart(hc_plays_by_stream_resolution_options);
@@ -488,6 +504,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_platform_by_stream_type_options.xAxis.categories = data.categories;
hc_plays_by_platform_by_stream_type_options.series = data.series;
var hc_plays_by_platform_by_stream_type = new Highcharts.Chart(hc_plays_by_platform_by_stream_type_options);
@@ -500,6 +517,7 @@
data: { time_range: time_range, y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_user_by_stream_type_options.xAxis.categories = data.categories;
hc_plays_by_user_by_stream_type_options.series = data.series;
var hc_plays_by_user_by_stream_type = new Highcharts.Chart(hc_plays_by_user_by_stream_type_options);
@@ -518,6 +536,7 @@
data: { y_axis: yaxis, user_id: selected_user_id },
dataType: "json",
success: function(data) {
+ if (yaxis === 'duration') { dataSecondsToHours(data); }
hc_plays_by_month_options.yAxis.min = 0;
hc_plays_by_month_options.xAxis.categories = data.categories;
hc_plays_by_month_options.series = data.series;
@@ -610,56 +629,55 @@
if (type === 'plays') {
yaxis_format = function() { return this.value; };
tooltip_format = function() {
- if (moment(this.x, 'X').isValid() && (this.x > 946684800)) {
- var s = ''+ moment(this.x).format("ddd MMM D") +'';
- } else {
- var s = ''+ this.x +'';
- }
- if (this.points.length > 1) {
- var total = 0;
- $.each(this.points, function(i, point) {
- s += ' '+point.series.name+': '+point.y;
- total += point.y;
- });
- s += ' Total: '+total+'';
- } else {
- $.each(this.points, function(i, point) {
- s += ' '+point.series.name+': '+point.y;
- });
- }
- return s;
- }
+ if (moment(this.x, 'X').isValid() && (this.x > 946684800)) {
+ var s = ''+ moment(this.x).format('ddd MMM D') +'';
+ } else {
+ var s = ''+ this.x +'';
+ }
+ if (this.points.length > 1) {
+ var total = 0;
+ $.each(this.points, function(i, point) {
+ s += ' '+point.series.name+': '+point.y;
+ total += point.y;
+ });
+ s += ' Total: '+total+'';
+ } else {
+ $.each(this.points, function(i, point) {
+ s += ' '+point.series.name+': '+point.y;
+ });
+ }
+ return s;
+ }
stack_labels_format = function() {
- return this.total;
- }
-
+ return this.total;
+ }
$('.yaxis-text').html('Play count');
} else {
- yaxis_format = function() { return moment.duration(this.value, 'seconds').format("H [h] m [m]"); };
+ yaxis_format = function() { return moment.duration(this.value, 'hours').format('H [h] m [m]'); };
tooltip_format = function() {
- if (moment(this.x, 'X').isValid() && (this.x > 946684800)) {
- var s = ''+ moment(this.x).format("ddd MMM D") +'';
- } else {
- var s = ''+ this.x +'';
- }
- if (this.points.length > 1) {
- var total = 0;
- $.each(this.points, function(i, point) {
- s += ' '+point.series.name+': '+moment.duration(point.y, 'seconds').format('D [days] H [hrs] m [mins]');
- total += point.y;
- });
- s += ' Total: '+moment.duration(total, 'seconds').format('D [days] H [hrs] m [mins]')+'';
- } else {
- $.each(this.points, function(i, point) {
- s += ' '+point.series.name+': '+moment.duration(point.y, 'seconds').format('D [days] H [hrs] m [mins]');
- });
- }
- return s;
- }
+ if (moment(this.x, 'X').isValid() && (this.x > 946684800)) {
+ var s = ''+ moment(this.x).format('ddd MMM D') +'';
+ } else {
+ var s = ''+ this.x +'';
+ }
+ if (this.points.length > 1) {
+ var total = 0;
+ $.each(this.points, function(i, point) {
+ s += ' '+point.series.name+': '+moment.duration(point.y, 'hours').format('D [days] H [hrs] m [mins]');
+ total += point.y;
+ });
+ s += ' Total: '+moment.duration(total, 'hours').format('D [days] H [hrs] m [mins]')+'';
+ } else {
+ $.each(this.points, function(i, point) {
+ s += ' '+point.series.name+': '+moment.duration(point.y, 'hours').format('D [days] H [hrs] m [mins]');
+ });
+ }
+ return s;
+ }
stack_labels_format = function() {
- var s = moment.duration(this.total, 'seconds').format("H [hrs] m [mins]");
- return s;
- }
+ var s = moment.duration(this.total, 'hours').format('H [h] m [m]');
+ return s;
+ }
$('.yaxis-text').html('Play duration');
}
diff --git a/data/interfaces/default/ip_address_modal.html b/data/interfaces/default/ip_address_modal.html
index 5b80b13c..cb407305 100644
--- a/data/interfaces/default/ip_address_modal.html
+++ b/data/interfaces/default/ip_address_modal.html
@@ -13,10 +13,10 @@
-
-
Location Details
+
Location Details
+
Continent:
@@ -34,6 +34,21 @@
Accuracy Radius:
+
+
Connection Details
+
+
+
+
+
Host:
+
+
+
+
+
ISP:
+
Address:
+
+
+
+
+
Include current activity in the history tables. Statistics will not be counted until the stream has ended.
+
Backup
@@ -549,6 +555,7 @@
+
@@ -2681,6 +2688,7 @@ $(document).ready(function() {
var plexpass = update_params.plexpass;
var platform = update_params.pms_platform;
var update_channel = update_params.pms_update_channel;
+ var update_distro = update_params.pms_update_distro;
var update_distro_build = update_params.pms_update_distro_build;
$("#pms_update_channel option[value='plexpass']").remove();
@@ -2699,18 +2707,26 @@ $(document).ready(function() {
$("#pms_update_distro_build option").remove();
$.each(platform_downloads.releases, function (index, item) {
var label = (platform_downloads.releases.length == 1) ? platform_downloads.name : platform_downloads.name + ' - ' + item.label;
- var selected = (item.build == update_distro_build) ? true : false;
+ var selected = (item.distro == update_distro && item.build == update_distro_build) ? true : false;
$('#pms_update_distro_build')
.append($('')
.text(label)
.val(item.build)
+ .attr('data-distro', item.distro)
.prop('selected', selected));
})
+ $('#pms_update_distro').val($("#pms_update_distro_build option:selected").data('distro'))
}
});
});
}
loadUpdateDistros();
+
+
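+    // When the selected build changes, copy its data-distro value into the pms_update_distro setting.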
+ $('#pms_update_distro_build').change(function () {
+ var distro = $("option:selected", this).data('distro')
+ $('#pms_update_distro').val(distro)
+ });
});
%def>
diff --git a/data/interfaces/default/welcome.html b/data/interfaces/default/welcome.html
index 429bed6b..66e76d50 100644
--- a/data/interfaces/default/welcome.html
+++ b/data/interfaces/default/welcome.html
@@ -428,7 +428,7 @@
complete: function (xhr, status) {
var authToken = $.parseJSON(xhr.responseText);
if (authToken) {
- $("#pms-token-status").html(' Authentation successful!');
+ $("#pms-token-status").html(' Authentication successful!');
$('#pms-token-status').fadeIn('fast');
$("#pms_token").val(authToken);
authenticated = true;
diff --git a/lib/dns/__init__.py b/lib/dns/__init__.py
new file mode 100644
index 00000000..c848e485
--- /dev/null
+++ b/lib/dns/__init__.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython DNS toolkit"""
+
+__all__ = [
+ 'dnssec',
+ 'e164',
+ 'edns',
+ 'entropy',
+ 'exception',
+ 'flags',
+ 'hash',
+ 'inet',
+ 'ipv4',
+ 'ipv6',
+ 'message',
+ 'name',
+ 'namedict',
+ 'node',
+ 'opcode',
+ 'query',
+ 'rcode',
+ 'rdata',
+ 'rdataclass',
+ 'rdataset',
+ 'rdatatype',
+ 'renderer',
+ 'resolver',
+ 'reversename',
+ 'rrset',
+ 'set',
+ 'tokenizer',
+ 'tsig',
+ 'tsigkeyring',
+ 'ttl',
+ 'rdtypes',
+ 'update',
+ 'version',
+ 'wiredata',
+ 'zone',
+]
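
For orientation, a minimal sketch of using the vendored package for a reverse (PTR) lookup — one way a host name like the one in the `get_whois_lookup` example could be resolved (assumes `lib/` is on `sys.path` so the package imports as `dns`, and that DNS is reachable):

```
import dns.resolver
import dns.reversename

# Map the IP address to its reverse-lookup name, then query the PTR record.
rev_name = dns.reversename.from_address('8.8.8.8')   # 8.8.8.8.in-addr.arpa.
answer = dns.resolver.query(rev_name, 'PTR')
print(str(answer[0]))                                 # e.g. google-public-dns-a.google.com.
```
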
diff --git a/lib/dns/_compat.py b/lib/dns/_compat.py
new file mode 100644
index 00000000..cffe4bb9
--- /dev/null
+++ b/lib/dns/_compat.py
@@ -0,0 +1,21 @@
+import sys
+
+
+if sys.version_info > (3,):
+ long = int
+ xrange = range
+else:
+ long = long
+ xrange = xrange
+
+# unicode / binary types
+if sys.version_info > (3,):
+ text_type = str
+ binary_type = bytes
+ string_types = (str,)
+ unichr = chr
+else:
+ text_type = unicode
+ binary_type = str
+ string_types = (basestring,)
+ unichr = unichr
diff --git a/lib/dns/dnssec.py b/lib/dns/dnssec.py
new file mode 100644
index 00000000..fec12082
--- /dev/null
+++ b/lib/dns/dnssec.py
@@ -0,0 +1,457 @@
+# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNSSEC-related functions and constants."""
+
+from io import BytesIO
+import struct
+import time
+
+import dns.exception
+import dns.hash
+import dns.name
+import dns.node
+import dns.rdataset
+import dns.rdata
+import dns.rdatatype
+import dns.rdataclass
+from ._compat import string_types
+
+
+class UnsupportedAlgorithm(dns.exception.DNSException):
+
+ """The DNSSEC algorithm is not supported."""
+
+
+class ValidationFailure(dns.exception.DNSException):
+
+ """The DNSSEC signature is invalid."""
+
+RSAMD5 = 1
+DH = 2
+DSA = 3
+ECC = 4
+RSASHA1 = 5
+DSANSEC3SHA1 = 6
+RSASHA1NSEC3SHA1 = 7
+RSASHA256 = 8
+RSASHA512 = 10
+ECDSAP256SHA256 = 13
+ECDSAP384SHA384 = 14
+INDIRECT = 252
+PRIVATEDNS = 253
+PRIVATEOID = 254
+
+_algorithm_by_text = {
+ 'RSAMD5': RSAMD5,
+ 'DH': DH,
+ 'DSA': DSA,
+ 'ECC': ECC,
+ 'RSASHA1': RSASHA1,
+ 'DSANSEC3SHA1': DSANSEC3SHA1,
+ 'RSASHA1NSEC3SHA1': RSASHA1NSEC3SHA1,
+ 'RSASHA256': RSASHA256,
+ 'RSASHA512': RSASHA512,
+ 'INDIRECT': INDIRECT,
+ 'ECDSAP256SHA256': ECDSAP256SHA256,
+ 'ECDSAP384SHA384': ECDSAP384SHA384,
+ 'PRIVATEDNS': PRIVATEDNS,
+ 'PRIVATEOID': PRIVATEOID,
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_algorithm_by_value = dict((y, x) for x, y in _algorithm_by_text.items())
+
+
+def algorithm_from_text(text):
+ """Convert text into a DNSSEC algorithm value
+ @rtype: int"""
+
+ value = _algorithm_by_text.get(text.upper())
+ if value is None:
+ value = int(text)
+ return value
+
+
+def algorithm_to_text(value):
+ """Convert a DNSSEC algorithm value to text
+ @rtype: string"""
+
+ text = _algorithm_by_value.get(value)
+ if text is None:
+ text = str(value)
+ return text
+
+
+def _to_rdata(record, origin):
+ s = BytesIO()
+ record.to_wire(s, origin=origin)
+ return s.getvalue()
+
+
+def key_id(key, origin=None):
+ rdata = _to_rdata(key, origin)
+ rdata = bytearray(rdata)
+ if key.algorithm == RSAMD5:
+ return (rdata[-3] << 8) + rdata[-2]
+ else:
+ total = 0
+ for i in range(len(rdata) // 2):
+ total += (rdata[2 * i] << 8) + \
+ rdata[2 * i + 1]
+ if len(rdata) % 2 != 0:
+ total += rdata[len(rdata) - 1] << 8
+ total += ((total >> 16) & 0xffff)
+ return total & 0xffff
+
+
+def make_ds(name, key, algorithm, origin=None):
+ if algorithm.upper() == 'SHA1':
+ dsalg = 1
+ hash = dns.hash.hashes['SHA1']()
+ elif algorithm.upper() == 'SHA256':
+ dsalg = 2
+ hash = dns.hash.hashes['SHA256']()
+ else:
+ raise UnsupportedAlgorithm('unsupported algorithm "%s"' % algorithm)
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, origin)
+ hash.update(name.canonicalize().to_wire())
+ hash.update(_to_rdata(key, origin))
+ digest = hash.digest()
+
+ dsrdata = struct.pack("!HBB", key_id(key), key.algorithm, dsalg) + digest
+ return dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.DS, dsrdata, 0,
+ len(dsrdata))
+
+
+def _find_candidate_keys(keys, rrsig):
+ candidate_keys = []
+ value = keys.get(rrsig.signer)
+ if value is None:
+ return None
+ if isinstance(value, dns.node.Node):
+ try:
+ rdataset = value.find_rdataset(dns.rdataclass.IN,
+ dns.rdatatype.DNSKEY)
+ except KeyError:
+ return None
+ else:
+ rdataset = value
+ for rdata in rdataset:
+ if rdata.algorithm == rrsig.algorithm and \
+ key_id(rdata) == rrsig.key_tag:
+ candidate_keys.append(rdata)
+ return candidate_keys
+
+
+def _is_rsa(algorithm):
+ return algorithm in (RSAMD5, RSASHA1,
+ RSASHA1NSEC3SHA1, RSASHA256,
+ RSASHA512)
+
+
+def _is_dsa(algorithm):
+ return algorithm in (DSA, DSANSEC3SHA1)
+
+
+def _is_ecdsa(algorithm):
+ return _have_ecdsa and (algorithm in (ECDSAP256SHA256, ECDSAP384SHA384))
+
+
+def _is_md5(algorithm):
+ return algorithm == RSAMD5
+
+
+def _is_sha1(algorithm):
+ return algorithm in (DSA, RSASHA1,
+ DSANSEC3SHA1, RSASHA1NSEC3SHA1)
+
+
+def _is_sha256(algorithm):
+ return algorithm in (RSASHA256, ECDSAP256SHA256)
+
+
+def _is_sha384(algorithm):
+ return algorithm == ECDSAP384SHA384
+
+
+def _is_sha512(algorithm):
+ return algorithm == RSASHA512
+
+
+def _make_hash(algorithm):
+ if _is_md5(algorithm):
+ return dns.hash.hashes['MD5']()
+ if _is_sha1(algorithm):
+ return dns.hash.hashes['SHA1']()
+ if _is_sha256(algorithm):
+ return dns.hash.hashes['SHA256']()
+ if _is_sha384(algorithm):
+ return dns.hash.hashes['SHA384']()
+ if _is_sha512(algorithm):
+ return dns.hash.hashes['SHA512']()
+ raise ValidationFailure('unknown hash for algorithm %u' % algorithm)
+
+
+def _make_algorithm_id(algorithm):
+ if _is_md5(algorithm):
+ oid = [0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05]
+ elif _is_sha1(algorithm):
+ oid = [0x2b, 0x0e, 0x03, 0x02, 0x1a]
+ elif _is_sha256(algorithm):
+ oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
+ elif _is_sha512(algorithm):
+ oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
+ else:
+ raise ValidationFailure('unknown algorithm %u' % algorithm)
+ olen = len(oid)
+ dlen = _make_hash(algorithm).digest_size
+ idbytes = [0x30] + [8 + olen + dlen] + \
+ [0x30, olen + 4] + [0x06, olen] + oid + \
+ [0x05, 0x00] + [0x04, dlen]
+ return struct.pack('!%dB' % len(idbytes), *idbytes)
+
+
+def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
+ """Validate an RRset against a single signature rdata
+
+ The owner name of the rrsig is assumed to be the same as the owner name
+ of the rrset.
+
+ @param rrset: The RRset to validate
+ @type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
+ tuple
+ @param rrsig: The signature rdata
+ @type rrsig: dns.rrset.Rdata
+ @param keys: The key dictionary.
+ @type keys: a dictionary keyed by dns.name.Name with node or rdataset
+ values
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name or None
+ @param now: The time to use when validating the signatures. The default
+ is the current time.
+ @type now: int
+ """
+
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ for candidate_key in _find_candidate_keys(keys, rrsig):
+ if not candidate_key:
+ raise ValidationFailure('unknown key')
+
+ # For convenience, allow the rrset to be specified as a (name,
+ # rdataset) tuple as well as a proper rrset
+ if isinstance(rrset, tuple):
+ rrname = rrset[0]
+ rdataset = rrset[1]
+ else:
+ rrname = rrset.name
+ rdataset = rrset
+
+ if now is None:
+ now = time.time()
+ if rrsig.expiration < now:
+ raise ValidationFailure('expired')
+ if rrsig.inception > now:
+ raise ValidationFailure('not yet valid')
+
+ hash = _make_hash(rrsig.algorithm)
+
+ if _is_rsa(rrsig.algorithm):
+ keyptr = candidate_key.key
+ (bytes_,) = struct.unpack('!B', keyptr[0:1])
+ keyptr = keyptr[1:]
+ if bytes_ == 0:
+ (bytes_,) = struct.unpack('!H', keyptr[0:2])
+ keyptr = keyptr[2:]
+ rsa_e = keyptr[0:bytes_]
+ rsa_n = keyptr[bytes_:]
+ keylen = len(rsa_n) * 8
+ pubkey = Crypto.PublicKey.RSA.construct(
+ (Crypto.Util.number.bytes_to_long(rsa_n),
+ Crypto.Util.number.bytes_to_long(rsa_e)))
+ sig = (Crypto.Util.number.bytes_to_long(rrsig.signature),)
+ elif _is_dsa(rrsig.algorithm):
+ keyptr = candidate_key.key
+ (t,) = struct.unpack('!B', keyptr[0:1])
+ keyptr = keyptr[1:]
+ octets = 64 + t * 8
+ dsa_q = keyptr[0:20]
+ keyptr = keyptr[20:]
+ dsa_p = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_g = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_y = keyptr[0:octets]
+ pubkey = Crypto.PublicKey.DSA.construct(
+ (Crypto.Util.number.bytes_to_long(dsa_y),
+ Crypto.Util.number.bytes_to_long(dsa_g),
+ Crypto.Util.number.bytes_to_long(dsa_p),
+ Crypto.Util.number.bytes_to_long(dsa_q)))
+ (dsa_r, dsa_s) = struct.unpack('!20s20s', rrsig.signature[1:])
+ sig = (Crypto.Util.number.bytes_to_long(dsa_r),
+ Crypto.Util.number.bytes_to_long(dsa_s))
+ elif _is_ecdsa(rrsig.algorithm):
+ if rrsig.algorithm == ECDSAP256SHA256:
+ curve = ecdsa.curves.NIST256p
+ key_len = 32
+ elif rrsig.algorithm == ECDSAP384SHA384:
+ curve = ecdsa.curves.NIST384p
+ key_len = 48
+ else:
+ # shouldn't happen
+ raise ValidationFailure('unknown ECDSA curve')
+ keyptr = candidate_key.key
+ x = Crypto.Util.number.bytes_to_long(keyptr[0:key_len])
+ y = Crypto.Util.number.bytes_to_long(keyptr[key_len:key_len * 2])
+ assert ecdsa.ecdsa.point_is_valid(curve.generator, x, y)
+ point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
+ verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point,
+ curve)
+ pubkey = ECKeyWrapper(verifying_key, key_len)
+ r = rrsig.signature[:key_len]
+ s = rrsig.signature[key_len:]
+ sig = ecdsa.ecdsa.Signature(Crypto.Util.number.bytes_to_long(r),
+ Crypto.Util.number.bytes_to_long(s))
+ else:
+ raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
+
+ hash.update(_to_rdata(rrsig, origin)[:18])
+ hash.update(rrsig.signer.to_digestable(origin))
+
+ if rrsig.labels < len(rrname) - 1:
+ suffix = rrname.split(rrsig.labels + 1)[1]
+ rrname = dns.name.from_text('*', suffix)
+ rrnamebuf = rrname.to_digestable(origin)
+ rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
+ rrsig.original_ttl)
+ rrlist = sorted(rdataset)
+ for rr in rrlist:
+ hash.update(rrnamebuf)
+ hash.update(rrfixed)
+ rrdata = rr.to_digestable(origin)
+ rrlen = struct.pack('!H', len(rrdata))
+ hash.update(rrlen)
+ hash.update(rrdata)
+
+ digest = hash.digest()
+
+ if _is_rsa(rrsig.algorithm):
+ # PKCS1 algorithm identifier goop
+ digest = _make_algorithm_id(rrsig.algorithm) + digest
+ padlen = keylen // 8 - len(digest) - 3
+ digest = struct.pack('!%dB' % (2 + padlen + 1),
+ *([0, 1] + [0xFF] * padlen + [0])) + digest
+ elif _is_dsa(rrsig.algorithm) or _is_ecdsa(rrsig.algorithm):
+ pass
+ else:
+ # Raise here for code clarity; this won't actually ever happen
+ # since if the algorithm is really unknown we'd already have
+ # raised an exception above
+ raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
+
+ if pubkey.verify(digest, sig):
+ return
+ raise ValidationFailure('verify failure')
+
+
+def _validate(rrset, rrsigset, keys, origin=None, now=None):
+ """Validate an RRset
+
+ @param rrset: The RRset to validate
+ @type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
+ tuple
+ @param rrsigset: The signature RRset
+ @type rrsigset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
+ tuple
+ @param keys: The key dictionary.
+ @type keys: a dictionary keyed by dns.name.Name with node or rdataset
+ values
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name or None
+ @param now: The time to use when validating the signatures. The default
+ is the current time.
+ @type now: int
+ """
+
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ if isinstance(rrset, tuple):
+ rrname = rrset[0]
+ else:
+ rrname = rrset.name
+
+ if isinstance(rrsigset, tuple):
+ rrsigname = rrsigset[0]
+ rrsigrdataset = rrsigset[1]
+ else:
+ rrsigname = rrsigset.name
+ rrsigrdataset = rrsigset
+
+ rrname = rrname.choose_relativity(origin)
+ rrsigname = rrname.choose_relativity(origin)
+ if rrname != rrsigname:
+ raise ValidationFailure("owner names do not match")
+
+ for rrsig in rrsigrdataset:
+ try:
+ _validate_rrsig(rrset, rrsig, keys, origin, now)
+ return
+ except ValidationFailure:
+ pass
+ raise ValidationFailure("no RRSIGs validated")
+
+
+def _need_pycrypto(*args, **kwargs):
+ raise NotImplementedError("DNSSEC validation requires pycrypto")
+
+try:
+ import Crypto.PublicKey.RSA
+ import Crypto.PublicKey.DSA
+ import Crypto.Util.number
+ validate = _validate
+ validate_rrsig = _validate_rrsig
+ _have_pycrypto = True
+except ImportError:
+ validate = _need_pycrypto
+ validate_rrsig = _need_pycrypto
+ _have_pycrypto = False
+
+try:
+ import ecdsa
+ import ecdsa.ecdsa
+ import ecdsa.ellipticcurve
+ import ecdsa.keys
+ _have_ecdsa = True
+
+ class ECKeyWrapper(object):
+
+ def __init__(self, key, key_len):
+ self.key = key
+ self.key_len = key_len
+
+ def verify(self, digest, sig):
+ diglong = Crypto.Util.number.bytes_to_long(digest)
+ return self.key.pubkey.verifies(diglong, sig)
+
+except ImportError:
+ _have_ecdsa = False
diff --git a/lib/dns/e164.py b/lib/dns/e164.py
new file mode 100644
index 00000000..2cc911cd
--- /dev/null
+++ b/lib/dns/e164.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2006, 2007, 2009, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS E.164 helpers
+
+@var public_enum_domain: The DNS public ENUM domain, e164.arpa.
+@type public_enum_domain: dns.name.Name object
+"""
+
+
+import dns.exception
+import dns.name
+import dns.resolver
+from ._compat import string_types
+
+public_enum_domain = dns.name.from_text('e164.arpa.')
+
+
+def from_e164(text, origin=public_enum_domain):
+ """Convert an E.164 number in textual form into a Name object whose
+ value is the ENUM domain name for that number.
+ @param text: an E.164 number in textual form.
+ @type text: str
+ @param origin: The domain in which the number should be constructed.
+ The default is e164.arpa.
+ @type origin: dns.name.Name object or None
+ @rtype: dns.name.Name object
+ """
+ parts = [d for d in text if d.isdigit()]
+ parts.reverse()
+ return dns.name.from_text('.'.join(parts), origin=origin)
+
+
+def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
+ """Convert an ENUM domain name into an E.164 number.
+ @param name: the ENUM domain name.
+ @type name: dns.name.Name object.
+ @param origin: A domain containing the ENUM domain name. The
+ name is relativized to this domain before being converted to text.
+ @type origin: dns.name.Name object or None
+ @param want_plus_prefix: if True, add a '+' to the beginning of the
+ returned number.
+ @rtype: str
+ """
+ if origin is not None:
+ name = name.relativize(origin)
+ dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
+ if len(dlabels) != len(name.labels):
+ raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
+ dlabels.reverse()
+ text = b''.join(dlabels)
+ if want_plus_prefix:
+ text = b'+' + text
+ return text
+
+
+def query(number, domains, resolver=None):
+ """Look for NAPTR RRs for the specified number in the specified domains.
+
+    e.g. query('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
+ """
+ if resolver is None:
+ resolver = dns.resolver.get_default_resolver()
+ for domain in domains:
+ if isinstance(domain, string_types):
+ domain = dns.name.from_text(domain)
+ qname = dns.e164.from_e164(number, domain)
+ try:
+ return resolver.query(qname, 'NAPTR')
+ except dns.resolver.NXDOMAIN:
+ pass
+ raise dns.resolver.NXDOMAIN
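
A small sketch of the ENUM helpers above (the phone number is illustrative only):

```
import dns.e164

# '+1 650 555 1212' -> digits reversed under e164.arpa.
name = dns.e164.from_e164('+1 650 555 1212')
print(name)                     # 2.1.2.1.5.5.5.0.5.6.1.e164.arpa.
print(dns.e164.to_e164(name))   # b'+16505551212' on Python 3, '+16505551212' on Python 2
```
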
diff --git a/lib/dns/edns.py b/lib/dns/edns.py
new file mode 100644
index 00000000..8ac676bc
--- /dev/null
+++ b/lib/dns/edns.py
@@ -0,0 +1,150 @@
+# Copyright (C) 2009, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""EDNS Options"""
+
+NSID = 3
+
+
+class Option(object):
+
+ """Base class for all EDNS option types.
+ """
+
+ def __init__(self, otype):
+ """Initialize an option.
+ @param otype: The rdata type
+ @type otype: int
+ """
+ self.otype = otype
+
+ def to_wire(self, file):
+ """Convert an option to wire format.
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def from_wire(cls, otype, wire, current, olen):
+ """Build an EDNS option object from wire format
+
+ @param otype: The option type
+ @type otype: int
+ @param wire: The wire-format message
+ @type wire: string
+ @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param olen: The length of the wire-format option data
+ @type olen: int
+ @rtype: dns.edns.Option instance"""
+ raise NotImplementedError
+
+ def _cmp(self, other):
+ """Compare an EDNS option with another option of the same type.
+ Return < 0 if self < other, 0 if self == other,
+ and > 0 if self > other.
+ """
+ raise NotImplementedError
+
+ def __eq__(self, other):
+ if not isinstance(other, Option):
+ return False
+ if self.otype != other.otype:
+ return False
+ return self._cmp(other) == 0
+
+ def __ne__(self, other):
+ if not isinstance(other, Option):
+ return False
+ if self.otype != other.otype:
+ return False
+ return self._cmp(other) != 0
+
+ def __lt__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) <= 0
+
+ def __ge__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) >= 0
+
+ def __gt__(self, other):
+ if not isinstance(other, Option) or \
+ self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) > 0
+
+
+class GenericOption(Option):
+
+    """Generic EDNS option class
+
+ This class is used for EDNS option types for which we have no better
+ implementation.
+ """
+
+ def __init__(self, otype, data):
+ super(GenericOption, self).__init__(otype)
+ self.data = data
+
+ def to_wire(self, file):
+ file.write(self.data)
+
+ @classmethod
+ def from_wire(cls, otype, wire, current, olen):
+ return cls(otype, wire[current: current + olen])
+
+ def _cmp(self, other):
+ if self.data == other.data:
+ return 0
+ if self.data > other.data:
+ return 1
+ return -1
+
+_type_to_class = {
+}
+
+
+def get_option_class(otype):
+ cls = _type_to_class.get(otype)
+ if cls is None:
+ cls = GenericOption
+ return cls
+
+
+def option_from_wire(otype, wire, current, olen):
+ """Build an EDNS option object from wire format
+
+ @param otype: The option type
+ @type otype: int
+ @param wire: The wire-format message
+ @type wire: string
+ @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param olen: The length of the wire-format option data
+ @type olen: int
+ @rtype: dns.edns.Option instance"""
+
+ cls = get_option_class(otype)
+ return cls.from_wire(otype, wire, current, olen)
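
A quick sketch of the fallback behaviour described above, decoding an option type with no registered class into a GenericOption (the payload here is made up):

```
import dns.edns

wire = b'example-payload'
opt = dns.edns.option_from_wire(dns.edns.NSID, wire, 0, len(wire))
print(type(opt).__name__)   # GenericOption (no specific class is registered)
print(opt.data)             # b'example-payload'
```
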
diff --git a/lib/dns/entropy.py b/lib/dns/entropy.py
new file mode 100644
index 00000000..43841a7a
--- /dev/null
+++ b/lib/dns/entropy.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2009, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os
+import time
+from ._compat import long, binary_type
+try:
+ import threading as _threading
+except ImportError:
+ import dummy_threading as _threading
+
+
+class EntropyPool(object):
+
+ def __init__(self, seed=None):
+ self.pool_index = 0
+ self.digest = None
+ self.next_byte = 0
+ self.lock = _threading.Lock()
+ try:
+ import hashlib
+ self.hash = hashlib.sha1()
+ self.hash_len = 20
+ except:
+ try:
+ import sha
+ self.hash = sha.new()
+ self.hash_len = 20
+ except:
+ import md5
+ self.hash = md5.new()
+ self.hash_len = 16
+ self.pool = bytearray(b'\0' * self.hash_len)
+ if seed is not None:
+ self.stir(bytearray(seed))
+ self.seeded = True
+ else:
+ self.seeded = False
+
+ def stir(self, entropy, already_locked=False):
+ if not already_locked:
+ self.lock.acquire()
+ try:
+ for c in entropy:
+ if self.pool_index == self.hash_len:
+ self.pool_index = 0
+ b = c & 0xff
+ self.pool[self.pool_index] ^= b
+ self.pool_index += 1
+ finally:
+ if not already_locked:
+ self.lock.release()
+
+ def _maybe_seed(self):
+ if not self.seeded:
+ try:
+ seed = os.urandom(16)
+ except:
+ try:
+ r = open('/dev/urandom', 'rb', 0)
+ try:
+ seed = r.read(16)
+ finally:
+ r.close()
+ except:
+ seed = str(time.time())
+ self.seeded = True
+ seed = bytearray(seed)
+ self.stir(seed, True)
+
+ def random_8(self):
+ self.lock.acquire()
+ try:
+ self._maybe_seed()
+ if self.digest is None or self.next_byte == self.hash_len:
+ self.hash.update(binary_type(self.pool))
+ self.digest = bytearray(self.hash.digest())
+ self.stir(self.digest, True)
+ self.next_byte = 0
+ value = self.digest[self.next_byte]
+ self.next_byte += 1
+ finally:
+ self.lock.release()
+ return value
+
+ def random_16(self):
+ return self.random_8() * 256 + self.random_8()
+
+ def random_32(self):
+ return self.random_16() * 65536 + self.random_16()
+
+ def random_between(self, first, last):
+ size = last - first + 1
+ if size > long(4294967296):
+ raise ValueError('too big')
+ if size > 65536:
+ rand = self.random_32
+ max = long(4294967295)
+ elif size > 256:
+ rand = self.random_16
+ max = 65535
+ else:
+ rand = self.random_8
+ max = 255
+ return (first + size * rand() // (max + 1))
+
+pool = EntropyPool()
+
+
+def random_16():
+ return pool.random_16()
+
+
+def between(first, last):
+ return pool.random_between(first, last)
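
For illustration, the module-level helpers can be called directly (values are pseudo-random, so the output varies):

```
import dns.entropy

print(dns.entropy.random_16())     # a value in [0, 65535]
print(dns.entropy.between(1, 6))   # a value in [1, 6]
```
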
diff --git a/lib/dns/exception.py b/lib/dns/exception.py
new file mode 100644
index 00000000..62fbe2cb
--- /dev/null
+++ b/lib/dns/exception.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNS Exceptions."""
+
+
+class DNSException(Exception):
+
+ """Abstract base class shared by all dnspython exceptions.
+
+ It supports two basic modes of operation:
+
+ a) Old/compatible mode is used if __init__ was called with
+ empty **kwargs.
+ In compatible mode all *args are passed to standard Python Exception class
+ as before and all *args are printed by standard __str__ implementation.
+ Class variable msg (or doc string if msg is None) is returned from str()
+ if *args is empty.
+
+ b) New/parametrized mode is used if __init__ was called with
+ non-empty **kwargs.
+    In the new mode *args has to be empty and all kwargs have to exactly match
+    the set in the class variable self.supp_kwargs. All kwargs are stored inside
+ self.kwargs and used in new __str__ implementation to construct
+ formatted message based on self.fmt string.
+
+ In the simplest case it is enough to override supp_kwargs and fmt
+ class variables to get nice parametrized messages.
+ """
+ msg = None # non-parametrized message
+ supp_kwargs = set() # accepted parameters for _fmt_kwargs (sanity check)
+ fmt = None # message parametrized with results from _fmt_kwargs
+
+ def __init__(self, *args, **kwargs):
+ self._check_params(*args, **kwargs)
+ self._check_kwargs(**kwargs)
+ self.kwargs = kwargs
+ if self.msg is None:
+ # doc string is better implicit message than empty string
+ self.msg = self.__doc__
+ if args:
+ super(DNSException, self).__init__(*args)
+ else:
+ super(DNSException, self).__init__(self.msg)
+
+ def _check_params(self, *args, **kwargs):
+ """Old exceptions supported only args and not kwargs.
+
+        For sanity we do not allow mixing old and new behavior."""
+ if args or kwargs:
+ assert bool(args) != bool(kwargs), \
+ 'keyword arguments are mutually exclusive with positional args'
+
+ def _check_kwargs(self, **kwargs):
+ if kwargs:
+ assert set(kwargs.keys()) == self.supp_kwargs, \
+ 'following set of keyword args is required: %s' % (
+ self.supp_kwargs)
+
+ def _fmt_kwargs(self, **kwargs):
+ """Format kwargs before printing them.
+
+ Resulting dictionary has to have keys necessary for str.format call
+ on fmt class variable.
+ """
+ fmtargs = {}
+ for kw, data in kwargs.items():
+ if isinstance(data, (list, set)):
+ # convert list of to list of str()
+ fmtargs[kw] = list(map(str, data))
+ if len(fmtargs[kw]) == 1:
+ # remove list brackets [] from single-item lists
+ fmtargs[kw] = fmtargs[kw].pop()
+ else:
+ fmtargs[kw] = data
+ return fmtargs
+
+ def __str__(self):
+ if self.kwargs and self.fmt:
+ # provide custom message constructed from keyword arguments
+ fmtargs = self._fmt_kwargs(**self.kwargs)
+ return self.fmt.format(**fmtargs)
+ else:
+ # print *args directly in the same way as old DNSException
+ return super(DNSException, self).__str__()
+
+
+class FormError(DNSException):
+
+ """DNS message is malformed."""
+
+
+class SyntaxError(DNSException):
+
+ """Text input is malformed."""
+
+
+class UnexpectedEnd(SyntaxError):
+
+ """Text input ended unexpectedly."""
+
+
+class TooBig(DNSException):
+
+ """The DNS message is too big."""
+
+
+class Timeout(DNSException):
+
+ """The DNS operation timed out."""
+ supp_kwargs = set(['timeout'])
+ fmt = "The DNS operation timed out after {timeout} seconds"
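
A short sketch of the "parametrized" mode described in the DNSException docstring: a subclass declares supp_kwargs and fmt, and str() renders fmt from the keyword arguments (the subclass below is illustrative, not part of the library):

```
import dns.exception

class LookupTimeout(dns.exception.DNSException):
    """The lookup timed out."""
    supp_kwargs = set(['server', 'timeout'])
    fmt = "Lookup against {server} timed out after {timeout} seconds"

try:
    raise LookupTimeout(server='8.8.8.8', timeout=5)
except LookupTimeout as exc:
    print(exc)   # Lookup against 8.8.8.8 timed out after 5 seconds
```
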
diff --git a/lib/dns/flags.py b/lib/dns/flags.py
new file mode 100644
index 00000000..388d6aaa
--- /dev/null
+++ b/lib/dns/flags.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Message Flags."""
+
+# Standard DNS flags
+
+QR = 0x8000
+AA = 0x0400
+TC = 0x0200
+RD = 0x0100
+RA = 0x0080
+AD = 0x0020
+CD = 0x0010
+
+# EDNS flags
+
+DO = 0x8000
+
+_by_text = {
+ 'QR': QR,
+ 'AA': AA,
+ 'TC': TC,
+ 'RD': RD,
+ 'RA': RA,
+ 'AD': AD,
+ 'CD': CD
+}
+
+_edns_by_text = {
+ 'DO': DO
+}
+
+
+# We construct the inverse mappings programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mappings not to be true inverses.
+
+_by_value = dict((y, x) for x, y in _by_text.items())
+
+_edns_by_value = dict((y, x) for x, y in _edns_by_text.items())
+
+
+def _order_flags(table):
+ order = list(table.items())
+ order.sort()
+ order.reverse()
+ return order
+
+_flags_order = _order_flags(_by_value)
+
+_edns_flags_order = _order_flags(_edns_by_value)
+
+
+def _from_text(text, table):
+ flags = 0
+ tokens = text.split()
+ for t in tokens:
+ flags = flags | table[t.upper()]
+ return flags
+
+
+def _to_text(flags, table, order):
+ text_flags = []
+ for k, v in order:
+ if flags & k != 0:
+ text_flags.append(v)
+ return ' '.join(text_flags)
+
+
+def from_text(text):
+ """Convert a space-separated list of flag text values into a flags
+ value.
+ @rtype: int"""
+
+ return _from_text(text, _by_text)
+
+
+def to_text(flags):
+ """Convert a flags value into a space-separated list of flag text
+ values.
+ @rtype: string"""
+
+ return _to_text(flags, _by_value, _flags_order)
+
+
+def edns_from_text(text):
+ """Convert a space-separated list of EDNS flag text values into a EDNS
+ flags value.
+ @rtype: int"""
+
+ return _from_text(text, _edns_by_text)
+
+
+def edns_to_text(flags):
+ """Convert an EDNS flags value into a space-separated list of EDNS flag
+ text values.
+ @rtype: string"""
+
+ return _to_text(flags, _edns_by_value, _edns_flags_order)
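
For example, converting between the textual and numeric flag forms round-trips:

```
import dns.flags

value = dns.flags.from_text('QR RD RA')
print(hex(value))                 # 0x8180
print(dns.flags.to_text(value))   # QR RD RA
```
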
diff --git a/lib/dns/grange.py b/lib/dns/grange.py
new file mode 100644
index 00000000..01a3257b
--- /dev/null
+++ b/lib/dns/grange.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS GENERATE range conversion."""
+
+import dns
+
+
+def from_text(text):
+ """Convert the text form of a range in a GENERATE statement to an
+ integer.
+
+ @param text: the textual range
+ @type text: string
+ @return: The start, stop and step values.
+ @rtype: tuple
+ """
+ # TODO, figure out the bounds on start, stop and step.
+
+ step = 1
+ cur = ''
+ state = 0
+ # state 0 1 2 3 4
+ # x - y / z
+ for c in text:
+ if c == '-' and state == 0:
+ start = int(cur)
+ cur = ''
+ state = 2
+ elif c == '/':
+ stop = int(cur)
+ cur = ''
+ state = 4
+ elif c.isdigit():
+ cur += c
+ else:
+ raise dns.exception.SyntaxError("Could not parse %s" % (c))
+
+ if state in (1, 3):
+ raise dns.exception.SyntaxError
+
+ if state == 2:
+ stop = int(cur)
+
+ if state == 4:
+ step = int(cur)
+
+ assert step >= 1
+ assert start >= 0
+ assert start <= stop
+ # TODO, can start == stop?
+
+ return (start, stop, step)
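
For example:

```
import dns.grange

print(dns.grange.from_text('1-100'))     # (1, 100, 1)
print(dns.grange.from_text('1-100/5'))   # (1, 100, 5)
```
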
diff --git a/lib/dns/hash.py b/lib/dns/hash.py
new file mode 100644
index 00000000..27f7a7e2
--- /dev/null
+++ b/lib/dns/hash.py
@@ -0,0 +1,32 @@
+# Copyright (C) 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Hashing backwards compatibility wrapper"""
+
+import sys
+import hashlib
+
+
+hashes = {}
+hashes['MD5'] = hashlib.md5
+hashes['SHA1'] = hashlib.sha1
+hashes['SHA224'] = hashlib.sha224
+hashes['SHA256'] = hashlib.sha256
+hashes['SHA384'] = hashlib.sha384
+hashes['SHA512'] = hashlib.sha512
+
+
+def get(algorithm):
+ return hashes[algorithm.upper()]
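
For example, fetching a constructor by name and hashing some bytes:

```
import dns.hash

h = dns.hash.get('sha256')()   # equivalent to hashlib.sha256()
h.update(b'plexpy')
print(h.hexdigest())
```
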
diff --git a/lib/dns/inet.py b/lib/dns/inet.py
new file mode 100644
index 00000000..966285e7
--- /dev/null
+++ b/lib/dns/inet.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Generic Internet address helper functions."""
+
+import socket
+
+import dns.ipv4
+import dns.ipv6
+
+
+# We assume that AF_INET is always defined.
+
+AF_INET = socket.AF_INET
+
+# AF_INET6 might not be defined in the socket module, but we need it.
+# We'll try to use the socket module's value, and if it doesn't work,
+# we'll use our own value.
+
+try:
+ AF_INET6 = socket.AF_INET6
+except AttributeError:
+ AF_INET6 = 9999
+
+
+def inet_pton(family, text):
+ """Convert the textual form of a network address into its binary form.
+
+ @param family: the address family
+ @type family: int
+ @param text: the textual address
+ @type text: string
+ @raises NotImplementedError: the address family specified is not
+ implemented.
+ @rtype: string
+ """
+
+ if family == AF_INET:
+ return dns.ipv4.inet_aton(text)
+ elif family == AF_INET6:
+ return dns.ipv6.inet_aton(text)
+ else:
+ raise NotImplementedError
+
+
+def inet_ntop(family, address):
+ """Convert the binary form of a network address into its textual form.
+
+ @param family: the address family
+ @type family: int
+ @param address: the binary address
+ @type address: string
+ @raises NotImplementedError: the address family specified is not
+ implemented.
+ @rtype: string
+ """
+ if family == AF_INET:
+ return dns.ipv4.inet_ntoa(address)
+ elif family == AF_INET6:
+ return dns.ipv6.inet_ntoa(address)
+ else:
+ raise NotImplementedError
+
+
+def af_for_address(text):
+ """Determine the address family of a textual-form network address.
+
+ @param text: the textual address
+ @type text: string
+ @raises ValueError: the address family cannot be determined from the input.
+ @rtype: int
+ """
+ try:
+ dns.ipv4.inet_aton(text)
+ return AF_INET
+ except:
+ try:
+ dns.ipv6.inet_aton(text)
+ return AF_INET6
+ except:
+ raise ValueError
+
+
+def is_multicast(text):
+ """Is the textual-form network address a multicast address?
+
+ @param text: the textual address
+ @raises ValueError: the address family cannot be determined from the input.
+ @rtype: bool
+ """
+ try:
+ first = ord(dns.ipv4.inet_aton(text)[0])
+ return (first >= 224 and first <= 239)
+ except:
+ try:
+ first = ord(dns.ipv6.inet_aton(text)[0])
+ return (first == 255)
+ except:
+ raise ValueError
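
A small sketch of detecting the address family of a textual address with the helper above:

```
import dns.inet

print(dns.inet.af_for_address('192.0.2.1') == dns.inet.AF_INET)      # True
print(dns.inet.af_for_address('2001:db8::1') == dns.inet.AF_INET6)   # True
```
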
diff --git a/lib/dns/ipv4.py b/lib/dns/ipv4.py
new file mode 100644
index 00000000..3fef282b
--- /dev/null
+++ b/lib/dns/ipv4.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv4 helper functions."""
+
+import struct
+
+import dns.exception
+from ._compat import binary_type
+
+def inet_ntoa(address):
+ """Convert an IPv4 address in network form to text form.
+
+ @param address: The IPv4 address
+ @type address: string
+ @returns: string
+ """
+ if len(address) != 4:
+ raise dns.exception.SyntaxError
+ if not isinstance(address, bytearray):
+ address = bytearray(address)
+ return (u'%u.%u.%u.%u' % (address[0], address[1],
+ address[2], address[3])).encode()
+
+def inet_aton(text):
+ """Convert an IPv4 address in text form to network form.
+
+ @param text: The IPv4 address
+ @type text: string
+ @returns: string
+ """
+ if not isinstance(text, binary_type):
+ text = text.encode()
+ parts = text.split(b'.')
+ if len(parts) != 4:
+ raise dns.exception.SyntaxError
+ for part in parts:
+ if not part.isdigit():
+ raise dns.exception.SyntaxError
+        if len(part) > 1 and part[0:1] == b'0':
+ # No leading zeros
+ raise dns.exception.SyntaxError
+ try:
+ bytes = [int(part) for part in parts]
+ return struct.pack('BBBB', *bytes)
+ except:
+ raise dns.exception.SyntaxError
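
For reference, a quick round trip through the two IPv4 helpers defined above (illustrative only; note that in this snapshot `inet_ntoa` returns the dotted quad as bytes):

```
import dns.ipv4
import dns.exception

packed = dns.ipv4.inet_aton('192.0.2.1')   # b'\xc0\x00\x02\x01'
text = dns.ipv4.inet_ntoa(packed)          # b'192.0.2.1'

# Anything that is not a well-formed dotted quad raises SyntaxError.
try:
    dns.ipv4.inet_aton('192.0.2.300')
except dns.exception.SyntaxError:
    pass
```
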
diff --git a/lib/dns/ipv6.py b/lib/dns/ipv6.py
new file mode 100644
index 00000000..ee991e85
--- /dev/null
+++ b/lib/dns/ipv6.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv6 helper functions."""
+
+import re
+import binascii
+
+import dns.exception
+import dns.ipv4
+from ._compat import xrange, binary_type
+
+_leading_zero = re.compile(b'0+([0-9a-f]+)')
+
+def inet_ntoa(address):
+ """Convert a network format IPv6 address into text.
+
+ @param address: the binary address
+ @type address: string
+ @rtype: string
+ @raises ValueError: the address isn't 16 bytes long
+ """
+
+ if len(address) != 16:
+ raise ValueError("IPv6 addresses are 16 bytes long")
+ hex = binascii.hexlify(address)
+ chunks = []
+ i = 0
+ l = len(hex)
+ while i < l:
+ chunk = hex[i : i + 4]
+ # strip leading zeros. we do this with an re instead of
+ # with lstrip() because lstrip() didn't support chars until
+ # python 2.2.2
+ m = _leading_zero.match(chunk)
+ if not m is None:
+ chunk = m.group(1)
+ chunks.append(chunk)
+ i += 4
+ #
+ # Compress the longest subsequence of 0-value chunks to ::
+ #
+ best_start = 0
+ best_len = 0
+ start = -1
+ last_was_zero = False
+ for i in xrange(8):
+ if chunks[i] != b'0':
+ if last_was_zero:
+ end = i
+ current_len = end - start
+ if current_len > best_len:
+ best_start = start
+ best_len = current_len
+ last_was_zero = False
+ elif not last_was_zero:
+ start = i
+ last_was_zero = True
+ if last_was_zero:
+ end = 8
+ current_len = end - start
+ if current_len > best_len:
+ best_start = start
+ best_len = current_len
+ if best_len > 1:
+ if best_start == 0 and \
+ (best_len == 6 or
+ best_len == 5 and chunks[5] == b'ffff'):
+ # We have an embedded IPv4 address
+ if best_len == 6:
+ prefix = b'::'
+ else:
+ prefix = b'::ffff:'
+ hex = prefix + dns.ipv4.inet_ntoa(address[12:])
+ else:
+ hex = b':'.join(chunks[:best_start]) + b'::' + \
+ b':'.join(chunks[best_start + best_len:])
+ else:
+ hex = b':'.join(chunks)
+ return hex
+
+_v4_ending = re.compile(b'(.*):(\d+\.\d+\.\d+\.\d+)$')
+_colon_colon_start = re.compile(b'::.*')
+_colon_colon_end = re.compile(b'.*::$')
+
+def inet_aton(text):
+ """Convert a text format IPv6 address into network format.
+
+ @param text: the textual address
+ @type text: string
+ @rtype: string
+ @raises dns.exception.SyntaxError: the text was not properly formatted
+ """
+
+ #
+ # Our aim here is not something fast; we just want something that works.
+ #
+ if not isinstance(text, binary_type):
+ text = text.encode()
+
+ if text == b'::':
+ text = b'0::'
+ #
+ # Get rid of the icky dot-quad syntax if we have it.
+ #
+ m = _v4_ending.match(text)
+ if not m is None:
+ b = bytearray(dns.ipv4.inet_aton(m.group(2)))
+ text = (u"%s:%02x%02x:%02x%02x" % (m.group(1).decode(), b[0], b[1],
+ b[2], b[3])).encode()
+ #
+ # Try to turn '::' into ':'; if no match try to
+ # turn '::' into ':'
+ #
+ m = _colon_colon_start.match(text)
+ if not m is None:
+ text = text[1:]
+ else:
+ m = _colon_colon_end.match(text)
+ if not m is None:
+ text = text[:-1]
+ #
+ # Now canonicalize into 8 chunks of 4 hex digits each
+ #
+ chunks = text.split(b':')
+ l = len(chunks)
+ if l > 8:
+ raise dns.exception.SyntaxError
+ seen_empty = False
+ canonical = []
+ for c in chunks:
+ if c == b'':
+ if seen_empty:
+ raise dns.exception.SyntaxError
+ seen_empty = True
+ for i in xrange(0, 8 - l + 1):
+ canonical.append(b'0000')
+ else:
+ lc = len(c)
+ if lc > 4:
+ raise dns.exception.SyntaxError
+ if lc != 4:
+ c = (b'0' * (4 - lc)) + c
+ canonical.append(c)
+ if l < 8 and not seen_empty:
+ raise dns.exception.SyntaxError
+ text = b''.join(canonical)
+
+ #
+ # Finally we can go to binary.
+ #
+ try:
+ return binascii.unhexlify(text)
+ except (binascii.Error, TypeError):
+ raise dns.exception.SyntaxError
+
+_mapped_prefix = b'\x00' * 10 + b'\xff\xff'
+
+def is_mapped(address):
+ return address.startswith(_mapped_prefix)
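
And the IPv6 counterparts, including the `::` compression and IPv4-mapped handling implemented above (again an illustrative sketch, not part of the vendored source):

```
import dns.ipv6

packed = dns.ipv6.inet_aton('2001:db8::1')       # 16-byte binary form
text = dns.ipv6.inet_ntoa(packed)                # b'2001:db8::1'

# IPv4-mapped addresses are parsed via dns.ipv4 and detected afterwards.
mapped = dns.ipv6.inet_aton('::ffff:192.0.2.1')
print(dns.ipv6.is_mapped(mapped))                # True
```
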
diff --git a/lib/dns/message.py b/lib/dns/message.py
new file mode 100644
index 00000000..9b8dcd0f
--- /dev/null
+++ b/lib/dns/message.py
@@ -0,0 +1,1153 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Messages"""
+
+from __future__ import absolute_import
+
+from io import StringIO
+import struct
+import sys
+import time
+
+import dns.edns
+import dns.exception
+import dns.flags
+import dns.name
+import dns.opcode
+import dns.entropy
+import dns.rcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rrset
+import dns.renderer
+import dns.tsig
+import dns.wiredata
+
+from ._compat import long, xrange, string_types
+
+
+class ShortHeader(dns.exception.FormError):
+
+ """The DNS packet passed to from_wire() is too short."""
+
+
+class TrailingJunk(dns.exception.FormError):
+
+ """The DNS packet passed to from_wire() has extra junk at the end of it."""
+
+
+class UnknownHeaderField(dns.exception.DNSException):
+
+ """The header field name was not recognized when converting from text
+ into a message."""
+
+
+class BadEDNS(dns.exception.FormError):
+
+ """OPT record occurred somewhere other than the start of
+ the additional data section."""
+
+
+class BadTSIG(dns.exception.FormError):
+
+ """A TSIG record occurred somewhere other than the end of
+ the additional data section."""
+
+
+class UnknownTSIGKey(dns.exception.DNSException):
+
+ """A TSIG with an unknown key was received."""
+
+
+class Message(object):
+
+ """A DNS message.
+
+ @ivar id: The query id; the default is a randomly chosen id.
+ @type id: int
+ @ivar flags: The DNS flags of the message. @see: RFC 1035 for an
+ explanation of these flags.
+ @type flags: int
+ @ivar question: The question section.
+ @type question: list of dns.rrset.RRset objects
+ @ivar answer: The answer section.
+ @type answer: list of dns.rrset.RRset objects
+ @ivar authority: The authority section.
+ @type authority: list of dns.rrset.RRset objects
+ @ivar additional: The additional data section.
+ @type additional: list of dns.rrset.RRset objects
+ @ivar edns: The EDNS level to use. The default is -1, no Edns.
+ @type edns: int
+ @ivar ednsflags: The EDNS flags
+ @type ednsflags: long
+ @ivar payload: The EDNS payload size. The default is 0.
+ @type payload: int
+ @ivar options: The EDNS options
+ @type options: list of dns.edns.Option objects
+ @ivar request_payload: The associated request's EDNS payload size.
+ @type request_payload: int
+ @ivar keyring: The TSIG keyring to use. The default is None.
+ @type keyring: dict
+ @ivar keyname: The TSIG keyname to use. The default is None.
+ @type keyname: dns.name.Name object
+ @ivar keyalgorithm: The TSIG algorithm to use; defaults to
+ dns.tsig.default_algorithm. Constants for TSIG algorithms are defined
+ in dns.tsig, and the currently implemented algorithms are
+ HMAC_MD5, HMAC_SHA1, HMAC_SHA224, HMAC_SHA256, HMAC_SHA384, and
+ HMAC_SHA512.
+ @type keyalgorithm: string
+ @ivar request_mac: The TSIG MAC of the request message associated with
+ this message; used when validating TSIG signatures. @see: RFC 2845 for
+ more information on TSIG fields.
+ @type request_mac: string
+ @ivar fudge: TSIG time fudge; default is 300 seconds.
+ @type fudge: int
+ @ivar original_id: TSIG original id; defaults to the message's id
+ @type original_id: int
+ @ivar tsig_error: TSIG error code; default is 0.
+ @type tsig_error: int
+ @ivar other_data: TSIG other data.
+ @type other_data: string
+ @ivar mac: The TSIG MAC for this message.
+ @type mac: string
+ @ivar xfr: Is the message being used to contain the results of a DNS
+ zone transfer? The default is False.
+ @type xfr: bool
+ @ivar origin: The origin of the zone in messages which are used for
+ zone transfers or for DNS dynamic updates. The default is None.
+ @type origin: dns.name.Name object
+ @ivar tsig_ctx: The TSIG signature context associated with this
+ message. The default is None.
+ @type tsig_ctx: hmac.HMAC object
+ @ivar had_tsig: Did the message decoded from wire format have a TSIG
+ signature?
+ @type had_tsig: bool
+ @ivar multi: Is this message part of a multi-message sequence? The
+ default is false. This variable is used when validating TSIG signatures
+ on messages which are part of a zone transfer.
+ @type multi: bool
+ @ivar first: Is this message standalone, or the first of a multi
+ message sequence? This variable is used when validating TSIG signatures
+ on messages which are part of a zone transfer.
+ @type first: bool
+ @ivar index: An index of rrsets in the message. The index key is
+ (section, name, rdclass, rdtype, covers, deleting). Indexing can be
+ disabled by setting the index to None.
+ @type index: dict
+ """
+
+ def __init__(self, id=None):
+ if id is None:
+ self.id = dns.entropy.random_16()
+ else:
+ self.id = id
+ self.flags = 0
+ self.question = []
+ self.answer = []
+ self.authority = []
+ self.additional = []
+ self.edns = -1
+ self.ednsflags = 0
+ self.payload = 0
+ self.options = []
+ self.request_payload = 0
+ self.keyring = None
+ self.keyname = None
+ self.keyalgorithm = dns.tsig.default_algorithm
+ self.request_mac = ''
+ self.other_data = ''
+ self.tsig_error = 0
+ self.fudge = 300
+ self.original_id = self.id
+ self.mac = ''
+ self.xfr = False
+ self.origin = None
+ self.tsig_ctx = None
+ self.had_tsig = False
+ self.multi = False
+ self.first = True
+ self.index = {}
+
+ def __repr__(self):
+        return '<DNS message, ID ' + repr(self.id) + '>'
+
+ def __str__(self):
+ return self.to_text()
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ """Convert the message to text.
+
+ The I{origin}, I{relativize}, and any other keyword
+ arguments are passed to the rrset to_wire() method.
+
+ @rtype: string
+ """
+
+ s = StringIO()
+ s.write(u'id %d\n' % self.id)
+ s.write(u'opcode %s\n' %
+ dns.opcode.to_text(dns.opcode.from_flags(self.flags)))
+ rc = dns.rcode.from_flags(self.flags, self.ednsflags)
+ s.write(u'rcode %s\n' % dns.rcode.to_text(rc))
+ s.write(u'flags %s\n' % dns.flags.to_text(self.flags))
+ if self.edns >= 0:
+ s.write(u'edns %s\n' % self.edns)
+ if self.ednsflags != 0:
+ s.write(u'eflags %s\n' %
+ dns.flags.edns_to_text(self.ednsflags))
+ s.write(u'payload %d\n' % self.payload)
+ is_update = dns.opcode.is_update(self.flags)
+ if is_update:
+ s.write(u';ZONE\n')
+ else:
+ s.write(u';QUESTION\n')
+ for rrset in self.question:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ if is_update:
+ s.write(u';PREREQ\n')
+ else:
+ s.write(u';ANSWER\n')
+ for rrset in self.answer:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ if is_update:
+ s.write(u';UPDATE\n')
+ else:
+ s.write(u';AUTHORITY\n')
+ for rrset in self.authority:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ s.write(u';ADDITIONAL\n')
+ for rrset in self.additional:
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write(u'\n')
+ #
+ # We strip off the final \n so the caller can print the result without
+ # doing weird things to get around eccentricities in Python print
+ # formatting
+ #
+ return s.getvalue()[:-1]
+
+ def __eq__(self, other):
+ """Two messages are equal if they have the same content in the
+ header, question, answer, and authority sections.
+ @rtype: bool"""
+ if not isinstance(other, Message):
+ return False
+ if self.id != other.id:
+ return False
+ if self.flags != other.flags:
+ return False
+ for n in self.question:
+ if n not in other.question:
+ return False
+ for n in other.question:
+ if n not in self.question:
+ return False
+ for n in self.answer:
+ if n not in other.answer:
+ return False
+ for n in other.answer:
+ if n not in self.answer:
+ return False
+ for n in self.authority:
+ if n not in other.authority:
+ return False
+ for n in other.authority:
+ if n not in self.authority:
+ return False
+ return True
+
+ def __ne__(self, other):
+ """Are two messages not equal?
+ @rtype: bool"""
+ return not self.__eq__(other)
+
+ def is_response(self, other):
+ """Is other a response to self?
+ @rtype: bool"""
+ if other.flags & dns.flags.QR == 0 or \
+ self.id != other.id or \
+ dns.opcode.from_flags(self.flags) != \
+ dns.opcode.from_flags(other.flags):
+ return False
+ if dns.rcode.from_flags(other.flags, other.ednsflags) != \
+ dns.rcode.NOERROR:
+ return True
+ if dns.opcode.is_update(self.flags):
+ return True
+ for n in self.question:
+ if n not in other.question:
+ return False
+ for n in other.question:
+ if n not in self.question:
+ return False
+ return True
+
+ def section_number(self, section):
+ if section is self.question:
+ return 0
+ elif section is self.answer:
+ return 1
+ elif section is self.authority:
+ return 2
+ elif section is self.additional:
+ return 3
+ else:
+ raise ValueError('unknown section')
+
+ def find_rrset(self, section, name, rdclass, rdtype,
+ covers=dns.rdatatype.NONE, deleting=None, create=False,
+ force_unique=False):
+ """Find the RRset with the given attributes in the specified section.
+
+ @param section: the section of the message to look in, e.g.
+ self.answer.
+ @type section: list of dns.rrset.RRset objects
+ @param name: the name of the RRset
+ @type name: dns.name.Name object
+ @param rdclass: the class of the RRset
+ @type rdclass: int
+ @param rdtype: the type of the RRset
+ @type rdtype: int
+ @param covers: the covers value of the RRset
+ @type covers: int
+ @param deleting: the deleting value of the RRset
+ @type deleting: int
+ @param create: If True, create the RRset if it is not found.
+ The created RRset is appended to I{section}.
+ @type create: bool
+ @param force_unique: If True and create is also True, create a
+ new RRset regardless of whether a matching RRset exists already.
+ @type force_unique: bool
+ @raises KeyError: the RRset was not found and create was False
+ @rtype: dns.rrset.RRset object"""
+
+ key = (self.section_number(section),
+ name, rdclass, rdtype, covers, deleting)
+ if not force_unique:
+ if self.index is not None:
+ rrset = self.index.get(key)
+ if rrset is not None:
+ return rrset
+ else:
+ for rrset in section:
+ if rrset.match(name, rdclass, rdtype, covers, deleting):
+ return rrset
+ if not create:
+ raise KeyError
+ rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
+ section.append(rrset)
+ if self.index is not None:
+ self.index[key] = rrset
+ return rrset
+
+ def get_rrset(self, section, name, rdclass, rdtype,
+ covers=dns.rdatatype.NONE, deleting=None, create=False,
+ force_unique=False):
+ """Get the RRset with the given attributes in the specified section.
+
+ If the RRset is not found, None is returned.
+
+ @param section: the section of the message to look in, e.g.
+ self.answer.
+ @type section: list of dns.rrset.RRset objects
+ @param name: the name of the RRset
+ @type name: dns.name.Name object
+ @param rdclass: the class of the RRset
+ @type rdclass: int
+ @param rdtype: the type of the RRset
+ @type rdtype: int
+ @param covers: the covers value of the RRset
+ @type covers: int
+ @param deleting: the deleting value of the RRset
+ @type deleting: int
+ @param create: If True, create the RRset if it is not found.
+ The created RRset is appended to I{section}.
+ @type create: bool
+ @param force_unique: If True and create is also True, create a
+ new RRset regardless of whether a matching RRset exists already.
+ @type force_unique: bool
+ @rtype: dns.rrset.RRset object or None"""
+
+ try:
+ rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
+ deleting, create, force_unique)
+ except KeyError:
+ rrset = None
+ return rrset
+
+ def to_wire(self, origin=None, max_size=0, **kw):
+ """Return a string containing the message in DNS compressed wire
+ format.
+
+ Additional keyword arguments are passed to the rrset to_wire()
+ method.
+
+ @param origin: The origin to be appended to any relative names.
+ @type origin: dns.name.Name object
+ @param max_size: The maximum size of the wire format output; default
+ is 0, which means 'the message's request payload, if nonzero, or
+    65535'.
+ @type max_size: int
+ @raises dns.exception.TooBig: max_size was exceeded
+ @rtype: string
+ """
+
+ if max_size == 0:
+ if self.request_payload != 0:
+ max_size = self.request_payload
+ else:
+ max_size = 65535
+ if max_size < 512:
+ max_size = 512
+ elif max_size > 65535:
+ max_size = 65535
+ r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
+ for rrset in self.question:
+ r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
+ for rrset in self.answer:
+ r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
+ for rrset in self.authority:
+ r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
+ if self.edns >= 0:
+ r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
+ for rrset in self.additional:
+ r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
+ r.write_header()
+ if self.keyname is not None:
+ r.add_tsig(self.keyname, self.keyring[self.keyname],
+ self.fudge, self.original_id, self.tsig_error,
+ self.other_data, self.request_mac,
+ self.keyalgorithm)
+ self.mac = r.mac
+ return r.get_wire()
+
+ def use_tsig(self, keyring, keyname=None, fudge=300,
+ original_id=None, tsig_error=0, other_data='',
+ algorithm=dns.tsig.default_algorithm):
+ """When sending, a TSIG signature using the specified keyring
+ and keyname should be added.
+
+ @param keyring: The TSIG keyring to use; defaults to None.
+ @type keyring: dict
+ @param keyname: The name of the TSIG key to use; defaults to None.
+ The key must be defined in the keyring. If a keyring is specified
+ but a keyname is not, then the key used will be the first key in the
+ keyring. Note that the order of keys in a dictionary is not defined,
+ so applications should supply a keyname when a keyring is used, unless
+ they know the keyring contains only one key.
+ @type keyname: dns.name.Name or string
+ @param fudge: TSIG time fudge; default is 300 seconds.
+ @type fudge: int
+ @param original_id: TSIG original id; defaults to the message's id
+ @type original_id: int
+ @param tsig_error: TSIG error code; default is 0.
+ @type tsig_error: int
+ @param other_data: TSIG other data.
+ @type other_data: string
+ @param algorithm: The TSIG algorithm to use; defaults to
+ dns.tsig.default_algorithm
+ """
+
+ self.keyring = keyring
+ if keyname is None:
+ self.keyname = list(self.keyring.keys())[0]
+ else:
+ if isinstance(keyname, string_types):
+ keyname = dns.name.from_text(keyname)
+ self.keyname = keyname
+ self.keyalgorithm = algorithm
+ self.fudge = fudge
+ if original_id is None:
+ self.original_id = self.id
+ else:
+ self.original_id = original_id
+ self.tsig_error = tsig_error
+ self.other_data = other_data
+
+ def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None,
+ options=None):
+ """Configure EDNS behavior.
+ @param edns: The EDNS level to use. Specifying None, False, or -1
+ means 'do not use EDNS', and in this case the other parameters are
+ ignored. Specifying True is equivalent to specifying 0, i.e. 'use
+ EDNS0'.
+ @type edns: int or bool or None
+ @param ednsflags: EDNS flag values.
+ @type ednsflags: int
+ @param payload: The EDNS sender's payload field, which is the maximum
+ size of UDP datagram the sender can handle.
+ @type payload: int
+ @param request_payload: The EDNS payload size to use when sending
+ this message. If not specified, defaults to the value of payload.
+ @type request_payload: int or None
+ @param options: The EDNS options
+ @type options: None or list of dns.edns.Option objects
+ @see: RFC 2671
+ """
+ if edns is None or edns is False:
+ edns = -1
+ if edns is True:
+ edns = 0
+ if request_payload is None:
+ request_payload = payload
+ if edns < 0:
+ ednsflags = 0
+ payload = 0
+ request_payload = 0
+ options = []
+ else:
+ # make sure the EDNS version in ednsflags agrees with edns
+ ednsflags &= long(0xFF00FFFF)
+ ednsflags |= (edns << 16)
+ if options is None:
+ options = []
+ self.edns = edns
+ self.ednsflags = ednsflags
+ self.payload = payload
+ self.options = options
+ self.request_payload = request_payload
+
+ def want_dnssec(self, wanted=True):
+ """Enable or disable 'DNSSEC desired' flag in requests.
+ @param wanted: Is DNSSEC desired? If True, EDNS is enabled if
+ required, and then the DO bit is set. If False, the DO bit is
+ cleared if EDNS is enabled.
+ @type wanted: bool
+ """
+ if wanted:
+ if self.edns < 0:
+ self.use_edns()
+ self.ednsflags |= dns.flags.DO
+ elif self.edns >= 0:
+ self.ednsflags &= ~dns.flags.DO
+
+ def rcode(self):
+ """Return the rcode.
+ @rtype: int
+ """
+ return dns.rcode.from_flags(self.flags, self.ednsflags)
+
+ def set_rcode(self, rcode):
+ """Set the rcode.
+ @param rcode: the rcode
+ @type rcode: int
+ """
+ (value, evalue) = dns.rcode.to_flags(rcode)
+ self.flags &= 0xFFF0
+ self.flags |= value
+ self.ednsflags &= long(0x00FFFFFF)
+ self.ednsflags |= evalue
+ if self.ednsflags != 0 and self.edns < 0:
+ self.edns = 0
+
+ def opcode(self):
+ """Return the opcode.
+ @rtype: int
+ """
+ return dns.opcode.from_flags(self.flags)
+
+ def set_opcode(self, opcode):
+ """Set the opcode.
+ @param opcode: the opcode
+ @type opcode: int
+ """
+ self.flags &= 0x87FF
+ self.flags |= dns.opcode.to_flags(opcode)
+
+
+class _WireReader(object):
+
+ """Wire format reader.
+
+ @ivar wire: the wire-format message.
+ @type wire: string
+ @ivar message: The message object being built
+ @type message: dns.message.Message object
+ @ivar current: When building a message object from wire format, this
+ variable contains the offset from the beginning of wire of the next octet
+ to be read.
+ @type current: int
+ @ivar updating: Is the message a dynamic update?
+ @type updating: bool
+ @ivar one_rr_per_rrset: Put each RR into its own RRset?
+ @type one_rr_per_rrset: bool
+ @ivar ignore_trailing: Ignore trailing junk at end of request?
+ @type ignore_trailing: bool
+ @ivar zone_rdclass: The class of the zone in messages which are
+ DNS dynamic updates.
+ @type zone_rdclass: int
+ """
+
+ def __init__(self, wire, message, question_only=False,
+ one_rr_per_rrset=False, ignore_trailing=False):
+ self.wire = dns.wiredata.maybe_wrap(wire)
+ self.message = message
+ self.current = 0
+ self.updating = False
+ self.zone_rdclass = dns.rdataclass.IN
+ self.question_only = question_only
+ self.one_rr_per_rrset = one_rr_per_rrset
+ self.ignore_trailing = ignore_trailing
+
+ def _get_question(self, qcount):
+ """Read the next I{qcount} records from the wire data and add them to
+ the question section.
+ @param qcount: the number of questions in the message
+ @type qcount: int"""
+
+ if self.updating and qcount > 1:
+ raise dns.exception.FormError
+
+ for i in xrange(0, qcount):
+ (qname, used) = dns.name.from_wire(self.wire, self.current)
+ if self.message.origin is not None:
+ qname = qname.relativize(self.message.origin)
+ self.current = self.current + used
+ (rdtype, rdclass) = \
+ struct.unpack('!HH',
+ self.wire[self.current:self.current + 4])
+ self.current = self.current + 4
+ self.message.find_rrset(self.message.question, qname,
+ rdclass, rdtype, create=True,
+ force_unique=True)
+ if self.updating:
+ self.zone_rdclass = rdclass
+
+ def _get_section(self, section, count):
+ """Read the next I{count} records from the wire data and add them to
+ the specified section.
+ @param section: the section of the message to which to add records
+ @type section: list of dns.rrset.RRset objects
+ @param count: the number of records to read
+ @type count: int"""
+
+ if self.updating or self.one_rr_per_rrset:
+ force_unique = True
+ else:
+ force_unique = False
+ seen_opt = False
+ for i in xrange(0, count):
+ rr_start = self.current
+ (name, used) = dns.name.from_wire(self.wire, self.current)
+ absolute_name = name
+ if self.message.origin is not None:
+ name = name.relativize(self.message.origin)
+ self.current = self.current + used
+ (rdtype, rdclass, ttl, rdlen) = \
+ struct.unpack('!HHIH',
+ self.wire[self.current:self.current + 10])
+ self.current = self.current + 10
+ if rdtype == dns.rdatatype.OPT:
+ if section is not self.message.additional or seen_opt:
+ raise BadEDNS
+ self.message.payload = rdclass
+ self.message.ednsflags = ttl
+ self.message.edns = (ttl & 0xff0000) >> 16
+ self.message.options = []
+ current = self.current
+ optslen = rdlen
+ while optslen > 0:
+ (otype, olen) = \
+ struct.unpack('!HH',
+ self.wire[current:current + 4])
+ current = current + 4
+ opt = dns.edns.option_from_wire(
+ otype, self.wire, current, olen)
+ self.message.options.append(opt)
+ current = current + olen
+ optslen = optslen - 4 - olen
+ seen_opt = True
+ elif rdtype == dns.rdatatype.TSIG:
+ if not (section is self.message.additional and
+ i == (count - 1)):
+ raise BadTSIG
+ if self.message.keyring is None:
+ raise UnknownTSIGKey('got signed message without keyring')
+ secret = self.message.keyring.get(absolute_name)
+ if secret is None:
+ raise UnknownTSIGKey("key '%s' unknown" % name)
+ self.message.keyname = absolute_name
+ (self.message.keyalgorithm, self.message.mac) = \
+ dns.tsig.get_algorithm_and_mac(self.wire, self.current,
+ rdlen)
+ self.message.tsig_ctx = \
+ dns.tsig.validate(self.wire,
+ absolute_name,
+ secret,
+ int(time.time()),
+ self.message.request_mac,
+ rr_start,
+ self.current,
+ rdlen,
+ self.message.tsig_ctx,
+ self.message.multi,
+ self.message.first)
+ self.message.had_tsig = True
+ else:
+ if ttl < 0:
+ ttl = 0
+ if self.updating and \
+ (rdclass == dns.rdataclass.ANY or
+ rdclass == dns.rdataclass.NONE):
+ deleting = rdclass
+ rdclass = self.zone_rdclass
+ else:
+ deleting = None
+ if deleting == dns.rdataclass.ANY or \
+ (deleting == dns.rdataclass.NONE and
+ section is self.message.answer):
+ covers = dns.rdatatype.NONE
+ rd = None
+ else:
+ rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
+ self.current, rdlen,
+ self.message.origin)
+ covers = rd.covers()
+ if self.message.xfr and rdtype == dns.rdatatype.SOA:
+ force_unique = True
+ rrset = self.message.find_rrset(section, name,
+ rdclass, rdtype, covers,
+ deleting, True, force_unique)
+ if rd is not None:
+ rrset.add(rd, ttl)
+ self.current = self.current + rdlen
+
+ def read(self):
+ """Read a wire format DNS message and build a dns.message.Message
+ object."""
+
+ l = len(self.wire)
+ if l < 12:
+ raise ShortHeader
+ (self.message.id, self.message.flags, qcount, ancount,
+ aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
+ self.current = 12
+ if dns.opcode.is_update(self.message.flags):
+ self.updating = True
+ self._get_question(qcount)
+ if self.question_only:
+ return
+ self._get_section(self.message.answer, ancount)
+ self._get_section(self.message.authority, aucount)
+ self._get_section(self.message.additional, adcount)
+ if not self.ignore_trailing and self.current != l:
+ raise TrailingJunk
+ if self.message.multi and self.message.tsig_ctx and \
+ not self.message.had_tsig:
+ self.message.tsig_ctx.update(self.wire)
+
+
+def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None,
+ tsig_ctx=None, multi=False, first=True,
+ question_only=False, one_rr_per_rrset=False,
+ ignore_trailing=False):
+ """Convert a DNS wire format message into a message
+ object.
+
+ @param keyring: The keyring to use if the message is signed.
+ @type keyring: dict
+ @param request_mac: If the message is a response to a TSIG-signed request,
+ I{request_mac} should be set to the MAC of that request.
+ @type request_mac: string
+ @param xfr: Is this message part of a zone transfer?
+ @type xfr: bool
+ @param origin: If the message is part of a zone transfer, I{origin}
+ should be the origin name of the zone.
+ @type origin: dns.name.Name object
+ @param tsig_ctx: The ongoing TSIG context, used when validating zone
+ transfers.
+ @type tsig_ctx: hmac.HMAC object
+ @param multi: Is this message part of a multiple message sequence?
+ @type multi: bool
+ @param first: Is this message standalone, or the first of a multi
+ message sequence?
+ @type first: bool
+ @param question_only: Read only up to the end of the question section?
+ @type question_only: bool
+ @param one_rr_per_rrset: Put each RR into its own RRset
+ @type one_rr_per_rrset: bool
+ @param ignore_trailing: Ignore trailing junk at end of request?
+ @type ignore_trailing: bool
+ @raises ShortHeader: The message is less than 12 octets long.
+ @raises TrailingJunk: There were octets in the message past the end
+ of the proper DNS message.
+ @raises BadEDNS: An OPT record was in the wrong section, or occurred more
+ than once.
+ @raises BadTSIG: A TSIG record was not the last record of the additional
+ data section.
+ @rtype: dns.message.Message object"""
+
+ m = Message(id=0)
+ m.keyring = keyring
+ m.request_mac = request_mac
+ m.xfr = xfr
+ m.origin = origin
+ m.tsig_ctx = tsig_ctx
+ m.multi = multi
+ m.first = first
+
+ reader = _WireReader(wire, m, question_only, one_rr_per_rrset,
+ ignore_trailing)
+ reader.read()
+
+ return m
+
+
+class _TextReader(object):
+
+ """Text format reader.
+
+ @ivar tok: the tokenizer
+ @type tok: dns.tokenizer.Tokenizer object
+ @ivar message: The message object being built
+ @type message: dns.message.Message object
+ @ivar updating: Is the message a dynamic update?
+ @type updating: bool
+ @ivar zone_rdclass: The class of the zone in messages which are
+ DNS dynamic updates.
+ @type zone_rdclass: int
+ @ivar last_name: The most recently read name when building a message object
+ from text format.
+ @type last_name: dns.name.Name object
+ """
+
+ def __init__(self, text, message):
+ self.message = message
+ self.tok = dns.tokenizer.Tokenizer(text)
+ self.last_name = None
+ self.zone_rdclass = dns.rdataclass.IN
+ self.updating = False
+
+ def _header_line(self, section):
+ """Process one line from the text format header section."""
+
+ token = self.tok.get()
+ what = token.value
+ if what == 'id':
+ self.message.id = self.tok.get_int()
+ elif what == 'flags':
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.message.flags = self.message.flags | \
+ dns.flags.from_text(token.value)
+ if dns.opcode.is_update(self.message.flags):
+ self.updating = True
+ elif what == 'edns':
+ self.message.edns = self.tok.get_int()
+ self.message.ednsflags = self.message.ednsflags | \
+ (self.message.edns << 16)
+ elif what == 'eflags':
+ if self.message.edns < 0:
+ self.message.edns = 0
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.message.ednsflags = self.message.ednsflags | \
+ dns.flags.edns_from_text(token.value)
+ elif what == 'payload':
+ self.message.payload = self.tok.get_int()
+ if self.message.edns < 0:
+ self.message.edns = 0
+ elif what == 'opcode':
+ text = self.tok.get_string()
+ self.message.flags = self.message.flags | \
+ dns.opcode.to_flags(dns.opcode.from_text(text))
+ elif what == 'rcode':
+ text = self.tok.get_string()
+ self.message.set_rcode(dns.rcode.from_text(text))
+ else:
+ raise UnknownHeaderField
+ self.tok.get_eol()
+
+ def _question_line(self, section):
+ """Process one line from the text format question section."""
+
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(token.value, None)
+ name = self.last_name
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ self.message.find_rrset(self.message.question, name,
+ rdclass, rdtype, create=True,
+ force_unique=True)
+ if self.updating:
+ self.zone_rdclass = rdclass
+ self.tok.get_eol()
+
+ def _rr_line(self, section):
+ """Process one line from the text format answer, authority, or
+ additional data sections.
+ """
+
+ deleting = None
+ # Name
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(token.value, None)
+ name = self.last_name
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # TTL
+ try:
+ ttl = int(token.value, 0)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ ttl = 0
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
+ deleting = rdclass
+ rdclass = self.zone_rdclass
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_eol_or_eof():
+ self.tok.unget(token)
+ rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
+ covers = rd.covers()
+ else:
+ rd = None
+ covers = dns.rdatatype.NONE
+ rrset = self.message.find_rrset(section, name,
+ rdclass, rdtype, covers,
+ deleting, True, self.updating)
+ if rd is not None:
+ rrset.add(rd, ttl)
+
+ def read(self):
+ """Read a text format DNS message and build a dns.message.Message
+ object."""
+
+ line_method = self._header_line
+ section = None
+ while 1:
+ token = self.tok.get(True, True)
+ if token.is_eol_or_eof():
+ break
+ if token.is_comment():
+ u = token.value.upper()
+ if u == 'HEADER':
+ line_method = self._header_line
+ elif u == 'QUESTION' or u == 'ZONE':
+ line_method = self._question_line
+ section = self.message.question
+ elif u == 'ANSWER' or u == 'PREREQ':
+ line_method = self._rr_line
+ section = self.message.answer
+ elif u == 'AUTHORITY' or u == 'UPDATE':
+ line_method = self._rr_line
+ section = self.message.authority
+ elif u == 'ADDITIONAL':
+ line_method = self._rr_line
+ section = self.message.additional
+ self.tok.get_eol()
+ continue
+ self.tok.unget(token)
+ line_method(section)
+
+
+def from_text(text):
+ """Convert the text format message into a message object.
+
+ @param text: The text format message.
+ @type text: string
+ @raises UnknownHeaderField:
+ @raises dns.exception.SyntaxError:
+ @rtype: dns.message.Message object"""
+
+ # 'text' can also be a file, but we don't publish that fact
+ # since it's an implementation detail. The official file
+ # interface is from_file().
+
+ m = Message()
+
+ reader = _TextReader(text, m)
+ reader.read()
+
+ return m
+
+
+def from_file(f):
+ """Read the next text format message from the specified file.
+
+ @param f: file or string. If I{f} is a string, it is treated
+ as the name of a file to open.
+ @raises UnknownHeaderField:
+ @raises dns.exception.SyntaxError:
+ @rtype: dns.message.Message object"""
+
+ str_type = string_types
+ opts = 'rU'
+
+ if isinstance(f, str_type):
+ f = open(f, opts)
+ want_close = True
+ else:
+ want_close = False
+
+ try:
+ m = from_text(f)
+ finally:
+ if want_close:
+ f.close()
+ return m
+
+
+def make_query(qname, rdtype, rdclass=dns.rdataclass.IN, use_edns=None,
+ want_dnssec=False, ednsflags=None, payload=None,
+ request_payload=None, options=None):
+ """Make a query message.
+
+ The query name, type, and class may all be specified either
+ as objects of the appropriate type, or as strings.
+
+ The query will have a randomly chosen query id, and its DNS flags
+ will be set to dns.flags.RD.
+
+ @param qname: The query name.
+ @type qname: dns.name.Name object or string
+ @param rdtype: The desired rdata type.
+ @type rdtype: int
+ @param rdclass: The desired rdata class; the default is class IN.
+ @type rdclass: int
+ @param use_edns: The EDNS level to use; the default is None (no EDNS).
+ See the description of dns.message.Message.use_edns() for the possible
+ values for use_edns and their meanings.
+ @type use_edns: int or bool or None
+ @param want_dnssec: Should the query indicate that DNSSEC is desired?
+ @type want_dnssec: bool
+ @param ednsflags: EDNS flag values.
+ @type ednsflags: int
+ @param payload: The EDNS sender's payload field, which is the maximum
+ size of UDP datagram the sender can handle.
+ @type payload: int
+ @param request_payload: The EDNS payload size to use when sending
+ this message. If not specified, defaults to the value of payload.
+ @type request_payload: int or None
+ @param options: The EDNS options
+ @type options: None or list of dns.edns.Option objects
+ @see: RFC 2671
+ @rtype: dns.message.Message object"""
+
+ if isinstance(qname, string_types):
+ qname = dns.name.from_text(qname)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(rdclass, string_types):
+ rdclass = dns.rdataclass.from_text(rdclass)
+ m = Message()
+ m.flags |= dns.flags.RD
+ m.find_rrset(m.question, qname, rdclass, rdtype, create=True,
+ force_unique=True)
+ # only pass keywords on to use_edns if they have been set to a
+ # non-None value. Setting a field will turn EDNS on if it hasn't
+ # been configured.
+ kwargs = {}
+ if ednsflags is not None:
+ kwargs['ednsflags'] = ednsflags
+ if use_edns is None:
+ use_edns = 0
+ if payload is not None:
+ kwargs['payload'] = payload
+ if use_edns is None:
+ use_edns = 0
+ if request_payload is not None:
+ kwargs['request_payload'] = request_payload
+ if use_edns is None:
+ use_edns = 0
+ if options is not None:
+ kwargs['options'] = options
+ if use_edns is None:
+ use_edns = 0
+ kwargs['edns'] = use_edns
+ m.use_edns(**kwargs)
+ m.want_dnssec(want_dnssec)
+ return m
+
+
+def make_response(query, recursion_available=False, our_payload=8192,
+ fudge=300):
+ """Make a message which is a response for the specified query.
+ The message returned is really a response skeleton; it has all
+ of the infrastructure required of a response, but none of the
+ content.
+
+ The response's question section is a shallow copy of the query's
+ question section, so the query's question RRsets should not be
+ changed.
+
+ @param query: the query to respond to
+ @type query: dns.message.Message object
+ @param recursion_available: should RA be set in the response?
+ @type recursion_available: bool
+ @param our_payload: payload size to advertise in EDNS responses; default
+ is 8192.
+ @type our_payload: int
+ @param fudge: TSIG time fudge; default is 300 seconds.
+ @type fudge: int
+ @rtype: dns.message.Message object"""
+
+ if query.flags & dns.flags.QR:
+ raise dns.exception.FormError('specified query message is not a query')
+ response = dns.message.Message(query.id)
+ response.flags = dns.flags.QR | (query.flags & dns.flags.RD)
+ if recursion_available:
+ response.flags |= dns.flags.RA
+ response.set_opcode(query.opcode())
+ response.question = list(query.question)
+ if query.edns >= 0:
+ response.use_edns(0, 0, our_payload, query.payload)
+ if query.had_tsig:
+ response.use_tsig(query.keyring, query.keyname, fudge, None, 0, '',
+ query.keyalgorithm)
+ response.request_mac = query.mac
+ return response
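
A short sketch of the query/response helpers defined above. It assumes the complete vendored `dns` package is importable (Message.to_wire() pulls in dns.renderer and related modules); the snippet is illustrative and not part of the vendored source.

```
import dns.message

# Build a recursive query for the A records of example.com.
q = dns.message.make_query('example.com', 'A')
wire = q.to_wire()                      # compressed DNS wire format

# Parse the wire form back into a Message object.
parsed = dns.message.from_wire(wire)
assert parsed.id == q.id

# Skeleton response: QR bit set, question section copied from the query.
r = dns.message.make_response(q)
assert q.is_response(r)
```
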
diff --git a/lib/dns/name.py b/lib/dns/name.py
new file mode 100644
index 00000000..2a74694c
--- /dev/null
+++ b/lib/dns/name.py
@@ -0,0 +1,763 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Names.
+
+@var root: The DNS root name.
+@type root: dns.name.Name object
+@var empty: The empty DNS name.
+@type empty: dns.name.Name object
+"""
+
+from io import BytesIO
+import struct
+import sys
+import copy
+import encodings.idna
+
+import dns.exception
+import dns.wiredata
+
+from ._compat import long, binary_type, text_type, unichr
+
+try:
+ maxint = sys.maxint
+except AttributeError:
+    maxint = (1 << (8 * struct.calcsize("P"))) // 2 - 1
+
+NAMERELN_NONE = 0
+NAMERELN_SUPERDOMAIN = 1
+NAMERELN_SUBDOMAIN = 2
+NAMERELN_EQUAL = 3
+NAMERELN_COMMONANCESTOR = 4
+
+
+class EmptyLabel(dns.exception.SyntaxError):
+
+ """A DNS label is empty."""
+
+
+class BadEscape(dns.exception.SyntaxError):
+
+ """An escaped code in a text format of DNS name is invalid."""
+
+
+class BadPointer(dns.exception.FormError):
+
+ """A DNS compression pointer points forward instead of backward."""
+
+
+class BadLabelType(dns.exception.FormError):
+
+ """The label type in DNS name wire format is unknown."""
+
+
+class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
+
+ """An attempt was made to convert a non-absolute name to
+ wire when there was also a non-absolute (or missing) origin."""
+
+
+class NameTooLong(dns.exception.FormError):
+
+ """A DNS name is > 255 octets long."""
+
+
+class LabelTooLong(dns.exception.SyntaxError):
+
+ """A DNS label is > 63 octets long."""
+
+
+class AbsoluteConcatenation(dns.exception.DNSException):
+
+ """An attempt was made to append anything other than the
+ empty name to an absolute DNS name."""
+
+
+class NoParent(dns.exception.DNSException):
+
+ """An attempt was made to get the parent of the root name
+ or the empty name."""
+
+_escaped = bytearray(b'"().;\\@$')
+
+
+def _escapify(label, unicode_mode=False):
+ """Escape the characters in label which need it.
+ @param unicode_mode: escapify only special and whitespace (<= 0x20)
+ characters
+ @returns: the escaped string
+ @rtype: string"""
+ if not unicode_mode:
+ text = ''
+ if isinstance(label, text_type):
+ label = label.encode()
+ for c in bytearray(label):
+ packed = struct.pack('!B', c).decode()
+ if c in _escaped:
+ text += '\\' + packed
+ elif c > 0x20 and c < 0x7F:
+ text += packed
+ else:
+ text += '\\%03d' % c
+ return text.encode()
+
+ text = u''
+ if isinstance(label, binary_type):
+ label = label.decode()
+ for c in label:
+ if c > u'\x20' and c < u'\x7f':
+ text += c
+ else:
+ if c >= u'\x7f':
+ text += c
+ else:
+ text += u'\\%03d' % c
+ return text
+
+
+def _validate_labels(labels):
+ """Check for empty labels in the middle of a label sequence,
+ labels that are too long, and for too many labels.
+ @raises NameTooLong: the name as a whole is too long
+ @raises EmptyLabel: a label is empty (i.e. the root label) and appears
+ in a position other than the end of the label sequence"""
+
+ l = len(labels)
+ total = 0
+ i = -1
+ j = 0
+ for label in labels:
+ ll = len(label)
+ total += ll + 1
+ if ll > 63:
+ raise LabelTooLong
+ if i < 0 and label == b'':
+ i = j
+ j += 1
+ if total > 255:
+ raise NameTooLong
+ if i >= 0 and i != l - 1:
+ raise EmptyLabel
+
+
+def _ensure_bytes(label):
+ if isinstance(label, binary_type):
+ return label
+ if isinstance(label, text_type):
+ return label.encode()
+ raise ValueError
+
+
+class Name(object):
+
+ """A DNS name.
+
+ The dns.name.Name class represents a DNS name as a tuple of labels.
+ Instances of the class are immutable.
+
+ @ivar labels: The tuple of labels in the name. Each label is a string of
+ up to 63 octets."""
+
+ __slots__ = ['labels']
+
+ def __init__(self, labels):
+ """Initialize a domain name from a list of labels.
+ @param labels: the labels
+ @type labels: any iterable whose values are strings
+ """
+ labels = [_ensure_bytes(x) for x in labels]
+ super(Name, self).__setattr__('labels', tuple(labels))
+ _validate_labels(self.labels)
+
+ def __setattr__(self, name, value):
+ raise TypeError("object doesn't support attribute assignment")
+
+ def __copy__(self):
+ return Name(self.labels)
+
+ def __deepcopy__(self, memo):
+ return Name(copy.deepcopy(self.labels, memo))
+
+ def __getstate__(self):
+ return {'labels': self.labels}
+
+ def __setstate__(self, state):
+ super(Name, self).__setattr__('labels', state['labels'])
+ _validate_labels(self.labels)
+
+ def is_absolute(self):
+ """Is the most significant label of this name the root label?
+ @rtype: bool
+ """
+
+ return len(self.labels) > 0 and self.labels[-1] == b''
+
+ def is_wild(self):
+ """Is this name wild? (I.e. Is the least significant label '*'?)
+ @rtype: bool
+ """
+
+ return len(self.labels) > 0 and self.labels[0] == b'*'
+
+ def __hash__(self):
+ """Return a case-insensitive hash of the name.
+ @rtype: int
+ """
+
+ h = long(0)
+ for label in self.labels:
+ for c in bytearray(label.lower()):
+ h += (h << 3) + c
+ return int(h % maxint)
+
+ def fullcompare(self, other):
+ """Compare two names, returning a 3-tuple (relation, order, nlabels).
+
+    I{relation} describes the relationship between the names,
+ and is one of: dns.name.NAMERELN_NONE,
+ dns.name.NAMERELN_SUPERDOMAIN, dns.name.NAMERELN_SUBDOMAIN,
+ dns.name.NAMERELN_EQUAL, or dns.name.NAMERELN_COMMONANCESTOR
+
+ I{order} is < 0 if self < other, > 0 if self > other, and ==
+ 0 if self == other. A relative name is always less than an
+ absolute name. If both names have the same relativity, then
+ the DNSSEC order relation is used to order them.
+
+ I{nlabels} is the number of significant labels that the two names
+ have in common.
+ """
+
+ sabs = self.is_absolute()
+ oabs = other.is_absolute()
+ if sabs != oabs:
+ if sabs:
+ return (NAMERELN_NONE, 1, 0)
+ else:
+ return (NAMERELN_NONE, -1, 0)
+ l1 = len(self.labels)
+ l2 = len(other.labels)
+ ldiff = l1 - l2
+ if ldiff < 0:
+ l = l1
+ else:
+ l = l2
+
+ order = 0
+ nlabels = 0
+ namereln = NAMERELN_NONE
+ while l > 0:
+ l -= 1
+ l1 -= 1
+ l2 -= 1
+ label1 = self.labels[l1].lower()
+ label2 = other.labels[l2].lower()
+ if label1 < label2:
+ order = -1
+ if nlabels > 0:
+ namereln = NAMERELN_COMMONANCESTOR
+ return (namereln, order, nlabels)
+ elif label1 > label2:
+ order = 1
+ if nlabels > 0:
+ namereln = NAMERELN_COMMONANCESTOR
+ return (namereln, order, nlabels)
+ nlabels += 1
+ order = ldiff
+ if ldiff < 0:
+ namereln = NAMERELN_SUPERDOMAIN
+ elif ldiff > 0:
+ namereln = NAMERELN_SUBDOMAIN
+ else:
+ namereln = NAMERELN_EQUAL
+ return (namereln, order, nlabels)
+
+ def is_subdomain(self, other):
+ """Is self a subdomain of other?
+
+ The notion of subdomain includes equality.
+ @rtype: bool
+ """
+
+ (nr, o, nl) = self.fullcompare(other)
+ if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
+ return True
+ return False
+
+ def is_superdomain(self, other):
+ """Is self a superdomain of other?
+
+ The notion of subdomain includes equality.
+ @rtype: bool
+ """
+
+ (nr, o, nl) = self.fullcompare(other)
+ if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
+ return True
+ return False
+
+ def canonicalize(self):
+ """Return a name which is equal to the current name, but is in
+ DNSSEC canonical form.
+ @rtype: dns.name.Name object
+ """
+
+ return Name([x.lower() for x in self.labels])
+
+ def __eq__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] == 0
+ else:
+ return False
+
+ def __ne__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] != 0
+ else:
+ return True
+
+ def __lt__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] < 0
+ else:
+ return NotImplemented
+
+ def __le__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] <= 0
+ else:
+ return NotImplemented
+
+ def __ge__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] >= 0
+ else:
+ return NotImplemented
+
+ def __gt__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] > 0
+ else:
+ return NotImplemented
+
+ def __repr__(self):
+        return '<DNS name ' + self.__str__() + '>'
+
+ def __str__(self):
+ return self.to_text(False).decode()
+
+ def to_text(self, omit_final_dot=False):
+ """Convert name to text format.
+ @param omit_final_dot: If True, don't emit the final dot (denoting the
+ root label) for absolute names. The default is False.
+ @rtype: string
+ """
+
+ if len(self.labels) == 0:
+ return b'@'
+ if len(self.labels) == 1 and self.labels[0] == b'':
+ return b'.'
+ if omit_final_dot and self.is_absolute():
+ l = self.labels[:-1]
+ else:
+ l = self.labels
+ s = b'.'.join(map(_escapify, l))
+ return s
+
+ def to_unicode(self, omit_final_dot=False):
+ """Convert name to Unicode text format.
+
+ IDN ACE labels are converted to Unicode.
+
+ @param omit_final_dot: If True, don't emit the final dot (denoting the
+ root label) for absolute names. The default is False.
+ @rtype: string
+ """
+
+ if len(self.labels) == 0:
+ return u'@'
+ if len(self.labels) == 1 and self.labels[0] == '':
+ return u'.'
+ if omit_final_dot and self.is_absolute():
+ l = self.labels[:-1]
+ else:
+ l = self.labels
+ s = u'.'.join([_escapify(encodings.idna.ToUnicode(x), True)
+ for x in l])
+ return s
+
+ def to_digestable(self, origin=None):
+ """Convert name to a format suitable for digesting in hashes.
+
+ The name is canonicalized and converted to uncompressed wire format.
+
+ @param origin: If the name is relative and origin is not None, then
+ origin will be appended to it.
+ @type origin: dns.name.Name object
+ @raises NeedAbsoluteNameOrOrigin: All names in wire format are
+ absolute. If self is a relative name, then an origin must be supplied;
+ if it is missing, then this exception is raised
+ @rtype: string
+ """
+
+ if not self.is_absolute():
+ if origin is None or not origin.is_absolute():
+ raise NeedAbsoluteNameOrOrigin
+ labels = list(self.labels)
+ labels.extend(list(origin.labels))
+ else:
+ labels = self.labels
+ dlabels = [struct.pack('!B%ds' % len(x), len(x), x.lower())
+ for x in labels]
+ return b''.join(dlabels)
+
+ def to_wire(self, file=None, compress=None, origin=None):
+ """Convert name to wire format, possibly compressing it.
+
+ @param file: the file where the name is emitted (typically
+ a BytesIO file). If None, a string containing the wire name
+ will be returned.
+ @type file: file or None
+ @param compress: The compression table. If None (the default) names
+ will not be compressed.
+ @type compress: dict
+ @param origin: If the name is relative and origin is not None, then
+ origin will be appended to it.
+ @type origin: dns.name.Name object
+ @raises NeedAbsoluteNameOrOrigin: All names in wire format are
+ absolute. If self is a relative name, then an origin must be supplied;
+ if it is missing, then this exception is raised
+ """
+
+ if file is None:
+ file = BytesIO()
+ want_return = True
+ else:
+ want_return = False
+
+ if not self.is_absolute():
+ if origin is None or not origin.is_absolute():
+ raise NeedAbsoluteNameOrOrigin
+ labels = list(self.labels)
+ labels.extend(list(origin.labels))
+ else:
+ labels = self.labels
+ i = 0
+ for label in labels:
+ n = Name(labels[i:])
+ i += 1
+ if compress is not None:
+ pos = compress.get(n)
+ else:
+ pos = None
+ if pos is not None:
+ value = 0xc000 + pos
+ s = struct.pack('!H', value)
+ file.write(s)
+ break
+ else:
+ if compress is not None and len(n) > 1:
+ pos = file.tell()
+ if pos <= 0x3fff:
+ compress[n] = pos
+ l = len(label)
+ file.write(struct.pack('!B', l))
+ if l > 0:
+ file.write(label)
+ if want_return:
+ return file.getvalue()
+
+ def __len__(self):
+ """The length of the name (in labels).
+ @rtype: int
+ """
+
+ return len(self.labels)
+
+ def __getitem__(self, index):
+ return self.labels[index]
+
+ def __getslice__(self, start, stop):
+ return self.labels[start:stop]
+
+ def __add__(self, other):
+ return self.concatenate(other)
+
+ def __sub__(self, other):
+ return self.relativize(other)
+
+ def split(self, depth):
+ """Split a name into a prefix and suffix at depth.
+
+ @param depth: the number of labels in the suffix
+ @type depth: int
+ @raises ValueError: the depth was not >= 0 and <= the length of the
+ name.
+ @returns: the tuple (prefix, suffix)
+ @rtype: tuple
+ """
+
+ l = len(self.labels)
+ if depth == 0:
+ return (self, dns.name.empty)
+ elif depth == l:
+ return (dns.name.empty, self)
+ elif depth < 0 or depth > l:
+ raise ValueError(
+ 'depth must be >= 0 and <= the length of the name')
+ return (Name(self[: -depth]), Name(self[-depth:]))
+
+ def concatenate(self, other):
+ """Return a new name which is the concatenation of self and other.
+ @rtype: dns.name.Name object
+ @raises AbsoluteConcatenation: self is absolute and other is
+ not the empty name
+ """
+
+ if self.is_absolute() and len(other) > 0:
+ raise AbsoluteConcatenation
+ labels = list(self.labels)
+ labels.extend(list(other.labels))
+ return Name(labels)
+
+ def relativize(self, origin):
+ """If self is a subdomain of origin, return a new name which is self
+ relative to origin. Otherwise return self.
+ @rtype: dns.name.Name object
+ """
+
+ if origin is not None and self.is_subdomain(origin):
+ return Name(self[: -len(origin)])
+ else:
+ return self
+
+ def derelativize(self, origin):
+ """If self is a relative name, return a new name which is the
+ concatenation of self and origin. Otherwise return self.
+ @rtype: dns.name.Name object
+ """
+
+ if not self.is_absolute():
+ return self.concatenate(origin)
+ else:
+ return self
+
+ def choose_relativity(self, origin=None, relativize=True):
+ """Return a name with the relativity desired by the caller. If
+ origin is None, then self is returned. Otherwise, if
+ relativize is true the name is relativized, and if relativize is
+ false the name is derelativized.
+ @rtype: dns.name.Name object
+ """
+
+ if origin:
+ if relativize:
+ return self.relativize(origin)
+ else:
+ return self.derelativize(origin)
+ else:
+ return self
+
+ def parent(self):
+ """Return the parent of the name.
+ @rtype: dns.name.Name object
+ @raises NoParent: the name is either the root name or the empty name,
+ and thus has no parent.
+ """
+ if self == root or self == empty:
+ raise NoParent
+ return Name(self.labels[1:])
+
+root = Name([b''])
+empty = Name([])
+
+
+def from_unicode(text, origin=root):
+ """Convert unicode text into a Name object.
+
+ Labels are encoded in IDN ACE form.
+
+ @rtype: dns.name.Name object
+ """
+
+ if not isinstance(text, text_type):
+ raise ValueError("input to from_unicode() must be a unicode string")
+ if not (origin is None or isinstance(origin, Name)):
+ raise ValueError("origin must be a Name or None")
+ labels = []
+ label = u''
+ escaping = False
+ edigits = 0
+ total = 0
+ if text == u'@':
+ text = u''
+ if text:
+ if text == u'.':
+ return Name([b'']) # no Unicode "u" on this constant!
+ for c in text:
+ if escaping:
+ if edigits == 0:
+ if c.isdigit():
+ total = int(c)
+ edigits += 1
+ else:
+ label += c
+ escaping = False
+ else:
+ if not c.isdigit():
+ raise BadEscape
+ total *= 10
+ total += int(c)
+ edigits += 1
+ if edigits == 3:
+ escaping = False
+ label += unichr(total)
+ elif c in [u'.', u'\u3002', u'\uff0e', u'\uff61']:
+ if len(label) == 0:
+ raise EmptyLabel
+ try:
+ labels.append(encodings.idna.ToASCII(label))
+ except UnicodeError:
+ raise LabelTooLong
+ label = u''
+ elif c == u'\\':
+ escaping = True
+ edigits = 0
+ total = 0
+ else:
+ label += c
+ if escaping:
+ raise BadEscape
+ if len(label) > 0:
+ try:
+ labels.append(encodings.idna.ToASCII(label))
+ except UnicodeError:
+ raise LabelTooLong
+ else:
+ labels.append(b'')
+
+ if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
+ labels.extend(list(origin.labels))
+ return Name(labels)
+
+
+def from_text(text, origin=root):
+ """Convert text into a Name object.
+ @rtype: dns.name.Name object
+ """
+
+ if isinstance(text, text_type):
+ return from_unicode(text, origin)
+ if not isinstance(text, binary_type):
+ raise ValueError("input to from_text() must be a string")
+ if not (origin is None or isinstance(origin, Name)):
+ raise ValueError("origin must be a Name or None")
+ labels = []
+ label = b''
+ escaping = False
+ edigits = 0
+ total = 0
+ if text == b'@':
+ text = b''
+ if text:
+ if text == b'.':
+ return Name([b''])
+ for c in bytearray(text):
+ byte_ = struct.pack('!B', c)
+ if escaping:
+ if edigits == 0:
+ if byte_.isdigit():
+ total = int(byte_)
+ edigits += 1
+ else:
+ label += byte_
+ escaping = False
+ else:
+ if not byte_.isdigit():
+ raise BadEscape
+ total *= 10
+ total += int(byte_)
+ edigits += 1
+ if edigits == 3:
+ escaping = False
+ label += struct.pack('!B', total)
+ elif byte_ == b'.':
+ if len(label) == 0:
+ raise EmptyLabel
+ labels.append(label)
+ label = b''
+ elif byte_ == b'\\':
+ escaping = True
+ edigits = 0
+ total = 0
+ else:
+ label += byte_
+ if escaping:
+ raise BadEscape
+ if len(label) > 0:
+ labels.append(label)
+ else:
+ labels.append(b'')
+ if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
+ labels.extend(list(origin.labels))
+ return Name(labels)
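+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+#
+#     dns.name.from_text('example.com')               # origin=root, so the
+#                                                     # result is absolute
+#     dns.name.from_text('example.com', origin=None)  # stays relative
+#     dns.name.from_unicode(u'www.ex\u00e4mple.de')   # non-ASCII labels are
+#                                                     # encoded to IDNA ACE form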
+
+
+def from_wire(message, current):
+ """Convert possibly compressed wire format into a Name.
+ @param message: the entire DNS message
+ @type message: string
+ @param current: the offset of the beginning of the name from the start
+ of the message
+ @type current: int
+ @raises dns.name.BadPointer: a compression pointer did not point backwards
+ in the message
+ @raises dns.name.BadLabelType: an invalid label type was encountered.
+ @returns: a tuple consisting of the name that was read and the number
+ of bytes of the wire format message which were consumed reading it
+ @rtype: (dns.name.Name object, int) tuple
+ """
+
+ if not isinstance(message, binary_type):
+ raise ValueError("input to from_wire() must be a byte string")
+ message = dns.wiredata.maybe_wrap(message)
+ labels = []
+ biggest_pointer = current
+ hops = 0
+ count = message[current]
+ current += 1
+ cused = 1
+ while count != 0:
+ if count < 64:
+ labels.append(message[current: current + count].unwrap())
+ current += count
+ if hops == 0:
+ cused += count
+ elif count >= 192:
+ current = (count & 0x3f) * 256 + message[current]
+ if hops == 0:
+ cused += 1
+ if current >= biggest_pointer:
+ raise BadPointer
+ biggest_pointer = current
+ hops += 1
+ else:
+ raise BadLabelType
+ count = message[current]
+ current += 1
+ if hops == 0:
+ cused += 1
+ labels.append('')
+ return (Name(labels), cused)
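+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+# an uncompressed name starting at offset 0 of a (here, name-only) message.
+#
+#     wire = b'\x03www\x07example\x03com\x00'
+#     name, used = dns.name.from_wire(wire, 0)   # www.example.com., used == 17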
diff --git a/lib/dns/namedict.py b/lib/dns/namedict.py
new file mode 100644
index 00000000..58e40344
--- /dev/null
+++ b/lib/dns/namedict.py
@@ -0,0 +1,104 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+# Copyright (C) 2016 Coresec Systems AB
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL CORESEC
+# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS name dictionary"""
+
+import collections
+import dns.name
+from ._compat import xrange
+
+
+class NameDict(collections.MutableMapping):
+
+ """A dictionary whose keys are dns.name.Name objects.
+ @ivar max_depth: the maximum depth of the keys that have ever been
+ added to the dictionary.
+ @type max_depth: int
+ @ivar max_depth_items: the number of items of maximum depth
+ @type max_depth_items: int
+ """
+
+ __slots__ = ["max_depth", "max_depth_items", "__store"]
+
+ def __init__(self, *args, **kwargs):
+ self.__store = dict()
+ self.max_depth = 0
+ self.max_depth_items = 0
+ self.update(dict(*args, **kwargs))
+
+ def __update_max_depth(self, key):
+ if len(key) == self.max_depth:
+ self.max_depth_items = self.max_depth_items + 1
+ elif len(key) > self.max_depth:
+ self.max_depth = len(key)
+ self.max_depth_items = 1
+
+ def __getitem__(self, key):
+ return self.__store[key]
+
+ def __setitem__(self, key, value):
+ if not isinstance(key, dns.name.Name):
+ raise ValueError('NameDict key must be a name')
+ self.__store[key] = value
+ self.__update_max_depth(key)
+
+ def __delitem__(self, key):
+ value = self.__store.pop(key)
+ if len(value) == self.max_depth:
+ self.max_depth_items = self.max_depth_items - 1
+ if self.max_depth_items == 0:
+ self.max_depth = 0
+ for k in self.__store:
+ self.__update_max_depth(k)
+
+ def __iter__(self):
+ return iter(self.__store)
+
+ def __len__(self):
+ return len(self.__store)
+
+ def has_key(self, key):
+ return key in self.__store
+
+ def get_deepest_match(self, name):
+ """Find the deepest match to I{name} in the dictionary.
+
+ The deepest match is the longest name in the dictionary which is
+ a superdomain of I{name}.
+
+ @param name: the name
+ @type name: dns.name.Name object
+ @rtype: (key, value) tuple
+ """
+
+ depth = len(name)
+ if depth > self.max_depth:
+ depth = self.max_depth
+ for i in xrange(-depth, 0):
+ n = dns.name.Name(name[i:])
+ if n in self:
+ return (n, self[n])
+ v = self[dns.name.empty]
+ return (dns.name.empty, v)
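+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+#
+#     d = dns.namedict.NameDict()
+#     d[dns.name.from_text('example.com')] = 'example zone'
+#     d[dns.name.empty] = 'default'
+#     d.get_deepest_match(dns.name.from_text('www.example.com'))
+#     # -> (example.com., 'example zone'); unmatched names fall back to
+#     #    the dns.name.empty entry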
diff --git a/lib/dns/node.py b/lib/dns/node.py
new file mode 100644
index 00000000..7c25060e
--- /dev/null
+++ b/lib/dns/node.py
@@ -0,0 +1,178 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS nodes. A node is a set of rdatasets."""
+
+from io import StringIO
+
+import dns.rdataset
+import dns.rdatatype
+import dns.renderer
+
+
+class Node(object):
+
+ """A DNS node.
+
+ A node is a set of rdatasets
+
+ @ivar rdatasets: the node's rdatasets
+ @type rdatasets: list of dns.rdataset.Rdataset objects"""
+
+ __slots__ = ['rdatasets']
+
+ def __init__(self):
+ """Initialize a DNS node.
+ """
+
+ self.rdatasets = []
+
+ def to_text(self, name, **kw):
+ """Convert a node to text format.
+
+ Each rdataset at the node is printed. Any keyword arguments
+ to this method are passed on to the rdataset's to_text() method.
+ @param name: the owner name of the rdatasets
+ @type name: dns.name.Name object
+ @rtype: string
+ """
+
+ s = StringIO()
+ for rds in self.rdatasets:
+ if len(rds) > 0:
+ s.write(rds.to_text(name, **kw))
+ s.write(u'\n')
+ return s.getvalue()[:-1]
+
+ def __repr__(self):
+        return '<DNS node ' + str(id(self)) + '>'
+
+ def __eq__(self, other):
+ """Two nodes are equal if they have the same rdatasets.
+
+ @rtype: bool
+ """
+ #
+ # This is inefficient. Good thing we don't need to do it much.
+ #
+ for rd in self.rdatasets:
+ if rd not in other.rdatasets:
+ return False
+ for rd in other.rdatasets:
+ if rd not in self.rdatasets:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __len__(self):
+ return len(self.rdatasets)
+
+ def __iter__(self):
+ return iter(self.rdatasets)
+
+ def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Find an rdataset matching the specified properties in the
+ current node.
+
+ @param rdclass: The class of the rdataset
+ @type rdclass: int
+ @param rdtype: The type of the rdataset
+ @type rdtype: int
+ @param covers: The covered type. Usually this value is
+ dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+ dns.rdatatype.RRSIG, then the covers value will be the rdata
+ type the SIG/RRSIG covers. The library treats the SIG and RRSIG
+ types as if they were a family of
+ types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
+ easier to work with than if RRSIGs covering different rdata
+ types were aggregated into a single RRSIG rdataset.
+ @type covers: int
+ @param create: If True, create the rdataset if it is not found.
+ @type create: bool
+ @raises KeyError: An rdataset of the desired type and class does
+ not exist and I{create} is not True.
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ for rds in self.rdatasets:
+ if rds.match(rdclass, rdtype, covers):
+ return rds
+ if not create:
+ raise KeyError
+ rds = dns.rdataset.Rdataset(rdclass, rdtype)
+ self.rdatasets.append(rds)
+ return rds
+
+ def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Get an rdataset matching the specified properties in the
+ current node.
+
+ None is returned if an rdataset of the specified type and
+ class does not exist and I{create} is not True.
+
+ @param rdclass: The class of the rdataset
+ @type rdclass: int
+ @param rdtype: The type of the rdataset
+ @type rdtype: int
+ @param covers: The covered type.
+ @type covers: int
+ @param create: If True, create the rdataset if it is not found.
+ @type create: bool
+ @rtype: dns.rdataset.Rdataset object or None
+ """
+
+ try:
+ rds = self.find_rdataset(rdclass, rdtype, covers, create)
+ except KeyError:
+ rds = None
+ return rds
+
+ def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
+ """Delete the rdataset matching the specified properties in the
+ current node.
+
+ If a matching rdataset does not exist, it is not an error.
+
+ @param rdclass: The class of the rdataset
+ @type rdclass: int
+ @param rdtype: The type of the rdataset
+ @type rdtype: int
+ @param covers: The covered type.
+ @type covers: int
+ """
+
+ rds = self.get_rdataset(rdclass, rdtype, covers)
+ if rds is not None:
+ self.rdatasets.remove(rds)
+
+ def replace_rdataset(self, replacement):
+ """Replace an rdataset.
+
+ It is not an error if there is no rdataset matching I{replacement}.
+
+ Ownership of the I{replacement} object is transferred to the node;
+ in other words, this method does not store a copy of I{replacement}
+ at the node, it stores I{replacement} itself.
+ """
+
+ if not isinstance(replacement, dns.rdataset.Rdataset):
+ raise ValueError('replacement is not an rdataset')
+ self.delete_rdataset(replacement.rdclass, replacement.rdtype,
+ replacement.covers)
+ self.rdatasets.append(replacement)
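+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+#
+#     node = dns.node.Node()
+#     rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.A, create=True)
+#     rds.add(dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A,
+#                                 '192.0.2.1'), ttl=300)
+#     node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.AAAA)   # -> None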
diff --git a/lib/dns/opcode.py b/lib/dns/opcode.py
new file mode 100644
index 00000000..2b2918e9
--- /dev/null
+++ b/lib/dns/opcode.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Opcodes."""
+
+import dns.exception
+
+QUERY = 0
+IQUERY = 1
+STATUS = 2
+NOTIFY = 4
+UPDATE = 5
+
+_by_text = {
+ 'QUERY': QUERY,
+ 'IQUERY': IQUERY,
+ 'STATUS': STATUS,
+ 'NOTIFY': NOTIFY,
+ 'UPDATE': UPDATE
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict((y, x) for x, y in _by_text.items())
+
+
+class UnknownOpcode(dns.exception.DNSException):
+
+ """An DNS opcode is unknown."""
+
+
+def from_text(text):
+ """Convert text into an opcode.
+
+ @param text: the textual opcode
+ @type text: string
+ @raises UnknownOpcode: the opcode is unknown
+ @rtype: int
+ """
+
+ if text.isdigit():
+ value = int(text)
+ if value >= 0 and value <= 15:
+ return value
+ value = _by_text.get(text.upper())
+ if value is None:
+ raise UnknownOpcode
+ return value
+
+
+def from_flags(flags):
+ """Extract an opcode from DNS message flags.
+
+    @param flags: the DNS message flags
+    @type flags: int
+ @rtype: int
+ """
+
+ return (flags & 0x7800) >> 11
+
+
+def to_flags(value):
+ """Convert an opcode to a value suitable for ORing into DNS message
+ flags.
+ @rtype: int
+ """
+
+ return (value << 11) & 0x7800
+
+
+def to_text(value):
+ """Convert an opcode to text.
+
+    @param value: the opcode
+ @type value: int
+ @raises UnknownOpcode: the opcode is unknown
+ @rtype: string
+ """
+
+ text = _by_value.get(value)
+ if text is None:
+ text = str(value)
+ return text
+
+
+def is_update(flags):
+ """True if the opcode in flags is UPDATE.
+
+ @param flags: DNS flags
+ @type flags: int
+ @rtype: bool
+ """
+
+ if (from_flags(flags) == UPDATE):
+ return True
+ return False
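+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+#
+#     dns.opcode.from_text('UPDATE')           # -> 5
+#     dns.opcode.to_flags(dns.opcode.UPDATE)   # -> 0x2800 (bits 11-14 of flags)
+#     dns.opcode.is_update(0x2800)             # -> True
+#     dns.opcode.to_text(2)                    # -> 'STATUS'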
diff --git a/lib/dns/query.py b/lib/dns/query.py
new file mode 100644
index 00000000..35670983
--- /dev/null
+++ b/lib/dns/query.py
@@ -0,0 +1,536 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Talk to a DNS server."""
+
+from __future__ import generators
+
+import errno
+import select
+import socket
+import struct
+import sys
+import time
+
+import dns.exception
+import dns.inet
+import dns.name
+import dns.message
+import dns.rdataclass
+import dns.rdatatype
+from ._compat import long, string_types
+
+if sys.version_info > (3,):
+ select_error = OSError
+else:
+ select_error = select.error
+
+
+class UnexpectedSource(dns.exception.DNSException):
+
+ """A DNS query response came from an unexpected address or port."""
+
+
+class BadResponse(dns.exception.FormError):
+
+ """A DNS query response does not respond to the question asked."""
+
+
+def _compute_expiration(timeout):
+ if timeout is None:
+ return None
+ else:
+ return time.time() + timeout
+
+
+def _poll_for(fd, readable, writable, error, timeout):
+ """Poll polling backend.
+ @param fd: File descriptor
+ @type fd: int
+ @param readable: Whether to wait for readability
+ @type readable: bool
+ @param writable: Whether to wait for writability
+ @type writable: bool
+ @param timeout: Deadline timeout (expiration time, in seconds)
+ @type timeout: float
+ @return True on success, False on timeout
+ """
+ event_mask = 0
+ if readable:
+ event_mask |= select.POLLIN
+ if writable:
+ event_mask |= select.POLLOUT
+ if error:
+ event_mask |= select.POLLERR
+
+ pollable = select.poll()
+ pollable.register(fd, event_mask)
+
+ if timeout:
+ event_list = pollable.poll(long(timeout * 1000))
+ else:
+ event_list = pollable.poll()
+
+ return bool(event_list)
+
+
+def _select_for(fd, readable, writable, error, timeout):
+ """Select polling backend.
+ @param fd: File descriptor
+ @type fd: int
+ @param readable: Whether to wait for readability
+ @type readable: bool
+ @param writable: Whether to wait for writability
+ @type writable: bool
+ @param timeout: Deadline timeout (expiration time, in seconds)
+ @type timeout: float
+ @return True on success, False on timeout
+ """
+ rset, wset, xset = [], [], []
+
+ if readable:
+ rset = [fd]
+ if writable:
+ wset = [fd]
+ if error:
+ xset = [fd]
+
+ if timeout is None:
+ (rcount, wcount, xcount) = select.select(rset, wset, xset)
+ else:
+ (rcount, wcount, xcount) = select.select(rset, wset, xset, timeout)
+
+ return bool((rcount or wcount or xcount))
+
+
+def _wait_for(fd, readable, writable, error, expiration):
+ done = False
+ while not done:
+ if expiration is None:
+ timeout = None
+ else:
+ timeout = expiration - time.time()
+ if timeout <= 0.0:
+ raise dns.exception.Timeout
+ try:
+ if not _polling_backend(fd, readable, writable, error, timeout):
+ raise dns.exception.Timeout
+ except select_error as e:
+ if e.args[0] != errno.EINTR:
+ raise e
+ done = True
+
+
+def _set_polling_backend(fn):
+ """
+ Internal API. Do not use.
+ """
+ global _polling_backend
+
+ _polling_backend = fn
+
+if hasattr(select, 'poll'):
+ # Prefer poll() on platforms that support it because it has no
+ # limits on the maximum value of a file descriptor (plus it will
+ # be more efficient for high values).
+ _polling_backend = _poll_for
+else:
+ _polling_backend = _select_for
+
+
+def _wait_for_readable(s, expiration):
+ _wait_for(s, True, False, True, expiration)
+
+
+def _wait_for_writable(s, expiration):
+ _wait_for(s, False, True, True, expiration)
+
+
+def _addresses_equal(af, a1, a2):
+ # Convert the first value of the tuple, which is a textual format
+ # address into binary form, so that we are not confused by different
+ # textual representations of the same address
+ n1 = dns.inet.inet_pton(af, a1[0])
+ n2 = dns.inet.inet_pton(af, a2[0])
+ return n1 == n2 and a1[1:] == a2[1:]
+
+
+def _destination_and_source(af, where, port, source, source_port):
+ # Apply defaults and compute destination and source tuples
+ # suitable for use in connect(), sendto(), or bind().
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None or source_port != 0:
+ if source is None:
+ source = '0.0.0.0'
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ destination = (where, port, 0, 0)
+ if source is not None or source_port != 0:
+ if source is None:
+ source = '::'
+ source = (source, source_port, 0, 0)
+ return (af, destination, source)
+
+
+def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+ ignore_unexpected=False, one_rr_per_rrset=False):
+ """Return the response obtained after sending a query via UDP.
+
+ @param q: the query
+ @type q: dns.message.Message
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+    @param af: the address family to use. The default is None, which
+    causes the address family to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param ignore_unexpected: If True, ignore responses from unexpected
+ sources. The default is False.
+ @type ignore_unexpected: bool
+ @param one_rr_per_rrset: Put each RR into its own RRset
+ @type one_rr_per_rrset: bool
+ """
+
+ wire = q.to_wire()
+ (af, destination, source) = _destination_and_source(af, where, port,
+ source, source_port)
+ s = socket.socket(af, socket.SOCK_DGRAM, 0)
+ begin_time = None
+ try:
+ expiration = _compute_expiration(timeout)
+ s.setblocking(0)
+ if source is not None:
+ s.bind(source)
+ _wait_for_writable(s, expiration)
+ begin_time = time.time()
+ s.sendto(wire, destination)
+ while 1:
+ _wait_for_readable(s, expiration)
+ (wire, from_address) = s.recvfrom(65535)
+ if _addresses_equal(af, from_address, destination) or \
+ (dns.inet.is_multicast(where) and
+ from_address[1:] == destination[1:]):
+ break
+ if not ignore_unexpected:
+ raise UnexpectedSource('got a response from '
+ '%s instead of %s' % (from_address,
+ destination))
+ finally:
+ if begin_time is None:
+ response_time = 0
+ else:
+ response_time = time.time() - begin_time
+ s.close()
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset)
+ r.time = response_time
+ if not q.is_response(r):
+ raise BadResponse
+ return r
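+
+# Illustrative usage sketch (comment only, not part of the upstream sources);
+# the resolver address is just an example:
+#
+#     q = dns.message.make_query('example.com', dns.rdatatype.A)
+#     r = dns.query.udp(q, '8.8.8.8', timeout=2)
+#     print(dns.rcode.to_text(r.rcode()))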
+
+
+def _net_read(sock, count, expiration):
+ """Read the specified number of bytes from sock. Keep trying until we
+ either get the desired amount, or we hit EOF.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ s = b''
+ while count > 0:
+ _wait_for_readable(sock, expiration)
+ n = sock.recv(count)
+ if n == b'':
+ raise EOFError
+ count = count - len(n)
+ s = s + n
+ return s
+
+
+def _net_write(sock, data, expiration):
+ """Write the specified data to the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ current = 0
+ l = len(data)
+ while current < l:
+ _wait_for_writable(sock, expiration)
+ current += sock.send(data[current:])
+
+
+def _connect(s, address):
+ try:
+ s.connect(address)
+ except socket.error:
+ (ty, v) = sys.exc_info()[:2]
+
+ if hasattr(v, 'errno'):
+ v_err = v.errno
+ else:
+ v_err = v[0]
+ if v_err not in [errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EALREADY]:
+ raise v
+
+
+def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
+ one_rr_per_rrset=False):
+ """Return the response obtained after sending a query via TCP.
+
+ @param q: the query
+ @type q: dns.message.Message object
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+    @param af: the address family to use. The default is None, which
+    causes the address family to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param one_rr_per_rrset: Put each RR into its own RRset
+ @type one_rr_per_rrset: bool
+ """
+
+ wire = q.to_wire()
+ (af, destination, source) = _destination_and_source(af, where, port,
+ source, source_port)
+ s = socket.socket(af, socket.SOCK_STREAM, 0)
+ begin_time = None
+ try:
+ expiration = _compute_expiration(timeout)
+ s.setblocking(0)
+ begin_time = time.time()
+ if source is not None:
+ s.bind(source)
+ _connect(s, destination)
+
+ l = len(wire)
+
+ # copying the wire into tcpmsg is inefficient, but lets us
+ # avoid writev() or doing a short write that would get pushed
+ # onto the net
+ tcpmsg = struct.pack("!H", l) + wire
+ _net_write(s, tcpmsg, expiration)
+ ldata = _net_read(s, 2, expiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = _net_read(s, l, expiration)
+ finally:
+ if begin_time is None:
+ response_time = 0
+ else:
+ response_time = time.time() - begin_time
+ s.close()
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset)
+ r.time = response_time
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
+ timeout=None, port=53, keyring=None, keyname=None, relativize=True,
+ af=None, lifetime=None, source=None, source_port=0, serial=0,
+ use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
+ """Return a generator for the responses to a zone transfer.
+
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param zone: The name of the zone to transfer
+ @type zone: dns.name.Name object or string
+ @param rdtype: The type of zone transfer. The default is
+ dns.rdatatype.AXFR.
+ @type rdtype: int or string
+ @param rdclass: The class of the zone transfer. The default is
+ dns.rdataclass.IN.
+ @type rdclass: int or string
+ @param timeout: The number of seconds to wait for each response message.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+ @param keyring: The TSIG keyring to use
+ @type keyring: dict
+ @param keyname: The name of the TSIG key to use
+ @type keyname: dns.name.Name object or string
+ @param relativize: If True, all names in the zone will be relativized to
+ the zone origin. It is essential that the relativize setting matches
+ the one specified to dns.zone.from_xfr().
+ @type relativize: bool
+    @param af: the address family to use. The default is None, which
+    causes the address family to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @param lifetime: The total number of seconds to spend doing the transfer.
+ If None, the default, then there is no limit on the time the transfer may
+ take.
+ @type lifetime: float
+ @rtype: generator of dns.message.Message objects.
+ @param source: source address. The default is the wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param serial: The SOA serial number to use as the base for an IXFR diff
+ sequence (only meaningful if rdtype == dns.rdatatype.IXFR).
+ @type serial: int
+ @param use_udp: Use UDP (only meaningful for IXFR)
+ @type use_udp: bool
+ @param keyalgorithm: The TSIG algorithm to use; defaults to
+ dns.tsig.default_algorithm
+ @type keyalgorithm: string
+ """
+
+ if isinstance(zone, string_types):
+ zone = dns.name.from_text(zone)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ q = dns.message.make_query(zone, rdtype, rdclass)
+ if rdtype == dns.rdatatype.IXFR:
+ rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
+ '. . %u 0 0 0 0' % serial)
+ q.authority.append(rrset)
+ if keyring is not None:
+ q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+ wire = q.to_wire()
+ (af, destination, source) = _destination_and_source(af, where, port,
+ source, source_port)
+ if use_udp:
+ if rdtype != dns.rdatatype.IXFR:
+ raise ValueError('cannot do a UDP AXFR')
+ s = socket.socket(af, socket.SOCK_DGRAM, 0)
+ else:
+ s = socket.socket(af, socket.SOCK_STREAM, 0)
+ s.setblocking(0)
+ if source is not None:
+ s.bind(source)
+ expiration = _compute_expiration(lifetime)
+ _connect(s, destination)
+ l = len(wire)
+ if use_udp:
+ _wait_for_writable(s, expiration)
+ s.send(wire)
+ else:
+ tcpmsg = struct.pack("!H", l) + wire
+ _net_write(s, tcpmsg, expiration)
+ done = False
+ delete_mode = True
+ expecting_SOA = False
+ soa_rrset = None
+ if relativize:
+ origin = zone
+ oname = dns.name.empty
+ else:
+ origin = None
+ oname = zone
+ tsig_ctx = None
+ first = True
+ while not done:
+ mexpiration = _compute_expiration(timeout)
+ if mexpiration is None or mexpiration > expiration:
+ mexpiration = expiration
+ if use_udp:
+ _wait_for_readable(s, expiration)
+ (wire, from_address) = s.recvfrom(65535)
+ else:
+ ldata = _net_read(s, 2, mexpiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = _net_read(s, l, mexpiration)
+ is_ixfr = (rdtype == dns.rdatatype.IXFR)
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ xfr=True, origin=origin, tsig_ctx=tsig_ctx,
+ multi=True, first=first,
+ one_rr_per_rrset=is_ixfr)
+ tsig_ctx = r.tsig_ctx
+ first = False
+ answer_index = 0
+ if soa_rrset is None:
+ if not r.answer or r.answer[0].name != oname:
+ raise dns.exception.FormError(
+ "No answer or RRset not for qname")
+ rrset = r.answer[0]
+ if rrset.rdtype != dns.rdatatype.SOA:
+ raise dns.exception.FormError("first RRset is not an SOA")
+ answer_index = 1
+ soa_rrset = rrset.copy()
+ if rdtype == dns.rdatatype.IXFR:
+ if soa_rrset[0].serial <= serial:
+ #
+ # We're already up-to-date.
+ #
+ done = True
+ else:
+ expecting_SOA = True
+ #
+ # Process SOAs in the answer section (other than the initial
+ # SOA in the first message).
+ #
+ for rrset in r.answer[answer_index:]:
+ if done:
+ raise dns.exception.FormError("answers after final SOA")
+ if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
+ if expecting_SOA:
+ if rrset[0].serial != serial:
+ raise dns.exception.FormError(
+ "IXFR base serial mismatch")
+ expecting_SOA = False
+ elif rdtype == dns.rdatatype.IXFR:
+ delete_mode = not delete_mode
+ #
+ # If this SOA RRset is equal to the first we saw then we're
+ # finished. If this is an IXFR we also check that we're seeing
+ # the record in the expected part of the response.
+ #
+ if rrset == soa_rrset and \
+ (rdtype == dns.rdatatype.AXFR or
+ (rdtype == dns.rdatatype.IXFR and delete_mode)):
+ done = True
+ elif expecting_SOA:
+ #
+ # We made an IXFR request and are expecting another
+ # SOA RR, but saw something else, so this must be an
+ # AXFR response.
+ #
+ rdtype = dns.rdatatype.AXFR
+ expecting_SOA = False
+ if done and q.keyring and not r.had_tsig:
+ raise dns.exception.FormError("missing TSIG")
+ yield r
+ s.close()
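+
+# Illustrative usage sketch (comment only, not part of the upstream sources);
+# zone transfers are usually consumed via dns.zone.from_xfr(), but the
+# generator can also be iterated directly:
+#
+#     for message in dns.query.xfr('192.0.2.53', 'example.com'):
+#         for rrset in message.answer:
+#             print(rrset)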
diff --git a/lib/dns/rcode.py b/lib/dns/rcode.py
new file mode 100644
index 00000000..314815f7
--- /dev/null
+++ b/lib/dns/rcode.py
@@ -0,0 +1,125 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Result Codes."""
+
+import dns.exception
+from ._compat import long
+
+
+NOERROR = 0
+FORMERR = 1
+SERVFAIL = 2
+NXDOMAIN = 3
+NOTIMP = 4
+REFUSED = 5
+YXDOMAIN = 6
+YXRRSET = 7
+NXRRSET = 8
+NOTAUTH = 9
+NOTZONE = 10
+BADVERS = 16
+
+_by_text = {
+ 'NOERROR': NOERROR,
+ 'FORMERR': FORMERR,
+ 'SERVFAIL': SERVFAIL,
+ 'NXDOMAIN': NXDOMAIN,
+ 'NOTIMP': NOTIMP,
+ 'REFUSED': REFUSED,
+ 'YXDOMAIN': YXDOMAIN,
+ 'YXRRSET': YXRRSET,
+ 'NXRRSET': NXRRSET,
+ 'NOTAUTH': NOTAUTH,
+ 'NOTZONE': NOTZONE,
+ 'BADVERS': BADVERS
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict((y, x) for x, y in _by_text.items())
+
+
+class UnknownRcode(dns.exception.DNSException):
+
+ """A DNS rcode is unknown."""
+
+
+def from_text(text):
+ """Convert text into an rcode.
+
+ @param text: the textual rcode
+ @type text: string
+ @raises UnknownRcode: the rcode is unknown
+ @rtype: int
+ """
+
+ if text.isdigit():
+ v = int(text)
+ if v >= 0 and v <= 4095:
+ return v
+ v = _by_text.get(text.upper())
+ if v is None:
+ raise UnknownRcode
+ return v
+
+
+def from_flags(flags, ednsflags):
+ """Return the rcode value encoded by flags and ednsflags.
+
+ @param flags: the DNS flags
+ @type flags: int
+ @param ednsflags: the EDNS flags
+ @type ednsflags: int
+ @raises ValueError: rcode is < 0 or > 4095
+ @rtype: int
+ """
+
+ value = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0)
+ if value < 0 or value > 4095:
+ raise ValueError('rcode must be >= 0 and <= 4095')
+ return value
+
+
+def to_flags(value):
+ """Return a (flags, ednsflags) tuple which encodes the rcode.
+
+ @param value: the rcode
+ @type value: int
+ @raises ValueError: rcode is < 0 or > 4095
+ @rtype: (int, int) tuple
+ """
+
+ if value < 0 or value > 4095:
+ raise ValueError('rcode must be >= 0 and <= 4095')
+ v = value & 0xf
+ ev = long(value & 0xff0) << 20
+ return (v, ev)
+
+
+def to_text(value):
+ """Convert rcode into text.
+
+ @param value: the rcode
+ @type value: int
+ @rtype: string
+ """
+
+ text = _by_value.get(value)
+ if text is None:
+ text = str(value)
+ return text
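+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+#
+#     dns.rcode.from_text('NXDOMAIN')            # -> 3
+#     dns.rcode.to_flags(dns.rcode.BADVERS)      # -> (0, 0x1000000): the low 4
+#                                                #    bits go in flags, the rest
+#                                                #    in the EDNS flags
+#     dns.rcode.from_flags(0, 0x1000000)         # -> 16 (BADVERS)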
diff --git a/lib/dns/rdata.py b/lib/dns/rdata.py
new file mode 100644
index 00000000..824731c7
--- /dev/null
+++ b/lib/dns/rdata.py
@@ -0,0 +1,464 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata.
+
+@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
+the module which implements that type.
+@type _rdata_modules: dict
+@var _module_prefix: The prefix to use when forming module names. The
+default is 'dns.rdtypes'. Changing this value will break the library.
+@type _module_prefix: string
+@var _hex_chunksize: At most this many octets are represented in each
+chunk of hexstring that _hexify() produces before whitespace occurs.
+@type _hex_chunksize: int"""
+
+from io import BytesIO
+import base64
+import binascii
+import struct
+
+import dns.exception
+import dns.name
+import dns.rdataclass
+import dns.rdatatype
+import dns.tokenizer
+import dns.wiredata
+from ._compat import xrange, string_types, text_type
+
+_hex_chunksize = 32
+
+
+def _hexify(data, chunksize=_hex_chunksize):
+ """Convert a binary string into its hex encoding, broken up into chunks
+ of I{chunksize} characters separated by a space.
+
+ @param data: the binary string
+ @type data: string
+ @param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
+ @rtype: string
+ """
+
+ line = binascii.hexlify(data)
+ return b' '.join([line[i:i + chunksize]
+ for i
+ in range(0, len(line), chunksize)]).decode()
+
+_base64_chunksize = 32
+
+
+def _base64ify(data, chunksize=_base64_chunksize):
+ """Convert a binary string into its base64 encoding, broken up into chunks
+ of I{chunksize} characters separated by a space.
+
+ @param data: the binary string
+ @type data: string
+ @param chunksize: the chunk size. Default is
+ L{dns.rdata._base64_chunksize}
+ @rtype: string
+ """
+
+ line = base64.b64encode(data)
+ return b' '.join([line[i:i + chunksize]
+ for i
+ in range(0, len(line), chunksize)]).decode()
+
+__escaped = {
+ '"': True,
+ '\\': True,
+}
+
+
+def _escapify(qstring):
+ """Escape the characters in a quoted string which need it.
+
+ @param qstring: the string
+ @type qstring: string
+ @returns: the escaped string
+ @rtype: string
+ """
+
+ if isinstance(qstring, text_type):
+ qstring = qstring.encode()
+ if not isinstance(qstring, bytearray):
+ qstring = bytearray(qstring)
+
+ text = ''
+ for c in qstring:
+ packed = struct.pack('!B', c).decode()
+ if packed in __escaped:
+ text += '\\' + packed
+ elif c >= 0x20 and c < 0x7F:
+ text += packed
+ else:
+ text += '\\%03d' % c
+ return text
+
+
+def _truncate_bitmap(what):
+ """Determine the index of greatest byte that isn't all zeros, and
+ return the bitmap that contains all the bytes less than that index.
+
+ @param what: a string of octets representing a bitmap.
+ @type what: string
+ @rtype: string
+ """
+
+ for i in xrange(len(what) - 1, -1, -1):
+ if what[i] != 0:
+ break
+ return what[0: i + 1]
+
+
+class Rdata(object):
+
+ """Base class for all DNS rdata types.
+ """
+
+ __slots__ = ['rdclass', 'rdtype']
+
+ def __init__(self, rdclass, rdtype):
+ """Initialize an rdata.
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ """
+
+ self.rdclass = rdclass
+ self.rdtype = rdtype
+
+ def covers(self):
+ """DNS SIG/RRSIG rdatas apply to a specific type; this type is
+ returned by the covers() function. If the rdata type is not
+ SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
+ creating rdatasets, allowing the rdataset to contain only RRSIGs
+ of a particular type, e.g. RRSIG(NS).
+ @rtype: int
+ """
+
+ return dns.rdatatype.NONE
+
+ def extended_rdatatype(self):
+ """Return a 32-bit type value, the least significant 16 bits of
+ which are the ordinary DNS type, and the upper 16 bits of which are
+ the "covered" type, if any.
+ @rtype: int
+ """
+
+ return self.covers() << 16 | self.rdtype
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ """Convert an rdata to text format.
+ @rtype: string
+ """
+ raise NotImplementedError
+
+ def to_wire(self, file, compress=None, origin=None):
+ """Convert an rdata to wire format.
+ @rtype: string
+ """
+
+ raise NotImplementedError
+
+ def to_digestable(self, origin=None):
+ """Convert rdata to a format suitable for digesting in hashes. This
+ is also the DNSSEC canonical form."""
+ f = BytesIO()
+ self.to_wire(f, None, origin)
+ return f.getvalue()
+
+ def validate(self):
+ """Check that the current contents of the rdata's fields are
+ valid. If you change an rdata by assigning to its fields,
+ it is a good idea to call validate() when you are done making
+ changes.
+ """
+ dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
+
+ def __repr__(self):
+ covers = self.covers()
+ if covers == dns.rdatatype.NONE:
+ ctext = ''
+ else:
+ ctext = '(' + dns.rdatatype.to_text(covers) + ')'
+        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
+               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
+               str(self) + '>'
+
+ def __str__(self):
+ return self.to_text()
+
+ def _cmp(self, other):
+ """Compare an rdata with another rdata of the same rdtype and
+ rdclass. Return < 0 if self < other in the DNSSEC ordering,
+ 0 if self == other, and > 0 if self > other.
+ """
+ our = self.to_digestable(dns.name.root)
+ their = other.to_digestable(dns.name.root)
+ if our == their:
+ return 0
+ if our > their:
+ return 1
+
+ return -1
+
+ def __eq__(self, other):
+ if not isinstance(other, Rdata):
+ return False
+ if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+ return False
+ return self._cmp(other) == 0
+
+ def __ne__(self, other):
+ if not isinstance(other, Rdata):
+ return True
+ if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+ return True
+ return self._cmp(other) != 0
+
+ def __lt__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+
+ return NotImplemented
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+ return NotImplemented
+ return self._cmp(other) <= 0
+
+ def __ge__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+ return NotImplemented
+ return self._cmp(other) >= 0
+
+ def __gt__(self, other):
+ if not isinstance(other, Rdata) or \
+ self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+ return NotImplemented
+ return self._cmp(other) > 0
+
+ def __hash__(self):
+ return hash(self.to_digestable(dns.name.root))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ """Build an rdata object from text format.
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param tok: The tokenizer
+ @type tok: dns.tokenizer.Tokenizer
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @param relativize: should names be relativized?
+ @type relativize: bool
+ @rtype: dns.rdata.Rdata instance
+ """
+
+ raise NotImplementedError
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ """Build an rdata object from wire format
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param wire: The wire-format message
+ @type wire: string
+ @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param rdlen: The length of the wire-format rdata
+ @type rdlen: int
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @rtype: dns.rdata.Rdata instance
+ """
+
+ raise NotImplementedError
+
+ def choose_relativity(self, origin=None, relativize=True):
+ """Convert any domain names in the rdata to the specified
+ relativization.
+ """
+
+ pass
+
+
+class GenericRdata(Rdata):
+
+ """Generate Rdata Class
+
+ This class is used for rdata types for which we have no better
+ implementation. It implements the DNS "unknown RRs" scheme.
+ """
+
+ __slots__ = ['data']
+
+ def __init__(self, rdclass, rdtype, data):
+ super(GenericRdata, self).__init__(rdclass, rdtype)
+ self.data = data
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return r'\# %d ' % len(self.data) + _hexify(self.data)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ token = tok.get()
+ if not token.is_identifier() or token.value != '\#':
+ raise dns.exception.SyntaxError(
+ r'generic rdata does not start with \#')
+ length = tok.get_int()
+ chunks = []
+ while 1:
+ token = tok.get()
+ if token.is_eol_or_eof():
+ break
+ chunks.append(token.value.encode())
+ hex = b''.join(chunks)
+ data = binascii.unhexlify(hex)
+ if len(data) != length:
+ raise dns.exception.SyntaxError(
+ 'generic rdata hex data has wrong length')
+ return cls(rdclass, rdtype, data)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.data)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ return cls(rdclass, rdtype, wire[current: current + rdlen])
+
+_rdata_modules = {}
+_module_prefix = 'dns.rdtypes'
+
+
+def get_rdata_class(rdclass, rdtype):
+
+ def import_module(name):
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+ mod = _rdata_modules.get((rdclass, rdtype))
+ rdclass_text = dns.rdataclass.to_text(rdclass)
+ rdtype_text = dns.rdatatype.to_text(rdtype)
+ rdtype_text = rdtype_text.replace('-', '_')
+ if not mod:
+ mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
+ if not mod:
+ try:
+ mod = import_module('.'.join([_module_prefix,
+ rdclass_text, rdtype_text]))
+ _rdata_modules[(rdclass, rdtype)] = mod
+ except ImportError:
+ try:
+ mod = import_module('.'.join([_module_prefix,
+ 'ANY', rdtype_text]))
+ _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
+ except ImportError:
+ mod = None
+ if mod:
+ cls = getattr(mod, rdtype_text)
+ else:
+ cls = GenericRdata
+ return cls
+
+
+def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
+ """Build an rdata object from text format.
+
+ This function attempts to dynamically load a class which
+ implements the specified rdata class and type. If there is no
+ class-and-type-specific implementation, the GenericRdata class
+ is used.
+
+ Once a class is chosen, its from_text() class method is called
+ with the parameters to this function.
+
+ If I{tok} is a string, then a tokenizer is created and the string
+ is used as its input.
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param tok: The tokenizer or input text
+ @type tok: dns.tokenizer.Tokenizer or string
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @param relativize: Should names be relativized?
+ @type relativize: bool
+ @rtype: dns.rdata.Rdata instance"""
+
+ if isinstance(tok, string_types):
+ tok = dns.tokenizer.Tokenizer(tok)
+ cls = get_rdata_class(rdclass, rdtype)
+ if cls != GenericRdata:
+ # peek at first token
+ token = tok.get()
+ tok.unget(token)
+ if token.is_identifier() and \
+ token.value == r'\#':
+ #
+ # Known type using the generic syntax. Extract the
+ # wire form from the generic syntax, and then run
+ # from_wire on it.
+ #
+ rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
+ relativize)
+ return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
+ origin)
+ return cls.from_text(rdclass, rdtype, tok, origin, relativize)
+
+
+def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
+ """Build an rdata object from wire format
+
+ This function attempts to dynamically load a class which
+ implements the specified rdata class and type. If there is no
+ class-and-type-specific implementation, the GenericRdata class
+ is used.
+
+ Once a class is chosen, its from_wire() class method is called
+ with the parameters to this function.
+
+ @param rdclass: The rdata class
+ @type rdclass: int
+ @param rdtype: The rdata type
+ @type rdtype: int
+ @param wire: The wire-format message
+ @type wire: string
+ @param current: The offset in wire of the beginning of the rdata.
+ @type current: int
+ @param rdlen: The length of the wire-format rdata
+ @type rdlen: int
+ @param origin: The origin to use for relative names
+ @type origin: dns.name.Name
+ @rtype: dns.rdata.Rdata instance"""
+
+ wire = dns.wiredata.maybe_wrap(wire)
+ cls = get_rdata_class(rdclass, rdtype)
+ return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
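+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+#
+#     rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
+#                              '10 mail.example.com.')
+#     rd.to_text()       # -> '10 mail.example.com.'
+#
+# Types without a specific implementation fall back to GenericRdata and the
+# RFC 3597 unknown-RR syntax, e.g. r'\# 4 0a000001'.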
diff --git a/lib/dns/rdataclass.py b/lib/dns/rdataclass.py
new file mode 100644
index 00000000..17a4810d
--- /dev/null
+++ b/lib/dns/rdataclass.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Classes.
+
+@var _by_text: The rdata class textual name to value mapping
+@type _by_text: dict
+@var _by_value: The rdata class value to textual name mapping
+@type _by_value: dict
+@var _metaclasses: If an rdataclass is a metaclass, there will be a mapping
+whose key is the rdatatype value and whose value is True in this dictionary.
+@type _metaclasses: dict"""
+
+import re
+
+import dns.exception
+
+RESERVED0 = 0
+IN = 1
+CH = 3
+HS = 4
+NONE = 254
+ANY = 255
+
+_by_text = {
+ 'RESERVED0': RESERVED0,
+ 'IN': IN,
+ 'CH': CH,
+ 'HS': HS,
+ 'NONE': NONE,
+ 'ANY': ANY
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict((y, x) for x, y in _by_text.items())
+
+# Now that we've built the inverse map, we can add class aliases to
+# the _by_text mapping.
+
+_by_text.update({
+ 'INTERNET': IN,
+ 'CHAOS': CH,
+ 'HESIOD': HS
+})
+
+_metaclasses = {
+ NONE: True,
+ ANY: True
+}
+
+_unknown_class_pattern = re.compile('CLASS([0-9]+)$', re.I)
+
+
+class UnknownRdataclass(dns.exception.DNSException):
+
+ """A DNS class is unknown."""
+
+
+def from_text(text):
+ """Convert text into a DNS rdata class value.
+ @param text: the text
+ @type text: string
+ @rtype: int
+ @raises dns.rdataclass.UnknownRdataclass: the class is unknown
+ @raises ValueError: the rdata class value is not >= 0 and <= 65535
+ """
+
+ value = _by_text.get(text.upper())
+ if value is None:
+ match = _unknown_class_pattern.match(text)
+ if match is None:
+ raise UnknownRdataclass
+ value = int(match.group(1))
+ if value < 0 or value > 65535:
+ raise ValueError("class must be between >= 0 and <= 65535")
+ return value
+
+
+def to_text(value):
+ """Convert a DNS rdata class to text.
+ @param value: the rdata class value
+ @type value: int
+ @rtype: string
+ @raises ValueError: the rdata class value is not >= 0 and <= 65535
+ """
+
+ if value < 0 or value > 65535:
+ raise ValueError("class must be between >= 0 and <= 65535")
+ text = _by_value.get(value)
+ if text is None:
+ text = 'CLASS' + repr(value)
+ return text
+
+
+def is_metaclass(rdclass):
+ """True if the class is a metaclass.
+ @param rdclass: the rdata class
+ @type rdclass: int
+ @rtype: bool"""
+
+ if rdclass in _metaclasses:
+ return True
+ return False
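+
+# Illustrative usage sketch (comment only, not part of the upstream sources):
+#
+#     dns.rdataclass.from_text('IN')          # -> 1
+#     dns.rdataclass.from_text('CLASS42')     # -> 42 (unknown-class syntax)
+#     dns.rdataclass.to_text(3)               # -> 'CH'
+#     dns.rdataclass.is_metaclass(dns.rdataclass.ANY)   # -> True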
diff --git a/lib/dns/rdataset.py b/lib/dns/rdataset.py
new file mode 100644
index 00000000..db266f2f
--- /dev/null
+++ b/lib/dns/rdataset.py
@@ -0,0 +1,338 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
+
+import random
+from io import StringIO
+import struct
+
+import dns.exception
+import dns.rdatatype
+import dns.rdataclass
+import dns.rdata
+import dns.set
+from ._compat import string_types
+
+# define SimpleSet here for backwards compatibility
+SimpleSet = dns.set.Set
+
+
+class DifferingCovers(dns.exception.DNSException):
+
+ """An attempt was made to add a DNS SIG/RRSIG whose covered type
+ is not the same as that of the other rdatas in the rdataset."""
+
+
+class IncompatibleTypes(dns.exception.DNSException):
+
+ """An attempt was made to add DNS RR data of an incompatible type."""
+
+
+class Rdataset(dns.set.Set):
+
+ """A DNS rdataset.
+
+ @ivar rdclass: The class of the rdataset
+ @type rdclass: int
+ @ivar rdtype: The type of the rdataset
+ @type rdtype: int
+ @ivar covers: The covered type. Usually this value is
+ dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+ dns.rdatatype.RRSIG, then the covers value will be the rdata
+ type the SIG/RRSIG covers. The library treats the SIG and RRSIG
+ types as if they were a family of
+ types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
+ easier to work with than if RRSIGs covering different rdata
+ types were aggregated into a single RRSIG rdataset.
+ @type covers: int
+ @ivar ttl: The DNS TTL (Time To Live) value
+ @type ttl: int
+ """
+
+ __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
+
+ def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
+ """Create a new rdataset of the specified class and type.
+
+ @see: the description of the class instance variables for the
+ meaning of I{rdclass} and I{rdtype}"""
+
+ super(Rdataset, self).__init__()
+ self.rdclass = rdclass
+ self.rdtype = rdtype
+ self.covers = covers
+ self.ttl = 0
+
+ def _clone(self):
+ obj = super(Rdataset, self)._clone()
+ obj.rdclass = self.rdclass
+ obj.rdtype = self.rdtype
+ obj.covers = self.covers
+ obj.ttl = self.ttl
+ return obj
+
+ def update_ttl(self, ttl):
+ """Set the TTL of the rdataset to be the lesser of the set's current
+ TTL or the specified TTL. If the set contains no rdatas, set the TTL
+ to the specified TTL.
+ @param ttl: The TTL
+ @type ttl: int"""
+
+ if len(self) == 0:
+ self.ttl = ttl
+ elif ttl < self.ttl:
+ self.ttl = ttl
+
+ def add(self, rd, ttl=None):
+ """Add the specified rdata to the rdataset.
+
+ If the optional I{ttl} parameter is supplied, then
+ self.update_ttl(ttl) will be called prior to adding the rdata.
+
+ @param rd: The rdata
+ @type rd: dns.rdata.Rdata object
+ @param ttl: The TTL
+ @type ttl: int"""
+
+ #
+ # If we're adding a signature, do some special handling to
+ # check that the signature covers the same type as the
+ # other rdatas in this rdataset. If this is the first rdata
+ # in the set, initialize the covers field.
+ #
+ if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
+ raise IncompatibleTypes
+ if ttl is not None:
+ self.update_ttl(ttl)
+ if self.rdtype == dns.rdatatype.RRSIG or \
+ self.rdtype == dns.rdatatype.SIG:
+ covers = rd.covers()
+ if len(self) == 0 and self.covers == dns.rdatatype.NONE:
+ self.covers = covers
+ elif self.covers != covers:
+ raise DifferingCovers
+ if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
+ self.clear()
+ super(Rdataset, self).add(rd)
+
+ def union_update(self, other):
+ self.update_ttl(other.ttl)
+ super(Rdataset, self).union_update(other)
+
+ def intersection_update(self, other):
+ self.update_ttl(other.ttl)
+ super(Rdataset, self).intersection_update(other)
+
+ def update(self, other):
+ """Add all rdatas in other to self.
+
+ @param other: The rdataset from which to update
+ @type other: dns.rdataset.Rdataset object"""
+
+ self.update_ttl(other.ttl)
+ super(Rdataset, self).update(other)
+
+ def __repr__(self):
+ if self.covers == 0:
+ ctext = ''
+ else:
+ ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
+        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
+               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
+
+ def __str__(self):
+ return self.to_text()
+
+ def __eq__(self, other):
+ """Two rdatasets are equal if they have the same class, type, and
+ covers, and contain the same rdata.
+ @rtype: bool"""
+
+ if not isinstance(other, Rdataset):
+ return False
+ if self.rdclass != other.rdclass or \
+ self.rdtype != other.rdtype or \
+ self.covers != other.covers:
+ return False
+ return super(Rdataset, self).__eq__(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_text(self, name=None, origin=None, relativize=True,
+ override_rdclass=None, **kw):
+ """Convert the rdataset into DNS master file format.
+
+ @see: L{dns.name.Name.choose_relativity} for more information
+ on how I{origin} and I{relativize} determine the way names
+ are emitted.
+
+ Any additional keyword arguments are passed on to the rdata
+ to_text() method.
+
+ @param name: If name is not None, emit RRs with I{name} as
+ the owner name.
+ @type name: dns.name.Name object
+ @param origin: The origin for relative names, or None.
+ @type origin: dns.name.Name object
+ @param relativize: True if names should be relativized
+ @type relativize: bool"""
+ if name is not None:
+ name = name.choose_relativity(origin, relativize)
+ ntext = str(name)
+ pad = ' '
+ else:
+ ntext = ''
+ pad = ''
+ s = StringIO()
+ if override_rdclass is not None:
+ rdclass = override_rdclass
+ else:
+ rdclass = self.rdclass
+ if len(self) == 0:
+ #
+ # Empty rdatasets are used for the question section, and in
+ # some dynamic updates, so we don't need to print out the TTL
+ # (which is meaningless anyway).
+ #
+ s.write(u'%s%s%s %s\n' % (ntext, pad,
+ dns.rdataclass.to_text(rdclass),
+ dns.rdatatype.to_text(self.rdtype)))
+ else:
+ for rd in self:
+ s.write(u'%s%s%d %s %s %s\n' %
+ (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
+ dns.rdatatype.to_text(self.rdtype),
+ rd.to_text(origin=origin, relativize=relativize,
+ **kw)))
+ #
+ # We strip off the final \n for the caller's convenience in printing
+ #
+ return s.getvalue()[:-1]
+
+ def to_wire(self, name, file, compress=None, origin=None,
+ override_rdclass=None, want_shuffle=True):
+ """Convert the rdataset to wire format.
+
+ @param name: The owner name of the RRset that will be emitted
+ @type name: dns.name.Name object
+ @param file: The file to which the wire format data will be appended
+ @type file: file
+ @param compress: The compression table to use; the default is None.
+ @type compress: dict
+ @param origin: The origin to be appended to any relative names when
+ they are emitted. The default is None.
+ @returns: the number of records emitted
+ @rtype: int
+ """
+
+ if override_rdclass is not None:
+ rdclass = override_rdclass
+ want_shuffle = False
+ else:
+ rdclass = self.rdclass
+ file.seek(0, 2)
+ if len(self) == 0:
+ name.to_wire(file, compress, origin)
+ stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
+ file.write(stuff)
+ return 1
+ else:
+ if want_shuffle:
+ l = list(self)
+ random.shuffle(l)
+ else:
+ l = self
+ for rd in l:
+ name.to_wire(file, compress, origin)
+ stuff = struct.pack("!HHIH", self.rdtype, rdclass,
+ self.ttl, 0)
+ file.write(stuff)
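+ # The rdlen field above was written as 0; after emitting the rdata we
+ # seek back two bytes and patch in the actual length.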
+ start = file.tell()
+ rd.to_wire(file, compress, origin)
+ end = file.tell()
+ assert end - start < 65536
+ file.seek(start - 2)
+ stuff = struct.pack("!H", end - start)
+ file.write(stuff)
+ file.seek(0, 2)
+ return len(self)
+
+ def match(self, rdclass, rdtype, covers):
+ """Returns True if this rdataset matches the specified class, type,
+ and covers"""
+ if self.rdclass == rdclass and \
+ self.rdtype == rdtype and \
+ self.covers == covers:
+ return True
+ return False
+
+
+def from_text_list(rdclass, rdtype, ttl, text_rdatas):
+ """Create an rdataset with the specified class, type, and TTL, and with
+ the specified list of rdatas in text format.
+
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ if isinstance(rdclass, string_types):
+ rdclass = dns.rdataclass.from_text(rdclass)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ r = Rdataset(rdclass, rdtype)
+ r.update_ttl(ttl)
+ for t in text_rdatas:
+ rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
+ r.add(rd)
+ return r
+
+
+def from_text(rdclass, rdtype, ttl, *text_rdatas):
+ """Create an rdataset with the specified class, type, and TTL, and with
+ the specified rdatas in text format.
+
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ return from_text_list(rdclass, rdtype, ttl, text_rdatas)
+
+
+def from_rdata_list(ttl, rdatas):
+ """Create an rdataset with the specified TTL, and with
+ the specified list of rdata objects.
+
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ if len(rdatas) == 0:
+ raise ValueError("rdata list must not be empty")
+ r = None
+ for rd in rdatas:
+ if r is None:
+ r = Rdataset(rd.rdclass, rd.rdtype)
+ r.update_ttl(ttl)
+ r.add(rd)
+ return r
+
+
+def from_rdata(ttl, *rdatas):
+ """Create an rdataset with the specified TTL, and with
+ the specified rdata objects.
+
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ return from_rdata_list(ttl, rdatas)
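+
+
+# Example use from application code (an illustrative sketch, not part of the
+# module itself):
+#
+#   import dns.rdataset
+#   rds = dns.rdataset.from_text('IN', 'A', 300, '192.0.2.1', '192.0.2.2')
+#   print(rds.to_text())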
diff --git a/lib/dns/rdatatype.py b/lib/dns/rdatatype.py
new file mode 100644
index 00000000..cde1a0a1
--- /dev/null
+++ b/lib/dns/rdatatype.py
@@ -0,0 +1,253 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Types.
+
+@var _by_text: The rdata type textual name to value mapping
+@type _by_text: dict
+@var _by_value: The rdata type value to textual name mapping
+@type _by_value: dict
+@var _metatypes: If an rdatatype is a metatype, there will be a mapping
+whose key is the rdatatype value and whose value is True in this dictionary.
+@type _metatypes: dict
+@var _singletons: If an rdatatype is a singleton, there will be a mapping
+whose key is the rdatatype value and whose value is True in this dictionary.
+@type _singletons: dict"""
+
+import re
+
+import dns.exception
+
+NONE = 0
+A = 1
+NS = 2
+MD = 3
+MF = 4
+CNAME = 5
+SOA = 6
+MB = 7
+MG = 8
+MR = 9
+NULL = 10
+WKS = 11
+PTR = 12
+HINFO = 13
+MINFO = 14
+MX = 15
+TXT = 16
+RP = 17
+AFSDB = 18
+X25 = 19
+ISDN = 20
+RT = 21
+NSAP = 22
+NSAP_PTR = 23
+SIG = 24
+KEY = 25
+PX = 26
+GPOS = 27
+AAAA = 28
+LOC = 29
+NXT = 30
+SRV = 33
+NAPTR = 35
+KX = 36
+CERT = 37
+A6 = 38
+DNAME = 39
+OPT = 41
+APL = 42
+DS = 43
+SSHFP = 44
+IPSECKEY = 45
+RRSIG = 46
+NSEC = 47
+DNSKEY = 48
+DHCID = 49
+NSEC3 = 50
+NSEC3PARAM = 51
+TLSA = 52
+HIP = 55
+CDS = 59
+CDNSKEY = 60
+CSYNC = 62
+SPF = 99
+UNSPEC = 103
+EUI48 = 108
+EUI64 = 109
+TKEY = 249
+TSIG = 250
+IXFR = 251
+AXFR = 252
+MAILB = 253
+MAILA = 254
+ANY = 255
+URI = 256
+CAA = 257
+TA = 32768
+DLV = 32769
+
+_by_text = {
+ 'NONE': NONE,
+ 'A': A,
+ 'NS': NS,
+ 'MD': MD,
+ 'MF': MF,
+ 'CNAME': CNAME,
+ 'SOA': SOA,
+ 'MB': MB,
+ 'MG': MG,
+ 'MR': MR,
+ 'NULL': NULL,
+ 'WKS': WKS,
+ 'PTR': PTR,
+ 'HINFO': HINFO,
+ 'MINFO': MINFO,
+ 'MX': MX,
+ 'TXT': TXT,
+ 'RP': RP,
+ 'AFSDB': AFSDB,
+ 'X25': X25,
+ 'ISDN': ISDN,
+ 'RT': RT,
+ 'NSAP': NSAP,
+ 'NSAP-PTR': NSAP_PTR,
+ 'SIG': SIG,
+ 'KEY': KEY,
+ 'PX': PX,
+ 'GPOS': GPOS,
+ 'AAAA': AAAA,
+ 'LOC': LOC,
+ 'NXT': NXT,
+ 'SRV': SRV,
+ 'NAPTR': NAPTR,
+ 'KX': KX,
+ 'CERT': CERT,
+ 'A6': A6,
+ 'DNAME': DNAME,
+ 'OPT': OPT,
+ 'APL': APL,
+ 'DS': DS,
+ 'SSHFP': SSHFP,
+ 'IPSECKEY': IPSECKEY,
+ 'RRSIG': RRSIG,
+ 'NSEC': NSEC,
+ 'DNSKEY': DNSKEY,
+ 'DHCID': DHCID,
+ 'NSEC3': NSEC3,
+ 'NSEC3PARAM': NSEC3PARAM,
+ 'TLSA': TLSA,
+ 'HIP': HIP,
+ 'CDS': CDS,
+ 'CDNSKEY': CDNSKEY,
+ 'CSYNC': CSYNC,
+ 'SPF': SPF,
+ 'UNSPEC': UNSPEC,
+ 'EUI48': EUI48,
+ 'EUI64': EUI64,
+ 'TKEY': TKEY,
+ 'TSIG': TSIG,
+ 'IXFR': IXFR,
+ 'AXFR': AXFR,
+ 'MAILB': MAILB,
+ 'MAILA': MAILA,
+ 'ANY': ANY,
+ 'URI': URI,
+ 'CAA': CAA,
+ 'TA': TA,
+ 'DLV': DLV,
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+
+_by_value = dict((y, x) for x, y in _by_text.items())
+
+
+_metatypes = {
+ OPT: True
+}
+
+_singletons = {
+ SOA: True,
+ NXT: True,
+ DNAME: True,
+ NSEC: True,
+ # CNAME is technically a singleton, but we allow multiple CNAMEs.
+}
+
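+# Unknown types may be written using the generic "TYPEnnn" syntax (RFC 3597).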
+_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I)
+
+
+class UnknownRdatatype(dns.exception.DNSException):
+
+ """DNS resource record type is unknown."""
+
+
+def from_text(text):
+ """Convert text into a DNS rdata type value.
+ @param text: the text
+ @type text: string
+ @raises dns.rdatatype.UnknownRdatatype: the type is unknown
+ @raises ValueError: the rdata type value is not >= 0 and <= 65535
+ @rtype: int"""
+
+ value = _by_text.get(text.upper())
+ if value is None:
+ match = _unknown_type_pattern.match(text)
+ if match is None:
+ raise UnknownRdatatype
+ value = int(match.group(1))
+ if value < 0 or value > 65535:
+ raise ValueError("type must be between 0 and 65535")
+ return value
+
+
+def to_text(value):
+ """Convert a DNS rdata type to text.
+ @param value: the rdata type value
+ @type value: int
+ @raises ValueError: the rdata type value is not >= 0 and <= 65535
+ @rtype: string"""
+
+ if value < 0 or value > 65535:
+ raise ValueError("type must be between 0 and 65535")
+ text = _by_value.get(value)
+ if text is None:
+ text = 'TYPE' + repr(value)
+ return text
+
+
+def is_metatype(rdtype):
+ """True if the type is a metatype.
+ @param rdtype: the type
+ @type rdtype: int
+ @rtype: bool"""
+
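+ # Types 249 (TKEY) through 255 (ANY) are meta/query types, as is OPT.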
+ if rdtype >= TKEY and rdtype <= ANY or rdtype in _metatypes:
+ return True
+ return False
+
+
+def is_singleton(rdtype):
+ """True if the type is a singleton.
+ @param rdtype: the type
+ @type rdtype: int
+ @rtype: bool"""
+
+ if rdtype in _singletons:
+ return True
+ return False
diff --git a/lib/dns/rdtypes/ANY/AFSDB.py b/lib/dns/rdtypes/ANY/AFSDB.py
new file mode 100644
index 00000000..f3d51540
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/AFSDB.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
+class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+
+ """AFSDB record
+
+ @ivar subtype: the subtype value
+ @type subtype: int
+ @ivar hostname: the hostname name
+ @type hostname: dns.name.Name object"""
+
+ # Use the property mechanism to make "subtype" an alias for the
+ # "preference" attribute, and "hostname" an alias for the "exchange"
+ # attribute.
+ #
+ # This lets us inherit the UncompressedMX implementation but lets
+ # the caller use appropriate attribute names for the rdata type.
+ #
+ # We probably lose some performance vs. a cut-and-paste
+ # implementation, but this way we don't copy code, and that's
+ # good.
+
+ def get_subtype(self):
+ return self.preference
+
+ def set_subtype(self, subtype):
+ self.preference = subtype
+
+ subtype = property(get_subtype, set_subtype)
+
+ def get_hostname(self):
+ return self.exchange
+
+ def set_hostname(self, hostname):
+ self.exchange = hostname
+
+ hostname = property(get_hostname, set_hostname)
diff --git a/lib/dns/rdtypes/ANY/CAA.py b/lib/dns/rdtypes/ANY/CAA.py
new file mode 100644
index 00000000..e80d4693
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/CAA.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+
+class CAA(dns.rdata.Rdata):
+
+ """CAA (Certification Authority Authorization) record
+
+ @ivar flags: the flags
+ @type flags: int
+ @ivar tag: the tag
+ @type tag: string
+ @ivar value: the value
+ @type value: string
+ @see: RFC 6844"""
+
+ __slots__ = ['flags', 'tag', 'value']
+
+ def __init__(self, rdclass, rdtype, flags, tag, value):
+ super(CAA, self).__init__(rdclass, rdtype)
+ self.flags = flags
+ self.tag = tag
+ self.value = value
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%u %s "%s"' % (self.flags,
+ dns.rdata._escapify(self.tag),
+ dns.rdata._escapify(self.value))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ flags = tok.get_uint8()
+ tag = tok.get_string().encode()
+ if len(tag) > 255:
+ raise dns.exception.SyntaxError("tag too long")
+ if not tag.isalnum():
+ raise dns.exception.SyntaxError("tag is not alphanumeric")
+ value = tok.get_string().encode()
+ return cls(rdclass, rdtype, flags, tag, value)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(struct.pack('!B', self.flags))
+ l = len(self.tag)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.tag)
+ file.write(self.value)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (flags, l) = struct.unpack('!BB', wire[current: current + 2])
+ current += 2
+ tag = wire[current: current + l]
+ value = wire[current + l:current + rdlen - 2]
+ return cls(rdclass, rdtype, flags, tag, value)
+
diff --git a/lib/dns/rdtypes/ANY/CDNSKEY.py b/lib/dns/rdtypes/ANY/CDNSKEY.py
new file mode 100644
index 00000000..83f3d51f
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/CDNSKEY.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dnskeybase
+from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set
+
+
+__all__ = ['flags_to_text_set', 'flags_from_text_set']
+
+
+class CDNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):
+
+ """CDNSKEY record"""
diff --git a/lib/dns/rdtypes/ANY/CDS.py b/lib/dns/rdtypes/ANY/CDS.py
new file mode 100644
index 00000000..e1abfc36
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/CDS.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+
+class CDS(dns.rdtypes.dsbase.DSBase):
+
+ """CDS record"""
diff --git a/lib/dns/rdtypes/ANY/CERT.py b/lib/dns/rdtypes/ANY/CERT.py
new file mode 100644
index 00000000..b7454409
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/CERT.py
@@ -0,0 +1,122 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import base64
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+import dns.tokenizer
+
+_ctype_by_value = {
+ 1: 'PKIX',
+ 2: 'SPKI',
+ 3: 'PGP',
+ 253: 'URI',
+ 254: 'OID',
+}
+
+_ctype_by_name = {
+ 'PKIX': 1,
+ 'SPKI': 2,
+ 'PGP': 3,
+ 'URI': 253,
+ 'OID': 254,
+}
+
+
+def _ctype_from_text(what):
+ v = _ctype_by_name.get(what)
+ if v is not None:
+ return v
+ return int(what)
+
+
+def _ctype_to_text(what):
+ v = _ctype_by_value.get(what)
+ if v is not None:
+ return v
+ return str(what)
+
+
+class CERT(dns.rdata.Rdata):
+
+ """CERT record
+
+ @ivar certificate_type: certificate type
+ @type certificate_type: int
+ @ivar key_tag: key tag
+ @type key_tag: int
+ @ivar algorithm: algorithm
+ @type algorithm: int
+ @ivar certificate: the certificate or CRL
+ @type certificate: string
+ @see: RFC 2538"""
+
+ __slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']
+
+ def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
+ certificate):
+ super(CERT, self).__init__(rdclass, rdtype)
+ self.certificate_type = certificate_type
+ self.key_tag = key_tag
+ self.algorithm = algorithm
+ self.certificate = certificate
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ certificate_type = _ctype_to_text(self.certificate_type)
+ return "%s %d %s %s" % (certificate_type, self.key_tag,
+ dns.dnssec.algorithm_to_text(self.algorithm),
+ dns.rdata._base64ify(self.certificate))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ certificate_type = _ctype_from_text(tok.get_string())
+ key_tag = tok.get_uint16()
+ algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+ if algorithm < 0 or algorithm > 255:
+ raise dns.exception.SyntaxError("bad algorithm type")
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ certificate = base64.b64decode(b64)
+ return cls(rdclass, rdtype, certificate_type, key_tag,
+ algorithm, certificate)
+
+ def to_wire(self, file, compress=None, origin=None):
+ prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
+ self.algorithm)
+ file.write(prefix)
+ file.write(self.certificate)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ prefix = wire[current: current + 5].unwrap()
+ current += 5
+ rdlen -= 5
+ if rdlen < 0:
+ raise dns.exception.FormError
+ (certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
+ certificate = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
+ certificate)
+
diff --git a/lib/dns/rdtypes/ANY/CNAME.py b/lib/dns/rdtypes/ANY/CNAME.py
new file mode 100644
index 00000000..65cf570c
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/CNAME.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+class CNAME(dns.rdtypes.nsbase.NSBase):
+
+ """CNAME record
+
+ Note: although CNAME is officially a singleton type, dnspython allows
+ non-singleton CNAME rdatasets because such sets have been commonly
+ used by BIND and other nameservers for load balancing."""
diff --git a/lib/dns/rdtypes/ANY/CSYNC.py b/lib/dns/rdtypes/ANY/CSYNC.py
new file mode 100644
index 00000000..bf95cb27
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/CSYNC.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2004-2007, 2009-2011, 2016 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+from dns._compat import xrange
+
+class CSYNC(dns.rdata.Rdata):
+
+ """CSYNC record
+
+ @ivar serial: the SOA serial number
+ @type serial: int
+ @ivar flags: the CSYNC flags
+ @type flags: int
+ @ivar windows: the windowed bitmap list
+ @type windows: list of (window number, string) tuples"""
+
+ __slots__ = ['serial', 'flags', 'windows']
+
+ def __init__(self, rdclass, rdtype, serial, flags, windows):
+ super(CSYNC, self).__init__(rdclass, rdtype)
+ self.serial = serial
+ self.flags = flags
+ self.windows = windows
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ text = ''
+ for (window, bitmap) in self.windows:
+ bits = []
+ for i in xrange(0, len(bitmap)):
+ byte = bitmap[i]
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(dns.rdatatype.to_text(window * 256 +
+ i * 8 + j))
+ text += (' ' + ' '.join(bits))
+ return '%d %d%s' % (self.serial, self.flags, text)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ serial = tok.get_uint32()
+ flags = tok.get_uint16()
+ rdtypes = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ nrdtype = dns.rdatatype.from_text(token.value)
+ if nrdtype == 0:
+ raise dns.exception.SyntaxError("CSYNC with bit 0")
+ if nrdtype > 65535:
+ raise dns.exception.SyntaxError("CSYNC with bit > 65535")
+ rdtypes.append(nrdtype)
+ rdtypes.sort()
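+ # Build the NSEC-style windowed bitmap: types are grouped into windows of
+ # 256, and each window carries a bitmap of up to 32 octets.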
+ window = 0
+ octets = 0
+ prior_rdtype = 0
+ bitmap = bytearray(b'\0' * 32)
+ windows = []
+ for nrdtype in rdtypes:
+ if nrdtype == prior_rdtype:
+ continue
+ prior_rdtype = nrdtype
+ new_window = nrdtype // 256
+ if new_window != window:
+ windows.append((window, bitmap[0:octets]))
+ bitmap = bytearray(b'\0' * 32)
+ window = new_window
+ offset = nrdtype % 256
+ byte = offset // 8
+ bit = offset % 8
+ octets = byte + 1
+ bitmap[byte] = bitmap[byte] | (0x80 >> bit)
+
+ windows.append((window, bitmap[0:octets]))
+ return cls(rdclass, rdtype, serial, flags, windows)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(struct.pack('!IH', self.serial, self.flags))
+ for (window, bitmap) in self.windows:
+ file.write(struct.pack('!BB', window, len(bitmap)))
+ file.write(bitmap)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ if rdlen < 6:
+ raise dns.exception.FormError("CSYNC too short")
+ (serial, flags) = struct.unpack("!IH", wire[current: current + 6])
+ current += 6
+ rdlen -= 6
+ windows = []
+ while rdlen > 0:
+ if rdlen < 3:
+ raise dns.exception.FormError("CSYNC too short")
+ window = wire[current]
+ octets = wire[current + 1]
+ if octets == 0 or octets > 32:
+ raise dns.exception.FormError("bad CSYNC octets")
+ current += 2
+ rdlen -= 2
+ if rdlen < octets:
+ raise dns.exception.FormError("bad CSYNC bitmap length")
+ bitmap = bytearray(wire[current: current + octets].unwrap())
+ current += octets
+ rdlen -= octets
+ windows.append((window, bitmap))
+ return cls(rdclass, rdtype, serial, flags, windows)
diff --git a/lib/dns/rdtypes/ANY/DLV.py b/lib/dns/rdtypes/ANY/DLV.py
new file mode 100644
index 00000000..cd1244c1
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/DLV.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+
+class DLV(dns.rdtypes.dsbase.DSBase):
+
+ """DLV record"""
diff --git a/lib/dns/rdtypes/ANY/DNAME.py b/lib/dns/rdtypes/ANY/DNAME.py
new file mode 100644
index 00000000..dac97214
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/DNAME.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+class DNAME(dns.rdtypes.nsbase.UncompressedNS):
+
+ """DNAME record"""
+
+ def to_digestable(self, origin=None):
+ return self.target.to_digestable(origin)
diff --git a/lib/dns/rdtypes/ANY/DNSKEY.py b/lib/dns/rdtypes/ANY/DNSKEY.py
new file mode 100644
index 00000000..e915e98b
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/DNSKEY.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dnskeybase
+from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set
+
+
+__all__ = ['flags_to_text_set', 'flags_from_text_set']
+
+
+class DNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):
+
+ """DNSKEY record"""
diff --git a/lib/dns/rdtypes/ANY/DS.py b/lib/dns/rdtypes/ANY/DS.py
new file mode 100644
index 00000000..577c8d84
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/DS.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.dsbase
+
+
+class DS(dns.rdtypes.dsbase.DSBase):
+
+ """DS record"""
diff --git a/lib/dns/rdtypes/ANY/EUI48.py b/lib/dns/rdtypes/ANY/EUI48.py
new file mode 100644
index 00000000..aa260e20
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/EUI48.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.euibase
+
+
+class EUI48(dns.rdtypes.euibase.EUIBase):
+
+ """EUI48 record
+
+ @ivar fingerprint: 48-bit Extended Unique Identifier (EUI-48)
+ @type fingerprint: string
+ @see: rfc7043.txt"""
+
+ byte_len = 6 # 0123456789ab (in hex)
+ text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab
diff --git a/lib/dns/rdtypes/ANY/EUI64.py b/lib/dns/rdtypes/ANY/EUI64.py
new file mode 100644
index 00000000..5eba350d
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/EUI64.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.euibase
+
+
+class EUI64(dns.rdtypes.euibase.EUIBase):
+
+ """EUI64 record
+
+ @ivar fingerprint: 64-bit Extended Unique Identifier (EUI-64)
+ @type fingerprint: string
+ @see: rfc7043.txt"""
+
+ byte_len = 8 # 0123456789abcdef (in hex)
+ text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab-cd-ef
diff --git a/lib/dns/rdtypes/ANY/GPOS.py b/lib/dns/rdtypes/ANY/GPOS.py
new file mode 100644
index 00000000..a359a771
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/GPOS.py
@@ -0,0 +1,160 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import long, text_type
+
+
+def _validate_float_string(what):
+ if what[0] == b'-'[0] or what[0] == b'+'[0]:
+ what = what[1:]
+ if what.isdigit():
+ return
+ (left, right) = what.split(b'.')
+ if left == b'' and right == b'':
+ raise dns.exception.FormError
+ if not left == b'' and not left.decode().isdigit():
+ raise dns.exception.FormError
+ if not right == b'' and not right.decode().isdigit():
+ raise dns.exception.FormError
+
+
+def _sanitize(value):
+ if isinstance(value, text_type):
+ return value.encode()
+ return value
+
+
+class GPOS(dns.rdata.Rdata):
+
+ """GPOS record
+
+ @ivar latitude: latitude
+ @type latitude: string
+ @ivar longitude: longitude
+ @type longitude: string
+ @ivar altitude: altitude
+ @type altitude: string
+ @see: RFC 1712"""
+
+ __slots__ = ['latitude', 'longitude', 'altitude']
+
+ def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
+ super(GPOS, self).__init__(rdclass, rdtype)
+ if isinstance(latitude, float) or \
+ isinstance(latitude, int) or \
+ isinstance(latitude, long):
+ latitude = str(latitude)
+ if isinstance(longitude, float) or \
+ isinstance(longitude, int) or \
+ isinstance(longitude, long):
+ longitude = str(longitude)
+ if isinstance(altitude, float) or \
+ isinstance(altitude, int) or \
+ isinstance(altitude, long):
+ altitude = str(altitude)
+ latitude = _sanitize(latitude)
+ longitude = _sanitize(longitude)
+ altitude = _sanitize(altitude)
+ _validate_float_string(latitude)
+ _validate_float_string(longitude)
+ _validate_float_string(altitude)
+ self.latitude = latitude
+ self.longitude = longitude
+ self.altitude = altitude
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%s %s %s' % (self.latitude.decode(),
+ self.longitude.decode(),
+ self.altitude.decode())
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ latitude = tok.get_string()
+ longitude = tok.get_string()
+ altitude = tok.get_string()
+ tok.get_eol()
+ return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.latitude)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.latitude)
+ l = len(self.longitude)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.longitude)
+ l = len(self.altitude)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.altitude)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ latitude = wire[current: current + l].unwrap()
+ current += l
+ rdlen -= l
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ longitude = wire[current: current + l].unwrap()
+ current += l
+ rdlen -= l
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ altitude = wire[current: current + l].unwrap()
+ return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+ def _get_float_latitude(self):
+ return float(self.latitude)
+
+ def _set_float_latitude(self, value):
+ self.latitude = str(value)
+
+ float_latitude = property(_get_float_latitude, _set_float_latitude,
+ doc="latitude as a floating point value")
+
+ def _get_float_longitude(self):
+ return float(self.longitude)
+
+ def _set_float_longitude(self, value):
+ self.longitude = str(value)
+
+ float_longitude = property(_get_float_longitude, _set_float_longitude,
+ doc="longitude as a floating point value")
+
+ def _get_float_altitude(self):
+ return float(self.altitude)
+
+ def _set_float_altitude(self, value):
+ self.altitude = str(value)
+
+ float_altitude = property(_get_float_altitude, _set_float_altitude,
+ doc="altitude as a floating point value")
diff --git a/lib/dns/rdtypes/ANY/HINFO.py b/lib/dns/rdtypes/ANY/HINFO.py
new file mode 100644
index 00000000..52298bc4
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/HINFO.py
@@ -0,0 +1,85 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import text_type
+
+
+class HINFO(dns.rdata.Rdata):
+
+ """HINFO record
+
+ @ivar cpu: the CPU type
+ @type cpu: string
+ @ivar os: the OS type
+ @type os: string
+ @see: RFC 1035"""
+
+ __slots__ = ['cpu', 'os']
+
+ def __init__(self, rdclass, rdtype, cpu, os):
+ super(HINFO, self).__init__(rdclass, rdtype)
+ if isinstance(cpu, text_type):
+ self.cpu = cpu.encode()
+ else:
+ self.cpu = cpu
+ if isinstance(os, text_type):
+ self.os = os.encode()
+ else:
+ self.os = os
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
+ dns.rdata._escapify(self.os))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ cpu = tok.get_string()
+ os = tok.get_string()
+ tok.get_eol()
+ return cls(rdclass, rdtype, cpu, os)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.cpu)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.cpu)
+ l = len(self.os)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.os)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ cpu = wire[current:current + l].unwrap()
+ current += l
+ rdlen -= l
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ os = wire[current: current + l].unwrap()
+ return cls(rdclass, rdtype, cpu, os)
+
diff --git a/lib/dns/rdtypes/ANY/HIP.py b/lib/dns/rdtypes/ANY/HIP.py
new file mode 100644
index 00000000..e0cd2755
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/HIP.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2010, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import base64
+import binascii
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+
+class HIP(dns.rdata.Rdata):
+
+ """HIP record
+
+ @ivar hit: the host identity tag
+ @type hit: string
+ @ivar algorithm: the public key cryptographic algorithm
+ @type algorithm: int
+ @ivar key: the public key
+ @type key: string
+ @ivar servers: the rendezvous servers
+ @type servers: list of dns.name.Name objects
+ @see: RFC 5205"""
+
+ __slots__ = ['hit', 'algorithm', 'key', 'servers']
+
+ def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
+ super(HIP, self).__init__(rdclass, rdtype)
+ self.hit = hit
+ self.algorithm = algorithm
+ self.key = key
+ self.servers = servers
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ hit = binascii.hexlify(self.hit).decode()
+ key = base64.b64encode(self.key).replace(b'\n', b'').decode()
+ text = u''
+ servers = []
+ for server in self.servers:
+ servers.append(server.choose_relativity(origin, relativize))
+ if len(servers) > 0:
+ text += (u' ' + u' '.join(map(lambda x: x.to_unicode(), servers)))
+ return u'%u %s %s%s' % (self.algorithm, hit, key, text)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ algorithm = tok.get_uint8()
+ hit = binascii.unhexlify(tok.get_string().encode())
+ if len(hit) > 255:
+ raise dns.exception.SyntaxError("HIT too long")
+ key = base64.b64decode(tok.get_string().encode())
+ servers = []
+ while 1:
+ token = tok.get()
+ if token.is_eol_or_eof():
+ break
+ server = dns.name.from_text(token.value, origin)
+ server.choose_relativity(origin, relativize)
+ servers.append(server)
+ return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+ def to_wire(self, file, compress=None, origin=None):
+ lh = len(self.hit)
+ lk = len(self.key)
+ file.write(struct.pack("!BBH", lh, self.algorithm, lk))
+ file.write(self.hit)
+ file.write(self.key)
+ for server in self.servers:
+ server.to_wire(file, None, origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (lh, algorithm, lk) = struct.unpack('!BBH',
+ wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ hit = wire[current: current + lh].unwrap()
+ current += lh
+ rdlen -= lh
+ key = wire[current: current + lk].unwrap()
+ current += lk
+ rdlen -= lk
+ servers = []
+ while rdlen > 0:
+ (server, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ if origin is not None:
+ server = server.relativize(origin)
+ servers.append(server)
+ return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ servers = []
+ for server in self.servers:
+ server = server.choose_relativity(origin, relativize)
+ servers.append(server)
+ self.servers = servers
diff --git a/lib/dns/rdtypes/ANY/ISDN.py b/lib/dns/rdtypes/ANY/ISDN.py
new file mode 100644
index 00000000..01284a82
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/ISDN.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import text_type
+
+
+class ISDN(dns.rdata.Rdata):
+
+ """ISDN record
+
+ @ivar address: the ISDN address
+ @type address: string
+ @ivar subaddress: the ISDN subaddress (or '' if not present)
+ @type subaddress: string
+ @see: RFC 1183"""
+
+ __slots__ = ['address', 'subaddress']
+
+ def __init__(self, rdclass, rdtype, address, subaddress):
+ super(ISDN, self).__init__(rdclass, rdtype)
+ if isinstance(address, text_type):
+ self.address = address.encode()
+ else:
+ self.address = address
+ if isinstance(subaddress, text_type):
+ self.subaddress = subaddress.encode()
+ else:
+ self.subaddress = subaddress
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.subaddress:
+ return '"%s" "%s"' % (dns.rdata._escapify(self.address),
+ dns.rdata._escapify(self.subaddress))
+ else:
+ return '"%s"' % dns.rdata._escapify(self.address)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_string()
+ t = tok.get()
+ if not t.is_eol_or_eof():
+ tok.unget(t)
+ subaddress = tok.get_string()
+ else:
+ tok.unget(t)
+ subaddress = ''
+ tok.get_eol()
+ return cls(rdclass, rdtype, address, subaddress)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.address)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.address)
+ l = len(self.subaddress)
+ if l > 0:
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.subaddress)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ address = wire[current: current + l].unwrap()
+ current += l
+ rdlen -= l
+ if rdlen > 0:
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ subaddress = wire[current: current + l].unwrap()
+ else:
+ subaddress = ''
+ return cls(rdclass, rdtype, address, subaddress)
+
diff --git a/lib/dns/rdtypes/ANY/LOC.py b/lib/dns/rdtypes/ANY/LOC.py
new file mode 100644
index 00000000..fbfcd70f
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/LOC.py
@@ -0,0 +1,327 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+from dns._compat import long, xrange
+
+
+_pows = tuple(long(10**i) for i in range(0, 11))
+
+# default values are in centimeters
+_default_size = 100.0
+_default_hprec = 1000000.0
+_default_vprec = 1000.0
+
+
+def _exponent_of(what, desc):
+ if what == 0:
+ return 0
+ exp = None
+ for i in xrange(len(_pows)):
+ if what // _pows[i] == long(0):
+ exp = i - 1
+ break
+ if exp is None or exp < 0:
+ raise dns.exception.SyntaxError("%s value out of bounds" % desc)
+ return exp
+
+
+def _float_to_tuple(what):
+ if what < 0:
+ sign = -1
+ what *= -1
+ else:
+ sign = 1
+ what = long(round(what * 3600000))
+ degrees = int(what // 3600000)
+ what -= degrees * 3600000
+ minutes = int(what // 60000)
+ what -= minutes * 60000
+ seconds = int(what // 1000)
+ what -= int(seconds * 1000)
+ what = int(what)
+ return (degrees, minutes, seconds, what, sign)
+
+
+def _tuple_to_float(what):
+ value = float(what[0])
+ value += float(what[1]) / 60.0
+ value += float(what[2]) / 3600.0
+ value += float(what[3]) / 3600000.0
+ return float(what[4]) * value
+
+
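+# RFC 1876 encodes size and precision values in one octet: the high nibble is
+# a base digit (0-9) and the low nibble a power-of-ten exponent, in centimeters.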
+def _encode_size(what, desc):
+ what = long(what)
+ exponent = _exponent_of(what, desc) & 0xF
+ base = what // pow(10, exponent) & 0xF
+ return base * 16 + exponent
+
+
+def _decode_size(what, desc):
+ exponent = what & 0x0F
+ if exponent > 9:
+ raise dns.exception.SyntaxError("bad %s exponent" % desc)
+ base = (what & 0xF0) >> 4
+ if base > 9:
+ raise dns.exception.SyntaxError("bad %s base" % desc)
+ return long(base) * pow(10, exponent)
+
+
+class LOC(dns.rdata.Rdata):
+
+ """LOC record
+
+ @ivar latitude: latitude
+ @type latitude: (int, int, int, int, sign) tuple specifying the degrees, minutes,
+ seconds, milliseconds, and sign of the coordinate.
+ @ivar longitude: longitude
+ @type longitude: (int, int, int, int, sign) tuple specifying the degrees,
+ minutes, seconds, milliseconds, and sign of the coordinate.
+ @ivar altitude: altitude
+ @type altitude: float
+ @ivar size: size of the sphere
+ @type size: float
+ @ivar horizontal_precision: horizontal precision
+ @type horizontal_precision: float
+ @ivar vertical_precision: vertical precision
+ @type vertical_precision: float
+ @see: RFC 1876"""
+
+ __slots__ = ['latitude', 'longitude', 'altitude', 'size',
+ 'horizontal_precision', 'vertical_precision']
+
+ def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
+ size=_default_size, hprec=_default_hprec,
+ vprec=_default_vprec):
+ """Initialize a LOC record instance.
+
+ The parameters I{latitude} and I{longitude} may be either a 5-tuple
+ of integers specifying (degrees, minutes, seconds, milliseconds, sign),
+ or they may be floating point values specifying the number of
+ degrees. The other parameters are floats. Size, horizontal precision,
+ and vertical precision are specified in centimeters."""
+
+ super(LOC, self).__init__(rdclass, rdtype)
+ if isinstance(latitude, int) or isinstance(latitude, long):
+ latitude = float(latitude)
+ if isinstance(latitude, float):
+ latitude = _float_to_tuple(latitude)
+ self.latitude = latitude
+ if isinstance(longitude, int) or isinstance(longitude, long):
+ longitude = float(longitude)
+ if isinstance(longitude, float):
+ longitude = _float_to_tuple(longitude)
+ self.longitude = longitude
+ self.altitude = float(altitude)
+ self.size = float(size)
+ self.horizontal_precision = float(hprec)
+ self.vertical_precision = float(vprec)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.latitude[4] > 0:
+ lat_hemisphere = 'N'
+ lat_degrees = self.latitude[0]
+ else:
+ lat_hemisphere = 'S'
+ lat_degrees = -1 * self.latitude[0]
+ if self.longitude[4] > 0:
+ long_hemisphere = 'E'
+ long_degrees = self.longitude[0]
+ else:
+ long_hemisphere = 'W'
+ long_degrees = -1 * self.longitude[0]
+ text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
+ self.latitude[0], self.latitude[1],
+ self.latitude[2], self.latitude[3], lat_hemisphere,
+ self.longitude[0], self.longitude[1], self.longitude[2],
+ self.longitude[3], long_hemisphere,
+ self.altitude / 100.0
+ )
+
+ # do not print default values
+ if self.size != _default_size or \
+ self.horizontal_precision != _default_hprec or \
+ self.vertical_precision != _default_vprec:
+ text += " %0.2fm %0.2fm %0.2fm" % (
+ self.size / 100.0, self.horizontal_precision / 100.0,
+ self.vertical_precision / 100.0
+ )
+ return text
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ latitude = [0, 0, 0, 0, 1]
+ longitude = [0, 0, 0, 0, 1]
+ size = _default_size
+ hprec = _default_hprec
+ vprec = _default_vprec
+
+ latitude[0] = tok.get_int()
+ t = tok.get_string()
+ if t.isdigit():
+ latitude[1] = int(t)
+ t = tok.get_string()
+ if '.' in t:
+ (seconds, milliseconds) = t.split('.')
+ if not seconds.isdigit():
+ raise dns.exception.SyntaxError(
+ 'bad latitude seconds value')
+ latitude[2] = int(seconds)
+ if latitude[2] >= 60:
+ raise dns.exception.SyntaxError('latitude seconds >= 60')
+ l = len(milliseconds)
+ if l == 0 or l > 3 or not milliseconds.isdigit():
+ raise dns.exception.SyntaxError(
+ 'bad latitude milliseconds value')
+ if l == 1:
+ m = 100
+ elif l == 2:
+ m = 10
+ else:
+ m = 1
+ latitude[3] = m * int(milliseconds)
+ t = tok.get_string()
+ elif t.isdigit():
+ latitude[2] = int(t)
+ t = tok.get_string()
+ if t == 'S':
+ latitude[4] = -1
+ elif t != 'N':
+ raise dns.exception.SyntaxError('bad latitude hemisphere value')
+
+ longitude[0] = tok.get_int()
+ t = tok.get_string()
+ if t.isdigit():
+ longitude[1] = int(t)
+ t = tok.get_string()
+ if '.' in t:
+ (seconds, milliseconds) = t.split('.')
+ if not seconds.isdigit():
+ raise dns.exception.SyntaxError(
+ 'bad longitude seconds value')
+ longitude[2] = int(seconds)
+ if longitude[2] >= 60:
+ raise dns.exception.SyntaxError('longitude seconds >= 60')
+ l = len(milliseconds)
+ if l == 0 or l > 3 or not milliseconds.isdigit():
+ raise dns.exception.SyntaxError(
+ 'bad longitude milliseconds value')
+ if l == 1:
+ m = 100
+ elif l == 2:
+ m = 10
+ else:
+ m = 1
+ longitude[3] = m * int(milliseconds)
+ t = tok.get_string()
+ elif t.isdigit():
+ longitude[2] = int(t)
+ t = tok.get_string()
+ if t == 'W':
+ longitude[4] = -1
+ elif t != 'E':
+ raise dns.exception.SyntaxError('bad longitude hemisphere value')
+
+ t = tok.get_string()
+ if t[-1] == 'm':
+ t = t[0: -1]
+ altitude = float(t) * 100.0 # m -> cm
+
+ token = tok.get().unescape()
+ if not token.is_eol_or_eof():
+ value = token.value
+ if value[-1] == 'm':
+ value = value[0: -1]
+ size = float(value) * 100.0 # m -> cm
+ token = tok.get().unescape()
+ if not token.is_eol_or_eof():
+ value = token.value
+ if value[-1] == 'm':
+ value = value[0: -1]
+ hprec = float(value) * 100.0 # m -> cm
+ token = tok.get().unescape()
+ if not token.is_eol_or_eof():
+ value = token.value
+ if value[-1] == 'm':
+ value = value[0: -1]
+ vprec = float(value) * 100.0 # m -> cm
+ tok.get_eol()
+
+ return cls(rdclass, rdtype, latitude, longitude, altitude,
+ size, hprec, vprec)
+
+ def to_wire(self, file, compress=None, origin=None):
+ milliseconds = (self.latitude[0] * 3600000 +
+ self.latitude[1] * 60000 +
+ self.latitude[2] * 1000 +
+ self.latitude[3]) * self.latitude[4]
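+ # The wire format stores latitude/longitude as unsigned 32-bit values in
+ # thousandths of an arc second, offset by 2^31 (equator / prime meridian).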
+ latitude = long(0x80000000) + milliseconds
+ milliseconds = (self.longitude[0] * 3600000 +
+ self.longitude[1] * 60000 +
+ self.longitude[2] * 1000 +
+ self.longitude[3]) * self.longitude[4]
+ longitude = long(0x80000000) + milliseconds
+ altitude = long(self.altitude) + long(10000000)
+ size = _encode_size(self.size, "size")
+ hprec = _encode_size(self.horizontal_precision, "horizontal precision")
+ vprec = _encode_size(self.vertical_precision, "vertical precision")
+ wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
+ longitude, altitude)
+ file.write(wire)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (version, size, hprec, vprec, latitude, longitude, altitude) = \
+ struct.unpack("!BBBBIII", wire[current: current + rdlen])
+ if latitude > long(0x80000000):
+ latitude = float(latitude - long(0x80000000)) / 3600000
+ else:
+ latitude = -1 * float(long(0x80000000) - latitude) / 3600000
+ if latitude < -90.0 or latitude > 90.0:
+ raise dns.exception.FormError("bad latitude")
+ if longitude > long(0x80000000):
+ longitude = float(longitude - long(0x80000000)) / 3600000
+ else:
+ longitude = -1 * float(long(0x80000000) - longitude) / 3600000
+ if longitude < -180.0 or longitude > 180.0:
+ raise dns.exception.FormError("bad longitude")
+ altitude = float(altitude) - 10000000.0
+ size = _decode_size(size, "size")
+ hprec = _decode_size(hprec, "horizontal precision")
+ vprec = _decode_size(vprec, "vertical precision")
+ return cls(rdclass, rdtype, latitude, longitude, altitude,
+ size, hprec, vprec)
+
+ def _get_float_latitude(self):
+ return _tuple_to_float(self.latitude)
+
+ def _set_float_latitude(self, value):
+ self.latitude = _float_to_tuple(value)
+
+ float_latitude = property(_get_float_latitude, _set_float_latitude,
+ doc="latitude as a floating point value")
+
+ def _get_float_longitude(self):
+ return _tuple_to_float(self.longitude)
+
+ def _set_float_longitude(self, value):
+ self.longitude = _float_to_tuple(value)
+
+ float_longitude = property(_get_float_longitude, _set_float_longitude,
+ doc="longitude as a floating point value")
diff --git a/lib/dns/rdtypes/ANY/MX.py b/lib/dns/rdtypes/ANY/MX.py
new file mode 100644
index 00000000..3a6735dc
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/MX.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
+class MX(dns.rdtypes.mxbase.MXBase):
+
+ """MX record"""
diff --git a/lib/dns/rdtypes/ANY/NS.py b/lib/dns/rdtypes/ANY/NS.py
new file mode 100644
index 00000000..ae56d819
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/NS.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+class NS(dns.rdtypes.nsbase.NSBase):
+
+ """NS record"""
diff --git a/lib/dns/rdtypes/ANY/NSEC.py b/lib/dns/rdtypes/ANY/NSEC.py
new file mode 100644
index 00000000..dfe96859
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/NSEC.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+import dns.name
+from dns._compat import xrange
+
+
+class NSEC(dns.rdata.Rdata):
+
+ """NSEC record
+
+ @ivar next: the next name
+ @type next: dns.name.Name object
+ @ivar windows: the windowed bitmap list
+ @type windows: list of (window number, string) tuples"""
+
+ __slots__ = ['next', 'windows']
+
+ def __init__(self, rdclass, rdtype, next, windows):
+ super(NSEC, self).__init__(rdclass, rdtype)
+ self.next = next
+ self.windows = windows
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = self.next.choose_relativity(origin, relativize)
+ text = ''
+ for (window, bitmap) in self.windows:
+ bits = []
+ for i in xrange(0, len(bitmap)):
+ byte = bitmap[i]
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(dns.rdatatype.to_text(window * 256 +
+ i * 8 + j))
+ text += (' ' + ' '.join(bits))
+ return '%s%s' % (next, text)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ next = tok.get_name()
+ next = next.choose_relativity(origin, relativize)
+ rdtypes = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ nrdtype = dns.rdatatype.from_text(token.value)
+ if nrdtype == 0:
+ raise dns.exception.SyntaxError("NSEC with bit 0")
+ if nrdtype > 65535:
+ raise dns.exception.SyntaxError("NSEC with bit > 65535")
+ rdtypes.append(nrdtype)
+ rdtypes.sort()
+ window = 0
+ octets = 0
+ prior_rdtype = 0
+ bitmap = bytearray(b'\0' * 32)
+ windows = []
+ for nrdtype in rdtypes:
+ if nrdtype == prior_rdtype:
+ continue
+ prior_rdtype = nrdtype
+ new_window = nrdtype // 256
+ if new_window != window:
+ windows.append((window, bitmap[0:octets]))
+ bitmap = bytearray(b'\0' * 32)
+ window = new_window
+ offset = nrdtype % 256
+ byte = offset // 8
+ bit = offset % 8
+ octets = byte + 1
+ bitmap[byte] = bitmap[byte] | (0x80 >> bit)
+
+ windows.append((window, bitmap[0:octets]))
+ return cls(rdclass, rdtype, next, windows)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.next.to_wire(file, None, origin)
+ for (window, bitmap) in self.windows:
+ file.write(struct.pack('!BB', window, len(bitmap)))
+ file.write(bitmap)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ windows = []
+ while rdlen > 0:
+ if rdlen < 3:
+ raise dns.exception.FormError("NSEC too short")
+ window = wire[current]
+ octets = wire[current + 1]
+ if octets == 0 or octets > 32:
+ raise dns.exception.FormError("bad NSEC octets")
+ current += 2
+ rdlen -= 2
+ if rdlen < octets:
+ raise dns.exception.FormError("bad NSEC bitmap length")
+ bitmap = bytearray(wire[current: current + octets].unwrap())
+ current += octets
+ rdlen -= octets
+ windows.append((window, bitmap))
+ if origin is not None:
+ next = next.relativize(origin)
+ return cls(rdclass, rdtype, next, windows)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.next = self.next.choose_relativity(origin, relativize)
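
The `(window, bitmap)` packing used above is easiest to see with a quick round trip. An illustrative sketch only, assuming the bundled `dns.rdata.from_text` helper and the rdataclass/rdatatype constants from the same vendored library:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

nsec = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NSEC,
                           'host.example.com. A MX RRSIG NSEC')
# Covered types are stored as one (window, bitmap) tuple per 256-type block.
print(nsec.windows)    # [(0, bytearray(...))]
print(nsec.to_text())  # host.example.com. A MX RRSIG NSEC
```
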
diff --git a/lib/dns/rdtypes/ANY/NSEC3.py b/lib/dns/rdtypes/ANY/NSEC3.py
new file mode 100644
index 00000000..3982f4b4
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/NSEC3.py
@@ -0,0 +1,192 @@
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import binascii
+import string
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+from dns._compat import xrange, text_type
+
+try:
+ b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV',
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
+ b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
+ '0123456789ABCDEFGHIJKLMNOPQRSTUV')
+except AttributeError:
+ b32_hex_to_normal = bytes.maketrans(b'0123456789ABCDEFGHIJKLMNOPQRSTUV',
+ b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
+ b32_normal_to_hex = bytes.maketrans(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
+ b'0123456789ABCDEFGHIJKLMNOPQRSTUV')
+
+# hash algorithm constants
+SHA1 = 1
+
+# flag constants
+OPTOUT = 1
+
+
+class NSEC3(dns.rdata.Rdata):
+
+ """NSEC3 record
+
+ @ivar algorithm: the hash algorithm number
+ @type algorithm: int
+ @ivar flags: the flags
+ @type flags: int
+ @ivar iterations: the number of iterations
+ @type iterations: int
+ @ivar salt: the salt
+ @type salt: string
+ @ivar next: the next name hash
+ @type next: string
+ @ivar windows: the windowed bitmap list
+ @type windows: list of (window number, string) tuples"""
+
+ __slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows']
+
+ def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt,
+ next, windows):
+ super(NSEC3, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.flags = flags
+ self.iterations = iterations
+ if isinstance(salt, text_type):
+ self.salt = salt.encode()
+ else:
+ self.salt = salt
+ self.next = next
+ self.windows = windows
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = base64.b32encode(self.next).translate(
+ b32_normal_to_hex).lower().decode()
+ if self.salt == b'':
+ salt = '-'
+ else:
+ salt = binascii.hexlify(self.salt).decode()
+ text = u''
+ for (window, bitmap) in self.windows:
+ bits = []
+ for i in xrange(0, len(bitmap)):
+ byte = bitmap[i]
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(dns.rdatatype.to_text(window * 256 +
+ i * 8 + j))
+ text += (u' ' + u' '.join(bits))
+ return u'%u %u %u %s %s%s' % (self.algorithm, self.flags,
+ self.iterations, salt, next, text)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == u'-':
+ salt = b''
+ else:
+ salt = binascii.unhexlify(salt.encode('ascii'))
+ next = tok.get_string().encode(
+ 'ascii').upper().translate(b32_hex_to_normal)
+ next = base64.b32decode(next)
+ rdtypes = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ nrdtype = dns.rdatatype.from_text(token.value)
+ if nrdtype == 0:
+ raise dns.exception.SyntaxError("NSEC3 with bit 0")
+ if nrdtype > 65535:
+ raise dns.exception.SyntaxError("NSEC3 with bit > 65535")
+ rdtypes.append(nrdtype)
+ rdtypes.sort()
+ window = 0
+ octets = 0
+ prior_rdtype = 0
+ bitmap = bytearray(b'\0' * 32)
+ windows = []
+ for nrdtype in rdtypes:
+ if nrdtype == prior_rdtype:
+ continue
+ prior_rdtype = nrdtype
+ new_window = nrdtype // 256
+ if new_window != window:
+ if octets != 0:
+ windows.append((window, bitmap[0:octets]))
+ bitmap = bytearray(b'\0' * 32)
+ window = new_window
+ offset = nrdtype % 256
+ byte = offset // 8
+ bit = offset % 8
+ octets = byte + 1
+ bitmap[byte] = bitmap[byte] | (0x80 >> bit)
+ if octets != 0:
+ windows.append((window, bitmap[0:octets]))
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next,
+ windows)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+ self.iterations, l))
+ file.write(self.salt)
+ l = len(self.next)
+ file.write(struct.pack("!B", l))
+ file.write(self.next)
+ for (window, bitmap) in self.windows:
+ file.write(struct.pack("!BB", window, len(bitmap)))
+ file.write(bitmap)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (algorithm, flags, iterations, slen) = \
+ struct.unpack('!BBHB', wire[current: current + 5])
+
+ current += 5
+ rdlen -= 5
+ salt = wire[current: current + slen].unwrap()
+ current += slen
+ rdlen -= slen
+ nlen = wire[current]
+ current += 1
+ rdlen -= 1
+ next = wire[current: current + nlen].unwrap()
+ current += nlen
+ rdlen -= nlen
+ windows = []
+ while rdlen > 0:
+ if rdlen < 3:
+ raise dns.exception.FormError("NSEC3 too short")
+ window = wire[current]
+ octets = wire[current + 1]
+ if octets == 0 or octets > 32:
+ raise dns.exception.FormError("bad NSEC3 octets")
+ current += 2
+ rdlen -= 2
+ if rdlen < octets:
+ raise dns.exception.FormError("bad NSEC3 bitmap length")
+ bitmap = bytearray(wire[current: current + octets].unwrap())
+ current += octets
+ rdlen -= octets
+ windows.append((window, bitmap))
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next,
+ windows)
+
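
As in the RFC 5155 presentation format, `from_text` above expects the salt in hex and the next hashed owner name in base32hex. A hedged example with made-up values, assuming the bundled `dns.rdata.from_text` helper and constants:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# algorithm 1 (SHA-1), flags 1 (opt-out), 12 iterations, a 4-byte salt,
# a 20-byte next hash in base32hex, then the type bitmap.
nsec3 = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.NSEC3,
    '1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA RRSIG')
print(nsec3.salt)       # b'\xaa\xbb\xcc\xdd'
print(len(nsec3.next))  # 20
```
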
diff --git a/lib/dns/rdtypes/ANY/NSEC3PARAM.py b/lib/dns/rdtypes/ANY/NSEC3PARAM.py
new file mode 100644
index 00000000..b506282b
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/NSEC3PARAM.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.exception
+import dns.rdata
+from dns._compat import text_type
+
+
+class NSEC3PARAM(dns.rdata.Rdata):
+
+ """NSEC3PARAM record
+
+ @ivar algorithm: the hash algorithm number
+ @type algorithm: int
+ @ivar flags: the flags
+ @type flags: int
+ @ivar iterations: the number of iterations
+ @type iterations: int
+ @ivar salt: the salt
+ @type salt: string"""
+
+ __slots__ = ['algorithm', 'flags', 'iterations', 'salt']
+
+ def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
+ super(NSEC3PARAM, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.flags = flags
+ self.iterations = iterations
+ if isinstance(salt, text_type):
+ self.salt = salt.encode()
+ else:
+ self.salt = salt
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.salt == b'':
+ salt = '-'
+ else:
+ salt = binascii.hexlify(self.salt).decode()
+ return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations,
+ salt)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == '-':
+ salt = ''
+ else:
+ salt = binascii.unhexlify(salt.encode())
+ tok.get_eol()
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags,
+ self.iterations, l))
+ file.write(self.salt)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (algorithm, flags, iterations, slen) = \
+ struct.unpack('!BBHB',
+ wire[current: current + 5])
+ current += 5
+ rdlen -= 5
+ salt = wire[current: current + slen].unwrap()
+ current += slen
+ rdlen -= slen
+ if rdlen != 0:
+ raise dns.exception.FormError
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
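
A short sketch of the four-field NSEC3PARAM form handled above, with illustrative values and the same assumed bundled helpers as the other examples:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

p = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NSEC3PARAM,
                        '1 0 12 aabbccdd')
print(p.algorithm, p.flags, p.iterations)  # 1 0 12
print(p.salt)                              # b'\xaa\xbb\xcc\xdd'
```
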
diff --git a/lib/dns/rdtypes/ANY/PTR.py b/lib/dns/rdtypes/ANY/PTR.py
new file mode 100644
index 00000000..250187a6
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/PTR.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+class PTR(dns.rdtypes.nsbase.NSBase):
+
+ """PTR record"""
diff --git a/lib/dns/rdtypes/ANY/RP.py b/lib/dns/rdtypes/ANY/RP.py
new file mode 100644
index 00000000..e9071c76
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/RP.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class RP(dns.rdata.Rdata):
+
+ """RP record
+
+ @ivar mbox: The responsible person's mailbox
+ @type mbox: dns.name.Name object
+ @ivar txt: The owner name of a node with TXT records, or the root name
+ if no TXT records are associated with this RP.
+ @type txt: dns.name.Name object
+ @see: RFC 1183"""
+
+ __slots__ = ['mbox', 'txt']
+
+ def __init__(self, rdclass, rdtype, mbox, txt):
+ super(RP, self).__init__(rdclass, rdtype)
+ self.mbox = mbox
+ self.txt = txt
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mbox = self.mbox.choose_relativity(origin, relativize)
+ txt = self.txt.choose_relativity(origin, relativize)
+ return "%s %s" % (str(mbox), str(txt))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ mbox = tok.get_name()
+ txt = tok.get_name()
+ mbox = mbox.choose_relativity(origin, relativize)
+ txt = txt.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, mbox, txt)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.mbox.to_wire(file, None, origin)
+ self.txt.to_wire(file, None, origin)
+
+ def to_digestable(self, origin=None):
+ return self.mbox.to_digestable(origin) + \
+ self.txt.to_digestable(origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (mbox, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ if rdlen <= 0:
+ raise dns.exception.FormError
+ (txt, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ mbox = mbox.relativize(origin)
+ txt = txt.relativize(origin)
+ return cls(rdclass, rdtype, mbox, txt)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.mbox = self.mbox.choose_relativity(origin, relativize)
+ self.txt = self.txt.choose_relativity(origin, relativize)
diff --git a/lib/dns/rdtypes/ANY/RRSIG.py b/lib/dns/rdtypes/ANY/RRSIG.py
new file mode 100644
index 00000000..953dfb9a
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/RRSIG.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import calendar
+import struct
+import time
+
+import dns.dnssec
+import dns.exception
+import dns.rdata
+import dns.rdatatype
+
+
+class BadSigTime(dns.exception.DNSException):
+
+ """Time in DNS SIG or RRSIG resource record cannot be parsed."""
+
+
+def sigtime_to_posixtime(what):
+ if len(what) != 14:
+ raise BadSigTime
+ year = int(what[0:4])
+ month = int(what[4:6])
+ day = int(what[6:8])
+ hour = int(what[8:10])
+ minute = int(what[10:12])
+ second = int(what[12:14])
+ return calendar.timegm((year, month, day, hour, minute, second,
+ 0, 0, 0))
+
+
+def posixtime_to_sigtime(what):
+ return time.strftime('%Y%m%d%H%M%S', time.gmtime(what))
+
+
+class RRSIG(dns.rdata.Rdata):
+
+ """RRSIG record
+
+ @ivar type_covered: the rdata type this signature covers
+ @type type_covered: int
+ @ivar algorithm: the algorithm used for the sig
+ @type algorithm: int
+ @ivar labels: number of labels
+ @type labels: int
+ @ivar original_ttl: the original TTL
+ @type original_ttl: long
+ @ivar expiration: signature expiration time
+ @type expiration: long
+ @ivar inception: signature inception time
+ @type inception: long
+ @ivar key_tag: the key tag
+ @type key_tag: int
+ @ivar signer: the signer
+ @type signer: dns.name.Name object
+ @ivar signature: the signature
+ @type signature: string"""
+
+ __slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl',
+ 'expiration', 'inception', 'key_tag', 'signer',
+ 'signature']
+
+ def __init__(self, rdclass, rdtype, type_covered, algorithm, labels,
+ original_ttl, expiration, inception, key_tag, signer,
+ signature):
+ super(RRSIG, self).__init__(rdclass, rdtype)
+ self.type_covered = type_covered
+ self.algorithm = algorithm
+ self.labels = labels
+ self.original_ttl = original_ttl
+ self.expiration = expiration
+ self.inception = inception
+ self.key_tag = key_tag
+ self.signer = signer
+ self.signature = signature
+
+ def covers(self):
+ return self.type_covered
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%s %d %d %d %s %s %d %s %s' % (
+ dns.rdatatype.to_text(self.type_covered),
+ self.algorithm,
+ self.labels,
+ self.original_ttl,
+ posixtime_to_sigtime(self.expiration),
+ posixtime_to_sigtime(self.inception),
+ self.key_tag,
+ self.signer.choose_relativity(origin, relativize),
+ dns.rdata._base64ify(self.signature)
+ )
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ type_covered = dns.rdatatype.from_text(tok.get_string())
+ algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+ labels = tok.get_int()
+ original_ttl = tok.get_ttl()
+ expiration = sigtime_to_posixtime(tok.get_string())
+ inception = sigtime_to_posixtime(tok.get_string())
+ key_tag = tok.get_int()
+ signer = tok.get_name()
+ signer = signer.choose_relativity(origin, relativize)
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ signature = base64.b64decode(b64)
+ return cls(rdclass, rdtype, type_covered, algorithm, labels,
+ original_ttl, expiration, inception, key_tag, signer,
+ signature)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack('!HBBIIIH', self.type_covered,
+ self.algorithm, self.labels,
+ self.original_ttl, self.expiration,
+ self.inception, self.key_tag)
+ file.write(header)
+ self.signer.to_wire(file, None, origin)
+ file.write(self.signature)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack('!HBBIIIH', wire[current: current + 18])
+ current += 18
+ rdlen -= 18
+ (signer, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ if origin is not None:
+ signer = signer.relativize(origin)
+ signature = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2],
+ header[3], header[4], header[5], header[6], signer,
+ signature)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.signer = self.signer.choose_relativity(origin, relativize)
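
The `sigtime_to_posixtime`/`posixtime_to_sigtime` helpers above convert between the 14-digit YYYYMMDDHHMMSS presentation form and POSIX time. A sketch with placeholder values (the base64 blob is not a real signature), assuming the bundled `dns.rdata.from_text` helper:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype
from dns.rdtypes.ANY.RRSIG import posixtime_to_sigtime, sigtime_to_posixtime

t = sigtime_to_posixtime('20160814000000')
print(posixtime_to_sigtime(t))  # 20160814000000

rrsig = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.RRSIG,
    'A RSASHA1 3 86400 20160913000000 20160814000000 12345 example.com. Zm9vYmFy')
print(rrsig.covers() == dns.rdatatype.A)  # True
```
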
diff --git a/lib/dns/rdtypes/ANY/RT.py b/lib/dns/rdtypes/ANY/RT.py
new file mode 100644
index 00000000..88b75486
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/RT.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
+class RT(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+
+ """RT record"""
diff --git a/lib/dns/rdtypes/ANY/SOA.py b/lib/dns/rdtypes/ANY/SOA.py
new file mode 100644
index 00000000..cc0098e8
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/SOA.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class SOA(dns.rdata.Rdata):
+
+ """SOA record
+
+ @ivar mname: the SOA MNAME (master name) field
+ @type mname: dns.name.Name object
+ @ivar rname: the SOA RNAME (responsible name) field
+ @type rname: dns.name.Name object
+ @ivar serial: The zone's serial number
+ @type serial: int
+ @ivar refresh: The zone's refresh value (in seconds)
+ @type refresh: int
+ @ivar retry: The zone's retry value (in seconds)
+ @type retry: int
+ @ivar expire: The zone's expiration value (in seconds)
+ @type expire: int
+ @ivar minimum: The zone's negative caching time (in seconds, called
+ "minimum" for historical reasons)
+ @type minimum: int
+ @see: RFC 1035"""
+
+ __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
+ 'minimum']
+
+ def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
+ expire, minimum):
+ super(SOA, self).__init__(rdclass, rdtype)
+ self.mname = mname
+ self.rname = rname
+ self.serial = serial
+ self.refresh = refresh
+ self.retry = retry
+ self.expire = expire
+ self.minimum = minimum
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mname = self.mname.choose_relativity(origin, relativize)
+ rname = self.rname.choose_relativity(origin, relativize)
+ return '%s %s %d %d %d %d %d' % (
+ mname, rname, self.serial, self.refresh, self.retry,
+ self.expire, self.minimum)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ mname = tok.get_name()
+ rname = tok.get_name()
+ mname = mname.choose_relativity(origin, relativize)
+ rname = rname.choose_relativity(origin, relativize)
+ serial = tok.get_uint32()
+ refresh = tok.get_ttl()
+ retry = tok.get_ttl()
+ expire = tok.get_ttl()
+ minimum = tok.get_ttl()
+ tok.get_eol()
+ return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
+ expire, minimum)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.mname.to_wire(file, compress, origin)
+ self.rname.to_wire(file, compress, origin)
+ five_ints = struct.pack('!IIIII', self.serial, self.refresh,
+ self.retry, self.expire, self.minimum)
+ file.write(five_ints)
+
+ def to_digestable(self, origin=None):
+ return self.mname.to_digestable(origin) + \
+ self.rname.to_digestable(origin) + \
+ struct.pack('!IIIII', self.serial, self.refresh,
+ self.retry, self.expire, self.minimum)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
+ current += cused
+ rdlen -= cused
+ if rdlen != 20:
+ raise dns.exception.FormError
+ five_ints = struct.unpack('!IIIII',
+ wire[current: current + rdlen])
+ if origin is not None:
+ mname = mname.relativize(origin)
+ rname = rname.relativize(origin)
+ return cls(rdclass, rdtype, mname, rname,
+ five_ints[0], five_ints[1], five_ints[2], five_ints[3],
+ five_ints[4])
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.mname = self.mname.choose_relativity(origin, relativize)
+ self.rname = self.rname.choose_relativity(origin, relativize)
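
A quick sketch of parsing an SOA in presentation order (mname, rname, serial, refresh, retry, expire, minimum), assuming the bundled helpers; the values are illustrative:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

soa = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.SOA,
    'ns1.example.com. hostmaster.example.com. 2016081401 7200 3600 1209600 86400')
print(soa.serial)   # 2016081401
print(soa.minimum)  # 86400, the negative-caching TTL
```
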
diff --git a/lib/dns/rdtypes/ANY/SPF.py b/lib/dns/rdtypes/ANY/SPF.py
new file mode 100644
index 00000000..f3e0904e
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/SPF.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+
+class SPF(dns.rdtypes.txtbase.TXTBase):
+
+ """SPF record
+
+ @see: RFC 4408"""
diff --git a/lib/dns/rdtypes/ANY/SSHFP.py b/lib/dns/rdtypes/ANY/SSHFP.py
new file mode 100644
index 00000000..b6ed396f
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/SSHFP.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.rdata
+import dns.rdatatype
+
+
+class SSHFP(dns.rdata.Rdata):
+
+ """SSHFP record
+
+ @ivar algorithm: the algorithm
+ @type algorithm: int
+ @ivar fp_type: the digest type
+ @type fp_type: int
+ @ivar fingerprint: the fingerprint
+ @type fingerprint: string
+ @see: draft-ietf-secsh-dns-05.txt"""
+
+ __slots__ = ['algorithm', 'fp_type', 'fingerprint']
+
+ def __init__(self, rdclass, rdtype, algorithm, fp_type,
+ fingerprint):
+ super(SSHFP, self).__init__(rdclass, rdtype)
+ self.algorithm = algorithm
+ self.fp_type = fp_type
+ self.fingerprint = fingerprint
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %s' % (self.algorithm,
+ self.fp_type,
+ dns.rdata._hexify(self.fingerprint,
+ chunksize=128))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ algorithm = tok.get_uint8()
+ fp_type = tok.get_uint8()
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ fingerprint = b''.join(chunks)
+ fingerprint = binascii.unhexlify(fingerprint)
+ return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!BB", self.algorithm, self.fp_type)
+ file.write(header)
+ file.write(self.fingerprint)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack("!BB", wire[current: current + 2])
+ current += 2
+ rdlen -= 2
+ fingerprint = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], fingerprint)
+
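
The fingerprint field above is hex in presentation form and raw bytes internally. An illustrative sketch with dummy data, assuming the bundled helpers:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

fp = '0123456789abcdef0123456789abcdef01234567'  # 20 dummy bytes in hex
sshfp = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SSHFP, '1 1 ' + fp)
print(sshfp.algorithm, sshfp.fp_type)  # 1 1
print(len(sshfp.fingerprint))          # 20
```
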
diff --git a/lib/dns/rdtypes/ANY/TLSA.py b/lib/dns/rdtypes/ANY/TLSA.py
new file mode 100644
index 00000000..23f4e94b
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/TLSA.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.rdata
+import dns.rdatatype
+
+
+class TLSA(dns.rdata.Rdata):
+
+ """TLSA record
+
+ @ivar usage: The certificate usage
+ @type usage: int
+ @ivar selector: The selector field
+ @type selector: int
+ @ivar mtype: The 'matching type' field
+ @type mtype: int
+ @ivar cert: The 'Certificate Association Data' field
+ @type cert: string
+ @see: RFC 6698"""
+
+ __slots__ = ['usage', 'selector', 'mtype', 'cert']
+
+ def __init__(self, rdclass, rdtype, usage, selector,
+ mtype, cert):
+ super(TLSA, self).__init__(rdclass, rdtype)
+ self.usage = usage
+ self.selector = selector
+ self.mtype = mtype
+ self.cert = cert
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %d %s' % (self.usage,
+ self.selector,
+ self.mtype,
+ dns.rdata._hexify(self.cert,
+ chunksize=128))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ usage = tok.get_uint8()
+ selector = tok.get_uint8()
+ mtype = tok.get_uint8()
+ cert_chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ cert_chunks.append(t.value.encode())
+ cert = b''.join(cert_chunks)
+ cert = binascii.unhexlify(cert)
+ return cls(rdclass, rdtype, usage, selector, mtype, cert)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!BBB", self.usage, self.selector, self.mtype)
+ file.write(header)
+ file.write(self.cert)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack("!BBB", wire[current: current + 3])
+ current += 3
+ rdlen -= 3
+ cert = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], cert)
+
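
TLSA follows the same pattern: usage, selector and matching type, then hex certificate association data. A hedged example with dummy data, assuming the bundled helpers:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

cert = '00' * 32  # 32 dummy bytes, the length of a SHA-256 digest
tlsa = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TLSA, '3 1 1 ' + cert)
print(tlsa.usage, tlsa.selector, tlsa.mtype)  # 3 1 1
print(len(tlsa.cert))                         # 32
```
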
diff --git a/lib/dns/rdtypes/ANY/TXT.py b/lib/dns/rdtypes/ANY/TXT.py
new file mode 100644
index 00000000..6c7fa450
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/TXT.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.txtbase
+
+
+class TXT(dns.rdtypes.txtbase.TXTBase):
+
+ """TXT record"""
diff --git a/lib/dns/rdtypes/ANY/URI.py b/lib/dns/rdtypes/ANY/URI.py
new file mode 100644
index 00000000..0c121d2c
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/URI.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+from dns._compat import text_type
+
+
+class URI(dns.rdata.Rdata):
+
+ """URI record
+
+ @ivar priority: the priority
+ @type priority: int
+ @ivar weight: the weight
+ @type weight: int
+ @ivar target: the target host
+ @type target: dns.name.Name object
+ @see: draft-faltstrom-uri-13"""
+
+ __slots__ = ['priority', 'weight', 'target']
+
+ def __init__(self, rdclass, rdtype, priority, weight, target):
+ super(URI, self).__init__(rdclass, rdtype)
+ self.priority = priority
+ self.weight = weight
+ if len(target) < 1:
+ raise dns.exception.SyntaxError("URI target cannot be empty")
+ if isinstance(target, text_type):
+ self.target = target.encode()
+ else:
+ self.target = target
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d "%s"' % (self.priority, self.weight,
+ self.target.decode())
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ priority = tok.get_uint16()
+ weight = tok.get_uint16()
+ target = tok.get().unescape()
+ if not (target.is_quoted_string() or target.is_identifier()):
+ raise dns.exception.SyntaxError("URI target must be a string")
+ tok.get_eol()
+ return cls(rdclass, rdtype, priority, weight, target.value)
+
+ def to_wire(self, file, compress=None, origin=None):
+ two_ints = struct.pack("!HH", self.priority, self.weight)
+ file.write(two_ints)
+ file.write(self.target)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ if rdlen < 5:
+ raise dns.exception.FormError('URI RR is shorter than 5 octets')
+
+ (priority, weight) = struct.unpack('!HH', wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ target = wire[current: current + rdlen]
+ current += rdlen
+
+ return cls(rdclass, rdtype, priority, weight, target)
+
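
The URI target above is kept as bytes and quoted on output. A small sketch, assuming the bundled helpers; the URL is illustrative:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

uri = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.URI,
                          '10 1 "https://example.com/"')
print(uri.priority, uri.weight)  # 10 1
print(uri.target)                # b'https://example.com/'
print(uri.to_text())             # 10 1 "https://example.com/"
```
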
diff --git a/lib/dns/rdtypes/ANY/X25.py b/lib/dns/rdtypes/ANY/X25.py
new file mode 100644
index 00000000..f5cca114
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/X25.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import text_type
+
+
+class X25(dns.rdata.Rdata):
+
+ """X25 record
+
+ @ivar address: the PSDN address
+ @type address: string
+ @see: RFC 1183"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(X25, self).__init__(rdclass, rdtype)
+ if isinstance(address, text_type):
+ self.address = address.encode()
+ else:
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '"%s"' % dns.rdata._escapify(self.address)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_string()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ l = len(self.address)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(self.address)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l != rdlen:
+ raise dns.exception.FormError
+ address = wire[current: current + l].unwrap()
+ return cls(rdclass, rdtype, address)
+
diff --git a/lib/dns/rdtypes/ANY/__init__.py b/lib/dns/rdtypes/ANY/__init__.py
new file mode 100644
index 00000000..ea9c3e2e
--- /dev/null
+++ b/lib/dns/rdtypes/ANY/__init__.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class ANY (generic) rdata type classes."""
+
+__all__ = [
+ 'AFSDB',
+ 'CDNSKEY',
+ 'CDS',
+ 'CERT',
+ 'CNAME',
+ 'DLV',
+ 'DNAME',
+ 'DNSKEY',
+ 'DS',
+ 'EUI48',
+ 'EUI64',
+ 'GPOS',
+ 'HINFO',
+ 'HIP',
+ 'ISDN',
+ 'LOC',
+ 'MX',
+ 'NS',
+ 'NSEC',
+ 'NSEC3',
+ 'NSEC3PARAM',
+ 'TLSA',
+ 'PTR',
+ 'RP',
+ 'RRSIG',
+ 'RT',
+ 'SOA',
+ 'SPF',
+ 'SSHFP',
+ 'TXT',
+ 'X25',
+]
diff --git a/lib/dns/rdtypes/IN/A.py b/lib/dns/rdtypes/IN/A.py
new file mode 100644
index 00000000..42faf9ba
--- /dev/null
+++ b/lib/dns/rdtypes/IN/A.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.ipv4
+import dns.rdata
+import dns.tokenizer
+
+
+class A(dns.rdata.Rdata):
+
+ """A record.
+
+ @ivar address: an IPv4 address
+ @type address: string (in the standard "dotted quad" format)"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(A, self).__init__(rdclass, rdtype)
+ # check that it's OK
+ dns.ipv4.inet_aton(address)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_identifier()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(dns.ipv4.inet_aton(self.address))
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = dns.ipv4.inet_ntoa(wire[current: current + rdlen]).decode()
+ return cls(rdclass, rdtype, address)
+
diff --git a/lib/dns/rdtypes/IN/AAAA.py b/lib/dns/rdtypes/IN/AAAA.py
new file mode 100644
index 00000000..d2c65c63
--- /dev/null
+++ b/lib/dns/rdtypes/IN/AAAA.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+
+
+class AAAA(dns.rdata.Rdata):
+
+ """AAAA record.
+
+ @ivar address: an IPv6 address
+ @type address: string (in the standard IPv6 format)"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(AAAA, self).__init__(rdclass, rdtype)
+ # check that it's OK
+ dns.inet.inet_pton(dns.inet.AF_INET6, address)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_identifier()
+ tok.get_eol()
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.address))
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = dns.inet.inet_ntop(dns.inet.AF_INET6,
+ wire[current: current + rdlen])
+ return cls(rdclass, rdtype, address)
+
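
Both address classes above validate their input in `__init__` via `dns.ipv4.inet_aton` / `dns.inet.inet_pton` and keep the presentation form. A sketch, assuming the bundled helpers:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

a = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.1')
aaaa = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.AAAA, '2001:db8::1')
print(a.address)     # 192.0.2.1
print(aaaa.address)  # 2001:db8::1
```
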
diff --git a/lib/dns/rdtypes/IN/APL.py b/lib/dns/rdtypes/IN/APL.py
new file mode 100644
index 00000000..82026adf
--- /dev/null
+++ b/lib/dns/rdtypes/IN/APL.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.exception
+import dns.inet
+import dns.rdata
+import dns.tokenizer
+from dns._compat import xrange
+
+
+class APLItem(object):
+
+ """An APL list item.
+
+ @ivar family: the address family (IANA address family registry)
+ @type family: int
+ @ivar negation: is this item negated?
+ @type negation: bool
+ @ivar address: the address
+ @type address: string
+ @ivar prefix: the prefix length
+ @type prefix: int
+ """
+
+ __slots__ = ['family', 'negation', 'address', 'prefix']
+
+ def __init__(self, family, negation, address, prefix):
+ self.family = family
+ self.negation = negation
+ self.address = address
+ self.prefix = prefix
+
+ def __str__(self):
+ if self.negation:
+ return "!%d:%s/%s" % (self.family, self.address, self.prefix)
+ else:
+ return "%d:%s/%s" % (self.family, self.address, self.prefix)
+
+ def to_wire(self, file):
+ if self.family == 1:
+ address = dns.inet.inet_pton(dns.inet.AF_INET, self.address)
+ elif self.family == 2:
+ address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
+ else:
+ address = binascii.unhexlify(self.address)
+ #
+ # Truncate least significant zero bytes.
+ #
+ last = 0
+ for i in xrange(len(address) - 1, -1, -1):
+ if address[i] != chr(0):
+ last = i + 1
+ break
+ address = address[0: last]
+ l = len(address)
+ assert l < 128
+ if self.negation:
+ l |= 0x80
+ header = struct.pack('!HBB', self.family, self.prefix, l)
+ file.write(header)
+ file.write(address)
+
+
+class APL(dns.rdata.Rdata):
+
+ """APL record.
+
+ @ivar items: a list of APL items
+ @type items: list of APLItem
+ @see: RFC 3123"""
+
+ __slots__ = ['items']
+
+ def __init__(self, rdclass, rdtype, items):
+ super(APL, self).__init__(rdclass, rdtype)
+ self.items = items
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return ' '.join(map(lambda x: str(x), self.items))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ items = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ item = token.value
+ if item[0] == '!':
+ negation = True
+ item = item[1:]
+ else:
+ negation = False
+ (family, rest) = item.split(':', 1)
+ family = int(family)
+ (address, prefix) = rest.split('/', 1)
+ prefix = int(prefix)
+ item = APLItem(family, negation, address, prefix)
+ items.append(item)
+
+ return cls(rdclass, rdtype, items)
+
+ def to_wire(self, file, compress=None, origin=None):
+ for item in self.items:
+ item.to_wire(file)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ items = []
+ while 1:
+ if rdlen == 0:
+ break
+ if rdlen < 4:
+ raise dns.exception.FormError
+ header = struct.unpack('!HBB', wire[current: current + 4])
+ afdlen = header[2]
+ if afdlen > 127:
+ negation = True
+ afdlen -= 128
+ else:
+ negation = False
+ current += 4
+ rdlen -= 4
+ if rdlen < afdlen:
+ raise dns.exception.FormError
+ address = wire[current: current + afdlen].unwrap()
+ l = len(address)
+ if header[0] == 1:
+ if l < 4:
+ address += '\x00' * (4 - l)
+ address = dns.inet.inet_ntop(dns.inet.AF_INET, address)
+ elif header[0] == 2:
+ if l < 16:
+ address += '\x00' * (16 - l)
+ address = dns.inet.inet_ntop(dns.inet.AF_INET6, address)
+ else:
+ #
+ # This isn't really right according to the RFC, but it
+ # seems better than throwing an exception
+ #
+ address = address.encode('hex_codec')
+ current += afdlen
+ rdlen -= afdlen
+ item = APLItem(header[0], negation, address, header[1])
+ items.append(item)
+ return cls(rdclass, rdtype, items)
+
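
Each APL item is `[!]family:address/prefix` in presentation form, as parsed by `from_text` above. An illustrative sketch, assuming the bundled helpers:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

apl = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.APL,
                          '1:192.0.2.0/24 !1:192.0.2.128/25')
for item in apl.items:
    print(item.family, item.negation, item.address, item.prefix)
# 1 False 192.0.2.0 24
# 1 True 192.0.2.128 25
```
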
diff --git a/lib/dns/rdtypes/IN/DHCID.py b/lib/dns/rdtypes/IN/DHCID.py
new file mode 100644
index 00000000..06a850ad
--- /dev/null
+++ b/lib/dns/rdtypes/IN/DHCID.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+
+import dns.exception
+import dns.rdata
+
+
+class DHCID(dns.rdata.Rdata):
+
+ """DHCID record
+
+ @ivar data: the data (the content of the RR is opaque as far as the
+ DNS is concerned)
+ @type data: string
+ @see: RFC 4701"""
+
+ __slots__ = ['data']
+
+ def __init__(self, rdclass, rdtype, data):
+ super(DHCID, self).__init__(rdclass, rdtype)
+ self.data = data
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._base64ify(self.data)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ data = base64.b64decode(b64)
+ return cls(rdclass, rdtype, data)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.data)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ data = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, data)
+
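
The DHCID payload is opaque base64, as the docstring above notes. A tiny sketch with dummy data, assuming the bundled helpers:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

dhcid = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DHCID, 'Zm9vYmFy')
print(dhcid.data)       # b'foobar', opaque to the DNS
print(dhcid.to_text())  # Zm9vYmFy
```
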
diff --git a/lib/dns/rdtypes/IN/IPSECKEY.py b/lib/dns/rdtypes/IN/IPSECKEY.py
new file mode 100644
index 00000000..4f07bd09
--- /dev/null
+++ b/lib/dns/rdtypes/IN/IPSECKEY.py
@@ -0,0 +1,150 @@
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import base64
+
+import dns.exception
+import dns.inet
+import dns.name
+import dns.rdata
+
+
+class IPSECKEY(dns.rdata.Rdata):
+
+ """IPSECKEY record
+
+ @ivar precedence: the precedence for this key data
+ @type precedence: int
+ @ivar gateway_type: the gateway type
+ @type gateway_type: int
+ @ivar algorithm: the algorithm to use
+ @type algorithm: int
+ @ivar gateway: the gateway
+ @type gateway: None, IPv4 address, IPv6 address, or domain name
+ @ivar key: the public key
+ @type key: string
+ @see: RFC 4025"""
+
+ __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
+
+ def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
+ gateway, key):
+ super(IPSECKEY, self).__init__(rdclass, rdtype)
+ if gateway_type == 0:
+ if gateway != '.' and gateway is not None:
+ raise SyntaxError('invalid gateway for gateway type 0')
+ gateway = None
+ elif gateway_type == 1:
+ # check that it's OK
+ dns.inet.inet_pton(dns.inet.AF_INET, gateway)
+ elif gateway_type == 2:
+ # check that it's OK
+ dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
+ elif gateway_type == 3:
+ pass
+ else:
+ raise SyntaxError(
+ 'invalid IPSECKEY gateway type: %d' % gateway_type)
+ self.precedence = precedence
+ self.gateway_type = gateway_type
+ self.algorithm = algorithm
+ self.gateway = gateway
+ self.key = key
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.gateway_type == 0:
+ gateway = '.'
+ elif self.gateway_type == 1:
+ gateway = self.gateway
+ elif self.gateway_type == 2:
+ gateway = self.gateway
+ elif self.gateway_type == 3:
+ gateway = str(self.gateway.choose_relativity(origin, relativize))
+ else:
+ raise ValueError('invalid gateway type')
+ return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
+ self.algorithm, gateway,
+ dns.rdata._base64ify(self.key))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ precedence = tok.get_uint8()
+ gateway_type = tok.get_uint8()
+ algorithm = tok.get_uint8()
+ if gateway_type == 3:
+ gateway = tok.get_name().choose_relativity(origin, relativize)
+ else:
+ gateway = tok.get_string()
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ key = base64.b64decode(b64)
+ return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
+ gateway, key)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!BBB", self.precedence, self.gateway_type,
+ self.algorithm)
+ file.write(header)
+ if self.gateway_type == 0:
+ pass
+ elif self.gateway_type == 1:
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
+ elif self.gateway_type == 2:
+ file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
+ elif self.gateway_type == 3:
+ self.gateway.to_wire(file, None, origin)
+ else:
+ raise ValueError('invalid gateway type')
+ file.write(self.key)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ if rdlen < 3:
+ raise dns.exception.FormError
+ header = struct.unpack('!BBB', wire[current: current + 3])
+ gateway_type = header[1]
+ current += 3
+ rdlen -= 3
+ if gateway_type == 0:
+ gateway = None
+ elif gateway_type == 1:
+ gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
+ wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ elif gateway_type == 2:
+ gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
+ wire[current: current + 16])
+ current += 16
+ rdlen -= 16
+ elif gateway_type == 3:
+ (gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ current += cused
+ rdlen -= cused
+ else:
+ raise dns.exception.FormError('invalid IPSECKEY gateway type')
+ key = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], gateway_type, header[2],
+ gateway, key)
+
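A short sketch of how the gateway-type dispatch above plays out in practice, assuming the class is reachable via `dns.rdata.from_text`. The record text is the RFC 4025 example (gateway type 1, so the gateway field must be an IPv4 address and is validated with `inet_pton`):

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

rd = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.IPSECKEY,
    '10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==')
print(rd.precedence, rd.gateway_type, rd.gateway)  # 10 1 192.0.2.38
```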
diff --git a/lib/dns/rdtypes/IN/KX.py b/lib/dns/rdtypes/IN/KX.py
new file mode 100644
index 00000000..adbfe34b
--- /dev/null
+++ b/lib/dns/rdtypes/IN/KX.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.mxbase
+
+
+class KX(dns.rdtypes.mxbase.UncompressedMX):
+
+ """KX record"""
diff --git a/lib/dns/rdtypes/IN/NAPTR.py b/lib/dns/rdtypes/IN/NAPTR.py
new file mode 100644
index 00000000..5ae2feb1
--- /dev/null
+++ b/lib/dns/rdtypes/IN/NAPTR.py
@@ -0,0 +1,125 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.name
+import dns.rdata
+from dns._compat import xrange, text_type
+
+
+def _write_string(file, s):
+ l = len(s)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(s)
+
+
+def _sanitize(value):
+ if isinstance(value, text_type):
+ return value.encode()
+ return value
+
+
+class NAPTR(dns.rdata.Rdata):
+
+ """NAPTR record
+
+ @ivar order: order
+ @type order: int
+ @ivar preference: preference
+ @type preference: int
+ @ivar flags: flags
+ @type flags: string
+ @ivar service: service
+ @type service: string
+ @ivar regexp: regular expression
+ @type regexp: string
+ @ivar replacement: replacement name
+ @type replacement: dns.name.Name object
+ @see: RFC 3403"""
+
+ __slots__ = ['order', 'preference', 'flags', 'service', 'regexp',
+ 'replacement']
+
+ def __init__(self, rdclass, rdtype, order, preference, flags, service,
+ regexp, replacement):
+ super(NAPTR, self).__init__(rdclass, rdtype)
+ self.flags = _sanitize(flags)
+ self.service = _sanitize(service)
+ self.regexp = _sanitize(regexp)
+ self.order = order
+ self.preference = preference
+ self.replacement = replacement
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ replacement = self.replacement.choose_relativity(origin, relativize)
+ return '%d %d "%s" "%s" "%s" %s' % \
+ (self.order, self.preference,
+ dns.rdata._escapify(self.flags),
+ dns.rdata._escapify(self.service),
+ dns.rdata._escapify(self.regexp),
+ replacement)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ order = tok.get_uint16()
+ preference = tok.get_uint16()
+ flags = tok.get_string()
+ service = tok.get_string()
+ regexp = tok.get_string()
+ replacement = tok.get_name()
+ replacement = replacement.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, order, preference, flags, service,
+ regexp, replacement)
+
+ def to_wire(self, file, compress=None, origin=None):
+ two_ints = struct.pack("!HH", self.order, self.preference)
+ file.write(two_ints)
+ _write_string(file, self.flags)
+ _write_string(file, self.service)
+ _write_string(file, self.regexp)
+ self.replacement.to_wire(file, compress, origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (order, preference) = struct.unpack('!HH', wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ strings = []
+ for i in xrange(3):
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l > rdlen or rdlen < 0:
+ raise dns.exception.FormError
+ s = wire[current: current + l].unwrap()
+ current += l
+ rdlen -= l
+ strings.append(s)
+ (replacement, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ replacement = replacement.relativize(origin)
+ return cls(rdclass, rdtype, order, preference, strings[0], strings[1],
+ strings[2], replacement)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.replacement = self.replacement.choose_relativity(origin,
+ relativize)
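An illustrative NAPTR parse, assuming dispatch via `dns.rdata.from_text`; the order/preference/flags/service values and the `_sip._udp.example.com.` replacement are hypothetical, chosen to resemble a typical SIP delegation:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# order=100, preference=10, flags="S", service="SIP+D2U", empty regexp,
# replacement is a domain name that points at an SRV record.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NAPTR,
                         '100 10 "S" "SIP+D2U" "" _sip._udp.example.com.')
print(rd.order, rd.preference, rd.replacement)
```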
diff --git a/lib/dns/rdtypes/IN/NSAP.py b/lib/dns/rdtypes/IN/NSAP.py
new file mode 100644
index 00000000..6dbe5af0
--- /dev/null
+++ b/lib/dns/rdtypes/IN/NSAP.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+
+
+class NSAP(dns.rdata.Rdata):
+
+ """NSAP record.
+
+    @ivar address: an NSAP address
+ @type address: string
+ @see: RFC 1706"""
+
+ __slots__ = ['address']
+
+ def __init__(self, rdclass, rdtype, address):
+ super(NSAP, self).__init__(rdclass, rdtype)
+ self.address = address
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return "0x%s" % binascii.hexlify(self.address).decode()
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_string()
+ tok.get_eol()
+ if address[0:2] != '0x':
+ raise dns.exception.SyntaxError('string does not start with 0x')
+ address = address[2:].replace('.', '')
+ if len(address) % 2 != 0:
+ raise dns.exception.SyntaxError('hexstring has odd length')
+ address = binascii.unhexlify(address.encode())
+ return cls(rdclass, rdtype, address)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.address)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, address)
+
diff --git a/lib/dns/rdtypes/IN/NSAP_PTR.py b/lib/dns/rdtypes/IN/NSAP_PTR.py
new file mode 100644
index 00000000..56967df0
--- /dev/null
+++ b/lib/dns/rdtypes/IN/NSAP_PTR.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.rdtypes.nsbase
+
+
+class NSAP_PTR(dns.rdtypes.nsbase.UncompressedNS):
+
+ """NSAP-PTR record"""
diff --git a/lib/dns/rdtypes/IN/PX.py b/lib/dns/rdtypes/IN/PX.py
new file mode 100644
index 00000000..e1ef102b
--- /dev/null
+++ b/lib/dns/rdtypes/IN/PX.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class PX(dns.rdata.Rdata):
+
+ """PX record.
+
+ @ivar preference: the preference value
+ @type preference: int
+ @ivar map822: the map822 name
+ @type map822: dns.name.Name object
+ @ivar mapx400: the mapx400 name
+ @type mapx400: dns.name.Name object
+ @see: RFC 2163"""
+
+ __slots__ = ['preference', 'map822', 'mapx400']
+
+ def __init__(self, rdclass, rdtype, preference, map822, mapx400):
+ super(PX, self).__init__(rdclass, rdtype)
+ self.preference = preference
+ self.map822 = map822
+ self.mapx400 = mapx400
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ map822 = self.map822.choose_relativity(origin, relativize)
+ mapx400 = self.mapx400.choose_relativity(origin, relativize)
+ return '%d %s %s' % (self.preference, map822, mapx400)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ preference = tok.get_uint16()
+ map822 = tok.get_name()
+ map822 = map822.choose_relativity(origin, relativize)
+ mapx400 = tok.get_name(None)
+ mapx400 = mapx400.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ def to_wire(self, file, compress=None, origin=None):
+ pref = struct.pack("!H", self.preference)
+ file.write(pref)
+ self.map822.to_wire(file, None, origin)
+ self.mapx400.to_wire(file, None, origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (preference, ) = struct.unpack('!H', wire[current: current + 2])
+ current += 2
+ rdlen -= 2
+ (map822, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused > rdlen:
+ raise dns.exception.FormError
+ current += cused
+ rdlen -= cused
+ if origin is not None:
+ map822 = map822.relativize(origin)
+ (mapx400, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ mapx400 = mapx400.relativize(origin)
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.map822 = self.map822.choose_relativity(origin, relativize)
+ self.mapx400 = self.mapx400.choose_relativity(origin, relativize)
diff --git a/lib/dns/rdtypes/IN/SRV.py b/lib/dns/rdtypes/IN/SRV.py
new file mode 100644
index 00000000..f4396d61
--- /dev/null
+++ b/lib/dns/rdtypes/IN/SRV.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class SRV(dns.rdata.Rdata):
+
+ """SRV record
+
+ @ivar priority: the priority
+ @type priority: int
+ @ivar weight: the weight
+ @type weight: int
+ @ivar port: the port of the service
+ @type port: int
+ @ivar target: the target host
+ @type target: dns.name.Name object
+ @see: RFC 2782"""
+
+ __slots__ = ['priority', 'weight', 'port', 'target']
+
+ def __init__(self, rdclass, rdtype, priority, weight, port, target):
+ super(SRV, self).__init__(rdclass, rdtype)
+ self.priority = priority
+ self.weight = weight
+ self.port = port
+ self.target = target
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ target = self.target.choose_relativity(origin, relativize)
+ return '%d %d %d %s' % (self.priority, self.weight, self.port,
+ target)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ priority = tok.get_uint16()
+ weight = tok.get_uint16()
+ port = tok.get_uint16()
+ target = tok.get_name(None)
+ target = target.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, priority, weight, port, target)
+
+ def to_wire(self, file, compress=None, origin=None):
+ three_ints = struct.pack("!HHH", self.priority, self.weight, self.port)
+ file.write(three_ints)
+ self.target.to_wire(file, compress, origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (priority, weight, port) = struct.unpack('!HHH',
+ wire[current: current + 6])
+ current += 6
+ rdlen -= 6
+ (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ target = target.relativize(origin)
+ return cls(rdclass, rdtype, priority, weight, port, target)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.target = self.target.choose_relativity(origin, relativize)
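A minimal SRV example under the same assumption that `dns.rdata.from_text` routes to the class above; the priority/weight/port values and target host are illustrative:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# priority=10, weight=60, port=5060, target is the host providing the service.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SRV,
                         '10 60 5060 sip.example.com.')
print(rd.priority, rd.weight, rd.port, rd.target)
```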
diff --git a/lib/dns/rdtypes/IN/WKS.py b/lib/dns/rdtypes/IN/WKS.py
new file mode 100644
index 00000000..da2a2d88
--- /dev/null
+++ b/lib/dns/rdtypes/IN/WKS.py
@@ -0,0 +1,106 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+
+import dns.ipv4
+import dns.rdata
+from dns._compat import xrange
+
+_proto_tcp = socket.getprotobyname('tcp')
+_proto_udp = socket.getprotobyname('udp')
+
+
+class WKS(dns.rdata.Rdata):
+
+ """WKS record
+
+ @ivar address: the address
+ @type address: string
+ @ivar protocol: the protocol
+ @type protocol: int
+ @ivar bitmap: the bitmap
+ @type bitmap: string
+ @see: RFC 1035"""
+
+ __slots__ = ['address', 'protocol', 'bitmap']
+
+ def __init__(self, rdclass, rdtype, address, protocol, bitmap):
+ super(WKS, self).__init__(rdclass, rdtype)
+ self.address = address
+ self.protocol = protocol
+ if not isinstance(bitmap, bytearray):
+ self.bitmap = bytearray(bitmap)
+ else:
+ self.bitmap = bitmap
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ bits = []
+ for i in xrange(0, len(self.bitmap)):
+ byte = self.bitmap[i]
+ for j in xrange(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(str(i * 8 + j))
+ text = ' '.join(bits)
+ return '%s %d %s' % (self.address, self.protocol, text)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ address = tok.get_string()
+ protocol = tok.get_string()
+ if protocol.isdigit():
+ protocol = int(protocol)
+ else:
+ protocol = socket.getprotobyname(protocol)
+ bitmap = bytearray()
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ if token.value.isdigit():
+ serv = int(token.value)
+ else:
+ if protocol != _proto_udp and protocol != _proto_tcp:
+ raise NotImplementedError("protocol must be TCP or UDP")
+ if protocol == _proto_udp:
+ protocol_text = "udp"
+ else:
+ protocol_text = "tcp"
+ serv = socket.getservbyname(token.value, protocol_text)
+ i = serv // 8
+ l = len(bitmap)
+ if l < i + 1:
+ for j in xrange(l, i + 1):
+ bitmap.append(0)
+ bitmap[i] = bitmap[i] | (0x80 >> (serv % 8))
+ bitmap = dns.rdata._truncate_bitmap(bitmap)
+ return cls(rdclass, rdtype, address, protocol, bitmap)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(dns.ipv4.inet_aton(self.address))
+ protocol = struct.pack('!B', self.protocol)
+ file.write(protocol)
+ file.write(self.bitmap)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ address = dns.ipv4.inet_ntoa(wire[current: current + 4])
+ protocol, = struct.unpack('!B', wire[current + 4: current + 5])
+ current += 5
+ rdlen -= 5
+ bitmap = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, address, protocol, bitmap)
+
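A sketch of the WKS bitmap handling above, assuming dispatch via `dns.rdata.from_text`; the address and port list are made up for illustration. Each listed service sets one bit in the internal bitmap, and `to_text()` expands the bits back into port numbers:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# Services 25 (smtp) and 80 (http) over protocol 6 (TCP) for 10.0.0.1.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.WKS,
                         '10.0.0.1 6 25 80')
print(rd.to_text())  # 10.0.0.1 6 25 80
```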
diff --git a/lib/dns/rdtypes/IN/__init__.py b/lib/dns/rdtypes/IN/__init__.py
new file mode 100644
index 00000000..24cf1ece
--- /dev/null
+++ b/lib/dns/rdtypes/IN/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class IN rdata type classes."""
+
+__all__ = [
+ 'A',
+ 'AAAA',
+ 'APL',
+ 'DHCID',
+ 'KX',
+ 'NAPTR',
+ 'NSAP',
+ 'NSAP_PTR',
+ 'PX',
+ 'SRV',
+ 'WKS',
+]
diff --git a/lib/dns/rdtypes/__init__.py b/lib/dns/rdtypes/__init__.py
new file mode 100644
index 00000000..826efbb6
--- /dev/null
+++ b/lib/dns/rdtypes/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata type classes"""
+
+__all__ = [
+ 'ANY',
+ 'IN',
+ 'euibase',
+ 'mxbase',
+ 'nsbase',
+]
diff --git a/lib/dns/rdtypes/dnskeybase.py b/lib/dns/rdtypes/dnskeybase.py
new file mode 100644
index 00000000..85c4b23f
--- /dev/null
+++ b/lib/dns/rdtypes/dnskeybase.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import struct
+
+import dns.exception
+import dns.dnssec
+import dns.rdata
+
+# wildcard import
+__all__ = ["SEP", "REVOKE", "ZONE",
+ "flags_to_text_set", "flags_from_text_set"]
+
+# flag constants
+SEP = 0x0001
+REVOKE = 0x0080
+ZONE = 0x0100
+
+_flag_by_text = {
+ 'SEP': SEP,
+ 'REVOKE': REVOKE,
+ 'ZONE': ZONE
+}
+
+# We construct the inverse mapping programmatically to ensure that we
+# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
+# would cause the mapping not to be a true inverse.
+_flag_by_value = dict((y, x) for x, y in _flag_by_text.items())
+
+
+def flags_to_text_set(flags):
+ """Convert a DNSKEY flags value to set texts
+ @rtype: set([string])"""
+
+ flags_set = set()
+ mask = 0x1
+ while mask <= 0x8000:
+ if flags & mask:
+ text = _flag_by_value.get(mask)
+ if not text:
+ text = hex(mask)
+ flags_set.add(text)
+ mask <<= 1
+ return flags_set
+
+
+def flags_from_text_set(texts_set):
+ """Convert set of DNSKEY flag mnemonic texts to DNSKEY flag value
+ @rtype: int"""
+
+ flags = 0
+ for text in texts_set:
+ try:
+ flags += _flag_by_text[text]
+ except KeyError:
+ raise NotImplementedError(
+ "DNSKEY flag '%s' is not supported" % text)
+ return flags
+
+
+class DNSKEYBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like a DNSKEY record
+
+ @ivar flags: the key flags
+ @type flags: int
+ @ivar protocol: the protocol for which this key may be used
+ @type protocol: int
+ @ivar algorithm: the algorithm used for the key
+ @type algorithm: int
+ @ivar key: the public key
+ @type key: string"""
+
+ __slots__ = ['flags', 'protocol', 'algorithm', 'key']
+
+ def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key):
+ super(DNSKEYBase, self).__init__(rdclass, rdtype)
+ self.flags = flags
+ self.protocol = protocol
+ self.algorithm = algorithm
+ self.key = key
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %d %s' % (self.flags, self.protocol, self.algorithm,
+ dns.rdata._base64ify(self.key))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ flags = tok.get_uint16()
+ protocol = tok.get_uint8()
+ algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ b64 = b''.join(chunks)
+ key = base64.b64decode(b64)
+ return cls(rdclass, rdtype, flags, protocol, algorithm, key)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
+ file.write(header)
+ file.write(self.key)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ if rdlen < 4:
+ raise dns.exception.FormError
+ header = struct.unpack('!HBB', wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ key = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2],
+ key)
+
+ def flags_to_text_set(self):
+ """Convert a DNSKEY flags value to set texts
+ @rtype: set([string])"""
+ return flags_to_text_set(self.flags)
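A quick sketch of the two flag-conversion helpers defined in this module; the value 257 (0x0101 = ZONE | SEP) is the flags field of a typical key-signing key:

```
from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set

print(flags_to_text_set(257))                 # {'SEP', 'ZONE'}
print(flags_from_text_set({'ZONE', 'SEP'}))   # 257
```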
diff --git a/lib/dns/rdtypes/dsbase.py b/lib/dns/rdtypes/dsbase.py
new file mode 100644
index 00000000..80f792ac
--- /dev/null
+++ b/lib/dns/rdtypes/dsbase.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2010, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+import binascii
+
+import dns.rdata
+import dns.rdatatype
+
+
+class DSBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like a DS record
+
+ @ivar key_tag: the key tag
+ @type key_tag: int
+ @ivar algorithm: the algorithm
+ @type algorithm: int
+ @ivar digest_type: the digest type
+ @type digest_type: int
+ @ivar digest: the digest
+    @type digest: string
+ @see: draft-ietf-dnsext-delegation-signer-14.txt"""
+
+ __slots__ = ['key_tag', 'algorithm', 'digest_type', 'digest']
+
+ def __init__(self, rdclass, rdtype, key_tag, algorithm, digest_type,
+ digest):
+ super(DSBase, self).__init__(rdclass, rdtype)
+ self.key_tag = key_tag
+ self.algorithm = algorithm
+ self.digest_type = digest_type
+ self.digest = digest
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return '%d %d %d %s' % (self.key_tag, self.algorithm,
+ self.digest_type,
+ dns.rdata._hexify(self.digest,
+ chunksize=128))
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ key_tag = tok.get_uint16()
+ algorithm = tok.get_uint8()
+ digest_type = tok.get_uint8()
+ chunks = []
+ while 1:
+ t = tok.get().unescape()
+ if t.is_eol_or_eof():
+ break
+ if not t.is_identifier():
+ raise dns.exception.SyntaxError
+ chunks.append(t.value.encode())
+ digest = b''.join(chunks)
+ digest = binascii.unhexlify(digest)
+ return cls(rdclass, rdtype, key_tag, algorithm, digest_type,
+ digest)
+
+ def to_wire(self, file, compress=None, origin=None):
+ header = struct.pack("!HBB", self.key_tag, self.algorithm,
+ self.digest_type)
+ file.write(header)
+ file.write(self.digest)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ header = struct.unpack("!HBB", wire[current: current + 4])
+ current += 4
+ rdlen -= 4
+ digest = wire[current: current + rdlen].unwrap()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], digest)
+
diff --git a/lib/dns/rdtypes/euibase.py b/lib/dns/rdtypes/euibase.py
new file mode 100644
index 00000000..13109163
--- /dev/null
+++ b/lib/dns/rdtypes/euibase.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+
+import dns.rdata
+
+
+class EUIBase(dns.rdata.Rdata):
+
+ """EUIxx record
+
+    @ivar eui: xx-bit Extended Unique Identifier (EUI-xx)
+    @type eui: string
+    @see: RFC 7043"""
+
+ __slots__ = ['eui']
+ # define these in subclasses
+ # byte_len = 6 # 0123456789ab (in hex)
+ # text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab
+
+ def __init__(self, rdclass, rdtype, eui):
+ super(EUIBase, self).__init__(rdclass, rdtype)
+ if len(eui) != self.byte_len:
+ raise dns.exception.FormError('EUI%s rdata has to have %s bytes'
+ % (self.byte_len * 8, self.byte_len))
+ self.eui = eui
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._hexify(self.eui, chunksize=2).replace(' ', '-')
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ text = tok.get_string()
+ tok.get_eol()
+ if len(text) != cls.text_len:
+ raise dns.exception.SyntaxError(
+ 'Input text must have %s characters' % cls.text_len)
+ expected_dash_idxs = range(2, cls.byte_len * 3 - 1, 3)
+ for i in expected_dash_idxs:
+ if text[i] != '-':
+ raise dns.exception.SyntaxError('Dash expected at position %s'
+ % i)
+ text = text.replace('-', '')
+ try:
+ data = binascii.unhexlify(text.encode())
+ except (ValueError, TypeError) as ex:
+ raise dns.exception.SyntaxError('Hex decoding error: %s' % str(ex))
+ return cls(rdclass, rdtype, data)
+
+ def to_wire(self, file, compress=None, origin=None):
+ file.write(self.eui)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ eui = wire[current:current + rdlen].unwrap()
+ return cls(rdclass, rdtype, eui)
+
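A minimal sketch of the length and dash validation above, assuming the concrete EUI48 rdtype (which subclasses EUIBase with `byte_len = 6`) is included elsewhere in this vendored copy; the address is a placeholder:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# Exactly six dash-separated hex octets are accepted; anything else
# raises dns.exception.SyntaxError in from_text above.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.EUI48,
                         '01-23-45-67-89-ab')
print(rd.to_text())  # 01-23-45-67-89-ab
```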
diff --git a/lib/dns/rdtypes/mxbase.py b/lib/dns/rdtypes/mxbase.py
new file mode 100644
index 00000000..5ac8cef9
--- /dev/null
+++ b/lib/dns/rdtypes/mxbase.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""MX-like base classes."""
+
+from io import BytesIO
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class MXBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like an MX record.
+
+ @ivar preference: the preference value
+ @type preference: int
+ @ivar exchange: the exchange name
+ @type exchange: dns.name.Name object"""
+
+ __slots__ = ['preference', 'exchange']
+
+ def __init__(self, rdclass, rdtype, preference, exchange):
+ super(MXBase, self).__init__(rdclass, rdtype)
+ self.preference = preference
+ self.exchange = exchange
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ exchange = self.exchange.choose_relativity(origin, relativize)
+ return '%d %s' % (self.preference, exchange)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ preference = tok.get_uint16()
+ exchange = tok.get_name()
+ exchange = exchange.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, preference, exchange)
+
+ def to_wire(self, file, compress=None, origin=None):
+ pref = struct.pack("!H", self.preference)
+ file.write(pref)
+ self.exchange.to_wire(file, compress, origin)
+
+ def to_digestable(self, origin=None):
+ return struct.pack("!H", self.preference) + \
+ self.exchange.to_digestable(origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (preference, ) = struct.unpack('!H', wire[current: current + 2])
+ current += 2
+ rdlen -= 2
+ (exchange, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ exchange = exchange.relativize(origin)
+ return cls(rdclass, rdtype, preference, exchange)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.exchange = self.exchange.choose_relativity(origin, relativize)
+
+
+class UncompressedMX(MXBase):
+
+ """Base class for rdata that is like an MX record, but whose name
+ is not compressed when converted to DNS wire format, and whose
+ digestable form is not downcased."""
+
+ def to_wire(self, file, compress=None, origin=None):
+ super(UncompressedMX, self).to_wire(file, None, origin)
+
+ def to_digestable(self, origin=None):
+ f = BytesIO()
+ self.to_wire(f, None, origin)
+ return f.getvalue()
+
+
+class UncompressedDowncasingMX(MXBase):
+
+ """Base class for rdata that is like an MX record, but whose name
+    is not compressed when converted to DNS wire format."""
+
+ def to_wire(self, file, compress=None, origin=None):
+ super(UncompressedDowncasingMX, self).to_wire(file, None, origin)
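An illustrative MXBase round trip, assuming the concrete MX rdtype (a thin subclass of MXBase living under dns.rdtypes.ANY) is included elsewhere in this vendored copy; the preference and exchange are placeholders:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# preference=10, exchange is the mail host name.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX,
                         '10 mail.example.com.')
print(rd.preference, rd.exchange)
```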
diff --git a/lib/dns/rdtypes/nsbase.py b/lib/dns/rdtypes/nsbase.py
new file mode 100644
index 00000000..79333a14
--- /dev/null
+++ b/lib/dns/rdtypes/nsbase.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""NS-like base classes."""
+
+from io import BytesIO
+
+import dns.exception
+import dns.rdata
+import dns.name
+
+
+class NSBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like an NS record.
+
+ @ivar target: the target name of the rdata
+ @type target: dns.name.Name object"""
+
+ __slots__ = ['target']
+
+ def __init__(self, rdclass, rdtype, target):
+ super(NSBase, self).__init__(rdclass, rdtype)
+ self.target = target
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ target = self.target.choose_relativity(origin, relativize)
+ return str(target)
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ target = tok.get_name()
+ target = target.choose_relativity(origin, relativize)
+ tok.get_eol()
+ return cls(rdclass, rdtype, target)
+
+ def to_wire(self, file, compress=None, origin=None):
+ self.target.to_wire(file, compress, origin)
+
+ def to_digestable(self, origin=None):
+ return self.target.to_digestable(origin)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ (target, cused) = dns.name.from_wire(wire[: current + rdlen],
+ current)
+ if cused != rdlen:
+ raise dns.exception.FormError
+ if origin is not None:
+ target = target.relativize(origin)
+ return cls(rdclass, rdtype, target)
+
+ def choose_relativity(self, origin=None, relativize=True):
+ self.target = self.target.choose_relativity(origin, relativize)
+
+
+class UncompressedNS(NSBase):
+
+ """Base class for rdata that is like an NS record, but whose name
+    is not compressed when converted to DNS wire format, and whose
+ digestable form is not downcased."""
+
+ def to_wire(self, file, compress=None, origin=None):
+ super(UncompressedNS, self).to_wire(file, None, origin)
+
+ def to_digestable(self, origin=None):
+ f = BytesIO()
+ self.to_wire(f, None, origin)
+ return f.getvalue()
diff --git a/lib/dns/rdtypes/txtbase.py b/lib/dns/rdtypes/txtbase.py
new file mode 100644
index 00000000..54d7e6f0
--- /dev/null
+++ b/lib/dns/rdtypes/txtbase.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""TXT-like base class."""
+
+import struct
+
+import dns.exception
+import dns.rdata
+import dns.tokenizer
+from dns._compat import binary_type
+
+
+class TXTBase(dns.rdata.Rdata):
+
+ """Base class for rdata that is like a TXT record
+
+ @ivar strings: the text strings
+ @type strings: list of string
+ @see: RFC 1035"""
+
+ __slots__ = ['strings']
+
+ def __init__(self, rdclass, rdtype, strings):
+ super(TXTBase, self).__init__(rdclass, rdtype)
+ if isinstance(strings, str):
+ strings = [strings]
+ self.strings = strings[:]
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ txt = ''
+ prefix = ''
+ for s in self.strings:
+ txt += '%s"%s"' % (prefix, dns.rdata._escapify(s))
+ prefix = ' '
+ return txt
+
+ @classmethod
+ def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
+ strings = []
+ while 1:
+ token = tok.get().unescape()
+ if token.is_eol_or_eof():
+ break
+ if not (token.is_quoted_string() or token.is_identifier()):
+ raise dns.exception.SyntaxError("expected a string")
+ if len(token.value) > 255:
+ raise dns.exception.SyntaxError("string too long")
+ value = token.value
+ if isinstance(value, binary_type):
+ strings.append(value)
+ else:
+ strings.append(value.encode())
+ if len(strings) == 0:
+ raise dns.exception.UnexpectedEnd
+ return cls(rdclass, rdtype, strings)
+
+ def to_wire(self, file, compress=None, origin=None):
+ for s in self.strings:
+ l = len(s)
+ assert l < 256
+ file.write(struct.pack('!B', l))
+ file.write(s)
+
+ @classmethod
+ def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
+ strings = []
+ while rdlen > 0:
+ l = wire[current]
+ current += 1
+ rdlen -= 1
+ if l > rdlen:
+ raise dns.exception.FormError
+ s = wire[current: current + l].unwrap()
+ current += l
+ rdlen -= l
+ strings.append(s)
+ return cls(rdclass, rdtype, strings)
+
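A small sketch of the TXT-style string handling above, assuming the concrete TXT rdtype (which subclasses TXTBase) is included elsewhere in this vendored copy; the strings are placeholders. Each quoted string becomes one DNS character-string of at most 255 bytes:

```
import dns.rdata
import dns.rdataclass
import dns.rdatatype

rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT,
                         '"v=spf1 -all" "second string"')
print(rd.strings)    # list of byte strings, one per quoted string
print(rd.to_text())  # "v=spf1 -all" "second string"
```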
diff --git a/lib/dns/renderer.py b/lib/dns/renderer.py
new file mode 100644
index 00000000..ddc277cd
--- /dev/null
+++ b/lib/dns/renderer.py
@@ -0,0 +1,330 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Help for building DNS wire format messages"""
+
+from io import BytesIO
+import struct
+import random
+import time
+import sys
+
+import dns.exception
+import dns.tsig
+from ._compat import long
+
+
+QUESTION = 0
+ANSWER = 1
+AUTHORITY = 2
+ADDITIONAL = 3
+
+
+class Renderer(object):
+
+ """Helper class for building DNS wire-format messages.
+
+ Most applications can use the higher-level L{dns.message.Message}
+ class and its to_wire() method to generate wire-format messages.
+ This class is for those applications which need finer control
+ over the generation of messages.
+
+ Typical use::
+
+ r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
+ r.add_question(qname, qtype, qclass)
+ r.add_rrset(dns.renderer.ANSWER, rrset_1)
+ r.add_rrset(dns.renderer.ANSWER, rrset_2)
+ r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
+ r.add_edns(0, 0, 4096)
+        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1)
+        r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2)
+ r.write_header()
+ r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
+ wire = r.get_wire()
+
+ @ivar output: where rendering is written
+ @type output: BytesIO object
+ @ivar id: the message id
+ @type id: int
+ @ivar flags: the message flags
+ @type flags: int
+ @ivar max_size: the maximum size of the message
+ @type max_size: int
+ @ivar origin: the origin to use when rendering relative names
+ @type origin: dns.name.Name object
+ @ivar compress: the compression table
+ @type compress: dict
+ @ivar section: the section currently being rendered
+ @type section: int (dns.renderer.QUESTION, dns.renderer.ANSWER,
+ dns.renderer.AUTHORITY, or dns.renderer.ADDITIONAL)
+ @ivar counts: list of the number of RRs in each section
+ @type counts: int list of length 4
+ @ivar mac: the MAC of the rendered message (if TSIG was used)
+ @type mac: string
+ """
+
+ def __init__(self, id=None, flags=0, max_size=65535, origin=None):
+ """Initialize a new renderer.
+
+ @param id: the message id
+ @type id: int
+ @param flags: the DNS message flags
+ @type flags: int
+ @param max_size: the maximum message size; the default is 65535.
+ If rendering results in a message greater than I{max_size},
+ then L{dns.exception.TooBig} will be raised.
+ @type max_size: int
+ @param origin: the origin to use when rendering relative names
+ @type origin: dns.name.Name or None.
+ """
+
+ self.output = BytesIO()
+ if id is None:
+ self.id = random.randint(0, 65535)
+ else:
+ self.id = id
+ self.flags = flags
+ self.max_size = max_size
+ self.origin = origin
+ self.compress = {}
+ self.section = QUESTION
+ self.counts = [0, 0, 0, 0]
+ self.output.write(b'\x00' * 12)
+ self.mac = ''
+
+ def _rollback(self, where):
+ """Truncate the output buffer at offset I{where}, and remove any
+ compression table entries that pointed beyond the truncation
+ point.
+
+ @param where: the offset
+ @type where: int
+ """
+
+ self.output.seek(where)
+ self.output.truncate()
+ keys_to_delete = []
+ for k, v in self.compress.items():
+ if v >= where:
+ keys_to_delete.append(k)
+ for k in keys_to_delete:
+ del self.compress[k]
+
+ def _set_section(self, section):
+ """Set the renderer's current section.
+
+        Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
+ ADDITIONAL. Sections may be empty.
+
+ @param section: the section
+ @type section: int
+ @raises dns.exception.FormError: an attempt was made to set
+ a section value less than the current section.
+ """
+
+ if self.section != section:
+ if self.section > section:
+ raise dns.exception.FormError
+ self.section = section
+
+ def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
+ """Add a question to the message.
+
+ @param qname: the question name
+ @type qname: dns.name.Name
+ @param rdtype: the question rdata type
+ @type rdtype: int
+ @param rdclass: the question rdata class
+ @type rdclass: int
+ """
+
+ self._set_section(QUESTION)
+ before = self.output.tell()
+ qname.to_wire(self.output, self.compress, self.origin)
+ self.output.write(struct.pack("!HH", rdtype, rdclass))
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[QUESTION] += 1
+
+ def add_rrset(self, section, rrset, **kw):
+ """Add the rrset to the specified section.
+
+ Any keyword arguments are passed on to the rdataset's to_wire()
+ routine.
+
+ @param section: the section
+ @type section: int
+ @param rrset: the rrset
+ @type rrset: dns.rrset.RRset object
+ """
+
+ self._set_section(section)
+ before = self.output.tell()
+ n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[section] += n
+
+ def add_rdataset(self, section, name, rdataset, **kw):
+ """Add the rdataset to the specified section, using the specified
+ name as the owner name.
+
+ Any keyword arguments are passed on to the rdataset's to_wire()
+ routine.
+
+ @param section: the section
+ @type section: int
+ @param name: the owner name
+ @type name: dns.name.Name object
+ @param rdataset: the rdataset
+ @type rdataset: dns.rdataset.Rdataset object
+ """
+
+ self._set_section(section)
+ before = self.output.tell()
+ n = rdataset.to_wire(name, self.output, self.compress, self.origin,
+ **kw)
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[section] += n
+
+ def add_edns(self, edns, ednsflags, payload, options=None):
+ """Add an EDNS OPT record to the message.
+
+ @param edns: The EDNS level to use.
+ @type edns: int
+ @param ednsflags: EDNS flag values.
+ @type ednsflags: int
+ @param payload: The EDNS sender's payload field, which is the maximum
+ size of UDP datagram the sender can handle.
+ @type payload: int
+ @param options: The EDNS options list
+ @type options: list of dns.edns.Option instances
+ @see: RFC 2671
+ """
+
+ # make sure the EDNS version in ednsflags agrees with edns
+ ednsflags &= long(0xFF00FFFF)
+ ednsflags |= (edns << 16)
+ self._set_section(ADDITIONAL)
+ before = self.output.tell()
+ self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload,
+ ednsflags, 0))
+ if options is not None:
+ lstart = self.output.tell()
+ for opt in options:
+ stuff = struct.pack("!HH", opt.otype, 0)
+ self.output.write(stuff)
+ start = self.output.tell()
+ opt.to_wire(self.output)
+ end = self.output.tell()
+ assert end - start < 65536
+ self.output.seek(start - 2)
+ stuff = struct.pack("!H", end - start)
+ self.output.write(stuff)
+ self.output.seek(0, 2)
+ lend = self.output.tell()
+ assert lend - lstart < 65536
+ self.output.seek(lstart - 2)
+ stuff = struct.pack("!H", lend - lstart)
+ self.output.write(stuff)
+ self.output.seek(0, 2)
+ after = self.output.tell()
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.counts[ADDITIONAL] += 1
+
+ def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
+ request_mac, algorithm=dns.tsig.default_algorithm):
+ """Add a TSIG signature to the message.
+
+ @param keyname: the TSIG key name
+ @type keyname: dns.name.Name object
+ @param secret: the secret to use
+ @type secret: string
+ @param fudge: TSIG time fudge
+ @type fudge: int
+ @param id: the message id to encode in the tsig signature
+ @type id: int
+ @param tsig_error: TSIG error code; default is 0.
+ @type tsig_error: int
+ @param other_data: TSIG other data.
+ @type other_data: string
+ @param request_mac: This message is a response to the request which
+ had the specified MAC.
+ @type request_mac: string
+ @param algorithm: the TSIG algorithm to use
+ @type algorithm: dns.name.Name object
+ """
+
+ self._set_section(ADDITIONAL)
+ before = self.output.tell()
+ s = self.output.getvalue()
+ (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s,
+ keyname,
+ secret,
+ int(time.time()),
+ fudge,
+ id,
+ tsig_error,
+ other_data,
+ request_mac,
+ algorithm=algorithm)
+ keyname.to_wire(self.output, self.compress, self.origin)
+ self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG,
+ dns.rdataclass.ANY, 0, 0))
+ rdata_start = self.output.tell()
+ self.output.write(tsig_rdata)
+ after = self.output.tell()
+ assert after - rdata_start < 65536
+ if after >= self.max_size:
+ self._rollback(before)
+ raise dns.exception.TooBig
+ self.output.seek(rdata_start - 2)
+ self.output.write(struct.pack('!H', after - rdata_start))
+ self.counts[ADDITIONAL] += 1
+ self.output.seek(10)
+ self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
+ self.output.seek(0, 2)
+
+ def write_header(self):
+ """Write the DNS message header.
+
+ Writing the DNS message header is done after all sections
+ have been rendered, but before the optional TSIG signature
+ is added.
+ """
+
+ self.output.seek(0)
+ self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
+ self.counts[0], self.counts[1],
+ self.counts[2], self.counts[3]))
+ self.output.seek(0, 2)
+
+ def get_wire(self):
+ """Return the wire format message.
+
+ @rtype: string
+ """
+
+ return self.output.getvalue()
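A minimal, runnable variant of the "typical use" shown in the Renderer docstring, restricted to the methods defined above; the query name, id, and flags are placeholders. Note that `write_header()` must come after all sections have been added, as the docstring explains:

```
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.renderer

# Render a bare one-question query by hand instead of via dns.message.
r = dns.renderer.Renderer(id=0x1234, flags=0x0100, max_size=512)
r.add_question(dns.name.from_text('example.com.'),
               dns.rdatatype.A, dns.rdataclass.IN)
r.write_header()      # fills in the 12-byte header with id, flags, counts
wire = r.get_wire()   # header plus the question section
print(len(wire))
```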
diff --git a/lib/dns/resolver.py b/lib/dns/resolver.py
new file mode 100644
index 00000000..bccb430d
--- /dev/null
+++ b/lib/dns/resolver.py
@@ -0,0 +1,1343 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS stub resolver.
+
+@var default_resolver: The default resolver object
+@type default_resolver: dns.resolver.Resolver object"""
+
+import socket
+import sys
+import time
+import random
+
+try:
+ import threading as _threading
+except ImportError:
+ import dummy_threading as _threading
+
+import dns.exception
+import dns.flags
+import dns.ipv4
+import dns.ipv6
+import dns.message
+import dns.name
+import dns.query
+import dns.rcode
+import dns.rdataclass
+import dns.rdatatype
+import dns.reversename
+import dns.tsig
+from ._compat import xrange, string_types
+
+if sys.platform == 'win32':
+ try:
+ import winreg as _winreg
+ except ImportError:
+ import _winreg
+
+class NXDOMAIN(dns.exception.DNSException):
+
+ """The DNS query name does not exist."""
+ supp_kwargs = set(['qname'])
+
+ def __str__(self):
+ if 'qname' not in self.kwargs:
+ return super(NXDOMAIN, self).__str__()
+
+ qname = self.kwargs['qname']
+ msg = self.__doc__[:-1]
+ if isinstance(qname, (list, set)):
+ if len(qname) > 1:
+ msg = 'None of DNS query names exist'
+ qname = list(map(str, qname))
+ else:
+ qname = qname[0]
+ return "%s: %s" % (msg, (str(qname)))
+
+
+class YXDOMAIN(dns.exception.DNSException):
+
+ """The DNS query name is too long after DNAME substitution."""
+
+# The definition of the Timeout exception has moved from here to the
+# dns.exception module. We keep dns.resolver.Timeout defined for
+# backwards compatibility.
+
+Timeout = dns.exception.Timeout
+
+
+class NoAnswer(dns.exception.DNSException):
+
+ """The DNS response does not contain an answer to the question."""
+ fmt = 'The DNS response does not contain an answer ' + \
+ 'to the question: {query}'
+ supp_kwargs = set(['response'])
+
+ def _fmt_kwargs(self, **kwargs):
+ return super(NoAnswer, self)._fmt_kwargs(
+ query=kwargs['response'].question)
+
+
+class NoNameservers(dns.exception.DNSException):
+
+ """All nameservers failed to answer the query.
+
+ @param errors: list of servers and respective errors
+ @type errors: [(server ip address, any object convertible to string)]
+    A non-empty errors list adds an explanatory message.
+ """
+
+ msg = "All nameservers failed to answer the query."
+ fmt = "%s {query}: {errors}" % msg[:-1]
+ supp_kwargs = set(['request', 'errors'])
+
+ def _fmt_kwargs(self, **kwargs):
+ srv_msgs = []
+ for err in kwargs['errors']:
+ srv_msgs.append('Server %s %s port %s answered %s' % (err[0],
+ 'TCP' if err[1] else 'UDP', err[2], err[3]))
+ return super(NoNameservers, self)._fmt_kwargs(
+ query=kwargs['request'].question, errors='; '.join(srv_msgs))
+
+
+class NotAbsolute(dns.exception.DNSException):
+
+ """An absolute domain name is required but a relative name was provided."""
+
+
+class NoRootSOA(dns.exception.DNSException):
+
+ """There is no SOA RR at the DNS root name. This should never happen!"""
+
+
+class NoMetaqueries(dns.exception.DNSException):
+
+ """DNS metaqueries are not allowed."""
+
+
+class Answer(object):
+
+ """DNS stub resolver answer
+
+ Instances of this class bundle up the result of a successful DNS
+ resolution.
+
+ For convenience, the answer object implements much of the sequence
+ protocol, forwarding to its rrset. E.g. "for a in answer" is
+ equivalent to "for a in answer.rrset", "answer[i]" is equivalent
+ to "answer.rrset[i]", and "answer[i:j]" is equivalent to
+ "answer.rrset[i:j]".
+
+    Note that CNAMEs or DNAMEs in the response may mean that the answer
+    node's name is not the query name.
+
+ @ivar qname: The query name
+ @type qname: dns.name.Name object
+ @ivar rdtype: The query type
+ @type rdtype: int
+ @ivar rdclass: The query class
+ @type rdclass: int
+ @ivar response: The response message
+ @type response: dns.message.Message object
+ @ivar rrset: The answer
+ @type rrset: dns.rrset.RRset object
+ @ivar expiration: The time when the answer expires
+ @type expiration: float (seconds since the epoch)
+ @ivar canonical_name: The canonical name of the query name
+ @type canonical_name: dns.name.Name object
+ """
+
+ def __init__(self, qname, rdtype, rdclass, response,
+ raise_on_no_answer=True):
+ self.qname = qname
+ self.rdtype = rdtype
+ self.rdclass = rdclass
+ self.response = response
+ min_ttl = -1
+ rrset = None
+ for count in xrange(0, 15):
+ try:
+ rrset = response.find_rrset(response.answer, qname,
+ rdclass, rdtype)
+ if min_ttl == -1 or rrset.ttl < min_ttl:
+ min_ttl = rrset.ttl
+ break
+ except KeyError:
+ if rdtype != dns.rdatatype.CNAME:
+ try:
+ crrset = response.find_rrset(response.answer,
+ qname,
+ rdclass,
+ dns.rdatatype.CNAME)
+ if min_ttl == -1 or crrset.ttl < min_ttl:
+ min_ttl = crrset.ttl
+ for rd in crrset:
+ qname = rd.target
+ break
+ continue
+ except KeyError:
+ if raise_on_no_answer:
+ raise NoAnswer(response=response)
+ if raise_on_no_answer:
+ raise NoAnswer(response=response)
+ if rrset is None and raise_on_no_answer:
+ raise NoAnswer(response=response)
+ self.canonical_name = qname
+ self.rrset = rrset
+ if rrset is None:
+ while 1:
+ # Look for a SOA RR whose owner name is a superdomain
+ # of qname.
+ try:
+ srrset = response.find_rrset(response.authority, qname,
+ rdclass, dns.rdatatype.SOA)
+ if min_ttl == -1 or srrset.ttl < min_ttl:
+ min_ttl = srrset.ttl
+ if srrset[0].minimum < min_ttl:
+ min_ttl = srrset[0].minimum
+ break
+ except KeyError:
+ try:
+ qname = qname.parent()
+ except dns.name.NoParent:
+ break
+ self.expiration = time.time() + min_ttl
+
+ def __getattr__(self, attr):
+ if attr == 'name':
+ return self.rrset.name
+ elif attr == 'ttl':
+ return self.rrset.ttl
+ elif attr == 'covers':
+ return self.rrset.covers
+ elif attr == 'rdclass':
+ return self.rrset.rdclass
+ elif attr == 'rdtype':
+ return self.rrset.rdtype
+ else:
+ raise AttributeError(attr)
+
+ def __len__(self):
+ return len(self.rrset)
+
+ def __iter__(self):
+ return iter(self.rrset)
+
+ def __getitem__(self, i):
+ return self.rrset[i]
+
+ def __delitem__(self, i):
+ del self.rrset[i]
+
+ def __getslice__(self, i, j):
+ return self.rrset[i:j]
+
+ def __delslice__(self, i, j):
+ del self.rrset[i:j]
+
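+# Usage sketch (illustrative, names assumed): an Answer forwards the sequence
+# protocol to its rrset, so typical calling code looks like
+#     answer = resolver.query('www.example.com', 'A')
+#     addresses = [rdata.address for rdata in answer]
+# answer.canonical_name is the CNAME-chased name and answer.expiration is the
+# absolute time after which cached copies of the answer are stale.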
+
+class Cache(object):
+
+ """Simple DNS answer cache.
+
+ @ivar data: A dictionary of cached data
+ @type data: dict
+ @ivar cleaning_interval: The number of seconds between cleanings. The
+ default is 300 (5 minutes).
+ @type cleaning_interval: float
+ @ivar next_cleaning: The time the cache should next be cleaned (in seconds
+ since the epoch.)
+ @type next_cleaning: float
+ """
+
+ def __init__(self, cleaning_interval=300.0):
+ """Initialize a DNS cache.
+
+ @param cleaning_interval: the number of seconds between periodic
+ cleanings. The default is 300.0
+ @type cleaning_interval: float.
+ """
+
+ self.data = {}
+ self.cleaning_interval = cleaning_interval
+ self.next_cleaning = time.time() + self.cleaning_interval
+ self.lock = _threading.Lock()
+
+ def _maybe_clean(self):
+ """Clean the cache if it's time to do so."""
+
+ now = time.time()
+ if self.next_cleaning <= now:
+ keys_to_delete = []
+ for (k, v) in self.data.items():
+ if v.expiration <= now:
+ keys_to_delete.append(k)
+ for k in keys_to_delete:
+ del self.data[k]
+ now = time.time()
+ self.next_cleaning = now + self.cleaning_interval
+
+ def get(self, key):
+ """Get the answer associated with I{key}. Returns None if
+ no answer is cached for the key.
+ @param key: the key
+ @type key: (dns.name.Name, int, int) tuple whose values are the
+ query name, rdtype, and rdclass.
+ @rtype: dns.resolver.Answer object or None
+ """
+
+ try:
+ self.lock.acquire()
+ self._maybe_clean()
+ v = self.data.get(key)
+ if v is None or v.expiration <= time.time():
+ return None
+ return v
+ finally:
+ self.lock.release()
+
+ def put(self, key, value):
+ """Associate key and value in the cache.
+ @param key: the key
+ @type key: (dns.name.Name, int, int) tuple whose values are the
+ query name, rdtype, and rdclass.
+ @param value: The answer being cached
+ @type value: dns.resolver.Answer object
+ """
+
+ try:
+ self.lock.acquire()
+ self._maybe_clean()
+ self.data[key] = value
+ finally:
+ self.lock.release()
+
+ def flush(self, key=None):
+ """Flush the cache.
+
+ If I{key} is specified, only that item is flushed. Otherwise
+ the entire cache is flushed.
+
+ @param key: the key to flush
+ @type key: (dns.name.Name, int, int) tuple or None
+ """
+
+ try:
+ self.lock.acquire()
+ if key is not None:
+ if key in self.data:
+ del self.data[key]
+ else:
+ self.data = {}
+ self.next_cleaning = time.time() + self.cleaning_interval
+ finally:
+ self.lock.release()
+
+
+class LRUCacheNode(object):
+
+ """LRUCache node.
+ """
+
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+ self.prev = self
+ self.next = self
+
+ def link_before(self, node):
+ self.prev = node.prev
+ self.next = node
+ node.prev.next = self
+ node.prev = self
+
+ def link_after(self, node):
+ self.prev = node
+ self.next = node.next
+ node.next.prev = self
+ node.next = self
+
+ def unlink(self):
+ self.next.prev = self.prev
+ self.prev.next = self.next
+
+
+class LRUCache(object):
+
+ """Bounded least-recently-used DNS answer cache.
+
+ This cache is better than the simple cache (above) if you're
+ running a web crawler or other process that does a lot of
+ resolutions. The LRUCache has a maximum number of nodes, and when
+ it is full, the least-recently used node is removed to make space
+ for a new one.
+
+ @ivar data: A dictionary of cached data
+ @type data: dict
+ @ivar sentinel: sentinel node for circular doubly linked list of nodes
+ @type sentinel: LRUCacheNode object
+ @ivar max_size: The maximum number of nodes
+ @type max_size: int
+ """
+
+ def __init__(self, max_size=100000):
+ """Initialize a DNS cache.
+
+ @param max_size: The maximum number of nodes to cache; the default is
+ 100000. Must be > 1.
+ @type max_size: int
+ """
+ self.data = {}
+ self.set_max_size(max_size)
+ self.sentinel = LRUCacheNode(None, None)
+ self.lock = _threading.Lock()
+
+ def set_max_size(self, max_size):
+ if max_size < 1:
+ max_size = 1
+ self.max_size = max_size
+
+ def get(self, key):
+ """Get the answer associated with I{key}. Returns None if
+ no answer is cached for the key.
+ @param key: the key
+ @type key: (dns.name.Name, int, int) tuple whose values are the
+ query name, rdtype, and rdclass.
+ @rtype: dns.resolver.Answer object or None
+ """
+ try:
+ self.lock.acquire()
+ node = self.data.get(key)
+ if node is None:
+ return None
+ # Unlink because we're either going to move the node to the front
+ # of the LRU list or we're going to free it.
+ node.unlink()
+ if node.value.expiration <= time.time():
+ del self.data[node.key]
+ return None
+ node.link_after(self.sentinel)
+ return node.value
+ finally:
+ self.lock.release()
+
+ def put(self, key, value):
+ """Associate key and value in the cache.
+ @param key: the key
+ @type key: (dns.name.Name, int, int) tuple whose values are the
+ query name, rdtype, and rdclass.
+ @param value: The answer being cached
+ @type value: dns.resolver.Answer object
+ """
+ try:
+ self.lock.acquire()
+ node = self.data.get(key)
+ if node is not None:
+ node.unlink()
+ del self.data[node.key]
+ while len(self.data) >= self.max_size:
+ node = self.sentinel.prev
+ node.unlink()
+ del self.data[node.key]
+ node = LRUCacheNode(key, value)
+ node.link_after(self.sentinel)
+ self.data[key] = node
+ finally:
+ self.lock.release()
+
+ def flush(self, key=None):
+ """Flush the cache.
+
+ If I{key} is specified, only that item is flushed. Otherwise
+ the entire cache is flushed.
+
+ @param key: the key to flush
+ @type key: (dns.name.Name, int, int) tuple or None
+ """
+ try:
+ self.lock.acquire()
+ if key is not None:
+ node = self.data.get(key)
+ if node is not None:
+ node.unlink()
+ del self.data[node.key]
+ else:
+ node = self.sentinel.next
+ while node != self.sentinel:
+ next = node.next
+ node.prev = None
+ node.next = None
+ node = next
+ self.data = {}
+ finally:
+ self.lock.release()
+
+
+class Resolver(object):
+
+ """DNS stub resolver
+
+ @ivar domain: The domain of this host
+ @type domain: dns.name.Name object
+ @ivar nameservers: A list of nameservers to query. Each nameserver is
+ a string which contains the IP address of a nameserver.
+ @type nameservers: list of strings
+ @ivar search: The search list. If the query name is a relative name,
+ the resolver will construct an absolute query name by appending the search
+ names one by one to the query name.
+ @type search: list of dns.name.Name objects
+ @ivar port: The port to which to send queries. The default is 53.
+ @type port: int
+ @ivar timeout: The number of seconds to wait for a response from a
+ server, before timing out.
+ @type timeout: float
+ @ivar lifetime: The total number of seconds to spend trying to get an
+ answer to the question. If the lifetime expires, a Timeout exception
+ will occur.
+ @type lifetime: float
+ @ivar keyring: The TSIG keyring to use. The default is None.
+ @type keyring: dict
+ @ivar keyname: The TSIG keyname to use. The default is None.
+ @type keyname: dns.name.Name object
+ @ivar keyalgorithm: The TSIG key algorithm to use. The default is
+ dns.tsig.default_algorithm.
+ @type keyalgorithm: string
+ @ivar edns: The EDNS level to use. The default is -1, no Edns.
+ @type edns: int
+ @ivar ednsflags: The EDNS flags
+ @type ednsflags: int
+ @ivar payload: The EDNS payload size. The default is 0.
+ @type payload: int
+ @ivar flags: The message flags to use. The default is None (i.e. not
+ overwritten)
+ @type flags: int
+ @ivar cache: The cache to use. The default is None.
+ @type cache: dns.resolver.Cache object
+ @ivar retry_servfail: should we retry a nameserver if it says SERVFAIL?
+    The default is False.
+ @type retry_servfail: bool
+ """
+
+ def __init__(self, filename='/etc/resolv.conf', configure=True):
+ """Initialize a resolver instance.
+
+ @param filename: The filename of a configuration file in
+ standard /etc/resolv.conf format. This parameter is meaningful
+ only when I{configure} is true and the platform is POSIX.
+ @type filename: string or file object
+ @param configure: If True (the default), the resolver instance
+ is configured in the normal fashion for the operating system
+ the resolver is running on. (I.e. a /etc/resolv.conf file on
+ POSIX systems and from the registry on Windows systems.)
+ @type configure: bool"""
+
+ self.reset()
+ if configure:
+ if sys.platform == 'win32':
+ self.read_registry()
+ elif filename:
+ self.read_resolv_conf(filename)
+
+ def reset(self):
+ """Reset all resolver configuration to the defaults."""
+ self.domain = \
+ dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
+ if len(self.domain) == 0:
+ self.domain = dns.name.root
+ self.nameservers = []
+ self.nameserver_ports = {}
+ self.port = 53
+ self.search = []
+ self.timeout = 2.0
+ self.lifetime = 30.0
+ self.keyring = None
+ self.keyname = None
+ self.keyalgorithm = dns.tsig.default_algorithm
+ self.edns = -1
+ self.ednsflags = 0
+ self.payload = 0
+ self.cache = None
+ self.flags = None
+ self.retry_servfail = False
+ self.rotate = False
+
+ def read_resolv_conf(self, f):
+ """Process f as a file in the /etc/resolv.conf format. If f is
+ a string, it is used as the name of the file to open; otherwise it
+ is treated as the file itself."""
+ if isinstance(f, string_types):
+ try:
+ f = open(f, 'r')
+ except IOError:
+ # /etc/resolv.conf doesn't exist, can't be read, etc.
+ # We'll just use the default resolver configuration.
+ self.nameservers = ['127.0.0.1']
+ return
+ want_close = True
+ else:
+ want_close = False
+ try:
+ for l in f:
+ if len(l) == 0 or l[0] == '#' or l[0] == ';':
+ continue
+ tokens = l.split()
+
+ # Any line containing less than 2 tokens is malformed
+ if len(tokens) < 2:
+ continue
+
+ if tokens[0] == 'nameserver':
+ self.nameservers.append(tokens[1])
+ elif tokens[0] == 'domain':
+ self.domain = dns.name.from_text(tokens[1])
+ elif tokens[0] == 'search':
+ for suffix in tokens[1:]:
+ self.search.append(dns.name.from_text(suffix))
+ elif tokens[0] == 'options':
+ if 'rotate' in tokens[1:]:
+ self.rotate = True
+ finally:
+ if want_close:
+ f.close()
+ if len(self.nameservers) == 0:
+ self.nameservers.append('127.0.0.1')
+
+ def _determine_split_char(self, entry):
+ #
+ # The windows registry irritatingly changes the list element
+ # delimiter in between ' ' and ',' (and vice-versa) in various
+ # versions of windows.
+ #
+ if entry.find(' ') >= 0:
+ split_char = ' '
+ elif entry.find(',') >= 0:
+ split_char = ','
+ else:
+ # probably a singleton; treat as a space-separated list.
+ split_char = ' '
+ return split_char
+
+ def _config_win32_nameservers(self, nameservers):
+ """Configure a NameServer registry entry."""
+ # we call str() on nameservers to convert it from unicode to ascii
+ nameservers = str(nameservers)
+ split_char = self._determine_split_char(nameservers)
+ ns_list = nameservers.split(split_char)
+ for ns in ns_list:
+ if ns not in self.nameservers:
+ self.nameservers.append(ns)
+
+ def _config_win32_domain(self, domain):
+ """Configure a Domain registry entry."""
+ # we call str() on domain to convert it from unicode to ascii
+ self.domain = dns.name.from_text(str(domain))
+
+ def _config_win32_search(self, search):
+ """Configure a Search registry entry."""
+ # we call str() on search to convert it from unicode to ascii
+ search = str(search)
+ split_char = self._determine_split_char(search)
+ search_list = search.split(split_char)
+ for s in search_list:
+ if s not in self.search:
+ self.search.append(dns.name.from_text(s))
+
+ def _config_win32_fromkey(self, key):
+ """Extract DNS info from a registry key."""
+ try:
+ servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
+ except WindowsError:
+ servers = None
+ if servers:
+ self._config_win32_nameservers(servers)
+ try:
+ dom, rtype = _winreg.QueryValueEx(key, 'Domain')
+ if dom:
+ self._config_win32_domain(dom)
+ except WindowsError:
+ pass
+ else:
+ try:
+ servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
+ except WindowsError:
+ servers = None
+ if servers:
+ self._config_win32_nameservers(servers)
+ try:
+ dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
+ if dom:
+ self._config_win32_domain(dom)
+ except WindowsError:
+ pass
+ try:
+ search, rtype = _winreg.QueryValueEx(key, 'SearchList')
+ except WindowsError:
+ search = None
+ if search:
+ self._config_win32_search(search)
+
+ def read_registry(self):
+ """Extract resolver configuration from the Windows registry."""
+ lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
+ want_scan = False
+ try:
+ try:
+ # XP, 2000
+ tcp_params = _winreg.OpenKey(lm,
+ r'SYSTEM\CurrentControlSet'
+ r'\Services\Tcpip\Parameters')
+ want_scan = True
+ except EnvironmentError:
+ # ME
+ tcp_params = _winreg.OpenKey(lm,
+ r'SYSTEM\CurrentControlSet'
+ r'\Services\VxD\MSTCP')
+ try:
+ self._config_win32_fromkey(tcp_params)
+ finally:
+ tcp_params.Close()
+ if want_scan:
+ interfaces = _winreg.OpenKey(lm,
+ r'SYSTEM\CurrentControlSet'
+ r'\Services\Tcpip\Parameters'
+ r'\Interfaces')
+ try:
+ i = 0
+ while True:
+ try:
+ guid = _winreg.EnumKey(interfaces, i)
+ i += 1
+ key = _winreg.OpenKey(interfaces, guid)
+ if not self._win32_is_nic_enabled(lm, guid, key):
+ continue
+ try:
+ self._config_win32_fromkey(key)
+ finally:
+ key.Close()
+ except EnvironmentError:
+ break
+ finally:
+ interfaces.Close()
+ finally:
+ lm.Close()
+
+ def _win32_is_nic_enabled(self, lm, guid, interface_key):
+ # Look in the Windows Registry to determine whether the network
+ # interface corresponding to the given guid is enabled.
+ #
+ # (Code contributed by Paul Marks, thanks!)
+ #
+ try:
+ # This hard-coded location seems to be consistent, at least
+ # from Windows 2000 through Vista.
+ connection_key = _winreg.OpenKey(
+ lm,
+ r'SYSTEM\CurrentControlSet\Control\Network'
+ r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
+ r'\%s\Connection' % guid)
+
+ try:
+ # The PnpInstanceID points to a key inside Enum
+ (pnp_id, ttype) = _winreg.QueryValueEx(
+ connection_key, 'PnpInstanceID')
+
+ if ttype != _winreg.REG_SZ:
+ raise ValueError
+
+ device_key = _winreg.OpenKey(
+ lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)
+
+ try:
+ # Get ConfigFlags for this device
+ (flags, ttype) = _winreg.QueryValueEx(
+ device_key, 'ConfigFlags')
+
+ if ttype != _winreg.REG_DWORD:
+ raise ValueError
+
+ # Based on experimentation, bit 0x1 indicates that the
+ # device is disabled.
+ return not (flags & 0x1)
+
+ finally:
+ device_key.Close()
+ finally:
+ connection_key.Close()
+ except (EnvironmentError, ValueError):
+ # Pre-vista, enabled interfaces seem to have a non-empty
+ # NTEContextList; this was how dnspython detected enabled
+ # nics before the code above was contributed. We've retained
+ # the old method since we don't know if the code above works
+ # on Windows 95/98/ME.
+ try:
+ (nte, ttype) = _winreg.QueryValueEx(interface_key,
+ 'NTEContextList')
+ return nte is not None
+ except WindowsError:
+ return False
+
+ def _compute_timeout(self, start):
+ now = time.time()
+ duration = now - start
+ if duration < 0:
+ if duration < -1:
+ # Time going backwards is bad. Just give up.
+ raise Timeout(timeout=duration)
+ else:
+ # Time went backwards, but only a little. This can
+ # happen, e.g. under vmware with older linux kernels.
+ # Pretend it didn't happen.
+ now = start
+ if duration >= self.lifetime:
+ raise Timeout(timeout=duration)
+ return min(self.lifetime - duration, self.timeout)
+
+ def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+ tcp=False, source=None, raise_on_no_answer=True, source_port=0):
+ """Query nameservers to find the answer to the question.
+
+ The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
+ of the appropriate type, or strings that can be converted into objects
+        of the appropriate type. E.g. for I{rdtype}, the integer 2 and
+        the string 'NS' both mean to query for records with DNS rdata type NS.
+
+ @param qname: the query name
+ @type qname: dns.name.Name object or string
+ @param rdtype: the query type
+ @type rdtype: int or string
+ @param rdclass: the query class
+ @type rdclass: int or string
+ @param tcp: use TCP to make the query (default is False).
+ @type tcp: bool
+ @param source: bind to this IP address (defaults to machine default
+ IP).
+ @type source: IP address in dotted quad notation
+ @param raise_on_no_answer: raise NoAnswer if there's no answer
+        (default is True).
+ @type raise_on_no_answer: bool
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @rtype: dns.resolver.Answer instance
+ @raises Timeout: no answers could be found in the specified lifetime
+ @raises NXDOMAIN: the query name does not exist
+ @raises YXDOMAIN: the query name is too long after DNAME substitution
+ @raises NoAnswer: the response did not contain an answer and
+ raise_on_no_answer is True.
+ @raises NoNameservers: no non-broken nameservers are available to
+ answer the question."""
+
+ if isinstance(qname, string_types):
+ qname = dns.name.from_text(qname, None)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if dns.rdatatype.is_metatype(rdtype):
+ raise NoMetaqueries
+ if isinstance(rdclass, string_types):
+ rdclass = dns.rdataclass.from_text(rdclass)
+ if dns.rdataclass.is_metaclass(rdclass):
+ raise NoMetaqueries
+ qnames_to_try = []
+ if qname.is_absolute():
+ qnames_to_try.append(qname)
+ else:
+ if len(qname) > 1:
+ qnames_to_try.append(qname.concatenate(dns.name.root))
+ if self.search:
+ for suffix in self.search:
+ qnames_to_try.append(qname.concatenate(suffix))
+ else:
+ qnames_to_try.append(qname.concatenate(self.domain))
+ all_nxdomain = True
+ start = time.time()
+ for qname in qnames_to_try:
+ if self.cache:
+ answer = self.cache.get((qname, rdtype, rdclass))
+ if answer is not None:
+ if answer.rrset is None and raise_on_no_answer:
+ raise NoAnswer
+ else:
+ return answer
+ request = dns.message.make_query(qname, rdtype, rdclass)
+ if self.keyname is not None:
+ request.use_tsig(self.keyring, self.keyname,
+ algorithm=self.keyalgorithm)
+ request.use_edns(self.edns, self.ednsflags, self.payload)
+ if self.flags is not None:
+ request.flags = self.flags
+ response = None
+ #
+ # make a copy of the servers list so we can alter it later.
+ #
+ nameservers = self.nameservers[:]
+ errors = []
+ if self.rotate:
+ random.shuffle(nameservers)
+ backoff = 0.10
+ while response is None:
+ if len(nameservers) == 0:
+ raise NoNameservers(request=request, errors=errors)
+ for nameserver in nameservers[:]:
+ timeout = self._compute_timeout(start)
+ port = self.nameserver_ports.get(nameserver, self.port)
+ try:
+ tcp_attempt = tcp
+ if tcp:
+ response = dns.query.tcp(request, nameserver,
+ timeout, port,
+ source=source,
+ source_port=source_port)
+ else:
+ response = dns.query.udp(request, nameserver,
+ timeout, port,
+ source=source,
+ source_port=source_port)
+ if response.flags & dns.flags.TC:
+ # Response truncated; retry with TCP.
+ tcp_attempt = True
+ timeout = self._compute_timeout(start)
+ response = \
+ dns.query.tcp(request, nameserver,
+ timeout, port,
+ source=source,
+ source_port=source_port)
+ except (socket.error, dns.exception.Timeout) as ex:
+ #
+ # Communication failure or timeout. Go to the
+ # next server
+ #
+ errors.append((nameserver, tcp_attempt, port, ex,
+ response))
+ response = None
+ continue
+ except dns.query.UnexpectedSource as ex:
+ #
+ # Who knows? Keep going.
+ #
+ errors.append((nameserver, tcp_attempt, port, ex,
+ response))
+ response = None
+ continue
+ except dns.exception.FormError as ex:
+ #
+ # We don't understand what this server is
+ # saying. Take it out of the mix and
+ # continue.
+ #
+ nameservers.remove(nameserver)
+ errors.append((nameserver, tcp_attempt, port, ex,
+ response))
+ response = None
+ continue
+ except EOFError as ex:
+ #
+ # We're using TCP and they hung up on us.
+ # Probably they don't support TCP (though
+ # they're supposed to!). Take it out of the
+ # mix and continue.
+ #
+ nameservers.remove(nameserver)
+ errors.append((nameserver, tcp_attempt, port, ex,
+ response))
+ response = None
+ continue
+ rcode = response.rcode()
+ if rcode == dns.rcode.YXDOMAIN:
+ ex = YXDOMAIN()
+ errors.append((nameserver, tcp_attempt, port, ex,
+ response))
+ raise ex
+ if rcode == dns.rcode.NOERROR or \
+ rcode == dns.rcode.NXDOMAIN:
+ break
+ #
+ # We got a response, but we're not happy with the
+ # rcode in it. Remove the server from the mix if
+ # the rcode isn't SERVFAIL.
+ #
+ if rcode != dns.rcode.SERVFAIL or not self.retry_servfail:
+ nameservers.remove(nameserver)
+ errors.append((nameserver, tcp_attempt, port,
+ dns.rcode.to_text(rcode), response))
+ response = None
+ if response is not None:
+ break
+ #
+ # All nameservers failed!
+ #
+ if len(nameservers) > 0:
+ #
+ # But we still have servers to try. Sleep a bit
+ # so we don't pound them!
+ #
+ timeout = self._compute_timeout(start)
+ sleep_time = min(timeout, backoff)
+ backoff *= 2
+ time.sleep(sleep_time)
+ if response.rcode() == dns.rcode.NXDOMAIN:
+ continue
+ all_nxdomain = False
+ break
+ if all_nxdomain:
+ raise NXDOMAIN(qname=qnames_to_try)
+ answer = Answer(qname, rdtype, rdclass, response,
+ raise_on_no_answer)
+ if self.cache:
+ self.cache.put((qname, rdtype, rdclass), answer)
+ return answer
+
+ def use_tsig(self, keyring, keyname=None,
+ algorithm=dns.tsig.default_algorithm):
+ """Add a TSIG signature to the query.
+
+ @param keyring: The TSIG keyring to use; defaults to None.
+ @type keyring: dict
+ @param keyname: The name of the TSIG key to use; defaults to None.
+ The key must be defined in the keyring. If a keyring is specified
+ but a keyname is not, then the key used will be the first key in the
+ keyring. Note that the order of keys in a dictionary is not defined,
+ so applications should supply a keyname when a keyring is used, unless
+ they know the keyring contains only one key.
+ @param algorithm: The TSIG key algorithm to use. The default
+ is dns.tsig.default_algorithm.
+ @type algorithm: string"""
+ self.keyring = keyring
+ if keyname is None:
+ self.keyname = list(self.keyring.keys())[0]
+ else:
+ self.keyname = keyname
+ self.keyalgorithm = algorithm
+
+ def use_edns(self, edns, ednsflags, payload):
+ """Configure Edns.
+
+ @param edns: The EDNS level to use. The default is -1, no Edns.
+ @type edns: int
+ @param ednsflags: The EDNS flags
+ @type ednsflags: int
+ @param payload: The EDNS payload size. The default is 0.
+ @type payload: int"""
+
+ if edns is None:
+ edns = -1
+ self.edns = edns
+ self.ednsflags = ednsflags
+ self.payload = payload
+
+ def set_flags(self, flags):
+ """Overrides the default flags with your own
+
+ @param flags: The flags to overwrite the default with
+ @type flags: int"""
+ self.flags = flags
+
+default_resolver = None
+
+
+def get_default_resolver():
+ """Get the default resolver, initializing it if necessary."""
+ global default_resolver
+ if default_resolver is None:
+ default_resolver = Resolver()
+ return default_resolver
+
+
+def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+ tcp=False, source=None, raise_on_no_answer=True,
+ source_port=0):
+ """Query nameservers to find the answer to the question.
+
+ This is a convenience function that uses the default resolver
+ object to make the query.
+ @see: L{dns.resolver.Resolver.query} for more information on the
+ parameters."""
+ return get_default_resolver().query(qname, rdtype, rdclass, tcp, source,
+ raise_on_no_answer, source_port)
+
+
+def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
+ """Find the name of the zone which contains the specified name.
+
+ @param name: the query name
+ @type name: absolute dns.name.Name object or string
+ @param rdclass: The query class
+ @type rdclass: int
+ @param tcp: use TCP to make the query (default is False).
+ @type tcp: bool
+ @param resolver: the resolver to use
+ @type resolver: dns.resolver.Resolver object or None
+ @rtype: dns.name.Name"""
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, dns.name.root)
+ if resolver is None:
+ resolver = get_default_resolver()
+ if not name.is_absolute():
+ raise NotAbsolute(name)
+ while 1:
+ try:
+ answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
+ if answer.rrset.name == name:
+ return name
+ # otherwise we were CNAMEd or DNAMEd and need to look higher
+ except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+ pass
+ try:
+ name = name.parent()
+ except dns.name.NoParent:
+ raise NoRootSOA
+
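+# Usage sketch (illustrative): zone_for_name() walks up the name querying for
+# SOA records, so zone_for_name('www.example.com.') would typically return the
+# dns.name.Name for 'example.com.'.
+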
+#
+# Support for overriding the system resolver for all python code in the
+# running process.
+#
+
+_protocols_for_socktype = {
+ socket.SOCK_DGRAM: [socket.SOL_UDP],
+ socket.SOCK_STREAM: [socket.SOL_TCP],
+}
+
+_resolver = None
+_original_getaddrinfo = socket.getaddrinfo
+_original_getnameinfo = socket.getnameinfo
+_original_getfqdn = socket.getfqdn
+_original_gethostbyname = socket.gethostbyname
+_original_gethostbyname_ex = socket.gethostbyname_ex
+_original_gethostbyaddr = socket.gethostbyaddr
+
+
+def _getaddrinfo(host=None, service=None, family=socket.AF_UNSPEC, socktype=0,
+ proto=0, flags=0):
+ if flags & (socket.AI_ADDRCONFIG | socket.AI_V4MAPPED) != 0:
+ raise NotImplementedError
+ if host is None and service is None:
+ raise socket.gaierror(socket.EAI_NONAME)
+ v6addrs = []
+ v4addrs = []
+ canonical_name = None
+ try:
+ # Is host None or a V6 address literal?
+ if host is None:
+ canonical_name = 'localhost'
+ if flags & socket.AI_PASSIVE != 0:
+ v6addrs.append('::')
+ v4addrs.append('0.0.0.0')
+ else:
+ v6addrs.append('::1')
+ v4addrs.append('127.0.0.1')
+ else:
+ parts = host.split('%')
+ if len(parts) == 2:
+ ahost = parts[0]
+ else:
+ ahost = host
+ addr = dns.ipv6.inet_aton(ahost)
+ v6addrs.append(host)
+ canonical_name = host
+ except:
+ try:
+ # Is it a V4 address literal?
+ addr = dns.ipv4.inet_aton(host)
+ v4addrs.append(host)
+ canonical_name = host
+ except:
+ if flags & socket.AI_NUMERICHOST == 0:
+ try:
+ if family == socket.AF_INET6 or family == socket.AF_UNSPEC:
+ v6 = _resolver.query(host, dns.rdatatype.AAAA,
+ raise_on_no_answer=False)
+ # Note that setting host ensures we query the same name
+ # for A as we did for AAAA.
+ host = v6.qname
+ canonical_name = v6.canonical_name.to_text(True)
+ if v6.rrset is not None:
+ for rdata in v6.rrset:
+ v6addrs.append(rdata.address)
+ if family == socket.AF_INET or family == socket.AF_UNSPEC:
+ v4 = _resolver.query(host, dns.rdatatype.A,
+ raise_on_no_answer=False)
+ host = v4.qname
+ canonical_name = v4.canonical_name.to_text(True)
+ if v4.rrset is not None:
+ for rdata in v4.rrset:
+ v4addrs.append(rdata.address)
+ except dns.resolver.NXDOMAIN:
+ raise socket.gaierror(socket.EAI_NONAME)
+ except:
+ raise socket.gaierror(socket.EAI_SYSTEM)
+ port = None
+ try:
+ # Is it a port literal?
+ if service is None:
+ port = 0
+ else:
+ port = int(service)
+ except:
+ if flags & socket.AI_NUMERICSERV == 0:
+ try:
+ port = socket.getservbyname(service)
+ except:
+ pass
+ if port is None:
+ raise socket.gaierror(socket.EAI_NONAME)
+ tuples = []
+ if socktype == 0:
+ socktypes = [socket.SOCK_DGRAM, socket.SOCK_STREAM]
+ else:
+ socktypes = [socktype]
+ if flags & socket.AI_CANONNAME != 0:
+ cname = canonical_name
+ else:
+ cname = ''
+ if family == socket.AF_INET6 or family == socket.AF_UNSPEC:
+ for addr in v6addrs:
+ for socktype in socktypes:
+ for proto in _protocols_for_socktype[socktype]:
+ tuples.append((socket.AF_INET6, socktype, proto,
+ cname, (addr, port, 0, 0)))
+ if family == socket.AF_INET or family == socket.AF_UNSPEC:
+ for addr in v4addrs:
+ for socktype in socktypes:
+ for proto in _protocols_for_socktype[socktype]:
+ tuples.append((socket.AF_INET, socktype, proto,
+ cname, (addr, port)))
+ if len(tuples) == 0:
+ raise socket.gaierror(socket.EAI_NONAME)
+ return tuples
+
+
+def _getnameinfo(sockaddr, flags=0):
+ host = sockaddr[0]
+ port = sockaddr[1]
+ if len(sockaddr) == 4:
+ scope = sockaddr[3]
+ family = socket.AF_INET6
+ else:
+ scope = None
+ family = socket.AF_INET
+ tuples = _getaddrinfo(host, port, family, socket.SOCK_STREAM,
+ socket.SOL_TCP, 0)
+ if len(tuples) > 1:
+ raise socket.error('sockaddr resolved to multiple addresses')
+ addr = tuples[0][4][0]
+ if flags & socket.NI_DGRAM:
+ pname = 'udp'
+ else:
+ pname = 'tcp'
+ qname = dns.reversename.from_address(addr)
+ if flags & socket.NI_NUMERICHOST == 0:
+ try:
+ answer = _resolver.query(qname, 'PTR')
+ hostname = answer.rrset[0].target.to_text(True)
+ except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+ if flags & socket.NI_NAMEREQD:
+ raise socket.gaierror(socket.EAI_NONAME)
+ hostname = addr
+ if scope is not None:
+ hostname += '%' + str(scope)
+ else:
+ hostname = addr
+ if scope is not None:
+ hostname += '%' + str(scope)
+ if flags & socket.NI_NUMERICSERV:
+ service = str(port)
+ else:
+ service = socket.getservbyport(port, pname)
+ return (hostname, service)
+
+
+def _getfqdn(name=None):
+ if name is None:
+ name = socket.gethostname()
+ try:
+ return _getnameinfo(_getaddrinfo(name, 80)[0][4])[0]
+ except:
+ return name
+
+
+def _gethostbyname(name):
+ return _gethostbyname_ex(name)[2][0]
+
+
+def _gethostbyname_ex(name):
+ aliases = []
+ addresses = []
+ tuples = _getaddrinfo(name, 0, socket.AF_INET, socket.SOCK_STREAM,
+ socket.SOL_TCP, socket.AI_CANONNAME)
+ canonical = tuples[0][3]
+ for item in tuples:
+ addresses.append(item[4][0])
+ # XXX we just ignore aliases
+ return (canonical, aliases, addresses)
+
+
+def _gethostbyaddr(ip):
+ try:
+ dns.ipv6.inet_aton(ip)
+ sockaddr = (ip, 80, 0, 0)
+ family = socket.AF_INET6
+ except:
+ sockaddr = (ip, 80)
+ family = socket.AF_INET
+ (name, port) = _getnameinfo(sockaddr, socket.NI_NAMEREQD)
+ aliases = []
+ addresses = []
+ tuples = _getaddrinfo(name, 0, family, socket.SOCK_STREAM, socket.SOL_TCP,
+ socket.AI_CANONNAME)
+ canonical = tuples[0][3]
+ for item in tuples:
+ addresses.append(item[4][0])
+ # XXX we just ignore aliases
+ return (canonical, aliases, addresses)
+
+
+def override_system_resolver(resolver=None):
+ """Override the system resolver routines in the socket module with
+ versions which use dnspython's resolver.
+
+ This can be useful in testing situations where you want to control
+ the resolution behavior of python code without having to change
+ the system's resolver settings (e.g. /etc/resolv.conf).
+
+ The resolver to use may be specified; if it's not, the default
+ resolver will be used.
+
+ @param resolver: the resolver to use
+ @type resolver: dns.resolver.Resolver object or None
+ """
+ if resolver is None:
+ resolver = get_default_resolver()
+ global _resolver
+ _resolver = resolver
+ socket.getaddrinfo = _getaddrinfo
+ socket.getnameinfo = _getnameinfo
+ socket.getfqdn = _getfqdn
+ socket.gethostbyname = _gethostbyname
+ socket.gethostbyname_ex = _gethostbyname_ex
+ socket.gethostbyaddr = _gethostbyaddr
+
+
+def restore_system_resolver():
+ """Undo the effects of override_system_resolver().
+ """
+ global _resolver
+ _resolver = None
+ socket.getaddrinfo = _original_getaddrinfo
+ socket.getnameinfo = _original_getnameinfo
+ socket.getfqdn = _original_getfqdn
+ socket.gethostbyname = _original_gethostbyname
+ socket.gethostbyname_ex = _original_gethostbyname_ex
+ socket.gethostbyaddr = _original_gethostbyaddr
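
A minimal usage sketch of the stub resolver above; the nameserver address and
query name are illustrative values, not part of this patch:

```
import dns.resolver

# Build a resolver by hand instead of reading /etc/resolv.conf or the registry.
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = ['8.8.8.8']            # illustrative nameserver
resolver.cache = dns.resolver.LRUCache(1000)  # bounded answer cache

try:
    answer = resolver.query('plex.tv', 'A')   # illustrative query name
    for rdata in answer:
        print(rdata.address)
except dns.resolver.NXDOMAIN:
    print('name does not exist')
except dns.resolver.NoAnswer:
    print('no A records in the response')
```

override_system_resolver() would instead route socket.getaddrinfo() and the
other socket name functions through the same resolver object.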
diff --git a/lib/dns/reversename.py b/lib/dns/reversename.py
new file mode 100644
index 00000000..a27e7050
--- /dev/null
+++ b/lib/dns/reversename.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Reverse Map Names.
+
+@var ipv4_reverse_domain: The DNS IPv4 reverse-map domain, in-addr.arpa.
+@type ipv4_reverse_domain: dns.name.Name object
+@var ipv6_reverse_domain: The DNS IPv6 reverse-map domain, ip6.arpa.
+@type ipv6_reverse_domain: dns.name.Name object
+"""
+
+import binascii
+import sys
+
+import dns.name
+import dns.ipv6
+import dns.ipv4
+
+ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.')
+ipv6_reverse_domain = dns.name.from_text('ip6.arpa.')
+
+
+def from_address(text):
+ """Convert an IPv4 or IPv6 address in textual form into a Name object whose
+ value is the reverse-map domain name of the address.
+ @param text: an IPv4 or IPv6 address in textual form (e.g. '127.0.0.1',
+ '::1')
+ @type text: str
+ @rtype: dns.name.Name object
+ """
+ try:
+ v6 = dns.ipv6.inet_aton(text)
+ if dns.ipv6.is_mapped(v6):
+ if sys.version_info >= (3,):
+ parts = ['%d' % byte for byte in v6[12:]]
+ else:
+ parts = ['%d' % ord(byte) for byte in v6[12:]]
+ origin = ipv4_reverse_domain
+ else:
+ parts = [x for x in str(binascii.hexlify(v6).decode())]
+ origin = ipv6_reverse_domain
+ except:
+ parts = ['%d' %
+ byte for byte in bytearray(dns.ipv4.inet_aton(text))]
+ origin = ipv4_reverse_domain
+ parts.reverse()
+ return dns.name.from_text('.'.join(parts), origin=origin)
+
+
+def to_address(name):
+ """Convert a reverse map domain name into textual address form.
+ @param name: an IPv4 or IPv6 address in reverse-map form.
+ @type name: dns.name.Name object
+ @rtype: str
+ """
+ if name.is_subdomain(ipv4_reverse_domain):
+ name = name.relativize(ipv4_reverse_domain)
+ labels = list(name.labels)
+ labels.reverse()
+ text = b'.'.join(labels)
+ # run through inet_aton() to check syntax and make pretty.
+ return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
+ elif name.is_subdomain(ipv6_reverse_domain):
+ name = name.relativize(ipv6_reverse_domain)
+ labels = list(name.labels)
+ labels.reverse()
+ parts = []
+ i = 0
+ l = len(labels)
+ while i < l:
+ parts.append(b''.join(labels[i:i + 4]))
+ i += 4
+ text = b':'.join(parts)
+ # run through inet_aton() to check syntax and make pretty.
+ return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
+ else:
+ raise dns.exception.SyntaxError('unknown reverse-map address family')
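
A quick sketch of the reverse-map helpers above; the address is an illustrative
value:

```
import dns.reversename

name = dns.reversename.from_address('192.0.2.1')
print(name)                               # 1.2.0.192.in-addr.arpa.
print(dns.reversename.to_address(name))   # 192.0.2.1
```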
diff --git a/lib/dns/rrset.py b/lib/dns/rrset.py
new file mode 100644
index 00000000..6ad71da8
--- /dev/null
+++ b/lib/dns/rrset.py
@@ -0,0 +1,181 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS RRsets (an RRset is a named rdataset)"""
+
+
+import dns.name
+import dns.rdataset
+import dns.rdataclass
+import dns.renderer
+from ._compat import string_types
+
+
+class RRset(dns.rdataset.Rdataset):
+
+ """A DNS RRset (named rdataset).
+
+ RRset inherits from Rdataset, and RRsets can be treated as
+ Rdatasets in most cases. There are, however, a few notable
+ exceptions. RRsets have different to_wire() and to_text() method
+ arguments, reflecting the fact that RRsets always have an owner
+ name.
+ """
+
+ __slots__ = ['name', 'deleting']
+
+ def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
+ deleting=None):
+ """Create a new RRset."""
+
+ super(RRset, self).__init__(rdclass, rdtype, covers)
+ self.name = name
+ self.deleting = deleting
+
+ def _clone(self):
+ obj = super(RRset, self)._clone()
+ obj.name = self.name
+ obj.deleting = self.deleting
+ return obj
+
+ def __repr__(self):
+ if self.covers == 0:
+ ctext = ''
+ else:
+ ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
+ if self.deleting is not None:
+ dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
+ else:
+ dtext = ''
+        return '<DNS ' + str(self.name) + ' ' + \
+               dns.rdataclass.to_text(self.rdclass) + ' ' + \
+               dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'
+
+ def __str__(self):
+ return self.to_text()
+
+ def __eq__(self, other):
+ """Two RRsets are equal if they have the same name and the same
+ rdataset
+
+ @rtype: bool"""
+ if not isinstance(other, RRset):
+ return False
+ if self.name != other.name:
+ return False
+ return super(RRset, self).__eq__(other)
+
+ def match(self, name, rdclass, rdtype, covers, deleting=None):
+ """Returns True if this rrset matches the specified class, type,
+ covers, and deletion state."""
+
+ if not super(RRset, self).match(rdclass, rdtype, covers):
+ return False
+ if self.name != name or self.deleting != deleting:
+ return False
+ return True
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ """Convert the RRset into DNS master file format.
+
+ @see: L{dns.name.Name.choose_relativity} for more information
+ on how I{origin} and I{relativize} determine the way names
+ are emitted.
+
+ Any additional keyword arguments are passed on to the rdata
+ to_text() method.
+
+ @param origin: The origin for relative names, or None.
+ @type origin: dns.name.Name object
+        @param relativize: True if names should be relativized
+ @type relativize: bool"""
+
+ return super(RRset, self).to_text(self.name, origin, relativize,
+ self.deleting, **kw)
+
+ def to_wire(self, file, compress=None, origin=None, **kw):
+ """Convert the RRset to wire format."""
+
+ return super(RRset, self).to_wire(self.name, file, compress, origin,
+ self.deleting, **kw)
+
+ def to_rdataset(self):
+ """Convert an RRset into an Rdataset.
+
+ @rtype: dns.rdataset.Rdataset object
+ """
+ return dns.rdataset.from_rdata_list(self.ttl, list(self))
+
+
+def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
+ """Create an RRset with the specified name, TTL, class, and type, and with
+ the specified list of rdatas in text format.
+
+ @rtype: dns.rrset.RRset object
+ """
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if isinstance(rdclass, string_types):
+ rdclass = dns.rdataclass.from_text(rdclass)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ r = RRset(name, rdclass, rdtype)
+ r.update_ttl(ttl)
+ for t in text_rdatas:
+ rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
+ r.add(rd)
+ return r
+
+
+def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
+ """Create an RRset with the specified name, TTL, class, and type and with
+ the specified rdatas in text format.
+
+ @rtype: dns.rrset.RRset object
+ """
+
+ return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
+
+
+def from_rdata_list(name, ttl, rdatas):
+ """Create an RRset with the specified name and TTL, and with
+ the specified list of rdata objects.
+
+ @rtype: dns.rrset.RRset object
+ """
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+
+ if len(rdatas) == 0:
+ raise ValueError("rdata list must not be empty")
+ r = None
+ for rd in rdatas:
+ if r is None:
+ r = RRset(name, rd.rdclass, rd.rdtype)
+ r.update_ttl(ttl)
+ r.add(rd)
+ return r
+
+
+def from_rdata(name, ttl, *rdatas):
+ """Create an RRset with the specified name and TTL, and with
+ the specified rdata objects.
+
+ @rtype: dns.rrset.RRset object
+ """
+
+ return from_rdata_list(name, ttl, rdatas)
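
A short sketch of the convenience constructors above; the owner name, TTL, and
addresses are made-up values:

```
import dns.rrset

rrs = dns.rrset.from_text('www.example.com.', 300, 'IN', 'A',
                          '192.0.2.1', '192.0.2.2')
print(rrs.name)     # www.example.com.
print(rrs.ttl)      # 300
for rdata in rrs:
    print(rdata.address)
```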
diff --git a/lib/dns/set.py b/lib/dns/set.py
new file mode 100644
index 00000000..f13af5f3
--- /dev/null
+++ b/lib/dns/set.py
@@ -0,0 +1,265 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A simple Set class."""
+
+
+class Set(object):
+
+ """A simple set class.
+
+    Sets were not added to Python until 2.3, and rdata are not immutable,
+    so we cannot use sets.Set anyway. This class implements a subset of
+    the 2.3 Set interface using a list as the container.
+
+ @ivar items: A list of the items which are in the set
+ @type items: list"""
+
+ __slots__ = ['items']
+
+ def __init__(self, items=None):
+ """Initialize the set.
+
+ @param items: the initial set of items
+ @type items: any iterable or None
+ """
+
+ self.items = []
+ if items is not None:
+ for item in items:
+ self.add(item)
+
+ def __repr__(self):
+ return "dns.simpleset.Set(%s)" % repr(self.items)
+
+ def add(self, item):
+ """Add an item to the set."""
+ if item not in self.items:
+ self.items.append(item)
+
+ def remove(self, item):
+ """Remove an item from the set."""
+ self.items.remove(item)
+
+ def discard(self, item):
+ """Remove an item from the set if present."""
+ try:
+ self.items.remove(item)
+ except ValueError:
+ pass
+
+ def _clone(self):
+ """Make a (shallow) copy of the set.
+
+ There is a 'clone protocol' that subclasses of this class
+ should use. To make a copy, first call your super's _clone()
+ method, and use the object returned as the new instance. Then
+ make shallow copies of the attributes defined in the subclass.
+
+ This protocol allows us to write the set algorithms that
+ return new instances (e.g. union) once, and keep using them in
+ subclasses.
+ """
+
+ cls = self.__class__
+ obj = cls.__new__(cls)
+ obj.items = list(self.items)
+ return obj
+
+ def __copy__(self):
+ """Make a (shallow) copy of the set."""
+ return self._clone()
+
+ def copy(self):
+ """Make a (shallow) copy of the set."""
+ return self._clone()
+
+ def union_update(self, other):
+ """Update the set, adding any elements from other which are not
+ already in the set.
+ @param other: the collection of items with which to update the set
+ @type other: Set object
+ """
+ if not isinstance(other, Set):
+ raise ValueError('other must be a Set instance')
+ if self is other:
+ return
+ for item in other.items:
+ self.add(item)
+
+ def intersection_update(self, other):
+ """Update the set, removing any elements from other which are not
+ in both sets.
+ @param other: the collection of items with which to update the set
+ @type other: Set object
+ """
+ if not isinstance(other, Set):
+ raise ValueError('other must be a Set instance')
+ if self is other:
+ return
+ # we make a copy of the list so that we can remove items from
+ # the list without breaking the iterator.
+ for item in list(self.items):
+ if item not in other.items:
+ self.items.remove(item)
+
+ def difference_update(self, other):
+ """Update the set, removing any elements from other which are in
+ the set.
+ @param other: the collection of items with which to update the set
+ @type other: Set object
+ """
+ if not isinstance(other, Set):
+ raise ValueError('other must be a Set instance')
+ if self is other:
+ self.items = []
+ else:
+ for item in other.items:
+ self.discard(item)
+
+ def union(self, other):
+ """Return a new set which is the union of I{self} and I{other}.
+
+ @param other: the other set
+ @type other: Set object
+ @rtype: the same type as I{self}
+ """
+
+ obj = self._clone()
+ obj.union_update(other)
+ return obj
+
+ def intersection(self, other):
+ """Return a new set which is the intersection of I{self} and I{other}.
+
+ @param other: the other set
+ @type other: Set object
+ @rtype: the same type as I{self}
+ """
+
+ obj = self._clone()
+ obj.intersection_update(other)
+ return obj
+
+ def difference(self, other):
+ """Return a new set which I{self} - I{other}, i.e. the items
+ in I{self} which are not also in I{other}.
+
+ @param other: the other set
+ @type other: Set object
+ @rtype: the same type as I{self}
+ """
+
+ obj = self._clone()
+ obj.difference_update(other)
+ return obj
+
+ def __or__(self, other):
+ return self.union(other)
+
+ def __and__(self, other):
+ return self.intersection(other)
+
+ def __add__(self, other):
+ return self.union(other)
+
+ def __sub__(self, other):
+ return self.difference(other)
+
+ def __ior__(self, other):
+ self.union_update(other)
+ return self
+
+ def __iand__(self, other):
+ self.intersection_update(other)
+ return self
+
+ def __iadd__(self, other):
+ self.union_update(other)
+ return self
+
+ def __isub__(self, other):
+ self.difference_update(other)
+ return self
+
+ def update(self, other):
+ """Update the set, adding any elements from other which are not
+ already in the set.
+ @param other: the collection of items with which to update the set
+ @type other: any iterable type"""
+ for item in other:
+ self.add(item)
+
+ def clear(self):
+ """Make the set empty."""
+ self.items = []
+
+ def __eq__(self, other):
+ # Yes, this is inefficient but the sets we're dealing with are
+ # usually quite small, so it shouldn't hurt too much.
+ for item in self.items:
+ if item not in other.items:
+ return False
+ for item in other.items:
+ if item not in self.items:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __len__(self):
+ return len(self.items)
+
+ def __iter__(self):
+ return iter(self.items)
+
+ def __getitem__(self, i):
+ return self.items[i]
+
+ def __delitem__(self, i):
+ del self.items[i]
+
+ def __getslice__(self, i, j):
+ return self.items[i:j]
+
+ def __delslice__(self, i, j):
+ del self.items[i:j]
+
+ def issubset(self, other):
+ """Is I{self} a subset of I{other}?
+
+ @rtype: bool
+ """
+
+ if not isinstance(other, Set):
+ raise ValueError('other must be a Set instance')
+ for item in self.items:
+ if item not in other.items:
+ return False
+        return True
+
+ def issuperset(self, other):
+ """Is I{self} a superset of I{other}?
+
+ @rtype: bool
+ """
+
+ if not isinstance(other, Set):
+ raise ValueError('other must be a Set instance')
+ for item in other.items:
+ if item not in self.items:
+ return False
+        return True
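
dns.set.Set is the list-backed container that dnspython's rdatasets build on; a
tiny sketch of its behaviour (values are illustrative):

```
import dns.set

a = dns.set.Set([1, 2, 3])
b = dns.set.Set([3, 4])

print(len(a | b))         # 4   (union)
print((a & b).items)      # [3] (intersection)
print(a.issuperset(b))    # False
```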
diff --git a/lib/dns/tokenizer.py b/lib/dns/tokenizer.py
new file mode 100644
index 00000000..e5b09adf
--- /dev/null
+++ b/lib/dns/tokenizer.py
@@ -0,0 +1,564 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Tokenize DNS master file format"""
+
+from io import StringIO
+import sys
+
+import dns.exception
+import dns.name
+import dns.ttl
+from ._compat import long, text_type, binary_type
+
+_DELIMITERS = {
+ ' ': True,
+ '\t': True,
+ '\n': True,
+ ';': True,
+ '(': True,
+ ')': True,
+ '"': True}
+
+_QUOTING_DELIMITERS = {'"': True}
+
+EOF = 0
+EOL = 1
+WHITESPACE = 2
+IDENTIFIER = 3
+QUOTED_STRING = 4
+COMMENT = 5
+DELIMITER = 6
+
+
+class UngetBufferFull(dns.exception.DNSException):
+
+ """An attempt was made to unget a token when the unget buffer was full."""
+
+
+class Token(object):
+
+ """A DNS master file format token.
+
+ @ivar ttype: The token type
+ @type ttype: int
+ @ivar value: The token value
+ @type value: string
+ @ivar has_escape: Does the token value contain escapes?
+ @type has_escape: bool
+ """
+
+ def __init__(self, ttype, value='', has_escape=False):
+ """Initialize a token instance.
+
+ @param ttype: The token type
+ @type ttype: int
+ @param value: The token value
+ @type value: string
+ @param has_escape: Does the token value contain escapes?
+ @type has_escape: bool
+ """
+ self.ttype = ttype
+ self.value = value
+ self.has_escape = has_escape
+
+ def is_eof(self):
+ return self.ttype == EOF
+
+ def is_eol(self):
+ return self.ttype == EOL
+
+ def is_whitespace(self):
+ return self.ttype == WHITESPACE
+
+ def is_identifier(self):
+ return self.ttype == IDENTIFIER
+
+ def is_quoted_string(self):
+ return self.ttype == QUOTED_STRING
+
+ def is_comment(self):
+ return self.ttype == COMMENT
+
+ def is_delimiter(self):
+ return self.ttype == DELIMITER
+
+ def is_eol_or_eof(self):
+ return (self.ttype == EOL or self.ttype == EOF)
+
+ def __eq__(self, other):
+ if not isinstance(other, Token):
+ return False
+ return (self.ttype == other.ttype and
+ self.value == other.value)
+
+ def __ne__(self, other):
+ if not isinstance(other, Token):
+ return True
+ return (self.ttype != other.ttype or
+ self.value != other.value)
+
+ def __str__(self):
+ return '%d "%s"' % (self.ttype, self.value)
+
+ def unescape(self):
+ if not self.has_escape:
+ return self
+ unescaped = ''
+ l = len(self.value)
+ i = 0
+ while i < l:
+ c = self.value[i]
+ i += 1
+ if c == '\\':
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c = self.value[i]
+ i += 1
+ if c.isdigit():
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c2 = self.value[i]
+ i += 1
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c3 = self.value[i]
+ i += 1
+ if not (c2.isdigit() and c3.isdigit()):
+ raise dns.exception.SyntaxError
+ c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
+ unescaped += c
+ return Token(self.ttype, unescaped)
+
+ # compatibility for old-style tuple tokens
+
+ def __len__(self):
+ return 2
+
+ def __iter__(self):
+ return iter((self.ttype, self.value))
+
+ def __getitem__(self, i):
+ if i == 0:
+ return self.ttype
+ elif i == 1:
+ return self.value
+ else:
+ raise IndexError
+
+
+class Tokenizer(object):
+
+ """A DNS master file format tokenizer.
+
+ A token is a (type, value) tuple, where I{type} is an int, and
+ I{value} is a string. The valid types are EOF, EOL, WHITESPACE,
+ IDENTIFIER, QUOTED_STRING, COMMENT, and DELIMITER.
+
+ @ivar file: The file to tokenize
+ @type file: file
+ @ivar ungotten_char: The most recently ungotten character, or None.
+ @type ungotten_char: string
+ @ivar ungotten_token: The most recently ungotten token, or None.
+ @type ungotten_token: (int, string) token tuple
+ @ivar multiline: The current multiline level. This value is increased
+ by one every time a '(' delimiter is read, and decreased by one every time
+ a ')' delimiter is read.
+ @type multiline: int
+ @ivar quoting: This variable is true if the tokenizer is currently
+ reading a quoted string.
+ @type quoting: bool
+ @ivar eof: This variable is true if the tokenizer has encountered EOF.
+ @type eof: bool
+ @ivar delimiters: The current delimiter dictionary.
+ @type delimiters: dict
+ @ivar line_number: The current line number
+ @type line_number: int
+ @ivar filename: A filename that will be returned by the L{where} method.
+ @type filename: string
+ """
+
+ def __init__(self, f=sys.stdin, filename=None):
+ """Initialize a tokenizer instance.
+
+ @param f: The file to tokenize. The default is sys.stdin.
+ This parameter may also be a string, in which case the tokenizer
+ will take its input from the contents of the string.
+ @type f: file or string
+ @param filename: the name of the filename that the L{where} method
+ will return.
+ @type filename: string
+ """
+
+ if isinstance(f, text_type):
+ f = StringIO(f)
+ if filename is None:
+                filename = '<string>'
+ elif isinstance(f, binary_type):
+ f = StringIO(f.decode())
+ if filename is None:
+                filename = '<string>'
+ else:
+ if filename is None:
+ if f is sys.stdin:
+                    filename = '<stdin>'
+ else:
+                    filename = '<file>'
+ self.file = f
+ self.ungotten_char = None
+ self.ungotten_token = None
+ self.multiline = 0
+ self.quoting = False
+ self.eof = False
+ self.delimiters = _DELIMITERS
+ self.line_number = 1
+ self.filename = filename
+
+ def _get_char(self):
+ """Read a character from input.
+ @rtype: string
+ """
+
+ if self.ungotten_char is None:
+ if self.eof:
+ c = ''
+ else:
+ c = self.file.read(1)
+ if c == '':
+ self.eof = True
+ elif c == '\n':
+ self.line_number += 1
+ else:
+ c = self.ungotten_char
+ self.ungotten_char = None
+ return c
+
+ def where(self):
+ """Return the current location in the input.
+
+ @rtype: (string, int) tuple. The first item is the filename of
+ the input, the second is the current line number.
+ """
+
+ return (self.filename, self.line_number)
+
+ def _unget_char(self, c):
+ """Unget a character.
+
+ The unget buffer for characters is only one character large; it is
+ an error to try to unget a character when the unget buffer is not
+ empty.
+
+ @param c: the character to unget
+ @type c: string
+ @raises UngetBufferFull: there is already an ungotten char
+ """
+
+ if self.ungotten_char is not None:
+ raise UngetBufferFull
+ self.ungotten_char = c
+
+ def skip_whitespace(self):
+ """Consume input until a non-whitespace character is encountered.
+
+ The non-whitespace character is then ungotten, and the number of
+ whitespace characters consumed is returned.
+
+ If the tokenizer is in multiline mode, then newlines are whitespace.
+
+ @rtype: int
+ """
+
+ skipped = 0
+ while True:
+ c = self._get_char()
+ if c != ' ' and c != '\t':
+ if (c != '\n') or not self.multiline:
+ self._unget_char(c)
+ return skipped
+ skipped += 1
+
+ def get(self, want_leading=False, want_comment=False):
+ """Get the next token.
+
+ @param want_leading: If True, return a WHITESPACE token if the
+ first character read is whitespace. The default is False.
+ @type want_leading: bool
+ @param want_comment: If True, return a COMMENT token if the
+ first token read is a comment. The default is False.
+ @type want_comment: bool
+ @rtype: Token object
+ @raises dns.exception.UnexpectedEnd: input ended prematurely
+ @raises dns.exception.SyntaxError: input was badly formed
+ """
+
+ if self.ungotten_token is not None:
+ token = self.ungotten_token
+ self.ungotten_token = None
+ if token.is_whitespace():
+ if want_leading:
+ return token
+ elif token.is_comment():
+ if want_comment:
+ return token
+ else:
+ return token
+ skipped = self.skip_whitespace()
+ if want_leading and skipped > 0:
+ return Token(WHITESPACE, ' ')
+ token = ''
+ ttype = IDENTIFIER
+ has_escape = False
+ while True:
+ c = self._get_char()
+ if c == '' or c in self.delimiters:
+ if c == '' and self.quoting:
+ raise dns.exception.UnexpectedEnd
+ if token == '' and ttype != QUOTED_STRING:
+ if c == '(':
+ self.multiline += 1
+ self.skip_whitespace()
+ continue
+ elif c == ')':
+ if not self.multiline > 0:
+ raise dns.exception.SyntaxError
+ self.multiline -= 1
+ self.skip_whitespace()
+ continue
+ elif c == '"':
+ if not self.quoting:
+ self.quoting = True
+ self.delimiters = _QUOTING_DELIMITERS
+ ttype = QUOTED_STRING
+ continue
+ else:
+ self.quoting = False
+ self.delimiters = _DELIMITERS
+ self.skip_whitespace()
+ continue
+ elif c == '\n':
+ return Token(EOL, '\n')
+ elif c == ';':
+ while 1:
+ c = self._get_char()
+ if c == '\n' or c == '':
+ break
+ token += c
+ if want_comment:
+ self._unget_char(c)
+ return Token(COMMENT, token)
+ elif c == '':
+ if self.multiline:
+ raise dns.exception.SyntaxError(
+ 'unbalanced parentheses')
+ return Token(EOF)
+ elif self.multiline:
+ self.skip_whitespace()
+ token = ''
+ continue
+ else:
+ return Token(EOL, '\n')
+ else:
+ # This code exists in case we ever want a
+ # delimiter to be returned. It never produces
+ # a token currently.
+ token = c
+ ttype = DELIMITER
+ else:
+ self._unget_char(c)
+ break
+ elif self.quoting:
+ if c == '\\':
+ c = self._get_char()
+ if c == '':
+ raise dns.exception.UnexpectedEnd
+ if c.isdigit():
+ c2 = self._get_char()
+ if c2 == '':
+ raise dns.exception.UnexpectedEnd
+ c3 = self._get_char()
+ if c3 == '':
+ raise dns.exception.UnexpectedEnd
+ if not (c2.isdigit() and c3.isdigit()):
+ raise dns.exception.SyntaxError
+ c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
+ elif c == '\n':
+ raise dns.exception.SyntaxError('newline in quoted string')
+ elif c == '\\':
+ #
+ # It's an escape. Put it and the next character into
+ # the token; it will be checked later for goodness.
+ #
+ token += c
+ has_escape = True
+ c = self._get_char()
+ if c == '' or c == '\n':
+ raise dns.exception.UnexpectedEnd
+ token += c
+ if token == '' and ttype != QUOTED_STRING:
+ if self.multiline:
+ raise dns.exception.SyntaxError('unbalanced parentheses')
+ ttype = EOF
+ return Token(ttype, token, has_escape)
+
+ def unget(self, token):
+ """Unget a token.
+
+ The unget buffer for tokens is only one token large; it is
+ an error to try to unget a token when the unget buffer is not
+ empty.
+
+ @param token: the token to unget
+ @type token: Token object
+ @raises UngetBufferFull: there is already an ungotten token
+ """
+
+ if self.ungotten_token is not None:
+ raise UngetBufferFull
+ self.ungotten_token = token
+
+ def next(self):
+ """Return the next item in an iteration.
+ @rtype: (int, string)
+ """
+
+ token = self.get()
+ if token.is_eof():
+ raise StopIteration
+ return token
+
+ __next__ = next
+
+ def __iter__(self):
+ return self
+
+ # Helpers
+
+ def get_int(self):
+ """Read the next token and interpret it as an integer.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: int
+ """
+
+ token = self.get().unescape()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError('expecting an identifier')
+ if not token.value.isdigit():
+ raise dns.exception.SyntaxError('expecting an integer')
+ return int(token.value)
+
+ def get_uint8(self):
+ """Read the next token and interpret it as an 8-bit unsigned
+ integer.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: int
+ """
+
+ value = self.get_int()
+ if value < 0 or value > 255:
+ raise dns.exception.SyntaxError(
+ '%d is not an unsigned 8-bit integer' % value)
+ return value
+
+ def get_uint16(self):
+ """Read the next token and interpret it as a 16-bit unsigned
+ integer.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: int
+ """
+
+ value = self.get_int()
+ if value < 0 or value > 65535:
+ raise dns.exception.SyntaxError(
+ '%d is not an unsigned 16-bit integer' % value)
+ return value
+
+ def get_uint32(self):
+ """Read the next token and interpret it as a 32-bit unsigned
+ integer.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: int
+ """
+
+ token = self.get().unescape()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError('expecting an identifier')
+ if not token.value.isdigit():
+ raise dns.exception.SyntaxError('expecting an integer')
+ value = long(token.value)
+ if value < 0 or value > long(4294967295):
+ raise dns.exception.SyntaxError(
+ '%d is not an unsigned 32-bit integer' % value)
+ return value
+
+ def get_string(self, origin=None):
+ """Read the next token and interpret it as a string.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: string
+ """
+
+ token = self.get().unescape()
+ if not (token.is_identifier() or token.is_quoted_string()):
+ raise dns.exception.SyntaxError('expecting a string')
+ return token.value
+
+ def get_identifier(self, origin=None):
+ """Read the next token and raise an exception if it is not an identifier.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: string
+ """
+
+ token = self.get().unescape()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError('expecting an identifier')
+ return token.value
+
+ def get_name(self, origin=None):
+ """Read the next token and interpret it as a DNS name.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: dns.name.Name object"""
+
+ token = self.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError('expecting an identifier')
+ return dns.name.from_text(token.value, origin)
+
+ def get_eol(self):
+ """Read the next token and raise an exception if it isn't EOL or
+ EOF.
+
+ @raises dns.exception.SyntaxError:
+ @rtype: string
+ """
+
+ token = self.get()
+ if not token.is_eol_or_eof():
+ raise dns.exception.SyntaxError(
+ 'expected EOL or EOF, got %d "%s"' % (token.ttype,
+ token.value))
+ return token.value
+
+ def get_ttl(self):
+ token = self.get().unescape()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError('expecting an identifier')
+ return dns.ttl.from_text(token.value)
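As a quick orientation for the tokenizer added above, here is a minimal usage sketch (not part of the patch; the input record is invented) showing how the typed helpers consume one master-file style line:

```
import dns.tokenizer

# Hypothetical one-record input; a plain string is wrapped in StringIO internally.
tok = dns.tokenizer.Tokenizer('www 3600 IN A 10.0.0.1', filename='example')

name = tok.get_name()        # dns.name.Name for 'www'
ttl = tok.get_ttl()          # 3600, parsed via dns.ttl.from_text()
rdclass = tok.get_string()   # 'IN'
rdtype = tok.get_string()    # 'A'
address = tok.get_string()   # '10.0.0.1'
tok.get_eol()                # raises dns.exception.SyntaxError if tokens remain
```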
diff --git a/lib/dns/tsig.py b/lib/dns/tsig.py
new file mode 100644
index 00000000..92ce8603
--- /dev/null
+++ b/lib/dns/tsig.py
@@ -0,0 +1,233 @@
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TSIG support."""
+
+import hmac
+import struct
+import sys
+
+import dns.exception
+import dns.hash
+import dns.rdataclass
+import dns.name
+from ._compat import long, string_types
+
+class BadTime(dns.exception.DNSException):
+
+ """The current time is not within the TSIG's validity time."""
+
+
+class BadSignature(dns.exception.DNSException):
+
+ """The TSIG signature fails to verify."""
+
+
+class PeerError(dns.exception.DNSException):
+
+ """Base class for all TSIG errors generated by the remote peer"""
+
+
+class PeerBadKey(PeerError):
+
+ """The peer didn't know the key we used"""
+
+
+class PeerBadSignature(PeerError):
+
+ """The peer didn't like the signature we sent"""
+
+
+class PeerBadTime(PeerError):
+
+ """The peer didn't like the time we sent"""
+
+
+class PeerBadTruncation(PeerError):
+
+ """The peer didn't like amount of truncation in the TSIG we sent"""
+
+# TSIG Algorithms
+
+HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
+HMAC_SHA1 = dns.name.from_text("hmac-sha1")
+HMAC_SHA224 = dns.name.from_text("hmac-sha224")
+HMAC_SHA256 = dns.name.from_text("hmac-sha256")
+HMAC_SHA384 = dns.name.from_text("hmac-sha384")
+HMAC_SHA512 = dns.name.from_text("hmac-sha512")
+
+_hashes = {
+ HMAC_SHA224: 'SHA224',
+ HMAC_SHA256: 'SHA256',
+ HMAC_SHA384: 'SHA384',
+ HMAC_SHA512: 'SHA512',
+ HMAC_SHA1: 'SHA1',
+ HMAC_MD5: 'MD5',
+}
+
+default_algorithm = HMAC_MD5
+
+BADSIG = 16
+BADKEY = 17
+BADTIME = 18
+BADTRUNC = 22
+
+
+def sign(wire, keyname, secret, time, fudge, original_id, error,
+ other_data, request_mac, ctx=None, multi=False, first=True,
+ algorithm=default_algorithm):
+ """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata
+ for the input parameters, the HMAC MAC calculated by applying the
+ TSIG signature algorithm, and the TSIG digest context.
+ @rtype: (string, string, hmac.HMAC object)
+ @raises ValueError: I{other_data} is too long
+ @raises NotImplementedError: I{algorithm} is not supported
+ """
+
+ (algorithm_name, digestmod) = get_algorithm(algorithm)
+ if first:
+ ctx = hmac.new(secret, digestmod=digestmod)
+ ml = len(request_mac)
+ if ml > 0:
+ ctx.update(struct.pack('!H', ml))
+ ctx.update(request_mac)
+ id = struct.pack('!H', original_id)
+ ctx.update(id)
+ ctx.update(wire[2:])
+ if first:
+ ctx.update(keyname.to_digestable())
+ ctx.update(struct.pack('!H', dns.rdataclass.ANY))
+ ctx.update(struct.pack('!I', 0))
+ long_time = time + long(0)
+ upper_time = (long_time >> 32) & long(0xffff)
+ lower_time = long_time & long(0xffffffff)
+ time_mac = struct.pack('!HIH', upper_time, lower_time, fudge)
+ pre_mac = algorithm_name + time_mac
+ ol = len(other_data)
+ if ol > 65535:
+ raise ValueError('TSIG Other Data is > 65535 bytes')
+ post_mac = struct.pack('!HH', error, ol) + other_data
+ if first:
+ ctx.update(pre_mac)
+ ctx.update(post_mac)
+ else:
+ ctx.update(time_mac)
+ mac = ctx.digest()
+ mpack = struct.pack('!H', len(mac))
+ tsig_rdata = pre_mac + mpack + mac + id + post_mac
+ if multi:
+ ctx = hmac.new(secret, digestmod=digestmod)
+ ml = len(mac)
+ ctx.update(struct.pack('!H', ml))
+ ctx.update(mac)
+ else:
+ ctx = None
+ return (tsig_rdata, mac, ctx)
+
+
+def hmac_md5(wire, keyname, secret, time, fudge, original_id, error,
+ other_data, request_mac, ctx=None, multi=False, first=True,
+ algorithm=default_algorithm):
+ return sign(wire, keyname, secret, time, fudge, original_id, error,
+ other_data, request_mac, ctx, multi, first, algorithm)
+
+
+def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata,
+ tsig_rdlen, ctx=None, multi=False, first=True):
+ """Validate the specified TSIG rdata against the other input parameters.
+
+ @raises FormError: The TSIG is badly formed.
+ @raises BadTime: There is too much time skew between the client and the
+ server.
+ @raises BadSignature: The TSIG signature did not validate
+ @rtype: hmac.HMAC object"""
+
+ (adcount,) = struct.unpack("!H", wire[10:12])
+ if adcount == 0:
+ raise dns.exception.FormError
+ adcount -= 1
+ new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
+ current = tsig_rdata
+ (aname, used) = dns.name.from_wire(wire, current)
+ current = current + used
+ (upper_time, lower_time, fudge, mac_size) = \
+ struct.unpack("!HIHH", wire[current:current + 10])
+ time = ((upper_time + long(0)) << 32) + (lower_time + long(0))
+ current += 10
+ mac = wire[current:current + mac_size]
+ current += mac_size
+ (original_id, error, other_size) = \
+ struct.unpack("!HHH", wire[current:current + 6])
+ current += 6
+ other_data = wire[current:current + other_size]
+ current += other_size
+ if current != tsig_rdata + tsig_rdlen:
+ raise dns.exception.FormError
+ if error != 0:
+ if error == BADSIG:
+ raise PeerBadSignature
+ elif error == BADKEY:
+ raise PeerBadKey
+ elif error == BADTIME:
+ raise PeerBadTime
+ elif error == BADTRUNC:
+ raise PeerBadTruncation
+ else:
+ raise PeerError('unknown TSIG error code %d' % error)
+ time_low = time - fudge
+ time_high = time + fudge
+ if now < time_low or now > time_high:
+ raise BadTime
+ (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge,
+ original_id, error, other_data,
+ request_mac, ctx, multi, first, aname)
+ if (our_mac != mac):
+ raise BadSignature
+ return ctx
+
+
+def get_algorithm(algorithm):
+ """Returns the wire format string and the hash module to use for the
+ specified TSIG algorithm
+
+ @rtype: (string, hash constructor)
+ @raises NotImplementedError: I{algorithm} is not supported
+ """
+
+ if isinstance(algorithm, string_types):
+ algorithm = dns.name.from_text(algorithm)
+
+ try:
+ return (algorithm.to_digestable(), dns.hash.hashes[_hashes[algorithm]])
+ except KeyError:
+ raise NotImplementedError("TSIG algorithm " + str(algorithm) +
+ " is not supported")
+
+
+def get_algorithm_and_mac(wire, tsig_rdata, tsig_rdlen):
+ """Return the tsig algorithm for the specified tsig_rdata
+ @raises FormError: The TSIG is badly formed.
+ """
+ current = tsig_rdata
+ (aname, used) = dns.name.from_wire(wire, current)
+ current = current + used
+ (upper_time, lower_time, fudge, mac_size) = \
+ struct.unpack("!HIHH", wire[current:current + 10])
+ current += 10
+ mac = wire[current:current + mac_size]
+ current += mac_size
+ if current > tsig_rdata + tsig_rdlen:
+ raise dns.exception.FormError
+ return (aname, mac)
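sign() and validate() above are normally invoked indirectly via dns.message once a keyring is attached to a message, but get_algorithm() can be called directly. A small sketch using only names defined in this file:

```
import dns.tsig

# Resolve an algorithm name to its wire-format form and hash constructor.
(wire_name, hashmod) = dns.tsig.get_algorithm(dns.tsig.HMAC_SHA256)

# Algorithms outside the _hashes table raise NotImplementedError.
try:
    dns.tsig.get_algorithm('hmac-sha3-512')
except NotImplementedError as exc:
    print(exc)
```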
diff --git a/lib/dns/tsigkeyring.py b/lib/dns/tsigkeyring.py
new file mode 100644
index 00000000..295bac14
--- /dev/null
+++ b/lib/dns/tsigkeyring.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A place to store TSIG keys."""
+
+import base64
+
+import dns.name
+
+
+def from_text(textring):
+ """Convert a dictionary containing (textual DNS name, base64 secret) pairs
+ into a binary keyring which has (dns.name.Name, binary secret) pairs.
+ @rtype: dict"""
+
+ keyring = {}
+ for keytext in textring:
+ keyname = dns.name.from_text(keytext)
+ secret = base64.decodestring(textring[keytext])
+ keyring[keyname] = secret
+ return keyring
+
+
+def to_text(keyring):
+ """Convert a dictionary containing (dns.name.Name, binary secret) pairs
+ into a text keyring which has (textual DNS name, base64 secret) pairs.
+ @rtype: dict"""
+
+ textring = {}
+ for keyname in keyring:
+ keytext = keyname.to_text()
+ secret = base64.encodestring(keyring[keyname])
+ textring[keytext] = secret
+ return textring
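A short usage sketch for the two converters above (the key name and secret below are placeholders):

```
import dns.tsigkeyring

# Text keyring: textual DNS name -> base64-encoded secret.
text_keyring = {'keyname.example.': 'pRP5FapFoJ95JEL06sv4PQ=='}

keyring = dns.tsigkeyring.from_text(text_keyring)   # dns.name.Name -> raw secret bytes
round_trip = dns.tsigkeyring.to_text(keyring)       # back to the textual form
```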
diff --git a/lib/dns/ttl.py b/lib/dns/ttl.py
new file mode 100644
index 00000000..a27d8251
--- /dev/null
+++ b/lib/dns/ttl.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TTL conversion."""
+
+import dns.exception
+from ._compat import long
+
+
+class BadTTL(dns.exception.SyntaxError):
+
+ """DNS TTL value is not well-formed."""
+
+
+def from_text(text):
+ """Convert the text form of a TTL to an integer.
+
+ The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.
+
+ @param text: the textual TTL
+ @type text: string
+ @raises dns.ttl.BadTTL: the TTL is not well-formed
+ @rtype: int
+ """
+
+ if text.isdigit():
+ total = long(text)
+ else:
+ if not text[0].isdigit():
+ raise BadTTL
+ total = long(0)
+ current = long(0)
+ for c in text:
+ if c.isdigit():
+ current *= 10
+ current += long(c)
+ else:
+ c = c.lower()
+ if c == 'w':
+ total += current * long(604800)
+ elif c == 'd':
+ total += current * long(86400)
+ elif c == 'h':
+ total += current * long(3600)
+ elif c == 'm':
+ total += current * long(60)
+ elif c == 's':
+ total += current
+ else:
+ raise BadTTL("unknown unit '%s'" % c)
+ current = 0
+ if not current == 0:
+ raise BadTTL("trailing integer")
+ if total < long(0) or total > long(2147483647):
+ raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
+ return total
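A worked example of the BIND 8 units syntax mentioned in the docstring; '1w6d4h3m10s' is 604800 + 518400 + 14400 + 180 + 10 seconds:

```
import dns.ttl

assert dns.ttl.from_text('1w6d4h3m10s') == 1137790
assert dns.ttl.from_text('3600') == 3600

try:
    dns.ttl.from_text('5x')        # unknown unit -> BadTTL
except dns.ttl.BadTTL as exc:
    print(exc)
```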
diff --git a/lib/dns/update.py b/lib/dns/update.py
new file mode 100644
index 00000000..59728d98
--- /dev/null
+++ b/lib/dns/update.py
@@ -0,0 +1,249 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Dynamic Update Support"""
+
+
+import dns.message
+import dns.name
+import dns.opcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.tsig
+from ._compat import string_types
+
+
+class Update(dns.message.Message):
+
+ def __init__(self, zone, rdclass=dns.rdataclass.IN, keyring=None,
+ keyname=None, keyalgorithm=dns.tsig.default_algorithm):
+ """Initialize a new DNS Update object.
+
+ @param zone: The zone which is being updated.
+ @type zone: A dns.name.Name or string
+ @param rdclass: The class of the zone; defaults to dns.rdataclass.IN.
+ @type rdclass: An int designating the class, or a string whose value
+ is the name of a class.
+ @param keyring: The TSIG keyring to use; defaults to None.
+ @type keyring: dict
+ @param keyname: The name of the TSIG key to use; defaults to None.
+ The key must be defined in the keyring. If a keyring is specified
+ but a keyname is not, then the key used will be the first key in the
+ keyring. Note that the order of keys in a dictionary is not defined,
+ so applications should supply a keyname when a keyring is used, unless
+ they know the keyring contains only one key.
+ @type keyname: dns.name.Name or string
+ @param keyalgorithm: The TSIG algorithm to use; defaults to
+ dns.tsig.default_algorithm. Constants for TSIG algorithms are defined
+ in dns.tsig, and the currently implemented algorithms are
+ HMAC_MD5, HMAC_SHA1, HMAC_SHA224, HMAC_SHA256, HMAC_SHA384, and
+ HMAC_SHA512.
+ @type keyalgorithm: string
+ """
+ super(Update, self).__init__()
+ self.flags |= dns.opcode.to_flags(dns.opcode.UPDATE)
+ if isinstance(zone, string_types):
+ zone = dns.name.from_text(zone)
+ self.origin = zone
+ if isinstance(rdclass, string_types):
+ rdclass = dns.rdataclass.from_text(rdclass)
+ self.zone_rdclass = rdclass
+ self.find_rrset(self.question, self.origin, rdclass, dns.rdatatype.SOA,
+ create=True, force_unique=True)
+ if keyring is not None:
+ self.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+
+ def _add_rr(self, name, ttl, rd, deleting=None, section=None):
+ """Add a single RR to the update section."""
+
+ if section is None:
+ section = self.authority
+ covers = rd.covers()
+ rrset = self.find_rrset(section, name, self.zone_rdclass, rd.rdtype,
+ covers, deleting, True, True)
+ rrset.add(rd, ttl)
+
+ def _add(self, replace, section, name, *args):
+ """Add records. The first argument is the replace mode. If
+ false, RRs are added to an existing RRset; if true, the RRset
+ is replaced with the specified contents. The second
+ argument is the section to add to. The third argument
+ is always a name. The other arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string..."""
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if isinstance(args[0], dns.rdataset.Rdataset):
+ for rds in args:
+ if replace:
+ self.delete(name, rds.rdtype)
+ for rd in rds:
+ self._add_rr(name, rds.ttl, rd, section=section)
+ else:
+ args = list(args)
+ ttl = int(args.pop(0))
+ if isinstance(args[0], dns.rdata.Rdata):
+ if replace:
+ self.delete(name, args[0].rdtype)
+ for rd in args:
+ self._add_rr(name, ttl, rd, section=section)
+ else:
+ rdtype = args.pop(0)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if replace:
+ self.delete(name, rdtype)
+ for s in args:
+ rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
+ self.origin)
+ self._add_rr(name, ttl, rd, section=section)
+
+ def add(self, name, *args):
+ """Add records. The first argument is always a name. The other
+ arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string..."""
+ self._add(False, self.authority, name, *args)
+
+ def delete(self, name, *args):
+ """Delete records. The first argument is always a name. The other
+ arguments can be:
+
+ - I{nothing}
+
+ - rdataset...
+
+ - rdata...
+
+ - rdtype, [string...]"""
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if len(args) == 0:
+ self.find_rrset(self.authority, name, dns.rdataclass.ANY,
+ dns.rdatatype.ANY, dns.rdatatype.NONE,
+ dns.rdatatype.ANY, True, True)
+ elif isinstance(args[0], dns.rdataset.Rdataset):
+ for rds in args:
+ for rd in rds:
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+ else:
+ args = list(args)
+ if isinstance(args[0], dns.rdata.Rdata):
+ for rd in args:
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+ else:
+ rdtype = args.pop(0)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if len(args) == 0:
+ self.find_rrset(self.authority, name,
+ self.zone_rdclass, rdtype,
+ dns.rdatatype.NONE,
+ dns.rdataclass.ANY,
+ True, True)
+ else:
+ for s in args:
+ rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,
+ self.origin)
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+
+ def replace(self, name, *args):
+ """Replace records. The first argument is always a name. The other
+ arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string...
+
+ Note that if you want to replace the entire node, you should do
+ a delete of the name followed by one or more calls to add."""
+
+ self._add(True, self.authority, name, *args)
+
+ def present(self, name, *args):
+ """Require that an owner name (and optionally an rdata type,
+ or specific rdataset) exists as a prerequisite to the
+ execution of the update. The first argument is always a name.
+ The other arguments can be:
+
+ - rdataset...
+
+ - rdata...
+
+ - rdtype, string..."""
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if len(args) == 0:
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.ANY, dns.rdatatype.ANY,
+ dns.rdatatype.NONE, None,
+ True, True)
+ elif isinstance(args[0], dns.rdataset.Rdataset) or \
+ isinstance(args[0], dns.rdata.Rdata) or \
+ len(args) > 1:
+ if not isinstance(args[0], dns.rdataset.Rdataset):
+ # Add a 0 TTL
+ args = list(args)
+ args.insert(0, 0)
+ self._add(False, self.answer, name, *args)
+ else:
+ rdtype = args[0]
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.ANY, rdtype,
+ dns.rdatatype.NONE, None,
+ True, True)
+
+ def absent(self, name, rdtype=None):
+ """Require that an owner name (and optionally an rdata type) does
+ not exist as a prerequisite to the execution of the update."""
+
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ if rdtype is None:
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.NONE, dns.rdatatype.ANY,
+ dns.rdatatype.NONE, None,
+ True, True)
+ else:
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ self.find_rrset(self.answer, name,
+ dns.rdataclass.NONE, rdtype,
+ dns.rdatatype.NONE, None,
+ True, True)
+
+ def to_wire(self, origin=None, max_size=65535):
+ """Return a string containing the update in DNS compressed wire
+ format.
+ @rtype: string"""
+ if origin is None:
+ origin = self.origin
+ return super(Update, self).to_wire(origin, max_size)
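A minimal sketch of building an update message with the class above; the zone, key, and addresses are placeholders, and actually sending the wire data to a server is handled elsewhere in the library (dns.query), not in this file:

```
import dns.tsigkeyring
import dns.update

keyring = dns.tsigkeyring.from_text({'keyname.example.': 'pRP5FapFoJ95JEL06sv4PQ=='})

update = dns.update.Update('example.com', keyring=keyring, keyname='keyname.example.')
update.replace('host', 300, 'A', '10.0.0.1')   # replace the A rrset at host.example.com
update.delete('old-host', 'A')                 # remove all A records at old-host
update.absent('new-host')                      # prerequisite: new-host must not exist
wire = update.to_wire()                        # compressed wire format, TSIG-signed
```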
diff --git a/lib/dns/version.py b/lib/dns/version.py
new file mode 100644
index 00000000..3d97f696
--- /dev/null
+++ b/lib/dns/version.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython release version information."""
+
+MAJOR = 1
+MINOR = 14
+MICRO = 0
+RELEASELEVEL = 0x0f
+SERIAL = 0
+
+if RELEASELEVEL == 0x0f:
+ version = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
+elif RELEASELEVEL == 0x00:
+ version = '%d.%d.%dx%d' % \
+ (MAJOR, MINOR, MICRO, SERIAL)
+else:
+ version = '%d.%d.%d%x%d' % \
+ (MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL)
+
+hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | \
+ SERIAL
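The packing above gives each release a single comparable integer; for this 1.14.0 final release it works out as shown in this small sketch of how a caller might gate on it:

```
import dns.version

# (1 << 24) | (14 << 16) | (0 << 8) | (0x0f << 4) | 0 == 0x010E00F0
assert dns.version.version == '1.14.0'
assert dns.version.hexversion == 0x010E00F0

if dns.version.hexversion >= 0x010E00F0:
    pass  # features introduced in 1.14.0 are available
```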
diff --git a/lib/dns/wiredata.py b/lib/dns/wiredata.py
new file mode 100644
index 00000000..b381f7b9
--- /dev/null
+++ b/lib/dns/wiredata.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Wire Data Helper"""
+
+
+import dns.exception
+from ._compat import binary_type, string_types
+
+# Figure out what constant python passes for an unspecified slice bound.
+# It's supposed to be sys.maxint, yet on 64-bit windows sys.maxint is 2^31 - 1
+# but Python uses 2^63 - 1 as the constant. Rather than making pointless
+# extra comparisons, duplicating code, or weakening WireData, we just figure
+# out what constant Python will use.
+
+
+class _SliceUnspecifiedBound(str):
+
+ def __getslice__(self, i, j):
+ return j
+
+_unspecified_bound = _SliceUnspecifiedBound('')[1:]
+
+
+class WireData(binary_type):
+ # WireData is a string with stricter slicing
+
+ def __getitem__(self, key):
+ try:
+ if isinstance(key, slice):
+ return WireData(super(WireData, self).__getitem__(key))
+ return bytearray(self.unwrap())[key]
+ except IndexError:
+ raise dns.exception.FormError
+
+ def __getslice__(self, i, j):
+ try:
+ if j == _unspecified_bound:
+ # handle the case where the right bound is unspecified
+ j = len(self)
+ if i < 0 or j < 0:
+ raise dns.exception.FormError
+ # If it's not an empty slice, access left and right bounds
+ # to make sure they're valid
+ if i != j:
+ super(WireData, self).__getitem__(i)
+ super(WireData, self).__getitem__(j - 1)
+ return WireData(super(WireData, self).__getslice__(i, j))
+ except IndexError:
+ raise dns.exception.FormError
+
+ def __iter__(self):
+ i = 0
+ while 1:
+ try:
+ yield self[i]
+ i += 1
+ except dns.exception.FormError:
+ return  # end the generator cleanly (raising StopIteration here breaks under PEP 479)
+
+ def unwrap(self):
+ return binary_type(self)
+
+
+def maybe_wrap(wire):
+ if isinstance(wire, WireData):
+ return wire
+ elif isinstance(wire, binary_type):
+ return WireData(wire)
+ elif isinstance(wire, string_types):
+ return WireData(wire.encode())
+ raise ValueError("unhandled type %s" % type(wire))
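A small sketch of the stricter bounds checking WireData adds over a plain byte string (the sample bytes are made up):

```
import dns.exception
import dns.wiredata

wire = dns.wiredata.maybe_wrap(b'\x01\x02\x03\x04')   # wrap raw bytes in WireData

first = wire[0]           # ordinary indexing still works

try:
    wire[10]              # running off the end raises FormError, not IndexError
except dns.exception.FormError:
    print('truncated wire data')
```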
diff --git a/lib/dns/zone.py b/lib/dns/zone.py
new file mode 100644
index 00000000..ae099bd8
--- /dev/null
+++ b/lib/dns/zone.py
@@ -0,0 +1,1064 @@
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Zones."""
+
+from __future__ import generators
+
+import sys
+import re
+from io import BytesIO
+
+import dns.exception
+import dns.name
+import dns.node
+import dns.rdataclass
+import dns.rdatatype
+import dns.rdata
+import dns.rrset
+import dns.tokenizer
+import dns.ttl
+import dns.grange
+from ._compat import string_types, text_type
+
+
+class BadZone(dns.exception.DNSException):
+
+ """The DNS zone is malformed."""
+
+
+class NoSOA(BadZone):
+
+ """The DNS zone has no SOA RR at its origin."""
+
+
+class NoNS(BadZone):
+
+ """The DNS zone has no NS RRset at its origin."""
+
+
+class UnknownOrigin(BadZone):
+
+ """The DNS zone's origin is unknown."""
+
+
+class Zone(object):
+
+ """A DNS zone.
+
+ A Zone is a mapping from names to nodes. The zone object may be
+ treated like a Python dictionary, e.g. zone[name] will retrieve
+ the node associated with that name. The I{name} may be a
+ dns.name.Name object, or it may be a string. In either case,
+ if the name is relative it is treated as relative to the origin of
+ the zone.
+
+ @ivar rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int
+ @ivar origin: The origin of the zone.
+ @type origin: dns.name.Name object
+ @ivar nodes: A dictionary mapping the names of nodes in the zone to the
+ nodes themselves.
+ @type nodes: dict
+ @ivar relativize: should names in the zone be relativized?
+ @type relativize: bool
+ @cvar node_factory: the factory used to create a new node
+ @type node_factory: class or callable
+ """
+
+ node_factory = dns.node.Node
+
+ __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
+
+ def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
+ """Initialize a zone object.
+
+ @param origin: The origin of the zone.
+ @type origin: dns.name.Name object
+ @param rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int"""
+
+ if origin is not None:
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin)
+ elif not isinstance(origin, dns.name.Name):
+ raise ValueError("origin parameter must be convertible to a "
+ "DNS name")
+ if not origin.is_absolute():
+ raise ValueError("origin parameter must be an absolute name")
+ self.origin = origin
+ self.rdclass = rdclass
+ self.nodes = {}
+ self.relativize = relativize
+
+ def __eq__(self, other):
+ """Two zones are equal if they have the same origin, class, and
+ nodes.
+ @rtype: bool
+ """
+
+ if not isinstance(other, Zone):
+ return False
+ if self.rdclass != other.rdclass or \
+ self.origin != other.origin or \
+ self.nodes != other.nodes:
+ return False
+ return True
+
+ def __ne__(self, other):
+ """Are two zones not equal?
+ @rtype: bool
+ """
+
+ return not self.__eq__(other)
+
+ def _validate_name(self, name):
+ if isinstance(name, string_types):
+ name = dns.name.from_text(name, None)
+ elif not isinstance(name, dns.name.Name):
+ raise KeyError("name parameter must be convertible to a DNS name")
+ if name.is_absolute():
+ if not name.is_subdomain(self.origin):
+ raise KeyError(
+ "name parameter must be a subdomain of the zone origin")
+ if self.relativize:
+ name = name.relativize(self.origin)
+ return name
+
+ def __getitem__(self, key):
+ key = self._validate_name(key)
+ return self.nodes[key]
+
+ def __setitem__(self, key, value):
+ key = self._validate_name(key)
+ self.nodes[key] = value
+
+ def __delitem__(self, key):
+ key = self._validate_name(key)
+ del self.nodes[key]
+
+ def __iter__(self):
+ return self.nodes.iterkeys()
+
+ def iterkeys(self):
+ return self.nodes.iterkeys()
+
+ def keys(self):
+ return self.nodes.keys()
+
+ def itervalues(self):
+ return self.nodes.itervalues()
+
+ def values(self):
+ return self.nodes.values()
+
+ def items(self):
+ return self.nodes.items()
+
+ iteritems = items
+
+ def get(self, key):
+ key = self._validate_name(key)
+ return self.nodes.get(key)
+
+ def __contains__(self, other):
+ return other in self.nodes
+
+ def find_node(self, name, create=False):
+ """Find a node in the zone, possibly creating it.
+
+ @param name: the name of the node to find
+ @type name: dns.name.Name object or string
+ @param create: should the node be created if it doesn't exist?
+ @type create: bool
+ @raises KeyError: the name is not known and create was not specified.
+ @rtype: dns.node.Node object
+ """
+
+ name = self._validate_name(name)
+ node = self.nodes.get(name)
+ if node is None:
+ if not create:
+ raise KeyError
+ node = self.node_factory()
+ self.nodes[name] = node
+ return node
+
+ def get_node(self, name, create=False):
+ """Get a node in the zone, possibly creating it.
+
+ This method is like L{find_node}, except it returns None instead
+ of raising an exception if the node does not exist and creation
+ has not been requested.
+
+ @param name: the name of the node to find
+ @type name: dns.name.Name object or string
+ @param create: should the node be created if it doesn't exist?
+ @type create: bool
+ @rtype: dns.node.Node object or None
+ """
+
+ try:
+ node = self.find_node(name, create)
+ except KeyError:
+ node = None
+ return node
+
+ def delete_node(self, name):
+ """Delete the specified node if it exists.
+
+ It is not an error if the node does not exist.
+ """
+
+ name = self._validate_name(name)
+ if name in self.nodes:
+ del self.nodes[name]
+
+ def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Look for rdata with the specified name and type in the zone,
+ and return an rdataset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ The rdataset returned is not a copy; changes to it will change
+ the zone.
+
+ KeyError is raised if the name or type are not found.
+ Use L{get_rdataset} if you want to have None returned instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @param create: should the node and rdataset be created if they do not
+ exist?
+ @type create: bool
+ @raises KeyError: the node or rdata could not be found
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ name = self._validate_name(name)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ node = self.find_node(name, create)
+ return node.find_rdataset(self.rdclass, rdtype, covers, create)
+
+ def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
+ create=False):
+ """Look for rdata with the specified name and type in the zone,
+ and return an rdataset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ The rdataset returned is not a copy; changes to it will change
+ the zone.
+
+ None is returned if the name or type are not found.
+ Use L{find_rdataset} if you want to have KeyError raised instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @param create: should the node and rdataset be created if they do not
+ exist?
+ @type create: bool
+ @rtype: dns.rdataset.Rdataset object
+ """
+
+ try:
+ rdataset = self.find_rdataset(name, rdtype, covers, create)
+ except KeyError:
+ rdataset = None
+ return rdataset
+
+ def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
+ """Delete the rdataset matching I{rdtype} and I{covers}, if it
+ exists at the node specified by I{name}.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ It is not an error if the node does not exist, or if there is no
+ matching rdataset at the node.
+
+ If the node has no rdatasets after the deletion, it will itself
+ be deleted.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ """
+
+ name = self._validate_name(name)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ node = self.get_node(name)
+ if node is not None:
+ node.delete_rdataset(self.rdclass, rdtype, covers)
+ if len(node) == 0:
+ self.delete_node(name)
+
+ def replace_rdataset(self, name, replacement):
+ """Replace an rdataset at name.
+
+ It is not an error if there is no rdataset matching I{replacement}.
+
+ Ownership of the I{replacement} object is transferred to the zone;
+ in other words, this method does not store a copy of I{replacement}
+ at the node, it stores I{replacement} itself.
+
+ If the I{name} node does not exist, it is created.
+
+ @param name: the owner name
+ @type name: DNS.name.Name object or string
+ @param replacement: the replacement rdataset
+ @type replacement: dns.rdataset.Rdataset
+ """
+
+ if replacement.rdclass != self.rdclass:
+ raise ValueError('replacement.rdclass != zone.rdclass')
+ node = self.find_node(name, True)
+ node.replace_rdataset(replacement)
+
+ def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
+ """Look for rdata with the specified name and type in the zone,
+ and return an RRset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ This method is less efficient than the similar
+ L{find_rdataset} because it creates an RRset instead of
+ returning the matching rdataset. It may be more convenient
+ for some uses since it returns an object which binds the owner
+ name to the rdata.
+
+ This method may not be used to create new nodes or rdatasets;
+ use L{find_rdataset} instead.
+
+ KeyError is raised if the name or type are not found.
+ Use L{get_rrset} if you want to have None returned instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @raises KeyError: the node or rdata could not be found
+ @rtype: dns.rrset.RRset object
+ """
+
+ name = self._validate_name(name)
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
+ rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
+ rrset.update(rdataset)
+ return rrset
+
+ def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
+ """Look for rdata with the specified name and type in the zone,
+ and return an RRset encapsulating it.
+
+ The I{name}, I{rdtype}, and I{covers} parameters may be
+ strings, in which case they will be converted to their proper
+ type.
+
+ This method is less efficient than the similar L{get_rdataset}
+ because it creates an RRset instead of returning the matching
+ rdataset. It may be more convenient for some uses since it
+ returns an object which binds the owner name to the rdata.
+
+ This method may not be used to create new nodes or rdatasets;
+ use L{find_rdataset} instead.
+
+ None is returned if the name or type are not found.
+ Use L{find_rrset} if you want to have KeyError raised instead.
+
+ @param name: the owner name to look for
+ @type name: DNS.name.Name object or string
+ @param rdtype: the rdata type desired
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ @rtype: dns.rrset.RRset object
+ """
+
+ try:
+ rrset = self.find_rrset(name, rdtype, covers)
+ except KeyError:
+ rrset = None
+ return rrset
+
+ def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
+ covers=dns.rdatatype.NONE):
+ """Return a generator which yields (name, rdataset) tuples for
+ all rdatasets in the zone which have the specified I{rdtype}
+ and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
+ then all rdatasets will be matched.
+
+ @param rdtype: int or string
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ """
+
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ for (name, node) in self.iteritems():
+ for rds in node:
+ if rdtype == dns.rdatatype.ANY or \
+ (rds.rdtype == rdtype and rds.covers == covers):
+ yield (name, rds)
+
+ def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
+ covers=dns.rdatatype.NONE):
+ """Return a generator which yields (name, ttl, rdata) tuples for
+ all rdatas in the zone which have the specified I{rdtype}
+ and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
+ then all rdatas will be matched.
+
+ @param rdtype: int or string
+ @type rdtype: int or string
+ @param covers: the covered type (defaults to None)
+ @type covers: int or string
+ """
+
+ if isinstance(rdtype, string_types):
+ rdtype = dns.rdatatype.from_text(rdtype)
+ if isinstance(covers, string_types):
+ covers = dns.rdatatype.from_text(covers)
+ for (name, node) in self.iteritems():
+ for rds in node:
+ if rdtype == dns.rdatatype.ANY or \
+ (rds.rdtype == rdtype and rds.covers == covers):
+ for rdata in rds:
+ yield (name, rds.ttl, rdata)
+
+ def to_file(self, f, sorted=True, relativize=True, nl=None):
+ """Write a zone to a file.
+
+ @param f: file or string. If I{f} is a string, it is treated
+ as the name of a file to open.
+ @param sorted: if True, the file will be written with the
+ names sorted in DNSSEC order from least to greatest. Otherwise
+ the names will be written in whatever order they happen to have
+ in the zone's dictionary.
+ @param relativize: if True, domain names in the output will be
+ relativized to the zone's origin (if possible).
+ @type relativize: bool
+ @param nl: The end of line string. If not specified, the
+ output will use the platform's native end-of-line marker (i.e.
+ LF on POSIX, CRLF on Windows, CR on Macintosh).
+ @type nl: string or None
+ """
+
+ str_type = string_types
+
+ # The file is always opened in binary mode; nl only controls the
+ # end-of-line string written after each node below.
+ opts = 'wb'
+
+ if isinstance(f, str_type):
+ f = open(f, opts)
+ want_close = True
+ else:
+ want_close = False
+ try:
+ if sorted:
+ names = list(self.keys())
+ names.sort()
+ else:
+ names = self.iterkeys()
+ for n in names:
+ l = self[n].to_text(n, origin=self.origin,
+ relativize=relativize)
+ if isinstance(l, text_type):
+ l = l.encode()
+ if nl is None:
+ f.write(l)
+ f.write('\n')
+ else:
+ f.write(l)
+ f.write(nl)
+ finally:
+ if want_close:
+ f.close()
+
+ def to_text(self, sorted=True, relativize=True, nl=None):
+ """Return a zone's text as though it were written to a file.
+
+ @param sorted: if True, the file will be written with the
+ names sorted in DNSSEC order from least to greatest. Otherwise
+ the names will be written in whatever order they happen to have
+ in the zone's dictionary.
+ @param relativize: if True, domain names in the output will be
+ relativized to the zone's origin (if possible).
+ @type relativize: bool
+ @param nl: The end of line string. If not specified, the
+ output will use the platform's native end-of-line marker (i.e.
+ LF on POSIX, CRLF on Windows, CR on Macintosh).
+ @type nl: string or None
+ """
+ temp_buffer = BytesIO()
+ self.to_file(temp_buffer, sorted, relativize, nl)
+ return_value = temp_buffer.getvalue()
+ temp_buffer.close()
+ return return_value
+
+ def check_origin(self):
+ """Do some simple checking of the zone's origin.
+
+ @raises dns.zone.NoSOA: there is no SOA RR
+ @raises dns.zone.NoNS: there is no NS RRset
+ @raises KeyError: there is no origin node
+ """
+ if self.relativize:
+ name = dns.name.empty
+ else:
+ name = self.origin
+ if self.get_rdataset(name, dns.rdatatype.SOA) is None:
+ raise NoSOA
+ if self.get_rdataset(name, dns.rdatatype.NS) is None:
+ raise NoNS
+
+
+class _MasterReader(object):
+
+ """Read a DNS master file
+
+ @ivar tok: The tokenizer
+ @type tok: dns.tokenizer.Tokenizer object
+ @ivar ttl: The default TTL
+ @type ttl: int
+ @ivar last_name: The last name read
+ @type last_name: dns.name.Name object
+ @ivar current_origin: The current origin
+ @type current_origin: dns.name.Name object
+ @ivar relativize: should names in the zone be relativized?
+ @type relativize: bool
+ @ivar zone: the zone
+ @type zone: dns.zone.Zone object
+ @ivar saved_state: saved reader state (used when processing $INCLUDE)
+ @type saved_state: list of (tokenizer, current_origin, last_name, file,
+ ttl) tuples.
+ @ivar current_file: the file object of the $INCLUDed file being parsed
+ (None if no $INCLUDE is active).
+ @ivar allow_include: is $INCLUDE allowed?
+ @type allow_include: bool
+ @ivar check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ """
+
+ def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
+ allow_include=False, check_origin=True):
+ if isinstance(origin, string_types):
+ origin = dns.name.from_text(origin)
+ self.tok = tok
+ self.current_origin = origin
+ self.relativize = relativize
+ self.ttl = 0
+ self.last_name = self.current_origin
+ self.zone = zone_factory(origin, rdclass, relativize=relativize)
+ self.saved_state = []
+ self.current_file = None
+ self.allow_include = allow_include
+ self.check_origin = check_origin
+
+ def _eat_line(self):
+ while 1:
+ token = self.tok.get()
+ if token.is_eol_or_eof():
+ break
+
+ def _rr_line(self):
+ """Process one line from a DNS master file."""
+ # Name
+ if self.current_origin is None:
+ raise UnknownOrigin
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = dns.name.from_text(
+ token.value, self.current_origin)
+ else:
+ token = self.tok.get()
+ if token.is_eol_or_eof():
+ # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
+ return
+ self.tok.unget(token)
+ name = self.last_name
+ if not name.is_subdomain(self.zone.origin):
+ self._eat_line()
+ return
+ if self.relativize:
+ name = name.relativize(self.zone.origin)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # TTL
+ try:
+ ttl = dns.ttl.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.ttl.BadTTL:
+ ttl = self.ttl
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ rdclass = self.zone.rdclass
+ if rdclass != self.zone.rdclass:
+ raise dns.exception.SyntaxError("RR class is not zone's class")
+ # Type
+ try:
+ rdtype = dns.rdatatype.from_text(token.value)
+ except:
+ raise dns.exception.SyntaxError(
+ "unknown rdatatype '%s'" % token.value)
+ n = self.zone.nodes.get(name)
+ if n is None:
+ n = self.zone.node_factory()
+ self.zone.nodes[name] = n
+ try:
+ rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
+ self.current_origin, False)
+ except dns.exception.SyntaxError:
+ # Catch and reraise.
+ (ty, va) = sys.exc_info()[:2]
+ raise va
+ except:
+ # All exceptions that occur in the processing of rdata
+ # are treated as syntax errors. This is not strictly
+ # correct, but it is correct almost all of the time.
+ # We convert them to syntax errors so that we can emit
+ # helpful filename:line info.
+ (ty, va) = sys.exc_info()[:2]
+ raise dns.exception.SyntaxError(
+ "caught exception %s: %s" % (str(ty), str(va)))
+
+ rd.choose_relativity(self.zone.origin, self.relativize)
+ covers = rd.covers()
+ rds = n.find_rdataset(rdclass, rdtype, covers, True)
+ rds.add(rd, ttl)
+
+ def _parse_modify(self, side):
+ # Here we catch everything in '{' '}' in a group so we can replace it
+ # with ''.
+ is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
+ is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
+ is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
+ # Sometimes there are modifiers in the hostname. These come after
+ # the dollar sign. They are in the form: ${offset[,width[,base]]}.
+ # Make names
+ g1 = is_generate1.match(side)
+ if g1:
+ mod, sign, offset, width, base = g1.groups()
+ if sign == '':
+ sign = '+'
+ g2 = is_generate2.match(side)
+ if g2:
+ mod, sign, offset = g2.groups()
+ if sign == '':
+ sign = '+'
+ width = 0
+ base = 'd'
+ g3 = is_generate3.match(side)
+ if g3:
+ mod, sign, offset, width = g3.groups()
+ if sign == '':
+ sign = '+'
+ base = 'd'
+
+ if not (g1 or g2 or g3):
+ mod = ''
+ sign = '+'
+ offset = 0
+ width = 0
+ base = 'd'
+
+ if base != 'd':
+ raise NotImplementedError
+
+ return mod, sign, offset, width, base
+
+ def _generate_line(self):
+ # range lhs [ttl] [class] type rhs [ comment ]
+ """Process one line containing the GENERATE statement from a DNS
+ master file."""
+ if self.current_origin is None:
+ raise UnknownOrigin
+
+ token = self.tok.get()
+ # Range (required)
+ try:
+ start, stop, step = dns.grange.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except:
+ raise dns.exception.SyntaxError
+
+ # lhs (required)
+ try:
+ lhs = token.value
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except:
+ raise dns.exception.SyntaxError
+
+ # TTL
+ try:
+ ttl = dns.ttl.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.ttl.BadTTL:
+ ttl = self.ttl
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except:
+ rdclass = self.zone.rdclass
+ if rdclass != self.zone.rdclass:
+ raise dns.exception.SyntaxError("RR class is not zone's class")
+ # Type
+ try:
+ rdtype = dns.rdatatype.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except:
+ raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
+ token.value)
+
+ # rhs (required)
+ try:
+ rhs = token.value
+ except:
+ raise dns.exception.SyntaxError
+
+ lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)
+ rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)
+ for i in range(start, stop + 1, step):
+ # +1 because bind is inclusive and python is exclusive
+
+ if lsign == u'+':
+ lindex = i + int(loffset)
+ elif lsign == u'-':
+ lindex = i - int(loffset)
+
+ if rsign == u'-':
+ rindex = i - int(roffset)
+ elif rsign == u'+':
+ rindex = i + int(roffset)
+
+ lzfindex = str(lindex).zfill(int(lwidth))
+ rzfindex = str(rindex).zfill(int(rwidth))
+
+ name = lhs.replace(u'$%s' % (lmod), lzfindex)
+ rdata = rhs.replace(u'$%s' % (rmod), rzfindex)
+
+ self.last_name = dns.name.from_text(name, self.current_origin)
+ name = self.last_name
+ if not name.is_subdomain(self.zone.origin):
+ self._eat_line()
+ return
+ if self.relativize:
+ name = name.relativize(self.zone.origin)
+
+ n = self.zone.nodes.get(name)
+ if n is None:
+ n = self.zone.node_factory()
+ self.zone.nodes[name] = n
+ try:
+ rd = dns.rdata.from_text(rdclass, rdtype, rdata,
+ self.current_origin, False)
+ except dns.exception.SyntaxError:
+ # Catch and reraise.
+ (ty, va) = sys.exc_info()[:2]
+ raise va
+ except:
+ # All exceptions that occur in the processing of rdata
+ # are treated as syntax errors. This is not strictly
+ # correct, but it is correct almost all of the time.
+ # We convert them to syntax errors so that we can emit
+ # helpful filename:line info.
+ (ty, va) = sys.exc_info()[:2]
+ raise dns.exception.SyntaxError("caught exception %s: %s" %
+ (str(ty), str(va)))
+
+ rd.choose_relativity(self.zone.origin, self.relativize)
+ covers = rd.covers()
+ rds = n.find_rdataset(rdclass, rdtype, covers, True)
+ rds.add(rd, ttl)
+
+ def read(self):
+ """Read a DNS master file and build a zone object.
+
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ """
+
+ try:
+ while 1:
+ token = self.tok.get(True, True)
+ if token.is_eof():
+ if self.current_file is not None:
+ self.current_file.close()
+ if len(self.saved_state) > 0:
+ (self.tok,
+ self.current_origin,
+ self.last_name,
+ self.current_file,
+ self.ttl) = self.saved_state.pop(-1)
+ continue
+ break
+ elif token.is_eol():
+ continue
+ elif token.is_comment():
+ self.tok.get_eol()
+ continue
+ elif token.value[0] == u'$':
+ c = token.value.upper()
+ if c == u'$TTL':
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError("bad $TTL")
+ self.ttl = dns.ttl.from_text(token.value)
+ self.tok.get_eol()
+ elif c == u'$ORIGIN':
+ self.current_origin = self.tok.get_name()
+ self.tok.get_eol()
+ if self.zone.origin is None:
+ self.zone.origin = self.current_origin
+ elif c == u'$INCLUDE' and self.allow_include:
+ token = self.tok.get()
+ filename = token.value
+ token = self.tok.get()
+ if token.is_identifier():
+ new_origin =\
+ dns.name.from_text(token.value,
+ self.current_origin)
+ self.tok.get_eol()
+ elif not token.is_eol_or_eof():
+ raise dns.exception.SyntaxError(
+ "bad origin in $INCLUDE")
+ else:
+ new_origin = self.current_origin
+ self.saved_state.append((self.tok,
+ self.current_origin,
+ self.last_name,
+ self.current_file,
+ self.ttl))
+ self.current_file = open(filename, 'r')
+ self.tok = dns.tokenizer.Tokenizer(self.current_file,
+ filename)
+ self.current_origin = new_origin
+ elif c == u'$GENERATE':
+ self._generate_line()
+ else:
+ raise dns.exception.SyntaxError(
+ "Unknown master file directive '" + c + "'")
+ continue
+ self.tok.unget(token)
+ self._rr_line()
+ except dns.exception.SyntaxError as detail:
+ (filename, line_number) = self.tok.where()
+ if detail is None:
+ detail = "syntax error"
+ raise dns.exception.SyntaxError(
+ "%s:%d: %s" % (filename, line_number, detail))
+
+ # Now that we're done reading, do some basic checking of the zone.
+ if self.check_origin:
+ self.zone.check_origin()
+
+
+def from_text(text, origin=None, rdclass=dns.rdataclass.IN,
+ relativize=True, zone_factory=Zone, filename=None,
+ allow_include=False, check_origin=True):
+ """Build a zone object from a master file format string.
+
+ @param text: the master file format input
+ @type text: string.
+ @param origin: The origin of the zone; if not specified, the first
+ $ORIGIN statement in the master file will determine the origin of the
+ zone.
+ @type origin: dns.name.Name object or string
+ @param rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int
+ @param relativize: should names be relativized? The default is True
+ @type relativize: bool
+ @param zone_factory: The zone factory to use
+ @type zone_factory: function returning a Zone
+ @param filename: The filename to emit when describing where an error
+    occurred; the default is '<string>'.
+ @type filename: string
+ @param allow_include: is $INCLUDE allowed?
+ @type allow_include: bool
+ @param check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ @rtype: dns.zone.Zone object
+ """
+
+ # 'text' can also be a file, but we don't publish that fact
+ # since it's an implementation detail. The official file
+ # interface is from_file().
+
+ if filename is None:
+        filename = '<string>'
+ tok = dns.tokenizer.Tokenizer(text, filename)
+ reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,
+ allow_include=allow_include,
+ check_origin=check_origin)
+ reader.read()
+ return reader.zone
+
+
+def from_file(f, origin=None, rdclass=dns.rdataclass.IN,
+ relativize=True, zone_factory=Zone, filename=None,
+ allow_include=True, check_origin=True):
+ """Read a master file and build a zone object.
+
+ @param f: file or string. If I{f} is a string, it is treated
+ as the name of a file to open.
+ @param origin: The origin of the zone; if not specified, the first
+ $ORIGIN statement in the master file will determine the origin of the
+ zone.
+ @type origin: dns.name.Name object or string
+ @param rdclass: The zone's rdata class; the default is class IN.
+ @type rdclass: int
+ @param relativize: should names be relativized? The default is True
+ @type relativize: bool
+ @param zone_factory: The zone factory to use
+ @type zone_factory: function returning a Zone
+ @param filename: The filename to emit when describing where an error
+    occurred; the default is '<file>', or the value of I{f} if I{f} is a
+ string.
+ @type filename: string
+ @param allow_include: is $INCLUDE allowed?
+ @type allow_include: bool
+ @param check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ @rtype: dns.zone.Zone object
+ """
+
+ str_type = string_types
+ opts = 'rU'
+
+ if isinstance(f, str_type):
+ if filename is None:
+ filename = f
+ f = open(f, opts)
+ want_close = True
+ else:
+ if filename is None:
+            filename = '<file>'
+ want_close = False
+
+ try:
+ z = from_text(f, origin, rdclass, relativize, zone_factory,
+ filename, allow_include, check_origin)
+ finally:
+ if want_close:
+ f.close()
+ return z
+
+
+def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True):
+ """Convert the output of a zone transfer generator into a zone object.
+
+ @param xfr: The xfr generator
+ @type xfr: generator of dns.message.Message objects
+ @param relativize: should names be relativized? The default is True.
+ It is essential that the relativize setting matches the one specified
+ to dns.query.xfr().
+ @type relativize: bool
+ @param check_origin: should sanity checks of the origin node be done?
+ The default is True.
+ @type check_origin: bool
+ @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
+ @raises dns.zone.NoNS: No NS RRset was found at the zone origin
+ @rtype: dns.zone.Zone object
+ """
+
+ z = None
+ for r in xfr:
+ if z is None:
+ if relativize:
+ origin = r.origin
+ else:
+ origin = r.answer[0].name
+ rdclass = r.answer[0].rdclass
+ z = zone_factory(origin, rdclass, relativize=relativize)
+ for rrset in r.answer:
+ znode = z.nodes.get(rrset.name)
+ if not znode:
+ znode = z.node_factory()
+ z.nodes[rrset.name] = znode
+ zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
+ rrset.covers, True)
+ zrds.update_ttl(rrset.ttl)
+ for rd in rrset:
+ rd.choose_relativity(z.origin, relativize)
+ zrds.add(rd)
+ if check_origin:
+ z.check_origin()
+ return z
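+
+# Illustrative usage (editor's sketch, not part of upstream dnspython):
+# building a zone from master-file text with from_text(). The zone data is
+# invented for the example; the $GENERATE line expands as in BIND
+# (host-1 .. host-3 here).
+#
+#   import dns.zone
+#   text = ("$ORIGIN example.com.\n$TTL 3600\n"
+#           "@ IN SOA ns1 admin 1 7200 900 1209600 86400\n"
+#           "@ IN NS ns1\n"
+#           "www IN A 192.0.2.10\n"
+#           "$GENERATE 1-3 host-$ A 10.0.0.$\n")
+#   zone = dns.zone.from_text(text, origin='example.com.')
+#   rdataset = zone.get_rdataset('www', 'A')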
diff --git a/lib/ipaddr.py b/lib/ipaddr.py
new file mode 100644
index 00000000..0c851432
--- /dev/null
+++ b/lib/ipaddr.py
@@ -0,0 +1,1865 @@
+#!/usr/bin/python
+#
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+__version__ = '2.1.11'
+
+import struct
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def IPAddress(address, version=None):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+ version: An Integer, 4 or 6. If set, don't try to automatically
+      determine what the IP address type is. Important for things
+ like IPAddress(1), which could be IPv4, '0.0.0.1', or IPv6,
+ '::1'.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ """
+ if version:
+ if version == 4:
+ return IPv4Address(address)
+ elif version == 6:
+ return IPv6Address(address)
+
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
+ address)
+
+
+def IPNetwork(address, version=None, strict=False):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+ version: An Integer, if set, don't try to automatically
+      determine what the IP address type is. Important for things
+ like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
+ '::1/128'.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if a strict network was requested and a strict
+ network wasn't given.
+
+ """
+ if version:
+ if version == 4:
+ return IPv4Network(address, strict)
+ elif version == 6:
+ return IPv6Network(address, strict)
+
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
+ address)
+
+
+def v4_int_to_packed(address):
+ """The binary representation of this address.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The binary representation of this address.
+
+ Raises:
+ ValueError: If the integer is too large to be an IPv4 IP
+ address.
+ """
+ if address > _BaseV4._ALL_ONES:
+ raise ValueError('Address too large for IPv4')
+ return Bytes(struct.pack('!I', address))
+
+
+def v6_int_to_packed(address):
+ """The binary representation of this address.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The binary representation of this address.
+ """
+ return Bytes(struct.pack('!QQ', address >> 64, address & (2**64 - 1)))
+
+
+def _find_address_range(addresses):
+ """Find a sequence of addresses.
+
+ Args:
+ addresses: a list of IPv4 or IPv6 addresses.
+
+ Returns:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ first = last = addresses[0]
+ for ip in addresses[1:]:
+ if ip._ip == last._ip + 1:
+ last = ip
+ else:
+ break
+ return (first, last)
+
+def _get_prefix_length(number1, number2, bits):
+ """Get the number of leading bits that are same for two numbers.
+
+ Args:
+ number1: an integer.
+ number2: another integer.
+ bits: the maximum number of bits to compare.
+
+ Returns:
+ The number of leading bits that are the same for two numbers.
+
+ """
+ for i in range(bits):
+ if number1 >> i == number2 >> i:
+ return bits - i
+ return 0
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ for i in range(bits):
+ if (number >> i) % 2:
+ return i
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> summarize_address_range(IPv4Address('1.1.1.0'),
+ IPv4Address('1.1.1.130'))
+ [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
+ IPv4Network('1.1.1.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ The address range collapsed to a list of IPv4Network's or
+ IPv6Network's.
+
+    Raises:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version is not 4 or 6.
+
+ """
+ if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
+ raise TypeError('first and last must be IP addresses, not networks')
+ if first.version != last.version:
+ raise TypeError("%s and %s are not of the same version" % (
+ str(first), str(last)))
+ if first > last:
+ raise ValueError('last IP address must be greater than first')
+
+ networks = []
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError('unknown IP version')
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = _count_righthand_zero_bits(first_int, ip_bits)
+ current = None
+ while nbits >= 0:
+ addend = 2**nbits - 1
+ current = first_int + addend
+ nbits -= 1
+ if current <= last_int:
+ break
+ prefix = _get_prefix_length(first_int, current, ip_bits)
+ net = ip('%s/%d' % (str(first), prefix))
+ networks.append(net)
+ if current == ip._ALL_ONES:
+ break
+ first_int = current + 1
+ first = IPAddress(first_int, version=first._version)
+ return networks
+
+def _collapse_address_list_recursive(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('1.1.0.0/24')
+ ip2 = IPv4Network('1.1.1.0/24')
+ ip3 = IPv4Network('1.1.2.0/24')
+ ip4 = IPv4Network('1.1.3.0/24')
+ ip5 = IPv4Network('1.1.4.0/24')
+ ip6 = IPv4Network('1.1.0.1/22')
+
+ _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
+ [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_address_list([]).
+
+ Args:
+ addresses: A list of IPv4Network's or IPv6Network's
+
+ Returns:
+ A list of IPv4Network's or IPv6Network's depending on what we were
+ passed.
+
+ """
+ ret_array = []
+ optimized = False
+
+ for cur_addr in addresses:
+ if not ret_array:
+ ret_array.append(cur_addr)
+ continue
+ if cur_addr in ret_array[-1]:
+ optimized = True
+ elif cur_addr == ret_array[-1].supernet().subnet()[1]:
+ ret_array.append(ret_array.pop().supernet())
+ optimized = True
+ else:
+ ret_array.append(cur_addr)
+
+ if optimized:
+ return _collapse_address_list_recursive(ret_array)
+
+ return ret_array
+
+
+def collapse_address_list(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
+ [IPv4('1.1.0.0/23')]
+
+ Args:
+ addresses: A list of IPv4Network or IPv6Network objects.
+
+ Returns:
+ A list of IPv4Network or IPv6Network objects depending on what we
+ were passed.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ i = 0
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseIP):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ str(ip), str(ips[-1])))
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ str(ip), str(ips[-1])))
+ ips.append(ip.ip)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ str(ip), str(nets[-1])))
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+ nets = sorted(set(nets))
+
+ while i < len(ips):
+ (first, last) = _find_address_range(ips[i:])
+ i = ips.index(last) + 1
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_address_list_recursive(sorted(
+ addrs + nets, key=_BaseNet._get_networks_key))
+
+# backwards compatibility
+CollapseAddrList = collapse_address_list
+
+# We need to distinguish between the string and packed-bytes representations
+# of an IP address. For example, b'0::1' is the IPv4 address 48.58.58.49,
+# while '0::1' is an IPv6 address.
+#
+# In Python 3, the native 'bytes' type already provides this functionality,
+# so we use it directly. For earlier implementations where bytes is not a
+# distinct type, we create a subclass of str to serve as a tag.
+#
+# Usage example (Python 2):
+# ip = ipaddr.IPAddress(ipaddr.Bytes('xxxx'))
+#
+# Usage example (Python 3):
+# ip = ipaddr.IPAddress(b'xxxx')
+try:
+ if bytes is str:
+ raise TypeError("bytes is not a distinct type")
+ Bytes = bytes
+except (NameError, TypeError):
+ class Bytes(str):
+ def __repr__(self):
+ return 'Bytes(%s)' % str.__repr__(self)
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
+
+ doesn't make any sense. There are some times however, where you may wish
+ to have ipaddr sort these for you anyway. If you need to do this, you
+ can use this function as the key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNet):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseIP):
+ return obj._get_address_key()
+ return NotImplemented
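+
+# Illustrative (editor's sketch): get_mixed_type_key() is meant to be passed
+# as the key= argument when sorting a list that mixes address and network
+# objects, e.g.:
+#   mixed = [IPv4Network('1.1.1.0/24'), IPv4Address('1.1.1.5')]
+#   ordered = sorted(mixed, key=get_mixed_type_key)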
+
+class _IPAddrBase(object):
+
+ """The mother class."""
+
+ def __index__(self):
+ return self._ip
+
+ def __int__(self):
+ return self._ip
+
+ def __hex__(self):
+ return hex(self._ip)
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return str(self)
+
+
+class _BaseIP(_IPAddrBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+
+ """
+
+ def __eq__(self, other):
+ try:
+ return (self._ip == other._ip
+ and self._version == other._version)
+ except AttributeError:
+ return NotImplemented
+
+ def __ne__(self, other):
+ eq = self.__eq__(other)
+ if eq is NotImplemented:
+ return NotImplemented
+ return not eq
+
+ def __le__(self, other):
+ gt = self.__gt__(other)
+ if gt is NotImplemented:
+ return NotImplemented
+ return not gt
+
+ def __ge__(self, other):
+ lt = self.__lt__(other)
+ if lt is NotImplemented:
+ return NotImplemented
+ return not lt
+
+ def __lt__(self, other):
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ str(self), str(other)))
+ if not isinstance(other, _BaseIP):
+ raise TypeError('%s and %s are not of the same type' % (
+ str(self), str(other)))
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ def __gt__(self, other):
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ str(self), str(other)))
+ if not isinstance(other, _BaseIP):
+ raise TypeError('%s and %s are not of the same type' % (
+ str(self), str(other)))
+ if self._ip != other._ip:
+ return self._ip > other._ip
+ return False
+
+ # Shorthand for Integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, int):
+ return NotImplemented
+ return IPAddress(int(self) + other, version=self._version)
+
+ def __sub__(self, other):
+ if not isinstance(other, int):
+ return NotImplemented
+ return IPAddress(int(self) - other, version=self._version)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, str(self))
+
+ def __str__(self):
+ return '%s' % self._string_from_ip_int(self._ip)
+
+ def __hash__(self):
+ return hash(hex(long(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ @property
+ def version(self):
+ raise NotImplementedError('BaseIP has no version')
+
+
+class _BaseNet(_IPAddrBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+
+ """
+
+ def __init__(self, address):
+ self._cache = {}
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, str(self))
+
+ def iterhosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ cur = int(self.network) + 1
+ bcast = int(self.broadcast) - 1
+ while cur <= bcast:
+ cur += 1
+ yield IPAddress(cur - 1, version=self._version)
+
+ def __iter__(self):
+ cur = int(self.network)
+ bcast = int(self.broadcast)
+ while cur <= bcast:
+ cur += 1
+ yield IPAddress(cur - 1, version=self._version)
+
+ def __getitem__(self, n):
+ network = int(self.network)
+ broadcast = int(self.broadcast)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError
+ return IPAddress(network + n, version=self._version)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError
+ return IPAddress(broadcast + n, version=self._version)
+
+ def __lt__(self, other):
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ str(self), str(other)))
+ if not isinstance(other, _BaseNet):
+ raise TypeError('%s and %s are not of the same type' % (
+ str(self), str(other)))
+ if self.network != other.network:
+ return self.network < other.network
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __gt__(self, other):
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ str(self), str(other)))
+ if not isinstance(other, _BaseNet):
+ raise TypeError('%s and %s are not of the same type' % (
+ str(self), str(other)))
+ if self.network != other.network:
+ return self.network > other.network
+ if self.netmask != other.netmask:
+ return self.netmask > other.netmask
+ return False
+
+ def __le__(self, other):
+ gt = self.__gt__(other)
+ if gt is NotImplemented:
+ return NotImplemented
+ return not gt
+
+ def __ge__(self, other):
+ lt = self.__lt__(other)
+ if lt is NotImplemented:
+ return NotImplemented
+ return not lt
+
+ def __eq__(self, other):
+ try:
+ return (self._version == other._version
+ and self.network == other.network
+ and int(self.netmask) == int(other.netmask))
+ except AttributeError:
+ if isinstance(other, _BaseIP):
+ return (self._version == other._version
+ and self._ip == other._ip)
+
+ def __ne__(self, other):
+ eq = self.__eq__(other)
+ if eq is NotImplemented:
+ return NotImplemented
+ return not eq
+
+ def __str__(self):
+ return '%s/%s' % (str(self.ip),
+ str(self._prefixlen))
+
+ def __hash__(self):
+ return hash(int(self.network) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNet):
+ return (self.network <= other.network and
+ self.broadcast >= other.broadcast)
+ # dealing with another address
+ else:
+ return (int(self.network) <= int(other._ip) <=
+ int(self.broadcast))
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network in other or self.broadcast in other or (
+ other.network in self or other.broadcast in self)
+
+ @property
+ def network(self):
+ x = self._cache.get('network')
+ if x is None:
+ x = IPAddress(self._ip & int(self.netmask), version=self._version)
+ self._cache['network'] = x
+ return x
+
+ @property
+ def broadcast(self):
+ x = self._cache.get('broadcast')
+ if x is None:
+ x = IPAddress(self._ip | int(self.hostmask), version=self._version)
+ self._cache['broadcast'] = x
+ return x
+
+ @property
+ def hostmask(self):
+ x = self._cache.get('hostmask')
+ if x is None:
+ x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
+ version=self._version)
+ self._cache['hostmask'] = x
+ return x
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%d' % (str(self.ip), self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (str(self.ip), str(self.netmask))
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (str(self.ip), str(self.hostmask))
+
+ @property
+ def numhosts(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast) - int(self.network) + 1
+
+ @property
+ def version(self):
+ raise NotImplementedError('BaseNet has no version')
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = IPNetwork('10.1.1.0/24')
+ addr2 = IPNetwork('10.1.1.0/26')
+ addr1.address_exclude(addr2) =
+ [IPNetwork('10.1.1.64/26'), IPNetwork('10.1.1.128/25')]
+
+ or IPv6:
+
+ addr1 = IPNetwork('::1/32')
+ addr2 = IPNetwork('::1/128')
+ addr1.address_exclude(addr2) = [IPNetwork('::0/128'),
+ IPNetwork('::2/127'),
+ IPNetwork('::4/126'),
+ IPNetwork('::8/125'),
+ ...
+ IPNetwork('0:0:8000::/33')]
+
+ Args:
+ other: An IPvXNetwork object of the same type.
+
+ Returns:
+      A sorted list of IPvXNetwork objects which is self
+ minus other.
+
+ Raises:
+        TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if not self._version == other._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ str(self), str(other)))
+
+ if not isinstance(other, _BaseNet):
+ raise TypeError("%s is not a network object" % str(other))
+
+ if other not in self:
+ raise ValueError('%s not contained in %s' % (str(other),
+ str(self)))
+ if other == self:
+ return []
+
+ ret_addrs = []
+
+ # Make sure we're comparing the network of other.
+ other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
+ version=other._version)
+
+ s1, s2 = self.subnet()
+ while s1 != other and s2 != other:
+ if other in s1:
+ ret_addrs.append(s2)
+ s1, s2 = s1.subnet()
+ elif other in s2:
+ ret_addrs.append(s1)
+ s1, s2 = s2.subnet()
+ else:
+ # If we got here, there's a bug somewhere.
+ assert True == False, ('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (str(s1), str(s2), str(other)))
+ if s1 == other:
+ ret_addrs.append(s2)
+ elif s2 == other:
+ ret_addrs.append(s1)
+ else:
+ # If we got here, there's a bug somewhere.
+ assert True == False, ('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (str(s1), str(s2), str(other)))
+
+ return sorted(ret_addrs, key=_BaseNet._get_networks_key)
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This is only concerned about the comparison of the integer
+ representation of the network addresses. This means that the
+ host bits aren't considered at all in this method. If you want
+ to compare host bits, you can easily enough do a
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
+              IPv6('1080::200B:417A') < IPv6('1080::200C:417B')
+ 0 if self == other
+ eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
+ IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
+ 1 if self > other
+ eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
+ IPv6('1080::1:200C:417A/112') >
+ IPv6('1080::0:200C:417A/112')
+
+ If the IP versions of self and other are different, returns:
+
+ -1 if self._version < other._version
+ eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
+ 1 if self._version > other._version
+ eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
+
+ """
+ if self._version < other._version:
+ return -1
+ if self._version > other._version:
+ return 1
+ # self._version == other._version below here:
+ if self.network < other.network:
+ return -1
+ if self.network > other.network:
+ return 1
+ # self.network == other.network below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ # self.network == other.network and self.netmask == other.netmask
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network, self.netmask)
+
+ def _ip_int_from_prefix(self, prefixlen):
+ """Turn the prefix length into a bitwise netmask.
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
+
+ def _prefix_from_ip_int(self, ip_int):
+ """Return prefix length from a bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format.
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask.
+
+ """
+ prefixlen = self._max_prefixlen
+ while prefixlen:
+ if ip_int & 1:
+ break
+ ip_int >>= 1
+ prefixlen -= 1
+
+ if ip_int == (1 << prefixlen) - 1:
+ return prefixlen
+ else:
+ raise NetmaskValueError('Bit pattern does not match /1*0*/')
+
+ def _prefix_from_prefix_string(self, prefixlen_str):
+ """Turn a prefix length string into an integer.
+
+ Args:
+ prefixlen_str: A decimal string containing the prefix length.
+
+ Returns:
+ The prefix length as an integer.
+
+ Raises:
+ NetmaskValueError: If the input is malformed or out of range.
+
+ """
+ try:
+ if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+ raise ValueError
+ prefixlen = int(prefixlen_str)
+ if not (0 <= prefixlen <= self._max_prefixlen):
+ raise ValueError
+ except ValueError:
+ raise NetmaskValueError('%s is not a valid prefix length' %
+ prefixlen_str)
+ return prefixlen
+
+ def _prefix_from_ip_string(self, ip_str):
+ """Turn a netmask/hostmask string into a prefix length.
+
+ Args:
+ ip_str: A netmask or hostmask, formatted as an IP address.
+
+ Returns:
+ The prefix length as an integer.
+
+ Raises:
+ NetmaskValueError: If the input is not a netmask or hostmask.
+
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = self._ip_int_from_string(ip_str)
+ except AddressValueError:
+ raise NetmaskValueError('%s is not a valid netmask' % ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return self._prefix_from_ip_int(ip_int)
+ except NetmaskValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= self._ALL_ONES
+ try:
+ return self._prefix_from_ip_int(ip_int)
+ except NetmaskValueError:
+ raise NetmaskValueError('%s is not a valid netmask' % ip_str)
+
+ def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ In the case that self contains only one IP
+ (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+ for IPv6), return a list with just ourself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (smaller prefix) than the existing prefix.
+ This should not be set if prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError('new prefix must be longer')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError('prefix length diff must be > 0')
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ 'prefix length diff %d is invalid for netblock %s' % (
+ new_prefixlen, str(self)))
+
+ first = IPNetwork('%s/%s' % (str(self.network),
+ str(self._prefixlen + prefixlen_diff)),
+ version=self._version)
+
+ yield first
+ current = first
+ while True:
+ broadcast = current.broadcast
+ if broadcast == self.broadcast:
+ return
+ new_addr = IPAddress(int(broadcast) + 1, version=self._version)
+ current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
+ version=self._version)
+
+ yield current
+
+ def masked(self):
+ """Return the network object with the host bits masked out."""
+ return IPNetwork('%s/%d' % (self.network, self._prefixlen),
+ version=self._version)
+
+ def subnet(self, prefixlen_diff=1, new_prefix=None):
+ """Return a list of subnets, rather than an iterator."""
+ return list(self.iter_subnets(prefixlen_diff, new_prefix))
+
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv4 network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
+ negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError('new prefix must be shorter')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = self._prefixlen - new_prefix
+
+
+ if self.prefixlen - prefixlen_diff < 0:
+ raise ValueError(
+ 'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+ (self.prefixlen, prefixlen_diff))
+ return IPNetwork('%s/%s' % (str(self.network),
+ str(self.prefixlen - prefixlen_diff)),
+ version=self._version)
+
+ # backwards compatibility
+ Subnet = subnet
+ Supernet = supernet
+ AddressExclude = address_exclude
+ CompareNetworks = compare_networks
+ Contains = __contains__
+
+
+class _BaseV4(object):
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2**IPV4LENGTH) - 1
+ _DECIMAL_DIGITS = frozenset('0123456789')
+
+ def __init__(self, address):
+ self._version = 4
+ self._max_prefixlen = IPV4LENGTH
+
+ def _explode_shorthand_ip_string(self):
+ return str(self)
+
+ def _ip_int_from_string(self, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP ip_str.
+
+ Returns:
+ The IP ip_str as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ octets = ip_str.split('.')
+ if len(octets) != 4:
+ raise AddressValueError(ip_str)
+
+ packed_ip = 0
+ for oc in octets:
+ try:
+ packed_ip = (packed_ip << 8) | self._parse_octet(oc)
+ except ValueError:
+ raise AddressValueError(ip_str)
+ return packed_ip
+
+ def _parse_octet(self, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not self._DECIMAL_DIGITS.issuperset(octet_str):
+ raise ValueError
+ octet_int = int(octet_str, 10)
+ # Disallow leading zeroes, because no clear standard exists on
+ # whether these should be interpreted as decimal or octal.
+ if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
+ raise ValueError
+ return octet_int
+
+ def _string_from_ip_int(self, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ octets = []
+ for _ in xrange(4):
+ octets.insert(0, str(ip_int & 0xFF))
+ ip_int >>= 8
+ return '.'.join(octets)
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def version(self):
+ return self._version
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in IPv4Network('240.0.0.0/4')
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 1918.
+
+ """
+ return (self in IPv4Network('10.0.0.0/8') or
+ self in IPv4Network('172.16.0.0/12') or
+ self in IPv4Network('192.168.0.0/16'))
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in IPv4Network('224.0.0.0/4')
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self in IPv4Network('0.0.0.0')
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in IPv4Network('127.0.0.0/8')
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in IPv4Network('169.254.0.0/16')
+
+
+class IPv4Address(_BaseV4, _BaseIP):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ def __init__(self, address):
+
+ """
+ Args:
+ address: A string or integer representing the IP
+ '192.168.1.1'
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.168.1.1') == IPv4Address(3232235777).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.168.1.1'))) ==
+ IPv4Address('192.168.1.1')
+
+ Raises:
+ AddressValueError: If ipaddr isn't a valid IPv4 address.
+
+ """
+ _BaseV4.__init__(self, address)
+
+ # Efficient constructor from integer.
+ if isinstance(address, (int, long)):
+ self._ip = address
+ if address < 0 or address > self._ALL_ONES:
+ raise AddressValueError(address)
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, Bytes):
+ try:
+ self._ip, = struct.unpack('!I', address)
+ except struct.error:
+ raise AddressValueError(address) # Wrong length.
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = str(address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+
+class IPv4Network(_BaseV4, _BaseNet):
+
+ """This class represents and manipulates 32-bit IPv4 networks.
+
+ Attributes: [examples for IPv4Network('1.2.3.4/27')]
+ ._ip: 16909060
+ .ip: IPv4Address('1.2.3.4')
+ .network: IPv4Address('1.2.3.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast: IPv4Address('1.2.3.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+
+ def __init__(self, address, strict=False):
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.168.1.1/24'
+ '192.168.1.1/255.255.255.0'
+ '192.168.1.1/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.168.1.1'
+ '192.168.1.1/255.255.255.255'
+ '192.168.1.1/32'
+            are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.168.1.1') == IPv4Network(3232235777).
+ or, more generally
+ IPv4Network(int(IPv4Network('192.168.1.1'))) ==
+ IPv4Network('192.168.1.1')
+
+ strict: A boolean. If true, ensure that we have been passed
+          a true network address, eg, 192.168.1.0/24 and not an
+ IP address on a network, eg, 192.168.1.1/24.
+
+ Raises:
+ AddressValueError: If ipaddr isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+
+ """
+ _BaseNet.__init__(self, address)
+ _BaseV4.__init__(self, address)
+
+ # Constructing from an integer or packed bytes.
+ if isinstance(address, (int, long, Bytes)):
+ self.ip = IPv4Address(address)
+ self._ip = self.ip._ip
+ self._prefixlen = self._max_prefixlen
+ self.netmask = IPv4Address(self._ALL_ONES)
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = str(address).split('/')
+
+ if len(addr) > 2:
+ raise AddressValueError(address)
+
+ self._ip = self._ip_int_from_string(addr[0])
+ self.ip = IPv4Address(self._ip)
+
+ if len(addr) == 2:
+ try:
+ # Check for a netmask in prefix length form.
+ self._prefixlen = self._prefix_from_prefix_string(addr[1])
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ self._prefixlen = self._prefix_from_ip_string(addr[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.netmask = IPv4Address(self._ip_int_from_prefix(self._prefixlen))
+
+ if strict:
+ if self.ip != self.network:
+ raise ValueError('%s has host bits set' %
+ self.ip)
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.iterhosts = self.__iter__
+
+ # backwards compatibility
+ IsRFC1918 = lambda self: self.is_private
+ IsMulticast = lambda self: self.is_multicast
+ IsLoopback = lambda self: self.is_loopback
+ IsLinkLocal = lambda self: self.is_link_local
+
+
+class _BaseV6(object):
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ _ALL_ONES = (2**IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+
+ def __init__(self, address):
+ self._version = 6
+ self._max_prefixlen = IPV6LENGTH
+
+ def _ip_int_from_string(self, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 ip_str.
+
+ Returns:
+ A long, the IPv6 ip_str.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+ """
+ parts = ip_str.split(':')
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ if len(parts) < 3:
+ raise AddressValueError(ip_str)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if '.' in parts[-1]:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append('%x' % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ if len(parts) > self._HEXTET_COUNT + 1:
+ raise AddressValueError(ip_str)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ try:
+ skip_index, = (
+ [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
+ [None])
+ except ValueError:
+ # Can't have more than one '::'
+ raise AddressValueError(ip_str)
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ raise AddressValueError(ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ raise AddressValueError(ip_str) # :$ requires ::$
+ parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ raise AddressValueError(ip_str)
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The endpoints
+ # could still be empty, but _parse_hextet() will check for that.
+ if len(parts) != self._HEXTET_COUNT:
+ raise AddressValueError(ip_str)
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0L
+ for i in xrange(parts_hi):
+ ip_int <<= 16
+ ip_int |= self._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in xrange(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= self._parse_hextet(parts[i])
+ return ip_int
+ except ValueError:
+ raise AddressValueError(ip_str)
+
+ def _parse_hextet(self, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from [0..FFFF].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not self._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError
+ if len(hextet_str) > 4:
+ raise ValueError
+ hextet_int = int(hextet_str, 16)
+ if hextet_int > 0xFFFF:
+ raise ValueError
+ return hextet_int
+
+ def _compress_hextets(self, hextets):
+ """Compresses a list of hextets.
+
+ Compresses a list of strings, replacing the longest continuous
+ sequence of "0" in the list with "" and adding empty strings at
+ the beginning or at the end of the string such that subsequently
+ calling ":".join(hextets) will produce the compressed version of
+ the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index in range(len(hextets)):
+ if hextets[index] == '0':
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (best_doublecolon_start +
+ best_doublecolon_len)
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += ['']
+ hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [''] + hextets
+
+ return hextets
+
+ def _string_from_ip_int(self, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if not ip_int and ip_int != 0:
+ ip_int = int(self._ip)
+
+ if ip_int > self._ALL_ONES:
+ raise ValueError('IPv6 address is too large')
+
+ hex_str = '%032x' % ip_int
+ hextets = []
+ for x in range(0, 32, 4):
+ hextets.append('%x' % int(hex_str[x:x+4], 16))
+
+ hextets = self._compress_hextets(hextets)
+ return ':'.join(hextets)
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, _BaseNet):
+ ip_str = str(self.ip)
+ else:
+ ip_str = str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ parts = []
+ for i in xrange(self._HEXTET_COUNT):
+ parts.append('%04x' % (ip_int & 0xFFFF))
+ ip_int >>= 16
+ parts.reverse()
+ if isinstance(self, _BaseNet):
+ return '%s/%d' % (':'.join(parts), self.prefixlen)
+ return ':'.join(parts)
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def version(self):
+ return self._version
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return self in IPv6Network('ff00::/8')
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (self in IPv6Network('::/8') or
+ self in IPv6Network('100::/8') or
+ self in IPv6Network('200::/7') or
+ self in IPv6Network('400::/6') or
+ self in IPv6Network('800::/5') or
+ self in IPv6Network('1000::/4') or
+ self in IPv6Network('4000::/3') or
+ self in IPv6Network('6000::/3') or
+ self in IPv6Network('8000::/3') or
+ self in IPv6Network('A000::/3') or
+ self in IPv6Network('C000::/3') or
+ self in IPv6Network('E000::/4') or
+ self in IPv6Network('F000::/5') or
+ self in IPv6Network('F800::/6') or
+ self in IPv6Network('FE00::/9'))
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return self in IPv6Network('fe80::/10')
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in IPv6Network('fec0::/10')
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4193.
+
+ """
+ return self in IPv6Network('fc00::/7')
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
+
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF))
+
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
+
+class IPv6Address(_BaseV6, _BaseIP):
+
+ """Represent and manipulate single IPv6 Addresses.
+ """
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:4860::') ==
+ IPv6Address(42541956101370907050197289607612071936L).
+ or, more generally
+ IPv6Address(IPv6Address('2001:4860::')._ip) ==
+ IPv6Address('2001:4860::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ _BaseV6.__init__(self, address)
+
+ # Efficient constructor from integer.
+ if isinstance(address, (int, long)):
+ self._ip = address
+ if address < 0 or address > self._ALL_ONES:
+ raise AddressValueError(address)
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, Bytes):
+ try:
+ hi, lo = struct.unpack('!QQ', address)
+ except struct.error:
+ raise AddressValueError(address) # Wrong length.
+ self._ip = (hi << 64) | lo
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = str(address)
+ if not addr_str:
+ raise AddressValueError('')
+
+ self._ip = self._ip_int_from_string(addr_str)
+
+
+class IPv6Network(_BaseV6, _BaseNet):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+ Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
+ .ip: IPv6Address('2001:658:22a:cafe:200::1')
+ .network: IPv6Address('2001:658:22a:cafe::')
+ .hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
+ .broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff::')
+ .prefixlen: 64
+
+ """
+
+
+ def __init__(self, address, strict=False):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the IP
+ and prefix/netmask.
+ '2001:4860::/128'
+ '2001:4860:0000:0000:0000:0000:0000:0000/128'
+ '2001:4860::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:4860::') ==
+ IPv6Network(42541956101370907050197289607612071936L).
+ or, more generally
+ IPv6Network(IPv6Network('2001:4860::')._ip) ==
+ IPv6Network('2001:4860::')
+
+ strict: A boolean. If true, ensure that we have been passed
+          a true network address, eg, 192.168.1.0/24 and not an
+ IP address on a network, eg, 192.168.1.1/24.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+
+ """
+ _BaseNet.__init__(self, address)
+ _BaseV6.__init__(self, address)
+
+ # Constructing from an integer or packed bytes.
+ if isinstance(address, (int, long, Bytes)):
+ self.ip = IPv6Address(address)
+ self._ip = self.ip._ip
+ self._prefixlen = self._max_prefixlen
+ self.netmask = IPv6Address(self._ALL_ONES)
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = str(address).split('/')
+
+ if len(addr) > 2:
+ raise AddressValueError(address)
+
+ self._ip = self._ip_int_from_string(addr[0])
+ self.ip = IPv6Address(self._ip)
+
+ if len(addr) == 2:
+ # This may raise NetmaskValueError
+ self._prefixlen = self._prefix_from_prefix_string(addr[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
+
+ if strict:
+ if self.ip != self.network:
+ raise ValueError('%s has host bits set' %
+ self.ip)
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.iterhosts = self.__iter__
+
+ @property
+ def with_netmask(self):
+ return self.with_prefixlen
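+
+# Illustrative usage (editor's sketch, not part of the upstream ipaddr
+# module):
+#
+#   addr = IPAddress('192.168.1.10')        # IPv4Address('192.168.1.10')
+#   net = IPNetwork('192.168.1.0/24')       # IPv4Network('192.168.1.0/24')
+#   addr in net                             # True
+#   net.numhosts                            # 256
+#   IPAddress('::1').is_loopback            # True
+#   collapse_address_list([IPNetwork('1.1.0.0/24'), IPNetwork('1.1.1.0/24')])
+#                                           # [IPv4Network('1.1.0.0/23')]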
diff --git a/lib/ipaddress.py b/lib/ipaddress.py
deleted file mode 100644
index 7657fc8f..00000000
--- a/lib/ipaddress.py
+++ /dev/null
@@ -1,2417 +0,0 @@
-# Copyright 2007 Google Inc.
-# Licensed to PSF under a Contributor Agreement.
-
-"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
-
-This library is used to create/poke/manipulate IPv4 and IPv6 addresses
-and networks.
-
-"""
-
-from __future__ import unicode_literals
-
-
-import itertools
-import struct
-
-__version__ = '1.0.16'
-
-# Compatibility functions
-_compat_int_types = (int,)
-try:
- _compat_int_types = (int, long)
-except NameError:
- pass
-try:
- _compat_str = unicode
-except NameError:
- _compat_str = str
- assert bytes != str
-if b'\0'[0] == 0: # Python 3 semantics
- def _compat_bytes_to_byte_vals(byt):
- return byt
-else:
- def _compat_bytes_to_byte_vals(byt):
- return [struct.unpack(b'!B', b)[0] for b in byt]
-try:
- _compat_int_from_byte_vals = int.from_bytes
-except AttributeError:
- def _compat_int_from_byte_vals(bytvals, endianess):
- assert endianess == 'big'
- res = 0
- for bv in bytvals:
- assert isinstance(bv, _compat_int_types)
- res = (res << 8) + bv
- return res
-
-
-def _compat_to_bytes(intval, length, endianess):
- assert isinstance(intval, _compat_int_types)
- assert endianess == 'big'
- if length == 4:
- if intval < 0 or intval >= 2 ** 32:
- raise struct.error("integer out of range for 'I' format code")
- return struct.pack(b'!I', intval)
- elif length == 16:
- if intval < 0 or intval >= 2 ** 128:
- raise struct.error("integer out of range for 'QQ' format code")
- return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
- else:
- raise NotImplementedError()
-if hasattr(int, 'bit_length'):
- # Not int.bit_length , since that won't work in 2.7 where long exists
- def _compat_bit_length(i):
- return i.bit_length()
-else:
- def _compat_bit_length(i):
- for res in itertools.count():
- if i >> res == 0:
- return res
-
-
-def _compat_range(start, end, step=1):
- assert step > 0
- i = start
- while i < end:
- yield i
- i += step
-
-
-class _TotalOrderingMixin(object):
- __slots__ = ()
-
- # Helper that derives the other comparison operations from
- # __lt__ and __eq__
- # We avoid functools.total_ordering because it doesn't handle
- # NotImplemented correctly yet (http://bugs.python.org/issue10042)
- def __eq__(self, other):
- raise NotImplementedError
-
- def __ne__(self, other):
- equal = self.__eq__(other)
- if equal is NotImplemented:
- return NotImplemented
- return not equal
-
- def __lt__(self, other):
- raise NotImplementedError
-
- def __le__(self, other):
- less = self.__lt__(other)
- if less is NotImplemented or not less:
- return self.__eq__(other)
- return less
-
- def __gt__(self, other):
- less = self.__lt__(other)
- if less is NotImplemented:
- return NotImplemented
- equal = self.__eq__(other)
- if equal is NotImplemented:
- return NotImplemented
- return not (less or equal)
-
- def __ge__(self, other):
- less = self.__lt__(other)
- if less is NotImplemented:
- return NotImplemented
- return not less
-
-
-IPV4LENGTH = 32
-IPV6LENGTH = 128
-
-
-class AddressValueError(ValueError):
- """A Value Error related to the address."""
-
-
-class NetmaskValueError(ValueError):
- """A Value Error related to the netmask."""
-
-
-def ip_address(address):
- """Take an IP string/int and return an object of the correct type.
-
- Args:
- address: A string or integer, the IP address. Either IPv4 or
- IPv6 addresses may be supplied; integers less than 2**32 will
- be considered to be IPv4 by default.
-
- Returns:
- An IPv4Address or IPv6Address object.
-
- Raises:
- ValueError: if the *address* passed isn't either a v4 or a v6
- address
-
- """
- try:
- return IPv4Address(address)
- except (AddressValueError, NetmaskValueError):
- pass
-
- try:
- return IPv6Address(address)
- except (AddressValueError, NetmaskValueError):
- pass
-
- if isinstance(address, bytes):
- raise AddressValueError(
- '%r does not appear to be an IPv4 or IPv6 address. '
- 'Did you pass in a bytes (str in Python 2) instead of'
- ' a unicode object?' % address)
-
- raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
- address)
-
-
-def ip_network(address, strict=True):
- """Take an IP string/int and return an object of the correct type.
-
- Args:
- address: A string or integer, the IP network. Either IPv4 or
- IPv6 networks may be supplied; integers less than 2**32 will
- be considered to be IPv4 by default.
-
- Returns:
- An IPv4Network or IPv6Network object.
-
- Raises:
- ValueError: if the string passed isn't either a v4 or a v6
- address. Or if the network has host bits set.
-
- """
- try:
- return IPv4Network(address, strict)
- except (AddressValueError, NetmaskValueError):
- pass
-
- try:
- return IPv6Network(address, strict)
- except (AddressValueError, NetmaskValueError):
- pass
-
- if isinstance(address, bytes):
- raise AddressValueError(
- '%r does not appear to be an IPv4 or IPv6 network. '
- 'Did you pass in a bytes (str in Python 2) instead of'
- ' a unicode object?' % address)
-
- raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
- address)
-
-
-def ip_interface(address):
- """Take an IP string/int and return an object of the correct type.
-
- Args:
- address: A string or integer, the IP address. Either IPv4 or
- IPv6 addresses may be supplied; integers less than 2**32 will
- be considered to be IPv4 by default.
-
- Returns:
- An IPv4Interface or IPv6Interface object.
-
- Raises:
- ValueError: if the string passed isn't either a v4 or a v6
- address.
-
- Notes:
- The IPv?Interface classes describe an Address on a particular
- Network, so they're basically a combination of both the Address
- and Network classes.
-
- """
- try:
- return IPv4Interface(address)
- except (AddressValueError, NetmaskValueError):
- pass
-
- try:
- return IPv6Interface(address)
- except (AddressValueError, NetmaskValueError):
- pass
-
- raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
- address)
-
-
-def v4_int_to_packed(address):
- """Represent an address as 4 packed bytes in network (big-endian) order.
-
- Args:
- address: An integer representation of an IPv4 IP address.
-
- Returns:
- The integer address packed as 4 bytes in network (big-endian) order.
-
- Raises:
- ValueError: If the integer is negative or too large to be an
- IPv4 IP address.
-
- """
- try:
- return _compat_to_bytes(address, 4, 'big')
- except (struct.error, OverflowError):
- raise ValueError("Address negative or too large for IPv4")
-
-
-def v6_int_to_packed(address):
- """Represent an address as 16 packed bytes in network (big-endian) order.
-
- Args:
- address: An integer representation of an IPv6 IP address.
-
- Returns:
- The integer address packed as 16 bytes in network (big-endian) order.
-
- """
- try:
- return _compat_to_bytes(address, 16, 'big')
- except (struct.error, OverflowError):
- raise ValueError("Address negative or too large for IPv6")
-
-
-def _split_optional_netmask(address):
- """Helper to split the netmask and raise AddressValueError if needed"""
- addr = _compat_str(address).split('/')
- if len(addr) > 2:
- raise AddressValueError("Only one '/' permitted in %r" % address)
- return addr
-
-
-def _find_address_range(addresses):
- """Find a sequence of sorted deduplicated IPv#Address.
-
- Args:
- addresses: a list of IPv#Address objects.
-
- Yields:
- A tuple containing the first and last IP addresses in the sequence.
-
- """
- it = iter(addresses)
- first = last = next(it)
- for ip in it:
- if ip._ip != last._ip + 1:
- yield first, last
- first = ip
- last = ip
- yield first, last
-
-
-def _count_righthand_zero_bits(number, bits):
- """Count the number of zero bits on the right hand side.
-
- Args:
- number: an integer.
- bits: maximum number of bits to count.
-
- Returns:
- The number of zero bits on the right hand side of the number.
-
- """
- if number == 0:
- return bits
- return min(bits, _compat_bit_length(~number & (number - 1)))
-
-
-def summarize_address_range(first, last):
- """Summarize a network range given the first and last IP addresses.
-
- Example:
- >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
- ... IPv4Address('192.0.2.130')))
- ... #doctest: +NORMALIZE_WHITESPACE
- [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
- IPv4Network('192.0.2.130/32')]
-
- Args:
- first: the first IPv4Address or IPv6Address in the range.
- last: the last IPv4Address or IPv6Address in the range.
-
- Returns:
- An iterator of the summarized IPv(4|6) network objects.
-
- Raise:
- TypeError:
- If the first and last objects are not IP addresses.
- If the first and last objects are not the same version.
- ValueError:
- If the last object is not greater than the first.
- If the version of the first address is not 4 or 6.
-
- """
- if (not (isinstance(first, _BaseAddress) and
- isinstance(last, _BaseAddress))):
- raise TypeError('first and last must be IP addresses, not networks')
- if first.version != last.version:
- raise TypeError("%s and %s are not of the same version" % (
- first, last))
- if first > last:
- raise ValueError('last IP address must be greater than first')
-
- if first.version == 4:
- ip = IPv4Network
- elif first.version == 6:
- ip = IPv6Network
- else:
- raise ValueError('unknown IP version')
-
- ip_bits = first._max_prefixlen
- first_int = first._ip
- last_int = last._ip
- while first_int <= last_int:
- nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
- _compat_bit_length(last_int - first_int + 1) - 1)
- net = ip((first_int, ip_bits - nbits))
- yield net
- first_int += 1 << nbits
- if first_int - 1 == ip._ALL_ONES:
- break
-
-
-def _collapse_addresses_internal(addresses):
- """Loops through the addresses, collapsing concurrent netblocks.
-
- Example:
-
- ip1 = IPv4Network('192.0.2.0/26')
- ip2 = IPv4Network('192.0.2.64/26')
- ip3 = IPv4Network('192.0.2.128/26')
- ip4 = IPv4Network('192.0.2.192/26')
-
- _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
- [IPv4Network('192.0.2.0/24')]
-
- This shouldn't be called directly; it is called via
- collapse_addresses([]).
-
- Args:
- addresses: A list of IPv4Network's or IPv6Network's
-
- Returns:
- A list of IPv4Network's or IPv6Network's depending on what we were
- passed.
-
- """
- # First merge
- to_merge = list(addresses)
- subnets = {}
- while to_merge:
- net = to_merge.pop()
- supernet = net.supernet()
- existing = subnets.get(supernet)
- if existing is None:
- subnets[supernet] = net
- elif existing != net:
- # Merge consecutive subnets
- del subnets[supernet]
- to_merge.append(supernet)
- # Then iterate over resulting networks, skipping subsumed subnets
- last = None
- for net in sorted(subnets.values()):
- if last is not None:
- # Since they are sorted,
- # last.network_address <= net.network_address is a given.
- if last.broadcast_address >= net.broadcast_address:
- continue
- yield net
- last = net
-
-
-def collapse_addresses(addresses):
- """Collapse a list of IP objects.
-
- Example:
- collapse_addresses([IPv4Network('192.0.2.0/25'),
- IPv4Network('192.0.2.128/25')]) ->
- [IPv4Network('192.0.2.0/24')]
-
- Args:
- addresses: An iterator of IPv4Network or IPv6Network objects.
-
- Returns:
- An iterator of the collapsed IPv(4|6)Network objects.
-
- Raises:
- TypeError: If passed a list of mixed version objects.
-
- """
- addrs = []
- ips = []
- nets = []
-
- # split IP addresses and networks
- for ip in addresses:
- if isinstance(ip, _BaseAddress):
- if ips and ips[-1]._version != ip._version:
- raise TypeError("%s and %s are not of the same version" % (
- ip, ips[-1]))
- ips.append(ip)
- elif ip._prefixlen == ip._max_prefixlen:
- if ips and ips[-1]._version != ip._version:
- raise TypeError("%s and %s are not of the same version" % (
- ip, ips[-1]))
- try:
- ips.append(ip.ip)
- except AttributeError:
- ips.append(ip.network_address)
- else:
- if nets and nets[-1]._version != ip._version:
- raise TypeError("%s and %s are not of the same version" % (
- ip, nets[-1]))
- nets.append(ip)
-
- # sort and dedup
- ips = sorted(set(ips))
-
- # find consecutive address ranges in the sorted sequence and summarize them
- if ips:
- for first, last in _find_address_range(ips):
- addrs.extend(summarize_address_range(first, last))
-
- return _collapse_addresses_internal(addrs + nets)
-
-
-def get_mixed_type_key(obj):
- """Return a key suitable for sorting between networks and addresses.
-
- Address and Network objects are not sortable by default; they're
- fundamentally different so the expression
-
- IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
-
-    doesn't make any sense. There are times, however, when you may wish
- to have ipaddress sort these for you anyway. If you need to do this, you
- can use this function as the key= argument to sorted().
-
- Args:
- obj: either a Network or Address object.
- Returns:
- appropriate key.
-
- """
- if isinstance(obj, _BaseNetwork):
- return obj._get_networks_key()
- elif isinstance(obj, _BaseAddress):
- return obj._get_address_key()
- return NotImplemented
-
-
-class _IPAddressBase(_TotalOrderingMixin):
-
- """The mother class."""
-
- __slots__ = ()
-
- @property
- def exploded(self):
- """Return the longhand version of the IP address as a string."""
- return self._explode_shorthand_ip_string()
-
- @property
- def compressed(self):
- """Return the shorthand version of the IP address as a string."""
- return _compat_str(self)
-
- @property
- def reverse_pointer(self):
- """The name of the reverse DNS pointer for the IP address, e.g.:
- >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
- '1.0.0.127.in-addr.arpa'
- >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
- '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
-
- """
- return self._reverse_pointer()
-
- @property
- def version(self):
- msg = '%200s has no version specified' % (type(self),)
- raise NotImplementedError(msg)
-
- def _check_int_address(self, address):
- if address < 0:
- msg = "%d (< 0) is not permitted as an IPv%d address"
- raise AddressValueError(msg % (address, self._version))
- if address > self._ALL_ONES:
- msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
- raise AddressValueError(msg % (address, self._max_prefixlen,
- self._version))
-
- def _check_packed_address(self, address, expected_len):
- address_len = len(address)
- if address_len != expected_len:
- msg = (
- '%r (len %d != %d) is not permitted as an IPv%d address. '
- 'Did you pass in a bytes (str in Python 2) instead of'
- ' a unicode object?'
- )
- raise AddressValueError(msg % (address, address_len,
- expected_len, self._version))
-
- @classmethod
- def _ip_int_from_prefix(cls, prefixlen):
- """Turn the prefix length into a bitwise netmask
-
- Args:
- prefixlen: An integer, the prefix length.
-
- Returns:
- An integer.
-
- """
- return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
-
- @classmethod
- def _prefix_from_ip_int(cls, ip_int):
- """Return prefix length from the bitwise netmask.
-
- Args:
- ip_int: An integer, the netmask in expanded bitwise format
-
- Returns:
- An integer, the prefix length.
-
- Raises:
- ValueError: If the input intermingles zeroes & ones
- """
- trailing_zeroes = _count_righthand_zero_bits(ip_int,
- cls._max_prefixlen)
- prefixlen = cls._max_prefixlen - trailing_zeroes
- leading_ones = ip_int >> trailing_zeroes
- all_ones = (1 << prefixlen) - 1
- if leading_ones != all_ones:
- byteslen = cls._max_prefixlen // 8
- details = _compat_to_bytes(ip_int, byteslen, 'big')
- msg = 'Netmask pattern %r mixes zeroes & ones'
- raise ValueError(msg % details)
- return prefixlen
-
- @classmethod
- def _report_invalid_netmask(cls, netmask_str):
- msg = '%r is not a valid netmask' % netmask_str
- raise NetmaskValueError(msg)
-
- @classmethod
- def _prefix_from_prefix_string(cls, prefixlen_str):
- """Return prefix length from a numeric string
-
- Args:
- prefixlen_str: The string to be converted
-
- Returns:
- An integer, the prefix length.
-
- Raises:
- NetmaskValueError: If the input is not a valid netmask
- """
- # int allows a leading +/- as well as surrounding whitespace,
- # so we ensure that isn't the case
- if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
- cls._report_invalid_netmask(prefixlen_str)
- try:
- prefixlen = int(prefixlen_str)
- except ValueError:
- cls._report_invalid_netmask(prefixlen_str)
- if not (0 <= prefixlen <= cls._max_prefixlen):
- cls._report_invalid_netmask(prefixlen_str)
- return prefixlen
-
- @classmethod
- def _prefix_from_ip_string(cls, ip_str):
- """Turn a netmask/hostmask string into a prefix length
-
- Args:
- ip_str: The netmask/hostmask to be converted
-
- Returns:
- An integer, the prefix length.
-
- Raises:
- NetmaskValueError: If the input is not a valid netmask/hostmask
- """
- # Parse the netmask/hostmask like an IP address.
- try:
- ip_int = cls._ip_int_from_string(ip_str)
- except AddressValueError:
- cls._report_invalid_netmask(ip_str)
-
- # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
- # Note that the two ambiguous cases (all-ones and all-zeroes) are
- # treated as netmasks.
- try:
- return cls._prefix_from_ip_int(ip_int)
- except ValueError:
- pass
-
- # Invert the bits, and try matching a /0+1+/ hostmask instead.
- ip_int ^= cls._ALL_ONES
- try:
- return cls._prefix_from_ip_int(ip_int)
- except ValueError:
- cls._report_invalid_netmask(ip_str)
-
- def __reduce__(self):
- return self.__class__, (_compat_str(self),)
-
-
-class _BaseAddress(_IPAddressBase):
-
- """A generic IP object.
-
- This IP class contains the version independent methods which are
- used by single IP addresses.
- """
-
- __slots__ = ()
-
- def __int__(self):
- return self._ip
-
- def __eq__(self, other):
- try:
- return (self._ip == other._ip and
- self._version == other._version)
- except AttributeError:
- return NotImplemented
-
- def __lt__(self, other):
- if not isinstance(other, _IPAddressBase):
- return NotImplemented
- if not isinstance(other, _BaseAddress):
- raise TypeError('%s and %s are not of the same type' % (
- self, other))
- if self._version != other._version:
- raise TypeError('%s and %s are not of the same version' % (
- self, other))
- if self._ip != other._ip:
- return self._ip < other._ip
- return False
-
- # Shorthand for Integer addition and subtraction. This is not
- # meant to ever support addition/subtraction of addresses.
- def __add__(self, other):
- if not isinstance(other, _compat_int_types):
- return NotImplemented
- return self.__class__(int(self) + other)
-
- def __sub__(self, other):
- if not isinstance(other, _compat_int_types):
- return NotImplemented
- return self.__class__(int(self) - other)
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
-
- def __str__(self):
- return _compat_str(self._string_from_ip_int(self._ip))
-
- def __hash__(self):
- return hash(hex(int(self._ip)))
-
- def _get_address_key(self):
- return (self._version, self)
-
- def __reduce__(self):
- return self.__class__, (self._ip,)
-
-
-class _BaseNetwork(_IPAddressBase):
-
- """A generic IP network object.
-
- This IP class contains the version independent methods which are
- used by networks.
-
- """
- def __init__(self, address):
- self._cache = {}
-
- def __repr__(self):
- return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
-
- def __str__(self):
- return '%s/%d' % (self.network_address, self.prefixlen)
-
- def hosts(self):
- """Generate Iterator over usable hosts in a network.
-
- This is like __iter__ except it doesn't return the network
- or broadcast addresses.
-
- """
- network = int(self.network_address)
- broadcast = int(self.broadcast_address)
- for x in _compat_range(network + 1, broadcast):
- yield self._address_class(x)
-
- def __iter__(self):
- network = int(self.network_address)
- broadcast = int(self.broadcast_address)
- for x in _compat_range(network, broadcast + 1):
- yield self._address_class(x)
-
- def __getitem__(self, n):
- network = int(self.network_address)
- broadcast = int(self.broadcast_address)
- if n >= 0:
- if network + n > broadcast:
- raise IndexError
- return self._address_class(network + n)
- else:
- n += 1
- if broadcast + n < network:
- raise IndexError
- return self._address_class(broadcast + n)
-
- def __lt__(self, other):
- if not isinstance(other, _IPAddressBase):
- return NotImplemented
- if not isinstance(other, _BaseNetwork):
- raise TypeError('%s and %s are not of the same type' % (
- self, other))
- if self._version != other._version:
- raise TypeError('%s and %s are not of the same version' % (
- self, other))
- if self.network_address != other.network_address:
- return self.network_address < other.network_address
- if self.netmask != other.netmask:
- return self.netmask < other.netmask
- return False
-
- def __eq__(self, other):
- try:
- return (self._version == other._version and
- self.network_address == other.network_address and
- int(self.netmask) == int(other.netmask))
- except AttributeError:
- return NotImplemented
-
- def __hash__(self):
- return hash(int(self.network_address) ^ int(self.netmask))
-
- def __contains__(self, other):
- # always false if one is v4 and the other is v6.
- if self._version != other._version:
- return False
- # dealing with another network.
- if isinstance(other, _BaseNetwork):
- return False
- # dealing with another address
- else:
- # address
- return (int(self.network_address) <= int(other._ip) <=
- int(self.broadcast_address))
-
- def overlaps(self, other):
- """Tell if self is partly contained in other."""
- return self.network_address in other or (
- self.broadcast_address in other or (
- other.network_address in self or (
- other.broadcast_address in self)))
-
- @property
- def broadcast_address(self):
- x = self._cache.get('broadcast_address')
- if x is None:
- x = self._address_class(int(self.network_address) |
- int(self.hostmask))
- self._cache['broadcast_address'] = x
- return x
-
- @property
- def hostmask(self):
- x = self._cache.get('hostmask')
- if x is None:
- x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
- self._cache['hostmask'] = x
- return x
-
- @property
- def with_prefixlen(self):
- return '%s/%d' % (self.network_address, self._prefixlen)
-
- @property
- def with_netmask(self):
- return '%s/%s' % (self.network_address, self.netmask)
-
- @property
- def with_hostmask(self):
- return '%s/%s' % (self.network_address, self.hostmask)
-
- @property
- def num_addresses(self):
- """Number of hosts in the current subnet."""
- return int(self.broadcast_address) - int(self.network_address) + 1
-
- @property
- def _address_class(self):
- # Returning bare address objects (rather than interfaces) allows for
- # more consistent behaviour across the network address, broadcast
- # address and individual host addresses.
- msg = '%200s has no associated address class' % (type(self),)
- raise NotImplementedError(msg)
-
- @property
- def prefixlen(self):
- return self._prefixlen
-
- def address_exclude(self, other):
- """Remove an address from a larger block.
-
- For example:
-
- addr1 = ip_network('192.0.2.0/28')
- addr2 = ip_network('192.0.2.1/32')
- addr1.address_exclude(addr2) =
- [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
- IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
-
- or IPv6:
-
- addr1 = ip_network('2001:db8::1/32')
- addr2 = ip_network('2001:db8::1/128')
- addr1.address_exclude(addr2) =
- [ip_network('2001:db8::1/128'),
- ip_network('2001:db8::2/127'),
- ip_network('2001:db8::4/126'),
- ip_network('2001:db8::8/125'),
- ...
- ip_network('2001:db8:8000::/33')]
-
- Args:
- other: An IPv4Network or IPv6Network object of the same type.
-
- Returns:
- An iterator of the IPv(4|6)Network objects which is self
- minus other.
-
- Raises:
- TypeError: If self and other are of differing address
- versions, or if other is not a network object.
- ValueError: If other is not completely contained by self.
-
- """
- if not self._version == other._version:
- raise TypeError("%s and %s are not of the same version" % (
- self, other))
-
- if not isinstance(other, _BaseNetwork):
- raise TypeError("%s is not a network object" % other)
-
- if not other.subnet_of(self):
- raise ValueError('%s not contained in %s' % (other, self))
- if other == self:
- return
-
- # Make sure we're comparing the network of other.
- other = other.__class__('%s/%s' % (other.network_address,
- other.prefixlen))
-
- s1, s2 = self.subnets()
- while s1 != other and s2 != other:
- if other.subnet_of(s1):
- yield s2
- s1, s2 = s1.subnets()
- elif other.subnet_of(s2):
- yield s1
- s1, s2 = s2.subnets()
- else:
- # If we got here, there's a bug somewhere.
- raise AssertionError('Error performing exclusion: '
- 's1: %s s2: %s other: %s' %
- (s1, s2, other))
- if s1 == other:
- yield s2
- elif s2 == other:
- yield s1
- else:
- # If we got here, there's a bug somewhere.
- raise AssertionError('Error performing exclusion: '
- 's1: %s s2: %s other: %s' %
- (s1, s2, other))
-
- def compare_networks(self, other):
- """Compare two IP objects.
-
- This is only concerned about the comparison of the integer
- representation of the network addresses. This means that the
- host bits aren't considered at all in this method. If you want
- to compare host bits, you can easily enough do a
- 'HostA._ip < HostB._ip'
-
- Args:
- other: An IP object.
-
- Returns:
- If the IP versions of self and other are the same, returns:
-
- -1 if self < other:
- eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
- IPv6Network('2001:db8::1000/124') <
- IPv6Network('2001:db8::2000/124')
- 0 if self == other
- eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
- IPv6Network('2001:db8::1000/124') ==
- IPv6Network('2001:db8::1000/124')
- 1 if self > other
- eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
- IPv6Network('2001:db8::2000/124') >
- IPv6Network('2001:db8::1000/124')
-
- Raises:
- TypeError if the IP versions are different.
-
- """
- # does this need to raise a ValueError?
- if self._version != other._version:
- raise TypeError('%s and %s are not of the same type' % (
- self, other))
- # self._version == other._version below here:
- if self.network_address < other.network_address:
- return -1
- if self.network_address > other.network_address:
- return 1
- # self.network_address == other.network_address below here:
- if self.netmask < other.netmask:
- return -1
- if self.netmask > other.netmask:
- return 1
- return 0
-
- def _get_networks_key(self):
- """Network-only key function.
-
- Returns an object that identifies this address' network and
- netmask. This function is a suitable "key" argument for sorted()
- and list.sort().
-
- """
- return (self._version, self.network_address, self.netmask)
-
- def subnets(self, prefixlen_diff=1, new_prefix=None):
- """The subnets which join to make the current subnet.
-
- In the case that self contains only one IP
- (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
- for IPv6), yield an iterator with just ourself.
-
- Args:
- prefixlen_diff: An integer, the amount the prefix length
- should be increased by. This should not be set if
- new_prefix is also set.
- new_prefix: The desired new prefix length. This must be a
- larger number (smaller prefix) than the existing prefix.
- This should not be set if prefixlen_diff is also set.
-
- Returns:
- An iterator of IPv(4|6) objects.
-
- Raises:
- ValueError: The prefixlen_diff is too small or too large.
- OR
- prefixlen_diff and new_prefix are both set or new_prefix
- is a smaller number than the current prefix (smaller
- number means a larger network)
-
- """
- if self._prefixlen == self._max_prefixlen:
- yield self
- return
-
- if new_prefix is not None:
- if new_prefix < self._prefixlen:
- raise ValueError('new prefix must be longer')
- if prefixlen_diff != 1:
- raise ValueError('cannot set prefixlen_diff and new_prefix')
- prefixlen_diff = new_prefix - self._prefixlen
-
- if prefixlen_diff < 0:
- raise ValueError('prefix length diff must be > 0')
- new_prefixlen = self._prefixlen + prefixlen_diff
-
- if new_prefixlen > self._max_prefixlen:
- raise ValueError(
- 'prefix length diff %d is invalid for netblock %s' % (
- new_prefixlen, self))
-
- start = int(self.network_address)
- end = int(self.broadcast_address)
- step = (int(self.hostmask) + 1) >> prefixlen_diff
- for new_addr in _compat_range(start, end, step):
- current = self.__class__((new_addr, new_prefixlen))
- yield current
-
- def supernet(self, prefixlen_diff=1, new_prefix=None):
- """The supernet containing the current network.
-
- Args:
- prefixlen_diff: An integer, the amount the prefix length of
- the network should be decreased by. For example, given a
- /24 network and a prefixlen_diff of 3, a supernet with a
- /21 netmask is returned.
-
- Returns:
- An IPv4 network object.
-
- Raises:
- ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
- a negative prefix length.
- OR
- If prefixlen_diff and new_prefix are both set or new_prefix is a
- larger number than the current prefix (larger number means a
- smaller network)
-
- """
- if self._prefixlen == 0:
- return self
-
- if new_prefix is not None:
- if new_prefix > self._prefixlen:
- raise ValueError('new prefix must be shorter')
- if prefixlen_diff != 1:
- raise ValueError('cannot set prefixlen_diff and new_prefix')
- prefixlen_diff = self._prefixlen - new_prefix
-
- new_prefixlen = self.prefixlen - prefixlen_diff
- if new_prefixlen < 0:
- raise ValueError(
- 'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
- (self.prefixlen, prefixlen_diff))
- return self.__class__((
- int(self.network_address) & (int(self.netmask) << prefixlen_diff),
- new_prefixlen
- ))
-
- @property
- def is_multicast(self):
- """Test if the address is reserved for multicast use.
-
- Returns:
- A boolean, True if the address is a multicast address.
- See RFC 2373 2.7 for details.
-
- """
- return (self.network_address.is_multicast and
- self.broadcast_address.is_multicast)
-
- def subnet_of(self, other):
- # always false if one is v4 and the other is v6.
- if self._version != other._version:
- return False
- # dealing with another network.
- if (hasattr(other, 'network_address') and
- hasattr(other, 'broadcast_address')):
- return (other.network_address <= self.network_address and
- other.broadcast_address >= self.broadcast_address)
- # dealing with another address
- else:
- raise TypeError('Unable to test subnet containment with element '
- 'of type %s' % type(other))
-
- def supernet_of(self, other):
- # always false if one is v4 and the other is v6.
- if self._version != other._version:
- return False
- # dealing with another network.
- if (hasattr(other, 'network_address') and
- hasattr(other, 'broadcast_address')):
- return (other.network_address >= self.network_address and
- other.broadcast_address <= self.broadcast_address)
- # dealing with another address
- else:
- raise TypeError('Unable to test subnet containment with element '
- 'of type %s' % type(other))
-
- @property
- def is_reserved(self):
- """Test if the address is otherwise IETF reserved.
-
- Returns:
- A boolean, True if the address is within one of the
- reserved IPv6 Network ranges.
-
- """
- return (self.network_address.is_reserved and
- self.broadcast_address.is_reserved)
-
- @property
- def is_link_local(self):
- """Test if the address is reserved for link-local.
-
- Returns:
- A boolean, True if the address is reserved per RFC 4291.
-
- """
- return (self.network_address.is_link_local and
- self.broadcast_address.is_link_local)
-
- @property
- def is_private(self):
- """Test if this address is allocated for private networks.
-
- Returns:
- A boolean, True if the address is reserved per
- iana-ipv4-special-registry or iana-ipv6-special-registry.
-
- """
- return (self.network_address.is_private and
- self.broadcast_address.is_private)
-
- @property
- def is_global(self):
- """Test if this address is allocated for public networks.
-
- Returns:
- A boolean, True if the address is not reserved per
- iana-ipv4-special-registry or iana-ipv6-special-registry.
-
- """
- return not self.is_private
-
- @property
- def is_unspecified(self):
- """Test if the address is unspecified.
-
- Returns:
- A boolean, True if this is the unspecified address as defined in
- RFC 2373 2.5.2.
-
- """
- return (self.network_address.is_unspecified and
- self.broadcast_address.is_unspecified)
-
- @property
- def is_loopback(self):
- """Test if the address is a loopback address.
-
- Returns:
- A boolean, True if the address is a loopback address as defined in
- RFC 2373 2.5.3.
-
- """
- return (self.network_address.is_loopback and
- self.broadcast_address.is_loopback)
-
-
-class _BaseV4(object):
-
- """Base IPv4 object.
-
- The following methods are used by IPv4 objects in both single IP
- addresses and networks.
-
- """
-
- __slots__ = ()
- _version = 4
- # Equivalent to 255.255.255.255 or 32 bits of 1's.
- _ALL_ONES = (2 ** IPV4LENGTH) - 1
- _DECIMAL_DIGITS = frozenset('0123456789')
-
- # the valid octets for host and netmasks. only useful for IPv4.
- _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
-
- _max_prefixlen = IPV4LENGTH
- # There are only a handful of valid v4 netmasks, so we cache them all
- # when constructed (see _make_netmask()).
- _netmask_cache = {}
-
- def _explode_shorthand_ip_string(self):
- return _compat_str(self)
-
- @classmethod
- def _make_netmask(cls, arg):
- """Make a (netmask, prefix_len) tuple from the given argument.
-
- Argument can be:
- - an integer (the prefix length)
- - a string representing the prefix length (e.g. "24")
- - a string representing the prefix netmask (e.g. "255.255.255.0")
- """
- if arg not in cls._netmask_cache:
- if isinstance(arg, _compat_int_types):
- prefixlen = arg
- else:
- try:
- # Check for a netmask in prefix length form
- prefixlen = cls._prefix_from_prefix_string(arg)
- except NetmaskValueError:
- # Check for a netmask or hostmask in dotted-quad form.
- # This may raise NetmaskValueError.
- prefixlen = cls._prefix_from_ip_string(arg)
- netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
- cls._netmask_cache[arg] = netmask, prefixlen
- return cls._netmask_cache[arg]
-
- @classmethod
- def _ip_int_from_string(cls, ip_str):
- """Turn the given IP string into an integer for comparison.
-
- Args:
- ip_str: A string, the IP ip_str.
-
- Returns:
- The IP ip_str as an integer.
-
- Raises:
- AddressValueError: if ip_str isn't a valid IPv4 Address.
-
- """
- if not ip_str:
- raise AddressValueError('Address cannot be empty')
-
- octets = ip_str.split('.')
- if len(octets) != 4:
- raise AddressValueError("Expected 4 octets in %r" % ip_str)
-
- try:
- return _compat_int_from_byte_vals(
- map(cls._parse_octet, octets), 'big')
- except ValueError as exc:
- raise AddressValueError("%s in %r" % (exc, ip_str))
-
- @classmethod
- def _parse_octet(cls, octet_str):
- """Convert a decimal octet into an integer.
-
- Args:
- octet_str: A string, the number to parse.
-
- Returns:
- The octet as an integer.
-
- Raises:
- ValueError: if the octet isn't strictly a decimal from [0..255].
-
- """
- if not octet_str:
- raise ValueError("Empty octet not permitted")
- # Whitelist the characters, since int() allows a lot of bizarre stuff.
- if not cls._DECIMAL_DIGITS.issuperset(octet_str):
- msg = "Only decimal digits permitted in %r"
- raise ValueError(msg % octet_str)
- # We do the length check second, since the invalid character error
- # is likely to be more informative for the user
- if len(octet_str) > 3:
- msg = "At most 3 characters permitted in %r"
- raise ValueError(msg % octet_str)
- # Convert to integer (we know digits are legal)
- octet_int = int(octet_str, 10)
- # Any octets that look like they *might* be written in octal,
- # and which don't look exactly the same in both octal and
- # decimal are rejected as ambiguous
- if octet_int > 7 and octet_str[0] == '0':
- msg = "Ambiguous (octal/decimal) value in %r not permitted"
- raise ValueError(msg % octet_str)
- if octet_int > 255:
- raise ValueError("Octet %d (> 255) not permitted" % octet_int)
- return octet_int
-
- @classmethod
- def _string_from_ip_int(cls, ip_int):
- """Turns a 32-bit integer into dotted decimal notation.
-
- Args:
- ip_int: An integer, the IP address.
-
- Returns:
- The IP address as a string in dotted decimal notation.
-
- """
- return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
- if isinstance(b, bytes)
- else b)
- for b in _compat_to_bytes(ip_int, 4, 'big'))
-
- def _is_hostmask(self, ip_str):
- """Test if the IP string is a hostmask (rather than a netmask).
-
- Args:
- ip_str: A string, the potential hostmask.
-
- Returns:
- A boolean, True if the IP string is a hostmask.
-
- """
- bits = ip_str.split('.')
- try:
- parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
- except ValueError:
- return False
- if len(parts) != len(bits):
- return False
- if parts[0] < parts[-1]:
- return True
- return False
-
- def _reverse_pointer(self):
- """Return the reverse DNS pointer name for the IPv4 address.
-
- This implements the method described in RFC1035 3.5.
-
- """
- reverse_octets = _compat_str(self).split('.')[::-1]
- return '.'.join(reverse_octets) + '.in-addr.arpa'
-
- @property
- def max_prefixlen(self):
- return self._max_prefixlen
-
- @property
- def version(self):
- return self._version
-
-
-class IPv4Address(_BaseV4, _BaseAddress):
-
- """Represent and manipulate single IPv4 Addresses."""
-
- __slots__ = ('_ip', '__weakref__')
-
- def __init__(self, address):
-
- """
- Args:
- address: A string or integer representing the IP
-
- Additionally, an integer can be passed, so
- IPv4Address('192.0.2.1') == IPv4Address(3221225985).
- or, more generally
- IPv4Address(int(IPv4Address('192.0.2.1'))) ==
- IPv4Address('192.0.2.1')
-
- Raises:
-            AddressValueError: If address isn't a valid IPv4 address.
-
- """
- # Efficient constructor from integer.
- if isinstance(address, _compat_int_types):
- self._check_int_address(address)
- self._ip = address
- return
-
- # Constructing from a packed address
- if isinstance(address, bytes):
- self._check_packed_address(address, 4)
- bvs = _compat_bytes_to_byte_vals(address)
- self._ip = _compat_int_from_byte_vals(bvs, 'big')
- return
-
- # Assume input argument to be string or any object representation
- # which converts into a formatted IP string.
- addr_str = _compat_str(address)
- if '/' in addr_str:
- raise AddressValueError("Unexpected '/' in %r" % address)
- self._ip = self._ip_int_from_string(addr_str)
-
- @property
- def packed(self):
- """The binary representation of this address."""
- return v4_int_to_packed(self._ip)
-
- @property
- def is_reserved(self):
- """Test if the address is otherwise IETF reserved.
-
- Returns:
- A boolean, True if the address is within the
- reserved IPv4 Network range.
-
- """
- return self in self._constants._reserved_network
-
- @property
- def is_private(self):
- """Test if this address is allocated for private networks.
-
- Returns:
- A boolean, True if the address is reserved per
- iana-ipv4-special-registry.
-
- """
- return any(self in net for net in self._constants._private_networks)
-
- @property
- def is_multicast(self):
- """Test if the address is reserved for multicast use.
-
- Returns:
- A boolean, True if the address is multicast.
- See RFC 3171 for details.
-
- """
- return self in self._constants._multicast_network
-
- @property
- def is_unspecified(self):
- """Test if the address is unspecified.
-
- Returns:
- A boolean, True if this is the unspecified address as defined in
- RFC 5735 3.
-
- """
- return self == self._constants._unspecified_address
-
- @property
- def is_loopback(self):
- """Test if the address is a loopback address.
-
- Returns:
- A boolean, True if the address is a loopback per RFC 3330.
-
- """
- return self in self._constants._loopback_network
-
- @property
- def is_link_local(self):
- """Test if the address is reserved for link-local.
-
- Returns:
- A boolean, True if the address is link-local per RFC 3927.
-
- """
- return self in self._constants._linklocal_network
-
-
-class IPv4Interface(IPv4Address):
-
- def __init__(self, address):
- if isinstance(address, (bytes, _compat_int_types)):
- IPv4Address.__init__(self, address)
- self.network = IPv4Network(self._ip)
- self._prefixlen = self._max_prefixlen
- return
-
- if isinstance(address, tuple):
- IPv4Address.__init__(self, address[0])
- if len(address) > 1:
- self._prefixlen = int(address[1])
- else:
- self._prefixlen = self._max_prefixlen
-
- self.network = IPv4Network(address, strict=False)
- self.netmask = self.network.netmask
- self.hostmask = self.network.hostmask
- return
-
- addr = _split_optional_netmask(address)
- IPv4Address.__init__(self, addr[0])
-
- self.network = IPv4Network(address, strict=False)
- self._prefixlen = self.network._prefixlen
-
- self.netmask = self.network.netmask
- self.hostmask = self.network.hostmask
-
- def __str__(self):
- return '%s/%d' % (self._string_from_ip_int(self._ip),
- self.network.prefixlen)
-
- def __eq__(self, other):
- address_equal = IPv4Address.__eq__(self, other)
- if not address_equal or address_equal is NotImplemented:
- return address_equal
- try:
- return self.network == other.network
- except AttributeError:
- # An interface with an associated network is NOT the
- # same as an unassociated address. That's why the hash
- # takes the extra info into account.
- return False
-
- def __lt__(self, other):
- address_less = IPv4Address.__lt__(self, other)
- if address_less is NotImplemented:
- return NotImplemented
- try:
- return self.network < other.network
- except AttributeError:
- # We *do* allow addresses and interfaces to be sorted. The
- # unassociated address is considered less than all interfaces.
- return False
-
- def __hash__(self):
- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
-
- __reduce__ = _IPAddressBase.__reduce__
-
- @property
- def ip(self):
- return IPv4Address(self._ip)
-
- @property
- def with_prefixlen(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self._prefixlen)
-
- @property
- def with_netmask(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self.netmask)
-
- @property
- def with_hostmask(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self.hostmask)
-
-
-class IPv4Network(_BaseV4, _BaseNetwork):
-
- """This class represents and manipulates 32-bit IPv4 network + addresses..
-
- Attributes: [examples for IPv4Network('192.0.2.0/27')]
- .network_address: IPv4Address('192.0.2.0')
- .hostmask: IPv4Address('0.0.0.31')
-        .broadcast_address: IPv4Address('192.0.2.31')
- .netmask: IPv4Address('255.255.255.224')
- .prefixlen: 27
-
- """
- # Class to use when creating address objects
- _address_class = IPv4Address
-
- def __init__(self, address, strict=True):
-
- """Instantiate a new IPv4 network object.
-
- Args:
- address: A string or integer representing the IP [& network].
- '192.0.2.0/24'
- '192.0.2.0/255.255.255.0'
- '192.0.0.2/0.0.0.255'
- are all functionally the same in IPv4. Similarly,
- '192.0.2.1'
- '192.0.2.1/255.255.255.255'
- '192.0.2.1/32'
- are also functionally equivalent. That is to say, failing to
- provide a subnetmask will create an object with a mask of /32.
-
- If the mask (portion after the / in the argument) is given in
- dotted quad form, it is treated as a netmask if it starts with a
- non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
- starts with a zero field (e.g. 0.255.255.255 == /8), with the
- single exception of an all-zero mask which is treated as a
- netmask == /0. If no mask is given, a default of /32 is used.
-
- Additionally, an integer can be passed, so
- IPv4Network('192.0.2.1') == IPv4Network(3221225985)
- or, more generally
- IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
- IPv4Interface('192.0.2.1')
-
- Raises:
-            AddressValueError: If address isn't a valid IPv4 address.
- NetmaskValueError: If the netmask isn't valid for
- an IPv4 address.
- ValueError: If strict is True and a network address is not
- supplied.
-
- """
- _BaseNetwork.__init__(self, address)
-
- # Constructing from a packed address or integer
- if isinstance(address, (_compat_int_types, bytes)):
- self.network_address = IPv4Address(address)
- self.netmask, self._prefixlen = self._make_netmask(
- self._max_prefixlen)
- # fixme: address/network test here.
- return
-
- if isinstance(address, tuple):
- if len(address) > 1:
- arg = address[1]
- else:
- # We weren't given an address[1]
- arg = self._max_prefixlen
- self.network_address = IPv4Address(address[0])
- self.netmask, self._prefixlen = self._make_netmask(arg)
- packed = int(self.network_address)
- if packed & int(self.netmask) != packed:
- if strict:
- raise ValueError('%s has host bits set' % self)
- else:
- self.network_address = IPv4Address(packed &
- int(self.netmask))
- return
-
- # Assume input argument to be string or any object representation
- # which converts into a formatted IP prefix string.
- addr = _split_optional_netmask(address)
- self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
-
- if len(addr) == 2:
- arg = addr[1]
- else:
- arg = self._max_prefixlen
- self.netmask, self._prefixlen = self._make_netmask(arg)
-
- if strict:
- if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
- self.network_address):
- raise ValueError('%s has host bits set' % self)
- self.network_address = IPv4Address(int(self.network_address) &
- int(self.netmask))
-
- if self._prefixlen == (self._max_prefixlen - 1):
- self.hosts = self.__iter__
-
- @property
- def is_global(self):
- """Test if this address is allocated for public networks.
-
- Returns:
- A boolean, True if the address is not reserved per
- iana-ipv4-special-registry.
-
- """
- return (not (self.network_address in IPv4Network('100.64.0.0/10') and
- self.broadcast_address in IPv4Network('100.64.0.0/10')) and
- not self.is_private)
-
-
-class _IPv4Constants(object):
-
- _linklocal_network = IPv4Network('169.254.0.0/16')
-
- _loopback_network = IPv4Network('127.0.0.0/8')
-
- _multicast_network = IPv4Network('224.0.0.0/4')
-
- _private_networks = [
- IPv4Network('0.0.0.0/8'),
- IPv4Network('10.0.0.0/8'),
- IPv4Network('127.0.0.0/8'),
- IPv4Network('169.254.0.0/16'),
- IPv4Network('172.16.0.0/12'),
- IPv4Network('192.0.0.0/29'),
- IPv4Network('192.0.0.170/31'),
- IPv4Network('192.0.2.0/24'),
- IPv4Network('192.168.0.0/16'),
- IPv4Network('198.18.0.0/15'),
- IPv4Network('198.51.100.0/24'),
- IPv4Network('203.0.113.0/24'),
- IPv4Network('240.0.0.0/4'),
- IPv4Network('255.255.255.255/32'),
- ]
-
- _reserved_network = IPv4Network('240.0.0.0/4')
-
- _unspecified_address = IPv4Address('0.0.0.0')
-
-
-IPv4Address._constants = _IPv4Constants
-
-
-class _BaseV6(object):
-
- """Base IPv6 object.
-
- The following methods are used by IPv6 objects in both single IP
- addresses and networks.
-
- """
-
- __slots__ = ()
- _version = 6
- _ALL_ONES = (2 ** IPV6LENGTH) - 1
- _HEXTET_COUNT = 8
- _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
- _max_prefixlen = IPV6LENGTH
-
- # There are only a bunch of valid v6 netmasks, so we cache them all
- # when constructed (see _make_netmask()).
- _netmask_cache = {}
-
- @classmethod
- def _make_netmask(cls, arg):
- """Make a (netmask, prefix_len) tuple from the given argument.
-
- Argument can be:
- - an integer (the prefix length)
- - a string representing the prefix length (e.g. "24")
- - a string representing the prefix netmask (e.g. "255.255.255.0")
- """
- if arg not in cls._netmask_cache:
- if isinstance(arg, _compat_int_types):
- prefixlen = arg
- else:
- prefixlen = cls._prefix_from_prefix_string(arg)
- netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
- cls._netmask_cache[arg] = netmask, prefixlen
- return cls._netmask_cache[arg]
-
- @classmethod
- def _ip_int_from_string(cls, ip_str):
- """Turn an IPv6 ip_str into an integer.
-
- Args:
- ip_str: A string, the IPv6 ip_str.
-
- Returns:
- An int, the IPv6 address
-
- Raises:
- AddressValueError: if ip_str isn't a valid IPv6 Address.
-
- """
- if not ip_str:
- raise AddressValueError('Address cannot be empty')
-
- parts = ip_str.split(':')
-
- # An IPv6 address needs at least 2 colons (3 parts).
- _min_parts = 3
- if len(parts) < _min_parts:
- msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
- raise AddressValueError(msg)
-
- # If the address has an IPv4-style suffix, convert it to hexadecimal.
- if '.' in parts[-1]:
- try:
- ipv4_int = IPv4Address(parts.pop())._ip
- except AddressValueError as exc:
- raise AddressValueError("%s in %r" % (exc, ip_str))
- parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
- parts.append('%x' % (ipv4_int & 0xFFFF))
-
- # An IPv6 address can't have more than 8 colons (9 parts).
- # The extra colon comes from using the "::" notation for a single
- # leading or trailing zero part.
- _max_parts = cls._HEXTET_COUNT + 1
- if len(parts) > _max_parts:
- msg = "At most %d colons permitted in %r" % (
- _max_parts - 1, ip_str)
- raise AddressValueError(msg)
-
- # Disregarding the endpoints, find '::' with nothing in between.
- # This indicates that a run of zeroes has been skipped.
- skip_index = None
- for i in _compat_range(1, len(parts) - 1):
- if not parts[i]:
- if skip_index is not None:
- # Can't have more than one '::'
- msg = "At most one '::' permitted in %r" % ip_str
- raise AddressValueError(msg)
- skip_index = i
-
- # parts_hi is the number of parts to copy from above/before the '::'
- # parts_lo is the number of parts to copy from below/after the '::'
- if skip_index is not None:
- # If we found a '::', then check if it also covers the endpoints.
- parts_hi = skip_index
- parts_lo = len(parts) - skip_index - 1
- if not parts[0]:
- parts_hi -= 1
- if parts_hi:
- msg = "Leading ':' only permitted as part of '::' in %r"
- raise AddressValueError(msg % ip_str) # ^: requires ^::
- if not parts[-1]:
- parts_lo -= 1
- if parts_lo:
- msg = "Trailing ':' only permitted as part of '::' in %r"
- raise AddressValueError(msg % ip_str) # :$ requires ::$
- parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
- if parts_skipped < 1:
- msg = "Expected at most %d other parts with '::' in %r"
- raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
- else:
- # Otherwise, allocate the entire address to parts_hi. The
- # endpoints could still be empty, but _parse_hextet() will check
- # for that.
- if len(parts) != cls._HEXTET_COUNT:
- msg = "Exactly %d parts expected without '::' in %r"
- raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
- if not parts[0]:
- msg = "Leading ':' only permitted as part of '::' in %r"
- raise AddressValueError(msg % ip_str) # ^: requires ^::
- if not parts[-1]:
- msg = "Trailing ':' only permitted as part of '::' in %r"
- raise AddressValueError(msg % ip_str) # :$ requires ::$
- parts_hi = len(parts)
- parts_lo = 0
- parts_skipped = 0
-
- try:
- # Now, parse the hextets into a 128-bit integer.
- ip_int = 0
- for i in range(parts_hi):
- ip_int <<= 16
- ip_int |= cls._parse_hextet(parts[i])
- ip_int <<= 16 * parts_skipped
- for i in range(-parts_lo, 0):
- ip_int <<= 16
- ip_int |= cls._parse_hextet(parts[i])
- return ip_int
- except ValueError as exc:
- raise AddressValueError("%s in %r" % (exc, ip_str))
-
- @classmethod
- def _parse_hextet(cls, hextet_str):
- """Convert an IPv6 hextet string into an integer.
-
- Args:
- hextet_str: A string, the number to parse.
-
- Returns:
- The hextet as an integer.
-
- Raises:
- ValueError: if the input isn't strictly a hex number from
- [0..FFFF].
-
- """
- # Whitelist the characters, since int() allows a lot of bizarre stuff.
- if not cls._HEX_DIGITS.issuperset(hextet_str):
- raise ValueError("Only hex digits permitted in %r" % hextet_str)
- # We do the length check second, since the invalid character error
- # is likely to be more informative for the user
- if len(hextet_str) > 4:
- msg = "At most 4 characters permitted in %r"
- raise ValueError(msg % hextet_str)
- # Length check means we can skip checking the integer value
- return int(hextet_str, 16)
-
- @classmethod
- def _compress_hextets(cls, hextets):
- """Compresses a list of hextets.
-
- Compresses a list of strings, replacing the longest continuous
- sequence of "0" in the list with "" and adding empty strings at
-        the beginning or at the end of the list such that subsequently
- calling ":".join(hextets) will produce the compressed version of
- the IPv6 address.
-
- Args:
- hextets: A list of strings, the hextets to compress.
-
- Returns:
- A list of strings.
-
- """
- best_doublecolon_start = -1
- best_doublecolon_len = 0
- doublecolon_start = -1
- doublecolon_len = 0
- for index, hextet in enumerate(hextets):
- if hextet == '0':
- doublecolon_len += 1
- if doublecolon_start == -1:
- # Start of a sequence of zeros.
- doublecolon_start = index
- if doublecolon_len > best_doublecolon_len:
- # This is the longest sequence of zeros so far.
- best_doublecolon_len = doublecolon_len
- best_doublecolon_start = doublecolon_start
- else:
- doublecolon_len = 0
- doublecolon_start = -1
-
- if best_doublecolon_len > 1:
- best_doublecolon_end = (best_doublecolon_start +
- best_doublecolon_len)
- # For zeros at the end of the address.
- if best_doublecolon_end == len(hextets):
- hextets += ['']
- hextets[best_doublecolon_start:best_doublecolon_end] = ['']
- # For zeros at the beginning of the address.
- if best_doublecolon_start == 0:
- hextets = [''] + hextets
-
- return hextets
-
- @classmethod
- def _string_from_ip_int(cls, ip_int=None):
- """Turns a 128-bit integer into hexadecimal notation.
-
- Args:
- ip_int: An integer, the IP address.
-
- Returns:
- A string, the hexadecimal representation of the address.
-
- Raises:
- ValueError: The address is bigger than 128 bits of all ones.
-
- """
- if ip_int is None:
- ip_int = int(cls._ip)
-
- if ip_int > cls._ALL_ONES:
- raise ValueError('IPv6 address is too large')
-
- hex_str = '%032x' % ip_int
- hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
-
- hextets = cls._compress_hextets(hextets)
- return ':'.join(hextets)
-
- def _explode_shorthand_ip_string(self):
- """Expand a shortened IPv6 address.
-
- Args:
- ip_str: A string, the IPv6 address.
-
- Returns:
- A string, the expanded IPv6 address.
-
- """
- if isinstance(self, IPv6Network):
- ip_str = _compat_str(self.network_address)
- elif isinstance(self, IPv6Interface):
- ip_str = _compat_str(self.ip)
- else:
- ip_str = _compat_str(self)
-
- ip_int = self._ip_int_from_string(ip_str)
- hex_str = '%032x' % ip_int
- parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
- if isinstance(self, (_BaseNetwork, IPv6Interface)):
- return '%s/%d' % (':'.join(parts), self._prefixlen)
- return ':'.join(parts)
-
- def _reverse_pointer(self):
- """Return the reverse DNS pointer name for the IPv6 address.
-
- This implements the method described in RFC3596 2.5.
-
- """
- reverse_chars = self.exploded[::-1].replace(':', '')
- return '.'.join(reverse_chars) + '.ip6.arpa'
-
- @property
- def max_prefixlen(self):
- return self._max_prefixlen
-
- @property
- def version(self):
- return self._version
-
-
-class IPv6Address(_BaseV6, _BaseAddress):
-
- """Represent and manipulate single IPv6 Addresses."""
-
- __slots__ = ('_ip', '__weakref__')
-
- def __init__(self, address):
- """Instantiate a new IPv6 address object.
-
- Args:
- address: A string or integer representing the IP
-
- Additionally, an integer can be passed, so
- IPv6Address('2001:db8::') ==
- IPv6Address(42540766411282592856903984951653826560)
- or, more generally
- IPv6Address(int(IPv6Address('2001:db8::'))) ==
- IPv6Address('2001:db8::')
-
- Raises:
- AddressValueError: If address isn't a valid IPv6 address.
-
- """
- # Efficient constructor from integer.
- if isinstance(address, _compat_int_types):
- self._check_int_address(address)
- self._ip = address
- return
-
- # Constructing from a packed address
- if isinstance(address, bytes):
- self._check_packed_address(address, 16)
- bvs = _compat_bytes_to_byte_vals(address)
- self._ip = _compat_int_from_byte_vals(bvs, 'big')
- return
-
- # Assume input argument to be string or any object representation
- # which converts into a formatted IP string.
- addr_str = _compat_str(address)
- if '/' in addr_str:
- raise AddressValueError("Unexpected '/' in %r" % address)
- self._ip = self._ip_int_from_string(addr_str)
-
- @property
- def packed(self):
- """The binary representation of this address."""
- return v6_int_to_packed(self._ip)
-
- @property
- def is_multicast(self):
- """Test if the address is reserved for multicast use.
-
- Returns:
- A boolean, True if the address is a multicast address.
- See RFC 2373 2.7 for details.
-
- """
- return self in self._constants._multicast_network
-
- @property
- def is_reserved(self):
- """Test if the address is otherwise IETF reserved.
-
- Returns:
- A boolean, True if the address is within one of the
- reserved IPv6 Network ranges.
-
- """
- return any(self in x for x in self._constants._reserved_networks)
-
- @property
- def is_link_local(self):
- """Test if the address is reserved for link-local.
-
- Returns:
- A boolean, True if the address is reserved per RFC 4291.
-
- """
- return self in self._constants._linklocal_network
-
- @property
- def is_site_local(self):
- """Test if the address is reserved for site-local.
-
- Note that the site-local address space has been deprecated by RFC 3879.
- Use is_private to test if this address is in the space of unique local
- addresses as defined by RFC 4193.
-
- Returns:
- A boolean, True if the address is reserved per RFC 3513 2.5.6.
-
- """
- return self in self._constants._sitelocal_network
-
- @property
- def is_private(self):
- """Test if this address is allocated for private networks.
-
- Returns:
- A boolean, True if the address is reserved per
- iana-ipv6-special-registry.
-
- """
- return any(self in net for net in self._constants._private_networks)
-
- @property
- def is_global(self):
- """Test if this address is allocated for public networks.
-
- Returns:
-            A boolean, True if the address is not reserved per
- iana-ipv6-special-registry.
-
- """
- return not self.is_private
-
- @property
- def is_unspecified(self):
- """Test if the address is unspecified.
-
- Returns:
- A boolean, True if this is the unspecified address as defined in
- RFC 2373 2.5.2.
-
- """
- return self._ip == 0
-
- @property
- def is_loopback(self):
- """Test if the address is a loopback address.
-
- Returns:
- A boolean, True if the address is a loopback address as defined in
- RFC 2373 2.5.3.
-
- """
- return self._ip == 1
-
- @property
- def ipv4_mapped(self):
- """Return the IPv4 mapped address.
-
- Returns:
- If the IPv6 address is a v4 mapped address, return the
- IPv4 mapped address. Return None otherwise.
-
- """
- if (self._ip >> 32) != 0xFFFF:
- return None
- return IPv4Address(self._ip & 0xFFFFFFFF)
-
- @property
- def teredo(self):
- """Tuple of embedded teredo IPs.
-
- Returns:
- Tuple of the (server, client) IPs or None if the address
- doesn't appear to be a teredo address (doesn't start with
- 2001::/32)
-
- """
- if (self._ip >> 96) != 0x20010000:
- return None
- return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
- IPv4Address(~self._ip & 0xFFFFFFFF))
-
- @property
- def sixtofour(self):
- """Return the IPv4 6to4 embedded address.
-
- Returns:
- The IPv4 6to4-embedded address if present or None if the
- address doesn't appear to contain a 6to4 embedded address.
-
- """
- if (self._ip >> 112) != 0x2002:
- return None
- return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
-
-
-class IPv6Interface(IPv6Address):
-
- def __init__(self, address):
- if isinstance(address, (bytes, _compat_int_types)):
- IPv6Address.__init__(self, address)
- self.network = IPv6Network(self._ip)
- self._prefixlen = self._max_prefixlen
- return
- if isinstance(address, tuple):
- IPv6Address.__init__(self, address[0])
- if len(address) > 1:
- self._prefixlen = int(address[1])
- else:
- self._prefixlen = self._max_prefixlen
- self.network = IPv6Network(address, strict=False)
- self.netmask = self.network.netmask
- self.hostmask = self.network.hostmask
- return
-
- addr = _split_optional_netmask(address)
- IPv6Address.__init__(self, addr[0])
- self.network = IPv6Network(address, strict=False)
- self.netmask = self.network.netmask
- self._prefixlen = self.network._prefixlen
- self.hostmask = self.network.hostmask
-
- def __str__(self):
- return '%s/%d' % (self._string_from_ip_int(self._ip),
- self.network.prefixlen)
-
- def __eq__(self, other):
- address_equal = IPv6Address.__eq__(self, other)
- if not address_equal or address_equal is NotImplemented:
- return address_equal
- try:
- return self.network == other.network
- except AttributeError:
- # An interface with an associated network is NOT the
- # same as an unassociated address. That's why the hash
- # takes the extra info into account.
- return False
-
- def __lt__(self, other):
- address_less = IPv6Address.__lt__(self, other)
- if address_less is NotImplemented:
- return NotImplemented
- try:
- return self.network < other.network
- except AttributeError:
- # We *do* allow addresses and interfaces to be sorted. The
- # unassociated address is considered less than all interfaces.
- return False
-
- def __hash__(self):
- return self._ip ^ self._prefixlen ^ int(self.network.network_address)
-
- __reduce__ = _IPAddressBase.__reduce__
-
- @property
- def ip(self):
- return IPv6Address(self._ip)
-
- @property
- def with_prefixlen(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self._prefixlen)
-
- @property
- def with_netmask(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self.netmask)
-
- @property
- def with_hostmask(self):
- return '%s/%s' % (self._string_from_ip_int(self._ip),
- self.hostmask)
-
- @property
- def is_unspecified(self):
- return self._ip == 0 and self.network.is_unspecified
-
- @property
- def is_loopback(self):
- return self._ip == 1 and self.network.is_loopback
-
-
-class IPv6Network(_BaseV6, _BaseNetwork):
-
- """This class represents and manipulates 128-bit IPv6 networks.
-
- Attributes: [examples for IPv6('2001:db8::1000/124')]
- .network_address: IPv6Address('2001:db8::1000')
- .hostmask: IPv6Address('::f')
- .broadcast_address: IPv6Address('2001:db8::100f')
- .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
- .prefixlen: 124
-
- """
-
- # Class to use when creating address objects
- _address_class = IPv6Address
-
- def __init__(self, address, strict=True):
- """Instantiate a new IPv6 Network object.
-
- Args:
- address: A string or integer representing the IPv6 network or the
- IP and prefix/netmask.
- '2001:db8::/128'
- '2001:db8:0000:0000:0000:0000:0000:0000/128'
- '2001:db8::'
- are all functionally the same in IPv6. That is to say,
- failing to provide a subnetmask will create an object with
- a mask of /128.
-
- Additionally, an integer can be passed, so
- IPv6Network('2001:db8::') ==
- IPv6Network(42540766411282592856903984951653826560)
- or, more generally
- IPv6Network(int(IPv6Network('2001:db8::'))) ==
- IPv6Network('2001:db8::')
-
- strict: A boolean. If true, ensure that we have been passed
- A true network address, eg, 2001:db8::1000/124 and not an
- IP address on a network, eg, 2001:db8::1/124.
-
- Raises:
- AddressValueError: If address isn't a valid IPv6 address.
- NetmaskValueError: If the netmask isn't valid for
- an IPv6 address.
- ValueError: If strict was True and a network address was not
- supplied.
-
- """
- _BaseNetwork.__init__(self, address)
-
- # Efficient constructor from integer or packed address
- if isinstance(address, (bytes, _compat_int_types)):
- self.network_address = IPv6Address(address)
- self.netmask, self._prefixlen = self._make_netmask(
- self._max_prefixlen)
- return
-
- if isinstance(address, tuple):
- if len(address) > 1:
- arg = address[1]
- else:
- arg = self._max_prefixlen
- self.netmask, self._prefixlen = self._make_netmask(arg)
- self.network_address = IPv6Address(address[0])
- packed = int(self.network_address)
- if packed & int(self.netmask) != packed:
- if strict:
- raise ValueError('%s has host bits set' % self)
- else:
- self.network_address = IPv6Address(packed &
- int(self.netmask))
- return
-
- # Assume input argument to be string or any object representation
- # which converts into a formatted IP prefix string.
- addr = _split_optional_netmask(address)
-
- self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
-
- if len(addr) == 2:
- arg = addr[1]
- else:
- arg = self._max_prefixlen
- self.netmask, self._prefixlen = self._make_netmask(arg)
-
- if strict:
- if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
- self.network_address):
- raise ValueError('%s has host bits set' % self)
- self.network_address = IPv6Address(int(self.network_address) &
- int(self.netmask))
-
- if self._prefixlen == (self._max_prefixlen - 1):
- self.hosts = self.__iter__
-
- def hosts(self):
- """Generate Iterator over usable hosts in a network.
-
- This is like __iter__ except it doesn't return the
- Subnet-Router anycast address.
-
- """
- network = int(self.network_address)
- broadcast = int(self.broadcast_address)
- for x in _compat_range(network + 1, broadcast + 1):
- yield self._address_class(x)
-
- @property
- def is_site_local(self):
- """Test if the address is reserved for site-local.
-
- Note that the site-local address space has been deprecated by RFC 3879.
- Use is_private to test if this address is in the space of unique local
- addresses as defined by RFC 4193.
-
- Returns:
- A boolean, True if the address is reserved per RFC 3513 2.5.6.
-
- """
- return (self.network_address.is_site_local and
- self.broadcast_address.is_site_local)
-
-
-class _IPv6Constants(object):
-
- _linklocal_network = IPv6Network('fe80::/10')
-
- _multicast_network = IPv6Network('ff00::/8')
-
- _private_networks = [
- IPv6Network('::1/128'),
- IPv6Network('::/128'),
- IPv6Network('::ffff:0:0/96'),
- IPv6Network('100::/64'),
- IPv6Network('2001::/23'),
- IPv6Network('2001:2::/48'),
- IPv6Network('2001:db8::/32'),
- IPv6Network('2001:10::/28'),
- IPv6Network('fc00::/7'),
- IPv6Network('fe80::/10'),
- ]
-
- _reserved_networks = [
- IPv6Network('::/8'), IPv6Network('100::/8'),
- IPv6Network('200::/7'), IPv6Network('400::/6'),
- IPv6Network('800::/5'), IPv6Network('1000::/4'),
- IPv6Network('4000::/3'), IPv6Network('6000::/3'),
- IPv6Network('8000::/3'), IPv6Network('A000::/3'),
- IPv6Network('C000::/3'), IPv6Network('E000::/4'),
- IPv6Network('F000::/5'), IPv6Network('F800::/6'),
- IPv6Network('FE00::/9'),
- ]
-
- _sitelocal_network = IPv6Network('fec0::/10')
-
-
-IPv6Address._constants = _IPv6Constants
diff --git a/lib/ipwhois/__init__.py b/lib/ipwhois/__init__.py
new file mode 100644
index 00000000..59983e20
--- /dev/null
+++ b/lib/ipwhois/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+__version__ = '0.13.0'
+
+from .exceptions import *
+from .net import Net
+from .ipwhois import IPWhois
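The package `__init__` above re-exports the exception classes plus `Net` and `IPWhois`, so callers only need the top-level import. A minimal import sketch, assuming the bundled `lib/` directory is already on `sys.path` (the address is illustrative only):

```python
from ipwhois import IPWhois, Net  # re-exported by the package __init__ above

net = Net('8.8.8.8', timeout=5)   # low-level DNS/whois/HTTP queries
obj = IPWhois('8.8.8.8')          # high-level wrapper that builds a Net internally
```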
diff --git a/lib/ipwhois/data/iso_3166-1.csv b/lib/ipwhois/data/iso_3166-1.csv
new file mode 100644
index 00000000..47a52f90
--- /dev/null
+++ b/lib/ipwhois/data/iso_3166-1.csv
@@ -0,0 +1,252 @@
+AD,Andorra,
+AE,United Arab Emirates,
+AF,Afghanistan,
+AG,Antigua and Barbuda,
+AI,Anguilla,
+AL,Albania,
+AM,Armenia,
+AN,Netherlands Antilles,
+AO,Angola,
+AP,"Asia/Pacific Region",
+AQ,Antarctica,
+AR,Argentina,
+AS,American Samoa,
+AT,Austria,
+AU,Australia,
+AW,Aruba,
+AX,Aland Islands,
+AZ,Azerbaijan,
+BA,Bosnia and Herzegovina,
+BB,Barbados,
+BD,Bangladesh,
+BE,Belgium,
+BF,Burkina Faso,
+BG,Bulgaria,
+BH,Bahrain,
+BI,Burundi,
+BJ,Benin,
+BL,Saint Bartelemey,
+BM,Bermuda,
+BN,Brunei Darussalam,
+BO,Bolivia,
+BQ,"Bonaire, Saint Eustatius and Saba",
+BR,Brazil,
+BS,Bahamas,
+BT,Bhutan,
+BV,Bouvet Island,
+BW,Botswana,
+BY,Belarus,
+BZ,Belize,
+CA,Canada,
+CC,Cocos (Keeling) Islands,
+CD,"Congo, The Democratic Republic of the",
+CF,Central African Republic,
+CG,Congo,
+CH,Switzerland,
+CI,Cote d'Ivoire,
+CK,Cook Islands,
+CL,Chile,
+CM,Cameroon,
+CN,China,
+CO,Colombia,
+CR,Costa Rica,
+CU,Cuba,
+CV,Cape Verde,
+CW,Curacao,
+CX,Christmas Island,
+CY,Cyprus,
+CZ,Czech Republic,
+DE,Germany,
+DJ,Djibouti,
+DK,Denmark,
+DM,Dominica,
+DO,Dominican Republic,
+DZ,Algeria,
+EC,Ecuador,
+EE,Estonia,
+EG,Egypt,
+EH,Western Sahara,
+ER,Eritrea,
+ES,Spain,
+ET,Ethiopia,
+EU,Europe,
+FI,Finland,
+FJ,Fiji,
+FK,Falkland Islands (Malvinas),
+FM,"Micronesia, Federated States of",
+FO,Faroe Islands,
+FR,France,
+GA,Gabon,
+GB,United Kingdom,
+GD,Grenada,
+GE,Georgia,
+GF,French Guiana,
+GG,Guernsey,
+GH,Ghana,
+GI,Gibraltar,
+GL,Greenland,
+GM,Gambia,
+GN,Guinea,
+GP,Guadeloupe,
+GQ,Equatorial Guinea,
+GR,Greece,
+GS,South Georgia and the South Sandwich Islands,
+GT,Guatemala,
+GU,Guam,
+GW,Guinea-Bissau,
+GY,Guyana,
+HK,Hong Kong,
+HM,Heard Island and McDonald Islands,
+HN,Honduras,
+HR,Croatia,
+HT,Haiti,
+HU,Hungary,
+ID,Indonesia,
+IE,Ireland,
+IL,Israel,
+IM,Isle of Man,
+IN,India,
+IO,British Indian Ocean Territory,
+IQ,Iraq,
+IR,"Iran, Islamic Republic of",
+IS,Iceland,
+IT,Italy,
+JE,Jersey,
+JM,Jamaica,
+JO,Jordan,
+JP,Japan,
+KE,Kenya,
+KG,Kyrgyzstan,
+KH,Cambodia,
+KI,Kiribati,
+KM,Comoros,
+KN,Saint Kitts and Nevis,
+KP,"Korea, Democratic People's Republic of",
+KR,"Korea, Republic of",
+KW,Kuwait,
+KY,Cayman Islands,
+KZ,Kazakhstan,
+LA,Lao People's Democratic Republic,
+LB,Lebanon,
+LC,Saint Lucia,
+LI,Liechtenstein,
+LK,Sri Lanka,
+LR,Liberia,
+LS,Lesotho,
+LT,Lithuania,
+LU,Luxembourg,
+LV,Latvia,
+LY,Libyan Arab Jamahiriya,
+MA,Morocco,
+MC,Monaco,
+MD,"Moldova, Republic of",
+ME,Montenegro,
+MF,Saint Martin,
+MG,Madagascar,
+MH,Marshall Islands,
+MK,Macedonia,
+ML,Mali,
+MM,Myanmar,
+MN,Mongolia,
+MO,Macao,
+MP,Northern Mariana Islands,
+MQ,Martinique,
+MR,Mauritania,
+MS,Montserrat,
+MT,Malta,
+MU,Mauritius,
+MV,Maldives,
+MW,Malawi,
+MX,Mexico,
+MY,Malaysia,
+MZ,Mozambique,
+NA,Namibia,
+NC,New Caledonia,
+NE,Niger,
+NF,Norfolk Island,
+NG,Nigeria,
+NI,Nicaragua,
+NL,Netherlands,
+NO,Norway,
+NP,Nepal,
+NR,Nauru,
+NU,Niue,
+NZ,New Zealand,
+OM,Oman,
+PA,Panama,
+PE,Peru,
+PF,French Polynesia,
+PG,Papua New Guinea,
+PH,Philippines,
+PK,Pakistan,
+PL,Poland,
+PM,Saint Pierre and Miquelon,
+PN,Pitcairn,
+PR,Puerto Rico,
+PS,Palestinian Territory,
+PT,Portugal,
+PW,Palau,
+PY,Paraguay,
+QA,Qatar,
+RE,Reunion,
+RO,Romania,
+RS,Serbia,
+RU,Russian Federation,
+RW,Rwanda,
+SA,Saudi Arabia,
+SB,Solomon Islands,
+SC,Seychelles,
+SD,Sudan,
+SE,Sweden,
+SG,Singapore,
+SH,Saint Helena,
+SI,Slovenia,
+SJ,Svalbard and Jan Mayen,
+SK,Slovakia,
+SL,Sierra Leone,
+SM,San Marino,
+SN,Senegal,
+SO,Somalia,
+SR,Suriname,
+SS,South Sudan,
+ST,Sao Tome and Principe,
+SV,El Salvador,
+SX,Sint Maarten,
+SY,Syrian Arab Republic,
+SZ,Swaziland,
+TC,Turks and Caicos Islands,
+TD,Chad,
+TF,French Southern Territories,
+TG,Togo,
+TH,Thailand,
+TJ,Tajikistan,
+TK,Tokelau,
+TL,Timor-Leste,
+TM,Turkmenistan,
+TN,Tunisia,
+TO,Tonga,
+TR,Turkey,
+TT,Trinidad and Tobago,
+TV,Tuvalu,
+TW,Taiwan,
+TZ,"Tanzania, United Republic of",
+UA,Ukraine,
+UG,Uganda,
+UM,United States Minor Outlying Islands,
+US,United States,
+UY,Uruguay,
+UZ,Uzbekistan,
+VA,Holy See (Vatican City State),
+VC,Saint Vincent and the Grenadines,
+VE,Venezuela,
+VG,"Virgin Islands, British",
+VI,"Virgin Islands, U.S.",
+VN,Vietnam,
+VU,Vanuatu,
+WF,Wallis and Futuna,
+WS,Samoa,
+YE,Yemen,
+YT,Mayotte,
+ZA,South Africa,
+ZM,Zambia,
+ZW,Zimbabwe,
\ No newline at end of file
diff --git a/lib/ipwhois/data/iso_3166-1_list_en.xml b/lib/ipwhois/data/iso_3166-1_list_en.xml
new file mode 100644
index 00000000..274171b1
--- /dev/null
+++ b/lib/ipwhois/data/iso_3166-1_list_en.xml
@@ -0,0 +1,1003 @@
+
+
+
+ AFGHANISTAN
+ AF
+
+
+ ALAND ISLANDS
+ AX
+
+
+ ALBANIA
+ AL
+
+
+ ALGERIA
+ DZ
+
+
+ AMERICAN SAMOA
+ AS
+
+
+ ANDORRA
+ AD
+
+
+ ANGOLA
+ AO
+
+
+ ANGUILLA
+ AI
+
+
+ ANTARCTICA
+ AQ
+
+
+ ANTIGUA AND BARBUDA
+ AG
+
+
+ NETHERLANDS ANTILLES
+ AN
+
+
+ ARGENTINA
+ AR
+
+
+ ARMENIA
+ AM
+
+
+ ARUBA
+ AW
+
+
+ AUSTRALIA
+ AU
+
+
+ AUSTRIA
+ AT
+
+
+ AZERBAIJAN
+ AZ
+
+
+ BAHAMAS
+ BS
+
+
+ BAHRAIN
+ BH
+
+
+ BANGLADESH
+ BD
+
+
+ BARBADOS
+ BB
+
+
+ BELARUS
+ BY
+
+
+ BELGIUM
+ BE
+
+
+ BELIZE
+ BZ
+
+
+ BENIN
+ BJ
+
+
+ BERMUDA
+ BM
+
+
+ BHUTAN
+ BT
+
+
+ BOLIVIA, PLURINATIONAL STATE OF
+ BO
+
+
+ BONAIRE, SINT EUSTATIUS AND SABA
+ BQ
+
+
+ BOSNIA AND HERZEGOVINA
+ BA
+
+
+ BOTSWANA
+ BW
+
+
+ BOUVET ISLAND
+ BV
+
+
+ BRAZIL
+ BR
+
+
+ BRITISH INDIAN OCEAN TERRITORY
+ IO
+
+
+ BRUNEI DARUSSALAM
+ BN
+
+
+ BULGARIA
+ BG
+
+
+ BURKINA FASO
+ BF
+
+
+ BURUNDI
+ BI
+
+
+ CAMBODIA
+ KH
+
+
+ CAMEROON
+ CM
+
+
+ CANADA
+ CA
+
+
+ CAPE VERDE
+ CV
+
+
+ CAYMAN ISLANDS
+ KY
+
+
+ CENTRAL AFRICAN REPUBLIC
+ CF
+
+
+ CHAD
+ TD
+
+
+ CHILE
+ CL
+
+
+ CHINA
+ CN
+
+
+ CHRISTMAS ISLAND
+ CX
+
+
+ COCOS (KEELING) ISLANDS
+ CC
+
+
+ COLOMBIA
+ CO
+
+
+ COMOROS
+ KM
+
+
+ CONGO
+ CG
+
+
+ CONGO, THE DEMOCRATIC REPUBLIC OF THE
+ CD
+
+
+ COOK ISLANDS
+ CK
+
+
+ COSTA RICA
+ CR
+
+
+ COTE D'IVOIRE
+ CI
+
+
+ CROATIA
+ HR
+
+
+ CURACAO
+ CW
+
+
+ CUBA
+ CU
+
+
+ CYPRUS
+ CY
+
+
+ CZECH REPUBLIC
+ CZ
+
+
+ DENMARK
+ DK
+
+
+ DJIBOUTI
+ DJ
+
+
+ DOMINICA
+ DM
+
+
+ DOMINICAN REPUBLIC
+ DO
+
+
+ ECUADOR
+ EC
+
+
+ EGYPT
+ EG
+
+
+ EL SALVADOR
+ SV
+
+
+ EQUATORIAL GUINEA
+ GQ
+
+
+ ERITREA
+ ER
+
+
+ ESTONIA
+ EE
+
+
+ ETHIOPIA
+ ET
+
+
+ FALKLAND ISLANDS (MALVINAS)
+ FK
+
+
+ FAROE ISLANDS
+ FO
+
+
+ FIJI
+ FJ
+
+
+ FINLAND
+ FI
+
+
+ FRANCE
+ FR
+
+
+ FRENCH GUIANA
+ GF
+
+
+ FRENCH POLYNESIA
+ PF
+
+
+ FRENCH SOUTHERN TERRITORIES
+ TF
+
+
+ GABON
+ GA
+
+
+ GAMBIA
+ GM
+
+
+ GEORGIA
+ GE
+
+
+ GERMANY
+ DE
+
+
+ GHANA
+ GH
+
+
+ GIBRALTAR
+ GI
+
+
+ GREECE
+ GR
+
+
+ GREENLAND
+ GL
+
+
+ GRENADA
+ GD
+
+
+ GUADELOUPE
+ GP
+
+
+ GUAM
+ GU
+
+
+ GUATEMALA
+ GT
+
+
+ GUERNSEY
+ GG
+
+
+ GUINEA
+ GN
+
+
+ GUINEA-BISSAU
+ GW
+
+
+ GUYANA
+ GY
+
+
+ HAITI
+ HT
+
+
+ HEARD ISLAND AND MCDONALD ISLANDS
+ HM
+
+
+ HOLY SEE (VATICAN CITY STATE)
+ VA
+
+
+ HONDURAS
+ HN
+
+
+ HONG KONG
+ HK
+
+
+ HUNGARY
+ HU
+
+
+ ICELAND
+ IS
+
+
+ INDIA
+ IN
+
+
+ INDONESIA
+ ID
+
+
+ IRAN, ISLAMIC REPUBLIC OF
+ IR
+
+
+ IRAQ
+ IQ
+
+
+ IRELAND
+ IE
+
+
+ ISLE OF MAN
+ IM
+
+
+ ISRAEL
+ IL
+
+
+ ITALY
+ IT
+
+
+ JAMAICA
+ JM
+
+
+ JAPAN
+ JP
+
+
+ JERSEY
+ JE
+
+
+ JORDAN
+ JO
+
+
+ KAZAKHSTAN
+ KZ
+
+
+ KENYA
+ KE
+
+
+ KIRIBATI
+ KI
+
+
+ KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF
+ KP
+
+
+ KOREA, REPUBLIC OF
+ KR
+
+
+ KUWAIT
+ KW
+
+
+ KYRGYZSTAN
+ KG
+
+
+ LAO PEOPLE'S DEMOCRATIC REPUBLIC
+ LA
+
+
+ LATVIA
+ LV
+
+
+ LEBANON
+ LB
+
+
+ LESOTHO
+ LS
+
+
+ LIBERIA
+ LR
+
+
+ LIBYA
+ LY
+
+
+ LIECHTENSTEIN
+ LI
+
+
+ LITHUANIA
+ LT
+
+
+ LUXEMBOURG
+ LU
+
+
+ MACAO
+ MO
+
+
+ MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF
+ MK
+
+
+ MADAGASCAR
+ MG
+
+
+ MALAWI
+ MW
+
+
+ MALAYSIA
+ MY
+
+
+ MALDIVES
+ MV
+
+
+ MALI
+ ML
+
+
+ MALTA
+ MT
+
+
+ MARSHALL ISLANDS
+ MH
+
+
+ MARTINIQUE
+ MQ
+
+
+ MAURITANIA
+ MR
+
+
+ MAURITIUS
+ MU
+
+
+ MAYOTTE
+ YT
+
+
+ MEXICO
+ MX
+
+
+ MICRONESIA, FEDERATED STATES OF
+ FM
+
+
+ MOLDOVA, REPUBLIC OF
+ MD
+
+
+ MONACO
+ MC
+
+
+ MONGOLIA
+ MN
+
+
+ MONTENEGRO
+ ME
+
+
+ MONTSERRAT
+ MS
+
+
+ MOROCCO
+ MA
+
+
+ MOZAMBIQUE
+ MZ
+
+
+ MYANMAR
+ MM
+
+
+ NAMIBIA
+ NA
+
+
+ NAURU
+ NR
+
+
+ NEPAL
+ NP
+
+
+ NETHERLANDS
+ NL
+
+
+ NEW CALEDONIA
+ NC
+
+
+ NEW ZEALAND
+ NZ
+
+
+ NICARAGUA
+ NI
+
+
+ NIGER
+ NE
+
+
+ NIGERIA
+ NG
+
+
+ NIUE
+ NU
+
+
+ NORFOLK ISLAND
+ NF
+
+
+ NORTHERN MARIANA ISLANDS
+ MP
+
+
+ NORWAY
+ NO
+
+
+ OMAN
+ OM
+
+
+ PAKISTAN
+ PK
+
+
+ PALAU
+ PW
+
+
+ PALESTINIAN TERRITORY, OCCUPIED
+ PS
+
+
+ PANAMA
+ PA
+
+
+ PAPUA NEW GUINEA
+ PG
+
+
+ PARAGUAY
+ PY
+
+
+ PERU
+ PE
+
+
+ PHILIPPINES
+ PH
+
+
+ PITCAIRN
+ PN
+
+
+ POLAND
+ PL
+
+
+ PORTUGAL
+ PT
+
+
+ PUERTO RICO
+ PR
+
+
+ QATAR
+ QA
+
+
+ REUNION
+ RE
+
+
+ ROMANIA
+ RO
+
+
+ RUSSIAN FEDERATION
+ RU
+
+
+ RWANDA
+ RW
+
+
+ SAINT BARTHELEMY
+ BL
+
+
+ SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA
+ SH
+
+
+ SAINT KITTS AND NEVIS
+ KN
+
+
+ SAINT LUCIA
+ LC
+
+
+ SAINT MARTIN (FRENCH PART)
+ MF
+
+
+ SAINT PIERRE AND MIQUELON
+ PM
+
+
+ SAINT VINCENT AND THE GRENADINES
+ VC
+
+
+ SAMOA
+ WS
+
+
+ SAN MARINO
+ SM
+
+
+ SAO TOME AND PRINCIPE
+ ST
+
+
+ SAUDI ARABIA
+ SA
+
+
+ SENEGAL
+ SN
+
+
+ SERBIA
+ RS
+
+
+ SEYCHELLES
+ SC
+
+
+ SIERRA LEONE
+ SL
+
+
+ SINGAPORE
+ SG
+
+
+ SINT MAARTEN (DUTCH PART)
+ SX
+
+
+ SLOVAKIA
+ SK
+
+
+ SLOVENIA
+ SI
+
+
+ SOLOMON ISLANDS
+ SB
+
+
+ SOMALIA
+ SO
+
+
+ SOUTH AFRICA
+ ZA
+
+
+ SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS
+ GS
+
+
+ SOUTH SUDAN
+ SS
+
+
+ SPAIN
+ ES
+
+
+ SRI LANKA
+ LK
+
+
+ SUDAN
+ SD
+
+
+ SURINAME
+ SR
+
+
+ SVALBARD AND JAN MAYEN
+ SJ
+
+
+ SWAZILAND
+ SZ
+
+
+ SWEDEN
+ SE
+
+
+ SWITZERLAND
+ CH
+
+
+ SYRIAN ARAB REPUBLIC
+ SY
+
+
+ TAIWAN, PROVINCE OF CHINA
+ TW
+
+
+ TAJIKISTAN
+ TJ
+
+
+ TANZANIA, UNITED REPUBLIC OF
+ TZ
+
+
+ THAILAND
+ TH
+
+
+ TIMOR-LESTE
+ TL
+
+
+ TOGO
+ TG
+
+
+ TOKELAU
+ TK
+
+
+ TONGA
+ TO
+
+
+ TRINIDAD AND TOBAGO
+ TT
+
+
+ TUNISIA
+ TN
+
+
+ TURKEY
+ TR
+
+
+ TURKMENISTAN
+ TM
+
+
+ TURKS AND CAICOS ISLANDS
+ TC
+
+
+ TUVALU
+ TV
+
+
+ UGANDA
+ UG
+
+
+ UKRAINE
+ UA
+
+
+ UNITED ARAB EMIRATES
+ AE
+
+
+ UNITED KINGDOM
+ GB
+
+
+ UNITED STATES
+ US
+
+
+ UNITED STATES MINOR OUTLYING ISLANDS
+ UM
+
+
+ URUGUAY
+ UY
+
+
+ UZBEKISTAN
+ UZ
+
+
+ VANUATU
+ VU
+
+
+ VENEZUELA, BOLIVARIAN REPUBLIC OF
+ VE
+
+
+ VIET NAM
+ VN
+
+
+ VIRGIN ISLANDS, BRITISH
+ VG
+
+
+ VIRGIN ISLANDS, U.S.
+ VI
+
+
+ WALLIS AND FUTUNA
+ WF
+
+
+ WESTERN SAHARA
+ EH
+
+
+ YEMEN
+ YE
+
+
+ ZAMBIA
+ ZM
+
+
+ ZIMBABWE
+ ZW
+
+
diff --git a/lib/ipwhois/exceptions.py b/lib/ipwhois/exceptions.py
new file mode 100644
index 00000000..57f4684e
--- /dev/null
+++ b/lib/ipwhois/exceptions.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+
+class NetError(Exception):
+ """
+ An Exception for when a parameter provided is not an instance of
+ ipwhois.net.Net.
+ """
+
+
+class IPDefinedError(Exception):
+ """
+ An Exception for when the IP is defined (does not need to be resolved).
+ """
+
+
+class ASNLookupError(Exception):
+ """
+ An Exception for when the ASN lookup failed.
+ """
+
+
+class ASNRegistryError(Exception):
+ """
+ An Exception for when the ASN registry does not match one of the five
+ expected values (arin, ripencc, apnic, lacnic, afrinic).
+ """
+
+
+class HostLookupError(Exception):
+ """
+ An Exception for when the host lookup failed.
+ """
+
+
+class BlacklistError(Exception):
+ """
+ An Exception for when the server is in a blacklist.
+ """
+
+
+class WhoisLookupError(Exception):
+ """
+ An Exception for when the whois lookup failed.
+ """
+
+
+class HTTPLookupError(Exception):
+ """
+ An Exception for when the RDAP lookup failed.
+ """
+
+
+class HTTPRateLimitError(Exception):
+ """
+ An Exception for when HTTP queries exceed the NIC's request limit and have
+ exhausted all retries.
+ """
+
+
+class InvalidEntityContactObject(Exception):
+ """
+ An Exception for when JSON output is not an RDAP entity contact information
+ object:
+ https://tools.ietf.org/html/rfc7483#section-5.4
+ """
+
+
+class InvalidNetworkObject(Exception):
+ """
+ An Exception for when JSON output is not an RDAP network object:
+ https://tools.ietf.org/html/rfc7483#section-5.4
+ """
+
+
+class InvalidEntityObject(Exception):
+ """
+ An Exception for when JSON output is not an RDAP entity object:
+ https://tools.ietf.org/html/rfc7483#section-5.1
+ """
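These are the exception classes callers are expected to catch. A hedged sketch of a typical guard around a lookup; the helper function is illustrative, and the loopback case relies on the defined-address check implemented in net.py later in this patch:

```python
from ipwhois import IPWhois
from ipwhois.exceptions import (IPDefinedError, ASNRegistryError,
                                WhoisLookupError)

def safe_whois(ip_address):
    """Illustrative helper: return whois data, or a dict with an error."""
    try:
        return IPWhois(ip_address).lookup_whois()
    except IPDefinedError as e:
        # Reserved/defined addresses (e.g. the 127.0.0.1 loopback) are
        # rejected before any network query is made.
        return {'nets': [], 'error': str(e)}
    except (ASNRegistryError, WhoisLookupError) as e:
        # Unknown ASN registry, or the port 43 whois query failed.
        return {'nets': [], 'error': str(e)}
```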
diff --git a/lib/ipwhois/hr.py b/lib/ipwhois/hr.py
new file mode 100644
index 00000000..30837e81
--- /dev/null
+++ b/lib/ipwhois/hr.py
@@ -0,0 +1,355 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# TODO: Add '_links' for RFC/other references
+
+HR_ASN = {
+ 'asn': {
+ '_short': 'ASN',
+ '_name': 'Autonomous System Number',
+ '_description': 'Globally unique identifier used for routing '
+ 'information exchange with Autonomous Systems.'
+ },
+ 'asn_cidr': {
+ '_short': 'ASN CIDR Block',
+ '_name': 'ASN Classless Inter-Domain Routing Block',
+ '_description': 'Network routing block assigned to an ASN.'
+ },
+ 'asn_country_code': {
+ '_short': 'ASN Country Code',
+ '_name': 'ASN Assigned Country Code',
+ '_description': 'ASN assigned country code in ISO 3166-1 format.'
+ },
+ 'asn_date': {
+ '_short': 'ASN Date',
+ '_name': 'ASN Allocation Date',
+ '_description': 'ASN allocation date in ISO 8601 format.'
+ },
+ 'asn_registry': {
+ '_short': 'ASN Registry',
+ '_name': 'ASN Assigned Registry',
+ '_description': 'ASN assigned regional internet registry.'
+ }
+}
+
+HR_RDAP_COMMON = {
+ 'entities': {
+ '_short': 'Entities',
+ '_name': 'RIR Object Entities',
+ '_description': 'List of object names referenced by an RIR object.'
+ },
+ 'events': {
+ '_short': 'Events',
+ '_name': 'Events',
+ '_description': 'Events for an RIR object.',
+ 'action': {
+ '_short': 'Action',
+ '_name': 'Event Action (Reason)',
+ '_description': 'The reason for an event.'
+ },
+ 'timestamp': {
+ '_short': 'Timestamp',
+ '_name': 'Event Timestamp',
+ '_description': 'The date an event occurred in ISO 8601 '
+ 'format.'
+ },
+ 'actor': {
+ '_short': 'Actor',
+ '_name': 'Event Actor',
+ '_description': 'The identifier for an event initiator.'
+ }
+ },
+ 'handle': {
+ '_short': 'Handle',
+ '_name': 'RIR Handle',
+ '_description': 'Unique identifier for a registered object.'
+ },
+ 'links': {
+ '_short': 'Links',
+ '_name': 'Links',
+ '_description': 'HTTP/HTTPS links provided for an RIR object.'
+ },
+ 'notices': {
+ '_short': 'Notices',
+ '_name': 'Notices',
+ '_description': 'Notices for an RIR object.',
+ 'description': {
+ '_short': 'Description',
+ '_name': 'Notice Description',
+ '_description': 'The description/body of a notice.'
+ },
+ 'title': {
+ '_short': 'Title',
+ '_name': 'Notice Title',
+ '_description': 'The title/header for a notice.'
+ },
+ 'links': {
+ '_short': 'Links',
+ '_name': 'Notice Links',
+ '_description': 'HTTP/HTTPS links provided for a notice.'
+ }
+ },
+ 'remarks': {
+ '_short': 'Remarks',
+ '_name': 'Remarks',
+ '_description': 'Remarks for an RIR object.',
+ 'description': {
+ '_short': 'Description',
+ '_name': 'Remark Description',
+ '_description': 'The description/body of a remark.'
+ },
+ 'title': {
+ '_short': 'Title',
+ '_name': 'Remark Title',
+ '_description': 'The title/header for a remark.'
+ },
+ 'links': {
+ '_short': 'Links',
+ '_name': 'Remark Links',
+ '_description': 'HTTP/HTTPS links provided for a remark.'
+ }
+ },
+ 'status': {
+ '_short': 'Status',
+ '_name': 'Object Status',
+ '_description': 'List indicating the state of a registered object.'
+ }
+}
+
+HR_RDAP = {
+ 'network': {
+ '_short': 'Network',
+ '_name': 'RIR Network',
+ '_description': 'The assigned network for an IP address.',
+ 'cidr': {
+ '_short': 'CIDR Block',
+ '_name': 'Classless Inter-Domain Routing Block',
+ '_description': 'Network routing block an IP address belongs to.'
+ },
+ 'country': {
+ '_short': 'Country Code',
+ '_name': 'Country Code',
+ '_description': 'Country code registered with the RIR in '
+ 'ISO 3166-1 format.'
+ },
+ 'end_address': {
+ '_short': 'End Address',
+ '_name': 'Ending IP Address',
+ '_description': 'The last IP address in a network block.'
+ },
+ 'events': HR_RDAP_COMMON['events'],
+ 'handle': HR_RDAP_COMMON['handle'],
+ 'ip_version': {
+ '_short': 'IP Version',
+ '_name': 'IP Protocol Version',
+ '_description': 'The IP protocol version (v4 or v6) of an IP '
+ 'address.'
+ },
+ 'links': HR_RDAP_COMMON['links'],
+ 'name': {
+ '_short': 'Name',
+ '_name': 'RIR Network Name',
+ '_description': 'The identifier assigned to the network '
+ 'registration for an IP address.'
+ },
+ 'notices': HR_RDAP_COMMON['notices'],
+ 'parent_handle': {
+ '_short': 'Parent Handle',
+ '_name': 'RIR Parent Handle',
+ '_description': 'Unique identifier for the parent network of '
+ 'a registered network.'
+ },
+ 'remarks': HR_RDAP_COMMON['remarks'],
+ 'start_address': {
+ '_short': 'Start Address',
+ '_name': 'Starting IP Address',
+ '_description': 'The first IP address in a network block.'
+ },
+ 'status': HR_RDAP_COMMON['status'],
+ 'type': {
+ '_short': 'Type',
+ '_name': 'RIR Network Type',
+ '_description': 'The RIR classification of a registered network.'
+ }
+ },
+ 'entities': HR_RDAP_COMMON['entities'],
+ 'objects': {
+ '_short': 'Objects',
+ '_name': 'RIR Objects',
+ '_description': 'The objects (entities) referenced by an RIR network.',
+ 'contact': {
+ '_short': 'Contact',
+ '_name': 'Contact Information',
+ '_description': 'Contact information registered with an RIR '
+ 'object.',
+ 'address': {
+ '_short': 'Address',
+ '_name': 'Postal Address',
+ '_description': 'The contact postal address.'
+ },
+ 'email': {
+ '_short': 'Email',
+ '_name': 'Email Address',
+ '_description': 'The contact email address.'
+ },
+ 'kind': {
+ '_short': 'Kind',
+ '_name': 'Kind',
+ '_description': 'The contact information kind (individual, '
+ 'group, org, etc).'
+ },
+ 'name': {
+ '_short': 'Name',
+ '_name': 'Name',
+ '_description': 'The contact name.'
+ },
+ 'phone': {
+ '_short': 'Phone',
+ '_name': 'Phone Number',
+ '_description': 'The contact phone number.'
+ },
+ 'role': {
+ '_short': 'Role',
+ '_name': 'Role',
+ '_description': 'The contact\'s role.'
+ },
+ 'title': {
+ '_short': 'Title',
+ '_name': 'Title',
+ '_description': 'The contact\'s position or job title.'
+ }
+ },
+ 'entities': HR_RDAP_COMMON['entities'],
+ 'events': HR_RDAP_COMMON['events'],
+ 'events_actor': {
+ '_short': 'Events Misc',
+ '_name': 'Events w/o Actor',
+ '_description': 'An event for an RIR object with no event actor.',
+ 'action': {
+ '_short': 'Action',
+ '_name': 'Event Action (Reason)',
+ '_description': 'The reason for an event.'
+ },
+ 'timestamp': {
+ '_short': 'Timestamp',
+ '_name': 'Event Timestamp',
+ '_description': 'The date an event occurred in ISO 8601 '
+ 'format.'
+ }
+ },
+ 'handle': HR_RDAP_COMMON['handle'],
+ 'links': HR_RDAP_COMMON['links'],
+ 'notices': HR_RDAP_COMMON['notices'],
+ 'remarks': HR_RDAP_COMMON['remarks'],
+ 'roles': {
+ '_short': 'Roles',
+ '_name': 'Roles',
+ '_description': 'List of roles assigned to a registered object.'
+ },
+ 'status': HR_RDAP_COMMON['status'],
+ }
+}
+
+HR_WHOIS = {
+ 'nets': {
+ '_short': 'Network',
+ '_name': 'RIR Network',
+ '_description': 'The assigned network for an IP address. May be a '
+ 'parent or child network.',
+ 'address': {
+ '_short': 'Address',
+ '_name': 'Postal Address',
+ '_description': 'The contact postal address.'
+ },
+ 'cidr': {
+ '_short': 'CIDR Blocks',
+ '_name': 'Classless Inter-Domain Routing Blocks',
+ '_description': 'Network routing blocks an IP address belongs to.'
+ },
+ 'city': {
+ '_short': 'City',
+ '_name': 'City',
+ '_description': 'The city registered with a whois network.'
+ },
+ 'country': {
+ '_short': 'Country Code',
+ '_name': 'Country Code',
+ '_description': 'Country code registered for the network in '
+ 'ISO 3166-1 format.'
+ },
+ 'created': {
+ '_short': 'Created',
+ '_name': 'Created Timestamp',
+ '_description': 'The date the network was created in ISO 8601 '
+ 'format.'
+ },
+ 'description': {
+ '_short': 'Description',
+ '_name': 'Description',
+ '_description': 'The description for the network.'
+ },
+ 'emails': {
+ '_short': 'Emails',
+ '_name': 'Email Addresses',
+ '_description': 'The contact email addresses.'
+ },
+ 'handle': {
+ '_short': 'Handle',
+ '_name': 'RIR Network Handle',
+ '_description': 'Unique identifier for a registered network.'
+ },
+ 'name': {
+ '_short': 'Name',
+ '_name': 'RIR Network Name',
+ '_description': 'The identifier assigned to the network '
+ 'registration for an IP address.'
+ },
+ 'postal_code': {
+ '_short': 'Postal',
+ '_name': 'Postal Code',
+ '_description': 'The postal code registered with a whois network.'
+ },
+ 'range': {
+ '_short': 'Ranges',
+ '_name': 'CIDR Block Ranges',
+ '_description': 'Network routing blocks an IP address belongs to.'
+ },
+ 'state': {
+ '_short': 'State',
+ '_name': 'State',
+ '_description': 'The state registered with a whois network.'
+ },
+ 'updated': {
+ '_short': 'Updated',
+ '_name': 'Updated Timestamp',
+ '_description': 'The date the network was updated in ISO 8601 '
+ 'format.'
+ }
+ },
+ 'referral': {
+ '_short': 'Referral',
+ '_name': 'Referral Whois',
+ '_description': 'The referral whois data if referenced and enabled.',
+ }
+}
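The HR_* dictionaries carry only display metadata: each raw field name from the whois/RDAP parsers maps to a short label, a full name, and a description. A small sketch of how a UI layer might resolve a label; the helper is hypothetical, while the field names come from HR_WHOIS above:

```python
from ipwhois.hr import HR_WHOIS

def field_label(field, parent='nets'):
    """Hypothetical helper: short label for a raw whois field name."""
    meta = HR_WHOIS.get(parent, {}).get(field, {})
    return meta.get('_short', field)

print(field_label('cidr'))         # 'CIDR Blocks'
print(field_label('postal_code'))  # 'Postal'
```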
diff --git a/lib/ipwhois/ipwhois.py b/lib/ipwhois/ipwhois.py
new file mode 100644
index 00000000..3a6fb847
--- /dev/null
+++ b/lib/ipwhois/ipwhois.py
@@ -0,0 +1,244 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from . import Net
+import logging
+
+log = logging.getLogger(__name__)
+
+
+class IPWhois:
+ """
+ The wrapper class for performing whois/RDAP lookups and parsing for
+ IPv4 and IPv6 addresses.
+
+ Args:
+ address: An IPv4 or IPv6 address as a string, integer, IPv4Address, or
+ IPv6Address.
+ timeout: The default timeout for socket connections in seconds.
+ proxy_opener: The urllib.request.OpenerDirector request for proxy
+ support or None.
+ allow_permutations: Allow net.Net() to use additional methods if DNS
+ lookups to Cymru fail.
+ """
+
+ def __init__(self, address, timeout=5, proxy_opener=None,
+ allow_permutations=True):
+
+ self.net = Net(
+ address=address, timeout=timeout, proxy_opener=proxy_opener,
+ allow_permutations=allow_permutations
+ )
+
+ self.address = self.net.address
+ self.timeout = self.net.timeout
+ self.address_str = self.net.address_str
+ self.version = self.net.version
+ self.reversed = self.net.reversed
+ self.dns_zone = self.net.dns_zone
+
+ def __repr__(self):
+
+ return 'IPWhois({0}, {1}, {2})'.format(
+ self.address_str, str(self.timeout), repr(self.net.opener)
+ )
+
+ def lookup(self, *args, **kwargs):
+ """
+ Temporary wrapper for legacy whois lookups (moved to
+ IPWhois.lookup_whois()). This will be removed in a future
+ release (TBD).
+ """
+
+ from warnings import warn
+ warn("IPWhois.lookup() has been deprecated and will be removed. "
+ "You should now use IPWhois.lookup_whois() for legacy whois "
+ "lookups.")
+ return self.lookup_whois(*args, **kwargs)
+
+ def lookup_whois(self, inc_raw=False, retry_count=3, get_referral=False,
+ extra_blacklist=None, ignore_referral_errors=False,
+ field_list=None, asn_alts=None, extra_org_map=None):
+ """
+ The function for retrieving and parsing whois information for an IP
+ address via port 43 (WHOIS).
+
+ Args:
+ inc_raw: Boolean for whether to include the raw whois results in
+ the returned dictionary.
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ get_referral: Boolean for whether to retrieve referral whois
+ information, if available.
+ extra_blacklist: A list of blacklisted whois servers in addition to
+ the global BLACKLIST.
+ ignore_referral_errors: Boolean for whether to ignore and continue
+ when an exception is encountered on referral whois lookups.
+ field_list: If provided, a list of fields to parse:
+ ['name', 'handle', 'description', 'country', 'state', 'city',
+ 'address', 'postal_code', 'emails', 'created', 'updated']
+ asn_alts: Array of additional lookup types to attempt if the
+ ASN dns lookup fails. Allow permutations must be enabled.
+ Defaults to all ['whois', 'http'].
+ extra_org_map: Dictionary mapping org handles to RIRs. This is for
+ limited cases where ARIN REST (ASN fallback HTTP lookup) does
+ not show an RIR as the org handle e.g., DNIC (which is now the
+ built-in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR values are
+ (note the case sensitivity - this is meant to match the REST
+ result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'
+
+ Returns:
+ Dictionary:
+
+ :query: The IP address (String)
+ :asn: The Autonomous System Number (String)
+ :asn_date: The ASN Allocation date (String)
+ :asn_registry: The assigned ASN registry (String)
+ :asn_cidr: The assigned ASN CIDR (String)
+ :asn_country_code: The assigned ASN country code (String)
+ :nets: Dictionaries containing network information which consists
+ of the fields listed in the ipwhois.whois.RIR_WHOIS dictionary.
+ (List)
+ :raw: Raw whois results if the inc_raw parameter is True. (String)
+ :referral: Dictionary of referral whois information if get_referral
+ is True and the server isn't blacklisted. Consists of fields
+ listed in the ipwhois.whois.RWHOIS dictionary.
+ :raw_referral: Raw referral whois results if the inc_raw parameter
+ is True. (String)
+ """
+
+ from .whois import Whois
+
+ # Create the return dictionary.
+ results = {}
+
+ # Retrieve the ASN information.
+ log.debug('ASN lookup for {0}'.format(self.address_str))
+ asn_data, response = self.net.lookup_asn(
+ retry_count=retry_count, asn_alts=asn_alts,
+ extra_org_map=extra_org_map
+ )
+
+ # Add the ASN information to the return dictionary.
+ results.update(asn_data)
+
+ # Retrieve the whois data and parse.
+ whois = Whois(self.net)
+ log.debug('WHOIS lookup for {0}'.format(self.address_str))
+ whois_data = whois.lookup(
+ inc_raw=inc_raw, retry_count=retry_count, response=response,
+ get_referral=get_referral, extra_blacklist=extra_blacklist,
+ ignore_referral_errors=ignore_referral_errors, asn_data=asn_data,
+ field_list=field_list
+ )
+
+ # Add the RDAP information to the return dictionary.
+ results.update(whois_data)
+
+ return results
+
+ def lookup_rdap(self, inc_raw=False, retry_count=3, depth=0,
+ excluded_entities=None, bootstrap=False,
+ rate_limit_timeout=120, asn_alts=None, extra_org_map=None):
+ """
+ The function for retrieving and parsing whois information for an IP
+ address via HTTP (RDAP).
+
+ **This is now the recommended method, as RDAP contains much better
+ information to parse.**
+
+ Args:
+ inc_raw: Boolean for whether to include the raw whois results in
+ the returned dictionary.
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ depth: How many levels deep to run queries when additional
+ referenced objects are found.
+ excluded_entities: A list of entity handles to not perform lookups.
+ bootstrap: If True, performs lookups via ARIN bootstrap rather
+ than lookups based on ASN data. ASN lookups are not performed
+ and no output for any of the asn* fields is provided.
+ rate_limit_timeout: The number of seconds to wait before retrying
+ when a rate limit notice is returned via rdap+json.
+ asn_alts: Array of additional lookup types to attempt if the
+ ASN dns lookup fails. Allow permutations must be enabled.
+ Defaults to all ['whois', 'http'].
+ extra_org_map: Dictionary mapping org handles to RIRs. This is for
+ limited cases where ARIN REST (ASN fallback HTTP lookup) does
+ not show an RIR as the org handle e.g., DNIC (which is now the
+ built-in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR values are
+ (note the case sensitivity - this is meant to match the REST
+ result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'
+
+ Returns:
+ Dictionary:
+
+ :query: The IP address (String)
+ :asn: The Autonomous System Number (String)
+ :asn_date: The ASN Allocation date (String)
+ :asn_registry: The assigned ASN registry (String)
+ :asn_cidr: The assigned ASN CIDR (String)
+ :asn_country_code: The assigned ASN country code (String)
+ :entities: List of entity handles referred by the top level query.
+ :network: Dictionary containing network information which consists
+ of the fields listed in the ipwhois.rdap._RDAPNetwork dict.
+ :objects: Dictionary of (entity handle: entity dict) which consists
+ of the fields listed in the ipwhois.rdap._RDAPEntity dict.
+ :raw: (Dictionary) - Whois results in json format if the inc_raw
+ parameter is True.
+ """
+
+ from .rdap import RDAP
+
+ # Create the return dictionary.
+ results = {}
+
+ asn_data = None
+ response = None
+ if not bootstrap:
+
+ # Retrieve the ASN information.
+ log.debug('ASN lookup for {0}'.format(self.address_str))
+ asn_data, asn_response = self.net.lookup_asn(
+ retry_count=retry_count, asn_alts=asn_alts,
+ extra_org_map=extra_org_map
+ )
+
+ # Add the ASN information to the return dictionary.
+ results.update(asn_data)
+
+ # Retrieve the RDAP data and parse.
+ rdap = RDAP(self.net)
+ log.debug('RDAP lookup for {0}'.format(self.address_str))
+ rdap_data = rdap.lookup(
+ inc_raw=inc_raw, retry_count=retry_count, asn_data=asn_data,
+ depth=depth, excluded_entities=excluded_entities,
+ response=response, bootstrap=bootstrap,
+ rate_limit_timeout=rate_limit_timeout
+ )
+
+ # Add the RDAP information to the return dictionary.
+ results.update(rdap_data)
+
+ return results
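Taken together, lookup_whois() drives the legacy port 43 path and lookup_rdap() the HTTP/RDAP path, both returning the ASN fields plus their own network data. A usage sketch; the address and printed fields are illustrative, and the keys are those documented in the docstrings above:

```python
from ipwhois import IPWhois

obj = IPWhois('74.125.225.229', timeout=5)

# Legacy port 43 whois lookup.
whois = obj.lookup_whois(inc_raw=False, retry_count=3)
print(whois['asn'], whois['asn_registry'])
print(whois['nets'][0].get('description'))

# Recommended RDAP (HTTP) lookup; depth=1 also resolves referenced entities.
rdap = obj.lookup_rdap(depth=1)
print(rdap['network'].get('name'), rdap['entities'])
```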
diff --git a/lib/ipwhois/net.py b/lib/ipwhois/net.py
new file mode 100644
index 00000000..9f0a45a9
--- /dev/null
+++ b/lib/ipwhois/net.py
@@ -0,0 +1,958 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import socket
+import dns.resolver
+import json
+import logging
+from time import sleep
+
+# Import the dnspython3 rdtypes to fix the dynamic import problem when frozen.
+import dns.rdtypes.ANY.TXT # @UnusedImport
+
+from .exceptions import (IPDefinedError, ASNRegistryError, ASNLookupError,
+ BlacklistError, WhoisLookupError, HTTPLookupError,
+ HostLookupError, HTTPRateLimitError)
+from .whois import RIR_WHOIS
+from .utils import ipv4_is_defined, ipv6_is_defined
+
+if sys.version_info >= (3, 3): # pragma: no cover
+ from ipaddress import (ip_address,
+ IPv4Address,
+ IPv6Address,
+ ip_network,
+ summarize_address_range,
+ collapse_addresses)
+else: # pragma: no cover
+ from ipaddr import (IPAddress as ip_address,
+ IPv4Address,
+ IPv6Address,
+ IPNetwork as ip_network,
+ summarize_address_range,
+ collapse_address_list as collapse_addresses)
+
+try: # pragma: no cover
+ from urllib.request import (OpenerDirector,
+ ProxyHandler,
+ build_opener,
+ Request,
+ URLError)
+ from urllib.parse import urlencode
+except ImportError: # pragma: no cover
+ from urllib2 import (OpenerDirector,
+ ProxyHandler,
+ build_opener,
+ Request,
+ URLError)
+ from urllib import urlencode
+
+log = logging.getLogger(__name__)
+
+# POSSIBLY UPDATE TO USE RDAP
+ARIN = 'http://whois.arin.net/rest/nets;q={0}?showDetails=true&showARIN=true'
+
+# National Internet Registry
+NIR = {
+ 'jpnic': {
+ 'url': ('http://whois.nic.ad.jp/cgi-bin/whois_gw?lang=%2Fe&key={0}'
+ '&submit=query'),
+ 'request_type': 'GET',
+ 'request_headers': {'Accept': 'text/html'}
+ },
+ 'krnic': {
+ 'url': 'http://whois.kisa.or.kr/eng/whois.jsc',
+ 'request_type': 'POST',
+ 'request_headers': {'Accept': 'text/html'},
+ 'form_data_ip_field': 'query'
+ }
+}
+
+CYMRU_WHOIS = 'whois.cymru.com'
+
+IPV4_DNS_ZONE = '{0}.origin.asn.cymru.com'
+
+IPV6_DNS_ZONE = '{0}.origin6.asn.cymru.com'
+
+BLACKLIST = [
+ 'root.rwhois.net'
+]
+
+ORG_MAP = {
+ 'ARIN': 'arin',
+ 'VR-ARIN': 'arin',
+ 'RIPE': 'ripencc',
+ 'APNIC': 'apnic',
+ 'LACNIC': 'lacnic',
+ 'AFRINIC': 'afrinic',
+ 'DNIC': 'arin'
+}
+
+
+class Net:
+ """
+ The class for performing network queries.
+
+ Args:
+ address: An IPv4 or IPv6 address in string format.
+ timeout: The default timeout for socket connections in seconds.
+ proxy_opener: The urllib.request.OpenerDirector request for proxy
+ support or None.
+ allow_permutations: Use additional methods if DNS lookups to Cymru
+ fail.
+
+ Raises:
+ IPDefinedError: The address provided is defined (does not need to be
+ resolved).
+ """
+
+ def __init__(self, address, timeout=5, proxy_opener=None,
+ allow_permutations=True):
+
+ # IPv4Address or IPv6Address
+ if isinstance(address, IPv4Address) or isinstance(
+ address, IPv6Address):
+
+ self.address = address
+
+ else:
+
+ # Use ipaddress package exception handling.
+ self.address = ip_address(address)
+
+ # Default timeout for socket connections.
+ self.timeout = timeout
+
+ # Allow other than DNS lookups for ASNs.
+ self.allow_permutations = allow_permutations
+
+ self.dns_resolver = dns.resolver.Resolver()
+ self.dns_resolver.timeout = timeout
+ self.dns_resolver.lifetime = timeout
+
+ # Proxy opener.
+ if isinstance(proxy_opener, OpenerDirector):
+
+ self.opener = proxy_opener
+
+ else:
+
+ handler = ProxyHandler()
+ self.opener = build_opener(handler)
+
+ # IP address in string format for use in queries.
+ self.address_str = self.address.__str__()
+
+ # Determine the IP version, 4 or 6
+ self.version = self.address.version
+
+ if self.version == 4:
+
+ # Check if no ASN/whois resolution needs to occur.
+ is_defined = ipv4_is_defined(self.address_str)
+
+ if is_defined[0]:
+
+ raise IPDefinedError(
+ 'IPv4 address {0} is already defined as {1} via '
+ '{2}.'.format(
+ self.address_str, is_defined[1], is_defined[2]
+ )
+ )
+
+ # Reverse the IPv4Address for the DNS ASN query.
+ split = self.address_str.split('.')
+ split.reverse()
+ self.reversed = '.'.join(split)
+
+ self.dns_zone = IPV4_DNS_ZONE.format(self.reversed)
+
+ else:
+
+ # Check if no ASN/whois resolution needs to occur.
+ is_defined = ipv6_is_defined(self.address_str)
+
+ if is_defined[0]:
+
+ raise IPDefinedError(
+ 'IPv6 address {0} is already defined as {1} via '
+ '{2}.'.format(
+ self.address_str, is_defined[1], is_defined[2]
+ )
+ )
+
+ # Explode the IPv6Address to fill in any missing 0's.
+ exploded = self.address.exploded
+
+ # Cymru seems to time out when the IPv6 address has trailing '0000'
+ # groups. Remove these groups.
+ groups = exploded.split(':')
+ for index, value in reversed(list(enumerate(groups))):
+
+ if value == '0000':
+
+ del groups[index]
+
+ else:
+
+ break
+
+ exploded = ':'.join(groups)
+
+ # Reverse the IPv6Address for the DNS ASN query.
+ val = str(exploded).replace(':', '')
+ val = val[::-1]
+ self.reversed = '.'.join(val)
+
+ self.dns_zone = IPV6_DNS_ZONE.format(self.reversed)
+
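As the constructor above shows, the Cymru DNS zone is just the address reversed and appended to origin.asn.cymru.com (or origin6.asn.cymru.com for IPv6, after trailing '0000' groups are trimmed). A short illustration of the transformation; the sample addresses are arbitrary:

```python
# IPv4: octets reversed and joined with dots (mirrors Net.__init__ above).
addr = '24.24.24.1'
reversed_v4 = '.'.join(reversed(addr.split('.')))        # '1.24.24.24'
dns_zone_v4 = '{0}.origin.asn.cymru.com'.format(reversed_v4)

# IPv6: '2001:db8::' explodes to '2001:0db8:0000:...', trailing '0000' groups
# are dropped, colons removed, and the remaining hex digits reversed:
#   '2001:0db8' -> '8.b.d.0.1.0.0.2.origin6.asn.cymru.com'
```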
+ def get_asn_dns(self, result=None):
+ """
+ The function for retrieving ASN information for an IP address from
+ Cymru via port 53 (DNS).
+
+ Args:
+ result: Optional result object. This bypasses the ASN lookup.
+
+ Returns:
+ Dictionary: A dictionary containing the following keys:
+ asn (String) - The Autonomous System Number.
+ asn_date (String) - The ASN Allocation date.
+ asn_registry (String) - The assigned ASN registry.
+ asn_cidr (String) - The assigned ASN CIDR.
+ asn_country_code (String) - The assigned ASN country code.
+
+ Raises:
+ ASNRegistryError: The ASN registry is not known.
+ ASNLookupError: The ASN lookup failed.
+ """
+
+ try:
+
+ if result is None:
+
+ log.debug('ASN query for {0}'.format(self.dns_zone))
+ data = self.dns_resolver.query(self.dns_zone, 'TXT')
+ temp = str(data[0]).split('|')
+
+ else:
+
+ temp = result
+
+ # Parse out the ASN information.
+ ret = {'asn_registry': temp[3].strip(' \n')}
+
+ if ret['asn_registry'] not in RIR_WHOIS.keys():
+
+ raise ASNRegistryError(
+ 'ASN registry {0} is not known.'.format(
+ ret['asn_registry'])
+ )
+
+ ret['asn'] = temp[0].strip(' "\n')
+ ret['asn_cidr'] = temp[1].strip(' \n')
+ ret['asn_country_code'] = temp[2].strip(' \n').upper()
+ ret['asn_date'] = temp[4].strip(' "\n')
+
+ return ret
+
+ except ASNRegistryError:
+
+ raise
+
+ except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers,
+ dns.resolver.NoAnswer, dns.exception.Timeout) as e:
+
+ raise ASNLookupError(
+ 'ASN lookup failed (DNS {0}) for {1}.'.format(
+ e.__class__.__name__, self.address_str)
+ )
+
+ except:
+
+ raise ASNLookupError(
+ 'ASN lookup failed for {0}.'.format(self.address_str)
+ )
+
+ def get_asn_whois(self, retry_count=3, result=None):
+ """
+ The function for retrieving ASN information for an IP address from
+ Cymru via port 43/tcp (WHOIS).
+
+ Args:
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ result: Optional result object. This bypasses the ASN lookup.
+
+ Returns:
+ Dictionary: A dictionary containing the following keys:
+ asn (String) - The Autonomous System Number.
+ asn_date (String) - The ASN Allocation date.
+ asn_registry (String) - The assigned ASN registry.
+ asn_cidr (String) - The assigned ASN CIDR.
+ asn_country_code (String) - The assigned ASN country code.
+
+ Raises:
+ ASNRegistryError: The ASN registry is not known.
+ ASNLookupError: The ASN lookup failed.
+ """
+
+ try:
+
+ if result is None:
+
+ # Create the connection for the Cymru whois query.
+ conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ conn.settimeout(self.timeout)
+ log.debug('ASN query for {0}'.format(self.address_str))
+ conn.connect((CYMRU_WHOIS, 43))
+
+ # Query the Cymru whois server, and store the results.
+ conn.send((
+ ' -r -a -c -p -f -o {0}{1}'.format(
+ self.address_str, '\r\n')
+ ).encode())
+
+ data = ''
+ while True:
+
+ d = conn.recv(4096).decode()
+ data += d
+
+ if not d:
+
+ break
+
+ conn.close()
+
+ else:
+
+ data = result
+
+ # Parse out the ASN information.
+ temp = str(data).split('|')
+
+ ret = {'asn_registry': temp[4].strip(' \n')}
+
+ if ret['asn_registry'] not in RIR_WHOIS.keys():
+
+ raise ASNRegistryError(
+ 'ASN registry {0} is not known.'.format(
+ ret['asn_registry'])
+ )
+
+ ret['asn'] = temp[0].strip(' \n')
+ ret['asn_cidr'] = temp[2].strip(' \n')
+ ret['asn_country_code'] = temp[3].strip(' \n').upper()
+ ret['asn_date'] = temp[5].strip(' \n')
+
+ return ret
+
+ except (socket.timeout, socket.error) as e: # pragma: no cover
+
+ log.debug('ASN query socket error: {0}'.format(e))
+ if retry_count > 0:
+
+ log.debug('ASN query retrying (count: {0})'.format(
+ str(retry_count)))
+ return self.get_asn_whois(retry_count - 1)
+
+ else:
+
+ raise ASNLookupError(
+ 'ASN lookup failed for {0}.'.format(self.address_str)
+ )
+
+ except ASNRegistryError:
+
+ raise
+
+ except:
+
+ raise ASNLookupError(
+ 'ASN lookup failed for {0}.'.format(self.address_str)
+ )
+
+ def get_asn_http(self, retry_count=3, result=None, extra_org_map=None):
+ """
+ The function for retrieving ASN information for an IP address from
+ ARIN via port 80 (HTTP). Currently limited to fetching asn_registry
+ through an ARIN whois (REST) lookup. The other values are returned as
+ None to keep a consistent dict output. This should be used as a last
+ chance fallback call behind ASN DNS & ASN Whois lookups.
+
+ Args:
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ result: Optional result object. This bypasses the ASN lookup.
+ extra_org_map: Dictionary mapping org handles to RIRs. This is for
+ limited cases where ARIN REST (ASN fallback HTTP lookup) does
+ not show an RIR as the org handle e.g., DNIC (which is now the
+ built-in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR values are
+ (note the case sensitivity - this is meant to match the REST
+ result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'
+
+ Returns:
+ Dictionary: A dictionary containing the following keys:
+ asn (String) - None, can't retrieve with this method.
+ asn_date (String) - None, can't retrieve with this method.
+ asn_registry (String) - The assigned ASN registry.
+ asn_cidr (String) - None, can't retrieve with this method.
+ asn_country_code (String) - None, can't retrieve with this
+ method.
+
+ Raises:
+ ASNRegistryError: The ASN registry is not known.
+ ASNLookupError: The ASN lookup failed.
+ """
+
+ # Set the org_map. Map the orgRef handle to an RIR.
+ org_map = ORG_MAP.copy()
+ try:
+
+ org_map.update(extra_org_map)
+
+ except (TypeError, ValueError, IndexError, KeyError):
+
+ pass
+
+ try:
+
+ if result is None:
+
+ # Let's attempt to get the ASN registry information from
+ # ARIN.
+ log.debug('ASN query for {0}'.format(self.address_str))
+ response = self.get_http_json(
+ url=str(ARIN).format(self.address_str),
+ retry_count=retry_count,
+ headers={'Accept': 'application/json'}
+ )
+
+ else:
+
+ response = result
+
+ asn_data = {
+ 'asn_registry': None,
+ 'asn': None,
+ 'asn_cidr': None,
+ 'asn_country_code': None,
+ 'asn_date': None
+ }
+
+ try:
+
+ net_list = response['nets']['net']
+
+ if not isinstance(net_list, list):
+ net_list = [net_list]
+
+ except (KeyError, TypeError):
+
+ log.debug('No networks found')
+ net_list = []
+
+ for n in net_list:
+
+ try:
+
+ asn_data['asn_registry'] = (
+ org_map[n['orgRef']['@handle'].upper()]
+ )
+
+ except KeyError as e:
+
+ log.debug('Could not parse ASN registry via HTTP: '
+ '{0}'.format(str(e)))
+ raise ASNRegistryError('ASN registry lookup failed.')
+
+ break
+
+ return asn_data
+
+ except (socket.timeout, socket.error) as e: # pragma: no cover
+
+ log.debug('ASN query socket error: {0}'.format(e))
+ if retry_count > 0:
+
+ log.debug('ASN query retrying (count: {0})'.format(
+ str(retry_count)))
+ return self.get_asn_http(retry_count=retry_count-1)
+
+ else:
+
+ raise ASNLookupError(
+ 'ASN lookup failed for {0}.'.format(self.address_str)
+ )
+
+ except ASNRegistryError:
+
+ raise
+
+ except:
+
+ raise ASNLookupError(
+ 'ASN lookup failed for {0}.'.format(self.address_str)
+ )
+
+ def get_whois(self, asn_registry='arin', retry_count=3, server=None,
+ port=43, extra_blacklist=None):
+ """
+ The function for retrieving whois or rwhois information for an IP
+ address via any port. Defaults to port 43/tcp (WHOIS).
+
+ Args:
+ asn_registry: The NIC to run the query against.
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ server: An optional server to connect to. If provided, asn_registry
+ will be ignored.
+ port: The network port to connect on.
+ extra_blacklist: A list of blacklisted whois servers in addition to
+ the global BLACKLIST.
+
+ Returns:
+ String: The raw whois data.
+
+ Raises:
+ BlacklistError: Raised if the whois server provided is in the
+ global BLACKLIST or extra_blacklist.
+ WhoisLookupError: The whois lookup failed.
+ """
+
+ try:
+
+ extra_bl = extra_blacklist if extra_blacklist else []
+
+ if any(server in srv for srv in (BLACKLIST, extra_bl)):
+ raise BlacklistError(
+ 'The server {0} is blacklisted.'.format(server)
+ )
+
+ if server is None:
+ server = RIR_WHOIS[asn_registry]['server']
+
+ # Create the connection for the whois query.
+ conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ conn.settimeout(self.timeout)
+ log.debug('WHOIS query for {0} at {1}:{2}'.format(
+ self.address_str, server, port))
+ conn.connect((server, port))
+
+ # Prep the query.
+ query = self.address_str + '\r\n'
+ if asn_registry == 'arin':
+
+ query = 'n + {0}'.format(query)
+
+ # Query the whois server, and store the results.
+ conn.send(query.encode())
+
+ response = ''
+ while True:
+
+ d = conn.recv(4096).decode('ascii', 'ignore')
+
+ response += d
+
+ if not d:
+
+ break
+
+ conn.close()
+
+ if 'Query rate limit exceeded' in response: # pragma: no cover
+
+ log.debug('WHOIS query rate limit exceeded. Waiting...')
+ sleep(1)
+ return self.get_whois(
+ asn_registry=asn_registry, retry_count=retry_count-1,
+ server=server, port=port, extra_blacklist=extra_blacklist
+ )
+
+ elif ('error 501' in response or 'error 230' in response
+ ): # pragma: no cover
+
+ log.debug('WHOIS query error: {0}'.format(response))
+ raise ValueError
+
+ return str(response)
+
+ except (socket.timeout, socket.error) as e:
+
+ log.debug('WHOIS query socket error: {0}'.format(e))
+ if retry_count > 0:
+
+ log.debug('WHOIS query retrying (count: {0})'.format(
+ str(retry_count)))
+ return self.get_whois(
+ asn_registry=asn_registry, retry_count=retry_count-1,
+ server=server, port=port, extra_blacklist=extra_blacklist
+ )
+
+ else:
+
+ raise WhoisLookupError(
+ 'WHOIS lookup failed for {0}.'.format(self.address_str)
+ )
+
+ except BlacklistError:
+
+ raise
+
+ except: # pragma: no cover
+
+ raise WhoisLookupError(
+ 'WHOIS lookup failed for {0}.'.format(self.address_str)
+ )
+
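+    # Illustrative sketch (not part of the upstream ipwhois code): a raw
+    # registry WHOIS query over port 43/tcp via get_whois() above, assuming
+    # a Net object built for an arbitrary public address.
+    #
+    #     net = Net('74.125.225.229')
+    #     raw = net.get_whois(asn_registry='arin', retry_count=3)
+    #     print(raw.splitlines()[0])
+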
+ def get_http_json(self, url=None, retry_count=3, rate_limit_timeout=120,
+ headers=None):
+ """
+ The function for retrieving a json result via HTTP.
+
+ Args:
+ url: The URL to retrieve.
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ rate_limit_timeout: The number of seconds to wait before retrying
+ when a rate limit notice is returned via rdap+json.
+ headers: The HTTP headers dictionary. The Accept header defaults
+ to 'application/rdap+json'.
+
+ Returns:
+ Dictionary: The data in json format.
+
+ Raises:
+ HTTPLookupError: The HTTP lookup failed.
+ HTTPRateLimitError: The HTTP request rate limited and retries
+ were exhausted.
+ """
+
+ if headers is None:
+ headers = {'Accept': 'application/rdap+json'}
+
+ try:
+
+ # Create the connection for the whois query.
+ log.debug('HTTP query for {0} at {1}'.format(
+ self.address_str, url))
+ conn = Request(url, headers=headers)
+ data = self.opener.open(conn, timeout=self.timeout)
+ try:
+ d = json.loads(data.readall().decode('utf-8', 'ignore'))
+ except AttributeError: # pragma: no cover
+ d = json.loads(data.read().decode('utf-8', 'ignore'))
+
+ try:
+ # Tests written but commented out. I do not want to send a
+ # flood of requests on every test.
+ for tmp in d['notices']: # pragma: no cover
+ if tmp['title'] == 'Rate Limit Notice':
+ log.debug('RDAP query rate limit exceeded.')
+
+ if retry_count > 0:
+ log.debug('Waiting {0} seconds...'.format(
+ str(rate_limit_timeout)))
+
+ sleep(rate_limit_timeout)
+ return self.get_http_json(
+ url=url, retry_count=retry_count-1,
+ rate_limit_timeout=rate_limit_timeout,
+ headers=headers
+ )
+ else:
+ raise HTTPRateLimitError(
+ 'HTTP lookup failed for {0}. Rate limit '
+ 'exceeded, wait and try again (possibly a '
+ 'temporary block).'.format(url))
+
+ except (KeyError, IndexError): # pragma: no cover
+
+ pass
+
+ return d
+
+ except (URLError, socket.timeout, socket.error) as e:
+
+ # Check needed for Python 2.6, also why URLError is caught.
+ try: # pragma: no cover
+ if not isinstance(e.reason, (socket.timeout, socket.error)):
+ raise HTTPLookupError('HTTP lookup failed for {0}.'
+ ''.format(url))
+ except AttributeError: # pragma: no cover
+
+ pass
+
+ log.debug('HTTP query socket error: {0}'.format(e))
+ if retry_count > 0:
+
+ log.debug('HTTP query retrying (count: {0})'.format(
+ str(retry_count)))
+
+ return self.get_http_json(
+ url=url, retry_count=retry_count-1,
+ rate_limit_timeout=rate_limit_timeout, headers=headers
+ )
+
+ else:
+
+ raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
+ url))
+
+ except (HTTPLookupError, HTTPRateLimitError) as e: # pragma: no cover
+
+ raise e
+
+ except: # pragma: no cover
+
+ raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))
+
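+    # Illustrative sketch (not part of the upstream ipwhois code): fetching
+    # an RDAP document with get_http_json() above. The URL shown follows the
+    # ARIN RDAP ip_url template defined in ipwhois.rdap.RIR_RDAP.
+    #
+    #     net = Net('74.125.225.229')
+    #     doc = net.get_http_json(
+    #         url='http://rdap.arin.net/registry/ip/74.125.225.229',
+    #         retry_count=3)
+    #     print(doc['handle'])
+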
+ def get_host(self, retry_count=3):
+ """
+ The function for retrieving host information for an IP address.
+
+ Args:
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+
+ Returns:
+ Tuple: hostname, aliaslist, ipaddrlist
+
+ Raises:
+ HostLookupError: The host lookup failed.
+ """
+
+ try:
+
+ default_timeout_set = False
+ if not socket.getdefaulttimeout():
+
+ socket.setdefaulttimeout(self.timeout)
+ default_timeout_set = True
+
+ log.debug('Host query for {0}'.format(self.address_str))
+ ret = socket.gethostbyaddr(self.address_str)
+
+ if default_timeout_set: # pragma: no cover
+
+ socket.setdefaulttimeout(None)
+
+ return ret
+
+ except (socket.timeout, socket.error) as e:
+
+ log.debug('Host query socket error: {0}'.format(e))
+ if retry_count > 0:
+
+ log.debug('Host query retrying (count: {0})'.format(
+ str(retry_count)))
+
+ return self.get_host(retry_count - 1)
+
+ else:
+
+ raise HostLookupError(
+ 'Host lookup failed for {0}.'.format(self.address_str)
+ )
+
+ except: # pragma: no cover
+
+ raise HostLookupError(
+ 'Host lookup failed for {0}.'.format(self.address_str)
+ )
+
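+    # Illustrative sketch (not part of the upstream ipwhois code): a reverse
+    # DNS lookup via get_host() above, which wraps socket.gethostbyaddr() and
+    # returns the (hostname, aliaslist, ipaddrlist) tuple.
+    #
+    #     hostname, aliases, addresses = Net('8.8.8.8').get_host(retry_count=3)
+    #     print(hostname)
+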
+ def lookup_asn(self, retry_count=3, asn_alts=None, extra_org_map=None):
+ """
+ The wrapper function for retrieving and parsing ASN information for an
+ IP address.
+
+ Args:
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ asn_alts: Array of additional lookup types to attempt if the
+                ASN DNS lookup fails. allow_permutations must be enabled.
+                Defaults to all of ['whois', 'http'].
+ extra_org_map: Dictionary mapping org handles to RIRs. This is for
+                limited cases where ARIN REST (the ASN fallback HTTP lookup)
+                does not show an RIR as the org handle, e.g., DNIC (which is
+                now part of the built-in ORG_MAP): {'DNIC': 'arin'}. Valid RIR
+                values are (note the case sensitivity; this is meant to match
+                the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'
+
+ Returns:
+ Tuple:
+
+ :Dictionary: Result from get_asn_dns() or get_asn_whois().
+ :Dictionary: The response returned by get_asn_dns() or
+ get_asn_whois().
+
+ Raises:
+ ASNRegistryError: ASN registry does not match.
+ HTTPLookupError: The HTTP lookup failed.
+ """
+
+ lookups = asn_alts if asn_alts is not None else ['whois', 'http']
+
+ # Initialize the response.
+ response = None
+
+ # Attempt to resolve ASN info via Cymru. DNS is faster, try that first.
+ try:
+
+ self.dns_resolver.lifetime = self.dns_resolver.timeout * (
+ retry_count and retry_count or 1)
+ asn_data = self.get_asn_dns()
+
+ except (ASNLookupError, ASNRegistryError) as e:
+
+ if not self.allow_permutations:
+
+ raise ASNRegistryError('ASN registry lookup failed. '
+ 'Permutations not allowed.')
+
+ try:
+ if 'whois' in lookups:
+
+ log.debug('ASN DNS lookup failed, trying ASN WHOIS: '
+ '{0}'.format(e))
+ asn_data = self.get_asn_whois(retry_count)
+
+ else:
+
+ raise ASNLookupError
+
+ except (ASNLookupError, ASNRegistryError): # pragma: no cover
+
+ if 'http' in lookups:
+
+                # Let's attempt to get the ASN registry information from
+ # ARIN.
+ log.debug('ASN WHOIS lookup failed, trying ASN via HTTP')
+ try:
+
+ asn_data = self.get_asn_http(
+ retry_count=retry_count,
+ extra_org_map=extra_org_map
+ )
+
+ except ASNRegistryError:
+
+ raise ASNRegistryError('ASN registry lookup failed.')
+
+ except ASNLookupError:
+
+ raise HTTPLookupError('ASN HTTP lookup failed.')
+
+ else:
+
+ raise ASNRegistryError('ASN registry lookup failed.')
+
+ return asn_data, response
+
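+    # Illustrative sketch (not part of the upstream ipwhois code): lookup_asn()
+    # above tries the Cymru DNS lookup first, then falls back to ASN WHOIS and
+    # finally to the ARIN HTTP lookup, in the order given by asn_alts.
+    #
+    #     net = Net('74.125.225.229')
+    #     asn_data, _ = net.lookup_asn(retry_count=3, asn_alts=['whois', 'http'])
+    #     print(asn_data['asn_registry'], asn_data['asn_cidr'])
+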
+ def get_http_raw(self, url=None, retry_count=3, headers=None,
+ request_type='GET', form_data=None):
+ """
+ The function for retrieving a raw HTML result via HTTP.
+
+ Args:
+ url: The URL to retrieve.
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ headers: The HTTP headers dictionary. The Accept header defaults
+ to 'application/rdap+json'.
+ request_type: 'GET' or 'POST'
+ form_data: Dictionary of form POST data
+
+ Returns:
+ String: The raw data.
+
+ Raises:
+ HTTPLookupError: The HTTP lookup failed.
+ """
+
+ if headers is None:
+ headers = {'Accept': 'text/html'}
+
+ if form_data:
+ form_data = urlencode(form_data)
+ try:
+ form_data = bytes(form_data, encoding='ascii')
+ except TypeError: # pragma: no cover
+ pass
+
+ try:
+
+ # Create the connection for the HTTP query.
+ log.debug('HTTP query for {0} at {1}'.format(
+ self.address_str, url))
+ try:
+ conn = Request(url=url, data=form_data, headers=headers,
+ method=request_type)
+ except TypeError: # pragma: no cover
+ conn = Request(url=url, data=form_data, headers=headers)
+ data = self.opener.open(conn, timeout=self.timeout)
+
+ try:
+ d = data.readall().decode('ascii', 'ignore')
+ except AttributeError: # pragma: no cover
+ d = data.read().decode('ascii', 'ignore')
+
+ return str(d)
+
+ except (URLError, socket.timeout, socket.error) as e:
+
+ # Check needed for Python 2.6, also why URLError is caught.
+ try: # pragma: no cover
+ if not isinstance(e.reason, (socket.timeout, socket.error)):
+ raise HTTPLookupError('HTTP lookup failed for {0}.'
+ ''.format(url))
+ except AttributeError: # pragma: no cover
+
+ pass
+
+ log.debug('HTTP query socket error: {0}'.format(e))
+ if retry_count > 0:
+
+ log.debug('HTTP query retrying (count: {0})'.format(
+ str(retry_count)))
+
+ return self.get_http_raw(
+ url=url, retry_count=retry_count - 1, headers=headers,
+ request_type=request_type, form_data=form_data
+ )
+
+ else:
+
+ raise HTTPLookupError('HTTP lookup failed for {0}.'.format(
+ url))
+
+ except HTTPLookupError as e: # pragma: no cover
+
+ raise e
+
+ except Exception: # pragma: no cover
+
+ raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))
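+
+    # Illustrative sketch (not part of the upstream ipwhois code): a plain
+    # HTTP GET via get_http_raw() above, which returns the response body as a
+    # string. The URL is only a placeholder for this example.
+    #
+    #     html = Net('74.125.225.229').get_http_raw(
+    #         url='http://example.com/', retry_count=3)
+    #     print(html[:80])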
diff --git a/lib/ipwhois/rdap.py b/lib/ipwhois/rdap.py
new file mode 100644
index 00000000..808bb696
--- /dev/null
+++ b/lib/ipwhois/rdap.py
@@ -0,0 +1,832 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from . import (Net, NetError, InvalidEntityContactObject, InvalidNetworkObject,
+ InvalidEntityObject, HTTPLookupError)
+from .utils import ipv4_lstrip_zeros, calculate_cidr, unique_everseen
+from .net import ip_address
+import logging
+import json
+
+log = logging.getLogger(__name__)
+
+BOOTSTRAP_URL = 'http://rdap.arin.net/bootstrap'
+
+RIR_RDAP = {
+ 'arin': {
+ 'ip_url': 'http://rdap.arin.net/registry/ip/{0}',
+ 'entity_url': 'http://rdap.arin.net/registry/entity/{0}'
+ },
+ 'ripencc': {
+ 'ip_url': 'http://rdap.db.ripe.net/ip/{0}',
+ 'entity_url': 'http://rdap.db.ripe.net/entity/{0}'
+ },
+ 'apnic': {
+ 'ip_url': 'http://rdap.apnic.net/ip/{0}',
+ 'entity_url': 'http://rdap.apnic.net/entity/{0}'
+ },
+ 'lacnic': {
+ 'ip_url': 'http://rdap.lacnic.net/rdap/ip/{0}',
+ 'entity_url': 'http://rdap.lacnic.net/rdap/entity/{0}'
+ },
+ 'afrinic': {
+ 'ip_url': 'http://rdap.afrinic.net/rdap/ip/{0}',
+ 'entity_url': 'http://rdap.afrinic.net/rdap/entity/{0}'
+ }
+}
+
+
+class _RDAPContact:
+ """
+ The class for parsing RDAP entity contact information objects:
+ https://tools.ietf.org/html/rfc7483#section-5.1
+ https://tools.ietf.org/html/rfc7095
+
+ Args:
+ vcard: The vcard list from an RDAP IP address query.
+
+ Raises:
+ InvalidEntityContactObject: vcard is not an RDAP entity contact
+ information object.
+ """
+
+ def __init__(self, vcard):
+
+ if not isinstance(vcard, list):
+
+ raise InvalidEntityContactObject('JSON result must be a list.')
+
+ self.vcard = vcard
+ self.vars = {
+ 'name': None,
+ 'kind': None,
+ 'address': None,
+ 'phone': None,
+ 'email': None,
+ 'role': None,
+ 'title': None
+ }
+
+ def _parse_name(self, val):
+ """
+ The function for parsing the vcard name.
+
+ Args:
+ val: The value to parse.
+ """
+
+ self.vars['name'] = val[3].strip()
+
+ def _parse_kind(self, val):
+ """
+ The function for parsing the vcard kind.
+
+ Args:
+ val: The value to parse.
+ """
+
+ self.vars['kind'] = val[3].strip()
+
+ def _parse_address(self, val):
+ """
+ The function for parsing the vcard address.
+
+ Args:
+ val: The value to parse.
+ """
+
+ ret = {
+ 'type': None,
+ 'value': None
+ }
+
+ try:
+
+ ret['type'] = val[1]['type']
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ try:
+
+ ret['value'] = val[1]['label']
+
+ except (KeyError, ValueError, TypeError):
+
+ ret['value'] = '\n'.join(val[3]).strip()
+
+ try:
+
+ self.vars['address'].append(ret)
+
+ except AttributeError:
+
+ self.vars['address'] = []
+ self.vars['address'].append(ret)
+
+ def _parse_phone(self, val):
+ """
+ The function for parsing the vcard phone numbers.
+
+ Args:
+ val: The value to parse.
+ """
+
+ ret = {
+ 'type': None,
+ 'value': None
+ }
+
+ try:
+
+ ret['type'] = val[1]['type']
+
+ except (IndexError, KeyError, ValueError, TypeError):
+
+ pass
+
+ ret['value'] = val[3].strip()
+
+ try:
+
+ self.vars['phone'].append(ret)
+
+ except AttributeError:
+
+ self.vars['phone'] = []
+ self.vars['phone'].append(ret)
+
+ def _parse_email(self, val):
+ """
+ The function for parsing the vcard email addresses.
+
+ Args:
+ val: The value to parse.
+ """
+
+ ret = {
+ 'type': None,
+ 'value': None
+ }
+
+ try:
+
+ ret['type'] = val[1]['type']
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ ret['value'] = val[3].strip()
+
+ try:
+
+ self.vars['email'].append(ret)
+
+ except AttributeError:
+
+ self.vars['email'] = []
+ self.vars['email'].append(ret)
+
+ def _parse_role(self, val):
+ """
+ The function for parsing the vcard role.
+
+ Args:
+ val: The value to parse.
+ """
+
+ self.vars['role'] = val[3].strip()
+
+ def _parse_title(self, val):
+ """
+ The function for parsing the vcard title.
+
+ Args:
+ val: The value to parse.
+ """
+
+ self.vars['title'] = val[3].strip()
+
+ def parse(self):
+ """
+ The function for parsing the vcard to the vars dictionary.
+ """
+
+ keys = {
+ 'fn': self._parse_name,
+ 'kind': self._parse_kind,
+ 'adr': self._parse_address,
+ 'tel': self._parse_phone,
+ 'email': self._parse_email,
+ 'role': self._parse_role,
+ 'title': self._parse_title
+ }
+
+ for val in self.vcard:
+
+ try:
+
+ parser = keys.get(val[0])
+ parser(val)
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+
+class _RDAPCommon:
+ """
+ The common class for parsing RDAP objects:
+ https://tools.ietf.org/html/rfc7483#section-5
+
+ Args:
+ json_result: The JSON response from an RDAP query.
+
+ Raises:
+        ValueError: json_result is not a known RDAP object.
+ """
+
+ def __init__(self, json_result):
+
+ if not isinstance(json_result, dict):
+
+ raise ValueError
+
+ self.json = json_result
+ self.vars = {
+ 'handle': None,
+ 'status': None,
+ 'remarks': None,
+ 'notices': None,
+ 'links': None,
+ 'events': None,
+ 'raw': None
+ }
+
+ def summarize_links(self, links_json):
+ """
+        The function for summarizing RDAP links into a unique list.
+ https://tools.ietf.org/html/rfc7483#section-4.2
+
+ Args:
+ links_json: A json dictionary of links from RDAP results.
+
+ Returns:
+ List: A unique list of found RDAP link dictionaries.
+ """
+
+ ret = []
+
+ for link_dict in links_json:
+
+ ret.append(link_dict['href'])
+
+ ret = list(unique_everseen(ret))
+
+ return ret
+
+ def summarize_notices(self, notices_json):
+ """
+        The function for summarizing RDAP notices into a unique list.
+ https://tools.ietf.org/html/rfc7483#section-4.3
+
+ Args:
+ notices_json: A json dictionary of notices from RDAP results.
+
+ Returns:
+ List: A unique list of found RDAP notices dictionaries.
+ """
+
+ ret = []
+
+ for notices_dict in notices_json:
+
+ tmp = {
+ 'title': None,
+ 'description': None,
+ 'links': None
+ }
+
+ try:
+
+ tmp['title'] = notices_dict['title']
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ try:
+
+ tmp['description'] = '\n'.join(notices_dict['description'])
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ try:
+
+ tmp['links'] = self.summarize_links(notices_dict['links'])
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ if all(tmp.values()):
+
+ ret.append(tmp)
+
+ return ret
+
+ def summarize_events(self, events_json):
+ """
+        The function for summarizing RDAP events into a unique list.
+ https://tools.ietf.org/html/rfc7483#section-4.5
+
+ Args:
+ events_json: A json dictionary of events from RDAP results.
+
+ Returns:
+ List: A unique list of found RDAP events dictionaries.
+ """
+
+ ret = []
+
+ for event in events_json:
+
+ event_dict = {
+ 'action': event['eventAction'],
+ 'timestamp': event['eventDate'],
+ 'actor': None
+ }
+
+ try:
+
+ event_dict['actor'] = event['eventActor']
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ ret.append(event_dict)
+
+ return ret
+
+ def _parse(self):
+ """
+ The function for parsing the JSON response to the vars dictionary.
+ """
+
+ try:
+
+ self.vars['status'] = self.json['status']
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ for v in ['remarks', 'notices']:
+
+ try:
+
+ self.vars[v] = self.summarize_notices(self.json[v])
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ try:
+
+ self.vars['links'] = self.summarize_links(self.json['links'])
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ try:
+
+ self.vars['events'] = self.summarize_events(self.json['events'])
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+
+class _RDAPNetwork(_RDAPCommon):
+ """
+ The class for parsing RDAP network objects:
+ https://tools.ietf.org/html/rfc7483#section-5.4
+
+ Args:
+ json_result: The JSON response from an RDAP IP address query.
+
+ Raises:
+ InvalidNetworkObject: json_result is not an RDAP network object.
+ """
+
+ def __init__(self, json_result):
+
+ try:
+
+ _RDAPCommon.__init__(self, json_result)
+
+ except ValueError:
+
+ raise InvalidNetworkObject('JSON result must be a dict.')
+
+ self.vars.update({
+ 'start_address': None,
+ 'end_address': None,
+ 'cidr': None,
+ 'ip_version': None,
+ 'type': None,
+ 'name': None,
+ 'country': None,
+ 'parent_handle': None
+ })
+
+ def parse(self):
+ """
+ The function for parsing the JSON response to the vars dictionary.
+ """
+
+ try:
+
+ self.vars['handle'] = self.json['handle'].strip()
+
+ except (KeyError, ValueError):
+
+ log.debug('Handle missing, json_output: {0}'.format(json.dumps(
+ self.json)))
+ raise InvalidNetworkObject('Handle is missing for RDAP network '
+ 'object')
+
+ try:
+
+ self.vars['ip_version'] = self.json['ipVersion'].strip()
+
+ # RDAP IPv4 addresses are padded to 3 digits per octet, remove
+ # the leading 0's.
+ if self.vars['ip_version'] == 'v4':
+
+ self.vars['start_address'] = ip_address(
+ ipv4_lstrip_zeros(self.json['startAddress'])
+ ).__str__()
+
+ self.vars['end_address'] = ip_address(
+ ipv4_lstrip_zeros(self.json['endAddress'])
+ ).__str__()
+
+ # No bugs found for IPv6 yet, proceed as normal.
+ else:
+
+ self.vars['start_address'] = self.json['startAddress'].strip()
+ self.vars['end_address'] = self.json['endAddress'].strip()
+
+ except (KeyError, ValueError, TypeError):
+
+ log.debug('IP address data incomplete. Data parsed prior to '
+ 'exception: {0}'.format(json.dumps(self.vars)))
+ raise InvalidNetworkObject('IP address data is missing for RDAP '
+ 'network object.')
+
+ try:
+
+ self.vars['cidr'] = ', '.join(calculate_cidr(
+ self.vars['start_address'], self.vars['end_address']
+ ))
+
+ except (KeyError, ValueError, TypeError, AttributeError) as \
+ e: # pragma: no cover
+
+ log.debug('CIDR calculation failed: {0}'.format(e))
+ pass
+
+ for v in ['name', 'type', 'country']:
+
+ try:
+
+ self.vars[v] = self.json[v].strip()
+
+ except (KeyError, ValueError):
+
+ pass
+
+ try:
+
+ self.vars['parent_handle'] = self.json['parentHandle'].strip()
+
+ except (KeyError, ValueError):
+
+ pass
+
+ self._parse()
+
+
+class _RDAPEntity(_RDAPCommon):
+ """
+ The class for parsing RDAP entity objects:
+ https://tools.ietf.org/html/rfc7483#section-5.1
+
+ Args:
+ json_result: The JSON response from an RDAP query.
+
+ Raises:
+ InvalidEntityObject: json_result is not an RDAP entity object.
+ """
+
+ def __init__(self, json_result):
+
+ try:
+
+ _RDAPCommon.__init__(self, json_result)
+
+ except ValueError:
+
+ raise InvalidEntityObject('JSON result must be a dict.')
+
+ self.vars.update({
+ 'roles': None,
+ 'contact': None,
+ 'events_actor': None,
+ 'entities': []
+ })
+
+ def parse(self):
+ """
+ The function for parsing the JSON response to the vars dictionary.
+ """
+
+ try:
+
+ self.vars['handle'] = self.json['handle'].strip()
+
+ except (KeyError, ValueError, TypeError):
+
+ raise InvalidEntityObject('Handle is missing for RDAP entity')
+
+ for v in ['roles', 'country']:
+
+ try:
+
+ self.vars[v] = self.json[v]
+
+ except (KeyError, ValueError):
+
+ pass
+
+ try:
+
+ vcard = self.json['vcardArray'][1]
+ c = _RDAPContact(vcard)
+ c.parse()
+
+ self.vars['contact'] = c.vars
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ try:
+
+ self.vars['events_actor'] = self.summarize_events(
+ self.json['asEventActor'])
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ self.vars['entities'] = []
+ try:
+
+ for ent in self.json['entities']:
+
+ if ent['handle'] not in self.vars['entities']:
+
+ self.vars['entities'].append(ent['handle'])
+
+ except (KeyError, ValueError, TypeError):
+
+ pass
+
+ if not self.vars['entities']:
+
+ self.vars['entities'] = None
+
+ self._parse()
+
+
+class RDAP:
+ """
+ The class for parsing IP address whois information via RDAP:
+ https://tools.ietf.org/html/rfc7483
+ https://www.arin.net/resources/rdap.html
+
+ Args:
+ net: A ipwhois.net.Net object.
+
+ Raises:
+ NetError: The parameter provided is not an instance of
+ ipwhois.net.Net
+ IPDefinedError: The address provided is defined (does not need to be
+ resolved).
+ """
+
+ def __init__(self, net):
+
+ if isinstance(net, Net):
+
+ self._net = net
+
+ else:
+
+ raise NetError('The provided net parameter is not an instance of '
+ 'ipwhois.net.Net')
+
+ def lookup(self, inc_raw=False, retry_count=3, asn_data=None, depth=0,
+ excluded_entities=None, response=None, bootstrap=False,
+ rate_limit_timeout=120):
+ """
+ The function for retrieving and parsing information for an IP
+ address via RDAP (HTTP).
+
+ Args:
+ inc_raw: Boolean for whether to include the raw results in the
+ returned dictionary.
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ asn_data: Result dictionary from ipwhois.net.Net.lookup_asn().
+ Optional if the bootstrap parameter is True.
+ depth: How many levels deep to run queries when additional
+ referenced objects are found.
+            excluded_entities: A list of entity handles to skip lookups for.
+ response: Optional response object, this bypasses the RDAP lookup.
+ bootstrap: If True, performs lookups via ARIN bootstrap rather
+ than lookups based on ASN data.
+ rate_limit_timeout: The number of seconds to wait before retrying
+ when a rate limit notice is returned via rdap+json.
+
+ Returns:
+ Dictionary:
+
+ :query: The IP address (String)
+ :network: Dictionary of values returned by _RDAPNetwork. The raw
+ result is included for each entity if the inc_raw parameter is
+ True.
+ :entities: List of entity keys referenced by the top level IP
+ address query.
+ :objects: Dictionary of objects with the handles as keys, and the
+ dictionary returned by _RDAPEntity, etc as the values. The raw
+ result is included for each object if the inc_raw parameter is
+ True.
+ """
+
+ if not excluded_entities:
+
+ excluded_entities = []
+
+ # Create the return dictionary.
+ results = {
+ 'query': self._net.address_str,
+ 'network': None,
+ 'entities': None,
+ 'objects': None,
+ 'raw': None
+ }
+
+ if bootstrap:
+
+ ip_url = '{0}/ip/{1}'.format(BOOTSTRAP_URL, self._net.address_str)
+
+ else:
+
+ ip_url = str(RIR_RDAP[asn_data['asn_registry']]['ip_url']).format(
+ self._net.address_str)
+
+ # Only fetch the response if we haven't already.
+ if response is None:
+
+ log.debug('Response not given, perform RDAP lookup for {0}'.format(
+ ip_url))
+
+ # Retrieve the whois data.
+ response = self._net.get_http_json(
+ url=ip_url, retry_count=retry_count,
+ rate_limit_timeout=rate_limit_timeout
+ )
+
+ if inc_raw:
+
+ results['raw'] = response
+
+ log.debug('Parsing RDAP network object')
+ result_net = _RDAPNetwork(response)
+ result_net.parse()
+ results['network'] = result_net.vars
+ results['entities'] = []
+ results['objects'] = {}
+
+ # Iterate through and parse the root level entities.
+ log.debug('Parsing RDAP root level entities')
+ try:
+
+ for ent in response['entities']:
+
+ if ent['handle'] not in [results['entities'],
+ excluded_entities]:
+
+ result_ent = _RDAPEntity(ent)
+ result_ent.parse()
+
+ results['objects'][ent['handle']] = result_ent.vars
+
+ results['entities'].append(ent['handle'])
+
+ except KeyError:
+
+ pass
+
+ # Iterate through to the defined depth, retrieving and parsing all
+ # unique entities.
+ temp_objects = results['objects']
+
+ if depth > 0 and len(temp_objects) > 0:
+
+ log.debug('Parsing RDAP sub-entities to depth: {0}'.format(str(
+ depth)))
+
+ while depth > 0 and len(temp_objects) > 0:
+
+ new_objects = {}
+ for obj in temp_objects.values():
+
+ try:
+
+ for ent in obj['entities']:
+
+ if ent not in (list(results['objects'].keys()) +
+ list(new_objects.keys()) +
+ excluded_entities):
+
+ if bootstrap:
+ entity_url = '{0}/entity/{1}'.format(
+ BOOTSTRAP_URL, ent)
+ else:
+ tmp_reg = asn_data['asn_registry']
+ entity_url = RIR_RDAP[tmp_reg]['entity_url']
+ entity_url = str(entity_url).format(ent)
+
+ try:
+
+ # RDAP entity query
+ response = self._net.get_http_json(
+ url=entity_url, retry_count=retry_count,
+ rate_limit_timeout=rate_limit_timeout
+ )
+
+ # Parse the entity
+ result_ent = _RDAPEntity(response)
+ result_ent.parse()
+ new_objects[ent] = result_ent.vars
+
+ if inc_raw:
+
+ new_objects[ent]['raw'] = response
+
+ except (HTTPLookupError, InvalidEntityObject):
+
+ pass
+
+ except TypeError:
+
+ pass
+
+ # Update the result objects, and set the new temp object list to
+ # iterate for the next depth of entities.
+ results['objects'].update(new_objects)
+ temp_objects = new_objects
+ depth -= 1
+
+ return results
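+
+    # Illustrative sketch (not part of the upstream ipwhois code): a typical
+    # RDAP lookup using the classes above. asn_data normally comes from
+    # ipwhois.net.Net.lookup_asn(); depth=1 also resolves referenced entities.
+    #
+    #     from ipwhois.net import Net
+    #     from ipwhois.rdap import RDAP
+    #
+    #     net = Net('74.125.225.229')
+    #     asn_data, _ = net.lookup_asn()
+    #     result = RDAP(net).lookup(asn_data=asn_data, depth=1)
+    #     print(result['network']['cidr'], result['entities'])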
diff --git a/lib/ipwhois/utils.py b/lib/ipwhois/utils.py
new file mode 100644
index 00000000..92944c74
--- /dev/null
+++ b/lib/ipwhois/utils.py
@@ -0,0 +1,553 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+from xml.dom.minidom import parseString
+from os import path
+import re
+import copy
+import io
+import csv
+import logging
+
+if sys.version_info >= (3, 3): # pragma: no cover
+ from ipaddress import (ip_address,
+ ip_network,
+ IPv4Address,
+ IPv4Network,
+ IPv6Address,
+ summarize_address_range,
+ collapse_addresses)
+else: # pragma: no cover
+ from ipaddr import (IPAddress as ip_address,
+ IPNetwork as ip_network,
+ IPv4Address,
+ IPv4Network,
+ IPv6Address,
+ summarize_address_range,
+ collapse_address_list as collapse_addresses)
+
+try: # pragma: no cover
+ from itertools import filterfalse
+
+except ImportError: # pragma: no cover
+ from itertools import ifilterfalse as filterfalse
+
+log = logging.getLogger(__name__)
+
+IETF_RFC_REFERENCES = {
+ # IPv4
+ 'RFC 1122, Section 3.2.1.3':
+ 'http://tools.ietf.org/html/rfc1122#section-3.2.1.3',
+ 'RFC 1918': 'http://tools.ietf.org/html/rfc1918',
+ 'RFC 3927': 'http://tools.ietf.org/html/rfc3927',
+ 'RFC 5736': 'http://tools.ietf.org/html/rfc5736',
+ 'RFC 5737': 'http://tools.ietf.org/html/rfc5737',
+ 'RFC 3068': 'http://tools.ietf.org/html/rfc3068',
+ 'RFC 2544': 'http://tools.ietf.org/html/rfc2544',
+ 'RFC 3171': 'http://tools.ietf.org/html/rfc3171',
+ 'RFC 919, Section 7': 'http://tools.ietf.org/html/rfc919#section-7',
+ # IPv6
+ 'RFC 4291, Section 2.7': 'http://tools.ietf.org/html/rfc4291#section-2.7',
+ 'RFC 4291': 'http://tools.ietf.org/html/rfc4291',
+ 'RFC 4291, Section 2.5.2':
+ 'http://tools.ietf.org/html/rfc4291#section-2.5.2',
+ 'RFC 4291, Section 2.5.3':
+ 'http://tools.ietf.org/html/rfc4291#section-2.5.3',
+ 'RFC 4291, Section 2.5.6':
+ 'http://tools.ietf.org/html/rfc4291#section-2.5.6',
+ 'RFC 4291, Section 2.5.7':
+ 'http://tools.ietf.org/html/rfc4291#section-2.5.7',
+ 'RFC 4193': 'https://tools.ietf.org/html/rfc4193'
+}
+
+IP_REGEX = (
+    r'(?P<ip>'
+ # IPv4
+ '(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.)){3}'
+ '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
+ # IPv6
+ '|\[?(((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:)'
+ '{6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|'
+ '2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]'
+ '{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d'
+ '\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|'
+ '((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|'
+ '2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]'
+ '{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
+ '(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(('
+ '(:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1'
+ '\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(('
+ '[0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4})'
+ '{0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]'
+ '?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:(('
+ '25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})'
+ ')|:)))(%.+)?))\]?'
+ # Optional IPv4 Port
+ '((:(6553[0-5]|655[0-2]\d|65[0-4]\d{2}|6[0-4]\d{3}|[1-5]\d{4}|[1-9]\d{0,3}'
+ # Optional CIDR block
+ '))|(\/(?:[012]\d?|3[012]?|[4-9])))?'
+ ')'
+)
+
+
+def ipv4_lstrip_zeros(address):
+ """
+ The function to strip leading zeros in each octet of an IPv4 address.
+
+ Args:
+ address: An IPv4 address in string format.
+
+ Returns:
+ String: The modified IPv4 address string.
+ """
+
+ # Split the octets.
+ obj = address.strip().split('.')
+
+ for x, y in enumerate(obj):
+
+ # Strip leading zeros. Split / here in case CIDR is attached.
+ obj[x] = y.split('/')[0].lstrip('0')
+ if obj[x] in ['', None]:
+ obj[x] = '0'
+
+ return '.'.join(obj)
+
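+# Illustrative example (not part of the upstream ipwhois code) for
+# ipv4_lstrip_zeros() above: zero-padded RDAP addresses are normalized and any
+# CIDR suffix is dropped.
+#
+#     ipv4_lstrip_zeros('074.125.000.001')   -> '74.125.0.1'
+#     ipv4_lstrip_zeros('010.001.000.000/8') -> '10.1.0.0'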
+
+def calculate_cidr(start_address, end_address):
+ """
+ The function to calculate a CIDR range(s) from a start and end IP address.
+
+ Args:
+ start_address: The starting IP address in string format.
+ end_address: The ending IP address in string format.
+
+ Returns:
+ List: A list of calculated CIDR ranges.
+ """
+
+ tmp_addrs = []
+
+ try:
+
+ tmp_addrs.extend(summarize_address_range(
+ ip_address(start_address),
+ ip_address(end_address)))
+
+ except (KeyError, ValueError, TypeError): # pragma: no cover
+
+ try:
+
+ tmp_addrs.extend(summarize_address_range(
+ ip_network(start_address).network_address,
+ ip_network(end_address).network_address))
+
+ except AttributeError: # pragma: no cover
+
+ tmp_addrs.extend(summarize_address_range(
+ ip_network(start_address).ip,
+ ip_network(end_address).ip))
+
+ return [i.__str__() for i in collapse_addresses(tmp_addrs)]
+
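+# Illustrative example (not part of the upstream ipwhois code) for
+# calculate_cidr() above: an arbitrary start/end range is summarized into the
+# smallest covering set of CIDR blocks.
+#
+#     calculate_cidr('192.0.2.0', '192.0.2.255')
+#     -> ['192.0.2.0/24']
+#     calculate_cidr('74.125.225.229', '74.125.225.230')
+#     -> ['74.125.225.229/32', '74.125.225.230/32']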
+
+def get_countries(is_legacy_xml=False):
+ """
+ The function to generate a dictionary containing ISO_3166-1 country codes
+ to names.
+
+ Args:
+ is_legacy_xml: Boolean for whether to use the older country code
+ list (iso_3166-1_list_en.xml).
+
+ Returns:
+ Dictionary: A dictionary with the country codes as the keys and the
+ country names as the values.
+ """
+
+ # Initialize the countries dictionary.
+ countries = {}
+
+ # Set the data directory based on if the script is a frozen executable.
+ if sys.platform == 'win32' and getattr(sys, 'frozen', False):
+
+ data_dir = path.dirname(sys.executable) # pragma: no cover
+
+ else:
+
+ data_dir = path.dirname(__file__)
+
+ if is_legacy_xml:
+
+ log.debug('Opening country code legacy XML: {0}'.format(
+ str(data_dir) + '/data/iso_3166-1_list_en.xml'))
+
+ # Create the country codes file object.
+ f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',
+ encoding='ISO-8859-1')
+
+ # Read the file.
+ data = f.read()
+
+ # Check if there is data.
+ if not data: # pragma: no cover
+
+ return {}
+
+ # Parse the data to get the DOM.
+ dom = parseString(data)
+
+ # Retrieve the country entries.
+ entries = dom.getElementsByTagName('ISO_3166-1_Entry')
+
+ # Iterate through the entries and add to the countries dictionary.
+ for entry in entries:
+
+ # Retrieve the country code and name from the DOM.
+ code = entry.getElementsByTagName(
+ 'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
+ name = entry.getElementsByTagName(
+ 'ISO_3166-1_Country_name')[0].firstChild.data
+
+ # Add to the countries dictionary.
+ countries[code] = name.title()
+
+ else:
+
+ log.debug('Opening country code CSV: {0}'.format(
+            str(data_dir) + '/data/iso_3166-1.csv'))
+
+ # Create the country codes file object.
+ f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',
+ encoding='utf-8')
+
+ # Create csv reader object.
+ csv_reader = csv.reader(f, delimiter=',', quotechar='"')
+
+ # Iterate through the rows and add to the countries dictionary.
+ for row in csv_reader:
+
+ # Retrieve the country code and name columns.
+ code = row[0]
+ name = row[1]
+
+ # Add to the countries dictionary.
+ countries[code] = name
+
+ return countries
+
+
+def ipv4_is_defined(address):
+ """
+ The function for checking if an IPv4 address is defined (does not need to
+ be resolved).
+
+ Args:
+ address: An IPv4 address in string format.
+
+ Returns:
+ Tuple:
+
+ :Boolean: True if given address is defined, otherwise False
+ :String: IETF assignment name if given address is defined, otherwise ''
+ :String: IETF assignment RFC if given address is defined, otherwise ''
+ """
+
+ # Initialize the IP address object.
+ query_ip = IPv4Address(str(address))
+
+ # This Network
+ if query_ip in IPv4Network('0.0.0.0/8'):
+
+ return True, 'This Network', 'RFC 1122, Section 3.2.1.3'
+
+ # Loopback
+ elif query_ip.is_loopback:
+
+ return True, 'Loopback', 'RFC 1122, Section 3.2.1.3'
+
+ # Link Local
+ elif query_ip.is_link_local:
+
+ return True, 'Link Local', 'RFC 3927'
+
+ # IETF Protocol Assignments
+ elif query_ip in IPv4Network('192.0.0.0/24'):
+
+ return True, 'IETF Protocol Assignments', 'RFC 5736'
+
+ # TEST-NET-1
+ elif query_ip in IPv4Network('192.0.2.0/24'):
+
+ return True, 'TEST-NET-1', 'RFC 5737'
+
+ # 6to4 Relay Anycast
+ elif query_ip in IPv4Network('192.88.99.0/24'):
+
+ return True, '6to4 Relay Anycast', 'RFC 3068'
+
+ # Network Interconnect Device Benchmark Testing
+ elif query_ip in IPv4Network('198.18.0.0/15'):
+
+ return (True,
+ 'Network Interconnect Device Benchmark Testing',
+ 'RFC 2544')
+
+ # TEST-NET-2
+ elif query_ip in IPv4Network('198.51.100.0/24'):
+
+ return True, 'TEST-NET-2', 'RFC 5737'
+
+ # TEST-NET-3
+ elif query_ip in IPv4Network('203.0.113.0/24'):
+
+ return True, 'TEST-NET-3', 'RFC 5737'
+
+ # Multicast
+ elif query_ip.is_multicast:
+
+ return True, 'Multicast', 'RFC 3171'
+
+ # Limited Broadcast
+ elif query_ip in IPv4Network('255.255.255.255/32'):
+
+ return True, 'Limited Broadcast', 'RFC 919, Section 7'
+
+ # Private-Use Networks
+ elif query_ip.is_private:
+
+ return True, 'Private-Use Networks', 'RFC 1918'
+
+ return False, '', ''
+
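+# Illustrative example (not part of the upstream ipwhois code) for
+# ipv4_is_defined() above; defined addresses are rejected early by the lookup
+# code because they cannot be resolved against a registry.
+#
+#     ipv4_is_defined('127.0.0.1')
+#     -> (True, 'Loopback', 'RFC 1122, Section 3.2.1.3')
+#     ipv4_is_defined('74.125.225.229')
+#     -> (False, '', '')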
+
+def ipv6_is_defined(address):
+ """
+ The function for checking if an IPv6 address is defined (does not need to
+ be resolved).
+
+ Args:
+ address: An IPv6 address in string format.
+
+ Returns:
+ Tuple:
+
+ :Boolean: True if address is defined, otherwise False
+ :String: IETF assignment name if address is defined, otherwise ''
+ :String: IETF assignment RFC if address is defined, otherwise ''
+ """
+
+ # Initialize the IP address object.
+ query_ip = IPv6Address(str(address))
+
+ # Multicast
+ if query_ip.is_multicast:
+
+ return True, 'Multicast', 'RFC 4291, Section 2.7'
+
+ # Unspecified
+ elif query_ip.is_unspecified:
+
+ return True, 'Unspecified', 'RFC 4291, Section 2.5.2'
+
+ # Loopback.
+ elif query_ip.is_loopback:
+
+ return True, 'Loopback', 'RFC 4291, Section 2.5.3'
+
+ # Reserved
+ elif query_ip.is_reserved:
+
+ return True, 'Reserved', 'RFC 4291'
+
+ # Link-Local
+ elif query_ip.is_link_local:
+
+ return True, 'Link-Local', 'RFC 4291, Section 2.5.6'
+
+ # Site-Local
+ elif query_ip.is_site_local:
+
+ return True, 'Site-Local', 'RFC 4291, Section 2.5.7'
+
+ # Unique Local Unicast
+ elif query_ip.is_private:
+
+ return True, 'Unique Local Unicast', 'RFC 4193'
+
+ return False, '', ''
+
+
+def unique_everseen(iterable, key=None):
+ """
+ The generator to list unique elements, preserving the order. Remember all
+ elements ever seen. This was taken from the itertools recipes.
+
+ Args:
+ iterable: An iterable to process.
+ key: Optional function to run when checking elements (e.g., str.lower)
+
+ Returns:
+ Generator: Yields a generator object.
+ """
+
+ seen = set()
+ seen_add = seen.add
+
+ if key is None:
+
+ for element in filterfalse(seen.__contains__, iterable):
+
+ seen_add(element)
+ yield element
+
+ else:
+
+ for element in iterable:
+
+ k = key(element)
+
+ if k not in seen:
+
+ seen_add(k)
+ yield element
+
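+# Illustrative example (not part of the upstream ipwhois code) for
+# unique_everseen() above: order is preserved and duplicates are dropped,
+# optionally after applying a key function.
+#
+#     list(unique_everseen(['A', 'b', 'a', 'B', 'c']))
+#     -> ['A', 'b', 'a', 'B', 'c']
+#     list(unique_everseen(['A', 'b', 'a', 'B', 'c'], str.lower))
+#     -> ['A', 'b', 'c']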
+
+def unique_addresses(data=None, file_path=None):
+ """
+ The function to search an input string and/or file, extracting and
+ counting IPv4/IPv6 addresses/networks. Summarizes ports with sub-counts.
+ If both a string and file_path are provided, it will process them both.
+
+ Args:
+ data: A string to process.
+ file_path: An optional file path to process.
+
+ Returns:
+ Dictionary:
+
+ :ip address/network: Each address or network found is a dictionary w/\:
+
+ :count: Total number of times seen (Integer)
+ :ports: Dictionary with port numbers as keys and the number of
+ times seen for this ip as values (Dictionary)
+
+ Raises:
+ ValueError: Arguments provided are invalid.
+ """
+
+ if not data and not file_path:
+
+ raise ValueError('No data or file path provided.')
+
+ ret = {}
+ base = {
+ 'count': 0,
+ 'ports': {}
+ }
+
+ file_data = None
+ if file_path:
+
+ log.debug('Opening file for unique address analysis: {0}'.format(
+ str(file_path)))
+
+ f = open(str(file_path), 'r')
+
+ # Read the file.
+ file_data = f.read()
+
+ pattern = re.compile(
+ str(IP_REGEX),
+ re.DOTALL
+ )
+
+ # Check if there is data.
+    log.debug('Analyzing input/file data')
+ for input_data in [data, file_data]:
+
+ if input_data:
+
+ # Search for IPs.
+ for match in pattern.finditer(input_data):
+
+ is_net = False
+ port = None
+ try:
+
+ found = match.group('ip')
+
+ if '.' in found and ':' in found:
+
+ split = found.split(':')
+ ip_or_net = split[0]
+ port = split[1]
+
+ elif '[' in found:
+
+ split = found.split(']:')
+ ip_or_net = split[0][1:]
+ port = split[1]
+
+ elif '/' in found:
+
+ is_net = True
+ ip_or_net = found
+
+ else:
+
+ ip_or_net = found
+
+ if is_net:
+
+ ip_obj = ip_network(ip_or_net)
+
+ else:
+ ip_obj = ip_address(ip_or_net)
+
+ obj_str = ip_obj.__str__()
+
+ if obj_str not in ret.keys():
+
+ ret[obj_str] = copy.deepcopy(base)
+
+ ret[obj_str]['count'] += 1
+
+ if port:
+
+ try:
+
+ ret[obj_str]['ports'][str(port)] += 1
+
+ except KeyError:
+
+ ret[obj_str]['ports'][str(port)] = 1
+
+ except (KeyError, ValueError):
+
+ continue
+
+ return ret
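+
+# Illustrative example (not part of the upstream ipwhois code) for
+# unique_addresses() above: addresses, networks and ports are extracted and
+# counted from free-form text.
+#
+#     unique_addresses(data='Seen 74.125.225.229:443 and 74.125.225.229')
+#     -> {'74.125.225.229': {'count': 2, 'ports': {'443': 1}}}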
diff --git a/lib/ipwhois/whois.py b/lib/ipwhois/whois.py
new file mode 100644
index 00000000..0c949e0d
--- /dev/null
+++ b/lib/ipwhois/whois.py
@@ -0,0 +1,683 @@
+# Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import re
+import copy
+from datetime import datetime
+import logging
+from .utils import unique_everseen
+from . import (BlacklistError, WhoisLookupError, NetError)
+
+if sys.version_info >= (3, 3): # pragma: no cover
+ from ipaddress import (ip_address,
+ ip_network,
+ summarize_address_range,
+ collapse_addresses)
+else: # pragma: no cover
+ from ipaddr import (IPAddress as ip_address,
+ IPNetwork as ip_network,
+ summarize_address_range,
+ collapse_address_list as collapse_addresses)
+
+log = logging.getLogger(__name__)
+
+# Legacy base whois output dictionary.
+BASE_NET = {
+ 'cidr': None,
+ 'name': None,
+ 'handle': None,
+ 'range': None,
+ 'description': None,
+ 'country': None,
+ 'state': None,
+ 'city': None,
+ 'address': None,
+ 'postal_code': None,
+ 'emails': None,
+ 'created': None,
+ 'updated': None
+}
+
+RIR_WHOIS = {
+ 'arin': {
+ 'server': 'whois.arin.net',
+ 'fields': {
+            'name': r'(NetName):[^\S\n]+(?P<val>.+?)\n',
+            'handle': r'(NetHandle):[^\S\n]+(?P<val>.+?)\n',
+            'description': r'(OrgName|CustName):[^\S\n]+(?P<val>.+?)'
+ '(?=(\n\S):?)',
+            'country': r'(Country):[^\S\n]+(?P<val>.+?)\n',
+            'state': r'(StateProv):[^\S\n]+(?P<val>.+?)\n',
+            'city': r'(City):[^\S\n]+(?P<val>.+?)\n',
+            'address': r'(Address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+            'postal_code': r'(PostalCode):[^\S\n]+(?P<val>.+?)\n',
+ 'emails': (
+                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
+ '[^\S\n]+.*?)*?\n'
+ ),
+            'created': r'(RegDate):[^\S\n]+(?P<val>.+?)\n',
+            'updated': r'(Updated):[^\S\n]+(?P<val>.+?)\n',
+ },
+ 'dt_format': '%Y-%m-%d'
+ },
+ 'ripencc': {
+ 'server': 'whois.ripe.net',
+ 'fields': {
+            'name': r'(netname):[^\S\n]+(?P<val>.+?)\n',
+            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
+            'description': r'(descr):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
+            'address': r'(address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+ 'emails': (
+                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
+ '[^\S\n]+.*?)*?\n'
+ ),
+ 'created': (
+                r'(created):[^\S\n]+(?P<val>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]'
+ '{2}:[0-9]{2}:[0-9]{2}Z).*?\n'
+ ),
+ 'updated': (
+                r'(last-modified):[^\S\n]+(?P<val>[0-9]{4}-[0-9]{2}-[0-9]{2}T'
+ '[0-9]{2}:[0-9]{2}:[0-9]{2}Z).*?\n'
+ )
+ },
+ 'dt_format': '%Y-%m-%dT%H:%M:%SZ'
+ },
+ 'apnic': {
+ 'server': 'whois.apnic.net',
+ 'fields': {
+            'name': r'(netname):[^\S\n]+(?P<val>.+?)\n',
+            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
+            'description': r'(descr):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
+            'address': r'(address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+ 'emails': (
+                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
+ '[^\S\n]+.*?)*?\n'
+ ),
+            'updated': r'(changed):[^\S\n]+.*(?P<val>[0-9]{8}).*?\n'
+ },
+ 'dt_format': '%Y%m%d'
+ },
+ 'lacnic': {
+ 'server': 'whois.lacnic.net',
+ 'fields': {
+            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
+            'description': r'(owner):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
+ 'emails': (
+                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
+ '[^\S\n]+.*?)*?\n'
+ ),
+            'created': r'(created):[^\S\n]+(?P<val>[0-9]{8}).*?\n',
+            'updated': r'(changed):[^\S\n]+(?P<val>[0-9]{8}).*?\n'
+ },
+ 'dt_format': '%Y%m%d'
+ },
+ 'afrinic': {
+ 'server': 'whois.afrinic.net',
+ 'fields': {
+            'name': r'(netname):[^\S\n]+(?P<val>.+?)\n',
+            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
+            'description': r'(descr):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
+            'address': r'(address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
+ 'emails': (
+                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
+ '[^\S\n]+.*?)*?\n'
+ ),
+ }
+ }
+}
+
+RWHOIS = {
+ 'fields': {
+        'cidr': r'(network:IP-Network):(?P<val>.+?)\n',
+        'name': r'(network:ID):(?P<val>.+?)\n',
+ 'description': (
+            r'(network:(Org-Name|Organization(;I)?)):(?P<val>.+?)\n'
+ ),
+        'country': r'(network:(Country|Country-Code)):(?P<val>.+?)\n',
+        'state': r'(network:State):(?P<val>.+?)\n',
+        'city': r'(network:City):(?P<val>.+?)\n',
+        'address': r'(network:Street-Address):(?P<val>.+?)\n',
+        'postal_code': r'(network:Postal-Code):(?P<val>.+?)\n',
+ 'emails': (
+            r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
+ '[^\S\n]+.*?)*?\n'
+ ),
+        'created': r'(network:Created):(?P<val>.+?)\n',
+        'updated': r'(network:Updated):(?P<val>.+?)\n'
+ }
+}
+
+ASN_REFERRALS = {
+ 'whois://whois.ripe.net': 'ripencc',
+ 'whois://whois.apnic.net': 'apnic',
+ 'whois://whois.lacnic.net': 'lacnic',
+ 'whois://whois.afrinic.net': 'afrinic',
+}
+
+
+class Whois:
+ """
+ The class for parsing via whois
+
+ Args:
+ net: A ipwhois.net.Net object.
+
+ Raises:
+ NetError: The parameter provided is not an instance of
+ ipwhois.net.Net
+ IPDefinedError: The address provided is defined (does not need to be
+ resolved).
+ """
+
+ def __init__(self, net):
+
+ from .net import Net
+
+ # ipwhois.net.Net validation
+ if isinstance(net, Net):
+
+ self._net = net
+
+ else:
+
+ raise NetError('The provided net parameter is not an instance of '
+ 'ipwhois.net.Net')
+
+ def _parse_fields(self, response, fields_dict, net_start=None,
+ net_end=None, dt_format=None, field_list=None):
+ """
+ The function for parsing whois fields from a data input.
+
+ Args:
+ response: The response from the whois/rwhois server.
+ fields_dict: The dictionary of fields -> regex search values.
+ net_start: The starting point of the network (if parsing multiple
+ networks).
+ net_end: The ending point of the network (if parsing multiple
+ networks).
+ dt_format: The format of datetime fields if known.
+ field_list: If provided, a list of fields to parse:
+ ['name', 'handle', 'description', 'country', 'state', 'city',
+ 'address', 'postal_code', 'emails', 'created', 'updated']
+
+ Returns:
+ Dictionary: A dictionary of fields provided in fields_dict.
+ """
+
+ ret = {}
+
+ if not field_list:
+
+ field_list = ['name', 'handle', 'description', 'country', 'state',
+ 'city', 'address', 'postal_code', 'emails',
+ 'created', 'updated']
+
+ generate = ((field, pattern) for (field, pattern) in
+ fields_dict.items() if field in field_list)
+
+ for field, pattern in generate:
+
+ pattern = re.compile(
+ str(pattern),
+ re.DOTALL
+ )
+
+ if net_start is not None:
+
+ match = pattern.finditer(response, net_end, net_start)
+
+ elif net_end is not None:
+
+ match = pattern.finditer(response, net_end)
+
+ else:
+
+ match = pattern.finditer(response)
+
+ values = []
+ sub_section_end = None
+ for m in match:
+
+ if sub_section_end:
+
+ if field not in (
+ 'emails'
+ ) and (sub_section_end != (m.start() - 1)):
+
+ break
+
+ try:
+
+ values.append(m.group('val').strip())
+
+ except IndexError:
+
+ pass
+
+ sub_section_end = m.end()
+
+ if len(values) > 0:
+
+ value = None
+ try:
+
+ if field == 'country':
+
+ value = values[0].upper()
+
+ elif field in ['created', 'updated'] and dt_format:
+
+ value = datetime.strptime(
+ values[0],
+ str(dt_format)).isoformat('T')
+
+ else:
+
+ values = unique_everseen(values)
+ value = '\n'.join(values)
+
+ except ValueError as e:
+
+ log.debug('Whois field parsing failed for {0}: {1}'.format(
+ field, e))
+ pass
+
+ ret[field] = value
+
+ return ret
+
+ def _get_nets_arin(self, response):
+ """
+ The function for parsing network blocks from ARIN whois data.
+
+ Args:
+ response: The response from the ARIN whois server.
+
+ Returns:
+            List: A list of dictionaries containing keys: cidr, start, end.
+ """
+
+ nets = []
+
+ # Find the first NetRange value.
+ pattern = re.compile(
+ r'^NetRange:[^\S\n]+(.+)$',
+ re.MULTILINE
+ )
+ temp = pattern.search(response)
+ net_range = None
+ net_range_start = None
+ if temp is not None:
+ net_range = temp.group(1).strip()
+ net_range_start = temp.start()
+
+ # Iterate through all of the networks found, storing the CIDR value
+ # and the start and end positions.
+ for match in re.finditer(
+ r'^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$',
+ response,
+ re.MULTILINE
+ ):
+
+ try:
+
+ net = copy.deepcopy(BASE_NET)
+
+ if len(nets) > 0:
+ temp = pattern.search(response, match.start())
+ net_range = None
+ net_range_start = None
+ if temp is not None:
+ net_range = temp.group(1).strip()
+ net_range_start = temp.start()
+
+ if net_range is not None:
+ if net_range_start < match.start() or len(nets) > 0:
+ net['range'] = net_range
+
+ net['cidr'] = ', '.join(
+ [ip_network(c.strip()).__str__()
+ for c in match.group(1).split(', ')]
+ )
+ net['start'] = match.start()
+ net['end'] = match.end()
+ nets.append(net)
+
+ except ValueError:
+
+ pass
+
+ return nets
+
+ def _get_nets_lacnic(self, response):
+ """
+ The function for parsing network blocks from LACNIC whois data.
+
+ Args:
+ response: The response from the LACNIC whois server.
+
+ Returns:
+            List: A list of dictionaries containing keys: cidr, start, end.
+ """
+
+ nets = []
+
+ # Iterate through all of the networks found, storing the CIDR value
+ # and the start and end positions.
+ for match in re.finditer(
+ r'^(inetnum|inet6num|route):[^\S\n]+(.+?,[^\S\n].+|.+)$',
+ response,
+ re.MULTILINE
+ ):
+
+ try:
+
+ net = copy.deepcopy(BASE_NET)
+ net['range'] = match.group(2).strip()
+
+ temp = []
+ for addr in match.group(2).strip().split(', '):
+
+ count = addr.count('.')
+ if count is not 0 and count < 4:
+
+ addr_split = addr.strip().split('/')
+ for i in range(count + 1, 4):
+ addr_split[0] += '.0'
+
+ addr = '/'.join(addr_split)
+
+ temp.append(ip_network(addr.strip()).__str__())
+
+ net['cidr'] = ', '.join(temp)
+ net['start'] = match.start()
+ net['end'] = match.end()
+ nets.append(net)
+
+ except ValueError:
+
+ pass
+
+ return nets
+
+ def _get_nets_other(self, response):
+ """
+ The function for parsing network blocks from generic whois data.
+
+ Args:
+ response: The response from the whois/rwhois server.
+
+ Returns:
+            List: A list of dictionaries containing keys: cidr, start, end.
+ """
+
+ nets = []
+
+ # Iterate through all of the networks found, storing the CIDR value
+ # and the start and end positions.
+ for match in re.finditer(
+ r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|'
+ '.+)$',
+ response,
+ re.MULTILINE
+ ):
+
+ try:
+
+ net = copy.deepcopy(BASE_NET)
+ net['range'] = match.group(2)
+
+ if match.group(3) and match.group(4):
+
+ addrs = []
+ addrs.extend(summarize_address_range(
+ ip_address(match.group(3).strip()),
+ ip_address(match.group(4).strip())))
+
+ cidr = ', '.join(
+ [i.__str__() for i in collapse_addresses(addrs)]
+ )
+
+ else:
+
+ cidr = ip_network(match.group(2).strip()).__str__()
+
+ net['cidr'] = cidr
+ net['start'] = match.start()
+ net['end'] = match.end()
+ nets.append(net)
+
+ except (ValueError, TypeError):
+
+ pass
+
+ return nets
+
+ def lookup(self, inc_raw=False, retry_count=3, response=None,
+ get_referral=False, extra_blacklist=None,
+ ignore_referral_errors=False, asn_data=None,
+ field_list=None, is_offline=False):
+ """
+ The function for retrieving and parsing whois information for an IP
+ address via port 43/tcp (WHOIS).
+
+ Args:
+ inc_raw: Boolean for whether to include the raw results in the
+ returned dictionary.
+ retry_count: The number of times to retry in case socket errors,
+ timeouts, connection resets, etc. are encountered.
+ response: Optional response object, this bypasses the Whois lookup.
+ get_referral: Boolean for whether to retrieve referral whois
+ information, if available.
+ extra_blacklist: A list of blacklisted whois servers in addition to
+ the global BLACKLIST.
+ ignore_referral_errors: Boolean for whether to ignore and continue
+ when an exception is encountered on referral whois lookups.
+ asn_data: Optional ASN result object, this bypasses the ASN lookup.
+ field_list: If provided, a list of fields to parse:
+ ['name', 'handle', 'description', 'country', 'state', 'city',
+ 'address', 'postal_code', 'emails', 'created', 'updated']
+ is_offline: Boolean for whether to perform lookups offline. If
+ True, response and asn_data must be provided. Primarily used
+ for testing.
+
+ Returns:
+ Dictionary:
+
+ :query: The IP address (String)
+ :asn: The Autonomous System Number (String)
+ :asn_date: The ASN Allocation date (String)
+ :asn_registry: The assigned ASN registry (String)
+ :asn_cidr: The assigned ASN CIDR (String)
+ :asn_country_code: The assigned ASN country code (String)
+ :nets: Dictionaries containing network information which consists
+                of the fields listed in the RIR_WHOIS dictionary. (List)
+ :raw: Raw whois results if the inc_raw parameter is True. (String)
+ :referral: Dictionary of referral whois information if get_referral
+ is True and the server isn't blacklisted. Consists of fields
+ listed in the RWHOIS dictionary.
+ :raw_referral: Raw referral whois results if the inc_raw parameter
+ is True. (String)
+ """
+
+ # Create the return dictionary.
+ results = {
+ 'query': self._net.address_str,
+ 'nets': [],
+ 'raw': None,
+ 'referral': None,
+ 'raw_referral': None
+ }
+
+ # The referral server and port. Only used if get_referral is True.
+ referral_server = None
+ referral_port = 0
+
+ # Only fetch the response if we haven't already.
+ if response is None or (not is_offline and
+ asn_data['asn_registry'] != 'arin'):
+
+ log.debug('Response not given, perform WHOIS lookup for {0}'
+ .format(self._net.address_str))
+
+ # Retrieve the whois data.
+ response = self._net.get_whois(
+ asn_registry=asn_data['asn_registry'], retry_count=retry_count,
+ extra_blacklist=extra_blacklist
+ )
+
+ if get_referral:
+
+ # Search for a referral server.
+ for match in re.finditer(
+ r'^ReferralServer:[^\S\n]+(.+:[0-9]+)$',
+ response,
+ re.MULTILINE
+ ):
+
+ try:
+
+ temp = match.group(1)
+ if 'rwhois://' not in temp: # pragma: no cover
+ raise ValueError
+
+ temp = temp.replace('rwhois://', '').split(':')
+
+ if int(temp[1]) > 65535: # pragma: no cover
+ raise ValueError
+
+ referral_server = temp[0]
+ referral_port = int(temp[1])
+
+ except (ValueError, KeyError): # pragma: no cover
+
+ continue
+
+ break
+
+ # Retrieve the referral whois data.
+ if get_referral and referral_server:
+
+ log.debug('Perform referral WHOIS lookup')
+
+ response_ref = None
+
+ try:
+
+ response_ref = self._net.get_whois(
+ asn_registry='', retry_count=retry_count,
+ server=referral_server, port=referral_port,
+ extra_blacklist=extra_blacklist
+ )
+
+ except (BlacklistError, WhoisLookupError):
+
+ if ignore_referral_errors:
+
+ pass
+
+ else:
+
+ raise
+
+ if response_ref:
+
+ log.debug('Parsing referral WHOIS data')
+
+ if inc_raw:
+
+ results['raw_referral'] = response_ref
+
+ temp_rnet = self._parse_fields(
+ response_ref,
+ RWHOIS['fields'],
+ field_list=field_list
+ )
+
+ # Add the networks to the return dictionary.
+ results['referral'] = temp_rnet
+
+ # If inc_raw parameter is True, add the response to return dictionary.
+ if inc_raw:
+
+ results['raw'] = response
+
+ nets = []
+
+ if asn_data['asn_registry'] == 'arin':
+
+ nets_response = self._get_nets_arin(response)
+
+ elif asn_data['asn_registry'] == 'lacnic':
+
+ nets_response = self._get_nets_lacnic(response)
+
+ else:
+
+ nets_response = self._get_nets_other(response)
+
+ nets.extend(nets_response)
+
+ # Iterate through all of the network sections and parse out the
+ # appropriate fields for each.
+ log.debug('Parsing WHOIS data')
+ for index, net in enumerate(nets):
+
+ section_end = None
+ if index + 1 < len(nets):
+
+ section_end = nets[index + 1]['start']
+
+ try:
+
+ dt_format = RIR_WHOIS[results['asn_registry']]['dt_format']
+
+ except KeyError:
+
+ dt_format = None
+
+ temp_net = self._parse_fields(
+ response,
+ RIR_WHOIS[asn_data['asn_registry']]['fields'],
+ section_end,
+ net['end'],
+ dt_format,
+ field_list
+ )
+
+ # Merge the net dictionaries.
+ net.update(temp_net)
+
+ # The start and end values are no longer needed.
+ del net['start'], net['end']
+
+ # Add the networks to the return dictionary.
+ results['nets'] = nets
+
+ return results
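For reference, the `inetnum` range handling in `_get_nets_other` above can be reproduced with the standard library `ipaddress` helpers. This is only an illustrative sketch with a made-up RIPE-style response; the vendored module imports equivalent names from its own compatibility layer.

```python
# Illustrative sketch of the range-to-CIDR conversion performed by _get_nets_other,
# using the stdlib ipaddress module; the sample whois text below is made up.
import re
from ipaddress import ip_address, summarize_address_range, collapse_addresses

sample = 'inetnum:        193.0.0.0 - 193.0.7.255\nnetname:        EXAMPLE-NET\n'

match = re.search(r'^inetnum:[^\S\n]+(.+?)[^\S\n]-[^\S\n](.+)$', sample, re.MULTILINE)
start = ip_address(match.group(1).strip())
end = ip_address(match.group(2).strip())

# Summarize the start/end pair into networks, then collapse them into CIDR notation.
cidr = ', '.join(str(net) for net in collapse_addresses(summarize_address_range(start, end)))
print(cidr)  # 193.0.0.0/21
```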
diff --git a/plexpy/__init__.py b/plexpy/__init__.py
index b185d82c..d89845ee 100644
--- a/plexpy/__init__.py
+++ b/plexpy/__init__.py
@@ -386,9 +386,11 @@ def dbcheck():
'transcode_key TEXT, rating_key INTEGER, section_id INTEGER, media_type TEXT, started INTEGER, stopped INTEGER, '
'paused_counter INTEGER DEFAULT 0, state TEXT, user_id INTEGER, user TEXT, friendly_name TEXT, '
'ip_address TEXT, machine_id TEXT, player TEXT, platform TEXT, title TEXT, parent_title TEXT, '
- 'grandparent_title TEXT, parent_rating_key INTEGER, grandparent_rating_key INTEGER, '
+ 'grandparent_title TEXT, full_title TEXT, media_index INTEGER, parent_media_index INTEGER, '
+ 'thumb TEXT, parent_thumb TEXT, grandparent_thumb TEXT, year INTEGER, '
+ 'parent_rating_key INTEGER, grandparent_rating_key INTEGER, '
'view_offset INTEGER DEFAULT 0, duration INTEGER, video_decision TEXT, audio_decision TEXT, '
- 'width INTEGER, height INTEGER, container TEXT, video_codec TEXT, audio_codec TEXT, '
+ 'transcode_decision TEXT, width INTEGER, height INTEGER, container TEXT, video_codec TEXT, audio_codec TEXT, '
'bitrate INTEGER, video_resolution TEXT, video_framerate TEXT, aspect_ratio TEXT, '
'audio_channels INTEGER, transcode_protocol TEXT, transcode_container TEXT, '
'transcode_video_codec TEXT, transcode_audio_codec TEXT, transcode_audio_channels INTEGER,'
@@ -643,6 +645,36 @@ def dbcheck():
'ALTER TABLE sessions ADD COLUMN write_attempts INTEGER DEFAULT 0'
)
+ # Upgrade sessions table from earlier versions
+ try:
+ c_db.execute('SELECT transcode_decision FROM sessions')
+ except sqlite3.OperationalError:
+ logger.debug(u"Altering database. Updating database table sessions.")
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN transcode_decision TEXT'
+ )
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN full_title TEXT'
+ )
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN media_index INTEGER'
+ )
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN parent_media_index INTEGER'
+ )
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN thumb TEXT'
+ )
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN parent_thumb TEXT'
+ )
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN grandparent_thumb TEXT'
+ )
+ c_db.execute(
+ 'ALTER TABLE sessions ADD COLUMN year INTEGER'
+ )
+
# Upgrade session_history table from earlier versions
try:
c_db.execute('SELECT reference_id FROM session_history')
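The new block above follows the existing dbcheck() convention for in-place schema upgrades: probe for a new column and add it when the SELECT fails. A minimal, self-contained sketch of that pattern (the CREATE TABLE is an illustrative stand-in, not the full PlexPy schema):

```python
# Minimal sketch of the probe-then-ALTER upgrade pattern used by dbcheck();
# the table below stands in for an older database missing the new column.
import sqlite3

conn = sqlite3.connect(':memory:')
c_db = conn.cursor()
c_db.execute('CREATE TABLE sessions (session_key INTEGER, rating_key INTEGER)')

try:
    c_db.execute('SELECT transcode_decision FROM sessions')
except sqlite3.OperationalError:
    # Older databases are missing the column, so add it in place.
    c_db.execute('ALTER TABLE sessions ADD COLUMN transcode_decision TEXT')

conn.commit()
```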
diff --git a/plexpy/activity_handler.py b/plexpy/activity_handler.py
index 708eb0df..313fef77 100644
--- a/plexpy/activity_handler.py
+++ b/plexpy/activity_handler.py
@@ -45,6 +45,12 @@ class ActivityHandler(object):
return None
+ def get_rating_key(self):
+ if self.is_valid_session():
+ return int(self.timeline['ratingKey'])
+
+ return None
+
def get_live_session(self):
pms_connect = pmsconnect.PmsConnect()
session_list = pms_connect.get_current_activity()
@@ -63,7 +69,8 @@ class ActivityHandler(object):
def on_start(self):
if self.is_valid_session() and self.get_live_session():
- logger.debug(u"PlexPy ActivityHandler :: Session %s has started." % str(self.get_session_key()))
+ logger.debug(u"PlexPy ActivityHandler :: Session %s has started with ratingKey %s."
+ % (str(self.get_session_key()), str(self.get_rating_key())))
session = self.get_live_session()
@@ -127,7 +134,8 @@ class ActivityHandler(object):
monitor_proc.write_session_history(session=db_session)
# Remove the session from our temp session table
- logger.debug(u"PlexPy ActivityHandler :: Removing session %s from session queue" % str(self.get_session_key()))
+ logger.debug(u"PlexPy ActivityHandler :: Removing sessionKey %s ratingKey %s from session queue"
+ % (str(self.get_session_key()), str(self.get_rating_key())))
ap.delete_session(session_key=self.get_session_key())
def on_pause(self):
diff --git a/plexpy/activity_pinger.py b/plexpy/activity_pinger.py
index 8ab3bf31..c07ea77b 100644
--- a/plexpy/activity_pinger.py
+++ b/plexpy/activity_pinger.py
@@ -230,7 +230,8 @@ def check_active_sessions(ws_request=False):
new_session = monitor_process.write_session(session)
if new_session:
- logger.debug(u"PlexPy Monitor :: Session %s has started." % session['session_key'])
+ logger.debug(u"PlexPy Monitor :: Session %s has started with ratingKey %s."
+ % (session['session_key'], session['rating_key']))
else:
logger.debug(u"PlexPy Monitor :: Unable to read session list.")
diff --git a/plexpy/activity_processor.py b/plexpy/activity_processor.py
index 20e799aa..9e731514 100644
--- a/plexpy/activity_processor.py
+++ b/plexpy/activity_processor.py
@@ -48,6 +48,13 @@ class ActivityProcessor(object):
'title': session['title'],
'parent_title': session['parent_title'],
'grandparent_title': session['grandparent_title'],
+ 'full_title': session['full_title'],
+ 'media_index': session['media_index'],
+ 'parent_media_index': session['parent_media_index'],
+ 'thumb': session['thumb'],
+ 'parent_thumb': session['parent_thumb'],
+ 'grandparent_thumb': session['grandparent_thumb'],
+ 'year': session['year'],
'friendly_name': session['friendly_name'],
#'ip_address': session['ip_address'],
'player': session['player'],
@@ -58,6 +65,7 @@ class ActivityProcessor(object):
'duration': session['duration'],
'video_decision': session['video_decision'],
'audio_decision': session['audio_decision'],
+ 'transcode_decision': session['transcode_decision'],
'width': session['width'],
'height': session['height'],
'container': session['container'],
@@ -275,14 +283,6 @@ class ActivityProcessor(object):
# Write the session_history_media_info table
- # Generate a combined transcode decision value
- if session['video_decision'] == 'transcode' or session['audio_decision'] == 'transcode':
- transcode_decision = 'transcode'
- elif session['video_decision'] == 'copy' or session['audio_decision'] == 'copy':
- transcode_decision = 'copy'
- else:
- transcode_decision = 'direct play'
-
# logger.debug(u"PlexPy ActivityProcessor :: Attempting to write to session_history_media_info table...")
query = 'INSERT INTO session_history_media_info (id, rating_key, video_decision, audio_decision, ' \
'duration, width, height, container, video_codec, audio_codec, bitrate, video_resolution, ' \
@@ -298,7 +298,7 @@ class ActivityProcessor(object):
session['audio_channels'], session['transcode_protocol'], session['transcode_container'],
session['transcode_video_codec'], session['transcode_audio_codec'],
session['transcode_audio_channels'], session['transcode_width'], session['transcode_height'],
- transcode_decision]
+ session['transcode_decision']]
# logger.debug(u"PlexPy ActivityProcessor :: Writing session_history_media_info transaction...")
self.db.action(query=query, args=args)
@@ -310,14 +310,6 @@ class ActivityProcessor(object):
genres = ";".join(metadata['genres'])
labels = ";".join(metadata['labels'])
- # Build media item title
- if session['media_type'] == 'episode' or session['media_type'] == 'track':
- full_title = '%s - %s' % (metadata['grandparent_title'], metadata['title'])
- elif session['media_type'] == 'movie':
- full_title = metadata['title']
- else:
- full_title = metadata['title']
-
# logger.debug(u"PlexPy ActivityProcessor :: Attempting to write to session_history_metadata table...")
query = 'INSERT INTO session_history_metadata (id, rating_key, parent_rating_key, ' \
'grandparent_rating_key, title, parent_title, grandparent_title, full_title, media_index, ' \
@@ -328,7 +320,7 @@ class ActivityProcessor(object):
'?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
args = [session['rating_key'], session['parent_rating_key'], session['grandparent_rating_key'],
- session['title'], session['parent_title'], session['grandparent_title'], full_title,
+ session['title'], session['parent_title'], session['grandparent_title'], session['full_title'],
metadata['media_index'], metadata['parent_media_index'], metadata['section_id'], metadata['thumb'],
metadata['parent_thumb'], metadata['grandparent_thumb'], metadata['art'], session['media_type'],
metadata['year'], metadata['originally_available_at'], metadata['added_at'], metadata['updated_at'],
diff --git a/plexpy/config.py b/plexpy/config.py
index 344cf7c3..f228056c 100644
--- a/plexpy/config.py
+++ b/plexpy/config.py
@@ -59,6 +59,7 @@ _CONFIG_DEFINITIONS = {
'PMS_PLATFORM': (str, 'PMS', ''),
'PMS_VERSION': (str, 'PMS', ''),
'PMS_UPDATE_CHANNEL': (str, 'PMS', 'public'),
+ 'PMS_UPDATE_DISTRO': (str, 'PMS', ''),
'PMS_UPDATE_DISTRO_BUILD': (str, 'PMS', ''),
'TIME_FORMAT': (str, 'General', 'HH:mm'),
'ANON_REDIRECT': (str, 'General', 'http://dereferer.org/?'),
@@ -191,6 +192,7 @@ _CONFIG_DEFINITIONS = {
'GROWL_ON_PMSUPDATE': (int, 'Growl', 0),
'GROWL_ON_CONCURRENT': (int, 'Growl', 0),
'GROWL_ON_NEWDEVICE': (int, 'Growl', 0),
+ 'HISTORY_TABLE_ACTIVITY': (int, 'General', 1),
'HOME_SECTIONS': (list, 'General', ['current_activity','watch_stats','library_stats','recently_added']),
'HOME_LIBRARY_CARDS': (list, 'General', ['first_run']),
'HOME_STATS_LENGTH': (int, 'General', 30),
@@ -510,6 +512,7 @@ _CONFIG_DEFINITIONS = {
'TELEGRAM_BOT_TOKEN': (str, 'Telegram', ''),
'TELEGRAM_ENABLED': (int, 'Telegram', 0),
'TELEGRAM_CHAT_ID': (str, 'Telegram', ''),
+ 'TELEGRAM_DISABLE_WEB_PREVIEW': (int, 'Telegram', 0),
'TELEGRAM_HTML_SUPPORT': (int, 'Telegram', 1),
'TELEGRAM_INCL_POSTER': (int, 'Telegram', 0),
'TELEGRAM_INCL_SUBJECT': (int, 'Telegram', 1),
diff --git a/plexpy/database.py b/plexpy/database.py
index 7347f764..d35adcda 100644
--- a/plexpy/database.py
+++ b/plexpy/database.py
@@ -146,7 +146,7 @@ class MonitorDatabase(object):
break
except sqlite3.OperationalError as e:
- if "unable to open database file" in e.message or "database is locked" in e.message:
+ if "unable to open database file" in e or "database is locked" in e:
logger.warn(u"PlexPy Database :: Database Error: %s", e)
attempts += 1
time.sleep(1)
diff --git a/plexpy/datafactory.py b/plexpy/datafactory.py
index 6c9a0c7c..649b8f8b 100644
--- a/plexpy/datafactory.py
+++ b/plexpy/datafactory.py
@@ -59,45 +59,101 @@ class DataFactory(object):
group_by = ['session_history.reference_id'] if grouping else ['session_history.id']
- columns = ['session_history.reference_id',
- 'session_history.id',
- 'started AS date',
- 'MIN(started) AS started',
- 'MAX(stopped) AS stopped',
- 'SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - \
- SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',
- 'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',
- 'session_history.user_id',
- 'session_history.user',
- '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
- THEN users.username ELSE users.friendly_name END) AS friendly_name',
- 'platform',
- 'player',
- 'ip_address',
- 'session_history.media_type',
- 'session_history_metadata.rating_key',
- 'session_history_metadata.parent_rating_key',
- 'session_history_metadata.grandparent_rating_key',
- 'session_history_metadata.full_title',
- 'session_history_metadata.parent_title',
- 'session_history_metadata.year',
- 'session_history_metadata.media_index',
- 'session_history_metadata.parent_media_index',
- 'session_history_metadata.thumb',
- 'session_history_metadata.parent_thumb',
- 'session_history_metadata.grandparent_thumb',
- 'MAX((CASE WHEN (view_offset IS NULL OR view_offset = "") THEN 0.1 ELSE view_offset * 1.0 END) / \
- (CASE WHEN (session_history_metadata.duration IS NULL OR session_history_metadata.duration = "") \
- THEN 1.0 ELSE session_history_metadata.duration * 1.0 END) * 100) AS percent_complete',
- 'session_history_media_info.transcode_decision',
- 'COUNT(*) AS group_count',
- 'GROUP_CONCAT(session_history.id) AS group_ids'
- ]
+ columns = [
+ 'session_history.reference_id',
+ 'session_history.id',
+ 'started AS date',
+ 'MIN(started) AS started',
+ 'MAX(stopped) AS stopped',
+ 'SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - \
+ SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',
+ 'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',
+ 'session_history.user_id',
+ 'session_history.user',
+ '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
+ THEN users.username ELSE users.friendly_name END) AS friendly_name',
+ 'platform',
+ 'player',
+ 'ip_address',
+ 'session_history.media_type',
+ 'session_history_metadata.rating_key',
+ 'session_history_metadata.parent_rating_key',
+ 'session_history_metadata.grandparent_rating_key',
+ 'session_history_metadata.full_title',
+ 'session_history_metadata.parent_title',
+ 'session_history_metadata.year',
+ 'session_history_metadata.media_index',
+ 'session_history_metadata.parent_media_index',
+ 'session_history_metadata.thumb',
+ 'session_history_metadata.parent_thumb',
+ 'session_history_metadata.grandparent_thumb',
+ 'MAX((CASE WHEN (view_offset IS NULL OR view_offset = "") THEN 0.1 ELSE view_offset * 1.0 END) / \
+ (CASE WHEN (session_history_metadata.duration IS NULL OR session_history_metadata.duration = "") \
+ THEN 1.0 ELSE session_history_metadata.duration * 1.0 END) * 100) AS percent_complete',
+ 'session_history_media_info.transcode_decision',
+ 'COUNT(*) AS group_count',
+ 'GROUP_CONCAT(session_history.id) AS group_ids',
+ 'NULL AS state',
+ 'NULL AS session_key'
+ ]
+
+ if plexpy.CONFIG.HISTORY_TABLE_ACTIVITY:
+ table_name_union = 'sessions'
+ # Very hacky way to match the custom where parameters for the unioned table
+ custom_where_union = [[c[0].split('.')[-1], c[1]] for c in custom_where]
+ group_by_union = ['session_key']
+
+ columns_union = [
+ 'NULL AS reference_id',
+ 'NULL AS id',
+ 'started AS date',
+ 'started',
+ 'stopped',
+ 'strftime("%s", "now") - started - \
+ SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',
+ 'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',
+ 'user_id',
+ 'user',
+ '(CASE WHEN friendly_name IS NULL OR TRIM(friendly_name) = "" \
+ THEN user ELSE friendly_name END) AS friendly_name',
+ 'platform',
+ 'player',
+ 'ip_address',
+ 'media_type',
+ 'rating_key',
+ 'parent_rating_key',
+ 'grandparent_rating_key',
+ 'full_title',
+ 'parent_title',
+ 'year',
+ 'media_index',
+ 'parent_media_index',
+ 'thumb',
+ 'parent_thumb',
+ 'grandparent_thumb',
+ 'MAX((CASE WHEN (view_offset IS NULL OR view_offset = "") THEN 0.1 ELSE view_offset * 1.0 END) / \
+ (CASE WHEN (duration IS NULL OR duration = "") \
+ THEN 1.0 ELSE duration * 1.0 END) * 100) AS percent_complete',
+ 'transcode_decision',
+ 'NULL AS group_count',
+ 'NULL AS group_ids',
+ 'state',
+ 'session_key'
+ ]
+
+ else:
+ table_name_union = None
+ custom_where_union = group_by_union = columns_union = []
+
try:
query = data_tables.ssp_query(table_name='session_history',
+ table_name_union=table_name_union,
columns=columns,
+ columns_union=columns_union,
custom_where=custom_where,
+ custom_where_union=custom_where_union,
group_by=group_by,
+ group_by_union=group_by_union,
join_types=['LEFT OUTER JOIN',
'JOIN',
'JOIN'],
@@ -170,7 +226,9 @@ class DataFactory(object):
'percent_complete': int(round(item['percent_complete'])),
'watched_status': watched_status,
'group_count': item['group_count'],
- 'group_ids': item['group_ids']
+ 'group_ids': item['group_ids'],
+ 'state': item['state'],
+ 'session_key': item['session_key']
}
rows.append(row)
@@ -756,12 +814,13 @@ class DataFactory(object):
return library_stats
- def get_stream_details(self, row_id=None):
+ def get_stream_details(self, row_id=None, session_key=None):
monitor_db = database.MonitorDatabase()
user_cond = ''
+ table = 'session_history' if row_id else 'sessions'
if session.get_session_user_id():
- user_cond = 'AND session_history.user_id = %s ' % session.get_session_user_id()
+ user_cond = 'AND %s.user_id = %s ' % (table, session.get_session_user_id())
if row_id:
query = 'SELECT container, bitrate, video_resolution, width, height, aspect_ratio, video_framerate, ' \
@@ -773,6 +832,14 @@ class DataFactory(object):
'JOIN session_history_metadata ON session_history_media_info.id = session_history_metadata.id ' \
'WHERE session_history_media_info.id = ? %s' % user_cond
result = monitor_db.select(query, args=[row_id])
+ elif session_key:
+ query = 'SELECT container, bitrate, video_resolution, width, height, aspect_ratio, video_framerate, ' \
+ 'video_codec, audio_codec, audio_channels, video_decision, transcode_video_codec, transcode_height, ' \
+ 'transcode_width, audio_decision, transcode_audio_codec, transcode_audio_channels, transcode_container, ' \
+ 'media_type, title, grandparent_title ' \
+ 'FROM sessions ' \
+ 'WHERE session_key = ? %s' % user_cond
+ result = monitor_db.select(query, args=[session_key])
else:
return None
diff --git a/plexpy/datatables.py b/plexpy/datatables.py
index 7c4bac4d..951932d7 100644
--- a/plexpy/datatables.py
+++ b/plexpy/datatables.py
@@ -30,9 +30,13 @@ class DataTables(object):
def ssp_query(self,
table_name=None,
+ table_name_union=None,
columns=[],
+ columns_union=[],
custom_where=[],
+ custom_where_union=[],
group_by=[],
+ group_by_union=[],
join_types=[],
join_tables=[],
join_evals=[],
@@ -42,15 +46,6 @@ class DataTables(object):
logger.error('PlexPy DataTables :: No table name received.')
return None
- # Set default variable values
- parameters = {}
- args = []
- group = ''
- order = ''
- where = ''
- join = ''
- c_where = ''
-
# Fetch all our parameters
if kwargs.get('json_data'):
parameters = helpers.process_json_kwargs(json_kwargs=kwargs.get('json_data'))
@@ -59,132 +54,47 @@ class DataTables(object):
'named json_data.')
return None
- dt_columns = parameters['columns']
extracted_columns = self.extract_columns(columns=columns)
+ join = self.build_join(join_types, join_tables, join_evals)
+ group = self.build_grouping(group_by)
+ c_where, cw_args = self.build_custom_where(custom_where)
+ order = self.build_order(parameters['order'],
+ extracted_columns['column_named'],
+ parameters['columns'])
+ where, w_args = self.build_where(parameters['search']['value'],
+ extracted_columns['column_named'],
+ parameters['columns'])
- # Build grouping
- if group_by:
- for g in group_by:
- group += g + ', '
- if group:
- grouping = True
- group = 'GROUP BY ' + group.rstrip(', ')
+ args = cw_args + w_args
+
+ # Build union parameters
+ if table_name_union:
+ extracted_columns_union = self.extract_columns(columns=columns_union)
+ group_u = self.build_grouping(group_by_union)
+ c_where_u, cwu_args = self.build_custom_where(custom_where_union)
+
+ args += cwu_args
+
+ union = 'UNION SELECT %s FROM %s %s %s' % (extracted_columns_union['column_string'],
+ table_name_union,
+ c_where_u,
+ group_u)
else:
- grouping = False
+ union = ''
- # Build join parameters
- if join_types:
- counter = 0
- for join_type in join_types:
- if join_type.upper() == 'LEFT OUTER JOIN':
- join_item = 'LEFT OUTER JOIN %s ON %s = %s ' % \
- (join_tables[counter], join_evals[counter][0], join_evals[counter][1])
- elif join_type.upper() == 'JOIN' or join_type.upper() == 'INNER JOIN':
- join_item = 'JOIN %s ON %s = %s ' % \
- (join_tables[counter], join_evals[counter][0], join_evals[counter][1])
- else:
- join_item = ''
- counter += 1
- join += join_item
-
- # Build custom where parameters
- if custom_where:
- for w in custom_where:
- if isinstance(w[1], (list, tuple)) and len(w[1]):
- c_where += '('
- for w_ in w[1]:
- if w_ == None:
- c_where += w[0] + ' IS NULL OR '
- else:
- c_where += w[0] + ' = ? OR '
- args.append(w_)
- c_where = c_where.rstrip(' OR ') + ') AND '
- else:
- if w[1] == None:
- c_where += w[0] + ' IS NULL AND '
- else:
- c_where += w[0] + ' = ? AND '
- args.append(w[1])
-
- if c_where:
- c_where = 'WHERE ' + c_where.rstrip(' AND ')
-
- # Build ordering
- for o in parameters['order']:
- sort_order = ' COLLATE NOCASE'
- if o['dir'] == 'desc':
- sort_order = ' COLLATE NOCASE DESC'
- # We first see if a name was sent though for the column sort.
- if dt_columns[int(o['column'])]['data']:
- # We have a name, now check if it's a valid column name for our query
- # so we don't just inject a random value
- if any(d.lower() == dt_columns[int(o['column'])]['data'].lower()
- for d in extracted_columns['column_named']):
- order += dt_columns[int(o['column'])]['data'] + '%s' % sort_order
- else:
- # if we receive a bogus name, rather not sort at all.
- pass
- # If no name exists for the column, just use the column index to sort
- else:
- order += extracted_columns['column_named'][int(o['column'])]
-
- order += ', '
-
- order = order.rstrip(', ')
- if order:
- order = 'ORDER BY ' + order
-
- # Build where parameters
- if parameters['search']['value']:
- counter = 0
- for s in parameters['columns']:
- if s['searchable']:
- # We first see if a name was sent though for the column search.
- if s['data']:
- # We have a name, now check if it's a valid column name for our query
- # so we don't just inject a random value
- if any(d.lower() == s['data'].lower() for d in extracted_columns['column_named']):
- where += s['data'] + ' LIKE ? OR '
- args.append('%' + parameters['search']['value'] + '%')
- else:
- # if we receive a bogus name, rather not search at all.
- pass
- # If no name exists for the column, just use the column index to search
- else:
- where += extracted_columns['column_named'][counter] + ' LIKE ? OR '
- args.append('%' + parameters['search']['value'] + '%')
-
- counter += 1
-
- if where:
- where = 'WHERE ' + where.rstrip(' OR ')
-
- # Build our queries
- if grouping:
- if c_where == '':
- query = 'SELECT * FROM (SELECT %s FROM %s %s %s) %s %s' \
- % (extracted_columns['column_string'], table_name, join, group,
- where, order)
- else:
- query = 'SELECT * FROM (SELECT %s FROM %s %s %s %s) %s %s' \
- % (extracted_columns['column_string'], table_name, join, c_where, group,
- where, order)
- else:
- if c_where == '':
- query = 'SELECT %s FROM %s %s %s %s' \
- % (extracted_columns['column_string'], table_name, join, where,
- order)
- else:
- query = 'SELECT * FROM (SELECT %s FROM %s %s %s %s) %s' \
- % (extracted_columns['column_string'], table_name, join, where,
- order, c_where)
+ # Build the query
+ query = 'SELECT * FROM (SELECT %s FROM %s %s %s %s %s) %s %s' \
+ % (extracted_columns['column_string'], table_name, join, c_where, group, union, where, order)
# logger.debug(u"Query: %s" % query)
# Execute the query
filtered = self.ssp_db.select(query, args=args)
+ # Remove NULL rows
+ filtered = [row for row in filtered if not all(v is None for v in row.values())]
+
# Build grand totals
totalcount = self.ssp_db.select('SELECT COUNT(id) as total_count from %s' % table_name)[0]['total_count']
@@ -205,6 +115,110 @@ class DataTables(object):
return output
+ def build_grouping(self, group_by=[]):
+ # Build grouping
+ group = ''
+
+ for g in group_by:
+ group += g + ', '
+ if group:
+ group = 'GROUP BY ' + group.rstrip(', ')
+
+ return group
+
+ def build_join(self, join_types=[], join_tables=[], join_evals=[]):
+ # Build join parameters
+ join = ''
+
+ for i, join_type in enumerate(join_types):
+ if join_type.upper() == 'LEFT OUTER JOIN':
+ join += 'LEFT OUTER JOIN %s ON %s = %s ' % (join_tables[i], join_evals[i][0], join_evals[i][1])
+ elif join_type.upper() == 'JOIN' or join_type.upper() == 'INNER JOIN':
+ join += 'JOIN %s ON %s = %s ' % (join_tables[i], join_evals[i][0], join_evals[i][1])
+
+ return join
+
+ def build_custom_where(self, custom_where=[]):
+ # Build custom where parameters
+ c_where = ''
+ args = []
+
+ for w in custom_where:
+ if isinstance(w[1], (list, tuple)) and len(w[1]):
+ c_where += '('
+ for w_ in w[1]:
+ if w_ is None:
+ c_where += w[0] + ' IS NULL OR '
+ else:
+ c_where += w[0] + ' = ? OR '
+ args.append(w_)
+ c_where = c_where.rstrip(' OR ') + ') AND '
+ else:
+ if w[1] is None:
+ c_where += w[0] + ' IS NULL AND '
+ else:
+ c_where += w[0] + ' = ? AND '
+ args.append(w[1])
+
+ if c_where:
+ c_where = 'WHERE ' + c_where.rstrip(' AND ')
+
+ return c_where, args
+
+ def build_order(self, order_param=[], columns=[], dt_columns=[]):
+ # Build ordering
+ order = ''
+
+ for o in order_param:
+ sort_order = ' COLLATE NOCASE'
+ if o['dir'] == 'desc':
+ sort_order += ' DESC'
+ # We first see if a name was sent through for the column sort.
+ if dt_columns[int(o['column'])]['data']:
+ # We have a name, now check if it's a valid column name for our query
+ # so we don't just inject a random value
+ if any(d.lower() == dt_columns[int(o['column'])]['data'].lower()
+ for d in columns):
+ order += dt_columns[int(o['column'])]['data'] + '%s, ' % sort_order
+ else:
+ # if we receive a bogus name, rather not sort at all.
+ pass
+ # If no name exists for the column, just use the column index to sort
+ else:
+ order += columns[int(o['column'])] + ', '
+
+ if order:
+ order = 'ORDER BY ' + order.rstrip(', ')
+
+ return order
+
+ def build_where(self, search_param='', columns=[], dt_columns=[]):
+ # Build where parameters
+ where = ''
+ args = []
+
+ if search_param:
+ for i, s in enumerate(dt_columns):
+ if s['searchable']:
+ # We first see if a name was sent through for the column search.
+ if s['data']:
+ # We have a name, now check if it's a valid column name for our query
+ # so we don't just inject a random value
+ if any(d.lower() == s['data'].lower() for d in columns):
+ where += s['data'] + ' LIKE ? OR '
+ args.append('%' + search_param + '%')
+ else:
+ # if we receive a bogus name, rather not search at all.
+ pass
+ # If no name exists for the column, just use the column index to search
+ else:
+ where += columns[i] + ' LIKE ? OR '
+ args.append('%' + search_param + '%')
+ if where:
+ where = 'WHERE ' + where.rstrip(' OR ')
+
+ return where, args
+
# This method extracts column data from our column list
# The first parameter is required, the match_columns parameter is optional and will cause the function to
# only return results if the value also exists in the match_columns 'data' field
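As a rough usage sketch of the extracted helpers, here is what build_custom_where() produces for inputs shaped like the history table's custom_where. It assumes an initialized PlexPy runtime (so a DataTables instance can construct its database handle), and the values are made up.

```python
# Hedged sketch: what build_custom_where() returns for a simple and a list-valued
# condition. Constructing DataTables assumes an initialized PlexPy environment.
from plexpy import datatables

data_tables = datatables.DataTables()

custom_where = [['session_history.user_id', 12345],
                ['session_history.media_type', ['movie', 'episode']]]

c_where, args = data_tables.build_custom_where(custom_where)
# c_where -> 'WHERE session_history.user_id = ? AND (session_history.media_type = ? OR session_history.media_type = ?)'
# args    -> [12345, 'movie', 'episode']
```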
diff --git a/plexpy/helpers.py b/plexpy/helpers.py
index 37b1aa5e..07a6fb13 100644
--- a/plexpy/helpers.py
+++ b/plexpy/helpers.py
@@ -20,6 +20,7 @@ import geoip2.database, geoip2.errors
import gzip
import hashlib
import imghdr
+import ipwhois, ipwhois.exceptions, ipwhois.utils
from IPy import IP
import json
import math
@@ -602,12 +603,11 @@ def geoip_lookup(ip_address):
reader = geoip2.database.Reader(plexpy.CONFIG.GEOIP_DB)
geo = reader.city(ip_address)
reader.close()
+ except ValueError as e:
+ return 'Invalid IP address provided: %s.' % ip_address
except IOError as e:
return 'Missing GeoLite2 database. Please reinstall from the ' \
'Settings page.'
- except ValueError as e:
- return 'Unable to read GeoLite2 database. Please reinstall from the ' \
- 'Settings page.'
except maxminddb.InvalidDatabaseError as e:
return 'Invalid GeoLite2 database. Please reinstall from the ' \
'Settings page.'
@@ -629,6 +629,42 @@ def geoip_lookup(ip_address):
return geo_info
+def whois_lookup(ip_address):
+
+ nets = []
+ err = None
+ try:
+ whois = ipwhois.IPWhois(ip_address).lookup_whois(retry_count=0)
+ countries = ipwhois.utils.get_countries()
+ nets = whois['nets']
+ for net in nets:
+ net['country'] = countries[net['country']]
+ if net['postal_code']:
+ net['postal_code'] = net['postal_code'].replace('-', ' ')
+ except ValueError as e:
+ err = 'Invalid IP address provided: %s.' % ip_address
+ except ipwhois.exceptions.IPDefinedError as e:
+ err = '%s' % e
+ except ipwhois.exceptions.ASNRegistryError as e:
+ err = '%s' % e
+ except Exception as e:
+ err = 'Error: %s' % e
+
+ host = ''
+ try:
+ host = ipwhois.Net(ip_address).get_host(retry_count=0)[0]
+ except Exception as e:
+ host = 'Not available'
+
+ whois_info = {"host": host,
+ "nets": nets
+ }
+
+ if err:
+ whois_info['error'] = err
+
+ return whois_info
+
# Taken from SickRage
def anon_url(*url):
"""
diff --git a/plexpy/notifiers.py b/plexpy/notifiers.py
index c480de4e..5e3d3e06 100644
--- a/plexpy/notifiers.py
+++ b/plexpy/notifiers.py
@@ -1396,9 +1396,9 @@ class TwitterNotifier(object):
poster_url = metadata.get('poster_url','')
if self.incl_subject:
- self._send_tweet(subject + '\r\n' + message, attachment=poster_url)
+ return self._send_tweet(subject + '\r\n' + message, attachment=poster_url)
else:
- self._send_tweet(message, attachment=poster_url)
+ return self._send_tweet(message, attachment=poster_url)
def test_notify(self):
return self._send_tweet("This is a test notification from PlexPy at " + helpers.now())
@@ -1839,6 +1839,7 @@ class TELEGRAM(object):
self.enabled = plexpy.CONFIG.TELEGRAM_ENABLED
self.bot_token = plexpy.CONFIG.TELEGRAM_BOT_TOKEN
self.chat_id = plexpy.CONFIG.TELEGRAM_CHAT_ID
+ self.disable_web_preview = plexpy.CONFIG.TELEGRAM_DISABLE_WEB_PREVIEW
self.html_support = plexpy.CONFIG.TELEGRAM_HTML_SUPPORT
self.incl_poster = plexpy.CONFIG.TELEGRAM_INCL_POSTER
self.incl_subject = plexpy.CONFIG.TELEGRAM_INCL_SUBJECT
@@ -1880,9 +1881,13 @@ class TELEGRAM(object):
logger.warn(u"PlexPy Notifiers :: Telegram poster failed.")
data['text'] = text
+
if self.html_support:
data['parse_mode'] = 'HTML'
+ if self.disable_web_preview:
+ data['disable_web_page_preview'] = True
+
http_handler = HTTPSConnection("api.telegram.org")
http_handler.request('POST',
'/bot%s/%s' % (self.bot_token, 'sendMessage'),
@@ -1945,7 +1950,13 @@ class TELEGRAM(object):
{'label': 'Enable HTML Support',
'value': self.html_support,
'name': 'telegram_html_support',
- 'description': 'Style your messages using these HTML tags: b, i, a[href], code, pre',
+ 'description': 'Style your messages using these HTML tags: b, i, a[href], code, pre.',
+ 'input_type': 'checkbox'
+ },
+ {'label': 'Disable Web Page Previews',
+ 'value': self.disable_web_preview,
+ 'name': 'telegram_disable_web_preview',
+ 'description': 'Disables automatic link previews for links in the message.',
'input_type': 'checkbox'
}
]
@@ -2024,9 +2035,9 @@ class SLACK(object):
http_handler = HTTPSConnection(slackhost)
http_handler.request("POST",
- slackpath,
- headers={'Content-type': "application/x-www-form-urlencoded"},
- body=json.dumps(data))
+ slackpath,
+ headers={'Content-type': "application/json"},
+ body=json.dumps(data))
response = http_handler.getresponse()
request_status = response.status
@@ -2431,9 +2442,9 @@ class FacebookNotifier(object):
attachment['description'] = subtitle
if self.incl_subject:
- self._post_facebook(subject + '\r\n' + message, attachment=attachment)
+ return self._post_facebook(subject + '\r\n' + message, attachment=attachment)
else:
- self._post_facebook(message, attachment=attachment)
+ return self._post_facebook(message, attachment=attachment)
def test_notify(self):
return self._post_facebook(u"PlexPy Notifiers :: This is a test notification from PlexPy at " + helpers.now())
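To illustrate the Slack/Mattermost header fix above, a minimal standalone request with the corrected application/json content type; the webhook path and message text below are placeholders.

```python
# Minimal sketch of the corrected Slack/Mattermost webhook call; hook path and
# payload are placeholders. Uses httplib to match the Python 2 codebase.
import json
from httplib import HTTPSConnection

slackhost = 'hooks.slack.com'
slackpath = '/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX'
data = {'text': 'PlexPy test notification'}

http_handler = HTTPSConnection(slackhost)
http_handler.request('POST',
                     slackpath,
                     headers={'Content-type': 'application/json'},
                     body=json.dumps(data))
print(http_handler.getresponse().status)  # 200 on success
```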
diff --git a/plexpy/plextv.py b/plexpy/plextv.py
index 22106351..5cdf6e78 100644
--- a/plexpy/plextv.py
+++ b/plexpy/plextv.py
@@ -615,6 +615,9 @@ class PlexTV(object):
return clean_servers
def get_plex_downloads(self):
+ logger.debug(u"PlexPy PlexTV :: Retrieving current server version.")
+ pmsconnect.PmsConnect().set_server_version()
+
logger.debug(u"PlexPy PlexTV :: Plex update channel is %s." % plexpy.CONFIG.PMS_UPDATE_CHANNEL)
plex_downloads = self.get_plextv_downloads(plexpass=(plexpy.CONFIG.PMS_UPDATE_CHANNEL == 'plexpass'))
@@ -647,7 +650,8 @@ class PlexTV(object):
# Get proper download
releases = platform_downloads.get('releases', [{}])
- release = next((r for r in releases if r['build'] == plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD), releases[0])
+ release = next((r for r in releases if r['distro'] == plexpy.CONFIG.PMS_UPDATE_DISTRO and
+ r['build'] == plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD), releases[0])
download_info = {'update_available': v_new > v_old,
'platform': platform_downloads.get('name'),
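The stricter release selection above matters when several distros share the same build string. A small standalone illustration with made-up release entries and config values:

```python
# Standalone illustration of matching on distro and build together; the release
# dicts and config values below are made up for the example.
releases = [{'distro': 'ubuntu', 'build': 'linux-ubuntu-x86_64', 'label': 'Ubuntu'},
            {'distro': 'debian', 'build': 'linux-ubuntu-x86_64', 'label': 'Debian'}]

pms_update_distro = 'debian'
pms_update_distro_build = 'linux-ubuntu-x86_64'

# Matching on the build alone would pick the first (Ubuntu) entry; matching on
# distro and build selects the Debian release, falling back to the first entry
# when nothing matches.
release = next((r for r in releases if r['distro'] == pms_update_distro and
                r['build'] == pms_update_distro_build), releases[0])
print(release['label'])  # Debian
```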
diff --git a/plexpy/pmsconnect.py b/plexpy/pmsconnect.py
index d01fa4a2..e06eee3e 100644
--- a/plexpy/pmsconnect.py
+++ b/plexpy/pmsconnect.py
@@ -1075,6 +1075,9 @@ class PmsConnect(object):
transcode_container = ''
transcode_protocol = ''
+ # Generate a combined transcode decision value
+ transcode_decision = audio_decision
+
user_details = user_data.get_details(
user=helpers.get_xml_attr(session.getElementsByTagName('User')[0], 'title'))
@@ -1109,6 +1112,8 @@ class PmsConnect(object):
'grandparent_title': helpers.get_xml_attr(session, 'grandparentTitle'),
'parent_title': helpers.get_xml_attr(session, 'parentTitle'),
'title': helpers.get_xml_attr(session, 'title'),
+ 'full_title': '%s - %s' % (helpers.get_xml_attr(session, 'grandparentTitle'),
+ helpers.get_xml_attr(session, 'title')),
'year': helpers.get_xml_attr(session, 'year'),
'rating_key': helpers.get_xml_attr(session, 'ratingKey'),
'parent_rating_key': helpers.get_xml_attr(session, 'parentRatingKey'),
@@ -1131,6 +1136,7 @@ class PmsConnect(object):
'video_resolution': '',
'video_framerate': '',
'aspect_ratio': '',
+ 'transcode_decision': transcode_decision,
'transcode_audio_channels': transcode_audio_channels,
'transcode_audio_codec': transcode_audio_codec,
'transcode_video_codec': '',
@@ -1190,6 +1196,14 @@ class PmsConnect(object):
transcode_container = ''
transcode_protocol = ''
+ # Generate a combined transcode decision value
+ if video_decision == 'transcode' or audio_decision == 'transcode':
+ transcode_decision = 'transcode'
+ elif video_decision == 'copy' or audio_decision == 'copy':
+ transcode_decision = 'copy'
+ else:
+ transcode_decision = 'direct play'
+
if media_info.getElementsByTagName('Part'):
indexes = helpers.get_xml_attr(media_info.getElementsByTagName('Part')[0], 'indexes')
part_id = helpers.get_xml_attr(media_info.getElementsByTagName('Part')[0], 'id')
@@ -1241,6 +1255,8 @@ class PmsConnect(object):
'grandparent_title': helpers.get_xml_attr(session, 'grandparentTitle'),
'parent_title': helpers.get_xml_attr(session, 'parentTitle'),
'title': helpers.get_xml_attr(session, 'title'),
+ 'full_title': '%s - %s' % (helpers.get_xml_attr(session, 'grandparentTitle'),
+ helpers.get_xml_attr(session, 'title')),
'year': helpers.get_xml_attr(session, 'year'),
'rating_key': helpers.get_xml_attr(session, 'ratingKey'),
'parent_rating_key': helpers.get_xml_attr(session, 'parentRatingKey'),
@@ -1263,6 +1279,7 @@ class PmsConnect(object):
'video_resolution': video_resolution,
'video_framerate': video_framerate,
'aspect_ratio': aspect_ratio,
+ 'transcode_decision': transcode_decision,
'transcode_audio_channels': transcode_audio_channels,
'transcode_audio_codec': transcode_audio_codec,
'transcode_video_codec': transcode_video_codec,
@@ -1302,6 +1319,7 @@ class PmsConnect(object):
'grandparent_title': helpers.get_xml_attr(session, 'grandparentTitle'),
'parent_title': helpers.get_xml_attr(session, 'parentTitle'),
'title': helpers.get_xml_attr(session, 'title'),
+ 'full_title': helpers.get_xml_attr(session, 'title'),
'year': helpers.get_xml_attr(session, 'year'),
'rating_key': helpers.get_xml_attr(session, 'ratingKey'),
'parent_rating_key': helpers.get_xml_attr(session, 'parentRatingKey'),
@@ -1324,6 +1342,7 @@ class PmsConnect(object):
'video_resolution': video_resolution,
'video_framerate': video_framerate,
'aspect_ratio': aspect_ratio,
+ 'transcode_decision': transcode_decision,
'transcode_audio_channels': transcode_audio_channels,
'transcode_audio_codec': transcode_audio_codec,
'transcode_video_codec': transcode_video_codec,
@@ -1363,6 +1382,7 @@ class PmsConnect(object):
'grandparent_title': helpers.get_xml_attr(session, 'grandparentTitle'),
'parent_title': helpers.get_xml_attr(session, 'parentTitle'),
'title': helpers.get_xml_attr(session, 'title'),
+ 'full_title': helpers.get_xml_attr(session, 'title'),
'year': helpers.get_xml_attr(session, 'year'),
'rating_key': helpers.get_xml_attr(session, 'ratingKey'),
'parent_rating_key': helpers.get_xml_attr(session, 'parentRatingKey'),
@@ -1385,6 +1405,7 @@ class PmsConnect(object):
'video_resolution': video_resolution,
'video_framerate': video_framerate,
'aspect_ratio': aspect_ratio,
+ 'transcode_decision': transcode_decision,
'transcode_audio_channels': transcode_audio_channels,
'transcode_audio_codec': transcode_audio_codec,
'transcode_video_codec': transcode_video_codec,
@@ -1430,6 +1451,9 @@ class PmsConnect(object):
transcode_container = ''
transcode_protocol = ''
+ # Generate a combined transcode decision value
+ transcode_decision = video_decision
+
user_details = user_data.get_details(
user=helpers.get_xml_attr(session.getElementsByTagName('User')[0], 'title'))
@@ -1464,6 +1488,8 @@ class PmsConnect(object):
'grandparent_title': helpers.get_xml_attr(session, 'grandparentTitle'),
'parent_title': helpers.get_xml_attr(session, 'parentTitle'),
'title': helpers.get_xml_attr(session, 'title'),
+ 'full_title': '%s - %s' % (helpers.get_xml_attr(session, 'grandparentTitle'),
+ helpers.get_xml_attr(session, 'title')),
'year': helpers.get_xml_attr(session, 'year'),
'rating_key': helpers.get_xml_attr(session, 'ratingKey'),
'parent_rating_key': helpers.get_xml_attr(session, 'parentRatingKey'),
@@ -1486,6 +1512,7 @@ class PmsConnect(object):
'video_resolution': '',
'video_framerate': '',
'aspect_ratio': aspect_ratio,
+ 'transcode_decision': transcode_decision,
'transcode_audio_channels': '',
'transcode_audio_codec': '',
'transcode_video_codec': transcode_video_codec,
@@ -2190,3 +2217,10 @@ class PmsConnect(object):
}
return updater_info
+
+ def set_server_version(self):
+ identity = self.get_server_identity()
+ version = identity.get('version', plexpy.CONFIG.PMS_VERSION)
+
+ plexpy.CONFIG.__setattr__('PMS_VERSION', version)
+ plexpy.CONFIG.write()
\ No newline at end of file
diff --git a/plexpy/users.py b/plexpy/users.py
index 949c9f44..d7efd4c5 100644
--- a/plexpy/users.py
+++ b/plexpy/users.py
@@ -707,9 +707,9 @@ class Users(object):
data_tables = datatables.DataTables()
if session.get_session_user_id():
- custom_where = [['user_id', session.get_session_user_id()]]
+ custom_where = [['user_login.user_id', session.get_session_user_id()]]
else:
- custom_where = [['user_id', user_id]] if user_id else []
+ custom_where = [['user_login.user_id', user_id]] if user_id else []
columns = ['user_login.user_id',
'user_login.user_group',
diff --git a/plexpy/version.py b/plexpy/version.py
index b72e032c..ec03a370 100644
--- a/plexpy/version.py
+++ b/plexpy/version.py
@@ -1,2 +1,2 @@
PLEXPY_VERSION = "master"
-PLEXPY_RELEASE_VERSION = "1.4.8"
+PLEXPY_RELEASE_VERSION = "1.4.9"
diff --git a/plexpy/webserve.py b/plexpy/webserve.py
index 1a30e74e..78cd9e7d 100644
--- a/plexpy/webserve.py
+++ b/plexpy/webserve.py
@@ -1702,10 +1702,10 @@ class WebInterface(object):
@cherrypy.expose
@requireAuth()
- def get_stream_data(self, row_id=None, user=None, **kwargs):
+ def get_stream_data(self, row_id=None, session_key=None, user=None, **kwargs):
data_factory = datafactory.DataFactory()
- stream_data = data_factory.get_stream_details(row_id)
+ stream_data = data_factory.get_stream_details(row_id, session_key)
return serve_template(templatename="stream_data.html", title="Stream Data", data=stream_data, user=user)
@@ -2505,6 +2505,7 @@ class WebInterface(object):
config = {
"allow_guest_access": checked(plexpy.CONFIG.ALLOW_GUEST_ACCESS),
+ "history_table_activity": checked(plexpy.CONFIG.HISTORY_TABLE_ACTIVITY),
"http_basic_auth": checked(plexpy.CONFIG.HTTP_BASIC_AUTH),
"http_hash_password": checked(plexpy.CONFIG.HTTP_HASH_PASSWORD),
"http_hashed_password": plexpy.CONFIG.HTTP_HASHED_PASSWORD,
@@ -2634,7 +2635,8 @@ class WebInterface(object):
"ip_logging_enable", "movie_logging_enable", "tv_logging_enable", "music_logging_enable",
"notify_consecutive", "notify_upload_posters", "notify_recently_added", "notify_recently_added_grandparent",
"monitor_pms_updates", "monitor_remote_access", "get_file_sizes", "log_blacklist", "http_hash_password",
- "allow_guest_access", "cache_images", "http_proxy", "http_basic_auth", "notify_concurrent_by_ip"
+ "allow_guest_access", "cache_images", "http_proxy", "http_basic_auth", "notify_concurrent_by_ip",
+ "history_table_activity"
]
for checked_config in checked_configs:
if checked_config not in kwargs:
@@ -2791,6 +2793,7 @@ class WebInterface(object):
return {'plexpass': plexpass,
'pms_platform': plexpy.CONFIG.PMS_PLATFORM,
'pms_update_channel': plexpy.CONFIG.PMS_UPDATE_CHANNEL,
+ 'pms_update_distro': plexpy.CONFIG.PMS_UPDATE_DISTRO,
'pms_update_distro_build': plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD}
@cherrypy.expose
@@ -4320,3 +4323,40 @@ class WebInterface(object):
if isinstance(geo_info, basestring):
return {'error': geo_info}
return geo_info
+
+ @cherrypy.expose
+ @cherrypy.tools.json_out()
+ @requireAuth()
+ @addtoapi()
+ def get_whois_lookup(self, ip_address='', **kwargs):
+ """ Get the connection info for an IP address.
+
+ ```
+ Required parameters:
+ ip_address
+
+ Optional parameters:
+ None
+
+ Returns:
+ json:
+ {"host": "google-public-dns-a.google.com",
+ "nets": [{"description": "Google Inc.",
+ "address": "1600 Amphitheatre Parkway",
+ "city": "Mountain View",
+ "state": "CA",
+ "postal_code": "94043",
+ "country": "United States",
+ ...
+ },
+ {...}
+ ]
+ json:
+ {"host": "Not available",
+ "nets": [],
+ "error": "IPv4 address 127.0.0.1 is already defined as Loopback via RFC 1122, Section 3.2.1.3."
+ }
+ ```
+ """
+ whois_info = helpers.whois_lookup(ip_address)
+ return whois_info
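Since get_whois_lookup is exposed through @addtoapi(), it can also be called over HTTP. A hedged sketch, assuming the standard PlexPy /api/v2 conventions documented in API.md; the host, port, and API key are placeholders.

```python
# Hedged sketch of calling the new API command over HTTP; host, port, and API key
# are placeholders, and the URL shape assumes PlexPy's standard /api/v2 interface.
import urllib2

url = ('http://localhost:8181/api/v2'
       '?apikey=YOUR_API_KEY&cmd=get_whois_lookup&ip_address=8.8.8.8')

response = urllib2.urlopen(url)
print(response.read())  # JSON containing the host and nets fields shown above
```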