-rw-r--r--  designate/backend/agent.py  40
-rw-r--r--  designate/central/service.py  534
-rw-r--r--  designate/dnsutils.py  152
-rw-r--r--  designate/mdns/notify.py  24
-rw-r--r--  designate/tests/__init__.py  26
-rw-r--r--  designate/tests/test_api/test_v2/test_floatingips.py  19
-rw-r--r--  designate/tests/test_api/test_v2/test_import_export.py  6
-rw-r--r--  designate/tests/test_central/test_service.py  68
-rw-r--r--  designate/tests/unit/backend/test_agent.py  56
-rw-r--r--  designate/tests/unit/mdns/test_notify.py  76
-rw-r--r--  designate/tests/unit/test_central/test_basic.py  40
-rw-r--r--  designate/tests/unit/test_dnsutils.py  19
-rw-r--r--  designate/tests/unit/workers/test_base_task.py  88
-rw-r--r--  designate/tests/unit/workers/test_zone_tasks.py  20
-rw-r--r--  designate/worker/README.md  2
-rw-r--r--  designate/worker/tasks/base.py  48
-rw-r--r--  designate/worker/tasks/zone.py  38
-rw-r--r--  designate/worker/utils.py  82
18 files changed, 711 insertions, 627 deletions
diff --git a/designate/backend/agent.py b/designate/backend/agent.py
index 8ddc1818..67f8c80c 100644
--- a/designate/backend/agent.py
+++ b/designate/backend/agent.py
@@ -24,25 +24,23 @@
Configured in the [service:pool_manager] section
"""
-import eventlet
import dns
-import dns.rdataclass
-import dns.rdatatype
import dns.exception
import dns.flags
-import dns.rcode
import dns.message
import dns.opcode
+import dns.rcode
+import dns.rdataclass
+import dns.rdatatype
from oslo_config import cfg
from oslo_log import log as logging
from designate.backend import base
-from designate import exceptions
+from designate.backend import private_codes
from designate.conf.agent import DEFAULT_AGENT_PORT
+from designate import dnsutils
+from designate import exceptions
from designate.mdns import rpcapi as mdns_api
-import designate.backend.private_codes as pcodes
-
-dns_query = eventlet.import_patched('dns.query')
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -72,9 +70,9 @@ class AgentPoolBackend(base.Backend):
response, retry = self._make_and_send_dns_message(
zone.name,
self.timeout,
- pcodes.CC,
- pcodes.CREATE,
- pcodes.CLASSCC,
+ private_codes.CC,
+ private_codes.CREATE,
+ private_codes.CLASSCC,
self.host,
self.port
)
@@ -100,9 +98,9 @@ class AgentPoolBackend(base.Backend):
response, retry = self._make_and_send_dns_message(
zone.name,
self.timeout,
- pcodes.CC,
- pcodes.DELETE,
- pcodes.CLASSCC,
+ private_codes.CC,
+ private_codes.DELETE,
+ private_codes.CLASSCC,
self.host,
self.port
)
@@ -134,7 +132,7 @@ class AgentPoolBackend(base.Backend):
'port': dest_port, 'timeout': timeout,
'retry': retry})
response = None
- elif isinstance(response, dns_query.BadResponse):
+ elif isinstance(response, dns.query.BadResponse):
LOG.warning("Got BadResponse while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. "
"Timeout='%(timeout)d' seconds. Retry='%(retry)d'",
@@ -173,14 +171,10 @@ class AgentPoolBackend(base.Backend):
def _send_dns_message(self, dns_message, dest_ip, dest_port, timeout):
try:
- if not CONF['service:mdns'].all_tcp:
- response = dns_query.udp(
- dns_message, dest_ip, port=dest_port, timeout=timeout)
- else:
- response = dns_query.tcp(
- dns_message, dest_ip, port=dest_port, timeout=timeout)
- return response
+ return dnsutils.send_dns_message(
+ dns_message, dest_ip, port=dest_port, timeout=timeout
+ )
except dns.exception.Timeout as timeout:
return timeout
- except dns_query.BadResponse as badResponse:
+ except dns.query.BadResponse as badResponse:
return badResponse
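Note: after this change the agent backend no longer selects a transport itself; _send_dns_message() only classifies errors around the shared dnsutils helper introduced later in this diff. A condensed sketch of the resulting method (simplified from the hunks above, not a verbatim copy of the file):

    import dns.exception
    import dns.query

    from designate import dnsutils

    def _send_dns_message(self, dns_message, dest_ip, dest_port, timeout):
        # UDP vs TCP is decided inside dnsutils.send_dns_message() based on
        # the [service:mdns] all_tcp option; Timeout and BadResponse are
        # returned as values so the caller can decide whether to retry.
        try:
            return dnsutils.send_dns_message(
                dns_message, dest_ip, port=dest_port, timeout=timeout)
        except dns.exception.Timeout as timeout_error:
            return timeout_error
        except dns.query.BadResponse as bad_response:
            return bad_response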
diff --git a/designate/central/service.py b/designate/central/service.py
index fe3fb04c..fbe880c2 100644
--- a/designate/central/service.py
+++ b/designate/central/service.py
@@ -26,9 +26,8 @@ import random
from random import SystemRandom
import time
-from eventlet import tpool
-from dns import zone as dnszone
from dns import exception as dnsexception
+from dns import zone as dnszone
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_log import log as logging
@@ -561,51 +560,55 @@ class Service(service.RPCService):
objects.Record(data=r, managed=True) for r in ns_records])
values = {
'name': zone['name'],
- 'type': "NS",
+ 'type': 'NS',
'records': recordlist
}
ns, zone = self._create_recordset_in_storage(
context, zone, objects.RecordSet(**values),
- increment_serial=False)
+ increment_serial=False
+ )
return ns
def _add_ns(self, context, zone, ns_record):
# Get NS recordset
# If the zone doesn't have an NS recordset yet, create one
- recordsets = self.find_recordsets(
- context, criterion={'zone_id': zone['id'], 'type': "NS"}
- )
-
- managed = []
- for rs in recordsets:
- if rs.managed:
- managed.append(rs)
-
- if len(managed) == 0:
+ try:
+ recordset = self.find_recordset(
+ context,
+ criterion={
+ 'zone_id': zone['id'],
+ 'name': zone['name'],
+ 'type': 'NS'
+ }
+ )
+ except exceptions.RecordSetNotFound:
self._create_ns(context, zone, [ns_record])
return
- elif len(managed) != 1:
- raise exceptions.RecordSetNotFound("No valid recordset found")
-
- ns_recordset = managed[0]
# Add new record to recordset based on the new nameserver
- ns_recordset.records.append(
- objects.Record(data=ns_record, managed=True))
+ recordset.records.append(
+ objects.Record(data=ns_record, managed=True)
+ )
- self._update_recordset_in_storage(context, zone, ns_recordset,
+ self._update_recordset_in_storage(context, zone, recordset,
set_delayed_notify=True)
def _delete_ns(self, context, zone, ns_record):
- ns_recordset = self.find_recordset(
- context, criterion={'zone_id': zone['id'], 'type': "NS"})
+ recordset = self.find_recordset(
+ context,
+ criterion={
+ 'zone_id': zone['id'],
+ 'name': zone['name'],
+ 'type': 'NS'
+ }
+ )
- for record in copy.deepcopy(ns_recordset.records):
+ for record in list(recordset.records):
if record.data == ns_record:
- ns_recordset.records.remove(record)
+ recordset.records.remove(record)
- self._update_recordset_in_storage(context, zone, ns_recordset,
+ self._update_recordset_in_storage(context, zone, recordset,
set_delayed_notify=True)
# Quota Enforcement Methods
@@ -1611,7 +1614,7 @@ class Service(service.RPCService):
# Update the recordset
recordset = self.storage.update_recordset(context, recordset)
- return (recordset, zone)
+ return recordset, zone
@rpc.expected_exceptions()
@notification('dns.recordset.delete')
@@ -1760,7 +1763,7 @@ class Service(service.RPCService):
record = self.storage.create_record(context, zone.id, recordset.id,
record)
- return (record, zone)
+ return record, zone
@rpc.expected_exceptions()
def get_record(self, context, zone_id, recordset_id, record_id):
@@ -1901,7 +1904,7 @@ class Service(service.RPCService):
# Update the record
record = self.storage.update_record(context, record)
- return (record, zone)
+ return record, zone
@rpc.expected_exceptions()
@notification('dns.record.delete')
@@ -1973,7 +1976,7 @@ class Service(service.RPCService):
record = self.storage.update_record(context, record)
- return (record, zone)
+ return record, zone
@rpc.expected_exceptions()
def count_records(self, context, criterion=None):
@@ -2085,16 +2088,15 @@ class Service(service.RPCService):
'storage': storage_status
}
- def _determine_floatingips(self, context, fips, records=None,
- tenant_id=None):
+ def _determine_floatingips(self, context, fips, project_id=None):
"""
- Given the context or tenant, records and fips it returns the valid
- floatingips either with a associated record or not. Deletes invalid
+ Given the context or project, and fips it returns the valid
+ floating ips either with an associated record or not. Deletes invalid
records also.
- Returns a list of tuples with FloatingIPs and it's Record.
+ Returns a list of tuples with FloatingIPs and its Record.
"""
- tenant_id = tenant_id or context.project_id
+ project_id = project_id or context.project_id
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
@@ -2108,23 +2110,24 @@ class Service(service.RPCService):
invalid = []
data = {}
- # First populate the list of FIPS
+ # First populate the list of FIPS.
for fip_key, fip_values in fips.items():
# Check if the FIP has a record
record = records.get(fip_values['address'])
- # NOTE: Now check if it's owned by the tenant that actually has the
- # FIP in the external service and if not invalidate it (delete it)
- # thus not returning it with in the tuple with the FIP, but None..
+ # NOTE: Now check if it's owned by the project that actually has
+ # the FIP in the external service and if not invalidate it
+ # (delete it) thus not returning it with in the tuple with the FIP,
+ # but None.
if record:
- record_tenant = record['managed_tenant_id']
-
- if record_tenant != tenant_id:
- msg = "Invalid FloatingIP %s belongs to %s but record " \
- "owner %s"
- LOG.debug(msg, fip_key, tenant_id, record_tenant)
+ record_project = record['managed_tenant_id']
+ if record_project != project_id:
+ LOG.debug(
+ 'Invalid FloatingIP %s belongs to %s but record '
+ 'project %s', fip_key, project_id, record_project
+ )
invalid.append(record)
record = None
data[fip_key] = (fip_values, record)
@@ -2135,66 +2138,17 @@ class Service(service.RPCService):
"""
Utility method to delete a list of records.
"""
+ if not records:
+ return
+
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
- if len(records) > 0:
- for r in records:
- msg = 'Deleting record %s for FIP %s'
- LOG.debug(msg, r['id'], r['managed_resource_id'])
- self.delete_record(elevated_context, r['zone_id'],
- r['recordset_id'], r['id'])
-
- def _format_floatingips(self, context, data, recordsets=None):
- """
- Given a list of FloatingIP and Record tuples we look through creating
- a new dict of FloatingIPs
- """
- elevated_context = context.elevated(all_tenants=True)
-
- fips = objects.FloatingIPList()
- for key, value in data.items():
- fip, record = value
-
- fip_ptr = objects.FloatingIP().from_dict({
- 'address': fip['address'],
- 'id': fip['id'],
- 'region': fip['region'],
- 'ptrdname': None,
- 'ttl': None,
- 'description': None,
- 'action': None,
- 'status': 'ACTIVE'
- })
-
- # TTL population requires a present record in order to find the
- # RS or Zone
- if record:
- fip_ptr['action'] = record.action
- fip_ptr['status'] = record.status
-
- # We can have a recordset dict passed in
- if (recordsets is not None and
- record['recordset_id'] in recordsets):
- recordset = recordsets[record['recordset_id']]
- else:
- recordset = self.storage.get_recordset(
- elevated_context, record['recordset_id'])
-
- if recordset['ttl'] is not None:
- fip_ptr['ttl'] = recordset['ttl']
- else:
- zone = self.get_zone(
- elevated_context, record['zone_id'])
- fip_ptr['ttl'] = zone['ttl']
-
- fip_ptr['ptrdname'] = record['data']
- fip_ptr['description'] = record['description']
- else:
- LOG.debug("No record information found for %s", value[0]['id'])
-
- # Store the "fip_record" with the region and it's id as key
- fips.append(fip_ptr)
- return fips
+ for record in records:
+ LOG.debug('Deleting record %s for FIP %s',
+ record['id'], record['managed_resource_id'])
+ self._delete_ptr_record(
+ elevated_context, record
+ )
def _list_floatingips(self, context, region=None):
data = self.network_api.list_floatingips(context, region=region)
@@ -2211,9 +2165,11 @@ class Service(service.RPCService):
def _get_floatingip(self, context, region, floatingip_id, fips):
if (region, floatingip_id) not in fips:
- msg = 'FloatingIP %s in %s is not associated for tenant "%s"' % \
- (floatingip_id, region, context.project_id)
- raise exceptions.NotFound(msg)
+ raise exceptions.NotFound(
+ 'FloatingIP %s in %s is not associated for project "%s"' % (
+ floatingip_id, region, context.project_id
+ )
+ )
return fips[region, floatingip_id]
# PTR ops
@@ -2229,14 +2185,15 @@ class Service(service.RPCService):
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
- tenant_fips = self._list_floatingips(context)
+ project_floatingips = self._list_floatingips(context)
valid, invalid = self._determine_floatingips(
- elevated_context, tenant_fips)
+ elevated_context, project_floatingips
+ )
self._invalidate_floatingips(context, invalid)
- return self._format_floatingips(context, valid)
+ return self._create_floating_ip_list(context, valid)
@rpc.expected_exceptions()
def get_floatingip(self, context, region, floatingip_id):
@@ -2252,11 +2209,12 @@ class Service(service.RPCService):
result = self._list_to_dict([fip], keys=['region', 'id'])
valid, invalid = self._determine_floatingips(
- elevated_context, result)
+ elevated_context, result
+ )
self._invalidate_floatingips(context, invalid)
- return self._format_floatingips(context, valid)[0]
+ return self._create_floating_ip_list(context, valid)[0]
def _set_floatingip_reverse(self, context, region, floatingip_id, values):
"""
@@ -2266,16 +2224,18 @@ class Service(service.RPCService):
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
- tenant_fips = self._list_floatingips(context, region=region)
+ project_fips = self._list_floatingips(context, region=region)
- fip = self._get_floatingip(context, region, floatingip_id, tenant_fips)
+ fip = self._get_floatingip(
+ context, region, floatingip_id, project_fips
+ )
zone_name = self.network_api.address_zone(fip['address'])
- # NOTE: Find existing zone or create it..
try:
zone = self.storage.find_zone(
- elevated_context, {'name': zone_name})
+ elevated_context, {'name': zone_name}
+ )
except exceptions.ZoneNotFound:
LOG.info(
'Creating zone for %(fip_id)s:%(region)s - %(fip_addr)s '
@@ -2298,44 +2258,16 @@ class Service(service.RPCService):
}
zone = self.create_zone(
- elevated_context, objects.Zone(**zone_values))
+ elevated_context, objects.Zone(**zone_values)
+ )
record_name = self.network_api.address_name(fip['address'])
-
recordset_values = {
'name': record_name,
+ 'zone_id': zone['id'],
'type': 'PTR',
- 'ttl': values.get('ttl', None)
+ 'ttl': values.get('ttl')
}
-
- try:
- recordset = self.find_recordset(
- elevated_context, {'name': record_name, 'type': 'PTR'})
-
- # Update the recordset values
- recordset.name = recordset_values['name']
- recordset.type = recordset_values['type']
- recordset.ttl = recordset_values['ttl']
- recordset.zone_id = zone['id']
- recordset = self.update_recordset(
- elevated_context,
- recordset=recordset)
-
- # Delete the current records for the recordset
- LOG.debug("Removing old Record")
- for record in recordset.records:
- self.delete_record(
- elevated_context,
- zone_id=recordset['zone_id'],
- recordset_id=recordset['id'],
- record_id=record['id'])
-
- except exceptions.RecordSetNotFound:
- recordset = self.create_recordset(
- elevated_context,
- zone_id=zone['id'],
- recordset=objects.RecordSet(**recordset_values))
-
record_values = {
'data': values['ptrdname'],
'description': values['description'],
@@ -2346,16 +2278,14 @@ class Service(service.RPCService):
'managed_resource_type': 'ptr:floatingip',
'managed_tenant_id': context.project_id
}
-
- record = self.create_record(
- elevated_context,
- zone_id=zone['id'],
- recordset_id=recordset['id'],
- record=objects.Record(**record_values))
-
- return self._format_floatingips(
- context, {(region, floatingip_id): (fip, record)},
- {recordset['id']: recordset})[0]
+ record = objects.Record(**record_values)
+ recordset = self._replace_or_create_ptr_recordset(
+ elevated_context, record,
+ **recordset_values
+ )
+ return self._create_floating_ip(
+ context, fip, record, zone=zone, recordset=recordset
+ )
def _unset_floatingip_reverse(self, context, region, floatingip_id):
"""
@@ -2375,16 +2305,121 @@ class Service(service.RPCService):
try:
record = self.storage.find_record(
- elevated_context, criterion=criterion)
+ elevated_context, criterion=criterion
+ )
except exceptions.RecordNotFound:
msg = 'No such FloatingIP %s:%s' % (region, floatingip_id)
raise exceptions.NotFound(msg)
- self.delete_record(
- elevated_context,
- zone_id=record['zone_id'],
- recordset_id=record['recordset_id'],
- record_id=record['id'])
+ self._delete_ptr_record(
+ elevated_context, record
+ )
+
+ def _create_floating_ip(self, context, fip, record,
+ zone=None, recordset=None):
+ """
+ Creates a FloatingIP based on floating ip and record data.
+ """
+ elevated_context = context.elevated(all_tenants=True)
+ fip_ptr = objects.FloatingIP().from_dict({
+ 'address': fip['address'],
+ 'id': fip['id'],
+ 'region': fip['region'],
+ 'ptrdname': None,
+ 'ttl': None,
+ 'description': None,
+ 'action': None,
+ 'status': 'ACTIVE'
+ })
+ # TTL population requires a present record in order to find the
+ # RS or Zone.
+ if record and record.action != 'DELETE':
+ if not recordset:
+ recordset = self.storage.get_recordset(
+ elevated_context, record.recordset_id)
+
+ fip_ptr['action'] = recordset.action
+ fip_ptr['status'] = recordset.status
+
+ if recordset.ttl is not None:
+ fip_ptr['ttl'] = recordset.ttl
+ else:
+ if not zone:
+ zone = self.get_zone(elevated_context,
+ record.zone_id)
+ fip_ptr['ttl'] = zone.ttl
+
+ fip_ptr['ptrdname'] = record.data
+ fip_ptr['description'] = record.description
+ else:
+ LOG.debug('No record information found for %s', fip['id'])
+
+ return fip_ptr
+
+ def _create_floating_ip_list(self, context, data):
+ """
+ Creates a FloatingIPList based on floating ips and records data.
+ """
+ fips = objects.FloatingIPList()
+ for key, value in data.items():
+ fip, record = value
+ fip_ptr = self._create_floating_ip(context, fip, record)
+ fips.append(fip_ptr)
+ return fips
+
+ def _delete_ptr_record(self, context, record):
+ try:
+ recordset = self.get_recordset(
+ context, record.zone_id, record.recordset_id
+ )
+
+ if record not in recordset.records:
+ LOG.debug(
+ 'PTR Record %s not found in recordset %s',
+ record.id, record.recordset_id
+ )
+ return
+
+ recordset.records.remove(record)
+
+ if not recordset.records:
+ self.delete_recordset(
+ context, record.zone_id, record.recordset_id
+ )
+ return
+
+ recordset.validate()
+ self.update_recordset(context, recordset)
+ except exceptions.RecordSetNotFound:
+ pass
+
+ def _replace_or_create_ptr_recordset(self, context, record, zone_id,
+ name, type, ttl=None):
+ try:
+ recordset = self.find_recordset(context, {
+ 'zone_id': zone_id,
+ 'name': name,
+ 'type': type,
+ })
+ recordset.ttl = ttl
+ recordset.records = objects.RecordList(objects=[record])
+ recordset.validate()
+ recordset = self.update_recordset(
+ context, recordset
+ )
+ except exceptions.RecordSetNotFound:
+ values = {
+ 'name': name,
+ 'type': type,
+ 'ttl': ttl,
+ }
+ recordset = objects.RecordSet(**values)
+ recordset.records = objects.RecordList(objects=[record])
+ recordset.validate()
+ recordset = self.create_recordset(
+ context, zone_id, recordset
+ )
+ return recordset
@rpc.expected_exceptions()
@transaction
@@ -2393,12 +2428,15 @@ class Service(service.RPCService):
We strictly see if values['ptrdname'] is str or None and set / unset
the requested FloatingIP's PTR record based on that.
"""
- if 'ptrdname' in values.obj_what_changed() and\
- values['ptrdname'] is None:
- self._unset_floatingip_reverse(context, region, floatingip_id)
+ if ('ptrdname' in values.obj_what_changed() and
+ values['ptrdname'] is None):
+ self._unset_floatingip_reverse(
+ context, region, floatingip_id
+ )
elif isinstance(values['ptrdname'], str):
return self._set_floatingip_reverse(
- context, region, floatingip_id, values)
+ context, region, floatingip_id, values
+ )
# Blacklisted zones
@rpc.expected_exceptions()
@@ -2505,46 +2543,49 @@ class Service(service.RPCService):
@notification('dns.pool.update')
@transaction
def update_pool(self, context, pool):
-
policy.check('update_pool', context)
# If there is a nameserver, then additional steps need to be done
# Since these are treated as mutable objects, we're only going to
# be comparing the nameserver.value which is the FQDN
- if pool.obj_attr_is_set('ns_records'):
- elevated_context = context.elevated(all_tenants=True)
+ elevated_context = context.elevated(all_tenants=True)
- # TODO(kiall): ListObjects should be able to give you their
- # original set of values.
- original_pool_ns_records = self._get_pool_ns_records(context,
- pool.id)
- # Find the current NS hostnames
- existing_ns = set([n.hostname for n in original_pool_ns_records])
+ # TODO(kiall): ListObjects should be able to give you their
+ # original set of values.
+ original_pool_ns_records = self._get_pool_ns_records(
+ context, pool.id
+ )
+
+ updated_pool = self.storage.update_pool(context, pool)
- # Find the desired NS hostnames
- request_ns = set([n.hostname for n in pool.ns_records])
+ if not pool.obj_attr_is_set('ns_records'):
+ return updated_pool
- # Get the NS's to be created and deleted, ignoring the ones that
- # are in both sets, as those haven't changed.
- # TODO(kiall): Factor in priority
- create_ns = request_ns.difference(existing_ns)
- delete_ns = existing_ns.difference(request_ns)
+ # Find the current NS hostnames
+ existing_ns = set([n.hostname for n in original_pool_ns_records])
- updated_pool = self.storage.update_pool(context, pool)
+ # Find the desired NS hostnames
+ request_ns = set([n.hostname for n in pool.ns_records])
+
+ # Get the NS's to be created and deleted, ignoring the ones that
+ # are in both sets, as those haven't changed.
+ # TODO(kiall): Factor in priority
+ create_ns = request_ns.difference(existing_ns)
+ delete_ns = existing_ns.difference(request_ns)
# After the update, handle new ns_records
- for ns in create_ns:
+ for ns_record in create_ns:
# Create new NS recordsets for every zone
zones = self.find_zones(
context=elevated_context,
criterion={'pool_id': pool.id, 'action': '!DELETE'})
- for z in zones:
- self._add_ns(elevated_context, z, ns)
+ for zone in zones:
+ self._add_ns(elevated_context, zone, ns_record)
# Then handle the ns_records to delete
- for ns in delete_ns:
+ for ns_record in delete_ns:
# Cannot delete the last nameserver, so verify that first.
- if len(pool.ns_records) == 0:
+ if not pool.ns_records:
raise exceptions.LastServerDeleteNotAllowed(
"Not allowed to delete last of servers"
)
@@ -2552,9 +2593,10 @@ class Service(service.RPCService):
# Delete the NS record for every zone
zones = self.find_zones(
context=elevated_context,
- criterion={'pool_id': pool.id})
- for z in zones:
- self._delete_ns(elevated_context, z, ns)
+ criterion={'pool_id': pool.id}
+ )
+ for zone in zones:
+ self._delete_ns(elevated_context, zone, ns_record)
return updated_pool
@@ -2990,7 +3032,6 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
@notification('dns.zone_import.create')
def create_zone_import(self, context, request_body):
-
if policy.enforce_new_defaults():
target = {constants.RBAC_PROJECT_ID: context.project_id}
else:
@@ -3010,59 +3051,49 @@ class Service(service.RPCService):
zone_import = objects.ZoneImport(**values)
created_zone_import = self.storage.create_zone_import(context,
- zone_import)
+ zone_import)
self.tg.add_thread(self._import_zone, context, created_zone_import,
- request_body)
+ request_body)
return created_zone_import
def _import_zone(self, context, zone_import, request_body):
-
- def _import(self, context, zone_import, request_body):
- # Dnspython needs a str instead of a unicode object
- zone = None
- try:
- dnspython_zone = dnszone.from_text(
- request_body,
- # Don't relativize, or we end up with '@' record names.
- relativize=False,
- # Don't check origin, we allow missing NS records
- # (missing SOA records are taken care of in _create_zone).
- check_origin=False)
- zone = dnsutils.from_dnspython_zone(dnspython_zone)
- zone.type = 'PRIMARY'
-
- for rrset in list(zone.recordsets):
- if rrset.type == 'SOA':
- zone.recordsets.remove(rrset)
- # subdomain NS records should be kept
- elif rrset.type == 'NS' and rrset.name == zone.name:
- zone.recordsets.remove(rrset)
-
- except dnszone.UnknownOrigin:
- zone_import.message = ('The $ORIGIN statement is required and'
- ' must be the first statement in the'
- ' zonefile.')
- zone_import.status = 'ERROR'
- except dnsexception.SyntaxError:
- zone_import.message = 'Malformed zonefile.'
- zone_import.status = 'ERROR'
- except exceptions.BadRequest:
- zone_import.message = 'An SOA record is required.'
- zone_import.status = 'ERROR'
- except Exception as e:
- LOG.exception('An undefined error occurred during zone import')
- msg = 'An undefined error occurred. %s'\
- % str(e)[:130]
- zone_import.message = msg
- zone_import.status = 'ERROR'
-
- return zone, zone_import
-
- # Execute the import in a real Python thread
- zone, zone_import = tpool.execute(_import, self, context,
- zone_import, request_body)
+ zone = None
+ try:
+ dnspython_zone = dnszone.from_text(
+ request_body,
+ # Don't relativize, or we end up with '@' record names.
+ relativize=False,
+ # Don't check origin, we allow missing NS records
+ # (missing SOA records are taken care of in _create_zone).
+ check_origin=False)
+ zone = dnsutils.from_dnspython_zone(dnspython_zone)
+ zone.type = 'PRIMARY'
+ for rrset in list(zone.recordsets):
+ if rrset.type == 'SOA':
+ zone.recordsets.remove(rrset)
+ # subdomain NS records should be kept
+ elif rrset.type == 'NS' and rrset.name == zone.name:
+ zone.recordsets.remove(rrset)
+ except dnszone.UnknownOrigin:
+ zone_import.message = (
+ 'The $ORIGIN statement is required and must be the first '
+ 'statement in the zonefile.'
+ )
+ zone_import.status = 'ERROR'
+ except dnsexception.SyntaxError:
+ zone_import.message = 'Malformed zonefile.'
+ zone_import.status = 'ERROR'
+ except exceptions.BadRequest:
+ zone_import.message = 'An SOA record is required.'
+ zone_import.status = 'ERROR'
+ except Exception as e:
+ LOG.exception('An undefined error occurred during zone import')
+ zone_import.message = (
+ 'An undefined error occurred. %s' % str(e)[:130]
+ )
+ zone_import.status = 'ERROR'
# If the zone import was valid, create the zone
if zone_import.status != 'ERROR':
@@ -3070,27 +3101,32 @@ class Service(service.RPCService):
zone = self.create_zone(context, zone)
zone_import.status = 'COMPLETE'
zone_import.zone_id = zone.id
- zone_import.message = '%(name)s imported' % {'name':
- zone.name}
+ zone_import.message = (
+ '%(name)s imported' % {'name': zone.name}
+ )
except exceptions.DuplicateZone:
zone_import.status = 'ERROR'
zone_import.message = 'Duplicate zone.'
except exceptions.InvalidTTL as e:
zone_import.status = 'ERROR'
zone_import.message = str(e)
+ except exceptions.OverQuota:
+ zone_import.status = 'ERROR'
+ zone_import.message = 'Quota exceeded during zone import.'
except Exception as e:
- LOG.exception('An undefined error occurred during zone '
- 'import creation')
- msg = 'An undefined error occurred. %s'\
- % str(e)[:130]
- zone_import.message = msg
+ LOG.exception(
+ 'An undefined error occurred during zone import creation'
+ )
+ zone_import.message = (
+ 'An undefined error occurred. %s' % str(e)[:130]
+ )
zone_import.status = 'ERROR'
self.update_zone_import(context, zone_import)
@rpc.expected_exceptions()
def find_zone_imports(self, context, criterion=None, marker=None,
- limit=None, sort_key=None, sort_dir=None):
+ limit=None, sort_key=None, sort_dir=None):
if policy.enforce_new_defaults():
target = {constants.RBAC_PROJECT_ID: context.project_id}
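Note: the PTR handling above now funnels through _replace_or_create_ptr_recordset(), which either rewrites an existing PTR recordset's records in place or creates a new one. A minimal caller-side sketch of that flow; the reverse name, ptrdname and surrounding variables are hypothetical examples, not values taken from the patch:

    record = objects.Record(
        data='www.example.org.',          # hypothetical ptrdname
        managed=True,
        managed_tenant_id=context.project_id,
    )
    recordset = self._replace_or_create_ptr_recordset(
        elevated_context, record,
        zone_id=zone['id'],
        name='1.2.0.192.in-addr.arpa.',   # hypothetical reverse record name
        type='PTR',
        ttl=None,
    )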
diff --git a/designate/dnsutils.py b/designate/dnsutils.py
index 5875bc1c..1bf46300 100644
--- a/designate/dnsutils.py
+++ b/designate/dnsutils.py
@@ -20,9 +20,10 @@ from threading import Lock
import dns
import dns.exception
+import dns.query
+import dns.rdatatype
import dns.zone
import eventlet
-from dns import rdatatype
from oslo_serialization import base64
from oslo_log import log as logging
@@ -312,7 +313,7 @@ def dnspyrecords_to_recordsetlist(dnspython_records):
def dnspythonrecord_to_recordset(rname, rdataset):
- record_type = rdatatype.to_text(rdataset.rdtype)
+ record_type = dns.rdatatype.to_text(rdataset.rdtype)
name = rname.to_text()
if isinstance(name, bytes):
@@ -346,39 +347,122 @@ def do_axfr(zone_name, servers, timeout=None, source=None):
timeout = timeout or CONF["service:mdns"].xfr_timeout
xfr = None
-
for srv in servers:
- to = eventlet.Timeout(timeout)
- log_info = {'name': zone_name, 'host': srv}
- try:
- LOG.info("Doing AXFR for %(name)s from %(host)s", log_info)
-
- xfr = dns.query.xfr(srv['host'], zone_name, relativize=False,
- timeout=1, port=srv['port'], source=source)
- raw_zone = dns.zone.from_xfr(xfr, relativize=False)
- break
- except eventlet.Timeout as t:
- if t == to:
- LOG.error("AXFR timed out for %(name)s from %(host)s",
- log_info)
- continue
- except dns.exception.FormError:
- LOG.error("Zone %(name)s is not present on %(host)s."
- "Trying next server.", log_info)
- except socket.error:
- LOG.error("Connection error when doing AXFR for %(name)s from "
- "%(host)s", log_info)
- except Exception:
- LOG.exception("Problem doing AXFR %(name)s from %(host)s. "
+ for address in get_ip_addresses(srv['host']):
+ to = eventlet.Timeout(timeout)
+ log_info = {'name': zone_name, 'host': srv, 'address': address}
+ try:
+ LOG.info(
+ 'Doing AXFR for %(name)s from %(host)s %(address)s',
+ log_info
+ )
+ xfr = dns.query.xfr(
+ address, zone_name, relativize=False, timeout=1,
+ port=srv['port'], source=source
+ )
+ raw_zone = dns.zone.from_xfr(xfr, relativize=False)
+ LOG.debug("AXFR Successful for %s", raw_zone.origin.to_text())
+ return raw_zone
+ except eventlet.Timeout as t:
+ if t == to:
+ LOG.error("AXFR timed out for %(name)s from %(host)s",
+ log_info)
+ continue
+ except dns.exception.FormError:
+ LOG.error("Zone %(name)s is not present on %(host)s."
"Trying next server.", log_info)
- finally:
- to.cancel()
- continue
- else:
- raise exceptions.XFRFailure(
- "XFR failed for %(name)s. No servers in %(servers)s was reached." %
- {"name": zone_name, "servers": servers})
+ except socket.error:
+ LOG.error("Connection error when doing AXFR for %(name)s from "
+ "%(host)s", log_info)
+ except Exception:
+ LOG.exception("Problem doing AXFR %(name)s from %(host)s. "
+ "Trying next server.", log_info)
+ finally:
+ to.cancel()
+
+ raise exceptions.XFRFailure(
+ "XFR failed for %(name)s. No servers in %(servers)s was reached." %
+ {"name": zone_name, "servers": servers}
+ )
+
+
+def prepare_msg(zone_name, rdatatype=dns.rdatatype.SOA,
+ dns_opcode=dns.opcode.QUERY):
+ """
+ Do the needful to set up a dns packet with dnspython
+ """
+ dns_message = dns.message.make_query(zone_name, rdatatype)
+ dns_message.set_opcode(dns_opcode)
+
+ return dns_message
+
+
+def dig(zone_name, host, rdatatype, port=53):
+ """
+ Set up and send a regular dns query, datatype configurable
+ """
+ query = prepare_msg(zone_name, rdatatype=rdatatype)
+
+ return send_dns_message(query, host, port=port)
+
+
+def notify(zone_name, host, port=53):
+ """
+ Set up a notify packet and send it
+ """
+ msg = prepare_msg(zone_name, dns_opcode=dns.opcode.NOTIFY)
+
+ return send_dns_message(msg, host, port=port)
- LOG.debug("AXFR Successful for %s", raw_zone.origin.to_text())
- return raw_zone
+def send_dns_message(dns_message, host, port=53, timeout=10):
+ """
+ Send the dns message and return the response
+
+ :return: dns.Message of the response to the dns query
+ """
+ ip_address = get_ip_address(host)
+ # This can raise some exceptions, but we'll catch them elsewhere
+ if not CONF['service:mdns'].all_tcp:
+ return dns.query.udp(
+ dns_message, ip_address, port=port, timeout=timeout)
+ return dns.query.tcp(
+ dns_message, ip_address, port=port, timeout=timeout)
+
+
+def get_serial(zone_name, host, port=53):
+ """
+ Possibly raises dns.exception.Timeout or dns.query.BadResponse.
+ Possibly returns 0 if, e.g., the answer section is empty.
+ """
+ resp = dig(zone_name, host, dns.rdatatype.SOA, port=port)
+ if not resp.answer:
+ return 0
+ rdataset = resp.answer[0].to_rdataset()
+ if not rdataset:
+ return 0
+ return rdataset[0].serial
+
+
+def get_ip_address(ip_address_or_hostname):
+ """
+ Provide an ip or hostname and return a valid ip4 or ipv6 address.
+
+ :return: ip address
+ """
+ addresses = get_ip_addresses(ip_address_or_hostname)
+ if not addresses:
+ return None
+ return addresses[0]
+
+
+def get_ip_addresses(ip_address_or_hostname):
+ """
+ Provide an ip or hostname and return all valid ip4 or ipv6 addresses.
+
+ :return: ip addresses
+ """
+ addresses = []
+ for res in socket.getaddrinfo(ip_address_or_hostname, 0):
+ addresses.append(res[4][0])
+ return list(set(addresses))
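Note: the new dnsutils helpers give callers a small, transport-agnostic API for one-shot DNS operations; send_dns_message() picks UDP or TCP from the [service:mdns] all_tcp option, and get_serial() returns 0 when the answer section is empty. A brief usage sketch, with an illustrative zone name and nameserver address only:

    import dns.exception
    import dns.query
    import dns.rdatatype

    from designate import dnsutils

    try:
        # Fetch the current SOA serial of a zone from one nameserver.
        serial = dnsutils.get_serial('example.org.', '192.0.2.1', port=53)

        # Ask the same server for the zone's NS records.
        response = dnsutils.dig('example.org.', '192.0.2.1', dns.rdatatype.NS)

        # Send a NOTIFY packet for the zone.
        dnsutils.notify('example.org.', '192.0.2.1', port=53)
    except (dns.exception.Timeout, dns.query.BadResponse):
        # get_serial()'s docstring notes both exceptions may propagate.
        serial = 0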
diff --git a/designate/mdns/notify.py b/designate/mdns/notify.py
index 0d66970d..0e464960 100644
--- a/designate/mdns/notify.py
+++ b/designate/mdns/notify.py
@@ -28,6 +28,7 @@ import dns.opcode
from oslo_config import cfg
from oslo_log import log as logging
+from designate import dnsutils
from designate.mdns import base
from designate.metrics import metrics
@@ -186,8 +187,9 @@ class NotifyEndpoint(base.BaseEndpoint):
'zone': zone.name, 'server': host,
'port': port})
try:
- response = self._send_dns_message(dns_message, host, port,
- timeout)
+ response = dnsutils.send_dns_message(
+ dns_message, host, port, timeout=timeout
+ )
except socket.error as e:
if e.errno != socket.errno.EAGAIN:
@@ -285,21 +287,3 @@ class NotifyEndpoint(base.BaseEndpoint):
dns_message.flags |= dns.flags.RD
return dns_message
-
- def _send_dns_message(self, dns_message, host, port, timeout):
- """
- Send DNS Message over TCP or UDP, return response.
-
- :param dns_message: The dns message that needs to be sent.
- :param host: The destination ip of dns_message.
- :param port: The destination port of dns_message.
- :param timeout: The timeout in seconds to wait for a response.
- :return: response
- """
- send = dns_query.tcp if CONF['service:mdns'].all_tcp else dns_query.udp
- return send(
- dns_message,
- socket.gethostbyname(host),
- port=port,
- timeout=timeout
- )
diff --git a/designate/tests/__init__.py b/designate/tests/__init__.py
index 07bf510d..24c7beaa 100644
--- a/designate/tests/__init__.py
+++ b/designate/tests/__init__.py
@@ -786,34 +786,36 @@ class TestCase(base.BaseTestCase):
return self.storage.create_zone_export(
context, objects.ZoneExport.from_dict(zone_export))
- def wait_for_import(self, zone_import_id, errorok=False):
+ def wait_for_import(self, zone_import_id, error_is_ok=False, max_wait=10):
"""
Zone imports spawn a thread to parse the zone file and
insert the data. This waits for this process before continuing
"""
- attempts = 0
- while attempts < 20:
- # Give the import a half second to complete
- time.sleep(.5)
-
+ start_time = time.time()
+ while True:
# Retrieve it, and ensure it's the same
zone_import = self.central_service.get_zone_import(
- self.admin_context_all_tenants, zone_import_id)
+ self.admin_context_all_tenants, zone_import_id
+ )
# If the import is done, we're done
if zone_import.status == 'COMPLETE':
break
# If errors are allowed, just make sure that something completed
- if errorok:
- if zone_import.status != 'PENDING':
- break
+ if error_is_ok and zone_import.status != 'PENDING':
+ break
- attempts += 1
+ if (time.time() - start_time) > max_wait:
+ break
- if not errorok:
+ time.sleep(0.5)
+
+ if not error_is_ok:
self.assertEqual('COMPLETE', zone_import.status)
+ return zone_import
+
def _ensure_interface(self, interface, implementation):
for name in interface.__abstractmethods__:
in_arginfo = inspect.getfullargspec(getattr(interface, name))
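Note: wait_for_import() now polls against a wall-clock deadline and returns the final ZoneImport object, so callers can assert on the terminal state directly. A short usage sketch in the style of the tests later in this diff; the assertion is illustrative:

    zone_import = self.central_service.create_zone_import(
        context, request_body)
    zone_import = self.wait_for_import(zone_import.id, error_is_ok=True)

    # With error_is_ok=True the helper returns once the import leaves
    # PENDING, or once the max_wait deadline passes.
    self.assertIn(zone_import.status, ('COMPLETE', 'ERROR'))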
diff --git a/designate/tests/test_api/test_v2/test_floatingips.py b/designate/tests/test_api/test_v2/test_floatingips.py
index 2810029d..4667da55 100644
--- a/designate/tests/test_api/test_v2/test_floatingips.py
+++ b/designate/tests/test_api/test_v2/test_floatingips.py
@@ -197,19 +197,6 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
- criterion = {
- 'managed_resource_id': fip['id'],
- 'managed_tenant_id': context.project_id
- }
- zone_id = self.central_service.find_record(
- elevated_context, criterion=criterion).zone_id
-
- # Simulate the update on the backend
- zone_serial = self.central_service.get_zone(
- elevated_context, zone_id).serial
- self.central_service.update_status(
- elevated_context, zone_id, "SUCCESS", zone_serial)
-
# Unset PTR ('ptrdname' is None aka null in JSON)
response = self.client.patch_json(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
@@ -218,12 +205,6 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
self.assertIsNone(response.json)
self.assertEqual(202, response.status_int)
- # Simulate the unset on the backend
- zone_serial = self.central_service.get_zone(
- elevated_context, zone_id).serial
- self.central_service.update_status(
- elevated_context, zone_id, "SUCCESS", zone_serial)
-
fip = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
self.assertIsNone(fip['ptrdname'])
diff --git a/designate/tests/test_api/test_v2/test_import_export.py b/designate/tests/test_api/test_v2/test_import_export.py
index aefc6e1d..b9e491e0 100644
--- a/designate/tests/test_api/test_v2/test_import_export.py
+++ b/designate/tests/test_api/test_v2/test_import_export.py
@@ -53,7 +53,7 @@ class APIV2ZoneImportExportTest(ApiV2TestCase):
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
- self.wait_for_import(import_id, errorok=True)
+ self.wait_for_import(import_id, error_is_ok=True)
url = '/zones/tasks/imports/%s' % import_id
@@ -70,7 +70,7 @@ class APIV2ZoneImportExportTest(ApiV2TestCase):
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
- self.wait_for_import(import_id, errorok=True)
+ self.wait_for_import(import_id, error_is_ok=True)
url = '/zones/tasks/imports/%s' % import_id
@@ -86,7 +86,7 @@ class APIV2ZoneImportExportTest(ApiV2TestCase):
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
- self.wait_for_import(import_id, errorok=True)
+ self.wait_for_import(import_id, error_is_ok=True)
url = '/zones/tasks/imports/%s' % import_id
diff --git a/designate/tests/test_central/test_service.py b/designate/tests/test_central/test_service.py
index 5db47b4f..8307f348 100644
--- a/designate/tests/test_central/test_service.py
+++ b/designate/tests/test_central/test_service.py
@@ -2422,8 +2422,10 @@ class CentralServiceTest(CentralTestCase):
actual = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
- self.assertEqual(expected, actual)
+ self.assertEqual(expected.address, actual.address)
+ self.assertEqual(expected.ptrdname, actual.ptrdname)
+ self.assertEqual(expected.ttl, actual.ttl)
self.assertEqual(expected, actual)
def test_get_floatingip_not_allocated(self):
@@ -2456,14 +2458,6 @@ class CentralServiceTest(CentralTestCase):
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.project_id}
- zone_id = self.central_service.find_record(
- elevated_a, criterion).zone_id
-
- # Simulate the update on the backend
- zone_serial = self.central_service.get_zone(
- elevated_a, zone_id).serial
- self.central_service.update_status(
- elevated_a, zone_id, "SUCCESS", zone_serial)
self.network_api.fake.deallocate_floatingip(fip['id'])
@@ -2485,19 +2479,9 @@ class CentralServiceTest(CentralTestCase):
context_b, fip['region'], fip['id'])
self.assertIsNone(fip_ptr['ptrdname'])
- # Simulate the invalidation on the backend
- zone_serial = self.central_service.get_zone(
- elevated_a, zone_id).serial
- self.central_service.update_status(
- elevated_a, zone_id, "SUCCESS", zone_serial)
-
- # Ensure that the old record for tenant a for the fip now owned by
- # tenant b is gone
- exc = self.assertRaises(rpc_dispatcher.ExpectedException,
- self.central_service.find_record,
- elevated_a, criterion)
-
- self.assertEqual(exceptions.RecordNotFound, exc.exc_info[0])
+ record = self.central_service.find_record(elevated_a, criterion)
+ self.assertEqual('DELETE', record.action)
+ self.assertEqual('PENDING', record.status)
def test_list_floatingips_no_allocations(self):
context = self.get_context(project_id='a')
@@ -2583,19 +2567,9 @@ class CentralServiceTest(CentralTestCase):
self.assertEqual(1, len(fips))
self.assertIsNone(fips[0]['ptrdname'])
- # Simulate the invalidation on the backend
- zone_serial = self.central_service.get_zone(
- elevated_a, zone_id).serial
- self.central_service.update_status(
- elevated_a, zone_id, "SUCCESS", zone_serial)
-
- # Ensure that the old record for tenant a for the fip now owned by
- # tenant b is gone
- exc = self.assertRaises(rpc_dispatcher.ExpectedException,
- self.central_service.find_record,
- elevated_a, criterion)
-
- self.assertEqual(exceptions.RecordNotFound, exc.exc_info[0])
+ record = self.central_service.find_record(elevated_a, criterion)
+ self.assertEqual('DELETE', record.action)
+ self.assertEqual('PENDING', record.status)
def test_set_floatingip(self):
context = self.get_context(project_id='a')
@@ -3548,6 +3522,30 @@ class CentralServiceTest(CentralTestCase):
self.wait_for_import(zone_import.id)
+ def test_create_zone_import_overquota(self):
+ self.config(
+ quota_zone_records=5,
+ quota_zone_recordsets=5,
+ )
+
+ # Create a Zone Import
+ context = self.get_context(project_id=utils.generate_uuid())
+ request_body = self.get_zonefile_fixture()
+ zone_import = self.central_service.create_zone_import(context,
+ request_body)
+
+ # Ensure all values have been set correctly
+ self.assertIsNotNone(zone_import['id'])
+ self.assertEqual('PENDING', zone_import.status)
+ self.assertIsNone(zone_import.message)
+ self.assertIsNone(zone_import.zone_id)
+
+ zone_import = self.wait_for_import(zone_import.id, error_is_ok=True)
+
+ self.assertEqual('Quota exceeded during zone import.',
+ zone_import.message)
+ self.assertEqual('ERROR', zone_import.status)
+
def test_find_zone_imports(self):
context = self.get_context(project_id=utils.generate_uuid())
diff --git a/designate/tests/unit/backend/test_agent.py b/designate/tests/unit/backend/test_agent.py
index d1e585bf..408544d8 100644
--- a/designate/tests/unit/backend/test_agent.py
+++ b/designate/tests/unit/backend/test_agent.py
@@ -14,11 +14,13 @@
from unittest import mock
import dns
+import dns.query
import dns.rdataclass
import dns.rdatatype
import designate.backend.agent as agent
import designate.backend.private_codes as pcodes
+from designate import dnsutils
from designate import exceptions
from designate import objects
from designate import tests
@@ -130,7 +132,7 @@ class AgentBackendTestCase(tests.TestCase):
def test_make_and_send_dns_message_bad_response(self):
self.backend._make_dns_message = mock.Mock(return_value='')
self.backend._send_dns_message = mock.Mock(
- return_value=agent.dns_query.BadResponse())
+ return_value=dns.query.BadResponse())
out = self.backend._make_and_send_dns_message('h', 123, 1, 2, 3, 4, 5)
@@ -176,50 +178,16 @@ class AgentBackendTestCase(tests.TestCase):
self.assertEqual((response, 0), out)
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message(self, mock_udp, mock_tcp):
+ @mock.patch.object(dnsutils, 'get_ip_address')
+ @mock.patch.object(dns.query, 'tcp')
+ @mock.patch.object(dns.query, 'udp')
+ def test_send_dns_message(self, mock_udp, mock_tcp, mock_get_ip_address):
mock_udp.return_value = 'mock udp resp'
+ mock_get_ip_address.return_value = '10.0.1.39'
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
+ out = self.backend._send_dns_message('msg', '10.0.1.39', 123, 1)
- self.assertFalse(agent.dns_query.tcp.called)
- agent.dns_query.udp.assert_called_with('msg', 'host', port=123,
- timeout=1)
+ self.assertFalse(mock_tcp.called)
+ mock_udp.assert_called_with('msg', '10.0.1.39', port=123,
+ timeout=1)
self.assertEqual('mock udp resp', out)
-
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message_timeout(self, mock_udp, mock_tcp):
- mock_udp.side_effect = dns.exception.Timeout
-
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
-
- agent.dns_query.udp.assert_called_with('msg', 'host', port=123,
- timeout=1)
- self.assertIsInstance(out, dns.exception.Timeout)
-
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message_bad_response(self, mock_udp, mock_tcp):
- mock_udp.side_effect = agent.dns_query.BadResponse
-
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
-
- agent.dns_query.udp.assert_called_with('msg', 'host', port=123,
- timeout=1)
- self.assertIsInstance(out, agent.dns_query.BadResponse)
-
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message_tcp(self, mock_udp, mock_tcp):
- self.CONF.set_override('all_tcp', True, 'service:mdns')
-
- mock_tcp.return_value = 'mock tcp resp'
-
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
-
- self.assertFalse(agent.dns_query.udp.called)
- agent.dns_query.tcp.assert_called_with('msg', 'host', port=123,
- timeout=1)
- self.assertEqual('mock tcp resp', out)
diff --git a/designate/tests/unit/mdns/test_notify.py b/designate/tests/unit/mdns/test_notify.py
index 68b47467..45c82338 100644
--- a/designate/tests/unit/mdns/test_notify.py
+++ b/designate/tests/unit/mdns/test_notify.py
@@ -20,6 +20,7 @@ import dns
import dns.rdataclass
import dns.rdatatype
+from designate import dnsutils
import designate.mdns.notify as notify
import designate.tests
from designate.tests.unit import RoObject
@@ -130,12 +131,11 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual(('ERROR', 310, 0), out)
@mock.patch('time.sleep')
- def test_make_and_send_dns_message_timeout(self, mock_sleep):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_timeout(self, mock_send_dns_message,
+ mock_sleep):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
- self.notify._send_dns_message = mock.Mock(
- side_effect=dns.exception.Timeout
- )
+ mock_send_dns_message.side_effect = dns.exception.Timeout
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -143,12 +143,12 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 3), out)
- def test_make_and_send_dns_message_bad_response(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_bad_response(self,
+ mock_send_dns_message):
zone = RoObject(name='zn')
self.notify._make_dns_message = mock.Mock(return_value='')
- self.notify._send_dns_message = mock.Mock(
- side_effect=notify.dns_query.BadResponse
- )
+ mock_send_dns_message.side_effect = notify.dns_query.BadResponse
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -157,15 +157,14 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 1), out)
@mock.patch('time.sleep')
- def test_make_and_send_dns_message_eagain(self, mock_sleep):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_eagain(self, mock_send_dns_message,
+ mock_sleep):
# bug #1558096
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
socket_error = socket.error()
socket_error.errno = socket.errno.EAGAIN
- self.notify._send_dns_message = mock.Mock(
- side_effect=socket_error
- )
+ mock_send_dns_message.side_effect = socket_error
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -173,15 +172,15 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 3), out)
- def test_make_and_send_dns_message_econnrefused(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_econnrefused(self,
+ mock_send_dns_message):
# bug #1558096
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
socket_error = socket.error()
socket_error.errno = socket.errno.ECONNREFUSED
# socket errors other than EAGAIN should raise
- self.notify._send_dns_message = mock.Mock(
- side_effect=socket_error)
+ mock_send_dns_message.side_effect = socket_error
self.assertRaises(
socket.error,
@@ -189,11 +188,11 @@ class MdnsNotifyTest(designate.tests.TestCase):
zone, 'host', 123, 1, 2, 3
)
- def test_make_and_send_dns_message_nxdomain(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_nxdomain(self, mock_send_dns_message):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
response = RoObject(rcode=mock.Mock(return_value=dns.rcode.NXDOMAIN))
- self.notify._send_dns_message = mock.Mock(return_value=response)
+ mock_send_dns_message.return_value = response
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -201,17 +200,17 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((response, 1), out)
- def test_make_and_send_dns_message_missing_AA_flags(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_missing_AA_flags(self,
+ mock_send_dns_message):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
-
response = RoObject(
rcode=mock.Mock(return_value=dns.rcode.NOERROR),
# rcode is NOERROR but (flags & dns.flags.AA) gives 0
flags=0,
answer=['answer'],
)
- self.notify._send_dns_message = mock.Mock(return_value=response)
+ mock_send_dns_message.return_value = response
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -219,9 +218,10 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 1), out)
- def test_make_and_send_dns_message_error_flags(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_error_flags(self,
+ mock_send_dns_message):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
response = RoObject(
rcode=mock.Mock(return_value=dns.rcode.NOERROR),
# rcode is NOERROR but flags are not NOERROR
@@ -229,7 +229,7 @@ class MdnsNotifyTest(designate.tests.TestCase):
ednsflags=321,
answer=['answer'],
)
- self.notify._send_dns_message = mock.Mock(return_value=response)
+ mock_send_dns_message.return_value = response
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -266,23 +266,3 @@ class MdnsNotifyTest(designate.tests.TestCase):
';AUTHORITY',
';ADDITIONAL',
], txt)
-
- @mock.patch.object(notify.dns_query, 'udp')
- def test_send_udp_dns_message(self, mock_udp):
- self.CONF.set_override('all_tcp', False, 'service:mdns')
-
- self.notify._send_dns_message('msg', '192.0.2.1', 1234, 1)
-
- mock_udp.assert_called_with(
- 'msg', '192.0.2.1', port=1234, timeout=1
- )
-
- @mock.patch.object(notify.dns_query, 'tcp')
- def test_send_tcp_dns_message(self, mock_tcp):
- self.CONF.set_override('all_tcp', True, 'service:mdns')
-
- self.notify._send_dns_message('msg', '192.0.2.1', 1234, 1)
-
- mock_tcp.assert_called_with(
- 'msg', '192.0.2.1', port=1234, timeout=1
- )
diff --git a/designate/tests/unit/test_central/test_basic.py b/designate/tests/unit/test_central/test_basic.py
index e75922e3..d3bb6b41 100644
--- a/designate/tests/unit/test_central/test_basic.py
+++ b/designate/tests/unit/test_central/test_basic.py
@@ -789,13 +789,13 @@ class CentralZoneTestCase(CentralBasic):
def test_add_ns_creation(self):
self.service._create_ns = mock.Mock()
- self.service.find_recordsets = mock.Mock(
- return_value=[]
+ self.service.find_recordset = mock.Mock(
+ side_effect=exceptions.RecordSetNotFound()
)
self.service._add_ns(
self.context,
- RoObject(id=CentralZoneTestCase.zone__id),
+ RoObject(name='foo', id=CentralZoneTestCase.zone__id),
RoObject(name='bar')
)
ctx, zone, records = self.service._create_ns.call_args[0]
@@ -804,16 +804,15 @@ class CentralZoneTestCase(CentralBasic):
def test_add_ns(self):
self.service._update_recordset_in_storage = mock.Mock()
- recordsets = [
- RoObject(records=objects.RecordList.from_list([]), managed=True)
- ]
- self.service.find_recordsets = mock.Mock(
- return_value=recordsets
+ self.service.find_recordset = mock.Mock(
+ return_value=RoObject(
+ records=objects.RecordList.from_list([]), managed=True
+ )
)
self.service._add_ns(
self.context,
- RoObject(id=CentralZoneTestCase.zone__id),
+ RoObject(name='foo', id=CentralZoneTestCase.zone__id),
RoObject(name='bar')
)
ctx, zone, rset = \
@@ -822,29 +821,6 @@ class CentralZoneTestCase(CentralBasic):
self.assertTrue(rset.records[0].managed)
self.assertEqual('bar', rset.records[0].data.name)
- def test_add_ns_with_other_ns_rs(self):
- self.service._update_recordset_in_storage = mock.Mock()
-
- recordsets = [
- RoObject(records=objects.RecordList.from_list([]), managed=True),
- RoObject(records=objects.RecordList.from_list([]), managed=False)
- ]
-
- self.service.find_recordsets = mock.Mock(
- return_value=recordsets
- )
-
- self.service._add_ns(
- self.context,
- RoObject(id=CentralZoneTestCase.zone__id),
- RoObject(name='bar')
- )
- ctx, zone, rset = \
- self.service._update_recordset_in_storage.call_args[0]
- self.assertEqual(1, len(rset.records))
- self.assertTrue(rset.records[0].managed)
- self.assertEqual('bar', rset.records[0].data.name)
-
def test_create_zone_no_servers(self):
self.service._enforce_zone_quota = mock.Mock()
self.service._is_valid_zone_name = mock.Mock()
diff --git a/designate/tests/unit/test_dnsutils.py b/designate/tests/unit/test_dnsutils.py
index 8345b8ef..eac016fc 100644
--- a/designate/tests/unit/test_dnsutils.py
+++ b/designate/tests/unit/test_dnsutils.py
@@ -23,6 +23,7 @@ import dns.rcode
import dns.rdatatype
import dns.zone
import eventlet
+from oslo_config import cfg
import oslotest.base
from dns import zone as dnszone
@@ -31,6 +32,8 @@ from designate import dnsutils
from designate import exceptions
from designate import objects
+CONF = cfg.CONF
+
SAMPLES = {
("cname.example.com.", "CNAME"): {
"ttl": 10800,
@@ -320,3 +323,19 @@ class TestDoAfxr(oslotest.base.BaseTestCase):
self.assertTrue(mock_xfr.called)
self.assertTrue(mock_from_xfr.called)
+
+ @mock.patch.object(dns.query, 'udp')
+ def test_send_udp_dns_message(self, mock_udp):
+ CONF.set_override('all_tcp', False, 'service:mdns')
+ dnsutils.send_dns_message('msg', '192.0.2.1', 1234, 1)
+ mock_udp.assert_called_with(
+ 'msg', '192.0.2.1', port=1234, timeout=1
+ )
+
+ @mock.patch.object(dns.query, 'tcp')
+ def test_send_tcp_dns_message(self, mock_tcp):
+ CONF.set_override('all_tcp', True, 'service:mdns')
+ dnsutils.send_dns_message('msg', '192.0.2.1', 1234, 1)
+ mock_tcp.assert_called_with(
+ 'msg', '192.0.2.1', port=1234, timeout=1
+ )
diff --git a/designate/tests/unit/workers/test_base_task.py b/designate/tests/unit/workers/test_base_task.py
index 9b37b52e..f419f4f4 100644
--- a/designate/tests/unit/workers/test_base_task.py
+++ b/designate/tests/unit/workers/test_base_task.py
@@ -14,17 +14,105 @@
# License for the specific language governing permissions and limitations
# under the License.
import threading
import oslotest.base
+from unittest import mock
+from designate import exceptions
+from designate import objects
from designate.worker.tasks import base
class TestTask(oslotest.base.BaseTestCase):
def setUp(self):
super(TestTask, self).setUp()
+ self.context = mock.Mock()
self.task = base.Task(None)
+ self.storage = self.task._storage = mock.Mock()
def test_constructor(self):
self.assertTrue(self.task)
def test_call(self):
self.assertRaises(NotImplementedError, self.task)
+
+ def test_current_action_is_valid(self):
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='UPDATE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='CREATE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='UPDATE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='DELETE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'DELETE', objects.Zone(action='DELETE'))
+ )
+
+ def test_current_action_delete_always_valid(self):
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'DELETE', None)
+ )
+
+ def test_current_action_bad_storage_always_valid(self):
+ self.storage.get_zone = mock.Mock(
+ side_effect=exceptions.DesignateException()
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ def test_current_action_is_not_valid_none(self):
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='NONE')
+ )
+ self.assertFalse(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
+
+ def test_current_action_is_not_valid_deleted(self):
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='DELETE')
+ )
+ self.assertFalse(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
+
+ def test_current_action_is_not_found(self):
+ self.storage.get_zone = mock.Mock(
+ side_effect=exceptions.ZoneNotFound()
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ side_effect=exceptions.ZoneNotFound()
+ )
+ self.assertFalse(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
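
Read together, these new unit tests pin down the decision matrix of Task.is_current_action_valid(), which is introduced in the designate/worker/tasks/base.py hunk further down: DELETE is always allowed to proceed, a zone whose stored action has moved to DELETE or NONE invalidates the current action, ZoneNotFound only stays valid for CREATE, and any other storage failure defaults to letting the action continue. A standalone distillation of those rules (illustrative only, not the designate API) reads:

# Illustrative distillation of the rules asserted above; the real logic is
# Task.is_current_action_valid() in designate/worker/tasks/base.py below.
def action_still_valid(current_action, stored_action=None,
                       zone_found=True, storage_ok=True):
    if current_action == 'DELETE':
        return True                        # deletes always proceed
    if not storage_ok:
        return True                        # unknown storage errors default to valid
    if not zone_found:
        return current_action == 'CREATE'  # a missing zone is only fine for CREATE
    return stored_action not in ('DELETE', 'NONE')


assert action_still_valid('UPDATE', 'UPDATE')
assert not action_still_valid('UPDATE', 'NONE')
assert action_still_valid('CREATE', zone_found=False)
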
diff --git a/designate/tests/unit/workers/test_zone_tasks.py b/designate/tests/unit/workers/test_zone_tasks.py
index 25b9c41a..47eb7d27 100644
--- a/designate/tests/unit/workers/test_zone_tasks.py
+++ b/designate/tests/unit/workers/test_zone_tasks.py
@@ -20,11 +20,11 @@ import oslotest.base
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
+from designate import dnsutils
from designate import exceptions
from designate import objects
from designate.tests.unit import utils
from designate.worker import processing
-from designate.worker import utils as wutils
from designate.worker.tasks import zone
CONF = cfg.CONF
@@ -167,7 +167,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
self.context = mock.Mock()
self.executor = mock.Mock()
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_create(self, mock_notify):
self.zone = objects.Zone(name='example.org.', action='CREATE')
self.actor = zone.ZoneActionOnTarget(
@@ -185,7 +185,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
port=53
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_update(self, mock_notify):
self.zone = objects.Zone(name='example.org.', action='UPDATE')
self.actor = zone.ZoneActionOnTarget(
@@ -203,7 +203,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
port=53
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_delete(self, mock_notify):
self.zone = objects.Zone(name='example.org.', action='DELETE')
self.actor = zone.ZoneActionOnTarget(
@@ -217,7 +217,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
mock_notify.assert_not_called()
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
@mock.patch('time.sleep', mock.Mock())
def test_call_exception_raised(self, mock_notify):
self.backend.create_zone.side_effect = exceptions.BadRequest()
@@ -250,7 +250,7 @@ class TestSendNotify(oslotest.base.BaseTestCase):
self.executor = mock.Mock()
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_notify(self, mock_notify):
self.zone = objects.Zone(name='example.org.')
self.actor = zone.SendNotify(
@@ -267,7 +267,7 @@ class TestSendNotify(oslotest.base.BaseTestCase):
port=53
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_notify_timeout(self, mock_notify):
mock_notify.side_effect = dns.exception.Timeout()
self.zone = objects.Zone(name='example.org.')
@@ -282,7 +282,7 @@ class TestSendNotify(oslotest.base.BaseTestCase):
self.actor
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_dont_notify(self, mock_notify):
CONF.set_override('notify', False, 'service:worker')
@@ -668,11 +668,11 @@ class TestPollForZone(oslotest.base.BaseTestCase):
self.task._max_retries = 3
self.task._retry_interval = 2
- @mock.patch.object(zone.wutils, 'get_serial', mock.Mock(return_value=10))
+ @mock.patch.object(dnsutils, 'get_serial', mock.Mock(return_value=10))
def test_get_serial(self):
self.assertEqual(10, self.task._get_serial())
- zone.wutils.get_serial.assert_called_with(
+ dnsutils.get_serial.assert_called_with(
'example.org.',
'ns.example.org',
port=53
diff --git a/designate/worker/README.md b/designate/worker/README.md
index 9c5d9920..47da1934 100644
--- a/designate/worker/README.md
+++ b/designate/worker/README.md
@@ -29,7 +29,7 @@ class SendNotify(base.Task):
port = int(self.target.options.get('port'))
try:
- wutils.notify(self.zone.name, host, port=port)
+ dnsutils.notify(self.zone.name, host, port=port)
return True
except Exception:
return False
diff --git a/designate/worker/tasks/base.py b/designate/worker/tasks/base.py
index b6959391..5c3c8294 100644
--- a/designate/worker/tasks/base.py
+++ b/designate/worker/tasks/base.py
@@ -18,6 +18,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from designate.central import rpcapi as central_rpcapi
+from designate import exceptions
from designate import quota
from designate import storage
from designate import utils
@@ -139,5 +140,52 @@ class Task(TaskConfig):
self._worker_api = worker_rpcapi.WorkerAPI.get_instance()
return self._worker_api
+ def is_current_action_valid(self, context, action, zone):
+ """Is our current action still valid?"""
+
+ # We always allow for DELETE operations.
+ if action == 'DELETE':
+ return True
+
+ try:
+ zone = self.storage.get_zone(context, zone.id)
+
+ # If the zone is either in a DELETE or NONE state,
+ # we don't need to continue with the current action.
+ if zone.action in ['DELETE', 'NONE']:
+ LOG.info(
+ 'Failed to %(action)s zone_name=%(zone_name)s '
+ 'zone_id=%(zone_id)s action state has changed '
+ 'to %(current_action)s, not retrying action',
+ {
+ 'action': action,
+ 'zone_name': zone.name,
+ 'zone_id': zone.id,
+ 'current_action': zone.action,
+ }
+ )
+ return False
+ except exceptions.ZoneNotFound:
+ if action != 'CREATE':
+ LOG.info(
+ 'Failed to %(action)s zone_name=%(zone_name)s '
+ 'zone_id=%(zone_id)s Error=ZoneNotFound',
+ {
+ 'action': action,
+ 'zone_name': zone.name,
+ 'zone_id': zone.id,
+ }
+ )
+ return False
+ except Exception as e:
+ LOG.warning(
+ 'Error trying to get zone action. Error=%(error)s',
+ {
+ 'error': str(e),
+ }
+ )
+
+ return True
+
def __call__(self):
raise NotImplementedError
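
The intended call pattern shows up in the ZonePoller hunk below, where the check gates further retries. A minimal, self-contained sketch of that guard (the polling class and its query logic here are stand-ins, not designate code):

# Stand-alone sketch of the retry guard ZonePoller adopts below; the zone,
# storage access and query result are stand-ins, not designate code.
import time


class GuardedPoller:
    def __init__(self, action, stored_action, max_retries=3, interval=0.01):
        self.action = action                # action this task is performing
        self.stored_action = stored_action  # what storage currently reports
        self.max_retries = max_retries
        self.interval = interval

    def _action_still_valid(self):
        # Simplified stand-in for Task.is_current_action_valid().
        if self.action == 'DELETE':
            return True
        return self.stored_action not in ('DELETE', 'NONE')

    def poll(self):
        result = None
        for attempt in range(self.max_retries):
            result = self._query()
            if result:
                return result
            time.sleep(self.interval)
            # Give up early if the zone's action changed underneath us,
            # e.g. the zone was deleted while we were still polling.
            if not self._action_still_valid():
                break
        return result

    def _query(self):
        return None  # pretend the expected serial never shows up


print(GuardedPoller('UPDATE', 'DELETE').poll())  # stops after the first attempt
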
diff --git a/designate/worker/tasks/zone.py b/designate/worker/tasks/zone.py
index 3189de27..6b18b693 100644
--- a/designate/worker/tasks/zone.py
+++ b/designate/worker/tasks/zone.py
@@ -20,10 +20,10 @@ import dns
from oslo_config import cfg
from oslo_log import log as logging
-from designate.worker import utils as wutils
-from designate.worker.tasks import base
+from designate import dnsutils
from designate import exceptions
from designate import utils
+from designate.worker.tasks import base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -88,17 +88,21 @@ class ZoneActionOnTarget(base.Task):
self.action, self.zone.name, self.target)
return True
except Exception as e:
- LOG.info('Failed to %(action)s zone %(zone)s on '
- 'target %(target)s on attempt %(attempt)d, '
- 'Error: %(error)s.',
- {
- 'action': self.action,
- 'zone': self.zone.name,
- 'target': self.target.id,
- 'attempt': retry + 1,
- 'error': str(e)
- })
- time.sleep(self.retry_interval)
+ LOG.info(
+ 'Failed to %(action)s zone_name=%(zone_name)s '
+ 'zone_id=%(zone_id)s on target=%(target)s on '
+ 'attempt=%(attempt)d Error=%(error)s',
+ {
+ 'action': self.action,
+ 'zone_name': self.zone.name,
+ 'zone_id': self.zone.id,
+ 'target': self.target,
+ 'attempt': retry + 1,
+ 'error': str(e),
+ }
+ )
+
+ time.sleep(self.retry_interval)
return False
@@ -124,7 +128,7 @@ class SendNotify(base.Task):
port = int(self.target.options.get('port'))
try:
- wutils.notify(self.zone.name, host, port=port)
+ dnsutils.notify(self.zone.name, host, port=port)
LOG.debug('Sent NOTIFY to %(host)s:%(port)s for zone %(zone)s',
{
'host': host,
@@ -311,7 +315,7 @@ class PollForZone(base.Task):
self.ns = ns
def _get_serial(self):
- return wutils.get_serial(
+ return dnsutils.get_serial(
self.zone.name,
self.ns.host,
port=self.ns.port
@@ -404,6 +408,10 @@ class ZonePoller(base.Task, ThresholdMixin):
{'zone': self.zone.name, 'n': retry + 1})
time.sleep(retry_interval)
+ if not self.is_current_action_valid(self.context, self.zone.action,
+ self.zone):
+ break
+
return query_result
def _on_failure(self, error_status):
diff --git a/designate/worker/utils.py b/designate/worker/utils.py
deleted file mode 100644
index f82d5432..00000000
--- a/designate/worker/utils.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2016 Rackspace Inc.
-#
-# Author: Tim Simmons <tim.simmons@rackspace>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import dns
-import dns.exception
-import dns.query
-from oslo_config import cfg
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-def prepare_msg(zone_name, rdatatype=dns.rdatatype.SOA, notify=False):
- """
- Do the needful to set up a dns packet with dnspython
- """
- dns_message = dns.message.make_query(zone_name, rdatatype)
- if notify:
- dns_message.set_opcode(dns.opcode.NOTIFY)
- else:
- dns_message.set_opcode(dns.opcode.QUERY)
- return dns_message
-
-
-def dig(zone_name, host, rdatatype, port=53):
- """
- Set up and send a regular dns query, datatype configurable
- """
- query = prepare_msg(zone_name, rdatatype=rdatatype)
-
- return send_dns_msg(query, host, port=port)
-
-
-def notify(zone_name, host, port=53):
- """
- Set up a notify packet and send it
- """
- msg = prepare_msg(zone_name, notify=True)
-
- return send_dns_msg(msg, host, port=port)
-
-
-def send_dns_msg(dns_message, host, port=53):
- """
- Send the dns message and return the response
-
- :return: dns.Message of the response to the dns query
- """
- # This can raise some exceptions, but we'll catch them elsewhere
- if not CONF['service:mdns'].all_tcp:
- return dns.query.udp(
- dns_message, host, port=port, timeout=10)
- else:
- return dns.query.tcp(
- dns_message, host, port=port, timeout=10)
-
-
-def get_serial(zone_name, host, port=53):
- """
- Possibly raises dns.exception.Timeout or dns.query.BadResponse.
- Possibly returns 0 if, e.g., the answer section is empty.
- """
- resp = dig(zone_name, host, dns.rdatatype.SOA, port=port)
- if not resp.answer:
- return 0
- rdataset = resp.answer[0].to_rdataset()
- if not rdataset:
- return 0
- return rdataset[0].serial
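
With this module removed outright, the notify(), dig(), get_serial() and send_dns_msg() helpers the worker relied on are presumably re-homed in designate/dnsutils.py (its diff appears earlier in this change, and the updated worker code and tests above now call dnsutils.notify, dnsutils.get_serial and dnsutils.send_dns_message). Reconstructed from the deleted code above, the relocated helpers can be rebuilt on top of the new dnsutils.send_dns_message() wrapper roughly as follows; the actual bodies in dnsutils.py may differ, and the timeout of 10 seconds is carried over from the deleted code:

# Rough reconstruction of the relocated helpers, based on the deleted
# worker/utils.py above; the authoritative versions live in designate/dnsutils.py.
import dns.message
import dns.opcode
import dns.rdatatype

from designate import dnsutils


def notify(zone_name, host, port=53):
    """Send a NOTIFY for zone_name and return the response."""
    msg = dns.message.make_query(zone_name, dns.rdatatype.SOA)
    msg.set_opcode(dns.opcode.NOTIFY)
    return dnsutils.send_dns_message(msg, host, port=port, timeout=10)


def get_serial(zone_name, host, port=53):
    """Return the SOA serial for zone_name, or 0 if the answer is empty."""
    query = dns.message.make_query(zone_name, dns.rdatatype.SOA)
    resp = dnsutils.send_dns_message(query, host, port=port, timeout=10)
    if not resp.answer:
        return 0
    rdataset = resp.answer[0].to_rdataset()
    if not rdataset:
        return 0
    return rdataset[0].serial
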