summaryrefslogtreecommitdiff
path: root/integration/tests
diff options
context:
space:
mode:
authorAmrith Kumar <amrith@tesora.com>2016-10-09 07:13:29 -0400
committerAmrith Kumar <amrith@tesora.com>2016-10-09 07:14:25 -0400
commitb120be1772f2416730ea1066782bf0813ca2dd1a (patch)
treee86dddeed295fdb97489f66170d6965a58474cd9 /integration/tests
parent473d360b906dba80f0f914e88b461221c1bfd5f3 (diff)
downloadtrove-b120be1772f2416730ea1066782bf0813ca2dd1a.tar.gz
Merge trove-integration into trove
This commit will merge into trove, the trove-integration tree as of commit 9f92ca853f8aa2f72921e54682c918941a8f0919. This is in preparation for making trove-integration go away. In addition, it suppresses any consideration of the integration directory in the trove tox tests as it is understandably a small pile of pooh and in need of much cleanup. Change-Id: Ib7f2655c4c5ed86b5454708c04371ee55e37ec2d Partially-Implements-Blueprint: eliminate-trove-integration-and-redstack
Diffstat (limited to 'integration/tests')
-rw-r--r--integration/tests/examples/README13
-rwxr-xr-xintegration/tests/examples/example_gen.sh4
-rw-r--r--integration/tests/examples/examples/client.py235
-rw-r--r--integration/tests/examples/examples/example_generation.py1042
-rw-r--r--integration/tests/examples/examples/local.conf10
-rwxr-xr-xintegration/tests/examples/gendoc.sh8
-rw-r--r--integration/tests/examples/local.conf10
-rw-r--r--integration/tests/examples/setup.py30
-rw-r--r--integration/tests/examples/tox.ini16
-rw-r--r--integration/tests/integration/core.test.conf48
-rw-r--r--integration/tests/integration/int_tests.py263
-rw-r--r--integration/tests/integration/localhost.test.conf95
-rwxr-xr-xintegration/tests/integration/run_local.sh57
-rw-r--r--integration/tests/integration/setup.py30
-rw-r--r--integration/tests/integration/tests/README1
-rw-r--r--integration/tests/integration/tests/__init__.py27
-rw-r--r--integration/tests/integration/tests/api/__init__.py13
-rw-r--r--integration/tests/integration/tests/api/delete_all.py32
-rw-r--r--integration/tests/integration/tests/api/instances_pagination.py219
-rw-r--r--integration/tests/integration/tests/api/instances_quotas.py47
-rw-r--r--integration/tests/integration/tests/api/instances_states.py76
-rw-r--r--integration/tests/integration/tests/colorizer.py446
-rw-r--r--integration/tests/integration/tests/dns/__init__.py0
-rw-r--r--integration/tests/integration/tests/dns/check_domain.py174
-rw-r--r--integration/tests/integration/tests/dns/concurrency.py111
-rw-r--r--integration/tests/integration/tests/dns/conversion.py105
-rw-r--r--integration/tests/integration/tests/dns/dns.py104
-rw-r--r--integration/tests/integration/tests/initialize.py176
-rw-r--r--integration/tests/integration/tests/smoke/__init__.py0
-rw-r--r--integration/tests/integration/tests/smoke/instance.py103
-rw-r--r--integration/tests/integration/tests/util/__init__.py16
-rw-r--r--integration/tests/integration/tests/util/report.py76
-rw-r--r--integration/tests/integration/tests/util/rpc.py110
-rw-r--r--integration/tests/integration/tests/util/services.py280
-rw-r--r--integration/tests/integration/tests/volumes/__init__.py25
-rw-r--r--integration/tests/integration/tests/volumes/driver.py546
-rw-r--r--integration/tests/integration/tox.ini28
37 files changed, 4576 insertions, 0 deletions
diff --git a/integration/tests/examples/README b/integration/tests/examples/README
new file mode 100644
index 00000000..f7ebb0b8
--- /dev/null
+++ b/integration/tests/examples/README
@@ -0,0 +1,13 @@
+Example Generator
+
+Settings for the example generator are in examples/local.conf
+
+After customizing examples/local.conf, run:
+./example_gen.sh
+
+2012-06-12
+ - Updated to work with trove
+ - All XML calls are commented out
+ - Management calls are also commented out
+2012-06-14
+ - Re-enabled XML calls
diff --git a/integration/tests/examples/example_gen.sh b/integration/tests/examples/example_gen.sh
new file mode 100755
index 00000000..cbbe153f
--- /dev/null
+++ b/integration/tests/examples/example_gen.sh
@@ -0,0 +1,4 @@
#!/bin/bash
# Generate the API documentation snippets using the settings in
# examples/local.conf (see examples/README).

python ./examples/example_generation.py ./examples/local.conf
diff --git a/integration/tests/examples/examples/client.py b/integration/tests/examples/examples/client.py
new file mode 100644
index 00000000..e52d1833
--- /dev/null
+++ b/integration/tests/examples/examples/client.py
@@ -0,0 +1,235 @@
+import httplib2
+import json
+import os
+import re
+import sys
+import time
+from urlparse import urlparse
+import xml.dom.minidom
+
+from proboscis.asserts import *
+from troveclient.compat.client import TroveHTTPClient
+from troveclient.compat.xml import TroveXmlClient
+
+
# When True, echo each request/response to stdout as snippets are written.
print_req = True
+
+
class ConfigFile(object):
    """Example-generator settings loaded from a JSON config file.

    Exposes the snippet output ``directory``, service URLs, credentials and
    the authenticated ``headers``/``tenantID``/``dbaas_url`` derived from
    them.

    Raises RuntimeError if the file is missing or is not valid JSON.
    """

    def __init__(self, config_file):
        if not os.path.exists(config_file):
            raise RuntimeError("Could not find Example CONF at %s." %
                               config_file)
        file_contents = open(config_file, "r").read()
        try:
            config = json.loads(file_contents)
        except Exception as exception:
            msg = 'Error loading config file "%s".' % config_file
            raise RuntimeError(msg, exception)

        self.directory = config.get("directory", None)
        # Normalize so later "%sdb-..." concatenation forms a valid path.
        # Guard against a missing "directory" key (was an AttributeError).
        if self.directory and not self.directory.endswith('/'):
            self.directory += '/'
        self.api_url = config.get("api_url", None)
        self.auth_url = config.get("auth_url", None)
        self.username = config.get("username", None)
        self.password = config.get("password", None)
        self.tenant = config.get("tenant", None)
        self.replace_host = config.get("replace_host", None)
        self.replace_dns_hostname = config.get("replace_dns_hostname", None)
        if self.auth_url:
            auth_id, tenant_id = self.get_auth_token_id_tenant_id(
                self.auth_url, self.username, self.password)
        else:
            # No auth service configured: fake mode, the tenant name doubles
            # as both the token and the tenant id.
            auth_id = self.tenant
            tenant_id = self.tenant

        print("id = %s" % auth_id)
        self.headers = {
            'X-Auth-Token': str(auth_id)
        }
        print("tenantID = %s" % tenant_id)
        self.tenantID = tenant_id
        self.dbaas_url = "%s/v1.0/%s" % (self.api_url, self.tenantID)

    def get_auth_token_id_tenant_id(self, url, username, password):
        """POST a Keystone v2.0 tokens request; return (token_id, tenant_id).

        BUG FIX: __init__ called this method but the class never defined it
        (it only existed on ExampleClient), so any config with an auth_url
        raised AttributeError.
        """
        body = ('{"auth":{"tenantName": "%s", "passwordCredentials": '
                '{"username": "%s", "password": "%s"}}}')
        body = body % (self.tenant, username, password)
        http = httplib2.Http(disable_ssl_certificate_validation=True)
        req_headers = {'User-Agent': "python-example-client",
                       'Content-Type': "application/json",
                       'Accept': "application/json",
                       }
        resp, body = http.request(url, 'POST', body=body, headers=req_headers)
        auth = json.loads(body)
        auth_id = auth['access']['token']['id']
        tenant_id = auth['access']['token']['tenant']['id']
        return auth_id, tenant_id
+
+
def shorten_url(url):
    """Return only the path portion of *url*, keeping any query string."""
    parts = urlparse(url)
    suffix = '?' + parts.query if parts.query else ''
    return parts.path + suffix
+
+
class SnippetWriter(object):
    """Writes captured HTTP requests/responses to per-call snippet files.

    Each exchange becomes two files under ``conf.directory``:
    db-<name>-request.<type> and db-<name>-response.<type>, with real
    tenant ids and hosts scrubbed for use in documentation.
    """

    def __init__(self, conf):
        # conf is a ConfigFile-like object (directory, tenantID, api_url,
        # replace_host, replace_dns_hostname).
        self.conf = conf

    def _indent_xml(self, my_string):
        """Normalize and pretty-print an XML document string."""
        # NOTE(review): on Python 3 this yields bytes while the regexes are
        # str patterns — this module predates py3; confirm before porting.
        my_string = my_string.encode("utf-8")
        # Collapse whitespace between tags so toprettyxml() starts clean.
        my_re = re.compile(r'>\s+([^\s])', re.DOTALL)
        # BUG FIX: was `myre.sub` (undefined name -> NameError).
        my_string = my_re.sub(r'>\g<1>', my_string)
        my_string = xml.dom.minidom.parseString(my_string).toprettyxml()
        # Put simple text nodes back on one line with their tags.
        my_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
        my_string = my_re.sub(r'>\g<1></', my_string)
        return my_string

    def output_request(self, url, output_headers, body, content_type, method,
                       static_auth_token=True):
        """Format an outgoing request as readable HTTP text.

        static_auth_token replaces the real token with a fixed UUID so the
        generated documentation is stable.
        """
        output_list = []
        parsed = urlparse(url)
        method_url = shorten_url(url)
        output_list.append("%s %s HTTP/1.1" % (method, method_url))
        output_list.append("User-Agent: %s" % output_headers['User-Agent'])
        output_list.append("Host: %s" % parsed.netloc)
        if static_auth_token:
            output_token = '87c6033c-9ff6-405f-943e-2deb73f278b7'
        else:
            output_token = output_headers['X-Auth-Token']
        output_list.append("X-Auth-Token: %s" % output_token)
        output_list.append("Accept: %s" % output_headers['Accept'])
        print("OUTPUT HEADERS: %s" % output_headers)
        output_list.append("Content-Type: %s" % output_headers['Content-Type'])
        output_list.append("")
        pretty_body = self.format_body(body, content_type)
        output_list.append("%s" % pretty_body)
        output_list.append("")
        return '\n'.join(output_list)

    def output_response(self, resp, body, content_type):
        """Format an httplib2 response as readable HTTP text."""
        output_list = []
        # httplib2 reports the HTTP version as the integer 10 or 11.
        version = "1.1" if resp.version == 11 else "1.0"
        output_list.append("HTTP/%s %s %s" % (version, resp.status,
                                              resp.reason))
        output_list.append("Content-Type: %s" % resp['content-type'])
        output_list.append("Content-Length: %s" % resp['content-length'])
        output_list.append("Date: %s" % resp['date'])
        if body:
            output_list.append("")
            pretty_body = self.format_body(body, content_type)
            output_list.append("%s" % pretty_body)
            output_list.append("")
        return '\n'.join(output_list)

    def format_body(self, body, content_type):
        """Pretty-print a JSON or XML body, optionally rewriting the
        instance hostname to conf.replace_dns_hostname.

        Falls back to the raw body (or '') if it cannot be parsed.
        """
        if content_type == 'json':
            try:
                if self.conf.replace_dns_hostname:
                    before = r'\"hostname\": \"[a-zA-Z0-9-_\.]*\"'
                    after = '"hostname": "%s"' % self.conf.replace_dns_hostname
                    body = re.sub(before, after, body)
                return json.dumps(json.loads(body), sort_keys=True, indent=4)
            except Exception:
                return body or ''
        else:
            # Expected type of body is xml.
            try:
                if self.conf.replace_dns_hostname:
                    # BUG FIX: a trailing comma made this a 1-tuple, which
                    # re.sub() rejects; the TypeError was then silently
                    # swallowed below, so the hostname was never replaced.
                    hostname = ('hostname="%s"'
                                % self.conf.replace_dns_hostname)
                    body = re.sub(r'hostname=\"[a-zA-Z0-9-_\.]*\"',
                                  hostname, body)
                return self._indent_xml(body)
            except Exception:
                return body if body else ''

    def write_request_file(self, name, content_type, url, method,
                           req_headers, request_body):
        """Write the formatted request to its snippet file."""
        def write_request():
            return self.output_request(url, req_headers, request_body,
                                       content_type, method)
        if print_req:
            print("\t%s req url:%s" % (content_type, url))
            print("\t%s req method:%s" % (content_type, method))
            print("\t%s req headers:%s" % (content_type, req_headers))
            print("\t%s req body:%s" % (content_type, request_body))
        self.write_file(name, content_type, url, method, "request",
                        write_request)

    def write_response_file(self, name, content_type, url, method,
                            resp, resp_content):
        """Write the formatted response to its snippet file."""
        def write_response():
            return self.output_response(resp, resp_content, content_type)
        self.write_file(name, content_type, url, method, "response",
                        write_response)
        if print_req:
            print("\t%s resp:%s" % (content_type, resp))
            print("\t%s resp content:%s" % (content_type, resp_content))

    def write_file(self, name, content_type, url, method, in_or_out, func):
        """Render func() and write it to db-<name>-<in_or_out>.<type>,
        scrubbing real identifiers for documentation use.
        """
        filename = "%sdb-%s-%s.%s" % (self.conf.directory,
                                      name.replace('_', '-'), in_or_out,
                                      content_type)
        with open(filename, "w") as file:
            output = func()
            output = output.replace(self.conf.tenantID, '1234')
            if self.conf.replace_host:
                output = output.replace(self.conf.api_url,
                                        self.conf.replace_host)
                pre_host_port = urlparse(self.conf.api_url).netloc
                post_host = urlparse(self.conf.replace_host).netloc
                output = output.replace(pre_host_port, post_host)
            # Presumably scrubs fake-mode artifacts from the test service;
            # confirm against the fake backend.
            output = output.replace("fake_host", "hostname")
            output = output.replace("FAKE_", "")

            file.write(output)
+
+
+# This method is mixed into the client class.
+# It requires the following fields: snippet_writer, content_type, and
+# "name," the last of which must be set before each call.
+def write_to_snippet(self, args, kwargs, resp, body):
+ if self.name is None:
+ raise RuntimeError("'name' not set before call.")
+ url = args[0]
+ method = args[1]
+ request_headers = kwargs['headers']
+ request_body = kwargs.get('body', None)
+ response_headers = resp
+ response_body = body
+
+ # Log request
+ self.snippet_writer.write_request_file(self.name, self.content_type,
+ url, method, request_headers, request_body)
+ self.snippet_writer.write_response_file(self.name, self.content_type,
+ url, method, response_headers, response_body)
+
+ # Create a short url to assert against.
+ short_url = url
+ for prefix in (self.snippet_writer.conf.dbaas_url,
+ self.snippet_writer.conf.api_url):
+ if short_url.startswith(prefix):
+ short_url = short_url[len(prefix):]
+ self.old_info = {
+ 'url':shorten_url(short_url),
+ 'method': method,
+ 'request_headers':request_headers,
+ 'request_body':request_body,
+ 'response_headers':response_headers,
+ 'response_body':response_body
+ }
+
+
class JsonClient(TroveHTTPClient):
    """Snippet-recording Trove client for the JSON content type."""

    content_type = 'json'

    def http_log(self, *args, **kwargs):
        # Hook invoked by the base client; record the exchange as snippets.
        return write_to_snippet(self, *args, **kwargs)
+
+
class XmlClient(TroveXmlClient):
    """Snippet-recording Trove client for the XML content type."""

    content_type = 'xml'

    def http_log(self, *args, **kwargs):
        # Hook invoked by the base client; record the exchange as snippets.
        return write_to_snippet(self, *args, **kwargs)
diff --git a/integration/tests/examples/examples/example_generation.py b/integration/tests/examples/examples/example_generation.py
new file mode 100644
index 00000000..7ca8a02c
--- /dev/null
+++ b/integration/tests/examples/examples/example_generation.py
@@ -0,0 +1,1042 @@
+import httplib2
+import json
+import os
+import re
+import sys
+import time
+from urlparse import urlparse
+import xml.dom.minidom
+
+from proboscis import before_class
+from proboscis import test
+from proboscis import TestProgram
+from proboscis.asserts import *
+from proboscis.asserts import Check
+
+from troveclient.compat import Dbaas
+from troveclient.compat import TroveHTTPClient
+
+
+from client import ConfigFile
+from client import SnippetWriter
+from client import JsonClient
+from client import XmlClient
+
+
# When True, echo each request/response to stdout as snippets are written.
print_req = True
+
+
class ExampleClient(object):
    """Stand-alone HTTP driver used to generate API documentation snippets.

    Reads connection settings from a JSON config file, then issues each
    example call twice (once with JSON and once with XML accept types) via
    make_request(), writing request and response snippet files under
    ``directory``.
    """

    def __init__(self, config_file):
        """Load settings from *config_file* (JSON) and authenticate.

        Raises RuntimeError if the file is missing or not valid JSON.
        """
        if not os.path.exists(config_file):
            raise RuntimeError("Could not find Example CONF at %s." %
                               config_file)
        file_contents = open(config_file, "r").read()
        try:
            config = json.loads(file_contents)
        except Exception as exception:
            msg = 'Error loading config file "%s".' % config_file
            raise RuntimeError(msg, exception)

        self.directory = config.get("directory", None)
        # Normalize so later "%sdb-..." concatenation forms a valid path.
        # Guard against a missing "directory" key (was an AttributeError).
        if self.directory and not self.directory.endswith('/'):
            self.directory += '/'
        print("directory = %s" % self.directory)
        self.api_url = config.get("api_url", None)
        print("api_url = %s" % self.api_url)
        # Auth settings; when auth_url is absent we run in fake mode with
        # the tenant name doubling as both token and tenant id.
        auth_url = config.get("auth_url", None)
        print("auth_url = %s" % auth_url)
        username = config.get("username", None)
        print("username = %s" % username)
        password = config.get("password", None)
        print("password = %s" % password)
        self.tenant = config.get("tenant", None)
        self.replace_host = config.get("replace_host", None)
        print("tenant = %s" % self.tenant)
        self.replace_dns_hostname = config.get("replace_dns_hostname", None)
        if auth_url:
            auth_id, tenant_id = self.get_auth_token_id_tenant_id(auth_url,
                                                                  username,
                                                                  password)
        else:
            auth_id = self.tenant
            tenant_id = self.tenant

        print("id = %s" % auth_id)
        self.headers = {
            'X-Auth-Token': str(auth_id)
        }
        print("tenantID = %s" % tenant_id)
        self.tenantID = tenant_id
        self.dbaas_url = "%s/v1.0/%s" % (self.api_url, self.tenantID)

    def write_request_file(self, name, content_type, url, method,
                           req_headers, request_body):
        """Write the formatted request to its snippet file."""
        def write_request():
            return self.output_request(url, req_headers, request_body,
                                       content_type, method)
        if print_req:
            print("\t%s req url:%s" % (content_type, url))
            print("\t%s req method:%s" % (content_type, method))
            print("\t%s req headers:%s" % (content_type, req_headers))
            print("\t%s req body:%s" % (content_type, request_body))
        self.write_file(name, content_type, url, method, write_request,
                        "request")

    def write_response_file(self, name, content_type, url, method,
                            resp, resp_content):
        """Write the formatted response to its snippet file."""
        def write_response():
            return self.output_response(resp, resp_content, content_type)
        # BUG FIX: responses were written to the "-request" file, clobbering
        # the request snippet; they now go to db-<name>-response.<type>
        # (matching SnippetWriter in client.py).
        self.write_file(name, content_type, url, method, write_response,
                        "response")
        if print_req:
            print("\t%s resp:%s" % (content_type, resp))
            print("\t%s resp content:%s" % (content_type, resp_content))

    def write_file(self, name, content_type, url, method, func,
                   in_or_out="request"):
        """Render func() and write it to db-<name>-<in_or_out>.<type>,
        scrubbing the real tenant id and (optionally) the real host.

        `in_or_out` is new but defaulted, so existing callers still work.
        """
        filename = "%sdb-%s-%s.%s" % (self.directory, name, in_or_out,
                                      content_type)
        with open(filename, "w") as file:
            output = func()
            output = output.replace(self.tenantID, '1234')
            if self.replace_host:
                output = output.replace(self.api_url, self.replace_host)
                pre_host_port = urlparse(self.api_url).netloc
                post_host = urlparse(self.replace_host).netloc
                output = output.replace(pre_host_port, post_host)

            file.write(output)

    def version_http_call(self, name, method, json, xml,
                          output=True, print_resp=False):
        """Issue a version (non-tenant) call.

        `json`/`xml` are request-description dicts holding the relative
        'url' and optional 'body' for each content type.
        """
        json['url'] = "%s/%s" % (self.api_url, json['url'])
        xml['url'] = "%s/%s" % (self.api_url, xml['url'])
        return self.make_request(name, method, json, xml, output, print_resp)

    def http_call(self, name, method, url, json, xml,
                  output=True, print_resp=False):
        """Issue a tenant-scoped call under dbaas_url.

        (`url` is accepted for interface compatibility but unused; the
        URLs come from the json/xml description dicts.)
        """
        json['url'] = "%s/%s" % (self.dbaas_url, json['url'])
        xml['url'] = "%s/%s" % (self.dbaas_url, xml['url'])
        return self.make_request(name, method, json, xml, output, print_resp)

    def make_request(self, name, method, json, xml,
                     output=True, print_resp=False):
        """Make the call once as JSON then once as XML.

        Returns ((json_resp, json_content), (xml_resp, xml_content)).
        NOTE: the `json`/`xml` parameters are request-description dicts and
        intentionally shadow the modules inside this method.
        """
        name = name.replace('_', '-')
        print("http call for %s" % name)
        http = httplib2.Http(disable_ssl_certificate_validation=True)
        req_headers = {'User-Agent': "python-example-client",
                       'Content-Type': "application/json",
                       'Accept': "application/json"
                       }
        req_headers.update(self.headers)

        # --- JSON pass ---
        request_body = json.get('body', None)
        url = json.get('url')
        if output:
            self.write_request_file(name, 'json', url, method, req_headers,
                                    request_body)
        resp, resp_content = http.request(url, method, body=request_body,
                                          headers=req_headers)
        json_resp = resp, resp_content
        if output:
            self.write_response_file(name, 'json', url, method, resp,
                                     resp_content)

        # --- XML pass ---
        req_headers['Accept'] = 'application/xml'
        req_headers['Content-Type'] = 'application/xml'
        request_body = xml.get('body', None)
        url = xml.get('url')
        if output:
            # BUG FIX: the return value of write_request_file (None) was
            # assigned to `output`, so the XML response snippet below was
            # never written.
            self.write_request_file(name, 'xml', url, method,
                                    req_headers, request_body)
        resp, resp_content = http.request(url, method, body=request_body,
                                          headers=req_headers)
        xml_resp = resp, resp_content
        if output:
            self.write_response_file(name, 'xml', url, method, resp,
                                     resp_content)

        return json_resp, xml_resp

    def _indent_xml(self, my_string):
        """Normalize and pretty-print an XML document string."""
        # NOTE(review): on Python 3 this yields bytes while the regexes are
        # str patterns — this module predates py3; confirm before porting.
        my_string = my_string.encode("utf-8")
        # Collapse whitespace between tags so toprettyxml() starts clean.
        my_re = re.compile(r'>\s+([^\s])', re.DOTALL)
        # BUG FIX: was `myre.sub` (undefined name -> NameError).
        my_string = my_re.sub(r'>\g<1>', my_string)
        my_string = xml.dom.minidom.parseString(my_string).toprettyxml()
        # Put simple text nodes back on one line with their tags.
        my_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
        my_string = my_re.sub(r'>\g<1></', my_string)
        return my_string

    def output_request(self, url, output_headers, body, content_type, method,
                       static_auth_token=True):
        """Format an outgoing request as readable HTTP text.

        static_auth_token replaces the real token with a fixed UUID so the
        generated documentation is stable.
        """
        output_list = []
        parsed = urlparse(url)
        if parsed.query:
            method_url = parsed.path + '?' + parsed.query
        else:
            method_url = parsed.path
        output_list.append("%s %s HTTP/1.1" % (method, method_url))
        output_list.append("User-Agent: %s" % output_headers['User-Agent'])
        output_list.append("Host: %s" % parsed.netloc)
        if static_auth_token:
            output_token = '87c6033c-9ff6-405f-943e-2deb73f278b7'
        else:
            output_token = output_headers['X-Auth-Token']
        output_list.append("X-Auth-Token: %s" % output_token)
        output_list.append("Accept: %s" % output_headers['Accept'])
        output_list.append("Content-Type: %s" % output_headers['Content-Type'])
        output_list.append("")
        pretty_body = self.format_body(body, content_type)
        output_list.append("%s" % pretty_body)
        output_list.append("")
        return '\n'.join(output_list)

    def output_response(self, resp, body, content_type):
        """Format an httplib2 response as readable HTTP text."""
        output_list = []
        # httplib2 reports the HTTP version as the integer 10 or 11.
        version = "1.1" if resp.version == 11 else "1.0"
        output_list.append("HTTP/%s %s %s" % (version, resp.status,
                                              resp.reason))
        output_list.append("Content-Type: %s" % resp['content-type'])
        output_list.append("Content-Length: %s" % resp['content-length'])
        output_list.append("Date: %s" % resp['date'])
        if body:
            output_list.append("")
            pretty_body = self.format_body(body, content_type)
            output_list.append("%s" % pretty_body)
            output_list.append("")
        return '\n'.join(output_list)

    def format_body(self, body, content_type):
        """Pretty-print a JSON or XML body, optionally rewriting the
        instance hostname to replace_dns_hostname.

        Falls back to the raw body (or '') if it cannot be parsed.
        """
        if content_type == 'json':
            try:
                if self.replace_dns_hostname:
                    before = r'\"hostname\": \"[a-zA-Z0-9-_\.]*\"'
                    after = '"hostname": "%s"' % self.replace_dns_hostname
                    body = re.sub(before, after, body)
                return json.dumps(json.loads(body), sort_keys=True, indent=4)
            except Exception:
                return body if body else ''
        else:
            # Expected type of body is xml.
            try:
                if self.replace_dns_hostname:
                    # BUG FIX: a trailing comma made this a 1-tuple, which
                    # re.sub() rejects; the TypeError was then silently
                    # swallowed below, so the hostname was never replaced.
                    hostname = 'hostname="%s"' % self.replace_dns_hostname
                    body = re.sub(r'hostname=\"[a-zA-Z0-9-_\.]*\"',
                                  hostname, body)
                return self._indent_xml(body)
            except Exception:
                return body if body else ''

    def get_auth_token_id_tenant_id(self, url, username, password):
        """POST a Keystone v2.0 tokens request; return (token_id, tenant_id)."""
        body = ('{"auth":{"tenantName": "%s", "passwordCredentials": '
                '{"username": "%s", "password": "%s"}}}')
        body = body % (self.tenant, username, password)
        http = httplib2.Http(disable_ssl_certificate_validation=True)
        req_headers = {'User-Agent': "python-example-client",
                       'Content-Type': "application/json",
                       'Accept': "application/json",
                       }
        resp, body = http.request(url, 'POST', body=body, headers=req_headers)
        auth = json.loads(body)
        auth_id = auth['access']['token']['id']
        tenant_id = auth['access']['token']['tenant']['id']
        return auth_id, tenant_id
+
+
@test
def load_config_file():
    """Load the first argv path into the module-global `conf`.

    Only the first argument is consumed; with no arguments the run fails.
    """
    global conf
    print("RUNNING ARGS : " + str(sys.argv))
    conf = None
    for arg in sys.argv[1:]:
        conf = ConfigFile(os.path.expanduser(arg))
        return
    if not conf:
        fail("Missing conf file.")
+
def create_client(cls=TroveHTTPClient):
    """Build a Dbaas client against the configured service using fake auth."""
    return Dbaas(conf.username, conf.password, tenant=conf.tenant,
                 auth_url="blah/", auth_strategy='fake',
                 insecure=True, service_type='trove',
                 service_url=conf.dbaas_url, client_cls=cls)
+
class ClientPair(object):
    """
    Combines a Json and XML version of the Dbaas client.
    """

    def __init__(self):
        writer = SnippetWriter(conf)

        def authenticated_client(cls):
            # Build, wire to the snippet writer, and authenticate.
            c = create_client(cls)
            c.client.name = "auth"
            c.client.snippet_writer = writer
            c.authenticate()
            return c

        self.json = authenticated_client(JsonClient)
        self.xml = authenticated_client(XmlClient)
        self.clients = [self.json, self.xml]

    def do(self, name, url, method, status, reason, func, func_args=None):
        """
        Performs the given function twice, first for the JSON client, then for
        the XML one, and writes both to their respective files.
        'name' is the name of the file, while 'url,' 'method,' 'status,'
        and 'reason' are expected values that are asserted against.
        If func_args is present, it is a list of lists, each one of which
        is passed as the *args to the two invocations of "func".
        """
        func_args = func_args or [[], []]
        writer = SnippetWriter(conf)
        results = []
        for index, client in enumerate(self.clients):
            client.client.snippet_writer = writer
            client.client.name = name
            result = func(client, *func_args[index])
            with Check() as check:
                if isinstance(url, (list, tuple)):
                    expected_url = url[index]
                else:
                    expected_url = url
                info = client.client.old_info
                check.equal(info['url'], expected_url)
                check.equal(info['method'], method)
                check.equal(info['response_headers'].status, status)
                check.equal(info['response_headers'].reason, reason)
            results.append(result)
            # Reset the name so a stray later call cannot overwrite the
            # snippet we just wrote.
            client.client.name = "junk"

        return results
+
+
# Indexes into the result lists returned by ClientPair.do():
# [json_result, xml_result].
JSON_INDEX = 0
XML_INDEX = 1
+
@test(depends_on=[load_config_file])
class Versions(object):
    """Snippet examples for the version-discovery endpoints."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def get_versions(self):
        """List every available API version."""
        self.clients.do(
            "versions", "", "GET", 200, "OK",
            lambda client: client.versions.index(conf.api_url))

    @test
    def get_version(self):
        """Show the v1.0 API version."""
        self.clients.do(
            "versions", "/v1.0", "GET", 200, "OK",
            lambda client: client.versions.index(conf.api_url + "/v1.0/"))
+
+
@test(depends_on=[load_config_file])
class Flavors(object):
    """Snippet examples for flavor listing and retrieval."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def get_flavors(self):
        """List all flavors."""
        self.clients.do(
            "flavors", "/flavors", "GET", 200, "OK",
            lambda client: client.flavors.list())

    @test
    def get_flavor_by_id(self):
        """Fetch flavor 1 by id."""
        self.clients.do(
            "flavors_by_id", "/flavors/1", "GET", 200, "OK",
            lambda client: client.flavors.get(1))
+
+
@test(depends_on=[load_config_file])
def clean_slate():
    """Ensure no instances exist before the example run starts."""
    client = create_client()
    client.client.name = "list"
    instances = client.instances.list()
    assert_equal(0, len(instances), "Instance count must be zero.")
+
+
@test(depends_on=[clean_slate])
class CreateInstance(object):
    """Creates the two example instances (one per content type) that every
    later example group runs against.
    """

    @before_class
    def setup(self):
        # One JSON client and one XML client, both already authenticated.
        self.clients = ClientPair()

    @test
    def post_create_instance(self):
        # Create an instance with seed databases and a user; the API is
        # expected to answer 200/OK with the instance still in BUILD state.
        def create_instance(client, name):
            instance = client.instances.create(name, 1, volume={'size':2},
                                    databases=[{
                                        "name": "sampledb",
                                        "character_set": "utf8",
                                        "collate": "utf8_general_ci"
                                    },{
                                        "name": "nextround"
                                    }
                                    ],
                                    users =[{
                                        "databases":[{ "name":"sampledb"}],
                                        "name":"demouser",
                                        "password": "demopassword"
                                    }
                                    ])
            assert_equal(instance.status, "BUILD")
            return instance
        self.instances = self.clients.do("create_instance",
            "/instances", "POST", 200, "OK",
            create_instance,
            (["json_rack_instance"], ["xml_rack_instance"]))
        #self.instance_j = create_instance(self.clients.json,
        #                                  "json_rack_instance")
        #self.instance_x = create_instance(self.clients.xml,
        #                                  "xml_rack_instance")

    @test(depends_on=[post_create_instance])
    def wait_for_instances(self):
        # Poll until ACTIVE (status must only ever be BUILD in between),
        # then publish both instances as module globals for the dependent
        # example groups.
        for instance in self.instances:
            while instance.status != "ACTIVE":
                assert_equal(instance.status, "BUILD")
                instance.get()
                time.sleep(0.1)
        global json_instance
        json_instance = self.instances[0]
        global xml_instance
        xml_instance = self.instances[1]
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Databases(object):
    """Database CRUD snippet examples, run against both example instances."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def post_create_databases(self):
        # Create three databases on each instance (202 Accepted).
        self.clients.do("create_databases",
            ("/instances/%s/databases" % json_instance.id,
            "/instances/%s/databases" % xml_instance.id),
            "POST", 202, "Accepted",
            lambda client, id : client.databases.create(id, databases=[
                {
                    "name": "testingdb",
                    "character_set": "utf8",
                    "collate": "utf8_general_ci"
                },
                {
                    "name": "anotherdb"
                },
                {
                    "name": "oneMoreDB"
                }
            ]), ([json_instance.id], [xml_instance.id]))

    @test(depends_on=[post_create_databases])
    def get_list_databases(self):
        results = self.clients.do("list_databases",
            ("/instances/%s/databases" % json_instance.id,
            "/instances/%s/databases" % xml_instance.id),
            "GET", 200, "OK",
            lambda client, id : client.databases.list(id),
            ([json_instance.id], [xml_instance.id]))

    @test(depends_on=[post_create_databases])
    def get_list_databases_limit_two(self):
        # limit=1 for JSON, limit=2 for XML. NOTE(review): the pagination
        # markers asserted below appear to rely on server-side name
        # ordering ("nextround" was created with the instance itself) —
        # confirm against the API's ordering guarantees.
        results = self.clients.do("list_databases_pagination",
            ("/instances/%s/databases?limit=1" % json_instance.id,
            "/instances/%s/databases?limit=2" % xml_instance.id),
            "GET", 200, "OK",
            lambda client, id, limit : client.databases.list(id, limit=limit),
            ([json_instance.id, 1], [xml_instance.id, 2]))
        assert_equal(1, len(results[JSON_INDEX]))
        assert_equal(2, len(results[XML_INDEX]))
        assert_equal("anotherdb", results[JSON_INDEX].next)
        assert_equal("nextround", results[XML_INDEX].next)

    @test(depends_on=[post_create_databases],
        runs_after=[get_list_databases, get_list_databases_limit_two])
    def delete_databases(self):
        # Delete a different database per content type (202 Accepted).
        results = self.clients.do("delete_databases",
            ("/instances/%s/databases/testingdb" % json_instance.id,
            "/instances/%s/databases/oneMoreDB" % xml_instance.id),
            "DELETE", 202, "Accepted",
            lambda client, id, name : client.databases.delete(id, name),
            ([json_instance.id, 'testingdb'], [xml_instance.id, 'oneMoreDB']))
+
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Users(object):
    """User CRUD snippet examples, run against both example instances."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def post_create_users(self):
        # Create three users with varying database grants (202 Accepted).
        results = self.clients.do("create_users",
            ("/instances/%s/users" % json_instance.id,
            "/instances/%s/users" % xml_instance.id),
            "POST", 202, "Accepted",
            lambda client, id : client.users.create(id, [
                {
                    "name": "dbuser1",
                    "password": "password",
                    "database": "databaseA"
                },
                {
                    "name": "dbuser2",
                    "password": "password",
                    "databases": [
                        {
                            "name": "databaseB"
                        },
                        {
                            "name": "databaseC"
                        }
                    ]
                },
                {
                    "name": "dbuser3",
                    "password": "password",
                    "database": "databaseD"
                }
            ]),
            ([json_instance.id], [xml_instance.id]))

    @test(depends_on=[post_create_users])
    def get_list_users(self):
        results = self.clients.do("list_users",
            ("/instances/%s/users" % json_instance.id,
            "/instances/%s/users" % xml_instance.id),
            "GET", 200, "OK",
            lambda client, id : client.users.list(id),
            ([json_instance.id], [xml_instance.id]))

    @test(depends_on=[post_create_users])
    def get_list_users_limit_two(self):
        results = self.clients.do("list_users_pagination",
            ("/instances/%s/users?limit=2" % json_instance.id,
            "/instances/%s/users?limit=2" % xml_instance.id),
            "GET", 200, "OK",
            lambda client, id : client.users.list(id, limit=2),
            ([json_instance.id], [xml_instance.id]))

    @test(depends_on=[post_create_users],
        runs_after=[get_list_users, get_list_users_limit_two])
    def delete_users(self):
        # NOTE(review): this deletes "testuser", which post_create_users
        # never created — presumably seeded elsewhere; confirm.
        user_name = "testuser"
        results = self.clients.do("delete_users",
            ("/instances/%s/users/%s" % (json_instance.id, user_name),
            "/instances/%s/users/%s" % (xml_instance.id, user_name)),
            "DELETE", 202, "Accepted",
            lambda client, id : client.users.delete(id, user=user_name),
            ([json_instance.id], [xml_instance.id]))
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Root(object):
    """Snippet examples for enabling and checking the root user."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def post_enable_root_access(self):
        """Enable root access on both example instances."""
        urls = ("/instances/%s/root" % json_instance.id,
                "/instances/%s/root" % xml_instance.id)
        self.clients.do(
            "enable_root_user", urls, "POST", 200, "OK",
            lambda client, id: client.root.create(id),
            ([json_instance.id], [xml_instance.id]))

    @test(depends_on=[post_enable_root_access])
    def get_check_root_access(self):
        """Verify both instances now report root as enabled."""
        urls = ("/instances/%s/root" % json_instance.id,
                "/instances/%s/root" % xml_instance.id)
        results = self.clients.do(
            "check_root_user", urls, "GET", 200, "OK",
            lambda client, id: client.root.is_root_enabled(id),
            ([json_instance.id], [xml_instance.id]))
        assert_equal(results[JSON_INDEX], True)
        assert_equal(results[XML_INDEX], True)
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class InstanceList(object):
    """Instance listing and detail snippet examples."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def get_list_instance_index(self):
        # Exactly the two example instances should be listed.
        results = self.clients.do("instances_index",
            "/instances", "GET", 200, "OK",
            lambda client : client.instances.list())
        for result in results:
            assert_equal(2, len(result))

    @test
    def get_instance_details(self):
        results = self.clients.do("instance_status_detail",
            ("/instances/%s" % json_instance.id,
            "/instances/%s" % xml_instance.id),
            "GET", 200, "OK",
            lambda client, id : client.instances.get(id),
            ([json_instance.id], [xml_instance.id]))
        assert_equal(results[JSON_INDEX].id, json_instance.id)
        assert_equal(results[XML_INDEX].id, xml_instance.id)

    @test
    def get_list_instance_index_limit_two(self):
        # Temporarily create a third instance so limit=2 pagination shows,
        # then delete it again when done.
        third_instance = self.clients.json.instances.create(
            "The Third Instance", 1, volume={'size':2})
        while third_instance.status != "ACTIVE":
            third_instance.get()
            time.sleep(0.1)

        results = self.clients.do("instances_index_pagination",
            "/instances?limit=2", "GET", 200, "OK",
            lambda client : client.instances.list(limit=2))
        for result in results:
            assert_equal(2, len(result))

        self.clients.json.instances.delete(third_instance.id)
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Actions(object):
    """Instance action snippet examples: restart, volume and flavor resize."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    def _wait_for_active(self, *acceptable_states):
        # Poll both instances until ACTIVE; while waiting, the status must
        # stay within acceptable_states (e.g. "RESTART", "RESIZE").
        for instance in (json_instance, xml_instance):
            instance.get()
            print('instance.status=%s' % instance.status)
            while instance.status != "ACTIVE":
                assert_true(instance.status in acceptable_states,
                    "Instance status == %s; expected it to be one of these: %s"
                    % (instance.status, acceptable_states))
                instance.get()
                time.sleep(0.1)

    @test
    def instance_restart(self):
        results = self.clients.do("instance_restart",
            ("/instances/%s/action" % json_instance.id,
            "/instances/%s/action" % xml_instance.id),
            "POST", 202, "Accepted",
            lambda client, id : client.instances.restart(id),
            ([json_instance.id], [xml_instance.id]))
        self._wait_for_active("RESTART")

    @test
    def instance_resize_volume(self):
        results = self.clients.do("instance_resize_volume",
            ("/instances/%s/action" % json_instance.id,
            "/instances/%s/action" % xml_instance.id),
            "POST", 202, "Accepted",
            lambda client, id : client.instances.resize_volume(id, 4),
            ([json_instance.id], [xml_instance.id]))
        self._wait_for_active("RESIZE")
        # NOTE(review): JSON deserializes the size as an int but XML as a
        # string — presumably a client quirk; confirm before changing.
        assert_equal(json_instance.volume['size'], 4)
        assert_equal(xml_instance.volume['size'], '4')

    @test
    def instance_resize_flavor(self):
        results = self.clients.do("instance_resize_flavor",
            ("/instances/%s/action" % json_instance.id,
            "/instances/%s/action" % xml_instance.id),
            "POST", 202, "Accepted",
            lambda client, id : client.instances.resize_flavor(id, 3),
            ([json_instance.id], [xml_instance.id]))
        self._wait_for_active("RESIZE")
        assert_equal(json_instance.flavor['id'], '3')
        assert_equal(xml_instance.flavor['id'], '3')
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances', "MgmtHosts"])
class MgmtHosts(object):
    """Management-API host endpoints: list, detail, and update-all."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def mgmt_list_hosts(self):
        """Both clients see the single fake host holding two instances."""
        results = self.clients.do("mgmt_list_hosts",
            "/mgmt/hosts", "GET", 200, "OK",
            lambda client: client.mgmt.hosts.index())
        with Check() as check:
            for hosts in results:
                check.equal(1, len(hosts))
                check.equal("fake_host", hosts[0].name)
            check.equal(2, results[0][0].instanceCount)
            # In XML land this is a string. :'(
            check.equal("2", results[1][0].instanceCount)

    @test
    def mgmt_get_host_detail(self):
        """Host detail lists both instances; JSON preserves numeric types.

        Fixes over the original: stray trailing commas turned the three
        isinstance checks into no-op tuple expressions, and the JSON-only
        checks were redundantly re-run once per result inside the loop.
        """
        results = self.clients.do("mgmt_get_host_detail",
            "/mgmt/hosts/fake_host", "GET", 200, "OK",
            lambda client: client.mgmt.hosts.get("fake_host"))
        with Check() as check:
            for host in results:
                check.equal(host.name, "fake_host")
            # XML entries won't come back as these types, so only check
            # the JSON result. :(
            check.true(isinstance(results[0].percentUsed, int))
            check.true(isinstance(results[0].totalRAM, int))
            check.true(isinstance(results[0].usedRAM, int))
        with Check() as check:
            for host in results:
                check.equal(2, len(host.instances))
                for instance in host.instances:
                    check.equal(instance['status'], 'ACTIVE')
                    check.true(instance['name'] == 'json_rack_instance' or
                               instance['name'] == 'xml_rack_instance')
                    #TODO: Check with GUID regex.
                    check.true(isinstance(instance['id'], basestring))
                    check.true(isinstance(instance['server_id'], basestring))
                    check.true(isinstance(instance['tenant_id'], basestring))

    @test
    def mgmt_host_update_all(self):
        """Issue the update-all action against the fake host."""
        self.clients.do("mgmt_host_update",
            "/mgmt/hosts/fake_host/instances/action",
            "POST", 202, "Accepted",
            lambda client: client.mgmt.hosts.update_all("fake_host"))
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtStorage(object):
    """Management-API storage report."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def mgmt_get_storage(self):
        """Both clients see one fake device; JSON keeps integer types."""
        results = self.clients.do(
            "mgmt_get_storage", "/mgmt/storage", "GET", 200, "OK",
            lambda client: client.mgmt.storage.index())
        for index, devices in enumerate(results):
            with Check() as check:
                check.equal(1, len(devices))
                device = devices[0]
                # (value, expected-int) pairs; XML returns values as strings,
                # hence the int() coercion before comparing.
                numeric_fields = [
                    (device.capacity['available'], 90),
                    (device.capacity['total'], 100),
                    (device.provision['available'], 40),
                    (device.provision['percent'], 10),
                    (device.provision['total'], 50),
                    (device.used, 10),
                ]
                for actual, wanted in numeric_fields:
                    check.equal(int(actual), wanted)
                check.equal(device.name, "fake_storage")
                check.equal(device.type, "test_type")
                if index == JSON_INDEX:
                    for actual, _ in numeric_fields:
                        check.true(isinstance(actual, int))
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtAccount(object):
    """Management-API account endpoints."""

    @before_class
    def setup(self):
        self.clients = ClientPair()

    @test
    def mgmt_get_account_details(self):
        """The admin account owns both instances."""
        results = self.clients.do(
            "mgmt_get_account_details", "/mgmt/accounts/admin",
            "GET", 200, "OK",
            lambda client: client.mgmt.accounts.show("admin"))
        with Check() as check:
            for account_info in results:
                check.equal(2, len(account_info.instances))
                check.equal('admin', account_info.id)

    @test
    def mgmt_get_account_list(self):
        """Account index lists admin; XML reports counts as strings."""
        results = self.clients.do(
            "mgmt_list_accounts", "/mgmt/accounts", "GET", 200, "OK",
            lambda client: client.mgmt.accounts.index())
        for index, result in enumerate(results):
            expected_count = 2 if index == JSON_INDEX else "2"
            for account in result.accounts:
                assert_equal('admin', account['id'])
                assert_equal(expected_count, account['num_instances'])
+
+
def for_both(func):
    """Decorator: run *func* once per entry in ``self.results``.

    Lets a test method written against a single result object be applied
    to both the JSON and XML results gathered in ``before_class``.
    ``functools.wraps`` preserves the wrapped test's ``__name__`` so
    proboscis reports each test under its real name instead of ``both``.
    """
    from functools import wraps

    @wraps(func)
    def both(self):
        for result in self.results:
            func(self, result)
    return both
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstance(object):
    """Field-by-field validation of the management instance-detail call."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()
        detail_urls = ("/mgmt/instances/%s" % json_instance.id,
                       "/mgmt/instances/%s" % xml_instance.id)
        self.results = self.clients.do(
            "mgmt_get_instance_details", detail_urls, "GET", 200, "OK",
            lambda client, instance_id: client.mgmt.instances.show(instance_id),
            ([json_instance.id], [xml_instance.id]))

    @test
    @for_both
    def created(self, result):
        #TODO: use regex
        assert_true(isinstance(result.created, basestring))

    @test
    def deleted(self):
        # XML represents the flag as the literal string "False".
        assert_equal(self.results[JSON_INDEX].deleted, False)
        assert_equal(self.results[XML_INDEX].deleted, "False")

    @test
    @for_both
    def flavor(self, result):
        assert_true(result.flavor['id'] in ("1", "3"))
        assert_equal(len(result.flavor['links']), 2)
        #TODO: validate the flavors format.

    @test
    @for_both
    def guest_status(self, result):
        assert_equal(result.guest_status['state_description'], 'running')

    @test
    @for_both
    def host(self, result):
        assert_equal(result.host, 'fake_host')

    @test
    def id(self):
        assert_equal(self.results[JSON_INDEX].id, json_instance.id)
        assert_equal(self.results[XML_INDEX].id, xml_instance.id)

    @test
    @for_both
    def links(self, result):
        assert_true(isinstance(result.links, list))
        for link in result.links:
            assert_true(isinstance(link, dict))
            for key in ('href', 'rel'):
                assert_true(isinstance(link[key], basestring))

    @test
    def local_id(self):
        #TODO: regex
        assert_true(isinstance(self.results[JSON_INDEX].local_id, int))
        assert_true(isinstance(self.results[XML_INDEX].local_id, basestring))

    @test
    @for_both
    def name(self, result):
        #TODO: regex
        assert_true(isinstance(result.name, basestring))

    @test
    @for_both
    def server_id(self, result):
        #TODO: regex
        assert_true(isinstance(result.server_id, basestring))

    @test
    @for_both
    def status(self, result):
        assert_equal("ACTIVE", result.status)

    @test
    @for_both
    def task_description(self, result):
        assert_equal(result.task_description, "No tasks for the instance.")

    @test
    @for_both
    def tenant_id(self, result):
        assert_equal(result.tenant_id, "admin")

    @test
    @for_both
    def updated(self, result):
        #TODO: regex
        assert_true(isinstance(result.updated, basestring))

    @test
    @for_both
    def volume(self, result):
        assert_true(isinstance(result.volume, dict))
        for key in ('id', 'size'):
            assert_true(key in result.volume)
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstanceIndex(object):
    """Management instance index (non-deleted instances)."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()

    @test
    def mgmt_instance_index(self, deleted=False):
        """List non-deleted instances via the management API.

        ``deleted`` is kept for interface compatibility, but the call is
        currently hard-wired to ``deleted=False`` (and the original also
        built an unused ``url`` local, removed here).
        """
        self.clients.do("mgmt_instance_index",
            "/mgmt/instances?deleted=false", "GET", 200, "OK",
            lambda client: client.mgmt.instances.index(deleted=False))
        #TODO: Validate everything... *sigh*
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstanceDiagnostics(object):
    """Fetches guest diagnostics through the management API."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()

    @test
    def mgmt_get_instance_diagnostics(self):
        """The diagnostics endpoint answers 200 for both instances."""
        urls = ("/mgmt/instances/%s/diagnostics" % json_instance.id,
                "/mgmt/instances/%s/diagnostics" % xml_instance.id)
        self.clients.do(
            "mgmt_instance_diagnostics", urls, "GET", 200, "OK",
            lambda client, instance_id: client.diagnostics.get(instance_id),
            ([json_instance.id], [xml_instance.id]))
        #TODO: validate the actual stuff that comes back (booorring!).
+
+
@test(depends_on=[CreateInstance])
class MgmtInstanceRoot(object):
    """Fetches the root-enabled history through the management API."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()

    @test
    def mgmt_get_root_details(self):
        """The root-history endpoint answers 200 for both instances."""
        urls = ("/mgmt/instances/%s/root" % json_instance.id,
                "/mgmt/instances/%s/root" % xml_instance.id)
        self.clients.do(
            "mgmt_get_root_details", urls, "GET", 200, "OK",
            lambda client, instance_id:
                client.mgmt.instances.root_enabled_history(instance_id),
            ([json_instance.id], [xml_instance.id]))
        #TODO: validate the actual stuff that comes back (booorring!).
+
+
@test(depends_on=[CreateInstance])
class MgmtInstanceHWInfo(object):
    """Fetches guest hardware info through the management API."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()

    @test
    def mgmt_get_hw_info(self):
        """The hwinfo endpoint answers 200 for both instances."""
        urls = ("/mgmt/instances/%s/hwinfo" % json_instance.id,
                "/mgmt/instances/%s/hwinfo" % xml_instance.id)
        self.clients.do(
            "mgmt_get_hw_info", urls, "GET", 200, "OK",
            lambda client, instance_id: client.hw_info.get(instance_id),
            ([json_instance.id], [xml_instance.id]))
+
+
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstanceReboot(object):
    """Reboots both instances through the management API."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()

    @test
    def mgmt_instance_reboot(self):
        """The reboot action is accepted (202) for both instances."""
        urls = ("/mgmt/instances/%s/action" % json_instance.id,
                "/mgmt/instances/%s/action" % xml_instance.id)
        self.clients.do(
            "instance_reboot", urls, "POST", 202, "Accepted",
            lambda client, instance_id: client.mgmt.instances.reboot(
                instance_id),
            ([json_instance.id], [xml_instance.id]))
+
+
@test(depends_on=[CreateInstance],
      groups=['uses_instances'], enabled=False)
class MgmtInstanceGuestUpdate(object):
    """Guest-agent update action (currently disabled)."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()

    @test
    def mgmt_instance_guest_update(self):
        """The guest-update action is accepted (202) for both instances."""
        urls = ("/mgmt/instances/%s/action" % json_instance.id,
                "/mgmt/instances/%s/action" % xml_instance.id)
        self.clients.do(
            "guest_update", urls, "POST", 202, "Accepted",
            lambda client, instance_id: client.mgmt.instances.update(
                instance_id),
            ([json_instance.id], [xml_instance.id]))
+
+
@test(depends_on=[CreateInstance], runs_after_groups=['uses_instances'])
class ZzzDeleteInstance(object):
    """Deletes both instances after every other group has run."""

    @before_class
    def mgmt_get_instance_details(self):
        self.clients = ClientPair()

    @test
    def zzz_delete_instance(self):
        """Delete both instances and confirm they report SHUTDOWN."""
        urls = ("/instances/%s" % json_instance.id,
                "/instances/%s" % xml_instance.id)
        self.clients.do(
            "delete_instance", urls, "DELETE", 202, "Accepted",
            lambda client, instance_id: client.instances.delete(instance_id),
            ([json_instance.id], [xml_instance.id]))
        for instance in (json_instance, xml_instance):
            instance.get()
            assert_equal(instance.status, "SHUTDOWN")
+
+
if __name__ == "__main__":
    # Hand control to proboscis when this file is executed as a script.
    TestProgram().run_and_exit()
diff --git a/integration/tests/examples/examples/local.conf b/integration/tests/examples/examples/local.conf
new file mode 100644
index 00000000..682200da
--- /dev/null
+++ b/integration/tests/examples/examples/local.conf
@@ -0,0 +1,10 @@
+{
+ "directory": "/src/apidocs/src/resources/samples/",
+ "auth_url":"http://localhost:35357/v2.0/tokens",
+ "api_url":"http://localhost:8779",
+ "replace_host":"https://ord.databases.api.rackspacecloud.com",
+ "replace_dns_hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.rackspaceclouddb.com",
+ "username":"examples",
+ "password":"examples",
+ "tenant":"trove"
+}
diff --git a/integration/tests/examples/gendoc.sh b/integration/tests/examples/gendoc.sh
new file mode 100755
index 00000000..01b23271
--- /dev/null
+++ b/integration/tests/examples/gendoc.sh
@@ -0,0 +1,8 @@
#!/bin/bash

# Regenerate the API documentation via the apidocs Maven build.
# Abort immediately if either Maven step fails (failures were
# previously ignored and the script still exited 0).
set -e

pushd ../../apidocs
mvn clean
mvn generate-sources

popd
+
diff --git a/integration/tests/examples/local.conf b/integration/tests/examples/local.conf
new file mode 100644
index 00000000..adaaecd4
--- /dev/null
+++ b/integration/tests/examples/local.conf
@@ -0,0 +1,10 @@
+{
+ "directory": "output",
+ "auth_url":null,
+ "api_url":"http://localhost:8779",
+ "replace_host":"https://ord.databases.api.rackspacecloud.com",
+ "replace_dns_hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.rackspaceclouddb.com",
+ "username":"examples",
+ "password":"examples",
+ "tenant":"admin"
+}
diff --git a/integration/tests/examples/setup.py b/integration/tests/examples/setup.py
new file mode 100644
index 00000000..a7221505
--- /dev/null
+++ b/integration/tests/examples/setup.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from setuptools import setup
+
+
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly; the
    original left the handle open until garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
+
+
# Minimal setuptools metadata: this package exists only so the
# example-generation code under ``examples/`` can be installed into a venv.
setup(
    name="Trove Example Generator",
    version="0.0.9.9",
    author='OpenStack',
    description="Generates documentation examples.",
    license='Apache',
    py_modules=[],
    packages=['examples'],
    scripts=[]
)
diff --git a/integration/tests/examples/tox.ini b/integration/tests/examples/tox.ini
new file mode 100644
index 00000000..19953428
--- /dev/null
+++ b/integration/tests/examples/tox.ini
@@ -0,0 +1,16 @@
+# Examples:
+# Run tests against Trove running locally in fake mode:
+# tox -e local -- --group=blackbox
+[tox]
+envlist = py27
+
+[testenv]
+deps =
+ lxml==2.3
+ nose
+ proboscis
+ {env:TROVE_CLIENT_PATH}
+
+[testenv:py27]
+commands =
+ {envpython} {toxinidir}/examples/example_generation.py {toxinidir}/local.conf {posargs:DEFAULTS}
diff --git a/integration/tests/integration/core.test.conf b/integration/tests/integration/core.test.conf
new file mode 100644
index 00000000..2ba05bef
--- /dev/null
+++ b/integration/tests/integration/core.test.conf
@@ -0,0 +1,48 @@
+{
+ "report_directory":"rdli-test-report",
+ "start_services": false,
+
+
+ "white_box":false,
+ "test_mgmt":false,
+ "use_local_ovz":false,
+ "use_venv":false,
+ "glance_code_root":"/opt/stack/glance",
+ "glance_api_conf":"/vagrant/conf/glance-api.conf",
+ "glance_reg_conf":"/vagrant/conf/glance-reg.conf",
+ "glance_images_directory": "/glance_images",
+ "glance_image": "fakey_fakerson.tar.gz",
+ "instance_flavor_name":"m1.rd-tiny",
+ "instance_bigger_flavor_name":"m1.rd-smaller",
+ "nova_code_root":"/opt/stack/nova",
+ "nova_conf":"/home/vagrant/nova.conf",
+ "keystone_code_root":"/opt/stack/keystone",
+ "keystone_conf":"/etc/keystone/keystone.conf",
+ "keystone_use_combined":true,
+ "trove_code_root":"/opt/stack/trove",
+ "trove_conf":"/tmp/trove.conf",
+ "trove_version":"v1.0",
+ "trove_api_updated":"2012-08-01T00:00:00Z",
+ "trove_must_have_volume":false,
+ "trove_can_have_volume":true,
+ "trove_main_instance_has_volume": true,
+ "trove_max_accepted_volume_size": 1000,
+ "trove_max_instances_per_user": 55,
+ "trove_max_volumes_per_user": 100,
+ "use_nova_volume": false,
+ "use_reaper":false,
+    "root_removed_from_instance_api": true,
+ "root_timestamp_disabled": false,
+ "openvz_disabled": false,
+ "management_api_disabled": true,
+ "dbaas_image": 1,
+ "dns_driver":"trove.dns.rsdns.driver.RsDnsDriver",
+ "dns_instance_entry_factory":"trove.dns.rsdns.driver.RsDnsInstanceEntryFactory",
+ "databases_page_size": 20,
+ "instances_page_size": 20,
+ "users_page_size": 20,
+ "rabbit_runs_locally":false,
+    "sentinel": null
+}
diff --git a/integration/tests/integration/int_tests.py b/integration/tests/integration/int_tests.py
new file mode 100644
index 00000000..e8f00fc4
--- /dev/null
+++ b/integration/tests/integration/int_tests.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# # Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""Runs the tests.
+
+There are a few initialization issues to deal with.
+The first is flags, which must be initialized before any imports. The test
+configuration has the same problem (it was based on flags back when the tests
+resided outside of the Nova code).
+
+The command line is picked apart so that Nose won't see commands it isn't
+compatible with, such as "--flagfile" or "--group".
+
+This script imports all other tests to make them known to Proboscis before
+passing control to proboscis.TestProgram which itself calls nose, which then
+call unittest.TestProgram and exits.
+
+If "repl" is a command line argument, then the original stdout and stderr is
+saved and sys.exit is neutralized so that unittest.TestProgram will not exit
+and instead sys.stdout and stderr are restored so that interactive mode can
+be used.
+
+"""
+
+
+from __future__ import absolute_import
+import atexit
+import gettext
+import logging
+import os
+import time
+import unittest
+import sys
+import proboscis
+
+from nose import config
+from nose import core
+
+from tests.colorizer import NovaTestRunner
+
+
+if os.environ.get("PYDEV_DEBUG", "False") == 'True':
+ from pydev import pydevd
+ pydevd.settrace('10.0.2.2', port=7864, stdoutToServer=True,
+ stderrToServer=True)
+
+
def add_support_for_localization():
    """Adds support for localization in the logging.

    If ../nova/__init__.py exists, add ../ to Python search path, so that
    it will override what happens to be installed in
    /usr/(local/)lib/python...

    """
    # Walk two levels up from the executed script to locate a source
    # checkout, and prefer it over any system-installed copy.
    path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)
    possible_topdir = os.path.normpath(path)
    if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
        sys.path.insert(0, possible_topdir)

    # Install the _() translation function into builtins.
    # NOTE(review): the ``unicode`` keyword is Python 2 only; Python 3's
    # gettext.install() rejects it -- confirm before running under py3.
    gettext.install('nova', unicode=1)
+
+
+MAIN_RUNNER = None
+
+
def initialize_rdl_config(config_file):
    """Parse the Trove/oslo config file and configure database access.

    Exits the process (printing the traceback) if the database cannot
    be set up.
    """
    import traceback
    from oslo_log import log
    from trove.common import cfg
    from trove.db import get_db_api

    conf = cfg.CONF
    cfg.parse_args(['int_tests'], default_config_files=[config_file])
    log.setup(conf, None)
    try:
        get_db_api().configure_db(conf)
        # Result unused; find_file raising is part of the sanity check.
        conf_file = conf.find_file(conf.api_paste_config)
    except RuntimeError as err:
        print(traceback.format_exc())
        sys.exit("ERROR: %s" % err)
+
+
def _clean_up():
    """Shut down services this program started and flush the test report."""
    from tests.util import report
    report.update()
    if MAIN_RUNNER is not None:
        MAIN_RUNNER.on_exit()
    from tests.util.services import get_running_services
    for service in get_running_services():
        # Same byte stream as writing each part individually:
        # "Stopping service <cmd parts each followed by a space>...\n\r"
        cmd_text = "".join(part + " " for part in service.cmd)
        sys.stderr.write("Stopping service " + cmd_text + "...\n\r")
        service.stop()
+
+
def import_tests():
    """Import all test modules so proboscis registers them, then declare
    the externally-defined "heavy_blackbox" group."""

    # TODO(tim.simpson): Import these again once white box test functionality
    # is restored.
    # from tests.dns import check_domain
    # from tests.dns import concurrency
    # from tests.dns import conversion

    # The DNS stuff is problematic. Not loading the other tests allow us to
    # run its functional tests only.
    if os.environ.get("ADD_DOMAINS", "False") != 'True':
        from tests.api import delete_all
        from tests.api import instances_pagination
        from tests.api import instances_quotas
        from tests.api import instances_states
        from tests.dns import dns
        from tests import initialize
        from tests.smoke import instance
        from tests.volumes import driver

    # Groups that exist as core int-tests are registered from the
    # trove.tests.int_tests module
    from trove.tests import int_tests

    # Groups defined in trove-integration, or any other externally
    # defined groups can be registered here
    heavy_black_box_groups = [
        "dbaas.api.instances.pagination",
        "dbaas.api.instances.delete",
        "dbaas.api.instances.status",
        "dbaas.api.instances.down",
        "dbaas.api.mgmt.hosts.update",
        "fake.dbaas.api.mgmt.instances",
        "fake.dbaas.api.mgmt.accounts.broken",
        "fake.dbaas.api.mgmt.allaccounts",
    ]
    proboscis.register(groups=["heavy_blackbox"],
                       depends_on_groups=heavy_black_box_groups)
+
+
def run_main(test_importer):
    """Parse command-line arguments, load test config, and run proboscis.

    ``test_importer`` is invoked after the configuration is loaded, because
    many test decorators read config values at import time.
    """
    # BUG FIX: without this declaration, ``MAIN_RUNNER = runner`` below
    # created a function-local, so the atexit _clean_up() handler always saw
    # the module-level MAIN_RUNNER as None and never called on_exit().
    global MAIN_RUNNER

    add_support_for_localization()

    # Strip non-nose arguments out before passing this to nosetests.
    repl = False
    nose_args = []
    conf_file = "~/test.conf"
    show_elapsed = True
    groups = []
    print("RUNNING TEST ARGS : " + str(sys.argv))
    extra_test_conf_lines = []
    rdl_config_file = None
    nova_flag_file = None
    index = 0
    while index < len(sys.argv):
        arg = sys.argv[index]
        if arg[:2] == "-i" or arg == '--repl':
            repl = True
        elif arg[:7] == "--conf=":
            conf_file = os.path.expanduser(arg[7:])
            print("Setting TEST_CONF to " + conf_file)
            os.environ["TEST_CONF"] = conf_file
        elif arg[:8] == "--group=":
            groups.append(arg[8:])
        elif arg == "--test-config":
            if index >= len(sys.argv) - 1:
                print('Expected an argument to follow "--test-conf".')
                sys.exit()
            conf_line = sys.argv[index + 1]
            extra_test_conf_lines.append(conf_line)
        elif arg[:11] == "--flagfile=":
            pass  # consumed elsewhere; keep it away from nose
        elif arg[:14] == "--config-file=":
            rdl_config_file = arg[14:]
        elif arg[:13] == "--nova-flags=":
            nova_flag_file = arg[13:]
        elif arg.startswith('--hide-elapsed'):
            show_elapsed = False
        else:
            nose_args.append(arg)
        index += 1

    # Many of the test decorators depend on configuration values, so before
    # start importing modules we have to load the test config followed by the
    # flag files.
    from trove.tests.config import CONFIG

    # Find config file.
    if "TEST_CONF" not in os.environ:
        raise RuntimeError("Please define an environment variable named " +
                           "TEST_CONF with the location to a conf file.")
    file_path = os.path.expanduser(os.environ["TEST_CONF"])
    if not os.path.exists(file_path):
        raise RuntimeError("Could not find TEST_CONF at " + file_path + ".")
    # Load config file and then any lines we read from the arguments.
    CONFIG.load_from_file(file_path)
    for line in extra_test_conf_lines:
        CONFIG.load_from_line(line)

    if CONFIG.white_box:  # If white-box testing, set up the flags.
        # Handle loading up RDL's config file madness.
        initialize_rdl_config(rdl_config_file)

    # Set up the report, and print out how we're running the tests.
    from tests.util import report
    from datetime import datetime
    report.log("Trove Integration Tests, %s" % datetime.now())
    report.log("Invoked via command: " + str(sys.argv))
    report.log("Groups = " + str(groups))
    report.log("Test conf file = %s" % os.environ["TEST_CONF"])
    if CONFIG.white_box:
        report.log("")
        report.log("Test config file = %s" % rdl_config_file)
    report.log("")
    report.log("sys.path:")
    for path in sys.path:
        report.log("\t%s" % path)

    # Now that all configurations are loaded its time to import everything.
    test_importer()

    atexit.register(_clean_up)

    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      plugins=core.DefaultPluginManager())
    runner = NovaTestRunner(stream=c.stream,
                            verbosity=c.verbosity,
                            config=c,
                            show_elapsed=show_elapsed,
                            known_bugs=CONFIG.known_bugs)
    MAIN_RUNNER = runner

    if repl:
        # Turn off the following "feature" of the unittest module in case
        # we want to start a REPL.
        sys.exit = lambda x: None

    proboscis.TestProgram(argv=nose_args, groups=groups, config=c,
                          testRunner=MAIN_RUNNER).run_and_exit()
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
+
+
+if __name__ == "__main__":
+ run_main(import_tests)
diff --git a/integration/tests/integration/localhost.test.conf b/integration/tests/integration/localhost.test.conf
new file mode 100644
index 00000000..6459935c
--- /dev/null
+++ b/integration/tests/integration/localhost.test.conf
@@ -0,0 +1,95 @@
+{
+ "include-files":["core.test.conf"],
+
+ "fake_mode": true,
+ "dbaas_url":"http://localhost:8779/v1.0",
+ "version_url":"http://localhost:8779",
+ "nova_auth_url":"http://localhost:8779/v1.0/auth",
+ "trove_auth_url":"http://localhost:8779/v1.0/auth",
+ "trove_client_insecure":false,
+ "auth_strategy":"fake",
+
+ "trove_version":"v1.0",
+ "trove_api_updated":"2012-08-01T00:00:00Z",
+
+ "trove_dns_support":false,
+ "trove_ip_support":false,
+
+ "nova_client": null,
+
+
+ "users": [
+ {
+ "auth_user":"admin",
+ "auth_key":"password",
+ "tenant":"admin-1000",
+ "requirements": {
+ "is_admin":true,
+ "services": ["trove"]
+ }
+ },
+ {
+ "auth_user":"jsmith",
+ "auth_key":"password",
+ "tenant":"2500",
+ "requirements": {
+ "is_admin":false,
+ "services": ["trove"]
+ }
+ },
+ {
+ "auth_user":"hub_cap",
+ "auth_key":"password",
+ "tenant":"3000",
+ "requirements": {
+ "is_admin":false,
+ "services": ["trove"]
+ }
+ }
+ ],
+
+ "flavors": [
+ {
+ "id": 1,
+ "name": "m1.tiny",
+ "ram": 512
+ },
+ {
+ "id": 2,
+ "name": "m1.small",
+ "ram": 2048
+ },
+ {
+ "id": 3,
+ "name": "m1.medium",
+ "ram": 4096
+ },
+ {
+ "id": 4,
+ "name": "m1.large",
+ "ram": 8192
+ },
+ {
+ "id": 5,
+ "name": "m1.xlarge",
+ "ram": 16384
+ },
+ {
+ "id": 6,
+ "name": "tinier",
+ "ram": 506
+ },
+ {
+ "id": 7,
+ "name": "m1.rd-tiny",
+ "ram": 512
+ },
+ {
+ "id": 8,
+ "name": "m1.rd-smaller",
+ "ram": 768
+ }
+
+ ],
+ "sentinel": null
+}
diff --git a/integration/tests/integration/run_local.sh b/integration/tests/integration/run_local.sh
new file mode 100755
index 00000000..83d313dc
--- /dev/null
+++ b/integration/tests/integration/run_local.sh
@@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Specify the path to the Trove repo as argument one.
# This script will create a .pid file and report in the current directory.

set -e
if [ $# -lt 1 ]; then
    echo "Please give the path to the Trove repo as argument one."
    exit 5
else
    TROVE_PATH=$1
fi
if [ $# -lt 2 ]; then
    echo "Please give the path to the Trove Client as argument two."
    exit 5
else
    TROVECLIENT_PATH=$2
fi
shift;
shift;


# BUG FIX: the original `pwd`.pid appended ".pid" directly to the directory
# name, creating e.g. /path/to/dir.pid in the PARENT directory instead of a
# .pid file inside the current directory as documented above.
PID_FILE="$(pwd)/.pid"

function start_server() {
    pushd "$TROVE_PATH"
    bin/start_server.sh --pid_file="$PID_FILE"
    popd
}

function stop_server() {
    if [ -f "$PID_FILE" ];
    then
        pushd "$TROVE_PATH"
        bin/stop_server.sh "$PID_FILE"
        popd
    else
        echo "The pid file did not exist, so not stopping server."
    fi
}
function on_error() {
    echo "Something went wrong!"
    stop_server
}

trap on_error EXIT # Proceed to trap - END in event of failure.

TROVE_CLIENT_PATH=$TROVECLIENT_PATH tox -e py26
start_server
.tox/py26/bin/pip install -U "$TROVECLIENT_PATH"
# "$@" is quoted so test arguments containing spaces survive word splitting.
PYTHONPATH=$PYTHONPATH:$TROVECLIENT_PATH .tox/py26/bin/python int_tests.py \
    --conf=localhost.test.conf -- "$@"
stop_server


trap - EXIT
echo "Ran tests successfully. :)"
exit 0
diff --git a/integration/tests/integration/setup.py b/integration/tests/integration/setup.py
new file mode 100644
index 00000000..52216f61
--- /dev/null
+++ b/integration/tests/integration/setup.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from setuptools import setup
+
+
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly; the
    original left the handle open until garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
+
+
# Minimal setuptools metadata: this package exists only so the integration
# tests under ``tests/`` can be installed into a venv.
setup(
    name="Trove Integration Tests",
    version="0.0.9.9",
    author='OpenStack',
    description="Runs integration tests on Ridley.",
    license='Apache',
    py_modules=[],
    packages=['tests'],
    scripts=[]
)
diff --git a/integration/tests/integration/tests/README b/integration/tests/integration/tests/README
new file mode 100644
index 00000000..05e1db67
--- /dev/null
+++ b/integration/tests/integration/tests/README
@@ -0,0 +1 @@
+Integration tests.
diff --git a/integration/tests/integration/tests/__init__.py b/integration/tests/integration/tests/__init__.py
new file mode 100644
index 00000000..65f633d2
--- /dev/null
+++ b/integration/tests/integration/tests/__init__.py
@@ -0,0 +1,27 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`tests` -- Integration / Functional Tests for Nova
+===================================
+
+.. automodule:: tests
+ :platform: Unix
+ :synopsis: Tests for Nova.
+.. moduleauthor:: Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
+.. moduleauthor:: Tim Simpson <tim.simpson@rackspace.com>
+"""
diff --git a/integration/tests/integration/tests/api/__init__.py b/integration/tests/integration/tests/api/__init__.py
new file mode 100644
index 00000000..40d014dd
--- /dev/null
+++ b/integration/tests/integration/tests/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/integration/tests/integration/tests/api/delete_all.py b/integration/tests/integration/tests/api/delete_all.py
new file mode 100644
index 00000000..98c67aba
--- /dev/null
+++ b/integration/tests/integration/tests/api/delete_all.py
@@ -0,0 +1,32 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from proboscis import test
+
+from trove.tests.config import CONFIG
+from trove.tests.util import create_dbaas_client
+from trove.tests.util.users import Requirements
+
+GROUP = "dbaas.api.instances.delete"
+
+
@test(groups=[GROUP])
def delete_all():
    """Delete every single one."""
    # Any non-admin account will do; deletion is per-tenant.
    requirements = Requirements(is_admin=False)
    regular_user = CONFIG.users.find_user(requirements)
    client = create_dbaas_client(regular_user)
    for instance in client.instances.list():
        instance.delete()
diff --git a/integration/tests/integration/tests/api/instances_pagination.py b/integration/tests/integration/tests/api/instances_pagination.py
new file mode 100644
index 00000000..a21aadd7
--- /dev/null
+++ b/integration/tests/integration/tests/api/instances_pagination.py
@@ -0,0 +1,219 @@
+
+from proboscis import after_class
+from proboscis import before_class
+from proboscis import test
+from proboscis.asserts import assert_equal
+from proboscis.asserts import assert_is_not
+from proboscis.asserts import assert_is_none
+from proboscis.asserts import assert_true
+
+
+from troveclient.compat import exceptions
+from trove.tests.config import CONFIG
+from trove.tests.util import create_dbaas_client
+from trove.tests.util.users import Requirements
+
+
class TestBase(object):
    """Shared helpers for the instance pagination tests.

    Subclasses set ``max`` (how many instances to create) and drive
    these helpers from their proboscis fixtures.
    """

    def set_up(self):
        """Build the non-admin client the pagination tests use."""
        reqs = Requirements(is_admin=False)
        self.user = CONFIG.users.find_user(reqs)
        self.dbaas = create_dbaas_client(self.user)

    def delete_instances(self):
        """Delete every instance, retrying until each delete reports NotFound.

        Deletion is asynchronous, so each chunk is re-attempted until all
        of its members raise NotFound (shown as "[O]" in the progress line).
        """
        chunk = 0
        while True:
            chunk += 1
            attempts = 0
            instances = self.dbaas.instances.list()
            if len(instances) == 0:
                break
            # Sit around and try to delete this chunk.
            while True:
                instance_results = []
                attempts += 1
                deleted_count = 0
                for instance in instances:
                    try:
                        instance.delete()
                        result = "[w]"
                    except exceptions.UnprocessableEntity:
                        # Still busy; will retry on the next pass.
                        result = "[W]"
                    except exceptions.NotFound:
                        # Confirmed gone.
                        result = "[O]"
                        deleted_count += 1
                    except Exception:
                        result = "[X]"
                    instance_results.append(result)
                print("Chunk %d, attempt %d : %s"
                      % (chunk, attempts, ",".join(instance_results)))
                if deleted_count == len(instances):
                    break

    def create_instances(self):
        """Create ``self.max`` instances and record their ids (sorted)."""
        self.ids = []
        for index in range(self.max):
            name = "multi-%03d" % index
            result = self.dbaas.instances.create(name, 1,
                                                 {'size': 1}, [], [])
            self.ids.append(result.id)
        # Sort the list of IDs in order, so we can confirm the lists pagination
        # returns is also sorted correctly.
        self.ids.sort()

    @staticmethod
    def assert_instances_sorted_by_ids(instances):
        # Assert that the ids are strictly increasing.  The previous
        # version never advanced last_id, so every id was only compared
        # against the empty string and nothing was actually checked.
        last_id = ""
        for instance in instances:
            assert_true(last_id < instance.id)
            last_id = instance.id

    def print_list(self, instances):
        """Print a one-line summary of an instance page (debugging aid)."""
        print("Length = %d" % len(instances))
        print(",".join([instance.id for instance in instances]))

    def test_pagination(self, requested_limit, requested_marker,
                        expected_length, expected_marker, expected_last_item):
        """Fetch one page and assert its length, marker, last item and order."""
        instances = self.dbaas.instances.list(limit=requested_limit,
                                              marker=requested_marker)
        marker = instances.next

        self.print_list(instances)

        # Better get as many as we asked for.
        assert_equal(len(instances), expected_length)
        # The last one should be roughly this one in the list.
        assert_equal(instances[-1].id, expected_last_item)
        # Because limit < count, the marker must be something.
        if expected_marker:
            assert_is_not(marker, None)
            assert_equal(marker, expected_marker)
        else:
            assert_is_none(marker)
        self.assert_instances_sorted_by_ids(instances)
+
+
@test(runs_after_groups=["dbaas.guest.shutdown"],
      groups=['dbaas.api.instances.pagination'])
class SimpleCreateAndDestroy(TestBase):
    """
    It turns out a big part of guaranteeing pagination works is to make sure
    we can create a big batch of instances and delete them without problems.
    Even in fake mode though it's worth it to check this is the case.
    """

    # Number of instances TestBase.create_instances will create.
    max = 5

    @before_class
    def set_up(self):
        """Build the client and clear out any pre-existing instances."""
        super(SimpleCreateAndDestroy, self).set_up()
        self.delete_instances()

    @test
    def spin_up(self):
        # Creation itself is the test; any failure surfaces as an error.
        self.create_instances()

    @after_class(always_run=True)
    def tear_down(self):
        # always_run so instances are cleaned up even if spin_up failed.
        self.delete_instances()
+
+
@test(runs_after_groups=["dbaas.guest.shutdown"],
      groups=['dbaas.api.instances.pagination'])
class InstancePagination50(TestBase):
    """Pagination checks against 50 instances (several pages' worth)."""

    # Number of instances TestBase.create_instances will create.
    max = 50

    @before_class
    def set_up(self):
        """Create a ton of instances."""
        super(InstancePagination50, self).set_up()
        self.delete_instances()
        self.create_instances()

    @after_class(always_run=True)
    def tear_down(self):
        """Tear down all instances."""
        self.delete_instances()

    @test
    def pagination_short(self):
        # limit < count: exactly 10 back plus a marker to continue from.
        self.test_pagination(requested_limit=10, requested_marker=None,
                             expected_length=10, expected_marker=self.ids[9],
                             expected_last_item=self.ids[9])

    @test
    def pagination_default(self):
        # No limit: this test expects the default page size (20) to apply.
        self.test_pagination(requested_limit=None, requested_marker=None,
                             expected_length=20, expected_marker=self.ids[19],
                             expected_last_item=self.ids[19])

    @test
    def pagination_full(self):
        # Asking for all 50 is still capped at the 20-item page maximum.
        self.test_pagination(requested_limit=50, requested_marker=None,
                             expected_length=20, expected_marker=self.ids[19],
                             expected_last_item=self.ids[19])
+
+
@test(runs_after_groups=["dbaas.guest.shutdown"],
      groups=['dbaas.api.instances.pagination'])
class InstancePagination20(TestBase):
    """Pagination checks with exactly one default page (20) of instances."""

    # Number of instances TestBase.create_instances will create.
    max = 20

    @before_class
    def set_up(self):
        """Create a ton of instances."""
        super(InstancePagination20, self).set_up()
        self.delete_instances()
        self.create_instances()

    @after_class(always_run=True)
    def tear_down(self):
        """Tear down all instances."""
        self.delete_instances()

    @test
    def pagination_short(self):
        # limit < count: a marker should point at the next page.
        self.test_pagination(requested_limit=10, requested_marker=None,
                             expected_length=10, expected_marker=self.ids[9],
                             expected_last_item=self.ids[9])

    @test
    def pagination_default(self):
        # No limit with exactly 20 instances: whole set, no marker.
        self.test_pagination(requested_limit=None, requested_marker=None,
                             expected_length=20, expected_marker=None,
                             expected_last_item=self.ids[19])

    @test
    def pagination_full(self):
        # limit == count: whole set, no marker.
        self.test_pagination(requested_limit=20, requested_marker=None,
                             expected_length=20, expected_marker=None,
                             expected_last_item=self.ids[19])

    @test
    def pagination_overkill(self):
        # limit > count: whole set, no marker.
        self.test_pagination(requested_limit=30, requested_marker=None,
                             expected_length=20, expected_marker=None,
                             expected_last_item=self.ids[19])

    @test
    def pagination_last_half(self):
        # Resume mid-list; the rest fits in one page, so no marker.
        self.test_pagination(requested_limit=10, requested_marker=self.ids[9],
                             expected_length=10, expected_marker=None,
                             expected_last_item=self.ids[19])

    @test
    def pagination_third_quarter(self):
        # A mid-list page that is not the last: expect a marker.
        self.test_pagination(requested_limit=5, requested_marker=self.ids[9],
                             expected_length=5, expected_marker=self.ids[14],
                             expected_last_item=self.ids[14])

    @test
    def pagination_fourth_quarter(self):
        # Over-ask for the tail: only the last 5 come back, no marker.
        self.test_pagination(requested_limit=20, requested_marker=self.ids[14],
                             expected_length=5, expected_marker=None,
                             expected_last_item=self.ids[19])
diff --git a/integration/tests/integration/tests/api/instances_quotas.py b/integration/tests/integration/tests/api/instances_quotas.py
new file mode 100644
index 00000000..3a1c2de6
--- /dev/null
+++ b/integration/tests/integration/tests/api/instances_quotas.py
@@ -0,0 +1,47 @@
+from proboscis import before_class
+from proboscis import test
+from proboscis.asserts import assert_raises
+
+from troveclient.compat import exceptions
+from trove.tests.config import CONFIG
+from trove.tests.util import create_client
+
+
@test(groups=['dbaas.api.instances.quotas'])
class InstanceQuotas(object):
    """Checks that instance creation is refused once the quota is reached."""

    # Responses from the creates issued by test_too_many_instances;
    # delete_excessive_entries cleans them up afterwards.
    created_instances = []

    @before_class
    def setup(self):
        self.client = create_client(is_admin=False)

    @test
    def test_too_many_instances(self):
        """Fill the quota, then expect the next create to raise OverLimit."""
        self.created_instances = []
        if 'trove_max_instances_per_user' in CONFIG.values:
            too_many = CONFIG.values['trove_max_instances_per_user']
            already_there = len(self.client.instances.list())
            flavor = 1
            for i in range(too_many - already_there):
                response = self.client.instances.create('too_many_%d' % i,
                                                        flavor,
                                                        {'size': 1})
                self.created_instances.append(response)
            # This one better fail, because we just reached our quota.
            assert_raises(exceptions.OverLimit,
                          self.client.instances.create,
                          "too_many", flavor,
                          {'size': 1})

    @test(runs_after=[test_too_many_instances])
    def delete_excessive_entries(self):
        """Delete all the instances created above.

        Deletion is asynchronous: keep issuing deletes until NotFound
        confirms the instance is really gone.
        """
        # Renamed from 'id': the list holds full create responses, and
        # 'id' shadowed the builtin.
        for instance in self.created_instances:
            while True:
                try:
                    self.client.instances.delete(instance)
                except exceptions.UnprocessableEntity:
                    # Not deletable yet; try again.
                    continue
                except exceptions.NotFound:
                    break
diff --git a/integration/tests/integration/tests/api/instances_states.py b/integration/tests/integration/tests/api/instances_states.py
new file mode 100644
index 00000000..965d8e77
--- /dev/null
+++ b/integration/tests/integration/tests/api/instances_states.py
@@ -0,0 +1,76 @@
+# Copyright 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+GROUP = "dbaas.api.instances.status"
+
+from proboscis import before_class
+from proboscis import test
+from proboscis.asserts import assert_equal
+
+from trove.tests.config import CONFIG
+from trove.tests.util import create_dbaas_client
+from trove.tests.util.users import Requirements
+from trove.common.utils import poll_until
+
+
@test(groups=[GROUP])
class InstanceStatusTests(object):
    """Checks that provisioning failures leave the instance in ERROR.

    These tests rely on fake-mode hooks: particular volume sizes and
    instance names make the fake backends fail on purpose.
    """

    @before_class
    def set_up(self):
        """Build the non-admin client used by all the status tests."""
        reqs = Requirements(is_admin=False)
        self.user = CONFIG.users.find_user(reqs)
        self.dbaas = create_dbaas_client(self.user)

    @test
    def test_create_failure_on_volume_prov_failure(self):
        # Fake nova will fail a volume of size 9.
        response = self.dbaas.instances.create('volume_fail', 1,
                                               {'size': 9}, [])
        poll_until(lambda: self.dbaas.instances.get(response.id),
                   lambda instance: instance.status == 'ERROR',
                   time_out=10)
        instance = self.dbaas.instances.get(response.id)
        # print() as a function: the py2-only print statements here were
        # inconsistent with the rest of this tree and break on python 3.
        print("Status: %s" % instance.status)
        assert_equal(instance.status, "ERROR",
                     "Instance did not drop to error after volume prov "
                     "failure.")

    @test
    def test_create_failure_on_server_failure(self):
        # Fake nova will fail a server whose name ends with 'SERVER_ERROR'.
        response = self.dbaas.instances.create('test_SERVER_ERROR', 1,
                                               {'size': 1}, [])
        poll_until(lambda: self.dbaas.instances.get(response.id),
                   lambda instance: instance.status == 'ERROR',
                   time_out=10)
        instance = self.dbaas.instances.get(response.id)
        print("Status: %s" % instance.status)
        assert_equal(instance.status, "ERROR",
                     "Instance did not drop to error after server prov "
                     "failure.")

    # TODO(ed-): We don't at present have a way to test DNS in FAKE_MODE.
    @test(enabled=False)
    def test_create_failure_on_dns_failure(self):
        # TODO(ed-): Throw DNS-specific monkeywrench into works
        response = self.dbaas.instances.create('test_DNS_ERROR', 1,
                                               {'size': 1}, [])
        poll_until(lambda: self.dbaas.instances.get(response.id),
                   lambda instance: instance.status == 'ERROR',
                   time_out=10)
        instance = self.dbaas.instances.get(response.id)
        print("Status: %s" % instance.status)
        assert_equal(instance.status, "ERROR",
                     "Instance did not drop to error after DNS prov failure.")
diff --git a/integration/tests/integration/tests/colorizer.py b/integration/tests/integration/tests/colorizer.py
new file mode 100644
index 00000000..4dd797dc
--- /dev/null
+++ b/integration/tests/integration/tests/colorizer.py
@@ -0,0 +1,446 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""Unittest runner for Nova.
+
+To run all tests
+ python run_tests.py
+
+To run a single test:
+ python run_tests.py test_compute:ComputeTestCase.test_run_terminate
+
+To run a single test module:
+ python run_tests.py test_compute
+
+ or
+
+ python run_tests.py api.test_wsgi
+
+"""
+
+import gettext
+import heapq
+import logging
+import os
+import unittest
+import sys
+import time
+
+gettext.install('nova', unicode=1)
+
+from nose import config
+from nose import core
+from nose import result
+from proboscis import case
+from proboscis import SkipTest
+
class _AnsiColorizer(object):
    """
    A colorizer is an object that loosely wraps around a stream, allowing
    callers to write text to the stream in a particular color.

    Colorizer classes must implement C{supported()} and C{write(text, color)}.
    """
    # Color name -> ANSI SGR foreground code.
    _colors = dict(black=30, red=31, green=32, yellow=33,
                   blue=34, magenta=35, cyan=36, white=37)

    def __init__(self, stream):
        self.stream = stream

    def supported(cls, stream=sys.stdout):
        """
        A class method that returns True if the current platform supports
        coloring terminal output using this method. Returns False otherwise.
        """
        if not stream.isatty():
            return False  # auto color only on TTYs
        try:
            import curses
        except ImportError:
            # No curses, no terminfo lookup: assume no color support.
            return False
        try:
            try:
                return curses.tigetnum("colors") > 2
            except curses.error:
                # Terminal database not set up yet; initialize and retry.
                curses.setupterm()
                return curses.tigetnum("colors") > 2
        except Exception:
            # The original code re-raised here, which made its intended
            # "guess false in case of error" fallback unreachable; guess
            # False instead so a broken terminfo never kills the run.
            return False
    supported = classmethod(supported)

    def write(self, text, color):
        """
        Write the given text to the stream in the given color.

        @param text: Text to be written to the stream.

        @param color: A string label for a color. e.g. 'red', 'white'.
        """
        color = self._colors[color]
        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
+
+
class _Win32Colorizer(object):
    """
    See _AnsiColorizer docstring.

    Drives the Win32 console API instead of ANSI escapes.
    """
    def __init__(self, stream):
        from win32console import GetStdHandle, STD_OUT_HANDLE, \
            FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \
            FOREGROUND_INTENSITY
        r = FOREGROUND_RED
        g = FOREGROUND_GREEN
        b = FOREGROUND_BLUE
        bright = FOREGROUND_INTENSITY
        self.stream = stream
        self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
        # Each color is a bitmask of the base channels; 'normal' is the
        # plain white used to restore the console after a write.
        self._colors = {
            'normal': r | g | b,
            'red': r | bright,
            'green': g | bright,
            'blue': b | bright,
            'yellow': r | g | bright,
            'magenta': r | b | bright,
            'cyan': g | b | bright,
            'white': r | g | b | bright
        }

    def supported(cls, stream=sys.stdout):
        # Probe: grab the console handle and try to set an attribute on
        # it; any failure means no usable Win32 console.
        try:
            import win32console
            handle = win32console.GetStdHandle(
                win32console.STD_OUT_HANDLE)
        except ImportError:
            return False
        import pywintypes
        try:
            handle.SetConsoleTextAttribute(
                win32console.FOREGROUND_RED |
                win32console.FOREGROUND_GREEN |
                win32console.FOREGROUND_BLUE)
        except pywintypes.error:
            return False
        return True
    supported = classmethod(supported)

    def write(self, text, color):
        attribute = self._colors[color]
        self.screenBuffer.SetConsoleTextAttribute(attribute)
        self.stream.write(text)
        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
+
+
class _NullColorizer(object):
    """
    See _AnsiColorizer docstring.

    Fallback colorizer: passes text through unchanged and claims support
    on every stream.
    """
    def __init__(self, stream):
        self.stream = stream

    def supported(cls, stream=sys.stdout):
        # Plain text works anywhere.
        return True
    supported = classmethod(supported)

    def write(self, text, color):
        # 'color' is accepted only for interface compatibility.
        self.stream.write(text)
+
+
def get_elapsed_time_color(elapsed_time):
    """Map a test's elapsed seconds to a display color.

    More than 1s is 'yellow', more than 0.25s is 'cyan', and anything
    faster is 'green'.
    """
    if elapsed_time > 1.0:
        return 'yellow'
    if elapsed_time > 0.25:
        return 'cyan'
    return 'green'
+
+
class NovaTestResult(case.TestResult):
    """Test result that adds color, slow-test tracking and known-bug skips.

    Extends proboscis' TestResult with:
      * colorized OK/FAIL/ERROR output (best available colorizer wins),
      * a fixed-size heap of the slowest tests, and
      * interception of configured "known bugs" so expected failures are
        reported instead of counted as plain errors.
    """
    def __init__(self, *args, **kw):
        # Pop our own keyword args before delegating to the base class.
        self.show_elapsed = kw.pop('show_elapsed')
        self.known_bugs = kw.pop('known_bugs', {})
        super(NovaTestResult, self).__init__(*args, **kw)
        self.num_slow_tests = 5
        self.slow_tests = []  # this is a fixed-sized heap
        self._last_case = None
        self.colorizer = None
        # NOTE(vish): reset stdout for the terminal check
        stdout = sys.stdout
        sys.stdout = sys.__stdout__
        # First colorizer that claims support wins; _NullColorizer always
        # does, so self.colorizer is guaranteed to be set afterwards.
        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
            if colorizer.supported():
                self.colorizer = colorizer(self.stream)
                break
        sys.stdout = stdout

        # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
        # error results in it failing to be initialized later. Otherwise,
        # _handleElapsedTime will fail, causing the wrong error message to
        # be outputted.
        self.start_time = time.time()

    def _intercept_known_bugs(self, test, err):
        """Handle a test outcome that matches a configured known bug.

        NOTE(review): only the mismatch branch returns True; a *matched*
        known bug records a skip and then falls through to return False,
        so the caller also records the outcome normally.  Also, for a
        passing test (err=None) a configured excuse would make str(err[1])
        raise TypeError.  Both look unintended — confirm before relying
        on this path.
        """
        name = str(test)
        excuse = self.known_bugs.get(name, None)
        if excuse:
            tracker_id, error_string = excuse
            if error_string in str(err[1]):
                skip = SkipTest("KNOWN BUG: %s\n%s"
                                % (tracker_id, str(err[1])))
                self.onError(test)
                super(NovaTestResult, self).addSkip(test, skip)
            else:
                # The failure did not match the promised error string:
                # surface that loudly so stale config entries get fixed.
                result = (RuntimeError, RuntimeError(
                    'Test "%s" contains known bug %s.\n'
                    'Expected the following error string:\n%s\n'
                    'What was seen was the following:\n%s\n'
                    'If the bug is no longer happening, please change '
                    'the test config.'
                    % (name, tracker_id, error_string, str(err))), None)
                self.onError(test)
                super(NovaTestResult, self).addError(test, result)
                return True
        return False

    def getDescription(self, test):
        # str(test) rather than the docstring-based unittest default.
        return str(test)

    def _handleElapsedTime(self, test):
        """Record the test's duration into the fixed-size slow-test heap."""
        self.elapsed_time = time.time() - self.start_time
        item = (self.elapsed_time, test)
        # Record only the n-slowest tests using heap
        if len(self.slow_tests) >= self.num_slow_tests:
            heapq.heappushpop(self.slow_tests, item)
        else:
            heapq.heappush(self.slow_tests, item)

    def _writeElapsedTime(self, test):
        # Color the duration by how slow it was (see get_elapsed_time_color).
        color = get_elapsed_time_color(self.elapsed_time)
        self.colorizer.write(" %.2f" % self.elapsed_time, color)

    def _writeResult(self, test, long_result, color, short_result, success):
        """Emit either the verbose colored word or the dot-mode character."""
        if self.showAll:
            self.colorizer.write(long_result, color)
            if self.show_elapsed and success:
                self._writeElapsedTime(test)
            self.stream.writeln()
        elif self.dots:
            self.stream.write(short_result)
            self.stream.flush()

    # NOTE(vish): copied from unittest with edit to add color
    def addSuccess(self, test):
        if self._intercept_known_bugs(test, None):
            return
        unittest.TestResult.addSuccess(self, test)
        self._handleElapsedTime(test)
        self._writeResult(test, 'OK', 'green', '.', True)

    # NOTE(vish): copied from unittest with edit to add color
    def addFailure(self, test, err):
        if self._intercept_known_bugs(test, err):
            return
        self.onError(test)
        unittest.TestResult.addFailure(self, test, err)
        self._handleElapsedTime(test)
        self._writeResult(test, 'FAIL', 'red', 'F', False)

    # NOTE(vish): copied from nose with edit to add color
    def addError(self, test, err):
        """Overrides normal addError to add support for
        errorClasses. If the exception is a registered class, the
        error will be added to the list for that class, not errors.
        """
        if self._intercept_known_bugs(test, err):
            return
        self.onError(test)
        self._handleElapsedTime(test)
        stream = getattr(self, 'stream', None)
        ec, ev, tb = err
        try:
            exc_info = self._exc_info_to_string(err, test)
        except TypeError:
            # 2.3 compat
            exc_info = self._exc_info_to_string(err)
        for cls, (storage, label, isfail) in self.errorClasses.items():
            if result.isclass(ec) and issubclass(ec, cls):
                if isfail:
                    test.passed = False
                storage.append((test, exc_info))
                # Might get patched into a streamless result
                if stream is not None:
                    if self.showAll:
                        message = [label]
                        detail = result._exception_detail(err[1])
                        if detail:
                            message.append(detail)
                        stream.writeln(": ".join(message))
                    elif self.dots:
                        stream.write(label[:1])
                return
        self.errors.append((test, exc_info))
        test.passed = False
        if stream is not None:
            self._writeResult(test, 'ERROR', 'red', 'E', False)

    @staticmethod
    def get_doc(cls_or_func):
        """Grabs the doc abbreviated doc string."""
        try:
            return cls_or_func.__doc__.split("\n")[0].strip()
        except (AttributeError, IndexError):
            return None

    def startTest(self, test):
        """Print the class header (on change) and the padded test name."""
        unittest.TestResult.startTest(self, test)
        self.start_time = time.time()
        test_name = None
        try:
            # Proboscis-wrapped tests carry their registry entry here.
            entry = test.test.__proboscis_case__.entry
            if entry.method:
                current_class = entry.method.im_class
                test_name = self.get_doc(entry.home) or entry.home.__name__
            else:
                current_class = entry.home
        except AttributeError:
            # Plain (non-proboscis) test case.
            current_class = test.test.__class__

        if self.showAll:
            # Print the class name (and its doc) once per class.
            if current_class.__name__ != self._last_case:
                self.stream.writeln(current_class.__name__)
                self._last_case = current_class.__name__
                try:
                    doc = self.get_doc(current_class)
                except (AttributeError, IndexError):
                    doc = None
                if doc:
                    self.stream.writeln(' ' + doc)

        if not test_name:
            if hasattr(test.test, 'shortDescription'):
                test_name = test.test.shortDescription()
            if not test_name:
                test_name = test.test._testMethodName
        self.stream.write('\t%s' % str(test_name).ljust(60))
        self.stream.flush()
+ self.stream.flush()
+
+
class NovaTestRunner(core.TextTestRunner):
    """Nose TextTestRunner that produces NovaTestResult objects.

    Adds elapsed-time reporting, known-bug configuration and an on_exit
    hook that prints whatever results exist if the run aborts early.
    """
    def __init__(self, *args, **kwargs):
        # Pop our extensions before handing the rest to nose.
        # Double-underscore attributes are name-mangled and private to
        # this class.
        self.show_elapsed = kwargs.pop('show_elapsed')
        self.known_bugs = kwargs.pop('known_bugs', {})
        self.__result = None
        self.__finished = False
        self.__start_time = None
        super(NovaTestRunner, self).__init__(*args, **kwargs)

    def _makeResult(self):
        """Build the NovaTestResult and remember it for on_exit."""
        self.__result = NovaTestResult(
            self.stream,
            self.descriptions,
            self.verbosity,
            self.config,
            show_elapsed=self.show_elapsed,
            known_bugs=self.known_bugs)
        self.__start_time = time.time()
        return self.__result

    def _writeSlowTests(self, result_):
        """Print the recorded slow tests, slowest first."""
        # Pare out 'fast' tests
        slow_tests = [item for item in result_.slow_tests
                      if get_elapsed_time_color(item[0]) != 'green']
        if slow_tests:
            slow_total_time = sum(item[0] for item in slow_tests)
            self.stream.writeln("Slowest %i tests took %.2f secs:"
                                % (len(slow_tests), slow_total_time))
            for elapsed_time, test in sorted(slow_tests, reverse=True):
                time_str = "%.2f" % elapsed_time
                self.stream.writeln("    %s %s" % (time_str.ljust(10), test))

    def on_exit(self):
        """Flush partial results if the run was aborted (e.g. Ctrl-C)."""
        if self.__result is None:
            print("Exiting before tests even started.")
        else:
            if not self.__finished:
                msg = "Tests aborted, trying to print available results..."
                print(msg)
            stop_time = time.time()
            self.__result.printErrors()
            self.__result.printSummary(self.__start_time, stop_time)
            self.config.plugins.finalize(self.__result)
            if self.show_elapsed:
                self._writeSlowTests(self.__result)

    def run(self, test):
        result_ = super(NovaTestRunner, self).run(test)
        if self.show_elapsed:
            self._writeSlowTests(result_)
        # Mark completion so on_exit knows this was not an abort.
        self.__finished = True
        return result_
+
+
if __name__ == '__main__':
    # The stdlib 'logging' module imported above has no setup() function;
    # the original logging.setup() call raised AttributeError at startup.
    # basicConfig() is the stdlib way to install a default handler.
    logging.basicConfig()
    # If any argument looks like a test name but doesn't have "nova.tests" in
    # front of it, automatically add that so we don't have to type as much
    show_elapsed = True
    argv = []
    test_fixture = os.getenv("UNITTEST_FIXTURE", "trove")
    for x in sys.argv:
        if x.startswith('test_'):
            argv.append('%s.tests.%s' % (test_fixture, x))
        elif x.startswith('--hide-elapsed'):
            show_elapsed = False
        else:
            argv.append(x)

    # Run nose against the fixture's tests directory with our colorizing
    # runner; exit status reflects overall success.
    testdir = os.path.abspath(os.path.join(test_fixture, "tests"))
    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      workingDir=testdir,
                      plugins=core.DefaultPluginManager())

    runner = NovaTestRunner(stream=c.stream,
                            verbosity=c.verbosity,
                            config=c,
                            show_elapsed=show_elapsed)
    sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
diff --git a/integration/tests/integration/tests/dns/__init__.py b/integration/tests/integration/tests/dns/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/integration/tests/integration/tests/dns/__init__.py
diff --git a/integration/tests/integration/tests/dns/check_domain.py b/integration/tests/integration/tests/dns/check_domain.py
new file mode 100644
index 00000000..61e5f638
--- /dev/null
+++ b/integration/tests/integration/tests/dns/check_domain.py
@@ -0,0 +1,174 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Checks that the domain specified in the flag file exists and is valid.
+
+If you define the environment variable ADD_DOMAINS=True when running the tests,
+they will create the domain if its not found (see below for details).
+
+"""
+import os
+import time
+import unittest
+from proboscis import test
+from proboscis import before_class
+from proboscis.asserts import assert_equal
+from proboscis.asserts import assert_not_equal
+from proboscis.decorators import expect_exception
+from proboscis.decorators import time_out
+
+from trove.tests.config import CONFIG
+
+WHITE_BOX = CONFIG.white_box
+RUN_DNS = CONFIG.values.get("trove_dns_support", False)
+
+if WHITE_BOX:
+ from nova import utils
+ from nova import flags
+ import rsdns
+ from trove.dns.rsdns.driver import create_client_with_flag_values
+ from trove.dns.driver import DnsEntry
+ from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
+ from trove.dns.rsdns.driver import RsDnsDriver
+ from trove.dns.rsdns.driver import RsDnsZone
+ from trove.utils import poll_until
+ FLAGS = flags.FLAGS
+ TEST_CONTENT = "126.1.1.1"
+ TEST_NAME = "hiwassup.%s" % FLAGS.dns_domain_name
+ DNS_DOMAIN_ID = None
+
+
@test(groups=["rsdns.domains", "rsdns.show_entries"],
      enabled=WHITE_BOX and RUN_DNS)
class ClientTests(object):
    """Sanity checks for the raw RS DNS client: auth and domain listing."""

    @before_class
    def increase_logging(self):
        # Turn on httplib2 wire logging so the DNS API traffic is visible.
        import httplib2
        httplib2.debuglevel = 1

    @test
    def can_auth(self):
        self.client = create_client_with_flag_values()
        self.client.authenticate()

    @test(depends_on=[can_auth])
    def list_domains(self):
        domains = self.client.domains.list()
        print(domains)
+
+
@test(groups=["rsdns.domains"], depends_on=[ClientTests],
      enabled=WHITE_BOX and RUN_DNS)
class RsDnsDriverTests(object):
    """Tests the RS DNS Driver."""

    def create_domain_if_needed(self):
        """Adds the domain specified in the flags."""
        print("Creating domain %s" % self.driver.default_dns_zone.name)
        future = self.driver.dns_client.domains.create(
            self.driver.default_dns_zone.name)
        while not future.ready:
            time.sleep(2)
        print("Got something: %s" % future.resource)
        # Record the new domain id on disk for post-mortem debugging.
        with open('/home/vagrant/dns_resource.txt', 'w') as f:
            f.write('%r\n' % future.result[0].id)
        global DNS_DOMAIN_ID
        DNS_DOMAIN_ID = future.result[0].id
        print("The domain should have been created with id=%s" % DNS_DOMAIN_ID)

    @test
    @time_out(2 * 60)
    def ensure_domain_specified_in_flags_exists(self):
        """Make sure the domain in the FLAGS exists."""
        self.driver = RsDnsDriver(raise_if_zone_missing=False)
        assert_not_equal(None, self.driver.default_dns_zone)

        def zone_found():
            # Scan the zone list for the configured default zone and
            # record its id when found.
            zones = self.driver.get_dns_zones()
            print("Retrieving zones.")
            for zone in zones:
                print("zone %s" % zone)
                if zone.name == self.driver.default_dns_zone.name:
                    self.driver.default_dns_zone.id = zone.id
                    global DNS_DOMAIN_ID
                    DNS_DOMAIN_ID = zone.id
                    return True
            return False
        if zone_found():
            return
        self.create_domain_if_needed()
        for i in range(5):
            if zone_found():
                return
        # This class does not extend unittest.TestCase, so the original
        # self.fail(...) raised AttributeError instead of reporting a
        # failure; raise AssertionError explicitly instead.
        raise AssertionError("""Could not find default dns zone.
            This happens when they clear the staging DNS service of data.
            To fix it, manually run the tests as follows:
            $ ADD_DOMAINS=True python int_tests.py
            and if all goes well the tests will create a new domain
            record.""")

    @test(depends_on=[ensure_domain_specified_in_flags_exists],
          enabled=WHITE_BOX and FLAGS.dns_domain_name != "dbaas.rackspace.com")
    def delete_all_entries(self):
        """Deletes all entries under the default domain."""
        # 'entries' rather than 'list', which shadowed the builtin.
        entries = self.driver.get_entries()
        for entry in entries:
            if entry.type == "A":
                self.driver.delete_entry(name=entry.name, type=entry.type,
                                         dns_zone=entry.dns_zone)
        # It takes awhile for them to be deleted.
        poll_until(lambda: self.driver.get_entries_by_name(TEST_NAME),
                   lambda entry_list: len(entry_list) == 0,
                   sleep_time=4, time_out=60)

    @test(depends_on=[delete_all_entries])
    def create_test_entry(self):
        """Create a plain A record and wait until it is visible."""
        fullname = TEST_NAME
        entry = DnsEntry(name=fullname, content=TEST_CONTENT, type="A",
                         ttl=3600)
        self.driver.create_entry(entry)
        # Creation is asynchronous; poll until the record shows up.
        found = None
        for i in range(500):
            found = self.driver.get_entries_by_name(name=fullname)
            if len(found) > 0:
                break
            time.sleep(1)
        print("This is the list: %r" % found)
        assert_equal(1, len(found))
        by_content = self.driver.get_entries_by_content(content=TEST_CONTENT)
        assert_equal(1, len(by_content))

    @test(depends_on=[delete_all_entries])
    def create_test_rsdns_entry(self):
        """Create an entry using the RsDnsInstanceEntryFactory."""
        instance = {'uuid': '000136c0-effa-4711-a747-a5b9fbfcb3bd', 'id': '10'}
        ip = "10.100.2.7"
        factory = RsDnsInstanceEntryFactory(dns_domain_id=DNS_DOMAIN_ID)
        entry = factory.create_entry(instance)
        entry.content = ip
        self.driver.create_entry(entry)
        entries = self.driver.get_entries_by_name(name=entry.name)
        assert_equal(1, len(entries))
        assert_equal(ip, entries[0].content)
        assert_equal(FLAGS.dns_ttl, entries[0].ttl)

    @test(depends_on=[create_test_entry])
    def delete_test_entry(self):
        """Delete the plain A record and wait for it to disappear."""
        fullname = TEST_NAME
        self.driver.delete_entry(fullname, "A")
        # It takes awhile for them to be deleted.
        poll_until(lambda: self.driver.get_entries_by_name(TEST_NAME),
                   lambda entry_list: len(entry_list) == 0,
                   sleep_time=2, time_out=60)
diff --git a/integration/tests/integration/tests/dns/concurrency.py b/integration/tests/integration/tests/dns/concurrency.py
new file mode 100644
index 00000000..4fe460b0
--- /dev/null
+++ b/integration/tests/integration/tests/dns/concurrency.py
@@ -0,0 +1,111 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+This test recreates an issue we had with eventlet. In the logs, we'd see that
+the JSON response was malformed; instead of JSON, it contained the following
+string:
+Second simultaneous read on fileno 5 detected. Unless you really know what
+you're doing, make sure that only one greenthread can read any particular
+socket. Consider using a pools.Pool. If you do know what you're doing and want
+to disable this error, call
+eventlet.debug.hub_multiple_reader_prevention(False)
+
+It is perhaps the most helpful error message ever created.
+
+The root issue was that a subclass of httplib2.Http was created at program
+start and used in all threads.
+
+Using the old (broken) RsDNS client code this test recreates the greatest error
+message ever.
+"""
+
+try:
+ import eventlet
+ CAN_USE_EVENTLET = True
+except ImportError:
+ CAN_USE_EVENTLET = False
+import uuid
+
+from proboscis import before_class
+from proboscis import test
+from proboscis.asserts import assert_true
+
+from trove.tests.config import CONFIG
+
+WHITE_BOX = CONFIG.white_box
+RUN_DNS = CONFIG.values.get("trove_dns_support", False)
+
+
+if CONFIG.white_box:
+ from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
+ from nova import flags
+ from nova import utils
+ FLAGS = flags.FLAGS
+
+
@test(groups=["rsdns.eventlet"], enabled=CAN_USE_EVENTLET)
class RsdnsEventletTests(object):
    """Makes sure the RSDNS client can be used from multiple green threads."""

    def assert_record_created(self, index):
        """Fail unless make_record() has recorded success for *index*."""
        msg = "Record %d wasn't created!" % index
        assert_true(index in self.new_records, msg)

    @before_class(enabled=WHITE_BOX and RUN_DNS)
    def create_driver(self):
        """Creates the DNS Driver used in subsequent tests."""
        self.driver = utils.import_object(FLAGS.dns_driver)
        self.entry_factory = RsDnsInstanceEntryFactory()
        self.test_uuid = uuid.uuid4().hex
        self.new_records = {}

    def make_record(self, index):
        """Creates a record with the form 'eventlet-%s-%d'."""
        # Renamed from 'uuid' so the local does not shadow the imported
        # uuid module (which create_driver still uses).
        record_name = "eventlet-%s-%d" % (self.test_uuid, index)
        instance = {'uuid': record_name}
        entry = self.entry_factory.create_entry(instance)
        entry.name = (record_name + "." +
                      self.entry_factory.default_dns_zone.name)
        entry.content = "123.123.123.123"
        self.driver.create_entry(entry)
        self.new_records[index] = True

    @test(enabled=WHITE_BOX and RUN_DNS)
    def use_dns_from_a_single_thread(self):
        """Add DNS records one at a time."""
        self.new_records = {}
        # Negative indices keep these names distinct from the
        # multi-greenthread test below.
        for index in range(-1, -5, -1):
            self.make_record(index)
            self.assert_record_created(index)

    @test(enabled=WHITE_BOX and RUN_DNS)
    def use_dns_from_multiple_greenthreads(self):
        """Add multiple DNS records at once."""
        self.new_records = {}

        def make_record(index):
            # Bind 'index' now; the callback runs later in a green thread.
            def __cb():
                self.make_record(index)
                self.assert_record_created(index)
                return index
            return __cb

        pile = eventlet.GreenPile()
        indices = range(1, 4)
        for index in indices:
            pile.spawn(make_record(index))

        list(pile)  # Wait for them to finish
        for index in indices:
            self.assert_record_created(index)
diff --git a/integration/tests/integration/tests/dns/conversion.py b/integration/tests/integration/tests/dns/conversion.py
new file mode 100644
index 00000000..2af3b959
--- /dev/null
+++ b/integration/tests/integration/tests/dns/conversion.py
@@ -0,0 +1,105 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests classes which convert RS style-entries to Nova DNS entries."""
+
+import hashlib
+import re
+import unittest
+from proboscis import test
+from proboscis.decorators import expect_exception
+
+from trove.tests.config import CONFIG
+
+
+if CONFIG.white_box:
+ from nova import flags
+ from rsdns.client.records import Record
+ from trove.dns.rsdns.driver import EntryToRecordConverter
+ from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
+ from trove.dns.rsdns.driver import RsDnsZone
+ FLAGS = flags.FLAGS
+ driver = None
+ DEFAULT_ZONE = RsDnsZone(1, "dbaas.rackspace.org")
+ TEST_CONTENT = "126.1.1.1"
+ TEST_NAME = "hiwassup.dbaas.rackspace.org"
+
+
@test(groups=["unit", "rsdns.conversion"],
      enabled=CONFIG.white_box)
class ConvertingNovaEntryNamesToRecordNames(unittest.TestCase):
    """Checks short-name to fully-qualified-name conversion."""

    def setUp(self):
        self.converter = EntryToRecordConverter(DEFAULT_ZONE)
        self.fake_zone = RsDnsZone(id=5, name="blah.org")

    def test_normal_name(self):
        # A simple name is suffixed with the zone name.
        result = self.converter.name_to_long_name("hi", self.fake_zone)
        self.assertEqual("hi.blah.org", result)

    def test_short_name(self):
        # An empty name stays empty rather than becoming ".blah.org".
        result = self.converter.name_to_long_name("", self.fake_zone)
        self.assertEqual("", result)

    def test_long_name(self):
        # The zone suffix is appended even when the name already ends
        # with the zone (including the trailing dot).
        result = self.converter.name_to_long_name("blah.org.",
                                                  self.fake_zone)
        self.assertEqual("blah.org..blah.org", result)
+
+
@test(groups=["unit", "rsdns.conversion"],
      enabled=CONFIG.white_box)
class ConvertingRecordsToEntries(unittest.TestCase):
    """Checks converting RS DNS records into driver entries."""

    def setUp(self):
        self.converter = EntryToRecordConverter(DEFAULT_ZONE)
        self.fake_zone = RsDnsZone(id=5, name="blah.org")

    def test_normal_name(self):
        # Build a fake SOA record and check that every field carries over.
        raw = {"id": 5, "name": "hi.blah.org",
               "data": "stacker.com blah@blah 13452378",
               "ttl": 5,
               "type": "SOA"}
        record = Record(None, raw)
        entry = self.converter.record_to_entry(record=record,
                                               dns_zone=self.fake_zone)
        self.assertEqual("stacker.com blah@blah 13452378", entry.content)
        self.assertEqual("hi.blah.org", entry.name)
        self.assertEqual("5", str(entry.ttl))
        self.assertEqual("SOA", entry.type)
+
+
@test(groups=["rsdns.conversion"],
      enabled=CONFIG.white_box)
class WhenCreatingAnEntryForAnInstance(unittest.TestCase):
    # This isn't a unit test because RsDnsInstanceEntryFactory connects to the
    # service.

    def setUp(self):
        self.creator = RsDnsInstanceEntryFactory()

    def test_should_concatanate_strings(self):
        # The entry name is expected to be "<sha1 of uuid>.<dns domain>".
        # NOTE(review): hashlib.sha1() requires bytes on Python 3; passing a
        # str here assumes Python 2 — confirm before porting.
        instance = {'id': '56',
                    'uuid': '000136c0-effa-4711-a747-a5b9fbfcb3bd'}
        entry = self.creator.create_entry(instance)
        expected_name = "%s.%s" % (hashlib.sha1(instance['uuid']).hexdigest(),
                                   FLAGS.dns_domain_name)
        self.assertEqual(expected_name, entry.name,
                         msg="Entry name should match - %s" % entry.name)
        # The factory only names the entry; content (the IP) is set later.
        self.assertIsNone(entry.content)
        self.assertEqual("A", entry.type)
        self.assertEqual(FLAGS.dns_ttl, entry.ttl)
        self.assertIsNone(entry.priority)
        self.assertEqual(FLAGS.dns_domain_name, entry.dns_zone.name)
        if not entry.dns_zone.id:
            self.fail(msg="DNS Zone Id should not be empty")
diff --git a/integration/tests/integration/tests/dns/dns.py b/integration/tests/integration/tests/dns/dns.py
new file mode 100644
index 00000000..3734ad54
--- /dev/null
+++ b/integration/tests/integration/tests/dns/dns.py
@@ -0,0 +1,104 @@
+
+import unittest
+
+from proboscis import test
+
+from trove.tests.api.instances import instance_info
+from trove.tests.api.instances import GROUP_START as INSTANCE_START
+from trove.tests.api.instances import GROUP_TEST
+from trove.tests.api.instances import GROUP_STOP as INSTANCE_STOP
+from trove.tests.config import CONFIG
+from trove.common.utils import import_object
+from trove.common.utils import poll_until
+
+WHITE_BOX = CONFIG.white_box
+
+if WHITE_BOX:
+ # TODO(tim.simpson): Restore this once white box functionality can be
+ # added back to this test module.
+ pass
+ # import rsdns
+ # from nova import flags
+ # from nova import utils
+
+ # from trove import exception
+ # from trove.utils import poll_until
+
+ # FLAGS = flags.FLAGS
+
+dns_driver = None
+
+GROUP = "dbaas.guest.dns"
+
+
@test(groups=[GROUP, GROUP_TEST])
class Setup(unittest.TestCase):
    """Creates the DNS Driver and entry factory used in subsequent tests."""

    def test_create_rs_dns_driver(self):
        # NOTE(review): FLAGS is only defined by the white-box imports above,
        # which are currently commented out — as written this raises
        # NameError. Confirm how FLAGS should be obtained before enabling.
        global dns_driver
        dns_driver = import_object(FLAGS.dns_driver)
+
+
def expected_dns_entry():
    """Returns expected DNS entry for this instance.

    :rtype: Instance of :class:`DnsEntry`.

    """
    # NOTE(review): create_dns_entry is not imported in this module (the
    # white-box imports are commented out) — verify where it should come from.
    return create_dns_entry(instance_info.local_id, instance_info.id)
+
+
@test(depends_on_classes=[Setup],
      depends_on_groups=[INSTANCE_START],
      groups=[GROUP, GROUP_TEST])
class WhenInstanceIsCreated(unittest.TestCase):
    """Make sure the DNS name was provisioned.

    This class actually calls the DNS driver to confirm the entry that should
    exist for the given instance does exist.

    """

    def test_dns_entry_should_exist(self):
        entry = expected_dns_entry()
        if entry:
            def get_entries():
                # Look up entries by the exact name expected for the instance.
                return dns_driver.get_entries_by_name(entry.name)
            try:
                # DNS propagation is asynchronous; allow up to 60 seconds.
                poll_until(get_entries, lambda entries: len(entries) > 0,
                           sleep_time=2, time_out=60)
            except exception.PollTimeOut:
                # NOTE(review): 'exception' is not imported here (the
                # white-box imports are commented out) — confirm its source.
                self.fail("Did not find name " + entry.name + \
                          " in the entries, which were as follows:"
                          + str(dns_driver.get_entries()))
+
+
@test(depends_on_classes=[Setup, WhenInstanceIsCreated],
      depends_on_groups=[INSTANCE_STOP],
      groups=[GROUP])
class AfterInstanceIsDestroyed(unittest.TestCase):
    """Make sure the DNS name is removed along with an instance.

    Because the compute manager calls the DNS manager with RPC cast, it can
    take awhile. So we wait for 30 seconds for it to disappear.

    """

    def test_dns_entry_exist_should_be_removed_shortly_thereafter(self):
        entry = expected_dns_entry()

        # Nothing to check when DNS support is not configured.
        if not entry:
            return

        def get_entries():
            return dns_driver.get_entries_by_name(entry.name)

        try:
            # Deletion happens asynchronously via RPC cast; allow up to 60s.
            poll_until(get_entries, lambda entries: len(entries) == 0,
                       sleep_time=2, time_out=60)
        except exception.PollTimeOut:
            # NOTE(review): 'exception' is not imported in this module (the
            # white-box imports are commented out) — confirm its source.
            # Manually delete the rogue item
            dns_driver.delete_entry(entry.name, entry.type, entry.dns_zone)
            self.fail("The DNS entry was never deleted when the instance "
                      "was destroyed.")
diff --git a/integration/tests/integration/tests/initialize.py b/integration/tests/integration/tests/initialize.py
new file mode 100644
index 00000000..ddd5aa86
--- /dev/null
+++ b/integration/tests/integration/tests/initialize.py
@@ -0,0 +1,176 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+import os
+import time
+import socket
+
+from nose.plugins.skip import SkipTest
+
+from proboscis import test
+from proboscis.asserts import fail
+from proboscis.decorators import time_out
+from tests.util.services import Service
+from tests.util.services import start_proc
+from tests.util.services import WebService
+from trove.tests.config import CONFIG
+
+
+FAKE = CONFIG.fake_mode
+START_SERVICES = (not FAKE) and CONFIG.values.get('start_services', False)
+START_NOVA_NETWORK = (START_SERVICES and
+ not CONFIG.values.get('neutron_enabled',
+ False))
+KEYSTONE_ALL = CONFIG.values.get('keystone_use_combined', True)
+USE_NOVA_VOLUME = CONFIG.values.get('use_nova_volume', False)
+
+dbaas_image = None
+instance_name = None
+success_statuses = ["build", "active"]
+
+
def dbaas_url():
    """Return the configured DBaaS endpoint URL as a string."""
    url = CONFIG.values.get("dbaas_url")
    return str(url)
+
def nova_url():
    """Return the configured Nova client URL as a string."""
    nova_client = CONFIG.values.get("nova_client")
    return str(nova_client['url'])
+
+
+
class Daemon(object):
    """Starts a daemon."""

    def __init__(self, alternate_path=None, conf_file_name=None,
                 extra_cmds=None, service_path_root=None, service_path=None):
        # The path to the daemon bin if the other one doesn't work.
        self.alternate_path = alternate_path
        # Extra command-line arguments inserted before the conf file path.
        self.extra_cmds = extra_cmds or []
        # The name of a test config value which points to a conf file.
        self.conf_file_name = conf_file_name
        # The name of a test config value, which is inserted into the service_path.
        self.service_path_root = service_path_root
        # The first path to the daemon bin we try.
        self.service_path = service_path or "%s"

    def run(self):
        """Resolve the daemon binary path and start it if not yet running."""
        # Print out everything to make test-setup failures easier to debug.
        print("Looking for config value %s..." % self.service_path_root)
        print(CONFIG.values[self.service_path_root])
        path = self.service_path % CONFIG.values[self.service_path_root]
        print("Path = %s" % path)
        # Fall back to the alternate path; fail loudly if neither exists.
        if not os.path.exists(path):
            path = self.alternate_path
        if path is None:
            fail("Could not find path to %s" % self.service_path_root)
        conf_path = str(CONFIG.values[self.conf_file_name])
        cmds = CONFIG.python_cmd_list() + [path] + self.extra_cmds + \
            [conf_path]
        print("Running cmds: %s" % cmds)
        self.service = Service(cmds)
        # Only start the daemon if it is not already running.
        if not self.service.is_service_alive():
            self.service.start()
+
@test(groups=["services.initialize"],
      enabled=START_SERVICES and (not KEYSTONE_ALL))
def start_keystone_all():
    """Starts the Keystone API."""
    daemon = Daemon(service_path_root="usr_bin_dir",
                    service_path="%s/keystone-all",
                    extra_cmds=['--config-file'],
                    conf_file_name="keystone_conf")
    daemon.run()
+
+
@test(groups=["services.initialize", "services.initialize.glance"],
      enabled=START_SERVICES)
def start_glance_registry():
    """Starts the Glance Registry."""
    daemon = Daemon(alternate_path="/usr/bin/glance-registry",
                    conf_file_name="glance_reg_conf",
                    service_path_root="usr_bin_dir",
                    service_path="%s/glance-registry")
    daemon.run()
+
+
@test(groups=["services.initialize", "services.initialize.glance"],
      depends_on=[start_glance_registry], enabled=START_SERVICES)
def start_glance_api():
    """Starts the Glance API."""
    daemon = Daemon(alternate_path="/usr/bin/glance-api",
                    conf_file_name="glance_reg_conf",
                    service_path_root="usr_bin_dir",
                    service_path="%s/glance-api")
    daemon.run()
+
+
@test(groups=["services.initialize"], depends_on_classes=[start_glance_api],
      enabled=START_NOVA_NETWORK)
def start_nova_network():
    """Starts the Nova Network Service."""
    daemon = Daemon(service_path_root="usr_bin_dir",
                    service_path="%s/nova-network",
                    extra_cmds=['--config-file='],
                    conf_file_name="nova_conf")
    daemon.run()
+
+
@test(groups=["services.initialize"], enabled=START_SERVICES)
def start_scheduler():
    """Starts the Scheduler Service."""
    daemon = Daemon(service_path_root="usr_bin_dir",
                    service_path="%s/nova-scheduler",
                    extra_cmds=['--config-file='],
                    conf_file_name="nova_conf")
    daemon.run()
+
+
@test(groups=["services.initialize"],
      depends_on_classes=[start_glance_api],
      enabled=START_SERVICES)
def start_compute():
    """Starts the Nova Compute Service."""
    daemon = Daemon(service_path_root="usr_bin_dir",
                    service_path="%s/nova-compute",
                    extra_cmds=['--config-file='],
                    conf_file_name="nova_conf")
    daemon.run()
+
+
@test(groups=["services.initialize"], depends_on_classes=[start_scheduler],
      enabled=START_SERVICES and USE_NOVA_VOLUME)
def start_volume():
    """Starts the Nova Volume Service."""
    # Docstring fixed: this launches nova-volume, not nova-compute.
    Daemon(service_path_root="usr_bin_dir",
           service_path="%s/nova-volume",
           extra_cmds=['--config-file='],
           conf_file_name="nova_conf").run()
+
+
@test(groups=["services.initialize"],
      depends_on_classes=[start_glance_api, start_nova_network, start_compute,
                          start_volume],
      enabled=START_SERVICES)
def start_nova_api():
    """Starts the Nova API Service."""
    # Docstring fixed: this launches nova-api, not nova-compute.
    Daemon(service_path_root="usr_bin_dir",
           service_path="%s/nova-api",
           extra_cmds=['--config-file='],
           conf_file_name="nova_conf").run()
+
+
@test(groups=["services.initialize"],
      depends_on_classes=[start_nova_api],
      enabled=START_SERVICES)
def start_trove_api():
    """Starts the Trove Service."""
    daemon = Daemon(service_path_root="usr_bin_dir",
                    service_path="%s/trove-api",
                    extra_cmds=['--config-file='],
                    conf_file_name="trove_conf")
    daemon.run()
diff --git a/integration/tests/integration/tests/smoke/__init__.py b/integration/tests/integration/tests/smoke/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/integration/tests/integration/tests/smoke/__init__.py
diff --git a/integration/tests/integration/tests/smoke/instance.py b/integration/tests/integration/tests/smoke/instance.py
new file mode 100644
index 00000000..3488e5f2
--- /dev/null
+++ b/integration/tests/integration/tests/smoke/instance.py
@@ -0,0 +1,103 @@
+
+from proboscis.asserts import assert_equal
+from proboscis import test
+from proboscis import before_class
+
+from trove.common.utils import poll_until
+from trove.tests.util import create_client
+
+
class InstanceGenerator(object):
    """Helper that creates an instance and verifies it becomes active."""

    def __init__(self, client, status=None, name=None, flavor=None,
                 account_id=None, created_at=None, databases=None, users=None,
                 volume_size=None):
        self.client = client
        self.status = status
        self.name = name
        self.flavor = flavor
        self.account_id = account_id
        self.databases = databases
        self.users = users
        self.volume_size = volume_size
        # Filled in by create_instance().
        self.id = None

    def create_instance(self):
        """Make the call to create the instance and check the response."""
        instance = self.client.instances.create(self.name, self.flavor,
                                                self.volume_size,
                                                self.databases, self.users)
        self.client.assert_http_code(200)

        # Verify we are in a build state.
        assert_equal(instance.status, "BUILD")
        # Pull out the ID for later polling.
        self.id = instance.id

        return instance

    def wait_for_build_to_finish(self):
        """Poll (up to 10 minutes) until the instance leaves BUILD."""
        # Fixed: the client manager is 'instances' (as used in
        # create_instance above), not 'instance'.
        poll_until(lambda: self.client.instances.get(self.id),
                   lambda instance: instance.status != "BUILD",
                   time_out=600)

    def get_active_instance(self):
        """Fetch the instance and verify name, volume size, and flavor."""
        instance = self.client.instances.get(self.id)
        self.client.assert_http_code(200)

        # Check the container name.
        assert_equal(instance.name, self.name)

        # Pull out volume info and verify.
        assert_equal(str(instance.volume_size), str(self.volume_size))

        # Pull out the flavor and verify.
        assert_equal(str(instance.flavor), str(self.flavor))

        return instance
+
+
@test(groups=['smoke', 'positive'])
class CreateInstance(object):
    """Smoke test: create an instance, verify its database, then delete it."""

    @before_class
    def set_up(self):
        client = create_client(is_admin=False)
        name = 'test_createInstance_container'
        flavor = 1
        volume_size = 1
        db_name = 'test_db'
        databases = [
            {
                "name": db_name
            }
        ]
        users = [
            {
                "name": "lite",
                "password": "litepass",
                "databases": [{"name": db_name}]
            }
        ]

        # Create the instance. Fixed: the original passed self.name and
        # self.volume_size, which are never set on this class; the locals
        # defined above are what was intended.
        instance = InstanceGenerator(client, name=name,
                                     flavor=flavor,
                                     volume_size=volume_size,
                                     databases=databases, users=users)
        instance.create_instance()

        # Wait for the instance build to finish.
        instance.wait_for_build_to_finish()

        # Get the active instance.
        inst = instance.get_active_instance()

        # List out the databases for our instance and verify the db name.
        dbs = client.databases.list(inst.id)
        client.assert_http_code(200)

        assert_equal(len(dbs), 1)
        # Fixed: compare against the local db_name; InstanceGenerator has
        # no 'db_name' attribute.
        assert_equal(dbs[0].name, db_name)

        # Fixed: the client manager is 'instances', matching the create
        # call in InstanceGenerator.
        client.instances.delete(inst.id)
        client.assert_http_code(202)
diff --git a/integration/tests/integration/tests/util/__init__.py b/integration/tests/integration/tests/util/__init__.py
new file mode 100644
index 00000000..671d3c17
--- /dev/null
+++ b/integration/tests/integration/tests/util/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/integration/tests/integration/tests/util/report.py b/integration/tests/integration/tests/util/report.py
new file mode 100644
index 00000000..c3c1007c
--- /dev/null
+++ b/integration/tests/integration/tests/util/report.py
@@ -0,0 +1,76 @@
+"""Creates a report for the test.
+"""
+
+import os
+import shutil
+from os import path
+from trove.tests.config import CONFIG
+
+USE_LOCAL_OVZ = CONFIG.use_local_ovz
+
+
class Reporter(object):
    """Saves the logs from a test run."""

    def __init__(self, root_path):
        """Create the report directory and clear out any old *.log files.

        :param root_path: directory where report logs are written.
        """
        self.root_path = root_path
        if not path.exists(self.root_path):
            os.mkdir(self.root_path)
        # 'log_file' (was 'file') avoids shadowing the builtin.
        for log_file in os.listdir(self.root_path):
            if log_file.endswith(".log"):
                os.remove(path.join(self.root_path, log_file))

    def _find_all_instance_ids(self):
        """Return ids of all local OVZ containers (empty list otherwise)."""
        instances = []
        if USE_LOCAL_OVZ:
            # 'container_dir' (was 'dir') avoids shadowing the builtin.
            for container_dir in os.listdir("/var/lib/vz/private"):
                instances.append(container_dir)
        return instances

    def log(self, msg):
        """Append *msg* (plus a newline) to report.log."""
        with open("%s/report.log" % self.root_path, 'a') as report_file:
            report_file.write(str(msg) + "\n")

    def _save_syslog(self):
        """Best-effort copy of the host syslog; failures are logged."""
        try:
            shutil.copyfile("/var/log/syslog", "host-syslog.log")
        except (shutil.Error, IOError) as err:
            self.log("ERROR logging syslog : %s" % (err))

    def _update_instance(self, instance_id):
        """Save the interesting log files for one instance."""
        root = "%s/%s" % (self.root_path, instance_id)

        def save_file(file_path, short_name):
            if USE_LOCAL_OVZ:
                try:
                    shutil.copyfile("/var/lib/vz/private/%s/%s"
                                    % (instance_id, file_path),
                                    "%s-%s.log" % (root, short_name))
                except (shutil.Error, IOError) as err:
                    self.log("ERROR logging %s for instance id %s! : %s"
                             % (file_path, instance_id, err))
            else:
                #TODO: Can we somehow capture these (maybe SSH to the VM)?
                pass

        save_file("/var/log/firstboot", "firstboot")
        save_file("/var/log/syslog", "syslog")
        save_file("/var/log/nova/guest.log", "nova-guest")

    def _update_instances(self):
        # 'instance_id' (was 'id') avoids shadowing the builtin.
        for instance_id in self._find_all_instance_ids():
            self._update_instance(instance_id)

    def update(self):
        """Refresh per-instance logs and the host syslog."""
        self._update_instances()
        self._save_syslog()
+
+
+REPORTER = Reporter(CONFIG.report_directory)
+
+
def log(msg):
    """Append *msg* to the module-wide report log."""
    REPORTER.log(msg)
+
+
def update():
    """Refresh the module-wide report (instance logs plus host syslog)."""
    REPORTER.update()
diff --git a/integration/tests/integration/tests/util/rpc.py b/integration/tests/integration/tests/util/rpc.py
new file mode 100644
index 00000000..d534ff31
--- /dev/null
+++ b/integration/tests/integration/tests/util/rpc.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Test utility for RPC checks.
+
+Functionality to check for rabbit here depends on having rabbit running on
+the same machine as the tests, so that the rabbitmqctl commands will function.
+The functionality is turned on or off by the test config "rabbit_runs_locally".
+
+"""
+
+import re
+
+from trove.tests.config import CONFIG
+from services import start_proc
+
+
# PEP 8: test truthiness directly rather than comparing '== True'.
if CONFIG.values.get('rabbit_runs_locally', False):

    DIRECT_ACCESS = True

    class Rabbit(object):
        """Direct access to a local rabbit server via rabbitmqctl."""

        def declare_queue(self, topic):
            """Call this to declare a queue from Python."""
            #from trove.rpc.impl_kombu import Connection
            from trove.openstack.common.rpc import create_connection
            with create_connection() as conn:
                consumer = conn.declare_topic_consumer(topic=topic)

        def get_queue_items(self, queue_name):
            """Determines if the queue exists and if so the message count.

            If the queue exists the return value is an integer, otherwise
            its None.

            Be careful because queue_name is used in a regex and can't have
            any unescaped characters.

            """
            proc = start_proc(["/usr/bin/sudo", "rabbitmqctl", "list_queues"],
                              shell=False)
            for line in iter(proc.stdout.readline, ""):
                print("LIST QUEUES:" + line)
                # Raw string so the \s escapes are explicit.
                m = re.search(r"""%s\s+([0-9]+)""" % queue_name, line)
                if m:
                    return int(m.group(1))
            return None

        @property
        def is_alive(self):
            """Calls list_queues, should fail."""
            try:
                stdout, stderr = self.run(0, "rabbitmqctl", "list_queues")
                for lines in stdout, stderr:
                    for line in lines:
                        if "no_exists" in line:
                            return False
                return True
            except Exception:
                return False

        def reset(self):
            """Reset the rabbit node to a pristine state."""
            out, err = self.run(0, "rabbitmqctl", "reset")
            print(out)
            print(err)

        def run(self, check_exit_code, *cmd):
            """Run *cmd* under sudo; return (stdout_lines, stderr_lines)."""
            cmds = ["/usr/bin/sudo"] + list(cmd)
            proc = start_proc(cmds)
            lines = proc.stdout.readlines()
            err_lines = proc.stderr.readlines()
            return lines, err_lines

        def start(self):
            """Start the rabbit app and reset the guest password."""
            print("Calling rabbitmqctl start_app")
            out = self.run(0, "rabbitmqctl", "start_app")
            print(out)
            out, err = self.run(0, "rabbitmqctl", "change_password", "guest",
                                CONFIG.values['rabbit_password'])
            print(out)
            print(err)

        def stop(self):
            """Stop the rabbit app (but not the erlang node)."""
            print("Calling rabbitmqctl stop_app")
            out = self.run(0, "rabbitmqctl", "stop_app")
            print(out)

else:

    DIRECT_ACCESS = False

    class Rabbit(object):
        """Placeholder used when rabbit is not reachable from the tests."""

        def __init__(self):
            raise RuntimeError("rabbit_runs_locally is set to False in the "
                               "test config, so this test cannot be run.")
+
diff --git a/integration/tests/integration/tests/util/services.py b/integration/tests/integration/tests/util/services.py
new file mode 100644
index 00000000..b54f92d6
--- /dev/null
+++ b/integration/tests/integration/tests/util/services.py
@@ -0,0 +1,280 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functions to initiate and shut down services needed by the tests."""
+
+import os
+import re
+import subprocess
+import time
+
+from collections import namedtuple
+from httplib2 import Http
+from nose.plugins.skip import SkipTest
+
+from proboscis import decorators
+
+
def _is_web_service_alive(url):
    """Does a HTTP GET request to see if the web service is up."""
    client = Http()
    try:
        resp = client.request(url, 'GET')
        # PEP 8: compare to None with 'is not', not '!='.
        return resp is not None
    except Exception:
        # Any connection error means the service is not up yet.
        return False
+
+
+_running_services = []
+
+
def get_running_services():
    """Return the list of Service objects this program has started."""
    return _running_services
+
+
def start_proc(cmd, shell=False):
    """Launch *cmd* as a subprocess with piped stdin/stdout/stderr.

    :param cmd: list of args (or a command string when shell=True).
    :param shell: passed through to subprocess.Popen.
    :returns: the started subprocess.Popen object.
    """
    return subprocess.Popen(cmd,
                            shell=shell,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            bufsize=0,
                            env=os.environ.copy())
+
+
+MemoryInfo = namedtuple("MemoryInfo", ['mapped', 'writeable', 'shared'])
+
+
class Service(object):
    """Starts and stops a service under test.

    The methods to start and stop the service will not actually do anything
    if they detect the service is already running on this machine. This is
    because it may be useful for developers to start the services themselves
    some other way.

    """

    # TODO(tim.simpson): Hard to follow, consider renaming certain attributes.

    def __init__(self, cmd):
        """Defines a service to run.

        :param cmd: the command as a list of arguments.
        :raises TypeError: if cmd is not a list.
        """
        if not isinstance(cmd, list):
            raise TypeError()
        self.cmd = cmd
        # True when the service was already running before start() was
        # called, meaning this object must not stop or restart it.
        self.do_not_manage_proc = False
        self.proc = None

    def __del__(self):
        if self.is_running:
            self.stop()

    def ensure_started(self):
        """Starts the service if it is not running."""
        if not self.is_running:
            self.start()

    def find_proc_id(self):
        """Finds and returns the process id (or None/False)."""
        if not self.cmd:
            return False
        # The cmd[1] signifies the executable python script. It gets invoked
        # as python /path/to/executable args, so the entry is
        # /path/to/executable
        actual_command = self.cmd[1].split("/")[-1]
        proc_command = ["/usr/bin/pgrep", "-f", actual_command]
        proc = start_proc(proc_command, shell=False)
        # this is to make sure there is only one pid returned from the pgrep
        has_two_lines = False
        pid = None
        for line in iter(proc.stdout.readline, ""):
            if has_two_lines:
                raise RuntimeError("Found PID twice.")
            pid = int(line)
            has_two_lines = True
        return pid

    def get_memory_info(self):
        """Returns how much memory the process is using according to pmap."""
        pid = self.find_proc_id()
        if not pid:
            raise RuntimeError("Can't find PID, so can't get memory.")
        proc = start_proc(["/usr/bin/pmap", "-d", str(pid)],
                          shell=False)
        for line in iter(proc.stdout.readline, ""):
            # Raw strings so the regex escapes are explicit.
            m = re.search(r"""mapped\:\s([0-9]+)K\s+"""
                          r"""writeable/private:\s([0-9]+)K\s+"""
                          r"""shared:\s+([0-9]+)K""", line)
            if m:
                return MemoryInfo(int(m.group(1)), int(m.group(2)),
                                  int(m.group(3)))
        raise RuntimeError("Memory info not found.")

    def get_fd_count_from_proc_file(self):
        """Returns file descriptors according to /proc/<id>/status."""
        pid = self.find_proc_id()
        with open("/proc/%d/status" % pid) as status:
            for line in status.readlines():
                index = line.find(":")
                name = line[:index]
                value = line[index + 1:]
                if name == "FDSize":
                    return int(value)
        raise RuntimeError("FDSize not found!")

    def get_fd_count(self):
        """Returns the open fd count by listing /proc/<id>/fd."""
        # Docstring fixed (was claiming /proc/<id>/status); also removed an
        # unreachable duplicate of get_fd_count_from_proc_file that followed
        # the return, and an unused 'has_two_lines' local.
        pid = self.find_proc_id()
        cmd = "Finding file descriptors..."
        print("CMD" + cmd)
        proc = start_proc(['ls', '-la', '/proc/%d/fd' % pid], shell=False)
        # Start at -3 to discount the 'total' line plus '.' and '..'.
        count = -3
        for line in iter(proc.stdout.readline, ""):
            print("\t" + line)
            count += 1
        if not count:
            raise RuntimeError("Could not get file descriptors!")
        return count

    def kill_proc(self):
        """Kills the process, wherever it may be."""
        pid = self.find_proc_id()
        if pid:
            start_proc("sudo kill -9 " + str(pid), shell=True)
            time.sleep(1)
        if self.is_service_alive():
            raise RuntimeError('Cannot kill process, PID=' +
                               str(self.proc.pid))

    def is_service_alive(self, proc_name_index=1):
        """Searches for the process to see if its alive.

        This function will return true even if this class has not started
        the service (searches using ps).

        """
        if not self.cmd:
            return False
        time.sleep(1)
        # cmd[proc_name_index] signifies the executable python script. It
        # gets invoked as python /path/to/executable args, so the entry is
        # /path/to/executable
        actual_command = self.cmd[proc_name_index].split("/")[-1]
        # Fixed: these were Python 2 print statements; the rest of this
        # module uses the print() function form.
        print(actual_command)
        proc_command = ["/usr/bin/pgrep", "-f", actual_command]
        print(proc_command)
        proc = start_proc(proc_command, shell=False)
        line = proc.stdout.readline()
        print(line)
        # pgrep only returns a pid. if there is no pid, it'll return nothing
        return len(line) != 0

    @property
    def is_running(self):
        """Returns true if the service has already been started.

        Returns true if this program has started the service or if it
        previously detected it had started. The main use of this property
        is to know if the service was already begun by this program-
        use is_service_alive for a more definitive answer.

        """
        return self.proc or self.do_not_manage_proc

    def restart(self, extra_args):
        """Stop and re-start the service (only if this program owns it)."""
        if self.do_not_manage_proc:
            raise RuntimeError("Can't restart proc as the tests don't own it.")
        self.stop()
        time.sleep(2)
        self.start(extra_args=extra_args)

    def start(self, time_out=30, extra_args=None):
        """Starts the service if necessary."""
        extra_args = extra_args or []
        if self.is_running:
            raise RuntimeError("Process is already running.")
        if self.is_service_alive():
            # Someone else started it; never stop a process we don't own.
            self.do_not_manage_proc = True
            return
        self.proc = start_proc(self.cmd + extra_args, shell=False)
        if not self._wait_for_start(time_out=time_out):
            self.stop()
            raise RuntimeError("Issued the command successfully but the "
                               "service (" + str(self.cmd + extra_args) +
                               ") never seemed to start.")
        _running_services.append(self)

    def stop(self):
        """Stops the service, but only if this program started it."""
        if self.do_not_manage_proc:
            return
        if not self.proc:
            raise RuntimeError("Process was not started.")
        self.proc.terminate()
        self.proc.kill()
        self.proc.wait()
        self.proc.stdin.close()
        self.kill_proc()
        self.proc = None
        global _running_services
        _running_services = [svc for svc in _running_services if svc != self]

    def _wait_for_start(self, time_out):
        """Waits until time_out (in seconds) for service to appear."""
        give_up_time = time.time() + time_out
        while time.time() < give_up_time:
            if self.is_service_alive():
                return True
        return False
+
+
class NativeService(Service):
    """A service launched as a native binary rather than a python script.

    The process name to search for is therefore cmd[0] instead of cmd[1].
    """

    def is_service_alive(self):
        return super(NativeService, self).is_service_alive(proc_name_index=0)
+
+
+
class WebService(Service):
    """Starts and stops a web service under test."""

    def __init__(self, cmd, url):
        """Defines a service to run.

        :param cmd: list of command args used to launch the service.
        :param url: URL polled with an HTTP GET to decide liveness.
        """
        Service.__init__(self, cmd)
        # NOTE(review): 'unicode' exists only on Python 2 — confirm before
        # porting this module to Python 3.
        if not isinstance(url, (str, unicode)):
            raise TypeError()
        self.url = url
        # If something already answers at the URL, don't manage that process.
        self.do_not_manage_proc = self.is_service_alive()

    def is_service_alive(self):
        """Searches for the process to see if its alive."""
        return _is_web_service_alive(self.url)
new file mode 100644
index 00000000..09c4cfed
--- /dev/null
+++ b/integration/tests/integration/tests/volumes/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`volumes` -- Tests for volumes.
+===================================
+"""
+
+""""Tests for Volumes."""
+
+# Is a set of tests written directly against the VolumeManager and VolumeClient
+# classes which doesn't require standing up Nova daemons or anything.
+VOLUMES_DRIVER = "trove.volumes.driver"
diff --git a/integration/tests/integration/tests/volumes/driver.py b/integration/tests/integration/tests/volumes/driver.py
new file mode 100644
index 00000000..221b2a41
--- /dev/null
+++ b/integration/tests/integration/tests/volumes/driver.py
@@ -0,0 +1,546 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from numbers import Number
+import os
+import re
+import shutil
+import socket
+import time
+import unittest
+
+import pexpect
+
+from proboscis import test
+from proboscis.asserts import assert_raises
+from proboscis.decorators import expect_exception
+from proboscis.decorators import time_out
+
+from trove.tests.config import CONFIG
+from trove.common.utils import poll_until
+from trove.tests.util import process
+from trove.common.utils import import_class
+from tests import initialize
+
+
+# White-box mode gates imports that reach into Nova/Trove internals; the
+# imports themselves are currently disabled (see TODO below).
+WHITE_BOX = CONFIG.white_box
+VOLUMES_DRIVER = "trove.volumes.driver"
+
+if WHITE_BOX:
+    # TODO(tim.simpson): Restore this once white box functionality can be
+    # added back to this test module.
+    pass
+    # from nova import context
+    # from nova import exception
+    # from nova import flags
+    # from nova import utils
+    # from trove import exception as trove_exception
+    # from trove.utils import poll_until
+    # from trove import volume
+    # from trove.tests.volume import driver as test_driver
+
+    # FLAGS = flags.FLAGS
+
+# Matches a canonical lowercase hex UUID in 8-4-4-4-12 form.
+UUID_PATTERN = re.compile('^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
+                          '[0-9a-f]{4}-[0-9a-f]{12}$')
+
+# Deliberately larger than any available space, so provisioning must fail.
+HUGE_VOLUME = 5000
+
+
+def is_uuid(text):
+    """Return True if text looks like a canonical lowercase UUID string."""
+    return UUID_PATTERN.search(text) is not None
+
+
+class StoryDetails(object):
+    """Mutable state shared by the ordered volume tests below."""
+
+    def __init__(self):
+        self.api = volume.API()
+        self.client = volume.Client()
+        self.context = context.get_admin_context()
+        # The fields below are populated as the story progresses.
+        self.device_path = None
+        self.volume_desc = None
+        self.volume_id = None
+        self.volume_name = None
+        self.volume = None
+        self.host = socket.gethostname()
+        self.original_uuid = None
+        self.original_device_info = None
+        # Target size used by the resize tests (GB -- TODO confirm unit).
+        self.resize_volume_size = 2
+
+    def get_volume(self):
+        """Fetch the current volume record from the API."""
+        return self.api.get(self.context, self.volume_id)
+
+    @property
+    def mount_point(self):
+        """Local directory where this story's volume gets mounted."""
+        return "%s/%s" % (LOCAL_MOUNT_PATH, self.volume_id)
+
+    @property
+    def test_mount_file_path(self):
+        """Path of the sentinel file written to prove the mount works."""
+        return "%s/test.txt" % self.mount_point
+
+
+# Shared story objects; created in SetUp and threaded through every test.
+story = None
+storyFail = None
+
+LOCAL_MOUNT_PATH = "/testsmnt"
+
+
+class VolumeTest(unittest.TestCase):
+ """This test tells the story of a volume, from cradle to grave."""
+
+ def __init__(self, *args, **kwargs):
+ unittest.TestCase.__init__(self, *args, **kwargs)
+
+ def setUp(self):
+ global story, storyFail
+ self.story = story
+ self.storyFail = storyFail
+
+ def assert_volume_as_expected(self, volume):
+ self.assertIsInstance(volume["id"], Number)
+ self.assertEqual(self.story.volume_name, volume["display_name"])
+ self.assertEqual(self.story.volume_desc, volume["display_description"])
+ self.assertEqual(1, volume["size"])
+ self.assertEqual(self.story.context.user_id, volume["user_id"])
+ self.assertEqual(self.story.context.project_id, volume["project_id"])
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[initialize.start_volume])
+class SetUp(VolumeTest):
+
+ def test_05_create_story(self):
+ """Creating 'story' vars used by the rest of these tests."""
+ global story, storyFail
+ story = StoryDetails()
+ storyFail = StoryDetails()
+
+ @time_out(60)
+ def test_10_wait_for_topics(self):
+ """Wait until the volume topic is up before proceeding."""
+ topics = ["volume"]
+ from tests.util.topics import hosts_up
+ while not all(hosts_up(topic) for topic in topics):
+ pass
+
+ def test_20_refresh_local_folders(self):
+ """Delete the local folders used as mount locations if they exist."""
+ if os.path.exists(LOCAL_MOUNT_PATH):
+ #TODO(rnirmal): Also need to remove any existing mounts.
+ shutil.rmtree(LOCAL_MOUNT_PATH)
+ os.mkdir(LOCAL_MOUNT_PATH)
+ # Give some time for the services to startup
+ time.sleep(10)
+
+ @time_out(60)
+ def test_30_mgmt_volume_check(self):
+ """Get the volume information from the mgmt API"""
+ story_context = self.story.context
+ device_info = self.story.api.get_storage_device_info(story_context)
+ print("device_info : %r" % device_info)
+ self.assertNotEqual(device_info, None,
+ "the storage device information should exist")
+ self.story.original_device_info = device_info
+
+ @time_out(60)
+ def test_31_mgmt_volume_info(self):
+ """Check the available space against the mgmt API info."""
+ story_context = self.story.context
+ device_info = self.story.api.get_storage_device_info(story_context)
+ print("device_info : %r" % device_info)
+ info = {'spaceTotal': device_info['raw_total'],
+ 'spaceAvail': device_info['raw_avail']}
+ self._assert_available_space(info)
+
+ def _assert_available_space(self, device_info, fail=False):
+ """
+ Give the SAN device_info(fake or not) and get the asserts for free
+ """
+ print("DEVICE_INFO on SAN : %r" % device_info)
+ # Calculate the GBs; Divide by 2 for the FLAGS.san_network_raid_factor
+ gbs = 1.0 / 1024 / 1024 / 1024 / 2
+ total = int(device_info['spaceTotal']) * gbs
+ free = int(device_info['spaceAvail']) * gbs
+ used = total - free
+ usable = total * (FLAGS.san_max_provision_percent * 0.01)
+ real_free = float(int(usable - used))
+
+ print("total : %r" % total)
+ print("free : %r" % free)
+ print("used : %r" % used)
+ print("usable : %r" % usable)
+ print("real_free : %r" % real_free)
+
+ check_space = self.story.api.check_for_available_space
+ self.assertFalse(check_space(self.story.context, HUGE_VOLUME))
+ self.assertFalse(check_space(self.story.context, real_free + 1))
+
+ if fail:
+ self.assertFalse(check_space(self.story.context, real_free))
+ self.assertFalse(check_space(self.story.context, real_free - 1))
+ self.assertFalse(check_space(self.story.context, 1))
+ else:
+ self.assertTrue(check_space(self.story.context, real_free))
+ self.assertTrue(check_space(self.story.context, real_free - 1))
+ self.assertTrue(check_space(self.story.context, 1))
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
+class AddVolumeFailure(VolumeTest):
+
+ @time_out(60)
+ def test_add(self):
+ """
+ Make call to FAIL a prov. volume and assert the return value is a
+ FAILURE.
+ """
+ self.assertIsNone(self.storyFail.volume_id)
+ name = "TestVolume"
+ desc = "A volume that was created for testing."
+ self.storyFail.volume_name = name
+ self.storyFail.volume_desc = desc
+ volume = self.storyFail.api.create(self.storyFail.context,
+ size=HUGE_VOLUME,
+ snapshot_id=None, name=name,
+ description=desc)
+ self.assertEqual(HUGE_VOLUME, volume["size"])
+ self.assertTrue("creating", volume["status"])
+ self.assertTrue("detached", volume["attach_status"])
+ self.storyFail.volume = volume
+ self.storyFail.volume_id = volume["id"]
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolumeFailure])
+class AfterVolumeFailureIsAdded(VolumeTest):
+ """Check that the volume can be retrieved via the API, and setup.
+
+ All we want to see returned is a list-like with an initial string.
+
+ """
+
+ @time_out(120)
+ def test_api_get(self):
+ """Wait until the volume is a FAILURE."""
+ volume = poll_until(lambda: self.storyFail.get_volume(),
+ lambda volume: volume["status"] != "creating")
+ self.assertEqual(volume["status"], "error")
+ self.assertTrue(volume["attach_status"], "detached")
+
+ @time_out(60)
+ def test_mgmt_volume_check(self):
+ """Get the volume information from the mgmt API"""
+ info = self.story.api.get_storage_device_info(self.story.context)
+ print("device_info : %r" % info)
+ self.assertNotEqual(info, None,
+ "the storage device information should exist")
+ self.assertEqual(self.story.original_device_info['raw_total'],
+ info['raw_total'])
+ self.assertEqual(self.story.original_device_info['raw_avail'],
+ info['raw_avail'])
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
+class AddVolume(VolumeTest):
+
+ @time_out(60)
+ def test_add(self):
+ """Make call to prov. a volume and assert the return value is OK."""
+ self.assertIsNone(self.story.volume_id)
+ name = "TestVolume"
+ desc = "A volume that was created for testing."
+ self.story.volume_name = name
+ self.story.volume_desc = desc
+ volume = self.story.api.create(self.story.context, size=1,
+ snapshot_id=None, name=name,
+ description=desc)
+ self.assert_volume_as_expected(volume)
+ self.assertTrue("creating", volume["status"])
+ self.assertTrue("detached", volume["attach_status"])
+ self.story.volume = volume
+ self.story.volume_id = volume["id"]
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolume])
+class AfterVolumeIsAdded(VolumeTest):
+ """Check that the volume can be retrieved via the API, and setup.
+
+ All we want to see returned is a list-like with an initial string.
+
+ """
+
+ @time_out(120)
+ def test_api_get(self):
+ """Wait until the volume is finished provisioning."""
+ volume = poll_until(lambda: self.story.get_volume(),
+ lambda volume: volume["status"] != "creating")
+ self.assertEqual(volume["status"], "available")
+ self.assert_volume_as_expected(volume)
+ self.assertTrue(volume["attach_status"], "detached")
+
+ @time_out(60)
+ def test_mgmt_volume_check(self):
+ """Get the volume information from the mgmt API"""
+ print("self.story.original_device_info : %r" %
+ self.story.original_device_info)
+ info = self.story.api.get_storage_device_info(self.story.context)
+ print("device_info : %r" % info)
+ self.assertNotEqual(info, None,
+ "the storage device information should exist")
+ self.assertEqual(self.story.original_device_info['raw_total'],
+ info['raw_total'])
+ volume_size = int(self.story.volume['size']) * (1024 ** 3) * 2
+ print("volume_size: %r" % volume_size)
+ print("self.story.volume['size']: %r" % self.story.volume['size'])
+ avail = int(self.story.original_device_info['raw_avail']) - volume_size
+ print("avail space: %r" % avail)
+ self.assertEqual(int(info['raw_avail']), avail)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AfterVolumeIsAdded])
+class SetupVolume(VolumeTest):
+    """Assigns the volume to this host and discovers its device path."""
+
+    @time_out(60)
+    def test_assign_volume(self):
+        """Tell the volume it belongs to this host node."""
+        #TODO(tim.simpson) If this is important, could we add a test to
+        #                  make sure some kind of exception is thrown if it
+        #                  isn't added to certain drivers?
+        self.assertNotEqual(None, self.story.volume_id)
+        self.story.api.assign_to_compute(self.story.context,
+                                         self.story.volume_id,
+                                         self.story.host)
+
+    @time_out(60)
+    def test_setup_volume(self):
+        """Set up the volume on this host. AKA discovery."""
+        self.assertNotEqual(None, self.story.volume_id)
+        device = self.story.client._setup_volume(self.story.context,
+                                                 self.story.volume_id,
+                                                 self.story.host)
+        # basestring is Python 2 only; the path is saved for later tests.
+        if not isinstance(device, basestring):
+            self.fail("Expected device to be a string, but instead it was " +
+                      str(type(device)) + ".")
+        self.story.device_path = device
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetupVolume])
+class FormatVolume(VolumeTest):
+    """Formats the discovered device and verifies filesystem options."""
+
+    @expect_exception(IOError)
+    @time_out(60)
+    def test_10_should_raise_IOError_if_format_fails(self):
+        """
+
+        Tests that if the driver's _format method fails, its
+        public format method will perform an assertion properly, discover
+        it failed, and raise an exception.
+
+        """
+
+        volume_driver_cls = import_class(FLAGS.volume_driver)
+
+        class BadFormatter(volume_driver_cls):
+            # Driver whose low-level format step silently does nothing, so
+            # the client's post-format check must detect the failure.
+
+            def _format(self, device_path):
+                pass
+
+        bad_client = volume.Client(volume_driver=BadFormatter())
+        bad_client._format(self.story.device_path)
+
+    @time_out(60)
+    def test_20_format(self):
+        # Format the real device discovered earlier in the story.
+        self.assertNotEqual(None, self.story.device_path)
+        self.story.client._format(self.story.device_path)
+
+    def test_30_check_options(self):
+        # dumpe2fs reports reserved vs total block counts; the awk program
+        # computes the reserved percentage, expected to be the default 5%.
+        cmd = ("sudo dumpe2fs -h %s 2> /dev/null | "
+               "awk -F ':' '{ if($1 == \"Reserved block count\") "
+               "{ rescnt=$2 } } { if($1 == \"Block count\") "
+               "{ blkcnt=$2 } } END { print (rescnt/blkcnt)*100 }'")
+        cmd = cmd % self.story.device_path
+        out, err = process(cmd)
+        self.assertEqual(float(5), round(float(out)), msg=out)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[FormatVolume])
+class MountVolume(VolumeTest):
+
+ @time_out(60)
+ def test_mount(self):
+ self.story.client._mount(self.story.device_path,
+ self.story.mount_point)
+ with open(self.story.test_mount_file_path, 'w') as file:
+ file.write("Yep, it's mounted alright.")
+ self.assertTrue(os.path.exists(self.story.test_mount_file_path))
+
+ def test_mount_options(self):
+ cmd = "mount -l | awk '/%s.*noatime/ { print $1 }'"
+ cmd %= LOCAL_MOUNT_PATH.replace('/', '')
+ out, err = process(cmd)
+ self.assertEqual(os.path.realpath(self.story.device_path), out.strip(),
+ msg=out)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
+class ResizeVolume(VolumeTest):
+
+ @time_out(300)
+ def test_resize(self):
+ self.story.api.resize(self.story.context, self.story.volume_id,
+ self.story.resize_volume_size)
+
+ volume = poll_until(lambda: self.story.get_volume(),
+ lambda volume: volume["status"] == "resized")
+ self.assertEqual(volume["status"], "resized")
+ self.assertTrue(volume["attach_status"], "attached")
+ self.assertTrue(volume['size'], self.story.resize_volume_size)
+
+ @time_out(300)
+ def test_resizefs_rescan(self):
+ self.story.client.resize_fs(self.story.context,
+ self.story.volume_id)
+ expected = "trove.tests.volume.driver.ISCSITestDriver"
+ if FLAGS.volume_driver is expected:
+ size = self.story.resize_volume_size * \
+ test_driver.TESTS_VOLUME_SIZE_MULTIPLIER * 1024 * 1024
+ else:
+ size = self.story.resize_volume_size * 1024 * 1024
+ out, err = process('sudo blockdev --getsize64 %s' %
+ os.path.realpath(self.story.device_path))
+ if int(out) < (size * 0.8):
+ self.fail("Size %s is not more or less %s" % (out, size))
+
+ # Reset the volume status to available
+ self.story.api.update(self.story.context, self.story.volume_id,
+ {'status': 'available'})
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
+class UnmountVolume(VolumeTest):
+    """Unmounts the volume and proves the mount point is gone."""
+
+    @time_out(60)
+    def test_unmount(self):
+        self.story.client._unmount(self.story.mount_point)
+        # 'mount <point>' with no device should fail to find an fstab entry
+        # for the point, which proves the unmount took effect.
+        child = pexpect.spawn("sudo mount %s" % self.story.mount_point)
+        child.expect("mount: can't find %s in" % self.story.mount_point)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[UnmountVolume])
+class GrabUuid(VolumeTest):
+    """Checks UUID retrieval for both valid and bogus device paths."""
+
+    @time_out(60)
+    def test_uuid_must_match_pattern(self):
+        """UUID must be hex chars in the form 8-4-4-4-12."""
+        client = self.story.client  # volume.Client()
+        device_path = self.story.device_path  # '/dev/sda5'
+        uuid = client.get_uuid(device_path)
+        # Remembered so Initialize can prove formatting changes the UUID.
+        self.story.original_uuid = uuid
+        self.assertTrue(is_uuid(uuid), "uuid must match regex")
+
+    @time_out(60)
+    def test_get_invalid_uuid(self):
+        """DevicePathInvalidForUuid is raised if device_path is wrong."""
+        client = self.story.client
+        device_path = "gdfjghsfjkhggrsyiyerreygghdsghsdfjhf"
+        self.assertRaises(trove_exception.DevicePathInvalidForUuid,
+                          client.get_uuid, device_path)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
+class RemoveVolume(VolumeTest):
+    """Detaches the volume from this host and checks it is really gone."""
+
+    @time_out(60)
+    def test_remove(self):
+        self.story.client.remove_volume(self.story.context,
+                                        self.story.volume_id,
+                                        self.story.host)
+        # Formatting a removed device must fail.
+        self.assertRaises(Exception,
+                          self.story.client._format, self.story.device_path)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
+class Initialize(VolumeTest):
+    """Covers Client.initialize: formats once, then respects stored UUIDs."""
+
+    @time_out(300)
+    def test_10_initialize_will_format(self):
+        """initialize will setup, format, and store the UUID of a volume"""
+        # NOTE(review): assertIsNone(...) would read better here.
+        self.assertTrue(self.story.get_volume()['uuid'] is None)
+        self.story.client.initialize(self.story.context, self.story.volume_id,
+                                     self.story.host)
+        volume = self.story.get_volume()
+        self.assertTrue(is_uuid(volume['uuid']), "uuid must match regex")
+        self.assertNotEqual(self.story.original_uuid, volume['uuid'],
+                            "Validate our assumption that the volume UUID "
+                            "will change when the volume is formatted.")
+        self.story.client.remove_volume(self.story.context,
+                                        self.story.volume_id,
+                                        self.story.host)
+
+    @time_out(60)
+    def test_20_initialize_the_second_time_will_not_format(self):
+        """If initialize is called but a UUID exists, it should not format."""
+        old_uuid = self.story.get_volume()['uuid']
+        self.assertTrue(old_uuid is not None)
+
+        class VolumeClientNoFmt(volume.Client):
+            # Guard subclass: blows up if initialize tries to format again.
+
+            def _format(self, device_path):
+                raise RuntimeError("_format should not be called!")
+
+        no_fmt_client = VolumeClientNoFmt()
+        no_fmt_client.initialize(self.story.context, self.story.volume_id,
+                                 self.story.host)
+        self.assertEqual(old_uuid, self.story.get_volume()['uuid'],
+                         "UUID should be the same as no formatting occurred.")
+        self.story.client.remove_volume(self.story.context,
+                                        self.story.volume_id,
+                                        self.story.host)
+
+    def test_30_check_device_exists(self):
+        # The device was removed by the tests above, so _format must fail.
+        assert_raises(exception.InvalidDevicePath, self.story.client._format,
+                      self.story.device_path)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[Initialize])
+class DeleteVolume(VolumeTest):
+    """Deletes the story's volume through the API."""
+
+    @time_out(60)
+    def test_delete(self):
+        self.story.api.delete(self.story.context, self.story.volume_id)
+
+
+@test(groups=[VOLUMES_DRIVER], depends_on_classes=[DeleteVolume])
+class ConfirmMissing(VolumeTest):
+    """Verifies the deleted volume can't be discovered or fetched anymore."""
+
+    @time_out(60)
+    def test_discover_should_fail(self):
+        # Either error type counts as success; which one depends on driver.
+        try:
+            self.story.client.driver.discover_volume(self.story.context,
+                                                     self.story.volume)
+            self.fail("Expecting an error but did not get one.")
+        except exception.Error:
+            pass
+        except trove_exception.ISCSITargetNotDiscoverable:
+            pass
+
+    @time_out(60)
+    def test_get_missing_volume(self):
+        # Poll until the record reports deleted; a VolumeNotFound raised
+        # along the way also counts as success.
+        try:
+            volume = poll_until(lambda: self.story.api.get(self.story.context,
+                                self.story.volume_id),
+                                lambda volume: volume["status"] != "deleted")
+            self.assertEqual(volume["deleted"], False)
+        except exception.VolumeNotFound:
+            pass
diff --git a/integration/tests/integration/tox.ini b/integration/tests/integration/tox.ini
new file mode 100644
index 00000000..81051493
--- /dev/null
+++ b/integration/tests/integration/tox.ini
@@ -0,0 +1,28 @@
+# Examples:
+# Run tests against Trove running locally in fake mode:
+# TROVE_CLIENT_PATH=../some_path tox -e local -- --group=blackbox
+[tox]
+envlist = py26
+
+[testenv]
+deps =
+ coverage
+ nose
+ pexpect
+ proboscis
+ sqlalchemy
+ {env:TROVE_PATH}
+ {env:TROVE_CLIENT_PATH}
+
+[testenv:py26]
+
+[testenv:local]
+deps =
+ nose
+ pexpect
+ proboscis
+ sqlalchemy
+ {env:TROVE_PATH}
+ {env:TROVE_CLIENT_PATH}
+commands =
+ {envpython} int_tests.py --conf=localhost.test.conf {posargs:DEFAULTS}