author     Lingxian Kong <anlin.kong@gmail.com>    2019-08-28 16:07:07 +1200
committer  Lingxian Kong <anlin.kong@gmail.com>    2019-08-29 15:41:58 +1200
commit     0ef474a70ce3d6ba15c19240425f2ac48d12e778 (patch)
tree       663156df263121956e2aa8d37380e63dfe99ea36 /etc
parent     259e0f6f093ec5397a778bd485379e2532b4cc5f (diff)
download   trove-0ef474a70ce3d6ba15c19240425f2ac48d12e778.tar.gz
Support keypair in devstack
Since Trove already supports specifying a Nova keypair when creating an instance (for management convenience), devstack needs to be changed to create the management keypair and add it to the Trove config file. One extra change in this patch is to use a single config file for the Trove API, task manager and conductor services.

Change-Id: I1e6c4f4305104815bdf89b31776a4955de61bc89
Story: 2005429
Task: 30463
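As an illustrative sketch only (not part of this diff), the consolidated config file that devstack would end up writing might look roughly like the lines below; the option name nova_keypair and the keypair name trove-mgmt are assumptions for illustration, not taken from this patch:

# /etc/trove/trove.conf -- single config file shared by trove-api,
# trove-taskmanager and trove-conductor after this change (sketch)
[DEFAULT]
# Name of the Nova keypair attached to new database instances so the
# operator can SSH in for management; devstack would create the keypair
# and record its name here (option/value names assumed for illustration).
nova_keypair = trove-mgmt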
Diffstat (limited to 'etc')
-rw-r--r--   etc/trove/trove-conductor.conf.sample     58
-rw-r--r--   etc/trove/trove-taskmanager.conf.sample   248
2 files changed, 0 insertions, 306 deletions
diff --git a/etc/trove/trove-conductor.conf.sample b/etc/trove/trove-conductor.conf.sample
deleted file mode 100644
index 8f634567..00000000
--- a/etc/trove/trove-conductor.conf.sample
+++ /dev/null
@@ -1,58 +0,0 @@
-[DEFAULT]
-debug = True
-trove_auth_url = http://0.0.0.0/identity/v2.0
-
-# The manager class to use for conductor. (string value)
-conductor_manager = trove.conductor.manager.Manager
-
-#===================== RPC Configuration =================================
-
-# URL representing the messaging driver to use and its full configuration.
-# If not set, we fall back to the 'rpc_backend' option and driver specific
-# configuration.
-#transport_url=<None>
-
-# The messaging driver to use. Options include rabbit, qpid and zmq.
-# Default is rabbit. (string value)
-#rpc_backend=rabbit
-
-# The default exchange under which topics are scoped. May be
-# overridden by an exchange name specified in the 'transport_url' option.
-control_exchange = trove
-
-[profiler]
-# If False, fully disable the profiling feature.
-#enabled = False
-# If False, don't trace SQL requests.
-#trace_sqlalchemy = True
-
-[database]
-connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove
-
-[oslo_messaging_notifications]
-
-#
-# From oslo.messaging
-#
-
-# The Driver(s) to handle sending notifications. Possible
-# values are messaging, messagingv2, routing, log, test, noop
-# (multi valued)
-# Deprecated group/name - [DEFAULT]/notification_driver
-#driver =
-
-# A URL representing the messaging driver to use for
-# notifications. If not set, we fall back to the same
-# configuration used for RPC. (string value)
-# Deprecated group/name - [DEFAULT]/notification_transport_url
-#transport_url = <None>
-
-# AMQP topic used for OpenStack notifications. (list value)
-# Deprecated group/name - [rpc_notifier2]/topics
-# Deprecated group/name - [DEFAULT]/notification_topics
-#topics = notifications
-
-# The maximum number of attempts to re-send a notification
-# message which failed to be delivered due to a recoverable
-# error. 0 - No retry, -1 - indefinite (integer value)
-#retry = -1
diff --git a/etc/trove/trove-taskmanager.conf.sample b/etc/trove/trove-taskmanager.conf.sample
deleted file mode 100644
index 214a84f6..00000000
--- a/etc/trove/trove-taskmanager.conf.sample
+++ /dev/null
@@ -1,248 +0,0 @@
-[DEFAULT]
-# Show debugging output in logs (sets DEBUG log level output)
-debug = True
-
-# Update the service and instance statuses if the instance fails to become
-# active within the configured usage_timeout.
-# usage_timeout = 600
-# restore_usage_timeout = 36000
-update_status_on_fail = True
-
-#================= RPC Configuration ================================
-
-# URL representing the messaging driver to use and its full configuration.
-# If not set, we fall back to the 'rpc_backend' option and driver specific
-# configuration.
-#transport_url=<None>
-
-# The messaging driver to use. Options include rabbit, qpid and zmq.
-# Default is rabbit. (string value)
-#rpc_backend=rabbit
-
-# The default exchange under which topics are scoped. May be
-# overridden by an exchange name specified in the 'transport_url' option.
-control_exchange = trove
-
-# DB API implementation
-db_api_implementation = trove.db.sqlalchemy.api
-
-# Configuration options for talking to nova via the novaclient.
-trove_auth_url = http://0.0.0.0/identity/v2.0
-#nova_compute_url = http://localhost:8774/v2
-#cinder_url = http://localhost:8776/v1
-#swift_url = http://localhost:8080/v1/AUTH_
-#neutron_url = http://localhost:9696/
-
-# nova_compute_url, cinder_url, swift_url, and neutron_url can all be fetched
-# from Keystone. To fetch from Keystone, comment out nova_compute_url,
-# cinder_url, swift_url, and neutron_url, and optionally uncomment the lines below.
-
-# Region name of this node. Used when searching catalog. Default value is None.
-#os_region_name = RegionOne
-# Service type to use when searching catalog.
-#nova_compute_service_type = compute
-# Service type to use when searching catalog.
-#cinder_service_type = volumev2
-# Service type to use when searching catalog.
-#swift_service_type = object-store
-# Service type to use when searching catalog.
-#neutron_service_type = network
-
-# Config options for enabling volume service
-trove_volume_support = True
-block_device_mapping = vdb
-device_path = /dev/vdb
-mount_point = /var/lib/mysql
-volume_time_out=30
-server_delete_time_out=480
-
-# Nova server boot options
-# sets the --config-drive argument when doing a nova boot
-# (controls how file injection is handled by nova)
-use_nova_server_config_drive = True
-
-# Configuration options for talking to nova via the novaclient.
-# These options are for an admin user in your keystone config.
-# It proxies the token received from the user to nova via this admin user's creds,
-# basically acting like the client via that proxy token.
-nova_proxy_admin_user = admin
-nova_proxy_admin_pass = 3de4922d8b6ac5a1aad9
-nova_proxy_admin_tenant_id =
-
-# Manager impl for the taskmanager
-taskmanager_manager=trove.taskmanager.manager.Manager
-
-# Manager sends Exists Notifications
-exists_notification_transformer = trove.extensions.mgmt.instances.models.NovaNotificationTransformer
-notification_service_id = mysql:2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b
-
-# Trove DNS
-trove_dns_support = False
-dns_account_id = 123456
-dns_auth_url = http://127.0.0.1/identity/v2.0
-dns_username = user
-dns_passkey = password
-dns_ttl = 3600
-dns_domain_name = 'trove.com.'
-dns_domain_id = 11111111-1111-1111-1111-111111111111
-dns_driver = trove.dns.designate.driver.DesignateDriver
-dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory
-dns_endpoint_url = http://127.0.0.1/v1/
-dns_service_type = dns
-
-# Neutron
-network_driver = trove.network.nova.NovaNetwork
-management_networks =
-
-# Trove Security Groups for Instances
-trove_security_groups_support = True
-trove_security_group_rule_cidr = 0.0.0.0/0
-
-# Guest related conf
-agent_heartbeat_time = 10
-agent_call_low_timeout = 5
-agent_call_high_timeout = 150
-agent_replication_snapshot_timeout = 36000
-
-# Config option for filtering the IP address that DNS uses
-# For nova-network, set this to the appropriate network label defined in nova
-# For neutron, set this to .* since users can specify custom network labels
-# You can also optionally specify regexes to match the actual IP addresses
-# ip_regex (white-list) is applied before black_list_regex in the filter chain
-network_label_regex = ^private$
-#ip_regex = ^(15.|123.)
-#black_list_regex = ^(10.0.0.)
-
-# Datastore templates
-template_path = /etc/trove/templates/
-
-# ============ Notification System configuration ===========================
-
-# Sets the notification driver used by oslo.messaging. Options include
-# messaging, messagingv2, log and routing. Default is 'noop'
-# notification_driver=noop
-
-# Topics used for OpenStack notifications, list value. Default is 'notifications'.
-# notification_topics=notifications
-
-# ============ Logging information =============================
-#log_dir = /integration/report
-#log_file = trove-taskmanager.log
-
-# ============ PyDev remote debugging =============================
-
-# Enable or disable pydev remote debugging.
-# Three values are allowed: 'disabled', 'enabled' and 'auto'.
-# If the value is 'auto', the service tries to connect to the remote debugger
-# server, but continues running with debugging disabled if that fails.
-pydev_debug = disabled
-
-# remote debug server host and port options
-#pydev_debug_host = localhost
-#pydev_debug_port = 5678
-
-# path to pydevd library. It will be used if pydevd is absent in sys.path
-#pydev_path = <path>
-
-# ================= Guestagent related ========================
-#guest_config = /etc/trove/trove-guestagent.conf
-# Use 'guest_info = /etc/guest_info' for pre-Kilo compatibility
-#guest_info = guest_info.conf
-# Use 'injected_config_location = /etc/trove' for pre-Kilo compatibility
-#injected_config_location = /etc/trove/conf.d
-#cloudinit_location = /etc/trove/cloudinit
-
-[database]
-
-# SQLAlchemy connection string for the reference implementation
-# registry server. Any valid SQLAlchemy connection string is fine.
-# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
-connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove
-# connection = mysql+pymysql://root:root@localhost/trove
-
-# Period in seconds after which SQLAlchemy should reestablish its connection
-# to the database.
-#
-# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
-# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
-# notice this, you can lower this value to ensure that SQLAlchemy reconnects
-# before MySQL can drop the connection.
-idle_timeout = 3600
-
-# ================= Security groups related ========================
-# Each future datastore implementation should implement
-# its own oslo config group with the following options defined in it:
-# - tcp_ports; udp_ports;
-
-[profiler]
-# If False, fully disable the profiling feature.
-#enabled = False
-# If False, don't trace SQL requests.
-#trace_sqlalchemy = True
-
-[oslo_messaging_notifications]
-
-#
-# From oslo.messaging
-#
-
-# The Driver(s) to handle sending notifications. Possible
-# values are messaging, messagingv2, routing, log, test, noop
-# (multi valued)
-# Deprecated group/name - [DEFAULT]/notification_driver
-#driver =
-
-# A URL representing the messaging driver to use for
-# notifications. If not set, we fall back to the same
-# configuration used for RPC. (string value)
-# Deprecated group/name - [DEFAULT]/notification_transport_url
-#transport_url = <None>
-
-# AMQP topic used for OpenStack notifications. (list value)
-# Deprecated group/name - [rpc_notifier2]/topics
-# Deprecated group/name - [DEFAULT]/notification_topics
-#topics = notifications
-
-# The maximum number of attempts to re-send a notification
-# message which failed to be delivered due to a recoverable
-# error. 0 - No retry, -1 - indefinite (integer value)
-#retry = -1
-
-[mysql]
-# Whether to permit ICMP. Default is False.
-icmp = True
-# Format (single port or port range): A, B-C
-# where C is greater than B
-tcp_ports = 3306
-volume_support = True
-device_path = /dev/vdb
-
-[redis]
-# Format (single port or port range): A, B-C
-# where C is greater than B
-tcp_ports = 6379, 16379
-volume_support = True
-device_path = /dev/vdb
-
-
-[cassandra]
-tcp_ports = 7000, 7001, 9042, 9160
-volume_support = True
-device_path = /dev/vdb
-
-[couchbase]
-tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199
-volume_support = True
-device_path = /dev/vdb
-
-[mongodb]
-volume_support = True
-device_path = /dev/vdb
-
-[vertica]
-tcp_ports = 5433, 5434, 22, 5444, 5450, 4803
-udp_ports = 5433, 4803, 4804, 6453
-volume_support = True
-device_path = /dev/vdb
-mount_point = /var/lib/vertica
-taskmanager_strategy = trove.common.strategies.cluster.experimental.vertica.taskmanager.VerticaTaskManagerStrategy