author     Thiago da Silva <thiagodasilva@gmail.com>  2018-09-11 16:28:55 -0400
committer  Thiago da Silva <thiagodasilva@gmail.com>  2018-09-17 12:51:34 -0400
commit     5d70396247a81cbaf6aaf114a5b4dcfb7ace7eb9 (patch)
tree       6fe04060fedb5383653ce321f8632043b652390c /tools
parent     956172623c6684dae82bb1c856cb55d7012131a7 (diff)
download   swift-5d70396247a81cbaf6aaf114a5b4dcfb7ace7eb9.tar.gz
add multinode functional test to swift
This new test job adds multinode testing to swift. It currently configures 5 nodes: test-runner, proxy, account, container and object. It starts by checking out the latest release and then 'upgrades' the storage nodes to the current patch under test; that means the test-runner and the proxy are still running the latest release. Future work would include a new set of tests that add objects to the cluster prior to upgrading and then try to read those objects back after the upgrade. It should also be noted that the 'upgrade' currently only means a new 'git checkout'; the configuration files are not updated. A future patch should fix that too.

Change-Id: If8e09a082fc024257a98cf332de9a36a18d2adc5
Diffstat (limited to 'tools')
-rw-r--r--  tools/playbooks/common/install_dependencies.yaml (renamed from tools/playbooks/saio_single_node_setup/install_dependencies.yaml) | 6
-rw-r--r--  tools/playbooks/multinode_setup/common_config.yaml | 75
-rw-r--r--  tools/playbooks/multinode_setup/configure_loopback.yaml | 64
-rw-r--r--  tools/playbooks/multinode_setup/make_rings.yaml | 120
-rw-r--r--  tools/playbooks/multinode_setup/pre.yaml | 8
-rw-r--r--  tools/playbooks/multinode_setup/run.yaml | 42
-rwxr-xr-x  tools/playbooks/multinode_setup/templates/make_multinode_rings.j2 | 38
-rw-r--r--  tools/playbooks/multinode_setup/templates/rc.local.j2 | 8
-rw-r--r--  tools/playbooks/multinode_setup/templates/test.conf.j2 | 122
-rw-r--r--  tools/playbooks/probetests/post.yaml | 17
10 files changed, 498 insertions, 2 deletions
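Note: the Zuul job definition that ties these playbooks to a five-node nodeset lives in .zuul.yaml, which is outside the tools/-limited diffstat above. As a rough sketch only (the job name, parent, node labels and exact playbook ordering below are illustrative assumptions, not part of this change), such a job could look something like:

- job:
    name: swift-multinode-rolling-upgrade     # illustrative name
    parent: base                              # assumed parent
    nodeset:
      nodes:
        - {name: test-runner1, label: centos-7}   # labels are assumptions
        - {name: proxy1, label: centos-7}
        - {name: account1, label: centos-7}
        - {name: container1, label: centos-7}
        - {name: object1, label: centos-7}
      groups:
        # group names match the hosts: targets used by the playbooks below
        - {name: test-runner, nodes: [test-runner1]}
        - {name: proxy, nodes: [proxy1]}
        - {name: account, nodes: [account1]}
        - {name: container, nodes: [container1]}
        - {name: object, nodes: [object1]}
        - {name: storage, nodes: [account1, container1, object1]}
        - {name: swift-cluster, nodes: [proxy1, account1, container1, object1]}
    pre-run:
      - tools/playbooks/multinode_setup/pre.yaml
      - tools/playbooks/common/install_dependencies.yaml
      - tools/playbooks/multinode_setup/configure_loopback.yaml
      - tools/playbooks/multinode_setup/common_config.yaml
      - tools/playbooks/multinode_setup/make_rings.yaml
    run: tools/playbooks/multinode_setup/run.yaml
    post-run: tools/playbooks/probetests/post.yaml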
diff --git a/tools/playbooks/saio_single_node_setup/install_dependencies.yaml b/tools/playbooks/common/install_dependencies.yaml
index d34e3648c..b2fe7fe8e 100644
--- a/tools/playbooks/saio_single_node_setup/install_dependencies.yaml
+++ b/tools/playbooks/common/install_dependencies.yaml
@@ -22,4 +22,8 @@
         - python-pyeclib
         - python-nose
         - python-swiftclient
-  become: true
+
+    - name: install python modules with pip
+      pip: name={{ item }} state=present extra_args='--upgrade'
+      with_items:
+        - crudini
diff --git a/tools/playbooks/multinode_setup/common_config.yaml b/tools/playbooks/multinode_setup/common_config.yaml
new file mode 100644
index 000000000..8a8b0847f
--- /dev/null
+++ b/tools/playbooks/multinode_setup/common_config.yaml
@@ -0,0 +1,75 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+- hosts: swift-cluster
+  become: true
+  tasks:
+
+    - name: create /var/run/swift
+      file:
+        path: '/var/run/swift'
+        owner: '{{ ansible_user_id }}'
+        group: '{{ ansible_user_gid }}'
+        state: directory
+
+    - name: create /var/cache/swift
+      file:
+        path: '/var/cache/swift'
+        owner: '{{ ansible_user_id }}'
+        group: '{{ ansible_user_gid }}'
+        state: directory
+
+    - name: create rc.local from template
+      template: src=rc.local.j2 dest=/etc/rc.d/rc.local owner=root group=root mode=0755
+
+    - name: set selinux to permissive
+      selinux: policy=targeted state=disabled
+
+    - name: configure rsyslog
+      command: cp {{ zuul.project.src_dir }}/doc/saio/rsyslog.d/10-swift.conf /etc/rsyslog.d/
+
+    - name: modify /etc/rsyslog.conf
+      lineinfile: dest=/etc/rsyslog.conf
+                  line="$PrivDropToGroup adm"
+                  create=yes
+                  insertafter="^#### GLOBAL DIRECTIVES"
+
+    - name: assure /var/log/swift directory exists
+      file:
+        path: '/var/log/swift'
+        state: directory
+        owner: root
+        group: adm
+        mode: 'g+wt'
+
+    - name: restart rsyslog
+      service: name=rsyslog state=restarted enabled=yes
+
+    - name: clean up /etc/swift directory
+      file:
+        path: '/etc/swift'
+        state: absent
+
+    - name: create /etc/swift directory
+      file:
+        path: '/etc/swift'
+        state: directory
+        owner: '{{ ansible_user_id }}'
+        group: '{{ ansible_user_gid }}'
+
+- hosts: test-runner
+  tasks:
+    - name: add new env. variable for running tests
+      lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export SWIFT_TEST_CONFIG_FILE=/home/{{ ansible_ssh_user }}/test.conf"
+
+    - name: copy the sample configuration files for running tests
+      template: src=test.conf.j2 dest=/home/{{ ansible_ssh_user }}/test.conf
diff --git a/tools/playbooks/multinode_setup/configure_loopback.yaml b/tools/playbooks/multinode_setup/configure_loopback.yaml
new file mode 100644
index 000000000..7a82e132a
--- /dev/null
+++ b/tools/playbooks/multinode_setup/configure_loopback.yaml
@@ -0,0 +1,64 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+- hosts: storage
+  become: true
+  tasks:
+    - name: assure /srv directory exists
+      file:
+        path: '/srv'
+        state: directory
+
+    - name: create loopback device
+      command: truncate -s 1GB /srv/swift-disk creates=/srv/swift-disk
+
+    - name: create filesystem /srv/swift-disk
+      become: true
+      filesystem: fstype=xfs dev=/srv/swift-disk
+
+    - name: create mount path /mnt/sdb1
+      file:
+        path: '/mnt/sdb1'
+        state: directory
+
+    - name: mount /mnt/sdb1
+      mount: name=/mnt/sdb1 src=/srv/swift-disk fstype=xfs opts="loop,noatime,nodiratime,nobarrier,logbufs=8" dump=0 passno=0 state=mounted
+
+    - name: create sub-partitions
+      file:
+        path: '/mnt/sdb1/{{ item }}'
+        state: directory
+        owner: '{{ ansible_user_id }}'
+        group: '{{ ansible_user_gid }}'
+      with_items:
+        - 1
+
+    - name: create symlinks
+      become: true
+      file:
+        src: '/mnt/sdb1/{{ item }}'
+        dest: '/srv/{{ item }}'
+        owner: '{{ ansible_user_id }}'
+        group: '{{ ansible_user_gid }}'
+        state: link
+      with_items:
+        - 1
+
+    - name: create node partition directories
+      file:
+        path: '/srv/1/node/sdb{{ item }}'
+        owner: '{{ ansible_user_id }}'
+        group: '{{ ansible_user_gid }}'
+        state: directory
+      with_items:
+        - [1, 2, 3, 4, 5, 6, 7, 8]
+
diff --git a/tools/playbooks/multinode_setup/make_rings.yaml b/tools/playbooks/multinode_setup/make_rings.yaml
new file mode 100644
index 000000000..e2ed794b1
--- /dev/null
+++ b/tools/playbooks/multinode_setup/make_rings.yaml
@@ -0,0 +1,120 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+- hosts: all
+  tasks:
+    - name: get latest release
+      shell:
+        cmd: git describe --abbrev=0
+        executable: /bin/bash
+        chdir: '{{ zuul.project.src_dir }}'
+      register: latest_swift_release
+
+ - name: "checkout a previous version: {{ previous_swift_version | default(latest_swift_release.stdout) }}"
+ shell:
+ cmd: git checkout {{ previous_swift_verion | default(latest_swift_release.stdout) }} -b previous_swift_version
+ executable: /bin/bash
+ chdir: '{{ zuul.project.src_dir }}'
+
+    - name: install swift
+      become: true
+      shell:
+        cmd: python setup.py develop
+        executable: /bin/bash
+        chdir: '{{ zuul.project.src_dir }}'
+
+
+- hosts: proxy
+  become: true
+  tasks:
+    - name: start memcache
+      service: name=memcached state=started enabled=yes
+
+    - name: copy proxy-server.conf file
+      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/proxy-server.conf /etc/swift
+
+    - name: set the options in the proxy config file
+      shell:
+        cmd: |
+          crudini --set /etc/swift/proxy-server.conf DEFAULT bind_ip {{ hostvars['proxy1'].nodepool.public_ipv4 }}
+          crudini --set /etc/swift/proxy-server.conf DEFAULT user {{ ansible_user_id }}
+        executable: /bin/bash
+
+- hosts: account
+  become: true
+  tasks:
+    - name: copy account-server.conf file
+      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/account-server/1.conf /etc/swift/account-server.conf
+
+    - name: set the options in the account config file
+      shell:
+        cmd: |
+          crudini --set /etc/swift/account-server.conf DEFAULT bind_ip {{ hostvars['account1'].nodepool.public_ipv4 }}
+          crudini --set /etc/swift/account-server.conf DEFAULT user {{ ansible_user_id }}
+        executable: /bin/bash
+
+- hosts: container
+  become: true
+  tasks:
+    - name: copy container-server.conf file
+      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/container-server/1.conf /etc/swift/container-server.conf
+
+    - name: set the options in the container config file
+      shell:
+        cmd: |
+          crudini --set /etc/swift/container-server.conf DEFAULT bind_ip {{ hostvars['container1'].nodepool.public_ipv4 }}
+          crudini --set /etc/swift/container-server.conf DEFAULT user {{ ansible_user_id }}
+        executable: /bin/bash
+
+- hosts: object
+  become: true
+  tasks:
+    - name: copy object-server.conf file
+      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/object-server/1.conf /etc/swift/object-server.conf
+
+    - name: set the options in the object config file
+      shell:
+        cmd: |
+          crudini --set /etc/swift/object-server.conf DEFAULT bind_ip {{ hostvars['object1'].nodepool.public_ipv4 }}
+          crudini --set /etc/swift/object-server.conf DEFAULT user {{ ansible_user_id }}
+        executable: /bin/bash
+
+- hosts: swift-cluster
+  become: true
+  tasks:
+    - name: copy swift.conf
+      command: cp -r {{ zuul.project.src_dir }}/doc/saio/swift/swift.conf /etc/swift
+
+    - name: set correct ownership of /etc/swift
+      file: path=/etc/swift owner={{ ansible_user_id }} group={{ ansible_user_gid }} recurse=yes
+
+- hosts: test-runner
+  tasks:
+
+    - name: create remakerings from template
+      template: src=make_multinode_rings.j2 dest=/home/{{ ansible_ssh_user }}/make_multinode_rings mode=0755
+
+    - name: create rings dir
+      file: >
+        path=/home/{{ ansible_ssh_user }}/rings
+        state=directory
+
+    - name: make rings
+      shell:
+        cmd: /home/{{ ansible_ssh_user }}/make_multinode_rings
+        executable: /bin/bash
+
+    - name: scp rings to all swift-cluster nodes
+      command: scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=5 -o ConnectionAttempts=360 /home/{{ ansible_ssh_user }}/rings/{{ item[0] }} {{ ansible_ssh_user }}@{{ hostvars[item[1]].nodepool.public_ipv4 }}:/etc/swift
+      with_nested:
+        - ['account.ring.gz', 'container.ring.gz', 'object.ring.gz', 'object-1.ring.gz', 'object-2.ring.gz']
+        - "{{ groups['swift-cluster'] }}"
diff --git a/tools/playbooks/multinode_setup/pre.yaml b/tools/playbooks/multinode_setup/pre.yaml
new file mode 100644
index 000000000..d7e4670a8
--- /dev/null
+++ b/tools/playbooks/multinode_setup/pre.yaml
@@ -0,0 +1,8 @@
+- hosts: all
+  roles:
+    # Run bindep and test-setup after devstack so that they won't interfere
+    - role: bindep
+      bindep_profile: test
+      bindep_dir: "{{ zuul_work_dir }}"
+    - test-setup
+    - ensure-tox
diff --git a/tools/playbooks/multinode_setup/run.yaml b/tools/playbooks/multinode_setup/run.yaml
new file mode 100644
index 000000000..a2eca8eb7
--- /dev/null
+++ b/tools/playbooks/multinode_setup/run.yaml
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+- hosts: storage
+  tasks:
+
+    - name: checkout master swift
+      shell:
+        cmd: git checkout master
+        executable: /bin/bash
+        chdir: '{{ zuul.project.src_dir }}'
+
+    - name: install swift
+      become: true
+      shell:
+        cmd: python setup.py develop
+        executable: /bin/bash
+        chdir: '{{ zuul.project.src_dir }}'
+
+- hosts: swift-cluster
+  tasks:
+    - name: start services
+      command: swift-init main start
+
+- hosts: test-runner
+  tasks:
+    - name: Run func tests with tempauth users
+      include_role:
+        name: tox
+      vars:
+        tox_envlist: func
+        tox_environment:
+          SWIFT_TEST_CONFIG_FILE: /home/{{ ansible_ssh_user }}/test.conf
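The tox role invocation above is the heart of the test run; a rough manual equivalent on the test-runner node would be the following sketch (the checkout path is an assumption about where Zuul places the repo on the node):

# rough manual equivalent of the 'Run func tests with tempauth users' task
export SWIFT_TEST_CONFIG_FILE=$HOME/test.conf   # written by common_config.yaml from test.conf.j2
cd ~/swift                                      # i.e. wherever zuul.project.src_dir points
tox -e func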
diff --git a/tools/playbooks/multinode_setup/templates/make_multinode_rings.j2 b/tools/playbooks/multinode_setup/templates/make_multinode_rings.j2
new file mode 100755
index 000000000..8c951228a
--- /dev/null
+++ b/tools/playbooks/multinode_setup/templates/make_multinode_rings.j2
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -e
+
+cd /home/{{ ansible_ssh_user }}/rings
+
+rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
+
+swift-ring-builder object.builder create 10 3 1
+swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
+swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
+swift-ring-builder object.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
+swift-ring-builder object.builder rebalance
+swift-ring-builder object-1.builder create 10 2 1
+swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
+swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
+swift-ring-builder object-1.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
+swift-ring-builder object-1.builder rebalance
+swift-ring-builder object-2.builder create 10 6 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb1 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb2 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb3 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb4 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb5 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb6 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb7 1
+swift-ring-builder object-2.builder add r1z1-{{ hostvars['object1'].nodepool.public_ipv4 }}:6010/sdb8 1
+swift-ring-builder object-2.builder rebalance
+swift-ring-builder container.builder create 10 3 1
+swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb1 1
+swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb2 1
+swift-ring-builder container.builder add r1z1-{{ hostvars['container1'].nodepool.public_ipv4 }}:6011/sdb3 1
+swift-ring-builder container.builder rebalance
+swift-ring-builder account.builder create 10 3 1
+swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb1 1
+swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb2 1
+swift-ring-builder account.builder add r1z1-{{ hostvars['account1'].nodepool.public_ipv4 }}:6012/sdb3 1
+swift-ring-builder account.builder rebalance
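For readers less familiar with the ring builder CLI: the positional arguments to 'create' are <part_power> <replicas> <min_part_hours>, and devices are added as r<region>z<zone>-<ip>:<port>/<device> <weight>. For example (the IP is a placeholder):

# 2^10 partitions, 3 replicas, 1 hour minimum between moves of any given partition
swift-ring-builder object.builder create 10 3 1
# region 1, zone 1, object node at 192.0.2.10, object-server port 6010, device sdb1, weight 1
swift-ring-builder object.builder add r1z1-192.0.2.10:6010/sdb1 1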
diff --git a/tools/playbooks/multinode_setup/templates/rc.local.j2 b/tools/playbooks/multinode_setup/templates/rc.local.j2
new file mode 100644
index 000000000..6e783bb6c
--- /dev/null
+++ b/tools/playbooks/multinode_setup/templates/rc.local.j2
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+mkdir -p /var/cache/swift
+chown {{ ansible_user_id }}:{{ ansible_user_gid }} /var/cache/swift*
+mkdir -p /var/run/swift
+chown {{ ansible_user_id }}:{{ ansible_user_gid }} /var/run/swift
+
+exit 0
diff --git a/tools/playbooks/multinode_setup/templates/test.conf.j2 b/tools/playbooks/multinode_setup/templates/test.conf.j2
new file mode 100644
index 000000000..9be3a23f1
--- /dev/null
+++ b/tools/playbooks/multinode_setup/templates/test.conf.j2
@@ -0,0 +1,122 @@
+[func_test]
+# Sample config for Swift with tempauth
+auth_host = {{ hostvars['proxy1'].nodepool.public_ipv4 }}
+auth_port = 8080
+auth_ssl = no
+auth_prefix = /auth/
+# Sample config for Swift with Keystone v2 API.
+# For keystone v2 change auth_version to 2 and auth_prefix to /v2.0/.
+# And "allow_account_management" should not be set "true".
+#auth_version = 3
+#auth_host = localhost
+#auth_port = 5000
+#auth_ssl = no
+#auth_prefix = /v3/
+
+# Primary functional test account (needs admin access to the account)
+account = test
+username = tester
+password = testing
+s3_access_key = test:tester
+s3_secret_key = testing
+
+# User on a second account (needs admin access to the account)
+account2 = test2
+username2 = tester2
+password2 = testing2
+
+# User on same account as first, but without admin access
+username3 = tester3
+password3 = testing3
+# s3api tests require a user in the same account as the primary user, but with a different username
+s3_access_key2 = test:tester3
+s3_secret_key2 = testing3
+
+# Fourth user is required for keystone v3 specific tests.
+# Account must be in a non-default domain.
+#account4 = test4
+#username4 = tester4
+#password4 = testing4
+#domain4 = test-domain
+
+# Fifth user is required for service token-specific tests.
+# The account must be different from the primary test account.
+# The user must not have a group (tempauth) or role (keystoneauth) on
+# the primary test account. The user must have a group/role that is unique
+# and not given to the primary tester and is specified in the options
+# <prefix>_require_group (tempauth) or <prefix>_service_roles (keystoneauth).
+#account5 = test5
+#username5 = tester5
+#password5 = testing5
+
+# The service_prefix option is used for service token-specific tests.
+# If service_prefix or username5 above is not supplied, the tests are skipped.
+# To set the value and enable the service token tests, look at the
+# reseller_prefix option in /etc/swift/proxy-server.conf. There must be at
+# least two prefixes. If not, add a prefix as follows (where we add SERVICE):
+# reseller_prefix = AUTH, SERVICE
+# The service_prefix must match the <prefix> used in <prefix>_require_group
+# (tempauth) or <prefix>_service_roles (keystoneauth); for example:
+# SERVICE_require_group = service
+# SERVICE_service_roles = service
+# Note: Do not enable service token tests if the first prefix in
+# reseller_prefix is the empty prefix AND the primary functional test
+# account contains an underscore.
+#service_prefix = SERVICE
+
+# Sixth user is required for access control tests.
+# Account must have a role for reseller_admin_role(keystoneauth).
+#account6 = test
+#username6 = tester6
+#password6 = testing6
+
+collate = C
+
+# Only necessary if a pre-existing server uses self-signed certificate
+insecure = no
+
+# Tests that are dependent on domain_remap middleware being installed also
+# require one of the domain_remap storage_domain values to be specified here,
+# otherwise those tests will be skipped.
+storage_domain =
+
+[unit_test]
+fake_syslog = False
+
+[probe_test]
+# check_server_timeout = 30
+# validate_rsync = false
+
+[swift-constraints]
+# The functional test runner will try to use the constraint values provided in
+# the swift-constraints section of test.conf.
+#
+# If a constraint value does not exist in that section, or because the
+# swift-constraints section does not exist, the constraints values found in
+# the /info API call (if successful) will be used.
+#
+# If a constraint value cannot be found in the /info results, either because
+# the /info API call failed, or a value is not present, the constraint value
+# used will fall back to those loaded by the constraints module at time of
+# import (which will attempt to load /etc/swift/swift.conf, see the
+# swift.common.constraints module for more information).
+#
+# Note that the cluster must have "sane" values for the test suite to pass
+# (for some definition of sane).
+#
+#max_file_size = 5368709122
+#max_meta_name_length = 128
+#max_meta_value_length = 256
+#max_meta_count = 90
+#max_meta_overall_size = 4096
+#max_header_size = 8192
+#extra_header_count = 0
+#max_object_name_length = 1024
+#container_listing_limit = 10000
+#account_listing_limit = 10000
+#max_account_name_length = 256
+#max_container_name_length = 256
+
+# Newer swift versions default to strict cors mode, but older ones were the
+# opposite.
+#strict_cors_mode = true
diff --git a/tools/playbooks/probetests/post.yaml b/tools/playbooks/probetests/post.yaml
index e95a59ead..e9dfea1ca 100644
--- a/tools/playbooks/probetests/post.yaml
+++ b/tools/playbooks/probetests/post.yaml
@@ -9,8 +9,23 @@
         recurse: yes
     - name: Copy swift logs from worker nodes to executor node
       synchronize:
-        src: '/var/log/swift'
+        src: '/var/log/swift/'
         dest: '{{ zuul.executor.log_root }}'
         mode: pull
         copy_links: true
         verify_host: true
+      failed_when: false
+    - name: Ensure swift configs are readable before syncing
+      file:
+        path: '/etc/swift'
+        mode: u=rwX,g=rX,o=rX
+        state: directory
+        recurse: yes
+    - name: Copy swift config from worker nodes to executor node
+      synchronize:
+        src: '/etc/swift/'
+        dest: '{{ zuul.executor.log_root }}'
+        mode: pull
+        copy_links: true
+        verify_host: true
+      failed_when: false