summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChung-Yen Chang <chung-yen.chang@10gen.com>2016-09-20 12:53:41 -0700
committerChung-Yen Chang <chung-yen.chang@10gen.com>2016-09-20 12:53:41 -0700
commit3d26ac950716bc63e25b1911e7674bf489e3ee52 (patch)
tree85e61fc08c847d87d6c69e00b4e0f7087abcb5f1
parent98327c6718bfb62e8fe8c29fda2d12f4cb4d962d (diff)
downloadmongo-3d26ac950716bc63e25b1911e7674bf489e3ee52.tar.gz
SERVER-25138: remove perf test dependencies
This change includes part of the changes from SERVER-26074 to remove some dead code.
-rw-r--r--etc/system_perf.yml203
1 files changed, 85 insertions, 118 deletions
diff --git a/etc/system_perf.yml b/etc/system_perf.yml
index 6917f3a0477..007202c160a 100644
--- a/etc/system_perf.yml
+++ b/etc/system_perf.yml
@@ -9,19 +9,6 @@ post:
params:
file_location: work/report.json
- command: shell.cleanup
- - command: shell.exec
- # destroy the cluster
- params:
- working_dir: work
- script: |
- set -e
- set -o verbose
- source ./dsienv.sh
- if [ ! -f "test.success" ]; then
- yes yes | ./terraform destroy
- if [ $? != 0 ]; then yes yes | ./terraform destroy; fi
- echo "Cluster DESTROYED."
- fi
functions:
"prepare environment":
@@ -79,57 +66,80 @@ functions:
params:
working_dir: work
script: |
- set -v
- ./terraform get --update
- "bring up cluster":
+ set -v
+ ./terraform get --update
- command: shell.exec
- # bring up the cluster
+ # set up $HOME/infrastructure_provisioning to keep track of resources,
+ # and allow Evergreen to release resources from there
params:
- working_dir: work
script: |
- # to create a mongod EC2 cluster
- set -e
set -o verbose
- pwd
- ls
- cat dsienv.sh
- source ./dsienv.sh
- # create all resources and instances
- $DSI_PATH/bin/setup-cluster.sh ${cluster} ../terraform
- - command: shell.exec # End on setup-cluster.sh so it's error code is scripts error code
- # After cluster up
+ if [ ! -d "$HOME/infrastructure_provisioning" ]; then
+ echo "copying terraform to Evergreen host"
+ mkdir $HOME/infrastructure_provisioning
+ cp -r terraform $HOME/infrastructure_provisioning/.
+ cp -r modules $HOME/infrastructure_provisioning/.
+ echo "copying infrastructure_teardown.sh to Evergreen host"
+ cp src/dsi/dsi/bin/destroy_cluster.sh $HOME/infrastructure_provisioning/terraform/infrastructure_teardown.sh
+ fi
+ ls -l $HOME/infrastructure_provisioning
+
+ "infrastructure provisioning":
+ - command: shell.exec
+ # if $HOME/infrastructure_provisioning exists, get info about provisioned resources
+ # from there otherwise provision resources from the cloud
params:
working_dir: work
script: |
- echo "EC2 Cluster CREATED."
- tar -czvf cluster_config.tgz infrastructure_provisioning.out.yml ips.sh ips.py terraform.tfstate cluster.tf terraform.tfvars variables.tf
- - command: s3.put
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- local_file: "work/cluster_config.tgz"
- remote_file: dsi-v3.2/${build_variant}/${revision}/cluster_configs/cluster_config-${build_id}.tgz
- bucket: mciuploads
- permissions: public-read
- content_type: ${content_type|application/x-gzip}
- display_name: ${cluster}-cluster-config
-
- "restore cluster":
- - command: s3.get
- params:
- aws_key: ${aws_key}
- aws_secret: ${aws_secret}
- remote_file: dsi-v3.2/${build_variant}/${revision}/cluster_configs/cluster_config-${build_id}.tgz
- bucket: mciuploads
- local_file: "work/cluster_config.tgz"
+ set -e
+ set -o verbose
+ if [ -e "$HOME/infrastructure_provisioning/terraform/terraform.tfstate" ]; then
+          echo "Retrieving info for existing resources"
+ cp $HOME/infrastructure_provisioning/terraform/terraform.tfstate .
+ else
+ echo "No existing resources found"
+ fi
- command: shell.exec
+ # call setup-cluster.sh, in most cases this only updates expire-on tag
params:
working_dir: work
- silent: true
script: |
- set -e
+ # don't run this with "set -e" so we can set up properly for the teardown.sh
set -o verbose
- tar -xf cluster_config.tgz
+ source ./dsienv.sh
+ if [ ! -e $HOME/infrastructure_provisioning/terraform/provisioned.${cluster} ]; then
+            echo "Provisioning new resources."
+ $DSI_PATH/bin/setup-cluster.sh ${cluster} ../terraform
+ else
+ # on host with pre-existing resources, call terraform directly to
+ # avoid recreating instances due to terraform apply concurrency limitation
+ echo "Update expire-on tag for existing resources."
+ ./terraform apply -var-file=cluster.json
+ # call env.sh to generate ips.sh, etc. for downstream modules
+ $DSI_PATH/bin/env.sh
+ fi
+ # handle the case when setup-cluster.sh fail
+ if [ $? -eq 0 ]; then
+ echo "Resource provisioned/updated."
+ # copy terraform information needed for teardown
+ cp {terraform.tfstate,cluster.tf,terraform.tfvars,security.tf} $HOME/infrastructure_provisioning/terraform/.
+ rsync -vr ../modules $HOME/infrastructure_provisioning/modules
+ cd $HOME/infrastructure_provisioning/terraform
+ ./terraform get
+            # use provisioned.${cluster} to indicate the type of clusters held by EVG host
+ # remove previous information and keep the latest cluster type
+ rm provisioned.*
+ touch provisioned.${cluster}
+ echo "Provisioning state updated on Evergreen host."
+ else
+ echo "Failed to provision resources. Cleaning up partial state."
+ yes yes | ./terraform destroy
+ if [ $? != 0 ]; then yes yes | ./terraform destroy; fi
+ echo "Resource released."
+ rm -r $HOME/infrastructure_provisioning
+ echo "Cleaned up provisioning state on Evergreen host. Exiting test."
+ exit 1
+ fi
"configure mongodb cluster":
- command: shell.exec
@@ -172,30 +182,6 @@ functions:
params:
name: "perf"
file: "work/perf.json"
- - command: shell.exec
- params:
- working_dir: work
- script: |
- set -e
- set -v
- touch test.success
-
- "destroy cluster":
- - command: shell.exec
- # destroy the cluster
- params:
- working_dir: work
- script: |
- set -e
- set -o verbose
- source ./dsienv.sh
- # destroy the EC2 cluster
- yes yes | ./terraform destroy
- # make sure we destroy the cluster in case of AWS API timing issue
- yes yes | ./terraform destroy
- echo "Cluster DESTROYED."
- echo "All perf results"
- cd ..
"make test log artifact":
- command: shell.exec
@@ -208,8 +194,6 @@ functions:
cd reports
# move additional file here
cp ../infrastructure_provisioning.out.yml .
- cp ../ips.sh .
- cp ../ips.py .
if [ -f "../terraform.log" ]; then cp ../terraform.log .; fi
cp ../perf.json .
cd ..
@@ -266,8 +250,7 @@ functions:
silent: true
script: |
set -o errexit
- set -o verbose
- TAGS="3.1.8-Baseline 3.2.0-Baseline"
+ TAGS="3.1.8-Baseline 3.2.0-Baseline 3.2.9-Baseline"
PROJECT="sys-perf-3.2"
OVERRIDEFILE="../src/dsi/dsi/analysis/v3.2/system_perf_override.json"
python -u ../src/dsi/dsi/analysis/dashboard_gen.py --rev ${revision} -f history.json -t tags.json --refTag $TAGS --overrideFile $OVERRIDEFILE --project_id sys-perf --task_name ${task_name} --variant ${build_variant} --jira-user ${perf_jira_user} --jira-password ${perf_jira_pw} || true
@@ -363,21 +346,13 @@ tasks:
content_type: ${content_type|application/x-gzip}
display_name: mongodb.tar.gz
-
-# The industry_benchmarks_WT task runs the "bring up cluster" task and is
-# the only one to do so - all other tasks run "restore cluster". As such,
-# all buildvariants must run industry_benchmarks_WT and run it first.
-
-# When adding or removing tasks, you also must ensure that the final task
-# and only the final task runs the "destroy cluster" function.
-
- name: industry_benchmarks_WT
depends_on:
- name: compile
variant: linux-standalone
commands:
- func: "prepare environment"
- - func: "bring up cluster"
+ - func: "infrastructure provisioning"
- func: "configure mongodb cluster"
vars:
storageEngine: "wiredTiger"
@@ -396,11 +371,11 @@ tasks:
- name: industry_benchmarks_MMAPv1
depends_on:
- - name: core_workloads_WT
- status : "*"
+ - name: compile
+ variant: linux-standalone
commands:
- func: "prepare environment"
- - func: "restore cluster"
+ - func: "infrastructure provisioning"
- func: "configure mongodb cluster"
vars:
storageEngine: "mmapv1"
@@ -419,11 +394,11 @@ tasks:
- name: core_workloads_WT
depends_on:
- - name: industry_benchmarks_WT
- status : "*"
+ - name: compile
+ variant: linux-standalone
commands:
- func: "prepare environment"
- - func: "restore cluster"
+ - func: "infrastructure provisioning"
- func: "configure mongodb cluster"
vars:
storageEngine: "wiredTiger"
@@ -440,11 +415,11 @@ tasks:
- name: core_workloads_MMAPv1
depends_on:
- - name: industry_benchmarks_MMAPv1
- status : "*"
+ - name: compile
+ variant: linux-standalone
commands:
- func: "prepare environment"
- - func: "restore cluster"
+ - func: "infrastructure provisioning"
- func: "configure mongodb cluster"
vars:
storageEngine: "mmapv1"
@@ -457,7 +432,6 @@ tasks:
- func: "upload log file"
vars:
test: "core_workloads_MMAPv1"
- - func: "destroy cluster"
- func: "analyze"
- name: industry_benchmarks_WT_oplog_comp
@@ -478,8 +452,6 @@ tasks:
- name: industry_benchmarks_MMAPv1_oplog_comp
depends_on:
- - name: core_workloads_WT_oplog_comp
- status: "*"
- name: industry_benchmarks_MMAPv1
variant: linux-standalone
status: "*"
@@ -496,8 +468,6 @@ tasks:
- name: core_workloads_WT_oplog_comp
depends_on:
- - name: industry_benchmarks_WT_oplog_comp
- status: "*"
- name: core_workloads_WT
variant: linux-standalone
status: "*"
@@ -514,8 +484,6 @@ tasks:
- name: core_workloads_MMAPv1_oplog_comp
depends_on:
- - name: industry_benchmarks_MMAPv1_oplog_comp
- status: "*"
- name: core_workloads_MMAPv1
variant: linux-standalone
status: "*"
@@ -536,7 +504,7 @@ tasks:
variant: linux-standalone
commands:
- func: "prepare environment"
- - func: "bring up cluster"
+ - func: "infrastructure provisioning"
- func: "configure mongodb cluster"
vars:
storageEngine: "wiredTiger"
@@ -553,11 +521,11 @@ tasks:
- name: initialsync_MMAPv1
depends_on:
- - name: initialsync_WT
- status: "*"
+ - name: compile
+ variant : linux-standalone
commands:
- func: "prepare environment"
- - func: "restore cluster"
+ - func: "infrastructure provisioning"
- func: "configure mongodb cluster"
vars:
storageEngine: "mmapv1"
@@ -570,7 +538,6 @@ tasks:
- func: "upload log file"
vars:
test: "initialsync_MMAPv1"
- - func: "destroy cluster"
- func: "analyze"
@@ -598,7 +565,7 @@ modules:
#######################################
-# Buildvariants #
+# Linux Buildvariants #
#######################################
buildvariants:
@@ -616,7 +583,7 @@ buildvariants:
platform: linux
project: &project dsi-v3.2
run_on:
- - "rhel70-perf-standalone"
+ - "rhel70-perf-single"
tasks:
- name: industry_benchmarks_WT
- name: core_workloads_WT
@@ -634,7 +601,7 @@ buildvariants:
platform: linux
project: *project
run_on:
- - "rhel70-perf-standalone"
+ - "rhel70-perf-single"
tasks:
- name: compile
distros:
@@ -655,7 +622,7 @@ buildvariants:
platform: linux
project: *project
run_on:
- - "rhel70-perf-standalone"
+ - "rhel70-perf-shard"
tasks:
- name: industry_benchmarks_WT
- name: core_workloads_WT
@@ -673,7 +640,7 @@ buildvariants:
platform: linux
project: *project
run_on:
- - "rhel70-perf-standalone"
+ - "rhel70-perf-replset"
tasks:
- name: industry_benchmarks_WT
- name: core_workloads_WT
@@ -686,12 +653,12 @@ buildvariants:
modules: *modules
expansions:
compile_flags: -j$(grep -c ^processor /proc/cpuinfo) CC=/opt/mongodbtoolchain/bin/gcc CXX=/opt/mongodbtoolchain/bin/g++ --release
- setup: replica
+ setup: replica-2node.dr
cluster: replica
platform: linux
project: *project
run_on:
- - "rhel70-perf-standalone"
+ - "rhel70-perf-replset"
tasks:
- name: initialsync_WT
- name: initialsync_MMAPv1
@@ -703,7 +670,7 @@ buildvariants:
expansions:
project: *project
run_on:
- - "rhel70-perf-standalone"
+ - "rhel70-perf-single"
tasks:
- name: industry_benchmarks_WT_oplog_comp
- name: core_workloads_WT_oplog_comp