#!/usr/bin/env bash
set -e
set -o pipefail

usage() {
	cat >&2 <<'EOF'
To publish the Docker documentation you need to set your access_key and secret_key
in the docs/awsconfig file (with the keys in a [profile $AWS_S3_BUCKET] section,
so you can keep more than one set of keys in the file) and set the AWS_S3_BUCKET
env var to the name of your bucket.

make AWS_S3_BUCKET=docs-stage.docker.com docs-release

will then push the documentation site to your s3 bucket.
EOF
	exit 1
}
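
# For reference: docs/awsconfig is a standard AWS CLI config file with one
# section per bucket. A minimal sketch of the expected layout (placeholder
# values, not real credentials):
#
#   [profile docs-stage.docker.com]
#   aws_access_key_id = <access_key>
#   aws_secret_access_key = <secret_key>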

[ "$AWS_S3_BUCKET" ] || usage

VERSION=$(cat VERSION)

if [ "$$AWS_S3_BUCKET" == "docs.docker.com" ]; then
	if [ "${VERSION%-dev}" != "$VERSION" ]; then
		echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
		exit 1
	fi
fi

export BUCKET=$AWS_S3_BUCKET
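
# The AWS CLI reads credentials from $AWS_CONFIG_FILE and selects the
# [profile $BUCKET] section via AWS_DEFAULT_PROFILE, matching the layout
# described in usage() above.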

export AWS_CONFIG_FILE="$(pwd)/awsconfig"
[ -e "$AWS_CONFIG_FILE" ] || usage
export AWS_DEFAULT_PROFILE=$BUCKET

echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"

setup_s3() {
	echo "Create $BUCKET"
	# Try creating the bucket. Ignore errors (it might already exist).
	aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true
	# Check access to the bucket.
	echo "test $BUCKET exists"
	aws s3 --profile $BUCKET ls s3://$BUCKET
	# Make the bucket accessible through website endpoints.
	echo "make $BUCKET accessible as a website"
	#aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
	s3conf=$(cat s3_website.json | envsubst)
	echo
	echo $s3conf
	echo
	aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
}
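
# s3_website.json is an envsubst template for the website configuration sent
# by put-bucket-website above. A sketch of the assumed shape (the real file
# ships alongside this script and may also carry RoutingRules that reference
# ${BUCKET}):
#
#   {
#       "IndexDocument": { "Suffix": "index.html" },
#       "ErrorDocument": { "Key": "jsearch/index.html" }
#   }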

build_current_documentation() {
	mkdocs build
}
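
# mkdocs writes the rendered site into site/, which the next function syncs
# up to the bucket.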

upload_current_documentation() {
	src=site/
	dst=s3://$BUCKET$1

	echo
	echo "Uploading $src"
	echo "  to $dst"
	echo
	#s3cmd --recursive --follow-symlinks --preserve --acl-public sync "$src" "$dst"
	#aws s3 cp --profile $BUCKET --cache-control "max-age=3600" --acl public-read "site/search_content.json" "$dst"

	# A roundabout way to send only the files we want: sync one file
	# extension at a time, because with too many files in any one set,
	# aws s3 sync seems to fall over with 2 files to go.
	endings=(json html xml css js gif png JPG ttf svg woff)
	for i in "${endings[@]}"; do
		# Build the exclude list as an array so the *.ext patterns reach
		# aws literally instead of being glob-expanded by the shell.
		include=()
		for j in "${endings[@]}"; do
			if [ "$i" != "$j" ]; then
				include+=(--exclude "*.$j")
			fi
		done
		echo "uploading *.$i"
		aws s3 sync --profile "$BUCKET" --cache-control "max-age=3600" --acl public-read \
			"${include[@]}" \
			--exclude "*.txt" \
			--exclude "*.text*" \
			--exclude "*Dockerfile" \
			--exclude "*.DS_Store" \
			--exclude "*.psd" \
			--exclude "*.ai" \
			--exclude "*.eot" \
			--exclude "*.otf" \
			--exclude "*.rej" \
			--exclude "*.rst" \
			--exclude "*.orig" \
			--exclude "*.py" \
			"$src" "$dst"
		echo "======================="
	done
}

setup_s3
build_current_documentation
upload_current_documentation

# Second pass: republish under a versioned path so this release's docs stay
# reachable after a later release overwrites the site root.
# Strip the patch component from the version - 1.0.2-dev -> v1.0
MAJOR_MINOR="v${VERSION%.*}"

# Build again with the /v1.0/-style prefix baked into site_url
sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
build_current_documentation
upload_current_documentation "/$MAJOR_MINOR/"