 app/workers/pages_worker.rb | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------------------
 1 file changed, 61 insertions(+), 40 deletions(-)
diff --git a/app/workers/pages_worker.rb b/app/workers/pages_worker.rb
index 59f4b4f16f4..c34259c15f1 100644
--- a/app/workers/pages_worker.rb
+++ b/app/workers/pages_worker.rb
@@ -12,62 +12,83 @@ class PagesWorker
     return unless valid?
 
     # Create status notifying the deployment of pages
-    @status = GenericCommitStatus.new(
-      project: project,
-      commit: build.commit,
-      user: build.user,
-      ref: build.ref,
-      stage: 'deploy',
-      name: 'pages:deploy'
-    )
+    @status = create_status
     @status.run!
-
-    FileUtils.mkdir_p(tmp_path)
-
-    # Calculate dd parameters: we limit the size of pages
-    max_size = current_application_settings.max_pages_size.megabytes
-    max_size ||= MAX_SIZE
-    blocks = 1 + max_size / BLOCK_SIZE
+    raise 'pages are outdated' unless latest?
 
     # Create temporary directory in which we will extract the artifacts
-    Dir.mktmpdir(nil, tmp_path) do |temp_path|
-      # We manually extract the archive and limit the archive size with dd
-      results = Open3.pipeline(%W(gunzip -c #{artifacts}),
-                               %W(dd bs=#{BLOCK_SIZE} count=#{blocks}),
-                               %W(tar -x -C #{temp_path} public/),
-                               err: '/dev/null')
-      return unless results.compact.all?(&:success?)
+    Dir.mktmpdir(nil, tmp_path) do |archive_path|
+      results = extract_archive(archive_path)
+      raise 'pages failed to extract' unless results.all?(&:success?)
 
       # Check if we did extract public directory
-      temp_public_path = File.join(temp_path, 'public')
-      return unless Dir.exists?(temp_public_path)
+      archive_public_path = File.join(archive_path, 'public')
+      raise 'pages miss the public folder' unless Dir.exists?(archive_public_path)
+      raise 'pages are outdated' unless latest?
+      deploy_page!(archive_public_path)
 
-      FileUtils.mkdir_p(pages_path)
+      @status.success
+    end
+  rescue => e
+    fail(e.message, !latest?)
+  end
 
-      # Ignore deployment if the HEAD changed when we were extracting the archive
-      return unless valid?
+  private
 
-      # Do atomic move of pages
-      # Move and removal may not be atomic, but they are significantly faster than extracting and removal
-      # 1. We move deployed public to previous public path (file removal is slow)
-      # 2. We move temporary public to be deployed public
-      # 3. We remove previous public path
-      FileUtils.move(public_path, previous_public_path, force: true)
-      FileUtils.move(temp_public_path, public_path)
-      FileUtils.rm_r(previous_public_path, force: true)
+  def create_status
+    GenericCommitStatus.new(
+      project: project,
+      commit: build.commit,
+      user: build.user,
+      ref: build.ref,
+      stage: 'deploy',
+      name: 'pages:deploy'
+    )
+  end
 
-      @status.success
-    end
+  def extract_archive(temp_path)
+    results = Open3.pipeline(%W(gunzip -c #{artifacts}),
+                             %W(dd bs=#{BLOCK_SIZE} count=#{blocks}),
+                             %W(tar -x -C #{temp_path} public/),
+                             err: '/dev/null')
+    results.compact
+  end
+
+  def deploy_page!(archive_public_path)
+    # Do atomic move of pages
+    # Move and removal may not be atomic, but they are significantly faster than extracting and removal
+    # 1. We move deployed public to previous public path (file removal is slow)
+    # 2. We move temporary public to be deployed public
+    # 3. We remove previous public path
+    FileUtils.mkdir_p(pages_path)
+    FileUtils.move(public_path, previous_public_path, force: true)
+    FileUtils.move(archive_public_path, public_path)
   ensure
-    @status.drop if @status && @status.active?
+    FileUtils.rm_r(previous_public_path, force: true)
   end
 
-  private
+  def fail(message, allow_failure = true)
+    @status.allow_failure = allow_failure
+    @status.description = message
+    @status.drop
+  end
 
   def valid?
+    build && build.artifacts_file?
+  end
+
+  def latest?
     # check if sha for the ref is still the most recent one
     # this helps in case multiple deployments happen
-    build && build.artifacts_file? && sha == latest_sha
+    sha == latest_sha
+  end
+
+  def blocks
+    # Calculate dd parameters: we limit the size of pages
+    max_size = current_application_settings.max_pages_size.megabytes
+    max_size ||= MAX_SIZE
+    blocks = 1 + max_size / BLOCK_SIZE
+    blocks
+  end
 
   def build
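A note on the extraction step above: the worker never hands the artifact straight to tar. The gunzip output is piped through dd, which stops copying after a fixed number of blocks, so an archive that decompresses to something enormous cannot fill the disk. Below is a minimal standalone sketch of the same technique in plain Ruby; the BLOCK_SIZE and MAX_SIZE values are illustrative assumptions, not GitLab's actual constants, and this extract_archive returns a boolean rather than the raw status array:

require 'open3'

# Illustrative values; the worker derives these from its own BLOCK_SIZE,
# MAX_SIZE and current_application_settings.max_pages_size.
BLOCK_SIZE = 32 * 1024                 # bytes copied per dd block
MAX_SIZE   = 100 * 1024 * 1024         # cap on the decompressed stream
BLOCKS     = 1 + MAX_SIZE / BLOCK_SIZE # +1 so a stream of exactly MAX_SIZE still fits

def extract_archive(archive, dest)
  # gunzip decompresses, dd forwards at most BLOCKS blocks and then stops,
  # tar unpacks only the public/ directory into dest.
  statuses = Open3.pipeline(%W(gunzip -c #{archive}),
                            %W(dd bs=#{BLOCK_SIZE} count=#{BLOCKS}),
                            %W(tar -x -C #{dest} public/),
                            err: '/dev/null')
  statuses.compact.all?(&:success?)
end

With these assumed values, blocks = 1 + 104_857_600 / 32_768 = 3201, so dd passes through at most roughly 100 MB; if it truncates the stream, tar hits an unexpected end of archive and exits non-zero, which the worker surfaces as 'pages failed to extract'.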
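The ordering inside deploy_page! is also worth spelling out. The two renames happen first because a rename within one filesystem is a fast metadata operation, while recursively deleting a large tree is slow; the delete is therefore deferred to an ensure block where it can no longer delay or break the swap. A sketch of the pattern, with explicit path arguments standing in for the worker's public_path and previous_public_path helpers (the example paths are hypothetical):

require 'fileutils'

def deploy_page!(new_public, public_path, previous_path)
  FileUtils.mkdir_p(File.dirname(public_path))
  # 1. Move the live tree aside; force: true tolerates a missing tree
  #    on the very first deployment.
  FileUtils.move(public_path, previous_path, force: true)
  # 2. Move the freshly extracted tree into place.
  FileUtils.move(new_public, public_path)
ensure
  # 3. Delete the old tree last; even if this step is slow or fails,
  #    the new site is already being served.
  FileUtils.rm_r(previous_path, force: true)
end

deploy_page!('/tmp/extract/public',
             '/var/pages/group/project/public',
             '/var/pages/group/project/public.old')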
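Finally, the error handling: every failure path now raises, perform rescues once, and allow_failure is derived from !latest?, so a deployment that merely lost a race with a newer one drops its status without marking the whole pipeline as failed, while a genuine extraction failure does fail it. A condensed sketch of that flow, assuming a CommitStatus-like object and a hypothetical do_deploy helper standing in for the extract/verify/deploy steps:

def perform
  @status = create_status
  @status.run!                          # mark the status as running
  raise 'pages are outdated' unless latest?
  do_deploy                             # any step that goes wrong raises
  @status.success
rescue => e
  # An outdated deployment lost a race with a newer one; let it fail
  # without reddening the commit's pipeline.
  fail(e.message, !latest?)
end

# Note: this deliberately shadows Kernel#fail inside the class, which is
# why the error paths above use raise instead of fail.
def fail(message, allow_failure = true)
  @status.allow_failure = allow_failure
  @status.description = message
  @status.drop                          # mark the status as failed
end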