# frozen_string_literal: true

require "carrierwave/storage/fog"

# This pulls in https://github.com/carrierwaveuploader/carrierwave/pull/2504 to support
# sending AWS S3 encryption headers when copying objects.
#
# This patch also incorporates
# https://github.com/carrierwaveuploader/carrierwave/pull/2375 to provide
# Azure support, and
# https://github.com/carrierwaveuploader/carrierwave/pull/2397 to support
# a custom expire_at. This is already in CarrierWave v2.1.x, but
# upgrading this gem is a significant task:
# https://gitlab.com/gitlab-org/gitlab/-/issues/216067
module CarrierWave
  module Storage
    class Fog < Abstract
      class File
        def copy_to(new_path)
          # fog-aws needs multipart uploads to copy files above 5 GB,
          # and it is currently the only Fog provider that supports
          # multithreaded uploads (https://github.com/fog/fog-aws/pull/579).
          # Multithreaded uploads are essential for copying large amounts of data
          # within the request timeout.
          if ::Feature.enabled?(:s3_multithreaded_uploads, default_enabled: true) && fog_provider == 'AWS'
            # AWS SDK uses 10 threads by default and a multipart chunk size of 10 MB
            file.concurrency = 10
            file.multipart_chunk_size = 10485760
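            # Note: S3 allows at most 10,000 parts per multipart upload, so
            # 10 MB parts cap a single multipart copy at roughly 100 GB.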
            file.copy(@uploader.fog_directory, new_path, copy_to_options)
          else
            # Some Fog providers may issue a GET request (https://github.com/fog/fog-google/issues/512)
            # instead of a HEAD request after the transfer completes,
            # which might cause the file to be downloaded locally.
            # We fall back to the original copy_object for non-AWS providers.
            connection.copy_object(@uploader.fog_directory, file.key, @uploader.fog_directory, new_path, copy_to_options)
          end

          CarrierWave::Storage::Fog::File.new(@uploader, @base, new_path)
        end

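        # acl_header is defined by upstream CarrierWave (it supplies the
        # canned ACL, e.g. the x-amz-acl header on AWS); merging the
        # uploader's fog_attributes on top is what forwards the S3
        # encryption headers from PR 2504 when copying.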
        def copy_to_options
          acl_header.merge(@uploader.fog_attributes)
        end

        def authenticated_url(options = {})
          if %w[AWS Google Rackspace OpenStack AzureRM].include?(@uploader.fog_credentials[:provider])
            # avoid a remote GET request by using local references
            local_directory = connection.directories.new(key: @uploader.fog_directory)
            local_file = local_directory.files.new(key: path)
            expire_at = options[:expire_at] || ::Fog::Time.now + @uploader.fog_authenticated_url_expiration
            case @uploader.fog_credentials[:provider]
            when 'AWS', 'Google'
              # Older versions of fog-google do not support options as a parameter
              if url_options_supported?(local_file)
                local_file.url(expire_at, options)
              else
                warn "Options hash not supported in #{local_file.class}. You may need to upgrade your Fog provider."
                local_file.url(expire_at)
              end
            when 'Rackspace'
              connection.get_object_https_url(@uploader.fog_directory, path, expire_at, options)
            when 'OpenStack'
              connection.get_object_https_url(@uploader.fog_directory, path, expire_at)
            else
              local_file.url(expire_at)
            end
          end
        end
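
        private

        # url_options_supported? is called in authenticated_url above but is
        # not defined in this excerpt; this minimal version mirrors upstream
        # CarrierWave: a Fog file's #url accepts an options hash when its
        # arity is -2 (older fog-google versions only accept the expiry).
        def url_options_supported?(local_file)
          local_file.method(:url).arity == -2
        end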
      end
    end
  end
end
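
# Illustrative usage once this initializer is loaded (the uploader and
# attachment names below are hypothetical; any CarrierWave uploader backed
# by fog storage behaves the same):
#
#   file = AvatarUploader.new(user, :avatar).file        # a CarrierWave::Storage::Fog::File
#   file.copy_to("avatars/copy.png")                     # multipart copy on AWS, copy_object elsewhere
#   file.authenticated_url(expire_at: 1.hour.from_now)   # custom expiry from PR 2397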