summary refs log tree commit diff
path: root/lib/object_storage/direct_upload.rb
diff options
context:
space:
mode:
Diffstat (limited to 'lib/object_storage/direct_upload.rb')
-rw-r--r-- lib/object_storage/direct_upload.rb 59
1 files changed, 38 insertions, 21 deletions
diff --git a/lib/object_storage/direct_upload.rb b/lib/object_storage/direct_upload.rb
index 76f92f62e9c..90199114f2c 100644
--- a/lib/object_storage/direct_upload.rb
+++ b/lib/object_storage/direct_upload.rb
@@ -22,20 +22,20 @@ module ObjectStorage
MAXIMUM_MULTIPART_PARTS = 100
MINIMUM_MULTIPART_SIZE = 5.megabytes
- attr_reader :credentials, :bucket_name, :object_name
- attr_reader :has_length, :maximum_size, :consolidated_settings
+ attr_reader :config, :credentials, :bucket_name, :object_name
+ attr_reader :has_length, :maximum_size
- def initialize(credentials, bucket_name, object_name, has_length:, maximum_size: nil, consolidated_settings: false)
+ def initialize(config, object_name, has_length:, maximum_size: nil)
unless has_length
raise ArgumentError, 'maximum_size has to be specified if length is unknown' unless maximum_size
end
- @credentials = credentials
- @bucket_name = bucket_name
+ @config = config
+ @credentials = config.credentials
+ @bucket_name = config.bucket
@object_name = object_name
@has_length = has_length
@maximum_size = maximum_size
- @consolidated_settings = consolidated_settings
end
def to_hash
@@ -62,8 +62,16 @@ module ObjectStorage
end
def workhorse_client_hash
- return {} unless aws?
+ if config.aws?
+ workhorse_aws_hash
+ elsif config.azure?
+ workhorse_azure_hash
+ else
+ {}
+ end
+ end
+ def workhorse_aws_hash
{
UseWorkhorseClient: use_workhorse_s3_client?,
RemoteTempObjectID: object_name,
@@ -73,8 +81,25 @@ module ObjectStorage
Bucket: bucket_name,
Region: credentials[:region],
Endpoint: credentials[:endpoint],
- PathStyle: credentials.fetch(:path_style, false),
- UseIamProfile: credentials.fetch(:use_iam_profile, false)
+ PathStyle: config.use_path_style?,
+ UseIamProfile: config.use_iam_profile?,
+ ServerSideEncryption: config.server_side_encryption,
+ SSEKMSKeyID: config.server_side_encryption_kms_key_id
+ }.compact
+ }
+ }
+ end
+
+ def workhorse_azure_hash
+ {
+ # Azure requires Workhorse client because direct uploads can't
+ # use pre-signed URLs without buffering the whole file to disk.
+ UseWorkhorseClient: true,
+ RemoteTempObjectID: object_name,
+ ObjectStorage: {
+ Provider: 'AzureRM',
+ GoCloudConfig: {
+ URL: "azblob://#{bucket_name}"
}
}
}
@@ -82,7 +107,7 @@ module ObjectStorage
def use_workhorse_s3_client?
return false unless Feature.enabled?(:use_workhorse_s3_client, default_enabled: true)
- return false unless credentials.fetch(:use_iam_profile, false) || consolidated_settings
+ return false unless config.use_iam_profile? || config.consolidated_settings?
# The Golang AWS SDK does not support V2 signatures
return false unless credentials.fetch(:aws_signature_version, 4).to_i >= 4
@@ -95,7 +120,7 @@ module ObjectStorage
# Implements https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
def get_url
- if google?
+ if config.google?
connection.get_object_https_url(bucket_name, object_name, expire_at)
else
connection.get_object_url(bucket_name, object_name, expire_at)
@@ -169,23 +194,15 @@ module ObjectStorage
].min
end
- def aws?
- provider == 'AWS'
- end
-
- def google?
- provider == 'Google'
- end
-
def requires_multipart_upload?
- aws? && !has_length
+ config.aws? && !has_length
end
def upload_id
return unless requires_multipart_upload?
strong_memoize(:upload_id) do
- new_upload = connection.initiate_multipart_upload(bucket_name, object_name)
+ new_upload = connection.initiate_multipart_upload(bucket_name, object_name, config.fog_attributes)
new_upload.body["UploadId"]
end
end