author     Kenneth Reitz <me@kennethreitz.com>  2012-01-13 21:51:10 -0500
committer  Kenneth Reitz <me@kennethreitz.com>  2012-01-13 21:51:10 -0500
commit     a00c9093ef031ca34c3cb799b579758edc1c8592 (patch)
tree       5ed6784555cb5c1466f98e15e88d742b7e430e6d /awsauth.py
parent     775313189ad1e151db9036bdb0935e0377ae7f30 (diff)
download   python-requests-aws-a00c9093ef031ca34c3cb799b579758edc1c8592.tar.gz
re-style
Diffstat (limited to 'awsauth.py')
-rw-r--r--    awsauth.py    141
1 file changed, 80 insertions, 61 deletions
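
For context, the S3Auth class being restyled implements AWS signature version 2 signing: get_canonical_string() flattens the request into a newline-separated string (method, content-md5, content-type, date, any x-amz-* headers, then /bucket/key and the special query arguments), get_signature() HMAC-SHA1s that string with the secret key and base64-encodes the digest, and __call__() sets the Authorization header to 'AWS <access_key>:<signature>'. A minimal standalone sketch of the signing step, written for Python 2 to match the module; the credentials, date, bucket and key below are placeholder values:

    import base64
    import hmac
    from hashlib import sha1 as sha

    def sign(secret_key, canonical_string):
        # HMAC-SHA1 the canonical string with the secret key, then base64-encode the digest.
        h = hmac.new(secret_key, canonical_string, digestmod=sha)
        return base64.encodestring(h.digest()).strip()

    # Placeholder credentials and a hand-built canonical string for a GET of /file.txt
    # in bucket 'mybucket': method, empty content-md5, empty content-type, date, path.
    canonical = 'GET\n\n\nFri, 13 Jan 2012 21:51:10 GMT\n/mybucket/file.txt'
    print 'AWS %s:%s' % ('EXAMPLE-ACCESS-KEY', sign('EXAMPLE-SECRET-KEY', canonical))
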
diff --git a/awsauth.py b/awsauth.py
index 7ddac4f..eddcb6a 100644
--- a/awsauth.py
+++ b/awsauth.py
@@ -1,94 +1,113 @@
+# -*- coding: utf-8 -*-
+
 import base64
 import hmac
-import urllib
+import urllib
+
 from hashlib import sha1 as sha
 from urlparse import urlparse
 from email.utils import formatdate
+
 from requests.auth import AuthBase
+
 class S3Auth(AuthBase):
-    # List of Query String Arguments of Interest
-    special_params = ['acl', 'location', 'logging', 'partNumber', 'policy',
-                      'requestPayment', 'torrent', 'versioning', 'versionId',
-                      'versions', 'website', 'uploads', 'uploadId',
-                      'response-content-type', 'response-content-language',
-                      'response-expires', 'reponse-cache-control',
-                      'response-content-disposition',
-                      'response-content-encoding']
+    """Attaches AWS Authentication to the given Request object."""
+    # List of Query String Arguments of Interest
+    special_params = [
+        'acl', 'location', 'logging', 'partNumber', 'policy', 'requestPayment',
+        'torrent', 'versioning', 'versionId', 'versions', 'website', 'uploads',
+        'uploadId', 'response-content-type', 'response-content-language',
+        'response-expires', 'reponse-cache-control',
+        'response-content-disposition', 'response-content-encoding'
+    ]
-    """Attaches AWS Authentication to the given Request object."""
     def __init__(self, access_key, secret_key):
         self.access_key = str(access_key)
         self.secret_key = str(secret_key)
     def __call__(self, r):
-        #Create date header if it is not created yet
-        if not r.headers.has_key('date') and not r.headers.has_key('x-amz-date'):
+        # Create date header if it is not created yet.
+        if not 'date' in r.headers and not 'x-amz-date' in r.headers:
             r.headers['date'] = formatdate(timeval=None, localtime=False, usegmt=True)
+
         r.headers['Authorization'] = 'AWS %s:%s'%(self.access_key, self.get_signature(r))
         return r
-    def get_signature(self, r):
-        h = hmac.new(self.secret_key, self.get_canonical_string(r), digestmod=sha)
+    def get_signature(self, r):
+        h = hmac.new(self.secret_key, self.get_canonical_string(r), digestmod=sha)
         return base64.encodestring(h.digest()).strip()
-    def get_canonical_string(self, r):
+    def get_canonical_string(self, r):
         parsedurl = urlparse(r.url)
         objectkey = parsedurl.path[1:]
         query_args = parsedurl.query.split('&')
+
         #Sort alphabetical
         query_args.sort()
         bucket = ''
-        if len(parsedurl.netloc.split('.')) == 4:
-            bucket = parsedurl.netloc.split('.')[0]
-        if len(parsedurl.netloc.split('.')) == 3 and parsedurl.netloc.split('.')[0].lower() != 's3':
-            bucket = parsedurl.netloc.split('.')[0]
-
-        interesting_headers = {}
-        for key in r.headers:
-            lk = key.lower()
-            if r.headers[key] != None and (lk in ['content-md5', 'content-type', 'date'] or
-                lk.startswith('x-amz-')):
-                interesting_headers[lk] = r.headers[key].strip()
-
-        # these keys get empty strings if they don't exist
-        if not interesting_headers.has_key('content-type'):
-            interesting_headers['content-type'] = ''
-        if not interesting_headers.has_key('content-md5'):
-            interesting_headers['content-md5'] = ''
-
-        # If x-amz-date is used it supersedes the date header.
-        if interesting_headers.has_key('x-amz-date'):
-            interesting_headers['date'] = ''
-
-        sorted_header_keys = interesting_headers.keys()
-        sorted_header_keys.sort()
-
-        buf = "%s\n" % r.method
-        for key in sorted_header_keys:
-            val = interesting_headers[key]
-            if key.startswith('x-amz-'):
-                buf += "%s:%s\n" % (key, val)
-            else:
-                buf += "%s\n" % val
-
-        # append the bucket if it exists
-        if bucket != "":
-            buf += "/%s" % bucket
-
-        # add the objectkey. even if it doesn't exist, add the slash
-        buf += "/%s" % urllib.quote_plus(objectkey)
-
-        params_found = False
+
+        split = parsedurl.netloc.split('.')
+
+        if len(split) == 4:
+            bucket = split[0]
+
+        if len(split) == 3 and split[0].lower() != 's3':
+            bucket = split[0]
+
+        interesting_headers = {}
+        ok_keys = ['content-md5', 'content-type', 'date']
+
+        for key in r.headers:
+            lk = key.lower()
+            if r.headers[key] is not None and (lk in ok_keys or lk.startswith('x-amz-')):
+                interesting_headers[lk] = r.headers[key].strip()
+
+        # these keys get empty strings if they don't exist
+        if not interesting_headers.has_key('content-type'):
+            interesting_headers['content-type'] = ''
+
+        if not interesting_headers.has_key('content-md5'):
+            interesting_headers['content-md5'] = ''
+
+        # If x-amz-date is used it supersedes the date header.
+        if interesting_headers.has_key('x-amz-date'):
+            interesting_headers['date'] = ''
+
+        sorted_header_keys = interesting_headers.keys()
+        sorted_header_keys.sort()
+
+        buf = '%s\n' % r.method
+
+        for key in sorted_header_keys:
+            val = interesting_headers[key]
+
+            if key.startswith('x-amz-'):
+                buf += '%s:%s\n' % (key, val)
+            else:
+                buf += '%s\n' % val
+
+        # append the bucket if it exists
+        if bucket != '':
+            buf += '/%s' % bucket
+
+        # add the objectkey. even if it doesn't exist, add the slash
+        buf += '/%s' % urllib.quote_plus(objectkey)
+
+        params_found = False
+
         # handle special query string arguments
         for q in query_args:
             k = q.split('=')[0]
             if k in self.special_params:
+
                 if params_found:
-                    buf += "&%s"%q
-                else:
-                    buf += "?%s"%q
-                    params_found = True
-        return buf
+                    buf += '&%s' % q
+                else:
+                    buf += '?%s' % q
+
+                params_found = True
+
+        return buf
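
Since S3Auth subclasses requests.auth.AuthBase, it plugs into the standard auth= hook. A hedged usage sketch (Python 2, matching the module): the credentials, bucket name and object key are placeholders, and the virtual-hosted mybucket.s3.amazonaws.com endpoint style is assumed because get_canonical_string() derives the bucket from that kind of hostname:

    import requests
    from awsauth import S3Auth

    ACCESS_KEY = 'EXAMPLE-ACCESS-KEY'  # placeholder credentials
    SECRET_KEY = 'EXAMPLE-SECRET-KEY'

    # S3Auth adds the date and Authorization headers as the request goes out.
    r = requests.put('http://mybucket.s3.amazonaws.com/file.txt',
                     data='hello world', auth=S3Auth(ACCESS_KEY, SECRET_KEY))
    print r.status_code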