summaryrefslogtreecommitdiff
path: root/openid/fetchers.py
diff options
context:
space:
mode:
authorVlastimil Zíma <vlastimil.zima@nic.cz>2017-11-24 15:07:29 +0100
committerVlastimil Zíma <vlastimil.zima@nic.cz>2017-11-29 08:38:30 +0100
commit8f0ff0d27771514d16a415b8ac76d18ea0809f38 (patch)
tree06b56cd9666f85aec459e9264e4c724f86c70f0d /openid/fetchers.py
parentf58d7cee3e9f4bff9854dc10ffcd105fb3bc6619 (diff)
downloadopenid-8f0ff0d27771514d16a415b8ac76d18ea0809f38.tar.gz
Pepify and add flake8
Diffstat (limited to 'openid/fetchers.py')
-rw-r--r--openid/fetchers.py28
1 file changed, 23 insertions, 5 deletions
diff --git a/openid/fetchers.py b/openid/fetchers.py
index b30f895..750b5f5 100644
--- a/openid/fetchers.py
+++ b/openid/fetchers.py
@@ -32,6 +32,7 @@ except ImportError:
USER_AGENT = "python-openid/%s (%s)" % (openid.__version__, sys.platform)
MAX_RESPONSE_KB = 1024
+
def fetch(url, body=None, headers=None):
"""Invoke the fetch method on the default fetcher. Most users
should need only this method.
@@ -41,6 +42,7 @@ def fetch(url, body=None, headers=None):
fetcher = getDefaultFetcher()
return fetcher.fetch(url, body, headers)
+
def createHTTPFetcher():
"""Create a default HTTP fetcher instance
@@ -52,11 +54,13 @@ def createHTTPFetcher():
return fetcher
+
# Contains the currently set HTTP fetcher. If it is set to None, the
# library will call createHTTPFetcher() to set it. Do not access this
# variable outside of this module.
_default_fetcher = None
+
def getDefaultFetcher():
"""Return the default fetcher instance
if no fetcher has been set, it will create a default fetcher.
@@ -71,6 +75,7 @@ def getDefaultFetcher():
return _default_fetcher
+
def setDefaultFetcher(fetcher, wrap_exceptions=True):
"""Set the default fetcher
@@ -91,6 +96,7 @@ def setDefaultFetcher(fetcher, wrap_exceptions=True):
else:
_default_fetcher = ExceptionWrappingFetcher(fetcher)
+
def usingCurl():
"""Whether the currently set HTTP fetcher is a Curl HTTP fetcher."""
fetcher = getDefaultFetcher()
@@ -98,6 +104,7 @@ def usingCurl():
fetcher = fetcher.fetcher
return isinstance(fetcher, CurlHTTPFetcher)
+
class HTTPResponse(object):
"""XXX document attributes"""
headers = None
@@ -116,6 +123,7 @@ class HTTPResponse(object):
self.status,
self.final_url)
+
class HTTPFetcher(object):
"""
This class is the interface for openid HTTP fetchers. This
@@ -145,19 +153,23 @@ class HTTPFetcher(object):
"""
raise NotImplementedError
+
def _allowedURL(url):
return url.startswith('http://') or url.startswith('https://')
+
class HTTPFetchingError(Exception):
    """Exception wrapped around every error raised by the underlying
    fetcher when the ExceptionWrappingFetcher is in use.

    @ivar why: The exception that caused this exception
    """

    def __init__(self, why=None):
        # Forward the cause to Exception so str()/args behave normally,
        # and keep it on `why` for callers that inspect the original error.
        super(HTTPFetchingError, self).__init__(why)
        self.why = why
+
class ExceptionWrappingFetcher(HTTPFetcher):
"""Fetcher wrapper which wraps all exceptions to `HTTPFetchingError`."""
@@ -175,6 +187,7 @@ class ExceptionWrappingFetcher(HTTPFetcher):
raise HTTPFetchingError(why=exc_inst)
+
class Urllib2Fetcher(HTTPFetcher):
"""An C{L{HTTPFetcher}} that uses urllib2.
"""
@@ -201,7 +214,7 @@ class Urllib2Fetcher(HTTPFetcher):
return self._makeResponse(f)
finally:
f.close()
- except urllib2.HTTPError, why:
+ except urllib2.HTTPError as why:
try:
return self._makeResponse(why)
finally:
@@ -220,6 +233,7 @@ class Urllib2Fetcher(HTTPFetcher):
return resp
+
class HTTPError(HTTPFetchingError):
"""
This exception is raised by the C{L{CurlHTTPFetcher}} when it
@@ -228,12 +242,14 @@ class HTTPError(HTTPFetchingError):
pass
# XXX: define what we mean by paranoid, and make sure it is.
+
+
class CurlHTTPFetcher(HTTPFetcher):
"""
An C{L{HTTPFetcher}} that uses pycurl for fetching.
See U{http://pycurl.sourceforge.net/}.
"""
- ALLOWED_TIME = 20 # seconds
+ ALLOWED_TIME = 20 # seconds
def __init__(self):
HTTPFetcher.__init__(self)
@@ -244,7 +260,7 @@ class CurlHTTPFetcher(HTTPFetcher):
header_file.seek(0)
# Remove the status line from the beginning of the input
- unused_http_status_line = header_file.readline().lower ()
+ unused_http_status_line = header_file.readline().lower()
if unused_http_status_line.startswith('http/1.1 100 '):
unused_http_status_line = header_file.readline()
unused_http_status_line = header_file.readline()
@@ -309,8 +325,9 @@ class CurlHTTPFetcher(HTTPFetcher):
raise HTTPError("Fetching URL not allowed: %r" % (url,))
data = cStringIO.StringIO()
+
def write_data(chunk):
- if data.tell() > 1024*MAX_RESPONSE_KB:
+ if data.tell() > 1024 * MAX_RESPONSE_KB:
return 0
else:
return data.write(chunk)
@@ -350,6 +367,7 @@ class CurlHTTPFetcher(HTTPFetcher):
finally:
c.close()
+
class HTTPLib2Fetcher(HTTPFetcher):
"""A fetcher that uses C{httplib2} for performing HTTP
requests. This implementation supports HTTP caching.
@@ -419,4 +437,4 @@ class HTTPLib2Fetcher(HTTPFetcher):
final_url=final_url,
headers=dict(httplib2_response.items()),
status=httplib2_response.status,
- )
+ )