-rw-r--r--  M2Crypto/ASN1.py  22
-rw-r--r--  M2Crypto/AuthCookie.py  12
-rw-r--r--  M2Crypto/BIO.py  58
-rwxr-xr-x  M2Crypto/BN.py  6
-rw-r--r--  M2Crypto/DH.py  6
-rw-r--r--  M2Crypto/DSA.py  134
-rw-r--r--  M2Crypto/EC.py  50
-rw-r--r--  M2Crypto/EVP.py  42
-rw-r--r--  M2Crypto/Engine.py  6
-rw-r--r--  M2Crypto/Err.py  2
-rw-r--r--  M2Crypto/PGP/PublicKey.py  4
-rw-r--r--  M2Crypto/PGP/PublicKeyRing.py  4
-rw-r--r--  M2Crypto/PGP/RSA.py  2
-rw-r--r--  M2Crypto/PGP/packet.py  32
-rw-r--r--  M2Crypto/RC4.py  6
-rw-r--r--  M2Crypto/RSA.py  60
-rw-r--r--  M2Crypto/SMIME.py  52
-rw-r--r--  M2Crypto/SSL/Checker.py  18
-rw-r--r--  M2Crypto/SSL/SSLServer.py  4
-rw-r--r--  M2Crypto/SSL/Session.py  2
-rw-r--r--  M2Crypto/SSL/TwistedProtocolWrapper.py  60
-rw-r--r--  M2Crypto/SSL/cb.py  2
-rw-r--r--  M2Crypto/SSL/timeout.py  2
-rw-r--r--  M2Crypto/X509.py  122
-rw-r--r--  M2Crypto/ftpslib.py  6
-rw-r--r--  M2Crypto/m2urllib.py  6
-rw-r--r--  M2Crypto/m2urllib2.py  6
-rw-r--r--  M2Crypto/threading.py  2
-rw-r--r--  M2Crypto/util.py  8
-rw-r--r--  contrib/SimpleX509create.py  198
-rw-r--r--  contrib/isaac.httpslib.py  38
-rw-r--r--  contrib/smimeplus.py  22
-rw-r--r--  demo/CipherSaber/CipherSaber.py  2
-rw-r--r--  demo/Zope/ZServer/HTTPS_Server.py  26
-rw-r--r--  demo/Zope/ZServer/medusa/ftps_server.py  40
-rw-r--r--  demo/Zope/ZServer/medusa/https_server.py  4
-rw-r--r--  demo/Zope/lib/python/Products/GuardedFile/GuardedFile.py  4
-rw-r--r--  demo/Zope/lib/python/Products/ZSmime/SmimeTag.py  4
-rw-r--r--  demo/Zope/utilities/x509_user.py  12
-rw-r--r--  demo/Zope/z2s.py  16
-rw-r--r--  demo/Zope27/install_dir/lib/python/ZServer/HTTPS_Server.py  26
-rw-r--r--  demo/Zope27/install_dir/lib/python/ZServer/medusa/https_server.py  4
-rw-r--r--  demo/ZopeX3/install_dir/lib/python/zope/server/http/https_server.py  2
-rw-r--r--  demo/dhtest.py  2
-rw-r--r--  demo/dsa_bench.py  40
-rw-r--r--  demo/dsatest.py  4
-rw-r--r--  demo/ec/ecdhtest.py  4
-rw-r--r--  demo/ec/ecdsatest.py  4
-rwxr-xr-x  demo/https.howto/get_https.py  4
-rw-r--r--  demo/https.howto/https_cli.py  4
-rw-r--r--  demo/https.howto/orig_https_srv.py  2
-rw-r--r--  demo/medusa/START.py  4
-rw-r--r--  demo/medusa/START_xmlrpc.py  4
-rw-r--r--  demo/medusa/asynchat.py  440
-rw-r--r--  demo/medusa/asyncore.py  834
-rw-r--r--  demo/medusa/counter.py  66
-rw-r--r--  demo/medusa/default_handler.py  326
-rw-r--r--  demo/medusa/filesys.py  716
-rw-r--r--  demo/medusa/ftp_server.py  1984
-rw-r--r--  demo/medusa/ftps_server.py  40
-rw-r--r--  demo/medusa/http_date.py  148
-rw-r--r--  demo/medusa/http_server.py  1424
-rw-r--r--  demo/medusa/https_server.py  4
-rw-r--r--  demo/medusa/logger.py  374
-rw-r--r--  demo/medusa/m_syslog.py  208
-rw-r--r--  demo/medusa/poison_handler.py  6
-rw-r--r--  demo/medusa/producers.py  522
-rw-r--r--  demo/medusa/put_handler.py  196
-rw-r--r--  demo/medusa/redirecting_handler.py  70
-rw-r--r--  demo/medusa/status_handler.py  454
-rw-r--r--  demo/medusa/virtual_handler.py  78
-rw-r--r--  demo/medusa/xmlrpc_handler.py  146
-rw-r--r--  demo/medusa054/START.py  4
-rw-r--r--  demo/medusa054/START_xmlrpc.py  4
-rw-r--r--  demo/medusa054/ftps_server.py  40
-rw-r--r--  demo/medusa054/https_server.py  4
-rw-r--r--  demo/medusa054/poison_handler.py  6
-rw-r--r--  demo/perf/memio.py  2
-rw-r--r--  demo/perf/sha1.py  4
-rw-r--r--  demo/pgp/pgpstep.py  24
-rw-r--r--  demo/rsa_bench.py  58
-rw-r--r--  demo/rsatest.py  4
-rw-r--r--  demo/smime.howto/decrypt.py  2
-rw-r--r--  demo/smime.howto/dv.py  2
-rw-r--r--  demo/smime.howto/encrypt.py  2
-rw-r--r--  demo/smime.howto/se.py  2
-rw-r--r--  demo/smime.howto/sendsmime.py  10
-rw-r--r--  demo/smime/sendsmime.py  10
-rw-r--r--  demo/smime/test.py  22
-rw-r--r--  demo/smime/unsmime.py  2
-rw-r--r--  demo/ssl/c.py  4
-rw-r--r--  demo/ssl/c_bio.py  4
-rw-r--r--  demo/ssl/echo-eg.py  4
-rw-r--r--  demo/ssl/echo.py  4
-rw-r--r--  demo/ssl/echod-async.py  12
-rw-r--r--  demo/ssl/echod-eg1.py  6
-rw-r--r--  demo/ssl/echod-forking.py  6
-rw-r--r--  demo/ssl/echod-iterative.py  4
-rw-r--r--  demo/ssl/echod-thread.py  8
-rw-r--r--  demo/ssl/echod-threading.py  6
-rw-r--r--  demo/ssl/echod_lib.py  4
-rw-r--r--  demo/ssl/ftp_tls.py  4
-rw-r--r--  demo/ssl/http_cli_20.py  2
-rw-r--r--  demo/ssl/https_cli.py  6
-rw-r--r--  demo/ssl/https_cli_async.py  12
-rw-r--r--  demo/ssl/https_srv.py  2
-rw-r--r--  demo/ssl/s_client.py  2
-rw-r--r--  demo/ssl/s_server.py  6
-rw-r--r--  demo/ssl/server3.py  2
-rw-r--r--  demo/ssl/sess.py  18
-rw-r--r--  demo/ssl/sess2.py  10
-rw-r--r--  demo/ssl/socklib.py  2
-rwxr-xr-x  demo/ssl/twistedsslclient.py  4
-rw-r--r--  demo/ssl/xmlrpc_srv.py  2
-rw-r--r--  demo/tinderbox/killableprocess.py  12
-rwxr-xr-x  demo/tinderbox/slave.py  36
-rw-r--r--  demo/tinderbox/winprocess.py  10
-rw-r--r--  demo/x509/ca.py  6
-rw-r--r--  demo/x509/proxy_destroy.py  26
-rw-r--r--  demo/x509/proxy_info.py  30
-rw-r--r--  demo/x509/proxy_init.py  18
-rw-r--r--  demo/x509/proxylib.py  2
-rw-r--r--  demo/x509/x509auth.py  24
-rw-r--r--  tests/alltests.py  16
-rw-r--r--  tests/test_asn1.py  12
-rw-r--r--  tests/test_authcookie.py  4
-rw-r--r--  tests/test_bio.py  14
-rw-r--r--  tests/test_bio_file.py  2
-rw-r--r--  tests/test_bio_iobuf.py  4
-rw-r--r--  tests/test_bio_membuf.py  4
-rwxr-xr-x  tests/test_bn.py  16
-rw-r--r--  tests/test_dh.py  6
-rw-r--r--  tests/test_ec_curves.py  42
-rw-r--r--  tests/test_ecdh.py  2
-rw-r--r--  tests/test_ecdsa.py  6
-rw-r--r--  tests/test_engine.py  6
-rw-r--r--  tests/test_evp.py  84
-rw-r--r--  tests/test_obj.py  6
-rw-r--r--  tests/test_pgp.py  2
-rw-r--r--  tests/test_smime.py  78
-rw-r--r--  tests/test_ssl_offline.py  8
-rw-r--r--  tests/test_ssl_win.py  36
142 files changed, 5045 insertions, 5045 deletions
diff --git a/M2Crypto/ASN1.py b/M2Crypto/ASN1.py
index bb306ef..cb3891f 100644
--- a/M2Crypto/ASN1.py
+++ b/M2Crypto/ASN1.py
@@ -24,7 +24,7 @@ class ASN1_Integer:
def __init__(self, asn1int, _pyfree=0):
self.asn1int = asn1int
self._pyfree = _pyfree
-
+
def __cmp__(self, other):
return m2.asn1_integer_cmp(self.asn1int, other.asn1int)
@@ -34,9 +34,9 @@ class ASN1_Integer:
class ASN1_String:
-
+
m2_asn1_string_free = m2.asn1_string_free
-
+
def __init__(self, asn1str, _pyfree=0):
self.asn1str = asn1str
self._pyfree = _pyfree
@@ -49,10 +49,10 @@ class ASN1_String:
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_asn1_string_free(self.asn1str)
-
+
def _ptr(self):
return self.asn1str
-
+
def as_text(self, flags=0):
buf = BIO.MemoryBuffer()
m2.asn1_string_print_ex( buf.bio_ptr(), self.asn1str, flags)
@@ -60,13 +60,13 @@ class ASN1_String:
class ASN1_Object:
-
+
m2_asn1_object_free = m2.asn1_object_free
def __init__(self, asn1obj, _pyfree=0):
self.asn1obj = asn1obj
self._pyfree = _pyfree
-
+
def __del__(self):
if self._pyfree:
self.m2_asn1_object_free(self.asn1obj)
@@ -77,10 +77,10 @@ class ASN1_Object:
class _UTC(datetime.tzinfo):
def tzname(self, dt):
return "UTC"
-
+
def dst(self, dt):
return datetime.timedelta(0)
-
+
def utcoffset(self, dt):
return datetime.timedelta(0)
@@ -137,11 +137,11 @@ class ASN1_UTCTIME:
else:
self.asn1_utctime = m2.asn1_utctime_new ()
self._pyfree = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_asn1_utctime_free(self.asn1_utctime)
-
+
def __str__(self):
assert m2.asn1_utctime_type_check(self.asn1_utctime), "'asn1_utctime' type error'"
buf = BIO.MemoryBuffer()
diff --git a/M2Crypto/AuthCookie.py b/M2Crypto/AuthCookie.py
index a70d073..4a3cd49 100644
--- a/M2Crypto/AuthCookie.py
+++ b/M2Crypto/AuthCookie.py
@@ -5,7 +5,7 @@ Copyright (c) 1999-2002 Ng Pheng Siong. All rights reserved."""
# M2Crypto
import Rand, m2
-# Python. Cookie is bundled with Python 2.x.
+# Python. Cookie is bundled with Python 2.x.
import Cookie, binascii, re, time
@@ -38,10 +38,10 @@ class AuthCookieJar:
def __init__(self):
self._key = Rand.rand_bytes(self._keylen)
-
+
def _hmac(self, key, data):
return binascii.b2a_base64(m2.hmac(key, data, m2.sha1()))[:-1]
-
+
def makeCookie(self, expiry, data):
dough = mix(expiry, data)
return AuthCookie(expiry, data, dough, self._hmac(self._key, dough))
@@ -57,7 +57,7 @@ class AuthCookieJar:
and (c.output() == cookie.output())
def isGoodCookieString(self, cookie_str):
- c = Cookie.SmartCookie()
+ c = Cookie.SmartCookie()
c.load(cookie_str)
if _TOKEN not in c:
return 0
@@ -70,7 +70,7 @@ class AuthCookieJar:
class AuthCookie:
-
+
def __init__(self, expiry, data, dough, mac):
self._expiry = expiry
self._data = data
@@ -104,7 +104,7 @@ class AuthCookie:
"""Return 1 if the cookie has expired, 0 otherwise."""
return (time.time() > self._expiry)
- # XXX Following methods are for WebKit only. These should be pushed
+ # XXX Following methods are for WebKit only. These should be pushed
# to WKAuthCookie.
def name(self):
return self._name
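
The AuthCookie hunks above are whitespace-only, so the API they touch is unchanged. A minimal round-trip sketch of that API, assuming the jar's isGoodCookie() verifier (only partially visible in these hunks)::

    import time
    from M2Crypto.AuthCookie import AuthCookieJar

    jar = AuthCookieJar()
    # Issue a cookie valid for one hour; data is an opaque string.
    cookie = jar.makeCookie(time.time() + 3600, 'user=alice')
    assert jar.isGoodCookie(cookie)   # MAC verifies against the jar's key
    assert not cookie.isExpired()
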
diff --git a/M2Crypto/BIO.py b/M2Crypto/BIO.py
index f66ec17..d1c556b 100644
--- a/M2Crypto/BIO.py
+++ b/M2Crypto/BIO.py
@@ -2,7 +2,7 @@
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
-import m2
+import m2
# Deprecated
from m2 import bio_do_handshake as bio_do_ssl_handshake
@@ -25,7 +25,7 @@ class BIO:
self._close_cb = _close_cb
self.closed = 0
self.write_closed = 0
-
+
def __del__(self):
if self._pyfree:
self.m2_bio_free(self.bio)
@@ -78,7 +78,7 @@ class BIO:
def writeable(self):
return (not self.closed) and (not self.write_closed)
-
+
def write(self, data):
if not self.writeable():
raise IOError('cannot write')
@@ -104,10 +104,10 @@ class BIO:
def should_retry(self):
"""
Can the call be attempted again, or was there an error
- ie do_handshake
-
+ ie do_handshake
+
"""
- return m2.bio_should_retry(self.bio)
+ return m2.bio_should_retry(self.bio)
def should_read(self):
"""
@@ -115,22 +115,22 @@ class BIO:
should read more data
"""
return m2.bio_should_read(self.bio)
-
+
def should_write(self):
"""
Returns whether the cause of the condition is the bio
should write more data
"""
return m2.bio_should_write(self.bio)
-
+
class MemoryBuffer(BIO):
"""
- Object interface to BIO_s_mem.
-
- Empirical testing suggests that this class performs less well than cStringIO,
- because cStringIO is implemented in C, whereas this class is implemented in
- Python. Thus, the recommended practice is to use cStringIO for regular work and
+ Object interface to BIO_s_mem.
+
+ Empirical testing suggests that this class performs less well than cStringIO,
+ because cStringIO is implemented in C, whereas this class is implemented in
+ Python. Thus, the recommended practice is to use cStringIO for regular work and
convert said cStringIO object to a MemoryBuffer object only when necessary.
"""
@@ -151,7 +151,7 @@ class MemoryBuffer(BIO):
return m2.bio_read(self.bio, size)
else:
return m2.bio_read(self.bio, m2.bio_ctrl_pending(self.bio))
-
+
# Backwards-compatibility.
getvalue = read_all = read
@@ -165,8 +165,8 @@ class MemoryBuffer(BIO):
class File(BIO):
"""
- Object interface to BIO_s_fp.
-
+ Object interface to BIO_s_fp.
+
This class interfaces Python to OpenSSL functions that expect BIO *. For
general file manipulation in Python, use Python's builtin file object.
"""
@@ -189,8 +189,8 @@ def openfile(filename, mode='rb'):
class IOBuffer(BIO):
"""
- Object interface to BIO_f_buffer.
-
+ Object interface to BIO_f_buffer.
+
Its principal function is to be BIO_push()'ed on top of a BIO_f_ssl, so
that makefile() of said underlying SSL socket works.
"""
@@ -203,12 +203,12 @@ class IOBuffer(BIO):
self.io = m2.bio_new(m2.bio_f_buffer())
self.bio = m2.bio_push(self.io, under_bio._ptr())
# This reference keeps the underlying BIO alive while we're not closed.
- self._under_bio = under_bio
+ self._under_bio = under_bio
if 'w' in mode:
self.write_closed = 0
else:
self.write_closed = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_bio_pop(self.bio)
@@ -234,7 +234,7 @@ class CipherStream(BIO):
self.obio = obio
self.bio = m2.bio_new(m2.bio_f_cipher())
self.closed = 0
-
+
def __del__(self):
if not getattr(self, 'closed', 1):
self.close()
@@ -243,7 +243,7 @@ class CipherStream(BIO):
self.m2_bio_pop(self.bio)
self.m2_bio_free(self.bio)
self.closed = 1
-
+
def write_close(self):
self.obio.write_close()
@@ -251,34 +251,34 @@ class CipherStream(BIO):
cipher = getattr(m2, algo, None)
if cipher is None:
raise ValueError('unknown cipher', algo)
- m2.bio_set_cipher(self.bio, cipher(), key, iv, op)
+ m2.bio_set_cipher(self.bio, cipher(), key, iv, op)
m2.bio_push(self.bio, self.obio._ptr())
class SSLBio(BIO):
"""
- Object interface to BIO_f_ssl
+ Object interface to BIO_f_ssl
"""
def __init__(self, _pyfree=1):
BIO.__init__(self, _pyfree)
self.bio = m2.bio_new(m2.bio_f_ssl())
self.closed = 0
-
+
def set_ssl(self, conn, close_flag=m2.bio_noclose):
"""
Sets the bio to the SSL pointer which is
- contained in the connection object.
+ contained in the connection object.
"""
- self._pyfree = 0
+ self._pyfree = 0
m2.bio_set_ssl(self.bio, conn.ssl, close_flag)
if close_flag == m2.bio_noclose:
conn.set_ssl_close_flag(m2.bio_close)
-
+
def do_handshake(self):
"""
Do the handshake.
-
+
Return 1 if the handshake completes
Return 0 or a negative number if there is a problem
"""
diff --git a/M2Crypto/BN.py b/M2Crypto/BN.py
index ec741c0..ac2a781 100755
--- a/M2Crypto/BN.py
+++ b/M2Crypto/BN.py
@@ -9,7 +9,7 @@ import m2
def rand(bits, top=-1, bottom=0):
"""
Generate cryptographically strong random number.
-
+
@param bits: Length of random number in bits.
@param top: If -1, the most significant bit can be 0. If 0, the most
significant bit is 1, and if 1, the two most significant
@@ -22,7 +22,7 @@ def rand(bits, top=-1, bottom=0):
def rand_range(range):
"""
Generate a random number in a range.
-
+
@param range: Upper limit for range.
@return: A random number in the range [0, range)
"""
@@ -43,5 +43,5 @@ def randfname(length):
fname = []
for x in range(length):
fname += [letters[m2.bn_rand_range(lettersLen)]]
-
+
return ''.join(fname)
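
To illustrate the rand/rand_range/randfname trio documented above (this assumes the OpenSSL PRNG has already been seeded, e.g. via M2Crypto.Rand)::

    from M2Crypto import BN

    n = BN.rand(256)             # 256-bit random number
    k = BN.rand_range(1000000)   # random number in [0, 1000000)
    tmp = BN.randfname(8)        # 8 random letters, e.g. for a temp file name
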
diff --git a/M2Crypto/DH.py b/M2Crypto/DH.py
index 84bdf3b..a2e55cf 100644
--- a/M2Crypto/DH.py
+++ b/M2Crypto/DH.py
@@ -22,7 +22,7 @@ class DH:
assert m2.dh_type_check(dh)
self.dh = dh
self._pyfree = _pyfree
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_dh_free(self.dh)
@@ -53,10 +53,10 @@ class DH:
def check_params(self):
assert m2.dh_type_check(self.dh), "'dh' type error"
return m2.dh_check(self.dh)
-
+
def gen_key(self):
assert m2.dh_type_check(self.dh), "'dh' type error"
- m2.dh_generate_key(self.dh)
+ m2.dh_generate_key(self.dh)
def compute_key(self, pubkey):
assert m2.dh_type_check(self.dh), "'dh' type error"
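
A sketch of a full exchange built on the gen_key()/compute_key() methods above; gen_params() and set_params() are assumed from the parts of DH.py outside this hunk::

    from M2Crypto import DH

    a = DH.gen_params(512, 2)    # toy prime size, for illustration only
    b = DH.set_params(a.p, a.g)  # peer uses the same parameters (assumed API)
    a.gen_key()
    b.gen_key()
    # Both sides derive the same shared secret from the other's public key.
    assert a.compute_key(b.pub) == b.compute_key(a.pub)
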
diff --git a/M2Crypto/DSA.py b/M2Crypto/DSA.py
index 25ca08d..9d20107 100644
--- a/M2Crypto/DSA.py
+++ b/M2Crypto/DSA.py
@@ -21,16 +21,16 @@ class DSA:
"""
This class is a context supporting DSA key and parameter
values, signing and verifying.
-
+
Simple example::
-
+
from M2Crypto import EVP, DSA, util
-
+
message = 'Kilroy was here!'
md = EVP.MessageDigest('sha1')
- md.update(message)
+ md.update(message)
digest = md.final()
-
+
dsa = DSA.gen_params(1024)
dsa.gen_key()
r, s = dsa.sign(digest)
@@ -50,7 +50,7 @@ class DSA:
assert m2.dsa_type_check(dsa), "'dsa' type error"
self.dsa = dsa
self._pyfree = _pyfree
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_dsa_free(self.dsa)
@@ -58,7 +58,7 @@ class DSA:
def __len__(self):
"""
Return the key length.
-
+
@rtype: int
@return: the DSA key length in bits
"""
@@ -68,9 +68,9 @@ class DSA:
def __getattr__(self, name):
"""
Return specified DSA parameters and key values.
-
+
@type name: str
- @param name: name of variable to be returned. Must be
+ @param name: name of variable to be returned. Must be
one of 'p', 'q', 'g', 'pub', 'priv'.
@rtype: str
@return: value of specified variable (a "byte string")
@@ -93,7 +93,7 @@ class DSA:
def set_params(self, p, q, g):
"""
Set new parameters.
-
+
@warning: This does not change the private key, so it may be
unsafe to use this method. It is better to use
gen_params function to create a new DSA object.
@@ -107,12 +107,12 @@ class DSA:
Generate a key pair.
"""
assert m2.dsa_type_check(self.dsa), "'dsa' type error"
- m2.dsa_gen_key(self.dsa)
+ m2.dsa_gen_key(self.dsa)
def save_params(self, filename):
"""
Save the DSA parameters to a file.
-
+
@type filename: str
@param filename: Save the DSA parameters to this file.
@return: 1 (true) if successful
@@ -125,18 +125,18 @@ class DSA:
def save_params_bio(self, bio):
"""
Save DSA parameters to a BIO object.
-
+
@type bio: M2Crypto.BIO object
@param bio: Save DSA parameters to this object.
@return: 1 (true) if successful
"""
return m2.dsa_write_params_bio(self.dsa, bio._ptr())
- def save_key(self, filename, cipher='aes_128_cbc',
+ def save_key(self, filename, cipher='aes_128_cbc',
callback=util.passphrase_callback):
"""
Save the DSA key pair to a file.
-
+
@type filename: str
@param filename: Save the DSA key pair to this file.
@type cipher: str
@@ -149,11 +149,11 @@ class DSA:
bio.close()
return ret
- def save_key_bio(self, bio, cipher='aes_128_cbc',
+ def save_key_bio(self, bio, cipher='aes_128_cbc',
callback=util.passphrase_callback):
"""
Save DSA key pair to a BIO object.
-
+
@type bio: M2Crypto.BIO object
@param bio: Save DSA parameters to this object.
@type cipher: str
@@ -162,7 +162,7 @@ class DSA:
@return: 1 (true) if successful
"""
if cipher is None:
- return m2.dsa_write_key_bio_no_cipher(self.dsa,
+ return m2.dsa_write_key_bio_no_cipher(self.dsa,
bio._ptr(), callback)
else:
ciph = getattr(m2, cipher, None)
@@ -175,9 +175,9 @@ class DSA:
def save_pub_key(self, filename):
"""
Save the DSA public key (with parameters) to a file.
-
+
@type filename: str
- @param filename: Save DSA public key (with parameters)
+ @param filename: Save DSA public key (with parameters)
to this file.
@return: 1 (true) if successful
"""
@@ -189,9 +189,9 @@ class DSA:
def save_pub_key_bio(self, bio):
"""
Save DSA public key (with parameters) to a BIO object.
-
+
@type bio: M2Crypto.BIO object
- @param bio: Save DSA public key (with parameters)
+ @param bio: Save DSA public key (with parameters)
to this object.
@return: 1 (true) if successful
"""
@@ -200,9 +200,9 @@ class DSA:
def sign(self, digest):
"""
Sign the digest.
-
+
@type digest: str
- @param digest: SHA-1 hash of message (same as output
+ @param digest: SHA-1 hash of message (same as output
from MessageDigest, a "byte string")
@rtype: tuple
@return: DSA signature, a tuple of two values, r and s,
@@ -210,14 +210,14 @@ class DSA:
"""
assert self.check_key(), 'key is not initialised'
return m2.dsa_sign(self.dsa, digest)
-
+
def verify(self, digest, r, s):
"""
- Verify a newly calculated digest against the signature
+ Verify a newly calculated digest against the signature
values r and s.
-
+
@type digest: str
- @param digest: SHA-1 hash of message (same as output
+ @param digest: SHA-1 hash of message (same as output
from MessageDigest, a "byte string")
@type r: str
@param r: r value of the signature, a "byte string"
@@ -232,7 +232,7 @@ class DSA:
def sign_asn1(self, digest):
assert self.check_key(), 'key is not initialised'
return m2.dsa_sign_asn1(self.dsa, digest)
-
+
def verify_asn1(self, digest, blob):
assert self.check_key(), 'key is not initialised'
return m2.dsa_verify_asn1(self.dsa, digest, blob)
@@ -240,22 +240,22 @@ class DSA:
def check_key(self):
"""
Check to be sure the DSA object has a valid private key.
-
+
@rtype: int
@return: 1 (true) if a valid private key
"""
assert m2.dsa_type_check(self.dsa), "'dsa' type error"
return m2.dsa_check_key(self.dsa)
-
+
class DSA_pub(DSA):
"""
- This class is a DSA context that only supports a public key
- and verification. It does NOT support a private key or
+ This class is a DSA context that only supports a public key
+ and verification. It does NOT support a private key or
signing.
-
+
"""
def sign(self, *argv):
@@ -265,25 +265,25 @@ class DSA_pub(DSA):
def check_key(self):
return m2.dsa_check_pub_key(self.dsa)
-
+
save_key = DSA.save_pub_key
save_key_bio = DSA.save_pub_key_bio
#---------------------------------------------------------------
-# factories and other functions
+# factories and other functions
def gen_params(bits, callback=util.genparam_callback):
"""
- Factory function that generates DSA parameters and
+ Factory function that generates DSA parameters and
instantiates a DSA object from the output.
@type bits: int
- @param bits: The length of the prime to be generated. If
+ @param bits: The length of the prime to be generated. If
'bits' < 512, it is set to 512.
@type callback: function
- @param callback: A Python callback object that will be
- invoked during parameter generation; it usual
+ @param callback: A Python callback object that will be
+ invoked during parameter generation; it usual
purpose is to provide visual feedback.
@rtype: DSA
@return: instance of DSA.
@@ -315,15 +315,15 @@ def set_params(p, q, g):
def load_params(file, callback=util.passphrase_callback):
"""
- Factory function that instantiates a DSA object with DSA
+ Factory function that instantiates a DSA object with DSA
parameters from a file.
@type file: str
- @param file: Names the file (a path) that contains the PEM
- representation of the DSA parameters.
+ @param file: Names the file (a path) that contains the PEM
+ representation of the DSA parameters.
@type callback: A Python callable
- @param callback: A Python callback object that will be
- invoked if the DSA parameters file is
+ @param callback: A Python callback object that will be
+ invoked if the DSA parameters file is
passphrase-protected.
@rtype: DSA
@return: instance of DSA.
@@ -340,11 +340,11 @@ def load_params_bio(bio, callback=util.passphrase_callback):
parameters from a M2Crypto.BIO object.
@type bio: M2Crypto.BIO object
- @param bio: Contains the PEM representation of the DSA
- parameters.
+ @param bio: Contains the PEM representation of the DSA
+ parameters.
@type callback: A Python callable
- @param callback: A Python callback object that will be
- invoked if the DSA parameters file is
+ @param callback: A Python callback object that will be
+ invoked if the DSA parameters file is
passphrase-protected.
@rtype: DSA
@return: instance of DSA.
@@ -361,11 +361,11 @@ def load_key(file, callback=util.passphrase_callback):
PEM encoded DSA key pair.
@type file: str
- @param file: Names the file (a path) that contains the PEM
- representation of the DSA key pair.
+ @param file: Names the file (a path) that contains the PEM
+ representation of the DSA key pair.
@type callback: A Python callable
- @param callback: A Python callback object that will be
- invoked if the DSA key pair is
+ @param callback: A Python callback object that will be
+ invoked if the DSA key pair is
passphrase-protected.
@rtype: DSA
@return: instance of DSA.
@@ -382,11 +382,11 @@ def load_key_bio(bio, callback=util.passphrase_callback):
PEM encoded DSA key pair.
@type bio: M2Crypto.BIO object
- @param bio: Contains the PEM representation of the DSA
- key pair.
+ @param bio: Contains the PEM representation of the DSA
+ key pair.
@type callback: A Python callable
- @param callback: A Python callback object that will be
- invoked if the DSA key pair is
+ @param callback: A Python callback object that will be
+ invoked if the DSA key pair is
passphrase-protected.
@rtype: DSA
@return: instance of DSA.
@@ -400,15 +400,15 @@ def load_key_bio(bio, callback=util.passphrase_callback):
def load_pub_key(file, callback=util.passphrase_callback):
"""
Factory function that instantiates a DSA_pub object using
- a DSA public key contained in PEM file. The PEM file
+ a DSA public key contained in PEM file. The PEM file
must contain the parameters in addition to the public key.
@type file: str
- @param file: Names the file (a path) that contains the PEM
- representation of the DSA public key.
+ @param file: Names the file (a path) that contains the PEM
+ representation of the DSA public key.
@type callback: A Python callable
- @param callback: A Python callback object that will be
- invoked should the DSA public key be
+ @param callback: A Python callback object that will be
+ invoked should the DSA public key be
passphrase-protected.
@rtype: DSA_pub
@return: instance of DSA_pub.
@@ -422,15 +422,15 @@ def load_pub_key(file, callback=util.passphrase_callback):
def load_pub_key_bio(bio, callback=util.passphrase_callback):
"""
Factory function that instantiates a DSA_pub object using
- a DSA public key contained in PEM format. The PEM
+ a DSA public key contained in PEM format. The PEM
must contain the parameters in addition to the public key.
@type bio: M2Crypto.BIO object
- @param bio: Contains the PEM representation of the DSA
- public key (with params).
+ @param bio: Contains the PEM representation of the DSA
+ public key (with params).
@type callback: A Python callable
- @param callback: A Python callback object that will be
- invoked should the DSA public key be
+ @param callback: A Python callback object that will be
+ invoked should the DSA public key be
passphrase-protected.
@rtype: DSA_pub
@return: instance of DSA_pub.
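
The DSA class docstring above already carries a "Simple example"; spelled out end to end, with the verify step and the ASN.1 variants included::

    from M2Crypto import DSA, EVP

    md = EVP.MessageDigest('sha1')
    md.update('Kilroy was here!')
    digest = md.final()

    dsa = DSA.gen_params(1024)
    dsa.gen_key()
    r, s = dsa.sign(digest)
    assert dsa.verify(digest, r, s)

    blob = dsa.sign_asn1(digest)
    assert dsa.verify_asn1(digest, blob)
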
diff --git a/M2Crypto/EC.py b/M2Crypto/EC.py
index 261ed22..15afec5 100644
--- a/M2Crypto/EC.py
+++ b/M2Crypto/EC.py
@@ -5,7 +5,7 @@ M2Crypto wrapper for OpenSSL ECDH/ECDSA API.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved.
-Portions copyright (c) 2005-2006 Vrije Universiteit Amsterdam.
+Portions copyright (c) 2005-2006 Vrije Universiteit Amsterdam.
All rights reserved."""
import util, BIO, m2
@@ -83,11 +83,11 @@ NID_wap_wsg_idm_ecid_wtls10 = m2.NID_wap_wsg_idm_ecid_wtls10
NID_wap_wsg_idm_ecid_wtls11 = m2.NID_wap_wsg_idm_ecid_wtls11
NID_wap_wsg_idm_ecid_wtls12 = m2.NID_wap_wsg_idm_ecid_wtls12
-# The following two curves, according to OpenSSL, have a
-# "Questionable extension field!" and are not supported by
+# The following two curves, according to OpenSSL, have a
+# "Questionable extension field!" and are not supported by
# the OpenSSL inverse function. ECError: no inverse.
-# As such they cannot be used for signing. They might,
-# however, be usable for encryption but that has not
+# As such they cannot be used for signing. They might,
+# however, be usable for encryption but that has not
# been tested. Until thir usefulness can be established,
# they are not supported at this time.
# NID_ipsec3 = m2.NID_ipsec3
@@ -101,7 +101,7 @@ class EC:
"""
m2_ec_key_free = m2.ec_key_free
-
+
def __init__(self, ec, _pyfree=0):
assert m2.ec_key_type_check(ec), "'ec' type error"
self.ec = ec
@@ -123,7 +123,7 @@ class EC:
to create an EC key pair.
"""
assert m2.ec_key_type_check(self.ec), "'ec' type error"
- m2.ec_key_gen_key(self.ec)
+ m2.ec_key_gen_key(self.ec)
def pub(self):
# Don't let python free
@@ -136,7 +136,7 @@ class EC:
"""
assert self._check_key_type(), "'ec' type error"
return m2.ecdsa_sign(self.ec, digest)
-
+
def verify_dsa(self, digest, r, s):
"""
Verify the given digest using ECDSA. r and s are the ECDSA
@@ -148,16 +148,16 @@ class EC:
def sign_dsa_asn1(self, digest):
assert self._check_key_type(), "'ec' type error"
return m2.ecdsa_sign_asn1(self.ec, digest)
-
+
def verify_dsa_asn1(self, digest, blob):
assert self._check_key_type(), "'ec' type error"
return m2.ecdsa_verify_asn1(self.ec, digest, blob)
def compute_dh_key(self,pub_key):
"""
- Compute the ECDH shared key of this key pair and the given public
- key object. They must both use the same curve. Returns the
- shared key in binary as a buffer object. No Key Derivation Function is
+ Compute the ECDH shared key of this key pair and the given public
+ key object. They must both use the same curve. Returns the
+ shared key in binary as a buffer object. No Key Derivation Function is
applied.
"""
assert self.check_key(), 'key is not initialised'
@@ -207,14 +207,14 @@ class EC:
"""
bio = BIO.openfile(file, 'wb')
return self.save_key_bio(bio, cipher, callback)
-
+
def save_pub_key_bio(self, bio):
"""
Save the public key to an M2Crypto.BIO.BIO object in PEM format.
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object to save key to.
- """
+ """
return m2.ec_key_write_pubkey(self.ec, bio._ptr())
def save_pub_key(self, file):
@@ -226,7 +226,7 @@ class EC:
"""
bio = BIO.openfile(file, 'wb')
return m2.ec_key_write_pubkey(self.ec, bio._ptr())
-
+
def _check_key_type(self):
return m2.ec_key_type_check(self.ec)
@@ -234,11 +234,11 @@ class EC:
assert m2.ec_key_type_check(self.ec), "'ec' type error"
return m2.ec_key_check_key(self.ec)
-
+
class EC_pub(EC):
"""
- Object interface to an EC public key.
+ Object interface to an EC public key.
((don't like this implementation inheritance))
"""
def __init__(self,ec,_pyfree=0):
@@ -261,10 +261,10 @@ class EC_pub(EC):
def gen_params(curve):
"""
- Factory function that generates EC parameters and
+ Factory function that generates EC parameters and
instantiates a EC object from the output.
- @param curve: This is the OpenSSL nid of the curve to use.
+ @param curve: This is the OpenSSL nid of the curve to use.
"""
return EC(m2.ec_key_new_by_curve_name(curve), 1)
@@ -273,10 +273,10 @@ def load_key(file, callback=util.passphrase_callback):
"""
Factory function that instantiates a EC object.
- @param file: Names the file that contains the PEM representation
+ @param file: Names the file that contains the PEM representation
of the EC key pair.
- @param callback: Python callback object that will be invoked
+ @param callback: Python callback object that will be invoked
if the EC key pair is passphrase-protected.
"""
bio = BIO.openfile(file)
@@ -288,9 +288,9 @@ def load_key_bio(bio, callback=util.passphrase_callback):
Factory function that instantiates a EC object.
@param bio: M2Crypto.BIO object that contains the PEM
- representation of the EC key pair.
+ representation of the EC key pair.
- @param callback: Python callback object that will be invoked
+ @param callback: Python callback object that will be invoked
if the EC key pair is passphrase-protected.
"""
return EC(m2.ec_key_read_bio(bio._ptr(), callback), 1)
@@ -305,7 +305,7 @@ def load_pub_key(file):
@rtype: M2Crypto.EC.EC_pub
@return: M2Crypto.EC.EC_pub object.
"""
- bio = BIO.openfile(file)
+ bio = BIO.openfile(file)
return load_pub_key_bio(bio)
@@ -319,7 +319,7 @@ def load_pub_key_bio(bio):
@rtype: M2Crypto.EC.EC_pub
@return: M2Crypto.EC.EC_pub object.
- """
+ """
ec = m2.ec_key_read_pubkey(bio._ptr())
if ec is None:
ec_error()
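
The ECDSA methods above mirror the DSA API. A minimal sketch, assuming the secp384r1 curve NID is exported by this build (only the WTLS NIDs appear in this hunk)::

    from M2Crypto import EC, EVP

    md = EVP.MessageDigest('sha1')
    md.update('data to be signed')
    digest = md.final()

    ec = EC.gen_params(EC.NID_secp384r1)   # NID_secp384r1 assumed available
    ec.gen_key()
    r, s = ec.sign_dsa(digest)
    assert ec.verify_dsa(digest, r, s)
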
diff --git a/M2Crypto/EVP.py b/M2Crypto/EVP.py
index f6ff6b7..0f92e85 100644
--- a/M2Crypto/EVP.py
+++ b/M2Crypto/EVP.py
@@ -17,13 +17,13 @@ m2.evp_init(EVPError)
def pbkdf2(password, salt, iter, keylen):
"""
Derive a key from password using PBKDF2 algorithm specified in RFC 2898.
-
+
@param password: Derive the key from this password.
@type password: str
@param salt: Salt.
@type salt: str
@param iter: Number of iterations to perform.
- @type iter: int
+ @type iter: int
@param keylen: Length of key to produce.
@type keylen: int
@return: Key.
@@ -44,7 +44,7 @@ class MessageDigest:
self.md=md()
self.ctx=m2.md_ctx_new()
m2.digest_init(self.ctx, self.md)
-
+
def __del__(self):
if getattr(self, 'ctx', None):
self.m2_md_ctx_free(self.ctx)
@@ -52,7 +52,7 @@ class MessageDigest:
def update(self, data):
"""
Add data to be digested.
-
+
@return: -1 for Python error, 1 for success, 0 for OpenSSL failure.
"""
return m2.digest_update(self.ctx, data)
@@ -61,11 +61,11 @@ class MessageDigest:
return m2.digest_final(self.ctx)
# Deprecated.
- digest = final
+ digest = final
class HMAC:
-
+
m2_hmac_ctx_free = m2.hmac_ctx_free
def __init__(self, key, algo='sha1'):
@@ -75,7 +75,7 @@ class HMAC:
self.md=md()
self.ctx=m2.hmac_ctx_new()
m2.hmac_init(self.ctx, key, self.md)
-
+
def __del__(self):
if getattr(self, 'ctx', None):
self.m2_hmac_ctx_free(self.ctx)
@@ -88,7 +88,7 @@ class HMAC:
def final(self):
return m2.hmac_final(self.ctx)
-
+
digest=final
def hmac(key, data, algo='sha1'):
@@ -116,9 +116,9 @@ class Cipher:
m2.cipher_init(self.ctx, self.cipher, key, iv, op)
self.set_padding(padding)
del key
-
+
def __del__(self):
- if getattr(self, 'ctx', None):
+ if getattr(self, 'ctx', None):
self.m2_cipher_ctx_free(self.ctx)
def update(self, data):
@@ -128,14 +128,14 @@ class Cipher:
return m2.cipher_final(self.ctx)
def set_padding(self, padding=1):
- return m2.cipher_set_padding(self.ctx, padding)
+ return m2.cipher_set_padding(self.ctx, padding)
class PKey:
"""
Public Key
"""
-
+
m2_pkey_free = m2.pkey_free
m2_md_ctx_free = m2.md_ctx_free
@@ -147,7 +147,7 @@ class PKey:
self.pkey = m2.pkey_new()
self._pyfree = 1
self._set_context(md)
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_pkey_free(self.pkey)
@@ -239,7 +239,7 @@ class PKey:
@param capture: If true (default), this PKey object will own the RSA
object, meaning that once the PKey object gets
deleted it is no longer safe to use the RSA object.
-
+
@rtype: int
@return: Return 1 for success and 0 for failure.
"""
@@ -259,7 +259,7 @@ class PKey:
rsa_ptr = m2.pkey_get1_rsa(self.pkey)
if rsa_ptr is None:
raise ValueError("PKey instance is not holding a RSA key")
-
+
rsa = RSA.RSA_pub(rsa_ptr, 1)
return rsa
@@ -277,7 +277,7 @@ class PKey:
@type callback: Python callable
@param callback: A Python callable object that is invoked
- to acquire a passphrase with which to protect the key.
+ to acquire a passphrase with which to protect the key.
The default is util.passphrase_callback.
"""
bio = BIO.openfile(file, 'wb')
@@ -297,7 +297,7 @@ class PKey:
@type callback: Python callable
@param callback: A Python callable object that is invoked
- to acquire a passphrase with which to protect the key.
+ to acquire a passphrase with which to protect the key.
The default is util.passphrase_callback.
"""
if cipher is None:
@@ -319,7 +319,7 @@ class PKey:
@type callback: Python callable
@param callback: A Python callable object that is invoked
- to acquire a passphrase with which to protect the key.
+ to acquire a passphrase with which to protect the key.
The default is util.passphrase_callback.
"""
bio = BIO.MemoryBuffer()
@@ -333,19 +333,19 @@ class PKey:
buf = m2.pkey_as_der(self.pkey)
bio = BIO.MemoryBuffer(buf)
return bio.read_all()
-
+
def size(self):
"""
Return the size of the key in bytes.
"""
return m2.pkey_size(self.pkey)
-
+
def get_modulus(self):
"""
Return the modulus in hex format.
"""
return m2.pkey_get_modulus(self.pkey)
-
+
def load_key(file, callback=util.passphrase_callback):
"""
diff --git a/M2Crypto/Engine.py b/M2Crypto/Engine.py
index d6b879b..8688f2d 100644
--- a/M2Crypto/Engine.py
+++ b/M2Crypto/Engine.py
@@ -16,7 +16,7 @@ class Engine:
"""Wrapper for ENGINE object."""
m2_engine_free = m2.engine_free
-
+
def __init__(self, id = None, _ptr = None, _pyfree = 1):
"""Create new Engine from ENGINE pointer or obtain by id"""
if not _ptr and not id:
@@ -34,10 +34,10 @@ class Engine:
def init(self):
"""Obtain a functional reference to the engine.
-
+
@return: 0 on error, non-zero on success."""
return m2.engine_init(self._ptr)
-
+
def finish(self):
"""Release a functional and structural reference to the engine."""
return m2.engine_finish(self._ptr)
diff --git a/M2Crypto/Err.py b/M2Crypto/Err.py
index 3588f76..f8cfe04 100644
--- a/M2Crypto/Err.py
+++ b/M2Crypto/Err.py
@@ -45,5 +45,5 @@ class SSLError(Exception):
class M2CryptoError(Exception):
pass
-
+
diff --git a/M2Crypto/PGP/PublicKey.py b/M2Crypto/PGP/PublicKey.py
index 9ea0870..290bc44 100644
--- a/M2Crypto/PGP/PublicKey.py
+++ b/M2Crypto/PGP/PublicKey.py
@@ -33,10 +33,10 @@ class PublicKey:
self._signature.append(s_pkt)
else:
self._signature = [s_pkt]
-
+
def __getitem__(self, id):
return self._userid[id]
-
+
def __setitem__(self, *args):
raise NotImplementedError
diff --git a/M2Crypto/PGP/PublicKeyRing.py b/M2Crypto/PGP/PublicKeyRing.py
index a8447b3..1a827f1 100644
--- a/M2Crypto/PGP/PublicKeyRing.py
+++ b/M2Crypto/PGP/PublicKeyRing.py
@@ -23,7 +23,7 @@ class PublicKeyRing:
ps = packet_stream(self._keyring)
while 1:
- pkt = ps.read()
+ pkt = ps.read()
if pkt is None:
break
@@ -51,7 +51,7 @@ class PublicKeyRing:
self._spurious.append(pkt)
ps.close()
-
+
def __getitem__(self, id):
return self._userid[id][0]
diff --git a/M2Crypto/PGP/RSA.py b/M2Crypto/PGP/RSA.py
index cb0e909..e5b4d09 100644
--- a/M2Crypto/PGP/RSA.py
+++ b/M2Crypto/PGP/RSA.py
@@ -25,7 +25,7 @@ def new_pub_key(e_n):
'n' is the RSA composite of primes; it is a string in OpenSSL's binary format,
i.e., a number of bytes in big-endian.
- """
+ """
import warnings
warnings.warn('Deprecated. No maintainer for PGP. If you use this, please inform M2Crypto maintainer.', DeprecationWarning)
diff --git a/M2Crypto/PGP/packet.py b/M2Crypto/PGP/packet.py
index cdbb3e6..601312e 100644
--- a/M2Crypto/PGP/packet.py
+++ b/M2Crypto/PGP/packet.py
@@ -2,8 +2,8 @@
This module implements PGP packets per RFC1991 and various source distributions.
-Each packet type is represented by a class; packet classes derive from
-the abstract 'packet' class.
+Each packet type is represented by a class; packet classes derive from
+the abstract 'packet' class.
The 'message digest' packet type, mentioned but not documented in RFC1991,
is not implemented.
@@ -13,7 +13,7 @@ Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
# XXX Work-in-progress.
# Be liberal in what you accept.
-# Be conservative in what you send.
+# Be conservative in what you send.
# Be lazy in what you eval.
import struct
@@ -45,7 +45,7 @@ class packet:
def __init__(self, ctb, body=None):
import warnings
warnings.warn('Deprecated. No maintainer for PGP. If you use this, please inform M2Crypto maintainer.', DeprecationWarning)
-
+
self.ctb = ctb
if body is not None:
self.body = StringIO(body)
@@ -83,11 +83,11 @@ class packet:
return None
def _llf(self, lenf):
- if lenf < 256:
+ if lenf < 256:
return (0, chr(lenf))
elif lenf < 65536:
return (1, struct.pack('>H', lenf))
- else:
+ else:
assert lenf < long(2)**32
return (2, struct.pack('>L', lenf))
@@ -108,7 +108,7 @@ class public_key_packet(packet):
self._nlen = self.body.read(2)
nlen = (struct.unpack('>H', self._nlen)[0] + 7) / 8
self._n = self.body.read(nlen)
-
+
self._elen = self.body.read(2)
elen = (struct.unpack('>H', self._elen)[0] + 7) / 8
self._e = self.body.read(elen)
@@ -155,7 +155,7 @@ class userid_packet(packet):
self.body = self.body.getvalue()
return self.ctb + self.body
- def userid(self):
+ def userid(self):
return self._userid
@@ -225,7 +225,7 @@ class private_key_packet(packet):
self._nlen = self.body.read(2)
nlen = (struct.unpack('>H', self._nlen)[0] + 7) / 8
self._n = self.body.read(nlen)
-
+
self._elen = self.body.read(2)
elen = (struct.unpack('>H', self._elen)[0] + 7) / 8
self._e = self.body.read(elen)
@@ -235,13 +235,13 @@ class private_key_packet(packet):
self._iv = self.body.read(8)
else:
self._iv = None
-
+
for param in ['d', 'p', 'q', 'u']:
_plen = self.body.read(2)
setattr(self, '_'+param+'len', _plen)
plen = (struct.unpack('>H', _plen)[0] + 7) / 8
setattr(self, '_'+param, self.body.read(plen))
-
+
self._cksum = self.body.read(2)
def is_encrypted(self):
@@ -264,7 +264,7 @@ class pke_packet(packet):
self._version = self.body.read(1)
self._keyid = self.body.read(8)
self._pkc = ord(self.body.read(1))
-
+
deklen = (struct.unpack('>H', self.body.read(2))[0] + 7 ) / 8
self._dek = octx_to_num(self.body.read(deklen))
@@ -299,7 +299,7 @@ class compressed_packet(packet):
return stream
-_FACTORY = {
+_FACTORY = {
1 : pke_packet,
2 : signature_packet,
#3 : message_digest_packet, # XXX not implemented
@@ -313,7 +313,7 @@ _FACTORY = {
14 : comment_packet,
pke_packet : 1,
signature_packet : 2,
- #3 : message_digest_packet,
+ #3 : message_digest_packet,
private_key_packet : 5,
public_key_packet : 6,
#8 : compressed_packet,
@@ -372,8 +372,8 @@ class packet_stream:
raise XXXError('corrupted packet')
self._count = self.stream.tell()
- try:
- return _FACTORY[ctbt](ctb0, body)
+ try:
+ return _FACTORY[ctbt](ctb0, body)
except KeyError:
return packet(ctb0, body)
diff --git a/M2Crypto/RC4.py b/M2Crypto/RC4.py
index 1b5d408..123bb2a 100644
--- a/M2Crypto/RC4.py
+++ b/M2Crypto/RC4.py
@@ -14,13 +14,13 @@ class RC4:
self.cipher = rc4_new()
if key:
rc4_set_key(self.cipher, key)
-
+
def __del__(self):
- if getattr(self, 'cipher', None):
+ if getattr(self, 'cipher', None):
self.rc4_free(self.cipher)
def set_key(self, key):
- rc4_set_key(self.cipher, key)
+ rc4_set_key(self.cipher, key)
def update(self, data):
return rc4_update(self.cipher, data)
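
Since RC4 is a stream cipher, running update() again under the same key undoes the encryption. RC4 is cryptographically broken, so this is illustration only::

    from M2Crypto import RC4

    ct = RC4.RC4('my key').update('attack at dawn')
    assert RC4.RC4('my key').update(ct) == 'attack at dawn'
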
diff --git a/M2Crypto/RSA.py b/M2Crypto/RSA.py
index e7fc8a0..ae0a154 100644
--- a/M2Crypto/RSA.py
+++ b/M2Crypto/RSA.py
@@ -26,7 +26,7 @@ class RSA:
assert m2.rsa_type_check(rsa), "'rsa' type error"
self.rsa = rsa
self._pyfree = _pyfree
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_rsa_free(self.rsa)
@@ -144,7 +144,7 @@ class RSA:
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object to save key to.
- """
+ """
return m2.rsa_write_pub_key(self.rsa, bio._ptr())
def save_pub_key(self, file):
@@ -163,7 +163,7 @@ class RSA:
def sign_rsassa_pss(self, digest, algo='sha1', salt_length=20):
"""
Signs a digest with the private key using RSASSA-PSS
-
+
@requires: OpenSSL 0.9.7h or later.
@type digest: str
@@ -171,7 +171,7 @@ class RSA:
@type salt_length: int
@param salt_length: The length of the salt to use
-
+
@type algo: str
@param algo: The hash algorithm to use
@@ -179,11 +179,11 @@ class RSA:
"""
hash = getattr(m2, algo, None)
if hash is None:
- raise ValueError('not such hash algorithm %s' % hash_algo)
+ raise ValueError('not such hash algorithm %s' % hash_algo)
signature = m2.rsa_padding_add_pkcs1_pss(self.rsa, digest, hash(), salt_length)
-
- return self.private_encrypt(signature, m2.no_padding)
+
+ return self.private_encrypt(signature, m2.no_padding)
def verify_rsassa_pss(self, data, signature, algo='sha1', salt_length=20):
"""
@@ -196,7 +196,7 @@ class RSA:
@type signature: str
@param signature: The signature signed with RSASSA-PSS
-
+
@type salt_length: int
@param salt_length: The length of the salt that was used
@@ -204,14 +204,14 @@ class RSA:
@param algo: The hash algorithm to use
@return: 1 or 0, depending on whether the signature was
- verified or not.
+ verified or not.
"""
hash = getattr(m2, algo, None)
if hash is None:
- raise ValueError('not such hash algorithm %s' % hash_algo)
+ raise ValueError('not such hash algorithm %s' % hash_algo)
plain_signature = self.public_decrypt(signature, m2.no_padding)
-
+
return m2.rsa_verify_pkcs1_pss(self.rsa, data, plain_signature, hash(), salt_length)
def sign(self, digest, algo='sha1'):
@@ -223,17 +223,17 @@ class RSA:
@type algo: str
@param algo: The method that created the digest.
- Legal values are 'sha1','sha224', 'sha256', 'ripemd160',
+ Legal values are 'sha1','sha224', 'sha256', 'ripemd160',
and 'md5'.
-
+
@return: a string which is the signature
"""
- digest_type = getattr(m2, 'NID_' + algo, None)
+ digest_type = getattr(m2, 'NID_' + algo, None)
if digest_type is None:
raise ValueError('unknown algorithm', algo)
-
- return m2.rsa_sign(self.rsa, digest, digest_type)
-
+
+ return m2.rsa_sign(self.rsa, digest, digest_type)
+
def verify(self, data, signature, algo='sha1'):
"""
Verifies the signature with the public key
@@ -245,18 +245,18 @@ class RSA:
@param signature: The signature signed with the private key
@type algo: str
- @param algo: The method use to create digest from the data
+ @param algo: The method use to create digest from the data
before it was signed. Legal values are 'sha1','sha224',
'sha256', 'ripemd160', and 'md5'.
@return: True or False, depending on whether the signature was
- verified.
+ verified.
"""
digest_type = getattr(m2, 'NID_' + algo, None)
if digest_type is None:
raise ValueError('unknown algorithm', algo)
-
- return m2.rsa_verify(self.rsa, data, signature, digest_type)
+
+ return m2.rsa_verify(self.rsa, data, signature, digest_type)
class RSA_pub(RSA):
@@ -270,7 +270,7 @@ class RSA_pub(RSA):
raise RSAError('use factory function new_pub_key() to set (e, n)')
else:
self.__dict__[name] = value
-
+
def private_encrypt(self, *argv):
raise RSAError('RSA_pub object has no private key')
@@ -327,7 +327,7 @@ def gen_key(bits, e, callback=keygen_callback):
@rtype: M2Crypto.RSA.RSA
@return: M2Crypto.RSA.RSA object.
- """
+ """
return RSA(m2.rsa_generate_key(bits, e, callback), 1)
@@ -401,7 +401,7 @@ def load_pub_key(file):
@rtype: M2Crypto.RSA.RSA_pub
@return: M2Crypto.RSA.RSA_pub object.
"""
- bio = BIO.openfile(file)
+ bio = BIO.openfile(file)
return load_pub_key_bio(bio)
@@ -415,7 +415,7 @@ def load_pub_key_bio(bio):
@rtype: M2Crypto.RSA.RSA_pub
@return: M2Crypto.RSA.RSA_pub object.
- """
+ """
rsa = m2.rsa_read_pub_key(bio._ptr())
if rsa is None:
rsa_error()
@@ -427,18 +427,18 @@ def new_pub_key(e_n):
Instantiate an RSA_pub object from an (e, n) tuple.
@type e: string
- @param e: The RSA public exponent; it is a string in OpenSSL's MPINT
- format - 4-byte big-endian bit-count followed by the appropriate
+ @param e: The RSA public exponent; it is a string in OpenSSL's MPINT
+ format - 4-byte big-endian bit-count followed by the appropriate
number of bits.
@type n: string
- @param n: The RSA composite of primes; it is a string in OpenSSL's MPINT
- format - 4-byte big-endian bit-count followed by the appropriate
+ @param n: The RSA composite of primes; it is a string in OpenSSL's MPINT
+ format - 4-byte big-endian bit-count followed by the appropriate
number of bits.
@rtype: M2Crypto.RSA.RSA_pub
@return: M2Crypto.RSA.RSA_pub object.
- """
+ """
(e, n) = e_n
rsa = m2.rsa_new()
m2.rsa_set_e(rsa, e)
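
The sign()/verify() pair above operates on a precomputed digest plus the name of the algorithm that produced it. A round-trip sketch::

    from M2Crypto import RSA, EVP

    rsa = RSA.gen_key(2048, 65537)   # default callback prints progress dots

    md = EVP.MessageDigest('sha1')
    md.update('some message')
    digest = md.final()

    sig = rsa.sign(digest, 'sha1')
    assert rsa.verify(digest, sig, 'sha1')   # verify takes the same digest
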
diff --git a/M2Crypto/SMIME.py b/M2Crypto/SMIME.py
index fafcbce..afc705b 100644
--- a/M2Crypto/SMIME.py
+++ b/M2Crypto/SMIME.py
@@ -5,20 +5,20 @@ Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
import BIO, EVP, X509, Err, util
import m2
-PKCS7_TEXT = m2.PKCS7_TEXT
-PKCS7_NOCERTS = m2.PKCS7_NOCERTS
-PKCS7_NOSIGS = m2.PKCS7_NOSIGS
-PKCS7_NOCHAIN = m2.PKCS7_NOCHAIN
-PKCS7_NOINTERN = m2.PKCS7_NOINTERN
-PKCS7_NOVERIFY = m2.PKCS7_NOVERIFY
-PKCS7_DETACHED = m2.PKCS7_DETACHED
-PKCS7_BINARY = m2.PKCS7_BINARY
-PKCS7_NOATTR = m2.PKCS7_NOATTR
-
-PKCS7_SIGNED = m2.PKCS7_SIGNED
-PKCS7_ENVELOPED = m2.PKCS7_ENVELOPED
-PKCS7_SIGNED_ENVELOPED = m2.PKCS7_SIGNED_ENVELOPED # Deprecated
-PKCS7_DATA = m2.PKCS7_DATA
+PKCS7_TEXT = m2.PKCS7_TEXT
+PKCS7_NOCERTS = m2.PKCS7_NOCERTS
+PKCS7_NOSIGS = m2.PKCS7_NOSIGS
+PKCS7_NOCHAIN = m2.PKCS7_NOCHAIN
+PKCS7_NOINTERN = m2.PKCS7_NOINTERN
+PKCS7_NOVERIFY = m2.PKCS7_NOVERIFY
+PKCS7_DETACHED = m2.PKCS7_DETACHED
+PKCS7_BINARY = m2.PKCS7_BINARY
+PKCS7_NOATTR = m2.PKCS7_NOATTR
+
+PKCS7_SIGNED = m2.PKCS7_SIGNED
+PKCS7_ENVELOPED = m2.PKCS7_ENVELOPED
+PKCS7_SIGNED_ENVELOPED = m2.PKCS7_SIGNED_ENVELOPED # Deprecated
+PKCS7_DATA = m2.PKCS7_DATA
class PKCS7_Error(Exception): pass
@@ -35,7 +35,7 @@ class PKCS7:
else:
self.pkcs7 = m2.pkcs7_new()
self._pyfree = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_pkcs7_free(self.pkcs7)
@@ -55,7 +55,7 @@ class PKCS7:
def write_der(self, bio):
return m2.pkcs7_write_bio_der(self.pkcs7, bio._ptr())
- def get0_signers(self, certs, flags = 0):
+ def get0_signers(self, certs, flags = 0):
return X509.X509_Stack(m2.pkcs7_get0_signers(self.pkcs7,
certs.stack, flags), 1)
@@ -69,36 +69,36 @@ def load_pkcs7(p7file):
p7_ptr = m2.pkcs7_read_bio(bio)
finally:
m2.bio_free(bio)
-
+
if p7_ptr is None:
raise PKCS7_Error(Err.get_error())
return PKCS7(p7_ptr, 1)
-
+
def load_pkcs7_bio(p7_bio):
p7_ptr = m2.pkcs7_read_bio(p7_bio._ptr())
if p7_ptr is None:
raise PKCS7_Error(Err.get_error())
return PKCS7(p7_ptr, 1)
-
+
def smime_load_pkcs7(p7file):
bio = m2.bio_new_file(p7file, 'r')
if bio is None:
raise BIO.BIOError(Err.get_error())
-
+
try:
p7_ptr, bio_ptr = m2.smime_read_pkcs7(bio)
finally:
m2.bio_free(bio)
-
+
if p7_ptr is None:
raise SMIME_Error(Err.get_error())
if bio_ptr is None:
return PKCS7(p7_ptr, 1), None
else:
return PKCS7(p7_ptr, 1), BIO.BIO(bio_ptr, 1)
-
+
def smime_load_pkcs7_bio(p7_bio):
p7_ptr, bio_ptr = m2.smime_read_pkcs7(p7_bio._ptr())
@@ -108,8 +108,8 @@ def smime_load_pkcs7_bio(p7_bio):
return PKCS7(p7_ptr, 1), None
else:
return PKCS7(p7_ptr, 1), BIO.BIO(bio_ptr, 1)
-
-
+
+
class Cipher:
"""
@@ -192,13 +192,13 @@ class SMIME:
if not hasattr(self, 'pkey'):
raise SMIME_Error('no private key: use load_key()')
if hasattr(self, 'x509_stack'):
- pkcs7 = m2.pkcs7_sign1(self.x509._ptr(), self.pkey._ptr(),
+ pkcs7 = m2.pkcs7_sign1(self.x509._ptr(), self.pkey._ptr(),
self.x509_stack._ptr(), data_bio._ptr(), flags)
if pkcs7 is None:
raise SMIME_Error(Err.get_error())
return PKCS7(pkcs7, 1)
else:
- pkcs7 = m2.pkcs7_sign0(self.x509._ptr(), self.pkey._ptr(),
+ pkcs7 = m2.pkcs7_sign0(self.x509._ptr(), self.pkey._ptr(),
data_bio._ptr(), flags)
if pkcs7 is None:
raise SMIME_Error(Err.get_error())
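
A minimal signing sketch against the SMIME class whose sign() internals appear above. The file names are hypothetical, and load_key()/write() are assumed from parts of SMIME.py outside this diff::

    from M2Crypto import SMIME, BIO

    s = SMIME.SMIME()
    s.load_key('signer_key.pem', 'signer_cert.pem')   # hypothetical paths

    p7 = s.sign(BIO.MemoryBuffer('a message'))   # returns a PKCS7 object
    out = BIO.MemoryBuffer()
    s.write(out, p7)                             # assumed helper: S/MIME output
    print out.read()
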
diff --git a/M2Crypto/SSL/Checker.py b/M2Crypto/SSL/Checker.py
index 9bcd2f7..0f2ee09 100644
--- a/M2Crypto/SSL/Checker.py
+++ b/M2Crypto/SSL/Checker.py
@@ -29,7 +29,7 @@ class WrongHost(SSLVerificationError):
This exception will be raised if the certificate returned by the
peer was issued for a different host than we tried to connect to.
This could be due to a server misconfiguration or an active attack.
-
+
@param expectedHost: The name of the host we expected to find in the
certificate.
@param actualHost: The name of the host we actually found in the
@@ -39,12 +39,12 @@ class WrongHost(SSLVerificationError):
"""
if fieldName not in ('commonName', 'subjectAltName'):
raise ValueError('Unknown fieldName, should be either commonName or subjectAltName')
-
+
SSLVerificationError.__init__(self)
self.expectedHost = expectedHost
self.actualHost = actualHost
self.fieldName = fieldName
-
+
def __str__(self):
s = 'Peer certificate %s does not match host, expected %s, got %s' \
% (self.fieldName, self.expectedHost, self.actualHost)
@@ -54,9 +54,9 @@ class WrongHost(SSLVerificationError):
class Checker:
-
+
numericIpMatch = re.compile('^[0-9]+(\.[0-9]+)*$')
-
+
def __init__(self, host=None, peerCertHash=None, peerCertDigest='sha1'):
self.host = host
self.fingerprint = peerCertHash
@@ -68,7 +68,7 @@ class Checker:
if host is not None:
self.host = host
-
+
if self.fingerprint:
if self.digest not in ('sha1', 'md5'):
raise ValueError('unsupported digest "%s"' %(self.digest))
@@ -76,7 +76,7 @@ class Checker:
if (self.digest == 'sha1' and len(self.fingerprint) != 40) or \
(self.digest == 'md5' and len(self.fingerprint) != 32):
raise WrongCertificate('peer certificate fingerprint length does not match')
-
+
der = peerCert.as_der()
md = EVP.MessageDigest(self.digest)
md.update(der)
@@ -94,7 +94,7 @@ class Checker:
if self._splitSubjectAltName(self.host, subjectAltName):
hostValidationPassed = True
elif self.useSubjectAltNameOnly:
- raise WrongHost(expectedHost=self.host,
+ raise WrongHost(expectedHost=self.host,
actualHost=subjectAltName,
fieldName='subjectAltName')
except LookupError:
@@ -167,7 +167,7 @@ class Checker:
if self._matchIPAddress(host, certHost[11:]):
return True
return False
-
+
def _match(self, host, certHost):
"""
diff --git a/M2Crypto/SSL/SSLServer.py b/M2Crypto/SSL/SSLServer.py
index 0f87dd9..4d8132e 100644
--- a/M2Crypto/SSL/SSLServer.py
+++ b/M2Crypto/SSL/SSLServer.py
@@ -17,7 +17,7 @@ from M2Crypto import m2
class SSLServer(SocketServer.TCPServer):
def __init__(self, server_address, RequestHandlerClass, ssl_context, bind_and_activate=True):
- """
+ """
Superclass says: Constructor. May be extended, do not override.
This class says: Ho-hum.
"""
@@ -26,7 +26,7 @@ class SSLServer(SocketServer.TCPServer):
self.socket=Connection(self.ssl_ctx)
if bind_and_activate:
self.server_bind()
- self.server_activate()
+ self.server_activate()
def handle_request(self):
request = None
diff --git a/M2Crypto/SSL/Session.py b/M2Crypto/SSL/Session.py
index 1edf5b0..9d21d73 100644
--- a/M2Crypto/SSL/Session.py
+++ b/M2Crypto/SSL/Session.py
@@ -14,7 +14,7 @@ class Session:
assert session is not None
self.session = session
self._pyfree = _pyfree
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_ssl_session_free(self.session)
diff --git a/M2Crypto/SSL/TwistedProtocolWrapper.py b/M2Crypto/SSL/TwistedProtocolWrapper.py
index a5a9e02..c4e037e 100644
--- a/M2Crypto/SSL/TwistedProtocolWrapper.py
+++ b/M2Crypto/SSL/TwistedProtocolWrapper.py
@@ -29,8 +29,8 @@ def connectSSL(host, port, factory, contextFactory, timeout=30,
postConnectionCheck=Checker.Checker()):
"""
A convenience function to start an SSL/TLS connection using Twisted.
-
- See IReactorSSL interface in Twisted.
+
+ See IReactorSSL interface in Twisted.
"""
wrappingFactory = policies.WrappingFactory(factory)
wrappingFactory.protocol = lambda factory, wrappedProtocol: \
@@ -41,17 +41,17 @@ def connectSSL(host, port, factory, contextFactory, timeout=30,
contextFactory=contextFactory,
postConnectionCheck=postConnectionCheck)
return reactor.connectTCP(host, port, wrappingFactory, timeout, bindAddress)
-
+
def connectTCP(host, port, factory, timeout=30, bindAddress=None,
reactor=twisted.internet.reactor,
postConnectionCheck=Checker.Checker()):
"""
- A convenience function to start a TCP connection using Twisted.
+ A convenience function to start a TCP connection using Twisted.
NOTE: You must call startTLS(ctx) to go into SSL/TLS mode.
- See IReactorTCP interface in Twisted.
+ See IReactorTCP interface in Twisted.
"""
wrappingFactory = policies.WrappingFactory(factory)
wrappingFactory.protocol = lambda factory, wrappedProtocol: \
@@ -65,12 +65,12 @@ def connectTCP(host, port, factory, timeout=30, bindAddress=None,
def listenSSL(port, factory, contextFactory, backlog=5, interface='',
- reactor=twisted.internet.reactor,
+ reactor=twisted.internet.reactor,
postConnectionCheck=_alwaysSucceedsPostConnectionCheck):
"""
- A convenience function to listen for SSL/TLS connections using Twisted.
+ A convenience function to listen for SSL/TLS connections using Twisted.
- See IReactorSSL interface in Twisted.
+ See IReactorSSL interface in Twisted.
"""
wrappingFactory = policies.WrappingFactory(factory)
wrappingFactory.protocol = lambda factory, wrappedProtocol: \
@@ -84,14 +84,14 @@ def listenSSL(port, factory, contextFactory, backlog=5, interface='',
def listenTCP(port, factory, backlog=5, interface='',
- reactor=twisted.internet.reactor,
+ reactor=twisted.internet.reactor,
postConnectionCheck=None):
"""
- A convenience function to listen for TCP connections using Twisted.
-
+ A convenience function to listen for TCP connections using Twisted.
+
NOTE: You must call startTLS(ctx) to go into SSL/TLS mode.
- See IReactorTCP interface in Twisted.
+ See IReactorTCP interface in Twisted.
"""
wrappingFactory = policies.WrappingFactory(factory)
wrappingFactory.protocol = lambda factory, wrappedProtocol: \
@@ -109,15 +109,15 @@ class _BioProxy:
The purpose of this class is to eliminate the __del__ method from
TLSProtocolWrapper, and thus letting it be garbage collected.
"""
-
+
m2_bio_free_all = m2.bio_free_all
def __init__(self, bio):
self.bio = bio
-
+
def _ptr(self):
return self.bio
-
+
def __del__(self):
if self.bio is not None:
self.m2_bio_free_all(self.bio)
@@ -128,15 +128,15 @@ class _SSLProxy:
The purpose of this class is to eliminate the __del__ method from
TLSProtocolWrapper, and thus letting it be garbage collected.
"""
-
+
m2_ssl_free = m2.ssl_free
def __init__(self, ssl):
self.ssl = ssl
-
+
def _ptr(self):
return self.ssl
-
+
def __del__(self):
if self.ssl is not None:
self.m2_ssl_free(self.ssl)
@@ -145,13 +145,13 @@ class _SSLProxy:
class TLSProtocolWrapper(ProtocolWrapper):
"""
A SSL/TLS protocol wrapper to be used with Twisted. Typically
- you would not use this class directly. Use connectTCP,
+ you would not use this class directly. Use connectTCP,
connectSSL, listenTCP, listenSSL functions defined above,
which will hook in this class.
"""
implements(ITLSTransport)
-
+
def __init__(self, factory, wrappedProtocol, startPassThrough, client,
contextFactory, postConnectionCheck):
"""
@@ -172,13 +172,13 @@ class TLSProtocolWrapper(ProtocolWrapper):
#ProtocolWrapper.__init__(self, factory, wrappedProtocol)
#XXX: Twisted 2.0 has a new addition where the wrappingFactory is
# set as the factory of the wrappedProtocol. This is an issue
- # as the wrap should be transparent. What we want is
+ # as the wrap should be transparent. What we want is
# the factory of the wrappedProtocol to be the wrappedFactory and
# not the outer wrappingFactory. This is how it was implemented in
# Twisted 1.3
self.factory = factory
self.wrappedProtocol = wrappedProtocol
-
+
# wrappedProtocol == client/server instance
# factory.wrappedFactory == client/server factory
@@ -195,7 +195,7 @@ class TLSProtocolWrapper(ProtocolWrapper):
if not startPassThrough:
self.startTLS(contextFactory.getContext())
-
+
def clear(self):
"""
Clear this instance, after which it is ready for reuse.
@@ -213,7 +213,7 @@ class TLSProtocolWrapper(ProtocolWrapper):
self.helloDone = 0
# We can reuse self.ctx and it will be deleted automatically
# when this instance dies
-
+
def startTLS(self, ctx):
"""
Start SSL/TLS. If this is not called, this instance just passes data
@@ -241,7 +241,7 @@ class TLSProtocolWrapper(ProtocolWrapper):
m2.ssl_set_connect_state(self.ssl._ptr())
else:
m2.ssl_set_accept_state(self.ssl._ptr())
-
+
m2.ssl_set_bio(self.ssl._ptr(), self.internalBio, self.internalBio)
m2.bio_set_ssl(self.sslBio._ptr(), self.ssl._ptr(), m2.bio_noclose)
@@ -353,7 +353,7 @@ class TLSProtocolWrapper(ProtocolWrapper):
m2bio_should_retry = m2.bio_should_retry
m2bio_ctrl_pending = m2.bio_ctrl_pending
m2bio_read = m2.bio_read
-
+
while 1:
g = m2bio_ctrl_get_write_guarantee(sslBioPtr)
if g > 0 and self.data != '' or clientHello:
@@ -361,9 +361,9 @@ class TLSProtocolWrapper(ProtocolWrapper):
if r <= 0:
assert(m2bio_should_retry(sslBioPtr))
else:
- assert(self.checked)
+ assert(self.checked)
self.data = self.data[r:]
-
+
pending = m2bio_ctrl_pending(networkBio)
if pending:
d = m2bio_read(networkBio, pending)
@@ -387,7 +387,7 @@ class TLSProtocolWrapper(ProtocolWrapper):
m2bio_should_retry = m2.bio_should_retry
m2bio_ctrl_pending = m2.bio_ctrl_pending
m2bio_read = m2.bio_read
-
+
while 1:
g = m2bio_ctrl_get_write_guarantee(networkBio)
if g > 0 and self.encrypted != '':
@@ -396,7 +396,7 @@ class TLSProtocolWrapper(ProtocolWrapper):
assert(m2bio_should_retry(networkBio))
else:
self.encrypted = self.encrypted[r:]
-
+
pending = m2bio_ctrl_pending(sslBioPtr)
if pending:
d = m2bio_read(sslBioPtr, pending)
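
For orientation, a minimal sketch of how the listenTCP/startTLS pair patched
above is meant to be driven: a plain Twisted factory is wrapped, and the
protocol upgrades its transport to TLS once connected. EchoProtocol and the
certificate path are illustrative names, not part of the module.

    # A minimal sketch; EchoProtocol and 'server.pem' are placeholders.
    from twisted.internet import protocol, reactor
    from M2Crypto import SSL
    from M2Crypto.SSL.TwistedProtocolWrapper import listenTCP

    class EchoProtocol(protocol.Protocol):
        def connectionMade(self):
            ctx = SSL.Context('sslv23')
            ctx.load_cert('server.pem')
            self.transport.startTLS(ctx)   # per the NOTE in listenTCP

        def dataReceived(self, data):
            self.transport.write(data)     # echoed back over TLS

    factory = protocol.ServerFactory()
    factory.protocol = EchoProtocol
    listenTCP(8443, factory)
    reactor.run()
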
diff --git a/M2Crypto/SSL/cb.py b/M2Crypto/SSL/cb.py
index c1710cc..60f9dbd 100644
--- a/M2Crypto/SSL/cb.py
+++ b/M2Crypto/SSL/cb.py
@@ -26,7 +26,7 @@ unknown_issuer = [
def ssl_verify_callback(ssl_ctx_ptr, x509_ptr, errnum, errdepth, ok):
# Deprecated
ssl_ctx = Context.map()[long(ssl_ctx_ptr)]
- if errnum in unknown_issuer:
+ if errnum in unknown_issuer:
if ssl_ctx.get_allow_unknown_ca():
sys.stderr.write("policy: %s: permitted...\n" % (m2.x509_get_verify_error(errnum)))
sys.stderr.flush()
diff --git a/M2Crypto/SSL/timeout.py b/M2Crypto/SSL/timeout.py
index d76556d..cb95dfa 100644
--- a/M2Crypto/SSL/timeout.py
+++ b/M2Crypto/SSL/timeout.py
@@ -10,7 +10,7 @@ __all__ = ['DEFAULT_TIMEOUT', 'timeout', 'struct_to_timeout', 'struct_size']
import struct
from M2Crypto import m2
-DEFAULT_TIMEOUT = 600
+DEFAULT_TIMEOUT = 600
class timeout:
diff --git a/M2Crypto/X509.py b/M2Crypto/X509.py
index 61ce3c6..d52fb0b 100644
--- a/M2Crypto/X509.py
+++ b/M2Crypto/X509.py
@@ -32,16 +32,16 @@ def new_extension(name, value, critical=0, _pyfree=1):
x509_ext_ptr = m2.x509v3_ext_conf(lhash, ctx, name, value)
x509_ext = X509_Extension(x509_ext_ptr, _pyfree)
x509_ext.set_critical(critical)
- return x509_ext
+ return x509_ext
class X509_Extension:
"""
X509 Extension
"""
-
+
m2_x509_extension_free = m2.x509_extension_free
-
+
def __init__(self, x509_ext_ptr=None, _pyfree=1):
self.x509_ext = x509_ext_ptr
self._pyfree = _pyfree
@@ -64,7 +64,7 @@ class X509_Extension:
set this extension to critical.
"""
return m2.x509_extension_set_critical(self.x509_ext, critical)
-
+
def get_critical(self):
"""
Return whether or not this is a critical extension.
@@ -73,7 +73,7 @@ class X509_Extension:
@return: Nonzero if this is a critical extension.
"""
return m2.x509_extension_get_critical(self.x509_ext)
-
+
def get_name(self):
"""
Get the extension name, for example 'subjectAltName'.
@@ -83,19 +83,19 @@ class X509_Extension:
def get_value(self, flag=0, indent=0):
"""
Get the extension value, for example 'DNS:www.example.com'.
-
+
@param flag: Flag to control what and how to print.
@param indent: How many spaces to print before actual value.
"""
buf=BIO.MemoryBuffer()
m2.x509_ext_print(buf.bio_ptr(), self.x509_ext, flag, indent)
- return buf.read_all()
+ return buf.read_all()
class X509_Extension_Stack:
"""
X509 Extension Stack
-
+
@warning: Do not modify the underlying OpenSSL stack
except through this interface, or use any OpenSSL functions that do so
indirectly. Doing so will get the OpenSSL stack and the internal pystack
@@ -117,7 +117,7 @@ class X509_Extension_Stack:
self.stack = m2.sk_x509_extension_new_null()
self._pyfree = 1
self.pystack = [] # This must be kept in sync with self.stack
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_sk_x509_extension_free(self.stack)
@@ -128,10 +128,10 @@ class X509_Extension_Stack:
def __getitem__(self, idx):
return self.pystack[idx]
-
+
def __iter__(self):
return iter(self.pystack)
-
+
def _ptr(self):
return self.stack
@@ -151,7 +151,7 @@ class X509_Extension_Stack:
def pop(self):
"""
Pop X509_Extension object from the stack.
-
+
@return: X509_Extension popped
"""
x509_ext_ptr = m2.sk_x509_extension_pop(self.stack)
@@ -171,7 +171,7 @@ class X509_Name_Entry:
def __init__(self, x509_name_entry, _pyfree=0):
self.x509_name_entry = x509_name_entry
self._pyfree = _pyfree
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_x509_name_entry_free(self.x509_name_entry)
@@ -189,14 +189,14 @@ class X509_Name_Entry:
def get_object(self):
return ASN1.ASN1_Object(m2.x509_name_entry_get_object(self.x509_name_entry))
-
+
def get_data(self):
return ASN1.ASN1_String(m2.x509_name_entry_get_data(self.x509_name_entry))
def create_by_txt( self, field, type, entry, len):
return m2.x509_name_entry_create_by_txt(self.x509_name_entry._ptr(),
field, type, entry, len)
-
+
class X509_Name:
"""
@@ -234,18 +234,18 @@ class X509_Name:
else:
self.x509_name = m2.x509_name_new ()
self._pyfree = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_x509_name_free(self.x509_name)
def __str__(self):
- assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
+ assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
return m2.x509_name_oneline(self.x509_name)
def __getattr__(self, attr):
if attr in self.nid:
- assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
+ assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
return m2.x509_name_by_nid(self.x509_name, self.nid[attr])
if attr in self.__dict__:
@@ -262,7 +262,7 @@ class X509_Name:
def __len__(self):
return m2.x509_name_entry_count(self.x509_name)
-
+
def __getitem__(self, idx):
if not 0 <= idx < self.entry_count():
raise IndexError("index out of range")
@@ -273,7 +273,7 @@ class X509_Name:
yield self[i]
def _ptr(self):
- #assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
+ #assert m2.x509_name_type_check(self.x509_name), "'x509_name' type error"
return self.x509_name
def add_entry_by_txt(self, field, type, entry, len, loc, set):
@@ -282,7 +282,7 @@ class X509_Name:
def entry_count( self ):
return m2.x509_name_entry_count( self.x509_name )
-
+
def get_entries_by_nid(self, nid):
ret = []
lastpos = -1
@@ -292,16 +292,16 @@ class X509_Name:
lastpos)
if lastpos == -1:
break
-
+
ret.append(self[lastpos])
-
+
return ret
-
+
def as_text(self, indent=0, flags=m2.XN_FLAG_COMPAT):
"""
as_text returns the name as a string.
-
- @param indent: Each line in multiline format is indented
+
+ @param indent: Each line in multiline format is indented
by this many spaces.
@param flags: Flags that control how the output should be formatted.
"""
@@ -333,7 +333,7 @@ class X509:
else:
self.x509 = m2.x509_new ()
self._pyfree = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_x509_free(self.x509)
@@ -510,7 +510,7 @@ class X509:
m2x509_get_ext = m2.x509_get_ext
m2x509_extension_get_name = m2.x509_extension_get_name
x509 = self.x509
-
+
for i in range(m2.x509_get_ext_count(x509)):
extPtr = m2x509_get_ext(x509, i)
if m2x509_extension_get_name(extPtr) == name:
@@ -528,7 +528,7 @@ class X509:
"""
if index < 0 or index >= self.get_ext_count():
raise IndexError
-
+
return X509_Extension(m2.x509_get_ext(self.x509, index),
_pyfree=0)
@@ -536,7 +536,7 @@ class X509:
"""
Get X509 extension count.
"""
- return m2.x509_get_ext_count(self.x509)
+ return m2.x509_get_ext_count(self.x509)
def sign(self, pkey, md):
"""
@@ -560,21 +560,21 @@ class X509:
return m2.x509_verify(self.x509, pkey.pkey)
else:
return m2.x509_verify(self.x509, self.get_pubkey().pkey)
-
+
def check_ca(self):
"""
Check if the certificate is a Certificate Authority (CA) certificate.
-
+
@return: 0 if the certificate is not CA, nonzero otherwise.
-
- @requires: OpenSSL 0.9.8 or newer
+
+ @requires: OpenSSL 0.9.8 or newer
"""
return m2.x509_check_ca(self.x509)
-
+
def check_purpose(self, id, ca):
"""
Check if the certificate's purpose matches the asked purpose.
-
+
@param id: Purpose id. See X509_PURPOSE_* constants.
@param ca: 1 if the certificate should be CA, 0 otherwise.
@return: 0 if the certificate purpose does not match, nonzero otherwise.
@@ -584,7 +584,7 @@ class X509:
def get_fingerprint(self, md='md5'):
"""
Get the fingerprint of the certificate.
-
+
@param md: Message digest algorithm to use.
@return: String containing the fingerprint in hex format.
"""
@@ -680,18 +680,18 @@ class X509_Store_Context:
def __init__(self, x509_store_ctx, _pyfree=0):
self.ctx = x509_store_ctx
self._pyfree = _pyfree
-
+
def __del__(self):
if self._pyfree:
self.m2_x509_store_ctx_free(self.ctx)
-
+
def _ptr(self):
return self.ctx
-
+
def get_current_cert(self):
"""
Get current X.509 certificate.
-
+
@warning: The returned certificate is NOT refcounted, so you can not
rely on it being valid once the store context goes away or is modified.
"""
@@ -702,23 +702,23 @@ class X509_Store_Context:
Get error code.
"""
return m2.x509_store_ctx_get_error(self.ctx)
-
+
def get_error_depth(self):
"""
Get error depth.
"""
return m2.x509_store_ctx_get_error_depth(self.ctx)
-
+
def get1_chain(self):
"""
Get certificate chain.
-
+
@return: Reference counted (i.e. safe to use even after the store
context goes away) stack of certificates in the chain.
@rtype: X509_Stack
"""
return X509_Stack(m2.x509_store_ctx_get1_chain(self.ctx), 1, 1)
-
+
class X509_Store:
"""
@@ -734,7 +734,7 @@ class X509_Store:
else:
self.store = m2.x509_store_new()
self._pyfree = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_x509_store_free(self.store)
@@ -743,17 +743,17 @@ class X509_Store:
return self.store
def load_info(self, file):
- ret = m2.x509_store_load_locations(self.store, file)
+ ret = m2.x509_store_load_locations(self.store, file)
if ret < 1:
raise X509Error(Err.get_error())
return ret
load_locations = load_info
-
+
def add_x509(self, x509):
assert isinstance(x509, X509)
return m2.x509_store_add_cert(self.store, x509._ptr())
-
+
add_cert = add_x509
@@ -783,18 +783,18 @@ class X509_Stack:
self.stack = m2.sk_x509_new_null()
self._pyfree = 1
self.pystack = [] # This must be kept in sync with self.stack
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_sk_x509_free(self.stack)
-
+
def __len__(self):
assert m2.sk_x509_num(self.stack) == len(self.pystack)
return len(self.pystack)
def __getitem__(self, idx):
return self.pystack[idx]
-
+
def __iter__(self):
return iter(self.pystack)
@@ -804,7 +804,7 @@ class X509_Stack:
def push(self, x509):
"""
push an X509 certificate onto the stack.
-
+
@param x509: X509 object.
@return: The number of X509 objects currently on the stack.
"""
@@ -817,9 +817,9 @@ class X509_Stack:
def pop(self):
"""
pop a certificate from the stack.
-
+
@return: X509 object that was popped, or None if there is nothing
- to pop.
+ to pop.
"""
x509_ptr = m2.sk_x509_pop(self.stack)
if x509_ptr is None:
@@ -831,13 +831,13 @@ class X509_Stack:
"""
Return the stack as a DER encoded string
"""
- return m2.get_der_encoding_stack(self.stack)
+ return m2.get_der_encoding_stack(self.stack)
def new_stack_from_der(der_string):
"""
Create a new X509_Stack from DER string.
-
+
@return: X509_Stack
"""
stack_ptr = m2.make_stack_from_der_sequence(der_string)
@@ -860,11 +860,11 @@ class Request:
else:
self.req = m2.x509_req_new()
self._pyfree = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_x509_req_free(self.req)
-
+
def as_text(self):
buf=BIO.MemoryBuffer()
m2.x509_req_print(buf.bio_ptr(), self.req)
@@ -883,7 +883,7 @@ class Request:
def save_pem(self, filename):
bio=BIO.openfile(filename, 'wb')
return m2.x509_req_write_pem(bio.bio_ptr(), self.req)
-
+
def save(self, filename, format=FORMAT_PEM):
"""
Saves X.509 certificate request to a file. Default output
@@ -1067,7 +1067,7 @@ class CRL:
else:
self.crl = m2.x509_crl_new()
self._pyfree = 1
-
+
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_x509_crl_free(self.crl)
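
The extension API touched above composes as follows; the calls appear in the
hunks themselves, except that X509_Extension_Stack.push is assumed to mirror
the pop method shown. The DNS value is only an example.

    from M2Crypto import X509

    ext = X509.new_extension('subjectAltName', 'DNS:www.example.com')
    ext.set_critical(0)
    assert ext.get_name() == 'subjectAltName'
    assert ext.get_value() == 'DNS:www.example.com'
    assert ext.get_critical() == 0

    # Extensions can be accumulated on a stack, e.g. before adding
    # them to a certificate:
    stack = X509.X509_Extension_Stack()
    stack.push(ext)                        # push assumed; pop is shown above
    assert len(stack) == 1 and stack[0].get_name() == 'subjectAltName'
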
diff --git a/M2Crypto/ftpslib.py b/M2Crypto/ftpslib.py
index b7d82fd..fbd2bd2 100644
--- a/M2Crypto/ftpslib.py
+++ b/M2Crypto/ftpslib.py
@@ -74,12 +74,12 @@ class FTP_TLS(FTP):
self.voidcmd('PBSZ 0')
self.voidcmd('PROT P')
self.prot = 1
-
+
def prot_c(self):
"""Set up data connection in the clear."""
self.voidcmd('PROT C')
self.prot = 0
-
+
def ntransfercmd(self, cmd, rest=None):
"""Initiate a data transfer."""
conn, size = FTP.ntransfercmd(self, cmd, rest)
@@ -90,5 +90,5 @@ class FTP_TLS(FTP):
conn.set_session(self.sock.get_session())
conn.connect_ssl()
return conn, size
-
+
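
A hedged sketch of driving FTP_TLS with the prot_p/prot_c commands patched
above; the ssl_ctx constructor keyword and the auth_tls() upgrade step are
assumed from the module's usual shape, and host and credentials are
placeholders.

    from M2Crypto import SSL, ftpslib

    ctx = SSL.Context('sslv23')
    f = ftpslib.FTP_TLS(ssl_ctx=ctx)       # assumed keyword
    f.connect('ftp.example.com', 21)
    f.auth_tls()                           # assumed: AUTH TLS upgrade
    f.login('user', 'secret')
    f.prot_p()                             # PBSZ 0 + PROT P, as above
    f.retrlines('LIST')
    f.quit()
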
diff --git a/M2Crypto/m2urllib.py b/M2Crypto/m2urllib.py
index 5300ba2..139c9a1 100644
--- a/M2Crypto/m2urllib.py
+++ b/M2Crypto/m2urllib.py
@@ -1,6 +1,6 @@
from __future__ import print_function
-"""M2Crypto enhancement to Python's urllib for handling
+"""M2Crypto enhancement to Python's urllib for handling
'https' URLs.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
@@ -66,7 +66,7 @@ def open_https(self, url, data=None, ssl_context=None):
return urllib.addinfourl(fp, resp.msg, "https:" + url)
# Stop again.
-# Minor brain surgery.
+# Minor brain surgery.
URLopener.open_https = open_https
-
+
diff --git a/M2Crypto/m2urllib2.py b/M2Crypto/m2urllib2.py
index 137e052..51b7570 100644
--- a/M2Crypto/m2urllib2.py
+++ b/M2Crypto/m2urllib2.py
@@ -1,5 +1,5 @@
"""
-M2Crypto enhancement to Python's urllib2 for handling
+M2Crypto enhancement to Python's urllib2 for handling
'https' URLs.
Code from urllib2 is Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007
@@ -60,7 +60,7 @@ class HTTPSHandler(AbstractHTTPHandler):
# Our change: Check to see if we're using a proxy.
# Then create an appropriate ssl-aware connection.
- full_url = req.get_full_url()
+ full_url = req.get_full_url()
target_host = urlparse.urlparse(full_url)[1]
if (target_host != host):
@@ -106,7 +106,7 @@ class HTTPSHandler(AbstractHTTPHandler):
resp.msg = r.reason
return resp
-
+
https_request = AbstractHTTPHandler.do_request_
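
Typical use of the handler patched above, assuming the module's
build_opener(ssl_context) convenience exists in this version; the URL is a
placeholder.

    from M2Crypto import SSL, m2urllib2

    ctx = SSL.Context('sslv23')
    opener = m2urllib2.build_opener(ctx)   # assumed helper
    resp = opener.open('https://www.example.com/')
    print(resp.read())
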
diff --git a/M2Crypto/threading.py b/M2Crypto/threading.py
index 4cb8149..a2ab001 100644
--- a/M2Crypto/threading.py
+++ b/M2Crypto/threading.py
@@ -1,5 +1,5 @@
"""
-M2Crypto threading support, required for multithreaded applications.
+M2Crypto threading support, required for multithreaded applications.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
diff --git a/M2Crypto/util.py b/M2Crypto/util.py
index e866246..bd47956 100644
--- a/M2Crypto/util.py
+++ b/M2Crypto/util.py
@@ -1,8 +1,8 @@
"""
M2Crypto utility routines.
-
+
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved.
-
+
Portions created by Open Source Applications Foundation (OSAF) are
Copyright (C) 2004 OSAF. All Rights Reserved.
"""
@@ -29,7 +29,7 @@ def h2b(s):
for i in range(start, len(s), 2):
num=string.atoi("%s"%(s[i:i+2],), 16)
ar.append(chr(num))
- return ar.tostring()
+ return ar.tostring()
def pkcs5_pad(data, blklen=8):
pad=(8-(len(data)%8))
@@ -56,7 +56,7 @@ def genparam_callback(p, n, out=sys.stdout):
def quiet_genparam_callback(p, n, out):
pass
-def passphrase_callback(v, prompt1='Enter passphrase:',
+def passphrase_callback(v, prompt1='Enter passphrase:',
prompt2='Verify passphrase:'):
from getpass import getpass
while 1:
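
The padding rule behind pkcs5_pad above, restated as a standalone demo: pad up
to the block length with bytes whose value equals the pad length, adding a
whole extra block when the input is already aligned. This is a demo of the
rule, not the module function.

    def pkcs5_pad_demo(data, blklen=8):
        pad = blklen - (len(data) % blklen)
        return data + chr(pad) * pad

    assert pkcs5_pad_demo('hello') == 'hello\x03\x03\x03'
    assert len(pkcs5_pad_demo('12345678')) == 16   # whole extra block
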
diff --git a/contrib/SimpleX509create.py b/contrib/SimpleX509create.py
index 3a7c83c..6c6d007 100644
--- a/contrib/SimpleX509create.py
+++ b/contrib/SimpleX509create.py
@@ -15,149 +15,149 @@ MBSTRING_BMP = MBSTRING_FLAG | 2
class Cert:
- def __init__ ( self ):
- self.RsaKey = { 'KeyLength' : 1024,
- 'PubExponent' : 0x10001, # -> 65537
- 'keygen_callback' : self.callback
- }
+ def __init__ ( self ):
+ self.RsaKey = { 'KeyLength' : 1024,
+ 'PubExponent' : 0x10001, # -> 65537
+ 'keygen_callback' : self.callback
+ }
- self.KeyPair = None
- self.PKey = None
+ self.KeyPair = None
+ self.PKey = None
- self.X509Request = None
- self.X509Certificate = None
+ self.X509Request = None
+ self.X509Certificate = None
- def callback ( self, *args ):
- return 'p'
+ def callback ( self, *args ):
+ return 'p'
- def CreatePKey ( self ):
- self.KeyPair = M2Crypto.RSA.gen_key( self.RsaKey['KeyLength'], self.RsaKey['PubExponent'], self.RsaKey['keygen_callback'] )
- #PubKey = M2Crypto.RSA.new_pub_key( self.KeyPair.pub () )
+ def CreatePKey ( self ):
+ self.KeyPair = M2Crypto.RSA.gen_key( self.RsaKey['KeyLength'], self.RsaKey['PubExponent'], self.RsaKey['keygen_callback'] )
+ #PubKey = M2Crypto.RSA.new_pub_key( self.KeyPair.pub () )
- self.KeyPair.save_key( 'KeyPair.pem', cipher='des_ede3_cbc', callback=self.callback )
-
- self.PKey = M2Crypto.EVP.PKey ( md='sha1')
- self.PKey.assign_rsa ( self.KeyPair )
+ self.KeyPair.save_key( 'KeyPair.pem', cipher='des_ede3_cbc', callback=self.callback )
+ self.PKey = M2Crypto.EVP.PKey ( md='sha1')
+ self.PKey.assign_rsa ( self.KeyPair )
- def CreateX509Request ( self ):
- #
- # X509 REQUEST
- #
- self.X509Request = M2Crypto.X509.Request ()
+ def CreateX509Request ( self ):
+ #
+ # X509 REQUEST
+ #
- #
- # subject
- #
+ self.X509Request = M2Crypto.X509.Request ()
- X509Name = M2Crypto.X509.X509_Name ()
+ #
+ # subject
+ #
- X509Name.add_entry_by_txt ( field='C', type=MBSTRING_ASC, entry='austria', len=-1, loc=-1, set=0 ) # country name
- X509Name.add_entry_by_txt ( field='SP', type=MBSTRING_ASC, entry='kernten', len=-1, loc=-1, set=0 ) # state of province name
- X509Name.add_entry_by_txt ( field='L', type=MBSTRING_ASC, entry='stgallen', len=-1, loc=-1, set=0 ) # locality name
- X509Name.add_entry_by_txt ( field='O', type=MBSTRING_ASC, entry='labor', len=-1, loc=-1, set=0 ) # organization name
- X509Name.add_entry_by_txt ( field='OU', type=MBSTRING_ASC, entry='it-department', len=-1, loc=-1, set=0 ) # organizational unit name
- X509Name.add_entry_by_txt ( field='CN', type=MBSTRING_ASC, entry='Certificate client', len=-1, loc=-1, set=0 ) # common name
- X509Name.add_entry_by_txt ( field='Email', type=MBSTRING_ASC, entry='user@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
- X509Name.add_entry_by_txt ( field='emailAddress', type=MBSTRING_ASC, entry='user@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
+ X509Name = M2Crypto.X509.X509_Name ()
- self.X509Request.set_subject_name( X509Name )
+ X509Name.add_entry_by_txt ( field='C', type=MBSTRING_ASC, entry='austria', len=-1, loc=-1, set=0 ) # country name
+ X509Name.add_entry_by_txt ( field='SP', type=MBSTRING_ASC, entry='kernten', len=-1, loc=-1, set=0 ) # state of province name
+ X509Name.add_entry_by_txt ( field='L', type=MBSTRING_ASC, entry='stgallen', len=-1, loc=-1, set=0 ) # locality name
+ X509Name.add_entry_by_txt ( field='O', type=MBSTRING_ASC, entry='labor', len=-1, loc=-1, set=0 ) # organization name
+ X509Name.add_entry_by_txt ( field='OU', type=MBSTRING_ASC, entry='it-department', len=-1, loc=-1, set=0 ) # organizational unit name
+ X509Name.add_entry_by_txt ( field='CN', type=MBSTRING_ASC, entry='Certificate client', len=-1, loc=-1, set=0 ) # common name
+ X509Name.add_entry_by_txt ( field='Email', type=MBSTRING_ASC, entry='user@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
+ X509Name.add_entry_by_txt ( field='emailAddress', type=MBSTRING_ASC, entry='user@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
- #
- # publickey
- #
+ self.X509Request.set_subject_name( X509Name )
- self.X509Request.set_pubkey ( pkey=self.PKey )
- self.X509Request.sign ( pkey=self.PKey, md='sha1' )
- #print(X509Request.as_text ())
+ #
+ # publickey
+ #
+ self.X509Request.set_pubkey ( pkey=self.PKey )
+ self.X509Request.sign ( pkey=self.PKey, md='sha1' )
+ #print(X509Request.as_text ())
- def CreateX509Certificate ( self ):
- #
- # X509 CERTIFICATE
- #
- self.X509Certificate = M2Crypto.X509.X509 ()
+ def CreateX509Certificate ( self ):
+ #
+ # X509 CERTIFICATE
+ #
- #
- # version
- #
+ self.X509Certificate = M2Crypto.X509.X509 ()
- self.X509Certificate.set_version ( 0 )
+ #
+ # version
+ #
- #
- # time notBefore
- #
+ self.X509Certificate.set_version ( 0 )
- ASN1 = M2Crypto.ASN1.ASN1_UTCTIME ()
- ASN1.set_time ( 500 )
- self.X509Certificate.set_not_before( ASN1 )
+ #
+ # time notBefore
+ #
- #
- # time notAfter
- #
+ ASN1 = M2Crypto.ASN1.ASN1_UTCTIME ()
+ ASN1.set_time ( 500 )
+ self.X509Certificate.set_not_before( ASN1 )
- ASN1 = M2Crypto.ASN1.ASN1_UTCTIME ()
- ASN1.set_time ( 500 )
- self.X509Certificate.set_not_after( ASN1 )
+ #
+ # time notAfter
+ #
- #
- # public key
- #
+ ASN1 = M2Crypto.ASN1.ASN1_UTCTIME ()
+ ASN1.set_time ( 500 )
+ self.X509Certificate.set_not_after( ASN1 )
- self.X509Certificate.set_pubkey ( pkey=self.PKey )
-
- #
- # subject
- #
+ #
+ # public key
+ #
- X509Name = self.X509Request.get_subject ()
+ self.X509Certificate.set_pubkey ( pkey=self.PKey )
- #print(X509Name.entry_count ())
- #print(X509Name.as_text ())
+ #
+ # subject
+ #
- self.X509Certificate.set_subject_name( X509Name )
+ X509Name = self.X509Request.get_subject ()
- #
- # issuer
- #
+ #print(X509Name.entry_count ())
+ #print(X509Name.as_text ())
- X509Name = M2Crypto.X509.X509_Name ( M2Crypto.m2.x509_name_new () )
+ self.X509Certificate.set_subject_name( X509Name )
- X509Name.add_entry_by_txt ( field='C', type=MBSTRING_ASC, entry='germany', len=-1, loc=-1, set=0 ) # country name
- X509Name.add_entry_by_txt ( field='SP', type=MBSTRING_ASC, entry='bavaria', len=-1, loc=-1, set=0 ) # state of province name
- X509Name.add_entry_by_txt ( field='L', type=MBSTRING_ASC, entry='munich', len=-1, loc=-1, set=0 ) # locality name
- X509Name.add_entry_by_txt ( field='O', type=MBSTRING_ASC, entry='sbs', len=-1, loc=-1, set=0 ) # organization name
- X509Name.add_entry_by_txt ( field='OU', type=MBSTRING_ASC, entry='it-department', len=-1, loc=-1, set=0 ) # organizational unit name
- X509Name.add_entry_by_txt ( field='CN', type=MBSTRING_ASC, entry='Certificate Authority', len=-1, loc=-1, set=0 ) # common name
- X509Name.add_entry_by_txt ( field='Email', type=MBSTRING_ASC, entry='admin@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
- X509Name.add_entry_by_txt ( field='emailAddress', type=MBSTRING_ASC, entry='admin@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
+ #
+ # issuer
+ #
- #print(X509Name.entry_count ())
- #print(X509Name.as_text ())
+ X509Name = M2Crypto.X509.X509_Name ( M2Crypto.m2.x509_name_new () )
- self.X509Certificate.set_issuer_name( X509Name )
+ X509Name.add_entry_by_txt ( field='C', type=MBSTRING_ASC, entry='germany', len=-1, loc=-1, set=0 ) # country name
+ X509Name.add_entry_by_txt ( field='SP', type=MBSTRING_ASC, entry='bavaria', len=-1, loc=-1, set=0 ) # state of province name
+ X509Name.add_entry_by_txt ( field='L', type=MBSTRING_ASC, entry='munich', len=-1, loc=-1, set=0 ) # locality name
+ X509Name.add_entry_by_txt ( field='O', type=MBSTRING_ASC, entry='sbs', len=-1, loc=-1, set=0 ) # organization name
+ X509Name.add_entry_by_txt ( field='OU', type=MBSTRING_ASC, entry='it-department', len=-1, loc=-1, set=0 ) # organizational unit name
+ X509Name.add_entry_by_txt ( field='CN', type=MBSTRING_ASC, entry='Certificate Authority', len=-1, loc=-1, set=0 ) # common name
+ X509Name.add_entry_by_txt ( field='Email', type=MBSTRING_ASC, entry='admin@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
+ X509Name.add_entry_by_txt ( field='emailAddress', type=MBSTRING_ASC, entry='admin@localhost', len=-1, loc=-1, set=0 ) # pkcs9 email address
- #
- # signing
- #
+ #print(X509Name.entry_count ())
+ #print(X509Name.as_text ())
- self.X509Certificate.sign( pkey=self.PKey, md='sha1' )
- print(self.X509Certificate.as_text ())
+ self.X509Certificate.set_issuer_name( X509Name )
+
+ #
+ # signing
+ #
+
+ self.X509Certificate.sign( pkey=self.PKey, md='sha1' )
+ print(self.X509Certificate.as_text ())
if __name__ == '__main__':
- run = Cert ()
- run.CreatePKey ()
- run.CreateX509Request ()
- run.CreateX509Certificate ()
+ run = Cert ()
+ run.CreatePKey ()
+ run.CreateX509Request ()
+ run.CreateX509Certificate ()
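
Condensed from the flow above, minus the validity times: generate an RSA key,
wrap it in an EVP.PKey, build and sign a request, then self-sign a
certificate. MBSTRING_ASC is written out as 0x1000 | 1 here, and the silent
keygen callback stands in for the demo's.

    import M2Crypto

    rsa = M2Crypto.RSA.gen_key(1024, 0x10001, lambda *args: None)
    pkey = M2Crypto.EVP.PKey(md='sha1')
    pkey.assign_rsa(rsa)

    name = M2Crypto.X509.X509_Name()
    name.add_entry_by_txt(field='CN', type=0x1000 | 1,
                          entry='Certificate client',
                          len=-1, loc=-1, set=0)

    req = M2Crypto.X509.Request()
    req.set_subject_name(name)
    req.set_pubkey(pkey=pkey)
    req.sign(pkey=pkey, md='sha1')

    cert = M2Crypto.X509.X509()
    cert.set_version(0)
    cert.set_subject_name(req.get_subject())
    cert.set_pubkey(pkey=pkey)
    cert.sign(pkey=pkey, md='sha1')
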
diff --git a/contrib/isaac.httpslib.py b/contrib/isaac.httpslib.py
index a5ea5e2..1415e4f 100644
--- a/contrib/isaac.httpslib.py
+++ b/contrib/isaac.httpslib.py
@@ -1,6 +1,6 @@
from __future__ import print_function
-"""M2Crypto support for Python 1.5.2 and Python 2.x's httplib.
+"""M2Crypto support for Python 1.5.2 and Python 2.x's httplib.
Copyright (c) 1999-2002 Ng Pheng Siong. All rights reserved."""
@@ -9,7 +9,7 @@ from httplib import *
import SSL
if sys.version[0] == '2':
-
+
if sys.version[:3] in ['2.1', '2.2']:
# In 2.1 and above, httplib exports "HTTP" only.
from httplib import HTTPConnection, HTTPS_PORT
@@ -17,16 +17,16 @@ if sys.version[0] == '2':
from httplib import HTTPResponse,FakeSocket
class HTTPSConnection(HTTPConnection):
-
+
"""
This class allows communication via SSL using M2Crypto.
"""
-
+
default_port = HTTPS_PORT
-
+
def __init__(self, host, port=None, **ssl):
keys = ssl.keys()
- try:
+ try:
keys.remove('key_file')
except ValueError:
pass
@@ -46,11 +46,11 @@ if sys.version[0] == '2':
except KeyError:
self.ssl_ctx = SSL.Context('sslv23')
HTTPConnection.__init__(self, host, port)
-
+
def connect(self):
self.sock = SSL.Connection(self.ssl_ctx)
self.sock.connect((self.host, self.port))
-
+
def close(self):
# This kludges around line 545 of httplib.py,
# which closes the connection in this object;
@@ -58,7 +58,7 @@ if sys.version[0] == '2':
# object.
#
# M2Crypto doesn't close-here-keep-open-there,
- # so, in effect, we don't close until the whole
+ # so, in effect, we don't close until the whole
# business is over and gc kicks in.
#
# Long-running callers beware leakage.
@@ -70,9 +70,9 @@ if sys.version[0] == '2':
class HTTPS(HTTP):
-
+
_connection_class = HTTPSConnection
-
+
def __init__(self, host='', port=None, **ssl):
HTTP.__init__(self, host, port)
try:
@@ -84,7 +84,7 @@ if sys.version[0] == '2':
elif sys.version[:3] == '1.5':
class HTTPS(HTTP):
-
+
def __init__(self, ssl_context, host='', port=None):
assert isinstance(ssl_context, SSL.Context)
self.debuglevel=0
@@ -92,7 +92,7 @@ elif sys.version[:3] == '1.5':
self.ssl_ctx=ssl_context
if host:
self.connect(host, port)
-
+
def connect(self, host, port=None):
# Cribbed from httplib.HTTP.
if not port:
@@ -112,7 +112,7 @@ elif sys.version[:3] == '1.5':
class HTTPProxyConnection(HTTPConnection):
"""
This class provides HTTP access through (authenticated) proxies.
-
+
Example:
If the HTTP proxy address is proxy.your.org:8080, an authenticated proxy
(one which requires a username/password combination in order to serve
@@ -163,7 +163,7 @@ class HTTPProxyConnection(HTTPConnection):
HTTPConnection.putrequest(self, method, newurl)
# Add proxy-specific headers
self._add_auth_proxy_header()
-
+
def _add_auth_proxy_header(self):
"""Adds an HTTP header for authenticated proxies
"""
@@ -179,7 +179,7 @@ class HTTPProxyConnection(HTTPConnection):
class HTTPSProxyResponse(HTTPResponse):
"""
Replacement class for HTTPResponse
- Proxy responses (made through SSL) have to keep the connection open
+ Proxy responses (made through SSL) have to keep the connection open
after the initial request, since the connection is tunneled to the SSL
host with the CONNECT method.
"""
@@ -189,7 +189,7 @@ class HTTPSProxyResponse(HTTPResponse):
class HTTPSProxyConnection(HTTPProxyConnection):
"""This class provides HTTP access through (authenticated) proxies.
-
+
Example:
If the HTTP proxy address is proxy.your.org:8080, an authenticated proxy
(one which requires a username/password combination in order to serve
@@ -219,12 +219,12 @@ class HTTPSProxyConnection(HTTPProxyConnection):
HTTPProxyConnection.__init__(self, proxy, host, port, username, password)
def connect(self):
- """Connect (using SSL) to the host and port specified in __init__
+ """Connect (using SSL) to the host and port specified in __init__
(through a proxy)."""
import socket
# Set the connection with the proxy
HTTPProxyConnection.connect(self)
- # Use the stock HTTPConnection putrequest
+ # Use the stock HTTPConnection putrequest
host = "%s:%s" % (self._host, self._port)
HTTPConnection.putrequest(self, "CONNECT", host)
# Add proxy-specific stuff
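
A hedged sketch of HTTPSProxyConnection above; the argument order (proxy,
host, port, username, password) follows the __init__ call visible in the
diff, and every value is a placeholder.

    conn = HTTPSProxyConnection('proxy.your.org:8080',
                                'www.example.com', 443,
                                'username', 'password')
    conn.connect()                   # CONNECT tunnel, then SSL
    conn.request('GET', '/')
    print(conn.getresponse().read())
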
diff --git a/contrib/smimeplus.py b/contrib/smimeplus.py
index 57adb50..2cb7fd7 100644
--- a/contrib/smimeplus.py
+++ b/contrib/smimeplus.py
@@ -17,7 +17,7 @@ class smimeplus(object):
self.setcacert(cacert)
self.randfile = randfile
self.__loadrand()
-
+
def __passcallback(self, v):
"""private key passphrase callback function"""
return self.passphrase
@@ -43,7 +43,7 @@ class smimeplus(object):
return _data
def __pack(self, msg):
-        """Convert 'msg' to string and put it into a memory buffer for
+        """Convert 'msg' to string and put it into a memory buffer for
openssl operation"""
return M2Crypto.BIO.MemoryBuffer(self.__gettext(msg))
@@ -72,7 +72,7 @@ class smimeplus(object):
def verify(self, smsg, scert):
"""Verify to see if 'smsg' was signed by 'scert', and scert was
-        issued by the cacert of this object. Return the signed message on success,
+        issued by the cacert of this object. Return the signed message on success,
None otherwise"""
# Load signer's cert.
_x509 = M2Crypto.X509.load_cert_bio(self.__pack(scert))
@@ -89,7 +89,7 @@ class smimeplus(object):
_sender = M2Crypto.SMIME.SMIME()
_sender.set_x509_stack(_stack)
_sender.set_x509_store(_store)
-
+
# Load signed message, verify it, and return result
_p7, _data = M2Crypto.SMIME.smime_load_pkcs7_bio(self.__pack(smsg))
try:
@@ -100,23 +100,23 @@ class smimeplus(object):
def encrypt(self, rcert, msg):
# Instantiate an SMIME object.
_sender = M2Crypto.SMIME.SMIME()
-
+
# Load target cert to encrypt to.
_x509 = M2Crypto.X509.load_cert_bio(self.__pack(rcert))
_stack = M2Crypto.X509.X509_Stack()
_stack.push(_x509)
_sender.set_x509_stack(_stack)
-
+
_sender.set_cipher(M2Crypto.SMIME.Cipher(self.cipher))
-
+
# Encrypt the buffer.
_buf = self.__pack(self.__gettext(msg))
_p7 = _sender.encrypt(_buf)
-
+
# Output p7 in mail-friendly format.
_out = self.__pack('')
_sender.write(_out, _p7)
-
+
# Save the PRNG's state.
self.__saverand()
@@ -129,10 +129,10 @@ class smimeplus(object):
_sender = M2Crypto.SMIME.SMIME()
_sender.load_key_bio(self.__pack(self.key), self.__pack(self.cert),
callback=self.__passcallback)
-
+
# Load the encrypted data.
_p7, _data = M2Crypto.SMIME.smime_load_pkcs7_bio(self.__pack(emsg))
-
+
# Decrypt p7.
try:
return _sender.decrypt(_p7)
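
Putting the smimeplus methods above together; the constructor's argument
order is assumed from the fragment that stores cacert and randfile, and the
PEM paths are placeholders.

    my_cert = open('mycert.pem').read()
    my_key = open('mykey.pem').read()
    ca_cert = open('ca.pem').read()
    peer_cert = open('peer.pem').read()

    sm = smimeplus(my_cert, my_key, 'secret', ca_cert, 'randpool.dat')
    blob = sm.encrypt(peer_cert, 'hello')   # mail-friendly PKCS#7
    # The recipient, holding the matching key, would run:
    # plain = sm.decrypt(blob)
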
diff --git a/demo/CipherSaber/CipherSaber.py b/demo/CipherSaber/CipherSaber.py
index ece62e7..be65259 100644
--- a/demo/CipherSaber/CipherSaber.py
+++ b/demo/CipherSaber/CipherSaber.py
@@ -54,7 +54,7 @@ del pp, iv
while 1:
buf = inf.read()
- if not buf:
+ if not buf:
break
outf.write(ci.update(buf))
outf.write(ci.final())
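
The update/final loop above is the standard EVP streaming pattern. In
isolation, assuming the usual EVP.Cipher(alg, key, iv, op) signature with
op=1 for encryption (RC4 takes no IV), and with placeholder key material:

    from M2Crypto import EVP

    ci = EVP.Cipher('rc4', 'key-plus-iv', '', 1)
    ciphertext = ci.update('attack at dawn') + ci.final()
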
diff --git a/demo/Zope/ZServer/HTTPS_Server.py b/demo/Zope/ZServer/HTTPS_Server.py
index 06a2668..c0d56f0 100644
--- a/demo/Zope/ZServer/HTTPS_Server.py
+++ b/demo/Zope/ZServer/HTTPS_Server.py
@@ -19,7 +19,7 @@ changes from Medusa's http_server:
Request Threads -- Requests are processed by threads from a thread
pool.
-
+
Output Handling -- Output is pushed directly into the producer
fifo by the request-handling thread. The HTTP server does not do
any post-processing such as chunking.
@@ -43,8 +43,8 @@ changes from Zope's HTTP server:
REMOTE_USER to the client's subject distinguished name (DN) from
the certificate. Zope's REMOTE_USER machinery takes care of the
rest, e.g., in conjunction with the RemoteUserFolder product.
-
-"""
+
+"""
import sys, time, types
@@ -95,22 +95,22 @@ class zhttps_channel(https_channel):
closed=0
zombie_timeout=100*60 # 100 minutes
-
+
def __init__(self, server, conn, addr):
https_channel.__init__(self, server, conn, addr)
self.queue=[]
self.working=0
self.peer_found=0
-
+
def push(self, producer, send=1):
# this is thread-safe when send is false
-        # note that strings are not wrapped in
+        # note that strings are not wrapped in
# producers by default
if self.closed:
return
self.producer_fifo.push(producer)
if send: self.initiate_send()
-
+
push_with_producer=push
def work(self):
@@ -121,7 +121,7 @@ class zhttps_channel(https_channel):
try: module_name, request, response=self.queue.pop(0)
except: return
handle(module_name, request, response)
-
+
def close(self):
self.closed=1
while self.queue:
@@ -151,11 +151,11 @@ class zhttps_channel(https_channel):
channel.close()
-class zhttps_server(https_server):
+class zhttps_server(https_server):
"https server"
-
+
SERVER_IDENT='ZServerSSL/%s' % (ZSERVER_SSL_VERSION,)
-
+
channel_class = zhttps_channel
shutup = 0
@@ -163,7 +163,7 @@ class zhttps_server(https_server):
self.shutup = 1
https_server.__init__(self, ip, port, ssl_ctx, resolver, logger_object)
self.ssl_ctx = ssl_ctx
- self.shutup = 0
+ self.shutup = 0
self.log_info('(%s) HTTPS server started at %s\n'
'\tHostname: %s\n\tPort: %d' % (
self.SERVER_IDENT,
@@ -171,7 +171,7 @@ class zhttps_server(https_server):
self.server_name,
self.server_port
))
-
+
def log_info(self, message, type='info'):
if self.shutup: return
dispatcher.log_info(self, message, type)
diff --git a/demo/Zope/ZServer/medusa/ftps_server.py b/demo/Zope/ZServer/medusa/ftps_server.py
index 4b5f8c9..c106213 100644
--- a/demo/Zope/ZServer/medusa/ftps_server.py
+++ b/demo/Zope/ZServer/medusa/ftps_server.py
@@ -1,4 +1,4 @@
-"""An FTP/TLS server built on Medusa's ftp_server.
+"""An FTP/TLS server built on Medusa's ftp_server.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
@@ -15,7 +15,7 @@ from M2Crypto import SSL
VERSION_STRING='0.09'
class ftp_tls_channel(ftp_server.ftp_channel):
-
+
"""FTP/TLS server channel for Medusa."""
def __init__(self, server, ssl_ctx, conn, addr):
@@ -52,7 +52,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self._ssl_accepting = 0
else:
try:
- ftp_server.ftp_channel.handle_read(self)
+ ftp_server.ftp_channel.handle_read(self)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
self.close()
@@ -67,7 +67,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self._ssl_accepting = 0
else:
try:
- ftp_server.ftp_channel.handle_write(self)
+ ftp_server.ftp_channel.handle_write(self)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
self.close()
@@ -116,7 +116,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
if string.find(command, 'stor') != -1:
while command and command[0] not in string.letters:
command = command[1:]
-
+
func_name = 'cmd_%s' % command
if command != 'pass':
self.log('<== %s' % repr(self.in_buffer)[1:-1])
@@ -126,8 +126,8 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self.in_buffer = ''
if not hasattr(self, func_name):
self.command_not_understood(line[0])
- return
-
+ return
+
func = getattr(self, func_name)
if not self.check_command_authorization(command):
self.command_not_authorized(command)
@@ -217,7 +217,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
else:
self.respond('234 AUTH TLS successful')
self._ssl_accepting = 1
- self.socket = SSL.Connection(self.ssl_ctx, self.socket)
+ self.socket = SSL.Connection(self.ssl_ctx, self.socket)
self.socket.setup_addr(self.addr)
self.socket.setup_ssl()
self.socket.set_accept_state()
@@ -227,7 +227,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer. For
- FTP/TLS the only valid value for the parameter is '0'; any
+ FTP/TLS the only valid value for the parameter is '0'; any
other value is accepted but ignored."""
if not (self._ssl_accepting or self._ssl_accepted):
return self.respond('503 AUTH TLS must be issued prior to PBSZ')
@@ -235,21 +235,21 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self.respond('200 PBSZ=0 successful.')
def cmd_prot(self, line):
- """Negotiate the security level of the data connection."""
+ """Negotiate the security level of the data connection."""
if self._pbsz is None:
return self.respond('503 PBSZ must be issued prior to PROT')
if line[1] == 'C':
self.respond('200 Protection set to Clear')
self._pbsz = None
self._prot = None
- elif line[1] == 'P':
+ elif line[1] == 'P':
self.respond('200 Protection set to Private')
self._prot = 1
elif line[1] in ('S', 'E'):
self.respond('536 PROT %s unsupported' % line[1])
else:
self.respond('504 PROT %s unsupported' % line[1])
-
+
class ftp_tls_server(ftp_server.ftp_server):
@@ -334,8 +334,8 @@ class nbio_ftp_tls_actor:
return self._ssl_handshake_ok
def handle_connect(self):
- """Handle a data connection that occurs after this instance came
- into being. When this handler is triggered, self.socket has been
+ """Handle a data connection that occurs after this instance came
+ into being. When this handler is triggered, self.socket has been
created and refers to the underlying connected socket."""
self.socket = SSL.Connection(self.ssl_ctx, self.socket)
self.socket.setup_addr(self.client_addr)
@@ -370,7 +370,7 @@ class nbio_ftp_tls_actor:
self.close()
self.log_info('recv: closing channel %s %s' % (repr(self), what))
return ''
-
+
class tls_xmit_channel(nbio_ftp_tls_actor, ftp_server.xmit_channel):
@@ -401,17 +401,17 @@ class tls_xmit_channel(nbio_ftp_tls_actor, ftp_server.xmit_channel):
"""Handle a read event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.xmit_channel.handle_read(self)
+ ftp_server.xmit_channel.handle_read(self)
def handle_write(self):
"""Handle a write event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.xmit_channel.handle_write(self)
+ ftp_server.xmit_channel.handle_write(self)
class tls_recv_channel(nbio_ftp_tls_actor, ftp_server.recv_channel):
-
+
"""TLS driver for a receive-only data connection."""
def __init__(self, channel, conn, ssl_ctx, client_addr, fd):
@@ -427,12 +427,12 @@ class tls_recv_channel(nbio_ftp_tls_actor, ftp_server.recv_channel):
"""Handle a read event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.recv_channel.handle_read(self)
+ ftp_server.recv_channel.handle_read(self)
def handle_write(self):
"""Handle a write event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.recv_channel.handle_write(self)
+ ftp_server.recv_channel.handle_write(self)
diff --git a/demo/Zope/ZServer/medusa/https_server.py b/demo/Zope/ZServer/medusa/https_server.py
index b720fd7..d4a9f1f 100644
--- a/demo/Zope/ZServer/medusa/https_server.py
+++ b/demo/Zope/ZServer/medusa/https_server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-"""A https server built on Medusa's http_server.
+"""A https server built on Medusa's http_server.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
@@ -55,7 +55,7 @@ class https_server(http_server.http_server):
def __init__(self, ip, port, ssl_ctx, resolver=None, logger_object=None):
http_server.http_server.__init__(self, ip, port, resolver, logger_object)
self.ssl_ctx=ssl_ctx
-
+
def handle_accept(self):
# Cribbed from http_server.
self.total_clients.increment()
diff --git a/demo/Zope/lib/python/Products/GuardedFile/GuardedFile.py b/demo/Zope/lib/python/Products/GuardedFile/GuardedFile.py
index 1e3137f..c1f0fc7 100644
--- a/demo/Zope/lib/python/Products/GuardedFile/GuardedFile.py
+++ b/demo/Zope/lib/python/Products/GuardedFile/GuardedFile.py
@@ -38,7 +38,7 @@ def manage_addGuardedFile(self, id, file, title='', precondition='', content_typ
title ='Success!',
message='GuardedFile "%s" has been created.' % id,
action ='manage_main')
-
+
class GuardedFile(File):
"""A File object accessible by proxy only."""
@@ -49,5 +49,5 @@ class GuardedFile(File):
role = "proxy_for_%s" % self.__name__
container._delRoles([role], None)
self.manage_delLocalRoles(self.users_with_local_role(role))
-
+
diff --git a/demo/Zope/lib/python/Products/ZSmime/SmimeTag.py b/demo/Zope/lib/python/Products/ZSmime/SmimeTag.py
index 12dd923..0e02145 100644
--- a/demo/Zope/lib/python/Products/ZSmime/SmimeTag.py
+++ b/demo/Zope/lib/python/Products/ZSmime/SmimeTag.py
@@ -55,14 +55,14 @@ class SmimeTag:
# Prepare to S/MIME.
s = SMIME.SMIME()
- # Render the signer key, load into BIO.
+ # Render the signer key, load into BIO.
try:
signer = Var(self.signer).render(md)
except ParseError:
raise SmimeError('Invalid parameter "signer".')
signer_key_bio = BIO.MemoryBuffer(signer)
signer_cert_bio = BIO.MemoryBuffer(signer) # XXX Kludge.
-
+
# Sign the data.
s.load_key_bio(signer_key_bio, signer_cert_bio)
p7 = s.sign(data_bio, flags=SMIME.PKCS7_TEXT)
diff --git a/demo/Zope/utilities/x509_user.py b/demo/Zope/utilities/x509_user.py
index 6026c32..5d212f3 100644
--- a/demo/Zope/utilities/x509_user.py
+++ b/demo/Zope/utilities/x509_user.py
@@ -1,20 +1,20 @@
#!/usr/bin/env python
"""
-This is a very simple program to manage the access_x509 database. The
+This is a very simple program to manage the access_x509 database. The
overriding goal is program portability, hence its use of 'anydbm'.
Invoke it thusly:
- x509_user.py
- -u <username>
+ x509_user.py
+ -u <username>
[ -x <X.509 subject DN> ]
- [ -f <database> ]
+ [ -f <database> ]
<username> is the Zope username; it must be present.
<X.509 subject DN> is the X.509 certificate's subject distinguished name
-to associate with the user. If it is present, the association is created
+to associate with the user. If it is present, the association is created
or updated. If it is absent, the association is removed.
<database> defaults to 'access_x509'.
@@ -22,7 +22,7 @@ or updated. If it is absent, the association is removed.
(I told you this is a dumb program.)
-To read the subject distinguished name from the certificate 'client.pem',
+To read the subject distinguished name from the certificate 'client.pem',
invoke 'openssl' thusly:
openssl x509 -subject -noout -in client.pem
diff --git a/demo/Zope/z2s.py b/demo/Zope/z2s.py
index 2927c74..5ee9474 100644
--- a/demo/Zope/z2s.py
+++ b/demo/Zope/z2s.py
@@ -34,7 +34,7 @@ Options:
This option controls whether a management process will be created
that restarts Zope after a shutdown or crash.
-
+
If the argument to -Z is non-null (e.g. "-Z1" or "-Zyes"), a
management process will be used. If the argument to -Z is "-", or
"0", (e.g. "-Z-" or "-Z0"), a management process will not be used.
@@ -439,7 +439,7 @@ try:
READ_ONLY=0
if sys.platform == 'win32':
USE_DAEMON = 0
-
+
# Get environment variables
for a in args:
@@ -653,7 +653,7 @@ try:
from ZServer import resolver, logger, asyncore
from ZServer import zhttp_server, zhttp_handler
- from ZServer import zhttps_server, zhttps0_handler, zhttps_handler
+ from ZServer import zhttps_server, zhttps0_handler, zhttps_handler
from ZServer.WebDAVSrcHandler import WebDAVSrcHandler
from ZServer import PCGIServer,FTPServer,FCGIServer
@@ -666,21 +666,21 @@ try:
## In X509_REMOTE_USER mode, we log the client cert's subject DN.
if X509_REMOTE_USER:
-
+
import base64, string, time
def log (self, bytes):
user_agent=self.get_header('user-agent')
if not user_agent: user_agent=''
referer=self.get_header('referer')
- if not referer: referer=''
+ if not referer: referer=''
get_peer_cert = getattr(self.channel, 'get_peer_cert', None)
if get_peer_cert is not None:
name = str(get_peer_cert().get_subject())
else:
name = 'Anonymous'
- auth=self.get_header('Authorization')
+ auth=self.get_header('Authorization')
if auth is not None:
if string.lower(auth[:6]) == 'basic ':
try: decoded=base64.decodestring(auth[6:])
@@ -789,7 +789,7 @@ try:
else:
ssl_ctx.set_verify(SSL.verify_none, 10)
if type(HTTPS_PORT) is type(0): HTTPS_PORT=((IP_ADDRESS, HTTPS_PORT),)
-
+
for address, port in HTTPS_PORT:
hss = zhttps_server(
ip=address,
@@ -797,7 +797,7 @@ try:
ssl_ctx=ssl_ctx,
resolver=rs,
logger_object=lg)
-
+
try:
del HTTPS_ENV['HTTP']
except KeyError:
diff --git a/demo/Zope27/install_dir/lib/python/ZServer/HTTPS_Server.py b/demo/Zope27/install_dir/lib/python/ZServer/HTTPS_Server.py
index 49b6177..ac511e1 100644
--- a/demo/Zope27/install_dir/lib/python/ZServer/HTTPS_Server.py
+++ b/demo/Zope27/install_dir/lib/python/ZServer/HTTPS_Server.py
@@ -19,7 +19,7 @@ changes from Medusa's http_server:
Request Threads -- Requests are processed by threads from a thread
pool.
-
+
Output Handling -- Output is pushed directly into the producer
fifo by the request-handling thread. The HTTP server does not do
any post-processing such as chunking.
@@ -43,8 +43,8 @@ changes from Zope's HTTP server:
REMOTE_USER to the client's subject distinguished name (DN) from
the certificate. Zope's REMOTE_USER machinery takes care of the
rest, e.g., in conjunction with the RemoteUserFolder product.
-
-"""
+
+"""
import sys, time, types
@@ -95,22 +95,22 @@ class zhttps_channel(https_channel):
closed=0
zombie_timeout=100*60 # 100 minutes
-
+
def __init__(self, server, conn, addr):
https_channel.__init__(self, server, conn, addr)
self.queue=[]
self.working=0
self.peer_found=0
-
+
def push(self, producer, send=1):
# this is thread-safe when send is false
-        # note that strings are not wrapped in
+        # note that strings are not wrapped in
# producers by default
if self.closed:
return
self.producer_fifo.push(producer)
if send: self.initiate_send()
-
+
push_with_producer=push
def work(self):
@@ -121,7 +121,7 @@ class zhttps_channel(https_channel):
try: module_name, request, response=self.queue.pop(0)
except: return
handle(module_name, request, response)
-
+
def close(self):
self.closed=1
while self.queue:
@@ -151,11 +151,11 @@ class zhttps_channel(https_channel):
channel.close()
-class zhttps_server(https_server):
+class zhttps_server(https_server):
"https server"
-
+
SERVER_IDENT='ZServerSSL/%s' % (ZSERVER_SSL_VERSION,)
-
+
channel_class = zhttps_channel
shutup = 0
@@ -163,7 +163,7 @@ class zhttps_server(https_server):
self.shutup = 1
https_server.__init__(self, ip, port, ssl_ctx, resolver, logger_object)
self.ssl_ctx = ssl_ctx
- self.shutup = 0
+ self.shutup = 0
self.log_info('(%s) HTTPS server started at %s\n'
'\tHostname: %s\n\tPort: %d' % (
self.SERVER_IDENT,
@@ -171,7 +171,7 @@ class zhttps_server(https_server):
self.server_name,
self.server_port
))
-
+
def log_info(self, message, type='info'):
if self.shutup: return
dispatcher.log_info(self, message, type)
diff --git a/demo/Zope27/install_dir/lib/python/ZServer/medusa/https_server.py b/demo/Zope27/install_dir/lib/python/ZServer/medusa/https_server.py
index 53bdfd6..fdc3c1e 100644
--- a/demo/Zope27/install_dir/lib/python/ZServer/medusa/https_server.py
+++ b/demo/Zope27/install_dir/lib/python/ZServer/medusa/https_server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-"""A https server built on Medusa's http_server.
+"""A https server built on Medusa's http_server.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
@@ -55,7 +55,7 @@ class https_server(http_server.http_server):
def __init__(self, ip, port, ssl_ctx, resolver=None, logger_object=None):
http_server.http_server.__init__(self, ip, port, resolver, logger_object)
self.ssl_ctx=ssl_ctx
-
+
def handle_accept(self):
# Cribbed from http_server.
self.total_clients.increment()
diff --git a/demo/ZopeX3/install_dir/lib/python/zope/server/http/https_server.py b/demo/ZopeX3/install_dir/lib/python/zope/server/http/https_server.py
index 886d98f..0936f1e 100644
--- a/demo/ZopeX3/install_dir/lib/python/zope/server/http/https_server.py
+++ b/demo/ZopeX3/install_dir/lib/python/zope/server/http/https_server.py
@@ -49,7 +49,7 @@ class HTTPS_Server(HTTPServer):
HTTPServer.__init__(self, ip, port, task_dispatcher, adj, start, hit_log, verbose)
if ssl_ctx is None:
self.ssl_ctx = make_ssl_context(os.path.realpath(__file__))
- else:
+ else:
self.ssl_ctx = ssl_ctx
def executeRequest(self, task):
diff --git a/demo/dhtest.py b/demo/dhtest.py
index 2d17e6b..158e81d 100644
--- a/demo/dhtest.py
+++ b/demo/dhtest.py
@@ -24,6 +24,6 @@ def test():
print('b.key = ', repr(b.compute_key(a.pub)))
if __name__=='__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
test()
Rand.save_file('randpool.dat')
diff --git a/demo/dsa_bench.py b/demo/dsa_bench.py
index 4c27a03..260b092 100644
--- a/demo/dsa_bench.py
+++ b/demo/dsa_bench.py
@@ -10,23 +10,23 @@ from __future__ import print_function
makenewkey showpubkey showdigest showprofile
md5 sha1 sha256 sha512
<key length>
-
+
NB:
- DSA is formally defined with SHA-1 and key length 1024.
- The OpenSSL implementation actually supports most any
+ DSA is formally defined with SHA-1 and key length 1024.
+ The OpenSSL implementation actually supports most any
hashing algorithm and key length, as long as the key
length is longer than the digest length. If not SHA-1
and 1024, you should be very clear. The use of "DSA"
without any qualifiers implies SHA-1 and 1024.
-
+
Larry Bugbee
November 2006
-
-
- Some portions are Copyright (c) 1999-2003 Ng Pheng Siong.
+
+
+ Some portions are Copyright (c) 1999-2003 Ng Pheng Siong.
All rights reserved.
- Portions created by Open Source Applications Foundation
+ Portions created by Open Source Applications Foundation
(OSAF) are Copyright (C) 2004 OSAF. All Rights Reserved.
"""
@@ -43,7 +43,7 @@ showpubkey = 0 # 1 = show the public key value
showdigest = 0 # 1 = show the digest value
showprofile = 0 # 1 = use the python profiler
-hashalgs = ['md5', 'ripemd160', 'sha1',
+hashalgs = ['md5', 'ripemd160', 'sha1',
'sha224', 'sha256', 'sha384', 'sha512']
# default hashing algorithm
@@ -89,7 +89,7 @@ def speed():
for i in range(N2):
dsa.verify(dgst, r, s)
print(' %d verifications: %8.2fs' % (N2, (time() - t1)))
-
+
def test_speed(dsa, dgst):
print(' measuring speed...')
if showprofile:
@@ -103,12 +103,12 @@ def test_speed(dsa, dgst):
def main(keylen, hashalg):
global dsa, dgst # this exists ONLY for speed testing
-
- Rand.load_file('randpool.dat', -1)
-
+
+ Rand.load_file('randpool.dat', -1)
+
pvtkeyfilename = 'DSA%dpvtkey.pem' % (keylen)
- pubkeyfilename = 'DSA%dpubkey.pem' % (keylen)
-
+ pubkeyfilename = 'DSA%dpubkey.pem' % (keylen)
+
if makenewkey:
print(' making and saving a new key')
dsa = DSA.gen_params(keylen)
@@ -119,16 +119,16 @@ def main(keylen, hashalg):
print(' loading an existing key')
dsa = DSA.load_key(pvtkeyfilename)
print(' dsa key length:', len(dsa))
-
+
if not dsa.check_key():
raise 'key is not initialised'
-
+
if showpubkey:
dsa_pub = dsa.pub
pub_pem = base64.encodestring(dsa_pub)
print(' PEM public key is: \n',pub_pem)
- # since we are testing signing and verification, let's not
+ # since we are testing signing and verification, let's not
# be fussy about the digest. Just make one.
md = EVP.MessageDigest(hashalg)
md.update('can you spell subliminal channel?')
@@ -136,7 +136,7 @@ def main(keylen, hashalg):
print(' hash algorithm: %s' % hashalg)
if showdigest:
print(' %s digest: \n%s' % (hashalg, base64.encodestring(dgst)))
-
+
test(dsa, dgst)
# test_asn1(dsa, dgst)
test_speed(dsa, dgst)
@@ -169,7 +169,7 @@ if __name__=='__main__':
except:
print('\n *** argument "%s" not understood ***' % arg)
print_usage()
-
+
main(keylen, hashalg)
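
The sign/verify core that the benchmark above times, reduced to a single
pass; dsa.gen_key() and md.final() are assumed, since those steps fall
outside the hunks shown.

    from M2Crypto import DSA, EVP

    dsa = DSA.gen_params(1024)
    dsa.gen_key()                       # assumed: not shown in the hunks
    md = EVP.MessageDigest('sha1')
    md.update('can you spell subliminal channel?')
    dgst = md.final()                   # assumed: not shown in the hunks
    r, s = dsa.sign(dgst)
    assert dsa.verify(dgst, r, s)
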
diff --git a/demo/dsatest.py b/demo/dsatest.py
index 8dad5a2..2da0f6f 100644
--- a/demo/dsatest.py
+++ b/demo/dsatest.py
@@ -43,7 +43,7 @@ def speed():
for i in range(N2):
d.verify(dgst, r, s)
print('%d verifications: %8.2fs' % (N2, (time() - t1)))
-
+
def test_speed():
print('measuring speed...')
import profile
@@ -51,7 +51,7 @@ def test_speed():
if __name__=='__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
test()
test_asn1()
#test_speed()
diff --git a/demo/ec/ecdhtest.py b/demo/ec/ecdhtest.py
index 0e70996..e73dc26 100644
--- a/demo/ec/ecdhtest.py
+++ b/demo/ec/ecdhtest.py
@@ -6,7 +6,7 @@ from __future__ import print_function
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved.
-Portions copyright (c) 2005-2006 Vrije Universiteit Amsterdam.
+Portions copyright (c) 2005-2006 Vrije Universiteit Amsterdam.
All rights reserved."""
from M2Crypto import EC,Rand
@@ -28,6 +28,6 @@ def test():
if __name__=='__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
test()
Rand.save_file('randpool.dat')
diff --git a/demo/ec/ecdsatest.py b/demo/ec/ecdsatest.py
index 1258cd2..a807adb 100644
--- a/demo/ec/ecdsatest.py
+++ b/demo/ec/ecdsatest.py
@@ -54,7 +54,7 @@ def speed():
for i in range(N2):
ec.verify(dgst, r, s)
print('%d verifications: %8.2fs' % (N2, (time() - t1)))
-
+
def test_speed():
print('measuring speed...')
import profile
@@ -62,7 +62,7 @@ def test_speed():
if __name__=='__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
test()
test_asn1()
#test_speed()
diff --git a/demo/https.howto/get_https.py b/demo/https.howto/get_https.py
index 31d607e..87dcecd 100755
--- a/demo/https.howto/get_https.py
+++ b/demo/https.howto/get_https.py
@@ -24,12 +24,12 @@ def get_https():
resp = h.getresponse()
while 1:
data = resp.read()
- if not data:
+ if not data:
break
print(data)
h.close()
-Rand.load_file('../randpool.dat', -1)
+Rand.load_file('../randpool.dat', -1)
get_https()
Rand.save_file('../randpool.dat')
diff --git a/demo/https.howto/https_cli.py b/demo/https.howto/https_cli.py
index bb34625..02f81bb 100644
--- a/demo/https.howto/https_cli.py
+++ b/demo/https.howto/https_cli.py
@@ -30,7 +30,7 @@ def test_httpslib():
c = 0
while 1:
# Either of following two works.
- #data = f.readline()
+ #data = f.readline()
data = resp.read()
if not data: break
c = c + len(data)
@@ -41,7 +41,7 @@ def test_httpslib():
if __name__=='__main__':
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
#threading.init()
test_httpslib()
#threading.cleanup()
diff --git a/demo/https.howto/orig_https_srv.py b/demo/https.howto/orig_https_srv.py
index 83be0fb..74e9874 100644
--- a/demo/https.howto/orig_https_srv.py
+++ b/demo/https.howto/orig_https_srv.py
@@ -1,6 +1,6 @@
"""This server extends BaseHTTPServer and SimpleHTTPServer thusly:
1. One thread per connection.
-2. Generates directory listings.
+2. Generates directory listings.
In addition, it has the following properties:
1. Works over HTTPS only.
diff --git a/demo/medusa/START.py b/demo/medusa/START.py
index e73cc7d..5a10edd 100644
--- a/demo/medusa/START.py
+++ b/demo/medusa/START.py
@@ -7,7 +7,7 @@ import os
import os.path
import sys
-# Medusa
+# Medusa
import asyncore
import default_handler
import filesys
@@ -27,7 +27,7 @@ FTP_PORT = 9021
hs=http_server.http_server('', HTTP_PORT)
-Rand.load_file('../randpool.dat', -1)
+Rand.load_file('../randpool.dat', -1)
ssl_ctx=SSL.Context('sslv23')
ssl_ctx.load_cert('server.pem')
ssl_ctx.load_verify_location('ca.pem')
diff --git a/demo/medusa/START_xmlrpc.py b/demo/medusa/START_xmlrpc.py
index 5efd0ca..c451198 100644
--- a/demo/medusa/START_xmlrpc.py
+++ b/demo/medusa/START_xmlrpc.py
@@ -7,7 +7,7 @@ import os
import os.path
import sys
-# Medusa
+# Medusa
import asyncore
import default_handler
import filesys
@@ -28,7 +28,7 @@ HTTPS_PORT=9443
hs=http_server.http_server('', HTTP_PORT)
-Rand.load_file('../randpool.dat', -1)
+Rand.load_file('../randpool.dat', -1)
ssl_ctx=SSL.Context('sslv23')
ssl_ctx.load_cert('server.pem')
#ssl_ctx.load_verify_location('ca.pem')
diff --git a/demo/medusa/asynchat.py b/demo/medusa/asynchat.py
index ef9f78b..ce198c5 100644
--- a/demo/medusa/asynchat.py
+++ b/demo/medusa/asynchat.py
@@ -1,12 +1,12 @@
# -*- Mode: Python; tab-width: 4 -*-
-# $Id$
-# Author: Sam Rushing <rushing@nightmare.com>
+# $Id$
+# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
-#
+#
# All Rights Reserved
-#
+#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
@@ -15,7 +15,7 @@
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
-#
+#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -51,224 +51,224 @@ import asyncore
import string
class async_chat (asyncore.dispatcher):
- """This is an abstract class. You must derive from this class, and add
- the two methods collect_incoming_data() and found_terminator()"""
-
- # these are overridable defaults
-
- ac_in_buffer_size = 4096
- ac_out_buffer_size = 4096
-
- def __init__ (self, conn=None):
- self.ac_in_buffer = ''
- self.ac_out_buffer = ''
- self.producer_fifo = fifo()
- asyncore.dispatcher.__init__ (self, conn)
-
- def set_terminator (self, term):
- "Set the input delimiter. Can be a fixed string of any length, an integer, or None"
- self.terminator = term
-
- def get_terminator (self):
- return self.terminator
-
- # grab some more data from the socket,
- # throw it to the collector method,
- # check for the terminator,
- # if found, transition to the next state.
-
- def handle_read (self):
-
- try:
- data = self.recv (self.ac_in_buffer_size)
- except socket.error as why:
- self.handle_error()
- return
-
- self.ac_in_buffer = self.ac_in_buffer + data
-
- # Continue to search for self.terminator in self.ac_in_buffer,
- # while calling self.collect_incoming_data. The while loop
- # is necessary because we might read several data+terminator
- # combos with a single recv(1024).
-
- while self.ac_in_buffer:
- lb = len(self.ac_in_buffer)
- terminator = self.get_terminator()
- if terminator is None:
- # no terminator, collect it all
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
- elif type(terminator) == type(0):
- # numeric terminator
- n = terminator
- if lb < n:
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
- self.terminator = self.terminator - lb
- else:
- self.collect_incoming_data (self.ac_in_buffer[:n])
- self.ac_in_buffer = self.ac_in_buffer[n:]
- self.terminator = 0
- self.found_terminator()
- else:
- # 3 cases:
- # 1) end of buffer matches terminator exactly:
- # collect data, transition
- # 2) end of buffer matches some prefix:
- # collect data to the prefix
- # 3) end of buffer does not match any prefix:
- # collect data
- terminator_len = len(terminator)
- index = string.find (self.ac_in_buffer, terminator)
- if index != -1:
- # we found the terminator
- if index > 0:
- # don't bother reporting the empty string (source of subtle bugs)
- self.collect_incoming_data (self.ac_in_buffer[:index])
- self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
- # This does the Right Thing if the terminator is changed here.
- self.found_terminator()
- else:
- # check for a prefix of the terminator
- index = find_prefix_at_end (self.ac_in_buffer, terminator)
- if index:
- if index != lb:
- # we found a prefix, collect up to the prefix
- self.collect_incoming_data (self.ac_in_buffer[:-index])
- self.ac_in_buffer = self.ac_in_buffer[-index:]
- break
- else:
- # no prefix, collect it all
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
-
- def handle_write (self):
- self.initiate_send ()
-
- def handle_close (self):
- self.close()
-
- def push (self, data):
- self.producer_fifo.push (simple_producer (data))
- self.initiate_send()
-
- def push_with_producer (self, producer):
- self.producer_fifo.push (producer)
- self.initiate_send()
-
- def readable (self):
- "predicate for inclusion in the readable for select()"
- return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
-
- def writable (self):
- "predicate for inclusion in the writable for select()"
- # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
- # this is about twice as fast, though not as clear.
- return not (
- (self.ac_out_buffer is '') and
- self.producer_fifo.is_empty() and
- self.connected
- )
-
- def close_when_done (self):
- "automatically close this channel once the outgoing queue is empty"
- self.producer_fifo.push (None)
-
- # refill the outgoing buffer by calling the more() method
- # of the first producer in the queue
- def refill_buffer (self):
- _string_type = type('')
- while 1:
- if len(self.producer_fifo):
- p = self.producer_fifo.first()
- # a 'None' in the producer fifo is a sentinel,
- # telling us to close the channel.
- if p is None:
- if not self.ac_out_buffer:
- self.producer_fifo.pop()
- self.close()
- return
- elif type(p) is _string_type:
- self.producer_fifo.pop()
- self.ac_out_buffer = self.ac_out_buffer + p
- return
- data = p.more()
- if data:
- self.ac_out_buffer = self.ac_out_buffer + data
- return
- else:
- self.producer_fifo.pop()
- else:
- return
-
- def initiate_send (self):
- obs = self.ac_out_buffer_size
- # try to refill the buffer
- if (len (self.ac_out_buffer) < obs):
- self.refill_buffer()
-
- if self.ac_out_buffer and self.connected:
- # try to send the buffer
- try:
- num_sent = self.send (self.ac_out_buffer[:obs])
- if num_sent:
- self.ac_out_buffer = self.ac_out_buffer[num_sent:]
-
- except socket.error as why:
- self.handle_error()
- return
-
- def discard_buffers (self):
- # Emergencies only!
- self.ac_in_buffer = ''
- self.ac_out_buffer = ''
- while self.producer_fifo:
- self.producer_fifo.pop()
+ """This is an abstract class. You must derive from this class, and add
+ the two methods collect_incoming_data() and found_terminator()"""
+
+ # these are overridable defaults
+
+ ac_in_buffer_size = 4096
+ ac_out_buffer_size = 4096
+
+ def __init__ (self, conn=None):
+ self.ac_in_buffer = ''
+ self.ac_out_buffer = ''
+ self.producer_fifo = fifo()
+ asyncore.dispatcher.__init__ (self, conn)
+
+ def set_terminator (self, term):
+ "Set the input delimiter. Can be a fixed string of any length, an integer, or None"
+ self.terminator = term
+
+ def get_terminator (self):
+ return self.terminator
+
+ # grab some more data from the socket,
+ # throw it to the collector method,
+ # check for the terminator,
+ # if found, transition to the next state.
+
+ def handle_read (self):
+
+ try:
+ data = self.recv (self.ac_in_buffer_size)
+ except socket.error as why:
+ self.handle_error()
+ return
+
+ self.ac_in_buffer = self.ac_in_buffer + data
+
+ # Continue to search for self.terminator in self.ac_in_buffer,
+ # while calling self.collect_incoming_data. The while loop
+ # is necessary because we might read several data+terminator
+ # combos with a single recv(1024).
+
+ while self.ac_in_buffer:
+ lb = len(self.ac_in_buffer)
+ terminator = self.get_terminator()
+ if terminator is None:
+ # no terminator, collect it all
+ self.collect_incoming_data (self.ac_in_buffer)
+ self.ac_in_buffer = ''
+ elif type(terminator) == type(0):
+ # numeric terminator
+ n = terminator
+ if lb < n:
+ self.collect_incoming_data (self.ac_in_buffer)
+ self.ac_in_buffer = ''
+ self.terminator = self.terminator - lb
+ else:
+ self.collect_incoming_data (self.ac_in_buffer[:n])
+ self.ac_in_buffer = self.ac_in_buffer[n:]
+ self.terminator = 0
+ self.found_terminator()
+ else:
+ # 3 cases:
+ # 1) end of buffer matches terminator exactly:
+ # collect data, transition
+ # 2) end of buffer matches some prefix:
+ # collect data to the prefix
+ # 3) end of buffer does not match any prefix:
+ # collect data
+ terminator_len = len(terminator)
+ index = string.find (self.ac_in_buffer, terminator)
+ if index != -1:
+ # we found the terminator
+ if index > 0:
+ # don't bother reporting the empty string (source of subtle bugs)
+ self.collect_incoming_data (self.ac_in_buffer[:index])
+ self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+ # This does the Right Thing if the terminator is changed here.
+ self.found_terminator()
+ else:
+ # check for a prefix of the terminator
+ index = find_prefix_at_end (self.ac_in_buffer, terminator)
+ if index:
+ if index != lb:
+ # we found a prefix, collect up to the prefix
+ self.collect_incoming_data (self.ac_in_buffer[:-index])
+ self.ac_in_buffer = self.ac_in_buffer[-index:]
+ break
+ else:
+ # no prefix, collect it all
+ self.collect_incoming_data (self.ac_in_buffer)
+ self.ac_in_buffer = ''
+
+ def handle_write (self):
+ self.initiate_send ()
+
+ def handle_close (self):
+ self.close()
+
+ def push (self, data):
+ self.producer_fifo.push (simple_producer (data))
+ self.initiate_send()
+
+ def push_with_producer (self, producer):
+ self.producer_fifo.push (producer)
+ self.initiate_send()
+
+ def readable (self):
+ "predicate for inclusion in the readable for select()"
+ return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+
+ def writable (self):
+ "predicate for inclusion in the writable for select()"
+ # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
+ # this is about twice as fast, though not as clear.
+ return not (
+            (self.ac_out_buffer == '') and
+ self.producer_fifo.is_empty() and
+ self.connected
+ )
+
+ def close_when_done (self):
+ "automatically close this channel once the outgoing queue is empty"
+ self.producer_fifo.push (None)
+
+ # refill the outgoing buffer by calling the more() method
+ # of the first producer in the queue
+ def refill_buffer (self):
+ _string_type = type('')
+ while 1:
+ if len(self.producer_fifo):
+ p = self.producer_fifo.first()
+ # a 'None' in the producer fifo is a sentinel,
+ # telling us to close the channel.
+ if p is None:
+ if not self.ac_out_buffer:
+ self.producer_fifo.pop()
+ self.close()
+ return
+ elif type(p) is _string_type:
+ self.producer_fifo.pop()
+ self.ac_out_buffer = self.ac_out_buffer + p
+ return
+ data = p.more()
+ if data:
+ self.ac_out_buffer = self.ac_out_buffer + data
+ return
+ else:
+ self.producer_fifo.pop()
+ else:
+ return
+
+ def initiate_send (self):
+ obs = self.ac_out_buffer_size
+ # try to refill the buffer
+ if (len (self.ac_out_buffer) < obs):
+ self.refill_buffer()
+
+ if self.ac_out_buffer and self.connected:
+ # try to send the buffer
+ try:
+ num_sent = self.send (self.ac_out_buffer[:obs])
+ if num_sent:
+ self.ac_out_buffer = self.ac_out_buffer[num_sent:]
+
+ except socket.error as why:
+ self.handle_error()
+ return
+
+ def discard_buffers (self):
+ # Emergencies only!
+ self.ac_in_buffer = ''
+ self.ac_out_buffer = ''
+ while self.producer_fifo:
+ self.producer_fifo.pop()
class simple_producer:
- def __init__ (self, data, buffer_size=512):
- self.data = data
- self.buffer_size = buffer_size
+ def __init__ (self, data, buffer_size=512):
+ self.data = data
+ self.buffer_size = buffer_size
- def more (self):
- if len (self.data) > self.buffer_size:
- result = self.data[:self.buffer_size]
- self.data = self.data[self.buffer_size:]
- return result
- else:
- result = self.data
- self.data = ''
- return result
+ def more (self):
+ if len (self.data) > self.buffer_size:
+ result = self.data[:self.buffer_size]
+ self.data = self.data[self.buffer_size:]
+ return result
+ else:
+ result = self.data
+ self.data = ''
+ return result
class fifo:
- def __init__ (self, list=None):
- if not list:
- self.list = []
- else:
- self.list = list
-
- def __len__ (self):
- return len(self.list)
-
- def is_empty (self):
- return self.list == []
-
- def first (self):
- return self.list[0]
-
- def push (self, data):
- self.list.append (data)
-
- def pop (self):
- if self.list:
- result = self.list[0]
- del self.list[0]
- return (1, result)
- else:
- return (0, None)
+ def __init__ (self, list=None):
+ if not list:
+ self.list = []
+ else:
+ self.list = list
+
+ def __len__ (self):
+ return len(self.list)
+
+ def is_empty (self):
+ return self.list == []
+
+ def first (self):
+ return self.list[0]
+
+ def push (self, data):
+ self.list.append (data)
+
+ def pop (self):
+ if self.list:
+ result = self.list[0]
+ del self.list[0]
+ return (1, result)
+ else:
+ return (0, None)
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
@@ -286,7 +286,7 @@ class fifo:
# regex: 14035/s
def find_prefix_at_end (haystack, needle):
- l = len(needle) - 1
- while l and not haystack.endswith(needle[:l]):
- l -= 1
- return l
+ l = len(needle) - 1
+ while l and not haystack.endswith(needle[:l]):
+ l -= 1
+ return l
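The whole async_chat contract re-indented above comes down to the terminator: a subclass supplies collect_incoming_data() and found_terminator(), while handle_read() feeds them, with find_prefix_at_end() making sure a terminator split across two recv() calls is still caught. A minimal line-echo channel against this API (class and attribute names are illustrative):

    import asynchat

    class echo_line_channel (asynchat.async_chat):
        # collect bytes until CRLF, then echo the completed line back

        def __init__ (self, conn):
            asynchat.async_chat.__init__ (self, conn)
            self.buffer = []
            self.set_terminator ('\r\n')

        def collect_incoming_data (self, data):
            # data arrives in arbitrary chunks; several calls may make up one line
            self.buffer.append (data)

        def found_terminator (self):
            line = ''.join (self.buffer)
            self.buffer = []
            self.push (line + '\r\n')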
diff --git a/demo/medusa/asyncore.py b/demo/medusa/asyncore.py
index ef7344b..ac98db4 100644
--- a/demo/medusa/asyncore.py
+++ b/demo/medusa/asyncore.py
@@ -1,12 +1,12 @@
# -*- Mode: Python; tab-width: 4 -*-
-# $Id$
-# Author: Sam Rushing <rushing@nightmare.com>
+# $Id$
+# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
-#
+#
# All Rights Reserved
-#
+#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
@@ -15,7 +15,7 @@
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
-#
+#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -30,22 +30,22 @@ from __future__ import print_function
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
-than one thing at a time". Multi-threaded programming is the simplest and
+than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
-rarely CPU-bound, however.
+rarely CPU-bound, however.
-If your operating system supports the select() system call in its I/O
+If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
-sophisticated high-performance network servers and clients a snap.
+sophisticated high-performance network servers and clients a snap.
"""
import exceptions
import select
@@ -55,96 +55,96 @@ import sys
import os
if os.name == 'nt':
- EWOULDBLOCK = 10035
- EINPROGRESS = 10036
- EALREADY = 10037
- ECONNRESET = 10054
- ENOTCONN = 10057
- ESHUTDOWN = 10058
+ EWOULDBLOCK = 10035
+ EINPROGRESS = 10036
+ EALREADY = 10037
+ ECONNRESET = 10054
+ ENOTCONN = 10057
+ ESHUTDOWN = 10058
else:
- from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, ENOTCONN, ESHUTDOWN
+ from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, ENOTCONN, ESHUTDOWN
try:
- socket_map
+ socket_map
except NameError:
- socket_map = {}
+ socket_map = {}
class ExitNow (exceptions.Exception):
- pass
+ pass
DEBUG = 0
def poll (timeout=0.0, map=None):
- global DEBUG
- if map is None:
- map = socket_map
- if map:
- r = []; w = []; e = []
- for fd, obj in map.items():
- if obj.readable():
- r.append (fd)
- if obj.writable():
- w.append (fd)
- r,w,e = select.select (r,w,e, timeout)
-
- if DEBUG:
- print(r,w,e)
-
- for fd in r:
- try:
- obj = map[fd]
- try:
- obj.handle_read_event()
- except ExitNow:
- raise ExitNow
- except:
- obj.handle_error()
- except KeyError:
- pass
-
- for fd in w:
- try:
- obj = map[fd]
- try:
- obj.handle_write_event()
- except ExitNow:
- raise ExitNow
- except:
- obj.handle_error()
- except KeyError:
- pass
+ global DEBUG
+ if map is None:
+ map = socket_map
+ if map:
+ r = []; w = []; e = []
+ for fd, obj in map.items():
+ if obj.readable():
+ r.append (fd)
+ if obj.writable():
+ w.append (fd)
+ r,w,e = select.select (r,w,e, timeout)
+
+ if DEBUG:
+ print(r,w,e)
+
+ for fd in r:
+ try:
+ obj = map[fd]
+ try:
+ obj.handle_read_event()
+ except ExitNow:
+ raise ExitNow
+ except:
+ obj.handle_error()
+ except KeyError:
+ pass
+
+ for fd in w:
+ try:
+ obj = map[fd]
+ try:
+ obj.handle_write_event()
+ except ExitNow:
+ raise ExitNow
+ except:
+ obj.handle_error()
+ except KeyError:
+ pass
def poll2 (timeout=0.0, map=None):
- import poll
- if map is None:
- map=socket_map
- # timeout is in milliseconds
- timeout = int(timeout*1000)
- if map:
- l = []
- for fd, obj in map.items():
- flags = 0
- if obj.readable():
- flags = poll.POLLIN
- if obj.writable():
- flags = flags | poll.POLLOUT
- if flags:
- l.append ((fd, flags))
- r = poll.poll (l, timeout)
- for fd, flags in r:
- try:
- obj = map[fd]
- try:
- if (flags & poll.POLLIN):
- obj.handle_read_event()
- if (flags & poll.POLLOUT):
- obj.handle_write_event()
- except ExitNow:
- raise ExitNow
- except:
- obj.handle_error()
- except KeyError:
- pass
+ import poll
+ if map is None:
+ map=socket_map
+ # timeout is in milliseconds
+ timeout = int(timeout*1000)
+ if map:
+ l = []
+ for fd, obj in map.items():
+ flags = 0
+ if obj.readable():
+ flags = poll.POLLIN
+ if obj.writable():
+ flags = flags | poll.POLLOUT
+ if flags:
+ l.append ((fd, flags))
+ r = poll.poll (l, timeout)
+ for fd, flags in r:
+ try:
+ obj = map[fd]
+ try:
+ if (flags & poll.POLLIN):
+ obj.handle_read_event()
+ if (flags & poll.POLLOUT):
+ obj.handle_write_event()
+ except ExitNow:
+ raise ExitNow
+ except:
+ obj.handle_error()
+ except KeyError:
+ pass
def poll3 (timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
@@ -181,257 +181,257 @@ def poll3 (timeout=0.0, map=None):
def loop (timeout=30.0, use_poll=0, map=None):
- if use_poll:
- if hasattr (select, 'poll'):
- poll_fun = poll3
- else:
- poll_fun = poll2
- else:
- poll_fun = poll
+ if use_poll:
+ if hasattr (select, 'poll'):
+ poll_fun = poll3
+ else:
+ poll_fun = poll2
+ else:
+ poll_fun = poll
- if map is None:
- map=socket_map
+ if map is None:
+ map=socket_map
- while map:
- poll_fun (timeout, map)
+ while map:
+ poll_fun (timeout, map)
class dispatcher:
- debug = 0
- connected = 0
- accepting = 0
- closing = 0
- addr = None
-
- def __init__ (self, sock=None, map=None):
- if sock:
- self.set_socket (sock, map)
- # I think it should inherit this anyway
- self.socket.setblocking (0)
- self.connected = 1
-
- def __repr__ (self):
- try:
- status = []
- if self.accepting and self.addr:
- status.append ('listening')
- elif self.connected:
- status.append ('connected')
- if self.addr:
- status.append ('%s:%d' % self.addr)
- return '<%s %s at %x>' % (
- self.__class__.__name__,
- string.join (status, ' '),
- id(self)
- )
- except:
- try:
- ar = repr(self.addr)
- except:
- ar = 'no self.addr!'
-
- return '<__repr__ (self) failed for object at %x (addr=%s)>' % (id(self),ar)
-
- def add_channel (self, map=None):
- #self.log_info ('adding channel %s' % self)
- if map is None:
- map=socket_map
- map [self._fileno] = self
-
- def del_channel (self, map=None):
- fd = self._fileno
- if map is None:
- map=socket_map
- if fd in map:
- #self.log_info ('closing channel %d:%s' % (fd, self))
- del map [fd]
-
- def create_socket (self, family, type):
- self.family_and_type = family, type
- self.socket = socket.socket (family, type)
- self.socket.setblocking(0)
- self._fileno = self.socket.fileno()
- self.add_channel()
-
- def set_socket (self, sock, map=None):
- self.__dict__['socket'] = sock
- self._fileno = sock.fileno()
- self.add_channel (map)
-
- def set_reuse_addr (self):
- # try to re-use a server port if possible
- try:
- self.socket.setsockopt (
- socket.SOL_SOCKET, socket.SO_REUSEADDR,
- self.socket.getsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1
- )
- except:
- pass
-
- # ==================================================
- # predicates for select()
- # these are used as filters for the lists of sockets
- # to pass to select().
- # ==================================================
-
- def readable (self):
- return 1
-
- if os.name == 'mac':
- # The macintosh will select a listening socket for
- # write if you let it. What might this mean?
- def writable (self):
- return not self.accepting
- else:
- def writable (self):
- return 1
-
- # ==================================================
- # socket object methods.
- # ==================================================
-
- def listen (self, num):
- self.accepting = 1
- if os.name == 'nt' and num > 5:
- num = 1
- return self.socket.listen (num)
-
- def bind (self, addr):
- self.addr = addr
- return self.socket.bind (addr)
-
- def connect (self, address):
- self.connected = 0
- try:
- self.socket.connect (address)
- except socket.error as why:
- if why[0] in (EINPROGRESS, EALREADY, EWOULDBLOCK):
- return
- else:
- raise socket.error(why[0])
- self.connected = 1
- self.handle_connect()
-
- def accept (self):
- try:
- conn, addr = self.socket.accept()
- return conn, addr
- except socket.error as why:
- if why[0] == EWOULDBLOCK:
- pass
- else:
- raise socket.error(why[0])
-
- def send (self, data):
- try:
- result = self.socket.send (data)
- return result
- except socket.error as why:
- if why[0] == EWOULDBLOCK:
- return 0
- else:
- raise socket.error(why[0])
- return 0
-
- def recv (self, buffer_size):
- try:
- data = self.socket.recv (buffer_size)
- if not data:
- # a closed connection is indicated by signaling
- # a read condition, and having recv() return 0.
- self.handle_close()
- return ''
- else:
- return data
- except socket.error as why:
- # winsock sometimes throws ENOTCONN
- if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
- self.handle_close()
- return ''
- else:
- raise socket.error(why[0])
-
- def close (self):
- self.del_channel()
- self.socket.close()
-
- # cheap inheritance, used to pass all other attribute
- # references to the underlying socket object.
- def __getattr__ (self, attr):
- return getattr (self.socket, attr)
-
- # log and log_info maybe overriden to provide more sophisitcated
- # logging and warning methods. In general, log is for 'hit' logging
- # and 'log_info' is for informational, warning and error logging.
-
- def log (self, message):
- sys.stderr.write ('log: %s\n' % str(message))
-
- def log_info (self, message, type='info'):
- if __debug__ or type != 'info':
- print('%s: %s' % (type, message))
-
- def handle_read_event (self):
- if self.accepting:
- # for an accepting socket, getting a read implies
- # that we are connected
- if not self.connected:
- self.connected = 1
- self.handle_accept()
- elif not self.connected:
- self.handle_connect()
- self.connected = 1
- self.handle_read()
- else:
- self.handle_read()
-
- def handle_write_event (self):
- # getting a write implies that we are connected
- if not self.connected:
- self.handle_connect()
- self.connected = 1
- self.handle_write()
-
- def handle_expt_event (self):
- self.handle_expt()
-
- def handle_error (self):
- (file,fun,line), t, v, tbinfo = compact_traceback()
-
- # sometimes a user repr method will crash.
- try:
- self_repr = repr (self)
- except:
- self_repr = '<__repr__ (self) failed for object at %0x>' % id(self)
-
- self.log_info (
- 'uncaptured python exception, closing channel %s (%s:%s %s)' % (
- self_repr,
- t,
- v,
- tbinfo
- ),
- 'error'
- )
- self.close()
-
- def handle_expt (self):
- self.log_info ('unhandled exception', 'warning')
-
- def handle_read (self):
- self.log_info ('unhandled read event', 'warning')
-
- def handle_write (self):
- self.log_info ('unhandled write event', 'warning')
-
- def handle_connect (self):
- self.log_info ('unhandled connect event', 'warning')
-
- def handle_accept (self):
- self.log_info ('unhandled accept event', 'warning')
-
- def handle_close (self):
- self.log_info ('unhandled close event', 'warning')
- self.close()
+ debug = 0
+ connected = 0
+ accepting = 0
+ closing = 0
+ addr = None
+
+ def __init__ (self, sock=None, map=None):
+ if sock:
+ self.set_socket (sock, map)
+ # I think it should inherit this anyway
+ self.socket.setblocking (0)
+ self.connected = 1
+
+ def __repr__ (self):
+ try:
+ status = []
+ if self.accepting and self.addr:
+ status.append ('listening')
+ elif self.connected:
+ status.append ('connected')
+ if self.addr:
+ status.append ('%s:%d' % self.addr)
+ return '<%s %s at %x>' % (
+ self.__class__.__name__,
+ string.join (status, ' '),
+ id(self)
+ )
+ except:
+ try:
+ ar = repr(self.addr)
+ except:
+ ar = 'no self.addr!'
+
+ return '<__repr__ (self) failed for object at %x (addr=%s)>' % (id(self),ar)
+
+ def add_channel (self, map=None):
+ #self.log_info ('adding channel %s' % self)
+ if map is None:
+ map=socket_map
+ map [self._fileno] = self
+
+ def del_channel (self, map=None):
+ fd = self._fileno
+ if map is None:
+ map=socket_map
+ if fd in map:
+ #self.log_info ('closing channel %d:%s' % (fd, self))
+ del map [fd]
+
+ def create_socket (self, family, type):
+ self.family_and_type = family, type
+ self.socket = socket.socket (family, type)
+ self.socket.setblocking(0)
+ self._fileno = self.socket.fileno()
+ self.add_channel()
+
+ def set_socket (self, sock, map=None):
+ self.__dict__['socket'] = sock
+ self._fileno = sock.fileno()
+ self.add_channel (map)
+
+ def set_reuse_addr (self):
+ # try to re-use a server port if possible
+ try:
+ self.socket.setsockopt (
+ socket.SOL_SOCKET, socket.SO_REUSEADDR,
+ self.socket.getsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1
+ )
+ except:
+ pass
+
+ # ==================================================
+ # predicates for select()
+ # these are used as filters for the lists of sockets
+ # to pass to select().
+ # ==================================================
+
+ def readable (self):
+ return 1
+
+ if os.name == 'mac':
+ # The macintosh will select a listening socket for
+ # write if you let it. What might this mean?
+ def writable (self):
+ return not self.accepting
+ else:
+ def writable (self):
+ return 1
+
+ # ==================================================
+ # socket object methods.
+ # ==================================================
+
+ def listen (self, num):
+ self.accepting = 1
+ if os.name == 'nt' and num > 5:
+ num = 1
+ return self.socket.listen (num)
+
+ def bind (self, addr):
+ self.addr = addr
+ return self.socket.bind (addr)
+
+ def connect (self, address):
+ self.connected = 0
+ try:
+ self.socket.connect (address)
+ except socket.error as why:
+ if why[0] in (EINPROGRESS, EALREADY, EWOULDBLOCK):
+ return
+ else:
+ raise socket.error(why[0])
+ self.connected = 1
+ self.handle_connect()
+
+ def accept (self):
+ try:
+ conn, addr = self.socket.accept()
+ return conn, addr
+ except socket.error as why:
+ if why[0] == EWOULDBLOCK:
+ pass
+ else:
+ raise socket.error(why[0])
+
+ def send (self, data):
+ try:
+ result = self.socket.send (data)
+ return result
+ except socket.error as why:
+ if why[0] == EWOULDBLOCK:
+ return 0
+ else:
+ raise socket.error(why[0])
+ return 0
+
+ def recv (self, buffer_size):
+ try:
+ data = self.socket.recv (buffer_size)
+ if not data:
+ # a closed connection is indicated by signaling
+ # a read condition, and having recv() return 0.
+ self.handle_close()
+ return ''
+ else:
+ return data
+ except socket.error as why:
+ # winsock sometimes throws ENOTCONN
+ if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
+ self.handle_close()
+ return ''
+ else:
+ raise socket.error(why[0])
+
+ def close (self):
+ self.del_channel()
+ self.socket.close()
+
+ # cheap inheritance, used to pass all other attribute
+ # references to the underlying socket object.
+ def __getattr__ (self, attr):
+ return getattr (self.socket, attr)
+
+    # log and log_info may be overridden to provide more sophisticated
+ # logging and warning methods. In general, log is for 'hit' logging
+ # and 'log_info' is for informational, warning and error logging.
+
+ def log (self, message):
+ sys.stderr.write ('log: %s\n' % str(message))
+
+ def log_info (self, message, type='info'):
+ if __debug__ or type != 'info':
+ print('%s: %s' % (type, message))
+
+ def handle_read_event (self):
+ if self.accepting:
+ # for an accepting socket, getting a read implies
+ # that we are connected
+ if not self.connected:
+ self.connected = 1
+ self.handle_accept()
+ elif not self.connected:
+ self.handle_connect()
+ self.connected = 1
+ self.handle_read()
+ else:
+ self.handle_read()
+
+ def handle_write_event (self):
+ # getting a write implies that we are connected
+ if not self.connected:
+ self.handle_connect()
+ self.connected = 1
+ self.handle_write()
+
+ def handle_expt_event (self):
+ self.handle_expt()
+
+ def handle_error (self):
+ (file,fun,line), t, v, tbinfo = compact_traceback()
+
+ # sometimes a user repr method will crash.
+ try:
+ self_repr = repr (self)
+ except:
+ self_repr = '<__repr__ (self) failed for object at %0x>' % id(self)
+
+ self.log_info (
+ 'uncaptured python exception, closing channel %s (%s:%s %s)' % (
+ self_repr,
+ t,
+ v,
+ tbinfo
+ ),
+ 'error'
+ )
+ self.close()
+
+ def handle_expt (self):
+ self.log_info ('unhandled exception', 'warning')
+
+ def handle_read (self):
+ self.log_info ('unhandled read event', 'warning')
+
+ def handle_write (self):
+ self.log_info ('unhandled write event', 'warning')
+
+ def handle_connect (self):
+ self.log_info ('unhandled connect event', 'warning')
+
+ def handle_accept (self):
+ self.log_info ('unhandled accept event', 'warning')
+
+ def handle_close (self):
+ self.log_info ('unhandled close event', 'warning')
+ self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
@@ -439,63 +439,63 @@ class dispatcher:
# ---------------------------------------------------------------------------
class dispatcher_with_send (dispatcher):
- def __init__ (self, sock=None):
- dispatcher.__init__ (self, sock)
- self.out_buffer = ''
+ def __init__ (self, sock=None):
+ dispatcher.__init__ (self, sock)
+ self.out_buffer = ''
- def initiate_send (self):
- num_sent = 0
- num_sent = dispatcher.send (self, self.out_buffer[:512])
- self.out_buffer = self.out_buffer[num_sent:]
+ def initiate_send (self):
+ num_sent = 0
+ num_sent = dispatcher.send (self, self.out_buffer[:512])
+ self.out_buffer = self.out_buffer[num_sent:]
- def handle_write (self):
- self.initiate_send()
+ def handle_write (self):
+ self.initiate_send()
- def writable (self):
- return (not self.connected) or len(self.out_buffer)
+ def writable (self):
+ return (not self.connected) or len(self.out_buffer)
- def send (self, data):
- if self.debug:
- self.log_info ('sending %s' % repr(data))
- self.out_buffer = self.out_buffer + data
- self.initiate_send()
+ def send (self, data):
+ if self.debug:
+ self.log_info ('sending %s' % repr(data))
+ self.out_buffer = self.out_buffer + data
+ self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback ():
- t,v,tb = sys.exc_info()
- tbinfo = []
- while 1:
- tbinfo.append ((
- tb.tb_frame.f_code.co_filename,
- tb.tb_frame.f_code.co_name,
- str(tb.tb_lineno)
- ))
- tb = tb.tb_next
- if not tb:
- break
-
- # just to be safe
- del tb
-
- file, function, line = tbinfo[-1]
- info = '[' + string.join (
- map (
- lambda x: string.join (x, '|'),
- tbinfo
- ),
- '] ['
- ) + ']'
- return (file, function, line), t, v, info
+ t,v,tb = sys.exc_info()
+ tbinfo = []
+ while 1:
+ tbinfo.append ((
+ tb.tb_frame.f_code.co_filename,
+ tb.tb_frame.f_code.co_name,
+ str(tb.tb_lineno)
+ ))
+ tb = tb.tb_next
+ if not tb:
+ break
+
+ # just to be safe
+ del tb
+
+ file, function, line = tbinfo[-1]
+ info = '[' + string.join (
+ map (
+ lambda x: string.join (x, '|'),
+ tbinfo
+ ),
+ '] ['
+ ) + ']'
+ return (file, function, line), t, v, info
def close_all (map=None):
- if map is None:
- map=socket_map
- for x in map.values():
- x.socket.close()
- map.clear()
+ if map is None:
+ map=socket_map
+ for x in map.values():
+ x.socket.close()
+ map.clear()
# Asynchronous File I/O:
#
@@ -512,42 +512,42 @@ def close_all (map=None):
import os
if os.name == 'posix':
- import fcntl
- import FCNTL
-
- class file_wrapper:
- # here we override just enough to make a file
- # look like a socket for the purposes of asyncore.
- def __init__ (self, fd):
- self.fd = fd
-
- def recv (self, *args):
- return apply (os.read, (self.fd,)+args)
-
- def send (self, *args):
- return apply (os.write, (self.fd,)+args)
-
- read = recv
- write = send
-
- def close (self):
- return os.close (self.fd)
-
- def fileno (self):
- return self.fd
-
- class file_dispatcher (dispatcher):
- def __init__ (self, fd):
- dispatcher.__init__ (self)
- self.connected = 1
- # set it to non-blocking mode
- flags = fcntl.fcntl (fd, FCNTL.F_GETFL, 0)
- flags = flags | FCNTL.O_NONBLOCK
- fcntl.fcntl (fd, FCNTL.F_SETFL, flags)
- self.set_file (fd)
-
- def set_file (self, fd):
- self._fileno = fd
- self.socket = file_wrapper (fd)
- self.add_channel()
+ import fcntl
+
+ class file_wrapper:
+ # here we override just enough to make a file
+ # look like a socket for the purposes of asyncore.
+ def __init__ (self, fd):
+ self.fd = fd
+
+ def recv (self, *args):
+ return apply (os.read, (self.fd,)+args)
+
+ def send (self, *args):
+ return apply (os.write, (self.fd,)+args)
+
+ read = recv
+ write = send
+
+ def close (self):
+ return os.close (self.fd)
+
+ def fileno (self):
+ return self.fd
+
+ class file_dispatcher (dispatcher):
+ def __init__ (self, fd):
+ dispatcher.__init__ (self)
+ self.connected = 1
+ # set it to non-blocking mode
+            flags = fcntl.fcntl (fd, fcntl.F_GETFL, 0)
+            flags = flags | os.O_NONBLOCK
+            fcntl.fcntl (fd, fcntl.F_SETFL, flags)
+ self.set_file (fd)
+
+ def set_file (self, fd):
+ self._fileno = fd
+ self.socket = file_wrapper (fd)
+ self.add_channel()
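The module docstring above gives the theory; in practice a server is one dispatcher that accepts connections and hands each to another, with asyncore.loop() multiplexing everything over select() (or poll, via use_poll=1). A self-contained sketch using only what this module defines (the port number is arbitrary):

    import socket
    import asyncore

    class echo_channel (asyncore.dispatcher_with_send):
        def handle_read (self):
            data = self.recv (4096)      # recv() returns '' once the peer has closed
            if data:
                self.send (data)         # buffered and flushed by dispatcher_with_send

    class echo_server (asyncore.dispatcher):
        def __init__ (self, port):
            asyncore.dispatcher.__init__ (self)
            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind (('', port))
            self.listen (5)

        def handle_accept (self):
            result = self.accept()
            if result is None:           # accept() above returns None on EWOULDBLOCK
                return
            conn, addr = result
            echo_channel (conn)

    echo_server (8000)
    asyncore.loop()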
diff --git a/demo/medusa/counter.py b/demo/medusa/counter.py
index 1f40704..cb09fd0 100644
--- a/demo/medusa/counter.py
+++ b/demo/medusa/counter.py
@@ -15,36 +15,36 @@
from __future__ import print_function
class counter:
- "general-purpose counter"
-
- def __init__ (self, initial_value=0):
- self.value = initial_value
-
- def increment (self, delta=1):
- result = self.value
- try:
- self.value = self.value + delta
- except OverflowError:
- self.value = long(self.value) + delta
- return result
-
- def decrement (self, delta=1):
- result = self.value
- try:
- self.value = self.value - delta
- except OverflowError:
- self.value = long(self.value) - delta
- return result
-
- def as_long (self):
- return long(self.value)
-
- def __nonzero__ (self):
- return self.value != 0
-
- def __repr__ (self):
- return '<counter value=%s at %x>' % (self.value, id(self))
-
- def __str__ (self):
- return str(long(self.value))
- #return str(long(self.value))[:-1]
+ "general-purpose counter"
+
+ def __init__ (self, initial_value=0):
+ self.value = initial_value
+
+ def increment (self, delta=1):
+ result = self.value
+ try:
+ self.value = self.value + delta
+ except OverflowError:
+ self.value = long(self.value) + delta
+ return result
+
+ def decrement (self, delta=1):
+ result = self.value
+ try:
+ self.value = self.value - delta
+ except OverflowError:
+ self.value = long(self.value) - delta
+ return result
+
+ def as_long (self):
+ return long(self.value)
+
+ def __nonzero__ (self):
+ return self.value != 0
+
+ def __repr__ (self):
+ return '<counter value=%s at %x>' % (self.value, id(self))
+
+ def __str__ (self):
+ return str(long(self.value))
+ #return str(long(self.value))[:-1]
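The OverflowError handling above is a pre-Python-2.2 relic: old interpreters raised OverflowError on int arithmetic and the class retried with long; later 2.x promotes automatically, so the counter keeps going past sys.maxint either way. For illustration (assuming this module is importable as counter):

    import sys
    from counter import counter

    c = counter (sys.maxint)
    c.increment()        # value is now one past sys.maxint, as a long
    print(c)             # prints the long without a trailing 'L' (str, not repr)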
diff --git a/demo/medusa/default_handler.py b/demo/medusa/default_handler.py
index 1d13ad3..02f44a1 100644
--- a/demo/medusa/default_handler.py
+++ b/demo/medusa/default_handler.py
@@ -1,8 +1,8 @@
# -*- Mode: Python; tab-width: 4 -*-
#
-# Author: Sam Rushing <rushing@nightmare.com>
-# Copyright 1997 by Sam Rushing
-# All Rights Reserved.
+# Author: Sam Rushing <rushing@nightmare.com>
+# Copyright 1997 by Sam Rushing
+# All Rights Reserved.
#
# standard python modules
@@ -39,177 +39,177 @@ from counter import counter
class default_handler:
- valid_commands = ['get', 'head']
-
- IDENT = 'Default HTTP Request Handler'
-
- # Pathnames that are tried when a URI resolves to a directory name
- directory_defaults = [
- 'index.html',
- 'default.html'
- ]
-
- default_file_producer = producers.file_producer
-
- def __init__ (self, filesystem):
- self.filesystem = filesystem
- # count total hits
- self.hit_counter = counter()
- # count file deliveries
- self.file_counter = counter()
- # count cache hits
- self.cache_counter = counter()
-
- hit_counter = 0
-
- def __repr__ (self):
- return '<%s (%s hits) at %x>' % (
- self.IDENT,
- self.hit_counter,
- id (self)
- )
-
- # always match, since this is a default
- def match (self, request):
- return 1
-
- # handle a file request, with caching.
-
- def handle_request (self, request):
-
- if request.command not in self.valid_commands:
- request.error (400) # bad request
- return
-
- self.hit_counter.increment()
-
- path, params, query, fragment = request.split_uri()
-
- if '%' in path:
- path = unquote (path)
-
- # strip off all leading slashes
- while path and path[0] == '/':
- path = path[1:]
-
- if self.filesystem.isdir (path):
- if path and path[-1] != '/':
- request['Location'] = 'http://%s/%s/' % (
- request.channel.server.server_name,
- path
- )
- request.error (301)
- return
-
- # we could also generate a directory listing here,
- # may want to move this into another method for that
- # purpose
- found = 0
- if path and path[-1] != '/':
- path = path + '/'
- for default in self.directory_defaults:
- p = path + default
- if self.filesystem.isfile (p):
- path = p
- found = 1
- break
- if not found:
- request.error (404) # Not Found
- return
-
- elif not self.filesystem.isfile (path):
- request.error (404) # Not Found
- return
-
- file_length = self.filesystem.stat (path)[stat.ST_SIZE]
-
- ims = get_header_match (IF_MODIFIED_SINCE, request.header)
-
- length_match = 1
- if ims:
- length = ims.group (4)
- if length:
- try:
- length = string.atoi (length)
- if length != file_length:
- length_match = 0
- except:
- pass
-
- ims_date = 0
-
- if ims:
- ims_date = http_date.parse_http_date (ims.group (1))
-
- try:
- mtime = self.filesystem.stat (path)[stat.ST_MTIME]
- except:
- request.error (404)
- return
-
- if length_match and ims_date:
- if mtime <= ims_date:
- request.reply_code = 304
- request.done()
- self.cache_counter.increment()
- return
- try:
- file = self.filesystem.open (path, 'rb')
- except IOError:
- request.error (404)
- return
-
- request['Last-Modified'] = http_date.build_http_date (mtime)
- request['Content-Length'] = file_length
- self.set_content_type (path, request)
-
- if request.command == 'get':
- request.push (self.default_file_producer (file))
-
- self.file_counter.increment()
- request.done()
-
- def set_content_type (self, path, request):
- ext = string.lower (get_extension (path))
- if ext in mime_type_table.content_type_map:
- request['Content-Type'] = mime_type_table.content_type_map[ext]
- else:
- # TODO: test a chunk off the front of the file for 8-bit
- # characters, and use application/octet-stream instead.
- request['Content-Type'] = 'text/plain'
-
- def status (self):
- return producers.simple_producer (
- '<li>%s' % status_handler.html_repr (self)
- + '<ul>'
- + ' <li><b>Total Hits:</b> %s' % self.hit_counter
- + ' <li><b>Files Delivered:</b> %s' % self.file_counter
- + ' <li><b>Cache Hits:</b> %s' % self.cache_counter
- + '</ul>'
- )
+ valid_commands = ['get', 'head']
+
+ IDENT = 'Default HTTP Request Handler'
+
+ # Pathnames that are tried when a URI resolves to a directory name
+ directory_defaults = [
+ 'index.html',
+ 'default.html'
+ ]
+
+ default_file_producer = producers.file_producer
+
+ def __init__ (self, filesystem):
+ self.filesystem = filesystem
+ # count total hits
+ self.hit_counter = counter()
+ # count file deliveries
+ self.file_counter = counter()
+ # count cache hits
+ self.cache_counter = counter()
+
+ hit_counter = 0
+
+ def __repr__ (self):
+ return '<%s (%s hits) at %x>' % (
+ self.IDENT,
+ self.hit_counter,
+ id (self)
+ )
+
+ # always match, since this is a default
+ def match (self, request):
+ return 1
+
+ # handle a file request, with caching.
+
+ def handle_request (self, request):
+
+ if request.command not in self.valid_commands:
+ request.error (400) # bad request
+ return
+
+ self.hit_counter.increment()
+
+ path, params, query, fragment = request.split_uri()
+
+ if '%' in path:
+ path = unquote (path)
+
+ # strip off all leading slashes
+ while path and path[0] == '/':
+ path = path[1:]
+
+ if self.filesystem.isdir (path):
+ if path and path[-1] != '/':
+ request['Location'] = 'http://%s/%s/' % (
+ request.channel.server.server_name,
+ path
+ )
+ request.error (301)
+ return
+
+ # we could also generate a directory listing here,
+ # may want to move this into another method for that
+ # purpose
+ found = 0
+ if path and path[-1] != '/':
+ path = path + '/'
+ for default in self.directory_defaults:
+ p = path + default
+ if self.filesystem.isfile (p):
+ path = p
+ found = 1
+ break
+ if not found:
+ request.error (404) # Not Found
+ return
+
+ elif not self.filesystem.isfile (path):
+ request.error (404) # Not Found
+ return
+
+ file_length = self.filesystem.stat (path)[stat.ST_SIZE]
+
+ ims = get_header_match (IF_MODIFIED_SINCE, request.header)
+
+ length_match = 1
+ if ims:
+ length = ims.group (4)
+ if length:
+ try:
+ length = string.atoi (length)
+ if length != file_length:
+ length_match = 0
+ except:
+ pass
+
+ ims_date = 0
+
+ if ims:
+ ims_date = http_date.parse_http_date (ims.group (1))
+
+ try:
+ mtime = self.filesystem.stat (path)[stat.ST_MTIME]
+ except:
+ request.error (404)
+ return
+
+ if length_match and ims_date:
+ if mtime <= ims_date:
+ request.reply_code = 304
+ request.done()
+ self.cache_counter.increment()
+ return
+ try:
+ file = self.filesystem.open (path, 'rb')
+ except IOError:
+ request.error (404)
+ return
+
+ request['Last-Modified'] = http_date.build_http_date (mtime)
+ request['Content-Length'] = file_length
+ self.set_content_type (path, request)
+
+ if request.command == 'get':
+ request.push (self.default_file_producer (file))
+
+ self.file_counter.increment()
+ request.done()
+
+ def set_content_type (self, path, request):
+ ext = string.lower (get_extension (path))
+ if ext in mime_type_table.content_type_map:
+ request['Content-Type'] = mime_type_table.content_type_map[ext]
+ else:
+ # TODO: test a chunk off the front of the file for 8-bit
+ # characters, and use application/octet-stream instead.
+ request['Content-Type'] = 'text/plain'
+
+ def status (self):
+ return producers.simple_producer (
+ '<li>%s' % status_handler.html_repr (self)
+ + '<ul>'
+ + ' <li><b>Total Hits:</b> %s' % self.hit_counter
+ + ' <li><b>Files Delivered:</b> %s' % self.file_counter
+ + ' <li><b>Cache Hits:</b> %s' % self.cache_counter
+ + '</ul>'
+ )
# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
# to this header. I suppose its purpose is to avoid the overhead
# of parsing dates...
IF_MODIFIED_SINCE = re.compile (
- 'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
- re.IGNORECASE
- )
+ 'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
+ re.IGNORECASE
+ )
USER_AGENT = re.compile ('User-Agent: (.*)', re.IGNORECASE)
CONTENT_TYPE = re.compile (
- r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
- re.IGNORECASE
- )
+ r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
+ re.IGNORECASE
+ )
get_header = http_server.get_header
get_header_match = http_server.get_header_match
def get_extension (path):
- dirsep = string.rfind (path, '/')
- dotsep = string.rfind (path, '.')
- if dotsep > dirsep:
- return path[dotsep+1:]
- else:
- return ''
+ dirsep = string.rfind (path, '/')
+ dotsep = string.rfind (path, '.')
+ if dotsep > dirsep:
+ return path[dotsep+1:]
+ else:
+ return ''
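The 304 logic above honours the non-standard '; length=nnnn' suffix the trailing comment mentions: group 1 of IF_MODIFIED_SINCE captures the date and group 4 the advertised length, and handle_request() only short-circuits with 304 when both the length and the mtime check out. Exercising the regex on its own (the header value is illustrative):

    import re

    IF_MODIFIED_SINCE = re.compile (
        'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
        re.IGNORECASE
        )

    m = IF_MODIFIED_SINCE.match (
        'If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=3766')
    print(m.group (1))    # Sat, 29 Oct 1994 19:43:31 GMT
    print(m.group (4))    # 3766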
diff --git a/demo/medusa/filesys.py b/demo/medusa/filesys.py
index 7448e70..2be39ce 100644
--- a/demo/medusa/filesys.py
+++ b/demo/medusa/filesys.py
@@ -1,6 +1,6 @@
# -*- Mode: Python; tab-width: 4 -*-
-# $Id$
-# Author: Sam Rushing <rushing@nightmare.com>
+# $Id$
+# Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
#
@@ -17,49 +17,49 @@
from __future__ import print_function
class abstract_filesystem:
- def __init__ (self):
- pass
+ def __init__ (self):
+ pass
- def current_directory (self):
- "Return a string representing the current directory."
- pass
+ def current_directory (self):
+ "Return a string representing the current directory."
+ pass
- def listdir (self, path, long=0):
- """Return a listing of the directory at 'path' The empty string
- indicates the current directory. If 'long' is set, instead
- return a list of (name, stat_info) tuples
- """
- pass
+ """Return a listing of the directory at 'path' The empty string
+ indicates the current directory. If 'long' is set, instead
+ return a list of (name, stat_info) tuples
+ """
+ pass
- def open (self, path, mode):
- "Return an open file object"
- pass
+ def open (self, path, mode):
+ "Return an open file object"
+ pass
- def stat (self, path):
- "Return the equivalent of os.stat() on the given path."
- pass
+ def stat (self, path):
+ "Return the equivalent of os.stat() on the given path."
+ pass
- def isdir (self, path):
- "Does the path represent a directory?"
- pass
+ def isdir (self, path):
+ "Does the path represent a directory?"
+ pass
- def isfile (self, path):
- "Does the path represent a plain file?"
- pass
+ def isfile (self, path):
+ "Does the path represent a plain file?"
+ pass
- def cwd (self, path):
- "Change the working directory."
- pass
+ def cwd (self, path):
+ "Change the working directory."
+ pass
- def cdup (self):
- "Change to the parent of the current directory."
- pass
+ def cdup (self):
+ "Change to the parent of the current directory."
+ pass
- def longify (self, path):
- """Return a 'long' representation of the filename
- [for the output of the LIST command]"""
- pass
+ def longify (self, path):
+ """Return a 'long' representation of the filename
+ [for the output of the LIST command]"""
+ pass
# standard wrapper around a unix-like filesystem, with a 'false root'
# capability.
@@ -77,251 +77,251 @@ import stat
import string
def safe_stat (path):
- try:
- return (path, os.stat (path))
- except:
- return None
+ try:
+ return (path, os.stat (path))
+ except:
+ return None
import regsub
import glob
class os_filesystem:
- path_module = os.path
-
- # set this to zero if you want to disable pathname globbing.
- # [we currently don't glob, anyway]
- do_globbing = 1
-
- def __init__ (self, root, wd='/'):
- self.root = root
- self.wd = wd
-
- def current_directory (self):
- return self.wd
-
- def isfile (self, path):
- p = self.normalize (self.path_module.join (self.wd, path))
- return self.path_module.isfile (self.translate(p))
-
- def isdir (self, path):
- p = self.normalize (self.path_module.join (self.wd, path))
- return self.path_module.isdir (self.translate(p))
-
- def cwd (self, path):
- p = self.normalize (self.path_module.join (self.wd, path))
- translated_path = self.translate(p)
- if not self.path_module.isdir (translated_path):
- return 0
- else:
- old_dir = os.getcwd()
- # temporarily change to that directory, in order
- # to see if we have permission to do so.
- try:
- can = 0
- try:
- os.chdir (translated_path)
- can = 1
- self.wd = p
- except:
- pass
- finally:
- if can:
- os.chdir (old_dir)
- return can
-
- def cdup (self):
- return self.cwd ('..')
-
- def listdir (self, path, long=0):
- p = self.translate (path)
- # I think we should glob, but limit it to the current
- # directory only.
- ld = os.listdir (p)
- if not long:
- return list_producer (ld, 0, None)
- else:
- old_dir = os.getcwd()
- try:
- os.chdir (p)
- # if os.stat fails we ignore that file.
- result = filter (None, map (safe_stat, ld))
- finally:
- os.chdir (old_dir)
- return list_producer (result, 1, self.longify)
-
- # TODO: implement a cache w/timeout for stat()
- def stat (self, path):
- p = self.translate (path)
- return os.stat (p)
-
- def open (self, path, mode):
- p = self.translate (path)
- return open (p, mode)
-
- def unlink (self, path):
- p = self.translate (path)
- return os.unlink (p)
-
- def mkdir (self, path):
- p = self.translate (path)
- return os.mkdir (p)
-
- def rmdir (self, path):
- p = self.translate (path)
- return os.rmdir (p)
-
- # utility methods
- def normalize (self, path):
- # watch for the ever-sneaky '/+' path element
- path = regsub.gsub ('/+', '/', path)
- p = self.path_module.normpath (path)
- # remove 'dangling' cdup's.
- if len(p) > 2 and p[:3] == '/..':
- p = '/'
- return p
-
- def translate (self, path):
- # we need to join together three separate
- # path components, and do it safely.
- # <real_root>/<current_directory>/<path>
- # use the operating system's path separator.
- path = string.join (string.split (path, '/'), os.sep)
- p = self.normalize (self.path_module.join (self.wd, path))
- p = self.normalize (self.path_module.join (self.root, p[1:]))
- return p
-
- def longify (self, (path, stat_info)):
- return unix_longify (path, stat_info)
-
- def __repr__ (self):
- return '<unix-style fs root:%s wd:%s>' % (
- self.root,
- self.wd
- )
+ path_module = os.path
+
+ # set this to zero if you want to disable pathname globbing.
+ # [we currently don't glob, anyway]
+ do_globbing = 1
+
+ def __init__ (self, root, wd='/'):
+ self.root = root
+ self.wd = wd
+
+ def current_directory (self):
+ return self.wd
+
+ def isfile (self, path):
+ p = self.normalize (self.path_module.join (self.wd, path))
+ return self.path_module.isfile (self.translate(p))
+
+ def isdir (self, path):
+ p = self.normalize (self.path_module.join (self.wd, path))
+ return self.path_module.isdir (self.translate(p))
+
+ def cwd (self, path):
+ p = self.normalize (self.path_module.join (self.wd, path))
+ translated_path = self.translate(p)
+ if not self.path_module.isdir (translated_path):
+ return 0
+ else:
+ old_dir = os.getcwd()
+ # temporarily change to that directory, in order
+ # to see if we have permission to do so.
+ try:
+ can = 0
+ try:
+ os.chdir (translated_path)
+ can = 1
+ self.wd = p
+ except:
+ pass
+ finally:
+ if can:
+ os.chdir (old_dir)
+ return can
+
+ def cdup (self):
+ return self.cwd ('..')
+
+ def listdir (self, path, long=0):
+ p = self.translate (path)
+ # I think we should glob, but limit it to the current
+ # directory only.
+ ld = os.listdir (p)
+ if not long:
+ return list_producer (ld, 0, None)
+ else:
+ old_dir = os.getcwd()
+ try:
+ os.chdir (p)
+ # if os.stat fails we ignore that file.
+ result = filter (None, map (safe_stat, ld))
+ finally:
+ os.chdir (old_dir)
+ return list_producer (result, 1, self.longify)
+
+ # TODO: implement a cache w/timeout for stat()
+ def stat (self, path):
+ p = self.translate (path)
+ return os.stat (p)
+
+ def open (self, path, mode):
+ p = self.translate (path)
+ return open (p, mode)
+
+ def unlink (self, path):
+ p = self.translate (path)
+ return os.unlink (p)
+
+ def mkdir (self, path):
+ p = self.translate (path)
+ return os.mkdir (p)
+
+ def rmdir (self, path):
+ p = self.translate (path)
+ return os.rmdir (p)
+
+ # utility methods
+ def normalize (self, path):
+ # watch for the ever-sneaky '/+' path element
+ path = regsub.gsub ('/+', '/', path)
+ p = self.path_module.normpath (path)
+ # remove 'dangling' cdup's.
+ if len(p) > 2 and p[:3] == '/..':
+ p = '/'
+ return p
+
+ def translate (self, path):
+ # we need to join together three separate
+ # path components, and do it safely.
+ # <real_root>/<current_directory>/<path>
+ # use the operating system's path separator.
+ path = string.join (string.split (path, '/'), os.sep)
+ p = self.normalize (self.path_module.join (self.wd, path))
+ p = self.normalize (self.path_module.join (self.root, p[1:]))
+ return p
+
+ def longify (self, (path, stat_info)):
+ return unix_longify (path, stat_info)
+
+ def __repr__ (self):
+ return '<unix-style fs root:%s wd:%s>' % (
+ self.root,
+ self.wd
+ )
if os.name == 'posix':
- class unix_filesystem (os_filesystem):
- pass
-
- class schizophrenic_unix_filesystem (os_filesystem):
- PROCESS_UID = os.getuid()
- PROCESS_EUID = os.geteuid()
- PROCESS_GID = os.getgid()
- PROCESS_EGID = os.getegid()
-
- def __init__ (self, root, wd='/', persona=(None, None)):
- os_filesystem.__init__ (self, root, wd)
- self.persona = persona
-
- def become_persona (self):
- if self.persona is not (None, None):
- uid, gid = self.persona
- # the order of these is important!
- os.setegid (gid)
- os.seteuid (uid)
-
- def become_nobody (self):
- if self.persona is not (None, None):
- os.seteuid (self.PROCESS_UID)
- os.setegid (self.PROCESS_GID)
-
- # cwd, cdup, open, listdir
- def cwd (self, path):
- try:
- self.become_persona()
- return os_filesystem.cwd (self, path)
- finally:
- self.become_nobody()
-
- def cdup (self, path):
- try:
- self.become_persona()
- return os_filesystem.cdup (self)
- finally:
- self.become_nobody()
-
- def open (self, filename, mode):
- try:
- self.become_persona()
- return os_filesystem.open (self, filename, mode)
- finally:
- self.become_nobody()
-
- def listdir (self, path, long=0):
- try:
- self.become_persona()
- return os_filesystem.listdir (self, path, long)
- finally:
- self.become_nobody()
+ class unix_filesystem (os_filesystem):
+ pass
+
+ class schizophrenic_unix_filesystem (os_filesystem):
+ PROCESS_UID = os.getuid()
+ PROCESS_EUID = os.geteuid()
+ PROCESS_GID = os.getgid()
+ PROCESS_EGID = os.getegid()
+
+ def __init__ (self, root, wd='/', persona=(None, None)):
+ os_filesystem.__init__ (self, root, wd)
+ self.persona = persona
+
+ def become_persona (self):
+            if self.persona != (None, None):
+ uid, gid = self.persona
+ # the order of these is important!
+ os.setegid (gid)
+ os.seteuid (uid)
+
+ def become_nobody (self):
+            if self.persona != (None, None):
+ os.seteuid (self.PROCESS_UID)
+ os.setegid (self.PROCESS_GID)
+
+ # cwd, cdup, open, listdir
+ def cwd (self, path):
+ try:
+ self.become_persona()
+ return os_filesystem.cwd (self, path)
+ finally:
+ self.become_nobody()
+
+ def cdup (self, path):
+ try:
+ self.become_persona()
+ return os_filesystem.cdup (self)
+ finally:
+ self.become_nobody()
+
+ def open (self, filename, mode):
+ try:
+ self.become_persona()
+ return os_filesystem.open (self, filename, mode)
+ finally:
+ self.become_nobody()
+
+ def listdir (self, path, long=0):
+ try:
+ self.become_persona()
+ return os_filesystem.listdir (self, path, long)
+ finally:
+ self.become_nobody()
# This hasn't been very reliable across different platforms.
# maybe think about a separate 'directory server'.
#
-# import posixpath
-# import fcntl
-# import FCNTL
-# import select
-# import asyncore
+# import posixpath
+# import fcntl
+# import FCNTL
+# import select
+# import asyncore
#
-# # pipes /bin/ls for directory listings.
-# class unix_filesystem (os_filesystem):
-# pass
-# path_module = posixpath
+# # pipes /bin/ls for directory listings.
+# class unix_filesystem (os_filesystem):
+# pass
+# path_module = posixpath
#
-# def listdir (self, path, long=0):
-# p = self.translate (path)
-# if not long:
-# return list_producer (os.listdir (p), 0, None)
-# else:
-# command = '/bin/ls -l %s' % p
-# print('opening pipe to "%s"' % command)
-# fd = os.popen (command, 'rt')
-# return pipe_channel (fd)
+# def listdir (self, path, long=0):
+# p = self.translate (path)
+# if not long:
+# return list_producer (os.listdir (p), 0, None)
+# else:
+# command = '/bin/ls -l %s' % p
+# print('opening pipe to "%s"' % command)
+# fd = os.popen (command, 'rt')
+# return pipe_channel (fd)
#
-# # this is both a dispatcher, _and_ a producer
-# class pipe_channel (asyncore.file_dispatcher):
-# buffer_size = 4096
+# # this is both a dispatcher, _and_ a producer
+# class pipe_channel (asyncore.file_dispatcher):
+# buffer_size = 4096
#
-# def __init__ (self, fd):
-# asyncore.file_dispatcher.__init__ (self, fd)
-# self.fd = fd
-# self.done = 0
-# self.data = ''
+# def __init__ (self, fd):
+# asyncore.file_dispatcher.__init__ (self, fd)
+# self.fd = fd
+# self.done = 0
+# self.data = ''
#
-# def handle_read (self):
-# if len (self.data) < self.buffer_size:
-# self.data = self.data + self.fd.read (self.buffer_size)
-# #print('%s.handle_read() => len(self.data) == %d' % (self, len(self.data)))
+# def handle_read (self):
+# if len (self.data) < self.buffer_size:
+# self.data = self.data + self.fd.read (self.buffer_size)
+# #print('%s.handle_read() => len(self.data) == %d' % (self, len(self.data)))
#
-# def handle_expt (self):
-# #print('%s.handle_expt()' % self)
-# self.done = 1
+# def handle_expt (self):
+# #print('%s.handle_expt()' % self)
+# self.done = 1
#
-# def ready (self):
-# #print('%s.ready() => %d' % (self, len(self.data)))
-# return ((len (self.data) > 0) or self.done)
+# def ready (self):
+# #print('%s.ready() => %d' % (self, len(self.data)))
+# return ((len (self.data) > 0) or self.done)
#
-# def more (self):
-# if self.data:
-# r = self.data
-# self.data = ''
-# elif self.done:
-# self.close()
-# self.downstream.finished()
-# r = ''
-# else:
-# r = None
-# #print('%s.more() => %s' % (self, (r and len(r))))
-# return r
+# def more (self):
+# if self.data:
+# r = self.data
+# self.data = ''
+# elif self.done:
+# self.close()
+# self.downstream.finished()
+# r = ''
+# else:
+# r = None
+# #print('%s.more() => %s' % (self, (r and len(r))))
+# return r
# For the 'real' root, we could obtain a list of drives, and then
# use that. Doesn't win32 provide such a 'real' filesystem?
# [yes, I think something like this "\\.\c\windows"]
class msdos_filesystem (os_filesystem):
- def longify (self, (path, stat_info)):
- return msdos_longify (path, stat_info)
+ def longify (self, (path, stat_info)):
+ return msdos_longify (path, stat_info)
# A merged filesystem will let you plug other filesystems together.
# We really need the equivalent of a 'mount' capability - this seems
@@ -332,81 +332,81 @@ class msdos_filesystem (os_filesystem):
# with the http server.
class merged_filesystem:
- def __init__ (self, *fsys):
- pass
+ def __init__ (self, *fsys):
+ pass
# this matches the output of NT's ftp server (when in
# MSDOS mode) exactly.
def msdos_longify (file, stat_info):
- if stat.S_ISDIR (stat_info[stat.ST_MODE]):
- dir = '<DIR>'
- else:
- dir = ' '
- date = msdos_date (stat_info[stat.ST_MTIME])
- return '%s %s %8d %s' % (
- date,
- dir,
- stat_info[stat.ST_SIZE],
- file
- )
+ if stat.S_ISDIR (stat_info[stat.ST_MODE]):
+ dir = '<DIR>'
+ else:
+ dir = ' '
+ date = msdos_date (stat_info[stat.ST_MTIME])
+ return '%s %s %8d %s' % (
+ date,
+ dir,
+ stat_info[stat.ST_SIZE],
+ file
+ )
def msdos_date (t):
- try:
- info = time.gmtime (t)
- except:
- info = time.gmtime (0)
- # year, month, day, hour, minute, second, ...
- if info[3] > 11:
- merid = 'PM'
- info[3] = info[3] - 12
- else:
- merid = 'AM'
- return '%02d-%02d-%02d %02d:%02d%s' % (
- info[1],
- info[2],
- info[0]%100,
- info[3],
- info[4],
- merid
- )
+ try:
+ info = list(time.gmtime (t))
+ except:
+ info = list(time.gmtime (0))
+ # (list() because struct_time is immutable and info[3] is adjusted below)
+ # year, month, day, hour, minute, second, ...
+ if info[3] > 11:
+ merid = 'PM'
+ info[3] = info[3] - 12
+ else:
+ merid = 'AM'
+ return '%02d-%02d-%02d %02d:%02d%s' % (
+ info[1],
+ info[2],
+ info[0]%100,
+ info[3],
+ info[4],
+ merid
+ )
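# For example, msdos_date(0) renders the Unix epoch as '01-01-70 00:00AM':
# month-day-year with a 12-hour clock, matching NT's MSDOS-mode listing.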
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mode_table = {
- '0':'---',
- '1':'--x',
- '2':'-w-',
- '3':'-wx',
- '4':'r--',
- '5':'r-x',
- '6':'rw-',
- '7':'rwx'
- }
+ '0':'---',
+ '1':'--x',
+ '2':'-w-',
+ '3':'-wx',
+ '4':'r--',
+ '5':'r-x',
+ '6':'rw-',
+ '7':'rwx'
+ }
import time
def unix_longify (file, stat_info):
- # for now, only pay attention to the lower bits
- mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
- mode = string.join (map (lambda x: mode_table[x], mode), '')
- if stat.S_ISDIR (stat_info[stat.ST_MODE]):
- dirchar = 'd'
- else:
- dirchar = '-'
- date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
- return '%s%s %3d %-8d %-8d %8d %s %s' % (
- dirchar,
- mode,
- stat_info[stat.ST_NLINK],
- stat_info[stat.ST_UID],
- stat_info[stat.ST_GID],
- stat_info[stat.ST_SIZE],
- date,
- file
- )
-
+ # for now, only pay attention to the lower bits
+ mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
+ mode = string.join (map (lambda x: mode_table[x], mode), '')
+ if stat.S_ISDIR (stat_info[stat.ST_MODE]):
+ dirchar = 'd'
+ else:
+ dirchar = '-'
+ date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
+ return '%s%s %3d %-8d %-8d %8d %s %s' % (
+ dirchar,
+ mode,
+ stat_info[stat.ST_NLINK],
+ stat_info[stat.ST_UID],
+ stat_info[stat.ST_GID],
+ stat_info[stat.ST_SIZE],
+ date,
+ file
+ )
+
# Emulate the unix 'ls' command's date field.
# it has two formats - if the date is more than 180
# days in the past, then it's like this:
@@ -415,54 +415,54 @@ def unix_longify (file, stat_info):
# Oct 19 17:33
def ls_date (now, t):
- try:
- info = time.gmtime (t)
- except:
- info = time.gmtime (0)
- # 15,600,000 == 86,400 * 180
- if (now - t) > 15600000:
- return '%s %2d %d' % (
- months[info[1]-1],
- info[2],
- info[0]
- )
- else:
- return '%s %2d %02d:%02d' % (
- months[info[1]-1],
- info[2],
- info[3],
- info[4]
- )
+ try:
+ info = time.gmtime (t)
+ except:
+ info = time.gmtime (0)
+ # cutoff of roughly 180 days: 86,400 * 180 == 15,552,000 (15,600,000 used here)
+ if (now - t) > 15600000:
+ return '%s %2d %d' % (
+ months[info[1]-1],
+ info[2],
+ info[0]
+ )
+ else:
+ return '%s %2d %02d:%02d' % (
+ months[info[1]-1],
+ info[2],
+ info[3],
+ info[4]
+ )
# ===========================================================================
# Producers
# ===========================================================================
class list_producer:
- def __init__ (self, file_list, long, longify):
- self.file_list = file_list
- self.long = long
- self.longify = longify
- self.done = 0
-
- def ready (self):
- if len(self.file_list):
- return 1
- else:
- if not self.done:
- self.done = 1
- return 0
- return (len(self.file_list) > 0)
-
- # this should do a pushd/popd
- def more (self):
- if not self.file_list:
- return ''
- else:
- # do a few at a time
- bunch = self.file_list[:50]
- if self.long:
- bunch = map (self.longify, bunch)
- self.file_list = self.file_list[50:]
- return string.joinfields (bunch, '\r\n') + '\r\n'
+ def __init__ (self, file_list, long, longify):
+ self.file_list = file_list
+ self.long = long
+ self.longify = longify
+ self.done = 0
+
+ def ready (self):
+ if len(self.file_list):
+ return 1
+ else:
+ if not self.done:
+ self.done = 1
+ return 0
+ return (len(self.file_list) > 0)
+
+ # this should do a pushd/popd
+ def more (self):
+ if not self.file_list:
+ return ''
+ else:
+ # do a few at a time
+ bunch = self.file_list[:50]
+ if self.long:
+ bunch = map (self.longify, bunch)
+ self.file_list = self.file_list[50:]
+ return string.joinfields (bunch, '\r\n') + '\r\n'
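#
# A brief usage sketch (illustrative, not part of this diff); it assumes the
# long-listing file_list holds (name, stat_info) pairs, as
# os_filesystem.listdir produces, and drains the producer the way a channel
# would -- 50 CRLF-joined lines per more() call:
#
#     import os
#     pairs = [(name, os.stat (name)) for name in os.listdir ('.')]
#     lp = list_producer (pairs, 1, lambda pair: unix_longify (pair[0], pair[1]))
#     while 1:
#         chunk = lp.more()
#         if not chunk:
#             break
#         print(chunk)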
diff --git a/demo/medusa/ftp_server.py b/demo/medusa/ftp_server.py
index 25e5ee2..7521857 100644
--- a/demo/medusa/ftp_server.py
+++ b/demo/medusa/ftp_server.py
@@ -1,12 +1,12 @@
# -*- Mode: Python; tab-width: 4 -*-
-# Author: Sam Rushing <rushing@nightmare.com>
-# Copyright 1996-2000 by Sam Rushing
-# All Rights Reserved.
+# Author: Sam Rushing <rushing@nightmare.com>
+# Copyright 1996-2000 by Sam Rushing
+# All Rights Reserved.
#
# An extensible, configurable, asynchronous FTP server.
-#
+#
# All socket I/O is non-blocking, however file I/O is currently
# blocking. Eventually file I/O may be made non-blocking, too, if it
# seems necessary. Currently the only CPU-intensive operation is
@@ -59,580 +59,580 @@ import string
class ftp_channel (asynchat.async_chat):
- # defaults for a reliable __repr__
- addr = ('unknown','0')
-
- # unset this in a derived class in order
- # to enable the commands in 'self.write_commands'
- read_only = 1
- write_commands = ['appe','dele','mkd','rmd','rnfr','rnto','stor','stou']
-
- restart_position = 0
-
- # comply with (possibly troublesome) RFC959 requirements
- # This is necessary to correctly run an active data connection
- # through a firewall that triggers on the source port (expected
- # to be 'L-1', or 20 in the normal case).
- bind_local_minus_one = 0
-
- def __init__ (self, server, conn, addr):
- self.server = server
- self.current_mode = 'a'
- self.addr = addr
- asynchat.async_chat.__init__ (self, conn)
- self.set_terminator ('\r\n')
-
- # client data port. Defaults to 'the same as the control connection'.
- self.client_addr = (addr[0], 21)
-
- self.client_dc = None
- self.in_buffer = ''
- self.closing = 0
- self.passive_acceptor = None
- self.passive_connection = None
- self.filesystem = None
- self.authorized = 0
- # send the greeting
- self.respond (
- '220 %s FTP server (Medusa Async V%s [experimental]) ready.' % (
- self.server.hostname,
- VERSION
- )
- )
-
-# def __del__ (self):
-# print('ftp_channel.__del__()')
-
- # --------------------------------------------------
- # async-library methods
- # --------------------------------------------------
-
- def handle_expt (self):
- # this is handled below. not sure what I could
- # do here to make that code less kludgish.
- pass
-
- def collect_incoming_data (self, data):
- self.in_buffer = self.in_buffer + data
- if len(self.in_buffer) > 4096:
- # silently truncate really long lines
- # (possible denial-of-service attack)
- self.in_buffer = ''
-
- def found_terminator (self):
-
- line = self.in_buffer
-
- if not len(line):
- return
-
- sp = string.find (line, ' ')
- if sp != -1:
- line = [line[:sp], line[sp+1:]]
- else:
- line = [line]
-
- command = string.lower (line[0])
- # watch especially for 'urgent' abort commands.
- if string.find (command, 'abor') != -1:
- # strip off telnet sync chars and the like...
- while command and command[0] not in string.letters:
- command = command[1:]
- fun_name = 'cmd_%s' % command
- if command != 'pass':
- self.log ('<== %s' % repr(self.in_buffer)[1:-1])
- else:
- self.log ('<== %s' % line[0]+' <password>')
- self.in_buffer = ''
- if not hasattr (self, fun_name):
- self.command_not_understood (line[0])
- return
- fun = getattr (self, fun_name)
- if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):
- self.respond ('530 Please log in with USER and PASS')
- elif (not self.check_command_authorization (command)):
- self.command_not_authorized (command)
- else:
- try:
- result = apply (fun, (line,))
- except:
- self.server.total_exceptions.increment()
- (file, fun, line), t,v, tbinfo = asyncore.compact_traceback()
- if self.client_dc:
- try:
- self.client_dc.close()
- except:
- pass
- self.respond (
- '451 Server Error: %s, %s: file: %s line: %s' % (
- t,v,file,line,
- )
- )
-
- closed = 0
- def close (self):
- if not self.closed:
- self.closed = 1
- if self.passive_acceptor:
- self.passive_acceptor.close()
- if self.client_dc:
- self.client_dc.close()
- self.server.closed_sessions.increment()
- asynchat.async_chat.close (self)
-
- # --------------------------------------------------
- # filesystem interface functions.
- # override these to provide access control or perform
- # other functions.
- # --------------------------------------------------
-
- def cwd (self, line):
- return self.filesystem.cwd (line[1])
-
- def cdup (self, line):
- return self.filesystem.cdup()
-
- def open (self, path, mode):
- return self.filesystem.open (path, mode)
-
- # returns a producer
- def listdir (self, path, long=0):
- return self.filesystem.listdir (path, long)
-
- def get_dir_list (self, line, long=0):
- # we need to scan the command line for arguments to '/bin/ls'...
- args = line[1:]
- path_args = []
- for arg in args:
- if arg[0] != '-':
- path_args.append (arg)
- else:
- # ignore arguments
- pass
- if len(path_args) < 1:
- dir = '.'
- else:
- dir = path_args[0]
- return self.listdir (dir, long)
-
- # --------------------------------------------------
- # authorization methods
- # --------------------------------------------------
-
- def check_command_authorization (self, command):
- if command in self.write_commands and self.read_only:
- return 0
- else:
- return 1
-
- # --------------------------------------------------
- # utility methods
- # --------------------------------------------------
-
- def log (self, message):
- self.server.logger.log (
- self.addr[0],
- '%d %s' % (
- self.addr[1], message
- )
- )
-
- def respond (self, resp):
- self.log ('==> %s' % resp)
- self.push (resp + '\r\n')
-
- def command_not_understood (self, command):
- self.respond ("500 '%s': command not understood." % command)
-
- def command_not_authorized (self, command):
- self.respond (
- "530 You are not authorized to perform the '%s' command" % (
- command
- )
- )
-
- def make_xmit_channel (self):
- # In PASV mode, the connection may or may _not_ have been made
- # yet. [although in most cases it is... FTP Explorer being
- # the only exception I've yet seen]. This gets somewhat confusing
- # because things may happen in any order...
- pa = self.passive_acceptor
- if pa:
- if pa.ready:
- # a connection has already been made.
- conn, addr = self.passive_acceptor.ready
- cdc = xmit_channel (self, addr)
- cdc.set_socket (conn)
- cdc.connected = 1
- self.passive_acceptor.close()
- self.passive_acceptor = None
- else:
- # we're still waiting for a connect to the PASV port.
- cdc = xmit_channel (self)
- else:
- # not in PASV mode.
- ip, port = self.client_addr
- cdc = xmit_channel (self, self.client_addr)
- cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
- if self.bind_local_minus_one:
- cdc.bind (('', self.server.port - 1))
- try:
- cdc.connect ((ip, port))
- except socket.error as why:
- self.respond ("425 Can't build data connection")
- self.client_dc = cdc
-
- # pretty much the same as xmit, but only right on the verge of
- # being worth a merge.
- def make_recv_channel (self, fd):
- pa = self.passive_acceptor
- if pa:
- if pa.ready:
- # a connection has already been made.
- conn, addr = pa.ready
- cdc = recv_channel (self, addr, fd)
- cdc.set_socket (conn)
- cdc.connected = 1
- self.passive_acceptor.close()
- self.passive_acceptor = None
- else:
- # we're still waiting for a connect to the PASV port.
- cdc = recv_channel (self, None, fd)
- else:
- # not in PASV mode.
- ip, port = self.client_addr
- cdc = recv_channel (self, self.client_addr, fd)
- cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
- try:
- cdc.connect ((ip, port))
- except socket.error as why:
- self.respond ("425 Can't build data connection")
- self.client_dc = cdc
-
- type_map = {
- 'a':'ASCII',
- 'i':'Binary',
- 'e':'EBCDIC',
- 'l':'Binary'
- }
-
- type_mode_map = {
- 'a':'t',
- 'i':'b',
- 'e':'b',
- 'l':'b'
- }
-
- # --------------------------------------------------
- # command methods
- # --------------------------------------------------
-
- def cmd_type (self, line):
- 'specify data transfer type'
- # ascii, ebcdic, image, local <byte size>
- t = string.lower (line[1])
- # no support for EBCDIC
- # if t not in ['a','e','i','l']:
- if t not in ['a','i','l']:
- self.command_not_understood (string.join (line))
- elif t == 'l' and (len(line) > 2 and line[2] != '8'):
- self.respond ('504 Byte size must be 8')
- else:
- self.current_mode = t
- self.respond ('200 Type set to %s.' % self.type_map[t])
-
-
- def cmd_quit (self, line):
- 'terminate session'
- self.respond ('221 Goodbye.')
- self.close_when_done()
-
- def cmd_port (self, line):
- 'specify data connection port'
- info = string.split (line[1], ',')
- ip = string.join (info[:4], '.')
- port = string.atoi(info[4])*256 + string.atoi(info[5])
- # how many data connections at a time?
- # I'm assuming one for now...
- # TODO: we should (optionally) verify that the
- # ip number belongs to the client. [wu-ftpd does this?]
- self.client_addr = (ip, port)
- self.respond ('200 PORT command successful.')
-
- def new_passive_acceptor (self):
- # ensure that only one of these exists at a time.
- if self.passive_acceptor is not None:
- self.passive_acceptor.close()
- self.passive_acceptor = None
- self.passive_acceptor = passive_acceptor (self)
- return self.passive_acceptor
-
- def cmd_pasv (self, line):
- 'prepare for server-to-server transfer'
- pc = self.new_passive_acceptor()
- port = pc.addr[1]
- ip_addr = pc.control_channel.getsockname()[0]
- self.respond (
- '227 Entering Passive Mode (%s,%d,%d)' % (
- string.join (string.split (ip_addr, '.'), ','),
- port/256,
- port%256
- )
- )
- self.client_dc = None
-
- def cmd_nlst (self, line):
- 'give name list of files in directory'
- # ncftp adds the -FC argument for the user-visible 'nlist'
- # command. We could try to emulate ls flags, but not just yet.
- if '-FC' in line:
- line.remove ('-FC')
- try:
- dir_list_producer = self.get_dir_list (line, 0)
- except os.error as why:
- self.respond ('550 Could not list directory: %s' % repr(why))
- return
- self.respond (
- '150 Opening %s mode data connection for file list' % (
- self.type_map[self.current_mode]
- )
- )
- self.make_xmit_channel()
- self.client_dc.push_with_producer (dir_list_producer)
- self.client_dc.close_when_done()
-
- def cmd_list (self, line):
- 'give list files in a directory'
- try:
- dir_list_producer = self.get_dir_list (line, 1)
- except os.error as why:
- self.respond ('550 Could not list directory: %s' % repr(why))
- return
- self.respond (
- '150 Opening %s mode data connection for file list' % (
- self.type_map[self.current_mode]
- )
- )
- self.make_xmit_channel()
- self.client_dc.push_with_producer (dir_list_producer)
- self.client_dc.close_when_done()
-
- def cmd_cwd (self, line):
- 'change working directory'
- if self.cwd (line):
- self.respond ('250 CWD command successful.')
- else:
- self.respond ('550 No such directory.')
-
- def cmd_cdup (self, line):
- 'change to parent of current working directory'
- if self.cdup(line):
- self.respond ('250 CDUP command successful.')
- else:
- self.respond ('550 No such directory.')
-
- def cmd_pwd (self, line):
- 'print the current working directory'
- self.respond (
- '257 "%s" is the current directory.' % (
- self.filesystem.current_directory()
- )
- )
-
- # modification time
- # example output:
- # 213 19960301204320
- def cmd_mdtm (self, line):
- 'show last modification time of file'
- filename = line[1]
- if not self.filesystem.isfile (filename):
- self.respond ('550 "%s" is not a file' % filename)
- else:
- mtime = time.gmtime(self.filesystem.stat(filename)[stat.ST_MTIME])
- self.respond (
- '213 %4d%02d%02d%02d%02d%02d' % (
- mtime[0],
- mtime[1],
- mtime[2],
- mtime[3],
- mtime[4],
- mtime[5]
- )
- )
-
- def cmd_noop (self, line):
- 'do nothing'
- self.respond ('200 NOOP command successful.')
-
- def cmd_size (self, line):
- 'return size of file'
- filename = line[1]
- if not self.filesystem.isfile (filename):
- self.respond ('550 "%s" is not a file' % filename)
- else:
- self.respond (
- '213 %d' % (self.filesystem.stat(filename)[stat.ST_SIZE])
- )
-
- def cmd_retr (self, line):
- 'retrieve a file'
- if len(line) < 2:
- self.command_not_understood (string.join (line))
- else:
- file = line[1]
- if not self.filesystem.isfile (file):
- self.log_info ('checking %s' % file)
- self.respond ('550 No such file')
- else:
- try:
- # FIXME: for some reason, 'rt' isn't working on win95
- mode = 'r'+self.type_mode_map[self.current_mode]
- fd = self.open (file, mode)
- except IOError as why:
- self.respond ('553 could not open file for reading: %s' % (repr(why)))
- return
- self.respond (
- "150 Opening %s mode data connection for file '%s'" % (
- self.type_map[self.current_mode],
- file
- )
- )
- self.make_xmit_channel()
-
- if self.restart_position:
- # try to position the file as requested, but
- # give up silently on failure (the 'file object'
- # may not support seek())
- try:
- fd.seek (self.restart_position)
- except:
- pass
- self.restart_position = 0
-
- self.client_dc.push_with_producer (
- file_producer (self, self.client_dc, fd)
- )
- self.client_dc.close_when_done()
-
- def cmd_stor (self, line, mode='wb'):
- 'store a file'
- if len (line) < 2:
- self.command_not_understood (string.join (line))
- else:
- if self.restart_position:
- restart_position = 0
- self.respond ('553 restart on STOR not yet supported')
- return
- file = line[1]
- # todo: handle that type flag
- try:
- fd = self.open (file, mode)
- except IOError as why:
- self.respond ('553 could not open file for writing: %s' % (repr(why)))
- return
- self.respond (
- '150 Opening %s connection for %s' % (
- self.type_map[self.current_mode],
- file
- )
- )
- self.make_recv_channel (fd)
-
- def cmd_abor (self, line):
- 'abort operation'
- if self.client_dc:
- self.client_dc.close()
- self.respond ('226 ABOR command successful.')
-
- def cmd_appe (self, line):
- 'append to a file'
- return self.cmd_stor (line, 'ab')
-
- def cmd_dele (self, line):
- if len (line) != 2:
- self.command_not_understood (string.join (line))
- else:
- file = line[1]
- if self.filesystem.isfile (file):
- try:
- self.filesystem.unlink (file)
- self.respond ('250 DELE command successful.')
- except:
- self.respond ('550 error deleting file.')
- else:
- self.respond ('550 %s: No such file.' % file)
-
- def cmd_mkd (self, line):
- if len (line) != 2:
- self.command.not_understood (string.join (line))
- else:
- path = line[1]
- try:
- self.filesystem.mkdir (path)
- self.respond ('257 MKD command successful.')
- except:
- self.respond ('550 error creating directory.')
-
- def cmd_rmd (self, line):
- if len (line) != 2:
- self.command.not_understood (string.join (line))
- else:
- path = line[1]
- try:
- self.filesystem.rmdir (path)
- self.respond ('250 RMD command successful.')
- except:
- self.respond ('550 error removing directory.')
-
- def cmd_user (self, line):
- 'specify user name'
- if len(line) > 1:
- self.user = line[1]
- self.respond ('331 Password required.')
- else:
- self.command_not_understood (string.join (line))
-
- def cmd_pass (self, line):
- 'specify password'
- if len(line) < 2:
- pw = ''
- else:
- pw = line[1]
- result, message, fs = self.server.authorizer.authorize (self, self.user, pw)
- if result:
- self.respond ('230 %s' % message)
- self.filesystem = fs
- self.authorized = 1
- self.log_info('Successful login: Filesystem=%s' % repr(fs))
- else:
- self.respond ('530 %s' % message)
-
- def cmd_rest (self, line):
- 'restart incomplete transfer'
- try:
- pos = string.atoi (line[1])
- except ValueError:
- self.command_not_understood (string.join (line))
- self.restart_position = pos
- self.respond (
- '350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
- )
-
- def cmd_stru (self, line):
- 'obsolete - set file transfer structure'
- if line[1] in 'fF':
- # f == 'file'
- self.respond ('200 STRU F Ok')
- else:
- self.respond ('504 Unimplemented STRU type')
-
- def cmd_mode (self, line):
- 'obsolete - set file transfer mode'
- if line[1] in 'sS':
- # f == 'file'
- self.respond ('200 MODE S Ok')
- else:
- self.respond ('502 Unimplemented MODE type')
+ # defaults for a reliable __repr__
+ addr = ('unknown','0')
+
+ # unset this in a derived class in order
+ # to enable the commands in 'self.write_commands'
+ read_only = 1
+ write_commands = ['appe','dele','mkd','rmd','rnfr','rnto','stor','stou']
+
+ restart_position = 0
+
+ # comply with (possibly troublesome) RFC959 requirements
+ # This is necessary to correctly run an active data connection
+ # through a firewall that triggers on the source port (expected
+ # to be 'L-1', or 20 in the normal case).
+ bind_local_minus_one = 0
+
+ def __init__ (self, server, conn, addr):
+ self.server = server
+ self.current_mode = 'a'
+ self.addr = addr
+ asynchat.async_chat.__init__ (self, conn)
+ self.set_terminator ('\r\n')
+
+ # client data port. Defaults to 'the same as the control connection'.
+ self.client_addr = (addr[0], 21)
+
+ self.client_dc = None
+ self.in_buffer = ''
+ self.closing = 0
+ self.passive_acceptor = None
+ self.passive_connection = None
+ self.filesystem = None
+ self.authorized = 0
+ # send the greeting
+ self.respond (
+ '220 %s FTP server (Medusa Async V%s [experimental]) ready.' % (
+ self.server.hostname,
+ VERSION
+ )
+ )
+
+# def __del__ (self):
+# print('ftp_channel.__del__()')
+
+ # --------------------------------------------------
+ # async-library methods
+ # --------------------------------------------------
+
+ def handle_expt (self):
+ # this is handled below. not sure what I could
+ # do here to make that code less kludgish.
+ pass
+
+ def collect_incoming_data (self, data):
+ self.in_buffer = self.in_buffer + data
+ if len(self.in_buffer) > 4096:
+ # silently truncate really long lines
+ # (possible denial-of-service attack)
+ self.in_buffer = ''
+
+ def found_terminator (self):
+
+ line = self.in_buffer
+
+ if not len(line):
+ return
+
+ sp = string.find (line, ' ')
+ if sp != -1:
+ line = [line[:sp], line[sp+1:]]
+ else:
+ line = [line]
+
+ command = string.lower (line[0])
+ # watch especially for 'urgent' abort commands.
+ if string.find (command, 'abor') != -1:
+ # strip off telnet sync chars and the like...
+ while command and command[0] not in string.letters:
+ command = command[1:]
+ fun_name = 'cmd_%s' % command
+ if command != 'pass':
+ self.log ('<== %s' % repr(self.in_buffer)[1:-1])
+ else:
+ self.log ('<== %s' % line[0]+' <password>')
+ self.in_buffer = ''
+ if not hasattr (self, fun_name):
+ self.command_not_understood (line[0])
+ return
+ fun = getattr (self, fun_name)
+ if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):
+ self.respond ('530 Please log in with USER and PASS')
+ elif (not self.check_command_authorization (command)):
+ self.command_not_authorized (command)
+ else:
+ try:
+ result = apply (fun, (line,))
+ except:
+ self.server.total_exceptions.increment()
+ (file, fun, line), t,v, tbinfo = asyncore.compact_traceback()
+ if self.client_dc:
+ try:
+ self.client_dc.close()
+ except:
+ pass
+ self.respond (
+ '451 Server Error: %s, %s: file: %s line: %s' % (
+ t,v,file,line,
+ )
+ )
+
+ closed = 0
+ def close (self):
+ if not self.closed:
+ self.closed = 1
+ if self.passive_acceptor:
+ self.passive_acceptor.close()
+ if self.client_dc:
+ self.client_dc.close()
+ self.server.closed_sessions.increment()
+ asynchat.async_chat.close (self)
+
+ # --------------------------------------------------
+ # filesystem interface functions.
+ # override these to provide access control or perform
+ # other functions.
+ # --------------------------------------------------
+
+ def cwd (self, line):
+ return self.filesystem.cwd (line[1])
+
+ def cdup (self, line):
+ return self.filesystem.cdup()
+
+ def open (self, path, mode):
+ return self.filesystem.open (path, mode)
+
+ # returns a producer
+ def listdir (self, path, long=0):
+ return self.filesystem.listdir (path, long)
+
+ def get_dir_list (self, line, long=0):
+ # we need to scan the command line for arguments to '/bin/ls'...
+ args = line[1:]
+ path_args = []
+ for arg in args:
+ if arg[0] != '-':
+ path_args.append (arg)
+ else:
+ # ignore arguments
+ pass
+ if len(path_args) < 1:
+ dir = '.'
+ else:
+ dir = path_args[0]
+ return self.listdir (dir, long)
+
+ # --------------------------------------------------
+ # authorization methods
+ # --------------------------------------------------
+
+ def check_command_authorization (self, command):
+ if command in self.write_commands and self.read_only:
+ return 0
+ else:
+ return 1
+
+ # --------------------------------------------------
+ # utility methods
+ # --------------------------------------------------
+
+ def log (self, message):
+ self.server.logger.log (
+ self.addr[0],
+ '%d %s' % (
+ self.addr[1], message
+ )
+ )
+
+ def respond (self, resp):
+ self.log ('==> %s' % resp)
+ self.push (resp + '\r\n')
+
+ def command_not_understood (self, command):
+ self.respond ("500 '%s': command not understood." % command)
+
+ def command_not_authorized (self, command):
+ self.respond (
+ "530 You are not authorized to perform the '%s' command" % (
+ command
+ )
+ )
+
+ def make_xmit_channel (self):
+ # In PASV mode, the connection may or may _not_ have been made
+ # yet. [although in most cases it is... FTP Explorer being
+ # the only exception I've yet seen]. This gets somewhat confusing
+ # because things may happen in any order...
+ pa = self.passive_acceptor
+ if pa:
+ if pa.ready:
+ # a connection has already been made.
+ conn, addr = self.passive_acceptor.ready
+ cdc = xmit_channel (self, addr)
+ cdc.set_socket (conn)
+ cdc.connected = 1
+ self.passive_acceptor.close()
+ self.passive_acceptor = None
+ else:
+ # we're still waiting for a connect to the PASV port.
+ cdc = xmit_channel (self)
+ else:
+ # not in PASV mode.
+ ip, port = self.client_addr
+ cdc = xmit_channel (self, self.client_addr)
+ cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+ if self.bind_local_minus_one:
+ cdc.bind (('', self.server.port - 1))
+ try:
+ cdc.connect ((ip, port))
+ except socket.error as why:
+ self.respond ("425 Can't build data connection")
+ self.client_dc = cdc
+
+ # pretty much the same as xmit, but only right on the verge of
+ # being worth a merge.
+ def make_recv_channel (self, fd):
+ pa = self.passive_acceptor
+ if pa:
+ if pa.ready:
+ # a connection has already been made.
+ conn, addr = pa.ready
+ cdc = recv_channel (self, addr, fd)
+ cdc.set_socket (conn)
+ cdc.connected = 1
+ self.passive_acceptor.close()
+ self.passive_acceptor = None
+ else:
+ # we're still waiting for a connect to the PASV port.
+ cdc = recv_channel (self, None, fd)
+ else:
+ # not in PASV mode.
+ ip, port = self.client_addr
+ cdc = recv_channel (self, self.client_addr, fd)
+ cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ cdc.connect ((ip, port))
+ except socket.error as why:
+ self.respond ("425 Can't build data connection")
+ self.client_dc = cdc
+
+ type_map = {
+ 'a':'ASCII',
+ 'i':'Binary',
+ 'e':'EBCDIC',
+ 'l':'Binary'
+ }
+
+ type_mode_map = {
+ 'a':'t',
+ 'i':'b',
+ 'e':'b',
+ 'l':'b'
+ }
+
+ # --------------------------------------------------
+ # command methods
+ # --------------------------------------------------
+
+ def cmd_type (self, line):
+ 'specify data transfer type'
+ # ascii, ebcdic, image, local <byte size>
+ t = string.lower (line[1])
+ # no support for EBCDIC
+ # if t not in ['a','e','i','l']:
+ if t not in ['a','i','l']:
+ self.command_not_understood (string.join (line))
+ elif t == 'l' and (len(line) > 2 and line[2] != '8'):
+ self.respond ('504 Byte size must be 8')
+ else:
+ self.current_mode = t
+ self.respond ('200 Type set to %s.' % self.type_map[t])
+
+
+ def cmd_quit (self, line):
+ 'terminate session'
+ self.respond ('221 Goodbye.')
+ self.close_when_done()
+
+ def cmd_port (self, line):
+ 'specify data connection port'
+ info = string.split (line[1], ',')
+ ip = string.join (info[:4], '.')
+ port = string.atoi(info[4])*256 + string.atoi(info[5])
+ # how many data connections at a time?
+ # I'm assuming one for now...
+ # TODO: we should (optionally) verify that the
+ # ip number belongs to the client. [wu-ftpd does this?]
+ self.client_addr = (ip, port)
+ self.respond ('200 PORT command successful.')
+
+ def new_passive_acceptor (self):
+ # ensure that only one of these exists at a time.
+ if self.passive_acceptor is not None:
+ self.passive_acceptor.close()
+ self.passive_acceptor = None
+ self.passive_acceptor = passive_acceptor (self)
+ return self.passive_acceptor
+
+ def cmd_pasv (self, line):
+ 'prepare for server-to-server transfer'
+ pc = self.new_passive_acceptor()
+ port = pc.addr[1]
+ ip_addr = pc.control_channel.getsockname()[0]
+ self.respond (
+ '227 Entering Passive Mode (%s,%d,%d)' % (
+ string.join (string.split (ip_addr, '.'), ','),
+ port/256,
+ port%256
+ )
+ )
+ self.client_dc = None
+
+ def cmd_nlst (self, line):
+ 'give name list of files in directory'
+ # ncftp adds the -FC argument for the user-visible 'nlist'
+ # command. We could try to emulate ls flags, but not just yet.
+ if '-FC' in line:
+ line.remove ('-FC')
+ try:
+ dir_list_producer = self.get_dir_list (line, 0)
+ except os.error as why:
+ self.respond ('550 Could not list directory: %s' % repr(why))
+ return
+ self.respond (
+ '150 Opening %s mode data connection for file list' % (
+ self.type_map[self.current_mode]
+ )
+ )
+ self.make_xmit_channel()
+ self.client_dc.push_with_producer (dir_list_producer)
+ self.client_dc.close_when_done()
+
+ def cmd_list (self, line):
+ 'give list files in a directory'
+ try:
+ dir_list_producer = self.get_dir_list (line, 1)
+ except os.error as why:
+ self.respond ('550 Could not list directory: %s' % repr(why))
+ return
+ self.respond (
+ '150 Opening %s mode data connection for file list' % (
+ self.type_map[self.current_mode]
+ )
+ )
+ self.make_xmit_channel()
+ self.client_dc.push_with_producer (dir_list_producer)
+ self.client_dc.close_when_done()
+
+ def cmd_cwd (self, line):
+ 'change working directory'
+ if self.cwd (line):
+ self.respond ('250 CWD command successful.')
+ else:
+ self.respond ('550 No such directory.')
+
+ def cmd_cdup (self, line):
+ 'change to parent of current working directory'
+ if self.cdup(line):
+ self.respond ('250 CDUP command successful.')
+ else:
+ self.respond ('550 No such directory.')
+
+ def cmd_pwd (self, line):
+ 'print the current working directory'
+ self.respond (
+ '257 "%s" is the current directory.' % (
+ self.filesystem.current_directory()
+ )
+ )
+
+ # modification time
+ # example output:
+ # 213 19960301204320
+ def cmd_mdtm (self, line):
+ 'show last modification time of file'
+ filename = line[1]
+ if not self.filesystem.isfile (filename):
+ self.respond ('550 "%s" is not a file' % filename)
+ else:
+ mtime = time.gmtime(self.filesystem.stat(filename)[stat.ST_MTIME])
+ self.respond (
+ '213 %4d%02d%02d%02d%02d%02d' % (
+ mtime[0],
+ mtime[1],
+ mtime[2],
+ mtime[3],
+ mtime[4],
+ mtime[5]
+ )
+ )
+
+ def cmd_noop (self, line):
+ 'do nothing'
+ self.respond ('200 NOOP command successful.')
+
+ def cmd_size (self, line):
+ 'return size of file'
+ filename = line[1]
+ if not self.filesystem.isfile (filename):
+ self.respond ('550 "%s" is not a file' % filename)
+ else:
+ self.respond (
+ '213 %d' % (self.filesystem.stat(filename)[stat.ST_SIZE])
+ )
+
+ def cmd_retr (self, line):
+ 'retrieve a file'
+ if len(line) < 2:
+ self.command_not_understood (string.join (line))
+ else:
+ file = line[1]
+ if not self.filesystem.isfile (file):
+ self.log_info ('checking %s' % file)
+ self.respond ('550 No such file')
+ else:
+ try:
+ # FIXME: for some reason, 'rt' isn't working on win95
+ mode = 'r'+self.type_mode_map[self.current_mode]
+ fd = self.open (file, mode)
+ except IOError as why:
+ self.respond ('553 could not open file for reading: %s' % (repr(why)))
+ return
+ self.respond (
+ "150 Opening %s mode data connection for file '%s'" % (
+ self.type_map[self.current_mode],
+ file
+ )
+ )
+ self.make_xmit_channel()
+
+ if self.restart_position:
+ # try to position the file as requested, but
+ # give up silently on failure (the 'file object'
+ # may not support seek())
+ try:
+ fd.seek (self.restart_position)
+ except:
+ pass
+ self.restart_position = 0
+
+ self.client_dc.push_with_producer (
+ file_producer (self, self.client_dc, fd)
+ )
+ self.client_dc.close_when_done()
+
+ def cmd_stor (self, line, mode='wb'):
+ 'store a file'
+ if len (line) < 2:
+ self.command_not_understood (string.join (line))
+ else:
+ if self.restart_position:
+ self.restart_position = 0
+ self.respond ('553 restart on STOR not yet supported')
+ return
+ file = line[1]
+ # todo: handle that type flag
+ try:
+ fd = self.open (file, mode)
+ except IOError as why:
+ self.respond ('553 could not open file for writing: %s' % (repr(why)))
+ return
+ self.respond (
+ '150 Opening %s connection for %s' % (
+ self.type_map[self.current_mode],
+ file
+ )
+ )
+ self.make_recv_channel (fd)
+
+ def cmd_abor (self, line):
+ 'abort operation'
+ if self.client_dc:
+ self.client_dc.close()
+ self.respond ('226 ABOR command successful.')
+
+ def cmd_appe (self, line):
+ 'append to a file'
+ return self.cmd_stor (line, 'ab')
+
+ def cmd_dele (self, line):
+ if len (line) != 2:
+ self.command_not_understood (string.join (line))
+ else:
+ file = line[1]
+ if self.filesystem.isfile (file):
+ try:
+ self.filesystem.unlink (file)
+ self.respond ('250 DELE command successful.')
+ except:
+ self.respond ('550 error deleting file.')
+ else:
+ self.respond ('550 %s: No such file.' % file)
+
+ def cmd_mkd (self, line):
+ if len (line) != 2:
+ self.command_not_understood (string.join (line))
+ else:
+ path = line[1]
+ try:
+ self.filesystem.mkdir (path)
+ self.respond ('257 MKD command successful.')
+ except:
+ self.respond ('550 error creating directory.')
+
+ def cmd_rmd (self, line):
+ if len (line) != 2:
+ self.command_not_understood (string.join (line))
+ else:
+ path = line[1]
+ try:
+ self.filesystem.rmdir (path)
+ self.respond ('250 RMD command successful.')
+ except:
+ self.respond ('550 error removing directory.')
+
+ def cmd_user (self, line):
+ 'specify user name'
+ if len(line) > 1:
+ self.user = line[1]
+ self.respond ('331 Password required.')
+ else:
+ self.command_not_understood (string.join (line))
+
+ def cmd_pass (self, line):
+ 'specify password'
+ if len(line) < 2:
+ pw = ''
+ else:
+ pw = line[1]
+ result, message, fs = self.server.authorizer.authorize (self, self.user, pw)
+ if result:
+ self.respond ('230 %s' % message)
+ self.filesystem = fs
+ self.authorized = 1
+ self.log_info('Successful login: Filesystem=%s' % repr(fs))
+ else:
+ self.respond ('530 %s' % message)
+
+ def cmd_rest (self, line):
+ 'restart incomplete transfer'
+ try:
+ pos = string.atoi (line[1])
+ except ValueError:
+ self.command_not_understood (string.join (line))
+ return
+ self.restart_position = pos
+ self.respond (
+ '350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
+ )
+
+ def cmd_stru (self, line):
+ 'obsolete - set file transfer structure'
+ if line[1] in 'fF':
+ # f == 'file'
+ self.respond ('200 STRU F Ok')
+ else:
+ self.respond ('504 Unimplemented STRU type')
+
+ def cmd_mode (self, line):
+ 'obsolete - set file transfer mode'
+ if line[1] in 'sS':
+ # s == 'stream'
+ self.respond ('200 MODE S Ok')
+ else:
+ self.respond ('502 Unimplemented MODE type')
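#
# The command handling above hinges on one idiom: found_terminator maps the
# verb to a 'cmd_<verb>' method via getattr. A standalone sketch of just that
# dispatch (illustrative names, not from this module):
#
#     import string
#     class dispatch_sketch:
#         def cmd_noop (self, line):
#             return '200 NOOP command successful.'
#         def handle (self, raw):
#             line = string.split (raw)
#             fun_name = 'cmd_%s' % string.lower (line[0])
#             if not hasattr (self, fun_name):
#                 return "500 '%s': command not understood." % line[0]
#             return getattr (self, fun_name) (line)
#
#     print(dispatch_sketch().handle ('NOOP'))   # 200 NOOP command successful.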
# The stat command has two personalities. Normally it returns status
# information about the current connection. But if given an argument,
@@ -640,146 +640,146 @@ class ftp_channel (asynchat.async_chat):
# control connection. Strange. But wuftpd, ftpd, and nt's ftp server
# all support it.
#
-## def cmd_stat (self, line):
-## 'return status of server'
-## pass
-
- def cmd_syst (self, line):
- 'show operating system type of server system'
- # Replying to this command is of questionable utility, because
- # this server does not behave in a predictable way w.r.t. the
- # output of the LIST command. We emulate Unix ls output, but
- # on win32 the pathname can contain drive information at the front
- # Currently, the combination of ensuring that os.sep == '/'
- # and removing the leading slash when necessary seems to work.
- # [cd'ing to another drive also works]
- #
- # This is how wuftpd responds, and is probably
- # the most expected. The main purpose of this reply is so that
- # the client knows to expect Unix ls-style LIST output.
- self.respond ('215 UNIX Type: L8')
- # one disadvantage to this is that some client programs
- # assume they can pass args to /bin/ls.
- # a few typical responses:
- # 215 UNIX Type: L8 (wuftpd)
- # 215 Windows_NT version 3.51
- # 215 VMS MultiNet V3.3
- # 500 'SYST': command not understood. (SVR4)
-
- def cmd_help (self, line):
- 'give help information'
- # find all the methods that match 'cmd_xxxx',
- # use their docstrings for the help response.
- attrs = dir(self.__class__)
- help_lines = []
- for attr in attrs:
- if attr[:4] == 'cmd_':
- x = getattr (self, attr)
- if type(x) == type(self.cmd_help):
- if x.__doc__:
- help_lines.append ('\t%s\t%s' % (attr[4:], x.__doc__))
- if help_lines:
- self.push ('214-The following commands are recognized\r\n')
- self.push_with_producer (producers.lines_producer (help_lines))
- self.push ('214\r\n')
- else:
- self.push ('214-\r\n\tHelp Unavailable\r\n214\r\n')
+## def cmd_stat (self, line):
+## 'return status of server'
+## pass
+
+ def cmd_syst (self, line):
+ 'show operating system type of server system'
+ # Replying to this command is of questionable utility, because
+ # this server does not behave in a predictable way w.r.t. the
+ # output of the LIST command. We emulate Unix ls output, but
+ # on win32 the pathname can contain drive information at the front
+ # Currently, the combination of ensuring that os.sep == '/'
+ # and removing the leading slash when necessary seems to work.
+ # [cd'ing to another drive also works]
+ #
+ # This is how wuftpd responds, and is probably
+ # the most expected. The main purpose of this reply is so that
+ # the client knows to expect Unix ls-style LIST output.
+ self.respond ('215 UNIX Type: L8')
+ # one disadvantage to this is that some client programs
+ # assume they can pass args to /bin/ls.
+ # a few typical responses:
+ # 215 UNIX Type: L8 (wuftpd)
+ # 215 Windows_NT version 3.51
+ # 215 VMS MultiNet V3.3
+ # 500 'SYST': command not understood. (SVR4)
+
+ def cmd_help (self, line):
+ 'give help information'
+ # find all the methods that match 'cmd_xxxx',
+ # use their docstrings for the help response.
+ attrs = dir(self.__class__)
+ help_lines = []
+ for attr in attrs:
+ if attr[:4] == 'cmd_':
+ x = getattr (self, attr)
+ if type(x) == type(self.cmd_help):
+ if x.__doc__:
+ help_lines.append ('\t%s\t%s' % (attr[4:], x.__doc__))
+ if help_lines:
+ self.push ('214-The following commands are recognized\r\n')
+ self.push_with_producer (producers.lines_producer (help_lines))
+ self.push ('214\r\n')
+ else:
+ self.push ('214-\r\n\tHelp Unavailable\r\n214\r\n')
class ftp_server (asyncore.dispatcher):
- # override this to spawn a different FTP channel class.
- ftp_channel_class = ftp_channel
-
- SERVER_IDENT = 'FTP Server (V%s)' % VERSION
-
- def __init__ (
- self,
- authorizer,
- hostname =None,
- ip ='',
- port =21,
- resolver =None,
- logger_object=logger.file_logger (sys.stdout)
- ):
- self.ip = ip
- self.port = port
- self.authorizer = authorizer
-
- if hostname is None:
- self.hostname = socket.gethostname()
- else:
- self.hostname = hostname
-
- # statistics
- self.total_sessions = counter()
- self.closed_sessions = counter()
- self.total_files_out = counter()
- self.total_files_in = counter()
- self.total_bytes_out = counter()
- self.total_bytes_in = counter()
- self.total_exceptions = counter()
- #
- asyncore.dispatcher.__init__ (self)
- self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-
- self.set_reuse_addr()
- self.bind ((self.ip, self.port))
- self.listen (5)
-
- if not logger_object:
- logger_object = sys.stdout
-
- if resolver:
- self.logger = logger.resolving_logger (resolver, logger_object)
- else:
- self.logger = logger.unresolving_logger (logger_object)
-
- self.log_info('FTP server started at %s\n\tAuthorizer:%s\n\tHostname: %s\n\tPort: %d' % (
- time.ctime(time.time()),
- repr (self.authorizer),
- self.hostname,
- self.port)
- )
-
- def writable (self):
- return 0
-
- def handle_read (self):
- pass
-
- def handle_connect (self):
- pass
-
- def handle_accept (self):
- conn, addr = self.accept()
- self.total_sessions.increment()
- self.log_info('Incoming connection from %s:%d' % (addr[0], addr[1]))
- self.ftp_channel_class (self, conn, addr)
-
- # return a producer describing the state of the server
- def status (self):
-
- def nice_bytes (n):
- return string.join (status_handler.english_bytes (n))
-
- return producers.lines_producer (
- ['<h2>%s</h2>' % self.SERVER_IDENT,
- '<br>Listening on <b>Host:</b> %s' % self.hostname,
- '<b>Port:</b> %d' % self.port,
- '<br>Sessions',
- '<b>Total:</b> %s' % self.total_sessions,
- '<b>Current:</b> %d' % (self.total_sessions.as_long() - self.closed_sessions.as_long()),
- '<br>Files',
- '<b>Sent:</b> %s' % self.total_files_out,
- '<b>Received:</b> %s' % self.total_files_in,
- '<br>Bytes',
- '<b>Sent:</b> %s' % nice_bytes (self.total_bytes_out.as_long()),
- '<b>Received:</b> %s' % nice_bytes (self.total_bytes_in.as_long()),
- '<br>Exceptions: %s' % self.total_exceptions,
- ]
- )
+ # override this to spawn a different FTP channel class.
+ ftp_channel_class = ftp_channel
+
+ SERVER_IDENT = 'FTP Server (V%s)' % VERSION
+
+ def __init__ (
+ self,
+ authorizer,
+ hostname =None,
+ ip ='',
+ port =21,
+ resolver =None,
+ logger_object=logger.file_logger (sys.stdout)
+ ):
+ self.ip = ip
+ self.port = port
+ self.authorizer = authorizer
+
+ if hostname is None:
+ self.hostname = socket.gethostname()
+ else:
+ self.hostname = hostname
+
+ # statistics
+ self.total_sessions = counter()
+ self.closed_sessions = counter()
+ self.total_files_out = counter()
+ self.total_files_in = counter()
+ self.total_bytes_out = counter()
+ self.total_bytes_in = counter()
+ self.total_exceptions = counter()
+ #
+ asyncore.dispatcher.__init__ (self)
+ self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+
+ self.set_reuse_addr()
+ self.bind ((self.ip, self.port))
+ self.listen (5)
+
+ if not logger_object:
+ logger_object = sys.stdout
+
+ if resolver:
+ self.logger = logger.resolving_logger (resolver, logger_object)
+ else:
+ self.logger = logger.unresolving_logger (logger_object)
+
+ self.log_info('FTP server started at %s\n\tAuthorizer:%s\n\tHostname: %s\n\tPort: %d' % (
+ time.ctime(time.time()),
+ repr (self.authorizer),
+ self.hostname,
+ self.port)
+ )
+
+ def writable (self):
+ return 0
+
+ def handle_read (self):
+ pass
+
+ def handle_connect (self):
+ pass
+
+ def handle_accept (self):
+ conn, addr = self.accept()
+ self.total_sessions.increment()
+ self.log_info('Incoming connection from %s:%d' % (addr[0], addr[1]))
+ self.ftp_channel_class (self, conn, addr)
+
+ # return a producer describing the state of the server
+ def status (self):
+
+ def nice_bytes (n):
+ return string.join (status_handler.english_bytes (n))
+
+ return producers.lines_producer (
+ ['<h2>%s</h2>' % self.SERVER_IDENT,
+ '<br>Listening on <b>Host:</b> %s' % self.hostname,
+ '<b>Port:</b> %d' % self.port,
+ '<br>Sessions',
+ '<b>Total:</b> %s' % self.total_sessions,
+ '<b>Current:</b> %d' % (self.total_sessions.as_long() - self.closed_sessions.as_long()),
+ '<br>Files',
+ '<b>Sent:</b> %s' % self.total_files_out,
+ '<b>Received:</b> %s' % self.total_files_in,
+ '<br>Bytes',
+ '<b>Sent:</b> %s' % nice_bytes (self.total_bytes_out.as_long()),
+ '<b>Received:</b> %s' % nice_bytes (self.total_bytes_in.as_long()),
+ '<br>Exceptions: %s' % self.total_exceptions,
+ ]
+ )
# ======================================================================
-# Data Channel Classes
+# Data Channel Classes
# ======================================================================
# This socket accepts a data connection, used when the server has been
@@ -806,162 +806,162 @@ class ftp_server (asyncore.dispatcher):
#
class passive_acceptor (asyncore.dispatcher):
- ready = None
-
- def __init__ (self, control_channel):
- # connect_fun (conn, addr)
- asyncore.dispatcher.__init__ (self)
- self.control_channel = control_channel
- self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
- # bind to an address on the interface that the
- # control connection is coming from.
- self.bind ((
- self.control_channel.getsockname()[0],
- 0
- ))
- self.addr = self.getsockname()
- self.listen (1)
-
-# def __del__ (self):
-# print('passive_acceptor.__del__()')
-
- def log (self, *ignore):
- pass
-
- def handle_accept (self):
- conn, addr = self.accept()
- dc = self.control_channel.client_dc
- if dc is not None:
- dc.set_socket (conn)
- dc.addr = addr
- dc.connected = 1
- self.control_channel.passive_acceptor = None
- else:
- self.ready = conn, addr
- self.close()
+ ready = None
+
+ def __init__ (self, control_channel):
+ # connect_fun (conn, addr)
+ asyncore.dispatcher.__init__ (self)
+ self.control_channel = control_channel
+ self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+ # bind to an address on the interface that the
+ # control connection is coming from.
+ self.bind ((
+ self.control_channel.getsockname()[0],
+ 0
+ ))
+ self.addr = self.getsockname()
+ self.listen (1)
+
+# def __del__ (self):
+# print('passive_acceptor.__del__()')
+
+ def log (self, *ignore):
+ pass
+
+ def handle_accept (self):
+ conn, addr = self.accept()
+ dc = self.control_channel.client_dc
+ if dc is not None:
+ dc.set_socket (conn)
+ dc.addr = addr
+ dc.connected = 1
+ self.control_channel.passive_acceptor = None
+ else:
+ self.ready = conn, addr
+ self.close()
class xmit_channel (asynchat.async_chat):
- # for an ethernet, you want this to be fairly large, in fact, it
- # _must_ be large for performance comparable to an ftpd. [64k] we
- # ought to investigate automatically-sized buffers...
-
- ac_out_buffer_size = 16384
- bytes_out = 0
-
- def __init__ (self, channel, client_addr=None):
- self.channel = channel
- self.client_addr = client_addr
- asynchat.async_chat.__init__ (self)
-
-# def __del__ (self):
-# print('xmit_channel.__del__()')
-
- def log (*args):
- pass
-
- def readable (self):
- return not self.connected
-
- def writable (self):
- return 1
-
- def send (self, data):
- result = asynchat.async_chat.send (self, data)
- self.bytes_out = self.bytes_out + result
- return result
-
- def handle_error (self):
- # usually this is to catch an unexpected disconnect.
- self.log_info ('unexpected disconnect on data xmit channel', 'error')
- try:
- self.close()
- except:
- pass
-
- # TODO: there's a better way to do this. we need to be able to
- # put 'events' in the producer fifo. to do this cleanly we need
- # to reposition the 'producer' fifo as an 'event' fifo.
-
- def close (self):
- c = self.channel
- s = c.server
- c.client_dc = None
- s.total_files_out.increment()
- s.total_bytes_out.increment (self.bytes_out)
- if not len(self.producer_fifo):
- c.respond ('226 Transfer complete')
- elif not c.closed:
- c.respond ('426 Connection closed; transfer aborted')
- del c
- del s
- del self.channel
- asynchat.async_chat.close (self)
+ # for an ethernet, you want this to be fairly large, in fact, it
+ # _must_ be large for performance comparable to an ftpd. [64k] we
+ # ought to investigate automatically-sized buffers...
+
+ ac_out_buffer_size = 16384
+ bytes_out = 0
+
+ def __init__ (self, channel, client_addr=None):
+ self.channel = channel
+ self.client_addr = client_addr
+ asynchat.async_chat.__init__ (self)
+
+# def __del__ (self):
+# print('xmit_channel.__del__()')
+
+ def log (*args):
+ pass
+
+ def readable (self):
+ return not self.connected
+
+ def writable (self):
+ return 1
+
+ def send (self, data):
+ result = asynchat.async_chat.send (self, data)
+ self.bytes_out = self.bytes_out + result
+ return result
+
+ def handle_error (self):
+ # usually this is to catch an unexpected disconnect.
+ self.log_info ('unexpected disconnect on data xmit channel', 'error')
+ try:
+ self.close()
+ except:
+ pass
+
+ # TODO: there's a better way to do this. we need to be able to
+ # put 'events' in the producer fifo. to do this cleanly we need
+ # to reposition the 'producer' fifo as an 'event' fifo.
+
+ def close (self):
+ c = self.channel
+ s = c.server
+ c.client_dc = None
+ s.total_files_out.increment()
+ s.total_bytes_out.increment (self.bytes_out)
+ if not len(self.producer_fifo):
+ c.respond ('226 Transfer complete')
+ elif not c.closed:
+ c.respond ('426 Connection closed; transfer aborted')
+ del c
+ del s
+ del self.channel
+ asynchat.async_chat.close (self)
class recv_channel (asyncore.dispatcher):
- def __init__ (self, channel, client_addr, fd):
- self.channel = channel
- self.client_addr = client_addr
- self.fd = fd
- asyncore.dispatcher.__init__ (self)
- self.bytes_in = counter()
-
- def log (self, *ignore):
- pass
-
- def handle_connect (self):
- pass
-
- def writable (self):
- return 0
-
- def recv (*args):
- result = apply (asyncore.dispatcher.recv, args)
- self = args[0]
- self.bytes_in.increment(len(result))
- return result
-
- buffer_size = 8192
-
- def handle_read (self):
- block = self.recv (self.buffer_size)
- if block:
- try:
- self.fd.write (block)
- except IOError:
- self.log_info ('got exception writing block...', 'error')
-
- def handle_close (self):
- s = self.channel.server
- s.total_files_in.increment()
- s.total_bytes_in.increment(self.bytes_in.as_long())
- self.fd.close()
- self.channel.respond ('226 Transfer complete.')
- self.close()
+ def __init__ (self, channel, client_addr, fd):
+ self.channel = channel
+ self.client_addr = client_addr
+ self.fd = fd
+ asyncore.dispatcher.__init__ (self)
+ self.bytes_in = counter()
+
+ def log (self, *ignore):
+ pass
+
+ def handle_connect (self):
+ pass
+
+ def writable (self):
+ return 0
+
+ def recv (*args):
+ result = apply (asyncore.dispatcher.recv, args)
+ self = args[0]
+ self.bytes_in.increment(len(result))
+ return result
+
+ buffer_size = 8192
+
+ def handle_read (self):
+ block = self.recv (self.buffer_size)
+ if block:
+ try:
+ self.fd.write (block)
+ except IOError:
+ self.log_info ('got exception writing block...', 'error')
+
+ def handle_close (self):
+ s = self.channel.server
+ s.total_files_in.increment()
+ s.total_bytes_in.increment(self.bytes_in.as_long())
+ self.fd.close()
+ self.channel.respond ('226 Transfer complete.')
+ self.close()
import filesys
# not much of a doorman! 8^)
class dummy_authorizer:
- def __init__ (self, root='/'):
- self.root = root
- def authorize (self, channel, username, password):
- channel.persona = -1, -1
- channel.read_only = 1
- return 1, 'Ok.', filesys.os_filesystem (self.root)
+ def __init__ (self, root='/'):
+ self.root = root
+ def authorize (self, channel, username, password):
+ channel.persona = -1, -1
+ channel.read_only = 1
+ return 1, 'Ok.', filesys.os_filesystem (self.root)
class anon_authorizer:
- def __init__ (self, root='/'):
- self.root = root
-
- def authorize (self, channel, username, password):
- if username in ('ftp', 'anonymous'):
- channel.persona = -1, -1
- channel.read_only = 1
- return 1, 'Ok.', filesys.os_filesystem (self.root)
- else:
- return 0, 'Password invalid.', None
+ def __init__ (self, root='/'):
+ self.root = root
+
+ def authorize (self, channel, username, password):
+ if username in ('ftp', 'anonymous'):
+ channel.persona = -1, -1
+ channel.read_only = 1
+ return 1, 'Ok.', filesys.os_filesystem (self.root)
+ else:
+ return 0, 'Password invalid.', None
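#
# The authorizer contract shared by all of these: authorize() returns a
# (success, reply_string, filesystem) trio and may set attributes such as
# read_only and persona on the channel. A hedged sketch of a custom guest
# authorizer ('guest' and '/pub' are made-up values):
#
#     class guest_authorizer:
#         def authorize (self, channel, username, password):
#             if username == 'guest':
#                 channel.persona = -1, -1
#                 channel.read_only = 1
#                 return 1, 'Guest login ok.', filesys.os_filesystem ('/pub')
#             return 0, 'Guests only.', None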
# ===========================================================================
# Unix-specific improvements
@@ -969,161 +969,161 @@ class anon_authorizer:
if os.name == 'posix':
- class unix_authorizer:
- # return a trio of (success, reply_string, filesystem)
- def authorize (self, channel, username, password):
- import crypt
- import pwd
- try:
- info = pwd.getpwnam (username)
- except KeyError:
- return 0, 'No such user.', None
- mangled = info[1]
- if crypt.crypt (password, mangled[:2]) == mangled:
- channel.read_only = 0
- fs = filesys.schizophrenic_unix_filesystem (
- '/',
- info[5],
- persona = (info[2], info[3])
- )
- return 1, 'Login successful.', fs
- else:
- return 0, 'Password invalid.', None
-
- def __repr__ (self):
- return '<standard unix authorizer>'
-
- # simple anonymous ftp support
- class unix_authorizer_with_anonymous (unix_authorizer):
- def __init__ (self, root=None, real_users=0):
- self.root = root
- self.real_users = real_users
-
- def authorize (self, channel, username, password):
- if string.lower(username) in ['anonymous', 'ftp']:
- import pwd
- try:
- # ok, here we run into lots of confusion.
- # on some os', anon runs under user 'nobody',
- # on others as 'ftp'. ownership is also critical.
- # need to investigate.
- # linux: new linuxen seem to have nobody's UID=-1,
- # which is an illegal value. Use ftp.
- ftp_user_info = pwd.getpwnam ('ftp')
- if string.lower(os.uname()[0]) == 'linux':
- nobody_user_info = pwd.getpwnam ('ftp')
- else:
- nobody_user_info = pwd.getpwnam ('nobody')
- channel.read_only = 1
- if self.root is None:
- self.root = ftp_user_info[5]
- fs = filesys.unix_filesystem (self.root, '/')
- return 1, 'Anonymous Login Successful', fs
- except KeyError:
- return 0, 'Anonymous account not set up', None
- elif self.real_users:
- return unix_authorizer.authorize (
- self,
- channel,
- username,
- password
- )
- else:
- return 0, 'User logins not allowed', None
+ class unix_authorizer:
+ # return a trio of (success, reply_string, filesystem)
+ def authorize (self, channel, username, password):
+ import crypt
+ import pwd
+ try:
+ info = pwd.getpwnam (username)
+ except KeyError:
+ return 0, 'No such user.', None
+ mangled = info[1]
+ if crypt.crypt (password, mangled[:2]) == mangled:
+ channel.read_only = 0
+ fs = filesys.schizophrenic_unix_filesystem (
+ '/',
+ info[5],
+ persona = (info[2], info[3])
+ )
+ return 1, 'Login successful.', fs
+ else:
+ return 0, 'Password invalid.', None
+
+ def __repr__ (self):
+ return '<standard unix authorizer>'
+
+ # simple anonymous ftp support
+ class unix_authorizer_with_anonymous (unix_authorizer):
+ def __init__ (self, root=None, real_users=0):
+ self.root = root
+ self.real_users = real_users
+
+ def authorize (self, channel, username, password):
+ if string.lower(username) in ['anonymous', 'ftp']:
+ import pwd
+ try:
+ # ok, here we run into lots of confusion.
+ # on some os', anon runs under user 'nobody',
+ # on others as 'ftp'. ownership is also critical.
+ # need to investigate.
+ # linux: new linuxen seem to have nobody's UID=-1,
+ # which is an illegal value. Use ftp.
+ ftp_user_info = pwd.getpwnam ('ftp')
+ if string.lower(os.uname()[0]) == 'linux':
+ nobody_user_info = pwd.getpwnam ('ftp')
+ else:
+ nobody_user_info = pwd.getpwnam ('nobody')
+ channel.read_only = 1
+ if self.root is None:
+ self.root = ftp_user_info[5]
+ fs = filesys.unix_filesystem (self.root, '/')
+ return 1, 'Anonymous Login Successful', fs
+ except KeyError:
+ return 0, 'Anonymous account not set up', None
+ elif self.real_users:
+ return unix_authorizer.authorize (
+ self,
+ channel,
+ username,
+ password
+ )
+ else:
+ return 0, 'User logins not allowed', None
class file_producer:
- block_size = 16384
- def __init__ (self, server, dc, fd):
- self.fd = fd
- self.done = 0
-
- def more (self):
- if self.done:
- return ''
- else:
- block = self.fd.read (self.block_size)
- if not block:
- self.fd.close()
- self.done = 1
- return block
+ block_size = 16384
+ def __init__ (self, server, dc, fd):
+ self.fd = fd
+ self.done = 0
+
+ def more (self):
+ if self.done:
+ return ''
+ else:
+ block = self.fd.read (self.block_size)
+ if not block:
+ self.fd.close()
+ self.done = 1
+ return block
# usage: ftp_server /PATH/TO/FTP/ROOT PORT
# for example:
# $ ftp_server /home/users/ftp 8021
if os.name == 'posix':
- def test (port='8021'):
- import sys
- fs = ftp_server (
- unix_authorizer(),
- port=string.atoi (port)
- )
- try:
- asyncore.loop()
- except KeyboardInterrupt:
- self.log_info('FTP server shutting down. (received SIGINT)', 'warning')
- # close everything down on SIGINT.
- # of course this should be a cleaner shutdown.
- asyncore.close_all()
-
- if __name__ == '__main__':
- test (sys.argv[1])
+ def test (port='8021'):
+ import sys
+ fs = ftp_server (
+ unix_authorizer(),
+ port=string.atoi (port)
+ )
+ try:
+ asyncore.loop()
+ except KeyboardInterrupt:
+            print('FTP server shutting down. (received SIGINT)')  # 'self' is undefined in this module-level function
+ # close everything down on SIGINT.
+ # of course this should be a cleaner shutdown.
+ asyncore.close_all()
+
+ if __name__ == '__main__':
+ test (sys.argv[1])
# not unix
else:
- def test ():
- fs = ftp_server (dummy_authorizer())
- if __name__ == '__main__':
- test ()
+ def test ():
+ fs = ftp_server (dummy_authorizer())
+ if __name__ == '__main__':
+ test ()
# this is the command list from the wuftpd man page
# '*' means we've implemented it.
# '!' requires write access
#
command_documentation = {
- 'abor': 'abort previous command', #*
- 'acct': 'specify account (ignored)',
- 'allo': 'allocate storage (vacuously)',
- 'appe': 'append to a file', #*!
- 'cdup': 'change to parent of current working directory', #*
- 'cwd': 'change working directory', #*
- 'dele': 'delete a file', #!
- 'help': 'give help information', #*
- 'list': 'give list files in a directory', #*
- 'mkd': 'make a directory', #!
- 'mdtm': 'show last modification time of file', #*
- 'mode': 'specify data transfer mode',
- 'nlst': 'give name list of files in directory', #*
- 'noop': 'do nothing', #*
- 'pass': 'specify password', #*
- 'pasv': 'prepare for server-to-server transfer', #*
- 'port': 'specify data connection port', #*
- 'pwd': 'print the current working directory', #*
- 'quit': 'terminate session', #*
- 'rest': 'restart incomplete transfer', #*
- 'retr': 'retrieve a file', #*
- 'rmd': 'remove a directory', #!
- 'rnfr': 'specify rename-from file name', #!
- 'rnto': 'specify rename-to file name', #!
- 'site': 'non-standard commands (see next section)',
- 'size': 'return size of file', #*
- 'stat': 'return status of server', #*
- 'stor': 'store a file', #*!
- 'stou': 'store a file with a unique name', #!
- 'stru': 'specify data transfer structure',
- 'syst': 'show operating system type of server system', #*
- 'type': 'specify data transfer type', #*
- 'user': 'specify user name', #*
- 'xcup': 'change to parent of current working directory (deprecated)',
- 'xcwd': 'change working directory (deprecated)',
- 'xmkd': 'make a directory (deprecated)', #!
- 'xpwd': 'print the current working directory (deprecated)',
- 'xrmd': 'remove a directory (deprecated)', #!
+ 'abor': 'abort previous command', #*
+ 'acct': 'specify account (ignored)',
+ 'allo': 'allocate storage (vacuously)',
+ 'appe': 'append to a file', #*!
+ 'cdup': 'change to parent of current working directory', #*
+ 'cwd': 'change working directory', #*
+ 'dele': 'delete a file', #!
+ 'help': 'give help information', #*
+        'list': 'give a list of files in a directory', #*
+ 'mkd': 'make a directory', #!
+ 'mdtm': 'show last modification time of file', #*
+ 'mode': 'specify data transfer mode',
+ 'nlst': 'give name list of files in directory', #*
+ 'noop': 'do nothing', #*
+ 'pass': 'specify password', #*
+ 'pasv': 'prepare for server-to-server transfer', #*
+ 'port': 'specify data connection port', #*
+ 'pwd': 'print the current working directory', #*
+ 'quit': 'terminate session', #*
+ 'rest': 'restart incomplete transfer', #*
+ 'retr': 'retrieve a file', #*
+ 'rmd': 'remove a directory', #!
+ 'rnfr': 'specify rename-from file name', #!
+ 'rnto': 'specify rename-to file name', #!
+ 'site': 'non-standard commands (see next section)',
+ 'size': 'return size of file', #*
+ 'stat': 'return status of server', #*
+ 'stor': 'store a file', #*!
+ 'stou': 'store a file with a unique name', #!
+ 'stru': 'specify data transfer structure',
+ 'syst': 'show operating system type of server system', #*
+ 'type': 'specify data transfer type', #*
+ 'user': 'specify user name', #*
+ 'xcup': 'change to parent of current working directory (deprecated)',
+ 'xcwd': 'change working directory (deprecated)',
+ 'xmkd': 'make a directory (deprecated)', #!
+ 'xpwd': 'print the current working directory (deprecated)',
+ 'xrmd': 'remove a directory (deprecated)', #!
}
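One natural consumer of this table is a HELP reply: the channel dispatches each verb to a cmd_<verb> method, and the same verbs index command_documentation. A hypothetical helper, not code from the patch:

    def help_text(commands=command_documentation):
        # one line per verb; '*' in the comments above marks the implemented ones
        lines = []
        for verb in sorted(commands.keys()):
            lines.append('%-5s %s' % (verb, commands[verb]))
        return '\n'.join(lines)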
# debugging aid (linux)
def get_vm_size ():
- return string.atoi (string.split(open ('/proc/self/stat').readline())[22])
+ return string.atoi (string.split(open ('/proc/self/stat').readline())[22])
def print_vm():
- print('vm: %8dk' % (get_vm_size()/1024))
+ print('vm: %8dk' % (get_vm_size()/1024))
diff --git a/demo/medusa/ftps_server.py b/demo/medusa/ftps_server.py
index 4b5f8c9..c106213 100644
--- a/demo/medusa/ftps_server.py
+++ b/demo/medusa/ftps_server.py
@@ -1,4 +1,4 @@
-"""An FTP/TLS server built on Medusa's ftp_server.
+"""An FTP/TLS server built on Medusa's ftp_server.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
@@ -15,7 +15,7 @@ from M2Crypto import SSL
VERSION_STRING='0.09'
class ftp_tls_channel(ftp_server.ftp_channel):
-
+
"""FTP/TLS server channel for Medusa."""
def __init__(self, server, ssl_ctx, conn, addr):
@@ -52,7 +52,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self._ssl_accepting = 0
else:
try:
- ftp_server.ftp_channel.handle_read(self)
+ ftp_server.ftp_channel.handle_read(self)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
self.close()
@@ -67,7 +67,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self._ssl_accepting = 0
else:
try:
- ftp_server.ftp_channel.handle_write(self)
+ ftp_server.ftp_channel.handle_write(self)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
self.close()
@@ -116,7 +116,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
if string.find(command, 'stor') != -1:
while command and command[0] not in string.letters:
command = command[1:]
-
+
func_name = 'cmd_%s' % command
if command != 'pass':
self.log('<== %s' % repr(self.in_buffer)[1:-1])
@@ -126,8 +126,8 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self.in_buffer = ''
if not hasattr(self, func_name):
self.command_not_understood(line[0])
- return
-
+ return
+
func = getattr(self, func_name)
if not self.check_command_authorization(command):
self.command_not_authorized(command)
@@ -217,7 +217,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
else:
self.respond('234 AUTH TLS successful')
self._ssl_accepting = 1
- self.socket = SSL.Connection(self.ssl_ctx, self.socket)
+ self.socket = SSL.Connection(self.ssl_ctx, self.socket)
self.socket.setup_addr(self.addr)
self.socket.setup_ssl()
self.socket.set_accept_state()
@@ -227,7 +227,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer. For
- FTP/TLS the only valid value for the parameter is '0'; any
+ FTP/TLS the only valid value for the parameter is '0'; any
other value is accepted but ignored."""
if not (self._ssl_accepting or self._ssl_accepted):
return self.respond('503 AUTH TLS must be issued prior to PBSZ')
@@ -235,21 +235,21 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self.respond('200 PBSZ=0 successful.')
def cmd_prot(self, line):
- """Negotiate the security level of the data connection."""
+ """Negotiate the security level of the data connection."""
if self._pbsz is None:
return self.respond('503 PBSZ must be issued prior to PROT')
if line[1] == 'C':
self.respond('200 Protection set to Clear')
self._pbsz = None
self._prot = None
- elif line[1] == 'P':
+ elif line[1] == 'P':
self.respond('200 Protection set to Private')
self._prot = 1
elif line[1] in ('S', 'E'):
self.respond('536 PROT %s unsupported' % line[1])
else:
self.respond('504 PROT %s unsupported' % line[1])
-
+
class ftp_tls_server(ftp_server.ftp_server):
@@ -334,8 +334,8 @@ class nbio_ftp_tls_actor:
return self._ssl_handshake_ok
def handle_connect(self):
- """Handle a data connection that occurs after this instance came
- into being. When this handler is triggered, self.socket has been
+ """Handle a data connection that occurs after this instance came
+ into being. When this handler is triggered, self.socket has been
created and refers to the underlying connected socket."""
self.socket = SSL.Connection(self.ssl_ctx, self.socket)
self.socket.setup_addr(self.client_addr)
@@ -370,7 +370,7 @@ class nbio_ftp_tls_actor:
self.close()
self.log_info('recv: closing channel %s %s' % (repr(self), what))
return ''
-
+
class tls_xmit_channel(nbio_ftp_tls_actor, ftp_server.xmit_channel):
@@ -401,17 +401,17 @@ class tls_xmit_channel(nbio_ftp_tls_actor, ftp_server.xmit_channel):
"""Handle a read event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.xmit_channel.handle_read(self)
+ ftp_server.xmit_channel.handle_read(self)
def handle_write(self):
"""Handle a write event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.xmit_channel.handle_write(self)
+ ftp_server.xmit_channel.handle_write(self)
class tls_recv_channel(nbio_ftp_tls_actor, ftp_server.recv_channel):
-
+
"""TLS driver for a receive-only data connection."""
def __init__(self, channel, conn, ssl_ctx, client_addr, fd):
@@ -427,12 +427,12 @@ class tls_recv_channel(nbio_ftp_tls_actor, ftp_server.recv_channel):
"""Handle a read event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.recv_channel.handle_read(self)
+ ftp_server.recv_channel.handle_read(self)
def handle_write(self):
"""Handle a write event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.recv_channel.handle_write(self)
+ ftp_server.recv_channel.handle_write(self)
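A server built from these channels can be exercised with the standard library's FTP/TLS client, which emits the same AUTH TLS, PBSZ 0 and PROT P sequence handled above. A minimal sketch; host, port and credentials are placeholders:

    from ftplib import FTP_TLS

    ftps = FTP_TLS()
    ftps.connect('localhost', 8021)   # placeholder endpoint
    ftps.auth()                       # sends AUTH TLS
    ftps.login('anonymous', 'guest')
    ftps.prot_p()                     # sends PBSZ 0, then PROT P
    ftps.retrlines('LIST')            # data connection is now TLS-protected
    ftps.quit()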
diff --git a/demo/medusa/http_date.py b/demo/medusa/http_date.py
index c40f70d..695b28c 100644
--- a/demo/medusa/http_date.py
+++ b/demo/medusa/http_date.py
@@ -5,13 +5,13 @@ import string
import time
def concat (*args):
- return ''.join (args)
+ return ''.join (args)
def join (seq, field=' '):
- return field.join (seq)
+ return field.join (seq)
def group (s):
- return '(' + s + ')'
+ return '(' + s + ')'
short_days = ['sun','mon','tue','wed','thu','fri','sat']
long_days = ['sunday','monday','tuesday','wednesday','thursday','friday','saturday']
@@ -21,8 +21,8 @@ long_day_reg = group (join (long_days, '|'))
daymap = {}
for i in range(7):
- daymap[short_days[i]] = i
- daymap[long_days[i]] = i
+ daymap[short_days[i]] = i
+ daymap[long_days[i]] = i
hms_reg = join (3 * [group('[0-9][0-9]')], ':')
@@ -30,7 +30,7 @@ months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec
monmap = {}
for i in range(12):
- monmap[months[i]] = i+1
+ monmap[months[i]] = i+1
months_reg = group (join (months, '|'))
@@ -41,86 +41,86 @@ months_reg = group (join (months, '|'))
# rfc822 format
rfc822_date = join (
- [concat (short_day_reg,','), # day
- group('[0-9][0-9]?'), # date
- months_reg, # month
- group('[0-9]+'), # year
- hms_reg, # hour minute second
- 'gmt'
- ],
- ' '
- )
+ [concat (short_day_reg,','), # day
+ group('[0-9][0-9]?'), # date
+ months_reg, # month
+ group('[0-9]+'), # year
+ hms_reg, # hour minute second
+ 'gmt'
+ ],
+ ' '
+ )
rfc822_reg = re.compile (rfc822_date)
def unpack_rfc822 (m):
- g = m.group
- a = string.atoi
- return (
- a(g(4)), # year
- monmap[g(3)], # month
- a(g(2)), # day
- a(g(5)), # hour
- a(g(6)), # minute
- a(g(7)), # second
- 0,
- 0,
- 0
- )
+ g = m.group
+ a = string.atoi
+ return (
+ a(g(4)), # year
+ monmap[g(3)], # month
+ a(g(2)), # day
+ a(g(5)), # hour
+ a(g(6)), # minute
+ a(g(7)), # second
+ 0,
+ 0,
+ 0
+ )
# rfc850 format
rfc850_date = join (
- [concat (long_day_reg,','),
- join (
- [group ('[0-9][0-9]?'),
- months_reg,
- group ('[0-9]+')
- ],
- '-'
- ),
- hms_reg,
- 'gmt'
- ],
- ' '
- )
+ [concat (long_day_reg,','),
+ join (
+ [group ('[0-9][0-9]?'),
+ months_reg,
+ group ('[0-9]+')
+ ],
+ '-'
+ ),
+ hms_reg,
+ 'gmt'
+ ],
+ ' '
+ )
rfc850_reg = re.compile (rfc850_date)
# they actually unpack the same way
def unpack_rfc850 (m):
- g = m.group
- a = string.atoi
- return (
- a(g(4)), # year
- monmap[g(3)], # month
- a(g(2)), # day
- a(g(5)), # hour
- a(g(6)), # minute
- a(g(7)), # second
- 0,
- 0,
- 0
- )
-
-# parsdate.parsedate - ~700/sec.
-# parse_http_date - ~1333/sec.
+ g = m.group
+ a = string.atoi
+ return (
+ a(g(4)), # year
+ monmap[g(3)], # month
+ a(g(2)), # day
+ a(g(5)), # hour
+ a(g(6)), # minute
+ a(g(7)), # second
+ 0,
+ 0,
+ 0
+ )
+
+# parsedate.parsedate - ~700/sec.
+# parse_http_date - ~1333/sec.
def build_http_date (when):
- return time.strftime ('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(when))
+ return time.strftime ('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(when))
def parse_http_date (d):
- d = string.lower (d)
- tz = time.timezone
- m = rfc850_reg.match (d)
- if m and m.end() == len(d):
- retval = int (time.mktime (unpack_rfc850(m)) - tz)
- else:
- m = rfc822_reg.match (d)
- if m and m.end() == len(d):
- retval = int (time.mktime (unpack_rfc822(m)) - tz)
- else:
- return 0
- # Thanks to Craig Silverstein <csilvers@google.com> for pointing
- # out the DST discrepancy
- if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
- retval = retval + (tz - time.altzone)
- return retval
+ d = string.lower (d)
+ tz = time.timezone
+ m = rfc850_reg.match (d)
+ if m and m.end() == len(d):
+ retval = int (time.mktime (unpack_rfc850(m)) - tz)
+ else:
+ m = rfc822_reg.match (d)
+ if m and m.end() == len(d):
+ retval = int (time.mktime (unpack_rfc822(m)) - tz)
+ else:
+ return 0
+ # Thanks to Craig Silverstein <csilvers@google.com> for pointing
+ # out the DST discrepancy
+ if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
+ retval = retval + (tz - time.altzone)
+ return retval
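A quick round trip through the two functions above, from epoch seconds to an RFC 822 date string and back:

    import time

    now = int(time.time())
    s = build_http_date(now)           # e.g. 'Sun, 06 Nov 1994 08:49:37 GMT'
    print(parse_http_date(s) == now)   # True, modulo the DST correction above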
diff --git a/demo/medusa/http_server.py b/demo/medusa/http_server.py
index 0965d7c..76a8b1b 100644
--- a/demo/medusa/http_server.py
+++ b/demo/medusa/http_server.py
@@ -1,9 +1,9 @@
#! /usr/local/bin/python
# -*- Mode: Python; tab-width: 4 -*-
#
-# Author: Sam Rushing <rushing@nightmare.com>
-# Copyright 1996-2000 by Sam Rushing
-# All Rights Reserved.
+# Author: Sam Rushing <rushing@nightmare.com>
+# Copyright 1996-2000 by Sam Rushing
+# All Rights Reserved.
#
from __future__ import print_function
@@ -33,754 +33,754 @@ from counter import counter
from urllib import unquote
# ===========================================================================
-# Request Object
+# Request Object
# ===========================================================================
class http_request:
- # default reply code
- reply_code = 200
-
- request_counter = counter()
-
- # Whether to automatically use chunked encoding when
- #
- # HTTP version is 1.1
- # Content-Length is not set
- # Chunked encoding is not already in effect
- #
- # If your clients are having trouble, you might want to disable this.
- use_chunked = 1
-
- # by default, this request object ignores user data.
- collector = None
-
- def __init__ (self, *args):
- # unpack information about the request
- (self.channel, self.request,
- self.command, self.uri, self.version,
- self.header) = args
-
- self.outgoing = fifo()
- self.reply_headers = {
- 'Server' : 'Medusa/%s' % VERSION_STRING,
- 'Date' : http_date.build_http_date (time.time())
- }
- self.request_number = http_request.request_counter.increment()
- self._split_uri = None
- self._header_cache = {}
-
- # --------------------------------------------------
- # reply header management
- # --------------------------------------------------
- def __setitem__ (self, key, value):
- self.reply_headers[key] = value
-
- def __getitem__ (self, key):
- return self.reply_headers[key]
-
- def has_key (self, key):
- return key in self.reply_headers
-
- def build_reply_header (self):
- return string.join (
- [self.response(self.reply_code)] + map (
- lambda x: '%s: %s' % x,
- self.reply_headers.items()
- ),
- '\r\n'
- ) + '\r\n\r\n'
-
- # --------------------------------------------------
- # split a uri
- # --------------------------------------------------
-
- # <path>;<params>?<query>#<fragment>
- path_regex = re.compile (
- # path params query fragment
- r'([^;?#]*)(;[^?#]*)?(\?[^#]*)?(#.*)?'
- )
-
- def split_uri (self):
- if self._split_uri is None:
- m = self.path_regex.match (self.uri)
- if m.end() != len(self.uri):
- raise ValueError("Broken URI")
- else:
- self._split_uri = m.groups()
- return self._split_uri
-
- def get_header_with_regex (self, head_reg, group):
- for line in self.header:
- m = head_reg.match (line)
- if m.end() == len(line):
- return head_reg.group (group)
- return ''
-
- def get_header (self, header):
- header = string.lower (header)
- hc = self._header_cache
- if header not in hc:
- h = header + ': '
- hl = len(h)
- for line in self.header:
- if string.lower (line[:hl]) == h:
- r = line[hl:]
- hc[header] = r
- return r
- hc[header] = None
- return None
- else:
- return hc[header]
-
- # --------------------------------------------------
- # user data
- # --------------------------------------------------
-
- def collect_incoming_data (self, data):
- if self.collector:
- self.collector.collect_incoming_data (data)
- else:
- self.log_info(
- 'Dropping %d bytes of incoming request data' % len(data),
- 'warning'
- )
-
- def found_terminator (self):
- if self.collector:
- self.collector.found_terminator()
- else:
- self.log_info (
- 'Unexpected end-of-record for incoming request',
- 'warning'
- )
-
- def push (self, thing):
- if type(thing) == type(''):
- self.outgoing.push (producers.simple_producer (thing))
- else:
- self.outgoing.push (thing)
-
- def response (self, code=200):
- message = self.responses[code]
- self.reply_code = code
- return 'HTTP/%s %d %s' % (self.version, code, message)
-
- def error (self, code):
- self.reply_code = code
- message = self.responses[code]
- s = self.DEFAULT_ERROR_MESSAGE % {
- 'code': code,
- 'message': message,
- }
- self['Content-Length'] = len(s)
- self['Content-Type'] = 'text/html'
- # make an error reply
- self.push (s)
- self.done()
-
- # can also be used for empty replies
- reply_now = error
-
- def done (self):
- "finalize this transaction - send output to the http channel"
-
- # ----------------------------------------
- # persistent connection management
- # ----------------------------------------
-
- # --- BUCKLE UP! ----
-
- connection = string.lower (get_header (CONNECTION, self.header))
-
- close_it = 0
- wrap_in_chunking = 0
-
- if self.version == '1.0':
- if connection == 'keep-alive':
- if 'Content-Length' not in self:
- close_it = 1
- else:
- self['Connection'] = 'Keep-Alive'
- else:
- close_it = 1
- elif self.version == '1.1':
- if connection == 'close':
- close_it = 1
- elif 'Content-Length' not in self:
- if 'Transfer-Encoding' in self:
- if not self['Transfer-Encoding'] == 'chunked':
- close_it = 1
- elif self.use_chunked:
- self['Transfer-Encoding'] = 'chunked'
- wrap_in_chunking = 1
- else:
- close_it = 1
- elif self.version is None:
- # Although we don't *really* support http/0.9 (because we'd have to
- # use \r\n as a terminator, and it would just yuck up a lot of stuff)
- # it's very common for developers to not want to type a version number
- # when using telnet to debug a server.
- close_it = 1
-
- outgoing_header = producers.simple_producer (self.build_reply_header())
-
- if close_it:
- self['Connection'] = 'close'
-
- if wrap_in_chunking:
- outgoing_producer = producers.chunked_producer (
- producers.composite_producer (self.outgoing)
- )
- # prepend the header
- outgoing_producer = producers.composite_producer (
- fifo([outgoing_header, outgoing_producer])
- )
- else:
- # prepend the header
- self.outgoing.push_front (outgoing_header)
- outgoing_producer = producers.composite_producer (self.outgoing)
-
- # apply a few final transformations to the output
- self.channel.push_with_producer (
- # globbing gives us large packets
- producers.globbing_producer (
- # hooking lets us log the number of bytes sent
- producers.hooked_producer (
- outgoing_producer,
- self.log
- )
- )
- )
-
- self.channel.current_request = None
-
- if close_it:
- self.channel.close_when_done()
-
- def log_date_string (self, when):
- return time.strftime (
- '%d/%b/%Y:%H:%M:%S ',
- time.gmtime(when)
- ) + tz_for_log
-
- def log (self, bytes):
- self.channel.server.logger.log (
- self.channel.addr[0],
- '%d - - [%s] "%s" %d %d\n' % (
- self.channel.addr[1],
- self.log_date_string (time.time()),
- self.request,
- self.reply_code,
- bytes
- )
- )
-
- responses = {
- 100: "Continue",
- 101: "Switching Protocols",
- 200: "OK",
- 201: "Created",
- 202: "Accepted",
- 203: "Non-Authoritative Information",
- 204: "No Content",
- 205: "Reset Content",
- 206: "Partial Content",
- 300: "Multiple Choices",
- 301: "Moved Permanently",
- 302: "Moved Temporarily",
- 303: "See Other",
- 304: "Not Modified",
- 305: "Use Proxy",
- 400: "Bad Request",
- 401: "Unauthorized",
- 402: "Payment Required",
- 403: "Forbidden",
- 404: "Not Found",
- 405: "Method Not Allowed",
- 406: "Not Acceptable",
- 407: "Proxy Authentication Required",
- 408: "Request Time-out",
- 409: "Conflict",
- 410: "Gone",
- 411: "Length Required",
- 412: "Precondition Failed",
- 413: "Request Entity Too Large",
- 414: "Request-URI Too Large",
- 415: "Unsupported Media Type",
- 500: "Internal Server Error",
- 501: "Not Implemented",
- 502: "Bad Gateway",
- 503: "Service Unavailable",
- 504: "Gateway Time-out",
- 505: "HTTP Version not supported"
- }
-
- # Default error message
- DEFAULT_ERROR_MESSAGE = string.join (
- ['<head>',
- '<title>Error response</title>',
- '</head>',
- '<body>',
- '<h1>Error response</h1>',
- '<p>Error code %(code)d.',
- '<p>Message: %(message)s.',
- '</body>',
- ''
- ],
- '\r\n'
- )
+ # default reply code
+ reply_code = 200
+
+ request_counter = counter()
+
+ # Whether to automatically use chunked encoding when
+ #
+ # HTTP version is 1.1
+ # Content-Length is not set
+ # Chunked encoding is not already in effect
+ #
+ # If your clients are having trouble, you might want to disable this.
+ use_chunked = 1
+
+ # by default, this request object ignores user data.
+ collector = None
+
+ def __init__ (self, *args):
+ # unpack information about the request
+ (self.channel, self.request,
+ self.command, self.uri, self.version,
+ self.header) = args
+
+ self.outgoing = fifo()
+ self.reply_headers = {
+ 'Server' : 'Medusa/%s' % VERSION_STRING,
+ 'Date' : http_date.build_http_date (time.time())
+ }
+ self.request_number = http_request.request_counter.increment()
+ self._split_uri = None
+ self._header_cache = {}
+
+ # --------------------------------------------------
+ # reply header management
+ # --------------------------------------------------
+ def __setitem__ (self, key, value):
+ self.reply_headers[key] = value
+
+ def __getitem__ (self, key):
+ return self.reply_headers[key]
+
+ def has_key (self, key):
+ return key in self.reply_headers
+
+ def build_reply_header (self):
+ return string.join (
+ [self.response(self.reply_code)] + map (
+ lambda x: '%s: %s' % x,
+ self.reply_headers.items()
+ ),
+ '\r\n'
+ ) + '\r\n\r\n'
+
+ # --------------------------------------------------
+ # split a uri
+ # --------------------------------------------------
+
+ # <path>;<params>?<query>#<fragment>
+ path_regex = re.compile (
+ # path params query fragment
+ r'([^;?#]*)(;[^?#]*)?(\?[^#]*)?(#.*)?'
+ )
+
+ def split_uri (self):
+ if self._split_uri is None:
+ m = self.path_regex.match (self.uri)
+ if m.end() != len(self.uri):
+ raise ValueError("Broken URI")
+ else:
+ self._split_uri = m.groups()
+ return self._split_uri
+
+ def get_header_with_regex (self, head_reg, group):
+ for line in self.header:
+ m = head_reg.match (line)
+            if m and m.end() == len(line):
+                return m.group (group)  # the groups live on the match object, not the compiled pattern
+ return ''
+
+ def get_header (self, header):
+ header = string.lower (header)
+ hc = self._header_cache
+ if header not in hc:
+ h = header + ': '
+ hl = len(h)
+ for line in self.header:
+ if string.lower (line[:hl]) == h:
+ r = line[hl:]
+ hc[header] = r
+ return r
+ hc[header] = None
+ return None
+ else:
+ return hc[header]
+
+ # --------------------------------------------------
+ # user data
+ # --------------------------------------------------
+
+ def collect_incoming_data (self, data):
+ if self.collector:
+ self.collector.collect_incoming_data (data)
+ else:
+ self.log_info(
+ 'Dropping %d bytes of incoming request data' % len(data),
+ 'warning'
+ )
+
+ def found_terminator (self):
+ if self.collector:
+ self.collector.found_terminator()
+ else:
+ self.log_info (
+ 'Unexpected end-of-record for incoming request',
+ 'warning'
+ )
+
+ def push (self, thing):
+ if type(thing) == type(''):
+ self.outgoing.push (producers.simple_producer (thing))
+ else:
+ self.outgoing.push (thing)
+
+ def response (self, code=200):
+ message = self.responses[code]
+ self.reply_code = code
+ return 'HTTP/%s %d %s' % (self.version, code, message)
+
+ def error (self, code):
+ self.reply_code = code
+ message = self.responses[code]
+ s = self.DEFAULT_ERROR_MESSAGE % {
+ 'code': code,
+ 'message': message,
+ }
+ self['Content-Length'] = len(s)
+ self['Content-Type'] = 'text/html'
+ # make an error reply
+ self.push (s)
+ self.done()
+
+ # can also be used for empty replies
+ reply_now = error
+
+ def done (self):
+ "finalize this transaction - send output to the http channel"
+
+ # ----------------------------------------
+ # persistent connection management
+ # ----------------------------------------
+
+ # --- BUCKLE UP! ----
+
+ connection = string.lower (get_header (CONNECTION, self.header))
+
+ close_it = 0
+ wrap_in_chunking = 0
+
+ if self.version == '1.0':
+ if connection == 'keep-alive':
+ if 'Content-Length' not in self:
+ close_it = 1
+ else:
+ self['Connection'] = 'Keep-Alive'
+ else:
+ close_it = 1
+ elif self.version == '1.1':
+ if connection == 'close':
+ close_it = 1
+ elif 'Content-Length' not in self:
+ if 'Transfer-Encoding' in self:
+ if not self['Transfer-Encoding'] == 'chunked':
+ close_it = 1
+ elif self.use_chunked:
+ self['Transfer-Encoding'] = 'chunked'
+ wrap_in_chunking = 1
+ else:
+ close_it = 1
+ elif self.version is None:
+ # Although we don't *really* support http/0.9 (because we'd have to
+ # use \r\n as a terminator, and it would just yuck up a lot of stuff)
+ # it's very common for developers to not want to type a version number
+ # when using telnet to debug a server.
+ close_it = 1
+
+ outgoing_header = producers.simple_producer (self.build_reply_header())
+
+ if close_it:
+ self['Connection'] = 'close'
+
+ if wrap_in_chunking:
+ outgoing_producer = producers.chunked_producer (
+ producers.composite_producer (self.outgoing)
+ )
+ # prepend the header
+ outgoing_producer = producers.composite_producer (
+ fifo([outgoing_header, outgoing_producer])
+ )
+ else:
+ # prepend the header
+ self.outgoing.push_front (outgoing_header)
+ outgoing_producer = producers.composite_producer (self.outgoing)
+
+ # apply a few final transformations to the output
+ self.channel.push_with_producer (
+ # globbing gives us large packets
+ producers.globbing_producer (
+ # hooking lets us log the number of bytes sent
+ producers.hooked_producer (
+ outgoing_producer,
+ self.log
+ )
+ )
+ )
+
+ self.channel.current_request = None
+
+ if close_it:
+ self.channel.close_when_done()
+
+ def log_date_string (self, when):
+ return time.strftime (
+ '%d/%b/%Y:%H:%M:%S ',
+ time.gmtime(when)
+ ) + tz_for_log
+
+ def log (self, bytes):
+ self.channel.server.logger.log (
+ self.channel.addr[0],
+ '%d - - [%s] "%s" %d %d\n' % (
+ self.channel.addr[1],
+ self.log_date_string (time.time()),
+ self.request,
+ self.reply_code,
+ bytes
+ )
+ )
+
+ responses = {
+ 100: "Continue",
+ 101: "Switching Protocols",
+ 200: "OK",
+ 201: "Created",
+ 202: "Accepted",
+ 203: "Non-Authoritative Information",
+ 204: "No Content",
+ 205: "Reset Content",
+ 206: "Partial Content",
+ 300: "Multiple Choices",
+ 301: "Moved Permanently",
+ 302: "Moved Temporarily",
+ 303: "See Other",
+ 304: "Not Modified",
+ 305: "Use Proxy",
+ 400: "Bad Request",
+ 401: "Unauthorized",
+ 402: "Payment Required",
+ 403: "Forbidden",
+ 404: "Not Found",
+ 405: "Method Not Allowed",
+ 406: "Not Acceptable",
+ 407: "Proxy Authentication Required",
+ 408: "Request Time-out",
+ 409: "Conflict",
+ 410: "Gone",
+ 411: "Length Required",
+ 412: "Precondition Failed",
+ 413: "Request Entity Too Large",
+ 414: "Request-URI Too Large",
+ 415: "Unsupported Media Type",
+ 500: "Internal Server Error",
+ 501: "Not Implemented",
+ 502: "Bad Gateway",
+ 503: "Service Unavailable",
+ 504: "Gateway Time-out",
+ 505: "HTTP Version not supported"
+ }
+
+ # Default error message
+ DEFAULT_ERROR_MESSAGE = string.join (
+ ['<head>',
+ '<title>Error response</title>',
+ '</head>',
+ '<body>',
+ '<h1>Error response</h1>',
+ '<p>Error code %(code)d.',
+ '<p>Message: %(message)s.',
+ '</body>',
+ ''
+ ],
+ '\r\n'
+ )
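For orientation (an editorial sketch, not part of the patch): objects installed on the server answer match(request) and handle_request(request), replying through the header-dictionary and push()/done() API that http_request defines above. A minimal hypothetical handler:

    class hello_handler:
        def match(self, request):
            return request.uri == '/hello'

        def handle_request(self, request):
            body = '<b>hello</b>'
            request['Content-Type'] = 'text/html'
            request['Content-Length'] = len(body)
            request.push(body)
            request.done()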
# ===========================================================================
-# HTTP Channel Object
+# HTTP Channel Object
# ===========================================================================
class http_channel (asynchat.async_chat):
- # use a larger default output buffer
- ac_out_buffer_size = 1<<16
-
- current_request = None
- channel_counter = counter()
-
- def __init__ (self, server, conn, addr):
- self.channel_number = http_channel.channel_counter.increment()
- self.request_counter = counter()
- asynchat.async_chat.__init__ (self, conn)
- self.server = server
- self.addr = addr
- self.set_terminator ('\r\n\r\n')
- self.in_buffer = ''
- self.creation_time = int (time.time())
- self.check_maintenance()
-
- def __repr__ (self):
- ar = asynchat.async_chat.__repr__(self)[1:-1]
- return '<%s channel#: %s requests:%s>' % (
- ar,
- self.channel_number,
- self.request_counter
- )
-
- # Channel Counter, Maintenance Interval...
- maintenance_interval = 500
-
- def check_maintenance (self):
- if not self.channel_number % self.maintenance_interval:
- self.maintenance()
-
- def maintenance (self):
- self.kill_zombies()
-
- # 30-minute zombie timeout. status_handler also knows how to kill zombies.
- zombie_timeout = 30 * 60
-
- def kill_zombies (self):
- now = int (time.time())
- for channel in asyncore.socket_map.values():
- if channel.__class__ == self.__class__:
- if (now - channel.creation_time) > channel.zombie_timeout:
- channel.close()
-
- # --------------------------------------------------
- # send/recv overrides, good place for instrumentation.
- # --------------------------------------------------
-
- # this information needs to get into the request object,
- # so that it may log correctly.
- def send (self, data):
- result = asynchat.async_chat.send (self, data)
- self.server.bytes_out.increment (len(data))
- return result
-
- def recv (self, buffer_size):
- try:
- result = asynchat.async_chat.recv (self, buffer_size)
- self.server.bytes_in.increment (len(result))
- return result
- except MemoryError:
- # --- Save a Trip to Your Service Provider ---
- # It's possible for a process to eat up all the memory of
- # the machine, and put it in an extremely wedged state,
- # where medusa keeps running and can't be shut down. This
- # is where MemoryError tends to get thrown, though of
- # course it could get thrown elsewhere.
- sys.exit ("Out of Memory!")
-
- def handle_error (self):
- t, v = sys.exc_info()[:2]
- if t is SystemExit:
- raise t(v)
- else:
- asynchat.async_chat.handle_error (self)
-
- def log (self, *args):
- pass
-
- # --------------------------------------------------
- # async_chat methods
- # --------------------------------------------------
-
- def collect_incoming_data (self, data):
- if self.current_request:
- # we are receiving data (probably POST data) for a request
- self.current_request.collect_incoming_data (data)
- else:
- # we are receiving header (request) data
- self.in_buffer = self.in_buffer + data
-
- def found_terminator (self):
- if self.current_request:
- self.current_request.found_terminator()
- else:
- header = self.in_buffer
- self.in_buffer = ''
- lines = string.split (header, '\r\n')
-
- # --------------------------------------------------
- # crack the request header
- # --------------------------------------------------
-
- while lines and not lines[0]:
- # as per the suggestion of http-1.1 section 4.1, (and
- # Eric Parker <eparker@zyvex.com>), ignore a leading
- # blank lines (buggy browsers tack it onto the end of
- # POST requests)
- lines = lines[1:]
-
- if not lines:
- self.close_when_done()
- return
-
- request = lines[0]
-
- # unquote path if necessary (thanks to Skip Montaro for pointing
- # out that we must unquote in piecemeal fashion).
- if '%' in request:
- request = unquote (request)
-
- command, uri, version = crack_request (request)
- header = join_headers (lines[1:])
-
- r = http_request (self, request, command, uri, version, header)
- self.request_counter.increment()
- self.server.total_requests.increment()
-
- if command is None:
- self.log_info ('Bad HTTP request: %s' % repr(request), 'error')
- r.error (400)
- return
-
- # --------------------------------------------------
- # handler selection and dispatch
- # --------------------------------------------------
- for h in self.server.handlers:
- if h.match (r):
- try:
- self.current_request = r
- # This isn't used anywhere.
- # r.handler = h # CYCLE
- h.handle_request (r)
- except:
- self.server.exceptions.increment()
- (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
- self.log_info(
- 'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line),
- 'error')
- try:
- r.error (500)
- except:
- pass
- return
-
- # no handlers, so complain
- r.error (404)
-
- def writable (self):
- # this is just the normal async_chat 'writable', here for comparison
- return self.ac_out_buffer or len(self.producer_fifo)
-
- def writable_for_proxy (self):
- # this version of writable supports the idea of a 'stalled' producer
- # [i.e., it's not ready to produce any output yet] This is needed by
- # the proxy, which will be waiting for the magic combination of
- # 1) hostname resolved
- # 2) connection made
- # 3) data available.
- if self.ac_out_buffer:
- return 1
- elif len(self.producer_fifo):
- p = self.producer_fifo.first()
- if hasattr (p, 'stalled'):
- return not p.stalled()
- else:
- return 1
+ # use a larger default output buffer
+ ac_out_buffer_size = 1<<16
+
+ current_request = None
+ channel_counter = counter()
+
+ def __init__ (self, server, conn, addr):
+ self.channel_number = http_channel.channel_counter.increment()
+ self.request_counter = counter()
+ asynchat.async_chat.__init__ (self, conn)
+ self.server = server
+ self.addr = addr
+ self.set_terminator ('\r\n\r\n')
+ self.in_buffer = ''
+ self.creation_time = int (time.time())
+ self.check_maintenance()
+
+ def __repr__ (self):
+ ar = asynchat.async_chat.__repr__(self)[1:-1]
+ return '<%s channel#: %s requests:%s>' % (
+ ar,
+ self.channel_number,
+ self.request_counter
+ )
+
+ # Channel Counter, Maintenance Interval...
+ maintenance_interval = 500
+
+ def check_maintenance (self):
+ if not self.channel_number % self.maintenance_interval:
+ self.maintenance()
+
+ def maintenance (self):
+ self.kill_zombies()
+
+ # 30-minute zombie timeout. status_handler also knows how to kill zombies.
+ zombie_timeout = 30 * 60
+
+ def kill_zombies (self):
+ now = int (time.time())
+ for channel in asyncore.socket_map.values():
+ if channel.__class__ == self.__class__:
+ if (now - channel.creation_time) > channel.zombie_timeout:
+ channel.close()
+
+ # --------------------------------------------------
+ # send/recv overrides, good place for instrumentation.
+ # --------------------------------------------------
+
+ # this information needs to get into the request object,
+ # so that it may log correctly.
+ def send (self, data):
+ result = asynchat.async_chat.send (self, data)
+ self.server.bytes_out.increment (len(data))
+ return result
+
+ def recv (self, buffer_size):
+ try:
+ result = asynchat.async_chat.recv (self, buffer_size)
+ self.server.bytes_in.increment (len(result))
+ return result
+ except MemoryError:
+ # --- Save a Trip to Your Service Provider ---
+ # It's possible for a process to eat up all the memory of
+ # the machine, and put it in an extremely wedged state,
+ # where medusa keeps running and can't be shut down. This
+ # is where MemoryError tends to get thrown, though of
+ # course it could get thrown elsewhere.
+ sys.exit ("Out of Memory!")
+
+ def handle_error (self):
+ t, v = sys.exc_info()[:2]
+ if t is SystemExit:
+ raise t(v)
+ else:
+ asynchat.async_chat.handle_error (self)
+
+ def log (self, *args):
+ pass
+
+ # --------------------------------------------------
+ # async_chat methods
+ # --------------------------------------------------
+
+ def collect_incoming_data (self, data):
+ if self.current_request:
+ # we are receiving data (probably POST data) for a request
+ self.current_request.collect_incoming_data (data)
+ else:
+ # we are receiving header (request) data
+ self.in_buffer = self.in_buffer + data
+
+ def found_terminator (self):
+ if self.current_request:
+ self.current_request.found_terminator()
+ else:
+ header = self.in_buffer
+ self.in_buffer = ''
+ lines = string.split (header, '\r\n')
+
+ # --------------------------------------------------
+ # crack the request header
+ # --------------------------------------------------
+
+ while lines and not lines[0]:
+ # as per the suggestion of http-1.1 section 4.1, (and
+                # Eric Parker <eparker@zyvex.com>), ignore leading
+                # blank lines (buggy browsers tack them onto the end of
+ # POST requests)
+ lines = lines[1:]
+
+ if not lines:
+ self.close_when_done()
+ return
+
+ request = lines[0]
+
+            # unquote path if necessary (thanks to Skip Montanaro for pointing
+ # out that we must unquote in piecemeal fashion).
+ if '%' in request:
+ request = unquote (request)
+
+ command, uri, version = crack_request (request)
+ header = join_headers (lines[1:])
+
+ r = http_request (self, request, command, uri, version, header)
+ self.request_counter.increment()
+ self.server.total_requests.increment()
+
+ if command is None:
+ self.log_info ('Bad HTTP request: %s' % repr(request), 'error')
+ r.error (400)
+ return
+
+ # --------------------------------------------------
+ # handler selection and dispatch
+ # --------------------------------------------------
+ for h in self.server.handlers:
+ if h.match (r):
+ try:
+ self.current_request = r
+ # This isn't used anywhere.
+ # r.handler = h # CYCLE
+ h.handle_request (r)
+ except:
+ self.server.exceptions.increment()
+ (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
+ self.log_info(
+ 'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line),
+ 'error')
+ try:
+ r.error (500)
+ except:
+ pass
+ return
+
+ # no handlers, so complain
+ r.error (404)
+
+ def writable (self):
+ # this is just the normal async_chat 'writable', here for comparison
+ return self.ac_out_buffer or len(self.producer_fifo)
+
+ def writable_for_proxy (self):
+ # this version of writable supports the idea of a 'stalled' producer
+ # [i.e., it's not ready to produce any output yet] This is needed by
+ # the proxy, which will be waiting for the magic combination of
+ # 1) hostname resolved
+ # 2) connection made
+ # 3) data available.
+ if self.ac_out_buffer:
+ return 1
+ elif len(self.producer_fifo):
+ p = self.producer_fifo.first()
+ if hasattr (p, 'stalled'):
+ return not p.stalled()
+ else:
+ return 1
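The send/recv overrides above are flagged as a good place for instrumentation; a sketch of what that looks like, as a hypothetical subclass keeping a per-channel byte count:

    class counting_channel(http_channel):
        bytes_sent = 0

        def send(self, data):
            # ride on the instrumentation hook noted above
            self.bytes_sent = self.bytes_sent + len(data)
            return http_channel.send(self, data)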
# ===========================================================================
-# HTTP Server Object
+# HTTP Server Object
# ===========================================================================
class http_server (asyncore.dispatcher):
- SERVER_IDENT = 'HTTP Server (V%s)' % VERSION_STRING
-
- channel_class = http_channel
-
- def __init__ (self, ip, port, resolver=None, logger_object=None):
- self.ip = ip
- self.port = port
- asyncore.dispatcher.__init__ (self)
- self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
-
- self.handlers = []
-
- if not logger_object:
- logger_object = logger.file_logger (sys.stdout)
-
- self.set_reuse_addr()
- self.bind ((ip, port))
-
- # lower this to 5 if your OS complains
- self.listen (1024)
-
- host, port = self.socket.getsockname()
- if not ip:
- self.log_info('Computing default hostname', 'warning')
- ip = socket.gethostbyname (socket.gethostname())
- try:
- self.server_name = socket.gethostbyaddr (ip)[0]
- except socket.error:
- self.log_info('Cannot do reverse lookup', 'warning')
- self.server_name = ip # use the IP address as the "hostname"
-
- self.server_port = port
- self.total_clients = counter()
- self.total_requests = counter()
- self.exceptions = counter()
- self.bytes_out = counter()
- self.bytes_in = counter()
-
- if not logger_object:
- logger_object = logger.file_logger (sys.stdout)
-
- if resolver:
- self.logger = logger.resolving_logger (resolver, logger_object)
- else:
- self.logger = logger.unresolving_logger (logger_object)
-
- self.log_info (
- 'Medusa (V%s) started at %s'
- '\n\tHostname: %s'
- '\n\tPort:%d'
- '\n' % (
- VERSION_STRING,
- time.ctime(time.time()),
- self.server_name,
- port,
- )
- )
-
- def writable (self):
- return 0
-
- def handle_read (self):
- pass
-
- def readable (self):
- return self.accepting
-
- def handle_connect (self):
- pass
-
- def handle_accept (self):
- self.total_clients.increment()
- try:
- conn, addr = self.accept()
- except socket.error:
- # linux: on rare occasions we get a bogus socket back from
- # accept. socketmodule.c:makesockaddr complains that the
- # address family is unknown. We don't want the whole server
- # to shut down because of this.
- self.log_info ('warning: server accept() threw an exception', 'warning')
- return
- except TypeError:
- # unpack non-sequence. this can happen when a read event
- # fires on a listening socket, but when we call accept()
- # we get EWOULDBLOCK, so dispatcher.accept() returns None.
- # Seen on FreeBSD3.
- self.log_info ('warning: server accept() threw EWOULDBLOCK', 'warning')
- return
-
- self.channel_class (self, conn, addr)
-
- def install_handler (self, handler, back=0):
- if back:
- self.handlers.append (handler)
- else:
- self.handlers.insert (0, handler)
-
- def remove_handler (self, handler):
- self.handlers.remove (handler)
-
- def status (self):
- def nice_bytes (n):
- return string.join (status_handler.english_bytes (n))
-
- handler_stats = filter (None, map (maybe_status, self.handlers))
-
- if self.total_clients:
- ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
- else:
- ratio = 0.0
-
- return producers.composite_producer (
- fifo ([producers.lines_producer (
- ['<h2>%s</h2>' % self.SERVER_IDENT,
- '<br>Listening on: <b>Host:</b> %s' % self.server_name,
- '<b>Port:</b> %d' % self.port,
- '<p><ul>'
- '<li>Total <b>Clients:</b> %s' % self.total_clients,
- '<b>Requests:</b> %s' % self.total_requests,
- '<b>Requests/Client:</b> %.1f' % (ratio),
- '<li>Total <b>Bytes In:</b> %s' % (nice_bytes (self.bytes_in.as_long())),
- '<b>Bytes Out:</b> %s' % (nice_bytes (self.bytes_out.as_long())),
- '<li>Total <b>Exceptions:</b> %s' % self.exceptions,
- '</ul><p>'
- '<b>Extension List</b><ul>',
- ])] + handler_stats + [producers.simple_producer('</ul>')]
- )
- )
+ SERVER_IDENT = 'HTTP Server (V%s)' % VERSION_STRING
+
+ channel_class = http_channel
+
+ def __init__ (self, ip, port, resolver=None, logger_object=None):
+ self.ip = ip
+ self.port = port
+ asyncore.dispatcher.__init__ (self)
+ self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+
+ self.handlers = []
+
+ if not logger_object:
+ logger_object = logger.file_logger (sys.stdout)
+
+ self.set_reuse_addr()
+ self.bind ((ip, port))
+
+ # lower this to 5 if your OS complains
+ self.listen (1024)
+
+ host, port = self.socket.getsockname()
+ if not ip:
+ self.log_info('Computing default hostname', 'warning')
+ ip = socket.gethostbyname (socket.gethostname())
+ try:
+ self.server_name = socket.gethostbyaddr (ip)[0]
+ except socket.error:
+ self.log_info('Cannot do reverse lookup', 'warning')
+ self.server_name = ip # use the IP address as the "hostname"
+
+ self.server_port = port
+ self.total_clients = counter()
+ self.total_requests = counter()
+ self.exceptions = counter()
+ self.bytes_out = counter()
+ self.bytes_in = counter()
+
+ if not logger_object:
+ logger_object = logger.file_logger (sys.stdout)
+
+ if resolver:
+ self.logger = logger.resolving_logger (resolver, logger_object)
+ else:
+ self.logger = logger.unresolving_logger (logger_object)
+
+ self.log_info (
+ 'Medusa (V%s) started at %s'
+ '\n\tHostname: %s'
+            '\n\tPort: %d'
+ '\n' % (
+ VERSION_STRING,
+ time.ctime(time.time()),
+ self.server_name,
+ port,
+ )
+ )
+
+ def writable (self):
+ return 0
+
+ def handle_read (self):
+ pass
+
+ def readable (self):
+ return self.accepting
+
+ def handle_connect (self):
+ pass
+
+ def handle_accept (self):
+ self.total_clients.increment()
+ try:
+ conn, addr = self.accept()
+ except socket.error:
+ # linux: on rare occasions we get a bogus socket back from
+ # accept. socketmodule.c:makesockaddr complains that the
+ # address family is unknown. We don't want the whole server
+ # to shut down because of this.
+ self.log_info ('warning: server accept() threw an exception', 'warning')
+ return
+ except TypeError:
+ # unpack non-sequence. this can happen when a read event
+ # fires on a listening socket, but when we call accept()
+ # we get EWOULDBLOCK, so dispatcher.accept() returns None.
+ # Seen on FreeBSD3.
+ self.log_info ('warning: server accept() threw EWOULDBLOCK', 'warning')
+ return
+
+ self.channel_class (self, conn, addr)
+
+ def install_handler (self, handler, back=0):
+ if back:
+ self.handlers.append (handler)
+ else:
+ self.handlers.insert (0, handler)
+
+ def remove_handler (self, handler):
+ self.handlers.remove (handler)
+
+ def status (self):
+ def nice_bytes (n):
+ return string.join (status_handler.english_bytes (n))
+
+ handler_stats = filter (None, map (maybe_status, self.handlers))
+
+ if self.total_clients:
+ ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
+ else:
+ ratio = 0.0
+
+ return producers.composite_producer (
+ fifo ([producers.lines_producer (
+ ['<h2>%s</h2>' % self.SERVER_IDENT,
+ '<br>Listening on: <b>Host:</b> %s' % self.server_name,
+ '<b>Port:</b> %d' % self.port,
+ '<p><ul>'
+ '<li>Total <b>Clients:</b> %s' % self.total_clients,
+ '<b>Requests:</b> %s' % self.total_requests,
+ '<b>Requests/Client:</b> %.1f' % (ratio),
+ '<li>Total <b>Bytes In:</b> %s' % (nice_bytes (self.bytes_in.as_long())),
+ '<b>Bytes Out:</b> %s' % (nice_bytes (self.bytes_out.as_long())),
+ '<li>Total <b>Exceptions:</b> %s' % self.exceptions,
+ '</ul><p>'
+ '<b>Extension List</b><ul>',
+ ])] + handler_stats + [producers.simple_producer('</ul>')]
+ )
+ )
def maybe_status (thing):
- if hasattr (thing, 'status'):
- return thing.status()
- else:
- return None
+ if hasattr (thing, 'status'):
+ return thing.status()
+ else:
+ return None
CONNECTION = re.compile ('Connection: (.*)', re.IGNORECASE)
# merge multi-line headers
# [486dx2: ~500/sec]
def join_headers (headers):
- r = []
- for i in range(len(headers)):
- if headers[i][0] in ' \t':
- r[-1] = r[-1] + headers[i][1:]
- else:
- r.append (headers[i])
- return r
+ r = []
+ for i in range(len(headers)):
+ if headers[i][0] in ' \t':
+ r[-1] = r[-1] + headers[i][1:]
+ else:
+ r.append (headers[i])
+ return r
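Continuation lines (leading space or tab, per RFC 822 folding) are merged into the previous header; for example:

    print(join_headers([
        'Subject: a very',
        '\tlong subject line',   # the tab marks a continuation
        'Host: example.com',
    ]))
    # -> ['Subject: a verylong subject line', 'Host: example.com']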
def get_header (head_reg, lines, group=1):
- for line in lines:
- m = head_reg.match (line)
- if m and m.end() == len(line):
- return m.group (group)
- return ''
+ for line in lines:
+ m = head_reg.match (line)
+ if m and m.end() == len(line):
+ return m.group (group)
+ return ''
def get_header_match (head_reg, lines):
- for line in lines:
- m = head_reg.match (line)
- if m and m.end() == len(line):
- return m
- return ''
+ for line in lines:
+ m = head_reg.match (line)
+ if m and m.end() == len(line):
+ return m
+ return ''
REQUEST = re.compile ('([^ ]+) ([^ ]+)(( HTTP/([0-9.]+))$|$)')
def crack_request (r):
- m = REQUEST.match (r)
- if m.end() == len(r):
- if m.group(3):
- version = m.group(5)
- else:
- version = None
- return string.lower (m.group(1)), m.group(2), version
- else:
- return None, None, None
+ m = REQUEST.match (r)
+    if m and m.end() == len(r):  # a malformed request line yields no match at all
+ if m.group(3):
+ version = m.group(5)
+ else:
+ version = None
+ return string.lower (m.group(1)), m.group(2), version
+ else:
+ return None, None, None
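Concretely, a request line cracks like this:

    print(crack_request('GET /index.html HTTP/1.0'))
    # -> ('get', '/index.html', '1.0')
    print(crack_request('GET /index.html'))
    # -> ('get', '/index.html', None)   # version-less, http/0.9 style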
class fifo:
- def __init__ (self, list=None):
- if not list:
- self.list = []
- else:
- self.list = list
-
- def __len__ (self):
- return len(self.list)
-
- def first (self):
- return self.list[0]
-
- def push_front (self, object):
- self.list.insert (0, object)
-
- def push (self, data):
- self.list.append (data)
-
- def pop (self):
- if self.list:
- result = self.list[0]
- del self.list[0]
- return (1, result)
- else:
- return (0, None)
+ def __init__ (self, list=None):
+ if not list:
+ self.list = []
+ else:
+ self.list = list
+
+ def __len__ (self):
+ return len(self.list)
+
+ def first (self):
+ return self.list[0]
+
+ def push_front (self, object):
+ self.list.insert (0, object)
+
+ def push (self, data):
+ self.list.append (data)
+
+ def pop (self):
+ if self.list:
+ result = self.list[0]
+ del self.list[0]
+ return (1, result)
+ else:
+ return (0, None)
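The pop() protocol returns a (flag, value) pair instead of raising on an empty queue:

    q = fifo()
    q.push('a')
    q.push_front('b')
    print(q.pop())   # -> (1, 'b')
    print(q.pop())   # -> (1, 'a')
    print(q.pop())   # -> (0, None): empty, no exception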
def compute_timezone_for_log ():
- if time.daylight:
- tz = time.altzone
- else:
- tz = time.timezone
- if tz > 0:
- neg = 1
- else:
- neg = 0
- tz = -tz
- h, rem = divmod (tz, 3600)
- m, rem = divmod (rem, 60)
- if neg:
- return '-%02d%02d' % (h, m)
- else:
- return '+%02d%02d' % (h, m)
+ if time.daylight:
+ tz = time.altzone
+ else:
+ tz = time.timezone
+ if tz > 0:
+ neg = 1
+ else:
+ neg = 0
+ tz = -tz
+ h, rem = divmod (tz, 3600)
+ m, rem = divmod (rem, 60)
+ if neg:
+ return '-%02d%02d' % (h, m)
+ else:
+ return '+%02d%02d' % (h, m)
# if you run this program over a TZ change boundary, this will be invalid.
tz_for_log = compute_timezone_for_log()
if __name__ == '__main__':
- import sys
- if len(sys.argv) < 2:
- print('usage: %s <root> <port>' % (sys.argv[0]))
- else:
- import monitor
- import filesys
- import default_handler
- import status_handler
- import ftp_server
- import chat_server
- import resolver
- import logger
- rs = resolver.caching_resolver ('127.0.0.1')
- lg = logger.file_logger (sys.stdout)
- ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', 9999)
- fs = filesys.os_filesystem (sys.argv[1])
- dh = default_handler.default_handler (fs)
- hs = http_server ('', string.atoi (sys.argv[2]), rs, lg)
- hs.install_handler (dh)
- ftp = ftp_server.ftp_server (
- ftp_server.dummy_authorizer(sys.argv[1]),
- port=8021,
- resolver=rs,
- logger_object=lg
- )
- cs = chat_server.chat_server ('', 7777)
- sh = status_handler.status_extension([hs,ms,ftp,cs,rs])
- hs.install_handler (sh)
- if ('-p' in sys.argv):
- def profile_loop ():
- try:
- asyncore.loop()
- except KeyboardInterrupt:
- pass
- import profile
- profile.run ('profile_loop()', 'profile.out')
- else:
- asyncore.loop()
+ import sys
+    if len(sys.argv) < 3:  # both <root> and <port> are required below
+ print('usage: %s <root> <port>' % (sys.argv[0]))
+ else:
+ import monitor
+ import filesys
+ import default_handler
+ import status_handler
+ import ftp_server
+ import chat_server
+ import resolver
+ import logger
+ rs = resolver.caching_resolver ('127.0.0.1')
+ lg = logger.file_logger (sys.stdout)
+ ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', 9999)
+ fs = filesys.os_filesystem (sys.argv[1])
+ dh = default_handler.default_handler (fs)
+ hs = http_server ('', string.atoi (sys.argv[2]), rs, lg)
+ hs.install_handler (dh)
+ ftp = ftp_server.ftp_server (
+ ftp_server.dummy_authorizer(sys.argv[1]),
+ port=8021,
+ resolver=rs,
+ logger_object=lg
+ )
+ cs = chat_server.chat_server ('', 7777)
+ sh = status_handler.status_extension([hs,ms,ftp,cs,rs])
+ hs.install_handler (sh)
+ if ('-p' in sys.argv):
+ def profile_loop ():
+ try:
+ asyncore.loop()
+ except KeyboardInterrupt:
+ pass
+ import profile
+ profile.run ('profile_loop()', 'profile.out')
+ else:
+ asyncore.loop()
diff --git a/demo/medusa/https_server.py b/demo/medusa/https_server.py
index 194be34..47d7476 100644
--- a/demo/medusa/https_server.py
+++ b/demo/medusa/https_server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-"""A https server built on Medusa's http_server.
+"""A https server built on Medusa's http_server.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
@@ -55,7 +55,7 @@ class https_server(http_server.http_server):
sys.stdout.write(self.SERVER_IDENT + '\n\n')
sys.stdout.flush()
self.ssl_ctx=ssl_ctx
-
+
def handle_accept(self):
# Cribbed from http_server.
self.total_clients.increment()
diff --git a/demo/medusa/logger.py b/demo/medusa/logger.py
index 48be665..99a76bb 100644
--- a/demo/medusa/logger.py
+++ b/demo/medusa/logger.py
@@ -26,55 +26,55 @@ import stat # v
# a file-like object that captures output, and
# makes sure to flush it always... this could
# be connected to:
-# o stdio file
-# o low-level file
-# o socket channel
-# o syslog output...
+# o stdio file
+# o low-level file
+# o socket channel
+# o syslog output...
class file_logger:
-
- # pass this either a path or a file object.
- def __init__ (self, file, flush=1, mode='a'):
- if type(file) == type(''):
- if (file == '-'):
- import sys
- self.file = sys.stdout
- else:
- self.file = open (file, mode)
- else:
- self.file = file
- self.do_flush = flush
-
- def __repr__ (self):
- return '<file logger: %s>' % self.file
-
- def write (self, data):
- self.file.write (data)
- self.maybe_flush()
-
- def writeline (self, line):
- self.file.writeline (line)
- self.maybe_flush()
-
- def writelines (self, lines):
- self.file.writelines (lines)
- self.maybe_flush()
-
- def maybe_flush (self):
- if self.do_flush:
- self.file.flush()
-
- def flush (self):
- self.file.flush()
-
- def softspace (self, *args):
- pass
-
- def log (self, message):
- if message[-1] not in ('\r', '\n'):
- self.write (message + '\n')
- else:
- self.write (message)
+
+ # pass this either a path or a file object.
+ def __init__ (self, file, flush=1, mode='a'):
+ if type(file) == type(''):
+ if (file == '-'):
+ import sys
+ self.file = sys.stdout
+ else:
+ self.file = open (file, mode)
+ else:
+ self.file = file
+ self.do_flush = flush
+
+ def __repr__ (self):
+ return '<file logger: %s>' % self.file
+
+ def write (self, data):
+ self.file.write (data)
+ self.maybe_flush()
+
+ def writeline (self, line):
+        self.file.write (line)  # file objects have no writeline(); write() is what was meant
+ self.maybe_flush()
+
+ def writelines (self, lines):
+ self.file.writelines (lines)
+ self.maybe_flush()
+
+ def maybe_flush (self):
+ if self.do_flush:
+ self.file.flush()
+
+ def flush (self):
+ self.file.flush()
+
+ def softspace (self, *args):
+ pass
+
+ def log (self, message):
+ if message[-1] not in ('\r', '\n'):
+ self.write (message + '\n')
+ else:
+ self.write (message)
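Typical use of the class above, with '-' selecting stdout as the sink:

    lg = file_logger('-')        # '-' maps to sys.stdout in __init__ above
    lg.log('starting up')        # a newline is appended when missing
    lg.log('shutting down\n')    # already terminated, written as-is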
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
@@ -83,64 +83,64 @@ class file_logger:
# would take time, during which medusa would do nothing else.
class rotating_file_logger (file_logger):
-
- # If freq is non-None we back up "daily", "weekly", or "monthly".
- # Else if maxsize is non-None we back up whenever the log gets
- # to big. If both are None we never back up.
- def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
- self.filename = file
- self.mode = mode
- self.file = open (file, mode)
- self.freq = freq
- self.maxsize = maxsize
- self.rotate_when = self.next_backup(self.freq)
- self.do_flush = flush
-
- def __repr__ (self):
- return '<rotating-file logger: %s>' % self.file
-
- # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
- def next_backup (self, freq):
- (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
- if freq == 'daily':
- return time.mktime(yr,mo,day+1, 0,0,0, 0,0,-1)
- elif freq == 'weekly':
- return time.mktime(yr,mo,day-wd+7, 0,0,0, 0,0,-1) # wd(monday)==0
- elif freq == 'monthly':
- return time.mktime(yr,mo+1,1, 0,0,0, 0,0,-1)
- else:
- return None # not a date-based backup
-
- def maybe_flush (self): # rotate first if necessary
- self.maybe_rotate()
- if self.do_flush: # from file_logger()
- self.file.flush()
-
- def maybe_rotate (self):
- if self.freq and time.time() > self.rotate_when:
- self.rotate()
- self.rotate_when = self.next_backup(self.freq)
- elif self.maxsize: # rotate when we get too big
- try:
- if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
- self.rotate()
- except os.error: # file not found, probably
- self.rotate() # will create a new file
-
- def rotate (self):
- (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
- try:
- self.file.close()
- newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
- try:
- open(newname, "r").close() # check if file exists
- newname = newname + "-%02d%02d%02d" % (hr, min, sec)
- except: # YEARMODY is unique
- pass
- os.rename(self.filename, newname)
- self.file = open(self.filename, self.mode)
- except:
- pass
+
+ # If freq is non-None we back up "daily", "weekly", or "monthly".
+ # Else if maxsize is non-None we back up whenever the log gets
+    # too big. If both are None we never back up.
+ def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
+ self.filename = file
+ self.mode = mode
+ self.file = open (file, mode)
+ self.freq = freq
+ self.maxsize = maxsize
+ self.rotate_when = self.next_backup(self.freq)
+ self.do_flush = flush
+
+ def __repr__ (self):
+ return '<rotating-file logger: %s>' % self.file
+
+ # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
+ def next_backup (self, freq):
+ (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
+        if freq == 'daily':
+            return time.mktime((yr,mo,day+1, 0,0,0, 0,0,-1))
+        elif freq == 'weekly':
+            return time.mktime((yr,mo,day-wd+7, 0,0,0, 0,0,-1)) # wd(monday)==0
+        elif freq == 'monthly':
+            return time.mktime((yr,mo+1,1, 0,0,0, 0,0,-1))
+ else:
+ return None # not a date-based backup
+
+ def maybe_flush (self): # rotate first if necessary
+ self.maybe_rotate()
+ if self.do_flush: # from file_logger()
+ self.file.flush()
+
+ def maybe_rotate (self):
+ if self.freq and time.time() > self.rotate_when:
+ self.rotate()
+ self.rotate_when = self.next_backup(self.freq)
+ elif self.maxsize: # rotate when we get too big
+ try:
+ if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
+ self.rotate()
+ except os.error: # file not found, probably
+ self.rotate() # will create a new file
+
+ def rotate (self):
+ (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
+ try:
+ self.file.close()
+ newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
+ try:
+ open(newname, "r").close() # check if file exists
+ newname = newname + "-%02d%02d%02d" % (hr, min, sec)
+ except: # YEARMODY is unique
+ pass
+ os.rename(self.filename, newname)
+ self.file = open(self.filename, self.mode)
+ except:
+ pass
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.
@@ -155,108 +155,108 @@ import m_syslog
syslog_logger = m_syslog.syslog_client
class syslog_logger (m_syslog.syslog_client):
- def __init__ (self, address, facility='user'):
- m_syslog.syslog_client.__init__ (self, address)
- self.facility = m_syslog.facility_names[facility]
- self.address=address
-
- def __repr__ (self):
- return '<syslog logger address=%s>' % (repr(self.address))
-
- def log (self, message):
- m_syslog.syslog_client.log (
- self,
- message,
- facility=self.facility,
- priority=m_syslog.LOG_INFO
- )
+ def __init__ (self, address, facility='user'):
+ m_syslog.syslog_client.__init__ (self, address)
+ self.facility = m_syslog.facility_names[facility]
+ self.address=address
+
+ def __repr__ (self):
+ return '<syslog logger address=%s>' % (repr(self.address))
+
+ def log (self, message):
+ m_syslog.syslog_client.log (
+ self,
+ message,
+ facility=self.facility,
+ priority=m_syslog.LOG_INFO
+ )
# log to a stream socket, asynchronously
class socket_logger (asynchat.async_chat):
- def __init__ (self, address):
+ def __init__ (self, address):
+
+ if type(address) == type(''):
+ self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
+ else:
+ self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
- if type(address) == type(''):
- self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
- else:
- self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
+ self.connect (address)
+ self.address = address
- self.connect (address)
- self.address = address
-
- def __repr__ (self):
- return '<socket logger: address=%s>' % (self.address)
+ def __repr__ (self):
+ return '<socket logger: address=%s>' % (self.address)
- def log (self, message):
- if message[-2:] != '\r\n':
- self.socket.push (message + '\r\n')
- else:
- self.socket.push (message)
+    def log (self, message):
+        if message[-2:] != '\r\n':
+            self.push (message + '\r\n')
+        else:
+            self.push (message)
# log to multiple places
class multi_logger:
- def __init__ (self, loggers):
- self.loggers = loggers
+ def __init__ (self, loggers):
+ self.loggers = loggers
- def __repr__ (self):
- return '<multi logger: %s>' % (repr(self.loggers))
+ def __repr__ (self):
+ return '<multi logger: %s>' % (repr(self.loggers))
- def log (self, message):
- for logger in self.loggers:
- logger.log (message)
+ def log (self, message):
+ for logger in self.loggers:
+ logger.log (message)
class resolving_logger:
- """Feed (ip, message) combinations into this logger to get a
- resolved hostname in front of the message. The message will not
- be logged until the PTR request finishes (or fails)."""
-
- def __init__ (self, resolver, logger):
- self.resolver = resolver
- self.logger = logger
-
- class logger_thunk:
- def __init__ (self, message, logger):
- self.message = message
- self.logger = logger
-
- def __call__ (self, host, ttl, answer):
- if not answer:
- answer = host
- self.logger.log ('%s:%s' % (answer, self.message))
-
- def log (self, ip, message):
- self.resolver.resolve_ptr (
- ip,
- self.logger_thunk (
- message,
- self.logger
- )
- )
+ """Feed (ip, message) combinations into this logger to get a
+ resolved hostname in front of the message. The message will not
+ be logged until the PTR request finishes (or fails)."""
+
+ def __init__ (self, resolver, logger):
+ self.resolver = resolver
+ self.logger = logger
+
+ class logger_thunk:
+ def __init__ (self, message, logger):
+ self.message = message
+ self.logger = logger
+
+ def __call__ (self, host, ttl, answer):
+ if not answer:
+ answer = host
+ self.logger.log ('%s:%s' % (answer, self.message))
+
+ def log (self, ip, message):
+ self.resolver.resolve_ptr (
+ ip,
+ self.logger_thunk (
+ message,
+ self.logger
+ )
+ )
class unresolving_logger:
- "Just in case you don't want to resolve"
- def __init__ (self, logger):
- self.logger = logger
+ "Just in case you don't want to resolve"
+ def __init__ (self, logger):
+ self.logger = logger
- def log (self, ip, message):
- self.logger.log ('%s:%s' % (ip, message))
+ def log (self, ip, message):
+ self.logger.log ('%s:%s' % (ip, message))
def strip_eol (line):
- while line and line[-1] in '\r\n':
- line = line[:-1]
- return line
+ while line and line[-1] in '\r\n':
+ line = line[:-1]
+ return line
class tail_logger:
- "Keep track of the last <size> log messages"
- def __init__ (self, logger, size=500):
- self.size = size
- self.logger = logger
- self.messages = []
-
- def log (self, message):
- self.messages.append (strip_eol (message))
- if len (self.messages) > self.size:
- del self.messages[0]
- self.logger.log (message)
+ "Keep track of the last <size> log messages"
+ def __init__ (self, logger, size=500):
+ self.size = size
+ self.logger = logger
+ self.messages = []
+
+ def log (self, message):
+ self.messages.append (strip_eol (message))
+ if len (self.messages) > self.size:
+ del self.messages[0]
+ self.logger.log (message)
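
Note on the logger classes above: they compose by wrapping, so a typical setup
chains several of them. A minimal sketch, assuming the demo/medusa directory is
on sys.path and ./access.log is writable (both assumptions, not part of this patch):

    import logger

    # rotate daily, or sooner once the file passes ~1 MB
    base = logger.rotating_file_logger('access.log', freq='daily', maxsize=1<<20)
    # keep the last 100 lines in memory (used by status_handler further down)
    tail = logger.tail_logger(base, size=100)
    # prefix the client IP without doing a DNS PTR lookup
    log = logger.unresolving_logger(tail)
    log.log('127.0.0.1', 'GET / HTTP/1.0')
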
diff --git a/demo/medusa/m_syslog.py b/demo/medusa/m_syslog.py
index bb376db..57c3924 100644
--- a/demo/medusa/m_syslog.py
+++ b/demo/medusa/m_syslog.py
@@ -2,9 +2,9 @@
# ======================================================================
# Copyright 1997 by Sam Rushing
-#
+#
# All Rights Reserved
-#
+#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
@@ -13,7 +13,7 @@
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
-#
+#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -65,113 +65,113 @@ Usage:
#
# priorities (these are ordered)
-LOG_EMERG = 0 # system is unusable
-LOG_ALERT = 1 # action must be taken immediately
-LOG_CRIT = 2 # critical conditions
-LOG_ERR = 3 # error conditions
-LOG_WARNING = 4 # warning conditions
-LOG_NOTICE = 5 # normal but significant condition
-LOG_INFO = 6 # informational
-LOG_DEBUG = 7 # debug-level messages
-
-# facility codes
-LOG_KERN = 0 # kernel messages
-LOG_USER = 1 # random user-level messages
-LOG_MAIL = 2 # mail system
-LOG_DAEMON = 3 # system daemons
-LOG_AUTH = 4 # security/authorization messages
-LOG_SYSLOG = 5 # messages generated internally by syslogd
-LOG_LPR = 6 # line printer subsystem
-LOG_NEWS = 7 # network news subsystem
-LOG_UUCP = 8 # UUCP subsystem
-LOG_CRON = 9 # clock daemon
-LOG_AUTHPRIV = 10 # security/authorization messages (private)
-
-# other codes through 15 reserved for system use
-LOG_LOCAL0 = 16 # reserved for local use
-LOG_LOCAL1 = 17 # reserved for local use
-LOG_LOCAL2 = 18 # reserved for local use
-LOG_LOCAL3 = 19 # reserved for local use
-LOG_LOCAL4 = 20 # reserved for local use
-LOG_LOCAL5 = 21 # reserved for local use
-LOG_LOCAL6 = 22 # reserved for local use
-LOG_LOCAL7 = 23 # reserved for local use
+LOG_EMERG = 0 # system is unusable
+LOG_ALERT = 1 # action must be taken immediately
+LOG_CRIT = 2 # critical conditions
+LOG_ERR = 3 # error conditions
+LOG_WARNING = 4 # warning conditions
+LOG_NOTICE = 5 # normal but significant condition
+LOG_INFO = 6 # informational
+LOG_DEBUG = 7 # debug-level messages
+
+# facility codes
+LOG_KERN = 0 # kernel messages
+LOG_USER = 1 # random user-level messages
+LOG_MAIL = 2 # mail system
+LOG_DAEMON = 3 # system daemons
+LOG_AUTH = 4 # security/authorization messages
+LOG_SYSLOG = 5 # messages generated internally by syslogd
+LOG_LPR = 6 # line printer subsystem
+LOG_NEWS = 7 # network news subsystem
+LOG_UUCP = 8 # UUCP subsystem
+LOG_CRON = 9 # clock daemon
+LOG_AUTHPRIV = 10 # security/authorization messages (private)
+
+# other codes through 15 reserved for system use
+LOG_LOCAL0 = 16 # reserved for local use
+LOG_LOCAL1 = 17 # reserved for local use
+LOG_LOCAL2 = 18 # reserved for local use
+LOG_LOCAL3 = 19 # reserved for local use
+LOG_LOCAL4 = 20 # reserved for local use
+LOG_LOCAL5 = 21 # reserved for local use
+LOG_LOCAL6 = 22 # reserved for local use
+LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
- "alert": LOG_ALERT,
- "crit": LOG_CRIT,
- "debug": LOG_DEBUG,
- "emerg": LOG_EMERG,
- "err": LOG_ERR,
- "error": LOG_ERR, # DEPRECATED
- "info": LOG_INFO,
- "notice": LOG_NOTICE,
- "panic": LOG_EMERG, # DEPRECATED
- "warn": LOG_WARNING, # DEPRECATED
- "warning": LOG_WARNING,
- }
+ "alert": LOG_ALERT,
+ "crit": LOG_CRIT,
+ "debug": LOG_DEBUG,
+ "emerg": LOG_EMERG,
+ "err": LOG_ERR,
+ "error": LOG_ERR, # DEPRECATED
+ "info": LOG_INFO,
+ "notice": LOG_NOTICE,
+ "panic": LOG_EMERG, # DEPRECATED
+ "warn": LOG_WARNING, # DEPRECATED
+ "warning": LOG_WARNING,
+ }
facility_names = {
- "auth": LOG_AUTH,
- "authpriv": LOG_AUTHPRIV,
- "cron": LOG_CRON,
- "daemon": LOG_DAEMON,
- "kern": LOG_KERN,
- "lpr": LOG_LPR,
- "mail": LOG_MAIL,
- "news": LOG_NEWS,
- "security": LOG_AUTH, # DEPRECATED
- "syslog": LOG_SYSLOG,
- "user": LOG_USER,
- "uucp": LOG_UUCP,
- "local0": LOG_LOCAL0,
- "local1": LOG_LOCAL1,
- "local2": LOG_LOCAL2,
- "local3": LOG_LOCAL3,
- "local4": LOG_LOCAL4,
- "local5": LOG_LOCAL5,
- "local6": LOG_LOCAL6,
- "local7": LOG_LOCAL7,
- }
+ "auth": LOG_AUTH,
+ "authpriv": LOG_AUTHPRIV,
+ "cron": LOG_CRON,
+ "daemon": LOG_DAEMON,
+ "kern": LOG_KERN,
+ "lpr": LOG_LPR,
+ "mail": LOG_MAIL,
+ "news": LOG_NEWS,
+ "security": LOG_AUTH, # DEPRECATED
+ "syslog": LOG_SYSLOG,
+ "user": LOG_USER,
+ "uucp": LOG_UUCP,
+ "local0": LOG_LOCAL0,
+ "local1": LOG_LOCAL1,
+ "local2": LOG_LOCAL2,
+ "local3": LOG_LOCAL3,
+ "local4": LOG_LOCAL4,
+ "local5": LOG_LOCAL5,
+ "local6": LOG_LOCAL6,
+ "local7": LOG_LOCAL7,
+ }
import socket
class syslog_client:
- def __init__ (self, address='/dev/log'):
- self.address = address
- if type (address) == type(''):
- self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_STREAM)
- self.socket.connect (address)
- self.unix = 1
- else:
- self.socket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
- self.unix = 0
-
- # curious: when talking to the unix-domain '/dev/log' socket, a
- # zero-terminator seems to be required. this string is placed
- # into a class variable so that it can be overridden if
- # necessary.
-
- log_format_string = '<%d>%s\000'
-
- def log (self, message, facility=LOG_USER, priority=LOG_INFO):
- message = self.log_format_string % (
- self.encode_priority (facility, priority),
- message
- )
- if self.unix:
- self.socket.send (message)
- else:
- self.socket.sendto (message, self.address)
-
- def encode_priority (self, facility, priority):
- if type(facility) == type(''):
- facility = facility_names[facility]
- if type(priority) == type(''):
- priority = priority_names[priority]
- return (facility<<3) | priority
-
- def close (self):
- if self.unix:
- self.socket.close()
+ def __init__ (self, address='/dev/log'):
+ self.address = address
+ if type (address) == type(''):
+ self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_STREAM)
+ self.socket.connect (address)
+ self.unix = 1
+ else:
+ self.socket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
+ self.unix = 0
+
+ # curious: when talking to the unix-domain '/dev/log' socket, a
+ # zero-terminator seems to be required. this string is placed
+ # into a class variable so that it can be overridden if
+ # necessary.
+
+ log_format_string = '<%d>%s\000'
+
+ def log (self, message, facility=LOG_USER, priority=LOG_INFO):
+ message = self.log_format_string % (
+ self.encode_priority (facility, priority),
+ message
+ )
+ if self.unix:
+ self.socket.send (message)
+ else:
+ self.socket.sendto (message, self.address)
+
+ def encode_priority (self, facility, priority):
+ if type(facility) == type(''):
+ facility = facility_names[facility]
+ if type(priority) == type(''):
+ priority = priority_names[priority]
+ return (facility<<3) | priority
+
+ def close (self):
+ if self.unix:
+ self.socket.close()
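
The PRI tag that syslog_client prepends is just facility*8 + priority, so a
'user'/'info' message goes out as '<14>...'. A hedged sketch; the UDP address
tuple is an example value, not something this patch configures:

    import m_syslog

    client = m_syslog.syslog_client(('127.0.0.1', 514))  # tuple => UDP datagrams
    pri = client.encode_priority('user', 'info')         # (1 << 3) | 6
    assert pri == 14
    client.log('medusa started', facility='user', priority='info')
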
diff --git a/demo/medusa/poison_handler.py b/demo/medusa/poison_handler.py
index acc78ab..61001a7 100644
--- a/demo/medusa/poison_handler.py
+++ b/demo/medusa/poison_handler.py
@@ -9,7 +9,7 @@ RESP_HEAD="""\
RESP_MIDDLE="""
<h2>M2Crypto https server demonstration</h2>
-This web page is generated by the "poison" http request handler.
+This web page is generated by the "poison" http request handler.
<br>
The links just go on and on and on...
<br><br>
@@ -32,7 +32,7 @@ def makepage(numlinks):
url='\r\n'
numlinks=whrandom.randint(2, numlinks)
- for i in range(numlinks):
+ for i in range(numlinks):
url=url+'<a href="/poison/'
for u in range(whrandom.randint(3, 15)):
pick=whrandom.randint(0, numchar-1)
@@ -51,7 +51,7 @@ def makepage(numlinks):
class poison_handler:
- """This is a clone of webpoison - every URL returns a page of URLs, each of which
+ """This is a clone of webpoison - every URL returns a page of URLs, each of which
returns a page of URLs, each of _which_ returns a page of URLs, ad infinitum.
The objective is to sucker address-harvesting bots run by spammers."""
diff --git a/demo/medusa/producers.py b/demo/medusa/producers.py
index 2138258..4496365 100644
--- a/demo/medusa/producers.py
+++ b/demo/medusa/producers.py
@@ -12,98 +12,98 @@ producer, then wrap this with the 'chunked' transfer-encoding producer.
"""
class simple_producer:
- "producer for a string"
- def __init__ (self, data, buffer_size=1024):
- self.data = data
- self.buffer_size = buffer_size
-
- def more (self):
- if len (self.data) > self.buffer_size:
- result = self.data[:self.buffer_size]
- self.data = self.data[self.buffer_size:]
- return result
- else:
- result = self.data
- self.data = ''
- return result
+ "producer for a string"
+ def __init__ (self, data, buffer_size=1024):
+ self.data = data
+ self.buffer_size = buffer_size
+
+ def more (self):
+ if len (self.data) > self.buffer_size:
+ result = self.data[:self.buffer_size]
+ self.data = self.data[self.buffer_size:]
+ return result
+ else:
+ result = self.data
+ self.data = ''
+ return result
class scanning_producer:
- "like simple_producer, but more efficient for large strings"
- def __init__ (self, data, buffer_size=1024):
- self.data = data
- self.buffer_size = buffer_size
- self.pos = 0
-
- def more (self):
- if self.pos < len(self.data):
- lp = self.pos
- rp = min (
- len(self.data),
- self.pos + self.buffer_size
- )
- result = self.data[lp:rp]
- self.pos = self.pos + len(result)
- return result
- else:
- return ''
+ "like simple_producer, but more efficient for large strings"
+ def __init__ (self, data, buffer_size=1024):
+ self.data = data
+ self.buffer_size = buffer_size
+ self.pos = 0
+
+ def more (self):
+ if self.pos < len(self.data):
+ lp = self.pos
+ rp = min (
+ len(self.data),
+ self.pos + self.buffer_size
+ )
+ result = self.data[lp:rp]
+ self.pos = self.pos + len(result)
+ return result
+ else:
+ return ''
class lines_producer:
- "producer for a list of lines"
+ "producer for a list of lines"
- def __init__ (self, lines):
- self.lines = lines
+ def __init__ (self, lines):
+ self.lines = lines
- def ready (self):
- return len(self.lines)
+ def ready (self):
+ return len(self.lines)
- def more (self):
- if self.lines:
- chunk = self.lines[:50]
- self.lines = self.lines[50:]
- return string.join (chunk, '\r\n') + '\r\n'
- else:
- return ''
+ def more (self):
+ if self.lines:
+ chunk = self.lines[:50]
+ self.lines = self.lines[50:]
+ return string.join (chunk, '\r\n') + '\r\n'
+ else:
+ return ''
class buffer_list_producer:
- "producer for a list of buffers"
+ "producer for a list of buffers"
- # i.e., data == string.join (buffers, '')
-
- def __init__ (self, buffers):
+ # i.e., data == string.join (buffers, '')
- self.index = 0
- self.buffers = buffers
+ def __init__ (self, buffers):
- def more (self):
- if self.index >= len(self.buffers):
- return ''
- else:
- data = self.buffers[self.index]
- self.index = self.index + 1
- return data
+ self.index = 0
+ self.buffers = buffers
+
+ def more (self):
+ if self.index >= len(self.buffers):
+ return ''
+ else:
+ data = self.buffers[self.index]
+ self.index = self.index + 1
+ return data
class file_producer:
- "producer wrapper for file[-like] objects"
-
- # match http_channel's outgoing buffer size
- out_buffer_size = 1<<16
-
- def __init__ (self, file):
- self.done = 0
- self.file = file
-
- def more (self):
- if self.done:
- return ''
- else:
- data = self.file.read (self.out_buffer_size)
- if not data:
- self.file.close()
- del self.file
- self.done = 1
- return ''
- else:
- return data
+ "producer wrapper for file[-like] objects"
+
+ # match http_channel's outgoing buffer size
+ out_buffer_size = 1<<16
+
+ def __init__ (self, file):
+ self.done = 0
+ self.file = file
+
+ def more (self):
+ if self.done:
+ return ''
+ else:
+ data = self.file.read (self.out_buffer_size)
+ if not data:
+ self.file.close()
+ del self.file
+ self.done = 1
+ return ''
+ else:
+ return data
# A simple output producer. This one does not [yet] have
# the safety feature builtin to the monitor channel: runaway
@@ -113,105 +113,105 @@ class file_producer:
# of this object.
class output_producer:
- "Acts like an output file; suitable for capturing sys.stdout"
- def __init__ (self):
- self.data = ''
-
- def write (self, data):
- lines = string.splitfields (data, '\n')
- data = string.join (lines, '\r\n')
- self.data = self.data + data
-
- def writeline (self, line):
- self.data = self.data + line + '\r\n'
-
- def writelines (self, lines):
- self.data = self.data + string.joinfields (
- lines,
- '\r\n'
- ) + '\r\n'
-
- def ready (self):
- return (len (self.data) > 0)
-
- def flush (self):
- pass
-
- def softspace (self, *args):
- pass
-
- def more (self):
- if self.data:
- result = self.data[:512]
- self.data = self.data[512:]
- return result
- else:
- return ''
+ "Acts like an output file; suitable for capturing sys.stdout"
+ def __init__ (self):
+ self.data = ''
+
+ def write (self, data):
+ lines = string.splitfields (data, '\n')
+ data = string.join (lines, '\r\n')
+ self.data = self.data + data
+
+ def writeline (self, line):
+ self.data = self.data + line + '\r\n'
+
+ def writelines (self, lines):
+ self.data = self.data + string.joinfields (
+ lines,
+ '\r\n'
+ ) + '\r\n'
+
+ def ready (self):
+ return (len (self.data) > 0)
+
+ def flush (self):
+ pass
+
+ def softspace (self, *args):
+ pass
+
+ def more (self):
+ if self.data:
+ result = self.data[:512]
+ self.data = self.data[512:]
+ return result
+ else:
+ return ''
class composite_producer:
- "combine a fifo of producers into one"
- def __init__ (self, producers):
- self.producers = producers
-
- def more (self):
- while len(self.producers):
- p = self.producers.first()
- d = p.more()
- if d:
- return d
- else:
- self.producers.pop()
- else:
- return ''
+ "combine a fifo of producers into one"
+ def __init__ (self, producers):
+ self.producers = producers
+
+ def more (self):
+ while len(self.producers):
+ p = self.producers.first()
+ d = p.more()
+ if d:
+ return d
+ else:
+ self.producers.pop()
+ else:
+ return ''
class globbing_producer:
- """
- 'glob' the output from a producer into a particular buffer size.
- helps reduce the number of calls to send(). [this appears to
- gain about 30% performance on requests to a single channel]
- """
-
- def __init__ (self, producer, buffer_size=1<<16):
- self.producer = producer
- self.buffer = ''
- self.buffer_size = buffer_size
-
- def more (self):
- while len(self.buffer) < self.buffer_size:
- data = self.producer.more()
- if data:
- self.buffer = self.buffer + data
- else:
- break
- r = self.buffer
- self.buffer = ''
- return r
+ """
+ 'glob' the output from a producer into a particular buffer size.
+ helps reduce the number of calls to send(). [this appears to
+ gain about 30% performance on requests to a single channel]
+ """
+
+ def __init__ (self, producer, buffer_size=1<<16):
+ self.producer = producer
+ self.buffer = ''
+ self.buffer_size = buffer_size
+
+ def more (self):
+ while len(self.buffer) < self.buffer_size:
+ data = self.producer.more()
+ if data:
+ self.buffer = self.buffer + data
+ else:
+ break
+ r = self.buffer
+ self.buffer = ''
+ return r
class hooked_producer:
- """
- A producer that will call <function> when it empties,.
- with an argument of the number of bytes produced. Useful
- for logging/instrumentation purposes.
- """
-
- def __init__ (self, producer, function):
- self.producer = producer
- self.function = function
- self.bytes = 0
-
- def more (self):
- if self.producer:
- result = self.producer.more()
- if not result:
- self.producer = None
- self.function (self.bytes)
- else:
- self.bytes = self.bytes + len(result)
- return result
- else:
- return ''
+ """
+    A producer that will call <function> when it empties,
+ with an argument of the number of bytes produced. Useful
+ for logging/instrumentation purposes.
+ """
+
+ def __init__ (self, producer, function):
+ self.producer = producer
+ self.function = function
+ self.bytes = 0
+
+ def more (self):
+ if self.producer:
+ result = self.producer.more()
+ if not result:
+ self.producer = None
+ self.function (self.bytes)
+ else:
+ self.bytes = self.bytes + len(result)
+ return result
+ else:
+ return ''
# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
# correct. In the face of Strange Files, it is conceivable that
@@ -222,35 +222,35 @@ class hooked_producer:
# How beautifully it blends with the concept of the producer.
class chunked_producer:
- """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
- Here is a sample usage:
- request['Transfer-Encoding'] = 'chunked'
- request.push (
- producers.chunked_producer (your_producer)
- )
- request.done()
- """
-
- def __init__ (self, producer, footers=None):
- self.producer = producer
- self.footers = footers
-
- def more (self):
- if self.producer:
- data = self.producer.more()
- if data:
- return '%x\r\n%s\r\n' % (len(data), data)
- else:
- self.producer = None
- if self.footers:
- return string.join (
- ['0'] + self.footers,
- '\r\n'
- ) + '\r\n\r\n'
- else:
- return '0\r\n\r\n'
- else:
- return ''
+ """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
+ Here is a sample usage:
+ request['Transfer-Encoding'] = 'chunked'
+ request.push (
+ producers.chunked_producer (your_producer)
+ )
+ request.done()
+ """
+
+ def __init__ (self, producer, footers=None):
+ self.producer = producer
+ self.footers = footers
+
+ def more (self):
+ if self.producer:
+ data = self.producer.more()
+ if data:
+ return '%x\r\n%s\r\n' % (len(data), data)
+ else:
+ self.producer = None
+ if self.footers:
+ return string.join (
+ ['0'] + self.footers,
+ '\r\n'
+ ) + '\r\n\r\n'
+ else:
+ return '0\r\n\r\n'
+ else:
+ return ''
# Unfortunately this isn't very useful right now (Aug 97), because
# apparently the browsers don't do on-the-fly decompression. Which
@@ -258,72 +258,72 @@ class chunked_producer:
# low-bandwidth clients (i.e., most everyone).
try:
- import zlib
+ import zlib
except ImportError:
- zlib = None
+ zlib = None
class compressed_producer:
- """
- Compress another producer on-the-fly, using ZLIB
- [Unfortunately, none of the current browsers seem to support this]
- """
-
- # Note: It's not very efficient to have the server repeatedly
- # compressing your outgoing files: compress them ahead of time, or
- # use a compress-once-and-store scheme. However, if you have low
- # bandwidth and low traffic, this may make more sense than
- # maintaining your source files compressed.
- #
- # Can also be used for compressing dynamically-produced output.
-
- def __init__ (self, producer, level=5):
- self.producer = producer
- self.compressor = zlib.compressobj (level)
-
- def more (self):
- if self.producer:
- cdata = ''
- # feed until we get some output
- while not cdata:
- data = self.producer.more()
- if not data:
- self.producer = None
- return self.compressor.flush()
- else:
- cdata = self.compressor.compress (data)
- return cdata
- else:
- return ''
+ """
+ Compress another producer on-the-fly, using ZLIB
+ [Unfortunately, none of the current browsers seem to support this]
+ """
+
+ # Note: It's not very efficient to have the server repeatedly
+ # compressing your outgoing files: compress them ahead of time, or
+ # use a compress-once-and-store scheme. However, if you have low
+ # bandwidth and low traffic, this may make more sense than
+ # maintaining your source files compressed.
+ #
+ # Can also be used for compressing dynamically-produced output.
+
+ def __init__ (self, producer, level=5):
+ self.producer = producer
+ self.compressor = zlib.compressobj (level)
+
+ def more (self):
+ if self.producer:
+ cdata = ''
+ # feed until we get some output
+ while not cdata:
+ data = self.producer.more()
+ if not data:
+ self.producer = None
+ return self.compressor.flush()
+ else:
+ cdata = self.compressor.compress (data)
+ return cdata
+ else:
+ return ''
class escaping_producer:
- "A producer that escapes a sequence of characters"
- " Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."
-
- def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
- self.producer = producer
- self.esc_from = esc_from
- self.esc_to = esc_to
- self.buffer = ''
- from asynchat import find_prefix_at_end
- self.find_prefix_at_end = find_prefix_at_end
-
- def more (self):
- esc_from = self.esc_from
- esc_to = self.esc_to
-
- buffer = self.buffer + self.producer.more()
-
- if buffer:
- buffer = string.replace (buffer, esc_from, esc_to)
- i = self.find_prefix_at_end (buffer, esc_from)
- if i:
- # we found a prefix
- self.buffer = buffer[-i:]
- return buffer[:-i]
- else:
- # no prefix, return it all
- self.buffer = ''
- return buffer
- else:
- return buffer
+    """A producer that escapes a sequence of characters.
+    Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."""
+
+ def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
+ self.producer = producer
+ self.esc_from = esc_from
+ self.esc_to = esc_to
+ self.buffer = ''
+ from asynchat import find_prefix_at_end
+ self.find_prefix_at_end = find_prefix_at_end
+
+ def more (self):
+ esc_from = self.esc_from
+ esc_to = self.esc_to
+
+ buffer = self.buffer + self.producer.more()
+
+ if buffer:
+ buffer = string.replace (buffer, esc_from, esc_to)
+ i = self.find_prefix_at_end (buffer, esc_from)
+ if i:
+ # we found a prefix
+ self.buffer = buffer[-i:]
+ return buffer[:-i]
+ else:
+ # no prefix, return it all
+ self.buffer = ''
+ return buffer
+ else:
+ return buffer
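
All of the producers above share one tiny protocol: more() returns the next
chunk of output, and '' once exhausted, which is what makes them freely
composable. A minimal sketch of chunked transfer coding over a string
(variable names are local to this example):

    import producers

    p = producers.chunked_producer(
        producers.simple_producer('hello world', buffer_size=4))
    out = ''
    while 1:
        data = p.more()
        if not data:
            break
        out = out + data
    # out now holds 'hello world' as 4-byte HTTP/1.1 chunks,
    # ending with the '0\r\n\r\n' terminator
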
diff --git a/demo/medusa/put_handler.py b/demo/medusa/put_handler.py
index a158d12..e8f3fc7 100644
--- a/demo/medusa/put_handler.py
+++ b/demo/medusa/put_handler.py
@@ -1,8 +1,8 @@
# -*- Mode: Python; tab-width: 4 -*-
#
-# Author: Sam Rushing <rushing@nightmare.com>
-# Copyright 1996-2000 by Sam Rushing
-# All Rights Reserved.
+# Author: Sam Rushing <rushing@nightmare.com>
+# Copyright 1996-2000 by Sam Rushing
+# All Rights Reserved.
#
from __future__ import print_function
@@ -11,105 +11,105 @@ import re
import string
import default_handler
-unquote = default_handler.unquote
-get_header = default_handler.get_header
+unquote = default_handler.unquote
+get_header = default_handler.get_header
last_request = None
class put_handler:
- def __init__ (self, filesystem, uri_regex):
- self.filesystem = filesystem
- if type (uri_regex) == type(''):
- self.uri_regex = re.compile (uri_regex)
- else:
- self.uri_regex = uri_regex
-
- def match (self, request):
- uri = request.uri
- if request.command == 'put':
- m = self.uri_regex.match (uri)
- if m and m.end() == len(uri):
- return 1
- return 0
-
- def handle_request (self, request):
-
- path, params, query, fragment = request.split_uri()
-
- # strip off leading slashes
- while path and path[0] == '/':
- path = path[1:]
-
- if '%' in path:
- path = unquote (path)
-
- # make sure there's a content-length header
- cl = get_header (CONTENT_LENGTH, request.header)
- if not cl:
- request.error (411)
- return
- else:
- cl = string.atoi (cl)
-
- # don't let the try to overwrite a directory
- if self.filesystem.isdir (path):
- request.error (405)
- return
-
- is_update = self.filesystem.isfile (path)
-
- try:
- output_file = self.filesystem.open (path, 'wb')
- except:
- request.error (405)
- return
-
- request.collector = put_collector (output_file, cl, request, is_update)
-
- # no terminator while receiving PUT data
- request.channel.set_terminator (None)
-
- # don't respond yet, wait until we've received the data...
-
+ def __init__ (self, filesystem, uri_regex):
+ self.filesystem = filesystem
+ if type (uri_regex) == type(''):
+ self.uri_regex = re.compile (uri_regex)
+ else:
+ self.uri_regex = uri_regex
+
+ def match (self, request):
+ uri = request.uri
+ if request.command == 'put':
+ m = self.uri_regex.match (uri)
+ if m and m.end() == len(uri):
+ return 1
+ return 0
+
+ def handle_request (self, request):
+
+ path, params, query, fragment = request.split_uri()
+
+ # strip off leading slashes
+ while path and path[0] == '/':
+ path = path[1:]
+
+ if '%' in path:
+ path = unquote (path)
+
+ # make sure there's a content-length header
+ cl = get_header (CONTENT_LENGTH, request.header)
+ if not cl:
+ request.error (411)
+ return
+ else:
+ cl = string.atoi (cl)
+
+        # don't let them try to overwrite a directory
+ if self.filesystem.isdir (path):
+ request.error (405)
+ return
+
+ is_update = self.filesystem.isfile (path)
+
+ try:
+ output_file = self.filesystem.open (path, 'wb')
+ except:
+ request.error (405)
+ return
+
+ request.collector = put_collector (output_file, cl, request, is_update)
+
+ # no terminator while receiving PUT data
+ request.channel.set_terminator (None)
+
+ # don't respond yet, wait until we've received the data...
+
class put_collector:
- def __init__ (self, file, length, request, is_update):
- self.file = file
- self.length = length
- self.request = request
- self.is_update = is_update
- self.bytes_in = 0
-
- def collect_incoming_data (self, data):
- ld = len(data)
- bi = self.bytes_in
- if (bi + ld) >= self.length:
- # last bit of data
- chunk = self.length - bi
- self.file.write (data[:chunk])
- self.file.close()
-
- if chunk != ld:
- print('orphaned %d bytes: <%s>' % (ld - chunk, repr(data[chunk:])))
-
- # do some housekeeping
- r = self.request
- ch = r.channel
- ch.current_request = None
- # set the terminator back to the default
- ch.set_terminator ('\r\n\r\n')
- if self.is_update:
- r.reply_code = 204 # No content
- r.done()
- else:
- r.reply_now (201) # Created
- # avoid circular reference
- del self.request
- else:
- self.file.write (data)
- self.bytes_in = self.bytes_in + ld
-
- def found_terminator (self):
- # shouldn't be called
- pass
+ def __init__ (self, file, length, request, is_update):
+ self.file = file
+ self.length = length
+ self.request = request
+ self.is_update = is_update
+ self.bytes_in = 0
+
+ def collect_incoming_data (self, data):
+ ld = len(data)
+ bi = self.bytes_in
+ if (bi + ld) >= self.length:
+ # last bit of data
+ chunk = self.length - bi
+ self.file.write (data[:chunk])
+ self.file.close()
+
+ if chunk != ld:
+ print('orphaned %d bytes: <%s>' % (ld - chunk, repr(data[chunk:])))
+
+ # do some housekeeping
+ r = self.request
+ ch = r.channel
+ ch.current_request = None
+ # set the terminator back to the default
+ ch.set_terminator ('\r\n\r\n')
+ if self.is_update:
+ r.reply_code = 204 # No content
+ r.done()
+ else:
+ r.reply_now (201) # Created
+ # avoid circular reference
+ del self.request
+ else:
+ self.file.write (data)
+ self.bytes_in = self.bytes_in + ld
+
+ def found_terminator (self):
+ # shouldn't be called
+ pass
CONTENT_LENGTH = re.compile ('Content-Length: ([0-9]+)', re.IGNORECASE)
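
A sketch of wiring put_handler into the demo server; the port, docroot and URI
pattern are placeholder values, and filesys/http_server are the sibling demo
modules:

    import asyncore
    import filesys
    import http_server
    import put_handler

    hs = http_server.http_server('', 8080)
    fs = filesys.os_filesystem('/tmp/put-docroot')  # directory must already exist
    hs.install_handler(put_handler.put_handler(fs, '/upload/.*'))
    asyncore.loop()
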
diff --git a/demo/medusa/redirecting_handler.py b/demo/medusa/redirecting_handler.py
index e6a1e90..f696c91 100644
--- a/demo/medusa/redirecting_handler.py
+++ b/demo/medusa/redirecting_handler.py
@@ -1,8 +1,8 @@
# -*- Mode: Python; tab-width: 4 -*-
#
-# Author: Sam Rushing <rushing@nightmare.com>
-# Copyright 1996-2000 by Sam Rushing
-# All Rights Reserved.
+# Author: Sam Rushing <rushing@nightmare.com>
+# Copyright 1996-2000 by Sam Rushing
+# All Rights Reserved.
#
import re
@@ -10,35 +10,35 @@ import counter
class redirecting_handler:
- def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
- self.pattern = pattern
- self.redirect = redirect
- self.patreg = re.compile (pattern, regex_flag)
- self.hits = counter.counter()
-
- def match (self, request):
- m = self.patref.match (request.uri)
- return (m and (m.end() == len(request.uri)))
-
- def handle_request (self, request):
- self.hits.increment()
- m = self.patreg.match (request.uri)
- part = m.group(1)
-
- request['Location'] = self.redirect % part
- request.error (302) # moved temporarily
-
- def __repr__ (self):
- return '<Redirecting Handler at %08x [%s => %s]>' % (
- id(self),
- repr(self.pattern),
- repr(self.redirect)
- )
-
- def status (self):
- import producers
- return producers.simple_producer (
- '<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (
- self.pattern, self.redirect, self.hits
- )
- )
+ def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
+ self.pattern = pattern
+ self.redirect = redirect
+ self.patreg = re.compile (pattern, regex_flag)
+ self.hits = counter.counter()
+
+ def match (self, request):
+        m = self.patreg.match (request.uri)
+ return (m and (m.end() == len(request.uri)))
+
+ def handle_request (self, request):
+ self.hits.increment()
+ m = self.patreg.match (request.uri)
+ part = m.group(1)
+
+ request['Location'] = self.redirect % part
+ request.error (302) # moved temporarily
+
+ def __repr__ (self):
+ return '<Redirecting Handler at %08x [%s => %s]>' % (
+ id(self),
+ repr(self.pattern),
+ repr(self.redirect)
+ )
+
+ def status (self):
+ import producers
+ return producers.simple_producer (
+ '<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (
+ self.pattern, self.redirect, self.hits
+ )
+ )
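
The redirect pattern must capture the tail of the URI in group(1), because
handle_request substitutes it into the target with %s. A minimal sketch (the
hostname is an example value):

    import redirecting_handler

    # 302 any /old/... URI to the new location, keeping the tail
    h = redirecting_handler.redirecting_handler(
        '/old/(.*)', 'http://www.example.com/new/%s')
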
diff --git a/demo/medusa/status_handler.py b/demo/medusa/status_handler.py
index 5ad5c21..71f7c8f 100644
--- a/demo/medusa/status_handler.py
+++ b/demo/medusa/status_handler.py
@@ -2,7 +2,7 @@
VERSION_STRING = "$Id$"
-#
+#
# medusa status extension
#
@@ -19,224 +19,224 @@ from counter import counter
START_TIME = long(time.time())
class status_extension:
- hit_counter = counter()
-
- def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
- self.objects = objects
- self.statusdir = statusdir
- self.allow_emergency_debug = allow_emergency_debug
- # We use /status instead of statusdir here because it's too
- # hard to pass statusdir to the logger, who makes the HREF
- # to the object dir. We don't need the security-through-
- # obscurity here in any case, because the id is obscurity enough
- self.hyper_regex = re.compile('/status/object/([0-9]+)/.*')
- self.hyper_objects = []
- for object in objects:
- self.register_hyper_object (object)
-
- def __repr__ (self):
- return '<Status Extension (%s hits) at %x>' % (
- self.hit_counter,
- id(self)
- )
-
- def match (self, request):
- path, params, query, fragment = request.split_uri()
- # For reasons explained above, we don't use statusdir for /object
- return (path[:len(self.statusdir)] == self.statusdir or
- path[:len("/status/object/")] == '/status/object/')
-
- # Possible Targets:
- # /status
- # /status/channel_list
- # /status/medusa.gif
-
- # can we have 'clickable' objects?
- # [yes, we can use id(x) and do a linear search]
-
- # Dynamic producers:
- # HTTP/1.0: we must close the channel, because it's dynamic output
- # HTTP/1.1: we can use the chunked transfer-encoding, and leave
- # it open.
-
- def handle_request (self, request):
- [path, params, query, fragment] = request.split_uri()
- self.hit_counter.increment()
- if path == self.statusdir: # and not a subdirectory
- up_time = string.join (english_time (long(time.time()) - START_TIME))
- request['Content-Type'] = 'text/html'
- request.push (
- '<html>'
- '<title>Medusa Status Reports</title>'
- '<body bgcolor="#ffffff">'
- '<h1>Medusa Status Reports</h1>'
- '<b>Up:</b> %s' % up_time
- )
- for i in range(len(self.objects)):
- request.push (self.objects[i].status())
- request.push ('<hr>\r\n')
- request.push (
- '<p><a href="%s/channel_list">Channel List</a>'
- '<hr>'
- '<img src="%s/medusa.gif" align=right width=%d height=%d>'
- '</body></html>' % (
- self.statusdir,
- self.statusdir,
- medusa_gif.width,
- medusa_gif.height
- )
- )
- request.done()
- elif path == self.statusdir + '/channel_list':
- request['Content-Type'] = 'text/html'
- request.push ('<html><body>')
- request.push(channel_list_producer(self.statusdir))
- request.push (
- '<hr>'
- '<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
- self.statusdir,
- medusa_gif.width,
- medusa_gif.height
- ) +
- '</body></html>'
- )
- request.done()
-
- elif path == self.statusdir + '/medusa.gif':
- request['Content-Type'] = 'image/gif'
- request['Content-Length'] = len(medusa_gif.data)
- request.push (medusa_gif.data)
- request.done()
-
- elif path == self.statusdir + '/close_zombies':
- message = (
- '<h2>Closing all zombie http client connections...</h2>'
- '<p><a href="%s">Back to the status page</a>' % self.statusdir
- )
- request['Content-Type'] = 'text/html'
- request['Content-Length'] = len (message)
- request.push (message)
- now = int (time.time())
- for channel in asyncore.socket_map.keys():
- if channel.__class__ == http_server.http_channel:
- if channel != request.channel:
- if (now - channel.creation_time) > channel.zombie_timeout:
- channel.close()
- request.done()
-
- # Emergency Debug Mode
- # If a server is running away from you, don't KILL it!
- # Move all the AF_INET server ports and perform an autopsy...
- # [disabled by default to protect the innocent]
- elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
- request.push ('<html>Moving All Servers...</html>')
- request.done()
- for channel in asyncore.socket_map.keys():
- if channel.accepting:
- if type(channel.addr) is type(()):
- ip, port = channel.addr
- channel.socket.close()
- channel.del_channel()
- channel.addr = (ip, port+10000)
- fam, typ = channel.family_and_type
- channel.create_socket (fam, typ)
- channel.set_reuse_addr()
- channel.bind (channel.addr)
- channel.listen(5)
-
- else:
- m = self.hyper_regex.match (path)
- if m:
- oid = string.atoi (m.group (1))
- for object in self.hyper_objects:
- if id (object) == oid:
- if hasattr (object, 'hyper_respond'):
- object.hyper_respond (self, path, request)
- else:
- request.error (404)
- return
-
- def status (self):
- return producers.simple_producer (
- '<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
- )
-
- def register_hyper_object (self, object):
- if not object in self.hyper_objects:
- self.hyper_objects.append (object)
+ hit_counter = counter()
+
+ def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
+ self.objects = objects
+ self.statusdir = statusdir
+ self.allow_emergency_debug = allow_emergency_debug
+ # We use /status instead of statusdir here because it's too
+ # hard to pass statusdir to the logger, who makes the HREF
+ # to the object dir. We don't need the security-through-
+ # obscurity here in any case, because the id is obscurity enough
+ self.hyper_regex = re.compile('/status/object/([0-9]+)/.*')
+ self.hyper_objects = []
+ for object in objects:
+ self.register_hyper_object (object)
+
+ def __repr__ (self):
+ return '<Status Extension (%s hits) at %x>' % (
+ self.hit_counter,
+ id(self)
+ )
+
+ def match (self, request):
+ path, params, query, fragment = request.split_uri()
+ # For reasons explained above, we don't use statusdir for /object
+ return (path[:len(self.statusdir)] == self.statusdir or
+ path[:len("/status/object/")] == '/status/object/')
+
+ # Possible Targets:
+ # /status
+ # /status/channel_list
+ # /status/medusa.gif
+
+ # can we have 'clickable' objects?
+ # [yes, we can use id(x) and do a linear search]
+
+ # Dynamic producers:
+ # HTTP/1.0: we must close the channel, because it's dynamic output
+ # HTTP/1.1: we can use the chunked transfer-encoding, and leave
+ # it open.
+
+ def handle_request (self, request):
+ [path, params, query, fragment] = request.split_uri()
+ self.hit_counter.increment()
+ if path == self.statusdir: # and not a subdirectory
+ up_time = string.join (english_time (long(time.time()) - START_TIME))
+ request['Content-Type'] = 'text/html'
+ request.push (
+ '<html>'
+ '<title>Medusa Status Reports</title>'
+ '<body bgcolor="#ffffff">'
+ '<h1>Medusa Status Reports</h1>'
+ '<b>Up:</b> %s' % up_time
+ )
+ for i in range(len(self.objects)):
+ request.push (self.objects[i].status())
+ request.push ('<hr>\r\n')
+ request.push (
+ '<p><a href="%s/channel_list">Channel List</a>'
+ '<hr>'
+ '<img src="%s/medusa.gif" align=right width=%d height=%d>'
+ '</body></html>' % (
+ self.statusdir,
+ self.statusdir,
+ medusa_gif.width,
+ medusa_gif.height
+ )
+ )
+ request.done()
+ elif path == self.statusdir + '/channel_list':
+ request['Content-Type'] = 'text/html'
+ request.push ('<html><body>')
+ request.push(channel_list_producer(self.statusdir))
+ request.push (
+ '<hr>'
+ '<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
+ self.statusdir,
+ medusa_gif.width,
+ medusa_gif.height
+ ) +
+ '</body></html>'
+ )
+ request.done()
+
+ elif path == self.statusdir + '/medusa.gif':
+ request['Content-Type'] = 'image/gif'
+ request['Content-Length'] = len(medusa_gif.data)
+ request.push (medusa_gif.data)
+ request.done()
+
+ elif path == self.statusdir + '/close_zombies':
+ message = (
+ '<h2>Closing all zombie http client connections...</h2>'
+ '<p><a href="%s">Back to the status page</a>' % self.statusdir
+ )
+ request['Content-Type'] = 'text/html'
+ request['Content-Length'] = len (message)
+ request.push (message)
+ now = int (time.time())
+ for channel in asyncore.socket_map.keys():
+ if channel.__class__ == http_server.http_channel:
+ if channel != request.channel:
+ if (now - channel.creation_time) > channel.zombie_timeout:
+ channel.close()
+ request.done()
+
+ # Emergency Debug Mode
+ # If a server is running away from you, don't KILL it!
+ # Move all the AF_INET server ports and perform an autopsy...
+ # [disabled by default to protect the innocent]
+ elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
+ request.push ('<html>Moving All Servers...</html>')
+ request.done()
+ for channel in asyncore.socket_map.keys():
+ if channel.accepting:
+ if type(channel.addr) is type(()):
+ ip, port = channel.addr
+ channel.socket.close()
+ channel.del_channel()
+ channel.addr = (ip, port+10000)
+ fam, typ = channel.family_and_type
+ channel.create_socket (fam, typ)
+ channel.set_reuse_addr()
+ channel.bind (channel.addr)
+ channel.listen(5)
+
+ else:
+ m = self.hyper_regex.match (path)
+ if m:
+ oid = string.atoi (m.group (1))
+ for object in self.hyper_objects:
+ if id (object) == oid:
+ if hasattr (object, 'hyper_respond'):
+ object.hyper_respond (self, path, request)
+ else:
+ request.error (404)
+ return
+
+ def status (self):
+ return producers.simple_producer (
+ '<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
+ )
+
+ def register_hyper_object (self, object):
+ if not object in self.hyper_objects:
+ self.hyper_objects.append (object)
import logger
class logger_for_status (logger.tail_logger):
- def status (self):
- return 'Last %d log entries for: %s' % (
- len (self.messages),
- html_repr (self)
- )
+ def status (self):
+ return 'Last %d log entries for: %s' % (
+ len (self.messages),
+ html_repr (self)
+ )
- def hyper_respond (self, sh, path, request):
- request['Content-Type'] = 'text/plain'
- messages = self.messages[:]
- messages.reverse()
- request.push (lines_producer (messages))
- request.done()
+ def hyper_respond (self, sh, path, request):
+ request['Content-Type'] = 'text/plain'
+ messages = self.messages[:]
+ messages.reverse()
+ request.push (lines_producer (messages))
+ request.done()
class lines_producer:
- def __init__ (self, lines):
- self.lines = lines
+ def __init__ (self, lines):
+ self.lines = lines
- def ready (self):
- return len(self.lines)
+ def ready (self):
+ return len(self.lines)
- def more (self):
- if self.lines:
- chunk = self.lines[:50]
- self.lines = self.lines[50:]
- return string.join (chunk, '\r\n') + '\r\n'
- else:
- return ''
+ def more (self):
+ if self.lines:
+ chunk = self.lines[:50]
+ self.lines = self.lines[50:]
+ return string.join (chunk, '\r\n') + '\r\n'
+ else:
+ return ''
class channel_list_producer (lines_producer):
- def __init__ (self, statusdir):
- channel_reprs = map (
- lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
- asyncore.socket_map.values()
- )
- channel_reprs.sort()
- lines_producer.__init__ (
- self,
- ['<h1>Active Channel List</h1>',
- '<pre>'
- ] + channel_reprs + [
- '</pre>',
- '<p><a href="%s">Status Report</a>' % statusdir
- ]
- )
+ def __init__ (self, statusdir):
+ channel_reprs = map (
+ lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
+ asyncore.socket_map.values()
+ )
+ channel_reprs.sort()
+ lines_producer.__init__ (
+ self,
+ ['<h1>Active Channel List</h1>',
+ '<pre>'
+ ] + channel_reprs + [
+ '</pre>',
+ '<p><a href="%s">Status Report</a>' % statusdir
+ ]
+ )
# this really needs a full-blown quoter...
def sanitize (s):
- if '<' in s:
- s = string.join (string.split (s, '<'), '&lt;')
- if '>' in s:
- s = string.join (string.split (s, '>'), '&gt;')
- return s
+ if '<' in s:
+ s = string.join (string.split (s, '<'), '&lt;')
+ if '>' in s:
+ s = string.join (string.split (s, '>'), '&gt;')
+ return s
def html_repr (object):
- so = sanitize (repr (object))
- if hasattr (object, 'hyper_respond'):
- return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
- else:
- return so
+ so = sanitize (repr (object))
+ if hasattr (object, 'hyper_respond'):
+ return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
+ else:
+ return so
def html_reprs (list, front='', back=''):
- reprs = map (
- lambda x,f=front,b=back: '%s%s%s' % (f,x,b),
- map (lambda x: sanitize (html_repr(x)), list)
- )
- reprs.sort()
- return reprs
+ reprs = map (
+ lambda x,f=front,b=back: '%s%s%s' % (f,x,b),
+ map (lambda x: sanitize (html_repr(x)), list)
+ )
+ reprs.sort()
+ return reprs
# for example, tera, giga, mega, kilo
# p_d (n, (1024, 1024, 1024, 1024))
@@ -245,38 +245,38 @@ def html_reprs (list, front='', back=''):
# p_d (n, (60, 60, 24))
def progressive_divide (n, parts):
- result = []
- for part in parts:
- n, rem = divmod (n, part)
- result.append (rem)
- result.append (n)
- return result
+ result = []
+ for part in parts:
+ n, rem = divmod (n, part)
+ result.append (rem)
+ result.append (n)
+ return result
# b,k,m,g,t
def split_by_units (n, units, dividers, format_string):
- divs = progressive_divide (n, dividers)
- result = []
- for i in range(len(units)):
- if divs[i]:
- result.append (format_string % (divs[i], units[i]))
- result.reverse()
- if not result:
- return [format_string % (0, units[0])]
- else:
- return result
+ divs = progressive_divide (n, dividers)
+ result = []
+ for i in range(len(units)):
+ if divs[i]:
+ result.append (format_string % (divs[i], units[i]))
+ result.reverse()
+ if not result:
+ return [format_string % (0, units[0])]
+ else:
+ return result
def english_bytes (n):
- return split_by_units (
- n,
- ('','K','M','G','T'),
- (1024, 1024, 1024, 1024, 1024),
- '%d %sB'
- )
+ return split_by_units (
+ n,
+ ('','K','M','G','T'),
+ (1024, 1024, 1024, 1024, 1024),
+ '%d %sB'
+ )
def english_time (n):
- return split_by_units (
- n,
- ('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
- ( 60, 60, 24, 7, 52),
- '%d %s'
- )
+ return split_by_units (
+ n,
+ ('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
+ ( 60, 60, 24, 7, 52),
+ '%d %s'
+ )
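
progressive_divide peels units off least-significant first, which is why
split_by_units reverses its result. Worked by hand, assuming the sibling demo
modules import cleanly:

    import status_handler

    status_handler.progressive_divide(3700, (60, 60, 24))
    # -> [40, 1, 1, 0]   i.e. 40 secs, 1 min, 1 hour, 0 days
    status_handler.english_time(3700)
    # -> ['1 mins', '40 secs']
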
diff --git a/demo/medusa/virtual_handler.py b/demo/medusa/virtual_handler.py
index 846ee5f..1b2c752 100644
--- a/demo/medusa/virtual_handler.py
+++ b/demo/medusa/virtual_handler.py
@@ -10,51 +10,51 @@ get_header = default_handler.get_header
class virtual_handler:
- """HTTP request handler for an HTTP/1.0-style virtual host. Each
- Virtual host must have a different IP"""
+ """HTTP request handler for an HTTP/1.0-style virtual host. Each
+ Virtual host must have a different IP"""
- def __init__ (self, handler, hostname):
- self.handler = handler
- self.hostname = hostname
- try:
- self.ip = socket.gethostbyname (hostname)
- except socket.error:
- raise ValueError("Virtual Hostname %s does not appear to be registered in the DNS" % hostname)
+ def __init__ (self, handler, hostname):
+ self.handler = handler
+ self.hostname = hostname
+ try:
+ self.ip = socket.gethostbyname (hostname)
+ except socket.error:
+ raise ValueError("Virtual Hostname %s does not appear to be registered in the DNS" % hostname)
- def match (self, request):
- if (request.channel.addr[0] == self.ip):
- return 1
- else:
- return 0
+ def match (self, request):
+ if (request.channel.addr[0] == self.ip):
+ return 1
+ else:
+ return 0
- def handle_request (self, request):
- return self.handler.handle_request (request)
+ def handle_request (self, request):
+ return self.handler.handle_request (request)
- def __repr__ (self):
- return '<virtual request handler for %s>' % self.hostname
+ def __repr__ (self):
+ return '<virtual request handler for %s>' % self.hostname
class virtual_handler_with_host:
- """HTTP request handler for HTTP/1.1-style virtual hosts. This
- matches by checking the value of the 'Host' header in the request.
- You actually don't _have_ to support HTTP/1.1 to use this, since
- many browsers now send the 'Host' header. This is a Good Thing."""
-
- def __init__ (self, handler, hostname):
- self.handler = handler
- self.hostname = hostname
-
- def match (self, request):
- host = get_header (HOST, request.header)
- if host == self.hostname:
- return 1
- else:
- return 0
-
- def handle_request (self, request):
- return self.handler.handle_request (request)
-
- def __repr__ (self):
- return '<virtual request handler for %s>' % self.hostname
+ """HTTP request handler for HTTP/1.1-style virtual hosts. This
+ matches by checking the value of the 'Host' header in the request.
+ You actually don't _have_ to support HTTP/1.1 to use this, since
+ many browsers now send the 'Host' header. This is a Good Thing."""
+
+ def __init__ (self, handler, hostname):
+ self.handler = handler
+ self.hostname = hostname
+
+ def match (self, request):
+ host = get_header (HOST, request.header)
+ if host == self.hostname:
+ return 1
+ else:
+ return 0
+
+ def handle_request (self, request):
+ return self.handler.handle_request (request)
+
+ def __repr__ (self):
+ return '<virtual request handler for %s>' % self.hostname
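
Both classes just gate a wrapped handler: virtual_handler matches on an IP
address, virtual_handler_with_host on the request's Host: header. A hedged
sketch; the docroot and hostname are example values:

    import default_handler
    import filesys
    import virtual_handler

    fs = filesys.os_filesystem('/var/www/example')
    dh = default_handler.default_handler(fs)
    vh = virtual_handler.virtual_handler_with_host(dh, 'www.example.com')
    # then hs.install_handler(vh) on an http_server, as in START.py
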
diff --git a/demo/medusa/xmlrpc_handler.py b/demo/medusa/xmlrpc_handler.py
index 994b5cd..ee2a163 100644
--- a/demo/medusa/xmlrpc_handler.py
+++ b/demo/medusa/xmlrpc_handler.py
@@ -17,90 +17,90 @@ import sys
class xmlrpc_handler:
- def match (self, request):
- # Note: /RPC2 is not required by the spec, so you may override this method.
- if request.uri[:5] == '/RPC2':
- return 1
- else:
- return 0
-
- def handle_request (self, request):
- [path, params, query, fragment] = request.split_uri()
-
- if request.command in ('post', 'put'):
- request.collector = collector (self, request)
- else:
- request.error (400)
-
- def continue_request (self, data, request):
- params, method = xmlrpclib.loads (data)
- try:
- # generate response
- try:
- response = self.call (method, params)
- if type(response) != type(()):
- response = (response,)
- except:
- # report exception back to server
- response = xmlrpclib.dumps (
- xmlrpclib.Fault (1, "%s:%s" % (sys.exc_type, sys.exc_value))
- )
- else:
- response = xmlrpclib.dumps (response, methodresponse=1)
- except:
- # internal error, report as HTTP server error
- request.error (500)
- else:
- # got a valid XML RPC response
- request['Content-Type'] = 'text/xml'
- request.push (response)
- request.done()
-
- def call (self, method, params):
- # override this method to implement RPC methods
- raise "NotYetImplemented"
+ def match (self, request):
+ # Note: /RPC2 is not required by the spec, so you may override this method.
+ if request.uri[:5] == '/RPC2':
+ return 1
+ else:
+ return 0
+
+ def handle_request (self, request):
+ [path, params, query, fragment] = request.split_uri()
+
+ if request.command in ('post', 'put'):
+ request.collector = collector (self, request)
+ else:
+ request.error (400)
+
+ def continue_request (self, data, request):
+ params, method = xmlrpclib.loads (data)
+ try:
+ # generate response
+ try:
+ response = self.call (method, params)
+ if type(response) != type(()):
+ response = (response,)
+ except:
+ # report exception back to server
+ response = xmlrpclib.dumps (
+ xmlrpclib.Fault (1, "%s:%s" % (sys.exc_type, sys.exc_value))
+ )
+ else:
+ response = xmlrpclib.dumps (response, methodresponse=1)
+ except:
+ # internal error, report as HTTP server error
+ request.error (500)
+ else:
+ # got a valid XML RPC response
+ request['Content-Type'] = 'text/xml'
+ request.push (response)
+ request.done()
+
+ def call (self, method, params):
+ # override this method to implement RPC methods
+        raise NotImplementedError('override call() to implement RPC methods')
class collector:
- "gathers input for POST and PUT requests"
+ "gathers input for POST and PUT requests"
- def __init__ (self, handler, request):
+ def __init__ (self, handler, request):
- self.handler = handler
- self.request = request
- self.data = ''
+ self.handler = handler
+ self.request = request
+ self.data = ''
- # make sure there's a content-length header
- cl = request.get_header ('content-length')
+ # make sure there's a content-length header
+ cl = request.get_header ('content-length')
- if not cl:
- request.error (411)
- else:
- cl = string.atoi (cl)
- # using a 'numeric' terminator
- self.request.channel.set_terminator (cl)
+ if not cl:
+ request.error (411)
+ else:
+ cl = string.atoi (cl)
+ # using a 'numeric' terminator
+ self.request.channel.set_terminator (cl)
- def collect_incoming_data (self, data):
- self.data = self.data + data
+ def collect_incoming_data (self, data):
+ self.data = self.data + data
- def found_terminator (self):
- # set the terminator back to the default
- self.request.channel.set_terminator ('\r\n\r\n')
- self.handler.continue_request (self.data, self.request)
+ def found_terminator (self):
+ # set the terminator back to the default
+ self.request.channel.set_terminator ('\r\n\r\n')
+ self.handler.continue_request (self.data, self.request)
if __name__ == '__main__':
- class rpc_demo (xmlrpc_handler):
-
- def call (self, method, params):
- print('method="%s" params=%s' % (method, params))
- return "Sure, that works"
+ class rpc_demo (xmlrpc_handler):
- import asyncore
- import http_server
+ def call (self, method, params):
+ print('method="%s" params=%s' % (method, params))
+ return "Sure, that works"
- hs = http_server.http_server ('', 8000)
- rpc = rpc_demo()
- hs.install_handler (rpc)
-
- asyncore.loop()
+ import asyncore
+ import http_server
+
+ hs = http_server.http_server ('', 8000)
+ rpc = rpc_demo()
+ hs.install_handler (rpc)
+
+ asyncore.loop()
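
The demo above wires xmlrpc_handler into Medusa's http_server and answers every call from rpc_demo.call(). A minimal sketch of a matching client, assuming the server is running locally on port 8000 (the method name and argument are illustrative):

    import xmlrpclib  # Python 2 stdlib, the same module the handler uses

    server = xmlrpclib.ServerProxy('http://localhost:8000/RPC2')
    # POSTs to /RPC2; the handler unpacks the body with xmlrpclib.loads()
    # and dispatches to rpc_demo.call('echo', ('hello',)).
    print(server.echo('hello'))
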
diff --git a/demo/medusa054/START.py b/demo/medusa054/START.py
index 0346fb5..1480035 100644
--- a/demo/medusa054/START.py
+++ b/demo/medusa054/START.py
@@ -7,7 +7,7 @@ import os
import os.path
import sys
-# Medusa
+# Medusa
import asyncore
import default_handler
import filesys
@@ -27,7 +27,7 @@ FTP_PORT = 39021
hs=http_server.http_server('', HTTP_PORT)
-Rand.load_file('../randpool.dat', -1)
+Rand.load_file('../randpool.dat', -1)
ssl_ctx=SSL.Context('sslv23')
ssl_ctx.load_cert('server.pem')
ssl_ctx.load_verify_locations('ca.pem', '')
diff --git a/demo/medusa054/START_xmlrpc.py b/demo/medusa054/START_xmlrpc.py
index ff725c9..dcca122 100644
--- a/demo/medusa054/START_xmlrpc.py
+++ b/demo/medusa054/START_xmlrpc.py
@@ -7,7 +7,7 @@ import os
import os.path
import sys
-# Medusa
+# Medusa
import asyncore
import default_handler
import filesys
@@ -28,7 +28,7 @@ HTTPS_PORT=39443
hs=http_server.http_server('', HTTP_PORT)
-Rand.load_file('../randpool.dat', -1)
+Rand.load_file('../randpool.dat', -1)
ssl_ctx=SSL.Context('sslv23')
ssl_ctx.load_cert('server.pem')
#ssl_ctx.load_verify_location('ca.pem')
diff --git a/demo/medusa054/ftps_server.py b/demo/medusa054/ftps_server.py
index 8638156..4ce77c7 100644
--- a/demo/medusa054/ftps_server.py
+++ b/demo/medusa054/ftps_server.py
@@ -1,4 +1,4 @@
-"""An FTP/TLS server built on Medusa's ftp_server.
+"""An FTP/TLS server built on Medusa's ftp_server.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
@@ -15,7 +15,7 @@ from M2Crypto import SSL, version
VERSION_STRING=version
class ftp_tls_channel(ftp_server.ftp_channel):
-
+
"""FTP/TLS server channel for Medusa."""
def __init__(self, server, ssl_ctx, conn, addr):
@@ -52,7 +52,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self._ssl_accepting = 0
else:
try:
- ftp_server.ftp_channel.handle_read(self)
+ ftp_server.ftp_channel.handle_read(self)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
self.close()
@@ -67,7 +67,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self._ssl_accepting = 0
else:
try:
- ftp_server.ftp_channel.handle_write(self)
+ ftp_server.ftp_channel.handle_write(self)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
self.close()
@@ -116,7 +116,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
if string.find(command, 'stor') != -1:
while command and command[0] not in string.letters:
command = command[1:]
-
+
func_name = 'cmd_%s' % command
if command != 'pass':
self.log('<== %s' % repr(self.in_buffer)[1:-1])
@@ -126,8 +126,8 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self.in_buffer = ''
if not hasattr(self, func_name):
self.command_not_understood(line[0])
- return
-
+ return
+
func = getattr(self, func_name)
if not self.check_command_authorization(command):
self.command_not_authorized(command)
@@ -217,7 +217,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
else:
self.respond('234 AUTH TLS successful')
self._ssl_accepting = 1
- self.socket = SSL.Connection(self.ssl_ctx, self.socket)
+ self.socket = SSL.Connection(self.ssl_ctx, self.socket)
self.socket.setup_addr(self.addr)
self.socket.setup_ssl()
self.socket.set_accept_state()
@@ -227,7 +227,7 @@ class ftp_tls_channel(ftp_server.ftp_channel):
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer. For
- FTP/TLS the only valid value for the parameter is '0'; any
+ FTP/TLS the only valid value for the parameter is '0'; any
other value is accepted but ignored."""
if not (self._ssl_accepting or self._ssl_accepted):
return self.respond('503 AUTH TLS must be issued prior to PBSZ')
@@ -235,21 +235,21 @@ class ftp_tls_channel(ftp_server.ftp_channel):
self.respond('200 PBSZ=0 successful.')
def cmd_prot(self, line):
- """Negotiate the security level of the data connection."""
+ """Negotiate the security level of the data connection."""
if self._pbsz is None:
return self.respond('503 PBSZ must be issued prior to PROT')
if line[1] == 'C':
self.respond('200 Protection set to Clear')
self._pbsz = None
self._prot = None
- elif line[1] == 'P':
+ elif line[1] == 'P':
self.respond('200 Protection set to Private')
self._prot = 1
elif line[1] in ('S', 'E'):
self.respond('536 PROT %s unsupported' % line[1])
else:
self.respond('504 PROT %s unsupported' % line[1])
-
+
class ftp_tls_server(ftp_server.ftp_server):
@@ -334,8 +334,8 @@ class nbio_ftp_tls_actor:
return self._ssl_handshake_ok
def handle_connect(self):
- """Handle a data connection that occurs after this instance came
- into being. When this handler is triggered, self.socket has been
+ """Handle a data connection that occurs after this instance came
+ into being. When this handler is triggered, self.socket has been
created and refers to the underlying connected socket."""
self.socket = SSL.Connection(self.ssl_ctx, self.socket)
self.socket.setup_addr(self.client_addr)
@@ -370,7 +370,7 @@ class nbio_ftp_tls_actor:
self.close()
self.log_info('recv: closing channel %s %s' % (repr(self), what))
return ''
-
+
class tls_xmit_channel(nbio_ftp_tls_actor, ftp_server.xmit_channel):
@@ -401,17 +401,17 @@ class tls_xmit_channel(nbio_ftp_tls_actor, ftp_server.xmit_channel):
"""Handle a read event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.xmit_channel.handle_read(self)
+ ftp_server.xmit_channel.handle_read(self)
def handle_write(self):
"""Handle a write event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.xmit_channel.handle_write(self)
+ ftp_server.xmit_channel.handle_write(self)
class tls_recv_channel(nbio_ftp_tls_actor, ftp_server.recv_channel):
-
+
"""TLS driver for a receive-only data connection."""
def __init__(self, channel, conn, ssl_ctx, client_addr, fd):
@@ -427,12 +427,12 @@ class tls_recv_channel(nbio_ftp_tls_actor, ftp_server.recv_channel):
"""Handle a read event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.recv_channel.handle_read(self)
+ ftp_server.recv_channel.handle_read(self)
def handle_write(self):
"""Handle a write event: either continue with TLS negotiation
or let the application handle this event."""
if self.tls_neg_ok():
- ftp_server.recv_channel.handle_write(self)
+ ftp_server.recv_channel.handle_write(self)
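
Client-side, the command sequence this channel implements (AUTH TLS to secure the control connection, then PBSZ 0 and PROT P before any protected transfer) is driven by M2Crypto.ftpslib. A minimal sketch against this server's default port, mirroring demo/ssl/ftp_tls.py below:

    from M2Crypto import SSL, ftpslib

    ctx = SSL.Context('sslv23')
    f = ftpslib.FTP_TLS(ssl_ctx=ctx)
    f.connect('127.0.0.1', 39021)
    f.auth_tls()      # AUTH TLS: wrap the control channel in TLS
    f.set_pasv(1)
    f.login('ftp', 'anonymous@')
    f.prot_p()        # PBSZ 0 + PROT P: protect the data channel
    f.retrlines('LIST')
    f.quit()
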
diff --git a/demo/medusa054/https_server.py b/demo/medusa054/https_server.py
index fb1a797..a61eacd 100644
--- a/demo/medusa054/https_server.py
+++ b/demo/medusa054/https_server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-"""A https server built on Medusa's http_server.
+"""A https server built on Medusa's http_server.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
@@ -57,7 +57,7 @@ class https_server(http_server.http_server):
sys.stdout.write(self.SERVER_IDENT + '\n\n')
sys.stdout.flush()
self.ssl_ctx=ssl_ctx
-
+
def handle_accept(self):
# Cribbed from http_server.
self.total_clients.increment()
diff --git a/demo/medusa054/poison_handler.py b/demo/medusa054/poison_handler.py
index acc78ab..61001a7 100644
--- a/demo/medusa054/poison_handler.py
+++ b/demo/medusa054/poison_handler.py
@@ -9,7 +9,7 @@ RESP_HEAD="""\
RESP_MIDDLE="""
<h2>M2Crypto https server demonstration</h2>
-This web page is generated by the "poison" http request handler.
+This web page is generated by the "poison" http request handler.
<br>
The links just go on and on and on...
<br><br>
@@ -32,7 +32,7 @@ def makepage(numlinks):
url='\r\n'
numlinks=whrandom.randint(2, numlinks)
- for i in range(numlinks):
+ for i in range(numlinks):
url=url+'<a href="/poison/'
for u in range(whrandom.randint(3, 15)):
pick=whrandom.randint(0, numchar-1)
@@ -51,7 +51,7 @@ def makepage(numlinks):
class poison_handler:
- """This is a clone of webpoison - every URL returns a page of URLs, each of which
+ """This is a clone of webpoison - every URL returns a page of URLs, each of which
returns a page of URLs, each of _which_ returns a page of URLs, ad infinitum.
The objective is to sucker address-harvesting bots run by spammers."""
diff --git a/demo/perf/memio.py b/demo/perf/memio.py
index 238d85c..53f7de6 100644
--- a/demo/perf/memio.py
+++ b/demo/perf/memio.py
@@ -2,7 +2,7 @@
"""A comparison of Python's cStringIO and M2Crypto's MemoryBuffer,
the outcome of which is that MemoryBuffer suffers from doing too much
-in Python.
+in Python.
Two ways to optimise MemoryBuffer:
1. Create MemoryBufferIn and MemoryBufferOut a la StringI and StringO.
diff --git a/demo/perf/sha1.py b/demo/perf/sha1.py
index 6d85289..ca320de 100644
--- a/demo/perf/sha1.py
+++ b/demo/perf/sha1.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python2.0
-"""A comparison of Python's sha and M2Crypto.EVP.MessageDigest,
-the outcome of which is that EVP.MessageDigest suffers from doing
+"""A comparison of Python's sha and M2Crypto.EVP.MessageDigest,
+the outcome of which is that EVP.MessageDigest suffers from doing
too much in Python."""
import profile
diff --git a/demo/pgp/pgpstep.py b/demo/pgp/pgpstep.py
index cc0bf25..3885341 100644
--- a/demo/pgp/pgpstep.py
+++ b/demo/pgp/pgpstep.py
@@ -19,7 +19,7 @@ def desc_public_key(pkt):
print('e =', repr(pkt._e))
print('n =', repr(pkt._n))
print()
-
+
def desc_trust(pkt):
print('packet = trust')
print('trustworthiness = <ignored>')
@@ -99,17 +99,17 @@ if __name__ == '__main__':
import sys
count = 0
for arg in sys.argv[1:]:
- f = open(arg, 'rb')
- ps = PGP.packet_stream(f)
- while 1:
- pkt = ps.read()
- if pkt is None:
- break
- elif pkt:
- print('-' * 70)
- DESC[pkt.__class__](pkt)
- count = count + ps.count()
- ps.close()
+ f = open(arg, 'rb')
+ ps = PGP.packet_stream(f)
+ while 1:
+ pkt = ps.read()
+ if pkt is None:
+ break
+ elif pkt:
+ print('-' * 70)
+ DESC[pkt.__class__](pkt)
+ count = count + ps.count()
+ ps.close()
print('-' * 70)
print('Total octets processed =', count)
diff --git a/demo/rsa_bench.py b/demo/rsa_bench.py
index bcc4e2e..9129353 100644
--- a/demo/rsa_bench.py
+++ b/demo/rsa_bench.py
@@ -10,15 +10,15 @@ from __future__ import print_function
makenewkey showdigest showprofile
md5 sha1 sha256 sha512
<key length>
-
+
Larry Bugbee
November 2006
-
-
- Some portions are Copyright (c) 1999-2003 Ng Pheng Siong.
+
+
+ Some portions are Copyright (c) 1999-2003 Ng Pheng Siong.
All rights reserved.
- Portions created by Open Source Applications Foundation
+ Portions created by Open Source Applications Foundation
(OSAF) are Copyright (C) 2004 OSAF. All Rights Reserved.
"""
@@ -34,7 +34,7 @@ showpubkey = 0 # 1 = show the public key value
showdigest = 0 # 1 = show the digest value
showprofile = 0 # 1 = use the python profiler
-hashalgs = ['md5', 'ripemd160', 'sha1',
+hashalgs = ['md5', 'ripemd160', 'sha1',
'sha224', 'sha256', 'sha384', 'sha512']
# default hashing algorithm
@@ -46,22 +46,22 @@ exponent = 65537
'''
There is some temptation to use an RSA exponent of 3
because 1) it is easy to remember and 2) it minimizes the
- effort of signature verification. Unfortunately there
- a couple of attacks based on the use of 3. From a draft
+ effort of signature verification. Unfortunately there are
+ a couple of attacks based on the use of 3. From a draft
RFC (Eastlake, Dec 2000):
- A public exponent of 3 minimizes the effort needed to
+ A public exponent of 3 minimizes the effort needed to
verify a signature. Use of 3 as the public exponent is
- weak for confidentiality uses since, if the same data
- can be collected encrypted under three different keys
- with an exponent of 3 then, using the Chinese Remainder
+ weak for confidentiality uses since, if the same data
+ can be collected encrypted under three different keys
+ with an exponent of 3 then, using the Chinese Remainder
Theorem [NETSEC], the original plain text can be easily
- recovered.
- This applies to confidentiality so it is not of major
- concern here. The second attack is a protocol implementation
- weakness and can be patched, but has the patch been applied?
- ...correctly? It is arguably better to get into the habit
- of using a stronger exponent and avoiding these and possible
- future attacks based on 3. I suggest getting in the habit
+ recovered.
+ This applies to confidentiality so it is not of major
+ concern here. The second attack is a protocol implementation
+ weakness and can be patched, but has the patch been applied?
+ ...correctly? It is arguably better to get into the habit
+ of using a stronger exponent and avoiding these and possible
+ future attacks based on 3. I suggest getting in the habit
of using something stronger. Some suggest using 65537.
'''
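
In code, the stronger exponent is a one-argument choice at key generation time. A minimal sketch using the same calls as main() below (key size and file names are illustrative):

    from M2Crypto import RSA, Rand

    Rand.load_file('randpool.dat', -1)        # seed the PRNG, as elsewhere in this demo
    rsa = RSA.gen_key(2048, 65537)            # 65537 = F4, the exponent suggested above
    rsa.save_key('rsa2048pvtkey.pem', None)   # None: save the private key unencrypted
    rsa.save_pub_key('rsa2048pubkey.pem')
    Rand.save_file('randpool.dat')
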
@@ -101,7 +101,7 @@ def speed():
for i in range(N2):
rsa.verify(dgst, sig)
print(' %d verifications: %8.2fs' % (N2, (time() - t1)))
-
+
def test_speed(rsa, dgst):
print(' measuring speed...')
if showprofile:
@@ -115,12 +115,12 @@ def test_speed(rsa, dgst):
def main(keylen, hashalg):
global rsa, dgst # this exists ONLY for speed testing
-
- Rand.load_file('randpool.dat', -1)
-
+
+ Rand.load_file('randpool.dat', -1)
+
pvtkeyfilename = 'rsa%dpvtkey.pem' % (keylen)
- pubkeyfilename = 'rsa%dpubkey.pem' % (keylen)
-
+ pubkeyfilename = 'rsa%dpubkey.pem' % (keylen)
+
if makenewkey:
print(' making and saving a new key')
rsa = RSA.gen_key(keylen, exponent)
@@ -130,11 +130,11 @@ def main(keylen, hashalg):
print(' loading an existing key')
rsa = RSA.load_key(pvtkeyfilename)
print(' rsa key length:', len(rsa))
-
+
if not rsa.check_key():
raise 'key is not initialised'
- # since we are testing signing and verification, let's not
+ # since we are testing signing and verification, let's not
# be fussy about the digest. Just make one.
md = EVP.MessageDigest(hashalg)
md.update('can you spell subliminal channel?')
@@ -142,7 +142,7 @@ def main(keylen, hashalg):
print(' hash algorithm: %s' % hashalg)
if showdigest:
print(' %s digest: \n%s' % (hashalg, base64.encodestring(dgst)))
-
+
test(rsa, dgst)
# test_asn1(rsa, dgst)
test_speed(rsa, dgst)
@@ -175,7 +175,7 @@ if __name__=='__main__':
except:
print('\n *** argument "%s" not understood ***' % arg)
print_usage()
-
+
main(keylen, hashalg)
diff --git a/demo/rsatest.py b/demo/rsatest.py
index 7489726..f867f99 100644
--- a/demo/rsatest.py
+++ b/demo/rsatest.py
@@ -27,7 +27,7 @@ def test_encrypt(padding):
def test_sign(padding):
print('testing private-key signing:', padding)
padding=eval('RSA.'+padding)
- ctxt=priv.private_encrypt(dgst, padding)
+ ctxt=priv.private_encrypt(dgst, padding)
ptxt=pub.public_decrypt(ctxt, padding)
if ptxt!=dgst:
print('private_decrypt -> public_encrypt: not ok')
@@ -38,7 +38,7 @@ def test0():
print(repr(priv.e), repr(priv.n))
if __name__=='__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
test_encrypt('pkcs1_padding')
test_encrypt('pkcs1_oaep_padding')
#test_encrypt('sslv23_padding')
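
test_sign() drives the private_encrypt/public_decrypt primitives by hand; the one-call wrappers used in rsa_bench.py above do the PKCS #1 digest framing for you. A minimal sketch (the key file name is illustrative):

    from M2Crypto import EVP, RSA

    priv = RSA.load_key('rsa1024.pem')
    md = EVP.MessageDigest('sha1')
    md.update('can you spell subliminal channel?')
    dgst = md.digest()
    sig = priv.sign(dgst, 'sha1')             # PKCS #1 signature over the digest
    assert priv.verify(dgst, sig, 'sha1')
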
diff --git a/demo/smime.howto/decrypt.py b/demo/smime.howto/decrypt.py
index d8b8d5d..97bf063 100644
--- a/demo/smime.howto/decrypt.py
+++ b/demo/smime.howto/decrypt.py
@@ -19,6 +19,6 @@ p7, data = SMIME.smime_load_pkcs7('encrypt.p7')
# Decrypt p7.
out = s.decrypt(p7)
-
+
print(out)
diff --git a/demo/smime.howto/dv.py b/demo/smime.howto/dv.py
index 68da049..0844b1c 100644
--- a/demo/smime.howto/dv.py
+++ b/demo/smime.howto/dv.py
@@ -17,7 +17,7 @@ s.load_key('recipient_key.pem', 'recipient.pem')
# Load the signed/encrypted data.
p7, data = SMIME.smime_load_pkcs7('se.p7')
-# After the above step, 'data' == None.
+# After the above step, 'data' == None.
# Decrypt p7. 'out' now contains a PKCS #7 signed blob.
out = s.decrypt(p7)
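
Finishing the job above means verifying the inner signed blob, which takes the same plumbing as demo/smime/test.py: give the SMIME object the signer's certificate stack and a CA store, then reload the decrypted output as PKCS #7. A minimal sketch (certificate file names are illustrative):

    from M2Crypto import BIO, SMIME, X509

    s = SMIME.SMIME()
    s.load_key('recipient_key.pem', 'recipient.pem')
    p7, data = SMIME.smime_load_pkcs7('se.p7')
    blob = s.decrypt(p7)                      # inner PKCS #7 signed blob

    sk = X509.X509_Stack()                    # signer's certificate
    sk.push(X509.load_cert('signer.pem'))
    s.set_x509_stack(sk)
    st = X509.X509_Store()                    # trust anchors for chain verification
    st.load_info('ca.pem')
    s.set_x509_store(st)

    p7, data = SMIME.smime_load_pkcs7_bio(BIO.MemoryBuffer(blob))
    print(s.verify(p7))                       # returns the signed payload on success
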
diff --git a/demo/smime.howto/encrypt.py b/demo/smime.howto/encrypt.py
index 3391845..7fdd2fd 100644
--- a/demo/smime.howto/encrypt.py
+++ b/demo/smime.howto/encrypt.py
@@ -31,7 +31,7 @@ s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
# Encrypt the buffer.
p7 = s.encrypt(buf)
-
+
# Output p7 in mail-friendly format.
out = BIO.MemoryBuffer()
out.write('From: sender@example.dom\n')
diff --git a/demo/smime.howto/se.py b/demo/smime.howto/se.py
index b63a105..42e997d 100644
--- a/demo/smime.howto/se.py
+++ b/demo/smime.howto/se.py
@@ -41,7 +41,7 @@ s.write(tmp, p7)
# Encrypt the temporary buffer.
p7 = s.encrypt(tmp)
-
+
# Output p7 in mail-friendly format.
out = BIO.MemoryBuffer()
out.write('From: sender@example.dom\n')
diff --git a/demo/smime.howto/sendsmime.py b/demo/smime.howto/sendsmime.py
index d90663b..5c16fca 100644
--- a/demo/smime.howto/sendsmime.py
+++ b/demo/smime.howto/sendsmime.py
@@ -27,7 +27,7 @@ def sendsmime(from_addr, to_addrs, subject, msg, from_key, from_cert=None, to_ce
for x in to_certs:
sk.push(X509.load_cert(x))
s.set_x509_stack(sk)
- s.set_cipher(SMIME.Cipher('rc2_40_cbc'))
+ s.set_cipher(SMIME.Cipher('rc2_40_cbc'))
tmp_bio = BIO.MemoryBuffer()
if sign:
s.write(tmp_bio, p7)
@@ -38,7 +38,7 @@ def sendsmime(from_addr, to_addrs, subject, msg, from_key, from_cert=None, to_ce
out = BIO.MemoryBuffer()
out.write('From: %s\r\n' % from_addr)
out.write('To: %s\r\n' % string.join(to_addrs, ", "))
- out.write('Subject: %s\r\n' % subject)
+ out.write('Subject: %s\r\n' % subject)
if encrypt:
s.write(out, p7)
else:
@@ -58,7 +58,7 @@ def sendsmime(from_addr, to_addrs, subject, msg, from_key, from_cert=None, to_ce
msg = """
-S/MIME - Secure Multipurpose Internet Mail Extensions [RFC 2311, RFC 2312] -
+S/MIME - Secure Multipurpose Internet Mail Extensions [RFC 2311, RFC 2312] -
provides a consistent way to send and receive secure MIME data. Based on the
popular Internet MIME standard, S/MIME provides the following cryptographic
security services for electronic messaging applications - authentication,
@@ -72,8 +72,8 @@ S/MIME is implemented in Netscape Messenger and Microsoft Outlook.
if __name__ == '__main__':
- Rand.load_file('../randpool.dat', -1)
- sendsmime(from_addr = 'ngps@post1.com',
+ Rand.load_file('../randpool.dat', -1)
+ sendsmime(from_addr = 'ngps@post1.com',
to_addrs = ['popuser@nova.dyndns.org'],
subject = 'S/MIME testing',
msg = msg,
diff --git a/demo/smime/sendsmime.py b/demo/smime/sendsmime.py
index 1f75e76..c596bc9 100644
--- a/demo/smime/sendsmime.py
+++ b/demo/smime/sendsmime.py
@@ -27,7 +27,7 @@ def sendsmime(from_addr, to_addrs, subject, msg, from_key, from_cert=None, to_ce
for x in to_certs:
sk.push(X509.load_cert(x))
s.set_x509_stack(sk)
- s.set_cipher(SMIME.Cipher('rc2_40_cbc'))
+ s.set_cipher(SMIME.Cipher('rc2_40_cbc'))
tmp_bio = BIO.MemoryBuffer()
if sign:
s.write(tmp_bio, p7)
@@ -38,7 +38,7 @@ def sendsmime(from_addr, to_addrs, subject, msg, from_key, from_cert=None, to_ce
out = BIO.MemoryBuffer()
out.write('From: %s\r\n' % from_addr)
out.write('To: %s\r\n' % string.join(to_addrs, ", "))
- out.write('Subject: %s\r\n' % subject)
+ out.write('Subject: %s\r\n' % subject)
if encrypt:
s.write(out, p7)
else:
@@ -58,7 +58,7 @@ def sendsmime(from_addr, to_addrs, subject, msg, from_key, from_cert=None, to_ce
msg = """
-S/MIME - Secure Multipurpose Internet Mail Extensions [RFC 2311, RFC 2312] -
+S/MIME - Secure Multipurpose Internet Mail Extensions [RFC 2311, RFC 2312] -
provides a consistent way to send and receive secure MIME data. Based on the
popular Internet MIME standard, S/MIME provides the following cryptographic
security services for electronic messaging applications - authentication,
@@ -72,8 +72,8 @@ S/MIME is implemented in Netscape Messenger and Microsoft Outlook.
if __name__ == '__main__':
- Rand.load_file('../randpool.dat', -1)
- sendsmime(from_addr = 'ngps@post1.com',
+ Rand.load_file('../randpool.dat', -1)
+ sendsmime(from_addr = 'ngps@post1.com',
to_addrs = ['jerry','ngps@post1.com'],
subject = 'S/MIME testing',
msg = msg,
diff --git a/demo/smime/test.py b/demo/smime/test.py
index e5f56f1..ed6835d 100644
--- a/demo/smime/test.py
+++ b/demo/smime/test.py
@@ -9,7 +9,7 @@ Copyright (c) 2000 Ng Pheng Siong. All rights reserved."""
from M2Crypto import BIO, Rand, SMIME, X509
ptxt = """
-S/MIME - Secure Multipurpose Internet Mail Extensions [RFC 2311, RFC 2312] -
+S/MIME - Secure Multipurpose Internet Mail Extensions [RFC 2311, RFC 2312] -
provides a consistent way to send and receive secure MIME data. Based on the
popular Internet MIME standard, S/MIME provides the following cryptographic
security services for electronic messaging applications - authentication,
@@ -65,7 +65,7 @@ def verify_clear():
print('ok')
else:
print('not ok')
-
+
def verify_opaque():
print('test load & verify opaque...', end=' ')
s = SMIME.SMIME()
@@ -82,7 +82,7 @@ def verify_opaque():
print('ok')
else:
print('not ok')
-
+
def verify_netscape():
print('test load & verify netscape messager output...', end=' ')
s = SMIME.SMIME()
@@ -97,7 +97,7 @@ def verify_netscape():
v = s.verify(p7, data)
print('\n', v, '\n...ok')
-
+
def sv():
print('test sign/verify...', end=' ')
buf = makebuf()
@@ -113,7 +113,7 @@ def sv():
buf = makebuf() # Recreate buf, because sign() has consumed it.
bio = BIO.MemoryBuffer()
s.write(bio, p7, buf)
-
+
# Plumbing for verification: CA's cert.
st = X509.X509_Store()
st.load_info('ca.pem')
@@ -128,7 +128,7 @@ def sv():
# Verify.
p7, buf = SMIME.smime_load_pkcs7_bio(bio)
v = s.verify(p7, buf, flags=SMIME.PKCS7_DETACHED)
-
+
if v:
print('ok')
else:
@@ -146,17 +146,17 @@ def ed():
s.set_x509_stack(sk)
# Add a cipher.
- s.set_cipher(SMIME.Cipher('bf_cbc'))
+ s.set_cipher(SMIME.Cipher('bf_cbc'))
# Encrypt.
p7 = s.encrypt(buf)
-
+
# Load target's private key.
s.load_key('client.pem')
# Decrypt.
data = s.decrypt(p7)
-
+
if data:
print('ok')
else:
@@ -167,7 +167,7 @@ def zope_test():
print('test zophistry...')
f = open('client.pem')
cert_str = f.read()
- key_bio = BIO.MemoryBuffer(cert_str)
+ key_bio = BIO.MemoryBuffer(cert_str)
cert_bio = BIO.MemoryBuffer(cert_str) # XXX Kludge.
s = SMIME.SMIME()
s.load_key_bio(key_bio, cert_bio)
@@ -182,7 +182,7 @@ def leak_test():
if __name__ == '__main__':
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
ed()
sign()
verify_opaque()
diff --git a/demo/smime/unsmime.py b/demo/smime/unsmime.py
index 7a8ae34..97ac971 100644
--- a/demo/smime/unsmime.py
+++ b/demo/smime/unsmime.py
@@ -46,7 +46,7 @@ def decrypt_verify(p7file, recip_key, signer_cert, ca_cert):
if __name__ == '__main__':
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
decrypt_verify(BIO.File(sys.stdin), 'client.pem', 'client2.pem','ca.pem')
Rand.save_file('../randpool.dat')
diff --git a/demo/ssl/c.py b/demo/ssl/c.py
index bd03e6e..76f20e9 100644
--- a/demo/ssl/c.py
+++ b/demo/ssl/c.py
@@ -16,11 +16,11 @@ req_11 = 'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n'
def c_10():
- c_style(HOST, PORT, req_10)
+ c_style(HOST, PORT, req_10)
def c_11():
- c_style(HOST, PORT, req_11)
+ c_style(HOST, PORT, req_11)
def c_style(HOST, PORT, req):
diff --git a/demo/ssl/c_bio.py b/demo/ssl/c_bio.py
index 1c20e91..35a522b 100644
--- a/demo/ssl/c_bio.py
+++ b/demo/ssl/c_bio.py
@@ -16,11 +16,11 @@ req_11 = 'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n'
def c_10():
- c_style(HOST, PORT, req_10)
+ c_style(HOST, PORT, req_10)
def c_11():
- c_style(HOST, PORT, req_11)
+ c_style(HOST, PORT, req_11)
def c_style(HOST, PORT, req):
diff --git a/demo/ssl/echo-eg.py b/demo/ssl/echo-eg.py
index 9fe473a..a450d74 100644
--- a/demo/ssl/echo-eg.py
+++ b/demo/ssl/echo-eg.py
@@ -20,7 +20,7 @@ for opt in optlist:
elif '-p' in opt:
port = int(opt[1])
-Rand.load_file('../randpool.dat', -1)
+Rand.load_file('../randpool.dat', -1)
ctx = SSL.Context('sslv3')
ctx.load_cert('client.pem')
@@ -43,7 +43,7 @@ while 1:
sys.stdout.write(data)
sys.stdout.flush()
buf = sys.stdin.readline()
- if not buf:
+ if not buf:
break
s.send(buf)
diff --git a/demo/ssl/echo.py b/demo/ssl/echo.py
index a94ede2..dba85b4 100644
--- a/demo/ssl/echo.py
+++ b/demo/ssl/echo.py
@@ -20,7 +20,7 @@ for opt in optlist:
elif '-p' in opt:
port = int(opt[1])
-Rand.load_file('../randpool.dat', -1)
+Rand.load_file('../randpool.dat', -1)
ctx = SSL.Context('sslv3')
ctx.load_cert_chain('client.pem')
@@ -51,7 +51,7 @@ while 1:
sys.stdout.write(data)
sys.stdout.flush()
buf = sys.stdin.readline()
- if not buf:
+ if not buf:
break
s.send(buf)
diff --git a/demo/ssl/echod-async.py b/demo/ssl/echod-async.py
index b6aaff9..4d4a19c 100644
--- a/demo/ssl/echod-async.py
+++ b/demo/ssl/echod-async.py
@@ -27,9 +27,9 @@ class ssl_echo_channel(asyncore.dispatcher):
def writable(self):
return self._ssl_accepting or (len(self.buffer) > 0)
-
+
def handle_write(self):
- if self._ssl_accepting:
+ if self._ssl_accepting:
s = self.socket.accept_ssl()
if s:
self._ssl_accepting = 0
@@ -64,8 +64,8 @@ class ssl_echo_channel(asyncore.dispatcher):
pass
elif blob == '':
self.handle_close()
- else:
- self.buffer = self.buffer + blob
+ else:
+ self.buffer = self.buffer + blob
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
self.handle_close()
@@ -86,7 +86,7 @@ class ssl_echo_server(SSL.ssl_dispatcher):
self.bind((addr, port))
self.listen(5)
self.ssl_ctx=ssl_context
-
+
def handle_accept(self):
try:
sock, addr = self.socket.accept()
@@ -103,7 +103,7 @@ class ssl_echo_server(SSL.ssl_dispatcher):
if __name__=='__main__':
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
ctx = echod_lib.init_context('sslv23', 'server.pem', 'ca.pem', \
#SSL.verify_peer | SSL.verify_fail_if_no_peer_cert)
SSL.verify_none)
diff --git a/demo/ssl/echod-eg1.py b/demo/ssl/echod-eg1.py
index d706d54..0cba1e9 100644
--- a/demo/ssl/echod-eg1.py
+++ b/demo/ssl/echod-eg1.py
@@ -33,18 +33,18 @@ class ssl_echo_handler(SocketServer.BaseRequestHandler):
buf = self.request.read()
if not buf:
break
- self.request.write(buf)
+ self.request.write(buf)
def finish(self):
self.request.set_shutdown(SSL.SSL_SENT_SHUTDOWN|SSL.SSL_RECEIVED_SHUTDOWN)
self.request.close()
if __name__ == '__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
threading.init()
ctx = init_context('sslv23', 'dh1024.pem', 'server.pem', 'ca.pem', SSL.verify_peer)
s = SSL.SSLServer(('', 9999), ssl_echo_handler, ctx)
- s.serve_forever()
+ s.serve_forever()
threading.cleanup()
Rand.save_file('randpool.dat')
diff --git a/demo/ssl/echod-forking.py b/demo/ssl/echod-forking.py
index 43faab3..7c7878a 100644
--- a/demo/ssl/echod-forking.py
+++ b/demo/ssl/echod-forking.py
@@ -12,11 +12,11 @@ class ssl_echo_handler(echod_lib.ssl_echo_handler):
if __name__ == '__main__':
- Rand.load_file('../randpool.dat', -1)
- ctx = echod_lib.init_context('sslv23', 'server.pem', 'ca.pem',
+ Rand.load_file('../randpool.dat', -1)
+ ctx = echod_lib.init_context('sslv23', 'server.pem', 'ca.pem',
SSL.verify_peer | SSL.verify_fail_if_no_peer_cert)
ctx.set_tmp_dh('dh1024.pem')
s = SSL.ForkingSSLServer(('', 9999), ssl_echo_handler, ctx)
- s.serve_forever()
+ s.serve_forever()
Rand.save_file('../randpool.dat')
diff --git a/demo/ssl/echod-iterative.py b/demo/ssl/echod-iterative.py
index 2e68699..22150cf 100644
--- a/demo/ssl/echod-iterative.py
+++ b/demo/ssl/echod-iterative.py
@@ -12,12 +12,12 @@ class ssl_echo_handler(echod_lib.ssl_echo_handler):
if __name__=='__main__':
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
ctx=echod_lib.init_context('sslv23', 'server.pem', 'ca.pem', \
SSL.verify_peer | SSL.verify_fail_if_no_peer_cert)
#SSL.verify_none)
ctx.set_tmp_dh('dh1024.pem')
s=SSL.SSLServer(('', 9999), ssl_echo_handler, ctx)
- s.serve_forever()
+ s.serve_forever()
Rand.save_file('../randpool.dat')
diff --git a/demo/ssl/echod-thread.py b/demo/ssl/echod-thread.py
index 74147cb..c343ae8 100644
--- a/demo/ssl/echod-thread.py
+++ b/demo/ssl/echod-thread.py
@@ -18,13 +18,13 @@ def echo_handler(sslctx, sock, addr):
sslconn.setup_ssl()
sslconn.set_accept_state()
sslconn.accept_ssl()
- sslconn.write(buffer)
+ sslconn.write(buffer)
while 1:
try:
buf = sslconn.read()
if not buf:
break
- sslconn.write(buf)
+ sslconn.write(buf)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
break
@@ -39,8 +39,8 @@ def echo_handler(sslctx, sock, addr):
if __name__=='__main__':
threading.init()
- Rand.load_file('../randpool.dat', -1)
- ctx=echod_lib.init_context('sslv23', 'server.pem', 'ca.pem',
+ Rand.load_file('../randpool.dat', -1)
+ ctx=echod_lib.init_context('sslv23', 'server.pem', 'ca.pem',
SSL.verify_peer | SSL.verify_fail_if_no_peer_cert)
ctx.set_tmp_dh('dh1024.pem')
sock = socket(AF_INET, SOCK_STREAM)
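
The handshake sequence in echo_handler() above (setup_ssl, set_accept_state, accept_ssl) is the server-side recipe for wrapping an already-accepted plain socket. A minimal self-contained sketch of the same steps:

    from socket import socket, AF_INET, SOCK_STREAM
    from M2Crypto import SSL

    ctx = SSL.Context('sslv23')
    ctx.load_cert_chain('server.pem')     # server certificate + key, as in the demos
    sock = socket(AF_INET, SOCK_STREAM)
    sock.bind(('', 9999))
    sock.listen(5)
    conn, addr = sock.accept()            # plain TCP accept first
    sslconn = SSL.Connection(ctx, conn)
    sslconn.setup_ssl()
    sslconn.set_accept_state()
    sslconn.accept_ssl()                  # server side of the TLS handshake
    sslconn.write('hello\n')
    sslconn.close()
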
diff --git a/demo/ssl/echod-threading.py b/demo/ssl/echod-threading.py
index 81eaef3..9ba44df 100644
--- a/demo/ssl/echod-threading.py
+++ b/demo/ssl/echod-threading.py
@@ -20,12 +20,12 @@ class ssl_echo_handler(echod_lib.ssl_echo_handler):
if __name__=='__main__':
try:
threading.init()
- Rand.load_file('../randpool.dat', -1)
- ctx=echod_lib.init_context('sslv23', 'server.pem', 'ca.pem',
+ Rand.load_file('../randpool.dat', -1)
+ ctx=echod_lib.init_context('sslv23', 'server.pem', 'ca.pem',
SSL.verify_peer | SSL.verify_fail_if_no_peer_cert)
ctx.set_tmp_dh('dh1024.pem')
s=SSL.ThreadingSSLServer(('', 9999), ssl_echo_handler, ctx)
- s.serve_forever()
+ s.serve_forever()
Rand.save_file('../randpool.dat')
except:
threading.cleanup()
diff --git a/demo/ssl/echod_lib.py b/demo/ssl/echod_lib.py
index 2566fc6..0675438 100644
--- a/demo/ssl/echod_lib.py
+++ b/demo/ssl/echod_lib.py
@@ -11,7 +11,7 @@ def init_context(protocol, certfile, cafile, verify, verify_depth=10):
ctx = SSL.Context(protocol)
ctx.load_cert_chain(certfile)
ctx.load_verify_locations(cafile)
- ctx.set_client_CA_list_from_file(cafile)
+ ctx.set_client_CA_list_from_file(cafile)
ctx.set_verify(verify, verify_depth)
#ctx.set_allow_unknown_ca(1)
ctx.set_session_id_ctx('echod')
@@ -34,7 +34,7 @@ class ssl_echo_handler(SocketServer.BaseRequestHandler):
buf = self.request.read()
if not buf:
break
- self.request.write(buf)
+ self.request.write(buf)
except SSL.SSLError as what:
if str(what) == 'unexpected eof':
break
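
The client half of these echo demos is the blocking pattern from demo/ssl/echo.py: build a Context, wrap a Connection, talk. A minimal sketch against the servers above:

    from M2Crypto import Rand, SSL

    Rand.load_file('../randpool.dat', -1)
    ctx = SSL.Context('sslv23')
    ctx.load_cert_chain('client.pem')
    ctx.load_verify_locations('ca.pem')
    ctx.set_verify(SSL.verify_peer, 10)
    s = SSL.Connection(ctx)
    s.connect(('127.0.0.1', 9999))        # TCP connect + TLS handshake
    s.send('hello, echo server\n')
    print(s.recv(1024))
    s.close()
    Rand.save_file('../randpool.dat')
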
diff --git a/demo/ssl/ftp_tls.py b/demo/ssl/ftp_tls.py
index 163d87c..45f8418 100644
--- a/demo/ssl/ftp_tls.py
+++ b/demo/ssl/ftp_tls.py
@@ -3,7 +3,7 @@
"""Demo for M2Crypto.ftpslib's FTP/TLS client.
This client interoperates with M2Crypto's Medusa-based FTP/TLS
-server as well as Peter Runestig's patched-for-TLS OpenBSD FTP
+server as well as Peter Runestig's patched-for-TLS OpenBSD FTP
server.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
@@ -15,7 +15,7 @@ def passive():
f = ftpslib.FTP_TLS(ssl_ctx=ctx)
f.connect('127.0.0.1', 39021)
f.auth_tls()
- f.set_pasv(1)
+ f.set_pasv(1)
f.login('ftp', 'ngps@')
f.prot_p()
f.retrlines('LIST')
diff --git a/demo/ssl/http_cli_20.py b/demo/ssl/http_cli_20.py
index 0ce4cef..42eb83a 100644
--- a/demo/ssl/http_cli_20.py
+++ b/demo/ssl/http_cli_20.py
@@ -11,7 +11,7 @@ def test_httplib():
resp = h.getresponse()
f = resp.fp
while 1:
- data = f.readline()
+ data = f.readline()
if not data:
break
sys.stdout.write(data)
diff --git a/demo/ssl/https_cli.py b/demo/ssl/https_cli.py
index f40969e..9350643 100644
--- a/demo/ssl/https_cli.py
+++ b/demo/ssl/https_cli.py
@@ -14,7 +14,7 @@ def test_httpslib():
ctx = SSL.Context('sslv23')
ctx.load_cert_chain('client.pem')
ctx.load_verify_locations('ca.pem', '')
- ctx.set_verify(SSL.verify_peer, 10)
+ ctx.set_verify(SSL.verify_peer, 10)
ctx.set_info_callback()
h = httpslib.HTTPSConnection('localhost', 19443, ssl_context=ctx)
h.set_debuglevel(1)
@@ -28,7 +28,7 @@ def test_httpslib():
c = 0
while 1:
# Either of following two works.
- #data = f.readline(4096)
+ #data = f.readline(4096)
data = resp.read(4096)
if not data: break
c = c + len(data)
@@ -39,7 +39,7 @@ def test_httpslib():
h.close()
if __name__=='__main__':
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
#threading.init()
test_httpslib()
#threading.cleanup()
diff --git a/demo/ssl/https_cli_async.py b/demo/ssl/https_cli_async.py
index c079141..041de0b 100644
--- a/demo/ssl/https_cli_async.py
+++ b/demo/ssl/https_cli_async.py
@@ -2,8 +2,8 @@
from __future__ import print_function
-"""Demo for client-side ssl_dispatcher usage. Note that connect()
-is blocking. (Need fix?)
+"""Demo for client-side ssl_dispatcher usage. Note that connect()
+is blocking. (Need fix?)
This isn't really a HTTPS client; it's just a toy.
@@ -64,10 +64,10 @@ class https_client(SSL.ssl_dispatcher):
if __name__ == '__main__':
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
ctx = SSL.Context()
- url = ('/jdk118/api/u-names.html',
- '/postgresql/xfunc-c.html',
+ url = ('/jdk118/api/u-names.html',
+ '/postgresql/xfunc-c.html',
'/python2.1/modindex.html')
for u in url:
https_client('localhost', u, ctx)
@@ -76,7 +76,7 @@ if __name__ == '__main__':
# Here's a sample output. Server is Apache+mod_ssl on localhost.
-# $ python https_cli_async.py
+# $ python https_cli_async.py
# 991501090.682: read 278 from /python2.1/modindex.html
# 991501090.684: read 278 from /postgresql/xfunc-c.html
# 991501090.742: read 4096 from /postgresql/xfunc-c.html
diff --git a/demo/ssl/https_srv.py b/demo/ssl/https_srv.py
index 6d12d80..757af3d 100644
--- a/demo/ssl/https_srv.py
+++ b/demo/ssl/https_srv.py
@@ -1,6 +1,6 @@
"""This server extends BaseHTTPServer and SimpleHTTPServer thusly:
1. One thread per connection.
-2. Generates directory listings.
+2. Generates directory listings.
In addition, it has the following properties:
1. Works over HTTPS only.
diff --git a/demo/ssl/s_client.py b/demo/ssl/s_client.py
index 10f7f33..831c682 100644
--- a/demo/ssl/s_client.py
+++ b/demo/ssl/s_client.py
@@ -43,7 +43,7 @@ def config(args):
cfg.protocol=[]
# First protocol found will be used.
- # Permutate the following tuple for preference.
+ # Permutate the following tuple for preference.
for p in ('tlsv1', 'sslv3', 'sslv2'):
if hasattr(cfg, p):
cfg.protocol.append(p)
diff --git a/demo/ssl/s_server.py b/demo/ssl/s_server.py
index c5fc0dc..66cae4f 100644
--- a/demo/ssl/s_server.py
+++ b/demo/ssl/s_server.py
@@ -43,7 +43,7 @@ def config(args):
cfg.protocol=[]
# First protocol found will be used.
- # Permutate the following tuple for preference.
+ # Permutate the following tuple for preference.
for p in ('tlsv1', 'sslv3', 'sslv2'):
if hasattr(cfg, p):
cfg.protocol.append(p)
@@ -124,8 +124,8 @@ class channel(SSL.ssl_dispatcher):
if blob is None:
pass
elif blob=='':
- self.handle_close()
- else:
+ self.handle_close()
+ else:
pass
if self.debug:
print('handle_read():', blob)
diff --git a/demo/ssl/server3.py b/demo/ssl/server3.py
index d2ed18d..0cfbd55 100644
--- a/demo/ssl/server3.py
+++ b/demo/ssl/server3.py
@@ -96,7 +96,7 @@ def server_thread(ctx, sock, addr):
else:
conn.clear()
print('SSL Connection closed')
-
+
if __name__=='__main__':
threading.init()
diff --git a/demo/ssl/sess.py b/demo/ssl/sess.py
index 588f1b3..2749056 100644
--- a/demo/ssl/sess.py
+++ b/demo/ssl/sess.py
@@ -1,8 +1,8 @@
from __future__ import print_function
-"""M2Crypto.SSL.Session client demo: This program requests a URL from
-a HTTPS server, saves the negotiated SSL session id, parses the HTML
-returned by the server, then requests each HREF in a separate thread
+"""M2Crypto.SSL.Session client demo: This program requests a URL from
+a HTTPS server, saves the negotiated SSL session id, parses the HTML
+returned by the server, then requests each HREF in a separate thread
using the saved SSL session id.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
@@ -48,16 +48,16 @@ def handler(sslctx, host, port, href, recurs=0, sslsess=None):
if recurs:
for a in p.anchorlist:
req = 'GET %s HTTP/1.0\r\n\r\n' % a
- thr = Thread(target=handler,
+ thr = Thread(target=handler,
args=(sslctx, host, port, req, recurs-1, sslsess))
print("Thread =", thr.getName())
thr.start()
-
+
if __name__ == '__main__':
m2_threading.init()
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
host = '127.0.0.1'
port = 9443
@@ -71,20 +71,20 @@ if __name__ == '__main__':
port = int(opt[1])
elif '-r' in opt:
req = opt[1]
-
+
ctx = SSL.Context('sslv3')
ctx.load_cert('client.pem')
ctx.load_verify_info('ca.pem')
ctx.load_client_ca('ca.pem')
ctx.set_verify(SSL.verify_none, 10)
-
+
req = 'GET %s HTTP/1.0\r\n\r\n' % req
start = Thread(target=handler, args=(ctx, host, port, req, 1))
print("Thread =", start.getName())
start.start()
start.join()
-
+
m2_threading.cleanup()
Rand.save_file('../randpool.dat')
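
The session reuse this demo threads through 'sslsess' reduces to two Connection calls: capture the negotiated session after one handshake, and offer it before the next. A minimal sketch, assuming a TLS server on 127.0.0.1:9443:

    from M2Crypto import SSL

    ctx = SSL.Context('sslv3')
    ctx.load_cert('client.pem')
    c1 = SSL.Connection(ctx)
    c1.connect(('127.0.0.1', 9443))
    sess = c1.get_session()           # capture the negotiated SSL session
    c1.close()

    c2 = SSL.Connection(ctx)
    c2.set_session(sess)              # must be set before connect()
    c2.connect(('127.0.0.1', 9443))   # server may resume rather than renegotiate
    c2.close()
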
diff --git a/demo/ssl/sess2.py b/demo/ssl/sess2.py
index 472ecb4..419071c 100644
--- a/demo/ssl/sess2.py
+++ b/demo/ssl/sess2.py
@@ -36,7 +36,7 @@ def handler(addr, sslctx, host, port, req, sslsess=None):
break
if addr != ADDR2:
- thr = Thread(target=handler,
+ thr = Thread(target=handler,
args=(ADDR2, sslctx, host, port, req, sslsess))
print("Thread =", thr.getName())
thr.start()
@@ -47,7 +47,7 @@ def handler(addr, sslctx, host, port, req, sslsess=None):
if __name__ == '__main__':
m2_threading.init()
- Rand.load_file('../randpool.dat', -1)
+ Rand.load_file('../randpool.dat', -1)
host = '127.0.0.1'
port = 443
@@ -61,19 +61,19 @@ if __name__ == '__main__':
port = int(opt[1])
elif '-r' in opt:
req = opt[1]
-
+
ctx = SSL.Context('sslv3')
ctx.load_cert('client.pem')
ctx.load_verify_info('ca.pem')
ctx.set_verify(SSL.verify_none, 10)
-
+
req = 'GET %s HTTP/1.0\r\n\r\n' % req
start = Thread(target=handler, args=(ADDR1, ctx, host, port, req))
print("Thread =", start.getName())
start.start()
start.join()
-
+
m2_threading.cleanup()
Rand.save_file('../randpool.dat')
diff --git a/demo/ssl/socklib.py b/demo/ssl/socklib.py
index e29dc82..65f7463 100644
--- a/demo/ssl/socklib.py
+++ b/demo/ssl/socklib.py
@@ -23,7 +23,7 @@ class ssl_socket(socket.socket):
def connect(self, addr, *args):
self.addr = addr
return super(ssl_socket, self).connect(addr, *args)
-
+
def close(self):
if hasattr(self, 'conn'):
self.conn.close()
diff --git a/demo/ssl/twistedsslclient.py b/demo/ssl/twistedsslclient.py
index 424ce9e..3c5e3d2 100755
--- a/demo/ssl/twistedsslclient.py
+++ b/demo/ssl/twistedsslclient.py
@@ -13,7 +13,7 @@ import twisted.protocols.basic as basic
import twisted.internet.reactor as reactor
import M2Crypto.SSL.TwistedProtocolWrapper as wrapper
import M2Crypto.SSL as SSL
-
+
class EchoClient(basic.LineReceiver):
def connectionMade(self):
self.sendLine('Hello World!')
@@ -21,7 +21,7 @@ class EchoClient(basic.LineReceiver):
def lineReceived(self, line):
print('received: "%s"' % line)
self.transport.loseConnection()
-
+
class EchoClientFactory(protocol.ClientFactory):
protocol = EchoClient
diff --git a/demo/ssl/xmlrpc_srv.py b/demo/ssl/xmlrpc_srv.py
index 7fb03be..cd36451 100644
--- a/demo/ssl/xmlrpc_srv.py
+++ b/demo/ssl/xmlrpc_srv.py
@@ -24,5 +24,5 @@ if __name__ == '__main__':
ctx = init_context('sslv23', 'server.pem', 'ca.pem', SSL.verify_none)
ctx.set_tmp_dh('dh1024.pem')
s = SSL.ThreadingSSLServer(('', 9443), xmlrpc_handler, ctx)
- s.serve_forever()
+ s.serve_forever()
diff --git a/demo/tinderbox/killableprocess.py b/demo/tinderbox/killableprocess.py
index 9c4fdb0..2212d10 100644
--- a/demo/tinderbox/killableprocess.py
+++ b/demo/tinderbox/killableprocess.py
@@ -108,7 +108,7 @@ class Popen(subprocess.Popen):
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
-
+
startupinfo.hStdInput = int(p2cread)
startupinfo.hStdOutput = int(c2pwrite)
startupinfo.hStdError = int(errwrite)
@@ -119,7 +119,7 @@ class Popen(subprocess.Popen):
args = comspec + " /c " + args
# We create a new job for this process, so that we can kill
- # the process and any sub-processes
+ # the process and any sub-processes
self._job = winprocess.CreateJobObject()
creationflags |= winprocess.CREATE_SUSPENDED
@@ -132,7 +132,7 @@ class Popen(subprocess.Popen):
creationflags,
winprocess.EnvironmentBlock(env),
cwd, startupinfo)
-
+
self._child_created = True
self._handle = hp
self._thread = ht
@@ -155,7 +155,7 @@ class Popen(subprocess.Popen):
winprocess.TerminateJobObject(self._job, 127)
else:
winprocess.TerminateProcess(self._handle, 127)
- self.returncode = 127
+ self.returncode = 127
else:
if sys.platform == 'cygwin':
cmd = "taskkill /f /pid " + str(self.pid)
@@ -189,7 +189,7 @@ class Popen(subprocess.Popen):
if timeout == -1:
subprocess.Popen.wait(self)
return self.returncode
-
+
starttime = time.time()
# Make sure there is a signal handler for SIGCHLD installed
@@ -201,7 +201,7 @@ class Popen(subprocess.Popen):
self._handle_exitstatus(sts)
signal.signal(signal.SIGCHLD, oldsignal)
return self.returncode
-
+
# time.sleep is interrupted by signals (good!)
newtimeout = timeout - time.time() + starttime
time.sleep(newtimeout)
diff --git a/demo/tinderbox/slave.py b/demo/tinderbox/slave.py
index 5c92ba1..7cdc5ca 100755
--- a/demo/tinderbox/slave.py
+++ b/demo/tinderbox/slave.py
@@ -18,7 +18,7 @@ name = identify your build slave, for example Ubuntu 8.04 32-bit
;;python = python --version
;;clean = rm -fr m2crypto
;;svn = svn co http://svn.osafoundation.org/m2crypto/trunk m2crypto
-;;patch =
+;;patch =
;;build = python setup.py clean --all build
;; OR another way to do tests without setuptools:
;;build = PYTHONPATH=build/lib-something python tests/alltests.py
@@ -68,7 +68,7 @@ def load_config(cfg='config.ini'):
# XXX copied from test_ssl
def zap_servers():
s = 's_server'
- fn = tempfile.mktemp()
+ fn = tempfile.mktemp()
cmd = 'ps | egrep %s > %s' % (s, fn)
os.system(cmd)
f = open(fn)
@@ -85,26 +85,26 @@ def zap_servers():
def build(commands, config):
status = 'success'
-
+
cwd = os.getcwd()
timeout = int(config.get('timeout') or 180)
-
+
bl.initLog('tbox.log', echo=debug_script)
-
+
starttime = int(time.time())
-
+
for command in commands:
- cmd = config.get(command)
+ cmd = config.get(command)
if not cmd:
cmd = DEFAULT_COMMANDS[command]
if not cmd:
continue
else:
cmd = cmd.split()
-
+
bl.log('*** %s, timeout=%ds' % (' '.join(cmd), timeout))
-
- exit_code = bl.runCommand(cmd, timeout=timeout)
+
+ exit_code = bl.runCommand(cmd, timeout=timeout)
if exit_code:
bl.log('*** error exit code = %d' % exit_code)
if command == 'test':
@@ -121,11 +121,11 @@ def build(commands, config):
break
if command == 'svn':
os.chdir('m2crypto')
-
+
timenow = int(time.time())
-
+
bl.closeLog()
-
+
os.chdir(cwd)
return 'tbox.log', starttime, timenow, status
@@ -145,13 +145,13 @@ tinderbox: buildname: %(buildname)s
tinderbox: errorparser: unix
tinderbox: END
-""" % {'from': config['from'], 'to': config['to'],
+""" % {'from': config['from'], 'to': config['to'],
'starttime': starttime, 'timenow': timenow,
'status': status,
'buildname': config['name']}
-
+
msg += open(logpath).read()
-
+
server = smtplib.SMTP(host=config['server'], port=int(config['port']))
if debug_script:
server.set_debuglevel(1)
@@ -163,8 +163,8 @@ tinderbox: END
if __name__ == '__main__':
- config = load_config()
-
+ config = load_config()
+
commands = ['uname', 'swig', 'cc', 'openssl', 'python', 'clean', 'svn',
'patch', 'build', 'test']
diff --git a/demo/tinderbox/winprocess.py b/demo/tinderbox/winprocess.py
index 4d867fe..d2deead 100644
--- a/demo/tinderbox/winprocess.py
+++ b/demo/tinderbox/winprocess.py
@@ -11,10 +11,10 @@
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
-#
+#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -51,7 +51,7 @@ class AutoHANDLE(HANDLE):
if self.value:
CloseHandle(self)
self.value = 0
-
+
def __del__(self):
self.Close()
@@ -74,7 +74,7 @@ class PROCESS_INFORMATION(Structure):
def __init__(self):
Structure.__init__(self)
-
+
self.cb = sizeof(self)
LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
@@ -127,7 +127,7 @@ class EnvironmentBlock:
for (key, value) in dict.iteritems()]
values.append("")
self._as_parameter_ = LPCWSTR("\0".join(values))
-
+
# CreateProcess()
CreateProcessProto = WINFUNCTYPE(BOOL, # Return type
diff --git a/demo/x509/ca.py b/demo/x509/ca.py
index 60c695f..bf080ee 100644
--- a/demo/x509/ca.py
+++ b/demo/x509/ca.py
@@ -24,7 +24,7 @@ def makePKey(key):
pkey = EVP.PKey()
pkey.assign_rsa(key)
return pkey
-
+
def makeRequest(pkey):
req = X509.Request()
req.set_version(2)
@@ -39,7 +39,7 @@ def makeRequest(pkey):
extstack.push(ext2)
assert(extstack[1].get_name() == 'nsComment')
-
+
req.add_extensions(extstack)
req.sign(pkey, 'sha1')
return req
@@ -82,7 +82,7 @@ def makeCert(req, caPkey):
assert(cert.get_ext('subjectAltName').get_name() == 'subjectAltName')
assert(cert.get_ext_at(0).get_name() == 'subjectAltName')
assert(cert.get_ext_at(0).get_value() == 'DNS:foobar.example.com')
-
+
return cert
def ca():
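
The extension handling exercised in makeRequest() and makeCert() follows one pattern: build an extension, push it onto a stack, attach the stack. A minimal sketch of those calls in isolation:

    from M2Crypto import X509

    req = X509.Request()
    ext = X509.new_extension('subjectAltName', 'DNS:foobar.example.com')
    extstack = X509.X509_Extension_Stack()
    extstack.push(ext)
    req.add_extensions(extstack)
    print(req.as_pem())
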
diff --git a/demo/x509/proxy_destroy.py b/demo/x509/proxy_destroy.py
index c79bd2f..345de88 100644
--- a/demo/x509/proxy_destroy.py
+++ b/demo/x509/proxy_destroy.py
@@ -1,14 +1,14 @@
#!/usr/bin/env python
############################################################################
# Matt Rodriguez, LBNL MKRodriguez@lbl.gov
-############################################################################
+############################################################################
"""
Script that destroys a proxy certificate file by overwriting its contents
before the file is removed
-"""
+"""
import proxylib
-import optparse, os
+import optparse, os
USAGEHELP = "proxy_destroy.py file1 file2 Destroys files listed"
JUNK = "LalalAlalaLalalALalalAlalaLalalALalalAlalaLalalALalalAlalaLalalA"
@@ -18,25 +18,25 @@ def scrub_file(filename):
Overwrite the file with junk, before removing it
"""
s = os.stat(filename)
- proxy_file = file(filename, "w")
- size = s.st_size
- while size > 64:
+ proxy_file = file(filename, "w")
+ size = s.st_size
+ while size > 64:
proxy_file.write(JUNK)
size -= 64
-
+
proxy_file.flush()
proxy_file.close()
- os.remove(filename)
+ os.remove(filename)
def main():
- parser = optparse.OptionParser()
+ parser = optparse.OptionParser()
parser.set_usage(USAGEHELP)
- opts, args = parser.parse_args()
+ opts, args = parser.parse_args()
if len(args) is 0:
- proxy_file = proxylib.get_proxy_filename()
- scrub_file(proxy_file)
-
+ proxy_file = proxylib.get_proxy_filename()
+ scrub_file(proxy_file)
+
for proxy_file in args:
scrub_file(proxy_file)
diff --git a/demo/x509/proxy_info.py b/demo/x509/proxy_info.py
index 90fe1c1..6aa078a 100644
--- a/demo/x509/proxy_info.py
+++ b/demo/x509/proxy_info.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
############################################################################
# Matt Rodriguez, LBNL MKRodriguez@lbl.gov
-############################################################################
+############################################################################
from __future__ import print_function
@@ -22,33 +22,33 @@ def print_info(proxy_cert):
cert = proxy_cert.getcert()
print("Subject: ", cert.get_subject().as_text())
print("Issuer: ", cert.get_issuer().as_text())
- pubkey = cert.get_pubkey()
+ pubkey = cert.get_pubkey()
size = pubkey.size()
print("Strength: ", size * 8)
after = cert.get_not_after()
- after_tuple = time.strptime(str(after),"%b %d %H:%M:%S %Y %Z")
+ after_tuple = time.strptime(str(after),"%b %d %H:%M:%S %Y %Z")
expires = calendar.timegm(after_tuple)
- now = datetime.timedelta(seconds=time.time())
- expires = datetime.timedelta(seconds=expires)
- td = expires - now
+ now = datetime.timedelta(seconds=time.time())
+ expires = datetime.timedelta(seconds=expires)
+ td = expires - now
if td.days < 0:
print("Time left: Proxy has expired.")
- else:
- hours = td.seconds / 3600
- hours += td.days * 24
+ else:
+ hours = td.seconds / 3600
+ hours += td.days * 24
minutes = (td.seconds % 3600) / 60
seconds = (td.seconds % 3600) % 60
print("Time left: %d:%d:%d" % (hours, minutes, seconds))
- fraction = round((float(td.seconds) / float(3600 * 24)), 1)
+ fraction = round((float(td.seconds) / float(3600 * 24)), 1)
print("Days left: ", str(td.days) + str(fraction)[1:])
-
-
-def main():
+
+
+def main():
parser = optparse.OptionParser()
parser.add_option("-f", "--file", dest="filename", help=FILEHELP)
(opts, args) = parser.parse_args()
- filename = opts.filename
- if filename is None:
+ filename = opts.filename
+ if filename is None:
proxyfile = proxylib.get_proxy_filename()
else:
proxyfile = filename
diff --git a/demo/x509/proxy_init.py b/demo/x509/proxy_init.py
index ac75b8e..ae47f57 100644
--- a/demo/x509/proxy_init.py
+++ b/demo/x509/proxy_init.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
############################################################################
-# Matt Rodriguez, LBNL MKRodriguez@lbl.gov
-############################################################################
+# Matt Rodriguez, LBNL MKRodriguez@lbl.gov
+############################################################################
from __future__ import print_function
@@ -9,8 +9,8 @@ from __future__ import print_function
script that generates a proxy certificate
"""
-import proxylib
-import optparse
+import proxylib
+import optparse
import sys
OUTHELP = "Location of the new proxy cert."
@@ -24,18 +24,18 @@ def main():
parser.add_option('-c', '--cert' , dest='cert', help=CERTHELP)
parser.add_option('-k', '--key', dest='key', help=KEYHELP)
parser.add_option('-v', '--valid', dest='valid', help=VALIDHELP)
- parser.add_option('-l', '--limited', action="store_true",
+ parser.add_option('-l', '--limited', action="store_true",
default=False, dest='limited', help=VALIDHELP)
(opts, args) = parser.parse_args()
- kw = {}
+ kw = {}
kw['cert'] = opts.cert
kw['key'] = opts.key
if opts.valid is None:
valid_tuple = (12, 0)
else:
- valid = opts.valid.split(':')
+ valid = opts.valid.split(':')
valid_tuple = tuple(map(int, valid))
- kw['valid'] = valid_tuple
+ kw['valid'] = valid_tuple
kw['full'] = not opts.limited
try:
proxy_factory = proxylib.ProxyFactory(kw)
@@ -48,5 +48,5 @@ def main():
proxy_cert.write(proxylib.get_proxy_filename())
else:
proxy_cert.write(opts.output)
-
+
if __name__ == "__main__": main()
diff --git a/demo/x509/proxylib.py b/demo/x509/proxylib.py
index 95741b7..b5fcd1b 100644
--- a/demo/x509/proxylib.py
+++ b/demo/x509/proxylib.py
@@ -125,7 +125,7 @@ class Proxy:
bio.write(self._issuer.as_pem())
bio.close()
os.chmod(proxypath, 0o600)
-
+
class ProxyFactory:
"""
diff --git a/demo/x509/x509auth.py b/demo/x509/x509auth.py
index 7f7909f..85d3168 100644
--- a/demo/x509/x509auth.py
+++ b/demo/x509/x509auth.py
@@ -136,7 +136,7 @@ class CertHandler:
self.CurrentObj['EvpPKey'] = M2Crypto.EVP.PKey ( md='sha1' )
self.CurrentObj['EvpPKey'].assign_rsa ( self.CurrentObj['RsaPKey'] )
self.CurrentObj['RsaPubKey'] = M2Crypto.RSA.new_pub_key( self.CurrentObj['RsaPKey'].pub () )
-
+
def CertFromPemRepr ( self, ObjName=None, PemCert=None ):
if ObjName in self.PemMap:
UsedPemCert = self.KeyEnv['X509Cert'][0] + '\n' + string.join( self.PemMap[ObjName]['X509Cert'], '\n' ) + '\n' + self.KeyEnv['X509Cert'][1] + '\n'
@@ -174,7 +174,7 @@ class CertHandler:
else:
self.CurrentObj = self.ObjMap[ ObjName ]
return ObjName
-
+
def ServerCert ( self ):
@@ -227,7 +227,7 @@ class CertHandler:
self.CurrentObj = self.ObjMap[ObjName]
if 'Subject' not in self.CurrentObj:
self.CurrentObj['Subject'] = { 'organizationalUnitName' : 'security', 'commonName' : ObjName, 'emailAddress' : 'ioclient@' + ObjName }
- # new pkey
+ # new pkey
self.CreatePKey ()
# new request
self.CreateReq ( SignEvpPKey=self.ObjMap['CA']['EvpPKey'] )
@@ -275,7 +275,7 @@ class CertHandler:
else:
return False
-
+
#--------------------------------
@@ -427,7 +427,7 @@ class CertHandler:
#--------------------------------
def ClientInit ( self, ObjName=None ):
"""
- generating AuthString
+ generating AuthString
Nonce messagedigest 'sha1', encrypted with own instance private key
Cert own instance X509 cert, PEM encoded
any linefeed characters stripped out of the base64 code
@@ -521,7 +521,7 @@ class CertHandler:
except:
return False
- ( NonceServer, NonceBounce, ServerCert ) = re.split(':', PemBaseString )
+ ( NonceServer, NonceBounce, ServerCert ) = re.split(':', PemBaseString )
NoncePubServer = base64.decodestring( NonceServer ) # NonceServer
NoncePubBounce = base64.decodestring( NonceBounce ) # NonceBounce
PemServerCert = base64.decodestring( ServerCert ) # PemServerCert
@@ -531,7 +531,7 @@ class CertHandler:
except:
return False
- # verify X509 cert
+ # verify X509 cert
EvpPKey = self.ObjMap['CA']['EvpPKey']
if dir(EvpPKey).count('_ptr'):
Result = X509Cert.verify ( EvpPKey._ptr() )
@@ -558,7 +558,7 @@ class CertHandler:
- def ReplyInit ( self, ReplyObjName, ReplyBounce ):
+ def ReplyInit ( self, ReplyObjName, ReplyBounce ):
NonceDecrypted = self.CreateNonce ()
NoncePubInit = re.sub('\012', '', base64.encodestring( self.NonceEncryptPublic ( NonceDecrypted, RsaPubKey=self.ObjMap[ ReplyObjName ]['RsaPubKey'] )) )
NoncePubBounce = re.sub('\012', '', base64.encodestring( self.NonceEncryptPublic ( ReplyBounce, RsaPubKey=self.ObjMap[ ReplyObjName ]['RsaPubKey'] )) )
@@ -600,7 +600,7 @@ class CertHandler:
#-------------------------------------------------------------------------------------------
- # TEST
+ # TEST
#-------------------------------------------------------------------------------------------
def CreateCAForContainer ( self ):
self.CreateCert ()
@@ -650,15 +650,15 @@ class CertHandler:
#-----------------------------------------------------------------------------------------------
# MAIN
#
-# x509auth.py --ca
+# x509auth.py --ca
# will create a file "PyReprPem.txt" in the current directory
# append the contents of the file to the CertContainer in this script
#
-# x509auth.py --cert <ObjName>
+# x509auth.py --cert <ObjName>
# creates a file "PyReprPem.txt" in the current directory
# append the contents of the file to the CertContainer in this script
#
-# x509auth.py --test
+# x509auth.py --test
# running authentication tests with bounced nonce
#
#-----------------------------------------------------------------------------------------------
diff --git a/tests/alltests.py b/tests/alltests.py
index 41334e8..bf297a0 100644
--- a/tests/alltests.py
+++ b/tests/alltests.py
@@ -6,7 +6,7 @@ def suite():
from M2Crypto import m2
import os
import unittest
-
+
def my_import(name):
# See http://docs.python.org/lib/built-in-funcs.html#l2h-6
components = name.split('.')
@@ -61,13 +61,13 @@ def dump_garbage():
print('\nGarbage:')
gc.collect()
if len(gc.garbage):
-
+
print('\nLeaked objects:')
for x in gc.garbage:
s = str(x)
if len(s) > 77: s = s[:73]+'...'
print(type(x), '\n ', s)
-
+
print('There were %d leaks.' % len(gc.garbage))
else:
print('Python garbage collector did not detect any leaks.')
@@ -76,17 +76,17 @@ def dump_garbage():
def runall(report_leaks=0):
report_leaks = report_leaks
-
+
if report_leaks:
import gc
gc.enable()
gc.set_debug(gc.DEBUG_LEAK & ~gc.DEBUG_SAVEALL)
-
+
import os, unittest
from M2Crypto import Rand
-
+
try:
- Rand.load_file('tests/randpool.dat', -1)
+ Rand.load_file('tests/randpool.dat', -1)
unittest.TextTestRunner(verbosity=2).run(suite())
Rand.save_file('tests/randpool.dat')
finally:
@@ -96,7 +96,7 @@ def runall(report_leaks=0):
if report_leaks:
dump_garbage()
-
+
if __name__ == '__main__':
runall(0)
diff --git a/tests/test_asn1.py b/tests/test_asn1.py
index dac601a..7322302 100644
--- a/tests/test_asn1.py
+++ b/tests/test_asn1.py
@@ -25,7 +25,7 @@ class ASN1TestCase(unittest.TestCase):
text = 'hello there'
# In RFC2253 format:
# #040B68656C6C6F207468657265
- # h e l l o t h e r e
+ # h e l l o t h e r e
m2.asn1_string_set(asn1ptr, text)
a = ASN1.ASN1_String(asn1ptr, 1)
self.assertEqual(a.as_text(), 'hello there', a.as_text())
@@ -40,7 +40,7 @@ class ASN1TestCase(unittest.TestCase):
def test_UTCTIME(self):
asn1 = ASN1.ASN1_UTCTIME()
self.assertEqual(str(asn1), 'Bad time value')
-
+
format = '%b %d %H:%M:%S %Y GMT'
utcformat = '%y%m%d%H%M%SZ'
@@ -50,13 +50,13 @@ class ASN1TestCase(unittest.TestCase):
t1 = time.strptime(str(asn1), format)
t2 = time.strptime(s, utcformat)
self.assertEqual(t1, t2)
-
+
asn1.set_time(500)
#assert str(asn1) == 'Jan 1 00:08:20 1970 GMT'
t1 = time.strftime(format, time.strptime(str(asn1), format))
t2 = time.strftime(format, time.gmtime(500))
self.assertEqual(t1, t2)
-
+
t = long(time.time()) + time.timezone
asn1.set_time(t)
t1 = time.strftime(format, time.strptime(str(asn1), format))
@@ -81,13 +81,13 @@ class ASN1TestCase(unittest.TestCase):
t2 = str(asn1)
self.assertEqual(t1, t2)
self.assertEqual(str(udt), str(asn1.get_datetime()))
-
+
dt = dt.astimezone(ASN1.UTC)
asn1.set_datetime(dt)
t2 = str(asn1)
self.assertEqual(t1, t2)
self.assertEqual(str(udt), str(asn1.get_datetime()))
-
+
def suite():
return unittest.makeSuite(ASN1TestCase)
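The UTCTIME test round-trips epoch seconds through ASN1_UTCTIME and compares against time.gmtime(). Condensed to its core, with the format string copied from the test:

    import time
    from M2Crypto import ASN1

    asn1 = ASN1.ASN1_UTCTIME()
    asn1.set_time(500)                    # seconds since the epoch
    fmt = '%b %d %H:%M:%S %Y GMT'         # how str(asn1) renders
    assert time.strptime(str(asn1), fmt)[:6] == time.gmtime(500)[:6]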
diff --git a/tests/test_authcookie.py b/tests/test_authcookie.py
index 53bffda..e5c00ce 100644
--- a/tests/test_authcookie.py
+++ b/tests/test_authcookie.py
@@ -59,7 +59,7 @@ class AuthCookieTestCase(unittest.TestCase):
def test_is_valid(self):
c = self.jar.makeCookie(self.exp, self.data)
self.assertTrue(self.jar.isGoodCookie(c))
-
+
def test_is_invalid_expired(self):
t = self.exp - 7200
c = self.jar.makeCookie(t, self.data)
@@ -146,7 +146,7 @@ def suite():
if __name__ == '__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
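For context, the jar in the fixture above is an AuthCookie jar: makeCookie() binds an expiry time and a payload under an HMAC, and isGoodCookie() rejects expired or tampered cookies. A minimal sketch, assuming the AuthCookieJar constructor takes no arguments:

    import time
    from M2Crypto.AuthCookie import AuthCookieJar

    jar = AuthCookieJar()              # generates a random MAC key
    exp = time.time() + 3600           # valid for one hour
    c = jar.makeCookie(exp, 'sid=42')
    assert jar.isGoodCookie(c)         # False once exp has passed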
diff --git a/tests/test_bio.py b/tests/test_bio.py
index b3459e2..539e0f6 100644
--- a/tests/test_bio.py
+++ b/tests/test_bio.py
@@ -31,7 +31,7 @@ class CipherStreamTestCase(unittest.TestCase):
cf.write_close()
cf.close()
xxx = mem.read()
-
+
# Decrypt.
mem = BIO.MemoryBuffer(xxx)
cf = BIO.CipherStream(mem)
@@ -40,17 +40,17 @@ class CipherStreamTestCase(unittest.TestCase):
data2 = cf.read()
cf.close()
self.assertFalse(cf.readable())
-
+
with self.assertRaises(IOError):
cf.read()
with self.assertRaises(IOError):
cf.readline()
with self.assertRaises(IOError):
cf.readlines()
-
+
self.assertEqual(data, data2,
'%s algorithm cipher test failed' % algo)
-
+
def test_ciphers(self):
ciphers=[
'des_ede_ecb', 'des_ede_cbc', 'des_ede_cfb', 'des_ede_ofb',
@@ -58,7 +58,7 @@ class CipherStreamTestCase(unittest.TestCase):
'aes_128_ecb', 'aes_128_cbc', 'aes_128_cfb', 'aes_128_ofb',
'aes_192_ecb', 'aes_192_cbc', 'aes_192_cfb', 'aes_192_ofb',
'aes_256_ecb', 'aes_256_cbc', 'aes_256_cfb', 'aes_256_ofb']
- nonfips_ciphers=['bf_ecb', 'bf_cbc', 'bf_cfb', 'bf_ofb',
+ nonfips_ciphers=['bf_ecb', 'bf_cbc', 'bf_cfb', 'bf_ofb',
#'idea_ecb', 'idea_cbc', 'idea_cfb', 'idea_ofb',
'cast5_ecb', 'cast5_cbc', 'cast5_cfb', 'cast5_ofb',
#'rc5_ecb', 'rc5_cbc', 'rc5_cfb', 'rc5_ofb',
@@ -75,10 +75,10 @@ class CipherStreamTestCase(unittest.TestCase):
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CipherStreamTestCase))
- return suite
+ return suite
if __name__ == '__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
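The CipherStream hunk shows only the tail of the round-trip; the setup step it elides is set_cipher(). A sketch of the whole pattern, assuming the set_cipher(algo, key, iv, op) signature from M2Crypto.BIO:

    from M2Crypto import BIO

    def roundtrip(algo, key, iv, data):
        mem = BIO.MemoryBuffer()
        cf = BIO.CipherStream(mem)        # filter that writes into mem
        cf.set_cipher(algo, key, iv, 1)   # op 1 = encrypt
        cf.write(data)
        cf.write_close()
        cf.close()
        ciphertext = mem.read()

        mem = BIO.MemoryBuffer(ciphertext)
        cf = BIO.CipherStream(mem)        # filter that reads from mem
        cf.set_cipher(algo, key, iv, 0)   # op 0 = decrypt
        plaintext = cf.read()
        cf.close()
        return plaintext

    assert roundtrip('aes_128_cbc', 'k' * 16, 'i' * 16, 'data') == 'data'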
diff --git a/tests/test_bio_file.py b/tests/test_bio_file.py
index 9912c97..91504ec 100644
--- a/tests/test_bio_file.py
+++ b/tests/test_bio_file.py
@@ -76,7 +76,7 @@ def suite():
except ImportError:
pass
return unittest.makeSuite(FileTestCase)
-
+
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
diff --git a/tests/test_bio_iobuf.py b/tests/test_bio_iobuf.py
index 8dfe75f..e5fd919 100644
--- a/tests/test_bio_iobuf.py
+++ b/tests/test_bio_iobuf.py
@@ -40,7 +40,7 @@ class IOBufferTestCase(unittest.TestCase):
io = IOBuffer(mb)
out = io.read(chunk)
self.assertEqual(out, self.data[:chunk])
-
+
def test_read_more_than(self):
chunk = len(self.data) + 8
mb = MemoryBuffer(self.data)
@@ -88,7 +88,7 @@ class IOBufferTestCase(unittest.TestCase):
def suite():
return unittest.makeSuite(IOBufferTestCase)
-
+
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
diff --git a/tests/test_bio_membuf.py b/tests/test_bio_membuf.py
index f2d53fb..46931cf 100644
--- a/tests/test_bio_membuf.py
+++ b/tests/test_bio_membuf.py
@@ -38,7 +38,7 @@ class MemoryBufferTestCase(unittest.TestCase):
out = mb.read(chunk)
self.assertEqual(out, self.data[:chunk])
self.assertEqual(len(mb), (len(self.data)) - chunk)
-
+
def test_read_more_than(self):
chunk = len(self.data) + 8
mb = MemoryBuffer(self.data)
@@ -65,7 +65,7 @@ class MemoryBufferTestCase(unittest.TestCase):
def suite():
return unittest.makeSuite(MemoryBufferTestCase)
-
+
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
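MemoryBuffer reads are destructive, which is what the len() assertion above checks:

    from M2Crypto import BIO

    mb = BIO.MemoryBuffer('abcdef')
    assert mb.read(2) == 'ab'    # read() consumes from the front
    assert len(mb) == 4          # only unread bytes remain
    assert mb.read() == 'cdef'   # no argument drains the buffer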
diff --git a/tests/test_bn.py b/tests/test_bn.py
index 6b4b929..a7eb4f5 100755
--- a/tests/test_bn.py
+++ b/tests/test_bn.py
@@ -22,7 +22,7 @@ class BNTestCase(unittest.TestCase):
# defaults
for x in range(loops):
r8 = BN.rand(8)
-
+
# top
for x in range(loops):
r8 = BN.rand(8, top=0)
@@ -30,7 +30,7 @@ class BNTestCase(unittest.TestCase):
for x in range(loops):
r8 = BN.rand(8, top=1)
assert r8 & 192
-
+
# bottom
for x in range(loops):
r8 = BN.rand(8, bottom=1)
@@ -46,38 +46,38 @@ class BNTestCase(unittest.TestCase):
r256 = BN.rand(256, top=0)
r512 = BN.rand(512, top=0)
assert r8 < r16 < r32 < r64 < r128 < r256 < r512 < (r512 + 1)
-
+
def test_rand_range(self):
# small range
for x in range(loops):
r = BN.rand_range(1)
self.assertEqual(r, 0)
-
+
for x in range(loops):
r = BN.rand_range(4)
assert 0 <= r < 4
-
+
# large range
r512 = BN.rand(512, top=0)
for x in range(loops):
r = BN.rand_range(r512)
assert 0 <= r < r512
-
+
def test_randfname(self):
m = re.compile('^[a-zA-Z0-9]{8}$')
for x in range(loops):
r = BN.randfname(8)
assert m.match(r)
-
+
def suite():
return unittest.makeSuite(BNTestCase)
if __name__ == '__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
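The top and bottom keywords steer the extreme bits of the generated number, which is what the bitmask asserts above verify:

    from M2Crypto import BN

    assert BN.rand(8, top=0) & 128     # high bit set: 128 <= r <= 255
    assert BN.rand(8, top=1) & 192     # top two bits set: 192 <= r <= 255
    assert BN.rand(8, bottom=1) & 1    # low bit set: r is odd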
diff --git a/tests/test_dh.py b/tests/test_dh.py
index 2d11dbf..1657a0c 100644
--- a/tests/test_dh.py
+++ b/tests/test_dh.py
@@ -16,10 +16,10 @@ class DHTestCase(unittest.TestCase):
params = 'tests/dhparam.pem'
def genparam_callback(self, *args):
- pass
+ pass
def genparam_callback2(self):
- pass
+ pass
def test_init_junk(self):
with self.assertRaises(TypeError):
@@ -66,7 +66,7 @@ def suite():
if __name__=='__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
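The fixture loads pre-generated parameters because DH parameter generation is slow; key generation itself is cheap. A minimal sketch (compute_key() with the peer's public value would complete the exchange):

    from M2Crypto import DH

    dh = DH.load_params('tests/dhparam.pem')   # reuse canned p and g
    dh.gen_key()                               # fresh private/public pair
    # shared = dh.compute_key(peer_public_value)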
diff --git a/tests/test_ec_curves.py b/tests/test_ec_curves.py
index 29022f8..b0b3f41 100644
--- a/tests/test_ec_curves.py
+++ b/tests/test_ec_curves.py
@@ -2,20 +2,20 @@
# XXX memory leaks
"""
Unit tests for M2Crypto.EC, the curves
-
+
There are several ways one could unit-test elliptic curves
- but we are going to only validate that we are using the
+ but we are going to only validate that we are using the
OpenSSL curve and that it works with ECDSA. We will assume
- OpenSSL has validated the curves themselves.
-
- Also, some curves are shorter than a SHA-1 digest of 160
+ OpenSSL has validated the curves themselves.
+
+ Also, some curves are shorter than a SHA-1 digest of 160
bits. To keep the testing simple, we will take advantage
- of ECDSA's ability to sign any digest length and create a
+ of ECDSA's ability to sign any digest length and create a
digest string of only 48 bits. Remember we are testing our
ability to access the curve, not ECDSA itself.
-
+
Copyright (c) 2006 Larry Bugbee. All rights reserved.
-
+
"""
#import sha
@@ -42,7 +42,7 @@ curves = [
('secp256k1', 256),
('secp384r1', 384),
('secp521r1', 521),
-
+
('sect113r1', 113),
('sect113r2', 113),
('sect131r1', 131),
@@ -61,7 +61,7 @@ curves = [
('sect409r1', 409),
('sect571k1', 571),
('sect571r1', 571),
-
+
('X9_62_prime192v1', 192),
('X9_62_prime192v2', 192),
('X9_62_prime192v3', 192),
@@ -69,7 +69,7 @@ curves = [
('X9_62_prime239v2', 239),
('X9_62_prime239v3', 239),
('X9_62_prime256v1', 256),
-
+
('X9_62_c2pnb163v1', 163),
('X9_62_c2pnb163v2', 163),
('X9_62_c2pnb163v3', 163),
@@ -86,7 +86,7 @@ curves = [
('X9_62_c2tnb359v1', 359),
('X9_62_c2pnb368w1', 368),
('X9_62_c2tnb431r1', 431),
-
+
('wap_wsg_idm_ecid_wtls1', 113),
('wap_wsg_idm_ecid_wtls3', 163),
('wap_wsg_idm_ecid_wtls4', 113),
@@ -100,11 +100,11 @@ curves = [
('wap_wsg_idm_ecid_wtls12', 224),
]
-# The following two curves, according to OpenSSL, have a
-# "Questionable extension field!" and are not supported by
+# The following two curves, according to OpenSSL, have a
+# "Questionable extension field!" and are not supported by
# the OpenSSL inverse function. ECError: no inverse.
-# As such they cannot be used for signing. They might,
-# however, be usable for encryption but that has not
+# As such they cannot be used for signing. They might,
+# however, be usable for encryption but that has not
# been tested. Until their usefulness can be established,
# they are not supported at this time.
#curves2 = [
@@ -114,8 +114,8 @@ curves = [
class ECCurveTests(unittest.TestCase):
#data = sha.sha('Kilroy was here!').digest() # 160 bits
- data = "digest" # keep short (48 bits) so lesser curves
- # will work... ECDSA requires curve be
+ data = "digest" # keep short (48 bits) so lesser curves
+ # will work... ECDSA requires curve be
# equal or longer than digest
def genkey(self, curveName, curveLen):
@@ -127,11 +127,11 @@ class ECCurveTests(unittest.TestCase):
'check_key() failure for "%s"' % curveName)
return ec
-# def check_ec_curves_genkey(self):
+# def check_ec_curves_genkey(self):
# for curveName, curveLen in curves2:
# self.genkey(curveName, curveLen)
#
-# self.assertRaises(AttributeError, self.genkey,
+# self.assertRaises(AttributeError, self.genkey,
# 'nosuchcurve', 1)
def sign_verify_ecdsa(self, curveName, curveLen):
@@ -158,6 +158,6 @@ def suite():
if __name__ == '__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
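Every curve in the table is exercised with the same short-digest ECDSA pattern the docstring describes. A condensed sketch, assuming the NID constant for each named curve follows the EC.NID_sect233k1 pattern seen elsewhere in these tests:

    from M2Crypto import EC

    ec = EC.gen_params(EC.NID_secp384r1)
    ec.gen_key()
    assert ec.check_key() == 1
    r, s = ec.sign_dsa('digest')           # ECDSA signs any digest length
    assert ec.verify_dsa('digest', r, s)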
diff --git a/tests/test_ecdh.py b/tests/test_ecdh.py
index 5440170..073af1d 100644
--- a/tests/test_ecdh.py
+++ b/tests/test_ecdh.py
@@ -46,7 +46,7 @@ def suite():
if __name__=='__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
diff --git a/tests/test_ecdsa.py b/tests/test_ecdsa.py
index 3747a26..d75ae82 100644
--- a/tests/test_ecdsa.py
+++ b/tests/test_ecdsa.py
@@ -65,7 +65,7 @@ class ECDSATestCase(unittest.TestCase):
ec2 = EC.load_pub_key(self.pubkey)
assert ec2.verify_dsa(self.data, r, s)
assert not ec2.verify_dsa(self.data, s, r)
-
+
def test_genparam(self):
ec = EC.gen_params(EC.NID_sect233k1)
self.assertEqual(len(ec), 233)
@@ -73,10 +73,10 @@ class ECDSATestCase(unittest.TestCase):
def suite():
return unittest.makeSuite(ECDSATestCase)
-
+
if __name__ == '__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
diff --git a/tests/test_engine.py b/tests/test_engine.py
index edcd480..f92b51b 100644
--- a/tests/test_engine.py
+++ b/tests/test_engine.py
@@ -28,11 +28,11 @@ class EngineTestCase(unittest.TestCase):
e = Engine.Engine('openssl')
self.assertEqual(e.get_name(), 'Software engine support')
self.assertEqual(e.get_id(), 'openssl')
-
+
def test_by_id_dynamic(self):
Engine.load_dynamic()
Engine.Engine('dynamic')
-
+
def test_load_private(self):
Engine.load_openssl()
e = Engine.Engine('openssl')
@@ -48,7 +48,7 @@ class EngineTestCase(unittest.TestCase):
def suite():
return unittest.makeSuite(EngineTestCase)
-
+
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
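Engines must be loaded before they can be looked up by id, which is what the two test methods above do for the builtin and dynamic engines:

    from M2Crypto import Engine

    Engine.load_openssl()
    e = Engine.Engine('openssl')
    assert e.get_id() == 'openssl'
    assert e.get_name() == 'Software engine support'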
diff --git a/tests/test_evp.py b/tests/test_evp.py
index 213616e..ed5eeaf 100644
--- a/tests/test_evp.py
+++ b/tests/test_evp.py
@@ -22,20 +22,20 @@ from fips import fips_mode
class EVPTestCase(unittest.TestCase):
def _gen_callback(self, *args):
pass
-
+
def _pass_callback(self, *args):
return 'foobar'
-
+
def _assign_rsa(self):
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa, capture=0) # capture=1 should cause crash
return rsa
-
+
def test_assign(self):
rsa = self._assign_rsa()
rsa.check_key()
-
+
def test_pem(self):
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
@@ -45,20 +45,20 @@ class EVPTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
pkey.as_pem(cipher='noXX$$%%suchcipher',
callback=self._pass_callback)
-
+
def test_as_der(self):
"""
- Test DER encoding the PKey instance after assigning
+ Test DER encoding the PKey instance after assigning
an RSA key to it.
"""
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa)
- der_blob = pkey.as_der()
+ der_blob = pkey.as_der()
#A quick but not thorough sanity check
self.assertEqual(len(der_blob), 160)
-
-
+
+
def test_MessageDigest(self):
with self.assertRaises(ValueError):
EVP.MessageDigest('sha513')
@@ -68,7 +68,7 @@ class EVPTestCase(unittest.TestCase):
def test_as_der_capture_key(self):
"""
- Test DER encoding the PKey instance after assigning
+ Test DER encoding the PKey instance after assigning
an RSA key to it. Have the PKey instance capture the RSA key.
"""
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
@@ -82,9 +82,9 @@ class EVPTestCase(unittest.TestCase):
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
pkey = EVP.PKey()
pkey.assign_rsa(rsa)
- size = pkey.size()
+ size = pkey.size()
self.assertEqual(size, 128)
-
+
def test_hmac(self):
self.assertEqual(util.octx_to_num(EVP.hmac('key', 'data')),
92800611269186718152770431077867383126636491933,
@@ -96,7 +96,7 @@ class EVPTestCase(unittest.TestCase):
self.assertEqual(util.octx_to_num(EVP.hmac('key', 'data', algo='ripemd160')),
1176807136224664126629105846386432860355826868536,
util.octx_to_num(EVP.hmac('key', 'data', algo='ripemd160')))
-
+
if m2.OPENSSL_VERSION_NUMBER >= 0x90800F:
self.assertEqual(util.octx_to_num(EVP.hmac('key', 'data', algo='sha224')),
2660082265842109788381286338540662430962855478412025487066970872635,
@@ -110,7 +110,7 @@ class EVPTestCase(unittest.TestCase):
self.assertEqual(util.octx_to_num(EVP.hmac('key', 'data', algo='sha512')),
3160730054100700080556942280820129108466291087966635156623014063982211353635774277148932854680195471287740489442390820077884317620321797003323909388868696,
util.octx_to_num(EVP.hmac('key', 'data', algo='sha512')))
-
+
with self.assertRaises(ValueError):
EVP.hmac('key', 'data', algo='sha513')
@@ -122,7 +122,7 @@ class EVPTestCase(unittest.TestCase):
rsa = RSA.gen_key(1024, 3, callback=self._gen_callback)
self.assertIsInstance(rsa, RSA.RSA)
pkey = EVP.PKey()
- pkey.assign_rsa(rsa)
+ pkey.assign_rsa(rsa)
rsa2 = pkey.get_rsa()
self.assertIsInstance(rsa2, RSA.RSA_pub)
self.assertEqual(rsa.e, rsa2.e)
@@ -132,14 +132,14 @@ class EVPTestCase(unittest.TestCase):
assert pem
assert pem2
self.assertNotEqual(pem, pem2)
-
+
message = "This is the message string"
digest = sha.sha(message).digest()
self.assertEqual(rsa.sign(digest), rsa2.sign(digest))
-
+
rsa3 = RSA.gen_key(1024, 3, callback=self._gen_callback)
self.assertNotEqual(rsa.sign(digest), rsa3.sign(digest))
-
+
def test_get_rsa_fail(self):
"""
Testing trying to retrieve the RSA key from the PKey instance
@@ -159,28 +159,28 @@ class EVPTestCase(unittest.TestCase):
mod = pkey.get_modulus()
self.assertGreater(len(mod), 0, mod)
self.assertEqual(len(mod.strip('0123456789ABCDEF')), 0)
-
+
def test_verify_final(self):
from M2Crypto import X509
pkey = EVP.load_key('tests/signer_key.pem')
pkey.sign_init()
pkey.sign_update('test message')
sig = pkey.sign_final()
-
+
# OK
x509 = X509.load_cert('tests/signer.pem')
pubkey = x509.get_pubkey()
pubkey.verify_init()
pubkey.verify_update('test message')
self.assertEqual(pubkey.verify_final(sig), 1)
-
+
# wrong cert
x509 = X509.load_cert('tests/x509.pem')
pubkey = x509.get_pubkey()
pubkey.verify_init()
pubkey.verify_update('test message')
self.assertEqual(pubkey.verify_final(sig), 0)
-
+
# wrong message
x509 = X509.load_cert('tests/signer.pem')
pubkey = x509.get_pubkey()
@@ -219,23 +219,23 @@ class CipherTestCase(unittest.TestCase):
enc = 1
dec = 0
otxt='against stupidity the gods themselves contend in vain'
-
+
k=EVP.Cipher(algo, 'goethe','12345678', enc, 1, 'sha1', 'saltsalt', 5)
pbuf=cStringIO.StringIO(otxt)
cbuf=cStringIO.StringIO()
ctxt=self.cipher_filter(k, pbuf, cbuf)
pbuf.close()
cbuf.close()
-
+
j=EVP.Cipher(algo, 'goethe','12345678', dec, 1, 'sha1', 'saltsalt', 5)
pbuf=cStringIO.StringIO()
cbuf=cStringIO.StringIO(ctxt)
ptxt=self.cipher_filter(j, cbuf, pbuf)
pbuf.close()
cbuf.close()
-
+
self.assertEqual(otxt, ptxt, '%s algorithm cipher test failed' % algo)
-
+
def test_ciphers(self):
ciphers=[
'des_ede_ecb', 'des_ede_cbc', 'des_ede_cfb', 'des_ede_ofb',
@@ -261,7 +261,7 @@ class CipherTestCase(unittest.TestCase):
self.try_algo(i)
except ValueError as e:
if str(e) != "('unknown cipher', 'idea_ecb')":
- raise
+ raise
# rc5 might not be compiled in
ciphers=['rc5_ecb', 'rc5_cbc', 'rc5_cfb', 'rc5_ofb']
@@ -270,11 +270,11 @@ class CipherTestCase(unittest.TestCase):
self.try_algo(i)
except ValueError as e:
if str(e) != "('unknown cipher', 'rc5_ecb')":
- raise
+ raise
with self.assertRaises(ValueError):
self.try_algo('nosuchalgo4567')
-
+
def test_AES(self):
enc = 1
dec = 0
@@ -287,7 +287,7 @@ class CipherTestCase(unittest.TestCase):
'PT': 'Single block msg',
'CT': 'e353779c1079aeb82708942dbe77181a',
},
-
+
#Case #2: Encrypting 32 bytes (2 blocks) using AES-CBC with 128-bit key
{
'KEY': 'c286696d887c9aa0611bbb3e2025a45a',
@@ -295,7 +295,7 @@ class CipherTestCase(unittest.TestCase):
'PT': unhexlify('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f'),
'CT': 'd296cd94c2cccf8a3a863028b5e1dc0a7586602d253cfff91b8266bea6d61ab1',
},
-
+
#Case #3: Encrypting 48 bytes (3 blocks) using AES-CBC with 128-bit key
{
'KEY': '6c3ea0477630ce21a2ce334aa746c2cd',
@@ -304,7 +304,7 @@ class CipherTestCase(unittest.TestCase):
'CT': 'd0a02b3836451753d493665d33f0e8862dea54cdb293abc7506939276772f8d5021c19216bad525c8579695d83ba2684',
},
]
-
+
# Test with padding
for test in tests:
# encrypt
@@ -366,7 +366,7 @@ class CipherTestCase(unittest.TestCase):
pbuf.close()
cbuf.close()
return plaintext
-
+
with self.assertRaises(EVP.EVPError):
decrypt(
unhexlify('941d3647a642fab26d9f99a195098b91252c652d07235b9db35758c401627711724637648e45cad0f1121751a1240a4134998cfdf3c4a95c72de2a2444de3f9e40d881d7f205630b0d8ce142fdaebd8d7fbab2aea3dc47f5f29a0e9b55aae59222671d8e2877e1fb5cd8ef1c427027e0'),
@@ -383,21 +383,21 @@ class CipherTestCase(unittest.TestCase):
class PBKDF2TestCase(unittest.TestCase):
def test_rfc3211_test_vectors(self):
from binascii import hexlify, unhexlify
-
+
password = 'password'
salt = unhexlify('12 34 56 78 78 56 34 12'.replace(' ', ''))
iter = 5
keylen = 8
ret = EVP.pbkdf2(password, salt, iter, keylen)
self.assertEqual(hexlify(ret), 'D1 DA A7 86 15 F2 87 E6'.replace(' ', '').lower())
-
+
password = 'All n-entities must communicate with other n-entities via n-1 entiteeheehees'
salt = unhexlify('12 34 56 78 78 56 34 12'.replace(' ', ''))
iter = 500
keylen = 16
ret = EVP.pbkdf2(password, salt, iter, keylen)
self.assertEqual(hexlify(ret), '6A 89 70 BF 68 C9 2C AE A8 4A 8D F2 85 10 85 86'.replace(' ', '').lower())
-
+
class HMACTestCase(unittest.TestCase):
data1=['', 'More text test vectors to stuff up EBCDIC machines :-)', \
@@ -436,7 +436,7 @@ class HMACTestCase(unittest.TestCase):
digest = hmac.final()
chain.append((digest, i))
return chain
-
+
def make_chain_hmac(self, key, start, input, algo='sha1'):
from M2Crypto.EVP import hmac
chain = []
@@ -446,7 +446,7 @@ class HMACTestCase(unittest.TestCase):
digest = hmac(digest, repr(i), algo)
chain.append((digest, i))
return chain
-
+
def verify_chain_hmac(self, key, start, chain, algo='sha1'):
from M2Crypto.EVP import hmac
digest = hmac(key, repr(start), algo)
@@ -458,7 +458,7 @@ class HMACTestCase(unittest.TestCase):
if digest != d:
return 0
return 1
-
+
def verify_chain_HMAC(self, key, start, chain, algo='sha1'):
hmac = EVP.HMAC(key, algo)
hmac.update(repr(start))
@@ -473,7 +473,7 @@ class HMACTestCase(unittest.TestCase):
if digest != d:
return 0
return 1
-
+
def test_complicated(self):
make_chain = self.make_chain_hmac
verify_chain = self.verify_chain_hmac
@@ -491,10 +491,10 @@ def suite():
suite.addTest(unittest.makeSuite(CipherTestCase))
suite.addTest(unittest.makeSuite(PBKDF2TestCase))
suite.addTest(unittest.makeSuite(HMACTestCase))
- return suite
+ return suite
if __name__ == '__main__':
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
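The PBKDF2 case pins EVP.pbkdf2() to the RFC 3211 test vectors; the first vector, condensed:

    from binascii import hexlify, unhexlify
    from M2Crypto import EVP

    salt = unhexlify('1234567878563412')
    key = EVP.pbkdf2('password', salt, 5, 8)    # iterations=5, keylen=8
    assert hexlify(key) == 'd1daa78615f287e6'   # RFC 3211 vector 1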
diff --git a/tests/test_obj.py b/tests/test_obj.py
index 9e8239a..731a326 100644
--- a/tests/test_obj.py
+++ b/tests/test_obj.py
@@ -55,7 +55,7 @@ class ObjectsTestCase(unittest.TestCase):
m2.obj_obj2nid(m2.obj_txt2obj("CN", 0)), "obj2nid")
self.assertEqual(m2.obj_txt2nid("__unknown"),
0, "__unknown")
-
+
def test_tuple2tuple(self):
tup = ("CN", "someCommonName")
tup1 = x509_name_entry2tuple(tuple2x509_name_entry(tup))
@@ -67,7 +67,7 @@ class ObjectsTestCase(unittest.TestCase):
def test_unknown(self):
with self.assertRaises(ValueError):
tuple2x509_name_entry(("__unknown", "_"))
-
+
def test_x509_name(self):
n = X509.X509_Name()
n.C = 'US' # It seems this actually needs to be a real 2-letter country code
@@ -80,7 +80,7 @@ class ObjectsTestCase(unittest.TestCase):
n.serialNumber = '1234'
n.SN = 'surname'
n.GN = 'given name'
-
+
n.givenName = 'name given'
self.assertEqual(len(n), 11, len(n))
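Each attribute assignment on an X509_Name appends a name entry, so len(n) counts assignments rather than distinct fields:

    from M2Crypto import X509

    n = X509.X509_Name()
    n.C = 'US'          # must be a real two-letter country code
    n.CN = 'example'    # every assignment appends an entry
    assert len(n) == 2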
diff --git a/tests/test_pgp.py b/tests/test_pgp.py
index 3a2addf..60de55a 100644
--- a/tests/test_pgp.py
+++ b/tests/test_pgp.py
@@ -22,7 +22,7 @@ class PGPTestCase(unittest.TestCase):
s1 = EVP.MessageDigest('sha1')
s1.update(daft_pkt)
s1f = repr(s1.final())
-
+
buf = StringIO(daft_pkt)
ps = PGP.packet_stream(buf)
dift_pkt = ps.read()
diff --git a/tests/test_smime.py b/tests/test_smime.py
index f7b86a5..3879533 100644
--- a/tests/test_smime.py
+++ b/tests/test_smime.py
@@ -19,7 +19,7 @@ class SMIMETestCase(unittest.TestCase):
# XXX Ugly, but not sure what would be better
self.signed = self.test_sign()
self.encrypted = self.test_encrypt()
-
+
def test_load_bad(self):
s = SMIME.SMIME()
with self.assertRaises(EVP.EVPError):
@@ -41,7 +41,7 @@ class SMIMETestCase(unittest.TestCase):
def test_crlf(self):
self.assertEqual(SMIME.text_crlf('foobar'), 'Content-Type: text/plain\r\n\r\nfoobar')
self.assertEqual(SMIME.text_crlf_bio(BIO.MemoryBuffer('foobar')).read(), 'Content-Type: text/plain\r\n\r\nfoobar')
-
+
def test_sign(self):
buf = BIO.MemoryBuffer(self.cleartext)
s = SMIME.SMIME()
@@ -52,9 +52,9 @@ class SMIMETestCase(unittest.TestCase):
self.assertIsInstance(p7, SMIME.PKCS7, p7)
out = BIO.MemoryBuffer()
p7.write(out)
-
+
buf = out.read()
-
+
self.assertTrue(buf.startswith('-----BEGIN PKCS7-----'),
'-----BEGIN PKCS7-----')
buf = buf.strip()
@@ -63,50 +63,50 @@ class SMIMETestCase(unittest.TestCase):
self.assertGreater(len(buf),
len('-----END PKCS7-----') +
len('-----BEGIN PKCS7-----'))
-
+
s.write(out, p7, BIO.MemoryBuffer(self.cleartext))
return out
- def test_store_load_info(self):
+ def test_store_load_info(self):
st = X509.X509_Store()
with self.assertRaises(X509.X509Error):
st.load_info('tests/ca.pem-typoname')
- self.assertEqual(st.load_info('tests/ca.pem'), 1)
+ self.assertEqual(st.load_info('tests/ca.pem'), 1)
def test_verify(self):
s = SMIME.SMIME()
-
+
x509 = X509.load_cert('tests/signer.pem')
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
-
+
st = X509.X509_Store()
st.load_info('tests/ca.pem')
s.set_x509_store(st)
-
+
p7, data = SMIME.smime_load_pkcs7_bio(self.signed)
-
+
self.assertIsInstance(p7, SMIME.PKCS7, p7)
v = s.verify(p7, data)
self.assertEqual(v, self.cleartext)
-
+
t = p7.get0_signers(sk)
self.assertEqual(len(t), 1)
self.assertEqual(t[0].as_pem(), x509.as_pem(), t[0].as_text())
def test_verifyBad(self):
s = SMIME.SMIME()
-
+
x509 = X509.load_cert('tests/recipient.pem')
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
-
+
st = X509.X509_Store()
st.load_info('tests/recipient.pem')
s.set_x509_store(st)
-
+
p7, data = SMIME.smime_load_pkcs7_bio(self.signed)
self.assertIsInstance(p7, SMIME.PKCS7, p7)
with self.assertRaises(SMIME.PKCS7_Error):
@@ -126,36 +126,36 @@ class SMIMETestCase(unittest.TestCase):
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
p7 = s.encrypt(buf)
-
+
self.assertEqual(len(buf), 0)
self.assertEqual(p7.type(), SMIME.PKCS7_ENVELOPED,
p7.type())
self.assertIsInstance(p7, SMIME.PKCS7, p7)
out = BIO.MemoryBuffer()
p7.write(out)
-
+
buf = out.read()
-
+
self.assertTrue(buf.startswith('-----BEGIN PKCS7-----'))
buf = buf.strip()
self.assertTrue(buf.endswith('-----END PKCS7-----'))
self.assertGreater(len(buf),
len('-----END PKCS7-----') +
len('-----BEGIN PKCS7-----'))
-
+
s.write(out, p7)
return out
-
+
def test_decrypt(self):
s = SMIME.SMIME()
s.load_key('tests/recipient_key.pem', 'tests/recipient.pem')
-
+
p7, data = SMIME.smime_load_pkcs7_bio(self.encrypted)
self.assertIsInstance(p7, SMIME.PKCS7, p7)
with self.assertRaises(SMIME.SMIME_Error):
s.verify(p7) # No signer
-
+
out = s.decrypt(p7)
self.assertEqual(out, self.cleartext)
@@ -163,7 +163,7 @@ class SMIMETestCase(unittest.TestCase):
s = SMIME.SMIME()
s.load_key('tests/signer_key.pem', 'tests/signer.pem')
-
+
p7, data = SMIME.smime_load_pkcs7_bio(self.encrypted)
self.assertIsInstance(p7, SMIME.PKCS7, p7)
with self.assertRaises(SMIME.SMIME_Error):
@@ -176,45 +176,45 @@ class SMIMETestCase(unittest.TestCase):
def test_signEncryptDecryptVerify(self):
# sign
buf = BIO.MemoryBuffer(self.cleartext)
- s = SMIME.SMIME()
+ s = SMIME.SMIME()
s.load_key('tests/signer_key.pem', 'tests/signer.pem')
p7 = s.sign(buf)
-
+
# encrypt
x509 = X509.load_cert('tests/recipient.pem')
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
-
+
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
-
+
tmp = BIO.MemoryBuffer()
s.write(tmp, p7)
p7 = s.encrypt(tmp)
-
+
signedEncrypted = BIO.MemoryBuffer()
s.write(signedEncrypted, p7)
# decrypt
s = SMIME.SMIME()
-
+
s.load_key('tests/recipient_key.pem', 'tests/recipient.pem')
-
+
p7, data = SMIME.smime_load_pkcs7_bio(signedEncrypted)
-
+
out = s.decrypt(p7)
-
+
# verify
x509 = X509.load_cert('tests/signer.pem')
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
-
+
st = X509.X509_Store()
st.load_info('tests/ca.pem')
s.set_x509_store(st)
-
+
p7_bio = BIO.MemoryBuffer(out)
p7, data = SMIME.smime_load_pkcs7_bio(p7_bio)
v = s.verify(p7)
@@ -236,21 +236,21 @@ class WriteLoadTestCase(unittest.TestCase):
f = BIO.openfile(self.filenameSmime, 'wb')
self.assertEqual(s.write(f, p7, BIO.MemoryBuffer('some text')), 1)
f.close()
-
+
def test_write_pkcs7_der(self):
buf = BIO.MemoryBuffer()
self.assertEqual(SMIME.load_pkcs7(self.filename).write_der(buf), 1)
s = buf.read()
assert len(s) in (1188, 1204, 1243, 1263), len(s)
-
+
def test_load_pkcs7(self):
self.assertEqual(SMIME.load_pkcs7(self.filename).type(), SMIME.PKCS7_SIGNED)
-
+
def test_load_pkcs7_bio(self):
f = open(self.filename, 'rb')
buf = BIO.MemoryBuffer(f.read())
f.close()
-
+
self.assertEqual(SMIME.load_pkcs7_bio(buf).type(), SMIME.PKCS7_SIGNED)
def test_load_smime(self):
@@ -258,7 +258,7 @@ class WriteLoadTestCase(unittest.TestCase):
self.assertIsInstance(a, SMIME.PKCS7, a)
self.assertIsInstance(b, BIO.BIO, b)
self.assertEqual(a.type(), SMIME.PKCS7_SIGNED)
-
+
def test_load_smime_bio(self):
f = open(self.filenameSmime, 'rb')
buf = BIO.MemoryBuffer(f.read())
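The sign and verify cases above share one flow: sign with the signer key, serialize as S/MIME, then verify against the signer certificate (X509_Stack) and the issuing CA (X509_Store). Condensed, with the same fixture files:

    from M2Crypto import BIO, SMIME, X509

    s = SMIME.SMIME()
    s.load_key('tests/signer_key.pem', 'tests/signer.pem')
    p7 = s.sign(BIO.MemoryBuffer('some text'))
    out = BIO.MemoryBuffer()
    s.write(out, p7, BIO.MemoryBuffer('some text'))   # PKCS7 plus cleartext

    sk = X509.X509_Stack()
    sk.push(X509.load_cert('tests/signer.pem'))       # the signer's cert
    s.set_x509_stack(sk)
    st = X509.X509_Store()
    st.load_info('tests/ca.pem')                      # the trust anchor
    s.set_x509_store(st)

    p7, data = SMIME.smime_load_pkcs7_bio(out)
    assert s.verify(p7, data) == 'some text'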
diff --git a/tests/test_ssl_offline.py b/tests/test_ssl_offline.py
index 7dcdbe9..6088fbd 100644
--- a/tests/test_ssl_offline.py
+++ b/tests/test_ssl_offline.py
@@ -26,16 +26,16 @@ class CheckerTestCase(unittest.TestCase):
assert check(x509, srv_host)
with self.assertRaises(Checker.WrongHost):
check(x509, 'example.com')
-
+
doctest.testmod(Checker)
-
+
class ContextTestCase(unittest.TestCase):
def test_ctx_load_verify_locations(self):
ctx = SSL.Context()
with self.assertRaises(ValueError):
ctx.load_verify_locations(None, None)
-
+
def test_map(self):
from M2Crypto.SSL.Context import map, _ctxmap
self.assertIsInstance(map(), _ctxmap)
@@ -58,7 +58,7 @@ def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CheckerTestCase))
suite.addTest(unittest.makeSuite(ContextTestCase))
- return suite
+ return suite
if __name__ == '__main__':
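Checker compares a peer certificate against the expected hostname and raises WrongHost on a mismatch. A minimal sketch; the certificate path and hostnames here are illustrative, not fixtures from this suite:

    from M2Crypto import X509
    from M2Crypto.SSL import Checker

    check = Checker.Checker()
    x509 = X509.load_cert('tests/server.pem')   # hypothetical server cert
    check(x509, 'localhost')                    # passes when CN/SAN matches
    # check(x509, 'example.com') would raise Checker.WrongHost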
diff --git a/tests/test_ssl_win.py b/tests/test_ssl_win.py
index 86f8606..34e9835 100644
--- a/tests/test_ssl_win.py
+++ b/tests/test_ssl_win.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python
-"""Unit tests for M2Crypto.SSL.
+"""Unit tests for M2Crypto.SSL.
-Win32 version - requires Mark Hammond's Win32 extensions and openssl.exe
+Win32 version - requires Mark Hammond's Win32 extensions and openssl.exe
on your PATH.
Copyright (c) 2000-2001 Ng Pheng Siong. All rights reserved."""
@@ -21,7 +21,7 @@ except ImportError:
if win32process:
from M2Crypto import Rand, SSL
import test_ssl
-
+
def find_openssl():
plist = os.environ['PATH'].split(';')
for p in plist:
@@ -32,44 +32,44 @@ if win32process:
except WindowsError:
pass
return None
-
-
+
+
srv_host = 'localhost'
srv_port = 64000
-
+
class SSLWinClientTestCase(test_ssl.SSLClientTestCase):
-
+
startupinfo = win32process.STARTUPINFO()
openssl = find_openssl()
-
+
def start_server(self, args):
# openssl must be started in the tests directory for it
# to find the .pem files
- os.chdir('tests')
+ os.chdir('tests')
try:
hproc, hthread, pid, tid = win32process.CreateProcess(self.openssl,
- string.join(args), None, None, 0, win32process.DETACHED_PROCESS,
+ string.join(args), None, None, 0, win32process.DETACHED_PROCESS,
None, None, self.startupinfo)
finally:
- os.chdir('..')
+ os.chdir('..')
time.sleep(0.3)
return hproc
-
+
def stop_server(self, hproc):
win32process.TerminateProcess(hproc, 0)
-
-
+
+
def suite():
return unittest.makeSuite(SSLWinClientTestCase)
-
+
def zap_servers():
pass
-
-
+
+
if __name__ == '__main__':
try:
if find_openssl() is not None:
- Rand.load_file('randpool.dat', -1)
+ Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
finally: