author     willmcgugan <willmcgugan@67cdc799-7952-0410-af00-57a81ceafa0f>   2010-01-01 20:18:32 +0000
committer  willmcgugan <willmcgugan@67cdc799-7952-0410-af00-57a81ceafa0f>   2010-01-01 20:18:32 +0000
commit     671cf321fcc774c03d093fbae257e71b1df3d0ea (patch)
tree       e371fce423086808958d373d869b3fc66ef3e431 /fs
parent     bb120439de4f17426e17cd4d0782295c8192a869 (diff)
Documentation, fixes, A ReadOnlyFS wrapper and a plain old FTP FS class
git-svn-id: http://pyfilesystem.googlecode.com/svn/trunk@309 67cdc799-7952-0410-af00-57a81ceafa0f
Diffstat (limited to 'fs')
-rw-r--r--   fs/__init__.py                 20
-rw-r--r--   fs/base.py                     54
-rw-r--r--   fs/browsewin.py                10
-rw-r--r--   fs/errors.py                   12
-rw-r--r--   fs/expose/django_storage.py     4
-rw-r--r--   fs/expose/fuse/__init__.py     16
-rw-r--r--   fs/expose/sftp.py              16
-rw-r--r--   fs/expose/xmlrpc.py             6
-rw-r--r--   fs/ftpfs.py                  1110
-rw-r--r--   fs/memoryfs.py                 15
-rw-r--r--   fs/mountfs.py                  25
-rw-r--r--   fs/multifs.py                  18
-rw-r--r--   fs/objecttree.py                2
-rw-r--r--   fs/osfs.py                     40
-rw-r--r--   fs/path.py                     68
-rw-r--r--   fs/remote.py                   15
-rw-r--r--   fs/s3fs.py                      4
-rw-r--r--   fs/sftpfs.py                    6
-rw-r--r--   fs/tempfs.py                   16
-rw-r--r--   fs/tests/__init__.py            6
-rw-r--r--   fs/tests/ftpserver.py          19
-rw-r--r--   fs/tests/test_fs.py            15
-rw-r--r--   fs/tests/test_ftpfs.py         78
-rw-r--r--   fs/utils.py                   122
-rw-r--r--   fs/wrapfs.py                   50
-rw-r--r--   fs/xattrs.py                   22
-rw-r--r--   fs/zipfs.py                    20
27 files changed, 1608 insertions, 181 deletions
diff --git a/fs/__init__.py b/fs/__init__.py
index 3f1b352..aa242a7 100644
--- a/fs/__init__.py
+++ b/fs/__init__.py
@@ -26,4 +26,22 @@ from base import *
import errors
import path
-
+_thread_syncronize_default = True
+def set_thread_syncronize_default(sync):
+    """Sets the default thread synchronisation flag.
+
+ FS objects are made thread-safe through the use of a per-FS threading Lock
+    object. Since this can introduce a small overhead, it can be disabled with
+ this function if the code is single-threaded.
+
+    :param sync: Set whether to use thread synchronization for new FS objects
+
+ """
+    global _thread_syncronize_default
+    _thread_syncronize_default = sync
+
+# Store some identifiers in the fs namespace
+import os
+SEEK_CUR = os.SEEK_CUR
+SEEK_END = os.SEEK_END
+SEEK_SET = os.SEEK_SET
\ No newline at end of file
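
The additions above give the fs namespace a module-level synchronization default and re-export the os seek constants. A minimal usage sketch, using only the names defined in the hunk above (whether a given FS class honours the default depends on it consulting the value at construction time):

    import os
    import fs

    # Single-threaded programs can skip the per-FS threading lock entirely.
    fs.set_thread_syncronize_default(False)

    # The seek constants are plain re-exports of the os module values.
    assert fs.SEEK_SET == os.SEEK_SET
    assert fs.SEEK_CUR == os.SEEK_CUR
    assert fs.SEEK_END == os.SEEK_END
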
diff --git a/fs/base.py b/fs/base.py
index 77c2204..a0358b1 100644
--- a/fs/base.py
+++ b/fs/base.py
@@ -45,9 +45,9 @@ class DummyLock:
def silence_fserrors(f, *args, **kwargs):
"""Perform a function call and return None if FSError is thrown
- f -- Function to call
- args -- Parameters to f
- kwargs -- Keyword parameters to f
+ :param f: Function to call
+ :param args: Parameters to f
+ :param kwargs: Keyword parameters to f
"""
try:
@@ -103,16 +103,20 @@ class NullFile(object):
def writelines(self, *args, **kwargs):
pass
-
+try:
+ from functools import wraps
+except ImportError:
+ wraps = lambda f:f
+
def synchronize(func):
"""Decorator to synchronize a method on self._lock."""
+ @wraps(func)
def acquire_lock(self, *args, **kwargs):
self._lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self._lock.release()
- acquire_lock.__doc__ = func.__doc__
return acquire_lock
@@ -122,30 +126,6 @@ class FS(object):
An instance of a class derived from FS is an abstraction on some kind
    of filesystem, such as the OS filesystem or a zip file.
- The following is the minimal set of methods that must be provided by
- a new FS subclass:
-
- * open -- open a file for reading/writing (like python's open() func)
- * isfile -- check whether a path exists and is a file
- * isdir -- check whether a path exists and is a directory
- * listdir -- list the contents of a directory
- * makedir -- create a new directory
- * remove -- remove an existing file
- * removedir -- remove an existing directory
- * rename -- atomically rename a file or directory
- * getinfo -- return information about the path e.g. size, mtime
-
- The following methods have a sensible default implementation, but FS
- subclasses are welcome to override them if a more efficient implementation
- can be provided:
-
- * getsyspath -- get a file's name in the local filesystem, if possible
- * exists -- check whether a path exists as file or directory
- * copy -- copy a file to a new location
- * move -- move a file to a new location
- * copydir -- recursively copy a directory to a new location
- * movedir -- recursively move a directory to a new location
-
"""
def __init__(self, thread_synchronize=False):
@@ -161,7 +141,7 @@ class FS(object):
self._lock = DummyLock()
def __del__(self):
- if not self.closed:
+ if not getattr(self, 'closed', True):
self.close()
def close(self):
@@ -308,6 +288,7 @@ class FS(object):
"""
raise UnsupportedError("list directory")
+
def _listdir_helper(self, path, entries,
wildcard=None,
@@ -411,7 +392,7 @@ class FS(object):
This is mainly for use as a debugging aid.
"""
if not self.exists(path):
- return "No description available"
+ return ''
try:
sys_path = self.getsyspath(path)
except NoSysPathError:
@@ -616,6 +597,8 @@ class FS(object):
:param overwrite: If True, then an existing file at the destination path
will be silently overwritten; if False then an exception
will be raised in this case.
+ :param chunk_size: Size of chunks to use when copying, if a simple copy
+ is required
"""
src_syspath = self.getsyspath(src, allow_none=True)
@@ -724,6 +707,9 @@ class FS(object):
src = abspath(src)
dst = abspath(dst)
+ if not overwrite and self.exists(dst):
+ raise DestinationExistsError(dst)
+
if dst:
self.makedir(dst, allow_recreate=overwrite)
@@ -786,13 +772,13 @@ class SubFS(FS):
def __str__(self):
return "<SubFS: %s in %s>" % (self.sub_dir, self.parent)
+
+ def __unicode__(self):
+ return u"<SubFS: %s in %s>" % (self.sub_dir, self.parent)
def __repr__(self):
return str(self)
- def __unicode__(self):
- return unicode(self.__str__())
-
def desc(self, path):
if self.isdir(path):
return "Sub dir of %s"%str(self.parent)
diff --git a/fs/browsewin.py b/fs/browsewin.py
index eccd6ae..3baef5d 100644
--- a/fs/browsewin.py
+++ b/fs/browsewin.py
@@ -1,5 +1,8 @@
#!/usr/bin/env python
"""
+fs.browsewin
+============
+
Creates a window which can be used to browse the contents of a filesystem.
To use, call the 'browse' method with a filesystem object. Double click a file
or directory to display its properties.
@@ -32,7 +35,7 @@ class InfoFrame(wx.Frame):
self.list_ctrl.SetColumnWidth(1, 300)
for key in keys:
- self.list_ctrl.Append((key, repr(info[key])))
+ self.list_ctrl.Append((key, repr(info.get(key))))
@@ -76,7 +79,6 @@ class BrowseFrame(wx.Frame):
self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.OnItemExpanding)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnItemActivated)
-
wx.CallAfter(self.OnInit)
def OnInit(self):
@@ -97,7 +99,7 @@ class BrowseFrame(wx.Frame):
return
paths = [(self.fs.isdir(p), p) for p in self.fs.listdir(path, absolute=True)]
-
+
if not paths:
#self.tree.SetItemHasChildren(item_id, False)
#self.tree.Collapse(item_id)
@@ -163,7 +165,7 @@ def browse(fs):
"""Displays a window containing a tree control that displays an FS
object. Double-click a file/folder to display extra info.
- fs -- A filesystem object
+ :param fs: A filesystem object
"""
diff --git a/fs/errors.py b/fs/errors.py
index 6c0f0f9..e4f7f43 100644
--- a/fs/errors.py
+++ b/fs/errors.py
@@ -1,6 +1,7 @@
"""
+Defines the exception classes thrown by PyFilesystem objects. Exceptions relating to the underlying filesystem are translated into one of the following exceptions. Exceptions that relate to a path store that path in `self.path`.
- fs.errors: error class definitions for FS
+All Exception classes are derived from `FSError` which can be used as a catch-all exception.
"""
@@ -39,15 +40,15 @@ class FSError(Exception):
return str(self.msg % keys)
def __unicode__(self):
- keys = dict((k,v) for k,v in self.__dict__.iteritems())
- return unicode(self.msg) % keys
+ return unicode(self.msg) % self.__dict__
def __getstate__(self):
return self.__dict__.copy()
class PathError(FSError):
- """Exception for errors to do with a path string."""
+ """Exception for errors to do with a path string.
+ """
default_message = "Path is invalid: %(path)s"
def __init__(self,path="",**kwds):
@@ -73,7 +74,7 @@ class UnsupportedError(OperationFailedError):
class RemoteConnectionError(OperationFailedError):
"""Exception raised when operations encounter remote connection trouble."""
- default_message = "Unable to %(opname)s: remote connection errror"
+    default_message = "%(opname)s: remote connection error"
class StorageSpaceError(OperationFailedError):
@@ -93,7 +94,6 @@ class OperationTimeoutError(OperationFailedError):
default_message = "Unable to %(opname)s: operation timed out"
-
class ResourceError(FSError):
"""Base exception class for error associated with a specific resource."""
default_message = "Unspecified resource error: %(path)s"
diff --git a/fs/expose/django_storage.py b/fs/expose/django_storage.py
index 6d5536f..86709b0 100644
--- a/fs/expose/django_storage.py
+++ b/fs/expose/django_storage.py
@@ -1,6 +1,8 @@
"""
+fs.expose.django
+================
- fs.expose.django: use an FS object for Django File Storage
+Use an FS object for Django File Storage
"""
diff --git a/fs/expose/fuse/__init__.py b/fs/expose/fuse/__init__.py
index 13b12c2..609b66e 100644
--- a/fs/expose/fuse/__init__.py
+++ b/fs/expose/fuse/__init__.py
@@ -1,14 +1,16 @@
"""
+fs.expose.fuse
+==============
- fs.expose.fuse: expose an FS object to the native filesystem via FUSE
+Expose an FS object to the native filesystem via FUSE
This module provides the necessary interfaces to mount an FS object into
-the local filesystem via FUSE:
+the local filesystem via FUSE::
http://fuse.sourceforge.net/
For simple usage, the function 'mount' takes an FS object and a local path,
-and exposes the given FS at that path:
+and exposes the given FS at that path::
>>> from fs.memoryfs import MemoryFS
>>> from fs.expose import fuse
@@ -20,7 +22,7 @@ and exposes the given FS at that path:
The above spawns a new background process to manage the FUSE event loop, which
can be controlled through the returned subprocess.Popen object. To avoid
-spawning a new process, set the 'foreground' option:
+spawning a new process, set the 'foreground' option::
>>> # This will block until the filesystem is unmounted
>>> fuse.mount(fs,"/mnt/my-memory-fs",foreground=True)
@@ -30,7 +32,7 @@ to the 'mount' function.
If you require finer control over the creation of the FUSE process, you can
instantiate the MountProcess class directly. It accepts all options available
-to subprocess.Popen:
+to subprocess.Popen::
>>> from subprocess import PIPE
>>> mp = fuse.MountProcess(fs,"/mnt/my-memory-fs",stderr=PIPE)
@@ -419,8 +421,8 @@ def mount(fs,path,foreground=False,ready_callback=None,unmount_callback=None,**k
keyword arguments will be passed through as options to the underlying
FUSE class. Some interesting options include:
- * nothreads: switch off threading in the FUSE event loop
- * fsname: name to display in the mount info table
+ * nothreads Switch off threading in the FUSE event loop
+ * fsname Name to display in the mount info table
"""
if foreground:
diff --git a/fs/expose/sftp.py b/fs/expose/sftp.py
index 37adf51..0742aeb 100644
--- a/fs/expose/sftp.py
+++ b/fs/expose/sftp.py
@@ -1,12 +1,14 @@
"""
+fs.expose.sftp
+==============
- fs.expose.sftp: expose an FS object over SFTP (via paramiko).
+Expose an FS object over SFTP (via paramiko).
This module provides the necessary interfaces to expose an FS object over
SFTP, plugging into the infrastructure provided by the 'paramiko' module.
For simple usage, the class 'BaseSFTPServer' provides an all-in-one server
-class based on the standard SocketServer module. Use it like so:
+class based on the standard SocketServer module. Use it like so::
server = BaseSFTPServer((hostname,port),fs)
server.serve_forever()
@@ -227,7 +229,7 @@ class BaseSFTPServer(sockserv.TCPServer,paramiko.ServerInterface):
needed to expose an FS via SFTP.
Operation is in the standard SocketServer style. The target FS object
- can be passed into the constructor, or set as an attribute on the server:
+ can be passed into the constructor, or set as an attribute on the server::
server = BaseSFTPServer((hostname,port),fs)
server.serve_forever()
@@ -240,10 +242,10 @@ class BaseSFTPServer(sockserv.TCPServer,paramiko.ServerInterface):
FS. This is intentional, since we can't guess what your authentication
needs are. To protect the exposed FS, override the following methods:
- get_allowed_auths: determine the allowed auth modes
- check_auth_none: check auth with no credentials
- check_auth_password: check auth with a password
- check_auth_publickey: check auth with a public key
+ * get_allowed_auths Determine the allowed auth modes
+ * check_auth_none Check auth with no credentials
+ * check_auth_password Check auth with a password
+ * check_auth_publickey Check auth with a public key
"""
diff --git a/fs/expose/xmlrpc.py b/fs/expose/xmlrpc.py
index 0a96d33..9d0df94 100644
--- a/fs/expose/xmlrpc.py
+++ b/fs/expose/xmlrpc.py
@@ -1,6 +1,8 @@
"""
+fs.expose.xmlrpc
+================
- fs.expose.xmlrpc: server to expose an FS via XML-RPC
+Server to expose an FS via XML-RPC
This module provides the necessary infrastructure to expose an FS object
over XML-RPC. The main class is 'RPCFSServer', a SimpleXMLRPCServer subclass
@@ -136,7 +138,7 @@ class RPCFSServer(SimpleXMLRPCServer):
This class takes as its first argument an FS instance, and as its second
argument a (hostname,port) tuple on which to listen for XML-RPC requests.
- Example:
+ Example::
fs = OSFS('/var/srv/myfiles')
s = RPCFSServer(fs,("",8080))
diff --git a/fs/ftpfs.py b/fs/ftpfs.py
new file mode 100644
index 0000000..21de57d
--- /dev/null
+++ b/fs/ftpfs.py
@@ -0,0 +1,1110 @@
+"""
+fs.ftpfs
+========
+
+Filesystem for accessing an FTP server (uses ftplib in standard library)
+
+"""
+
+import fs
+from fs.base import *
+from fs.path import pathsplit
+
+from ftplib import FTP, _GLOBAL_DEFAULT_TIMEOUT, error_perm, error_temp, error_proto, error_reply
+import threading
+from time import sleep
+import datetime
+import re
+from socket import error as socket_error
+from functools import wraps
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+import time
+import sys
+
+# -----------------------------------------------
+# Taken from http://www.clapper.org/software/python/grizzled/
+# -----------------------------------------------
+
+class Enum(object):
+ def __init__(self, *names):
+ self._names_map = dict((name, i) for i, name in enumerate(names))
+
+ def __getattr__(self, name):
+ return self._names_map[name]
+
+MONTHS = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
+ 'jul', 'aug', 'sep', 'oct', 'nov', 'dec')
+
+MTIME_TYPE = Enum('UNKNOWN', 'LOCAL', 'REMOTE_MINUTE', 'REMOTE_DAY')
+"""
+``MTIME_TYPE`` identifies how a modification time ought to be interpreted
+(assuming the caller cares).
+
+ - ``LOCAL``: Time is local to the client, granular to (at least) the minute
+ - ``REMOTE_MINUTE``: Time is local to the server and granular to the minute
+ - ``REMOTE_DAY``: Time is local to the server and granular to the day.
+ - ``UNKNOWN``: Time's locale is unknown.
+"""
+
+ID_TYPE = Enum('UNKNOWN', 'FULL')
+"""
+``ID_TYPE`` identifies how a file's identifier should be interpreted.
+
+ - ``FULL``: The ID is known to be complete.
+ - ``UNKNOWN``: The ID is not set or its type is unknown.
+"""
+
+# ---------------------------------------------------------------------------
+# Globals
+# ---------------------------------------------------------------------------
+
+now = time.time()
+current_year = time.localtime().tm_year
+
+# ---------------------------------------------------------------------------
+# Classes
+# ---------------------------------------------------------------------------
+
+class FTPListData(object):
+ """
+ The `FTPListDataParser` class's ``parse_line()`` method returns an
+ instance of this class, capturing the parsed data.
+
+ :IVariables:
+ name : str
+ The name of the file, if parsable
+ try_cwd : bool
+ ``True`` if the entry might be a directory (i.e., the caller
+ might want to try an FTP ``CWD`` command), ``False`` if it
+ cannot possibly be a directory.
+ try_retr : bool
+ ``True`` if the entry might be a retrievable file (i.e., the caller
+ might want to try an FTP ``RETR`` command), ``False`` if it
+ cannot possibly be a file.
+ size : long
+ The file's size, in bytes
+ mtime : long
+ The file's modification time, as a value that can be passed to
+ ``time.localtime()``.
+ mtime_type : `MTIME_TYPE`
+ How to interpret the modification time. See `MTIME_TYPE`.
+ id : str
+ A unique identifier for the file. The unique identifier is unique
+ on the *server*. On a Unix system, this identifier might be the
+ device number and the file's inode; on other system's, it might
+ be something else. It's also possible for this field to be ``None``.
+ id_type : `ID_TYPE`
+ How to interpret the identifier. See `ID_TYPE`.
+ """
+
+ def __init__(self, raw_line):
+ self.raw_line = raw_line
+ self.name = None
+ self.try_cwd = False
+ self.try_retr = False
+ self.size = 0
+ self.mtime_type = MTIME_TYPE.UNKNOWN
+ self.mtime = 0
+ self.id_type = ID_TYPE.UNKNOWN
+ self.id = None
+
+class FTPListDataParser(object):
+ """
+ An ``FTPListDataParser`` object can be used to parse one or more lines
+ that were retrieved by an FTP ``LIST`` command that was sent to a remote
+ server.
+ """
+ def __init__(self):
+ pass
+
+ def parse_line(self, ftp_list_line):
+ """
+ Parse a line from an FTP ``LIST`` command.
+
+ :Parameters:
+ ftp_list_line : str
+ The line of output
+
+ :rtype: `FTPListData`
+ :return: An `FTPListData` object describing the parsed line, or
+ ``None`` if the line could not be parsed. Note that it's
+ possible for this method to return a partially-filled
+ `FTPListData` object (e.g., one without a name).
+ """
+ buf = ftp_list_line
+
+ if len(buf) < 2: # an empty name in EPLF, with no info, could be 2 chars
+ return None
+
+ c = buf[0]
+ if c == '+':
+ return self._parse_EPLF(buf)
+
+ elif c in 'bcdlps-':
+ return self._parse_unix_style(buf)
+
+ i = buf.find(';')
+ if i > 0:
+ return self._parse_multinet(buf, i)
+
+ if c in '0123456789':
+ return self._parse_msdos(buf)
+
+ return None
+
+ # UNIX ls does not show the year for dates in the last six months.
+ # So we have to guess the year.
+ #
+ # Apparently NetWare uses ``twelve months'' instead of ``six months''; ugh.
+ # Some versions of ls also fail to show the year for future dates.
+
+ def _guess_time(self, month, mday, hour=0, minute=0):
+ year = None
+ t = None
+
+ for year in range(current_year - 1, current_year + 100):
+ t = self._get_mtime(year, month, mday, hour, minute)
+ if (now - t) < (350 * 86400):
+ return t
+
+ return 0
+
+ def _get_mtime(self, year, month, mday, hour=0, minute=0, second=0):
+ return time.mktime((year, month, mday, hour, minute, second, 0, 0, -1))
+
+ def _get_month(self, buf):
+ if len(buf) == 3:
+ for i in range(0, 12):
+ if buf.lower().startswith(MONTHS[i]):
+ return i+1
+ return -1
+
+ def _parse_EPLF(self, buf):
+ result = FTPListData(buf)
+
+ # see http://cr.yp.to/ftp/list/eplf.html
+ #"+i8388621.29609,m824255902,/,\tdev"
+ #"+i8388621.44468,m839956783,r,s10376,\tRFCEPLF"
+ i = 1
+ for j in range(1, len(buf)):
+ if buf[j] == '\t':
+ result.name = buf[j+1:]
+ break
+
+ if buf[j] == ',':
+ c = buf[i]
+ if c == '/':
+ result.try_cwd = True
+ elif c == 'r':
+ result.try_retr = True
+ elif c == 's':
+ result.size = long(buf[i+1:j])
+ elif c == 'm':
+ result.mtime_type = MTIME_TYPE.LOCAL
+ result.mtime = long(buf[i+1:j])
+ elif c == 'i':
+ result.id_type = ID_TYPE.FULL
+ result.id = buf[i+1:j-i-1]
+
+ i = j + 1
+
+ return result
+
+ def _parse_unix_style(self, buf):
+ # UNIX-style listing, without inum and without blocks:
+ # "-rw-r--r-- 1 root other 531 Jan 29 03:26 README"
+ # "dr-xr-xr-x 2 root other 512 Apr 8 1994 etc"
+ # "dr-xr-xr-x 2 root 512 Apr 8 1994 etc"
+ # "lrwxrwxrwx 1 root other 7 Jan 25 00:17 bin -> usr/bin"
+ #
+ # Also produced by Microsoft's FTP servers for Windows:
+ # "---------- 1 owner group 1803128 Jul 10 10:18 ls-lR.Z"
+ # "d--------- 1 owner group 0 May 9 19:45 Softlib"
+ #
+ # Also WFTPD for MSDOS:
+ # "-rwxrwxrwx 1 noone nogroup 322 Aug 19 1996 message.ftp"
+ #
+ # Also NetWare:
+ # "d [R----F--] supervisor 512 Jan 16 18:53 login"
+ # "- [R----F--] rhesus 214059 Oct 20 15:27 cx.exe"
+ #
+ # Also NetPresenz for the Mac:
+ # "-------r-- 326 1391972 1392298 Nov 22 1995 MegaPhone.sit"
+ # "drwxrwxr-x folder 2 May 10 1996 network"
+
+ result = FTPListData(buf)
+
+ buflen = len(buf)
+ c = buf[0]
+ if c == 'd':
+ result.try_cwd = True
+ if c == '-':
+ result.try_retr = True
+ if c == 'l':
+ result.try_retr = True
+ result.try_cwd = True
+
+ state = 1
+ i = 0
+ tokens = buf.split()
+ for j in range(1, buflen):
+ if (buf[j] == ' ') and (buf[j - 1] != ' '):
+ if state == 1: # skipping perm
+ state = 2
+
+ elif state == 2: # skipping nlink
+ state = 3
+ if ((j - i) == 6) and (buf[i] == 'f'): # NetPresenz
+ state = 4
+
+ elif state == 3: # skipping UID/GID
+ state = 4
+
+ elif state == 4: # getting tentative size
+ try:
+ size = long(buf[i:j])
+ except ValueError:
+ pass
+ state = 5
+
+ elif state == 5: # searching for month, else getting tentative size
+ month = self._get_month(buf[i:j])
+ if month >= 0:
+ state = 6
+ else:
+ size = long(buf[i:j])
+
+ elif state == 6: # have size and month
+ mday = long(buf[i:j])
+ state = 7
+
+ elif state == 7: # have size, month, mday
+ if (j - i == 4) and (buf[i+1] == ':'):
+ hour = long(buf[i])
+ minute = long(buf[i+2:i+4])
+ result.mtime_type = MTIME_TYPE.REMOTE_MINUTE
+ result.mtime = self._guess_time(month, mday, hour, minute)
+ elif (j - i == 5) and (buf[i+2] == ':'):
+ hour = long(buf[i:i+2])
+ minute = long(buf[i+3:i+5])
+ result.mtime_type = MTIME_TYPE.REMOTE_MINUTE
+ result.mtime = self._guess_time(month, mday, hour, minute)
+ elif j - i >= 4:
+ year = long(buf[i:j])
+                        result.mtime_type = MTIME_TYPE.REMOTE_DAY
+ result.mtime = self._get_mtime(year, month, mday)
+ else:
+ break
+
+ result.name = buf[j+1:]
+ state = 8
+ elif state == 8: # twiddling thumbs
+ pass
+
+ i = j + 1
+ while (i < buflen) and (buf[i] == ' '):
+ i += 1
+
+ #if state != 8:
+ #return None
+
+ result.size = size
+
+ if c == 'l':
+ i = 0
+ while (i + 3) < len(result.name):
+ if result.name[i:i+4] == ' -> ':
+ result.name = result.name[:i]
+ break
+ i += 1
+
+ # eliminate extra NetWare spaces
+ if (buf[1] == ' ') or (buf[1] == '['):
+ namelen = len(result.name)
+ if namelen > 3:
+ result.name = result.name.strip()
+
+ return result
+
+ def _parse_multinet(self, buf, i):
+
+ # MultiNet (some spaces removed from examples)
+ # "00README.TXT;1 2 30-DEC-1996 17:44 [SYSTEM] (RWED,RWED,RE,RE)"
+ # "CORE.DIR;1 1 8-SEP-1996 16:09 [SYSTEM] (RWE,RWE,RE,RE)"
+ # and non-MultiNet VMS:
+ #"CII-MANUAL.TEX;1 213/216 29-JAN-1996 03:33:12 [ANONYMOU,ANONYMOUS] (RWED,RWED,,)"
+
+ result = FTPListData(buf)
+ result.name = buf[:i]
+ buflen = len(buf)
+
+ if i > 4:
+ if buf[i-4:i] == '.DIR':
+ result.name = result.name[0:-4]
+ result.try_cwd = True
+
+ if not result.try_cwd:
+ result.try_retr = True
+
+ try:
+ i = buf.index(' ', i)
+ i = _skip(buf, i, ' ')
+ i = buf.index(' ', i)
+ i = _skip(buf, i, ' ')
+
+ j = i
+
+ j = buf.index('-', j)
+ mday = long(buf[i:j])
+
+ j = _skip(buf, j, '-')
+ i = j
+ j = buf.index('-', j)
+ month = self._get_month(buf[i:j])
+ if month < 0:
+ raise IndexError
+
+ j = _skip(buf, j, '-')
+ i = j
+ j = buf.index(' ', j)
+ year = long(buf[i:j])
+
+ j = _skip(buf, j, ' ')
+ i = j
+
+ j = buf.index(':', j)
+ hour = long(buf[i:j])
+ j = _skip(buf, j, ':')
+ i = j
+
+ while (buf[j] != ':') and (buf[j] != ' '):
+ j += 1
+ if j == buflen:
+ raise IndexError # abort, abort!
+
+ minute = long(buf[i:j])
+
+            result.mtime_type = MTIME_TYPE.REMOTE_MINUTE
+ result.mtime = self._get_mtime(year, month, mday, hour, minute)
+
+ except IndexError:
+ pass
+
+ return result
+
+ def _parse_msdos(self, buf):
+ # MSDOS format
+ # 04-27-00 09:09PM <DIR> licensed
+ # 07-18-00 10:16AM <DIR> pub
+ # 04-14-00 03:47PM 589 readme.htm
+
+ buflen = len(buf)
+ i = 0
+ j = 0
+
+ try:
+ result = FTPListData(buf)
+
+ j = buf.index('-', j)
+ month = long(buf[i:j])
+
+ j = _skip(buf, j, '-')
+ i = j
+ j = buf.index('-', j)
+ mday = long(buf[i:j])
+
+ j = _skip(buf, j, '-')
+ i = j
+ j = buf.index(' ', j)
+ year = long(buf[i:j])
+ if year < 50:
+ year += 2000
+ if year < 1000:
+ year += 1900
+
+ j = _skip(buf, j, ' ')
+ i = j
+ j = buf.index(':', j)
+ hour = long(buf[i:j])
+ j = _skip(buf, j, ':')
+ i = j
+ while not (buf[j] in 'AP'):
+ j += 1
+ if j == buflen:
+ raise IndexError
+ minute = long(buf[i:j])
+
+ if buf[j] == 'A':
+ j += 1
+ if j == buflen:
+ raise IndexError
+
+ if buf[j] == 'P':
+ hour = (hour + 12) % 24
+ j += 1
+ if j == buflen:
+ raise IndexError
+
+ if buf[j] == 'M':
+ j += 1
+ if j == buflen:
+ raise IndexError
+
+ j = _skip(buf, j, ' ')
+ if buf[j] == '<':
+ result.try_cwd = True
+ j = buf.index(' ', j)
+ else:
+ i = j
+ j = buf.index(' ', j)
+
+ result.size = long(buf[i:j])
+ result.try_retr = True
+
+ j = _skip(buf, j, ' ')
+
+ result.name = buf[j:]
+            result.mtime_type = MTIME_TYPE.REMOTE_MINUTE
+ result.mtime = self._get_mtime(year, month, mday, hour, minute)
+ except IndexError:
+ pass
+
+ return result
+
+
+# ---------------------------------------------------------------------------
+# Public Functions
+# ---------------------------------------------------------------------------
+
+def parse_ftp_list_line(ftp_list_line):
+ """
+ Convenience function that instantiates an `FTPListDataParser` object
+ and passes ``ftp_list_line`` to the object's ``parse_line()`` method,
+ returning the result.
+
+ :Parameters:
+ ftp_list_line : str
+ The line of output
+
+ :rtype: `FTPListData`
+ :return: An `FTPListData` object describing the parsed line, or
+ ``None`` if the line could not be parsed. Note that it's
+ possible for this method to return a partially-filled
+ `FTPListData` object (e.g., one without a name).
+ """
+ return FTPListDataParser().parse_line(ftp_list_line)
+
+# ---------------------------------------------------------------------------
+# Private Functions
+# ---------------------------------------------------------------------------
+
+def _skip(s, i, c):
+ while s[i] == c:
+ i += 1
+ if i == len(s):
+ raise IndexError
+ return i
+
+
+
+
+
+
+class _FTPFile(object):
+
+ """ A file-like that provides access to a file being streamed over ftp."""
+
+ def __init__(self, ftpfs, ftp, path, mode):
+ if not hasattr(self, '_lock'):
+ self._lock = threading.RLock()
+ self.ftpfs = ftpfs
+ self.ftp = ftp
+ self.path = path
+ self.mode = mode
+ self.read_pos = 0
+ self.write_pos = 0
+ self.closed = False
+ if 'r' in mode or 'a' in mode:
+ self.file_size = ftpfs.getsize(path)
+ self.conn = None
+
+ path = _encode(path)
+ #self._lock = ftpfs._lock
+
+ if 'r' in mode:
+ self.ftp.voidcmd('TYPE I')
+ self.conn = ftp.transfercmd('RETR '+path, None)
+
+ #self._ftp_thread = threading.Thread(target=do_read)
+ #self._ftp_thread.start()
+ elif 'w' in mode or 'a' in mode:
+ self.ftp.voidcmd('TYPE I')
+ if 'a' in mode:
+ self.write_pos = self.file_size
+ self.conn = self.ftp.transfercmd('APPE '+path)
+ else:
+ self.conn = self.ftp.transfercmd('STOR '+path)
+ #while 1:
+ # buf = fp.read(blocksize)
+ # if not buf: break
+ # conn.sendall(buf)
+ # if callback: callback(buf)
+ #conn.close()
+ #return self.voidresp()
+
+ #self._ftp_thread = threading.Thread(target=do_write)
+ #self._ftp_thread.start()
+
+ @synchronize
+ def read(self, size=None):
+ if self.conn is None:
+ return ''
+
+ chunks = []
+ if size is None:
+ while 1:
+ data = self.conn.recv(4096)
+ if not data:
+ self.conn.close()
+ self.conn = None
+ self.ftp.voidresp()
+ break
+ chunks.append(data)
+ self.read_pos += len(data)
+ return ''.join(chunks)
+
+ remaining_bytes = size
+ while remaining_bytes:
+ read_size = min(remaining_bytes, 4096)
+ data = self.conn.recv(read_size)
+ if not data:
+ self.conn.close()
+ self.conn = None
+ self.ftp.voidresp()
+ break
+ chunks.append(data)
+ self.read_pos += len(data)
+ remaining_bytes -= len(data)
+
+ return ''.join(chunks)
+
+ @synchronize
+ def write(self, data):
+
+ data_pos = 0
+ remaining_data = len(data)
+
+ while remaining_data:
+ chunk_size = min(remaining_data, 4096)
+ self.conn.sendall(data[data_pos:data_pos+chunk_size])
+ data_pos += chunk_size
+ remaining_data -= chunk_size
+ self.write_pos += chunk_size
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self,exc_type,exc_value,traceback):
+ self.close()
+
+ @synchronize
+ def flush(self):
+ return
+
+ def seek(self, pos, where=fs.SEEK_SET):
+ # Ftp doesn't support a real seek, so we close the transfer and resume
+ # it at the new position with the REST command
+ # I'm not sure how reliable this method is!
+ if not self.file_size:
+ raise ValueError("Seek only works with files open for read")
+
+ self._lock.acquire()
+ try:
+
+ current = self.tell()
+ new_pos = None
+ if where == fs.SEEK_SET:
+ new_pos = pos
+ elif where == fs.SEEK_CUR:
+ new_pos = current + pos
+ elif where == fs.SEEK_END:
+ new_pos = self.file_size + pos
+ if new_pos < 0:
+ raise ValueError("Can't seek before start of file")
+
+ if self.conn is not None:
+ self.conn.close()
+
+ finally:
+ self._lock.release()
+
+ self.close()
+ self._lock.acquire()
+ try:
+ self.ftp = self.ftpfs._open_ftp()
+ self.ftp.sendcmd('TYPE I')
+ self.ftp.sendcmd('REST %i' % (new_pos))
+ self.__init__(self.ftpfs, self.ftp, _encode(self.path), self.mode)
+ self.read_pos = new_pos
+ finally:
+ self._lock.release()
+
+ #raise UnsupportedError('ftp seek')
+
+ @synchronize
+ def tell(self):
+ if 'r' in self.mode:
+ return self.read_pos
+ else:
+ return self.write_pos
+
+ @synchronize
+ def close(self):
+ if self.conn is not None:
+ self.conn.close()
+ self.conn = None
+ self.ftp.voidresp()
+ if self.ftp is not None:
+ self.ftp.close()
+ self.closed = True
+
+ def __iter__(self):
+ return self.next()
+
+ def next(self):
+ """ Line iterator
+
+ This isn't terribly efficient. It would probably be better to do
+ a read followed by splitlines.
+ """
+ endings = '\r\n'
+ chars = []
+ while True:
+ char = self.read(1)
+ if not char:
+ yield ''.join(chars)
+ del chars[:]
+ break
+ chars.append(char)
+ if char in endings:
+ line = ''.join(chars)
+ del chars[:]
+ c = self.read(1)
+                if not c:
+ yield line
+ break
+ if c in endings and c != char:
+ yield line + c
+ else:
+ yield line
+ chars.append(c)
+
+
+
+def ftperrors(f):
+ @wraps(f)
+ def deco(self, *args, **kwargs):
+ try:
+ ret = f(self, *args, **kwargs)
+ except Exception, e:
+ #import traceback
+ #traceback.print_exc()
+ self._translate_exception(args[0] if args else '', e)
+ return ret
+ return deco
+
+
+def _encode(s):
+ if isinstance(s, unicode):
+ return s.encode('utf-8')
+ return s
+
+
+class FTPFS(FS):
+
+ _locals = threading.local()
+
+ def __init__(self, host='', user='', passwd='', acct='', timeout=_GLOBAL_DEFAULT_TIMEOUT,
+ port=21,
+ dircache=False,
+ max_buffer_size=128*1024*1024):
+ """
+ :param host:
+ :param user:
+ :param passwd:
+ :param timeout:
+ :param dircache: If True then directory information will be cached,
+ which will speed up operations such as isdir and isfile, but changes
+            to the ftp file structure will not be visible until clear_dircache is
+            called
+ :param max_buffer_size: Number of bytes to hold before blocking write operations.
+
+ """
+
+ super(FTPFS, self).__init__()
+
+ self.host = host
+ self.port = port
+ self.user = user
+ self.passwd = passwd
+ self.acct = acct
+ self.timeout = timeout
+
+ self._dircache = {}
+ self.use_dircache = dircache
+ self.max_buffer_size = max_buffer_size
+
+ self._locals._ftp = None
+ self._thread_ftps = set()
+ self.ftp
+
+
+ @synchronize
+ def get_ftp(self):
+ if getattr(self._locals, '_ftp', None) is None:
+ self._locals._ftp = self._open_ftp()
+ ftp = self._locals._ftp
+ self._thread_ftps.add(ftp)
+ return self._locals._ftp
+ def set_ftp(self, ftp):
+ self._locals._ftp = ftp
+ ftp = property(get_ftp, set_ftp)
+
+ @synchronize
+ def _open_ftp(self):
+ try:
+ ftp = FTP()
+ ftp.connect(self.host, self.port, self.timeout)
+ ftp.login(self.user, self.passwd, self.acct)
+ except socket_error, e:
+ raise RemoteConnectionError(str(e), details=e)
+ return ftp
+
+ def __getstate__(self):
+ state = super(FTPFS, self).__getstate__()
+ del state["_thread_ftps"]
+ return state
+
+ def __setstate__(self,state):
+ super(FTPFS, self).__setstate__(state)
+ self._thread_ftps = set()
+ self.ftp
+
+
+ def __str__(self):
+ return '<FTPFS %s>' % self.host
+
+ def __unicode__(self):
+ return u'<FTPFS %s>' % self.host
+
+ @convert_os_errors
+ def _translate_exception(self, path, exception):
+
+        """ Translates exceptions that may be thrown by the ftp code into
+ FS exceptions
+
+ TODO: Flesh this out with more specific exceptions
+
+ """
+
+ if isinstance(exception, socket_error):
+ raise RemoteConnectionError(str(exception), details=exception)
+
+ elif isinstance(exception, error_temp):
+ code, message = str(exception).split(' ', 1)
+ raise RemoteConnectionError(str(exception), path=path, msg="FTP error: %s (see details)" % str(exception), details=exception)
+
+ elif isinstance(exception, error_perm):
+ code, message = str(exception).split(' ', 1)
+ code = int(code)
+ if code == 550:
+ raise ResourceNotFoundError(path)
+ raise PermissionDeniedError(str(exception), path=path, msg="FTP error: %s (see details)" % str(exception), details=exception)
+
+
+ raise exception
+
+ @ftperrors
+ @synchronize
+ def close(self):
+ for ftp in self._thread_ftps:
+ ftp.close()
+ self.closed = True
+
+ @ftperrors
+ @synchronize
+ def open(self, path, mode='r'):
+ if 'r' in mode:
+ if not self.isfile(path):
+ raise ResourceNotFoundError(path)
+ ftp = self._open_ftp()
+ f = _FTPFile(self, ftp, path, mode)
+ return f
+
+ @synchronize
+ def _readdir(self, path):
+
+ if self.use_dircache:
+ cached_dirlist = self._dircache.get(path)
+ if cached_dirlist is not None:
+ return cached_dirlist
+ dirlist = {}
+
+ parser = FTPListDataParser()
+
+ def on_line(line):
+ #print repr(line)
+ if not isinstance(line, unicode):
+ line = line.decode('utf-8')
+ info = parser.parse_line(line)
+ if info:
+ info = info.__dict__
+ dirlist[info['name']] = info
+
+ try:
+ self.ftp.dir(_encode(path), on_line)
+ except error_reply:
+ pass
+ self._dircache[path] = dirlist
+
+ return dirlist
+
+ @synchronize
+ def clear_dircache(self, path=None):
+ """
+ Clear cached directory information.
+
+        :param path: Path of directory to clear cache for, or all directories if
+ None (the default)
+
+ """
+ if path is None:
+ self._dircache.clear()
+ if path in self._dircache:
+ del self._dircache[path]
+
+ @synchronize
+ @ftperrors
+ def _check_path(self, path, ignore_missing=False):
+ base, fname = pathsplit(abspath(path))
+ dirlist = self._readdir(base)
+ if fname and fname not in dirlist:
+ raise ResourceNotFoundError(path)
+ return dirlist, fname
+
+ def _get_dirlist(self, path):
+ base, fname = pathsplit(abspath(path))
+ dirlist = self._readdir(base)
+ return dirlist, fname
+
+ @synchronize
+ @ftperrors
+ def exists(self, path):
+ if path in ('', '/'):
+ return True
+ dirlist, fname = self._get_dirlist(path)
+ return fname in dirlist
+
+ @synchronize
+ @ftperrors
+ def isdir(self, path):
+ if path in ('', '/'):
+ return True
+ dirlist, fname = self._get_dirlist(path)
+ info = dirlist.get(fname)
+ if info is None:
+ return False
+ return info['try_cwd']
+
+ @synchronize
+ @ftperrors
+ def isfile(self, path):
+ if path in ('', '/'):
+ return False
+ dirlist, fname = self._get_dirlist(path)
+ info = dirlist.get(fname)
+ if info is None:
+ return False
+ return not info['try_cwd']
+
+ @ftperrors
+ @synchronize
+ def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
+ path = normpath(path)
+ if not self.exists(path):
+ raise ResourceNotFoundError(path)
+ if not self.isdir(path):
+ raise ResourceInvalidError(path)
+ paths = self._readdir(path).keys()
+
+ return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
+
+
+ @ftperrors
+ @synchronize
+ def makedir(self, path, recursive=False, allow_recreate=False):
+ if path in ('', '/'):
+ return
+ def checkdir(path):
+ try:
+ self.ftp.mkd(_encode(path))
+ except error_reply:
+ return
+ except error_perm, e:
+ if recursive or allow_recreate:
+ return
+ if str(e).split(' ', 1)[0]=='550':
+ raise DestinationExistsError(path)
+ else:
+ raise
+ if recursive:
+ for p in recursepath(path):
+ checkdir(p)
+ else:
+ base, dirname = pathsplit(path)
+ if not self.exists(base):
+ raise ParentDirectoryMissingError(path)
+
+ if not allow_recreate:
+ if self.exists(path):
+ if self.isfile(path):
+ raise ResourceInvalidError(path)
+ raise DestinationExistsError(path)
+ checkdir(path)
+
+
+ @ftperrors
+ @synchronize
+ def remove(self, path):
+ if not self.exists(path):
+ raise ResourceNotFoundError(path)
+ if not self.isfile(path):
+ raise ResourceInvalidError(path)
+ self.ftp.delete(_encode(path))
+
+ @ftperrors
+ @synchronize
+ def removedir(self, path, recursive=False, force=False):
+ if not self.exists(path):
+ raise ResourceNotFoundError(path)
+ if self.isfile(path):
+ raise ResourceInvalidError(path)
+
+ if not force:
+ for checkpath in self.listdir(path):
+ raise DirectoryNotEmptyError(path)
+ try:
+ if force:
+ for rpath in self.listdir(path, full=True):
+ try:
+ if self.isfile(rpath):
+ self.remove(rpath)
+ elif self.isdir(rpath):
+ self.removedir(rpath, force=force)
+ except FSError:
+ pass
+ self.ftp.rmd(_encode(path))
+ except error_reply:
+ pass
+ if recursive:
+ try:
+ self.removedir(dirname(path), recursive=True)
+ except DirectoryNotEmptyError:
+ pass
+
+ @ftperrors
+ @synchronize
+ def rename(self, src, dst):
+ try:
+ self.ftp.rename(_encode(src), _encode(dst))
+ except error_reply:
+ pass
+
+ @ftperrors
+ @synchronize
+ def getinfo(self, path):
+ dirlist, fname = self._check_path(path)
+ if not fname:
+ return {}
+ info = dirlist[fname].copy()
+ info['modified_time'] = datetime.datetime.fromtimestamp(info['mtime'])
+ info['created_time'] = info['modified_time']
+ return info
+
+ @ftperrors
+ @synchronize
+ def getsize(self, path):
+ self.ftp.sendcmd('TYPE I')
+ size = self.ftp.size(_encode(path))
+ if size is None:
+ dirlist, fname = self._check_path(path)
+ size = dirlist[fname].get('size')
+ if size is None:
+ raise OperationFailedError('getsize', path)
+ return size
+
+ @ftperrors
+ @synchronize
+ def desc(self, path):
+ dirlist, fname = self._check_path(path)
+ if fname not in dirlist:
+ raise ResourceNotFoundError(path)
+ return dirlist[fname].get('raw_line', 'No description available')
+
+ @ftperrors
+ @synchronize
+ def move(self, src, dst, overwrite=False, chunk_size=16384):
+
+ if not overwrite and self.exists(dst):
+ raise DestinationExistsError(dst)
+ try:
+ self.rename(src, dst)
+ except error_reply:
+ pass
+ except:
+ self.copy(src, dst)
+ self.remove(src)
+
+
+if __name__ == "__main__":
+
+ ftp_fs = FTPFS('ftp.ncsa.uiuc.edu')
+ #from fs.browsewin import browse
+ #browse(ftp_fs)
+
+ ftp_fs = FTPFS('127.0.0.1', 'user', '12345', dircache=True)
+ #f = ftp_fs.open('testout.txt', 'w')
+ #f.write("Testing writing to an ftp file!")
+ #f.write("\nHai!")
+ #f.close()
+
+ #ftp_fs.createfile(u"\N{GREEK CAPITAL LETTER KAPPA}", 'unicode!')
+
+ #kappa = u"\N{GREEK CAPITAL LETTER KAPPA}"
+ #ftp_fs.makedir(kappa)
+
+ #print repr(ftp_fs.listdir())
+
+ #print repr(ftp_fs.listdir())
+
+ #ftp_fs.makedir('a/b/c/d', recursive=True)
+ #print ftp_fs.getsize('/testout.txt')
+
+
+ #print f.read()
+ #for p in ftp_fs:
+ # print p
+
+ #from fs.utils import print_fs
+ #print_fs(ftp_fs)
+
+ #print ftp_fs.getsize('test.txt')
+
+ from fs.browsewin import browse
+ browse(ftp_fs)
+ \ No newline at end of file
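
For orientation, a short sketch of the two public entry points added in this file. The list parser can be exercised offline (the sample line is taken from the comments in _parse_unix_style); the FTPFS part needs a reachable server, and the host and credentials shown are placeholders:

    from fs.ftpfs import FTPFS, parse_ftp_list_line

    # Parse a single line of LIST output.
    info = parse_ftp_list_line(
        "-rw-r--r--   1 root     other        531 Jan 29 03:26 README")
    print info.name, info.size, info.try_retr    # README 531 True

    # dircache speeds up isdir/isfile at the cost of not seeing remote
    # changes until clear_dircache() is called.
    ftp_fs = FTPFS('ftp.example.org', 'anonymous', 'anon@example.org', dircache=True)
    for path in ftp_fs.listdir('/'):
        print path
    ftp_fs.close()
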
diff --git a/fs/memoryfs.py b/fs/memoryfs.py
index 22f5059..36d997e 100644
--- a/fs/memoryfs.py
+++ b/fs/memoryfs.py
@@ -1,15 +1,18 @@
#!/usr/bin/env python
"""
+fs.memoryfs
+===========
- fs.memoryfs: A filesystem that exists only in memory
+A Filesystem that exists in memory only.
-Obviously that makes this particular filesystem very fast...
+File objects returned by a MemoryFS use StringIO objects for storage.
"""
import datetime
from fs.path import iteratepath
from fs.base import *
+from fs import _thread_syncronize_default
try:
from cStringIO import StringIO
@@ -166,12 +169,18 @@ class DirEntry(object):
class MemoryFS(FS):
+
+ """ An in-memory filesystem.
+
+    MemoryFS objects are very fast, but non-permanent. They are useful for creating a directory structure prior to writing it somewhere permanent.
+
+ """
def _make_dir_entry(self, *args, **kwargs):
return self.dir_entry_factory(*args, **kwargs)
def __init__(self, file_factory=None):
- FS.__init__(self, thread_synchronize=True)
+ FS.__init__(self, thread_synchronize=_thread_syncronize_default)
self.dir_entry_factory = DirEntry
self.file_factory = file_factory or MemoryFile
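
A minimal sketch of the intended MemoryFS workflow: build a directory structure in memory, then copy it somewhere permanent later (createfile is the same convenience method the test suite uses):

    from fs.memoryfs import MemoryFS

    mem = MemoryFS()
    mem.makedir('projects')
    mem.createfile('projects/readme.txt', 'scratch data held in a StringIO buffer')
    print mem.listdir('projects')    # the new file shows up in the listing
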
diff --git a/fs/mountfs.py b/fs/mountfs.py
index f9e27ee..e979700 100644
--- a/fs/mountfs.py
+++ b/fs/mountfs.py
@@ -1,7 +1,14 @@
-#!/usr/bin/env python
+"""
+fs.mountfs
+==========
+
+Contains the MountFS class, a virtual filesystem which can have other filesystems linked in as branch directories, much like a symlink in Linux.
+
+"""
from fs.base import *
from fs.objecttree import ObjectTree
+from fs import _thread_syncronize_default
class DirMount(object):
@@ -27,7 +34,7 @@ class MountFS(FS):
DirMount = DirMount
FileMount = FileMount
- def __init__(self, thread_synchronize=True):
+ def __init__(self, thread_synchronize=_thread_syncronize_default):
FS.__init__(self, thread_synchronize=thread_synchronize)
self.mount_tree = ObjectTree()
@@ -223,10 +230,10 @@ class MountFS(FS):
@synchronize
def mountdir(self, path, fs):
- """Mounts a directory on a given path.
+ """Mounts a host FS object on a given path.
- path -- A path within the MountFS
- fs -- A filesystem object to mount
+ :param path: A path within the MountFS
+ :param fs: A filesystem object to mount
"""
path = normpath(path)
@@ -235,11 +242,17 @@ class MountFS(FS):
@synchronize
def mountfile(self, path, open_callable=None, info_callable=None):
+ """Mounts a single file path. """
path = normpath(path)
        self.mount_tree[path] = MountFS.FileMount(path, open_callable, info_callable)
@synchronize
- def unmount(self,path):
+ def unmount(self, path):
+        """Unmounts a path.
+
+ :param path: Path to unmount
+
+ """
path = normpath(path)
del self.mount_tree[path]
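
A hedged example of branching two filesystems into a single MountFS, following the mountdir docstring above (both member filesystems here are MemoryFS instances purely for illustration):

    from fs.mountfs import MountFS
    from fs.memoryfs import MemoryFS

    combined = MountFS()
    combined.mountdir('logs', MemoryFS())    # mount a host FS under /logs
    combined.mountdir('data', MemoryFS())    # and another under /data

    # Operations on paths below a mount point are delegated to the mounted FS.
    combined.createfile('logs/today.log', 'written through the mount')
    print combined.listdir('/')              # 'logs' and 'data' appear as directories
    combined.unmount('data')
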
diff --git a/fs/multifs.py b/fs/multifs.py
index 90c2abe..6abc28d 100644
--- a/fs/multifs.py
+++ b/fs/multifs.py
@@ -1,7 +1,11 @@
-#!/usr/in/env python
+"""
+fs.multifs
+==========
+"""
from fs.base import FS, FSError
from fs.path import *
+from fs import _thread_syncronize_default
class MultiFS(FS):
@@ -14,7 +18,7 @@ class MultiFS(FS):
"""
def __init__(self):
- FS.__init__(self, thread_synchronize=True)
+ FS.__init__(self, thread_synchronize=_thread_syncronize_default)
self.fs_sequence = []
self.fs_lookup = {}
@@ -26,15 +30,15 @@ class MultiFS(FS):
__repr__ = __str__
def __unicode__(self):
- return unicode(self.__str__())
+ return u"<MultiFS: %s>" % ", ".join(unicode(fs) for fs in self.fs_sequence)
@synchronize
def addfs(self, name, fs):
"""Adds a filesystem to the MultiFS.
- name -- A unique name to refer to the filesystem being added
- fs -- The filesystem to add
+ :param name: A unique name to refer to the filesystem being added
+ :param fs: The filesystem to add
"""
if name in self.fs_lookup:
@@ -47,7 +51,7 @@ class MultiFS(FS):
def removefs(self, name):
"""Removes a filesystem from the sequence.
- name -- The name of the filesystem, as used in addfs
+ :param name: The name of the filesystem, as used in addfs
"""
if name not in self.fs_lookup:
@@ -75,7 +79,7 @@ class MultiFS(FS):
"""Retrieves the filesystem that a given path would delegate to.
Returns a tuple of the filesystem's name and the filesystem object itself.
- path -- A path in MultiFS
+ :param path: A path in MultiFS
"""
for fs in self:
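
A small sketch of composing filesystems with the addfs/removefs API documented above; the member filesystems are MemoryFS instances chosen only to keep the example self-contained:

    from fs.multifs import MultiFS
    from fs.memoryfs import MemoryFS

    overlay = MultiFS()
    first = MemoryFS()
    second = MemoryFS()
    overlay.addfs('first', first)      # names must be unique
    overlay.addfs('second', second)

    first.createfile('config.ini', 'settings')
    # Lookups are delegated to whichever member filesystem holds the path.
    print overlay.exists('config.ini')     # True
    overlay.removefs('second')
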
diff --git a/fs/objecttree.py b/fs/objecttree.py
index e073298..2f04d90 100644
--- a/fs/objecttree.py
+++ b/fs/objecttree.py
@@ -94,7 +94,7 @@ class ObjectTree(object):
return self.root.keys()
def iterkeys(self):
- return self.root.keys()
+ return self.root.iterkeys()
def items(self):
return self.root.items()
diff --git a/fs/osfs.py b/fs/osfs.py
index a2746c5..08b6ff1 100644
--- a/fs/osfs.py
+++ b/fs/osfs.py
@@ -1,4 +1,17 @@
-#!/usr/bin/env python
+"""
+fs.osfs
+=======
+
+Exposes the OS Filesystem as an FS object.
+
+For example, to print all the files and directories in the OS root::
+
+ >>> from fs.osfs import OSFS
+ >>> home_fs = OSFS('/')
+ >>> print home_fs.listdir()
+
+"""
+
import os
import sys
@@ -6,6 +19,7 @@ import errno
from fs.base import *
from fs.path import *
+from fs import _thread_syncronize_default
try:
import xattr
@@ -27,7 +41,18 @@ class OSFS(FS):
methods in the os and os.path modules.
"""
- def __init__(self, root_path, dir_mode=0700, thread_synchronize=True, encoding=None):
+ def __init__(self, root_path, dir_mode=0700, thread_synchronize=_thread_syncronize_default, encoding=None):
+
+ """
+ Creates an FS object that represents the OS Filesystem under a given root path
+
+ :param root_path: The root OS path
+        :param dir_mode: The mode to use when creating new directories
+        :param thread_synchronize: If True, this object will be thread-safe by use of a threading.Lock object
+ :param encoding: The encoding method for path strings
+
+ """
+
FS.__init__(self, thread_synchronize=thread_synchronize)
self.encoding = encoding
root_path = os.path.expanduser(os.path.expandvars(root_path))
@@ -45,6 +70,9 @@ class OSFS(FS):
def __str__(self):
return "<OSFS: %s>" % self.root_path
+
+ def __unicode__(self):
+ return u"<OSFS: %s>" % self.root_path
def getsyspath(self, path, allow_none=False):
path = relpath(normpath(path)).replace("/",os.sep)
@@ -113,17 +141,17 @@ class OSFS(FS):
raise
@convert_os_errors
- def removedir(self, path, recursive=False,force=False):
+ def removedir(self, path, recursive=False, force=False):
sys_path = self.getsyspath(path)
if force:
- for path2 in self.listdir(path,absolute=True,files_only=True):
+ for path2 in self.listdir(path, absolute=True, files_only=True):
try:
self.remove(path2)
except ResourceNotFoundError:
pass
- for path2 in self.listdir(path,absolute=True,dirs_only=True):
+ for path2 in self.listdir(path, absolute=True, dirs_only=True):
try:
- self.removedir(path2,force=True)
+ self.removedir(path2, force=True)
except ResourceNotFoundError:
pass
# Don't remove the root directory of this FS
diff --git a/fs/path.py b/fs/path.py
index 4cdb873..aba1722 100644
--- a/fs/path.py
+++ b/fs/path.py
@@ -1,6 +1,5 @@
"""
-
- fs.path: useful functions for FS path manipulation.
+Useful functions for FS path manipulation.
This is broadly similar to the standard 'os.path' module but works with
paths in the canonical format expected by all FS objects (forward-slash separated,
@@ -16,6 +15,8 @@ def normpath(path):
    duplicate slashes, replaces backslashes with forward slashes, and generally
    tries very hard to return a new path string in the canonical FS format.
If the path is invalid, ValueError will be raised.
+
+ :param path: Path to normalize
>>> normpath(r"foo\\bar\\baz")
'foo/bar/baz'
@@ -47,20 +48,41 @@ def normpath(path):
if not components:
components = [""]
components.insert(0,"")
- return "/".join(components)
+ if isinstance(path, unicode):
+ return u"/".join(components)
+ else:
+ return '/'.join(components)
def iteratepath(path, numsplits=None):
- """Iterate over the individual components of a path."""
+ """Iterate over the individual components of a path.
+
+ :param path: Path to iterate over
+    :param numsplits: Maximum number of splits
+
+ """
path = relpath(normpath(path))
if not path:
return []
if numsplits == None:
- return path.split('/')
+ return map(None, path.split('/'))
else:
- return path.split('/', numsplits)
-
-
+ return map(None, path.split('/', numsplits))
+
+def recursepath(path, reverse=False):
+ """Iterate from root to path, returning intermediate paths"""
+
+ paths = list(iteratepath(path))
+
+ if reverse:
+ paths = []
+ while path.lstrip('/'):
+ paths.append(path)
+ path = dirname(path)
+ return paths
+ else:
+ return [u'/'.join(paths[:i+1]) for i in xrange(len(paths))]
+
def abspath(path):
"""Convert the given path to an absolute path.
@@ -69,9 +91,9 @@ def abspath(path):
"""
if not path:
- return "/"
- if path[0] != "/":
- return "/" + path
+ return u'/'
+ if not path.startswith('/'):
+ return u'/' + path
return path
@@ -80,6 +102,8 @@ def relpath(path):
This is the inverse of abspath(), stripping a leading '/' from the
path if it is present.
+
+ :param path: Path to adjust
"""
while path and path[0] == "/":
@@ -89,6 +113,8 @@ def relpath(path):
def pathjoin(*paths):
"""Joins any number of paths together, returning a new path string.
+
+ :param paths: Paths to join are given in positional arguments
>>> pathjoin('foo', 'bar', 'baz')
'foo/bar/baz'
@@ -111,7 +137,7 @@ def pathjoin(*paths):
path = normpath("/".join(relpaths))
if absolute and not path.startswith("/"):
- path = "/" + path
+ path = u"/" + path
return path
# Allow pathjoin() to be used as fs.path.join()
@@ -123,6 +149,8 @@ def pathsplit(path):
This function splits a path into a pair (head,tail) where 'tail' is the
    last pathname component and 'head' is all preceding components.
+
+ :param path: Path to split
>>> pathsplit("foo/bar")
('foo', 'bar')
@@ -133,7 +161,7 @@ def pathsplit(path):
"""
split = normpath(path).rsplit('/', 1)
if len(split) == 1:
- return ('', split[0])
+ return (u'', split[0])
return tuple(split)
# Allow pathsplit() to be used as fs.path.split()
@@ -145,6 +173,8 @@ def dirname(path):
This is always equivalent to the 'head' component of the value returned
by pathsplit(path).
+
+ :param path: A FS path
>>> dirname('foo/bar/baz')
'foo/bar'
@@ -158,6 +188,8 @@ def basename(path):
    This is always equivalent to the 'tail' component of the value returned
by pathsplit(path).
+
+ :param path: A FS path
>>> basename('foo/bar/baz')
'baz'
@@ -168,6 +200,9 @@ def basename(path):
def issamedir(path1, path2):
"""Return true if two paths reference a resource in the same directory.
+
+ :param path1: An FS path
+ :param path2: An FS path
>>> issamedir("foo/bar/baz.txt", "foo/bar/spam.txt")
True
@@ -180,7 +215,10 @@ def issamedir(path1, path2):
def isprefix(path1, path2):
"""Return true is path1 is a prefix of path2.
-
+
+ :param path1: An FS path
+ :param path2: An FS path
+
>>> isprefix("foo/bar", "foo/bar/spam.txt")
True
>>> isprefix("foo/bar/", "foo/bar")
@@ -204,6 +242,8 @@ def isprefix(path1, path2):
def forcedir(path):
"""Ensure the path ends with a trailing /
+
+ :param path: An FS path
>>> forcedir("foo/bar")
'foo/bar/'
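
The new recursepath() helper has no doctest yet; a quick illustration of both directions, with the output traced from the implementation above shown as comments:

    from fs.path import recursepath

    print recursepath('a/b/c')                  # [u'a', u'a/b', u'a/b/c']
    print recursepath('a/b/c', reverse=True)    # ['a/b/c', 'a/b', 'a']

This is what FTPFS.makedir(recursive=True) relies on: it walks the intermediate paths from the top down and issues an MKD for each one.
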
diff --git a/fs/remote.py b/fs/remote.py
index 60600b9..c22e2a5 100644
--- a/fs/remote.py
+++ b/fs/remote.py
@@ -1,19 +1,22 @@
"""
- fs.remote: utilities for interfacing with remote filesystems
+fs.remote
+=========
+
+Utilities for interfacing with remote filesystems
This module provides reusable utility functions that can be used to construct
FS subclasses interfacing with a remote filesystem. These include:
- RemoteFileBuffer: a file-like object that locally buffers the contents
+ * RemoteFileBuffer: a file-like object that locally buffers the contents
of a remote file, writing them back on flush() or close().
- ConnectionManagerFS: a WrapFS subclass that tracks the connection state
+ * ConnectionManagerFS: a WrapFS subclass that tracks the connection state
of a remote FS, and allows client code to wait for
a connection to be re-established.
- CacheFS: a WrapFS subclass that caces file and directory meta-data in
+  * CacheFS: a WrapFS subclass that caches file and directory meta-data in
memory, to speed access to a remote FS.
"""
@@ -44,7 +47,7 @@ class RemoteFileBuffer(object):
The intended use-case is for a remote filesystem (e.g. S3FS) to return
instances of this class from its open() method, and to provide the
file-uploading logic in its setcontents() method, as in the following
- pseudo-code:
+ pseudo-code::
def open(self,path,mode="r"):
rf = self._get_remote_file(path)
@@ -176,7 +179,7 @@ class ConnectionManagerFS(LazyFS):
Since some remote FS classes can raise RemoteConnectionError during
initialisation, this class makes use of lazy initialization. The
remote FS can be specified as an FS instance, an FS subclass, or a
- (class,args) or (class,args,kwds) tuple. For example:
+ (class,args) or (class,args,kwds) tuple. For example::
>>> fs = ConnectionManagerFS(MyRemoteFS("http://www.example.com/"))
Traceback (most recent call last):
diff --git a/fs/s3fs.py b/fs/s3fs.py
index 0ccd4fe..ea63aa0 100644
--- a/fs/s3fs.py
+++ b/fs/s3fs.py
@@ -1,6 +1,8 @@
"""
+fs.s3fs
+=======
- fs.s3fs: FS subclass accessing files in Amazon S3
+FS subclass accessing files in Amazon S3
This module provides the class 'S3FS', which implements the FS filesystem
interface for objects stored in Amazon Simple Storage Service (S3).
diff --git a/fs/sftpfs.py b/fs/sftpfs.py
index 14cded2..e294289 100644
--- a/fs/sftpfs.py
+++ b/fs/sftpfs.py
@@ -1,6 +1,8 @@
"""
+fs.sftpfs
+=========
- fs.sftpfs: Filesystem accesing an SFTP server (via paramiko)
+Filesystem accessing an SFTP server (via paramiko)
"""
@@ -64,6 +66,8 @@ class SFTPFS(FS):
self._owns_transport = False
self._credentials = credentials
self._tlocal = thread_local()
+ self._transport = None
+ self._client = None
if isinstance(connection,paramiko.Channel):
self._transport = None
self._client = paramiko.SFTPClient(connection)
diff --git a/fs/tempfs.py b/fs/tempfs.py
index fc3e7c4..5c35043 100644
--- a/fs/tempfs.py
+++ b/fs/tempfs.py
@@ -1,4 +1,10 @@
-#!/usr/bin/env python
+"""
+fs.tempfs
+=========
+
+Make a temporary file system that exists in a folder provided by the OS. All files contained in a TempFS are removed when the `close` method is called (or when the TempFS is cleaned up by Python).
+
+"""
import os
import time
@@ -6,13 +12,14 @@ import tempfile
from fs.osfs import OSFS
from fs.errors import *
+from fs import _thread_syncronize_default
class TempFS(OSFS):
"""Create a Filesystem in a tempory directory (with tempfile.mkdtemp),
and removes it when the TempFS object is cleaned up."""
- def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=True):
+ def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=_thread_syncronize_default):
"""Creates a temporary Filesystem
identifier -- A string that is included in the name of the temporary directory,
@@ -29,13 +36,14 @@ class TempFS(OSFS):
__repr__ = __str__
def __unicode__(self):
- return unicode(self.__str__())
+ return u'<TempFS: %s>' % self._temp_dir
def close(self):
"""Removes the temporary directory.
This will be called automatically when the object is cleaned up by
- Python. Note that once this method has been called, the FS object may
+ Python, although it is advisable to call it manually.
+ Note that once this method has been called, the FS object may
no longer be used.
"""
# Depending on how resources are freed by the OS, there could
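
A brief sketch of the TempFS lifecycle described in the docstring above; the identifier string is arbitrary:

    from fs.tempfs import TempFS

    temp = TempFS(identifier='example')
    temp.createfile('notes.txt', 'written to a real OS temp directory')
    print temp.getsyspath('notes.txt')    # a real path under the OS temp dir
    temp.close()                          # removes the temporary directory
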
diff --git a/fs/tests/__init__.py b/fs/tests/__init__.py
index 17eda8f..0ef2881 100644
--- a/fs/tests/__init__.py
+++ b/fs/tests/__init__.py
@@ -9,7 +9,7 @@
# be captured by nose and reported appropriately
import sys
import logging
-logging.basicConfig(level=logging.ERROR,stream=sys.stdout)
+logging.basicConfig(level=logging.ERROR, stream=sys.stdout)
from fs.base import *
@@ -297,7 +297,7 @@ class FSTestCases:
check = self.check
contents = "If the implementation is hard to explain, it's a bad idea."
def makefile(path):
- self.fs.createfile(path,contents)
+ self.fs.createfile(path, contents)
self.fs.makedir("a")
self.fs.makedir("b")
@@ -700,7 +700,7 @@ class ThreadingTestCases:
self._runThreads(makedir,removedir)
if self.fs.isdir("testdir"):
self.assertEquals(len(errors),1)
- self.assertFalse(isinstance(errors[0],DestinationExistsError))
+ self.assertFalse(isinstance(errors[0],DestinationExistsError))
self.fs.removedir("testdir")
else:
self.assertEquals(len(errors),0)
diff --git a/fs/tests/ftpserver.py b/fs/tests/ftpserver.py
new file mode 100644
index 0000000..01492aa
--- /dev/null
+++ b/fs/tests/ftpserver.py
@@ -0,0 +1,19 @@
+try:
+ from pyftpdlib import ftpserver
+except ImportError:
+ print "Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>"
+ raise
+
+import sys
+
+authorizer = ftpserver.DummyAuthorizer()
+authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw")
+authorizer.add_anonymous(sys.argv[1])
+
+handler = ftpserver.FTPHandler
+handler.authorizer = authorizer
+address = ("127.0.0.1", 21)
+
+ftpd = ftpserver.FTPServer(address, handler)
+ftpd.serve_forever()
+
diff --git a/fs/tests/test_fs.py b/fs/tests/test_fs.py
index 3574a67..47c9a3e 100644
--- a/fs/tests/test_fs.py
+++ b/fs/tests/test_fs.py
@@ -31,7 +31,6 @@ class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
return os.path.exists(os.path.join(self.temp_dir, relpath(p)))
-
class TestSubFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
def setUp(self):
@@ -89,4 +88,18 @@ class TestTempFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
def check(self, p):
td = self.fs._temp_dir
return os.path.exists(os.path.join(td, relpath(p)))
+
+
+from fs import wrapfs
+class TestWrapFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
+
+ def setUp(self):
+ sys.setcheckinterval(1)
+ self.temp_dir = tempfile.mkdtemp(u"fstest")
+ self.fs = wrapfs.WrapFS(osfs.OSFS(self.temp_dir))
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+
+ def check(self, p):
+ return os.path.exists(os.path.join(self.temp_dir, relpath(p)))
diff --git a/fs/tests/test_ftpfs.py b/fs/tests/test_ftpfs.py
new file mode 100644
index 0000000..1e45df5
--- /dev/null
+++ b/fs/tests/test_ftpfs.py
@@ -0,0 +1,78 @@
+
+from fs.tests import FSTestCases, ThreadingTestCases
+
+import unittest
+
+import os
+import sys
+import shutil
+import tempfile
+import subprocess
+import time
+from os.path import abspath
+
+try:
+ from pyftpdlib import ftpserver
+except ImportError:
+ raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
+
+from fs.path import *
+
+from fs import ftpfs
+
+ftp_port = 30000
+class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
+
+ def setUp(self):
+ global ftp_port
+ #ftp_port += 1
+ use_port = str(ftp_port)
+ #ftp_port = 10000
+
+ sys.setcheckinterval(1)
+ self.temp_dir = tempfile.mkdtemp(u"ftpfstests")
+
+ self.ftp_server = subprocess.Popen(['python', abspath(__file__), self.temp_dir, str(use_port)])
+ # Need to sleep to allow ftp server to start
+ time.sleep(.2)
+ self.fs = ftpfs.FTPFS('127.0.0.1', 'user', '12345', port=use_port, timeout=5.0)
+
+
+ def tearDown(self):
+
+ if sys.platform == 'win32':
+ import win32api
+ win32api.TerminateProcess(int(self.ftp_server._handle), -1)
+ else:
+ os.system('kill '+str(self.ftp_server.pid))
+ shutil.rmtree(self.temp_dir)
+
+ def check(self, p):
+ return os.path.exists(os.path.join(self.temp_dir, relpath(p)))
+
+
+if __name__ == "__main__":
+
+ # Run an ftp server that exposes a given directory
+ import sys
+ authorizer = ftpserver.DummyAuthorizer()
+ authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw")
+ authorizer.add_anonymous(sys.argv[1])
+
+ def nolog(*args):
+ pass
+ ftpserver.log = nolog
+ ftpserver.logline = nolog
+
+ handler = ftpserver.FTPHandler
+ handler.authorizer = authorizer
+ address = ("127.0.0.1", int(sys.argv[2]))
+ #print address
+
+ ftpd = ftpserver.FTPServer(address, handler)
+
+ ftpd.serve_forever()
+
+
+
+
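The test above also serves as a recipe for connecting to a plain FTP server. A hedged sketch of the client side, reusing the host, credentials and port from the test fixture (FTPFS may accept additional keyword arguments not shown here):

    from fs import ftpfs

    # Values mirror the test fixture above: a pyftpdlib server on localhost.
    ftp_fs = ftpfs.FTPFS('127.0.0.1', 'user', '12345', port=30000, timeout=5.0)
    for path in ftp_fs.listdir('/'):
        print path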
diff --git a/fs/utils.py b/fs/utils.py
index ead07ce..9c66f0e 100644
--- a/fs/utils.py
+++ b/fs/utils.py
@@ -1,33 +1,44 @@
"""
- fs.utils: high-level utility functions for working with FS objects.
+The `utils` module provides a number of utility functions that don't belong in the Filesystem interface. Generally, the functions in this module work with multiple filesystems, for instance moving and copying files between dissimilar filesystems.
"""
import shutil
from fs.mountfs import MountFS
-from fs.path import pathjoin
+from fs.path import pathjoin, pathsplit
+from fs.errors import DestinationExistsError
-def copyfile(src_fs, src_path, dst_fs, dst_path, chunk_size=16384):
+
+def copyfile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=16384):
"""Copy a file from one filesystem to another. Will use system copyfile, if both files have a syspath.
Otherwise file will be copied a chunk at a time.
- src_fs -- Source filesystem object
- src_path -- Source path
- dst_fs -- Destination filesystem object
- dst_path -- Destination filesystem object
- chunk_size -- Size of chunks to move if system copyfile is not available (default 16K)
+ :param src_fs: Source filesystem object
+ :param src_path: Source path
+ :param dst_fs: Destination filesystem object
+ :param dst_path: Destination path
+ :param overwrite: If True (the default), the destination file may be overwritten; if False, a DestinationExistsError is raised when the destination already exists
+ :param chunk_size: Size of chunks to move if system copyfile is not available (default 16K)
"""
+
+ # If the src and dst fs objects are the same, then use a direct copy
+ if src_fs is dst_fs:
+ src_fs.copy(src_path, dst_path, overwrite=overwrite)
+ return
+
src_syspath = src_fs.getsyspath(src_path, allow_none=True)
dst_syspath = dst_fs.getsyspath(dst_path, allow_none=True)
+
+ if not overwrite and dst_fs.exists(dst_path):
+ raise DestinationExistsError(dst_path)
# System copy if there are two sys paths
if src_syspath is not None and dst_syspath is not None:
shutil.copyfile(src_syspath, dst_syspath)
return
- src, dst = None
+ src, dst = None, None
try:
# Chunk copy
@@ -47,26 +58,33 @@ def copyfile(src_fs, src_path, dst_fs, dst_path, chunk_size=16384):
dst.close()
-def movefile(src_fs, src_path, dst_fs, dst_path, chunk_size=16384):
+def movefile(src_fs, src_path, dst_fs, dst_path, overwrite=True, chunk_size=16384):
"""Move a file from one filesystem to another. Will use system copyfile, if both files have a syspath.
Otherwise file will be copied a chunk at a time.
- src_fs -- Source filesystem object
- src_path -- Source path
- dst_fs -- Destination filesystem object
- dst_path -- Destination filesystem object
- chunk_size -- Size of chunks to move if system copyfile is not available (default 16K)
+ :param src_fs: Source filesystem object
+ :param src_path: Source path
+ :param dst_fs: Destination filesystem object
+ :param dst_path: Destination path
+ :param overwrite: If True (the default), the destination file may be overwritten; if False, a DestinationExistsError is raised when the destination already exists
+ :param chunk_size: Size of chunks to move if system copyfile is not available (default 16K)
"""
src_syspath = src_fs.getsyspath(src_path, allow_none=True)
dst_syspath = dst_fs.getsyspath(dst_path, allow_none=True)
+ if not overwrite and dst_fs.exists(dst_path):
+ raise DestinationExistsError(dst_path)
+
+ if src_fs is dst_fs:
+ src_fs.move(src_path, dst_path, overwrite=overwrite)
+ return
+
# System copy if there are two sys paths
if src_syspath is not None and dst_syspath is not None:
- shutil.movefile(src_syspath, dst_syspath)
+ shutil.move(src_syspath, dst_syspath)
return
- src, dst = None
+ src, dst = None, None
try:
# Chunk copy
@@ -79,7 +97,7 @@ def movefile(src_fs, src_path, dst_fs, dst_path, chunk_size=16384):
break
dst.write(chunk)
- src_fs.remove(src)
+ src_fs.remove(src_path)
finally:
if src is not None:
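A short sketch of the new overwrite flag in copyfile and movefile (the filesystems and paths below are hypothetical; createfile is the helper used in the test suite):

    from fs.tempfs import TempFS
    from fs.utils import copyfile, movefile
    from fs.errors import DestinationExistsError

    src = TempFS()
    dst = TempFS()
    src.createfile("a.txt", "some data")
    dst.createfile("a.txt", "old data")

    copyfile(src, "a.txt", dst, "a.txt")            # overwrite=True is the default
    try:
        copyfile(src, "a.txt", dst, "a.txt", overwrite=False)
    except DestinationExistsError:
        print "destination already exists"

    movefile(src, "a.txt", dst, "b.txt")            # source file is removed after the move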
@@ -91,10 +109,10 @@ def movefile(src_fs, src_path, dst_fs, dst_path, chunk_size=16384):
def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384):
"""Moves contents of a directory from one filesystem to another.
- fs1 -- Source filesystem, or a tuple of (<filesystem>, <directory path>)
- fs2 -- Destination filesystem, or a tuple of (<filesystem>, <directory path>)
- ignore_errors -- If True, exceptions from file moves are ignored
- chunk_size -- Size of chunks to move if a simple copy is used
+ :param fs1: Source filesystem, or a tuple of (<filesystem>, <directory path>)
+ :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
+ :param ignore_errors: If True, exceptions from file moves are ignored
+ :param chunk_size: Size of chunks to move if a simple copy is used
"""
if isinstance(fs1, tuple):
@@ -102,6 +120,7 @@ def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384):
fs1 = fs1.opendir(dir1)
if isinstance(fs2, tuple):
fs2, dir2 = fs2
+ fs2.makedir(dir2, allow_recreate=True)
fs2 = fs2.opendir(dir2)
mount_fs = MountFS()
@@ -109,7 +128,7 @@ def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384):
mount_fs.mount('dst', fs2)
mount_fs.movedir('src', 'dst',
- overwrite=overwrite,
+ overwrite=True,
ignore_errors=ignore_errors,
chunk_size=chunk_size)
@@ -117,10 +136,10 @@ def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384):
def copydir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384):
"""Copies contents of a directory from one filesystem to another.
- fs1 -- Source filesystem, or a tuple of (<filesystem>, <directory path>)
- fs2 -- Destination filesystem, or a tuple of (<filesystem>, <directory path>)
- ignore_errors -- If True, exceptions from file moves are ignored
- chunk_size -- Size of chunks to move if a simple copy is used
+ :param fs1: Source filesystem, or a tuple of (<filesystem>, <directory path>)
+ :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
+ :param ignore_errors: If True, exceptions from file moves are ignored
+ :param chunk_size: Size of chunks to move if a simple copy is used
"""
if isinstance(fs1, tuple):
@@ -128,13 +147,14 @@ def copydir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384):
fs1 = fs1.opendir(dir1)
if isinstance(fs2, tuple):
fs2, dir2 = fs2
+ fs2.makedir(dir2, allow_recreate=True)
fs2 = fs2.opendir(dir2)
mount_fs = MountFS()
mount_fs.mount('src', fs1)
mount_fs.mount('dst', fs2)
mount_fs.copydir('src', 'dst',
- overwrite=overwrite,
+ overwrite=True,
ignore_errors=ignore_errors,
chunk_size=chunk_size)
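For illustration, a sketch of the tuple form of copydir, which (as of this change) creates the destination sub-directory before mounting it; the directory names are made up:

    from fs.tempfs import TempFS
    from fs.utils import copydir

    src = TempFS()
    dst = TempFS()
    src.makedir("photos")
    src.createfile("photos/cat.jpg", "not really a jpeg")

    # (<filesystem>, <directory path>) tuples; "backup" is created with
    # allow_recreate=True before the contents of "photos" are copied into it.
    copydir((src, "photos"), (dst, "backup"))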
@@ -142,28 +162,35 @@ def copydir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=16384):
def countbytes(fs):
"""Returns the total number of bytes contained within files in a filesystem.
- fs -- A filesystem object
+ :param fs: A filesystem object
"""
total = sum(fs.getsize(f) for f in fs.walkfiles())
return total
-def find_duplicates(fs, compare_paths=None, quick=False, signature_chunk_size=16*1024, signature_size=10*16*1024):
+def find_duplicates(fs,
+ compare_paths=None,
+ quick=False,
+ signature_chunk_size=16*1024,
+ signature_size=10*16*1024):
"""A generator that yields the paths of duplicate files in an FS object.
Files are considered identical if the contents are the same (dates or
other attributes are not taken into account).
- fs -- A filesystem object
- compare_paths -- An iterable of paths in the FS object, or all files if omited
- quick -- If set to True, the quick method of finding duplicates will be used,
- which can potentially return false positives if the files have the same
- size and start with the same data. Do not use when deleting files!
-
- signature_chunk_size -- The number of bytes to read before generating a
- signature checksum value
- signature_size -- The total number of bytes read to generate a signature
+ :param fs: A filesystem object
+ :param compare_paths: An iterable of paths within the FS object, or all files if omitted
+ :param quick: If set to True, the quick method of finding duplicates will be used, which can potentially return false positives if the files have the same size and start with the same data. Do not use when deleting files!
+ :param signature_chunk_size: The number of bytes to read before generating a signature checksum value
+ :param signature_size: The total number of bytes read to generate a signature
+
+ For example, the following will list all the duplicate .jpg files in "~/Pictures"::
+
+ >>> from fs.utils import find_duplicates
+ >>> from fs.osfs import OSFS
+ >>> fs = OSFS('~/Pictures')
+ >>> for dups in find_duplicates(fs, fs.walkfiles('*.jpg')):
+ ... print list(dups)
"""
@@ -256,11 +283,19 @@ def print_fs(fs, path="/", max_levels=5, indent=' '*2):
"""Prints a filesystem listing to stdout (including sub dirs). Useful as a debugging aid.
Be careful about printing a OSFS, or any other large filesystem.
Without max_levels set, this function will traverse the entire directory tree.
+
+ For example, the following will print a tree of the files under the current working directory::
+
+ >>> from fs.osfs import *
+ >>> from fs.utils import *
+ >>> fs = OSFS('.')
+ >>> print_fs(fs)
+
- fs -- A filesystem object
- path -- Path of root to list (default "/")
- max_levels -- Maximum levels of dirs to list (default 5), set to None for no maximum
- indent -- String to indent each directory level (default two spaces)
+ :param fs: A filesystem object
+ :param path: Path of a directory to list (default "/")
+ :param max_levels: Maximum levels of dirs to list (default 5), set to None for no maximum
+ :param indent: String to indent each directory level (default two spaces)
"""
def print_dir(fs, path, level):
@@ -286,6 +321,7 @@ def print_fs(fs, path="/", max_levels=5, indent=' '*2):
print_dir(fs, path, 0)
+
if __name__ == "__main__":
from osfs import *
fs = OSFS('~/copytest')
diff --git a/fs/wrapfs.py b/fs/wrapfs.py
index 24d51d8..5195c4b 100644
--- a/fs/wrapfs.py
+++ b/fs/wrapfs.py
@@ -1,6 +1,8 @@
"""
+fs.wrapfs
+=========
- fs.wrapfs: class for wrapping an existing FS object with added functionality
+A class for wrapping an existing FS object with additional functionality.
This module provides the class WrapFS, a base class for objects that wrap
another FS object and provide some transformation of its contents. It could
@@ -121,10 +123,10 @@ class WrapFS(FS):
return self.wrapped_fs.hassyspath(self._encode(path))
@rewrite_errors
- def open(self,path,mode="r"):
- (mode,wmode) = self._adjust_mode(mode)
- f = self.wrapped_fs.open(self._encode(path),wmode)
- return self._file_wrap(f,mode)
+ def open(self, path, mode="r", **kwargs):
+ (mode, wmode) = self._adjust_mode(mode)
+ f = self.wrapped_fs.open(self._encode(path), wmode, **kwargs)
+ return self._file_wrap(f, mode)
@rewrite_errors
def exists(self,path):
@@ -509,3 +511,41 @@ class LimitSizeFile(object):
def __iter__(self):
return iter(self.file)
+
+class ReadOnlyFS(WrapFS):
+ """ Makes a FS object read only. Any operation that could potentially modify
+ the underlying file system will throw an UnsupportedError
+
+ Note that this isn't a secure sandbox, untrusted code could work around the
+ read-only restrictions by getting the base class. Its main purpose is to
+ provide a degree of safety if you want to protect an FS object from
+ modification.
+
+ """
+
+ def getsyspath(self, path, allow_none=False):
+ """ Doesn't technically modify the filesystem but could be used to work
+ around read-only restrictions. """
+ if allow_none:
+ return None
+ raise NoSysPathError(path)
+
+ def open(self, path, mode='r', **kwargs):
+ """ Only permit read access """
+ if 'w' in mode or 'a' in mode or '+' in mode:
+ raise UnsupportedError('write')
+ return super(ReadOnlyFS, self).open(path, mode, **kwargs)
+
+ def _no_can_do(self, *args, **kwargs):
+ """ Replacement method for methods that can modify the file system """
+ raise UnsupportedError('write')
+
+ move = _no_can_do
+ movedir = _no_can_do
+ copy = _no_can_do
+ copydir = _no_can_do
+ makedir = _no_can_do
+ rename = _no_can_do
+ setxattr = _no_can_do
+ delxattr = _no_can_do
+ remove = _no_can_do
+ removedir = _no_can_do
+ \ No newline at end of file
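A minimal sketch of ReadOnlyFS in use (the wrapped TempFS is arbitrary; any FS object would do):

    from fs.tempfs import TempFS
    from fs.wrapfs import ReadOnlyFS
    from fs.errors import UnsupportedError

    safe_fs = ReadOnlyFS(TempFS())
    print safe_fs.listdir('/')          # read operations pass through to the wrapped FS
    try:
        safe_fs.makedir("newdir")       # any modifying operation raises UnsupportedError
    except UnsupportedError:
        print "read only!"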
diff --git a/fs/xattrs.py b/fs/xattrs.py
index 5921ad2..5ba0329 100644
--- a/fs/xattrs.py
+++ b/fs/xattrs.py
@@ -1,25 +1,23 @@
"""
+fs.xattrs
+=========
- fs.xattrs: extended attribute support for FS
+Extended attribute support for FS
This module defines a standard interface for FS subclasses that want to
support extended file attributes, and a WrapFS subclass that can simulate
-extended attributes on top of an ordinery FS.
+extended attributes on top of an ordinary FS.
FS instances offering extended attribute support must provide the following
methods:
- getxattr(path,name) - get the named attribute for the given path,
- or None if it does not exist
- setxattr(path,name,value) - set the named attribute for the given path
- to the given value
- delxattr(path,name) - delete the named attribute for the given path,
- raising KeyError if it does not exist
- listxattrs(path) - iterator over all stored attribute names for
- the given path
+ * getxattr(path,name) Get the named attribute for the given path, or None if it does not exist
+ * setxattr(path,name,value) Set the named attribute for the given path to the given value
+ * delxattr(path,name) Delete the named attribute for the given path, raising KeyError if it does not exist
+ * listxattrs(path) Iterate over all stored attribute names for the given path
If extended attributes are required by FS-consuming code, it should use the
-function 'ensure_xattrs'. This will interrogate an FS object to determine
+function 'ensure_xattrs'. This will interrogate an FS object to determine
if it has native xattr support, and return a wrapped version if it does not.
"""
@@ -41,6 +39,8 @@ def ensure_xattrs(fs):
Given an FS object, this function returns an equivalent FS that has support
for extended attributes. This may be the original object if they are
supported natively, or a wrapper class if they must be simulated.
+
+ :param fs: An FS object that must have xattrs
"""
try:
# This attr doesn't have to exist, None should be returned by default
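A sketch of the intended ensure_xattrs usage (the path and attribute name are invented for illustration):

    from fs.tempfs import TempFS
    from fs.xattrs import ensure_xattrs

    fs = ensure_xattrs(TempFS())            # wrapped if xattrs must be simulated
    fs.createfile("report.txt", "contents")
    fs.setxattr("report.txt", "author", "will")
    print fs.getxattr("report.txt", "author")
    print list(fs.listxattrs("report.txt"))
    fs.delxattr("report.txt", "author")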
diff --git a/fs/zipfs.py b/fs/zipfs.py
index 4ec4126..7508b14 100644
--- a/fs/zipfs.py
+++ b/fs/zipfs.py
@@ -1,4 +1,10 @@
-#!/usr/bin/env python
+"""
+fs.zipfs
+========
+
+An FS object that represents the contents of a Zip file.
+
+"""
from fs.base import *
@@ -52,12 +58,12 @@ class ZipFS(FS):
def __init__(self, zip_file, mode="r", compression="deflated", allowZip64=False, encoding="CP437", thread_synchronize=True):
"""Create a FS that maps on to a zip file.
- zip_file -- A (system) path, or a file-like object
- mode -- Mode to open zip file: 'r' for reading, 'w' for writing or 'a' for appending
- compression -- Can be 'deflated' (default) to compress data or 'stored' to just store date
- allowZip64 -- Set to True to use zip files greater than 2 MB, default is False
- encoding -- The encoding to use for unicode filenames
- thread_synchronize -- Set to True (default) to enable thread-safety
+ :param zip_file: A (system) path, or a file-like object
+ :param mode: Mode to open zip file: 'r' for reading, 'w' for writing or 'a' for appending
+ :param compression: Can be 'deflated' (default) to compress data or 'stored' to just store data
+ :param allowZip64: Set to True to use zip files greater than 2 GB, default is False
+ :param encoding: The encoding to use for unicode filenames
+ :param thread_synchronize: Set to True (default) to enable thread-safety
"""
FS.__init__(self, thread_synchronize=thread_synchronize)
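A rough sketch of writing and then re-reading an archive with ZipFS (the file name is arbitrary; close() is assumed to finalise the zip, as with the other FS objects in this changeset):

    from fs.zipfs import ZipFS

    zip_fs = ZipFS("example.zip", mode="w")
    f = zip_fs.open("readme.txt", "w")
    f.write("packed into a zip")
    f.close()
    zip_fs.close()                          # writes out the archive

    zip_fs = ZipFS("example.zip", mode="r")
    print zip_fs.open("readme.txt").read()
    zip_fs.close()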