author     rfkelly0 <rfkelly0@67cdc799-7952-0410-af00-57a81ceafa0f>  2009-06-15 13:39:13 +0000
committer  rfkelly0 <rfkelly0@67cdc799-7952-0410-af00-57a81ceafa0f>  2009-06-15 13:39:13 +0000
commit     a1f8fab558a48d6670353662943449fa99b2f1b4 (patch)
tree       f38ca1b4afcfa6e3aa3229da35858522296feadd
parent     12931688dbde926badf4226f14eab548cb15e126 (diff)
parent     4b2f35152cd16af3a9cc076e3179607a3439e8b7 (diff)
download   pyfilesystem-a1f8fab558a48d6670353662943449fa99b2f1b4.tar.gz
merge branch "rfk-ideas" into trunk
git-svn-id: http://pyfilesystem.googlecode.com/svn/trunk@173 67cdc799-7952-0410-af00-57a81ceafa0f
-rw-r--r--   AUTHORS                          4
-rw-r--r--   MANIFEST.in                      3
-rw-r--r--   NOTES.txt                       38
-rw-r--r--   fs/__init__.py                  33
-rw-r--r--   fs/base.py                     645
-rw-r--r--   fs/browsewin.py                  5
-rw-r--r--   fs/errors.py                   179
-rw-r--r--   fs/expose/__init__.py            0
-rw-r--r--   fs/expose/fuse/__init__.py     437
-rw-r--r--   fs/expose/fuse/fuse_ctypes.py  603
-rw-r--r--   fs/expose/sftp.py              278
-rw-r--r--   fs/expose/xmlrpc.py            115
-rwxr-xr-x   fs/fuseserver.py               338
-rw-r--r--   fs/helpers.py                  172
-rw-r--r--   fs/memoryfs.py                 577
-rw-r--r--   fs/mountfs.py                  121
-rw-r--r--   fs/multifs.py                   59
-rw-r--r--   fs/objecttree.py                16
-rw-r--r--   fs/osfs.py                     262
-rw-r--r--   fs/path.py                     204
-rw-r--r--   fs/rpcfs.py                    185
-rw-r--r--   fs/s3fs.py                     199
-rw-r--r--   fs/sftpfs.py                   284
-rw-r--r--   fs/tempfs.py                     9
-rw-r--r--   fs/tests.py                    806
-rw-r--r--   fs/tests/__init__.py           448
-rw-r--r--   fs/tests/test_expose.py        119
-rw-r--r--   fs/tests/test_fs.py             88
-rw-r--r--   fs/tests/test_objecttree.py     47
-rw-r--r--   fs/tests/test_path.py           96
-rw-r--r--   fs/tests/test_s3fs.py           49
-rw-r--r--   fs/tests/test_xattr.py         116
-rw-r--r--   fs/tests/test_zipfs.py         153
-rw-r--r--   fs/utils.py                     16
-rw-r--r--   fs/wrapfs.py                   223
-rw-r--r--   fs/xattrs.py                   159
-rw-r--r--   fs/zipfs.py                     73
-rw-r--r--   setup.py                         2
38 files changed, 4510 insertions, 2651 deletions
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..2a23b18
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,4 @@
+
+Will McGugan (will@willmcgugan.com)
+Ryan Kelly (ryan@rfk.id.au)
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..0ee9f2f
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,3 @@
+
+include AUTHORS
+
diff --git a/NOTES.txt b/NOTES.txt
new file mode 100644
index 0000000..317b7d2
--- /dev/null
+++ b/NOTES.txt
@@ -0,0 +1,38 @@
+
+Rename 'helpers' module to 'path' since it's basically an imitation of os.path.
+Minify the contained functions:
+ - make normpath() do more e.g. collapse backrefs
+ - remove resolvepath() as it just confuses the issue
+ - resourcename() -> basename() in line with os.path notation
+ - remove isabsolutepath(), it wasn't used
+
+Put error class definitions in separate submodule 'errors'
+ - less redundancy (e.g. no more `raise UnsupportedError("UNSUPPORTED")`)
+ - deeper exception hierarchy (e.g. `ParentDirectoryMissingError`)
+This is designed to allow me to be more optimistic; rather than checking all
+preconditions before trying an action, I can just let it fail and branch on
+the exception. Important for reducing the number of accesses to a remote FS.
+
+Remove the notion of hidden files from the base FS class.
+ - has led to several bugs with copying/moving directories
+ - it's not the filesystem's business to decide what files I want to see
+ - moved the logic into a separate wrapper class "HideDotFiles"
+
+Remove xattr support from base FS class, making it a separate interface.
+ - has led to several bugs with copying/moving files and directories
+ - now defined in fs.xattrs module
+ - SimulateXAttr wrapper class contains the same logic
+ - removexattr() -> delxattr() in line with python's get/set/del tradition
+
+Operational changes to the base methods:
+ - don't require makedir() to support the "mode" argument, since this only
+ makes sense for OS-level files.
+ - when copy() is given an existing directory as destination, raise an error
+ rather than copying into the directory (explicit is better than implicit)
+
+Split up the test definitions a little:
+ - use a separate FSTestCases mixin rather than subclassing TestOSFS
+ - path helpers testcases in their own module
+ - zipfs in its own module since it's different to all the others
+ - s3fs in its own module since it's very slow and costs money to test
+
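
The "optimistic" error-handling style described in NOTES.txt can be sketched
as follows (a hypothetical 'myfs' FS instance is assumed; the exception names
are those defined in fs/errors.py and documented on FS.makedir below):

    from fs.errors import ParentDirectoryMissingError, DestinationExistsError

    try:
        # try the cheap call first -- one round-trip on a remote filesystem
        myfs.makedir("backups/2009")
    except ParentDirectoryMissingError:
        # parent dir doesn't exist yet, so create the whole chain
        myfs.makedir("backups/2009", recursive=True)
    except DestinationExistsError:
        # directory is already there, nothing to do
        pass
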
diff --git a/fs/__init__.py b/fs/__init__.py
index f843da2..b13785e 100644
--- a/fs/__init__.py
+++ b/fs/__init__.py
@@ -1,19 +1,28 @@
"""
-A filesystem abstraction.
-"""
+ fs: a filesystem abstraction.
+
+This module provides an abstract base class 'FS' that defines a consistent
+interface to different kinds of filesystem, along with a range of concrete
+implementations of this interface such as:
-__version__ = "0.1.1dev"
+ OSFS: access the local filesystem, through the 'os' module
+ TempFS: a temporary filesystem that's automatically cleared on exit
+ MemoryFS: a filesystem that exists only in memory
+ ZipFS: access a zipfile like a filesystem
+ S3FS: access files stored in Amazon S3
+
+"""
+__version__ = "0.2.0"
__author__ = "Will McGugan (will@willmcgugan.com)"
+# 'base' imports * from 'path' and 'errors', so their contents
+# will be available here as well.
from base import *
-from helpers import *
-__all__ = ['memoryfs',
- 'mountfs',
- 'multifs',
- 'osfs',
- 'utils',
- 'zipfs',
- 'helpers',
- 'tempfs']
\ No newline at end of file
+
+# provide these by default so people can use 'fs.path.basename' etc.
+import errors
+import path
+
+
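
A minimal usage sketch of the package layout described above (it assumes
MemoryFS implements the base FS interface from fs/base.py below, and that
fs.path.basename behaves like its os.path counterpart as stated in NOTES.txt):

    import fs                         # fs.path and fs.errors come along for free
    from fs.memoryfs import MemoryFS

    m = MemoryFS()
    m.createfile("hello.txt", "hello world")   # base-class convenience method
    print m.getcontents("hello.txt")           # prints "hello world"
    print fs.path.basename("/foo/hello.txt")   # prints "hello.txt"
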
diff --git a/fs/base.py b/fs/base.py
index d138a5a..01dfd80 100644
--- a/fs/base.py
+++ b/fs/base.py
@@ -1,8 +1,16 @@
#!/usr/bin/env python
+"""
-from helpers import *
-import os
-import os.path
+ fs.base: base class defining the FS abstraction.
+
+This module defines the most basic filesystem abstraction, the FS class.
+Instances of FS represent a filesystem containing files and directories
+that can be queried and manipulated. To implement a new kind of filesystem,
+start by subclassing the base FS class.
+
+"""
+
+import os, os.path
import shutil
import fnmatch
import datetime
@@ -12,97 +20,12 @@ except ImportError:
import dummy_threading as threading
import dummy_threading
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-
-error_msgs = {
-
- "UNKNOWN_ERROR" : "No information on error: %(path)s",
-
- # UnsupportedError
- "UNSUPPORTED" : "Action is unsupported by this filesystem.",
-
- # OperationFailedError
- "LISTDIR_FAILED" : "Unable to get directory listing: %(path)s",
- "MAKEDIR_FAILED" : "Unable to create directory: %(path)s",
- "DELETE_FAILED" : "Unable to delete file: %(path)s",
- "RENAME_FAILED" : "Unable to rename file: %(path)s",
- "OPEN_FAILED" : "Unable to open file: %(path)s",
- "DIR_EXISTS" : "Directory exists (try allow_recreate=True): %(path)s",
- "REMOVE_FAILED" : "Unable to remove file: %(path)s",
- "REMOVEDIR_FAILED" : "Unable to remove dir: %(path)s",
- "GETSIZE_FAILED" : "Unable to retrieve size of resource: %(path)s",
- "COPYFILE_FAILED" : "Unable to copy file: %(path)s",
- "READ_FAILED" : "Unable to read from file: %(path)s",
- "XATTR_FAILED" : "Unable to access extended-attribute: %(path)s",
-
- # NoSysPathError
- "NO_SYS_PATH" : "No mapping to OS filesytem: %(path)s,",
+from fs.path import *
+from fs.errors import *
- # PathError
- "INVALID_PATH" : "Path is invalid: %(path)s",
-
- # ResourceLockedError
- "FILE_LOCKED" : "File is locked: %(path)s",
- "DIR_LOCKED" : "Dir is locked: %(path)s",
-
- # ResourceNotFoundError
- "NO_DIR" : "Directory does not exist: %(path)s",
- "NO_FILE" : "No such file: %(path)s",
- "NO_RESOURCE" : "No path to: %(path)s",
-
- # ResourceInvalid
- "WRONG_TYPE" : "Resource is not the type that was expected: %(path)s",
-
- # SystemError
- "OS_ERROR" : "Non specific OS error: %(path)s",
-}
-
-error_codes = error_msgs.keys()
-
-class FSError(Exception):
-
- """A catch all exception for FS objects."""
-
- def __init__(self, code, path=None, path2=None, msg=None, details=None):
- """A unified exception class that represents Filesystem errors.
-
- code -- A short identifier for the error
- path -- A path associated with the error
- msg -- An textual description of the error
- details -- Any additional details associated with the error
-
- """
-
- self.code = code
- self.msg = msg or error_msgs.get(code, error_msgs['UNKNOWN_ERROR'])
- self.path = path
- self.path2 = path2
- self.details = details
-
- def __str__(self):
- if self.details is None:
- msg = self.msg % dict((k, str(v)) for k, v in self.__dict__.iteritems())
- else:
- msg = self.msg % dict((k, str(v)) for k, v in self.__dict__.iteritems())
- msg += ", "+str(self.details)
-
- return '%s. %s' % (self.code, msg)
-
-class UnsupportedError(FSError): pass
-class OperationFailedError(FSError): pass
-class NoSysPathError(FSError): pass
-class PathError(FSError): pass
-class ResourceLockedError(FSError): pass
-class ResourceNotFoundError(FSError): pass
-class DestinationExistsError(FSError): pass
-class SystemError(FSError): pass
-class ResourceInvalid(FSError): pass
def silence_fserrors(f, *args, **kwargs):
- """Perform a function call and return None if any FSError exceptions are thrown/
+ """Perform a function call and return None if FSError is thrown
f -- Function to call
args -- Parameters to f
@@ -114,15 +37,15 @@ def silence_fserrors(f, *args, **kwargs):
except FSError:
return None
+
class NullFile(object):
+ """A NullFile is a file object that has no functionality.
- """A NullFile is a file object that has no functionality. Null files are
- returned by the 'safeopen' method in FS objects when the file does not exist.
- This can simplify code by negating the need to check if a file exists,
- or handling exceptions.
+ Null files are returned by the 'safeopen' method in FS objects when the
+ file doesn't exist. This can simplify code by negating the need to check
+ if a file exists, or handling exceptions.
"""
-
def __init__(self):
self.closed = False
@@ -163,41 +86,8 @@ class NullFile(object):
pass
-def print_fs(fs, path="/", max_levels=5, indent=' '*2):
- """Prints a filesystem listing to stdout (including sub dirs). Useful as a debugging aid.
- Be careful about printing a OSFS, or any other large filesystem.
- Without max_levels set, this function will traverse the entire directory tree.
-
- fs -- A filesystem object
- path -- Path of root to list (default "/")
- max_levels -- Maximum levels of dirs to list (default 5), set to None for no maximum
- indent -- String to indent each directory level (default two spaces)
-
- """
- def print_dir(fs, path, level):
- try:
- dir_listing = [(fs.isdir(pathjoin(path,p)), p) for p in fs.listdir(path)]
- except FSError, e:
- print indent*level + "... unabled to retrieve directory list (reason: %s) ..." % str(e)
- return
-
- dir_listing.sort(key = lambda (isdir, p):(not isdir, p.lower()))
-
- for is_dir, item in dir_listing:
-
- if is_dir:
- print indent*level + '[%s]' % item
- if max_levels is None or level < max_levels:
- print_dir(fs, pathjoin(path, item), level+1)
- if max_levels is not None:
- if level >= max_levels:
- print indent*(level+1) + "..."
- else:
- print indent*level + '%s' % item
- print_dir(fs, path, 0)
-
-
-def _synchronize(func):
+def synchronize(func):
+ """Decorator to synchronize a method on self._lock."""
def acquire_lock(self, *args, **kwargs):
self._lock.acquire()
try:
@@ -208,26 +98,50 @@ def _synchronize(func):
return acquire_lock
-
class FS(object):
-
- """The base class for Filesystem objects. An instance of a class derived from FS is an abstraction
- on some kind of filesytem, such as the OS filesystem or a zip file.
+ """The base class for Filesystem abstraction objects.
+
+ An instance of a class derived from FS is an abstraction on some kind
+    of filesystem, such as the OS filesystem or a zip file.
+
+ The following is the minimal set of methods that must be provided by
+ a new FS subclass:
+
+ * open -- open a file for reading/writing (like python's open() func)
+ * isfile -- check whether a path exists and is a file
+ * isdir -- check whether a path exists and is a directory
+ * listdir -- list the contents of a directory
+ * makedir -- create a new directory
+ * remove -- remove an existing file
+ * removedir -- remove an existing directory
+ * rename -- atomically rename a file or directory
+ * getinfo -- return information about the path e.g. size, mtime
+
+ The following methods have a sensible default implementation, but FS
+ subclasses are welcome to override them if a more efficient implementation
+ can be provided:
+
+ * getsyspath -- get a file's name in the local filesystem, if possible
+ * exists -- check whether a path exists as file or directory
+ * copy -- copy a file to a new location
+ * move -- move a file to a new location
+ * copydir -- recursively copy a directory to a new location
+ * movedir -- recursively move a directory to a new location
"""
- def __init__(self, thread_syncronize=False):
- """The baseclass for Filesystem objects.
+ def __init__(self, thread_synchronize=False):
+ """The base class for Filesystem objects.
        thread_synchronize -- If True, a lock object will be created for the
object, otherwise a dummy lock will be used.
-
"""
- if thread_syncronize:
+ if thread_synchronize:
self._lock = threading.RLock()
else:
self._lock = dummy_threading.RLock()
+
def __getstate__(self):
# Locks can't be pickled, so instead we just indicate the
# type of lock that should be there. None == no lock,
@@ -251,167 +165,197 @@ class FS(object):
else:
self._lock = dummy_threading.RLock()
- def _resolve(self, pathname):
- resolved_path = resolvepath(pathname)
- return resolved_path
-
- def _abspath(self, pathname):
- pathname = normpath(pathname)
-
- if not pathname.startswith('/'):
- return pathjoin('/', pathname)
- return pathname
def getsyspath(self, path, allow_none=False):
- """Returns the system path (a path recognised by the operating system) if present.
- If the path does not map to a system path (and allow_none is False) then a NoSysPathError exception is thrown.
+ """Returns the system path (a path recognised by the OS) if present.
- path -- A path within the filesystem
- allow_none -- If True, this method can return None if there is no system path
+ If the path does not map to a system path (and allow_none is False)
+ then a NoSysPathError exception is thrown.
+ path -- A path within the filesystem
+ allow_none -- If True, this method should return None if there is no
+ system path, rather than raising NoSysPathError
"""
if not allow_none:
- raise NoSysPathError("NO_SYS_PATH", path)
+ raise NoSysPathError(path=path)
return None
def hassyspath(self, path):
"""Return True if the path maps to a system path.
        path -- Path to check
-
"""
return self.getsyspath(path, None) is not None
+
def open(self, path, mode="r", **kwargs):
- """Opens a file.
+ """Open a the given path as a file-like object.
path -- Path to file that should be opened
- mode -- Mode of file to open, identical too the mode string used in
- 'file' and 'open' builtins
- kwargs -- Additional (optional) keyword parameters that may be required to open the file
-
+ mode -- Mode of file to open, identical to the mode string used
+ in 'file' and 'open' builtins
+ kwargs -- Additional (optional) keyword parameters that may
+ be required to open the file
"""
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("open file")
def safeopen(self, *args, **kwargs):
- """Like 'open', but will return a NullFile if the file could not be opened."""
+ """Like 'open', but returns a NullFile if the file could't be opened."""
try:
f = self.open(*args, **kwargs)
except ResourceNotFoundError:
return NullFile()
return f
- def exists(self, path):
- """Returns True if the path references a valid resource.
-
- path -- A path to test
- """
- raise UnsupportedError("UNSUPPORTED")
+ def exists(self, path):
+ """Returns True if the path references a valid resource."""
+ return self.isfile(path) or self.isdir(path)
def isdir(self, path):
"""Returns True if a given path references a directory."""
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("check for directory")
def isfile(self, path):
"""Returns True if a given path references a file."""
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("check for file")
- def ishidden(self, path):
- """Returns True if the given path is hidden."""
- return path.startswith('.')
def listdir(self, path="./",
wildcard=None,
full=False,
absolute=False,
- hidden=True,
dirs_only=False,
files_only=False):
- """Lists all the files and directories in a path. Returns a list of paths.
+ """Lists all the files and directories in a path.
path -- Root of the path to list
- wildcard -- Only returns paths that match this wildcard, default does no matching
+ wildcard -- Only returns paths that match this wildcard
full -- Returns a full path
absolute -- Returns an absolute path
- hidden -- If True, return hidden files
dirs_only -- If True, only return directories
files_only -- If True, only return files
+ The directory contents are returned as a list of paths. If the
+ given path is not found then ResourceNotFoundError is raised;
+ if it exists but is not a directory, ResourceInvalidError is raised.
+ """
+ raise UnsupportedError("list directory")
+
+ def _listdir_helper(self, path, entries,
+ wildcard=None,
+ full=False,
+ absolute=False,
+ dirs_only=False,
+ files_only=False):
+ """A helper method called by listdir method that applies filtering.
+
+ Given the path to a directory and a list of the names of entries within
+ that directory, this method applies the semantics of the listdir()
+ keyword arguments. An appropriately modified and filtered list of
+ directory entries is returned.
"""
- raise UnsupportedError("UNSUPPORTED")
+ if dirs_only and files_only:
+ raise ValueError("dirs_only and files_only can not both be True")
- def makedir(self, path, mode=0777, recursive=False, allow_recreate=False):
- """Make a directory on the file system.
+ if wildcard is not None:
+ match = fnmatch.fnmatch
+ entries = [p for p in entries if match(p, wildcard)]
+
+ if dirs_only:
+ entries = [p for p in entries if self.isdir(pathjoin(path, p))]
+ elif files_only:
+ entries = [p for p in entries if self.isfile(pathjoin(path, p))]
+
+ if full:
+ entries = [pathjoin(path, p) for p in entries]
+ elif absolute:
+ entries = [abspath(pathjoin(path, p)) for p in entries]
+
+ return entries
+
+
+ def makedir(self, path, recursive=False, allow_recreate=False):
+ """Make a directory on the filesystem.
path -- Path of directory
- mode -- Permissions
recursive -- If True, also create intermediate directories
- allow_recreate -- If True, then re-creating a directory wont throw an exception
-
+        allow_recreate -- If True, re-creating a directory won't be an error
+
+ The following errors can be raised by this method:
+ * DestinationExistsError, if path is already a directory and
+ allow_recreate is False
+ * ParentDirectoryMissingError, if a containing directory is missing
+ and recursive is False
+ * ResourceInvalidError, if path is an existing file
"""
-
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("make directory")
def remove(self, path):
- """Remove a resource from the filesystem.
+ """Remove a file from the filesystem.
path -- Path of the resource to remove
+ This method can raise the following errors:
+ * ResourceNotFoundError, if the path does not exist
+ * ResourceInvalidError, if the path is a directory
"""
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("remove resource")
def removedir(self, path, recursive=False, force=False):
- """Remove a directory
+ """Remove a directory from the filesystem
path -- Path of the directory to remove
- recursive -- If True, then blank parent directories will be removed
+ recursive -- If True, then empty parent directories will be removed
force -- If True, any directory contents will be removed
+ This method can raise the following errors:
+ * ResourceNotFoundError, if the path does not exist
+ * ResourceInvalidError, if the path is not a directory
+ * DirectoryNotEmptyError, if the directory is not empty and
+ force is False
"""
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("remove directory")
def rename(self, src, dst):
"""Renames a file or directory
src -- Path to rename
dst -- New name (not a path)
-
"""
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("rename resource")
def getinfo(self, path):
"""Returns information for a path as a dictionary.
path -- A path to retrieve information for
-
"""
- raise UnsupportedError("UNSUPPORTED")
+ raise UnsupportedError("get resource info")
+
def desc(self, path):
- """Returns short descriptive text regarding a path. For use as a debugging aid.
+ """Returns short descriptive text regarding a path.
path -- A path to describe
+ This is mainly for use as a debugging aid.
"""
if not self.exists(path):
return "No description available"
-
try:
sys_path = self.getsyspath(path)
except NoSysPathError:
return "No description available"
-
if self.isdir(path):
return "OS dir, maps to %s" % sys_path
else:
return "OS file, maps to %s" % sys_path
+
def getcontents(self, path):
"""Returns the contents of a file as a string.
path -- path of file to read.
-
"""
f = None
try:
@@ -422,17 +366,22 @@ class FS(object):
if f is not None:
f.close()
- def createfile(self, path, data):
+ def createfile(self, path, data=""):
"""A convenience method to create a new file from a string.
path -- Path of the file to create
data -- A string containing the contents of the file
-
"""
f = None
try:
f = self.open(path, 'wb')
- f.write(data)
+ if hasattr(data,"read"):
+ chunk = data.read(1024*512)
+ while chunk:
+ f.write(chunk)
+ chunk = data.read(1024*512)
+ else:
+ f.write(data)
finally:
if f is not None:
f.close()
@@ -442,69 +391,25 @@ class FS(object):
"""Opens a directory and returns a FS object representing its contents.
path -- Path to directory to open
-
"""
if not self.exists(path):
- raise ResourceNotFoundError("NO_DIR", path)
-
+ raise ResourceNotFoundError(path)
sub_fs = SubFS(self, path)
return sub_fs
- def _listdir_helper(self, path, paths, wildcard, full, absolute, hidden, dirs_only, files_only):
- """A helper function called by listdir method that applies filtering."""
-
- if dirs_only and files_only:
- raise ValueError("dirs_only and files_only can not both be True")
-
- if wildcard is not None:
- match = fnmatch.fnmatch
- paths = [p for p in paths if match(p, wildcard)]
-
- if not hidden:
- paths = [p for p in paths if not self.ishidden(p)]
-
- if dirs_only:
- paths = [p for p in paths if self.isdir(pathjoin(path, p))]
- elif files_only:
- paths = [p for p in paths if self.isfile(pathjoin(path, p))]
-
- if full:
- paths = [pathjoin(path, p) for p in paths]
- elif absolute:
- paths = [self._abspath(pathjoin(path, p)) for p in paths]
-
- return paths
-
-
- def walkfiles(self, path="/", wildcard=None, dir_wildcard=None, search="breadth" ):
- """Like the 'walk' method, but just yields files.
-
- path -- Root path to start walking
- wildcard -- If given, only return files that match this wildcard
- dir_wildcard -- If given, only walk in to directories that match this wildcard
- search -- A string that identifies the method used to walk the directories,
- can be 'breadth' for a breadth first search, or 'depth' for a depth first
- search. Use 'depth' if you plan to create / delete files as you go.
-
- """
-
- for path, files in self.walk(path, wildcard, dir_wildcard, search):
- for f in files:
- yield pathjoin(path, f)
-
def walk(self, path="/", wildcard=None, dir_wildcard=None, search="breadth"):
"""Walks a directory tree and yields the root path and contents.
- Yields a tuple of the path of each directory and a list of its file contents.
+ Yields a tuple of the path of each directory and a list of its file
+ contents.
path -- Root path to start walking
wildcard -- If given, only return files that match this wildcard
- dir_wildcard -- If given, only walk in to directories that match this wildcard
- search -- A string that identifies the method used to walk the directories,
- can be 'breadth' for a breadth first search, or 'depth' for a depth first
- search. Use 'depth' if you plan to create / delete files as you go.
-
-
+ dir_wildcard -- If given, only walk directories that match the wildcard
+        search -- A string identifying the method used to walk the directories.
+ Can be 'breadth' for a breadth first search, or 'depth' for a
+ depth first search. Use 'depth' if you plan to create or
+ delete files as you go.
"""
if search == "breadth":
dirs = [path]
@@ -543,16 +448,31 @@ class FS(object):
raise ValueError("Search should be 'breadth' or 'depth'")
+ def walkfiles(self, path="/", wildcard=None, dir_wildcard=None, search="breadth" ):
+ """Like the 'walk' method, but just yields files.
+
+ path -- Root path to start walking
+ wildcard -- If given, only return files that match this wildcard
+ dir_wildcard -- If given, only walk directories that match the wildcard
+        search -- A string identifying the method used to walk the directories.
+ Can be 'breadth' for a breadth first search, or 'depth' for a
+ depth first search. Use 'depth' if you plan to create or
+ delete files as you go.
+ """
+ for path, files in self.walk(path, wildcard, dir_wildcard, search):
+ for f in files:
+ yield pathjoin(path, f)
+
+
def getsize(self, path):
"""Returns the size (in bytes) of a resource.
path -- A path to the resource
-
"""
info = self.getinfo(path)
size = info.get('size', None)
        if size is None:
- raise OperationFailedError("GETSIZE_FAILED", path)
+ raise OperationFailedError("get size of resource", path)
return size
def copy(self, src, dst, overwrite=False, chunk_size=16384):
@@ -560,20 +480,18 @@ class FS(object):
src -- The source path
dst -- The destination path
- overwrite -- If True, then the destination may be overwritten
- (if a file exists at that location). If False then an exception will be
- thrown if the destination exists
- chunk_size -- Size of chunks to use in copy, if a simple copy is required
-
+ overwrite -- If True, then an existing file at the destination may
+ be overwritten; If False then DestinationExistsError
+ will be raised.
+ chunk_size -- Size of chunks to use if a simple copy is required
"""
- if self.isdir(dst):
- dst = pathjoin( dirname(dst), resourcename(src) )
-
if not self.isfile(src):
- raise ResourceInvalid("WRONG_TYPE", src, msg="Source is not a file: %(path)s")
+ if self.isdir(src):
+ raise ResourceInvalidError(src,msg="Source is not a file: %(path)s")
+ raise ResourceNotFoundError(src)
if not overwrite and self.exists(dst):
- raise DestinationExistsError("COPYFILE_FAILED", src, dst, msg="Destination file exists: %(path2)s")
+ raise DestinationExistsError(dst)
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
@@ -597,99 +515,63 @@ class FS(object):
if dst_file is not None:
dst_file.close()
+
def move(self, src, dst, overwrite=False, chunk_size=16384):
"""Moves a file from one location to another.
src -- Source path
dst -- Destination path
- overwrite -- If True, then the destination may be overwritten
-
+        overwrite -- If True, then an existing file at the destination path
+                     will be silently overwritten; if False then an exception
+                     will be raised in this case.
"""
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
+ # Try to do an os-level rename if possible.
+ # Otherwise, fall back to copy-and-remove.
if src_syspath is not None and dst_syspath is not None:
- if not self.isfile(src):
- raise ResourceInvalid("WRONG_TYPE", src, msg="Source is not a file: %(path)s")
- if not overwrite and self.exists(dst):
- raise DestinationExistsError("MOVE_FAILED", src, dst, msg="Destination file exists: %(path2)s")
- shutil.move(src_syspath, dst_syspath)
- else:
- self.copy(src, dst, overwrite=overwrite, chunk_size=chunk_size)
- self.remove(src)
-
-
- def _get_attr_path(self, path):
- if self.isdir(path):
- return pathjoin(path, '.dirxattrs')
- else:
- dir_path, file_path = pathsplit(path)
- return pathjoin(dir_path, '.xattrs.'+file_path)
-
- def _get_attr_dict(self, path):
- attr_path = self._get_attr_path(path)
- if self.exists(attr_path):
- return pickle.loads(self.getcontents(attr_path))
- else:
- return {}
-
- def _set_attr_dict(self, path, attrs):
- attr_path = self._get_attr_path(path)
- self.setcontents(self._get_attr_path(path), pickle.dumps(attrs))
-
- def setxattr(self, path, key, value):
- attrs = self._get_attr_dict(path)
- attrs[key] = value
- self._set_attr_dict(path, attrs)
-
- def getxattr(self, path, key, default):
- attrs = self._get_attr_dict(path)
- return attrs.get(key, default)
-
- def removexattr(self, path, key):
- attrs = self._get_attr_dict(path)
- try:
- del attrs[key]
- except KeyError:
- pass
- self._set_attr_dict(path, attrs)
-
- def listxattrs(self, path):
- attrs = self._get_attr_dict(path)
- return self._get_attr_dict(path).keys()
-
- def updatexattrs(self, path, update_dict):
- d = self._get_attr_dict()
- d.update( dict([(k, v) for k,v in update_dict.iteritems()]) )
- self.set_attr_dict(self, path, d)
+ if not os.path.isfile(src_syspath):
+ if os.path.isdir(src_syspath):
+ raise ResourceInvalidError(src,msg="Source is not a file: %(path)s")
+ raise ResourceNotFoundError(src)
+ if not overwrite and os.path.exists(dst_syspath):
+ raise DestinationExistsError(dst)
+ try:
+ os.rename(src_syspath,dst_syspath)
+ return
+ except OSError:
+ pass
+ self.copy(src, dst, overwrite=overwrite, chunk_size=chunk_size)
+ self.remove(src)
- def getxattrs(self, path):
- return dict( [(k, self.getxattr(path, k)) for k in self.listxattrs(path)] )
def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
"""Moves a directory from one location to another.
src -- Source directory path
dst -- Destination directory path
- overwrite -- If True then any existing files in the destination directory will be overwritten
- ignore_errors -- If True then this method will ignore FSError exceptions when moving files
- chunk_size -- Size of chunks to use when copying, if a simple copy is required
-
+ overwrite -- If True then any existing files in the destination
+ directory will be overwritten
+ ignore_errors -- If True then this method will ignore FSError
+ exceptions when moving files
+ chunk_size -- Size of chunks to use when copying, if a simple copy
+ is required
"""
if not self.isdir(src):
- raise ResourceInvalid("WRONG_TYPE", src, msg="Source is not a dst: %(path)s")
+ raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
if not overwrite and self.exists(dst):
- raise DestinationExistsError("MOVEDIR_FAILED", src, dst, msg="Destination exists: %(path2)s")
+ raise DestinationExistsError(dst)
src_syspath = self.getsyspath(src, allow_none=True)
dst_syspath = self.getsyspath(dst, allow_none=True)
if src_syspath is not None and dst_syspath is not None:
try:
- shutil.move(src_syspath, dst_syspath)
+ os.rename(src_syspath,dst_syspath)
return
- except WindowsError:
+ except OSError:
pass
def movefile_noerrors(src, dst, overwrite):
@@ -705,7 +587,7 @@ class FS(object):
self.makedir(dst, allow_recreate=True)
for dirname, filenames in self.walk(src, search="depth"):
- dst_dirname = makerelative(dirname[len(src):])
+ dst_dirname = relpath(dirname[len(src):])
dst_dirpath = pathjoin(dst, dst_dirname)
self.makedir(dst_dirpath, allow_recreate=True, recursive=True)
@@ -724,15 +606,16 @@ class FS(object):
src -- Source directory path
dst -- Destination directory path
- overwrite -- If True then any existing files in the destination directory will be overwritten
+ overwrite -- If True then any existing files in the destination
+ directory will be overwritten
ignore_errors -- If True, exceptions when copying will be ignored
- chunk_size -- Size of chunks to use when copying, if a simple copy is required
-
+ chunk_size -- Size of chunks to use when copying, if a simple copy
+ is required
"""
if not self.isdir(src):
- raise ResourceInvalid("WRONG_TYPE", src, msg="Source is not a dst: %(path)s")
+ raise ResourceInvalidError(src, msg="Source is not a directory: %(path)s")
if not overwrite and self.exists(dst):
- raise DestinationExistsError("COPYDIR_FAILED", dst, msg="Destination exists: %(path)s")
+ raise DestinationExistsError(dst)
def copyfile_noerrors(src, dst, overwrite):
try:
@@ -748,7 +631,7 @@ class FS(object):
self.makedir(dst, allow_recreate=True)
for dirname, filenames in self.walk(src):
- dst_dirname = makerelative(dirname[len(src):])
+ dst_dirname = relpath(dirname[len(src):])
dst_dirpath = pathjoin(dst, dst_dirname)
self.makedir(dst_dirpath, allow_recreate=True)
@@ -763,7 +646,6 @@ class FS(object):
"""Return True if a path contains no files.
path -- Path of a directory
-
"""
path = normpath(path)
iter_dir = iter(self.listdir(path))
@@ -775,23 +657,23 @@ class FS(object):
-
class SubFS(FS):
-
"""A SubFS represents a sub directory of another filesystem object.
- SubFS objects are return by opendir, which effectively creates a 'sandbox'
- filesystem that can only access files / dirs under a root path within its 'parent' dir.
+    SubFS objects are returned by opendir, which effectively creates a
+    'sandbox' filesystem that can only access files/dirs under a root path
+ within its 'parent' dir.
"""
def __init__(self, parent, sub_dir):
self.parent = parent
- self.sub_dir = parent._abspath(sub_dir)
+ self.sub_dir = abspath(normpath(sub_dir))
def __str__(self):
return "<SubFS: %s in %s>" % (self.sub_dir, self.parent)
- __repr__ = __str__
+ def __repr__(self):
+ return str(self)
def __unicode__(self):
return unicode(self.__str__())
@@ -803,7 +685,7 @@ class SubFS(FS):
return "File in sub dir of %s"%str(self.parent)
def _delegate(self, path):
- return pathjoin(self.sub_dir, resolvepath(makerelative(path)))
+ return pathjoin(self.sub_dir, relpath(normpath(path)))
def getsyspath(self, path, allow_none=False):
return self.parent.getsyspath(self._delegate(path), allow_none=allow_none)
@@ -816,7 +698,7 @@ class SubFS(FS):
def opendir(self, path):
if not self.exists(path):
- raise ResourceNotFoundError("NO_DIR", path)
+ raise ResourceNotFoundError(path)
path = self._delegate(path)
sub_fs = self.parent.opendir(path)
@@ -828,34 +710,43 @@ class SubFS(FS):
def isfile(self, path):
return self.parent.isfile(self._delegate(path))
- def ishidden(self, path):
- return self.parent.ishidden(self._delegate(path))
-
- def listdir(self, path="./", wildcard=None, full=False, absolute=False, hidden=True, dirs_only=False, files_only=False):
+ def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
paths = self.parent.listdir(self._delegate(path),
wildcard,
False,
False,
- hidden,
dirs_only,
files_only)
if absolute:
- listpath = resolvepath(path)
- paths = [makeabsolute(pathjoin(listpath, path)) for path in paths]
+ listpath = normpath(path)
+ paths = [abspath(pathjoin(listpath, path)) for path in paths]
elif full:
- listpath = resolvepath(path)
- paths = [makerelative(pathjoin(listpath, path)) for path in paths]
+ listpath = normpath(path)
+ paths = [relpath(pathjoin(listpath, path)) for path in paths]
return paths
- def makedir(self, path, mode=0777, recursive=False, allow_recreate=False):
- return self.parent.makedir(self._delegate(path), mode=mode, recursive=recursive, allow_recreate=allow_recreate)
+ def makedir(self, path, recursive=False, allow_recreate=False):
+ return self.parent.makedir(self._delegate(path), recursive=recursive, allow_recreate=allow_recreate)
def remove(self, path):
return self.parent.remove(self._delegate(path))
def removedir(self, path, recursive=False,force=False):
- self.parent.removedir(self._delegate(path), recursive=recursive, force=force)
+ # Careful not to recurse outside the subdir
+ if path in ("","/"):
+ if force:
+ for path2 in self.listdir(path,absolute=True,files_only=True):
+ self.remove(path2)
+ for path2 in self.listdir(path,absolute=True,dirs_only=True):
+ self.removedir(path2,force=True)
+ else:
+ self.parent.removedir(self._delegate(path),force=force)
+ if recursive:
+ try:
+ self.removedir(dirname(path),recursive=True)
+ except DirectoryNotEmptyError:
+ pass
def getinfo(self, path):
return self.parent.getinfo(self._delegate(path))
@@ -867,21 +758,25 @@ class SubFS(FS):
return self.parent.rename(self._delegate(src), self._delegate(dst))
-if __name__ == "__main__":
- import osfs
- import browsewin
-
- fs1 = osfs.OSFS('~/')
- fs2 = fs1.opendir("projects").opendir('prettycharts')
-
- for d, f in fs1.walk('/projects/prettycharts'):
- print d, f
-
- for f in fs1.walkfiles("/projects/prettycharts"):
- print f
-
- #print_fs(fs2)
-
+def flags_to_mode(flags):
+ """Convert an os.O_* bitmask into an FS mode string."""
+ if flags & os.O_EXCL:
+ raise UnsupportedError("open",msg="O_EXCL is not supported")
+ if flags & os.O_WRONLY:
+ if flags & os.O_TRUNC:
+ mode = "w"
+ elif flags & os.O_APPEND:
+ mode = "a"
+ else:
+ mode = "r+"
+ elif flags & os.O_RDWR:
+ if flags & os.O_TRUNC:
+ mode = "w+"
+ elif flags & os.O_APPEND:
+ mode = "a+"
+ else:
+ mode = "r+"
+ else:
+ mode = "r"
+ return mode
- #browsewin.browse(fs1)
- browsewin.browse(fs2)
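
The flags_to_mode() helper added at the end of fs/base.py is what the FUSE
layer (fs/expose/fuse, below) uses to translate os.O_* open flags into FS mode
strings. Tracing the branches of the function as written gives, for example:

    >>> import os
    >>> from fs.base import flags_to_mode
    >>> flags_to_mode(os.O_RDONLY)
    'r'
    >>> flags_to_mode(os.O_WRONLY | os.O_TRUNC)
    'w'
    >>> flags_to_mode(os.O_RDWR | os.O_APPEND)
    'a+'
    >>> flags_to_mode(os.O_WRONLY)    # write-only without truncate/append
    'r+'
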
diff --git a/fs/browsewin.py b/fs/browsewin.py
index 22ae99f..a5c8e65 100644
--- a/fs/browsewin.py
+++ b/fs/browsewin.py
@@ -151,15 +151,16 @@ class BrowseFrame(wx.Frame):
info_frame = InfoFrame(path, self.fs.desc(path), info)
info_frame.Show()
-def browse(fs):
+def browse(fs):
app = wx.PySimpleApp()
frame = BrowseFrame(fs)
frame.Show()
app.MainLoop()
-if __name__ == "__main__":
+if __name__ == "__main__":
from osfs import OSFS
home_fs = OSFS("~/")
browse(home_fs)
+
diff --git a/fs/errors.py b/fs/errors.py
new file mode 100644
index 0000000..372096a
--- /dev/null
+++ b/fs/errors.py
@@ -0,0 +1,179 @@
+"""
+
+ fs.errors: error class definitions for FS
+
+"""
+
+import sys
+import errno
+
+try:
+ from functools import wraps
+except ImportError:
+    def wraps(func):
+        def decorator(wfunc):
+            wfunc.__name__ = func.__name__
+            wfunc.__doc__ = func.__doc__
+            wfunc.__module__ = func.__module__
+            return wfunc
+        return decorator
+
+
+class FSError(Exception):
+ """Base exception class for the FS module."""
+ default_message = "Unspecified error"
+
+ def __init__(self,msg=None,details=None):
+ if msg is None:
+ msg = self.default_message
+ self.msg = msg
+ self.details = details
+
+ def __str__(self):
+ keys = dict((k,str(v)) for k,v in self.__dict__.iteritems())
+ return self.msg % keys
+
+ def __unicode__(self):
+ return unicode(str(self))
+
+
+class PathError(FSError):
+ """Exception for errors to do with a path string."""
+ default_message = "Path is invalid: %(path)s"
+
+ def __init__(self,path,**kwds):
+ self.path = path
+ super(PathError,self).__init__(**kwds)
+
+
+class OperationFailedError(FSError):
+ """Base exception class for errors associated with a specific operation."""
+ default_message = "Unable to %(opname)s: unspecified error [%(errno)s - %(details)s]"
+
+ def __init__(self,opname,path=None,**kwds):
+ self.opname = opname
+ self.path = path
+ self.errno = getattr(kwds.get("details",None),"errno",None)
+ super(OperationFailedError,self).__init__(**kwds)
+
+
+class UnsupportedError(OperationFailedError):
+ """Exception raised for operations that are not supported by the FS."""
+ default_message = "Unable to %(opname)s: not supported by this filesystem"
+
+
+class RemoteConnectionError(OperationFailedError):
+ """Exception raised when operations encounter remote connection trouble."""
+ default_message = "Unable to %(opname)s: remote connection errror"
+
+
+class StorageSpaceError(OperationFailedError):
+ """Exception raised when operations encounter storage space trouble."""
+ default_message = "Unable to %(opname)s: insufficient storage space"
+
+
+class PermissionDeniedError(OperationFailedError):
+ default_message = "Unable to %(opname)s: permission denied"
+
+
+
+class ResourceError(FSError):
+ """Base exception class for error associated with a specific resource."""
+ default_message = "Unspecified resource error: %(path)s"
+
+ def __init__(self,path,**kwds):
+ self.path = path
+ self.opname = kwds.pop("opname",None)
+ super(ResourceError,self).__init__(**kwds)
+
+
+class NoSysPathError(ResourceError):
+ """Exception raised when there is no syspath for a given path."""
+ default_message = "No mapping to OS filesystem: %(path)s"
+
+
+class ResourceNotFoundError(ResourceError):
+ """Exception raised when a required resource is not found."""
+ default_message = "Resource not found: %(path)s"
+
+
+class ResourceInvalidError(ResourceError):
+ """Exception raised when a resource is the wrong type."""
+ default_message = "Resource is invalid: %(path)s"
+
+
+class DestinationExistsError(ResourceError):
+ """Exception raised when a target destination already exists."""
+ default_message = "Destination exists: %(path)s"
+
+
+class DirectoryNotEmptyError(ResourceError):
+ """Exception raised when a directory to be removed is not empty."""
+ default_message = "Directory is not empty: %(path)s"
+
+
+class ParentDirectoryMissingError(ResourceError):
+ """Exception raised when a parent directory is missing."""
+ default_message = "Parent directory is missing: %(path)s"
+
+
+class ResourceLockedError(ResourceError):
+ """Exception raised when a resource can't be used because it is locked."""
+ default_message = "Resource is locked: %(path)s"
+
+
+
+def convert_fs_errors(func):
+ """Function wrapper to convert FSError instances into OSErrors."""
+ @wraps(func)
+ def wrapper(*args,**kwds):
+ try:
+ return func(*args,**kwds)
+ except ResourceNotFoundError, e:
+ raise OSError(errno.ENOENT,str(e))
+ except ResourceInvalidError, e:
+ raise OSError(errno.EINVAL,str(e))
+ except PermissionDeniedError, e:
+            raise OSError(errno.EACCES,str(e))
+ except DirectoryNotEmptyError, e:
+ raise OSError(errno.ENOTEMPTY,str(e))
+ except DestinationExistsError, e:
+ raise OSError(errno.EEXIST,str(e))
+ except StorageSpaceError, e:
+ raise OSError(errno.ENOSPC,str(e))
+ except RemoteConnectionError, e:
+ raise OSError(errno.ENONET,str(e))
+ except UnsupportedError, e:
+ raise OSError(errno.ENOSYS,str(e))
+ except FSError, e:
+ raise OSError(errno.EFAULT,str(e))
+ return wrapper
+
+
+def convert_os_errors(func):
+ """Function wrapper to convert OSError/IOError instances into FSErrors."""
+ opname = func.__name__
+ @wraps(func)
+ def wrapper(*args,**kwds):
+ try:
+ return func(*args,**kwds)
+ except (OSError,IOError), e:
+ if not hasattr(e,"errno") or not e.errno:
+ raise OperationFailedError(opname,details=e)
+ if e.errno == errno.ENOENT:
+ raise ResourceNotFoundError(e.filename,opname=opname,details=e)
+ if e.errno == errno.ENOTEMPTY:
+ raise DirectoryNotEmptyError(e.filename,opname=opname,details=e)
+ if e.errno == errno.EEXIST:
+ raise DestinationExistsError(e.filename,opname=opname,details=e)
+ if e.errno == 183: # some sort of win32 equivalent to EEXIST
+ raise DestinationExistsError(e.filename,opname=opname,details=e)
+ if e.errno == errno.ENOTDIR:
+ raise ResourceInvalidError(e.filename,opname=opname,details=e)
+ if e.errno == errno.EISDIR:
+ raise ResourceInvalidError(e.filename,opname=opname,details=e)
+ if e.errno == errno.EINVAL:
+ raise ResourceInvalidError(e.filename,opname=opname,details=e)
+ raise OperationFailedError(opname,details=e)
+ return wrapper
+
+
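
A short sketch of how the new error classes behave in practice; the
read_file() function here is purely illustrative:

    >>> from fs.errors import ResourceNotFoundError, convert_os_errors
    >>> str(ResourceNotFoundError("/foo/bar.txt"))
    'Resource not found: /foo/bar.txt'
    >>> @convert_os_errors
    ... def read_file(path):
    ...     return open(path).read()    # raises IOError(ENOENT) if missing
    >>> try:
    ...     read_file("/no/such/file")
    ... except ResourceNotFoundError, e:
    ...     print "converted:", e.path
    converted: /no/such/file
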
diff --git a/fs/expose/__init__.py b/fs/expose/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/fs/expose/__init__.py
diff --git a/fs/expose/fuse/__init__.py b/fs/expose/fuse/__init__.py
new file mode 100644
index 0000000..dbf25ff
--- /dev/null
+++ b/fs/expose/fuse/__init__.py
@@ -0,0 +1,437 @@
+"""
+
+ fs.expose.fuse: expose an FS object to the native filesystem via FUSE
+
+This module provides the necessary interfaces to mount an FS object into
+the local filesystem via FUSE:
+
+ http://fuse.sourceforge.net/
+
+For simple usage, the function 'mount' takes an FS object and a local path,
+and exposes the given FS at that path:
+
+ >>> from fs.memoryfs import MemoryFS
+ >>> from fs.expose import fuse
+ >>> fs = MemoryFS()
+ >>> mp = fuse.mount(fs,"/mnt/my-memory-fs")
+ >>> mp.path
+ '/mnt/my-memory-fs'
+ >>> mp.unmount()
+
+The above spawns a new background process to manage the FUSE event loop, which
+can be controlled through the returned subprocess.Popen object. To avoid
+spawning a new process, set the 'foreground' option:
+
+ >>> # This will block until the filesystem is unmounted
+ >>> fuse.mount(fs,"/mnt/my-memory-fs",foreground=True)
+
+Any additional options for the FUSE process can be passed as keyword arguments
+to the 'mount' function.
+
+If you require finer control over the creation of the FUSE process, you can
+instantiate the MountProcess class directly. It accepts all options available
+to subprocess.Popen:
+
+ >>> from subprocess import PIPE
+ >>> mp = fuse.MountProcess(fs,"/mnt/my-memory-fs",stderr=PIPE)
+ >>> fuse_errors = mp.communicate()[1]
+
+The binding to FUSE is created via ctypes, using a custom version of the
+fuse.py code from Giorgos Verigakis:
+
+ http://code.google.com/p/fusepy/
+
+"""
+
+import os
+import sys
+import signal
+import errno
+import time
+import stat as statinfo
+import subprocess
+import pickle
+
+from fs.base import flags_to_mode
+from fs.errors import *
+from fs.path import *
+from fs.xattrs import ensure_xattrs
+
+import fuse_ctypes as fuse
+try:
+ fuse._libfuse.fuse_get_context
+except AttributeError:
+ raise ImportError("could not locate FUSE library")
+
+
+FUSE = fuse.FUSE
+Operations = fuse.Operations
+fuse_get_context = fuse.fuse_get_context
+
+STARTUP_TIME = time.time()
+
+
+def handle_fs_errors(func):
+ """Method decorator to report FS errors in the appropriate way.
+
+ This decorator catches all FS errors and translates them into an
+ equivalent OSError. It also makes the function return zero instead
+ of None as an indication of successful execution.
+ """
+ func = convert_fs_errors(func)
+ @wraps(func)
+ def wrapper(*args,**kwds):
+ res = func(*args,**kwds)
+ if res is None:
+ return 0
+ return res
+ return wrapper
+
+
+def get_stat_dict(fs,path):
+ """Build a 'stat' dictionary for the given file."""
+ uid, gid, pid = fuse_get_context()
+ info = fs.getinfo(path)
+ private_keys = [k for k in info if k.startswith("_")]
+ for k in private_keys:
+ del info[k]
+ # Basic stuff that is constant for all paths
+ info.setdefault("st_ino",0)
+ info.setdefault("st_dev",0)
+ info.setdefault("st_uid",uid)
+ info.setdefault("st_gid",gid)
+ info.setdefault("st_rdev",0)
+ info.setdefault("st_blksize",1024)
+ info.setdefault("st_blocks",1)
+ # The interesting stuff
+ info.setdefault("st_size",info.get("size",1024))
+ info.setdefault("st_mode",info.get('st_mode',0700))
+ if fs.isdir(path):
+ info["st_mode"] = info["st_mode"] | statinfo.S_IFDIR
+ info.setdefault("st_nlink",2)
+ else:
+ info["st_mode"] = info["st_mode"] | statinfo.S_IFREG
+ info.setdefault("st_nlink",1)
+ for (key1,key2) in [("st_atime","accessed_time"),("st_mtime","modified_time"),("st_ctime","created_time")]:
+ if key1 not in info:
+ if key2 in info:
+ info[key1] = time.mktime(info[key2].timetuple())
+ else:
+ info[key1] = STARTUP_TIME
+ return info
+
+
+class FSOperations(Operations):
+ """FUSE Operations interface delegating all activities to an FS object."""
+
+ def __init__(self,fs,on_init=None,on_destroy=None):
+ self.fs = ensure_xattrs(fs)
+ self._fhmap = {}
+ self._on_init = on_init
+ self._on_destroy = on_destroy
+
+ def _get_file(self,fh):
+ try:
+ return self._fhmap[fh]
+ except KeyError:
+ raise FSError("invalid file handle")
+
+ def _reg_file(self,f):
+ # TODO: a better handle-generation routine
+ fh = int(time.time()*1000)
+ self._fhmap.setdefault(fh,f)
+ if self._fhmap[fh] is not f:
+ return self._reg_file(f)
+ return fh
+
+ def init(self,conn):
+ if self._on_init:
+ self._on_init()
+
+ def destroy(self,data):
+ if self._on_destroy:
+ self._on_destroy()
+
+ @handle_fs_errors
+ def chmod(self,path,mode):
+ raise UnsupportedError("chmod")
+
+ @handle_fs_errors
+ def chown(self,path,uid,gid):
+ raise UnsupportedError("chown")
+
+ @handle_fs_errors
+ def create(self,path,mode,fi=None):
+ if fi is not None:
+ raise UnsupportedError("raw_fi")
+ return self._reg_file(self.fs.open(path,"w"))
+
+ @handle_fs_errors
+ def flush(self,path,fh):
+ self._get_file(fh).flush()
+
+ @handle_fs_errors
+ def getattr(self,path,fh=None):
+ return get_stat_dict(self.fs,path)
+
+ @handle_fs_errors
+ def getxattr(self,path,name,position=0):
+ try:
+ value = self.fs.getxattr(path,name)
+ except AttributeError:
+ raise UnsupportedError("getxattr")
+ else:
+ if value is None:
+ raise OSError(errno.ENOENT,"no attribute '%s'" % (name,))
+ return value
+
+ @handle_fs_errors
+    def link(self,target,source):
+ raise UnsupportedError("link")
+
+ @handle_fs_errors
+ def listxattr(self,path):
+ try:
+ return self.fs.listxattrs(path)
+ except AttributeError:
+ raise UnsupportedError("listxattr")
+
+ @handle_fs_errors
+ def mkdir(self,path,mode):
+ try:
+ self.fs.makedir(path,mode)
+ except TypeError:
+ self.fs.makedir(path)
+
+ @handle_fs_errors
+ def mknod(self,path,mode,dev):
+ raise UnsupportedError("mknod")
+
+ @handle_fs_errors
+ def open(self,path,flags):
+ mode = flags_to_mode(flags)
+ return self._reg_file(self.fs.open(path,mode))
+
+ @handle_fs_errors
+ def read(self,path,size,offset,fh):
+ f = self._get_file(fh)
+ f.seek(offset)
+ return f.read(size)
+
+ @handle_fs_errors
+ def readdir(self,path,fh=None):
+ return ['.', '..'] + self.fs.listdir(path)
+
+ @handle_fs_errors
+ def readlink(self,path):
+ raise UnsupportedError("readlink")
+
+ @handle_fs_errors
+ def release(self,path,fh):
+ self._get_file(fh).close()
+ del self._fhmap[fh]
+
+ @handle_fs_errors
+ def removexattr(self,path,name):
+ try:
+ return self.fs.delxattr(path,name)
+ except AttributeError:
+ raise UnsupportedError("removexattr")
+
+ @handle_fs_errors
+ def rename(self,old,new):
+ if issamedir(old,new):
+ self.fs.rename(old,new)
+ else:
+ if self.fs.isdir(old):
+ self.fs.movedir(old,new)
+ else:
+ self.fs.move(old,new)
+
+ @handle_fs_errors
+ def rmdir(self, path):
+ self.fs.removedir(path)
+
+ @handle_fs_errors
+ def setxattr(self,path,name,value,options,position=0):
+ try:
+ return self.fs.setxattr(path,name,value)
+ except AttributeError:
+ raise UnsupportedError("setxattr")
+
+ @handle_fs_errors
+ def symlink(self, target, source):
+ raise UnsupportedError("symlink")
+
+ @handle_fs_errors
+ def truncate(self, path, length, fh=None):
+ if fh is None and length == 0:
+ self.fs.open(path,"w").close()
+ else:
+ if fh is None:
+ f = self.fs.open(path,"w+")
+ else:
+ f = self._get_file(fh)
+ if not hasattr(f,"truncate"):
+ raise UnsupportedError("trunace")
+ f.truncate(length)
+
+ @handle_fs_errors
+ def unlink(self, path):
+ self.fs.remove(path)
+
+ @handle_fs_errors
+ def utimens(self, path, times=None):
+ raise UnsupportedError("utimens")
+
+ @handle_fs_errors
+ def write(self, path, data, offset, fh):
+ f = self._get_file(fh)
+ f.seek(offset)
+ f.write(data)
+ return len(data)
+
+
+def mount(fs,path,foreground=False,ready_callback=None,**kwds):
+ """Mount the given FS at the given path, using FUSE.
+
+ By default, this function spawns a new background process to manage the
+ FUSE event loop. The return value in this case is an instance of the
+ 'MountProcess' class, a subprocess.Popen subclass.
+
+ If the keyword argument 'foreground' is given, we instead run the FUSE
+ main loop in the current process. In this case the function will block
+ until the filesystem is unmounted, then return None.
+
+ If the keyword argument 'ready_callback' is provided, it will be called
+ when the filesystem has been mounted and is ready for use. Any additional
+ keyword arguments will be passed through as options to the underlying
+ FUSE class. Some interesting options include:
+
+ * nothreads: switch off threading in the FUSE event loop
+ * fsname: name to display in the mount info table
+
+ """
+ if foreground:
+ ops = FSOperations(fs,on_init=ready_callback)
+ return FUSE(ops,path,foreground=foreground,**kwds)
+ else:
+ mp = MountProcess(fs,path,kwds)
+ if ready_callback:
+ ready_callback()
+ return mp
+
+
+def unmount(path):
+ """Unmount the given mount point.
+
+ This function shells out to the 'fusermount' program to unmount a
+ FUSE filesystem. It works, but it would probably be better to use the
+ 'unmount' method on the MountProcess class if you have it.
+ """
+ if os.system("fusermount -u '" + path + "'"):
+ raise OSError("filesystem could not be unmounted: " + path)
+
+
+class MountProcess(subprocess.Popen):
+ """subprocess.Popen subclass managing a FUSE mount.
+
+ This is a subclass of subprocess.Popen, designed for easy management of
+ a FUSE mount in a background process. Rather than specifying the command
+ to execute, pass in the FS object to be mounted, the target mount point
+ and a dictionary of options for the underlying FUSE class.
+
+ In order to be passed successfully to the new process, the FS object
+ must be pickleable. This restriction may be lifted in the future.
+
+ This class has an extra attribute 'path' giving the path to the mounted
+ filesystem, and an extra method 'unmount' that will cleanly unmount it
+ and terminate the process.
+
+ By default, the spawning process will block until it receives notification
+ that the filesystem has been mounted. Since this notification is sent
+ by writing to a pipe, using the 'close_fds' option on this class will
+ prevent it from being sent. You can also pass in the keyword argument
+ 'nowait' to continue without waiting for notification.
+
+ """
+
+ # This works by spawning a new python interpreter and passing it the
+ # pickled (fs,path,opts) tuple on the command-line. Something like this:
+ #
+ # python -c "import MountProcess; MountProcess._do_mount('..data..')
+ #
+ # It would be more efficient to do a straight os.fork() here, and would
+ # remove the need to pickle the FS. But API wise, I think it's much
+ # better for mount() to return a Popen instance than just a pid.
+ #
+ # In the future this class could implement its own forking logic and
+ # just copy the relevant bits of the Popen interface. For now, this
+ # spawn-a-new-interpreter solution is the easiest to get up and running.
+
+ def __init__(self,fs,path,fuse_opts={},nowait=False,**kwds):
+ self.path = path
+ if nowait or kwds.get("close_fds",False):
+ cmd = 'from fs.expose.fuse import MountProcess; '
+ cmd = cmd + 'MountProcess._do_mount_nowait(%s)'
+            cmd = cmd % (repr(pickle.dumps((fs,path,fuse_opts),-1)),)
+ cmd = [sys.executable,"-c",cmd]
+ super(MountProcess,self).__init__(cmd,**kwds)
+ else:
+ (r,w) = os.pipe()
+ cmd = 'from fs.expose.fuse import MountProcess; '
+ cmd = cmd + 'MountProcess._do_mount_wait(%s)'
+ cmd = cmd % (repr(pickle.dumps((fs,path,fuse_opts,r,w),-1)),)
+ cmd = [sys.executable,"-c",cmd]
+ super(MountProcess,self).__init__(cmd,**kwds)
+ os.close(w)
+ if os.read(r,1) != "S":
+ raise RuntimeError("A FUSE error occurred")
+
+ def unmount(self):
+ """Cleanly unmount the FUSE filesystem, terminating this subprocess."""
+ if hasattr(self,"terminate"):
+ self.terminate()
+ else:
+ os.kill(self.pid,signal.SIGTERM)
+ self.wait()
+
+ @staticmethod
+ def _do_mount_nowait(data):
+ """Perform the specified mount, return without waiting."""
+ (fs,path,opts) = pickle.loads(data)
+ opts["foreground"] = True
+        mount(fs,path,**opts)
+
+ @staticmethod
+ def _do_mount_wait(data):
+ """Perform the specified mount, signalling when ready."""
+ (fs,path,opts,r,w) = pickle.loads(data)
+ os.close(r)
+ opts["foreground"] = True
+ successful = []
+ def ready_callback():
+ successful.append(True)
+ os.write(w,"S")
+ os.close(w)
+ opts["ready_callback"] = ready_callback
+ try:
+ mount(fs,path,**opts)
+ except Exception:
+ pass
+ if not successful:
+ os.write(w,"E")
+
+
+
+if __name__ == "__main__":
+ import os, os.path
+ from fs.tempfs import TempFS
+ mount_point = os.path.join(os.environ["HOME"],"fs.expose.fuse")
+ if not os.path.exists(mount_point):
+ os.makedirs(mount_point)
+ def ready_callback():
+ print "READY"
+ mount(TempFS(),mount_point,foreground=True,ready_callback=ready_callback)
+
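
The handle_fs_errors decorator above relies on fs.errors.convert_fs_errors to
turn FS exceptions into the OSError codes FUSE expects (and maps a None return
to 0). A minimal sketch of that translation, with a purely illustrative
fuse_op() function:

    >>> import errno
    >>> from fs.errors import convert_fs_errors, ResourceNotFoundError
    >>> @convert_fs_errors
    ... def fuse_op(path):
    ...     raise ResourceNotFoundError(path)
    >>> try:
    ...     fuse_op("/missing")
    ... except OSError, e:
    ...     print e.errno == errno.ENOENT
    True
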
diff --git a/fs/expose/fuse/fuse_ctypes.py b/fs/expose/fuse/fuse_ctypes.py
new file mode 100644
index 0000000..691a4c1
--- /dev/null
+++ b/fs/expose/fuse/fuse_ctypes.py
@@ -0,0 +1,603 @@
+#
+# [rfk,05/06/09] I've patched this to add support for the init() and
+# destroy() callbacks and will submit the patch upstream
+# sometime soon...
+#
+# Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com>
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import division
+
+from ctypes import *
+from ctypes.util import find_library
+from errno import EFAULT
+from functools import partial
+from platform import machine, system
+from traceback import print_exc
+
+
+class c_timespec(Structure):
+ _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
+
+class c_utimbuf(Structure):
+ _fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
+
+class c_stat(Structure):
+ pass # Platform dependent
+
+_system = system()
+if _system == 'Darwin':
+ _libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
+ ENOTSUP = 45
+ c_dev_t = c_int32
+ c_fsblkcnt_t = c_ulong
+ c_fsfilcnt_t = c_ulong
+ c_gid_t = c_uint32
+ c_mode_t = c_uint16
+ c_off_t = c_int64
+ c_pid_t = c_int32
+ c_uid_t = c_uint32
+ setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t, c_int, c_uint32)
+ getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
+ c_size_t, c_uint32)
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_ino', c_uint32),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_uint16),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec),
+ ('st_size', c_off_t),
+ ('st_blocks', c_int64),
+ ('st_blksize', c_int32)]
+elif _system == 'Linux':
+ ENOTSUP = 95
+ c_dev_t = c_ulonglong
+ c_fsblkcnt_t = c_ulonglong
+ c_fsfilcnt_t = c_ulonglong
+ c_gid_t = c_uint
+ c_mode_t = c_uint
+ c_off_t = c_longlong
+ c_pid_t = c_int
+ c_uid_t = c_uint
+ setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
+ getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
+
+ _machine = machine()
+ if _machine == 'i686':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('__pad1', c_ushort),
+ ('__st_ino', c_ulong),
+ ('st_mode', c_mode_t),
+ ('st_nlink', c_uint),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('st_rdev', c_dev_t),
+ ('__pad2', c_ushort),
+ ('st_size', c_off_t),
+ ('st_blksize', c_long),
+ ('st_blocks', c_longlong),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec),
+ ('st_ino', c_ulonglong)]
+ elif machine() == 'x86_64':
+ c_stat._fields_ = [
+ ('st_dev', c_dev_t),
+ ('st_ino', c_ulong),
+ ('st_nlink', c_ulong),
+ ('st_mode', c_mode_t),
+ ('st_uid', c_uid_t),
+ ('st_gid', c_gid_t),
+ ('__pad0', c_int),
+ ('st_rdev', c_dev_t),
+ ('st_size', c_off_t),
+ ('st_blksize', c_long),
+ ('st_blocks', c_long),
+ ('st_atimespec', c_timespec),
+ ('st_mtimespec', c_timespec),
+ ('st_ctimespec', c_timespec)]
+ else:
+ raise NotImplementedError('Linux %s is not supported.' % _machine)
+else:
+ raise NotImplementedError('%s is not supported.' % _system)
+
+
+class c_statvfs(Structure):
+ _fields_ = [
+ ('f_bsize', c_ulong),
+ ('f_frsize', c_ulong),
+ ('f_blocks', c_fsblkcnt_t),
+ ('f_bfree', c_fsblkcnt_t),
+ ('f_bavail', c_fsblkcnt_t),
+ ('f_files', c_fsfilcnt_t),
+ ('f_ffree', c_fsfilcnt_t),
+ ('f_favail', c_fsfilcnt_t)]
+
+class fuse_file_info(Structure):
+ _fields_ = [
+ ('flags', c_int),
+ ('fh_old', c_ulong),
+ ('writepage', c_int),
+ ('direct_io', c_uint, 1),
+ ('keep_cache', c_uint, 1),
+ ('flush', c_uint, 1),
+ ('padding', c_uint, 29),
+ ('fh', c_uint64),
+ ('lock_owner', c_uint64)]
+
+class fuse_context(Structure):
+ _fields_ = [
+ ('fuse', c_voidp),
+ ('uid', c_uid_t),
+ ('gid', c_gid_t),
+ ('pid', c_pid_t),
+ ('private_data', c_voidp)]
+
+class fuse_conn_info(Structure):
+ _fields_ = [
+ ('proto_major', c_uint),
+ ('proto_minor', c_uint),
+ ('async_read', c_uint),
+ ('max_write', c_uint),
+ ('max_readahead', c_uint),
+ ('capable', c_uint),
+ ('want', c_uint),
+ ('reserved', c_uint*25)]
+
+class fuse_operations(Structure):
+ _fields_ = [
+ ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
+ ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
+ ('getdir', c_voidp), # Deprecated, use readdir
+ ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
+ ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
+ ('unlink', CFUNCTYPE(c_int, c_char_p)),
+ ('rmdir', CFUNCTYPE(c_int, c_char_p)),
+ ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
+ ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
+ ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
+ ('utime', c_voidp), # Deprecated, use utimens
+ ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+ ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
+ POINTER(fuse_file_info))),
+ ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
+ POINTER(fuse_file_info))),
+ ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
+ ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+ ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+ ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
+ ('setxattr', setxattr_t),
+ ('getxattr', getxattr_t),
+ ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
+ ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
+ ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+ ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp,
+ c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))),
+ ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
+ ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
+ ('init', CFUNCTYPE(c_voidp, POINTER(fuse_conn_info))),
+ ('destroy', CFUNCTYPE(None, c_voidp)),
+ ('access', CFUNCTYPE(c_int, c_char_p, c_int)),
+ ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))),
+ ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))),
+ ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
+ POINTER(fuse_file_info))),
+ ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)),
+ ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
+ ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))]
+
+
+def time_of_timespec(ts):
+ return ts.tv_sec + 1.0 * ts.tv_nsec / 10 ** 9
+
+def set_st_attrs(st, attrs):
+ for key, val in attrs.items():
+ if key in ('st_atime', 'st_mtime', 'st_ctime'):
+ timespec = getattr(st, key + 'spec')
+ timespec.tv_sec = int(val)
+ timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
+ elif hasattr(st, key):
+ setattr(st, key, val)
+
+def _operation_wrapper(func, *args, **kwargs):
+ """Decorator for the methods of class FUSE"""
+ try:
+ return func(*args, **kwargs) or 0
+ except OSError, e:
+ return -(e.errno or EFAULT)
+ except:
+ #print_exc()
+ return -EFAULT
+
+_libfuse = CDLL(find_library("fuse"))
+
+
+def fuse_get_context():
+ """Returns a (uid, gid, pid) tuple"""
+ p = _libfuse.fuse_get_context()
+ ctx = cast(p, POINTER(fuse_context)).contents
+ return ctx.uid, ctx.gid, ctx.pid
+
+
+class FUSE(object):
+ """This class is the lower level interface and should not be subclassed
+ under normal use. Its methods are called by fuse.
+ Assumes API version 2.6 or later."""
+
+ def __init__(self, operations, mountpoint, raw_fi=False, **kwargs):
+ """Setting raw_fi to True will cause FUSE to pass the fuse_file_info
+ class as is to Operations, instead of just the fh field.
+ This gives you access to direct_io, keep_cache, etc."""
+
+ self.operations = operations
+ self.raw_fi = raw_fi
+ args = ['fuse']
+ if kwargs.pop('foreground', False):
+ args.append('-f')
+ if kwargs.pop('debug', False):
+ args.append('-d')
+ if kwargs.pop('nothreads', False):
+ args.append('-s')
+ kwargs.setdefault('fsname', operations.__class__.__name__)
+ args.append('-o')
+ args.append(','.join(key if val == True else '%s=%s' % (key, val)
+ for key, val in kwargs.items()))
+ args.append(mountpoint)
+ argv = (c_char_p * len(args))(*args)
+
+ fuse_ops = fuse_operations()
+ for name, prototype in fuse_operations._fields_:
+ if prototype != c_voidp and getattr(operations, name, None):
+ op = partial(_operation_wrapper, getattr(self, name))
+ setattr(fuse_ops, name, prototype(op))
+ _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
+ sizeof(fuse_ops), None)
+ del self.operations # Invoke the destructor
+
+ def init(self,conn):
+ return self.operations("init",conn)
+
+ def destroy(self,data):
+ return self.operations("destroy",data)
+
+ def getattr(self, path, buf):
+ return self.fgetattr(path, buf, None)
+
+ def readlink(self, path, buf, bufsize):
+ ret = self.operations('readlink', path)
+ memmove(buf, create_string_buffer(ret), bufsize)
+ return 0
+
+ def mknod(self, path, mode, dev):
+ return self.operations('mknod', path, mode, dev)
+
+ def mkdir(self, path, mode):
+ return self.operations('mkdir', path, mode)
+
+ def unlink(self, path):
+ return self.operations('unlink', path)
+
+ def rmdir(self, path):
+ return self.operations('rmdir', path)
+
+ def symlink(self, source, target):
+ return self.operations('symlink', target, source)
+
+ def rename(self, old, new):
+ return self.operations('rename', old, new)
+
+ def link(self, source, target):
+ return self.operations('link', target, source)
+
+ def chmod(self, path, mode):
+ return self.operations('chmod', path, mode)
+
+ def chown(self, path, uid, gid):
+ return self.operations('chown', path, uid, gid)
+
+ def truncate(self, path, length):
+ return self.operations('truncate', path, length)
+
+ def open(self, path, fip):
+ fi = fip.contents
+ if self.raw_fi:
+ return self.operations('open', path, fi)
+ else:
+ fi.fh = self.operations('open', path, fi.flags)
+ return 0
+
+ def read(self, path, buf, size, offset, fip):
+ fh = fip.contents if self.raw_fi else fip.contents.fh
+ ret = self.operations('read', path, size, offset, fh)
+ if ret:
+ memmove(buf, create_string_buffer(ret), size)
+ return len(ret)
+
+ def write(self, path, buf, size, offset, fip):
+ data = string_at(buf, size)
+ fh = fip.contents if self.raw_fi else fip.contents.fh
+ return self.operations('write', path, data, offset, fh)
+
+ def statfs(self, path, buf):
+ stv = buf.contents
+ attrs = self.operations('statfs', path)
+ for key, val in attrs.items():
+ if hasattr(stv, key):
+ setattr(stv, key, val)
+ return 0
+
+ def flush(self, path, fip):
+ fh = fip.contents if self.raw_fi else fip.contents.fh
+ return self.operations('flush', path, fh)
+
+ def release(self, path, fip):
+ fh = fip.contents if self.raw_fi else fip.contents.fh
+ return self.operations('release', path, fh)
+
+ def fsync(self, path, datasync, fip):
+ fh = fip.contents if self.raw_fi else fip.contents.fh
+ return self.operations('fsync', path, datasync, fh)
+
+ def setxattr(self, path, name, value, size, options, *args):
+ s = string_at(value, size)
+ return self.operations('setxattr', path, name, s, options, *args)
+
+ def getxattr(self, path, name, value, size, *args):
+ ret = self.operations('getxattr', path, name, *args)
+ buf = create_string_buffer(ret)
+ if bool(value):
+ memmove(value, buf, size)
+ return len(ret)
+
+ def listxattr(self, path, namebuf, size):
+ ret = self.operations('listxattr', path)
+ if not ret:
+ return 0
+ buf = create_string_buffer('\x00'.join(ret))
+ if bool(namebuf):
+ memmove(namebuf, buf, size)
+ return len(buf)
+
+ def removexattr(self, path, name):
+ return self.operations('removexattr', path, name)
+
+ def opendir(self, path, fip):
+ # Ignore raw_fi
+ fip.contents.fh = self.operations('opendir', path)
+ return 0
+
+ def readdir(self, path, buf, filler, offset, fip):
+ # Ignore raw_fi
+ for item in self.operations('readdir', path, fip.contents.fh):
+ if isinstance(item, str):
+ name, st, offset = item, None, 0
+ else:
+ name, attrs, offset = item
+ if attrs:
+ st = c_stat()
+ set_st_attrs(st, attrs)
+ else:
+ st = None
+ filler(buf, name, st, offset)
+ return 0
+
+ def releasedir(self, path, fip):
+ # Ignore raw_fi
+ return self.operations('releasedir', path, fip.contents.fh)
+
+ def fsyncdir(self, path, datasync, fip):
+ # Ignore raw_fi
+ return self.operations('fsyncdir', path, datasync, fip.contents.fh)
+
+ def access(self, path, amode):
+ return self.operations('access', path, amode)
+
+ def create(self, path, mode, fip):
+ fi = fip.contents
+ if self.raw_fi:
+ return self.operations('create', path, mode, fi)
+ else:
+ fi.fh = self.operations('create', path, mode)
+ return 0
+
+ def ftruncate(self, path, length, fip):
+ fh = fip.contents if self.raw_fi else fip.contents.fh
+ return self.operations('truncate', path, length, fh)
+
+ def fgetattr(self, path, buf, fip):
+ memset(buf, 0, sizeof(c_stat))
+ st = buf.contents
+ fh = fip and (fip.contents if self.raw_fi else fip.contents.fh)
+ attrs = self.operations('getattr', path, fh)
+ set_st_attrs(st, attrs)
+ return 0
+
+ def lock(self, path, fip, cmd, lock):
+ fh = fip.contents if self.raw_fi else fip.contents.fh
+ return self.operations('lock', path, fh, cmd, lock)
+
+ def utimens(self, path, buf):
+ if buf:
+ atime = time_of_timespec(buf.contents.actime)
+ mtime = time_of_timespec(buf.contents.modtime)
+ times = (atime, mtime)
+ else:
+ times = None
+ return self.operations('utimens', path, times)
+
+ def bmap(self, path, blocksize, idx):
+ return self.operations('bmap', path, blocksize, idx)
+
+
+from errno import EACCES, ENOENT
+from stat import S_IFDIR
+
+class Operations:
+ """This class should be subclassed and passed as an argument to FUSE on
+ initialization. All operations should raise an OSError exception on
+ error.
+
+ When in doubt about what an operation should do, check the FUSE header
+ file or the corresponding system call man page."""
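+
+ # A minimal subclass sketch; the '/mnt/hello' mountpoint and file contents
+ # below are illustrative assumptions, not part of this module:
+ #
+ #     from stat import S_IFREG
+ #
+ #     class HelloFS(Operations):
+ #         def getattr(self, path, fh=None):
+ #             if path == '/hello':
+ #                 return dict(st_mode=(S_IFREG | 0444), st_size=6, st_nlink=1)
+ #             return Operations.getattr(self, path, fh)
+ #         def readdir(self, path, fh):
+ #             return ['.', '..', 'hello']
+ #         def read(self, path, size, offset, fh):
+ #             return 'hello\n'[offset:offset + size]
+ #
+ #     FUSE(HelloFS(), '/mnt/hello', foreground=True)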
+
+ def __call__(self, op, *args):
+ if not hasattr(self, op):
+ raise OSError(EFAULT, '')
+ return getattr(self, op)(*args)
+
+ def on_init(self,conn):
+ pass
+
+ def on_destroy(self,data):
+ pass
+
+ def access(self, path, amode):
+ return 0
+
+ bmap = None
+
+ def chmod(self, path, mode):
+ raise OSError(EACCES, '')
+
+ def chown(self, path, uid, gid):
+ raise OSError(EACCES, '')
+
+ def create(self, path, mode, fi=None):
+ """When raw_fi is False (default case), fi is None and create should
+ return a numerical file handle.
+ When raw_fi is True the file handle should be set directly by create
+ and return 0."""
+ raise OSError(EACCES, '')
+
+ def flush(self, path, fh):
+ return 0
+
+ def fsync(self, path, datasync, fh):
+ return 0
+
+ def fsyncdir(self, path, datasync, fh):
+ return 0
+
+ def getattr(self, path, fh=None):
+ """Returns a dictionary with keys identical to the stat C structure
+ of stat(2).
+ st_atime, st_mtime and st_ctime should be floats."""
+ if path != '/':
+ raise OSError(ENOENT, '')
+ return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
+
+ def getxattr(self, path, name, position=0):
+ raise OSError(ENOTSUP, '')
+
+ def link(self, target, source):
+ raise OSError(EACCES, '')
+
+ def listxattr(self, path):
+ return []
+
+ lock = None
+
+ def mkdir(self, path, mode):
+ raise OSError(EACCES, '')
+
+ def mknod(self, path, mode, dev):
+ raise OSError(EACCES, '')
+
+ def open(self, path, flags):
+ """When raw_fi is False (default case), open should return a numerical
+ file handle.
+ When raw_fi is True the signature of open becomes:
+ open(self, path, fi)
+ and the file handle should be set directly."""
+ return 0
+
+ def opendir(self, path):
+ """Returns a numerical file handle."""
+ return 0
+
+ def read(self, path, size, offset, fh):
+ """Returns a string containing the data requested."""
+ raise OSError(EACCES, '')
+
+ def readdir(self, path, fh):
+ """Can return either a list of names, or a list of (name, attrs, offset)
+ tuples. attrs is a dict as in getattr."""
+ return ['.', '..']
+
+ def readlink(self, path):
+ raise OSError(EACCES, '')
+
+ def release(self, path, fh):
+ return 0
+
+ def releasedir(self, path, fh):
+ return 0
+
+ def removexattr(self, path, name):
+ raise OSError(ENOTSUP, '')
+
+ def rename(self, old, new):
+ raise OSError(EACCES, '')
+
+ def rmdir(self, path):
+ raise OSError(EACCES, '')
+
+ def setxattr(self, path, name, value, options, position=0):
+ raise OSError(ENOTSUP, '')
+
+ def statfs(self, path):
+ """Returns a dictionary with keys identical to the statvfs C structure
+ of statvfs(3). The f_frsize, f_favail, f_fsid and f_flag fields are
+ ignored by FUSE though."""
+ return {}
+
+ def symlink(self, target, source):
+ raise OSError(EACCES, '')
+
+ def truncate(self, path, length, fh=None):
+ raise OSError(EACCES, '')
+
+ def unlink(self, path):
+ raise OSError(EACCES, '')
+
+ def utimens(self, path, times=None):
+ """Times is a (atime, mtime) tuple. If None use current time."""
+ return 0
+
+ def write(self, path, data, offset, fh):
+ raise OSError(EACCES, '')
+
+
+class LoggingMixIn:
+ def __call__(self, op, path, *args):
+ print '->', op, path, repr(args)
+ ret = '[Unknown Error]'
+ try:
+ ret = getattr(self, op)(path, *args)
+ return ret
+ except OSError, e:
+ ret = str(e)
+ raise
+ finally:
+ print '<-', op, repr(ret)
diff --git a/fs/expose/sftp.py b/fs/expose/sftp.py
new file mode 100644
index 0000000..0cd3682
--- /dev/null
+++ b/fs/expose/sftp.py
@@ -0,0 +1,278 @@
+"""
+
+ fs.expose.sftp: expose an FS object over SFTP (via paramiko).
+
+This module provides the necessary interfaces to expose an FS object over
+SFTP, plugging into the infrastructure provided by the 'paramiko' module.
+
+For simple usage, the class 'BaseSFTPServer' provides an all-in-one server
+class based on the standard SocketServer module. Use it like so:
+
+ server = BaseSFTPServer((hostname,port),fs)
+ server.serve_forever()
+
+Note that the base class allows UNAUTHENTICATED ACCESS by default. For more
+serious work you will probably want to subclass it and override methods such
+as check_auth_password() and get_allowed_auths().
+
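+For example, a rough sketch of requiring password authentication; the
+username and password shown here are placeholders, not part of this module:
+
+    class PasswordSFTPServer(BaseSFTPServer):
+
+        def get_allowed_auths(self, username):
+            return "password"
+
+        def check_auth_none(self, username):
+            return paramiko.AUTH_FAILED
+
+        def check_auth_password(self, username, password):
+            if username == "user" and password == "s3cret":
+                return paramiko.AUTH_SUCCESSFUL
+            return paramiko.AUTH_FAILED
+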
+To integrate this module into an existing server framework based on paramiko,
+the 'SFTPServerInterface' class provides a concrete implementation of the
+paramiko.SFTPServerInterface protocol. If you don't understand what this
+is, you probably don't want to use it.
+
+"""
+
+import os
+import stat as statinfo
+import time
+import SocketServer as sockserv
+import threading
+from StringIO import StringIO
+
+import paramiko
+
+from fs.base import flags_to_mode
+from fs.path import *
+from fs.errors import *
+
+
+from fs.errors import wraps
+
+
+# Default host key used by BaseSFTPServer
+#
+DEFAULT_HOST_KEY = paramiko.RSAKey.from_private_key(StringIO("-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKCAIEAl7sAF0x2O/HwLhG68b1uG8KHSOTqe3Cdlj5i/1RhO7E2BJ4B\n3jhKYDYtupRnMFbpu7fb21A24w3Y3W5gXzywBxR6dP2HgiSDVecoDg2uSYPjnlDk\nHrRuviSBG3XpJ/awn1DObxRIvJP4/sCqcMY8Ro/3qfmid5WmMpdCZ3EBeC0CAwEA\nAQKCAIBSGefUs5UOnr190C49/GiGMN6PPP78SFWdJKjgzEHI0P0PxofwPLlSEj7w\nRLkJWR4kazpWE7N/bNC6EK2pGueMN9Ag2GxdIRC5r1y8pdYbAkuFFwq9Tqa6j5B0\nGkkwEhrcFNBGx8UfzHESXe/uE16F+e8l6xBMcXLMJVo9Xjui6QJBAL9MsJEx93iO\nzwjoRpSNzWyZFhiHbcGJ0NahWzc3wASRU6L9M3JZ1VkabRuWwKNuEzEHNK8cLbRl\nTyH0mceWXcsCQQDLDEuWcOeoDteEpNhVJFkXJJfwZ4Rlxu42MDsQQ/paJCjt2ONU\nWBn/P6iYDTvxrt/8+CtLfYc+QQkrTnKn3cLnAkEAk3ixXR0h46Rj4j/9uSOfyyow\nqHQunlZ50hvNz8GAm4TU7v82m96449nFZtFObC69SLx/VsboTPsUh96idgRrBQJA\nQBfGeFt1VGAy+YTLYLzTfnGnoFQcv7+2i9ZXnn/Gs9N8M+/lekdBFYgzoKN0y4pG\n2+Q+Tlr2aNlAmrHtkT13+wJAJVgZATPI5X3UO0Wdf24f/w9+OY+QxKGl86tTQXzE\n4bwvYtUGufMIHiNeWP66i6fYCucXCMYtx6Xgu2hpdZZpFw==\n-----END RSA PRIVATE KEY-----\n"))
+
+
+def report_sftp_errors(func):
+ """Decorator to catch and report FS errors as SFTP error codes.
+
+ Any FSError exceptions are caught and translated into an appropriate
+ return code, while other exceptions are passed through untouched.
+ """
+ @wraps(func)
+ def wrapper(*args,**kwds):
+ try:
+ return func(*args,**kwds)
+ except ResourceNotFoundError, e:
+ return paramiko.SFTP_NO_SUCH_FILE
+ except UnsupportedError, e:
+ return paramiko.SFTP_OP_UNSUPPORTED
+ except FSError, e:
+ return paramiko.SFTP_FAILURE
+ return wrapper
+
+
+class SFTPServerInterface(paramiko.SFTPServerInterface):
+ """SFTPServerInferface implementation that exposes an FS object.
+
+ This SFTPServerInterface subclass expects a single additional argument,
+ the fs object to be exposed. Use it to set up a transport subsystem
+ handler like so:
+
+ t.set_subsystem_handler("sftp",SFTPServer,SFTPServerInterface,fs)
+
+ If this all looks too complicated, you might consider the BaseSFTPServer
+ class also provided by this module - it automatically creates the enclosing
+ paramiko server infrastructure.
+ """
+
+ def __init__(self,server,fs,*args,**kwds):
+ self.fs = fs
+ super(SFTPServerInterface,self).__init__(server,*args,**kwds)
+
+ @report_sftp_errors
+ def open(self,path,flags,attr):
+ return SFTPHandle(self,path,flags)
+
+ @report_sftp_errors
+ def list_folder(self,path):
+ stats = []
+ for entry in self.fs.listdir(path,absolute=True):
+ stats.append(self.stat(entry))
+ return stats
+
+ @report_sftp_errors
+ def stat(self,path):
+ info = self.fs.getinfo(path)
+ stat = paramiko.SFTPAttributes()
+ stat.filename = basename(path)
+ stat.st_size = info.get("size")
+ stat.st_atime = time.mktime(info.get("accessed_time").timetuple())
+ stat.st_mtime = time.mktime(info.get("modified_time").timetuple())
+ if self.fs.isdir(path):
+ stat.st_mode = 0777 | statinfo.S_IFDIR
+ else:
+ stat.st_mode = 0777 | statinfo.S_IFREG
+ return stat
+
+ def lstat(self,path):
+ return self.stat(path)
+
+ @report_sftp_errors
+ def remove(self,path):
+ self.fs.remove(path)
+ return paramiko.SFTP_OK
+
+ @report_sftp_errors
+ def rename(self,oldpath,newpath):
+ if self.fs.isfile(oldpath):
+ self.fs.move(oldpath,newpath)
+ else:
+ self.fs.movedir(oldpath,newpath)
+ return paramiko.SFTP_OK
+
+ @report_sftp_errors
+ def mkdir(self,path,attr):
+ self.fs.makedir(path)
+ return paramiko.SFTP_OK
+
+ @report_sftp_errors
+ def rmdir(self,path):
+ self.fs.removedir(path)
+ return paramiko.SFTP_OK
+
+ def canonicalize(self,path):
+ return abspath(normpath(path))
+
+ def chattr(self,path,attr):
+ return paramiko.SFTP_OP_UNSUPPORTED
+
+ def readlink(self,path):
+ return paramiko.SFTP_OP_UNSUPPORTED
+
+ def symlink(self,target_path,path):
+ return paramiko.SFTP_OP_UNSUPPORTED
+
+
+class SFTPHandle(paramiko.SFTPHandle):
+ """SFTP file handler pointing to a file in an FS object.
+
+ This is a simple file wrapper for SFTPServerInterface, passing read
+ and write requests directly through to the underlying file from the FS.
+ """
+
+ def __init__(self,owner,path,flags):
+ super(SFTPHandle,self).__init__(flags)
+ mode = flags_to_mode(flags)
+ self.owner = owner
+ self.path = path
+ self._file = owner.fs.open(path,mode)
+
+ @report_sftp_errors
+ def close(self):
+ self._file.close()
+ return paramiko.SFTP_OK
+
+ @report_sftp_errors
+ def read(self,offset,length):
+ self._file.seek(offset)
+ return self._file.read(length)
+
+ @report_sftp_errors
+ def write(self,offset,data):
+ self._file.seek(offset)
+ self._file.write(data)
+ return paramiko.SFTP_OK
+
+ def stat(self):
+ return self.owner.stat(self.path)
+
+ def chattr(self,attr):
+ return self.owner.chattr(self.path,attr)
+
+
+class SFTPRequestHandler(sockserv.StreamRequestHandler):
+ """SockerServer RequestHandler subclass for BaseSFTPServer.
+
+ This RequestHandler subclass creates a paramiko Transport, sets up the
+ sftp subsystem, and hands off to the transport's own request-handling
+ thread. Note that paramiko.Transport uses a separate thread by default,
+ so there is no need to use ThreadingMixIn.
+ """
+
+ def handle(self):
+ t = paramiko.Transport(self.request)
+ t.add_server_key(self.server.host_key)
+ t.set_subsystem_handler("sftp",paramiko.SFTPServer,SFTPServerInterface,self.server.fs)
+ # Note that this actually spawns a new thread to handle the requests.
+ # (Actually, paramiko.Transport is a subclass of Thread)
+ t.start_server(server=self.server)
+
+
+class BaseSFTPServer(sockserv.TCPServer,paramiko.ServerInterface):
+ """SocketServer.TCPServer subclass exposing an FS via SFTP.
+
+ BaseSFTPServer combines a simple SocketServer.TCPServer subclass with an
+ implementation of paramiko.ServerInterface, providing everything that's
+ needed to expose an FS via SFTP.
+
+ Operation is in the standard SocketServer style. The target FS object
+ can be passed into the constructor, or set as an attribute on the server:
+
+ server = BaseSFTPServer((hostname,port),fs)
+ server.serve_forever()
+
+ It is also possible to specify the host key used by the server by setting
+ the 'host_key' attribute. If this is not specified, it will default to
+ the key found in the DEFAULT_HOST_KEY variable.
+
+ Note that this base class allows UNAUTHENTICATED ACCESS to the exposed
+ FS. This is intentional, since we can't guess what your authentication
+ needs are. To protect the exposed FS, override the following methods:
+
+ get_allowed_auths: determine the allowed auth modes
+ check_auth_none: check auth with no credentials
+ check_auth_password: check auth with a password
+ check_auth_publickey: check auth with a public key
+
+ """
+
+ def __init__(self,address,fs=None,host_key=None,RequestHandlerClass=None):
+ self.fs = fs
+ if host_key is None:
+ host_key = DEFAULT_HOST_KEY
+ self.host_key = host_key
+ if RequestHandlerClass is None:
+ RequestHandlerClass = SFTPRequestHandler
+ sockserv.TCPServer.__init__(self,address,RequestHandlerClass)
+
+ def close_request(self,request):
+ # paramiko.Transport closes itself when finished.
+ # If we close it here, we'll break the Transport thread.
+ pass
+
+ def check_channel_request(self,kind,chanid):
+ if kind == 'session':
+ return paramiko.OPEN_SUCCEEDED
+ return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+
+ def check_auth_none(self,username):
+ """Check whether the user can proceed without authentication."""
+ return paramiko.AUTH_SUCCESSFUL
+
+ def check_auth_publickey(self,username,key):
+ """Check whether the given public key is valid for authentication."""
+ return paramiko.AUTH_FAILED
+
+ def check_auth_password(self,username,password):
+ """Check whether the given password is valid for authentication."""
+ return paramiko.AUTH_FAILED
+
+ def get_allowed_auths(self,username):
+ """Return list of allowed auth modes.
+
+ The available modes are "none", "password" and "publickey".
+ """
+ return ("none",)
+
+
+# When called from the command-line, expose a TempFS for testing purposes
+if __name__ == "__main__":
+ from fs.tempfs import TempFS
+ server = BaseSFTPServer(("localhost",8022),TempFS())
+ try:
+ server.serve_forever()
+ except (SystemExit,KeyboardInterrupt):
+ server.server_close()
+
diff --git a/fs/expose/xmlrpc.py b/fs/expose/xmlrpc.py
new file mode 100644
index 0000000..1f57a16
--- /dev/null
+++ b/fs/expose/xmlrpc.py
@@ -0,0 +1,115 @@
+"""
+
+ fs.expose.xmlrpc: server to expose an FS via XML-RPC
+
+This module provides the necessary infrastructure to expose an FS object
+over XML-RPC. The main class is 'RPCFSServer', a SimpleXMLRPCServer subclass
+designed to expose an underlying FS.
+
+If you need to use a more powerful server than SimpleXMLRPCServer, you can
+use the RPCFSInterface class to provide an XML-RPC-compatible wrapper around
+an FS object, which can then be exposed using whatever server you choose
+(e.g. Twisted's XML-RPC server).
+
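+A rough quick-start sketch; the MemoryFS instance and port used here are
+illustrative only:
+
+    from fs.memoryfs import MemoryFS
+    server = RPCFSServer(MemoryFS(), ("localhost", 8080))
+    server.serve_forever()
+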
+"""
+
+import xmlrpclib
+from SimpleXMLRPCServer import SimpleXMLRPCServer
+
+
+class RPCFSInterface(object):
+ """Wrapper to expose an FS via a XML-RPC compatible interface.
+
+ The only real trick is using xmlrpclib.Binary objects to transport
+ the contents of files.
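+
+ For example (the path and data here are illustrative):
+
+     iface = RPCFSInterface(fs)
+     iface.set_contents("/hello.txt", xmlrpclib.Binary("some data"))
+     iface.get_contents("/hello.txt").data == "some data"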
+ """
+
+ def __init__(self,fs):
+ self.fs = fs
+
+ def get_contents(self,path):
+ data = self.fs.getcontents(path)
+ return xmlrpclib.Binary(data)
+
+ def set_contents(self,path,data):
+ self.fs.createfile(path,data.data)
+
+ def exists(self,path):
+ return self.fs.exists(path)
+
+ def isdir(self,path):
+ return self.fs.isdir(path)
+
+ def isfile(self,path):
+ return self.fs.isfile(path)
+
+ def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
+ return list(self.fs.listdir(path,wildcard,full,absolute,dirs_only,files_only))
+
+ def makedir(self,path,recursive=False,allow_recreate=False):
+ return self.fs.makedir(path,recursive,allow_recreate)
+
+ def remove(self,path):
+ return self.fs.remove(path)
+
+ def removedir(self,path,recursive=False,force=False):
+ return self.fs.removedir(path,recursive,force)
+
+ def rename(self,src,dst):
+ return self.fs.rename(src,dst)
+
+ def getinfo(self,path):
+ return self.fs.getinfo(path)
+
+ def desc(self,path):
+ return self.fs.desc(path)
+
+ def getattr(self,path,attr):
+ return self.fs.getattr(path,attr)
+
+ def setattr(self,path,attr,value):
+ return self.fs.setattr(path,attr,value)
+
+ def copy(self,src,dst,overwrite=False,chunk_size=16384):
+ return self.fs.copy(src,dst,overwrite,chunk_size)
+
+ def move(self,src,dst,overwrite=False,chunk_size=16384):
+ return self.fs.move(src,dst,overwrite,chunk_size)
+
+ def movedir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384):
+ return self.fs.movedir(src,dst,overwrite,ignore_errors,chunk_size)
+
+ def copydir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384):
+ return self.fs.copydir(src,dst,overwrite,ignore_errors,chunk_size)
+
+
+class RPCFSServer(SimpleXMLRPCServer):
+ """Server to expose an FS object via XML-RPC.
+
+ This class takes as its first argument an FS instance, and as its second
+ argument a (hostname,port) tuple on which to listen for XML-RPC requests.
+ Example:
+
+ fs = OSFS('/var/srv/myfiles')
+ s = RPCFSServer(fs,("",8080))
+ s.serve_forever()
+
+ To cleanly shut down the server after calling serve_forever, set the
+ attribute "serve_more_requests" to False.
+ """
+
+ def __init__(self,fs,addr,requestHandler=None,logRequests=None):
+ kwds = dict(allow_none=True)
+ if requestHandler is not None:
+ kwds['requestHandler'] = requestHandler
+ if logRequests is not None:
+ kwds['logRequests'] = logRequests
+ self.serve_more_requests = True
+ SimpleXMLRPCServer.__init__(self,addr,**kwds)
+ self.register_instance(RPCFSInterface(fs))
+
+ def serve_forever(self):
+ """Override serve_forever to allow graceful shutdown."""
+ while self.serve_more_requests:
+ self.handle_request()
+
diff --git a/fs/fuseserver.py b/fs/fuseserver.py
deleted file mode 100755
index d96c4ae..0000000
--- a/fs/fuseserver.py
+++ /dev/null
@@ -1,338 +0,0 @@
-#!/usr/bin/env python
-
-
-import base
-
-import fuse
-fuse.fuse_python_api = (0, 2)
-
-
-from datetime import datetime
-import time
-from os import errno
-
-import sys
-from stat import *
-
-def showtb(f):
-
- def run(*args, **kwargs):
- print
- print "-"*80
- print f, args, kwargs
- try:
- ret = f(*args, **kwargs)
- print "\tReturned:", repr(ret)
- return ret
- except Exception, e:
- print e
- raise
- print "-"*80
- print
- return run
-
-"""
-::*'''<code>open(path, flags)</code>'''
-
-::*'''<code>create(path, flags, mode)</code>'''
-
-::*'''<code>read(path, length, offset, fh=None)</code>'''
-
-::*'''<code>write(path, buf, offset, fh=None)</code>'''
-
-::*'''<code>fgetattr(path, fh=None)</code>'''
-
-::*'''<code>ftruncate(path, len, fh=None)</code>'''
-
-::*'''<code>flush(path, fh=None)</code>'''
-
-::*'''<code>release(path, fh=None)</code>'''
-
-::*'''<code>fsync(path, fdatasync, fh=None)</code>'''
-
-"""
-
-
-class FuseFile(object):
-
- def __init__(self, f):
- self.f = f
-
-
-
-
-
-_run_t = time.time()
-class FSFUSE(fuse.Fuse):
-
- def __init__(self, fs, *args, **kwargs):
- fuse.Fuse.__init__(self, *args, **kwargs)
- self._fs = fs
-
- @showtb
- def fsinit(self):
- return 0
-
- def __getattr__(self, name):
- print name
- raise AttributeError
-
- #@showtb
- def getattr(self, path):
-
- if not self._fs.exists(path):
- return -errno.ENOENT
-
- class Stat(fuse.Stat):
- def __init__(self, context, fs, path):
- fuse.Stat.__init__(self)
- info = fs.getinfo(path)
- isdir = fs.isdir(path)
-
- fsize = fs.getsize(path) or 1024
- self.st_ino = 0
- self.st_dev = 0
- self.st_nlink = 2 if isdir else 1
- self.st_blksize = fsize
- self.st_mode = info.get('st_mode', S_IFDIR | 0755 if isdir else S_IFREG | 0666)
- print self.st_mode
- self.st_uid = context['uid']
- self.st_gid = context['gid']
- self.st_rdev = 0
- self.st_size = fsize
- self.st_blocks = 1
-
- for key, value in info.iteritems():
- if not key.startswith('_'):
- setattr(self, key, value)
-
- def do_time(attr, key):
- if not hasattr(self, attr):
- if key in info:
- info_t = info[key]
- setattr(self, attr, time.mktime(info_t.timetuple()))
- else:
- setattr(self, attr, _run_t)
-
- do_time('st_atime', 'accessed_time')
- do_time('st_mtime', 'modified_time')
- do_time('st_ctime', 'created_time')
-
- #for v in dir(self):
- # if not v.startswith('_'):
- # print v, getattr(self, v)
-
- return Stat(self.GetContext(), self._fs, path)
-
- @showtb
- def chmod(self, path, mode):
- return 0
-
- @showtb
- def chown(self, path, user, group):
- return 0
-
- @showtb
- def utime(self, path, times):
- return 0
-
- @showtb
- def utimens(self, path, times):
- return 0
-
- @showtb
- def fsyncdir(self):
- pass
-
- @showtb
- def bmap(self):
- return 0
-
- @showtb
- def ftruncate(self, path, flags, fh):
- if fh is not None:
- fh.truncate()
- fh.flush()
- return 0
-
- def fsdestroy(self):
- return 0
-
- @showtb
- def statfs(self):
- return (0, 0, 0, 0, 0, 0, 0)
-
-
-
- #def setattr
- #
- #
- #@showtb
- #def getdir(self, path, offset):
- # paths = ['.', '..']
- # paths += self._fs.listdir(path)
- # print repr(paths)
- #
- # for p in paths:
- # yield fuse.Direntry(p)
-
- @showtb
- def opendir(self, path):
- return 0
-
- @showtb
- def getxattr(self, path, name, default):
- return self._fs.getattr(path, name, default)
-
- @showtb
- def setxattr(self, path, name, value):
- self._fs.setattr(path, name)
- return 0
-
- @showtb
- def removeattr(self, path, name):
- self._fs.removeattr(path, name)
- return 0
-
- @showtb
- def listxattr(self, path, something):
- return self._fs.listattrs(path)
-
- @showtb
- def open(self, path, flags):
- return self._fs.open(path, flags=flags)
-
- @showtb
- def create(self, path, flags, mode):
- return self._fs.open(path, "w")
-
- @showtb
- def read(self, path, length, offset, fh=None):
- if fh:
- fh.seek(offset)
- return fh.read(length)
-
- @showtb
- def write(self, path, buf, offset, fh=None):
- if fh:
- fh.seek(offset)
- # FUSE seems to expect a return value of the number of bytes written,
- # but Python file objects don't return that information,
- # so we will assume all bytes are written...
- bytes_written = fh.write(buf) or len(buf)
- return bytes_written
-
- @showtb
- def release(self, path, flags, fh=None):
- if fh:
- fh.close()
- return 0
-
- @showtb
- def flush(self, path, fh=None):
- if fh:
- try:
- fh.flush()
- except base.FSError:
- return 0
- return 0
-
- @showtb
- def access(self, path, *args, **kwargs):
- return 0
-
-
- #@showtb
- def readdir(self, path, offset):
- paths = ['.', '..']
- paths += self._fs.listdir(path)
- return [fuse.Direntry(p) for p in paths]
-
- #@showtb
- #def fgetattr(self, path, fh=None):
- # fh.flush()
- # return self.getattr(path)
-
- @showtb
- def readlink(self, path):
- return path
-
- @showtb
- def symlink(self, path, path1):
- return 0
-
-
- @showtb
- def mknod(self, path, mode, rdev):
- f = None
- try:
- f = self._fs.open(path, mode)
- finally:
- f.close()
- return 0
-
- @showtb
- def mkdir(self, path, mode):
- self._fs.mkdir(path, mode)
- return 0
-
- @showtb
- def rmdir(self, path):
- self._fs.removedir(path, True)
- return 0
-
- @showtb
- def unlink(self, path):
- try:
- self._fs.remove(path)
- except base.FSError:
- return 0
- return 0
-
- #symlink(target, name)
-
- @showtb
- def rename(self, old, new):
- self._fs.rename(old, new)
- return 0
-
-
-
- #@showtb
- #def read(self, path, size, offset):
- # pass
-
-
-
-def main(fs):
- usage="""
- FSFS: Exposes an FS
- """ + fuse.Fuse.fusage
-
- server = FSFUSE(fs, version="%prog 0.1",
- usage=usage, dash_s_do='setsingle')
-
- #server.readdir('.', 0)
-
- server.parse(errex=1)
- server.main()
-
-
-if __name__ == "__main__":
-
- import memoryfs
- import osfs
- mem_fs = memoryfs.MemoryFS()
- mem_fs.makedir("test")
- mem_fs.createfile("a.txt", "This is a test")
- mem_fs.createfile("test/b.txt", "This is in a sub-dir")
-
-
- #fs = osfs.OSFS('/home/will/fusetest/')
- #main(fs)
-
- main(mem_fs)
-
- # To run do ./fuserserver.py -d -f testfs
- # This will map a fs.memoryfs to testfs/ on the local filesystem under tests/fs
- # To unmouont, do fusermount -u testfs \ No newline at end of file
diff --git a/fs/helpers.py b/fs/helpers.py
deleted file mode 100644
index 23e0ef4..0000000
--- a/fs/helpers.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""Contains a number of standalone functions for path manipulation."""
-
-from itertools import chain
-
-def _iteratepath(path, numsplits=None):
-
- path = resolvepath(path)
- if not path:
- return []
-
- if numsplits == None:
- return filter(lambda p:bool(p), path.split('/'))
- else:
- return filter(lambda p:bool(p), path.split('/', numsplits))
-
-
-
-def isabsolutepath(path):
- """Returns True if a given path is absolute.
-
- >>> isabsolutepath("a/b/c")
- False
-
- >>> isabsolutepath("/foo/bar")
- True
-
- """
- if path:
- return path[0] in '\\/'
- return False
-
-def normpath(path):
- """Normalizes a path to be in the formated expected by FS objects.
- Returns a new path string.
-
- >>> normpath(r"foo\\bar\\baz")
- 'foo/bar/baz'
-
- """
- return path.replace('\\', '/')
-
-
-def pathjoin(*paths):
- """Joins any number of paths together. Returns a new path string.
-
- paths -- An iterable of path strings
-
- >>> pathjoin('foo', 'bar', 'baz')
- 'foo/bar/baz'
-
- >>> pathjoin('foo/bar', '../baz')
- 'foo/baz'
-
- """
- absolute = False
-
- relpaths = []
- for p in paths:
- if p:
- if p[0] in '\\/':
- del relpaths[:]
- absolute = True
- relpaths.append(p)
-
- pathstack = []
-
- for component in chain(*(normpath(path).split('/') for path in relpaths)):
- if component == "..":
- if not pathstack:
- raise ValueError("Relative path is invalid")
- sub = pathstack.pop()
- elif component == ".":
- pass
- elif component:
- pathstack.append(component)
-
- if absolute:
- return "/" + "/".join(pathstack)
- else:
- return "/".join(pathstack)
-
-
-def pathsplit(path):
- """Splits a path on a path separator. Returns a tuple containing the path up
- to that last separator and the remaining path component.
-
- >>> pathsplit("foo/bar")
- ('foo', 'bar')
-
- >>> pathsplit("foo/bar/baz")
- ('foo/bar', 'baz')
-
- """
-
- split = normpath(path).rsplit('/', 1)
- if len(split) == 1:
- return ('', split[0])
- return tuple(split)
-
-def dirname(path):
- """Returns the parent directory of a path.
-
- path -- A FS path
-
- >>> dirname('foo/bar/baz')
- 'foo/bar'
-
- """
- return pathsplit(path)[0]
-
-def resourcename(path):
- """Returns the resource references by a path.
-
- path -- A FS path
-
- >>> resourcename('foo/bar/baz')
- 'baz'
-
- """
- return pathsplit(path)[1]
-
-def resolvepath(path):
- """Normalises the path and removes any relative path components.
-
- path -- A path string
-
- >>> resolvepath(r"foo\\bar\\..\\baz")
- 'foo/baz'
-
- """
- return pathjoin(path)
-
-def makerelative(path):
- """Makes a path relative by removing initial separator.
-
- path -- A path
-
- >>> makerelative("/foo/bar")
- 'foo/bar'
-
- """
- path = normpath(path)
- if path.startswith('/'):
- return path[1:]
- return path
-
-def makeabsolute(path):
- """Makes a path absolute by adding a separater at the beginning of the path.
-
- path -- A path
-
- >>> makeabsolute("foo/bar/baz")
- '/foo/bar/baz'
-
- """
- path = normpath(path)
- if not path.startswith('/'):
- return '/'+path
- return path
-
-def issamedir(path1, path2):
- """Return true if two paths reference a resource in the same directory.
-
- path1 -- First path
- path2 -- Second path
-
- >>> issamedir("foo/bar/baz.txt", "foo/bar/spam.txt")
- True
- >>> issamedir("foo/bar/baz/txt", "spam/eggs/spam.txt")
- False
- """
- return pathsplit(resolvepath(path1))[0] == pathsplit(resolvepath(path2))[0]
diff --git a/fs/memoryfs.py b/fs/memoryfs.py
index 89f5049..278f906 100644
--- a/fs/memoryfs.py
+++ b/fs/memoryfs.py
@@ -1,25 +1,29 @@
#!/usr/bin/env python
"""
-A filesystem that exists only in memory, which obviously makes it very fast.
+
+ fs.memoryfs: A filesystem that exists only in memory
+
+Obviously that makes this particular filesystem very fast...
"""
-import os
import datetime
-from helpers import _iteratepath
-from base import *
+from fs.path import iteratepath
+from fs.base import *
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
+
def _check_mode(mode, mode_chars):
for c in mode_chars:
if c not in mode:
return False
return True
+
class MemoryFile(object):
def __init__(self, path, memory_fs, value, mode):
@@ -108,59 +112,67 @@ class MemoryFile(object):
def writelines(self, *args, **kwargs):
return self.mem_file.writelines(*args, **kwargs)
+ def __enter__(self):
+ return self
-class MemoryFS(FS):
+ def __exit__(self,exc_type,exc_value,traceback):
+ self.close()
+ return False
- class DirEntry(object):
- def __init__(self, type, name, contents=None):
- assert type in ("dir", "file"), "Type must be dir or file!"
+class DirEntry(object):
- self.type = type
- self.name = name
- self.permissions = None
+ def __init__(self, type, name, contents=None):
- if contents is None and type == "dir":
- contents = {}
+ assert type in ("dir", "file"), "Type must be dir or file!"
- self.open_files = []
- self.contents = contents
- self.data = None
- self.locks = 0
- self.created_time = datetime.datetime.now()
+ self.type = type
+ self.name = name
- def lock(self):
- self.locks += 1
+ if contents is None and type == "dir":
+ contents = {}
- def unlock(self):
- self.locks -=1
- assert self.locks >=0, "Lock / Unlock mismatch!"
+ self.open_files = []
+ self.contents = contents
+ self.data = None
+ self.locks = 0
+ self.created_time = datetime.datetime.now()
- def desc_contents(self):
- if self.isfile():
- return "<file %s>"%self.name
- elif self.isdir():
- return "<dir %s>"%"".join( "%s: %s"% (k, v.desc_contents()) for k, v in self.contents.iteritems())
+ def lock(self):
+ self.locks += 1
- def isdir(self):
- return self.type == "dir"
+ def unlock(self):
+ self.locks -=1
+ assert self.locks >=0, "Lock / Unlock mismatch!"
- def isfile(self):
- return self.type == "file"
+ def desc_contents(self):
+ if self.isfile():
+ return "<file %s>"%self.name
+ elif self.isdir():
+ return "<dir %s>"%"".join( "%s: %s"% (k, v.desc_contents()) for k, v in self.contents.iteritems())
- def islocked(self):
- return self.locks > 0
+ def isdir(self):
+ return self.type == "dir"
- def __str__(self):
- return "%s: %s" % (self.name, self.desc_contents())
+ def isfile(self):
+ return self.type == "file"
+
+ def islocked(self):
+ return self.locks > 0
+
+ def __str__(self):
+ return "%s: %s" % (self.name, self.desc_contents())
+
+
+class MemoryFS(FS):
def _make_dir_entry(self, *args, **kwargs):
return self.dir_entry_factory(*args, **kwargs)
def __init__(self, file_factory=None):
- FS.__init__(self, thread_syncronize=True)
- self.dir_entry_factory = MemoryFS.DirEntry
+ FS.__init__(self, thread_synchronize=True)
+ self.dir_entry_factory = DirEntry
self.file_factory = file_factory or MemoryFile
self.root = self._make_dir_entry('dir', 'root')
@@ -173,363 +185,274 @@ class MemoryFS(FS):
def __unicode__(self):
return unicode(self.__str__())
+ @synchronize
def _get_dir_entry(self, dirpath):
- self._lock.acquire()
- try:
- current_dir = self.root
- for path_component in _iteratepath(dirpath):
- if current_dir.contents is None:
- return None
- dir_entry = current_dir.contents.get(path_component, None)
- if dir_entry is None:
- return None
- current_dir = dir_entry
-
- return current_dir
- finally:
- self._lock.release()
+ current_dir = self.root
+ for path_component in iteratepath(dirpath):
+ if current_dir.contents is None:
+ return None
+ dir_entry = current_dir.contents.get(path_component, None)
+ if dir_entry is None:
+ return None
+ current_dir = dir_entry
+ return current_dir
+ @synchronize
def desc(self, path):
- self._lock.acquire()
- try:
- if self.isdir(path):
- return "Memory dir"
- elif self.isfile(path):
- return "Memory file object"
- else:
- return "No description available"
- finally:
- self._lock.release()
+ if self.isdir(path):
+ return "Memory dir"
+ elif self.isfile(path):
+ return "Memory file object"
+ else:
+ return "No description available"
+ @synchronize
def isdir(self, path):
- self._lock.acquire()
- try:
- dir_item = self._get_dir_entry(self._resolve(path))
- if dir_item is None:
- return False
- return dir_item.isdir()
- finally:
- self._lock.release()
+ dir_item = self._get_dir_entry(normpath(path))
+ if dir_item is None:
+ return False
+ return dir_item.isdir()
+ @synchronize
def isfile(self, path):
- self._lock.acquire()
- try:
- dir_item = self._get_dir_entry(self._resolve(path))
- if dir_item is None:
- return False
- return dir_item.isfile()
- finally:
- self._lock.release()
+ dir_item = self._get_dir_entry(normpath(path))
+ if dir_item is None:
+ return False
+ return dir_item.isfile()
+ @synchronize
def exists(self, path):
- self._lock.acquire()
- try:
- return self._get_dir_entry(path) is not None
- finally:
- self._lock.release()
+ return self._get_dir_entry(path) is not None
- def makedir(self, dirname, mode=0777, recursive=False, allow_recreate=False):
+ @synchronize
+ def makedir(self, dirname, recursive=False, allow_recreate=False):
if not dirname:
- raise PathError("INVALID_PATH", "Path is empty")
- self._lock.acquire()
- try:
- fullpath = dirname
- dirpath, dirname = pathsplit(dirname)
-
- if recursive:
- parent_dir = self._get_dir_entry(dirpath)
- if parent_dir is not None:
- if parent_dir.isfile():
- raise ResourceNotFoundError("NO_DIR", dirname, msg="Can not create a directory, because path references a file: %(path)s")
- else:
- if not allow_recreate:
- if dirname in parent_dir.contents:
- raise OperationFailedError("MAKEDIR_FAILED", dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
-
- current_dir = self.root
- for path_component in _iteratepath(dirpath)[:-1]:
- dir_item = current_dir.contents.get(path_component, None)
- if dir_item is None:
- break
- if not dir_item.isdir():
- raise ResourceNotFoundError("NO_DIR", dirname, msg="Can not create a directory, because path references a file: %(path)s")
- current_dir = dir_item
+ raise PathError("", "Path is empty")
+ fullpath = dirname
+ dirpath, dirname = pathsplit(dirname)
+
+ if recursive:
+ parent_dir = self._get_dir_entry(dirpath)
+ if parent_dir is not None:
+ if parent_dir.isfile():
+ raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
+ else:
+ if not allow_recreate:
+ if dirname in parent_dir.contents:
+ raise DestinationExistsError(dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
- current_dir = self.root
- for path_component in _iteratepath(dirpath):
- dir_item = current_dir.contents.get(path_component, None)
- if dir_item is None:
- new_dir = self._make_dir_entry("dir", path_component)
- current_dir.contents[path_component] = new_dir
- current_dir = new_dir
- else:
- current_dir = dir_item
+ current_dir = self.root
+ for path_component in iteratepath(dirpath)[:-1]:
+ dir_item = current_dir.contents.get(path_component, None)
+ if dir_item is None:
+ break
+ if not dir_item.isdir():
+ raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
+ current_dir = dir_item
- parent_dir = current_dir
+ current_dir = self.root
+ for path_component in iteratepath(dirpath):
+ dir_item = current_dir.contents.get(path_component, None)
+ if dir_item is None:
+ new_dir = self._make_dir_entry("dir", path_component)
+ current_dir.contents[path_component] = new_dir
+ current_dir = new_dir
+ else:
+ current_dir = dir_item
- else:
- parent_dir = self._get_dir_entry(dirpath)
- if parent_dir is None:
- raise ResourceNotFoundError("NO_DIR", dirname, msg="Could not make dir, as parent dir does not exist: %(path)s")
+ parent_dir = current_dir
- dir_item = parent_dir.contents.get(dirname, None)
- if dir_item is not None:
- if dir_item.isdir():
- if not allow_recreate:
- raise FSError("DIR_EXISTS", dirname)
- else:
- raise ResourceNotFoundError("NO_DIR", dirname, msg="Can not create a directory, because path references a file: %(path)s")
+ else:
+ parent_dir = self._get_dir_entry(dirpath)
+ if parent_dir is None:
+ raise ParentDirectoryMissingError(dirname, msg="Could not make dir, as parent dir does not exist: %(path)s")
+
+ dir_item = parent_dir.contents.get(dirname, None)
+ if dir_item is not None:
+ if dir_item.isdir():
+ if not allow_recreate:
+ raise DestinationExistsError(dirname)
+ else:
+ raise ResourceInvalidError(dirname, msg="Can not create a directory, because path references a file: %(path)s")
- if dir_item is None:
- parent_dir.contents[dirname] = self._make_dir_entry("dir", dirname)
+ if dir_item is None:
+ parent_dir.contents[dirname] = self._make_dir_entry("dir", dirname)
- return self
- finally:
- self._lock.release()
+ return self
def _orphan_files(self, file_dir_entry):
for f in file_dir_entry.open_files:
f.close()
+ @synchronize
def _lock_dir_entry(self, path):
- self._lock.acquire()
- try:
- dir_entry = self._get_dir_entry(path)
- dir_entry.lock()
- finally:
- self._lock.release()
+ dir_entry = self._get_dir_entry(path)
+ dir_entry.lock()
+ @synchronize
def _unlock_dir_entry(self, path):
- self._lock.acquire()
- try:
- dir_entry = self._get_dir_entry(path)
- dir_entry.unlock()
- finally:
- self._lock.release()
+ dir_entry = self._get_dir_entry(path)
+ dir_entry.unlock()
+ @synchronize
def _is_dir_locked(self, path):
- self._lock.acquire()
- try:
- dir_entry = self._get_dir_entry(path)
- return dir_entry.islocked()
- finally:
- self._lock.release()
+ dir_entry = self._get_dir_entry(path)
+ return dir_entry.islocked()
+ @synchronize
def open(self, path, mode="r", **kwargs):
- self._lock.acquire()
- try:
- filepath, filename = pathsplit(path)
- parent_dir_entry = self._get_dir_entry(filepath)
+ filepath, filename = pathsplit(path)
+ parent_dir_entry = self._get_dir_entry(filepath)
- if parent_dir_entry is None or not parent_dir_entry.isdir():
- raise ResourceNotFoundError("NO_FILE", path)
+ if parent_dir_entry is None or not parent_dir_entry.isdir():
+ raise ResourceNotFoundError(path)
- if 'r' in mode or 'a' in mode:
- if filename not in parent_dir_entry.contents:
- raise ResourceNotFoundError("NO_FILE", path)
+ if 'r' in mode or 'a' in mode:
+ if filename not in parent_dir_entry.contents:
+ raise ResourceNotFoundError(path)
- file_dir_entry = parent_dir_entry.contents[filename]
+ file_dir_entry = parent_dir_entry.contents[filename]
- if 'a' in mode and file_dir_entry.islocked():
- raise ResourceLockedError("FILE_LOCKED", path)
+ if 'a' in mode and file_dir_entry.islocked():
+ raise ResourceLockedError(path)
- self._lock_dir_entry(path)
- mem_file = self.file_factory(path, self, file_dir_entry.data, mode)
- file_dir_entry.open_files.append(mem_file)
- return mem_file
+ self._lock_dir_entry(path)
+ mem_file = self.file_factory(path, self, file_dir_entry.data, mode)
+ file_dir_entry.open_files.append(mem_file)
+ return mem_file
- elif 'w' in mode:
- if filename not in parent_dir_entry.contents:
- file_dir_entry = self._make_dir_entry("file", filename)
- parent_dir_entry.contents[filename] = file_dir_entry
- else:
- file_dir_entry = parent_dir_entry.contents[filename]
+ elif 'w' in mode:
+ if filename not in parent_dir_entry.contents:
+ file_dir_entry = self._make_dir_entry("file", filename)
+ parent_dir_entry.contents[filename] = file_dir_entry
+ else:
+ file_dir_entry = parent_dir_entry.contents[filename]
- if file_dir_entry.islocked():
- raise ResourceLockedError("FILE_LOCKED", path)
+ if file_dir_entry.islocked():
+ raise ResourceLockedError(path)
- self._lock_dir_entry(path)
+ self._lock_dir_entry(path)
- mem_file = self.file_factory(path, self, None, mode)
- file_dir_entry.open_files.append(mem_file)
- return mem_file
+ mem_file = self.file_factory(path, self, None, mode)
+ file_dir_entry.open_files.append(mem_file)
+ return mem_file
- if parent_dir_entry is None:
- raise ResourceNotFoundError("NO_FILE", path)
- finally:
- self._lock.release()
+ if parent_dir_entry is None:
+ raise ResourceNotFoundError(path)
+ @synchronize
def remove(self, path):
- self._lock.acquire()
- try:
- dir_entry = self._get_dir_entry(path)
+ dir_entry = self._get_dir_entry(path)
- if dir_entry is None:
- raise ResourceNotFoundError("NO_FILE", path)
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
- if dir_entry.islocked():
- self._orphan_files(dir_entry)
- #raise ResourceLockedError("FILE_LOCKED", path)
+ if dir_entry.islocked():
+ self._orphan_files(dir_entry)
+ #raise ResourceLockedError("FILE_LOCKED", path)
- pathname, dirname = pathsplit(path)
+ if dir_entry.isdir():
+ raise ResourceInvalidError(path,msg="That's a directory, not a file: %(path)s")
- parent_dir = self._get_dir_entry(pathname)
+ pathname, dirname = pathsplit(path)
- del parent_dir.contents[dirname]
- finally:
- self._lock.release()
+ parent_dir = self._get_dir_entry(pathname)
- def removedir(self, path, recursive=False, force=False):
- self._lock.acquire()
- try:
- dir_entry = self._get_dir_entry(path)
+ del parent_dir.contents[dirname]
- if dir_entry is None:
- raise ResourceNotFoundError("NO_DIR", path)
- if dir_entry.islocked():
- raise ResourceLockedError("FILE_LOCKED", path)
- if not dir_entry.isdir():
- raise ResourceInvalid("WRONG_TYPE", path, msg="Can't remove resource, its not a directory: %(path)s" )
-
- if dir_entry.contents and not force:
- raise OperationFailedError("REMOVEDIR_FAILED", "Directory is not empty: %(path)s")
-
- if recursive:
- rpathname = path
- while rpathname:
- rpathname, dirname = pathsplit(rpathname)
- parent_dir = self._get_dir_entry(rpathname)
- del parent_dir.contents[dirname]
- else:
- pathname, dirname = pathsplit(path)
- parent_dir = self._get_dir_entry(pathname)
+ @synchronize
+ def removedir(self, path, recursive=False, force=False):
+ dir_entry = self._get_dir_entry(path)
+
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+ if dir_entry.islocked():
+ raise ResourceLockedError(path)
+ if not dir_entry.isdir():
+ raise ResourceInvalidError(path, msg="Can't remove resource, it's not a directory: %(path)s")
+
+ if dir_entry.contents and not force:
+ raise DirectoryNotEmptyError(path)
+
+ if recursive:
+ rpathname = path
+ while rpathname:
+ rpathname, dirname = pathsplit(rpathname)
+ parent_dir = self._get_dir_entry(rpathname)
del parent_dir.contents[dirname]
+ else:
+ pathname, dirname = pathsplit(path)
+ parent_dir = self._get_dir_entry(pathname)
+ del parent_dir.contents[dirname]
- finally:
- self._lock.release()
+ @synchronize
def rename(self, src, dst):
if not issamedir(src, dst):
raise ValueError("Destination path must the same directory (use the move method for moving to a different directory)")
- self._lock.acquire()
- try:
-
- dst = pathsplit(dst)[-1]
- dir_entry = self._get_dir_entry(src)
- if dir_entry is None:
- raise ResourceNotFoundError("NO_DIR", src)
- #if dir_entry.islocked():
- # raise ResourceLockedError("FILE_LOCKED", src)
+ dst = pathsplit(dst)[-1]
- open_files = dir_entry.open_files[:]
- for f in open_files:
- f.flush()
- f.path = dst
+ dir_entry = self._get_dir_entry(src)
+ if dir_entry is None:
+ raise ResourceNotFoundError(src)
+ #if dir_entry.islocked():
+ # raise ResourceLockedError("FILE_LOCKED", src)
- dst_dir_entry = self._get_dir_entry(dst)
- if dst_dir_entry is not None:
- raise OperationFailedError("RENAME_FAILED", "Destination exists: %(path)s", src + " -> " + dst )
+ open_files = dir_entry.open_files[:]
+ for f in open_files:
+ f.flush()
+ f.path = dst
- pathname, dirname = pathsplit(src)
- parent_dir = self._get_dir_entry(pathname)
- parent_dir.contents[dst] = parent_dir.contents[dirname]
- parent_dir.name = dst
- del parent_dir.contents[dirname]
+ dst_dir_entry = self._get_dir_entry(dst)
+ if dst_dir_entry is not None:
+ raise DestinationExistsError(dst)
- finally:
- self._lock.release()
+ pathname, dirname = pathsplit(src)
+ parent_dir = self._get_dir_entry(pathname)
+ parent_dir.contents[dst] = parent_dir.contents[dirname]
+ parent_dir.name = dst
+ del parent_dir.contents[dirname]
+ @synchronize
def _on_close_memory_file(self, open_file, path, value):
- self._lock.acquire()
- try:
- filepath, filename = pathsplit(path)
- dir_entry = self._get_dir_entry(path)
- if dir_entry is not None and value is not None:
- dir_entry.data = value
- dir_entry.open_files.remove(open_file)
- self._unlock_dir_entry(path)
- finally:
- self._lock.release()
-
- def _on_flush_memory_file(self, path, value):
- self._lock.acquire()
- try:
- filepath, filename = pathsplit(path)
- dir_entry = self._get_dir_entry(path)
+ filepath, filename = pathsplit(path)
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is not None and value is not None:
dir_entry.data = value
- finally:
- self._lock.release()
-
-
-
- def listdir(self, path="/", wildcard=None, full=False, absolute=False, hidden=True, dirs_only=False, files_only=False):
- self._lock.acquire()
- try:
- dir_entry = self._get_dir_entry(path)
- if dir_entry is None:
- raise ResourceNotFoundError("NO_DIR", path)
- paths = dir_entry.contents.keys()
- return self._listdir_helper(path, paths, wildcard, full, absolute, hidden, dirs_only, files_only)
- finally:
- self._lock.release()
+ dir_entry.open_files.remove(open_file)
+ self._unlock_dir_entry(path)
+ @synchronize
+ def _on_flush_memory_file(self, path, value):
+ filepath, filename = pathsplit(path)
+ dir_entry = self._get_dir_entry(path)
+ dir_entry.data = value
+
+ @synchronize
+ def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
+ if dir_entry.isfile():
+ raise ResourceInvalidError(path,msg="that's a file, not a directory: %(path)s")
+ paths = dir_entry.contents.keys()
+ return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
+
+ @synchronize
def getinfo(self, path):
- self._lock.acquire()
- try:
- dir_entry = self._get_dir_entry(path)
-
- if dir_entry is None:
- raise ResourceNotFoundError("NO_RESOURCE", path)
-
- info = {}
- info['created_time'] = dir_entry.created_time
-
- if dir_entry.isfile():
- info['size'] = len(dir_entry.data or '')
-
- return info
- finally:
- self._lock.release()
-
-
-
-def main():
-
- mem_fs = MemoryFS()
- mem_fs.makedir('test/test2', recursive=True)
- mem_fs.makedir('test/A', recursive=True)
- mem_fs.makedir('test/A/B', recursive=True)
-
-
-
- mem_fs.open("test/readme.txt", 'w').write("Hello, World!")
-
- mem_fs.open("test/readme.txt", 'wa').write("\nSecond Line")
-
- print mem_fs.open("test/readme.txt", 'r').read()
-
-
- f1 = mem_fs.open("/test/readme.txt", 'r')
- f2 = mem_fs.open("/test/readme.txt", 'r')
- print f1.read(10)
- print f2.read(10)
- f1.close()
- f2.close()
- f3 = mem_fs.open("/test/readme.txt", 'w')
-
+ dir_entry = self._get_dir_entry(path)
+ if dir_entry is None:
+ raise ResourceNotFoundError(path)
- #print mem_fs.listdir('test')
- #print mem_fs.isdir("test/test2")
- #print mem_fs.root
- print_fs(mem_fs)
+ info = {}
+ info['created_time'] = dir_entry.created_time
- from browsewin import browse
- browse(mem_fs)
+ if dir_entry.isfile():
+ info['size'] = len(dir_entry.data or '')
+ return info
-if __name__ == "__main__":
- main()
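For reference, a minimal sketch of driving the refactored MemoryFS in place of the removed inline demo. The import locations (fs.memoryfs, and the error classes assumed to live in the new fs.errors module) are assumptions here, as is the listdir() ordering shown in the comments.

from fs.memoryfs import MemoryFS
from fs.errors import ResourceNotFoundError, DirectoryNotEmptyError  # assumed location

mem = MemoryFS()
mem.makedir("test/A/B", recursive=True)
mem.open("test/readme.txt", "w").write("Hello, World!")

print mem.listdir("test")                # e.g. ['A', 'readme.txt']
print mem.getinfo("test/readme.txt")     # 'created_time', plus 'size' for files

try:
    mem.removedir("test")                # contents present and force=False
except DirectoryNotEmptyError:
    mem.removedir("test", force=True)    # force=True skips the emptiness check

try:
    mem.getinfo("no/such/path")
except ResourceNotFoundError:
    pass
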
diff --git a/fs/mountfs.py b/fs/mountfs.py
index e00d2f6..96913af 100644
--- a/fs/mountfs.py
+++ b/fs/mountfs.py
@@ -4,27 +4,32 @@ from base import *
from objecttree import ObjectTree
from memoryfs import MemoryFS
-class MountFS(FS):
- """A filesystem that delegates to other filesystems."""
+class DirMount(object):
+ def __init__(self, path, fs):
+ self.path = path
+ self.fs = fs
- class DirMount(object):
- def __init__(self, path, fs):
- self.path = path
- self.fs = fs
+ def __str__(self):
+ return "Mount point: %s"%self.path
- def __str__(self):
- return "Mount point: %s"%self.path
- class FileMount(object):
- def __init__(self, path, open_callable, info_callable=None):
- self.open_callable = open_callable
- def no_info_callable(path):
- return {}
- self.info_callable = info_callable or no_info_callable
+class FileMount(object):
+ def __init__(self, path, open_callable, info_callable=None):
+ self.open_callable = open_callable
+ def no_info_callable(path):
+ return {}
+ self.info_callable = info_callable or no_info_callable
- def __init__(self, thread_syncronize=True):
- FS.__init__(self, thread_syncronize=thread_syncronize)
+
+class MountFS(FS):
+ """A filesystem that delegates to other filesystems."""
+
+ DirMount = DirMount
+ FileMount = FileMount
+
+ def __init__(self, thread_synchronize=True):
+ FS.__init__(self, thread_synchronize=thread_synchronize)
self.mount_tree = ObjectTree()
def __str__(self):
@@ -66,7 +71,7 @@ class MountFS(FS):
try:
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
- raise ResourceNotFoundError("NO_RESOURCE", path)
+ raise ResourceNotFoundError(path)
if fs is self:
object = self.mount_tree.get(path, None)
@@ -82,7 +87,7 @@ class MountFS(FS):
try:
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
- return ResourceNotFoundError("NO_RESOURCE", path)
+ return ResourceNotFoundError(path)
if fs is self:
object = self.mount_tree.get(path, None)
@@ -92,7 +97,7 @@ class MountFS(FS):
finally:
self._lock.release()
- def listdir(self, path="/", wildcard=None, full=False, absolute=False, hidden=True, dirs_only=False, files_only=False):
+ def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
self._lock.acquire()
try:
@@ -100,7 +105,7 @@ class MountFS(FS):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
- raise ResourceNotFoundError("NO_DIR", path)
+ raise ResourceNotFoundError(path)
if fs is self:
if files_only:
@@ -112,7 +117,6 @@ class MountFS(FS):
wildcard,
full,
absolute,
- hidden,
dirs_only,
files_only)
else:
@@ -120,28 +124,27 @@ class MountFS(FS):
wildcard=wildcard,
full=False,
absolute=False,
- hidden=hidden,
dirs_only=dirs_only,
files_only=files_only)
if full or absolute:
if full:
- path = makeabsolute(path)
+ path = abspath(normpath(path))
else:
- path = makerelative(path)
+ path = relpath(normpath(path))
paths = [pathjoin(path, p) for p in paths]
return paths
finally:
self._lock.release()
- def makedir(self, path, mode=0777, recursive=False, allow_recreate=False):
+ def makedir(self, path, recursive=False, allow_recreate=False):
path = normpath(path)
self._lock.acquire()
try:
fs, mount_path, delegate_path = self._delegate(path)
if fs is self:
- raise UnsupportedError("UNSUPPORTED", msg="Can only makedir for mounted paths" )
- return fs.makedir(delegate_path, mode, recursive=recursive, allow_recreate=allow_recreate)
+ raise UnsupportedError("make directory", msg="Can only makedir for mounted paths" )
+ return fs.makedir(delegate_path, recursive=recursive, allow_recreate=allow_recreate)
finally:
self._lock.release()
@@ -158,7 +161,7 @@ class MountFS(FS):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
return fs.open(delegate_path, mode, **kwargs)
@@ -190,9 +193,9 @@ class MountFS(FS):
path = normpath(path)
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
if fs is self:
- raise UnsupportedError("UNSUPPORTED", msg="Can only remove paths within a mounted dir" )
+ raise UnsupportedError("remove file", msg="Can only remove paths within a mounted dir")
return fs.remove(delegate_path)
finally:
@@ -207,10 +210,10 @@ class MountFS(FS):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None or fs is self:
- raise OperationFailedError("REMOVEDIR_FAILED", path, msg="Can not removedir for an un-mounted path")
+ raise ResourceInvalidError(path, msg="Can not removedir for an un-mounted path")
if not force and not fs.isdirempty(delegate_path):
- raise OperationFailedError("REMOVEDIR_FAILED", "Directory is not empty: %(path)s")
+                raise DirectoryNotEmptyError(path)
return fs.removedir(delegate_path, recursive, force)
@@ -220,7 +223,7 @@ class MountFS(FS):
def rename(self, src, dst):
if not issamedir(src, dst):
- raise ValueError("Destination path must the same directory (user the move method for moving to a different directory)")
+            raise ValueError("Destination path must be in the same directory (use the move method for moving to a different directory)")
self._lock.acquire()
try:
@@ -228,7 +231,7 @@ class MountFS(FS):
fs2, mount_path2, delegate_path2 = self._delegate(dst)
if fs1 is not fs2:
- raise OperationFailedError("RENAME_FAILED", src)
+ raise OperationFailedError("rename resource", path=src)
if fs1 is not self:
return fs1.rename(delegate_path1, delegate_path2)
@@ -240,11 +243,11 @@ class MountFS(FS):
object2 = self.mount_tree.get(path_dst, None)
if object1 is None:
- raise NoResourceError("NO_RESOURCE", src)
+ raise ResourceNotFoundError(src)
# TODO!
- raise UnsupportedError("UNSUPPORTED", src)
+ raise UnsupportedError("rename resource", path=src)
finally:
self._lock.release()
@@ -280,7 +283,7 @@ class MountFS(FS):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
- raise ResourceNotFoundError("NO_RESOURCE", path)
+ raise ResourceNotFoundError(path)
if fs is self:
if self.isfile(path):
@@ -297,13 +300,13 @@ class MountFS(FS):
fs, mount_path, delegate_path = self._delegate(path)
if fs is None:
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
if fs is self:
object = self.mount_tree.get(path, None)
if object is None or isinstance(object, dict):
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
size = self.mount_tree[path].info_callable(path).get("size", None)
return size
@@ -312,43 +315,3 @@ class MountFS(FS):
except:
self._lock.release()
-
-if __name__ == "__main__":
-
- help(MountFS)
-
- fs1 = MemoryFS()
- fs1.makedir("Memroot/B/C/D", recursive=True)
- fs1.open("test.txt", 'w').write("Hello, World!")
-
- #print_fs(fs1)
-
- mountfs = MountFS()
-
- mountfs.mountdir('1/2', fs1)
- mountfs.mountdir('1/another', fs1)
-
- def testfile(*args, **kwargs):
- print args, kwargs
-
- def testfile_info(*args, **kwargs):
- print "testfile_info", args, kwargs
- return {'size':100}
-
- mountfs.mountfile('filedir/file.txt', testfile, testfile_info)
-
- print mountfs.getinfo("filedir/file.txt")
-
- #print mountfs.listdir('1/2/Memroot/B/C')
-
- print mountfs.isdir("1")
-
- print mountfs.desc('1/2/Memroot/B')
- print_fs(mountfs)
-
- import browsewin
- browsewin.browse(mountfs)
-
- print mountfs.getinfo("1/2")
-
- #print mountfs._delegate('1/2/Memroot/B')
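A small usage sketch for the reworked MountFS, standing in for the removed __main__ demo; the mount path and the explicit "r" mode are illustrative choices rather than anything mandated by the code above.

from fs.memoryfs import MemoryFS
from fs.mountfs import MountFS

mem = MemoryFS()
mem.makedir("docs")
mem.open("docs/readme.txt", "w").write("Hello, World!")

mnt = MountFS()
mnt.mountdir("data/mem", mem)            # delegate everything under data/mem to mem

print mnt.listdir("data/mem/docs")       # ['readme.txt']
print mnt.open("data/mem/docs/readme.txt", "r").read()

mnt.makedir("data/mem/reports")          # ok: delegated to the mounted MemoryFS
# makedir() outside any mount raises UnsupportedError("make directory", ...)
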
diff --git a/fs/multifs.py b/fs/multifs.py
index d41580a..3454e64 100644
--- a/fs/multifs.py
+++ b/fs/multifs.py
@@ -1,7 +1,8 @@
 #!/usr/bin/env python
-from base import FS, FSError
-from helpers import *
+from fs.base import FS, FSError
+from fs.path import *
+
class MultiFS(FS):
@@ -13,7 +14,7 @@ class MultiFS(FS):
"""
def __init__(self):
- FS.__init__(self, thread_syncronize=True)
+ FS.__init__(self, thread_synchronize=True)
self.fs_sequence = []
self.fs_lookup = {}
@@ -99,7 +100,7 @@ class MultiFS(FS):
for fs_name, fs_object in self.fs_lookup.iteritems():
if fs is fs_object:
return fs_name, fs
- raise ResourceNotFoundError("NO_RESOURCE", path, msg="Path does not map to any filesystem: %(path)s")
+ raise ResourceNotFoundError(path, msg="Path does not map to any filesystem: %(path)s")
finally:
self._lock.release()
@@ -109,7 +110,7 @@ class MultiFS(FS):
fs = self._delegate_search(path)
if fs is not None:
return fs.getsyspath(path, allow_none=allow_none)
- raise ResourceNotFoundError("NO_RESOURCE", path)
+ raise ResourceNotFoundError(path)
finally:
self._lock.release()
@@ -117,7 +118,7 @@ class MultiFS(FS):
self._lock.acquire()
try:
if not self.exists(path):
- raise ResourceNotFoundError("NO_RESOURCE", path)
+ raise ResourceNotFoundError(path)
name, fs = self.which(path)
if name is None:
@@ -135,7 +136,7 @@ class MultiFS(FS):
fs_file = fs.open(path, mode, **kwargs)
return fs_file
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
finally:
self._lock.release()
@@ -166,16 +167,6 @@ class MultiFS(FS):
finally:
self._lock.release()
- def ishidden(self, path):
- self._lock.acquire()
- try:
- fs = self._delegate_search(path)
- if fs is not None:
- return fs.isfile(path)
- return False
- finally:
- self._lock.release()
-
def listdir(self, path="./", *args, **kwargs):
self._lock.acquire()
try:
@@ -197,7 +188,7 @@ class MultiFS(FS):
if fs.exists(path):
fs.remove(path)
return
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
finally:
self._lock.release()
@@ -208,20 +199,20 @@ class MultiFS(FS):
if fs.isdir(path):
fs.removedir(path, recursive)
return
- raise ResourceNotFoundError("NO_DIR", path)
+ raise ResourceNotFoundError(path)
finally:
self._lock.release()
def rename(self, src, dst):
if not issamedir(src, dst):
- raise ValueError("Destination path must the same directory (user the move method for moving to a different directory)")
+            raise ValueError("Destination path must be in the same directory (use the move method for moving to a different directory)")
self._lock.acquire()
try:
for fs in self:
if fs.exists(src):
fs.rename(src, dst)
return
- raise FSError("NO_RESOURCE", path)
+            raise ResourceNotFoundError(src)
finally:
self._lock.release()
@@ -232,31 +223,7 @@ class MultiFS(FS):
if fs.exists(path):
return fs.getinfo(path)
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
finally:
self._lock.release()
-
-if __name__ == "__main__":
-
- import fs
- import osfs
- osfs = osfs.OSFS('~/')
- import memoryfs
-
- mem_fs = memoryfs.MemoryFS()
- mem_fs.makedir('projects/test2', recursive=True)
- mem_fs.makedir('projects/A', recursive=True)
- mem_fs.makedir('projects/A/B', recursive=True)
-
-
- mem_fs.open("projects/test2/readme.txt", 'w').write("Hello, World!")
- mem_fs.open("projects/A/readme.txt", 'w').write("\nSecond Line")
-
- multifs = MultiFS()
- multifs.addfs("osfs", osfs)
- multifs.addfs("mem_fs", mem_fs)
-
- import browsewin
-
- browsewin.browse(multifs) \ No newline at end of file
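A brief sketch of layering filesystems with MultiFS, in place of the removed demo. Which layer wins when more than one contains a path is not visible in this diff, so the comments only assume that some layer provides the file.

from fs.memoryfs import MemoryFS
from fs.multifs import MultiFS

defaults = MemoryFS()
defaults.open("config.txt", "w").write("colour = blue")
overrides = MemoryFS()

multi = MultiFS()
multi.addfs("defaults", defaults)
multi.addfs("overrides", overrides)

print multi.open("config.txt", "r").read()   # served by whichever layer holds it
print multi.which("config.txt")              # (name, fs) of the providing layer
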
diff --git a/fs/objecttree.py b/fs/objecttree.py
index 014bcda..e073298 100644
--- a/fs/objecttree.py
+++ b/fs/objecttree.py
@@ -1,12 +1,10 @@
-#!/usr/bin/env python
-from helpers import _iteratepath, pathsplit
class _ObjectDict(dict):
pass
-class ObjectTree(object):
+class ObjectTree(object):
"""A class to facilitate the creation of tree structures."""
def __init__(self):
@@ -105,15 +103,3 @@ class ObjectTree(object):
return self.root.iteritems()
-if __name__ == "__main__":
-
- ot = ObjectTree()
- ot['a/b/c'] = "Hai!"
-
- print ot['a/b/c']
-
- print ot.partialget("/a/b/c/d/e/f")
-
- ot['a/b/c/d'] = "?"
-
- print ot['a/b/c'].keys() \ No newline at end of file
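The deleted __main__ block was the only usage example for ObjectTree; the same behaviour as a short sketch, with the mapping-like comment inferred from that demo rather than stated anywhere in the class itself.

from fs.objecttree import ObjectTree

ot = ObjectTree()
ot['a/b/c'] = "Hai!"
print ot['a/b/c']            # 'Hai!'

ot['a/b/c/d'] = "?"
print ot['a/b/c'].keys()     # the 'c' node now acts as a mapping of child names
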
diff --git a/fs/osfs.py b/fs/osfs.py
index 9fc7342..a3a9519 100644
--- a/fs/osfs.py
+++ b/fs/osfs.py
@@ -1,7 +1,9 @@
#!/usr/bin/env python
-from base import *
-from helpers import *
+import os
+
+from fs.base import *
+from fs.path import *
try:
import xattr
@@ -9,103 +11,81 @@ except ImportError:
xattr = None
class OSFS(FS):
+ """Expose the underlying operating-system filesystem as an FS object.
- """The most basic of filesystems. Simply shadows the underlaying filesytem
- of the Operating System.
+    This is the most basic of filesystems, which simply shadows the underlying
+    filesystem of the OS. Most of its methods defer to the corresponding
+ methods in the os and os.path modules.
"""
- def __init__(self, root_path, thread_syncronize=True):
- FS.__init__(self, thread_syncronize=thread_syncronize)
-
- expanded_path = normpath(os.path.expanduser(os.path.expandvars(root_path)))
-
+ def __init__(self, root_path, dir_mode=0700, thread_synchronize=True):
+ FS.__init__(self, thread_synchronize=thread_synchronize)
+ expanded_path = normpath(os.path.abspath(os.path.expanduser(os.path.expandvars(root_path))))
if not os.path.exists(expanded_path):
- raise ResourceNotFoundError("NO_DIR", expanded_path, msg="Root directory does not exist: %(path)s")
+ raise ResourceNotFoundError(expanded_path,msg="Root directory does not exist: %(path)s")
if not os.path.isdir(expanded_path):
- raise ResourceNotFoundError("NO_DIR", expanded_path, msg="Root path is not a directory: %(path)s")
-
+ raise ResourceInvalidError(expanded_path,msg="Root path is not a directory: %(path)s")
self.root_path = normpath(os.path.abspath(expanded_path))
+ self.dir_mode = dir_mode
def __str__(self):
return "<OSFS: %s>" % self.root_path
- __repr__ = __str__
-
def getsyspath(self, path, allow_none=False):
- sys_path = os.path.join(self.root_path, makerelative(self._resolve(path))).replace('/', os.sep)
+ sys_path = os.path.join(self.root_path, relpath(path)).replace('/', os.sep)
return sys_path
+ @convert_os_errors
def open(self, path, mode="r", **kwargs):
- try:
- f = open(self.getsyspath(path), mode, kwargs.get("buffering", -1))
- except IOError, e:
- if e.errno == 2:
- raise ResourceNotFoundError("NO_FILE", path)
- raise OperationFailedError("OPEN_FAILED", path, details=e, msg=str(e))
-
- return f
+ mode = filter(lambda c: c in "rwabt+",mode)
+ return open(self.getsyspath(path), mode, kwargs.get("buffering", -1))
+ @convert_os_errors
def exists(self, path):
path = self.getsyspath(path)
return os.path.exists(path)
+ @convert_os_errors
def isdir(self, path):
path = self.getsyspath(path)
return os.path.isdir(path)
+ @convert_os_errors
def isfile(self, path):
path = self.getsyspath(path)
return os.path.isfile(path)
- def ishidden(self, path):
- return path.startswith('.')
+ @convert_os_errors
+ def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
+ paths = os.listdir(self.getsyspath(path))
+ return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
- def listdir(self, path="./", wildcard=None, full=False, absolute=False, hidden=True, dirs_only=False, files_only=False):
- try:
- paths = os.listdir(self.getsyspath(path))
- except (OSError, IOError), e:
- raise OperationFailedError("LISTDIR_FAILED", path, details=e, msg="Unable to get directory listing: %(path)s - (%(details)s)")
-
- return self._listdir_helper(path, paths, wildcard, full, absolute, hidden, dirs_only, files_only)
-
- def makedir(self, path, mode=0777, recursive=False, allow_recreate=False):
+ @convert_os_errors
+ def makedir(self, path, recursive=False, allow_recreate=False):
sys_path = self.getsyspath(path)
-
try:
if recursive:
- os.makedirs(sys_path, mode)
+ os.makedirs(sys_path, self.dir_mode)
else:
- if not allow_recreate and self.exists(path):
- raise OperationFailedError("MAKEDIR_FAILED", dirname, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
- try:
- os.mkdir(sys_path, mode)
- except OSError, e:
- if allow_recreate:
- if e.errno != 17:
- raise OperationFailedError("MAKEDIR_FAILED", path)
- else:
- raise OperationFailedError("MAKEDIR_FAILED", path)
- except WindowsError, e:
- if allow_recreate:
- if e.errno != 183:
- raise OperationFailedError("MAKEDIR_FAILED", path)
- else:
- raise OperationFailedError("MAKEDIR_FAILED", path)
-
+ os.mkdir(sys_path, self.dir_mode)
except OSError, e:
- if e.errno == 17:
- return
+ if e.errno == 17 or e.errno == 183:
+ if self.isfile(path):
+ raise ResourceInvalidError(path,msg="Cannot create directory, there's already a file of that name: %(path)s")
+ if not allow_recreate:
+ raise DestinationExistsError(path,msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
+ elif e.errno == 2:
+ raise ParentDirectoryMissingError(path)
else:
- raise OperationFailedError("MAKEDIR_FAILED", path, details=e)
-
+ raise
+
+ @convert_os_errors
def remove(self, path):
sys_path = self.getsyspath(path)
- try:
- os.remove(sys_path)
- except OSError, e:
- raise OperationFailedError("REMOVE_FAILED", path, details=e)
+ os.remove(sys_path)
+ @convert_os_errors
def removedir(self, path, recursive=False,force=False):
sys_path = self.getsyspath(path)
# Don't remove the root directory of this FS
@@ -116,182 +96,70 @@ class OSFS(FS):
self.remove(path2)
for path2 in self.listdir(path,absolute=True,dirs_only=True):
self.removedir(path2,force=True)
- try:
- os.rmdir(sys_path)
- except OSError, e:
- raise OperationFailedError("REMOVEDIR_FAILED", path, details=e)
+ os.rmdir(sys_path)
# Using os.removedirs() for this can result in dirs being
# removed outside the root of this FS, so we recurse manually.
if recursive:
try:
self.removedir(dirname(path),recursive=True)
- except OperationFailedError:
+ except DirectoryNotEmptyError:
pass
+ @convert_os_errors
def rename(self, src, dst):
if not issamedir(src, dst):
- raise ValueError("Destination path must the same directory (user the move method for moving to a different directory)")
+            raise ValueError("Destination path must be in the same directory (use the move method for moving to a different directory)")
path_src = self.getsyspath(src)
path_dst = self.getsyspath(dst)
+ os.rename(path_src, path_dst)
- try:
- os.rename(path_src, path_dst)
- except OSError, e:
- raise OperationFailedError("RENAME_FAILED", src)
-
+ @convert_os_errors
def getinfo(self, path):
sys_path = self.getsyspath(path)
-
- try:
- stats = os.stat(sys_path)
- except OSError, e:
- raise FSError("UNKNOWN_ERROR", path, details=e)
-
+ stats = os.stat(sys_path)
info = dict((k, getattr(stats, k)) for k in dir(stats) if not k.startswith('__') )
-
info['size'] = info['st_size']
-
+ # TODO: this doesn't actually mean 'creation time' on unix
ct = info.get('st_ctime', None)
if ct is not None:
info['created_time'] = datetime.datetime.fromtimestamp(ct)
-
at = info.get('st_atime', None)
if at is not None:
info['accessed_time'] = datetime.datetime.fromtimestamp(at)
-
mt = info.get('st_mtime', None)
if mt is not None:
         info['modified_time'] = datetime.datetime.fromtimestamp(mt)
-
return info
-
+ @convert_os_errors
def getsize(self, path):
sys_path = self.getsyspath(path)
- try:
- stats = os.stat(sys_path)
- except OSError, e:
- raise FSError("UNKNOWN_ERROR", path, details=e)
-
+ stats = os.stat(sys_path)
return stats.st_size
- def setxattr(self, path, key, value):
- self._lock.acquire()
- try:
- if xattr is None:
- return FS.setxattr(self, path, key, value)
- try:
- xattr.xattr(self.getsyspath(path))[key]=value
- except IOError, e:
- if e.errno == 95:
- return FS.setxattr(self, path, key, value)
- else:
- raise OperationFailedError('XATTR_FAILED', path, details=e)
- finally:
- self._lock.release()
+ # Provide native xattr support if available
+ if xattr:
+ @convert_os_errors
+ def setxattr(self, path, key, value):
+ xattr.xattr(self.getsyspath(path))[key]=value
- def getxattr(self, path, key, default=None):
- self._lock.acquire()
- try:
- if xattr is None:
- return FS.getxattr(self, path, key, default)
+ @convert_os_errors
+ def getxattr(self, path, key, default=None):
try:
return xattr.xattr(self.getsyspath(path)).get(key)
- except IOError, e:
- if e.errno == 95:
- return FS.getxattr(self, path, key, default)
- else:
- raise OperationFailedError('XATTR_FAILED', path, details=e)
- finally:
- self._lock.release()
+ except KeyError:
+ return default
- def removexattr(self, path, key):
- self._lock.acquire()
- try:
- if xattr is None:
- return FS.removexattr(self, path, key)
+ @convert_os_errors
+ def delxattr(self, path, key):
try:
del xattr.xattr(self.getsyspath(path))[key]
except KeyError:
pass
- except IOError, e:
- if e.errono == 95:
- return FS.removexattr(self, path, key)
- else:
- raise OperationFailedError('XATTR_FAILED', path, details=e)
- finally:
- self._lock.release()
-
- def listxattrs(self, path):
- self._lock.acquire()
- try:
- if xattr is None:
- return FS.listxattrs(self, path)
- try:
- return xattr.xattr(self.getsyspath(path)).keys()
- except IOError, e:
- if errono == 95:
- return FS.listxattrs(self, path)
- else:
- raise OperationFailedError('XATTR_FAILED', path, details=e)
- finally:
- self._lock.release()
-
-
-
-if __name__ == "__main__":
-
-
- osfs = OSFS('testfs')
-
-
-
-
- #a = xattr.xattr('/home/will/projects/pyfilesystem/fs/testfs/test.txt')
- #a['user.comment'] = 'world'
- #print xattr.xattr('/home/will/projects/pyfilesystem/fs/testfs/test.txt').keys()
+ @convert_os_errors
+ def listxattrs(self, path):
+ return xattr.xattr(self.getsyspath(path)).keys()
- print osfs.listxattrs('test.txt')
- osfs.removexattr('test.txt', 'user.foo')
- #print osfs.listxattrs('test.txt')
- osfs.setxattr('test.txt', 'user.foo', 'bar')
- print osfs.getxattr('test.txt', 'user.foo')
- print osfs.listxattrs('test.txt')
- print osfs.getxattrs('test.txt')
- #
- #osfs = OSFS("~/projects")
- #
- #
- ##for p in osfs.walk("tagging-trunk", search='depth'):
- ## print p
- #
- #import browsewin
- #browsewin.browse(osfs)
- #
- #print_fs(osfs)
- #
- ##print osfs.listdir("/projects/fs")
- #
- ##sub_fs = osfs.open_dir("projects/")
- #
- ##print sub_fs
- #
- ##sub_fs.open('test.txt')
- #
- ##print sub_fs.listdir(dirs_only=True)
- ##print sub_fs.listdir()
- ##print_fs(sub_fs, max_levels=2)
- #
- ##for f in osfs.listdir():
- ## print f
- #
- ##print osfs.listdir('projects', dirs_only=True, wildcard="d*")
- #
- ##print_fs(osfs, 'projects/')
- #
- #print pathjoin('/', 'a')
- #
- #print pathjoin('a/b/c', '../../e/f')
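A sketch of the slimmed-down OSFS, which now leans on the convert_os_errors decorator instead of hand-rolled try/except blocks. It assumes convert_os_errors maps a missing file onto ResourceNotFoundError and that the error classes are importable from fs.errors.

from fs.osfs import OSFS
from fs.errors import ResourceNotFoundError, DestinationExistsError  # assumed location

home = OSFS('~/')                         # root must already exist; ~ and env vars expand

print home.listdir(".", files_only=True)

try:
    home.makedir("projects")              # created with the default dir_mode=0700
except DestinationExistsError:
    pass                                  # or pass allow_recreate=True up front

try:
    home.open("no-such-file.txt", "r")    # assumed to surface as ResourceNotFoundError
except ResourceNotFoundError:
    pass
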
diff --git a/fs/path.py b/fs/path.py
new file mode 100644
index 0000000..4f2275c
--- /dev/null
+++ b/fs/path.py
@@ -0,0 +1,204 @@
+"""
+
+ fs.path: useful functions for FS path manipulation.
+
+This is broadly similar to the standard 'os.path' module but works with
+paths in the canonical format expected by all FS objects (forward-slash-separated,
+optional leading slash).
+
+"""
+
+
+def normpath(path):
+ """Normalizes a path to be in the format expected by FS objects.
+
+    This function removes any trailing slashes, collapses duplicate slashes,
+    replaces backslashes with forward slashes, and generally tries very hard
+    to return a new path string in the canonical FS format.
+ If the path is invalid, ValueError will be raised.
+
+ >>> normpath(r"foo\\bar\\baz")
+ 'foo/bar/baz'
+
+ >>> normpath("/foo//bar/frob/../baz")
+ '/foo/bar/baz'
+
+ >>> normpath("foo/../../bar")
+    Traceback (most recent call last):
+ ...
+ ValueError: too many backrefs in path 'foo/../../bar'
+
+ """
+ if not path:
+ return path
+ components = []
+ for comp in path.replace('\\','/').split("/"):
+ if not comp or comp == ".":
+ pass
+ elif comp == "..":
+ try:
+ components.pop()
+ except IndexError:
+ err = "too many backrefs in path '%s'" % (path,)
+ raise ValueError(err)
+ else:
+ components.append(comp)
+ if path[0] in "\\/":
+ if not components:
+ components = [""]
+ components.insert(0,"")
+ return "/".join(components)
+
+
+def iteratepath(path, numsplits=None):
+ """Iterate over the individual components of a path."""
+ path = relpath(normpath(path))
+ if not path:
+ return []
+ if numsplits == None:
+ return path.split('/')
+ else:
+ return path.split('/', numsplits)
+
+
+def abspath(path):
+ """Convert the given path to an absolute path.
+
+ Since FS objects have no concept of a 'current directory' this simply
+ adds a leading '/' character if the path doesn't already have one.
+
+ """
+ if not path:
+ return "/"
+ if path[0] != "/":
+ return "/" + path
+ return path
+
+
+def relpath(path):
+ """Convert the given path to a relative path.
+
+ This is the inverse of abspath(), stripping a leading '/' from the
+ path if it is present.
+
+ """
+ while path and path[0] == "/":
+ path = path[1:]
+ return path
+
+
+def pathjoin(*paths):
+ """Joins any number of paths together, returning a new path string.
+
+ >>> pathjoin('foo', 'bar', 'baz')
+ 'foo/bar/baz'
+
+ >>> pathjoin('foo/bar', '../baz')
+ 'foo/baz'
+
+ >>> pathjoin('foo/bar', '/baz')
+ '/baz'
+
+ """
+ absolute = False
+ relpaths = []
+ for p in paths:
+ if p:
+ if p[0] in '\\/':
+ del relpaths[:]
+ absolute = True
+ relpaths.append(p)
+
+ path = normpath("/".join(relpaths))
+ if absolute and not path.startswith("/"):
+ path = "/" + path
+ return path
+
+# Allow pathjoin() to be used as fs.path.join()
+join = pathjoin
+
+
+def pathsplit(path):
+    """Splits a path into a (head,tail) pair.
+
+ This function splits a path into a pair (head,tail) where 'tail' is the
+    last pathname component and 'head' is all preceding components.
+
+ >>> pathsplit("foo/bar")
+ ('foo', 'bar')
+
+ >>> pathsplit("foo/bar/baz")
+ ('foo/bar', 'baz')
+
+ """
+ split = normpath(path).rsplit('/', 1)
+ if len(split) == 1:
+ return ('', split[0])
+ return tuple(split)
+
+# Allow pathsplit() to be used as fs.path.split()
+split = pathsplit
+
+
+def dirname(path):
+ """Returns the parent directory of a path.
+
+ This is always equivalent to the 'head' component of the value returned
+ by pathsplit(path).
+
+ >>> dirname('foo/bar/baz')
+ 'foo/bar'
+
+ """
+ return pathsplit(path)[0]
+
+
+def basename(path):
+ """Returns the basename of the resource referenced by a path.
+
+    This is always equivalent to the 'tail' component of the value returned
+ by pathsplit(path).
+
+ >>> basename('foo/bar/baz')
+ 'baz'
+
+ """
+ return pathsplit(path)[1]
+
+
+def issamedir(path1, path2):
+ """Return true if two paths reference a resource in the same directory.
+
+ >>> issamedir("foo/bar/baz.txt", "foo/bar/spam.txt")
+ True
+ >>> issamedir("foo/bar/baz/txt", "spam/eggs/spam.txt")
+ False
+
+ """
+ return pathsplit(normpath(path1))[0] == pathsplit(normpath(path2))[0]
+
+
+def isprefix(path1,path2):
+    """Return true if path1 is a prefix of path2.
+
+ >>> isprefix("foo/bar", "foo/bar/spam.txt")
+ True
+ >>> isprefix("foo/bar/", "foo/bar")
+ True
+ >>> isprefix("foo/barry", "foo/baz/bar")
+ False
+ >>> isprefix("foo/bar/baz/", "foo/baz/bar")
+ False
+
+ """
+ bits1 = path1.split("/")
+ bits2 = path2.split("/")
+ while bits1 and bits1[-1] == "":
+ bits1.pop()
+ if len(bits1) > len(bits2):
+ return False
+ for (bit1,bit2) in zip(bits1,bits2):
+ if bit1 != bit2:
+ return False
+ return True
+
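Beyond the doctests above, the helpers compose naturally; a quick sketch:

from fs.path import normpath, pathjoin, pathsplit, dirname, basename, isprefix

p = pathjoin("/projects", "fs/", "../fs2", "readme.txt")
print p                            # /projects/fs2/readme.txt
print pathsplit(p)                 # ('/projects/fs2', 'readme.txt')
print dirname(p), basename(p)      # /projects/fs2 readme.txt
print isprefix("/projects", p)     # True
print normpath(r"foo\bar\..\baz")  # foo/baz
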
diff --git a/fs/rpcfs.py b/fs/rpcfs.py
index 6141aae..0146188 100644
--- a/fs/rpcfs.py
+++ b/fs/rpcfs.py
@@ -1,43 +1,28 @@
"""
- fs.rpcfs: Client and Server to expose an FS via XML-RPC
+ fs.rpcfs: client to access an FS via XML-RPC
-This module provides the following pair of classes that can be used to expose
-a remote filesystem using XML-RPC:
-
- RPCFSServer: a subclass of SimpleXMLRPCServer that exposes the methods
- of an FS instance via XML-RPC
-
- RPCFS: a subclass of FS that delegates all filesystem operations to
- a remote server using XML-RPC.
-
-If you need to use a more powerful server than SimpleXMLRPCServer, you can
-use the RPCFSInterface class to provide an XML-RPC-compatible wrapper around
-an FS object, which can then be exposed using whatever server you choose
-(e.g. Twisted's XML-RPC server).
+This module provides the class 'RPCFS' to access a remote FS object over
+XML-RPC. You probably want to use this in conjunction with the 'RPCFSServer'
+class from the fs.expose.xmlrpc module.
"""
import xmlrpclib
-from SimpleXMLRPCServer import SimpleXMLRPCServer
from fs.base import *
from StringIO import StringIO
-
-
-class ObjProxy:
- """Simple object proxy allowing us to replace read-only attributes.
-
- This is used to put a modified 'close' method on files returned by
- open(), such that they will be uploaded to the server when closed.
- """
-
- def __init__(self,obj):
- self._obj = obj
-
- def __getattr__(self,attr):
- return getattr(self._obj,attr)
+if hasattr(StringIO,"__exit__"):
+ class StringIO(StringIO):
+ pass
+else:
+ class StringIO(StringIO):
+ def __enter__(self):
+ return self
+ def __exit__(self,exc_type,exc_value,traceback):
+ self.close()
+ return False
def re_raise_faults(func):
@@ -103,7 +88,7 @@ class RPCFS(FS):
"""Access a filesystem exposed via XML-RPC.
This class provides the client-side logic for accessing a remote FS
- object, and is dual to the RPCFSServer class also defined in this module.
+ object, and is dual to the RPCFSServer class defined in fs.expose.xmlrpc.
Example:
@@ -116,21 +101,37 @@ class RPCFS(FS):
The only required argument is the uri of the server to connect
to. This will be passed to the underlying XML-RPC server proxy
- object along with the 'transport' argument if it is provided.
+ object, along with the 'transport' argument if it is provided.
"""
self.uri = uri
- if transport is not None:
- proxy = xmlrpclib.ServerProxy(uri,transport,allow_none=True)
+ self._transport = transport
+ self.proxy = self._make_proxy()
+
+ def _make_proxy(self):
+ kwds = dict(allow_none=True)
+ if self._transport is not None:
+ proxy = xmlrpclib.ServerProxy(self.uri,self._transport,**kwds)
else:
- proxy = xmlrpclib.ServerProxy(uri,allow_none=True)
- self.proxy = ReRaiseFaults(proxy)
+ proxy = xmlrpclib.ServerProxy(self.uri,**kwds)
+ return ReRaiseFaults(proxy)
def __str__(self):
return '<RPCFS: %s>' % (self.uri,)
- __repr__ = __str__
+ def __getstate__(self):
+ state = super(RPCFS,self).__getstate__()
+ try:
+ del state['proxy']
+ except KeyError:
+ pass
+ return state
- def open(self,path,mode):
+ def __setstate__(self,state):
+ for (k,v) in state.iteritems():
+ self.__dict__[k] = v
+ self.proxy = self._make_proxy()
+
+ def open(self,path,mode="r"):
# TODO: chunked transport of large files
if "w" in mode:
self.proxy.set_contents(path,xmlrpclib.Binary(""))
@@ -139,13 +140,13 @@ class RPCFS(FS):
data = self.proxy.get_contents(path).data
except IOError:
if "w" not in mode and "a" not in mode:
- raise ResourceNotFoundError("NO_FILE",path)
+ raise ResourceNotFoundError(path)
if not self.isdir(dirname(path)):
- raise OperationFailedError("OPEN_FAILED", path,msg="Parent directory does not exist")
+ raise ParentDirectoryMissingError(path)
self.proxy.set_contents(path,xmlrpclib.Binary(""))
else:
data = ""
- f = ObjProxy(StringIO(data))
+ f = StringIO(data)
if "a" not in mode:
f.seek(0,0)
else:
@@ -171,11 +172,11 @@ class RPCFS(FS):
def isfile(self,path):
return self.proxy.isfile(path)
- def listdir(self,path="./",wildcard=None,full=False,absolute=False,hidden=True,dirs_only=False,files_only=False):
- return self.proxy.listdir(path,wildcard,full,absolute,hidden,dirs_only,files_only)
+ def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
+ return self.proxy.listdir(path,wildcard,full,absolute,dirs_only,files_only)
- def makedir(self,path,mode=0777,recursive=False,allow_recreate=False):
- return self.proxy.makedir(path,mode,recursive,allow_recreate)
+ def makedir(self,path,recursive=False,allow_recreate=False):
+ return self.proxy.makedir(path,recursive,allow_recreate)
def remove(self,path):
return self.proxy.remove(path)
@@ -211,99 +212,3 @@ class RPCFS(FS):
return self.proxy.copydir(src,dst,overwrite,ignore_errors,chunk_size)
-class RPCFSInterface(object):
- """Wrapper to expose an FS via a XML-RPC compatible interface.
-
- The only real trick is using xmlrpclib.Binary objects to trasnport
- the contents of files.
- """
-
- def __init__(self,fs):
- self.fs = fs
-
- def get_contents(self,path):
- data = self.fs.getcontents(path)
- return xmlrpclib.Binary(data)
-
- def set_contents(self,path,data):
- self.fs.createfile(path,data.data)
-
- def exists(self,path):
- return self.fs.exists(path)
-
- def isdir(self,path):
- return self.fs.isdir(path)
-
- def isfile(self,path):
- return self.fs.isfile(path)
-
- def listdir(self,path="./",wildcard=None,full=False,absolute=False,hidden=True,dirs_only=False,files_only=False):
- return list(self.fs.listdir(path,wildcard,full,absolute,hidden,dirs_only,files_only))
-
- def makedir(self,path,mode=0777,recursive=False,allow_recreate=False):
- return self.fs.makedir(path,mode,recursive,allow_recreate)
-
- def remove(self,path):
- return self.fs.remove(path)
-
- def removedir(self,path,recursive=False,force=False):
- return self.fs.removedir(path,recursive,force)
-
- def rename(self,src,dst):
- return self.fs.rename(src,dst)
-
- def getinfo(self,path):
- return self.fs.getinfo(path)
-
- def desc(self,path):
- return self.fs.desc(path)
-
- def getattr(self,path,attr):
- return self.fs.getattr(path,attr)
-
- def setattr(self,path,attr,value):
- return self.fs.setattr(path,attr,value)
-
- def copy(self,src,dst,overwrite=False,chunk_size=16384):
- return self.fs.copy(src,dst,overwrite,chunk_size)
-
- def move(self,src,dst,overwrite=False,chunk_size=16384):
- return self.fs.move(src,dst,overwrite,chunk_size)
-
- def movedir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384):
- return self.fs.movedir(src,dst,overwrite,ignore_errors,chunk_size)
-
- def copydir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384):
- return self.fs.copydir(src,dst,overwrite,ignore_errors,chunk_size)
-
-
-class RPCFSServer(SimpleXMLRPCServer):
- """Server to expose an FS object via XML-RPC.
-
- This class takes as its first argument an FS instance, and as its second
- argument a (hostname,port) tuple on which to listen for XML-RPC requests.
- Example:
-
- fs = OSFS('/var/srv/myfiles')
- s = RPCFSServer(fs,("",8080))
- s.serve_forever()
-
- To cleanly shut down the server after calling serve_forever, set the
- attribute "serve_more_requests" to False.
- """
-
- def __init__(self,fs,addr,requestHandler=None,logRequests=None):
- kwds = dict(allow_none=True)
- if requestHandler is not None:
- kwds['requestHandler'] = requestHandler
- if logRequests is not None:
- kwds['logRequests'] = logRequests
- self.serve_more_requests = True
- SimpleXMLRPCServer.__init__(self,addr,**kwds)
- self.register_instance(RPCFSInterface(fs))
-
- def serve_forever(self):
- """Override serve_forever to allow graceful shutdown."""
- while self.serve_more_requests:
- self.handle_request()
-
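A sketch of the client side of the XML-RPC pair. The server-side comment assumes RPCFSServer keeps the (fs, (host, port)) constructor shown in the code removed above, now living in fs.expose.xmlrpc; the URL form and the note about when writes reach the server are likewise assumptions.

# Server process (assumed API, see fs/expose/xmlrpc.py):
#   from fs.tempfs import TempFS
#   from fs.expose.xmlrpc import RPCFSServer
#   RPCFSServer(TempFS(), ("", 8080)).serve_forever()

# Client process:
from fs.rpcfs import RPCFS

rfs = RPCFS("http://localhost:8080/")    # assumed uri form
rfs.makedir("docs", allow_recreate=True)
f = rfs.open("docs/hello.txt", "w")
f.write("Hello over XML-RPC")
f.close()                                # assumed: contents reach the server on close
print rfs.listdir("docs")
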
diff --git a/fs/s3fs.py b/fs/s3fs.py
index 8517278..c0d2714 100644
--- a/fs/s3fs.py
+++ b/fs/s3fs.py
@@ -18,9 +18,65 @@ except ImportError:
from tempfile import NamedTemporaryFile as TempFile
from fs.base import *
-from fs.helpers import *
+
+
+class RemoteFileBuffer(object):
+ """File-like object providing buffer for local file operations.
+
+ Instances of this class manage a local tempfile buffer corresponding
+ to the contents of a remote file. All reads and writes happen locally,
+ with the content being copied to the remote file only on flush() or
+ close().
+
+    Instances of this class are returned by S3FS.open, but it is designed
+ to be usable by any FS subclass that manages remote files.
+ """
+
+ def __init__(self,fs,path,mode):
+ self.file = TempFile()
+ self.fs = fs
+ self.path = path
+ self.mode = mode
+
+ def __del__(self):
+ if not self.closed:
+ self.close()
+
+ # This is lifted straight from the stdlib's tempfile.py
+ def __getattr__(self,name):
+ file = self.__dict__['file']
+ a = getattr(file, name)
+ if not issubclass(type(a), type(0)):
+ setattr(self, name, a)
+ return a
+
+ def __enter__(self):
+ self.file.__enter__()
+ return self
+
+ def __exit__(self,exc,value,tb):
+ self.close()
+ return False
+
+ def __iter__(self):
+ return iter(self.file)
+
+ def flush(self):
+ self.file.flush()
+ if "w" in self.mode or "a" in self.mode or "+" in self.mode:
+ pos = self.file.tell()
+ self.file.seek(0)
+ self.fs.setcontents(self.path,self.file)
+ self.file.seek(pos)
+
+ def close(self):
+ if "w" in self.mode or "a" in self.mode or "+" in self.mode:
+ self.file.seek(0)
+ self.fs.setcontents(self.path,self.file)
+ self.file.close()
+
class S3FS(FS):
"""A filesystem stored in Amazon S3.
@@ -38,10 +94,10 @@ class S3FS(FS):
PATH_MAX = None
NAME_MAX = None
- def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_syncronize=True,key_sync_timeout=1):
+ def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_synchronize=True,key_sync_timeout=1):
"""Constructor for S3FS objects.
- S3FS objects required the name of the S3 bucket in which to store
+ S3FS objects require the name of the S3 bucket in which to store
files, and can optionally be given a prefix under which the files
         should be stored. The AWS public and private keys may be specified
as additional arguments; if they are not specified they will be
@@ -63,12 +119,13 @@ class S3FS(FS):
self._separator = separator
self._key_sync_timeout = key_sync_timeout
# Normalise prefix to this form: path/to/files/
+ prefix = normpath(prefix)
while prefix.startswith(separator):
prefix = prefix[1:]
if not prefix.endswith(separator) and prefix != "":
prefix = prefix + separator
self._prefix = prefix
- FS.__init__(self, thread_syncronize=thread_syncronize)
+ FS.__init__(self, thread_synchronize=thread_synchronize)
# Make _s3conn and _s3bukt properties that are created on demand,
# since they cannot be stored during pickling.
@@ -115,15 +172,12 @@ class S3FS(FS):
def _s3path(self,path):
"""Get the absolute path to a file stored in S3."""
- path = self._prefix + path
- path = self._separator.join(self._pathbits(path))
- return path
-
- def _pathbits(self,path):
- """Iterator over path components."""
- for bit in path.split("/"):
- if bit and bit != ".":
- yield bit
+ path = relpath(normpath(path))
+ path = self._separator.join(iteratepath(path))
+ s3path = self._prefix + path
+ if s3path and s3path[-1] == self._separator:
+ s3path = s3path[:-1]
+ return s3path
def _sync_key(self,k):
"""Synchronise on contents of the given key.
@@ -160,6 +214,10 @@ class S3FS(FS):
key.set_contents_from_file(contents)
return self._sync_key(key)
+ def setcontents(self,path,contents):
+ s3path = self._s3path(path)
+ self._sync_set_contents(s3path,contents)
+
def open(self,path,mode="r"):
"""Open the named file in the given mode.
@@ -167,7 +225,7 @@ class S3FS(FS):
so that it can be worked on efficiently. Any changes made to the
file are only sent back to S3 when the file is flushed or closed.
"""
- tf = TempFile()
+ buf = RemoteFileBuffer(self,path,mode)
s3path = self._s3path(path)
# Truncate the file if requested
if "w" in mode:
@@ -177,44 +235,17 @@ class S3FS(FS):
if k is None:
# Create the file if it's missing
if "w" not in mode and "a" not in mode:
- raise ResourceNotFoundError("NO_FILE",path)
+ raise ResourceNotFoundError(path)
if not self.isdir(dirname(path)):
- raise OperationFailedError("OPEN_FAILED", path,msg="Parent directory does not exist")
+ raise ParentDirectoryMissingError(path)
k = self._sync_set_contents(s3path,"")
else:
# Get the file contents into the tempfile.
if "r" in mode or "+" in mode or "a" in mode:
- k.get_contents_to_file(tf)
+ k.get_contents_to_file(buf)
if "a" not in mode:
- tf.seek(0)
- # Upload the tempfile when it is flushed or closed
- if "w" in mode or "a" in mode or "+" in mode:
- # Override flush()
- oldflush = tf.flush
- def newflush():
- oldflush()
- pos = tf.tell()
- tf.seek(0)
- self._sync_set_contents(k,tf)
- tf.seek(pos)
- tf.flush = newflush
- # Override close()
- oldclose = tf.close
- def newclose():
- tf.seek(0)
- self._sync_set_contents(k,tf)
- oldclose()
- tf.close = newclose
- # Override __exit__ if it exists
- try:
- oldexit = tf.__exit__
- def newexit(exc,value,tb):
- tf.close()
- return False
- tf.__exit__ = newexit
- except AttributeError:
- pass
- return tf
+ buf.seek(0)
+ return buf
def exists(self,path):
"""Check whether a path exists."""
@@ -237,7 +268,7 @@ class S3FS(FS):
"""Check whether a path exists and is a directory."""
s3path = self._s3path(path) + self._separator
# Root is always a directory
- if s3path == self._prefix:
+ if s3path == "/" or s3path == self._prefix:
return True
# Use a list request so that we return true if there are any files
# in that directory. This avoids requiring a special file for the
@@ -258,7 +289,7 @@ class S3FS(FS):
return True
return False
- def listdir(self,path="./",wildcard=None,full=False,absolute=False,hidden=True,dirs_only=False,files_only=False):
+ def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
"""List contents of a directory."""
s3path = self._s3path(path) + self._separator
if s3path == "/":
@@ -278,10 +309,12 @@ class S3FS(FS):
paths.append(nm)
if not isDir:
if s3path != self._prefix:
- raise OperationFailedError("LISTDIR_FAILED",path)
- return self._listdir_helper(path,paths,wildcard,full,absolute,hidden,dirs_only,files_only)
+ if self.isfile(path):
+ raise ResourceInvalidError(path,msg="that's not a directory: %(path)s")
+ raise ResourceNotFoundError(path)
+ return self._listdir_helper(path,paths,wildcard,full,absolute,dirs_only,files_only)
- def _listdir_helper(self,path,paths,wildcard,full,absolute,hidden,dirs_only,files_only):
+ def _listdir_helper(self,path,paths,wildcard,full,absolute,dirs_only,files_only):
"""Modify listdir helper to avoid additional calls to the server."""
if dirs_only and files_only:
raise ValueError("dirs_only and files_only can not both be True")
@@ -299,17 +332,14 @@ class S3FS(FS):
match = fnmatch.fnmatch
paths = [p for p in paths if match(p, wildcard)]
- if not hidden:
- paths = [p for p in paths if not self.ishidden(p)]
-
if full:
paths = [pathjoin(path, p) for p in paths]
elif absolute:
- paths = [self._abspath(pathjoin(path, p)) for p in paths]
+ paths = [abspath(pathjoin(path, p)) for p in paths]
return paths
- def makedir(self,path,mode=0777,recursive=False,allow_recreate=False):
+ def makedir(self,path,recursive=False,allow_recreate=False):
"""Create a directory at the given path.
The 'mode' argument is accepted for compatability with the standard
@@ -320,8 +350,10 @@ class S3FS(FS):
if s3pathD == self._prefix:
if allow_recreate:
return
- raise OperationFailedError("MAKEDIR_FAILED", path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
- s3pathP = self._s3path(dirname(path[:-1])) + self._separator
+ raise DestinationExistsError(path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
+ s3pathP = self._s3path(dirname(path))
+ if s3pathP:
+ s3pathP = s3pathP + self._separator
# Check various preconditions using list of parent dir
ks = self._s3bukt.list(prefix=s3pathP,delimiter=self._separator)
if s3pathP == self._prefix:
@@ -333,26 +365,33 @@ class S3FS(FS):
parentExists = True
if k.name == s3path:
# It's already a file
- raise OperationFailedError("MAKEDIR_FAILED", path, msg="Can not create a directory that already exists: %(path)s")
+ raise ResourceInvalidError(path, msg="Destination exists as a regular file: %(path)s")
if k.name == s3pathD:
# It's already a directory
if allow_recreate:
return
- raise OperationFailedError("MAKEDIR_FAILED", path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
+ raise DestinationExistsError(path, msg="Can not create a directory that already exists (try allow_recreate=True): %(path)s")
# Create parent if required
if not parentExists:
if recursive:
- self.makedir(dirname(path[:-1]),mode,recursive,allow_recreate)
+ self.makedir(dirname(path),recursive,allow_recreate)
else:
- raise OperationFailedError("MAKEDIR_FAILED",path, msg="Parent directory does not exist: %(path)s")
+ raise ParentDirectoryMissingError(path, msg="Parent directory does not exist: %(path)s")
# Create an empty file representing the directory
# TODO: is there some standard scheme for representing empty dirs?
self._sync_set_contents(s3pathD,"")
def remove(self,path):
"""Remove the file at the given path."""
- # TODO: This will fail silently if the key doesn't exist
s3path = self._s3path(path)
+ ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
+ for k in ks:
+ if k.name == s3path:
+ break
+ if k.name.startswith(s3path + "/"):
+ raise ResourceInvalidError(path,msg="that's not a file: %(path)s")
+ else:
+ raise ResourceNotFoundError(path)
self._s3bukt.delete_key(s3path)
k = self._s3bukt.get_key(s3path)
while k:
@@ -360,7 +399,9 @@ class S3FS(FS):
def removedir(self,path,recursive=False,force=False):
"""Remove the directory at the given path."""
- s3path = self._s3path(path) + self._separator
+ s3path = self._s3path(path)
+ if s3path != self._prefix:
+ s3path = s3path + self._separator
if force:
# If we will be forcibly removing any directory contents, we
# might as well get the un-delimited list straight away.
@@ -368,17 +409,23 @@ class S3FS(FS):
else:
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
# Fail if the directory is not empty, or remove them if forced
+ found = False
for k in ks:
+ found = True
if k.name != s3path:
if not force:
- raise OperationFailedError("REMOVEDIR_FAILED",path)
+ raise DirectoryNotEmptyError(path)
self._s3bukt.delete_key(k.name)
+ if not found:
+ if self.isfile(path):
+ raise ResourceInvalidError(path,msg="removedir() called on a regular file: %(path)s")
+ raise ResourceNotFoundError(path)
self._s3bukt.delete_key(s3path)
- if recursive:
+ if recursive and path not in ("","/"):
pdir = dirname(path)
try:
self.removedir(pdir,recursive=True,force=False)
- except OperationFailedError:
+ except DirectoryNotEmptyError:
pass
def rename(self,src,dst):
@@ -390,11 +437,17 @@ class S3FS(FS):
def getinfo(self,path):
s3path = self._s3path(path)
+ if path in ("","/"):
+ return {}
k = self._s3bukt.get_key(s3path)
+ if k is None:
+ raise ResourceNotFoundError(path)
info = {}
- info['size'] = int(k.size)
+ if hasattr(k,"size"):
+ info['size'] = int(k.size)
fmt = "%a, %d %b %Y %H:%M:%S %Z"
- info['modified_time'] = datetime.datetime.strptime(k.last_modified,fmt)
+ if hasattr(k,"last_modified"):
+ info['modified_time'] = datetime.datetime.strptime(k.last_modified,fmt)
return info
def desc(self,path):
@@ -419,26 +472,26 @@ class S3FS(FS):
# It exists as a regular file
if k.name == s3path_dst:
if not overwrite:
- raise DestinationExistsError("COPYFILE_FAILED",src,dst,msg="Destination file exists: %(path2)s")
+ raise DestinationExistsError(dst)
dstOK = True
break
# Check if it refers to a directory. If so, we copy *into* it.
# Since S3 lists in lexicographic order, subsequent iterations
# of the loop will check for the existence of the new filename.
if k.name == s3path_dstD:
- nm = resourcename(src)
+ nm = basename(src)
dst = pathjoin(dirname(dst),nm)
s3path_dst = s3path_dstD + nm
dstOK = True
if not dstOK and not self.isdir(dirname(dst)):
- raise OperationFailedError("COPYFILE_FAILED",src,dst,msg="Destination directory does not exist")
+ raise ParentDirectoryMissingError(dst,msg="Destination directory does not exist: %(path)s")
# OK, now we can copy the file.
s3path_src = self._s3path(src)
try:
self._s3bukt.copy_key(s3path_dst,self._bucket_name,s3path_src)
except S3ResponseError, e:
if "404 Not Found" in str(e):
- raise ResourceInvalid("WRONG_TYPE", src, msg="Source is not a file: %(path)s")
+ raise ResourceInvalidError(src, msg="Source is not a file: %(path)s")
raise e
else:
k = self._s3bukt.get_key(s3path_dst)
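A short usage sketch for S3FS with the new RemoteFileBuffer; the bucket name, prefix and credentials are placeholders, and falling back to boto's own credential discovery when the keys are omitted is an assumption.

from fs.s3fs import S3FS

s3 = S3FS("my-bucket", prefix="backups/",
          aws_access_key="AKIA...", aws_secret_key="...")   # placeholders

s3.makedir("2009", allow_recreate=True)
f = s3.open("2009/notes.txt", "w")       # returns a RemoteFileBuffer
f.write("uploaded when flushed or closed")
f.close()                                # contents are copied back to S3 here

print s3.listdir("2009")
print s3.getinfo("2009/notes.txt")       # typically 'size' and 'modified_time'
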
diff --git a/fs/sftpfs.py b/fs/sftpfs.py
new file mode 100644
index 0000000..1d825b6
--- /dev/null
+++ b/fs/sftpfs.py
@@ -0,0 +1,284 @@
+"""
+
+  fs.sftpfs: Filesystem accessing an SFTP server (via paramiko)
+
+"""
+
+import datetime
+import stat as statinfo
+
+import paramiko
+
+from fs.base import *
+
+
+if not hasattr(paramiko.SFTPFile,"__enter__"):
+ paramiko.SFTPFile.__enter__ = lambda self: self
+ paramiko.SFTPFile.__exit__ = lambda self,et,ev,tb: self.close() and False
+
+
+class SFTPFS(FS):
+ """A filesystem stored on a remote SFTP server.
+
+    This is basically a compatibility wrapper for the excellent SFTPClient
+ class in the paramiko module.
+ """
+
+ def __init__(self,connection,root="/",**credentials):
+ """SFTPFS constructor.
+
+ The only required argument is 'connection', which must be something
+        from which we can construct a paramiko.SFTPClient object. Possible
+ values include:
+
+ * a hostname string
+ * a (hostname,port) tuple
+ * a paramiko.Transport instance
+ * a paramiko.Channel instance in "sftp" mode
+
+        The keyword argument 'root' specifies the root directory on the remote
+        machine - access to files outside this root will be prevented. Any
+ other keyword arguments are assumed to be credentials to be used when
+ connecting the transport.
+ """
+ self._owns_transport = False
+ self._credentials = credentials
+ if isinstance(connection,paramiko.Channel):
+ self.client = paramiko.SFTPClient(connection)
+ else:
+ if not isinstance(connection,paramiko.Transport):
+ connection = paramiko.Transport(connection)
+ self._owns_transport = True
+ if not connection.is_authenticated():
+ connection.connect(**credentials)
+ self.client = paramiko.SFTPClient.from_transport(connection)
+ self.root = abspath(normpath(root))
+
+ def __del__(self):
+ self.close()
+
+ def __getstate__(self):
+ state = super(SFTPFS,self).__getstate__()
+ if self._owns_transport:
+ state['client'] = self.client.get_channel().get_transport().getpeername()
+ return state
+
+ def __setstate__(self,state):
+ for (k,v) in state.iteritems():
+ self.__dict__[k] = v
+ if self._owns_transport:
+ t = paramiko.Transport(self.client)
+ t.connect(**self._credentials)
+ self.client = paramiko.SFTPClient.from_transport(t)
+
+ def close(self):
+ """Close the connection to the remote server."""
+ if getattr(self,"client",None):
+ if self._owns_transport:
+ t = self.client.get_channel().get_transport()
+ self.client.close()
+ t.close()
+ else:
+ self.client.close()
+ self.client = None
+
+ def _normpath(self,path):
+ npath = pathjoin(self.root,relpath(normpath(path)))
+ if not isprefix(self.root,npath):
+ raise PathError(path,msg="Path is outside root: %(path)s")
+ return npath
+
+ @convert_os_errors
+ def open(self,path,mode="r",bufsize=-1):
+ npath = self._normpath(path)
+ f = self.client.open(npath,mode,bufsize)
+ if self.isdir(path):
+ msg = "that's a directory: %(path)s"
+ raise ResourceInvalidError(path,msg=msg)
+ return f
+
+ @convert_os_errors
+ def exists(self,path):
+ npath = self._normpath(path)
+ try:
+ self.client.stat(npath)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+ return False
+ raise
+ return True
+
+ @convert_os_errors
+ def isdir(self,path):
+ npath = self._normpath(path)
+ try:
+ stat = self.client.stat(npath)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+ return False
+ raise
+ return statinfo.S_ISDIR(stat.st_mode)
+
+ @convert_os_errors
+ def isfile(self,path):
+ npath = self._normpath(path)
+ try:
+ stat = self.client.stat(npath)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+ return False
+ raise
+ return statinfo.S_ISREG(stat.st_mode)
+
+ @convert_os_errors
+ def listdir(self,path="./",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
+ npath = self._normpath(path)
+ try:
+ paths = self.client.listdir(npath)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+ if self.isfile(path):
+ raise ResourceInvalidError(path,msg="Can't list directory contents of a file: %(path)s")
+ raise ResourceNotFoundError(path)
+ elif self.isfile(path):
+ raise ResourceInvalidError(path,msg="Can't list directory contents of a file: %(path)s")
+ raise
+ return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
+
+ @convert_os_errors
+ def makedir(self,path,recursive=False,allow_recreate=False):
+ npath = self._normpath(path)
+ try:
+ self.client.mkdir(npath)
+ except IOError, e:
+ # Error code is unreliable, try to figure out what went wrong
+ try:
+ stat = self.client.stat(npath)
+ except IOError:
+ if not self.isdir(dirname(path)):
+ # Parent dir is missing
+ if not recursive:
+ raise ParentDirectoryMissingError(path)
+ self.makedir(dirname(path),recursive=True)
+ self.makedir(path,allow_recreate=allow_recreate)
+ else:
+ # Undetermined error, let the decorator handle it
+ raise
+ else:
+ # Destination exists
+ if statinfo.S_ISDIR(stat.st_mode):
+ if not allow_recreate:
+ raise DestinationExistsError(path,msg="Can't create a directory that already exists (try allow_recreate=True): %(path)s")
+ else:
+ raise ResourceInvalidError(path,msg="Can't create directory, there's already a file of that name: %(path)s")
+
+ @convert_os_errors
+ def remove(self,path):
+ npath = self._normpath(path)
+ try:
+ self.client.remove(npath)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+ raise ResourceNotFoundError(path)
+ elif self.isdir(path):
+ raise ResourceInvalidError(path,msg="Cannot use remove() on a directory: %(path)s")
+ raise
+
+ @convert_os_errors
+ def removedir(self,path,recursive=False,force=False):
+ npath = self._normpath(path)
+ if path in ("","/"):
+ return
+ if force:
+ for path2 in self.listdir(path,absolute=True):
+ try:
+ self.remove(path2)
+ except ResourceInvalidError:
+ self.removedir(path2,force=True)
+ try:
+ self.client.rmdir(npath)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+ if self.isfile(path):
+ raise ResourceInvalidError(path,msg="Can't use removedir() on a file: %(path)s")
+ raise ResourceNotFoundError(path)
+ elif self.listdir(path):
+ raise DirectoryNotEmptyError(path)
+ raise
+ if recursive:
+ try:
+ self.removedir(dirname(path),recursive=True)
+ except DirectoryNotEmptyError:
+ pass
+
+ @convert_os_errors
+ def rename(self,src,dst):
+ if not issamedir(src, dst):
+            raise ValueError("Destination path must be in the same directory (use the move method for moving to a different directory)")
+ nsrc = self._normpath(src)
+ ndst = self._normpath(dst)
+ try:
+ self.client.rename(nsrc,ndst)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+                raise ResourceNotFoundError(src)
+ raise
+
+ @convert_os_errors
+ def move(self,src,dst,overwrite=False,chunk_size=16384):
+ nsrc = self._normpath(src)
+ ndst = self._normpath(dst)
+ if overwrite and self.isfile(dst):
+ self.remove(dst)
+ try:
+ self.client.rename(nsrc,ndst)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+                raise ResourceNotFoundError(src)
+ if self.exists(dst):
+ raise DestinationExistsError(dst)
+ if not self.isdir(dirname(dst)):
+ raise ParentDirectoryMissingError(dst,msg="Destination directory does not exist: %(path)s")
+ raise
+
+ @convert_os_errors
+ def movedir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384):
+ nsrc = self._normpath(src)
+ ndst = self._normpath(dst)
+ if overwrite and self.isdir(dst):
+ self.removedir(dst)
+ try:
+ self.client.rename(nsrc,ndst)
+ except IOError, e:
+ if getattr(e,"errno",None) == 2:
+                raise ResourceNotFoundError(src)
+ if self.exists(dst):
+ raise DestinationExistsError(dst)
+ if not self.isdir(dirname(dst)):
+ raise ParentDirectoryMissingError(dst,msg="Destination directory does not exist: %(path)s")
+ raise
+
+ @convert_os_errors
+ def getinfo(self, path):
+ npath = self._normpath(path)
+ stats = self.client.stat(npath)
+ info = dict((k, getattr(stats, k)) for k in dir(stats) if not k.startswith('__') )
+ info['size'] = info['st_size']
+ ct = info.get('st_ctime', None)
+ if ct is not None:
+ info['created_time'] = datetime.datetime.fromtimestamp(ct)
+ at = info.get('st_atime', None)
+ if at is not None:
+ info['accessed_time'] = datetime.datetime.fromtimestamp(at)
+ mt = info.get('st_mtime', None)
+ if mt is not None:
+            info['modified_time'] = datetime.datetime.fromtimestamp(mt)
+ return info
+
+ @convert_os_errors
+ def getsize(self, path):
+ npath = self._normpath(path)
+ stats = self.client.stat(npath)
+ return stats.st_size
+
+
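
The SFTP methods above all translate paramiko's errno-style IOErrors (errno 2 for a missing path) into the fs exception hierarchy. A minimal sketch of the same existence check against a bare paramiko SFTPClient, assuming a reachable host and purely illustrative credentials:

    import errno
    import paramiko

    def sftp_exists(client, path):
        """Return True if path exists on the remote server."""
        try:
            client.stat(path)
        except IOError, e:
            if e.errno == errno.ENOENT:   # errno 2, the same check used above
                return False
            raise
        return True

    transport = paramiko.Transport(("sftp.example.com", 22))   # illustrative host
    transport.connect(username="user", password="secret")      # illustrative credentials
    client = paramiko.SFTPClient.from_transport(transport)
    print sftp_exists(client, "/tmp")
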
diff --git a/fs/tempfs.py b/fs/tempfs.py
index dc4ca7e..993e297 100644
--- a/fs/tempfs.py
+++ b/fs/tempfs.py
@@ -9,16 +9,16 @@ class TempFS(OSFS):
"""Create a Filesystem in a tempory directory (with tempfile.mkdtemp),
and removes it when the TempFS object is cleaned up."""
- def __init__(self, identifier=None, thread_syncronize=True):
+ def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=True):
"""Creates a temporary Filesystem
identifier -- A string that is included in the name of the temporary directory,
default uses "TempFS"
"""
- self._temp_dir = tempfile.mkdtemp(identifier or "TempFS")
+ self._temp_dir = tempfile.mkdtemp(identifier or "TempFS",dir=temp_dir)
self._cleaned = False
- OSFS.__init__(self, self._temp_dir, thread_syncronize=thread_syncronize)
+ OSFS.__init__(self, self._temp_dir, dir_mode=dir_mode, thread_synchronize=thread_synchronize)
def __str__(self):
return '<TempFS: %s>' % self._temp_dir
@@ -44,7 +44,4 @@ class TempFS(OSFS):
def __del__(self):
self.close()
-if __name__ == "__main__":
- tfs = TempFS()
- print tfs
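
The widened TempFS signature lets callers choose where the scratch directory lives and what mode it is created with. A short usage sketch (the temp_dir value is illustrative):

    from fs.tempfs import TempFS

    tmp = TempFS(identifier="mytest", temp_dir="/var/tmp", dir_mode=0700)
    tmp.createfile("hello.txt", "hi")
    print tmp.getcontents("hello.txt")
    tmp.close()   # the backing directory is removed on close
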
diff --git a/fs/tests.py b/fs/tests.py
deleted file mode 100644
index deb880c..0000000
--- a/fs/tests.py
+++ /dev/null
@@ -1,806 +0,0 @@
-#!/usr/bin/env python
-
-import unittest
-import base as fs
-from helpers import *
-from helpers import _iteratepath
-import shutil
-
-class TestHelpers(unittest.TestCase):
-
- def test_isabsolutepath(self):
- tests = [ ('', False),
- ('/', True),
- ('/A/B', True),
- ('/asdasd', True),
- ('a/b/c', False),
- ]
- for path, result in tests:
- self.assertEqual(fs.isabsolutepath(path), result)
-
- def test_normpath(self):
- tests = [ ("\\a\\b\\c", "/a/b/c"),
- ("", ""),
- ("/a/b/c", "/a/b/c"),
- ]
- for path, result in tests:
- self.assertEqual(fs.normpath(path), result)
-
- def test_pathjoin(self):
- tests = [ ("", "a", "a"),
- ("a", "a", "a/a"),
- ("a/b", "../c", "a/c"),
- ("a/b/../c", "d", "a/c/d"),
- ("/a/b/c", "d", "/a/b/c/d"),
- ("/a/b/c", "../../../d", "/d"),
- ("a", "b", "c", "a/b/c"),
- ("a/b/c", "../d", "c", "a/b/d/c"),
- ("a/b/c", "../d", "/a", "/a"),
- ("aaa", "bbb/ccc", "aaa/bbb/ccc"),
- ("aaa", "bbb\ccc", "aaa/bbb/ccc"),
- ("aaa", "bbb", "ccc", "/aaa", "eee", "/aaa/eee"),
- ("a/b", "./d", "e", "a/b/d/e"),
- ("/", "/", "/"),
- ("/", "", "/"),
- ]
- for testpaths in tests:
- paths = testpaths[:-1]
- result = testpaths[-1]
- self.assertEqual(fs.pathjoin(*paths), result)
-
- self.assertRaises(ValueError, fs.pathjoin, "../")
- self.assertRaises(ValueError, fs.pathjoin, "./../")
- self.assertRaises(ValueError, fs.pathjoin, "a/b", "../../..")
- self.assertRaises(ValueError, fs.pathjoin, "a/b/../../../d")
-
- def test_makerelative(self):
- tests = [ ("/a/b", "a/b"),
- ("a/b", "a/b"),
- ("/", "") ]
-
- for path, result in tests:
- print path, result
- self.assertEqual(fs.makerelative(path), result)
-
- def test_makeabsolute(self):
- tests = [ ("/a/b", "/a/b"),
- ("a/b", "/a/b"),
- ("/", "/") ]
-
- for path, result in tests:
- self.assertEqual(fs.makeabsolute(path), result)
-
- def test_iteratepath(self):
- tests = [ ("a/b", ["a", "b"]),
- ("", [] ),
- ("aaa/bbb/ccc", ["aaa", "bbb", "ccc"]),
- ("a/b/c/../d", ["a", "b", "d"]) ]
-
- for path, results in tests:
- print repr(path), results
- for path_component, expected in zip(_iteratepath(path), results):
- self.assertEqual(path_component, expected)
-
- self.assertEqual(list(_iteratepath("a/b/c/d", 1)), ["a", "b/c/d"])
- self.assertEqual(list(_iteratepath("a/b/c/d", 2)), ["a", "b", "c/d"])
-
- def test_pathsplit(self):
- tests = [ ("a/b", ("a", "b")),
- ("a/b/c", ("a/b", "c")),
- ("a", ("", "a")),
- ("", ("", "")),
- ("/", ("", "")),
- ("foo/bar", ("foo", "bar")),
- ("foo/bar/baz", ("foo/bar", "baz")),
- ]
- for path, result in tests:
- self.assertEqual(fs.pathsplit(path), result)
-
-
-import objecttree
-
-class TestObjectTree(unittest.TestCase):
-
- def test_getset(self):
- ot = objecttree.ObjectTree()
- ot['foo'] = "bar"
- self.assertEqual(ot['foo'], 'bar')
-
- ot = objecttree.ObjectTree()
- ot['foo/bar'] = "baz"
- self.assertEqual(ot['foo'], {'bar':'baz'})
- self.assertEqual(ot['foo/bar'], 'baz')
-
- del ot['foo/bar']
- self.assertEqual(ot['foo'], {})
-
- ot = objecttree.ObjectTree()
- ot['a/b/c'] = "A"
- ot['a/b/d'] = "B"
- ot['a/b/e'] = "C"
- ot['a/b/f'] = "D"
- self.assertEqual(sorted(ot['a/b'].values()), ['A', 'B', 'C', 'D'])
- self.assert_(ot.get('a/b/x', -1) == -1)
-
- self.assert_('a/b/c' in ot)
- self.assert_('a/b/x' not in ot)
- self.assert_(ot.isobject('a/b/c'))
- self.assert_(ot.isobject('a/b/d'))
- self.assert_(not ot.isobject('a/b'))
-
- left, object, right = ot.partialget('a/b/e/f/g')
- self.assertEqual(left, "a/b/e")
- self.assertEqual(object, "C")
- self.assertEqual(right, "f/g")
-
-
-import tempfile
-import osfs
-import os
-
-class TestOSFS(unittest.TestCase):
-
- def setUp(self):
- self.temp_dir = tempfile.mkdtemp("fstest")
- self.fs = osfs.OSFS(self.temp_dir)
- print "Temp dir is", self.temp_dir
-
- def tearDown(self):
- shutil.rmtree(self.temp_dir)
-
- def check(self, p):
- return os.path.exists(os.path.join(self.temp_dir, makerelative(p)))
-
- def test_debug(self):
- str(self.fs)
- repr(self.fs)
- self.assert_(hasattr(self.fs, 'desc'))
-
- def test_makedir(self):
- check = self.check
-
- self.fs.makedir("a")
- self.assert_(check("a"))
- self.assertRaises(fs.FSError, self.fs.makedir, "a/b/c")
-
- self.fs.makedir("a/b/c", recursive=True)
- self.assert_(check("a/b/c"))
-
- self.fs.makedir("foo/bar/baz", recursive=True)
- self.assert_(check("foo/bar/baz"))
-
- self.fs.makedir("a/b/child")
- self.assert_(check("a/b/child"))
-
- self.fs.desc("a")
- self.fs.desc("a/b/child")
-
- def test_removedir(self):
- check = self.check
- self.fs.makedir("a")
- self.assert_(check("a"))
- self.fs.removedir("a")
- self.assert_(not check("a"))
- self.fs.makedir("a/b/c/d", recursive=True)
- self.assertRaises(fs.FSError, self.fs.removedir, "a/b")
- self.fs.removedir("a/b/c/d")
- self.assert_(not check("a/b/c/d"))
- self.fs.removedir("a/b/c")
- self.assert_(not check("a/b/c"))
- self.fs.removedir("a/b")
- self.assert_(not check("a/b"))
-
- self.fs.makedir("foo/bar/baz", recursive=True)
- self.fs.removedir("foo/bar/baz", recursive=True)
- self.assert_(not check("foo/bar/baz"))
- self.assert_(not check("foo/bar"))
- self.assert_(not check("foo"))
-
- self.fs.makedir("frollic/waggle", recursive=True)
- self.fs.createfile("frollic/waddle.txt","waddlewaddlewaddle")
- self.assertRaises(fs.OperationFailedError,self.fs.removedir,"frollic")
- self.fs.removedir("frollic",force=True)
- self.assert_(not check("frollic"))
-
- def test_listdir(self):
-
- def makefile(fname):
- f = self.fs.open(fname, "wb")
- f.write("*")
- f.close()
-
- makefile("a")
- makefile("b")
- makefile("foo")
- makefile("bar")
-
- d1 = self.fs.listdir()
- self.assertEqual(len(d1), 4)
- self.assertEqual(sorted(d1), ["a", "b", "bar", "foo"])
-
- d2 = self.fs.listdir(absolute=True)
- self.assertEqual(len(d2), 4)
- self.assertEqual(sorted(d2), ["/a", "/b", "/bar", "/foo"])
-
- self.fs.makedir("p/1/2/3", recursive=True)
- makefile("p/1/2/3/a")
- makefile("p/1/2/3/b")
- makefile("p/1/2/3/foo")
- makefile("p/1/2/3/bar")
-
- self.fs.makedir("q")
- dirs_only = self.fs.listdir(dirs_only=True)
- files_only = self.fs.listdir(files_only=True)
- self.assertEqual(sorted(dirs_only), ["p", "q"])
- self.assertEqual(sorted(files_only), ["a", "b", "bar", "foo"])
-
- d3 = self.fs.listdir("p/1/2/3")
- self.assertEqual(len(d3), 4)
- self.assertEqual(sorted(d3), ["a", "b", "bar", "foo"])
-
- d4 = self.fs.listdir("p/1/2/3", absolute=True)
- self.assertEqual(len(d4), 4)
- self.assertEqual(sorted(d4), ["/p/1/2/3/a", "/p/1/2/3/b", "/p/1/2/3/bar", "/p/1/2/3/foo"])
-
- d4 = self.fs.listdir("p/1/2/3", full=True)
- self.assertEqual(len(d4), 4)
- self.assertEqual(sorted(d4), ["p/1/2/3/a", "p/1/2/3/b", "p/1/2/3/bar", "p/1/2/3/foo"])
-
-
- def test_rename(self):
- check = self.check
- self.fs.open("foo.txt", 'wt').write("Hello, World!")
- self.assert_(check("foo.txt"))
- self.fs.rename("foo.txt", "bar.txt")
- self.assert_(check("bar.txt"))
- self.assert_(not check("foo.txt"))
-
- def test_info(self):
- test_str = "Hello, World!"
- f = self.fs.open("info.txt", 'wb')
- f.write(test_str)
- f.close()
- info = self.fs.getinfo("info.txt")
- self.assertEqual(info['size'], len(test_str))
- self.fs.desc("info.txt")
-
- def test_getsize(self):
- test_str = "*"*23
- f = self.fs.open("info.txt", 'wb')
- f.write(test_str)
- f.close()
- size = self.fs.getsize("info.txt")
- self.assertEqual(size, len(test_str))
-
- def test_movefile(self):
- check = self.check
- contents = "If the implementation is hard to explain, it's a bad idea."
- def makefile(path):
- f = self.fs.open(path, "wb")
- f.write(contents)
- f.close()
- def checkcontents(path):
- f = self.fs.open(path, "rb")
- check_contents = f.read()
- f.close()
- self.assertEqual(check_contents,contents)
- return contents == check_contents
-
- self.fs.makedir("foo/bar", recursive=True)
- makefile("foo/bar/a.txt")
- self.assert_(check("foo/bar/a.txt"))
- self.assert_(checkcontents("foo/bar/a.txt"))
- self.fs.move("foo/bar/a.txt", "foo/b.txt")
- self.assert_(not check("foo/bar/a.txt"))
- self.assert_(check("foo/b.txt"))
- self.assert_(checkcontents("foo/b.txt"))
-
- self.fs.move("foo/b.txt", "c.txt")
- fs.print_fs(self.fs)
- self.assert_(not check("foo/b.txt"))
- self.assert_(check("/c.txt"))
- self.assert_(checkcontents("/c.txt"))
-
- makefile("foo/bar/a.txt")
- self.assertRaises(fs.DestinationExistsError,self.fs.move,"foo/bar/a.txt","/c.txt")
- self.assert_(check("foo/bar/a.txt"))
- self.assert_(check("/c.txt"))
- self.fs.move("foo/bar/a.txt","/c.txt",overwrite=True)
- self.assert_(not check("foo/bar/a.txt"))
- self.assert_(check("/c.txt"))
-
-
- def test_movedir(self):
- check = self.check
- contents = "If the implementation is hard to explain, it's a bad idea."
- def makefile(path):
- f = self.fs.open(path, "wb")
- f.write(contents)
- f.close()
-
- self.fs.makedir("a")
- self.fs.makedir("b")
- makefile("a/1.txt")
- makefile("a/2.txt")
- makefile("a/3.txt")
- self.fs.makedir("a/foo/bar", recursive=True)
- makefile("a/foo/bar/baz.txt")
-
- self.fs.movedir("a", "copy of a")
-
- self.assert_(check("copy of a/1.txt"))
- self.assert_(check("copy of a/2.txt"))
- self.assert_(check("copy of a/3.txt"))
- self.assert_(check("copy of a/foo/bar/baz.txt"))
-
- self.assert_(not check("a/1.txt"))
- self.assert_(not check("a/2.txt"))
- self.assert_(not check("a/3.txt"))
- self.assert_(not check("a/foo/bar/baz.txt"))
- self.assert_(not check("a/foo/bar"))
- self.assert_(not check("a/foo"))
- self.assert_(not check("a"))
-
- self.fs.makedir("a")
- self.assertRaises(fs.DestinationExistsError,self.fs.movedir,"copy of a","a")
- self.fs.movedir("copy of a","a",overwrite=True)
- self.assert_(not check("copy of a"))
- self.assert_(check("a/1.txt"))
- self.assert_(check("a/2.txt"))
- self.assert_(check("a/3.txt"))
- self.assert_(check("a/foo/bar/baz.txt"))
-
-
- def test_copyfile(self):
- check = self.check
- contents = "If the implementation is hard to explain, it's a bad idea."
- def makefile(path,contents=contents):
- f = self.fs.open(path, "wb")
- f.write(contents)
- f.close()
- def checkcontents(path,contents=contents):
- f = self.fs.open(path, "rb")
- check_contents = f.read()
- f.close()
- self.assertEqual(check_contents,contents)
- return contents == check_contents
-
- self.fs.makedir("foo/bar", recursive=True)
- makefile("foo/bar/a.txt")
- self.assert_(check("foo/bar/a.txt"))
- self.assert_(checkcontents("foo/bar/a.txt"))
- self.fs.copy("foo/bar/a.txt", "foo/b.txt")
- self.assert_(check("foo/bar/a.txt"))
- self.assert_(check("foo/b.txt"))
- self.assert_(checkcontents("foo/b.txt"))
-
- self.fs.copy("foo/b.txt", "c.txt")
- self.assert_(check("foo/b.txt"))
- self.assert_(check("/c.txt"))
- self.assert_(checkcontents("/c.txt"))
-
- makefile("foo/bar/a.txt","different contents")
- self.assertRaises(fs.DestinationExistsError,self.fs.copy,"foo/bar/a.txt","/c.txt")
- self.assert_(checkcontents("/c.txt"))
- self.fs.copy("foo/bar/a.txt","/c.txt",overwrite=True)
- self.assert_(checkcontents("foo/bar/a.txt","different contents"))
- self.assert_(checkcontents("/c.txt","different contents"))
-
-
- def test_copydir(self):
- check = self.check
- contents = "If the implementation is hard to explain, it's a bad idea."
- def makefile(path):
- f = self.fs.open(path, "wb")
- f.write(contents)
- f.close()
-
- self.fs.makedir("a")
- self.fs.makedir("b")
- makefile("a/1.txt")
- makefile("a/2.txt")
- makefile("a/3.txt")
- self.fs.makedir("a/foo/bar", recursive=True)
- makefile("a/foo/bar/baz.txt")
-
- self.fs.copydir("a", "copy of a")
- self.assert_(check("copy of a/1.txt"))
- self.assert_(check("copy of a/2.txt"))
- self.assert_(check("copy of a/3.txt"))
- self.assert_(check("copy of a/foo/bar/baz.txt"))
-
- self.assert_(check("a/1.txt"))
- self.assert_(check("a/2.txt"))
- self.assert_(check("a/3.txt"))
- self.assert_(check("a/foo/bar/baz.txt"))
-
- self.assertRaises(fs.DestinationExistsError,self.fs.copydir,"a","b")
- self.fs.copydir("a","b",overwrite=True)
- self.assert_(check("b/1.txt"))
- self.assert_(check("b/2.txt"))
- self.assert_(check("b/3.txt"))
- self.assert_(check("b/foo/bar/baz.txt"))
-
-
- def test_copydir_with_hidden(self):
- check = self.check
- contents = "If the implementation is hard to explain, it's a bad idea."
- def makefile(path):
- f = self.fs.open(path, "wb")
- f.write(contents)
- f.close()
-
- self.fs.makedir("a")
- makefile("a/1.txt")
- makefile("a/2.txt")
- makefile("a/.hidden.txt")
-
- self.fs.copydir("a", "copy of a")
- self.assert_(check("copy of a/1.txt"))
- self.assert_(check("copy of a/2.txt"))
- self.assert_(check("copy of a/.hidden.txt"))
-
- self.assert_(check("a/1.txt"))
- self.assert_(check("a/2.txt"))
- self.assert_(check("a/.hidden.txt"))
-
- def test_readwriteappendseek(self):
- def checkcontents(path, check_contents):
- f = None
- try:
- f = self.fs.open(path, "rb")
- read_contents = f.read()
- finally:
- if f is not None:
- f.close()
- self.assertEqual(read_contents,check_contents)
- return read_contents == check_contents
- test_strings = ["Beautiful is better than ugly.",
- "Explicit is better than implicit.",
- "Simple is better than complex."]
- all_strings = "".join(test_strings)
-
- self.assertRaises(fs.ResourceNotFoundError, self.fs.open, "a.txt", "r")
- self.assert_(not self.fs.exists("a.txt"))
- f1 = self.fs.open("a.txt", "wb")
- pos = 0
- for s in test_strings:
- f1.write(s)
- pos += len(s)
- self.assertEqual(pos, f1.tell())
- f1.close()
- self.assert_(self.fs.exists("a.txt"))
- self.assert_(checkcontents("a.txt", all_strings))
-
- f2 = self.fs.open("b.txt", "wb")
- f2.write(test_strings[0])
- f2.close()
- self.assert_(checkcontents("b.txt", test_strings[0]))
- f3 = self.fs.open("b.txt", "ab")
- f3.write(test_strings[1])
- f3.write(test_strings[2])
- f3.close()
- self.assert_(checkcontents("b.txt", all_strings))
- f4 = self.fs.open("b.txt", "wb")
- f4.write(test_strings[2])
- f4.close()
- self.assert_(checkcontents("b.txt", test_strings[2]))
- f5 = self.fs.open("c.txt", "wb")
- for s in test_strings:
- f5.write(s+"\n")
- f5.close()
- f6 = self.fs.open("c.txt", "rb")
- for s, t in zip(f6, test_strings):
- self.assertEqual(s, t+"\n")
- f6.close()
- f7 = self.fs.open("c.txt", "rb")
- f7.seek(13)
- word = f7.read(6)
- self.assertEqual(word, "better")
- f7.seek(1, os.SEEK_CUR)
- word = f7.read(4)
- self.assertEqual(word, "than")
- f7.seek(-9, os.SEEK_END)
- word = f7.read(7)
- self.assertEqual(word, "complex")
- f7.close()
- self.assertEqual(self.fs.getcontents("a.txt"), all_strings)
-
-
-
-class TestSubFS(TestOSFS):
-
- def setUp(self):
- self.temp_dir = tempfile.mkdtemp("fstest")
- self.parent_fs = osfs.OSFS(self.temp_dir)
- self.parent_fs.makedir("foo/bar", recursive=True)
- self.fs = self.parent_fs.opendir("foo/bar")
- print "Temp dir is", self.temp_dir
-
- def tearDown(self):
- shutil.rmtree(self.temp_dir)
-
- def check(self, p):
- p = os.path.join("foo/bar", makerelative(p))
- full_p = os.path.join(self.temp_dir, p)
- return os.path.exists(full_p)
-
-
-import memoryfs
-class TestMemoryFS(TestOSFS):
-
- def setUp(self):
- self.fs = memoryfs.MemoryFS()
-
- def tearDown(self):
- pass
-
- def check(self, p):
- return self.fs.exists(p)
-
-
-import mountfs
-class TestMountFS(TestOSFS):
-
- def setUp(self):
- self.mount_fs = mountfs.MountFS()
- self.mem_fs = memoryfs.MemoryFS()
- self.mount_fs.mountdir("mounted/memfs", self.mem_fs)
- self.fs = self.mount_fs.opendir("mounted/memfs")
-
- def tearDown(self):
- pass
-
- def check(self, p):
- return self.mount_fs.exists(os.path.join("mounted/memfs", makerelative(p)))
-
-import tempfs
-class TestTempFS(TestOSFS):
-
- def setUp(self):
- self.fs = tempfs.TempFS()
-
- def tearDown(self):
- td = self.fs._temp_dir
- self.fs.close()
- self.assert_(not os.path.exists(td))
-
- def check(self, p):
- td = self.fs._temp_dir
- return os.path.exists(os.path.join(td, makerelative(p)))
-
-import zipfs
-import random
-import zipfile
-class TestReadZipFS(unittest.TestCase):
-
- def setUp(self):
- self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
- self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
-
- self.zf = zipfile.ZipFile(self.temp_filename, "w")
- zf = self.zf
- zf.writestr("a.txt", "Hello, World!")
- zf.writestr("b.txt", "b")
- zf.writestr("1.txt", "1")
- zf.writestr("foo/bar/baz.txt", "baz")
- zf.writestr("foo/second.txt", "hai")
- zf.close()
- self.fs = zipfs.ZipFS(self.temp_filename, "r")
-
- def tearDown(self):
- self.fs.close()
- os.remove(self.temp_filename)
-
- def check(self, p):
- try:
- self.zipfile.getinfo(p)
- return True
- except:
- return False
-
- def test_reads(self):
- def read_contents(path):
- f = self.fs.open(path)
- contents = f.read()
- return contents
- def check_contents(path, expected):
- self.assert_(read_contents(path)==expected)
- check_contents("a.txt", "Hello, World!")
- check_contents("1.txt", "1")
- check_contents("foo/bar/baz.txt", "baz")
-
- def test_getcontents(self):
- def read_contents(path):
- return self.fs.getcontents(path)
- def check_contents(path, expected):
- self.assert_(read_contents(path)==expected)
- check_contents("a.txt", "Hello, World!")
- check_contents("1.txt", "1")
- check_contents("foo/bar/baz.txt", "baz")
-
- def test_is(self):
- self.assert_(self.fs.isfile('a.txt'))
- self.assert_(self.fs.isfile('1.txt'))
- self.assert_(self.fs.isfile('foo/bar/baz.txt'))
- self.assert_(self.fs.isdir('foo'))
- self.assert_(self.fs.isdir('foo/bar'))
- self.assert_(self.fs.exists('a.txt'))
- self.assert_(self.fs.exists('1.txt'))
- self.assert_(self.fs.exists('foo/bar/baz.txt'))
- self.assert_(self.fs.exists('foo'))
- self.assert_(self.fs.exists('foo/bar'))
-
- def test_listdir(self):
-
- def check_listing(path, expected):
- dir_list = self.fs.listdir(path)
- self.assert_(sorted(dir_list) == sorted(expected))
- check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt'])
- check_listing('foo', ['second.txt', 'bar'])
- check_listing('foo/bar', ['baz.txt'])
-
-class TestWriteZipFS(unittest.TestCase):
-
- def setUp(self):
- self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
- self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
-
- zip_fs = zipfs.ZipFS(self.temp_filename, 'w')
-
- def makefile(filename, contents):
- if dirname(filename):
- zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
- f = zip_fs.open(filename, 'w')
- f.write(contents)
- f.close()
-
- makefile("a.txt", "Hello, World!")
- makefile("b.txt", "b")
- makefile("foo/bar/baz.txt", "baz")
- makefile("foo/second.txt", "hai")
-
- zip_fs.close()
-
- def tearDown(self):
- os.remove(self.temp_filename)
-
- def test_valid(self):
- zf = zipfile.ZipFile(self.temp_filename, "r")
- self.assert_(zf.testzip() is None)
- zf.close()
-
- def test_creation(self):
- zf = zipfile.ZipFile(self.temp_filename, "r")
- def check_contents(filename, contents):
- zcontents = zf.read(filename)
- self.assertEqual(contents, zcontents)
- check_contents("a.txt", "Hello, World!")
- check_contents("b.txt", "b")
- check_contents("foo/bar/baz.txt", "baz")
- check_contents("foo/second.txt", "hai")
-
-
-class TestAppendZipFS(TestWriteZipFS):
-
- def setUp(self):
- self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
- self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
-
- zip_fs = zipfs.ZipFS(self.temp_filename, 'w')
-
- def makefile(filename, contents):
- if dirname(filename):
- zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
- f = zip_fs.open(filename, 'w')
- f.write(contents)
- f.close()
-
- makefile("a.txt", "Hello, World!")
- makefile("b.txt", "b")
-
- zip_fs.close()
- zip_fs = zipfs.ZipFS(self.temp_filename, 'a')
-
- makefile("foo/bar/baz.txt", "baz")
- makefile("foo/second.txt", "hai")
-
- zip_fs.close()
-
-
-import s3fs
-class TestS3FS(TestOSFS):
-
- bucket = "test-s3fs.rfk.id.au"
-
- def setUp(self):
- self.fs = s3fs.S3FS(self.bucket,"/unittest/files")
- self._clear()
-
- def _clear(self):
- for (path,files) in self.fs.walk(search="depth"):
- for fn in files:
- self.fs.remove(pathjoin(path,fn))
- if path and path != "/":
- self.fs.removedir(path)
-
- def tearDown(self):
- self._clear()
- for k in self.fs._s3bukt.list():
- self.fs._s3bukt.delete_key(k)
- self.fs._s3conn.delete_bucket(self.bucket)
-
- def check(self, p):
- return self.fs.exists(p)
-
- def test_with_statement(self):
- import sys
- if sys.version_info[0] >= 2 and sys.version_info[1] >= 5:
- # A successful 'with' statement
- contents = "testing the with statement"
- code = "from __future__ import with_statement\n"
- code += "with self.fs.open('f.txt','w-') as testfile:\n"
- code += " testfile.write(contents)\n"
- code += "self.assertEquals(self.fs.getcontents('f.txt'),contents)"
- code = compile(code,"<string>",'exec')
- eval(code)
- # A 'with' statement raising an error
- contents = "testing the with statement"
- code = "from __future__ import with_statement\n"
- code += "with self.fs.open('f.txt','w-') as testfile:\n"
- code += " testfile.write(contents)\n"
- code += " raise ValueError\n"
- code = compile(code,"<string>",'exec')
- self.assertRaises(ValueError,eval,code,globals(),locals())
- self.assertEquals(self.fs.getcontents('f.txt'),contents)
-
-
-
-import rpcfs
-import socket
-import threading
-import time
-class TestRPCFS(TestOSFS):
-
- def setUp(self):
- self.port = 8000
- self.server = None
- while not self.server:
- try:
- self.server = rpcfs.RPCFSServer(tempfs.TempFS(),("localhost",self.port),logRequests=False)
- except socket.error, e:
- if e.args[1] == "Address already in use":
- self.port += 1
- else:
- raise e
- self.server_thread = threading.Thread(target=self._run_server)
- self.server_thread.start()
- self.fs = rpcfs.RPCFS("http://localhost:" + str(self.port))
-
- def _run_server(self):
- """Run the server, swallowing shutdown-related execptions."""
- try:
- self.server.serve_forever()
- except:
- pass
-
- def tearDown(self):
- try:
- # Shut the server down. We send one final request to
- # bump the socket and make it recognise the shutdown.
- self.server.serve_more_requests = False
- self.server.server_close()
- self.fs.exists("/")
- except Exception:
- pass
-
- def check(self, p):
- return self.fs.exists(p)
-
-
-if __name__ == "__main__":
- #t = TestFS()
- #t.setUp()
- #t.tearDown()
- import nose
- nose.main()
diff --git a/fs/tests/__init__.py b/fs/tests/__init__.py
new file mode 100644
index 0000000..61cd2c2
--- /dev/null
+++ b/fs/tests/__init__.py
@@ -0,0 +1,448 @@
+#!/usr/bin/env python
+"""
+
+ fs.tests: testcases for the fs module
+
+"""
+
+# Send any output from the logging module to stdout, so it will
+# be captured by nose and reported appropriately
+import sys
+import logging
+logging.basicConfig(level=logging.ERROR,stream=sys.stdout)
+
+from fs.base import *
+
+import os, os.path
+import pickle
+
+
+class FSTestCases:
+ """Base suite of testcases for filesystem implementations.
+
+ Any FS subclass should be capable of passing all of these tests.
+    To apply the tests to your own FS implementation, simply use FSTestCases
+ as a mixin for your own unittest.TestCase subclass and have the setUp
+ method set self.fs to an instance of your FS implementation.
+
+ This class is designed as a mixin so that it's not detected by test
+ loading tools such as nose.
+ """
+
+ def check(self, p):
+ """Check that a file exists within self.fs"""
+ return self.fs.exists(p)
+
+ def test_root_dir(self):
+ self.assertTrue(self.fs.isdir(""))
+ self.assertTrue(self.fs.isdir("/"))
+
+ def test_debug(self):
+ str(self.fs)
+ repr(self.fs)
+ self.assert_(hasattr(self.fs, 'desc'))
+
+ def test_writefile(self):
+ self.assertRaises(ResourceNotFoundError,self.fs.open,"test1.txt")
+ f = self.fs.open("test1.txt","w")
+ f.write("testing")
+ f.close()
+ self.check("test1.txt")
+ f = self.fs.open("test1.txt","r")
+ self.assertEquals(f.read(),"testing")
+ f.close()
+ f = self.fs.open("test1.txt","w")
+ f.write("test file overwrite")
+ f.close()
+ self.check("test1.txt")
+ f = self.fs.open("test1.txt","r")
+ self.assertEquals(f.read(),"test file overwrite")
+
+ def test_isdir_isfile(self):
+ self.assertFalse(self.fs.exists("dir1"))
+ self.assertFalse(self.fs.isdir("dir1"))
+ self.assertFalse(self.fs.isfile("a.txt"))
+ self.fs.createfile("a.txt")
+ self.assertFalse(self.fs.isdir("dir1"))
+ self.assertTrue(self.fs.exists("a.txt"))
+ self.assertTrue(self.fs.isfile("a.txt"))
+ self.fs.makedir("dir1")
+ self.assertTrue(self.fs.isdir("dir1"))
+ self.assertTrue(self.fs.exists("dir1"))
+ self.assertTrue(self.fs.exists("a.txt"))
+ self.fs.remove("a.txt")
+ self.assertFalse(self.fs.exists("a.txt"))
+
+ def test_listdir(self):
+ self.fs.createfile("a")
+ self.fs.createfile("b")
+ self.fs.createfile("foo")
+ self.fs.createfile("bar")
+ # Test listing of the root directory
+ d1 = self.fs.listdir()
+ self.assertEqual(len(d1), 4)
+ self.assertEqual(sorted(d1), ["a", "b", "bar", "foo"])
+ d1 = self.fs.listdir("")
+ self.assertEqual(len(d1), 4)
+ self.assertEqual(sorted(d1), ["a", "b", "bar", "foo"])
+ d1 = self.fs.listdir("/")
+ self.assertEqual(len(d1), 4)
+ # Test listing absolute paths
+ d2 = self.fs.listdir(absolute=True)
+ self.assertEqual(len(d2), 4)
+ self.assertEqual(sorted(d2), ["/a", "/b", "/bar", "/foo"])
+ # Create some deeper subdirectories, to make sure their
+        # contents are not inadvertently included
+ self.fs.makedir("p/1/2/3",recursive=True)
+ self.fs.createfile("p/1/2/3/a")
+ self.fs.createfile("p/1/2/3/b")
+ self.fs.createfile("p/1/2/3/foo")
+ self.fs.createfile("p/1/2/3/bar")
+ self.fs.makedir("q")
+ # Test listing just files, just dirs, and wildcards
+ dirs_only = self.fs.listdir(dirs_only=True)
+ files_only = self.fs.listdir(files_only=True)
+ contains_a = self.fs.listdir(wildcard="*a*")
+ self.assertEqual(sorted(dirs_only), ["p", "q"])
+ self.assertEqual(sorted(files_only), ["a", "b", "bar", "foo"])
+ self.assertEqual(sorted(contains_a), ["a", "bar"])
+ # Test listing a subdirectory
+ d3 = self.fs.listdir("p/1/2/3")
+ self.assertEqual(len(d3), 4)
+ self.assertEqual(sorted(d3), ["a", "b", "bar", "foo"])
+        # Test listing a subdirectory with absolute and full paths
+ d4 = self.fs.listdir("p/1/2/3", absolute=True)
+ self.assertEqual(len(d4), 4)
+ self.assertEqual(sorted(d4), ["/p/1/2/3/a", "/p/1/2/3/b", "/p/1/2/3/bar", "/p/1/2/3/foo"])
+ d4 = self.fs.listdir("p/1/2/3", full=True)
+ self.assertEqual(len(d4), 4)
+ self.assertEqual(sorted(d4), ["p/1/2/3/a", "p/1/2/3/b", "p/1/2/3/bar", "p/1/2/3/foo"])
+ # Test that appropriate errors are raised
+ self.assertRaises(ResourceNotFoundError,self.fs.listdir,"zebra")
+ self.assertRaises(ResourceInvalidError,self.fs.listdir,"foo")
+
+ def test_makedir(self):
+ check = self.check
+ self.fs.makedir("a")
+ self.assertTrue(check("a"))
+ self.assertRaises(ParentDirectoryMissingError,self.fs.makedir,"a/b/c")
+ self.fs.makedir("a/b/c", recursive=True)
+ self.assert_(check("a/b/c"))
+ self.fs.makedir("foo/bar/baz", recursive=True)
+ self.assert_(check("foo/bar/baz"))
+ self.fs.makedir("a/b/child")
+ self.assert_(check("a/b/child"))
+ self.assertRaises(DestinationExistsError,self.fs.makedir,"/a/b")
+ self.fs.makedir("/a/b",allow_recreate=True)
+ self.fs.createfile("/a/file")
+ self.assertRaises(ResourceInvalidError,self.fs.makedir,"a/file")
+
+ def test_remove(self):
+ self.fs.createfile("a.txt")
+ self.assertTrue(self.check("a.txt"))
+ self.fs.remove("a.txt")
+ self.assertFalse(self.check("a.txt"))
+ self.assertRaises(ResourceNotFoundError,self.fs.remove,"a.txt")
+ self.fs.makedir("dir1")
+ self.assertRaises(ResourceInvalidError,self.fs.remove,"dir1")
+ self.fs.createfile("/dir1/a.txt")
+ self.assertTrue(self.check("dir1/a.txt"))
+ self.fs.remove("dir1/a.txt")
+ self.assertFalse(self.check("/dir1/a.txt"))
+
+ def test_removedir(self):
+ check = self.check
+ self.fs.makedir("a")
+ self.assert_(check("a"))
+ self.fs.removedir("a")
+ self.assert_(not check("a"))
+ self.fs.makedir("a/b/c/d", recursive=True)
+ self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "a/b")
+ self.fs.removedir("a/b/c/d")
+ self.assert_(not check("a/b/c/d"))
+ self.fs.removedir("a/b/c")
+ self.assert_(not check("a/b/c"))
+ self.fs.removedir("a/b")
+ self.assert_(not check("a/b"))
+ # Test recursive removal of empty parent dirs
+ self.fs.makedir("foo/bar/baz", recursive=True)
+ self.fs.removedir("foo/bar/baz", recursive=True)
+ self.assert_(not check("foo/bar/baz"))
+ self.assert_(not check("foo/bar"))
+ self.assert_(not check("foo"))
+ # Ensure that force=True works as expected
+ self.fs.makedir("frollic/waggle", recursive=True)
+ self.fs.createfile("frollic/waddle.txt","waddlewaddlewaddle")
+ self.assertRaises(DirectoryNotEmptyError,self.fs.removedir,"frollic")
+ self.assertRaises(ResourceInvalidError,self.fs.removedir,"frollic/waddle.txt")
+ self.fs.removedir("frollic",force=True)
+ self.assert_(not check("frollic"))
+
+ def test_rename(self):
+ check = self.check
+ self.fs.createfile("foo.txt","Hello, World!")
+ self.assert_(check("foo.txt"))
+ self.fs.rename("foo.txt", "bar.txt")
+ self.assert_(check("bar.txt"))
+ self.assert_(not check("foo.txt"))
+
+ def test_info(self):
+ test_str = "Hello, World!"
+ self.fs.createfile("info.txt",test_str)
+ info = self.fs.getinfo("info.txt")
+ self.assertEqual(info['size'], len(test_str))
+ self.fs.desc("info.txt")
+
+ def test_getsize(self):
+ test_str = "*"*23
+ self.fs.createfile("info.txt",test_str)
+ size = self.fs.getsize("info.txt")
+ self.assertEqual(size, len(test_str))
+
+ def test_movefile(self):
+ check = self.check
+ contents = "If the implementation is hard to explain, it's a bad idea."
+ def makefile(path):
+ self.fs.createfile(path,contents)
+ def checkcontents(path):
+ check_contents = self.fs.getcontents(path)
+ self.assertEqual(check_contents,contents)
+ return contents == check_contents
+
+ self.fs.makedir("foo/bar", recursive=True)
+ makefile("foo/bar/a.txt")
+ self.assert_(check("foo/bar/a.txt"))
+ self.assert_(checkcontents("foo/bar/a.txt"))
+ self.fs.move("foo/bar/a.txt", "foo/b.txt")
+ self.assert_(not check("foo/bar/a.txt"))
+ self.assert_(check("foo/b.txt"))
+ self.assert_(checkcontents("foo/b.txt"))
+
+ self.fs.move("foo/b.txt", "c.txt")
+ self.assert_(not check("foo/b.txt"))
+ self.assert_(check("/c.txt"))
+ self.assert_(checkcontents("/c.txt"))
+
+ makefile("foo/bar/a.txt")
+ self.assertRaises(DestinationExistsError,self.fs.move,"foo/bar/a.txt","/c.txt")
+ self.assert_(check("foo/bar/a.txt"))
+ self.assert_(check("/c.txt"))
+ self.fs.move("foo/bar/a.txt","/c.txt",overwrite=True)
+ self.assert_(not check("foo/bar/a.txt"))
+ self.assert_(check("/c.txt"))
+
+
+ def test_movedir(self):
+ check = self.check
+ contents = "If the implementation is hard to explain, it's a bad idea."
+ def makefile(path):
+ self.fs.createfile(path,contents)
+
+ self.fs.makedir("a")
+ self.fs.makedir("b")
+ makefile("a/1.txt")
+ makefile("a/2.txt")
+ makefile("a/3.txt")
+ self.fs.makedir("a/foo/bar", recursive=True)
+ makefile("a/foo/bar/baz.txt")
+
+ self.fs.movedir("a", "copy of a")
+
+ self.assert_(check("copy of a/1.txt"))
+ self.assert_(check("copy of a/2.txt"))
+ self.assert_(check("copy of a/3.txt"))
+ self.assert_(check("copy of a/foo/bar/baz.txt"))
+
+ self.assert_(not check("a/1.txt"))
+ self.assert_(not check("a/2.txt"))
+ self.assert_(not check("a/3.txt"))
+ self.assert_(not check("a/foo/bar/baz.txt"))
+ self.assert_(not check("a/foo/bar"))
+ self.assert_(not check("a/foo"))
+ self.assert_(not check("a"))
+
+ self.fs.makedir("a")
+ self.assertRaises(DestinationExistsError,self.fs.movedir,"copy of a","a")
+ self.fs.movedir("copy of a","a",overwrite=True)
+ self.assert_(not check("copy of a"))
+ self.assert_(check("a/1.txt"))
+ self.assert_(check("a/2.txt"))
+ self.assert_(check("a/3.txt"))
+ self.assert_(check("a/foo/bar/baz.txt"))
+
+
+ def test_copyfile(self):
+ check = self.check
+ contents = "If the implementation is hard to explain, it's a bad idea."
+ def makefile(path,contents=contents):
+ self.fs.createfile(path,contents)
+ def checkcontents(path,contents=contents):
+ check_contents = self.fs.getcontents(path)
+ self.assertEqual(check_contents,contents)
+ return contents == check_contents
+
+ self.fs.makedir("foo/bar", recursive=True)
+ makefile("foo/bar/a.txt")
+ self.assert_(check("foo/bar/a.txt"))
+ self.assert_(checkcontents("foo/bar/a.txt"))
+ self.fs.copy("foo/bar/a.txt", "foo/b.txt")
+ self.assert_(check("foo/bar/a.txt"))
+ self.assert_(check("foo/b.txt"))
+ self.assert_(checkcontents("foo/b.txt"))
+
+ self.fs.copy("foo/b.txt", "c.txt")
+ self.assert_(check("foo/b.txt"))
+ self.assert_(check("/c.txt"))
+ self.assert_(checkcontents("/c.txt"))
+
+ makefile("foo/bar/a.txt","different contents")
+ self.assertRaises(DestinationExistsError,self.fs.copy,"foo/bar/a.txt","/c.txt")
+ self.assert_(checkcontents("/c.txt"))
+ self.fs.copy("foo/bar/a.txt","/c.txt",overwrite=True)
+ self.assert_(checkcontents("foo/bar/a.txt","different contents"))
+ self.assert_(checkcontents("/c.txt","different contents"))
+
+
+ def test_copydir(self):
+ check = self.check
+ contents = "If the implementation is hard to explain, it's a bad idea."
+ def makefile(path):
+ self.fs.createfile(path,contents)
+ def checkcontents(path):
+ check_contents = self.fs.getcontents(path)
+ self.assertEqual(check_contents,contents)
+ return contents == check_contents
+
+ self.fs.makedir("a")
+ self.fs.makedir("b")
+ makefile("a/1.txt")
+ makefile("a/2.txt")
+ makefile("a/3.txt")
+ self.fs.makedir("a/foo/bar", recursive=True)
+ makefile("a/foo/bar/baz.txt")
+
+ self.fs.copydir("a", "copy of a")
+ self.assert_(check("copy of a/1.txt"))
+ self.assert_(check("copy of a/2.txt"))
+ self.assert_(check("copy of a/3.txt"))
+ self.assert_(check("copy of a/foo/bar/baz.txt"))
+ checkcontents("copy of a/1.txt")
+
+ self.assert_(check("a/1.txt"))
+ self.assert_(check("a/2.txt"))
+ self.assert_(check("a/3.txt"))
+ self.assert_(check("a/foo/bar/baz.txt"))
+ checkcontents("a/1.txt")
+
+ self.assertRaises(DestinationExistsError,self.fs.copydir,"a","b")
+ self.fs.copydir("a","b",overwrite=True)
+ self.assert_(check("b/1.txt"))
+ self.assert_(check("b/2.txt"))
+ self.assert_(check("b/3.txt"))
+ self.assert_(check("b/foo/bar/baz.txt"))
+ checkcontents("b/1.txt")
+
+ def test_copydir_with_dotfile(self):
+ check = self.check
+ contents = "If the implementation is hard to explain, it's a bad idea."
+ def makefile(path):
+ self.fs.createfile(path,contents)
+
+ self.fs.makedir("a")
+ makefile("a/1.txt")
+ makefile("a/2.txt")
+ makefile("a/.hidden.txt")
+
+ self.fs.copydir("a", "copy of a")
+ self.assert_(check("copy of a/1.txt"))
+ self.assert_(check("copy of a/2.txt"))
+ self.assert_(check("copy of a/.hidden.txt"))
+
+ self.assert_(check("a/1.txt"))
+ self.assert_(check("a/2.txt"))
+ self.assert_(check("a/.hidden.txt"))
+
+ def test_readwriteappendseek(self):
+ def checkcontents(path, check_contents):
+ read_contents = self.fs.getcontents(path)
+ self.assertEqual(read_contents,check_contents)
+ return read_contents == check_contents
+ test_strings = ["Beautiful is better than ugly.",
+ "Explicit is better than implicit.",
+ "Simple is better than complex."]
+ all_strings = "".join(test_strings)
+
+ self.assertRaises(ResourceNotFoundError, self.fs.open, "a.txt", "r")
+ self.assert_(not self.fs.exists("a.txt"))
+ f1 = self.fs.open("a.txt", "wb")
+ pos = 0
+ for s in test_strings:
+ f1.write(s)
+ pos += len(s)
+ self.assertEqual(pos, f1.tell())
+ f1.close()
+ self.assert_(self.fs.exists("a.txt"))
+ self.assert_(checkcontents("a.txt", all_strings))
+
+ f2 = self.fs.open("b.txt", "wb")
+ f2.write(test_strings[0])
+ f2.close()
+ self.assert_(checkcontents("b.txt", test_strings[0]))
+ f3 = self.fs.open("b.txt", "ab")
+ f3.write(test_strings[1])
+ f3.write(test_strings[2])
+ f3.close()
+ self.assert_(checkcontents("b.txt", all_strings))
+ f4 = self.fs.open("b.txt", "wb")
+ f4.write(test_strings[2])
+ f4.close()
+ self.assert_(checkcontents("b.txt", test_strings[2]))
+ f5 = self.fs.open("c.txt", "wb")
+ for s in test_strings:
+ f5.write(s+"\n")
+ f5.close()
+ f6 = self.fs.open("c.txt", "rb")
+ for s, t in zip(f6, test_strings):
+ self.assertEqual(s, t+"\n")
+ f6.close()
+ f7 = self.fs.open("c.txt", "rb")
+ f7.seek(13)
+ word = f7.read(6)
+ self.assertEqual(word, "better")
+ f7.seek(1, os.SEEK_CUR)
+ word = f7.read(4)
+ self.assertEqual(word, "than")
+ f7.seek(-9, os.SEEK_END)
+ word = f7.read(7)
+ self.assertEqual(word, "complex")
+ f7.close()
+ self.assertEqual(self.fs.getcontents("a.txt"), all_strings)
+
+ def test_with_statement(self):
+ # This is a little tricky since 'with' is actually new syntax.
+ # We use eval() to make this method safe for old python versions.
+ import sys
+        if sys.version_info[:2] >= (2, 5):
+ # A successful 'with' statement
+ contents = "testing the with statement"
+ code = "from __future__ import with_statement\n"
+ code += "with self.fs.open('f.txt','w-') as testfile:\n"
+ code += " testfile.write(contents)\n"
+ code += "self.assertEquals(self.fs.getcontents('f.txt'),contents)"
+ code = compile(code,"<string>",'exec')
+ eval(code)
+ # A 'with' statement raising an error
+ contents = "testing the with statement"
+ code = "from __future__ import with_statement\n"
+ code += "with self.fs.open('f.txt','w-') as testfile:\n"
+ code += " testfile.write(contents)\n"
+ code += " raise ValueError\n"
+ code = compile(code,"<string>",'exec')
+ self.assertRaises(ValueError,eval,code,globals(),locals())
+ self.assertEquals(self.fs.getcontents('f.txt'),contents)
+
+ def test_pickling(self):
+ self.fs.createfile("test1","hello world")
+ fs2 = pickle.loads(pickle.dumps(self.fs))
+ self.assert_(fs2.isfile("test1"))
+
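
As the FSTestCases docstring says, the whole suite can be pointed at any FS implementation by mixing it into a unittest.TestCase and setting self.fs in setUp. A minimal sketch using MemoryFS, which needs no teardown:

    import unittest
    from fs.tests import FSTestCases
    from fs.memoryfs import MemoryFS

    class TestMyMemoryFS(unittest.TestCase, FSTestCases):
        """Run the shared testcases against an in-memory filesystem."""

        def setUp(self):
            self.fs = MemoryFS()

    if __name__ == "__main__":
        unittest.main()
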
diff --git a/fs/tests/test_expose.py b/fs/tests/test_expose.py
new file mode 100644
index 0000000..4fefc6b
--- /dev/null
+++ b/fs/tests/test_expose.py
@@ -0,0 +1,119 @@
+"""
+
+ fs.tests.test_expose: testcases for fs.expose and associated FS classes
+
+"""
+
+import unittest
+import sys
+import os, os.path
+import socket
+import threading
+import time
+
+from fs.tests import FSTestCases
+from fs.tempfs import TempFS
+from fs.osfs import OSFS
+from fs.path import *
+
+from fs import rpcfs
+from fs.expose.xmlrpc import RPCFSServer
+class TestRPCFS(unittest.TestCase,FSTestCases):
+
+ def makeServer(self,fs,addr):
+ return RPCFSServer(fs,addr,logRequests=False)
+
+ def startServer(self):
+ port = 8000
+ self.temp_fs = TempFS()
+ self.server = None
+ while not self.server:
+ try:
+ self.server = self.makeServer(self.temp_fs,("localhost",port))
+ except socket.error, e:
+ if e.args[1] == "Address already in use":
+ port += 1
+ else:
+ raise
+ self.server_addr = ("localhost",port)
+ self.serve_more_requests = True
+ self.server_thread = threading.Thread(target=self.runServer)
+ self.server_thread.start()
+
+ def runServer(self):
+        """Run the server, swallowing shutdown-related exceptions."""
+ self.server.socket.settimeout(0.1)
+ try:
+ while self.serve_more_requests:
+ self.server.handle_request()
+ except Exception, e:
+ pass
+
+ def setUp(self):
+ self.startServer()
+ self.fs = rpcfs.RPCFS("http://%s:%d" % self.server_addr)
+
+ def tearDown(self):
+ self.serve_more_requests = False
+ try:
+ self.bump()
+ self.server.server_close()
+ except Exception:
+ pass
+ self.server_thread.join()
+ self.temp_fs.close()
+
+ def bump(self):
+ host, port = self.server_addr
+ for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+ af, socktype, proto, cn, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+ sock.settimeout(1)
+ sock.connect(sa)
+ sock.send("\n")
+ except socket.error, e:
+ pass
+ finally:
+ if sock is not None:
+ sock.close()
+
+
+from fs import sftpfs
+from fs.expose.sftp import BaseSFTPServer
+class TestSFTPFS(TestRPCFS):
+
+ def makeServer(self,fs,addr):
+ return BaseSFTPServer(addr,fs)
+
+ def setUp(self):
+ self.startServer()
+ self.fs = sftpfs.SFTPFS(self.server_addr)
+
+ def bump(self):
+ # paramiko doesn't like being bumped, just wait for it to timeout.
+ # TODO: do this using a paramiko.Transport() connection
+ pass
+
+
+from fs.expose import fuse
+from fs.osfs import OSFS
+class TestFUSE(unittest.TestCase,FSTestCases):
+
+ def setUp(self):
+ self.temp_fs = TempFS()
+ self.temp_fs.makedir("root")
+ self.temp_fs.makedir("mount")
+ self.mounted_fs = self.temp_fs.opendir("root")
+ self.mount_point = self.temp_fs.getsyspath("mount")
+ self.fs = OSFS(self.temp_fs.getsyspath("mount"))
+ self.mount_proc = fuse.mount(self.mounted_fs,self.mount_point)
+
+ def tearDown(self):
+ self.mount_proc.unmount()
+ self.temp_fs.close()
+
+ def check(self,p):
+ return self.mounted_fs.exists(p)
+
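
Outside the test harness, the same pairing of RPCFSServer and RPCFS shown in TestRPCFS can expose a filesystem over XML-RPC. A sketch that follows the testcase's own calls (handle_request loop, localhost address); host and port are illustrative:

    import threading
    from fs.tempfs import TempFS
    from fs.expose.xmlrpc import RPCFSServer
    from fs.rpcfs import RPCFS

    served_fs = TempFS()
    server = RPCFSServer(served_fs, ("localhost", 8010), logRequests=False)

    def serve():
        # Serve requests one at a time, as the testcase's runServer() does.
        while True:
            server.handle_request()

    server_thread = threading.Thread(target=serve)
    server_thread.setDaemon(True)
    server_thread.start()

    remote_fs = RPCFS("http://localhost:8010")
    remote_fs.createfile("hello.txt", "sent over xmlrpc")
    print remote_fs.getcontents("hello.txt")
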
diff --git a/fs/tests/test_fs.py b/fs/tests/test_fs.py
new file mode 100644
index 0000000..49c099f
--- /dev/null
+++ b/fs/tests/test_fs.py
@@ -0,0 +1,88 @@
+"""
+
+ fs.tests.test_fs: testcases for basic FS implementations
+
+"""
+
+from fs.tests import FSTestCases
+
+import unittest
+
+import os
+import shutil
+import tempfile
+
+from fs.path import *
+
+
+from fs import osfs
+class TestOSFS(unittest.TestCase,FSTestCases):
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp("fstest")
+ self.fs = osfs.OSFS(self.temp_dir)
+
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+
+ def check(self, p):
+ return os.path.exists(os.path.join(self.temp_dir, relpath(p)))
+
+
+
+class TestSubFS(unittest.TestCase,FSTestCases):
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp("fstest")
+ self.parent_fs = osfs.OSFS(self.temp_dir)
+ self.parent_fs.makedir("foo/bar", recursive=True)
+ self.fs = self.parent_fs.opendir("foo/bar")
+
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+
+ def check(self, p):
+ p = os.path.join("foo/bar", relpath(p))
+ full_p = os.path.join(self.temp_dir, p)
+ return os.path.exists(full_p)
+
+
+from fs import memoryfs
+class TestMemoryFS(unittest.TestCase,FSTestCases):
+
+ def setUp(self):
+ self.fs = memoryfs.MemoryFS()
+
+
+from fs import mountfs
+class TestMountFS(unittest.TestCase,FSTestCases):
+
+ def setUp(self):
+ self.mount_fs = mountfs.MountFS()
+ self.mem_fs = memoryfs.MemoryFS()
+ self.mount_fs.mountdir("mounted/memfs", self.mem_fs)
+ self.fs = self.mount_fs.opendir("mounted/memfs")
+
+ def tearDown(self):
+ pass
+
+ def check(self, p):
+ return self.mount_fs.exists(os.path.join("mounted/memfs", relpath(p)))
+
+
+from fs import tempfs
+class TestTempFS(unittest.TestCase,FSTestCases):
+
+ def setUp(self):
+ self.fs = tempfs.TempFS()
+
+ def tearDown(self):
+ td = self.fs._temp_dir
+ self.fs.close()
+ self.assert_(not os.path.exists(td))
+
+ def check(self, p):
+ td = self.fs._temp_dir
+ return os.path.exists(os.path.join(td, relpath(p)))
+
+
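
The MountFS and opendir() combination exercised by TestMountFS above is the general composition pattern: a mounted filesystem is addressed through a sandboxed sub-filesystem. A brief sketch using the same calls the tests make:

    from fs.memoryfs import MemoryFS
    from fs.mountfs import MountFS

    mem_fs = MemoryFS()
    mount_fs = MountFS()
    mount_fs.mountdir("mounted/memfs", mem_fs)

    sub_fs = mount_fs.opendir("mounted/memfs")   # sandboxed view rooted at the mount point
    sub_fs.createfile("a.txt", "stored in the MemoryFS")
    print mount_fs.getcontents("mounted/memfs/a.txt")
    print mem_fs.exists("a.txt")   # -> True, the write went to the mounted fs
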
diff --git a/fs/tests/test_objecttree.py b/fs/tests/test_objecttree.py
new file mode 100644
index 0000000..b8d9a12
--- /dev/null
+++ b/fs/tests/test_objecttree.py
@@ -0,0 +1,47 @@
+"""
+
+  fs.tests.test_objecttree: testcases for the fs objecttree module
+
+"""
+
+
+import unittest
+
+import fs.tests
+from fs import objecttree
+
+class TestObjectTree(unittest.TestCase):
+ """Testcases for the ObjectTree class."""
+
+ def test_getset(self):
+ ot = objecttree.ObjectTree()
+ ot['foo'] = "bar"
+ self.assertEqual(ot['foo'], 'bar')
+
+ ot = objecttree.ObjectTree()
+ ot['foo/bar'] = "baz"
+ self.assertEqual(ot['foo'], {'bar':'baz'})
+ self.assertEqual(ot['foo/bar'], 'baz')
+
+ del ot['foo/bar']
+ self.assertEqual(ot['foo'], {})
+
+ ot = objecttree.ObjectTree()
+ ot['a/b/c'] = "A"
+ ot['a/b/d'] = "B"
+ ot['a/b/e'] = "C"
+ ot['a/b/f'] = "D"
+ self.assertEqual(sorted(ot['a/b'].values()), ['A', 'B', 'C', 'D'])
+ self.assert_(ot.get('a/b/x', -1) == -1)
+
+ self.assert_('a/b/c' in ot)
+ self.assert_('a/b/x' not in ot)
+ self.assert_(ot.isobject('a/b/c'))
+ self.assert_(ot.isobject('a/b/d'))
+ self.assert_(not ot.isobject('a/b'))
+
+ left, object, right = ot.partialget('a/b/e/f/g')
+ self.assertEqual(left, "a/b/e")
+ self.assertEqual(object, "C")
+ self.assertEqual(right, "f/g")
+
diff --git a/fs/tests/test_path.py b/fs/tests/test_path.py
new file mode 100644
index 0000000..8152eaa
--- /dev/null
+++ b/fs/tests/test_path.py
@@ -0,0 +1,96 @@
+"""
+
+ fs.tests.test_path: testcases for the fs path functions
+
+"""
+
+
+import unittest
+import fs.tests
+
+from fs.path import *
+
+class TestPathFunctions(unittest.TestCase):
+ """Testcases for FS path functions."""
+
+ def test_normpath(self):
+ tests = [ ("\\a\\b\\c", "/a/b/c"),
+ ("", ""),
+ ("/a/b/c", "/a/b/c"),
+ ("a/b/c", "a/b/c"),
+ ("a/b/../c/", "a/c"),
+ ("/","/"),
+ ]
+ for path, result in tests:
+ self.assertEqual(normpath(path), result)
+
+ def test_pathjoin(self):
+ tests = [ ("", "a", "a"),
+ ("a", "a", "a/a"),
+ ("a/b", "../c", "a/c"),
+ ("a/b/../c", "d", "a/c/d"),
+ ("/a/b/c", "d", "/a/b/c/d"),
+ ("/a/b/c", "../../../d", "/d"),
+ ("a", "b", "c", "a/b/c"),
+ ("a/b/c", "../d", "c", "a/b/d/c"),
+ ("a/b/c", "../d", "/a", "/a"),
+ ("aaa", "bbb/ccc", "aaa/bbb/ccc"),
+ ("aaa", "bbb\ccc", "aaa/bbb/ccc"),
+ ("aaa", "bbb", "ccc", "/aaa", "eee", "/aaa/eee"),
+ ("a/b", "./d", "e", "a/b/d/e"),
+ ("/", "/", "/"),
+ ("/", "", "/"),
+ ]
+ for testpaths in tests:
+ paths = testpaths[:-1]
+ result = testpaths[-1]
+            self.assertEqual(pathjoin(*paths), result)
+
+        self.assertRaises(ValueError, pathjoin, "../")
+        self.assertRaises(ValueError, pathjoin, "./../")
+        self.assertRaises(ValueError, pathjoin, "a/b", "../../..")
+        self.assertRaises(ValueError, pathjoin, "a/b/../../../d")
+
+ def test_relpath(self):
+ tests = [ ("/a/b", "a/b"),
+ ("a/b", "a/b"),
+ ("/", "") ]
+
+ for path, result in tests:
+            self.assertEqual(relpath(path), result)
+
+ def test_abspath(self):
+ tests = [ ("/a/b", "/a/b"),
+ ("a/b", "/a/b"),
+ ("/", "/") ]
+
+ for path, result in tests:
+            self.assertEqual(abspath(path), result)
+
+ def test_iteratepath(self):
+ tests = [ ("a/b", ["a", "b"]),
+ ("", [] ),
+ ("aaa/bbb/ccc", ["aaa", "bbb", "ccc"]),
+ ("a/b/c/../d", ["a", "b", "d"]) ]
+
+ for path, results in tests:
+ for path_component, expected in zip(iteratepath(path), results):
+ self.assertEqual(path_component, expected)
+
+ self.assertEqual(list(iteratepath("a/b/c/d", 1)), ["a", "b/c/d"])
+ self.assertEqual(list(iteratepath("a/b/c/d", 2)), ["a", "b", "c/d"])
+
+ def test_pathsplit(self):
+ tests = [ ("a/b", ("a", "b")),
+ ("a/b/c", ("a/b", "c")),
+ ("a", ("", "a")),
+ ("", ("", "")),
+ ("/", ("", "")),
+ ("foo/bar", ("foo", "bar")),
+ ("foo/bar/baz", ("foo/bar", "baz")),
+ ]
+ for path, result in tests:
+            self.assertEqual(pathsplit(path), result)
+
+
+
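
The path helpers tested above are plain functions; the test tables reduce to simple calls like these:

    from fs.path import normpath, pathjoin, pathsplit, abspath, relpath

    print normpath("a/b/../c/")       # -> "a/c"
    print pathjoin("a/b", "../c")     # -> "a/c"
    print pathsplit("foo/bar/baz")    # -> ("foo/bar", "baz")
    print abspath("a/b")              # -> "/a/b"
    print relpath("/a/b")             # -> "a/b"
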
diff --git a/fs/tests/test_s3fs.py b/fs/tests/test_s3fs.py
new file mode 100644
index 0000000..74a4e9a
--- /dev/null
+++ b/fs/tests/test_s3fs.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+
+ fs.tests.test_s3fs: testcases for the S3FS module
+
+These tests are set up to be skipped by default, since they're very slow,
+require a valid AWS account, and cost money. You'll have to set the '__test__'
+attribute to True on the TestS3FS class to get them running.
+
+"""
+
+import unittest
+
+from fs.tests import FSTestCases
+from fs.path import *
+
+from fs import s3fs
+class TestS3FS(unittest.TestCase,FSTestCases):
+
+ # Disable the tests by default
+ __test__ = False
+
+ bucket = "test-s3fs.rfk.id.au"
+
+ def setUp(self):
+ self.fs = s3fs.S3FS(self.bucket)
+ self._clear()
+
+ def _clear(self):
+ for (path,files) in self.fs.walk(search="depth"):
+ for fn in files:
+ self.fs.remove(pathjoin(path,fn))
+ if path and path != "/":
+ self.fs.removedir(path)
+
+ def tearDown(self):
+ self._clear()
+ for k in self.fs._s3bukt.list():
+ self.fs._s3bukt.delete_key(k)
+ self.fs._s3conn.delete_bucket(self.bucket)
+
+
+
+class TestS3FS_prefix(TestS3FS):
+
+ def setUp(self):
+ self.fs = s3fs.S3FS(self.bucket,"/unittest/files")
+ self._clear()
+
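
To actually run the S3 suite, re-enable it against a bucket you control; a hedged sketch (the bucket name is illustrative, and credentials are assumed to come from whatever configuration the underlying S3 library normally reads):

    from fs.tests.test_s3fs import TestS3FS, TestS3FS_prefix

    TestS3FS.__test__ = True                # opt in, as the docstring above warns
    TestS3FS.bucket = "my-own-test-bucket"  # illustrative bucket name
    TestS3FS_prefix.__test__ = True
    # then run the module with nose, e.g.: nosetests fs.tests.test_s3fs
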
diff --git a/fs/tests/test_xattr.py b/fs/tests/test_xattr.py
new file mode 100644
index 0000000..5eaa253
--- /dev/null
+++ b/fs/tests/test_xattr.py
@@ -0,0 +1,116 @@
+"""
+
+ fs.tests.test_xattr: testcases for extended attribute support
+
+"""
+
+import unittest
+import os
+
+from fs.path import *
+from fs.errors import *
+from fs.tests import FSTestCases
+
+
+class XAttrTestCases:
+ """Testcases for filesystems providing extended attribute support.
+
+ This class should be used as a mixin to the unittest.TestCase class
+ for filesystems that provide extended attribute support.
+ """
+
+ def test_getsetdel(self):
+ def do_getsetdel(p):
+ self.assertEqual(self.fs.getxattr(p,"xattr1"),None)
+ self.fs.setxattr(p,"xattr1","value1")
+ self.assertEqual(self.fs.getxattr(p,"xattr1"),"value1")
+ self.fs.delxattr(p,"xattr1")
+ self.assertEqual(self.fs.getxattr(p,"xattr1"),None)
+ self.fs.createfile("test.txt","hello")
+ do_getsetdel("test.txt")
+ self.assertRaises(ResourceNotFoundError,self.fs.getxattr,"test2.txt","xattr1")
+ self.fs.makedir("mystuff")
+ self.fs.createfile("/mystuff/test.txt","")
+ do_getsetdel("mystuff")
+ do_getsetdel("mystuff/test.txt")
+
+ def test_list_xattrs(self):
+ def do_list(p):
+ self.assertEquals(sorted(self.fs.listxattrs(p)),[])
+ self.fs.setxattr(p,"xattr1","value1")
+ self.assertEquals(sorted(self.fs.listxattrs(p)),["xattr1"])
+ self.fs.setxattr(p,"attr2","value2")
+ self.assertEquals(sorted(self.fs.listxattrs(p)),["attr2","xattr1"])
+ self.fs.delxattr(p,"xattr1")
+ self.assertEquals(sorted(self.fs.listxattrs(p)),["attr2"])
+ self.fs.delxattr(p,"attr2")
+ self.assertEquals(sorted(self.fs.listxattrs(p)),[])
+ self.fs.createfile("test.txt","hello")
+ do_list("test.txt")
+ self.fs.makedir("mystuff")
+ self.fs.createfile("/mystuff/test.txt","")
+ do_list("mystuff")
+ do_list("mystuff/test.txt")
+
+ def test_copy_xattrs(self):
+ self.fs.createfile("a.txt","content")
+ self.fs.setxattr("a.txt","myattr","myvalue")
+ self.fs.setxattr("a.txt","testattr","testvalue")
+ self.fs.makedir("stuff")
+ self.fs.copy("a.txt","stuff/a.txt")
+ self.assertTrue(self.fs.exists("stuff/a.txt"))
+ self.assertEquals(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue")
+ self.assertEquals(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue")
+ self.assertEquals(self.fs.getxattr("a.txt","myattr"),"myvalue")
+ self.assertEquals(self.fs.getxattr("a.txt","testattr"),"testvalue")
+ self.fs.setxattr("stuff","dirattr","a directory")
+ self.fs.copydir("stuff","stuff2")
+ self.assertEquals(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue")
+ self.assertEquals(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue")
+ self.assertEquals(self.fs.getxattr("stuff2","dirattr"),"a directory")
+ self.assertEquals(self.fs.getxattr("stuff","dirattr"),"a directory")
+
+ def test_move_xattrs(self):
+ self.fs.createfile("a.txt","content")
+ self.fs.setxattr("a.txt","myattr","myvalue")
+ self.fs.setxattr("a.txt","testattr","testvalue")
+ self.fs.makedir("stuff")
+ self.fs.move("a.txt","stuff/a.txt")
+ self.assertTrue(self.fs.exists("stuff/a.txt"))
+ self.assertEquals(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue")
+ self.assertEquals(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue")
+ self.fs.setxattr("stuff","dirattr","a directory")
+ self.fs.movedir("stuff","stuff2")
+ self.assertEquals(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue")
+ self.assertEquals(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue")
+ self.assertEquals(self.fs.getxattr("stuff2","dirattr"),"a directory")
+
+
+
+from fs.xattrs import ensure_xattrs
+
+from fs import tempfs
+class TestXAttr_TempFS(unittest.TestCase,FSTestCases,XAttrTestCases):
+
+ def setUp(self):
+ self.fs = ensure_xattrs(tempfs.TempFS())
+
+ def tearDown(self):
+ td = self.fs._temp_dir
+ self.fs.close()
+ self.assert_(not os.path.exists(td))
+
+ def check(self, p):
+ td = self.fs._temp_dir
+ return os.path.exists(os.path.join(td, relpath(p)))
+
+
+from fs import memoryfs
+class TestXAttr_MemoryFS(unittest.TestCase,FSTestCases,XAttrTestCases):
+
+ def setUp(self):
+ self.fs = ensure_xattrs(memoryfs.MemoryFS())
+
+ def check(self, p):
+ return self.fs.exists(p)
+
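
The ensure_xattrs() helper used in both fixtures above returns a filesystem on which the get/set/del/list xattr calls are available. A small sketch restricted to the calls the tests exercise:

    from fs.memoryfs import MemoryFS
    from fs.xattrs import ensure_xattrs

    xfs = ensure_xattrs(MemoryFS())
    xfs.createfile("report.txt", "quarterly numbers")
    xfs.setxattr("report.txt", "author", "ryan")
    print xfs.getxattr("report.txt", "author")    # -> "ryan"
    print sorted(xfs.listxattrs("report.txt"))    # -> ["author"]
    xfs.delxattr("report.txt", "author")
    print xfs.getxattr("report.txt", "author")    # -> None
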
diff --git a/fs/tests/test_zipfs.py b/fs/tests/test_zipfs.py
new file mode 100644
index 0000000..fec01a6
--- /dev/null
+++ b/fs/tests/test_zipfs.py
@@ -0,0 +1,153 @@
+"""
+
+ fs.tests.test_zipfs: testcases for the ZipFS class
+
+"""
+
+import unittest
+import os
+import random
+import zipfile
+import tempfile
+
+import fs.tests
+from fs.path import *
+
+
+from fs import zipfs
+class TestReadZipFS(unittest.TestCase):
+
+ def setUp(self):
+ self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
+ self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
+
+ self.zf = zipfile.ZipFile(self.temp_filename, "w")
+ zf = self.zf
+ zf.writestr("a.txt", "Hello, World!")
+ zf.writestr("b.txt", "b")
+ zf.writestr("1.txt", "1")
+ zf.writestr("foo/bar/baz.txt", "baz")
+ zf.writestr("foo/second.txt", "hai")
+ zf.close()
+ self.fs = zipfs.ZipFS(self.temp_filename, "r")
+
+ def tearDown(self):
+ self.fs.close()
+ os.remove(self.temp_filename)
+
+ def check(self, p):
+ try:
+            self.zf.getinfo(p)
+            return True
+        except KeyError:
+ return False
+
+ def test_reads(self):
+ def read_contents(path):
+ f = self.fs.open(path)
+ contents = f.read()
+ return contents
+ def check_contents(path, expected):
+ self.assert_(read_contents(path)==expected)
+ check_contents("a.txt", "Hello, World!")
+ check_contents("1.txt", "1")
+ check_contents("foo/bar/baz.txt", "baz")
+
+ def test_getcontents(self):
+ def read_contents(path):
+ return self.fs.getcontents(path)
+ def check_contents(path, expected):
+ self.assert_(read_contents(path)==expected)
+ check_contents("a.txt", "Hello, World!")
+ check_contents("1.txt", "1")
+ check_contents("foo/bar/baz.txt", "baz")
+
+ def test_is(self):
+ self.assert_(self.fs.isfile('a.txt'))
+ self.assert_(self.fs.isfile('1.txt'))
+ self.assert_(self.fs.isfile('foo/bar/baz.txt'))
+ self.assert_(self.fs.isdir('foo'))
+ self.assert_(self.fs.isdir('foo/bar'))
+ self.assert_(self.fs.exists('a.txt'))
+ self.assert_(self.fs.exists('1.txt'))
+ self.assert_(self.fs.exists('foo/bar/baz.txt'))
+ self.assert_(self.fs.exists('foo'))
+ self.assert_(self.fs.exists('foo/bar'))
+
+ def test_listdir(self):
+
+ def check_listing(path, expected):
+ dir_list = self.fs.listdir(path)
+ self.assert_(sorted(dir_list) == sorted(expected))
+ check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt'])
+ check_listing('foo', ['second.txt', 'bar'])
+ check_listing('foo/bar', ['baz.txt'])
+
+
+class TestWriteZipFS(unittest.TestCase):
+
+ def setUp(self):
+ self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
+ self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
+
+ zip_fs = zipfs.ZipFS(self.temp_filename, 'w')
+
+ def makefile(filename, contents):
+ if dirname(filename):
+ zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
+ f = zip_fs.open(filename, 'w')
+ f.write(contents)
+ f.close()
+
+ makefile("a.txt", "Hello, World!")
+ makefile("b.txt", "b")
+ makefile("foo/bar/baz.txt", "baz")
+ makefile("foo/second.txt", "hai")
+
+ zip_fs.close()
+
+ def tearDown(self):
+ os.remove(self.temp_filename)
+
+ def test_valid(self):
+ zf = zipfile.ZipFile(self.temp_filename, "r")
+ self.assert_(zf.testzip() is None)
+ zf.close()
+
+ def test_creation(self):
+ zf = zipfile.ZipFile(self.temp_filename, "r")
+ def check_contents(filename, contents):
+ zcontents = zf.read(filename)
+ self.assertEqual(contents, zcontents)
+ check_contents("a.txt", "Hello, World!")
+ check_contents("b.txt", "b")
+ check_contents("foo/bar/baz.txt", "baz")
+ check_contents("foo/second.txt", "hai")
+
+
+class TestAppendZipFS(TestWriteZipFS):
+
+ def setUp(self):
+ self.temp_filename = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(6))+".zip"
+ self.temp_filename = os.path.join(tempfile.gettempdir(), self.temp_filename)
+
+ zip_fs = zipfs.ZipFS(self.temp_filename, 'w')
+
+ def makefile(filename, contents):
+ if dirname(filename):
+ zip_fs.makedir(dirname(filename), recursive=True, allow_recreate=True)
+ f = zip_fs.open(filename, 'w')
+ f.write(contents)
+ f.close()
+
+ makefile("a.txt", "Hello, World!")
+ makefile("b.txt", "b")
+
+ zip_fs.close()
+ zip_fs = zipfs.ZipFS(self.temp_filename, 'a')
+
+ makefile("foo/bar/baz.txt", "baz")
+ makefile("foo/second.txt", "hai")
+
+ zip_fs.close()
+
diff --git a/fs/utils.py b/fs/utils.py
index dd863eb..073c656 100644
--- a/fs/utils.py
+++ b/fs/utils.py
@@ -1,4 +1,8 @@
-"""Contains a number of high level utility functions for working with FS objects."""
+"""
+
+ fs.utils: high-level utility functions for working with FS objects.
+
+"""
import shutil
from mountfs import MountFS
@@ -82,6 +86,7 @@ def movefile(src_fs, src_path, dst_fs, dst_path, chunk_size=16384):
if dst is not None:
dst.close()
+
def movedir(fs1, fs2, ignore_errors=False, chunk_size=16384):
"""Moves contents of a directory from one filesystem to another.
@@ -103,6 +108,7 @@ def movedir(fs1, fs2, ignore_errors=False, chunk_size=16384):
mount_fs.mount('dir2', fs2)
mount_fs.movedir('dir1', 'dir2', ignore_errors=ignore_errors, chunk_size=chunk_size)
+
def copydir(fs1, fs2, ignore_errors=False, chunk_size=16384):
"""Copies contents of a directory from one filesystem to another.
@@ -124,11 +130,13 @@ def copydir(fs1, fs2, ignore_errors=False, chunk_size=16384):
mount_fs.mount('dir2', fs2)
mount_fs.copydir('dir1', 'dir2', ignore_errors=ignore_errors, chunk_size=chunk_size)
-def countbytes(count_fs):
+
+def countbytes(fs):
"""Returns the total number of bytes contained within files in a filesystem.
- count_fs -- A filesystem object
+ fs -- A filesystem object
"""
- total = sum(count_fs.getsize(f) for f in count_fs.walkfiles())
+ total = sum(fs.getsize(f) for f in fs.walkfiles())
return total
+
diff --git a/fs/wrapfs.py b/fs/wrapfs.py
new file mode 100644
index 0000000..daf91d1
--- /dev/null
+++ b/fs/wrapfs.py
@@ -0,0 +1,223 @@
+"""
+
+ fs.wrapfs: class for wrapping an existing FS object with added functionality
+
+This module provides the class WrapFS, a base class for objects that wrap
+another FS object and provide some transformation of its contents. It could
+be very useful for implementing e.g. transparent encryption or compression
+services.
+
+As a simple example of how this class could be used, the 'HideDotFiles' class
+implements the standard unix shell functionality of hiding dot files in
+directory listings.
+
+"""
+
+import fnmatch
+
+from fs.base import FS
+from fs.path import *
+
+
+class WrapFS(FS):
+ """FS that wraps another FS, providing translation etc.
+
+ This class allows simple transforms to be applied to the names
+ and/or contents of files in an FS. It could be used to implement
+ e.g. compression or encryption in a relatively painless manner.
+
+ The following methods can be overridden to control how files are
+ accessed in the underlying FS object:
+
+ _file_wrap(file,mode): called for each file that is opened from
+ the underlying FS; may return a modified
+ file-like object.
+
+ _encode(path): encode a path for access in the underlying FS
+
+ _decode(path): decode a path from the underlying FS
+
+ If the required path translation proceeds one component at a time,
+ it may be simpler to override the _encode_name() and _decode_name()
+ methods.
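+
+    As an illustrative sketch (the class name here is hypothetical), a
+    wrapper that stores each path component reversed in the underlying
+    FS needs only those two hooks, since reversal is its own inverse:
+
+        class ReverseNamesFS(WrapFS):
+            def _encode_name(self, name):
+                # name as seen by callers -> name stored in the wrapped FS
+                return name[::-1]
+            def _decode_name(self, name):
+                # name stored in the wrapped FS -> name seen by callers
+                return name[::-1]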
+ """
+
+ def __init__(self,fs):
+ super(WrapFS,self).__init__()
+ self.wrapped_fs = fs
+
+ def _file_wrap(self,f,mode):
+ """Apply wrapping to an opened file."""
+ return f
+
+ def _encode_name(self,name):
+ """Encode path component for the underlying FS."""
+ return name
+
+ def _decode_name(self,name):
+ """Decode path component from the underlying FS."""
+ return name
+
+ def _encode(self,path):
+ """Encode path for the underlying FS."""
+ names = path.split("/")
+ e_names = []
+ for name in names:
+ if name == "":
+ e_names.append("")
+ else:
+ e_names.append(self._encode_name(name))
+ return "/".join(e_names)
+
+ def _decode(self,path):
+ """Decode path from the underlying FS."""
+ names = path.split("/")
+ d_names = []
+ for name in names:
+ if name == "":
+ d_names.append("")
+ else:
+ d_names.append(self._decode_name(name))
+ return "/".join(d_names)
+
+ def _adjust_mode(self,mode):
+ """Adjust the mode used to open a file in the underlying FS.
+
+ This method takes the mode given when opening a file, and should
+ return a two-tuple giving the mode to be used in this FS as first
+ item, and the mode to be used in the underlying FS as the second.
+
+ An example of why this is needed is a WrapFS subclass that does
+ transparent file compression - in this case files from the wrapped
+ FS cannot be opened in append mode.
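+
+        As an illustrative sketch, such a compressing subclass might keep
+        the caller's mode but force binary mode on the wrapped FS so the
+        compressed bytes are not mangled:
+
+            def _adjust_mode(self, mode):
+                if "b" not in mode:
+                    return (mode, mode + "b")
+                return (mode, mode)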
+ """
+ return (mode,mode)
+
+ def getsyspath(self,path,allow_none=False):
+ return self.wrapped_fs.getsyspath(self._encode(path),allow_none)
+
+ def hassyspath(self,path):
+ return self.wrapped_fs.hassyspath(self._encode(path))
+
+ def open(self,path,mode="r"):
+ (mode,wmode) = self._adjust_mode(mode)
+ f = self.wrapped_fs.open(self._encode(path),wmode)
+ return self._file_wrap(f,mode)
+
+ def exists(self,path):
+ return self.wrapped_fs.exists(self._encode(path))
+
+ def isdir(self,path):
+ return self.wrapped_fs.isdir(self._encode(path))
+
+ def isfile(self,path):
+ return self.wrapped_fs.isfile(self._encode(path))
+
+ def listdir(self,path="",wildcard=None,full=False,absolute=False,dirs_only=False,files_only=False):
+ entries = []
+ for name in self.wrapped_fs.listdir(self._encode(path),wildcard=None,full=full,absolute=absolute,dirs_only=dirs_only,files_only=files_only):
+ entries.append(self._decode(name))
+ return self._listdir_helper(path,entries,wildcard=wildcard,full=False,absolute=False,dirs_only=False,files_only=False)
+
+ def makedir(self,path,*args,**kwds):
+ return self.wrapped_fs.makedir(self._encode(path),*args,**kwds)
+
+ def remove(self,path):
+ return self.wrapped_fs.remove(self._encode(path))
+
+ def removedir(self,path,*args,**kwds):
+ return self.wrapped_fs.removedir(self._encode(path),*args,**kwds)
+
+ def rename(self,src,dst):
+ return self.wrapped_fs.rename(self._encode(src),self._encode(dst))
+
+ def getinfo(self,path):
+ return self.wrapped_fs.getinfo(self._encode(path))
+
+ def desc(self,path):
+ return self.wrapped_fs.desc(self._encode(path))
+
+ def copy(self,src,dst,overwrite=False,chunk_size=16384):
+ return self.wrapped_fs.copy(self._encode(src),self._encode(dst),overwrite,chunk_size)
+
+ def move(self,src,dst,overwrite=False,chunk_size=16384):
+ return self.wrapped_fs.move(self._encode(src),self._encode(dst),overwrite,chunk_size)
+
+ def movedir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384):
+ return self.wrapped_fs.movedir(self._encode(src),self._encode(dst),overwrite,ignore_errors,chunk_size)
+
+ def copydir(self,src,dst,overwrite=False,ignore_errors=False,chunk_size=16384):
+ return self.wrapped_fs.copydir(self._encode(src),self._encode(dst),overwrite,ignore_errors,chunk_size)
+
+ def __getattr__(self,attr):
+ return getattr(self.wrapped_fs,attr)
+
+ def close(self):
+ if hasattr(self.wrapped_fs,"close"):
+ self.wrapped_fs.close()
+
+
+class HideDotFiles(WrapFS):
+ """FS wrapper class that hides dot-files in directory listings.
+
+ The listdir() function takes an extra keyword argument 'hidden'
+    indicating whether hidden dot-files should be included in the output.
+ It is False by default.
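+
+    A minimal usage sketch (assuming an OSFS rooted at some directory):
+
+        fs = HideDotFiles(OSFS("/home/someuser"))
+        fs.listdir()              # dot-files omitted
+        fs.listdir(hidden=True)   # dot-files included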
+ """
+
+ def is_hidden(self,path):
+ """Check whether the given path should be hidden."""
+ return path and basename(path)[0] == "."
+
+ def _encode(self,path):
+ return path
+
+ def _decode(self,path):
+ return path
+
+ def listdir(self,path="",**kwds):
+        hidden = kwds.pop("hidden",False)
+ entries = self.wrapped_fs.listdir(path,**kwds)
+ if not hidden:
+ entries = [e for e in entries if not self.is_hidden(e)]
+ return entries
+
+ def walk(self, path="/", wildcard=None, dir_wildcard=None, search="breadth",hidden=False):
+ if search == "breadth":
+ dirs = [path]
+ while dirs:
+ current_path = dirs.pop()
+ paths = []
+ for filename in self.listdir(current_path,hidden=hidden):
+ path = pathjoin(current_path, filename)
+ if self.isdir(path):
+ if dir_wildcard is not None:
+                        if fnmatch.fnmatch(path, dir_wildcard):
+ dirs.append(path)
+ else:
+ dirs.append(path)
+ else:
+ if wildcard is not None:
+ if fnmatch.fnmatch(path, wildcard):
+ paths.append(filename)
+ else:
+ paths.append(filename)
+ yield (current_path, paths)
+ elif search == "depth":
+ def recurse(recurse_path):
+ for path in self.listdir(recurse_path, wildcard=dir_wildcard, full=True, dirs_only=True,hidden=hidden):
+ for p in recurse(path):
+ yield p
+ yield (recurse_path, self.listdir(recurse_path, wildcard=wildcard, files_only=True,hidden=hidden))
+ for p in recurse(path):
+ yield p
+ else:
+ raise ValueError("Search should be 'breadth' or 'depth'")
+
+
+ def isdirempty(self, path):
+ path = normpath(path)
+ iter_dir = iter(self.listdir(path,hidden=True))
+ try:
+ iter_dir.next()
+ except StopIteration:
+ return True
+ return False
+
diff --git a/fs/xattrs.py b/fs/xattrs.py
new file mode 100644
index 0000000..27a8abc
--- /dev/null
+++ b/fs/xattrs.py
@@ -0,0 +1,159 @@
+"""
+
+ fs.xattrs: extended attribute support for FS
+
+This module defines a standard interface for FS subclasses that want to
+support extended file attributes, and a WrapFS subclass that can simulate
+extended attributes on top of an ordinary FS.
+
+FS instances offering extended attribute support must provide the following
+methods:
+
+ getxattr(path,name) - get the named attribute for the given path,
+ or None if it does not exist
+ setxattr(path,name,value) - set the named attribute for the given path
+ to the given value
+ delxattr(path,name) - delete the named attribute for the given path,
+ raising KeyError if it does not exist
+ listxattrs(path) - iterator over all stored attribute names for
+ the given path
+
+If extended attributes are required by FS-consuming code, it should use the
+function 'ensure_xattrs'. This will interrogate an FS object to determine
+if it has native xattr support, and return a wrapped version if it does not.
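+
+A minimal usage sketch, mirroring what the test suite does with a TempFS:
+
+    from fs import tempfs
+    from fs.xattrs import ensure_xattrs
+
+    fs = ensure_xattrs(tempfs.TempFS())
+    fs.createfile("a.txt", "content")
+    fs.setxattr("a.txt", "myattr", "myvalue")
+    assert fs.getxattr("a.txt", "myattr") == "myvalue"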
+"""
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from fs.path import *
+from fs.errors import *
+from fs.wrapfs import WrapFS
+
+
+def ensure_xattrs(fs):
+ """Ensure that the given FS supports xattrs, simulating them if required.
+
+ Given an FS object, this function returns an equivalent FS that has support
+ for extended attributes. This may be the original object if they are
+supported natively, or a wrapper class if they must be simulated.
+ """
+ try:
+ # This attr doesn't have to exist, None should be returned by default
+ fs.getxattr("/","testingx-xattr")
+ return fs
+ except Exception:
+ return SimulateXAttr(fs)
+
+
+class SimulateXAttr(WrapFS):
+ """FS wrapper class that simulates xattr support.
+
+ The following methods are supplied for manipulating extended attributes:
+     * listxattrs: list all extended attribute names for a path
+ * getxattr: get an xattr of a path by name
+ * setxattr: set an xattr of a path by name
+ * delxattr: delete an xattr of a path by name
+
+ For each file in the underlying FS, this class maintains a corresponding
+ '.xattrs.FILENAME' file containing its extended attributes. Extended
+ attributes of a directory are stored in the file '.xattrs' within the
+ directory itself.
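+
+    For example (illustrative sketch, using the in-memory FS from this
+    package):
+
+        from fs.memoryfs import MemoryFS
+        fs = SimulateXAttr(MemoryFS())
+        fs.createfile("a.txt", "data")
+        fs.setxattr("a.txt", "colour", "blue")
+        # the wrapped MemoryFS now holds both 'a.txt' and '.xattrs.a.txt'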
+ """
+
+ def _get_attr_path(self, path):
+ """Get the path of the file containing xattrs for the given path."""
+ if self.wrapped_fs.isdir(path):
+ attr_path = pathjoin(path, '.xattrs')
+ else:
+ dir_path, file_name = pathsplit(path)
+ attr_path = pathjoin(dir_path, '.xattrs.'+file_name)
+ return attr_path
+
+ def _is_attr_path(self, path):
+ """Check whether the given path references an xattrs file."""
+ _,name = pathsplit(path)
+ if name.startswith(".xattrs"):
+ return True
+ return False
+
+ def _get_attr_dict(self, path):
+ """Retrieve the xattr dictionary for the given path."""
+ attr_path = self._get_attr_path(path)
+ if self.wrapped_fs.exists(attr_path):
+ return pickle.loads(self.wrapped_fs.getcontents(attr_path))
+ else:
+ return {}
+
+ def _set_attr_dict(self, path, attrs):
+ """Store the xattr dictionary for the given path."""
+ attr_path = self._get_attr_path(path)
+ self.wrapped_fs.setcontents(attr_path, pickle.dumps(attrs))
+
+ def setxattr(self, path, key, value):
+ """Set an extended attribute on the given path."""
+ if not self.exists(path):
+ raise ResourceNotFoundError(path)
+ attrs = self._get_attr_dict(path)
+ attrs[key] = str(value)
+ self._set_attr_dict(path, attrs)
+
+ def getxattr(self, path, key, default=None):
+ """Retrieve an extended attribute for the given path."""
+ if not self.exists(path):
+ raise ResourceNotFoundError(path)
+ attrs = self._get_attr_dict(path)
+ return attrs.get(key, default)
+
+    def delxattr(self, path, key):
+        """Delete an extended attribute from the given path.
+
+        Raises KeyError if the named attribute does not exist.
+        """
+        if not self.exists(path):
+            raise ResourceNotFoundError(path)
+        attrs = self._get_attr_dict(path)
+        del attrs[key]
+        self._set_attr_dict(path, attrs)
+
+ def listxattrs(self,path):
+ """List all the extended attribute keys set on the given path."""
+ if not self.exists(path):
+ raise ResourceNotFoundError(path)
+ return self._get_attr_dict(path).keys()
+
+ def _encode(self,path):
+ """Prevent requests for operations on .xattr files."""
+ if self._is_attr_path(path):
+ raise PathError(path,msg="Paths cannot contain '.xattrs': %(path)s")
+ return path
+
+ def _decode(self,path):
+ return path
+
+ def listdir(self,path="",**kwds):
+ """Prevent .xattr from appearing in listings."""
+ entries = self.wrapped_fs.listdir(path,**kwds)
+ return [e for e in entries if not self._is_attr_path(e)]
+
+ def copy(self,src,dst,**kwds):
+ """Ensure xattrs are copied when copying a file."""
+ self.wrapped_fs.copy(self._encode(src),self._encode(dst),**kwds)
+ s_attr_file = self._get_attr_path(src)
+ d_attr_file = self._get_attr_path(dst)
+ try:
+ self.wrapped_fs.copy(s_attr_file,d_attr_file,overwrite=True)
+        except ResourceNotFoundError:
+ pass
+
+ def move(self,src,dst,**kwds):
+ """Ensure xattrs are preserved when moving a file."""
+ self.wrapped_fs.move(self._encode(src),self._encode(dst),**kwds)
+ s_attr_file = self._get_attr_path(src)
+ d_attr_file = self._get_attr_path(dst)
+ try:
+ self.wrapped_fs.move(s_attr_file,d_attr_file,overwrite=True)
+ except ResourceNotFoundError:
+ pass
+
diff --git a/fs/zipfs.py b/fs/zipfs.py
index 90f84d4..2d1bdec 100644
--- a/fs/zipfs.py
+++ b/fs/zipfs.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python
-from base import *
-from helpers import *
+from fs.base import *
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
from memoryfs import MemoryFS
@@ -50,17 +49,17 @@ class ZipFS(FS):
"""A FileSystem that represents a zip file."""
- def __init__(self, zip_file, mode="r", compression="deflated", allowZip64=False, thread_syncronize=True):
+ def __init__(self, zip_file, mode="r", compression="deflated", allowZip64=False, thread_synchronize=True):
"""Create a FS that maps on to a zip file.
zip_file -- A (system) path, or a file-like object
mode -- Mode to open zip file: 'r' for reading, 'w' for writing or 'a' for appending
compression -- Can be 'deflated' (default) to compress data or 'stored' to just store date
allowZip64 -- Set to True to use zip files greater than 2 MB, default is False
- thread_syncronize -- Set to True (default) to enable thread-safety
+ thread_synchronize -- Set to True (default) to enable thread-safety
"""
- FS.__init__(self, thread_syncronize=thread_syncronize)
+ FS.__init__(self, thread_synchronize=thread_synchronize)
if compression == "deflated":
compression_type = ZIP_DEFLATED
elif compression == "stored":
@@ -75,7 +74,7 @@ class ZipFS(FS):
try:
self.zf = ZipFile(zip_file, mode, compression_type, allowZip64)
except IOError:
- raise ResourceNotFoundError("NO_FILE", str(zip_file), msg="Zip file does not exist: %(path)s")
+ raise ResourceNotFoundError(str(zip_file), msg="Zip file does not exist: %(path)s")
self.zip_path = str(zip_file)
self.temp_fs = None
@@ -129,11 +128,11 @@ class ZipFS(FS):
if 'r' in mode:
if self.zip_mode not in 'ra':
- raise OperationFailedError("OPEN_FAILED", path, msg="Zip file must be opened for reading ('r') or appending ('a')")
+ raise OperationFailedError("open file", path=path, msg="Zip file must be opened for reading ('r') or appending ('a')")
try:
contents = self.zf.read(path)
except KeyError:
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
return StringIO(contents)
if 'w' in mode:
@@ -154,14 +153,14 @@ class ZipFS(FS):
self._lock.acquire()
try:
if not self.exists(path):
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
path = normpath(path)
try:
contents = self.zf.read(path)
except KeyError:
- raise ResourceNotFoundError("NO_FILE", path)
+ raise ResourceNotFoundError(path)
except RuntimeError:
- raise OperationFailedError("READ_FAILED", path, "Zip file must be oppened with 'r' or 'a' to read")
+ raise OperationFailedError("read file", path=path, msg="Zip file must be oppened with 'r' or 'a' to read")
return contents
finally:
self._lock.release()
@@ -194,23 +193,23 @@ class ZipFS(FS):
try:
dirname = normpath(dirname)
if self.zip_mode not in "wa":
- raise OperationFailedError("MAKEDIR_FAILED", dirname, "Zip file must be opened for writing ('w') or appending ('a')")
+ raise OperationFailedError("create directory", path=dirname, msg="Zip file must be opened for writing ('w') or appending ('a')")
if not dirname.endswith('/'):
dirname += '/'
self._add_resource(dirname)
finally:
self._lock.release()
- def listdir(self, path="/", wildcard=None, full=False, absolute=False, hidden=True, dirs_only=False, files_only=False):
+ def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
- return self._path_fs.listdir(path, wildcard, full, absolute, hidden, dirs_only, files_only)
+ return self._path_fs.listdir(path, wildcard, full, absolute, dirs_only, files_only)
def getinfo(self, path):
self._lock.acquire()
try:
if not self.exists(path):
- return ResourceNotFoundError("NO_RESOURCE", path)
+                raise ResourceNotFoundError(path)
path = normpath(path).lstrip('/')
try:
zi = self.zf.getinfo(path)
@@ -225,46 +224,4 @@ class ZipFS(FS):
finally:
self._lock.release()
-if __name__ == "__main__":
- def test():
- zfs = ZipFS("t.zip", "w")
- zfs.createfile("t.txt", "Hello, World!")
- zfs.close()
- rfs = ZipFS("t.zip", 'r')
- print rfs.getcontents("t.txt")
- print rfs.getcontents("w.txt")
-
- def test2():
- zfs = ZipFS("t2.zip", "r")
- print zfs.listdir("/tagging-trunk")
- print zfs.listdir("/")
- import browsewin
- browsewin.browse(zfs)
- zfs.close()
- #zfs.open("t.txt")
- #print zfs.listdir("/")
-
- test2()
-
- zfs = ZipFS("t3.zip", "w")
- zfs.createfile("t.txt", "Hello, World!")
- zfs.createfile("foo/bar/baz/t.txt", "Hello, World!")
-
- print zfs.getcontents('t.txt')
- #print zfs.isdir("t.txt")
- #print zfs.isfile("t.txt")
- #print zfs.isfile("foo/bar")
- zfs.close()
- zfs = ZipFS("t3.zip", "r")
- print "--"
- print zfs.listdir("foo")
- print zfs.isdir("foo/bar")
- print zfs.listdir("foo/bar")
- print zfs.listdir("foo/bar/baz")
- print_fs(zfs)
-
-
- #zfs = ZipFS("t3.zip", "r")
- #print zfs.zf.getinfo("asd.txt")
-
- #zfs.close()
+
diff --git a/setup.py b/setup.py
index 79cc224..bec3722 100644
--- a/setup.py
+++ b/setup.py
@@ -23,6 +23,6 @@ setup(name='fs',
url="http://code.google.com/p/pyfilesystem/",
download_url="http://code.google.com/p/pyfilesystem/downloads/list",
platforms = ['any'],
- packages=['fs'],
+ packages=['fs','fs.expose','fs.expose.fuse','fs.tests'],
classifiers=classifiers,
)