author     ben <ben@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109>   2002-05-10 23:14:35 +0000
committer  ben <ben@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109>   2002-05-10 23:14:35 +0000
commit     807241bc4f322edc6f95782291900362484263df (patch)
tree       95dc93d87081ab901e31844867d4facca6207aac /rdiff-backup/rdiff_backup
parent     5c059e737511644b0056b8326b52763c82efcac4 (diff)
download   rdiff-backup-807241bc4f322edc6f95782291900362484263df.tar.gz
Lots of changes, see changelog for 0.7.4.
git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup/trunk@72 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
Diffstat (limited to 'rdiff-backup/rdiff_backup')
-rw-r--r--  rdiff-backup/rdiff_backup/connection.py             45
-rw-r--r--  rdiff-backup/rdiff_backup/destructive_stepping.py   63
-rw-r--r--  rdiff-backup/rdiff_backup/highlevel.py               47
-rw-r--r--  rdiff-backup/rdiff_backup/increment.py              118
-rw-r--r--  rdiff-backup/rdiff_backup/lazy.py                     2
-rw-r--r--  rdiff-backup/rdiff_backup/manage.py                  79
-rw-r--r--  rdiff-backup/rdiff_backup/restore.py                172
-rw-r--r--  rdiff-backup/rdiff_backup/robust.py                   4
-rw-r--r--  rdiff-backup/rdiff_backup/rpath.py                   34
-rw-r--r--  rdiff-backup/rdiff_backup/selection.py               35
10 files changed, 347 insertions, 252 deletions
diff --git a/rdiff-backup/rdiff_backup/connection.py b/rdiff-backup/rdiff_backup/connection.py
index 9842480..57d2fa5 100644
--- a/rdiff-backup/rdiff_backup/connection.py
+++ b/rdiff-backup/rdiff_backup/connection.py
@@ -92,6 +92,7 @@ class LowLevelPipeConnection(Connection):
b - string
q - quit signal
t - TempFile
+ d - DSRPath
R - RPath
r - RORPath only
c - PipeConnection object
@@ -118,6 +119,7 @@ class LowLevelPipeConnection(Connection):
if type(obj) is types.StringType: self._putbuf(obj, req_num)
elif isinstance(obj, Connection): self._putconn(obj, req_num)
elif isinstance(obj, TempFile): self._puttempfile(obj, req_num)
+ elif isinstance(obj, DSRPath): self._putdsrpath(obj, req_num)
elif isinstance(obj, RPath): self._putrpath(obj, req_num)
elif isinstance(obj, RORPath): self._putrorpath(obj, req_num)
elif ((hasattr(obj, "read") or hasattr(obj, "write"))
@@ -148,6 +150,11 @@ class LowLevelPipeConnection(Connection):
tempfile.index, tempfile.data)
self._write("t", cPickle.dumps(tf_repr, 1), req_num)
+ def _putdsrpath(self, dsrpath, req_num):
+ """Put DSRPath into pipe. See _putrpath"""
+ dsrpath_repr = (dsrpath.conn.conn_number, dsrpath.getstatedict())
+ self._write("d", cPickle.dumps(dsrpath_repr, 1), req_num)
+
def _putrpath(self, rpath, req_num):
"""Put an rpath into the pipe
@@ -219,23 +226,22 @@ class LowLevelPipeConnection(Connection):
ord(header_string[1]),
self._s2l(header_string[2:]))
except IndexError: raise ConnectionError()
- if format_string == "o": result = cPickle.loads(self._read(length))
- elif format_string == "b": result = self._read(length)
- elif format_string == "f":
- result = VirtualFile(self, int(self._read(length)))
+ if format_string == "q": raise ConnectionQuit("Received quit signal")
+
+ data = self._read(length)
+ if format_string == "o": result = cPickle.loads(data)
+ elif format_string == "b": result = data
+ elif format_string == "f": result = VirtualFile(self, int(data))
elif format_string == "i":
- result = RORPIter.FromFile(BufferedRead(
- VirtualFile(self, int(self._read(length)))))
- elif format_string == "t":
- result = self._gettempfile(self._read(length))
- elif format_string == "r":
- result = self._getrorpath(self._read(length))
- elif format_string == "R": result = self._getrpath(self._read(length))
- elif format_string == "c":
- result = Globals.connection_dict[int(self._read(length))]
+ result = RORPIter.FromFile(BufferedRead(VirtualFile(self,
+ int(data))))
+ elif format_string == "t": result = self._gettempfile(data)
+ elif format_string == "r": result = self._getrorpath(data)
+ elif format_string == "R": result = self._getrpath(data)
+ elif format_string == "d": result = self._getdsrpath(data)
else:
- assert format_string == "q", header_string
- raise ConnectionQuit("Received quit signal")
+ assert format_string == "c", header_string
+ result = Globals.connection_dict[int(data)]
Log.conn("received", result, req_num)
return (req_num, result)
@@ -255,6 +261,15 @@ class LowLevelPipeConnection(Connection):
conn_number, base, index, data = cPickle.loads(raw_rpath_buf)
return RPath(Globals.connection_dict[conn_number], base, index, data)
+ def _getdsrpath(self, raw_dsrpath_buf):
+ """Return DSRPath object indicated by buf"""
+ conn_number, state_dict = cPickle.loads(raw_dsrpath_buf)
+ empty_dsrp = DSRPath("bypass", Globals.local_connection, None)
+ empty_dsrp.__setstate__(state_dict)
+ empty_dsrp.conn = Globals.connection_dict[conn_number]
+ empty_dsrp.file = None
+ return empty_dsrp
+
def _close(self):
"""Close the pipes associated with the connection"""
self.outpipe.close()
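[Note on the new "d" format above: a DSRPath is sent by pickling its connection number together with the picklable state dict from getstatedict(), and the receiver builds a "bypass" DSRPath, restores that state, and rebinds the connection. Below is a minimal stand-alone sketch of that round trip, not the rdiff-backup code itself; the class and names are hypothetical stand-ins.]

# Illustrative sketch of the _putdsrpath/_getdsrpath round trip:
# pickle (connection number, state dict), rebuild an empty object remotely.
import pickle

connection_dict = {0: "local connection", 1: "remote connection"}

class FakePath:
    def __init__(self, conn_number, path):
        self.conn = conn_number      # which connection the path belongs to
        self.path = path
        self.file = None             # open file objects never cross the pipe

    def getstatedict(self):
        # only the picklable attributes, like DSRPath.getstatedict()
        return {"path": self.path}

def put_path(p):
    # sender side: serialize (connection number, picklable state)
    return pickle.dumps((p.conn, p.getstatedict()))

def get_path(buf):
    # receiver side: rebuild an empty instance, restore state, rebind conn
    conn_number, state = pickle.loads(buf)
    empty = FakePath.__new__(FakePath)
    empty.__dict__.update(state)
    empty.conn = connection_dict[conn_number]
    empty.file = None
    return empty

restored = get_path(put_path(FakePath(1, "/tmp/example")))
print(restored.conn, restored.path)      # remote connection /tmp/example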
diff --git a/rdiff-backup/rdiff_backup/destructive_stepping.py b/rdiff-backup/rdiff_backup/destructive_stepping.py
index c5e2faa..ff3b42a 100644
--- a/rdiff-backup/rdiff_backup/destructive_stepping.py
+++ b/rdiff-backup/rdiff_backup/destructive_stepping.py
@@ -1,4 +1,5 @@
from __future__ import generators
+import types
execfile("rorpiter.py")
#######################################################################
@@ -40,13 +41,17 @@ class DSRPath(RPath):
otherwise use the same arguments as the RPath initializer.
"""
- if len(args) == 2 and isinstance(args[0], RPath):
+ if len(args) == 1 and isinstance(args[0], RPath):
rp = args[0]
RPath.__init__(self, rp.conn, rp.base, rp.index)
else: RPath.__init__(self, *args)
- self.set_delays(source)
- self.set_init_perms(source)
+ if source != "bypass":
+ # "bypass" val is used when unpackaging over connection
+ assert source is None or source is 1
+ self.source = source
+ self.set_delays(source)
+ self.set_init_perms(source)
def set_delays(self, source):
"""Delay writing permissions and times where appropriate"""
@@ -59,13 +64,14 @@ class DSRPath(RPath):
# Now get atime right away if possible
if self.data.has_key('atime'): self.newatime = self.data['atime']
else: self.newatime = None
+ else: self.delay_atime = None
if source:
self.delay_mtime = None # we'll never change mtime of source file
else:
self.delay_mtime = 1
# Save mtime now for a dir, because it might inadvertantly change
- if self.isdir(): self.newmtime = self.getmtime()
+ if self.isdir(): self.newmtime = self.data['mtime']
else: self.newmtime = None
def set_init_perms(self, source):
@@ -75,26 +81,30 @@ class DSRPath(RPath):
self.chmod_bypass(0400)
else: self.warn("No read permissions")
elif self.isdir():
- if source and (not self.readable() or self.executable()):
+ if source and (not self.readable() or not self.executable()):
if Globals.change_source_perms and self.isowner():
self.chmod_bypass(0500)
- else: warn("No read or exec permission")
+ else: self.warn("No read or exec permission")
elif not source and not self.hasfullperms():
self.chmod_bypass(0700)
def warn(self, err):
Log("Received error '%s' when dealing with file %s, skipping..."
% (err, self.path), 1)
- raise DSRPermError(self.path)
+ raise DSRPPermError(self.path)
def __getstate__(self):
"""Return picklable state. See RPath __getstate__."""
assert self.conn is Globals.local_connection # Can't pickle a conn
+ return self.getstatedict()
+
+ def getstatedict(self):
+ """Return dictionary containing the attributes we can save"""
pickle_dict = {}
for attrib in ['index', 'data', 'delay_perms', 'newperms',
'delay_atime', 'newatime',
'delay_mtime', 'newmtime',
- 'path', 'base']:
+ 'path', 'base', 'source']:
if self.__dict__.has_key(attrib):
pickle_dict[attrib] = self.__dict__[attrib]
return pickle_dict
@@ -110,10 +120,17 @@ class DSRPath(RPath):
if self.delay_perms: self.newperms = self.data['perms'] = permissions
else: RPath.chmod(self, permissions)
+ def getperms(self):
+ """Return dsrp's intended permissions"""
+ if self.delay_perms and self.newperms is not None:
+ return self.newperms
+ else: return self.data['perms']
+
def chmod_bypass(self, permissions):
"""Change permissions without updating the data dictionary"""
self.delay_perms = 1
if self.newperms is None: self.newperms = self.getperms()
+ Log("DSRP: Perm bypass %s to %o" % (self.path, permissions), 8)
self.conn.os.chmod(self.path, permissions)
def settime(self, accesstime, modtime):
@@ -129,11 +146,25 @@ class DSRPath(RPath):
if self.delay_mtime: self.newmtime = self.data['mtime'] = modtime
else: RPath.setmtime(self, modtime)
+ def getmtime(self):
+ """Return dsrp's intended modification time"""
+ if self.delay_mtime and self.newmtime is not None:
+ return self.newmtime
+ else: return self.data['mtime']
+
+ def getatime(self):
+ """Return dsrp's intended access time"""
+ if self.delay_atime and self.newatime is not None:
+ return self.newatime
+ else: return self.data['atime']
+
def write_changes(self):
"""Write saved up permission/time changes"""
if not self.lstat(): return # File has been deleted in meantime
if self.delay_perms and self.newperms is not None:
+ Log("Finalizing permissions of dsrp %s to %s" %
+ (self.path, self.newperms), 8)
RPath.chmod(self, self.newperms)
do_atime = self.delay_atime and self.newatime is not None
@@ -145,6 +176,19 @@ class DSRPath(RPath):
elif not do_atime and do_mtime:
RPath.setmtime(self, self.newmtime)
+ def newpath(self, newpath, index = ()):
+ """Return similar DSRPath but with new path"""
+ return self.__class__(self.source, self.conn, newpath, index)
+
+ def append(self, ext):
+ """Return similar DSRPath with new extension"""
+ return self.__class__(self.source, self.conn, self.base,
+ self.index + (ext,))
+
+ def new_index(self, index):
+ """Return similar DSRPath with new index"""
+ return self.__class__(self.source, self.conn, self.base, index)
+
class DestructiveSteppingFinalizer(IterTreeReducer):
"""Finalizer that can work on an iterator of dsrpaths
@@ -155,11 +199,12 @@ class DestructiveSteppingFinalizer(IterTreeReducer):
coming back to it.
"""
+ dsrpath = None
def start_process(self, index, dsrpath):
self.dsrpath = dsrpath
def end_process(self):
- self.dsrpath.write_changes()
+ if self.dsrpath: self.dsrpath.write_changes()
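[The DSRPath changes above follow a delay-then-finalize pattern: chmod/utime calls on files that must stay steppable are recorded as intended values (newperms, newatime, newmtime) and only applied by write_changes() once the subtree is finished. A rough sketch of just the permission half follows, assuming plain os.chmod/os.stat are enough; it is illustrative, not the actual class.]

# Minimal sketch of the delay-then-finalize idea behind DSRPath's
# permission handling (atime/mtime are handled the same way in the real code).
import os

class DelayedPerms:
    def __init__(self, path):
        self.path = path
        self.newperms = None          # intended mode, applied later

    def chmod(self, mode):
        self.newperms = mode          # remember the mode, don't touch disk yet

    def chmod_bypass(self, mode):
        # temporarily relax permissions so the directory can be worked in,
        # without forgetting the mode we eventually want to restore
        if self.newperms is None:
            self.newperms = os.stat(self.path).st_mode & 0o7777
        os.chmod(self.path, mode)

    def write_changes(self):
        # finalize: apply the saved-up intended permissions
        if self.newperms is not None:
            os.chmod(self.path, self.newperms)

# usage: d = DelayedPerms("some_dir"); d.chmod_bypass(0o700); ...; d.write_changes()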
diff --git a/rdiff-backup/rdiff_backup/highlevel.py b/rdiff-backup/rdiff_backup/highlevel.py
index d0bc743..36ba55a 100644
--- a/rdiff-backup/rdiff_backup/highlevel.py
+++ b/rdiff-backup/rdiff_backup/highlevel.py
@@ -24,12 +24,14 @@ class HighLevel:
accompanying diagram.
"""
- def Mirror(src_rpath, dest_rpath, checkpoint = 1, session_info = None):
+ def Mirror(src_rpath, dest_rpath, checkpoint = 1,
+ session_info = None, write_finaldata = 1):
"""Turn dest_rpath into a copy of src_rpath
Checkpoint true means to checkpoint periodically, otherwise
not. If session_info is given, try to resume Mirroring from
- that point.
+ that point. If write_finaldata is true, save extra data files
+ like hardlink_data. If it is false, make a complete mirror.
"""
SourceS = src_rpath.conn.HLSourceStruct
@@ -40,7 +42,8 @@ class HighLevel:
src_init_dsiter = SourceS.split_initial_dsiter()
dest_sigiter = DestS.get_sigs(dest_rpath, src_init_dsiter)
diffiter = SourceS.get_diffs_and_finalize(dest_sigiter)
- DestS.patch_and_finalize(dest_rpath, diffiter, checkpoint)
+ DestS.patch_and_finalize(dest_rpath, diffiter,
+ checkpoint, write_finaldata)
dest_rpath.setdata()
@@ -61,24 +64,6 @@ class HighLevel:
dest_rpath.setdata()
inc_rpath.setdata()
- def Restore(rest_time, mirror_base, rel_index, baseinc_tup, target_base):
- """Like Restore.RestoreRecursive but check arguments"""
- if (Globals.preserve_hardlinks != 0 and
- Hardlink.retrieve_final(rest_time)):
- Log("Hard link information found, attempting to preserve "
- "hard links.", 4)
- SetConnections.UpdateGlobal('preserve_hardlinks', 1)
- else: SetConnections.UpdateGlobal('preserve_hardlinks', None)
-
- if not isinstance(target_base, DSRPath):
- target_base = DSRPath(target_base.conn, target_base.base,
- target_base.index, target_base.data)
- if not isinstance(mirror_base, DSRPath):
- mirror_base = DSRPath(mirror_base.conn, mirror_base.base,
- mirror_base.index, mirror_base.data)
- Restore.RestoreRecursive(rest_time, mirror_base, rel_index,
- baseinc_tup, target_base)
-
MakeStatic(HighLevel)
@@ -164,7 +149,7 @@ class HLDestinationStruct:
def compare(src_rorp, dest_dsrp):
"""Return dest_dsrp if they are different, None if the same"""
if not dest_dsrp:
- dest_dsrp = DSRPath(baserp.conn, baserp.base, src_rorp.index)
+ dest_dsrp = cls.get_dsrp(baserp, src_rorp.index)
if dest_dsrp.lstat():
Log("Warning: Found unexpected destination file %s, "
"not processing it." % dest_dsrp.path, 2)
@@ -203,8 +188,9 @@ class HLDestinationStruct:
def get_dsrp(cls, dest_rpath, index):
"""Return initialized dsrp based on dest_rpath with given index"""
- return DSRPath(source = None, dest_rpath.conn,
- dest_rpath.base, index)
+ dsrp = DSRPath(None, dest_rpath.conn, dest_rpath.base, index)
+ if Globals.quoting_enabled: dsrp.quote_path()
+ return dsrp
def get_finalizer(cls):
"""Return finalizer, starting from session info if necessary"""
@@ -216,9 +202,13 @@ class HLDestinationStruct:
"""Return ITR, starting from state if necessary"""
if cls._session_info and cls._session_info.ITR:
return cls._session_info.ITR
- else: return IncrementITR(inc_rpath)
+ else:
+ iitr = IncrementITR(inc_rpath)
+ iitr.override_changed()
+ return iitr
- def patch_and_finalize(cls, dest_rpath, diffs, checkpoint = 1):
+ def patch_and_finalize(cls, dest_rpath, diffs,
+ checkpoint = 1, write_finaldata = 1):
"""Apply diffs and finalize"""
collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2)
finalizer = cls.get_finalizer()
@@ -242,7 +232,7 @@ class HLDestinationStruct:
if checkpoint: SaveState.checkpoint_mirror(finalizer, dsrp)
except: cls.handle_last_error(dsrp, finalizer)
finalizer.Finish()
- if Globals.preserve_hardlinks and Globals.rbdir:
+ if Globals.preserve_hardlinks and write_finaldata:
Hardlink.final_writedata()
if checkpoint: SaveState.checkpoint_remove()
@@ -300,8 +290,7 @@ class HLDestinationStruct:
Log.exception(1)
if ITR: SaveState.checkpoint_inc_backup(ITR, finalizer, dsrp, 1)
else: SaveState.checkpoint_mirror(finalizer, dsrp, 1)
- if Globals.preserve_hardlinks:
- Hardlink.final_checkpoint(Globals.rbdir)
+ if Globals.preserve_hardlinks: Hardlink.final_checkpoint(Globals.rbdir)
SaveState.touch_last_file_definitive()
raise
diff --git a/rdiff-backup/rdiff_backup/increment.py b/rdiff-backup/rdiff_backup/increment.py
index 446806b..b28b315 100644
--- a/rdiff-backup/rdiff_backup/increment.py
+++ b/rdiff-backup/rdiff_backup/increment.py
@@ -1,4 +1,4 @@
-execfile("selection.py")
+execfile("filename_mapping.py")
#######################################################################
#
@@ -85,10 +85,12 @@ class Inc:
"""Get new increment rp with given time suffix"""
addtostr = lambda s: "%s.%s.%s" % (s, timestr, typestr)
if rp.index:
- return rp.__class__(rp.conn, rp.base, rp.index[:-1] +
- (addtostr(rp.index[-1]),))
- else: return rp.__class__(rp.conn, addtostr(rp.base), rp.index)
-
+ incrp = rp.__class__(rp.conn, rp.base, rp.index[:-1] +
+ (addtostr(rp.index[-1]),))
+ else: incrp = rp.__class__(rp.conn, addtostr(rp.base), rp.index)
+ if Globals.quoting_enabled: incrp.quote_path()
+ return incrp
+
inctime = 0
while 1:
inctime = Resume.FindTime(rp.index, inctime)
@@ -123,7 +125,7 @@ class IncrementITR(IterTreeReducer):
def __init__(self, inc_rpath):
"""Set inc_rpath, an rpath of the base of the tree"""
self.inc_rpath = inc_rpath
- IterTreeReducer.__init__(inc_rpath)
+ IterTreeReducer.__init__(self, inc_rpath)
def start_process(self, index, diff_rorp, dsrp):
"""Initial processing of file
@@ -133,11 +135,21 @@ class IncrementITR(IterTreeReducer):
"""
incpref = self.inc_rpath.new_index(index)
+ if Globals.quoting_enabled: incpref.quote_path()
if dsrp.isdir():
self.init_dir(dsrp, diff_rorp, incpref)
self.setvals(diff_rorp, dsrp, incpref)
else: self.init_non_dir(dsrp, diff_rorp, incpref)
+ def override_changed(self):
+ """Set changed flag to true
+
+ This is used only at the top level of a backup, to make sure
+ that a marker is created recording every backup session.
+
+ """
+ self.changed = 1
+
def setvals(self, diff_rorp, dsrp, incpref):
"""Record given values in state dict since in directory
@@ -162,7 +174,7 @@ class IncrementITR(IterTreeReducer):
"""
if not (incpref.lstat() and incpref.isdir()): incpref.mkdir()
if diff_rorp and diff_rorp.isreg() and diff_rorp.file:
- tf = TempFileManager(dsrp)
+ tf = TempFileManager.new(dsrp)
RPathStatic.copy_with_attribs(diff_rorp, tf)
tf.set_attached_filetype(diff_rorp.get_attached_filetype())
self.directory_replacement = tf
@@ -170,7 +182,7 @@ class IncrementITR(IterTreeReducer):
def init_non_dir(self, dsrp, diff_rorp, incpref):
"""Process a non directory file (initial pass)"""
if not diff_rorp: return # no diff, so no change necessary
- if diff_rorp.isreg and (dsrp.isreg() or diff_rorp.isflaglinked()):
+ if diff_rorp.isreg() and (dsrp.isreg() or diff_rorp.isflaglinked()):
tf = TempFileManager.new(dsrp)
def init_thunk():
if diff_rorp.isflaglinked():
@@ -180,8 +192,8 @@ class IncrementITR(IterTreeReducer):
Inc.Increment_action(tf, dsrp, incpref).execute()
Robust.make_tf_robustaction(init_thunk, (tf,), (dsrp,)).execute()
else:
- Robust.chain([Inc.Increment_action(diff_rorp, dsrp, incref),
- RORPIter.patchonce_action(none, dsrp, diff_rorp)]
+ Robust.chain([Inc.Increment_action(diff_rorp, dsrp, incpref),
+ RORPIter.patchonce_action(None, dsrp, diff_rorp)]
).execute()
self.changed = 1
@@ -207,89 +219,3 @@ class IncrementITR(IterTreeReducer):
-
-
- def make_patch_increment_ITR(inc_rpath, initial_state = None):
- """Return IterTreeReducer that patches and increments"""
- def base_init(indexed_tuple):
- """Patch if appropriate, return (a,b) tuple
-
- a is true if found directory and thus didn't take action
-
- if a is false, b is true if some changes were made
-
- if a is true, b is the rp of a temporary file used to hold
- the diff_rorp's data (for dir -> normal file change), and
- false if none was necessary.
-
- """
- diff_rorp, dsrp = indexed_tuple
- incpref = inc_rpath.new_index(indexed_tuple.index)
- if dsrp.isdir(): return init_dir(dsrp, diff_rorp, incpref)
- else: return init_non_dir(dsrp, diff_rorp, incpref)
-
- def init_dir(dsrp, diff_rorp, incpref):
- """Initial processing of a directory
-
- Make the corresponding directory right away, but wait
- until the end to write the replacement. However, if the
- diff_rorp contains data, we must write it locally before
- continuing, or else that data will be lost in the stream.
-
- """
- if not (incpref.lstat() and incpref.isdir()): incpref.mkdir()
- if diff_rorp and diff_rorp.isreg() and diff_rorp.file:
- tf = TempFileManager.new(dsrp)
- RPathStatic.copy_with_attribs(diff_rorp, tf)
- tf.set_attached_filetype(diff_rorp.get_attached_filetype())
- return (1, tf)
- else: return (1, None)
-
- def init_non_dir(dsrp, diff_rorp, incpref):
- """Initial processing of non-directory
-
- If a reverse diff is called for it is generated by apply
- the forwards diff first on a temporary file.
-
- """
- if diff_rorp:
- if diff_rorp.isreg() and (dsrp.isreg() or
- diff_rorp.isflaglinked()):
- tf = TempFileManager.new(dsrp)
- def init_thunk():
- if diff_rorp.isflaglinked():
- Hardlink.link_rp(diff_rorp, tf, dsrp)
- else: Rdiff.patch_with_attribs_action(dsrp, diff_rorp,
- tf).execute()
- Inc.Increment_action(tf, dsrp, incpref).execute()
- Robust.make_tf_robustaction(init_thunk, (tf,),
- (dsrp,)).execute()
- else:
- Robust.chain([Inc.Increment_action(diff_rorp, dsrp,
- incpref),
- RORPIter.patchonce_action(
- None, dsrp, diff_rorp)]).execute()
- return (None, 1)
- return (None, None)
-
- def base_final(base_tuple, base_init_tuple, changed):
- """Patch directory if not done, return true iff made change"""
- if base_init_tuple[0]: # was directory
- diff_rorp, dsrp = base_tuple
- if changed or diff_rorp:
- if base_init_tuple[1]: diff_rorp = base_init_tuple[1]
- Inc.Increment(diff_rorp, dsrp,
- inc_rpath.new_index(base_tuple.index))
- if diff_rorp:
- RORPIter.patchonce_action(None, dsrp,
- diff_rorp).execute()
- if isinstance(diff_rorp, TempFile): diff_rorp.delete()
- return 1
- return None
- else: # changed iff base_init_tuple says it was
- return base_init_tuple[1]
-
- return IterTreeReducer(base_init, lambda x,y: x or y, None,
- base_final, initial_state)
-
-
diff --git a/rdiff-backup/rdiff_backup/lazy.py b/rdiff-backup/rdiff_backup/lazy.py
index 80cfa95..1bb2e2c 100644
--- a/rdiff-backup/rdiff_backup/lazy.py
+++ b/rdiff-backup/rdiff_backup/lazy.py
@@ -260,7 +260,7 @@ class IterTreeReducer:
"""
index = args[0]
- assert type(index) is types.TupleType
+ assert type(index) is types.TupleType, type(index)
if self.index is None:
self.start_process(*args)
diff --git a/rdiff-backup/rdiff_backup/manage.py b/rdiff-backup/rdiff_backup/manage.py
index c0f4a85..0c08872 100644
--- a/rdiff-backup/rdiff_backup/manage.py
+++ b/rdiff-backup/rdiff_backup/manage.py
@@ -12,37 +12,53 @@ class Manage:
"""Return Increments objects given the rdiff-backup data directory"""
return map(IncObj, Manage.find_incrps_with_base(datadir, "increments"))
- def find_incrps_with_base(dir_rp, basename):
- """Return list of incfiles with given basename in dir_rp"""
- rps = map(dir_rp.append, dir_rp.listdir())
- incrps = filter(RPath.isincfile, rps)
- result = filter(lambda rp: rp.getincbase_str() == basename, incrps)
- Log("find_incrps_with_base: found %d incs" % len(result), 6)
- return result
+ def get_file_type(rp):
+ """Returns one of "regular", "directory", "missing", or "special"."""
+ if not rp.lstat(): return "missing"
+ elif rp.isdir(): return "directory"
+ elif rp.isreg(): return "regular"
+ else: return "special"
- def describe_root_incs(datadir):
+ def get_inc_type(inc):
+ """Return file type increment represents"""
+ assert inc.isincfile()
+ type = inc.getinctype()
+ if type == "dir": return "directory"
+ elif type == "diff": return "regular"
+ elif type == "missing": return "missing"
+ elif type == "snapshot": return Manage.get_file_type(inc)
+ else: assert None, "Unknown type %s" % (type,)
+
+ def describe_incs_parsable(incs, mirror_time, mirrorrp):
+ """Return a string parsable by computer describing the increments
+
+ Each line is a time in seconds of the increment, and then the
+ type of the file. It will be sorted oldest to newest. For example:
+
+ 10000 regular
+ 20000 directory
+ 30000 special
+ 40000 missing
+ 50000 regular <- last will be the current mirror
+
+ """
+ incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs]
+ incpairs.sort()
+ result = ["%s %s" % (time, Manage.get_inc_type(inc))
+ for time, inc in incpairs]
+ result.append("%s %s" % (mirror_time, Manage.get_file_type(mirrorrp)))
+ return "\n".join(result)
+
+ def describe_incs_human(incs, mirror_time, mirrorrp):
"""Return a string describing all the the root increments"""
- result = []
- currentrps = Manage.find_incrps_with_base(datadir, "current_mirror")
- if not currentrps:
- Log("Warning: no current mirror marker found", 1)
- elif len(currentrps) > 1:
- Log("Warning: multiple mirror markers found", 1)
- for rp in currentrps:
- result.append("Found mirror marker %s" % rp.path)
- result.append("Indicating latest mirror taken at %s" %
- Time.stringtopretty(rp.getinctime()))
- result.append("---------------------------------------------"
- "-------------")
-
- # Sort so they are in reverse order by time
- time_w_incobjs = map(lambda io: (-io.time, io),
- Manage.get_incobjs(datadir))
- time_w_incobjs.sort()
- incobjs = map(lambda x: x[1], time_w_incobjs)
- result.append("Found %d increments:" % len(incobjs))
- result.append("\n------------------------------------------\n".join(
- map(IncObj.full_description, incobjs)))
+ incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs]
+ incpairs.sort()
+
+ result = ["Found %d increments:" % len(incpairs)]
+ for time, inc in incpairs:
+ result.append(" %s %s" %
+ (inc.dirsplit()[1], Time.timetopretty(time)))
+ result.append("Current mirror: %s" % Time.timetopretty(mirror_time))
return "\n".join(result)
def delete_earlier_than(baserp, time):
@@ -53,6 +69,11 @@ class Manage:
rdiff-backup-data directory should be the root of the tree.
"""
+ baserp.conn.Manage.delete_earlier_than_local(baserp, time)
+
+ def delete_earlier_than_local(baserp, time):
+ """Like delete_earlier_than, but run on local connection for speed"""
+ assert baserp.conn is Globals.local_connection
def yield_files(rp):
yield rp
if rp.isdir():
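[For reference, describe_incs_parsable's output is one "<seconds> <type>" line per increment, sorted oldest to newest, with the current mirror appended last. A tiny sketch of that sort-and-format step, using made-up (time, type) pairs instead of real increment objects:]

# Sketch of the "seconds type" listing produced by describe_incs_parsable.
incpairs = [(30000, "special"), (10000, "regular"), (20000, "directory")]
incpairs.sort()
lines = ["%s %s" % (t, kind) for t, kind in incpairs]
lines.append("%s %s" % (50000, "regular"))   # current mirror comes last
print("\n".join(lines))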
diff --git a/rdiff-backup/rdiff_backup/restore.py b/rdiff-backup/rdiff_backup/restore.py
index dcba7f3..0faa9b2 100644
--- a/rdiff-backup/rdiff_backup/restore.py
+++ b/rdiff-backup/rdiff_backup/restore.py
@@ -24,32 +24,78 @@ class Restore:
same index as mirror.
"""
- if not isinstance(mirror, DSRPath):
- mirror = DSRPath(source = 1, mirror)
- if not isinstance(target, DSRPath):
- target = DSRPath(source = None, target)
+ if not isinstance(mirror, DSRPath): mirror = DSRPath(1, mirror)
+ if not isinstance(target, DSRPath): target = DSRPath(None, target)
+
+ mirror_time = Restore.get_mirror_time()
+ rest_time = Restore.get_rest_time(rest_time)
+ inc_list = Restore.get_inclist(inc_rpath)
+ rid = RestoreIncrementData(inc_rpath.index, inc_rpath, inc_list)
+ rid.sortincseq(rest_time, mirror_time)
+ Restore.check_hardlinks(rest_time)
+ Restore.restore_recursive(inc_rpath.index, mirror, rid, target,
+ rest_time, mirror_time)
+
+ def get_mirror_time():
+ """Return the time (in seconds) of latest mirror"""
+ current_mirror_incs = \
+ Restore.get_inclist(Globals.rbdir.append("current_mirror"))
+ if not current_mirror_incs:
+ Log.FatalError("Could not get time of current mirror")
+ elif len(current_mirror_incs) > 1:
+ Log("Warning, two different dates for current mirror found", 2)
+ return Time.stringtotime(current_mirror_incs[0].getinctime())
+
+ def get_rest_time(old_rest_time):
+ """If old_rest_time is between two increments, return older time
+
+ There is a slightly tricky reason for doing this: The rest of
+ the code just ignores increments that are older than
+ rest_time. But sometimes we want to consider the very next
+ increment older than rest time, because rest_time will be
+ between two increments, and what was actually on the mirror
+ side will correspond to the older one.
+
+ So here we assume all rdiff-backup events were recorded in
+ "increments" increments, and if its in-between we pick the
+ older one here.
+ """
+ base_incs = Restore.get_inclist(Globals.rbdir.append("increments"))
+ if not base_incs: return old_rest_time
+ inctimes = [Time.stringtotime(inc.getinctime()) for inc in base_incs]
+ return max(filter(lambda time: time <= old_rest_time, inctimes))
+
+ def get_inclist(inc_rpath):
+ """Returns increments with given base"""
dirname, basename = inc_rpath.dirsplit()
parent_dir = RPath(inc_rpath.conn, dirname, ())
index = inc_rpath.index
- if inc_rpath.index:
+ if index:
get_inc_ext = lambda filename: \
RPath(inc_rpath.conn, inc_rpath.base,
inc_rpath.index[:-1] + (filename,))
else: get_inc_ext = lambda filename: \
- RPath(inc_rpath.conn, os.join(dirname, filename))
+ RPath(inc_rpath.conn, os.path.join(dirname, filename))
inc_list = []
for filename in parent_dir.listdir():
inc = get_inc_ext(filename)
- if inc.getincbase_str() == basename: inc_list.append(inc)
-
- rid = RestoreIncrementData(index, inc_rpath, inc_list)
- rid.sortincseq(rest_time)
- Restore.restore_recursive(index, mirror, rid, target, rest_time)
-
- def restore_recursive(index, mirror, rid, target, time):
+ if inc.isincfile() and inc.getincbase_str() == basename:
+ inc_list.append(inc)
+ return inc_list
+
+ def check_hardlinks(rest_time):
+ """Check for hard links and enable hard link support if found"""
+ if (Globals.preserve_hardlinks != 0 and
+ Hardlink.retrieve_final(rest_time)):
+ Log("Hard link information found, attempting to preserve "
+ "hard links.", 5)
+ SetConnections.UpdateGlobal('preserve_hardlinks', 1)
+ else: SetConnections.UpdateGlobal('preserve_hardlinks', None)
+
+ def restore_recursive(index, mirror, rid, target, time, mirror_time):
"""Recursive restore function.
rid is a RestoreIncrementData object whose inclist is already
@@ -66,14 +112,15 @@ class Restore:
mirror_finalizer = DestructiveSteppingFinalizer()
target_finalizer = DestructiveSteppingFinalizer()
- for rcd in Restore.yield_rcds(rid.index, mirror, rid, target, time):
+ for rcd in Restore.yield_rcds(rid.index, mirror, rid,
+ target, time, mirror_time):
rcd.RestoreFile()
- if rcd.mirror: mirror_finalizer(rcd.mirror)
- target_finalizer(rcd.target)
+ if rcd.mirror: mirror_finalizer(rcd.index, rcd.mirror)
+ target_finalizer(rcd.target.index, rcd.target)
target_finalizer.Finish()
mirror_finalizer.Finish()
- def yield_rcds(index, mirrorrp, rid, target, rest_time):
+ def yield_rcds(index, mirrorrp, rid, target, rest_time, mirror_time):
"""Iterate RestoreCombinedData objects starting with given args
rid is a RestoreCombinedData object. target is an rpath where
@@ -91,9 +138,10 @@ class Restore:
mirrorrp = None
rcd = RestoreCombinedData(rid, mirrorrp, target)
- if mirrorrp and mirrorrp.isdir() or rid and rid.inc_rpath.isdir():
+ if mirrorrp and mirrorrp.isdir() or \
+ rid and rid.inc_rpath and rid.inc_rpath.isdir():
sub_rcds = Restore.yield_sub_rcds(index, mirrorrp, rid,
- target, rest_time)
+ target, rest_time, mirror_time)
else: sub_rcds = None
if select_result == 1:
@@ -108,35 +156,39 @@ class Restore:
yield first
for sub_rcd in sub_rcds: yield sub_rcd
- def yield_collated_tuples_dir(index, mirrorrp, rid, target, rest_time):
+ def yield_sub_rcds(index, mirrorrp, rid, target, rest_time, mirror_time):
"""Yield collated tuples from inside given args"""
- if not Restore.check_dir_exists(mirrorrp, inc_tup): return
+ if not Restore.check_dir_exists(mirrorrp, rid): return
mirror_iter = Restore.yield_mirrorrps(mirrorrp)
- rid_iter = Restore.get_rids(rid, rest_time)
+ rid_iter = Restore.yield_rids(rid, rest_time, mirror_time)
for indexed_tup in RORPIter.CollateIterators(mirror_iter, rid_iter):
index = indexed_tup.index
new_mirrorrp, new_rid = indexed_tup
- for rcd in Restore.yield_collated_tuples(index, new_mirrorrp,
- new_rid, target.new_index(index), rest_time):
+ for rcd in Restore.yield_rcds(index, new_mirrorrp,
+ new_rid, target.append(index[-1]), rest_time, mirror_time):
yield rcd
- def check_dir_exists(mirrorrp, inc_tuple):
+ def check_dir_exists(mirrorrp, rid):
"""Return true if target should be a directory"""
- if inc_tuple and inc_tuple[1]:
+ if rid and rid.inc_list:
# Incs say dir if last (earliest) one is a dir increment
- return inc_tuple[1][-1].getinctype() == "dir"
+ return rid.inc_list[-1].getinctype() == "dir"
elif mirrorrp: return mirrorrp.isdir() # if no incs, copy mirror
else: return None
def yield_mirrorrps(mirrorrp):
"""Yield mirrorrps underneath given mirrorrp"""
if mirrorrp and mirrorrp.isdir():
- dirlist = mirrorrp.listdir()
- dirlist.sort()
- for filename in dirlist: yield mirrorrp.append(filename)
-
- def yield_rids(rid, rest_time):
+ if Globals.quoting_enabled:
+ for rp in FilenameMapping.get_quoted_dir_children(mirrorrp):
+ yield rp
+ else:
+ dirlist = mirrorrp.listdir()
+ dirlist.sort()
+ for filename in dirlist: yield mirrorrp.append(filename)
+
+ def yield_rids(rid, rest_time, mirror_time):
"""Yield RestoreIncrementData objects within given rid dir
If the rid doesn't correspond to a directory, don't yield any
@@ -148,16 +200,19 @@ class Restore:
if not rid or not rid.inc_rpath or not rid.inc_rpath.isdir(): return
rid_dict = {} # dictionary of basenames:rids
dirlist = rid.inc_rpath.listdir()
+ if Globals.quoting_enabled:
+ dirlist = [FilenameMapping.unquote(fn) for fn in dirlist]
def affirm_dict_indexed(basename):
"""Make sure the rid dictionary has given basename as key"""
- if not inc_list_dict.has_key(basename):
+ if not rid_dict.has_key(basename):
rid_dict[basename] = RestoreIncrementData(
rid.index + (basename,), None, []) # init with empty rid
def add_to_dict(filename):
"""Add filename to the inc tuple dictionary"""
rp = rid.inc_rpath.append(filename)
+ if Globals.quoting_enabled: rp.quote_path()
if rp.isincfile():
basename = rp.getincbase_str()
affirm_dict_indexed(basename)
@@ -167,14 +222,14 @@ class Restore:
rid_dict[filename].inc_rpath = rp
for filename in dirlist: add_to_dict(filename)
- keys = inc_list_dict.keys()
+ keys = rid_dict.keys()
keys.sort()
# sortincseq now to avoid descending .missing directories later
for key in keys:
rid = rid_dict[key]
if rid.inc_rpath or rid.inc_list:
- rid.sortincseq(rest_time)
+ rid.sortincseq(rest_time, mirror_time)
yield rid
MakeStatic(Restore)
@@ -192,26 +247,36 @@ class RestoreIncrementData:
self.inc_rpath = inc_rpath
self.inc_list = inc_list
- def sortincseq(self, rest_time):
+ def sortincseq(self, rest_time, mirror_time):
"""Sort self.inc_list sequence, throwing away irrelevant increments"""
- incpairs = map(lambda rp: (Time.stringtotime(rp.getinctime()), rp),
- self.inc_list)
- # Only consider increments at or after the time being restored
- incpairs = filter(lambda pair: pair[0] >= rest_time, incpairs)
+ if not self.inc_list or rest_time >= mirror_time:
+ self.inc_list = []
+ return
- # Now throw away older unnecessary increments
- incpairs.sort()
+ newer_incs = self.get_newer_incs(rest_time, mirror_time)
i = 0
- while(i < len(incpairs)):
+ while(i < len(newer_incs)):
# Only diff type increments require later versions
- if incpairs[i][1].getinctype() != "diff": break
+ if newer_incs[i].getinctype() != "diff": break
i = i+1
- incpairs = incpairs[:i+1]
+ self.inc_list = newer_incs[:i+1]
+ self.inc_list.reverse() # return in reversed order (latest first)
+
+ def get_newer_incs(self, rest_time, mirror_time):
+ """Return list of newer incs sorted by time (increasing)
- # Return increments in reversed order (latest first)
- incpairs.reverse()
- self.inc_list = map(lambda pair: pair[1], incpairs)
+ Also discard increments older than rest_time (rest_time we are
+ assuming is the exact time rdiff-backup was run, so no need to
+ consider the next oldest increment or any of that)
+ """
+ incpairs = []
+ for inc in self.inc_list:
+ time = Time.stringtotime(inc.getinctime())
+ if time >= rest_time: incpairs.append((time, inc))
+ incpairs.sort()
+ return [pair[1] for pair in incpairs]
+
class RestoreCombinedData:
"""Combine index information from increment and mirror directories
@@ -235,9 +300,12 @@ class RestoreCombinedData:
if mirror:
self.mirror = mirror
assert mirror.index == self.index
+ else: self.mirror = None
elif mirror:
self.index = mirror.index
self.mirror = mirror
+ self.inc_list = []
+ self.inc_rpath = None
else: assert None, "neither rid nor mirror given"
self.target = target
@@ -249,15 +317,15 @@ class RestoreCombinedData:
if self.restore_hardlink(): return
- if not inclist or inclist[0].getinctype() == "diff":
+ if not self.inc_list or self.inc_list[0].getinctype() == "diff":
assert self.mirror and self.mirror.lstat(), \
"No base to go with incs for %s" % self.target.path
RPath.copy_with_attribs(self.mirror, self.target)
for inc in self.inc_list: self.applyinc(inc, self.target)
- def log(self)
+ def log(self):
"""Log current restore action"""
- inc_string = ','.join(map(lambda x: x.path, self.inc_list))
+ inc_string = ','.join([inc.path for inc in self.inc_list])
Log("Restoring %s with increments %s to %s" %
(self.mirror and self.mirror.path,
inc_string, self.target.path), 5)
@@ -266,7 +334,7 @@ class RestoreCombinedData:
"""Hard link target and return true if hard linking appropriate"""
if (Globals.preserve_hardlinks and
Hardlink.restore_link(self.index, self.target)):
- RPath.copy_attribs(self.inc_list and inc_list[-1] or
+ RPath.copy_attribs(self.inc_list and self.inc_list[-1] or
self.mirror, self.target)
return 1
return None
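[The new get_rest_time above snaps a requested restore time down to the newest increment time that is not later than it, so the restore matches what was actually on the mirror at that moment. A sketch of that rule, with plain integers standing in for increment timestamps:]

# Sketch of get_rest_time's "snap down to the older increment" rule.
def snap_rest_time(requested, inc_times):
    older = [t for t in inc_times if t <= requested]
    return max(older) if older else requested

print(snap_rest_time(25000, [10000, 20000, 30000]))   # -> 20000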
diff --git a/rdiff-backup/rdiff_backup/robust.py b/rdiff-backup/rdiff_backup/robust.py
index 22f35b9..a71eabc 100644
--- a/rdiff-backup/rdiff_backup/robust.py
+++ b/rdiff-backup/rdiff_backup/robust.py
@@ -252,7 +252,7 @@ class TempFile(RPath):
if self.isdir() and not rp_dest.isdir():
# Cannot move a directory directly over another file
rp_dest.delete()
- if (isinstance(rp_dest, DSRPath) and rp_dest.perms_delayed
+ if (isinstance(rp_dest, DSRPath) and rp_dest.delay_perms
and not self.hasfullperms()):
# If we are moving to a delayed perm directory, delay
# permission change on destination.
@@ -531,7 +531,7 @@ class Resume:
Log("Last backup dated %s was aborted, but we aren't "
"resuming it." % Time.timetopretty(si.time), 2)
return None
- assert 0
+ assert None
MakeClass(Resume)
diff --git a/rdiff-backup/rdiff_backup/rpath.py b/rdiff-backup/rdiff_backup/rpath.py
index 0089bf6..110b89f 100644
--- a/rdiff-backup/rdiff_backup/rpath.py
+++ b/rdiff-backup/rdiff_backup/rpath.py
@@ -168,6 +168,17 @@ class RPathStatic:
rp_dest.data = rp_source.data
rp_source.data = {'type': None}
+ # If we are moving to a DSRPath, assume that the current times
+ # are the intended ones. We need to save them now in case
+ # they are changed later.
+ if isinstance(rp_dest, DSRPath):
+ if rp_dest.delay_mtime:
+ if 'mtime' in rp_dest.data:
+ rp_dest.setmtime(rp_dest.data['mtime'])
+ if rp_dest.delay_atime:
+ if 'atime' in rp_dest.data:
+ rp_dest.setatime(rp_dest.data['atime'])
+
def tupled_lstat(filename):
"""Like os.lstat, but return only a tuple, or None if os.error
@@ -413,7 +424,7 @@ class RPath(RORPath):
self.base = base
self.path = apply(os.path.join, (base,) + self.index)
self.file = None
- if data: self.data = data
+ if data or base is None: self.data = data
else: self.setdata()
def __str__(self):
@@ -493,6 +504,12 @@ class RPath(RORPath):
s = self.conn.reval("lambda path: os.lstat(path).st_rdev", self.path)
return (s >> 8, s & 0xff)
+ def quote_path(self):
+ """Set path from quoted version of index"""
+ quoted_list = [FilenameMapping.quote(path) for path in self.index]
+ self.path = apply(os.path.join, [self.base] + quoted_list)
+ self.setdata()
+
def chmod(self, permissions):
"""Wrapper around os.chmod"""
self.conn.os.chmod(self.path, permissions)
@@ -594,7 +611,8 @@ class RPath(RORPath):
if not self.lstat(): return # must have been deleted in meantime
elif self.isdir():
itm = RpathDeleter()
- for dsrp in Select(self, None).set_iter(): itm(dsrp.index, dsrp)
+ for dsrp in Select(DSRPath(None, self)).set_iter():
+ itm(dsrp.index, dsrp)
itm.Finish()
else: self.conn.os.unlink(self.path)
self.setdata()
@@ -616,7 +634,7 @@ class RPath(RORPath):
self.path.split("/")))
if self.path[0] == "/": newpath = "/" + newpath
elif not newpath: newpath = "."
- return self.__class__(self.conn, newpath, ())
+ return self.newpath(newpath)
def dirsplit(self):
"""Returns a tuple of strings (dirname, basename)
@@ -635,10 +653,20 @@ class RPath(RORPath):
comps = normed.path.split("/")
return "/".join(comps[:-1]), comps[-1]
+ def newpath(self, newpath, index = ()):
+ """Return new RPath with the same connection but different path"""
+ return self.__class__(self.conn, newpath, index)
+
def append(self, ext):
"""Return new RPath with same connection by adjoing ext"""
return self.__class__(self.conn, self.base, self.index + (ext,))
+ def append_path(self, ext, new_index = ()):
+ """Like append, but add ext to path instead of to index"""
+ assert not self.index # doesn't make sense if index isn't ()
+ return self.__class__(self.conn, os.path.join(self.base, ext),
+ new_index)
+
def new_index(self, index):
"""Return similar RPath but with new index"""
return self.__class__(self.conn, self.base, index)
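[quote_path above rebuilds self.path by quoting each index component with FilenameMapping.quote. Since that mapping is defined elsewhere in the tree, the sketch below uses a hypothetical percent-style quoting rule purely to show the join-over-quoted-components idea:]

# Illustrative sketch of quote_path; the quote() rule here is a stand-in,
# not FilenameMapping.quote.
import os

def quote(name, chars=";"):                  # hypothetical quoting rule
    return "".join("%%%02X" % ord(c) if c in chars else c for c in name)

base, index = "/backup", ("dir;1", "file;2")
path = os.path.join(base, *[quote(c) for c in index])
print(path)                                  # /backup/dir%3B1/file%3B2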
diff --git a/rdiff-backup/rdiff_backup/selection.py b/rdiff-backup/rdiff_backup/selection.py
index aaa8639..cae6db3 100644
--- a/rdiff-backup/rdiff_backup/selection.py
+++ b/rdiff-backup/rdiff_backup/selection.py
@@ -60,21 +60,20 @@ class Select:
# This re should not match normal filenames, but usually just globs
glob_re = re.compile("(.*[*?[]|ignorecase\\:)", re.I | re.S)
- def __init__(self, rpath, source):
- """DSRPIterator initializer.
+ def __init__(self, dsrpath, quoted_filenames = None):
+ """DSRPIterator initializer. dsrp is the root directory
- rpath is the root dir. Source is true if rpath is the root of
- the source directory, and false for the mirror directory
+ When files have quoted characters in them, quoted_filenames
+ should be true. Then RPath's index will be the unquoted
+ version.
"""
- assert isinstance(rpath, RPath)
+ assert isinstance(dsrpath, DSRPath)
self.selection_functions = []
- self.source = source
- if isinstance(rpath, DSRPath): self.dsrpath = rpath
- else: self.dsrpath = DSRPath(rpath.conn, rpath.base,
- rpath.index, rpath.data)
+ self.dsrpath = dsrpath
self.prefix = self.dsrpath.path
-
+ self.quoting_on = Globals.quoting_enabled and quoted_filenames
+
def set_iter(self, starting_index = None, sel_func = None):
"""Initialize more variables, get ready to iterate
@@ -106,7 +105,7 @@ class Select:
"""
s = sel_func(dsrpath)
- if s === 0: return
+ if s == 0: return
elif s == 1: # File is included
yield dsrpath
if dsrpath.isdir():
@@ -122,11 +121,15 @@ class Select:
def iterate_in_dir(self, dsrpath, rec_func, sel_func):
"""Iterate the dsrps in directory dsrpath."""
- dir_listing = dsrpath.listdir()
- dir_listing.sort()
- for filename in dir_listing:
- for dsrp in rec_func(dsrpath.append(filename), rec_func, sel_func):
- yield dsrp
+ if self.quoting_on:
+ for subdir in FilenameMapping.get_quoted_dir_children(dsrpath):
+ for dsrp in rec_func(subdir, rec_func, sel_func): yield dsrp
+ else:
+ dir_listing = dsrpath.listdir()
+ dir_listing.sort()
+ for filename in dir_listing:
+ for dsrp in rec_func(dsrpath.append(filename),
+ rec_func, sel_func): yield dsrp
def iterate_starting_from(self, dsrpath, rec_func, sel_func):
"""Like Iterate, but only yield indicies > self.starting_index"""