author | ben <ben@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109> | 2002-05-10 23:14:35 +0000 |
---|---|---|
committer | ben <ben@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109> | 2002-05-10 23:14:35 +0000 |
commit | 807241bc4f322edc6f95782291900362484263df (patch) | |
tree | 95dc93d87081ab901e31844867d4facca6207aac | |
parent | 5c059e737511644b0056b8326b52763c82efcac4 (diff) | |
download | rdiff-backup-807241bc4f322edc6f95782291900362484263df.tar.gz | |
Lots of changes, see changelog for 0.7.4.
git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup/trunk@72 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
25 files changed, 879 insertions, 643 deletions
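
One of the more substantial changes in this patch is that `LowLevelPipeConnection` learns a new `d` format code so a `DSRPath` can cross the pipe: the sender pickles a `(connection_number, state_dict)` pair produced by `getstatedict()`, and the receiver rebuilds the object by constructing an empty "bypass" `DSRPath`, applying the saved state, and re-attaching the right connection. The sketch below only illustrates that framing and round-trip idea and is not rdiff-backup code: `FakeDSRPath`, `send`, `send_dsrpath`, and `receive` are invented stand-ins, the header layout is simplified, and the real implementation uses `cPickle`, its own `_l2s`/`_s2l` length encoding, and `Globals.connection_dict` rather than the plain `pickle` and `struct` calls shown here.

```python
import pickle
import struct
from io import BytesIO


class FakeDSRPath:
    """Hypothetical stand-in for DSRPath; only picklable state crosses the pipe."""
    def __init__(self, base, index, delay_perms=None):
        self.base, self.index, self.delay_perms = base, index, delay_perms

    def getstatedict(self):
        # Analogous in spirit to DSRPath.getstatedict(): collect savable attributes.
        return {"base": self.base, "index": self.index,
                "delay_perms": self.delay_perms}


def send(pipe, format_code, payload, req_num):
    # Simplified frame: 1-byte format code, 1-byte request number,
    # 4-byte big-endian length, then the payload itself.
    pipe.write(format_code + bytes([req_num]) + struct.pack(">I", len(payload)))
    pipe.write(payload)


def send_dsrpath(pipe, dsrp, conn_number, req_num):
    # Like _putdsrpath: ship (connection number, state dict), never the live object.
    send(pipe, b"d", pickle.dumps((conn_number, dsrp.getstatedict())), req_num)


def receive(pipe):
    header = pipe.read(6)
    format_code, req_num = header[:1], header[1]
    length = struct.unpack(">I", header[2:6])[0]
    data = pipe.read(length)
    if format_code == b"d":
        # Like _getdsrpath: start from an "empty" object, then restore its state.
        conn_number, state = pickle.loads(data)
        dsrp = FakeDSRPath(None, None)
        dsrp.__dict__.update(state)
        return req_num, conn_number, dsrp
    raise ValueError("unhandled format code %r" % format_code)


if __name__ == "__main__":
    pipe = BytesIO()
    send_dsrpath(pipe, FakeDSRPath("/backup", ("somedir", "file")), 0, req_num=3)
    pipe.seek(0)
    req_num, conn_number, dsrp = receive(pipe)
    print(req_num, conn_number, dsrp.base, dsrp.index, dsrp.delay_perms)
```

The same state-dict approach is what makes the rest of the patch hang together: because a `DSRPath` now remembers its `source` flag and delayed permission/time fields in picklable form, the restore and quoting code below can pass these objects between connections instead of reconstructing plain `RPath`s.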
diff --git a/rdiff-backup/rdiff_backup/connection.py b/rdiff-backup/rdiff_backup/connection.py index 9842480..57d2fa5 100644 --- a/rdiff-backup/rdiff_backup/connection.py +++ b/rdiff-backup/rdiff_backup/connection.py @@ -92,6 +92,7 @@ class LowLevelPipeConnection(Connection): b - string q - quit signal t - TempFile + d - DSRPath R - RPath r - RORPath only c - PipeConnection object @@ -118,6 +119,7 @@ class LowLevelPipeConnection(Connection): if type(obj) is types.StringType: self._putbuf(obj, req_num) elif isinstance(obj, Connection): self._putconn(obj, req_num) elif isinstance(obj, TempFile): self._puttempfile(obj, req_num) + elif isinstance(obj, DSRPath): self._putdsrpath(obj, req_num) elif isinstance(obj, RPath): self._putrpath(obj, req_num) elif isinstance(obj, RORPath): self._putrorpath(obj, req_num) elif ((hasattr(obj, "read") or hasattr(obj, "write")) @@ -148,6 +150,11 @@ class LowLevelPipeConnection(Connection): tempfile.index, tempfile.data) self._write("t", cPickle.dumps(tf_repr, 1), req_num) + def _putdsrpath(self, dsrpath, req_num): + """Put DSRPath into pipe. See _putrpath""" + dsrpath_repr = (dsrpath.conn.conn_number, dsrpath.getstatedict()) + self._write("d", cPickle.dumps(dsrpath_repr, 1), req_num) + def _putrpath(self, rpath, req_num): """Put an rpath into the pipe @@ -219,23 +226,22 @@ class LowLevelPipeConnection(Connection): ord(header_string[1]), self._s2l(header_string[2:])) except IndexError: raise ConnectionError() - if format_string == "o": result = cPickle.loads(self._read(length)) - elif format_string == "b": result = self._read(length) - elif format_string == "f": - result = VirtualFile(self, int(self._read(length))) + if format_string == "q": raise ConnectionQuit("Received quit signal") + + data = self._read(length) + if format_string == "o": result = cPickle.loads(data) + elif format_string == "b": result = data + elif format_string == "f": result = VirtualFile(self, int(data)) elif format_string == "i": - result = RORPIter.FromFile(BufferedRead( - VirtualFile(self, int(self._read(length))))) - elif format_string == "t": - result = self._gettempfile(self._read(length)) - elif format_string == "r": - result = self._getrorpath(self._read(length)) - elif format_string == "R": result = self._getrpath(self._read(length)) - elif format_string == "c": - result = Globals.connection_dict[int(self._read(length))] + result = RORPIter.FromFile(BufferedRead(VirtualFile(self, + int(data)))) + elif format_string == "t": result = self._gettempfile(data) + elif format_string == "r": result = self._getrorpath(data) + elif format_string == "R": result = self._getrpath(data) + elif format_string == "d": result = self._getdsrpath(data) else: - assert format_string == "q", header_string - raise ConnectionQuit("Received quit signal") + assert format_string == "c", header_string + result = Globals.connection_dict[int(data)] Log.conn("received", result, req_num) return (req_num, result) @@ -255,6 +261,15 @@ class LowLevelPipeConnection(Connection): conn_number, base, index, data = cPickle.loads(raw_rpath_buf) return RPath(Globals.connection_dict[conn_number], base, index, data) + def _getdsrpath(self, raw_dsrpath_buf): + """Return DSRPath object indicated by buf""" + conn_number, state_dict = cPickle.loads(raw_dsrpath_buf) + empty_dsrp = DSRPath("bypass", Globals.local_connection, None) + empty_dsrp.__setstate__(state_dict) + empty_dsrp.conn = Globals.connection_dict[conn_number] + empty_dsrp.file = None + return empty_dsrp + def _close(self): """Close the pipes associated with the 
connection""" self.outpipe.close() diff --git a/rdiff-backup/rdiff_backup/destructive_stepping.py b/rdiff-backup/rdiff_backup/destructive_stepping.py index c5e2faa..ff3b42a 100644 --- a/rdiff-backup/rdiff_backup/destructive_stepping.py +++ b/rdiff-backup/rdiff_backup/destructive_stepping.py @@ -1,4 +1,5 @@ from __future__ import generators +import types execfile("rorpiter.py") ####################################################################### @@ -40,13 +41,17 @@ class DSRPath(RPath): otherwise use the same arguments as the RPath initializer. """ - if len(args) == 2 and isinstance(args[0], RPath): + if len(args) == 1 and isinstance(args[0], RPath): rp = args[0] RPath.__init__(self, rp.conn, rp.base, rp.index) else: RPath.__init__(self, *args) - self.set_delays(source) - self.set_init_perms(source) + if source != "bypass": + # "bypass" val is used when unpackaging over connection + assert source is None or source is 1 + self.source = source + self.set_delays(source) + self.set_init_perms(source) def set_delays(self, source): """Delay writing permissions and times where appropriate""" @@ -59,13 +64,14 @@ class DSRPath(RPath): # Now get atime right away if possible if self.data.has_key('atime'): self.newatime = self.data['atime'] else: self.newatime = None + else: self.delay_atime = None if source: self.delay_mtime = None # we'll never change mtime of source file else: self.delay_mtime = 1 # Save mtime now for a dir, because it might inadvertantly change - if self.isdir(): self.newmtime = self.getmtime() + if self.isdir(): self.newmtime = self.data['mtime'] else: self.newmtime = None def set_init_perms(self, source): @@ -75,26 +81,30 @@ class DSRPath(RPath): self.chmod_bypass(0400) else: self.warn("No read permissions") elif self.isdir(): - if source and (not self.readable() or self.executable()): + if source and (not self.readable() or not self.executable()): if Globals.change_source_perms and self.isowner(): self.chmod_bypass(0500) - else: warn("No read or exec permission") + else: self.warn("No read or exec permission") elif not source and not self.hasfullperms(): self.chmod_bypass(0700) def warn(self, err): Log("Received error '%s' when dealing with file %s, skipping..." % (err, self.path), 1) - raise DSRPermError(self.path) + raise DSRPPermError(self.path) def __getstate__(self): """Return picklable state. 
See RPath __getstate__.""" assert self.conn is Globals.local_connection # Can't pickle a conn + return self.getstatedict() + + def getstatedict(self): + """Return dictionary containing the attributes we can save""" pickle_dict = {} for attrib in ['index', 'data', 'delay_perms', 'newperms', 'delay_atime', 'newatime', 'delay_mtime', 'newmtime', - 'path', 'base']: + 'path', 'base', 'source']: if self.__dict__.has_key(attrib): pickle_dict[attrib] = self.__dict__[attrib] return pickle_dict @@ -110,10 +120,17 @@ class DSRPath(RPath): if self.delay_perms: self.newperms = self.data['perms'] = permissions else: RPath.chmod(self, permissions) + def getperms(self): + """Return dsrp's intended permissions""" + if self.delay_perms and self.newperms is not None: + return self.newperms + else: return self.data['perms'] + def chmod_bypass(self, permissions): """Change permissions without updating the data dictionary""" self.delay_perms = 1 if self.newperms is None: self.newperms = self.getperms() + Log("DSRP: Perm bypass %s to %o" % (self.path, permissions), 8) self.conn.os.chmod(self.path, permissions) def settime(self, accesstime, modtime): @@ -129,11 +146,25 @@ class DSRPath(RPath): if self.delay_mtime: self.newmtime = self.data['mtime'] = modtime else: RPath.setmtime(self, modtime) + def getmtime(self): + """Return dsrp's intended modification time""" + if self.delay_mtime and self.newmtime is not None: + return self.newmtime + else: return self.data['mtime'] + + def getatime(self): + """Return dsrp's intended access time""" + if self.delay_atime and self.newatime is not None: + return self.newatime + else: return self.data['atime'] + def write_changes(self): """Write saved up permission/time changes""" if not self.lstat(): return # File has been deleted in meantime if self.delay_perms and self.newperms is not None: + Log("Finalizing permissions of dsrp %s to %s" % + (self.path, self.newperms), 8) RPath.chmod(self, self.newperms) do_atime = self.delay_atime and self.newatime is not None @@ -145,6 +176,19 @@ class DSRPath(RPath): elif not do_atime and do_mtime: RPath.setmtime(self, self.newmtime) + def newpath(self, newpath, index = ()): + """Return similar DSRPath but with new path""" + return self.__class__(self.source, self.conn, newpath, index) + + def append(self, ext): + """Return similar DSRPath with new extension""" + return self.__class__(self.source, self.conn, self.base, + self.index + (ext,)) + + def new_index(self, index): + """Return similar DSRPath with new index""" + return self.__class__(self.source, self.conn, self.base, index) + class DestructiveSteppingFinalizer(IterTreeReducer): """Finalizer that can work on an iterator of dsrpaths @@ -155,11 +199,12 @@ class DestructiveSteppingFinalizer(IterTreeReducer): coming back to it. """ + dsrpath = None def start_process(self, index, dsrpath): self.dsrpath = dsrpath def end_process(self): - self.dsrpath.write_changes() + if self.dsrpath: self.dsrpath.write_changes() diff --git a/rdiff-backup/rdiff_backup/highlevel.py b/rdiff-backup/rdiff_backup/highlevel.py index d0bc743..36ba55a 100644 --- a/rdiff-backup/rdiff_backup/highlevel.py +++ b/rdiff-backup/rdiff_backup/highlevel.py @@ -24,12 +24,14 @@ class HighLevel: accompanying diagram. """ - def Mirror(src_rpath, dest_rpath, checkpoint = 1, session_info = None): + def Mirror(src_rpath, dest_rpath, checkpoint = 1, + session_info = None, write_finaldata = 1): """Turn dest_rpath into a copy of src_rpath Checkpoint true means to checkpoint periodically, otherwise not. 
If session_info is given, try to resume Mirroring from - that point. + that point. If write_finaldata is true, save extra data files + like hardlink_data. If it is false, make a complete mirror. """ SourceS = src_rpath.conn.HLSourceStruct @@ -40,7 +42,8 @@ class HighLevel: src_init_dsiter = SourceS.split_initial_dsiter() dest_sigiter = DestS.get_sigs(dest_rpath, src_init_dsiter) diffiter = SourceS.get_diffs_and_finalize(dest_sigiter) - DestS.patch_and_finalize(dest_rpath, diffiter, checkpoint) + DestS.patch_and_finalize(dest_rpath, diffiter, + checkpoint, write_finaldata) dest_rpath.setdata() @@ -61,24 +64,6 @@ class HighLevel: dest_rpath.setdata() inc_rpath.setdata() - def Restore(rest_time, mirror_base, rel_index, baseinc_tup, target_base): - """Like Restore.RestoreRecursive but check arguments""" - if (Globals.preserve_hardlinks != 0 and - Hardlink.retrieve_final(rest_time)): - Log("Hard link information found, attempting to preserve " - "hard links.", 4) - SetConnections.UpdateGlobal('preserve_hardlinks', 1) - else: SetConnections.UpdateGlobal('preserve_hardlinks', None) - - if not isinstance(target_base, DSRPath): - target_base = DSRPath(target_base.conn, target_base.base, - target_base.index, target_base.data) - if not isinstance(mirror_base, DSRPath): - mirror_base = DSRPath(mirror_base.conn, mirror_base.base, - mirror_base.index, mirror_base.data) - Restore.RestoreRecursive(rest_time, mirror_base, rel_index, - baseinc_tup, target_base) - MakeStatic(HighLevel) @@ -164,7 +149,7 @@ class HLDestinationStruct: def compare(src_rorp, dest_dsrp): """Return dest_dsrp if they are different, None if the same""" if not dest_dsrp: - dest_dsrp = DSRPath(baserp.conn, baserp.base, src_rorp.index) + dest_dsrp = cls.get_dsrp(baserp, src_rorp.index) if dest_dsrp.lstat(): Log("Warning: Found unexpected destination file %s, " "not processing it." 
% dest_dsrp.path, 2) @@ -203,8 +188,9 @@ class HLDestinationStruct: def get_dsrp(cls, dest_rpath, index): """Return initialized dsrp based on dest_rpath with given index""" - return DSRPath(source = None, dest_rpath.conn, - dest_rpath.base, index) + dsrp = DSRPath(None, dest_rpath.conn, dest_rpath.base, index) + if Globals.quoting_enabled: dsrp.quote_path() + return dsrp def get_finalizer(cls): """Return finalizer, starting from session info if necessary""" @@ -216,9 +202,13 @@ class HLDestinationStruct: """Return ITR, starting from state if necessary""" if cls._session_info and cls._session_info.ITR: return cls._session_info.ITR - else: return IncrementITR(inc_rpath) + else: + iitr = IncrementITR(inc_rpath) + iitr.override_changed() + return iitr - def patch_and_finalize(cls, dest_rpath, diffs, checkpoint = 1): + def patch_and_finalize(cls, dest_rpath, diffs, + checkpoint = 1, write_finaldata = 1): """Apply diffs and finalize""" collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2) finalizer = cls.get_finalizer() @@ -242,7 +232,7 @@ class HLDestinationStruct: if checkpoint: SaveState.checkpoint_mirror(finalizer, dsrp) except: cls.handle_last_error(dsrp, finalizer) finalizer.Finish() - if Globals.preserve_hardlinks and Globals.rbdir: + if Globals.preserve_hardlinks and write_finaldata: Hardlink.final_writedata() if checkpoint: SaveState.checkpoint_remove() @@ -300,8 +290,7 @@ class HLDestinationStruct: Log.exception(1) if ITR: SaveState.checkpoint_inc_backup(ITR, finalizer, dsrp, 1) else: SaveState.checkpoint_mirror(finalizer, dsrp, 1) - if Globals.preserve_hardlinks: - Hardlink.final_checkpoint(Globals.rbdir) + if Globals.preserve_hardlinks: Hardlink.final_checkpoint(Globals.rbdir) SaveState.touch_last_file_definitive() raise diff --git a/rdiff-backup/rdiff_backup/increment.py b/rdiff-backup/rdiff_backup/increment.py index 446806b..b28b315 100644 --- a/rdiff-backup/rdiff_backup/increment.py +++ b/rdiff-backup/rdiff_backup/increment.py @@ -1,4 +1,4 @@ -execfile("selection.py") +execfile("filename_mapping.py") ####################################################################### # @@ -85,10 +85,12 @@ class Inc: """Get new increment rp with given time suffix""" addtostr = lambda s: "%s.%s.%s" % (s, timestr, typestr) if rp.index: - return rp.__class__(rp.conn, rp.base, rp.index[:-1] + - (addtostr(rp.index[-1]),)) - else: return rp.__class__(rp.conn, addtostr(rp.base), rp.index) - + incrp = rp.__class__(rp.conn, rp.base, rp.index[:-1] + + (addtostr(rp.index[-1]),)) + else: incrp = rp.__class__(rp.conn, addtostr(rp.base), rp.index) + if Globals.quoting_enabled: incrp.quote_path() + return incrp + inctime = 0 while 1: inctime = Resume.FindTime(rp.index, inctime) @@ -123,7 +125,7 @@ class IncrementITR(IterTreeReducer): def __init__(self, inc_rpath): """Set inc_rpath, an rpath of the base of the tree""" self.inc_rpath = inc_rpath - IterTreeReducer.__init__(inc_rpath) + IterTreeReducer.__init__(self, inc_rpath) def start_process(self, index, diff_rorp, dsrp): """Initial processing of file @@ -133,11 +135,21 @@ class IncrementITR(IterTreeReducer): """ incpref = self.inc_rpath.new_index(index) + if Globals.quoting_enabled: incpref.quote_path() if dsrp.isdir(): self.init_dir(dsrp, diff_rorp, incpref) self.setvals(diff_rorp, dsrp, incpref) else: self.init_non_dir(dsrp, diff_rorp, incpref) + def override_changed(self): + """Set changed flag to true + + This is used only at the top level of a backup, to make sure + that a marker is created recording every backup session. 
+ + """ + self.changed = 1 + def setvals(self, diff_rorp, dsrp, incpref): """Record given values in state dict since in directory @@ -162,7 +174,7 @@ class IncrementITR(IterTreeReducer): """ if not (incpref.lstat() and incpref.isdir()): incpref.mkdir() if diff_rorp and diff_rorp.isreg() and diff_rorp.file: - tf = TempFileManager(dsrp) + tf = TempFileManager.new(dsrp) RPathStatic.copy_with_attribs(diff_rorp, tf) tf.set_attached_filetype(diff_rorp.get_attached_filetype()) self.directory_replacement = tf @@ -170,7 +182,7 @@ class IncrementITR(IterTreeReducer): def init_non_dir(self, dsrp, diff_rorp, incpref): """Process a non directory file (initial pass)""" if not diff_rorp: return # no diff, so no change necessary - if diff_rorp.isreg and (dsrp.isreg() or diff_rorp.isflaglinked()): + if diff_rorp.isreg() and (dsrp.isreg() or diff_rorp.isflaglinked()): tf = TempFileManager.new(dsrp) def init_thunk(): if diff_rorp.isflaglinked(): @@ -180,8 +192,8 @@ class IncrementITR(IterTreeReducer): Inc.Increment_action(tf, dsrp, incpref).execute() Robust.make_tf_robustaction(init_thunk, (tf,), (dsrp,)).execute() else: - Robust.chain([Inc.Increment_action(diff_rorp, dsrp, incref), - RORPIter.patchonce_action(none, dsrp, diff_rorp)] + Robust.chain([Inc.Increment_action(diff_rorp, dsrp, incpref), + RORPIter.patchonce_action(None, dsrp, diff_rorp)] ).execute() self.changed = 1 @@ -207,89 +219,3 @@ class IncrementITR(IterTreeReducer): - - - def make_patch_increment_ITR(inc_rpath, initial_state = None): - """Return IterTreeReducer that patches and increments""" - def base_init(indexed_tuple): - """Patch if appropriate, return (a,b) tuple - - a is true if found directory and thus didn't take action - - if a is false, b is true if some changes were made - - if a is true, b is the rp of a temporary file used to hold - the diff_rorp's data (for dir -> normal file change), and - false if none was necessary. - - """ - diff_rorp, dsrp = indexed_tuple - incpref = inc_rpath.new_index(indexed_tuple.index) - if dsrp.isdir(): return init_dir(dsrp, diff_rorp, incpref) - else: return init_non_dir(dsrp, diff_rorp, incpref) - - def init_dir(dsrp, diff_rorp, incpref): - """Initial processing of a directory - - Make the corresponding directory right away, but wait - until the end to write the replacement. However, if the - diff_rorp contains data, we must write it locally before - continuing, or else that data will be lost in the stream. - - """ - if not (incpref.lstat() and incpref.isdir()): incpref.mkdir() - if diff_rorp and diff_rorp.isreg() and diff_rorp.file: - tf = TempFileManager.new(dsrp) - RPathStatic.copy_with_attribs(diff_rorp, tf) - tf.set_attached_filetype(diff_rorp.get_attached_filetype()) - return (1, tf) - else: return (1, None) - - def init_non_dir(dsrp, diff_rorp, incpref): - """Initial processing of non-directory - - If a reverse diff is called for it is generated by apply - the forwards diff first on a temporary file. 
- - """ - if diff_rorp: - if diff_rorp.isreg() and (dsrp.isreg() or - diff_rorp.isflaglinked()): - tf = TempFileManager.new(dsrp) - def init_thunk(): - if diff_rorp.isflaglinked(): - Hardlink.link_rp(diff_rorp, tf, dsrp) - else: Rdiff.patch_with_attribs_action(dsrp, diff_rorp, - tf).execute() - Inc.Increment_action(tf, dsrp, incpref).execute() - Robust.make_tf_robustaction(init_thunk, (tf,), - (dsrp,)).execute() - else: - Robust.chain([Inc.Increment_action(diff_rorp, dsrp, - incpref), - RORPIter.patchonce_action( - None, dsrp, diff_rorp)]).execute() - return (None, 1) - return (None, None) - - def base_final(base_tuple, base_init_tuple, changed): - """Patch directory if not done, return true iff made change""" - if base_init_tuple[0]: # was directory - diff_rorp, dsrp = base_tuple - if changed or diff_rorp: - if base_init_tuple[1]: diff_rorp = base_init_tuple[1] - Inc.Increment(diff_rorp, dsrp, - inc_rpath.new_index(base_tuple.index)) - if diff_rorp: - RORPIter.patchonce_action(None, dsrp, - diff_rorp).execute() - if isinstance(diff_rorp, TempFile): diff_rorp.delete() - return 1 - return None - else: # changed iff base_init_tuple says it was - return base_init_tuple[1] - - return IterTreeReducer(base_init, lambda x,y: x or y, None, - base_final, initial_state) - - diff --git a/rdiff-backup/rdiff_backup/lazy.py b/rdiff-backup/rdiff_backup/lazy.py index 80cfa95..1bb2e2c 100644 --- a/rdiff-backup/rdiff_backup/lazy.py +++ b/rdiff-backup/rdiff_backup/lazy.py @@ -260,7 +260,7 @@ class IterTreeReducer: """ index = args[0] - assert type(index) is types.TupleType + assert type(index) is types.TupleType, type(index) if self.index is None: self.start_process(*args) diff --git a/rdiff-backup/rdiff_backup/manage.py b/rdiff-backup/rdiff_backup/manage.py index c0f4a85..0c08872 100644 --- a/rdiff-backup/rdiff_backup/manage.py +++ b/rdiff-backup/rdiff_backup/manage.py @@ -12,37 +12,53 @@ class Manage: """Return Increments objects given the rdiff-backup data directory""" return map(IncObj, Manage.find_incrps_with_base(datadir, "increments")) - def find_incrps_with_base(dir_rp, basename): - """Return list of incfiles with given basename in dir_rp""" - rps = map(dir_rp.append, dir_rp.listdir()) - incrps = filter(RPath.isincfile, rps) - result = filter(lambda rp: rp.getincbase_str() == basename, incrps) - Log("find_incrps_with_base: found %d incs" % len(result), 6) - return result + def get_file_type(rp): + """Returns one of "regular", "directory", "missing", or "special".""" + if not rp.lstat(): return "missing" + elif rp.isdir(): return "directory" + elif rp.isreg(): return "regular" + else: return "special" - def describe_root_incs(datadir): + def get_inc_type(inc): + """Return file type increment represents""" + assert inc.isincfile() + type = inc.getinctype() + if type == "dir": return "directory" + elif type == "diff": return "regular" + elif type == "missing": return "missing" + elif type == "snapshot": return Manage.get_file_type(inc) + else: assert None, "Unknown type %s" % (type,) + + def describe_incs_parsable(incs, mirror_time, mirrorrp): + """Return a string parsable by computer describing the increments + + Each line is a time in seconds of the increment, and then the + type of the file. It will be sorted oldest to newest. 
For example: + + 10000 regular + 20000 directory + 30000 special + 40000 missing + 50000 regular <- last will be the current mirror + + """ + incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs] + incpairs.sort() + result = ["%s %s" % (time, Manage.get_inc_type(inc)) + for time, inc in incpairs] + result.append("%s %s" % (mirror_time, Manage.get_file_type(mirrorrp))) + return "\n".join(result) + + def describe_incs_human(incs, mirror_time, mirrorrp): """Return a string describing all the the root increments""" - result = [] - currentrps = Manage.find_incrps_with_base(datadir, "current_mirror") - if not currentrps: - Log("Warning: no current mirror marker found", 1) - elif len(currentrps) > 1: - Log("Warning: multiple mirror markers found", 1) - for rp in currentrps: - result.append("Found mirror marker %s" % rp.path) - result.append("Indicating latest mirror taken at %s" % - Time.stringtopretty(rp.getinctime())) - result.append("---------------------------------------------" - "-------------") - - # Sort so they are in reverse order by time - time_w_incobjs = map(lambda io: (-io.time, io), - Manage.get_incobjs(datadir)) - time_w_incobjs.sort() - incobjs = map(lambda x: x[1], time_w_incobjs) - result.append("Found %d increments:" % len(incobjs)) - result.append("\n------------------------------------------\n".join( - map(IncObj.full_description, incobjs))) + incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs] + incpairs.sort() + + result = ["Found %d increments:" % len(incpairs)] + for time, inc in incpairs: + result.append(" %s %s" % + (inc.dirsplit()[1], Time.timetopretty(time))) + result.append("Current mirror: %s" % Time.timetopretty(mirror_time)) return "\n".join(result) def delete_earlier_than(baserp, time): @@ -53,6 +69,11 @@ class Manage: rdiff-backup-data directory should be the root of the tree. """ + baserp.conn.Manage.delete_earlier_than_local(baserp, time) + + def delete_earlier_than_local(baserp, time): + """Like delete_earlier_than, but run on local connection for speed""" + assert baserp.conn is Globals.local_connection def yield_files(rp): yield rp if rp.isdir(): diff --git a/rdiff-backup/rdiff_backup/restore.py b/rdiff-backup/rdiff_backup/restore.py index dcba7f3..0faa9b2 100644 --- a/rdiff-backup/rdiff_backup/restore.py +++ b/rdiff-backup/rdiff_backup/restore.py @@ -24,32 +24,78 @@ class Restore: same index as mirror. 
""" - if not isinstance(mirror, DSRPath): - mirror = DSRPath(source = 1, mirror) - if not isinstance(target, DSRPath): - target = DSRPath(source = None, target) + if not isinstance(mirror, DSRPath): mirror = DSRPath(1, mirror) + if not isinstance(target, DSRPath): target = DSRPath(None, target) + + mirror_time = Restore.get_mirror_time() + rest_time = Restore.get_rest_time(rest_time) + inc_list = Restore.get_inclist(inc_rpath) + rid = RestoreIncrementData(inc_rpath.index, inc_rpath, inc_list) + rid.sortincseq(rest_time, mirror_time) + Restore.check_hardlinks(rest_time) + Restore.restore_recursive(inc_rpath.index, mirror, rid, target, + rest_time, mirror_time) + + def get_mirror_time(): + """Return the time (in seconds) of latest mirror""" + current_mirror_incs = \ + Restore.get_inclist(Globals.rbdir.append("current_mirror")) + if not current_mirror_incs: + Log.FatalError("Could not get time of current mirror") + elif len(current_mirror_incs) > 1: + Log("Warning, two different dates for current mirror found", 2) + return Time.stringtotime(current_mirror_incs[0].getinctime()) + + def get_rest_time(old_rest_time): + """If old_rest_time is between two increments, return older time + + There is a slightly tricky reason for doing this: The rest of + the code just ignores increments that are older than + rest_time. But sometimes we want to consider the very next + increment older than rest time, because rest_time will be + between two increments, and what was actually on the mirror + side will correspond to the older one. + + So here we assume all rdiff-backup events were recorded in + "increments" increments, and if its in-between we pick the + older one here. + """ + base_incs = Restore.get_inclist(Globals.rbdir.append("increments")) + if not base_incs: return old_rest_time + inctimes = [Time.stringtotime(inc.getinctime()) for inc in base_incs] + return max(filter(lambda time: time <= old_rest_time, inctimes)) + + def get_inclist(inc_rpath): + """Returns increments with given base""" dirname, basename = inc_rpath.dirsplit() parent_dir = RPath(inc_rpath.conn, dirname, ()) index = inc_rpath.index - if inc_rpath.index: + if index: get_inc_ext = lambda filename: \ RPath(inc_rpath.conn, inc_rpath.base, inc_rpath.index[:-1] + (filename,)) else: get_inc_ext = lambda filename: \ - RPath(inc_rpath.conn, os.join(dirname, filename)) + RPath(inc_rpath.conn, os.path.join(dirname, filename)) inc_list = [] for filename in parent_dir.listdir(): inc = get_inc_ext(filename) - if inc.getincbase_str() == basename: inc_list.append(inc) - - rid = RestoreIncrementData(index, inc_rpath, inc_list) - rid.sortincseq(rest_time) - Restore.restore_recursive(index, mirror, rid, target, rest_time) - - def restore_recursive(index, mirror, rid, target, time): + if inc.isincfile() and inc.getincbase_str() == basename: + inc_list.append(inc) + return inc_list + + def check_hardlinks(rest_time): + """Check for hard links and enable hard link support if found""" + if (Globals.preserve_hardlinks != 0 and + Hardlink.retrieve_final(rest_time)): + Log("Hard link information found, attempting to preserve " + "hard links.", 5) + SetConnections.UpdateGlobal('preserve_hardlinks', 1) + else: SetConnections.UpdateGlobal('preserve_hardlinks', None) + + def restore_recursive(index, mirror, rid, target, time, mirror_time): """Recursive restore function. 
rid is a RestoreIncrementData object whose inclist is already @@ -66,14 +112,15 @@ class Restore: mirror_finalizer = DestructiveSteppingFinalizer() target_finalizer = DestructiveSteppingFinalizer() - for rcd in Restore.yield_rcds(rid.index, mirror, rid, target, time): + for rcd in Restore.yield_rcds(rid.index, mirror, rid, + target, time, mirror_time): rcd.RestoreFile() - if rcd.mirror: mirror_finalizer(rcd.mirror) - target_finalizer(rcd.target) + if rcd.mirror: mirror_finalizer(rcd.index, rcd.mirror) + target_finalizer(rcd.target.index, rcd.target) target_finalizer.Finish() mirror_finalizer.Finish() - def yield_rcds(index, mirrorrp, rid, target, rest_time): + def yield_rcds(index, mirrorrp, rid, target, rest_time, mirror_time): """Iterate RestoreCombinedData objects starting with given args rid is a RestoreCombinedData object. target is an rpath where @@ -91,9 +138,10 @@ class Restore: mirrorrp = None rcd = RestoreCombinedData(rid, mirrorrp, target) - if mirrorrp and mirrorrp.isdir() or rid and rid.inc_rpath.isdir(): + if mirrorrp and mirrorrp.isdir() or \ + rid and rid.inc_rpath and rid.inc_rpath.isdir(): sub_rcds = Restore.yield_sub_rcds(index, mirrorrp, rid, - target, rest_time) + target, rest_time, mirror_time) else: sub_rcds = None if select_result == 1: @@ -108,35 +156,39 @@ class Restore: yield first for sub_rcd in sub_rcds: yield sub_rcd - def yield_collated_tuples_dir(index, mirrorrp, rid, target, rest_time): + def yield_sub_rcds(index, mirrorrp, rid, target, rest_time, mirror_time): """Yield collated tuples from inside given args""" - if not Restore.check_dir_exists(mirrorrp, inc_tup): return + if not Restore.check_dir_exists(mirrorrp, rid): return mirror_iter = Restore.yield_mirrorrps(mirrorrp) - rid_iter = Restore.get_rids(rid, rest_time) + rid_iter = Restore.yield_rids(rid, rest_time, mirror_time) for indexed_tup in RORPIter.CollateIterators(mirror_iter, rid_iter): index = indexed_tup.index new_mirrorrp, new_rid = indexed_tup - for rcd in Restore.yield_collated_tuples(index, new_mirrorrp, - new_rid, target.new_index(index), rest_time): + for rcd in Restore.yield_rcds(index, new_mirrorrp, + new_rid, target.append(index[-1]), rest_time, mirror_time): yield rcd - def check_dir_exists(mirrorrp, inc_tuple): + def check_dir_exists(mirrorrp, rid): """Return true if target should be a directory""" - if inc_tuple and inc_tuple[1]: + if rid and rid.inc_list: # Incs say dir if last (earliest) one is a dir increment - return inc_tuple[1][-1].getinctype() == "dir" + return rid.inc_list[-1].getinctype() == "dir" elif mirrorrp: return mirrorrp.isdir() # if no incs, copy mirror else: return None def yield_mirrorrps(mirrorrp): """Yield mirrorrps underneath given mirrorrp""" if mirrorrp and mirrorrp.isdir(): - dirlist = mirrorrp.listdir() - dirlist.sort() - for filename in dirlist: yield mirrorrp.append(filename) - - def yield_rids(rid, rest_time): + if Globals.quoting_enabled: + for rp in FilenameMapping.get_quoted_dir_children(mirrorrp): + yield rp + else: + dirlist = mirrorrp.listdir() + dirlist.sort() + for filename in dirlist: yield mirrorrp.append(filename) + + def yield_rids(rid, rest_time, mirror_time): """Yield RestoreIncrementData objects within given rid dir If the rid doesn't correspond to a directory, don't yield any @@ -148,16 +200,19 @@ class Restore: if not rid or not rid.inc_rpath or not rid.inc_rpath.isdir(): return rid_dict = {} # dictionary of basenames:rids dirlist = rid.inc_rpath.listdir() + if Globals.quoting_enabled: + dirlist = [FilenameMapping.unquote(fn) for fn in 
dirlist] def affirm_dict_indexed(basename): """Make sure the rid dictionary has given basename as key""" - if not inc_list_dict.has_key(basename): + if not rid_dict.has_key(basename): rid_dict[basename] = RestoreIncrementData( rid.index + (basename,), None, []) # init with empty rid def add_to_dict(filename): """Add filename to the inc tuple dictionary""" rp = rid.inc_rpath.append(filename) + if Globals.quoting_enabled: rp.quote_path() if rp.isincfile(): basename = rp.getincbase_str() affirm_dict_indexed(basename) @@ -167,14 +222,14 @@ class Restore: rid_dict[filename].inc_rpath = rp for filename in dirlist: add_to_dict(filename) - keys = inc_list_dict.keys() + keys = rid_dict.keys() keys.sort() # sortincseq now to avoid descending .missing directories later for key in keys: rid = rid_dict[key] if rid.inc_rpath or rid.inc_list: - rid.sortincseq(rest_time) + rid.sortincseq(rest_time, mirror_time) yield rid MakeStatic(Restore) @@ -192,26 +247,36 @@ class RestoreIncrementData: self.inc_rpath = inc_rpath self.inc_list = inc_list - def sortincseq(self, rest_time): + def sortincseq(self, rest_time, mirror_time): """Sort self.inc_list sequence, throwing away irrelevant increments""" - incpairs = map(lambda rp: (Time.stringtotime(rp.getinctime()), rp), - self.inc_list) - # Only consider increments at or after the time being restored - incpairs = filter(lambda pair: pair[0] >= rest_time, incpairs) + if not self.inc_list or rest_time >= mirror_time: + self.inc_list = [] + return - # Now throw away older unnecessary increments - incpairs.sort() + newer_incs = self.get_newer_incs(rest_time, mirror_time) i = 0 - while(i < len(incpairs)): + while(i < len(newer_incs)): # Only diff type increments require later versions - if incpairs[i][1].getinctype() != "diff": break + if newer_incs[i].getinctype() != "diff": break i = i+1 - incpairs = incpairs[:i+1] + self.inc_list = newer_incs[:i+1] + self.inc_list.reverse() # return in reversed order (latest first) + + def get_newer_incs(self, rest_time, mirror_time): + """Return list of newer incs sorted by time (increasing) - # Return increments in reversed order (latest first) - incpairs.reverse() - self.inc_list = map(lambda pair: pair[1], incpairs) + Also discard increments older than rest_time (rest_time we are + assuming is the exact time rdiff-backup was run, so no need to + consider the next oldest increment or any of that) + """ + incpairs = [] + for inc in self.inc_list: + time = Time.stringtotime(inc.getinctime()) + if time >= rest_time: incpairs.append((time, inc)) + incpairs.sort() + return [pair[1] for pair in incpairs] + class RestoreCombinedData: """Combine index information from increment and mirror directories @@ -235,9 +300,12 @@ class RestoreCombinedData: if mirror: self.mirror = mirror assert mirror.index == self.index + else: self.mirror = None elif mirror: self.index = mirror.index self.mirror = mirror + self.inc_list = [] + self.inc_rpath = None else: assert None, "neither rid nor mirror given" self.target = target @@ -249,15 +317,15 @@ class RestoreCombinedData: if self.restore_hardlink(): return - if not inclist or inclist[0].getinctype() == "diff": + if not self.inc_list or self.inc_list[0].getinctype() == "diff": assert self.mirror and self.mirror.lstat(), \ "No base to go with incs for %s" % self.target.path RPath.copy_with_attribs(self.mirror, self.target) for inc in self.inc_list: self.applyinc(inc, self.target) - def log(self) + def log(self): """Log current restore action""" - inc_string = ','.join(map(lambda x: x.path, 
self.inc_list)) + inc_string = ','.join([inc.path for inc in self.inc_list]) Log("Restoring %s with increments %s to %s" % (self.mirror and self.mirror.path, inc_string, self.target.path), 5) @@ -266,7 +334,7 @@ class RestoreCombinedData: """Hard link target and return true if hard linking appropriate""" if (Globals.preserve_hardlinks and Hardlink.restore_link(self.index, self.target)): - RPath.copy_attribs(self.inc_list and inc_list[-1] or + RPath.copy_attribs(self.inc_list and self.inc_list[-1] or self.mirror, self.target) return 1 return None diff --git a/rdiff-backup/rdiff_backup/robust.py b/rdiff-backup/rdiff_backup/robust.py index 22f35b9..a71eabc 100644 --- a/rdiff-backup/rdiff_backup/robust.py +++ b/rdiff-backup/rdiff_backup/robust.py @@ -252,7 +252,7 @@ class TempFile(RPath): if self.isdir() and not rp_dest.isdir(): # Cannot move a directory directly over another file rp_dest.delete() - if (isinstance(rp_dest, DSRPath) and rp_dest.perms_delayed + if (isinstance(rp_dest, DSRPath) and rp_dest.delay_perms and not self.hasfullperms()): # If we are moving to a delayed perm directory, delay # permission change on destination. @@ -531,7 +531,7 @@ class Resume: Log("Last backup dated %s was aborted, but we aren't " "resuming it." % Time.timetopretty(si.time), 2) return None - assert 0 + assert None MakeClass(Resume) diff --git a/rdiff-backup/rdiff_backup/rpath.py b/rdiff-backup/rdiff_backup/rpath.py index 0089bf6..110b89f 100644 --- a/rdiff-backup/rdiff_backup/rpath.py +++ b/rdiff-backup/rdiff_backup/rpath.py @@ -168,6 +168,17 @@ class RPathStatic: rp_dest.data = rp_source.data rp_source.data = {'type': None} + # If we are moving to a DSRPath, assume that the current times + # are the intended ones. We need to save them now in case + # they are changed later. + if isinstance(rp_dest, DSRPath): + if rp_dest.delay_mtime: + if 'mtime' in rp_dest.data: + rp_dest.setmtime(rp_dest.data['mtime']) + if rp_dest.delay_atime: + if 'atime' in rp_dest.data: + rp_dest.setatime(rp_dest.data['atime']) + def tupled_lstat(filename): """Like os.lstat, but return only a tuple, or None if os.error @@ -413,7 +424,7 @@ class RPath(RORPath): self.base = base self.path = apply(os.path.join, (base,) + self.index) self.file = None - if data: self.data = data + if data or base is None: self.data = data else: self.setdata() def __str__(self): @@ -493,6 +504,12 @@ class RPath(RORPath): s = self.conn.reval("lambda path: os.lstat(path).st_rdev", self.path) return (s >> 8, s & 0xff) + def quote_path(self): + """Set path from quoted version of index""" + quoted_list = [FilenameMapping.quote(path) for path in self.index] + self.path = apply(os.path.join, [self.base] + quoted_list) + self.setdata() + def chmod(self, permissions): """Wrapper around os.chmod""" self.conn.os.chmod(self.path, permissions) @@ -594,7 +611,8 @@ class RPath(RORPath): if not self.lstat(): return # must have been deleted in meantime elif self.isdir(): itm = RpathDeleter() - for dsrp in Select(self, None).set_iter(): itm(dsrp.index, dsrp) + for dsrp in Select(DSRPath(None, self)).set_iter(): + itm(dsrp.index, dsrp) itm.Finish() else: self.conn.os.unlink(self.path) self.setdata() @@ -616,7 +634,7 @@ class RPath(RORPath): self.path.split("/"))) if self.path[0] == "/": newpath = "/" + newpath elif not newpath: newpath = "." 
- return self.__class__(self.conn, newpath, ()) + return self.newpath(newpath) def dirsplit(self): """Returns a tuple of strings (dirname, basename) @@ -635,10 +653,20 @@ class RPath(RORPath): comps = normed.path.split("/") return "/".join(comps[:-1]), comps[-1] + def newpath(self, newpath, index = ()): + """Return new RPath with the same connection but different path""" + return self.__class__(self.conn, newpath, index) + def append(self, ext): """Return new RPath with same connection by adjoing ext""" return self.__class__(self.conn, self.base, self.index + (ext,)) + def append_path(self, ext, new_index = ()): + """Like append, but add ext to path instead of to index""" + assert not self.index # doesn't make sense if index isn't () + return self.__class__(self.conn, os.path.join(self.base, ext), + new_index) + def new_index(self, index): """Return similar RPath but with new index""" return self.__class__(self.conn, self.base, index) diff --git a/rdiff-backup/rdiff_backup/selection.py b/rdiff-backup/rdiff_backup/selection.py index aaa8639..cae6db3 100644 --- a/rdiff-backup/rdiff_backup/selection.py +++ b/rdiff-backup/rdiff_backup/selection.py @@ -60,21 +60,20 @@ class Select: # This re should not match normal filenames, but usually just globs glob_re = re.compile("(.*[*?[]|ignorecase\\:)", re.I | re.S) - def __init__(self, rpath, source): - """DSRPIterator initializer. + def __init__(self, dsrpath, quoted_filenames = None): + """DSRPIterator initializer. dsrp is the root directory - rpath is the root dir. Source is true if rpath is the root of - the source directory, and false for the mirror directory + When files have quoted characters in them, quoted_filenames + should be true. Then RPath's index will be the unquoted + version. """ - assert isinstance(rpath, RPath) + assert isinstance(dsrpath, DSRPath) self.selection_functions = [] - self.source = source - if isinstance(rpath, DSRPath): self.dsrpath = rpath - else: self.dsrpath = DSRPath(rpath.conn, rpath.base, - rpath.index, rpath.data) + self.dsrpath = dsrpath self.prefix = self.dsrpath.path - + self.quoting_on = Globals.quoting_enabled and quoted_filenames + def set_iter(self, starting_index = None, sel_func = None): """Initialize more variables, get ready to iterate @@ -106,7 +105,7 @@ class Select: """ s = sel_func(dsrpath) - if s === 0: return + if s == 0: return elif s == 1: # File is included yield dsrpath if dsrpath.isdir(): @@ -122,11 +121,15 @@ class Select: def iterate_in_dir(self, dsrpath, rec_func, sel_func): """Iterate the dsrps in directory dsrpath.""" - dir_listing = dsrpath.listdir() - dir_listing.sort() - for filename in dir_listing: - for dsrp in rec_func(dsrpath.append(filename), rec_func, sel_func): - yield dsrp + if self.quoting_on: + for subdir in FilenameMapping.get_quoted_dir_children(dsrpath): + for dsrp in rec_func(subdir, rec_func, sel_func): yield dsrp + else: + dir_listing = dsrpath.listdir() + dir_listing.sort() + for filename in dir_listing: + for dsrp in rec_func(dsrpath.append(filename), + rec_func, sel_func): yield dsrp def iterate_starting_from(self, dsrpath, rec_func, sel_func): """Like Iterate, but only yield indicies > self.starting_index""" diff --git a/rdiff-backup/src/Make b/rdiff-backup/src/Make index 6e66656..b78ed95 100755 --- a/rdiff-backup/src/Make +++ b/rdiff-backup/src/Make @@ -23,9 +23,9 @@ def mystrip(filename): files = ["globals.py", "static.py", "lazy.py", "log.py", "ttime.py", "iterfile.py", "rdiff.py", "connection.py", "rpath.py", "hardlink.py", "robust.py", "rorpiter.py", - 
"destructive_stepping.py", "selection.py", "increment.py", - "restore.py", "manage.py", "highlevel.py", - "setconnections.py", "main.py"] + "destructive_stepping.py", "selection.py", + "filename_mapping.py", "increment.py", "restore.py", + "manage.py", "highlevel.py", "setconnections.py", "main.py"] os.system("cp header.py rdiff-backup") diff --git a/rdiff-backup/src/connection.py b/rdiff-backup/src/connection.py index 9842480..57d2fa5 100644 --- a/rdiff-backup/src/connection.py +++ b/rdiff-backup/src/connection.py @@ -92,6 +92,7 @@ class LowLevelPipeConnection(Connection): b - string q - quit signal t - TempFile + d - DSRPath R - RPath r - RORPath only c - PipeConnection object @@ -118,6 +119,7 @@ class LowLevelPipeConnection(Connection): if type(obj) is types.StringType: self._putbuf(obj, req_num) elif isinstance(obj, Connection): self._putconn(obj, req_num) elif isinstance(obj, TempFile): self._puttempfile(obj, req_num) + elif isinstance(obj, DSRPath): self._putdsrpath(obj, req_num) elif isinstance(obj, RPath): self._putrpath(obj, req_num) elif isinstance(obj, RORPath): self._putrorpath(obj, req_num) elif ((hasattr(obj, "read") or hasattr(obj, "write")) @@ -148,6 +150,11 @@ class LowLevelPipeConnection(Connection): tempfile.index, tempfile.data) self._write("t", cPickle.dumps(tf_repr, 1), req_num) + def _putdsrpath(self, dsrpath, req_num): + """Put DSRPath into pipe. See _putrpath""" + dsrpath_repr = (dsrpath.conn.conn_number, dsrpath.getstatedict()) + self._write("d", cPickle.dumps(dsrpath_repr, 1), req_num) + def _putrpath(self, rpath, req_num): """Put an rpath into the pipe @@ -219,23 +226,22 @@ class LowLevelPipeConnection(Connection): ord(header_string[1]), self._s2l(header_string[2:])) except IndexError: raise ConnectionError() - if format_string == "o": result = cPickle.loads(self._read(length)) - elif format_string == "b": result = self._read(length) - elif format_string == "f": - result = VirtualFile(self, int(self._read(length))) + if format_string == "q": raise ConnectionQuit("Received quit signal") + + data = self._read(length) + if format_string == "o": result = cPickle.loads(data) + elif format_string == "b": result = data + elif format_string == "f": result = VirtualFile(self, int(data)) elif format_string == "i": - result = RORPIter.FromFile(BufferedRead( - VirtualFile(self, int(self._read(length))))) - elif format_string == "t": - result = self._gettempfile(self._read(length)) - elif format_string == "r": - result = self._getrorpath(self._read(length)) - elif format_string == "R": result = self._getrpath(self._read(length)) - elif format_string == "c": - result = Globals.connection_dict[int(self._read(length))] + result = RORPIter.FromFile(BufferedRead(VirtualFile(self, + int(data)))) + elif format_string == "t": result = self._gettempfile(data) + elif format_string == "r": result = self._getrorpath(data) + elif format_string == "R": result = self._getrpath(data) + elif format_string == "d": result = self._getdsrpath(data) else: - assert format_string == "q", header_string - raise ConnectionQuit("Received quit signal") + assert format_string == "c", header_string + result = Globals.connection_dict[int(data)] Log.conn("received", result, req_num) return (req_num, result) @@ -255,6 +261,15 @@ class LowLevelPipeConnection(Connection): conn_number, base, index, data = cPickle.loads(raw_rpath_buf) return RPath(Globals.connection_dict[conn_number], base, index, data) + def _getdsrpath(self, raw_dsrpath_buf): + """Return DSRPath object indicated by buf""" + conn_number, 
state_dict = cPickle.loads(raw_dsrpath_buf) + empty_dsrp = DSRPath("bypass", Globals.local_connection, None) + empty_dsrp.__setstate__(state_dict) + empty_dsrp.conn = Globals.connection_dict[conn_number] + empty_dsrp.file = None + return empty_dsrp + def _close(self): """Close the pipes associated with the connection""" self.outpipe.close() diff --git a/rdiff-backup/src/destructive_stepping.py b/rdiff-backup/src/destructive_stepping.py index c5e2faa..ff3b42a 100644 --- a/rdiff-backup/src/destructive_stepping.py +++ b/rdiff-backup/src/destructive_stepping.py @@ -1,4 +1,5 @@ from __future__ import generators +import types execfile("rorpiter.py") ####################################################################### @@ -40,13 +41,17 @@ class DSRPath(RPath): otherwise use the same arguments as the RPath initializer. """ - if len(args) == 2 and isinstance(args[0], RPath): + if len(args) == 1 and isinstance(args[0], RPath): rp = args[0] RPath.__init__(self, rp.conn, rp.base, rp.index) else: RPath.__init__(self, *args) - self.set_delays(source) - self.set_init_perms(source) + if source != "bypass": + # "bypass" val is used when unpackaging over connection + assert source is None or source is 1 + self.source = source + self.set_delays(source) + self.set_init_perms(source) def set_delays(self, source): """Delay writing permissions and times where appropriate""" @@ -59,13 +64,14 @@ class DSRPath(RPath): # Now get atime right away if possible if self.data.has_key('atime'): self.newatime = self.data['atime'] else: self.newatime = None + else: self.delay_atime = None if source: self.delay_mtime = None # we'll never change mtime of source file else: self.delay_mtime = 1 # Save mtime now for a dir, because it might inadvertantly change - if self.isdir(): self.newmtime = self.getmtime() + if self.isdir(): self.newmtime = self.data['mtime'] else: self.newmtime = None def set_init_perms(self, source): @@ -75,26 +81,30 @@ class DSRPath(RPath): self.chmod_bypass(0400) else: self.warn("No read permissions") elif self.isdir(): - if source and (not self.readable() or self.executable()): + if source and (not self.readable() or not self.executable()): if Globals.change_source_perms and self.isowner(): self.chmod_bypass(0500) - else: warn("No read or exec permission") + else: self.warn("No read or exec permission") elif not source and not self.hasfullperms(): self.chmod_bypass(0700) def warn(self, err): Log("Received error '%s' when dealing with file %s, skipping..." % (err, self.path), 1) - raise DSRPermError(self.path) + raise DSRPPermError(self.path) def __getstate__(self): """Return picklable state. 
See RPath __getstate__.""" assert self.conn is Globals.local_connection # Can't pickle a conn + return self.getstatedict() + + def getstatedict(self): + """Return dictionary containing the attributes we can save""" pickle_dict = {} for attrib in ['index', 'data', 'delay_perms', 'newperms', 'delay_atime', 'newatime', 'delay_mtime', 'newmtime', - 'path', 'base']: + 'path', 'base', 'source']: if self.__dict__.has_key(attrib): pickle_dict[attrib] = self.__dict__[attrib] return pickle_dict @@ -110,10 +120,17 @@ class DSRPath(RPath): if self.delay_perms: self.newperms = self.data['perms'] = permissions else: RPath.chmod(self, permissions) + def getperms(self): + """Return dsrp's intended permissions""" + if self.delay_perms and self.newperms is not None: + return self.newperms + else: return self.data['perms'] + def chmod_bypass(self, permissions): """Change permissions without updating the data dictionary""" self.delay_perms = 1 if self.newperms is None: self.newperms = self.getperms() + Log("DSRP: Perm bypass %s to %o" % (self.path, permissions), 8) self.conn.os.chmod(self.path, permissions) def settime(self, accesstime, modtime): @@ -129,11 +146,25 @@ class DSRPath(RPath): if self.delay_mtime: self.newmtime = self.data['mtime'] = modtime else: RPath.setmtime(self, modtime) + def getmtime(self): + """Return dsrp's intended modification time""" + if self.delay_mtime and self.newmtime is not None: + return self.newmtime + else: return self.data['mtime'] + + def getatime(self): + """Return dsrp's intended access time""" + if self.delay_atime and self.newatime is not None: + return self.newatime + else: return self.data['atime'] + def write_changes(self): """Write saved up permission/time changes""" if not self.lstat(): return # File has been deleted in meantime if self.delay_perms and self.newperms is not None: + Log("Finalizing permissions of dsrp %s to %s" % + (self.path, self.newperms), 8) RPath.chmod(self, self.newperms) do_atime = self.delay_atime and self.newatime is not None @@ -145,6 +176,19 @@ class DSRPath(RPath): elif not do_atime and do_mtime: RPath.setmtime(self, self.newmtime) + def newpath(self, newpath, index = ()): + """Return similar DSRPath but with new path""" + return self.__class__(self.source, self.conn, newpath, index) + + def append(self, ext): + """Return similar DSRPath with new extension""" + return self.__class__(self.source, self.conn, self.base, + self.index + (ext,)) + + def new_index(self, index): + """Return similar DSRPath with new index""" + return self.__class__(self.source, self.conn, self.base, index) + class DestructiveSteppingFinalizer(IterTreeReducer): """Finalizer that can work on an iterator of dsrpaths @@ -155,11 +199,12 @@ class DestructiveSteppingFinalizer(IterTreeReducer): coming back to it. """ + dsrpath = None def start_process(self, index, dsrpath): self.dsrpath = dsrpath def end_process(self): - self.dsrpath.write_changes() + if self.dsrpath: self.dsrpath.write_changes() diff --git a/rdiff-backup/src/globals.py b/rdiff-backup/src/globals.py index 710b5e2..2f0b0ca 100644 --- a/rdiff-backup/src/globals.py +++ b/rdiff-backup/src/globals.py @@ -8,7 +8,7 @@ import re, os class Globals: # The current version of rdiff-backup - version = "0.7.3" + version = "0.7.4" # If this is set, use this value in seconds as the current time # instead of reading it from the clock. @@ -108,6 +108,18 @@ class Globals: # under MS windows NT. 
time_separator = ":" + # quoting_enabled is true if we should quote certain characters in + # filenames on the source side (see FilenameMapping for more + # info). chars_to_quote is a string whose characters should be + # quoted, and quoting_char is the character to quote with. + quoting_enabled = None + chars_to_quote = "" + quoting_char = ';' + + # If true, emit output intended to be easily readable by a + # computer. False means output is intended for humans. + parsable_output = None + # If true, then hardlinks will be preserved to mirror and recorded # in the increments directory. There is also a difference here # between None and 0. When restoring, None or 1 means to preserve @@ -180,12 +192,12 @@ class Globals: else: cls.__dict__[name] = re.compile(re_string) postset_regexp_local = classmethod(postset_regexp_local) - def set_select(cls, source, dsrpath, tuplelist): + def set_select(cls, dsrpath, tuplelist, quote_mode = None): """Initialize select object using tuplelist""" - if source: - cls.select_source = Select(dsrpath, 1) + if dsrpath.source: + cls.select_source = Select(dsrpath, quote_mode) cls.select_source.ParseArgs(tuplelist) else: - cls.select_mirror = Select(dsrpath, None) + cls.select_mirror = Select(dsrpath, quote_mode) cls.select_mirror.ParseArgs(tuplelist) set_select = classmethod(set_select) diff --git a/rdiff-backup/src/hardlink.py b/rdiff-backup/src/hardlink.py index deecd68..7531ad3 100644 --- a/rdiff-backup/src/hardlink.py +++ b/rdiff-backup/src/hardlink.py @@ -28,6 +28,11 @@ class Hardlink: _src_index_indicies = {} _dest_index_indicies = {} + # When a linked file is restored, its path is added to this dict, + # so it can be found when later paths being restored are linked to + # it. + _restore_index_path = {} + def get_inode_key(cls, rorp): """Return rorp's key for _inode_ dictionaries""" return (rorp.getinode(), rorp.getdevloc()) @@ -101,26 +106,29 @@ class Hardlink: """True if rorp's index is already linked to something on src side""" return len(cls.get_indicies(rorp, 1)) >= 2 - def restore_link(cls, mirror_rel_index, rpath): + def restore_link(cls, index, rpath): """Restores a linked file by linking it When restoring, all the hardlink data is already present, and - we can only link to something already written. Returns true - if succeeded in creating rpath, false if must restore rpath - normally. + we can only link to something already written. In either + case, add to the _restore_index_path dict, so we know later + that the file is available for hard + linking. + + Returns true if succeeded in creating rpath, false if must + restore rpath normally. 
""" - full_index = mirror_rel_index + rpath.index - if not cls._src_index_indicies.has_key(full_index): return None - truncated_list = [] - for index in cls._src_index_indicies[full_index]: - if index[:len(mirror_rel_index)] == mirror_rel_index: - truncated_list.append(index[len(mirror_rel_index):]) - - if not truncated_list or truncated_list[0] >= rpath.index: return None - srclink = RPath(rpath.conn, rpath.base, truncated_list[0]) - rpath.hardlink(srclink.path) - return 1 + if index not in cls._src_index_indicies: return None + for linked_index in cls._src_index_indicies[index]: + if linked_index in cls._restore_index_path: + srcpath = cls._restore_index_path[linked_index] + Log("Restoring %s by hard linking to %s" % + (rpath.path, srcpath), 6) + rpath.hardlink(srcpath) + return 1 + cls._restore_index_path[index] = rpath.path + return None def link_rp(cls, src_rorp, dest_rpath, dest_root = None): """Make dest_rpath into a link analogous to that of src_rorp""" diff --git a/rdiff-backup/src/highlevel.py b/rdiff-backup/src/highlevel.py index d0bc743..36ba55a 100644 --- a/rdiff-backup/src/highlevel.py +++ b/rdiff-backup/src/highlevel.py @@ -24,12 +24,14 @@ class HighLevel: accompanying diagram. """ - def Mirror(src_rpath, dest_rpath, checkpoint = 1, session_info = None): + def Mirror(src_rpath, dest_rpath, checkpoint = 1, + session_info = None, write_finaldata = 1): """Turn dest_rpath into a copy of src_rpath Checkpoint true means to checkpoint periodically, otherwise not. If session_info is given, try to resume Mirroring from - that point. + that point. If write_finaldata is true, save extra data files + like hardlink_data. If it is false, make a complete mirror. """ SourceS = src_rpath.conn.HLSourceStruct @@ -40,7 +42,8 @@ class HighLevel: src_init_dsiter = SourceS.split_initial_dsiter() dest_sigiter = DestS.get_sigs(dest_rpath, src_init_dsiter) diffiter = SourceS.get_diffs_and_finalize(dest_sigiter) - DestS.patch_and_finalize(dest_rpath, diffiter, checkpoint) + DestS.patch_and_finalize(dest_rpath, diffiter, + checkpoint, write_finaldata) dest_rpath.setdata() @@ -61,24 +64,6 @@ class HighLevel: dest_rpath.setdata() inc_rpath.setdata() - def Restore(rest_time, mirror_base, rel_index, baseinc_tup, target_base): - """Like Restore.RestoreRecursive but check arguments""" - if (Globals.preserve_hardlinks != 0 and - Hardlink.retrieve_final(rest_time)): - Log("Hard link information found, attempting to preserve " - "hard links.", 4) - SetConnections.UpdateGlobal('preserve_hardlinks', 1) - else: SetConnections.UpdateGlobal('preserve_hardlinks', None) - - if not isinstance(target_base, DSRPath): - target_base = DSRPath(target_base.conn, target_base.base, - target_base.index, target_base.data) - if not isinstance(mirror_base, DSRPath): - mirror_base = DSRPath(mirror_base.conn, mirror_base.base, - mirror_base.index, mirror_base.data) - Restore.RestoreRecursive(rest_time, mirror_base, rel_index, - baseinc_tup, target_base) - MakeStatic(HighLevel) @@ -164,7 +149,7 @@ class HLDestinationStruct: def compare(src_rorp, dest_dsrp): """Return dest_dsrp if they are different, None if the same""" if not dest_dsrp: - dest_dsrp = DSRPath(baserp.conn, baserp.base, src_rorp.index) + dest_dsrp = cls.get_dsrp(baserp, src_rorp.index) if dest_dsrp.lstat(): Log("Warning: Found unexpected destination file %s, " "not processing it." 
% dest_dsrp.path, 2) @@ -203,8 +188,9 @@ class HLDestinationStruct: def get_dsrp(cls, dest_rpath, index): """Return initialized dsrp based on dest_rpath with given index""" - return DSRPath(source = None, dest_rpath.conn, - dest_rpath.base, index) + dsrp = DSRPath(None, dest_rpath.conn, dest_rpath.base, index) + if Globals.quoting_enabled: dsrp.quote_path() + return dsrp def get_finalizer(cls): """Return finalizer, starting from session info if necessary""" @@ -216,9 +202,13 @@ class HLDestinationStruct: """Return ITR, starting from state if necessary""" if cls._session_info and cls._session_info.ITR: return cls._session_info.ITR - else: return IncrementITR(inc_rpath) + else: + iitr = IncrementITR(inc_rpath) + iitr.override_changed() + return iitr - def patch_and_finalize(cls, dest_rpath, diffs, checkpoint = 1): + def patch_and_finalize(cls, dest_rpath, diffs, + checkpoint = 1, write_finaldata = 1): """Apply diffs and finalize""" collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2) finalizer = cls.get_finalizer() @@ -242,7 +232,7 @@ class HLDestinationStruct: if checkpoint: SaveState.checkpoint_mirror(finalizer, dsrp) except: cls.handle_last_error(dsrp, finalizer) finalizer.Finish() - if Globals.preserve_hardlinks and Globals.rbdir: + if Globals.preserve_hardlinks and write_finaldata: Hardlink.final_writedata() if checkpoint: SaveState.checkpoint_remove() @@ -300,8 +290,7 @@ class HLDestinationStruct: Log.exception(1) if ITR: SaveState.checkpoint_inc_backup(ITR, finalizer, dsrp, 1) else: SaveState.checkpoint_mirror(finalizer, dsrp, 1) - if Globals.preserve_hardlinks: - Hardlink.final_checkpoint(Globals.rbdir) + if Globals.preserve_hardlinks: Hardlink.final_checkpoint(Globals.rbdir) SaveState.touch_last_file_definitive() raise diff --git a/rdiff-backup/src/increment.py b/rdiff-backup/src/increment.py index 446806b..b28b315 100644 --- a/rdiff-backup/src/increment.py +++ b/rdiff-backup/src/increment.py @@ -1,4 +1,4 @@ -execfile("selection.py") +execfile("filename_mapping.py") ####################################################################### # @@ -85,10 +85,12 @@ class Inc: """Get new increment rp with given time suffix""" addtostr = lambda s: "%s.%s.%s" % (s, timestr, typestr) if rp.index: - return rp.__class__(rp.conn, rp.base, rp.index[:-1] + - (addtostr(rp.index[-1]),)) - else: return rp.__class__(rp.conn, addtostr(rp.base), rp.index) - + incrp = rp.__class__(rp.conn, rp.base, rp.index[:-1] + + (addtostr(rp.index[-1]),)) + else: incrp = rp.__class__(rp.conn, addtostr(rp.base), rp.index) + if Globals.quoting_enabled: incrp.quote_path() + return incrp + inctime = 0 while 1: inctime = Resume.FindTime(rp.index, inctime) @@ -123,7 +125,7 @@ class IncrementITR(IterTreeReducer): def __init__(self, inc_rpath): """Set inc_rpath, an rpath of the base of the tree""" self.inc_rpath = inc_rpath - IterTreeReducer.__init__(inc_rpath) + IterTreeReducer.__init__(self, inc_rpath) def start_process(self, index, diff_rorp, dsrp): """Initial processing of file @@ -133,11 +135,21 @@ class IncrementITR(IterTreeReducer): """ incpref = self.inc_rpath.new_index(index) + if Globals.quoting_enabled: incpref.quote_path() if dsrp.isdir(): self.init_dir(dsrp, diff_rorp, incpref) self.setvals(diff_rorp, dsrp, incpref) else: self.init_non_dir(dsrp, diff_rorp, incpref) + def override_changed(self): + """Set changed flag to true + + This is used only at the top level of a backup, to make sure + that a marker is created recording every backup session. 
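The override_changed hook above exists so that even a session in which nothing differs still leaves a trace: the root of the iterator tree is forced to look changed, so finalization writes a marker increment for every backup run. A toy illustration of the effect (not rdiff-backup's real classes):

    class RootNode:
        """Toy stand-in for the top-level IncrementITR node."""
        def __init__(self): self.changed = 0
        def override_changed(self): self.changed = 1
        def finish(self, write_marker):
            # a marker is only written when the node registers as changed
            if self.changed: write_marker()

    wrote = []
    root = RootNode()
    root.override_changed()                  # forced once, at the top level
    root.finish(lambda: wrote.append(1))
    assert wrote                             # session recorded even with no file changes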
+ + """ + self.changed = 1 + def setvals(self, diff_rorp, dsrp, incpref): """Record given values in state dict since in directory @@ -162,7 +174,7 @@ class IncrementITR(IterTreeReducer): """ if not (incpref.lstat() and incpref.isdir()): incpref.mkdir() if diff_rorp and diff_rorp.isreg() and diff_rorp.file: - tf = TempFileManager(dsrp) + tf = TempFileManager.new(dsrp) RPathStatic.copy_with_attribs(diff_rorp, tf) tf.set_attached_filetype(diff_rorp.get_attached_filetype()) self.directory_replacement = tf @@ -170,7 +182,7 @@ class IncrementITR(IterTreeReducer): def init_non_dir(self, dsrp, diff_rorp, incpref): """Process a non directory file (initial pass)""" if not diff_rorp: return # no diff, so no change necessary - if diff_rorp.isreg and (dsrp.isreg() or diff_rorp.isflaglinked()): + if diff_rorp.isreg() and (dsrp.isreg() or diff_rorp.isflaglinked()): tf = TempFileManager.new(dsrp) def init_thunk(): if diff_rorp.isflaglinked(): @@ -180,8 +192,8 @@ class IncrementITR(IterTreeReducer): Inc.Increment_action(tf, dsrp, incpref).execute() Robust.make_tf_robustaction(init_thunk, (tf,), (dsrp,)).execute() else: - Robust.chain([Inc.Increment_action(diff_rorp, dsrp, incref), - RORPIter.patchonce_action(none, dsrp, diff_rorp)] + Robust.chain([Inc.Increment_action(diff_rorp, dsrp, incpref), + RORPIter.patchonce_action(None, dsrp, diff_rorp)] ).execute() self.changed = 1 @@ -207,89 +219,3 @@ class IncrementITR(IterTreeReducer): - - - def make_patch_increment_ITR(inc_rpath, initial_state = None): - """Return IterTreeReducer that patches and increments""" - def base_init(indexed_tuple): - """Patch if appropriate, return (a,b) tuple - - a is true if found directory and thus didn't take action - - if a is false, b is true if some changes were made - - if a is true, b is the rp of a temporary file used to hold - the diff_rorp's data (for dir -> normal file change), and - false if none was necessary. - - """ - diff_rorp, dsrp = indexed_tuple - incpref = inc_rpath.new_index(indexed_tuple.index) - if dsrp.isdir(): return init_dir(dsrp, diff_rorp, incpref) - else: return init_non_dir(dsrp, diff_rorp, incpref) - - def init_dir(dsrp, diff_rorp, incpref): - """Initial processing of a directory - - Make the corresponding directory right away, but wait - until the end to write the replacement. However, if the - diff_rorp contains data, we must write it locally before - continuing, or else that data will be lost in the stream. - - """ - if not (incpref.lstat() and incpref.isdir()): incpref.mkdir() - if diff_rorp and diff_rorp.isreg() and diff_rorp.file: - tf = TempFileManager.new(dsrp) - RPathStatic.copy_with_attribs(diff_rorp, tf) - tf.set_attached_filetype(diff_rorp.get_attached_filetype()) - return (1, tf) - else: return (1, None) - - def init_non_dir(dsrp, diff_rorp, incpref): - """Initial processing of non-directory - - If a reverse diff is called for it is generated by apply - the forwards diff first on a temporary file. 
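A quieter fix earlier in this hunk is diff_rorp.isreg becoming diff_rorp.isreg(): an uncalled bound method is always truthy, so the old test could never fall through to the plain patch-and-increment branch. A two-assert demonstration of why the parentheses matter (illustrative only):

    class RORP:                      # toy stand-in
        def isreg(self): return False

    r = RORP()
    assert bool(r.isreg) is True     # the method object itself is truthy
    assert r.isreg() is False        # the intended check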
- - """ - if diff_rorp: - if diff_rorp.isreg() and (dsrp.isreg() or - diff_rorp.isflaglinked()): - tf = TempFileManager.new(dsrp) - def init_thunk(): - if diff_rorp.isflaglinked(): - Hardlink.link_rp(diff_rorp, tf, dsrp) - else: Rdiff.patch_with_attribs_action(dsrp, diff_rorp, - tf).execute() - Inc.Increment_action(tf, dsrp, incpref).execute() - Robust.make_tf_robustaction(init_thunk, (tf,), - (dsrp,)).execute() - else: - Robust.chain([Inc.Increment_action(diff_rorp, dsrp, - incpref), - RORPIter.patchonce_action( - None, dsrp, diff_rorp)]).execute() - return (None, 1) - return (None, None) - - def base_final(base_tuple, base_init_tuple, changed): - """Patch directory if not done, return true iff made change""" - if base_init_tuple[0]: # was directory - diff_rorp, dsrp = base_tuple - if changed or diff_rorp: - if base_init_tuple[1]: diff_rorp = base_init_tuple[1] - Inc.Increment(diff_rorp, dsrp, - inc_rpath.new_index(base_tuple.index)) - if diff_rorp: - RORPIter.patchonce_action(None, dsrp, - diff_rorp).execute() - if isinstance(diff_rorp, TempFile): diff_rorp.delete() - return 1 - return None - else: # changed iff base_init_tuple says it was - return base_init_tuple[1] - - return IterTreeReducer(base_init, lambda x,y: x or y, None, - base_final, initial_state) - - diff --git a/rdiff-backup/src/lazy.py b/rdiff-backup/src/lazy.py index 80cfa95..1bb2e2c 100644 --- a/rdiff-backup/src/lazy.py +++ b/rdiff-backup/src/lazy.py @@ -260,7 +260,7 @@ class IterTreeReducer: """ index = args[0] - assert type(index) is types.TupleType + assert type(index) is types.TupleType, type(index) if self.index is None: self.start_process(*args) diff --git a/rdiff-backup/src/main.py b/rdiff-backup/src/main.py index 557cefd..162cecf 100755 --- a/rdiff-backup/src/main.py +++ b/rdiff-backup/src/main.py @@ -22,20 +22,21 @@ class Main: try: return open(filename, "r") except IOError: Log.FatalError("Error opening file %s" % filename) - try: optlist, self.args = getopt.getopt(sys.argv[1:], "blmsv:V", + try: optlist, self.args = getopt.getopt(sys.argv[1:], "blmr:sv:V", ["backup-mode", "change-source-perms", - "checkpoint-interval=", "current-time=", "exclude=", - "exclude-device-files", "exclude-filelist=", - "exclude-filelist-stdin", "exclude-mirror=", - "exclude-regexp=", "force", "include=", - "include-filelist=", "include-filelist-stdin", - "include-regexp=", "list-increments", "mirror-only", - "no-compression", "no-compression-regexp=", - "no-hard-links", "no-resume", "remote-cmd=", - "remote-schema=", "remove-older-than=", "resume", - "resume-window=", "server", "terminal-verbosity=", - "test-server", "verbosity", "version", - "windows-time-format"]) + "chars-to-quote=", "checkpoint-interval=", + "current-time=", "exclude=", "exclude-device-files", + "exclude-filelist=", "exclude-filelist-stdin", + "exclude-mirror=", "exclude-regexp=", "force", + "include=", "include-filelist=", + "include-filelist-stdin", "include-regexp=", + "list-increments", "mirror-only", "no-compression", + "no-compression-regexp=", "no-hard-links", "no-resume", + "parsable-output", "quoting-char=", "remote-cmd=", + "remote-schema=", "remove-older-than=", + "restore-as-of=", "resume", "resume-window=", "server", + "terminal-verbosity=", "test-server", "verbosity", + "version", "windows-mode", "windows-time-format"]) except getopt.error: self.commandline_error("Error parsing commandline options") @@ -43,6 +44,9 @@ class Main: if opt == "-b" or opt == "--backup-mode": self.action = "backup" elif opt == "--change-source-perms": 
Globals.set('change_source_perms', 1) + elif opt == "--chars-to-quote": + Globals.set('chars_to_quote', arg) + Globals.set('quoting_enabled', 1) elif opt == "--checkpoint-interval": Globals.set_integer('checkpoint_interval', arg) elif opt == "--current-time": @@ -75,6 +79,13 @@ class Main: Globals.set("no_compression_regexp_string", arg) elif opt == "--no-hard-links": Globals.set('preserve_hardlinks', 0) elif opt == '--no-resume': Globals.resume = 0 + elif opt == "-r" or opt == "--restore-as-of": + self.restore_timestr = arg + self.action = "restore-as-of" + elif opt == "--parsable-output": Globals.set('parsable_output', 1) + elif opt == "--quoting-char": + Globals.set('quoting_char', arg) + Globals.set('quoting_enabled', 1) elif opt == "--remote-cmd": self.remote_cmd = arg elif opt == "--remote-schema": self.remote_schema = arg elif opt == "--remove-older-than": @@ -84,14 +95,16 @@ class Main: elif opt == '--resume-window': Globals.set_integer('resume_window', arg) elif opt == "-s" or opt == "--server": self.action = "server" - elif opt == "--terminal-verbosity": - Log.setterm_verbosity(arg) + elif opt == "--terminal-verbosity": Log.setterm_verbosity(arg) elif opt == "--test-server": self.action = "test-server" elif opt == "-V" or opt == "--version": print "rdiff-backup " + Globals.version sys.exit(0) - elif opt == "-v" or opt == "--verbosity": - Log.setverbosity(arg) + elif opt == "-v" or opt == "--verbosity": Log.setverbosity(arg) + elif opt == "--windows-mode": + Globals.set('time_separator', "_") + Globals.set('chars_to_quote', ":") + Globals.set('quoting_enabled', 1) elif opt == '--windows-time-format': Globals.set('time_separator', "_") else: Log.FatalError("Unknown option %s" % opt) @@ -112,7 +125,8 @@ class Main: self.commandline_error("No arguments given") if l > 0 and self.action == "server": self.commandline_error("Too many arguments given") - if l < 2 and (self.action == "backup" or self.action == "mirror"): + if l < 2 and (self.action == "backup" or self.action == "mirror" or + self.action == "restore-as-of"): self.commandline_error("Two arguments are required " "(source, destination).") if l == 2 and (self.action == "list-increments" or @@ -136,6 +150,8 @@ class Main: for rp in rps: rp.setdata() # Update with userinfo os.umask(077) + Time.setcurtime(Globals.current_time) + FilenameMapping.set_init_quote_vals() # This is because I originally didn't think compiled regexps # could be pickled, and so must be compiled on remote side. @@ -147,7 +163,8 @@ class Main: if self.action == "server": PipeConnection(sys.stdin, sys.stdout).Server() elif self.action == "backup": self.Backup(rps[0], rps[1]) - elif self.action == "restore": apply(self.Restore, rps) + elif self.action == "restore": self.Restore(*rps) + elif self.action == "restore-as-of": self.RestoreAsOf(rps[0], rps[1]) elif self.action == "mirror": self.Mirror(rps[0], rps[1]) elif self.action == "test-server": SetConnections.TestConnections() elif self.action == "list-increments": self.ListIncrements(rps[0]) @@ -175,7 +192,12 @@ class Main: """Turn dest_path into a copy of src_path""" Log("Mirroring %s to %s" % (src_rp.path, dest_rp.path), 5) self.mirror_check_paths(src_rp, dest_rp) - HighLevel.Mirror(src_rp, dest_rp, None) # No checkpointing - no rbdir + # Since no "rdiff-backup-data" dir, use root of destination. 
+ SetConnections.UpdateGlobal('rbdir', dest_rp) + SetConnections.BackupInitConnections(src_rp.conn, dest_rp.conn) + RSI = Globals.backup_writer.Resume.ResumeCheck() + SaveState.init_filenames(None) + HighLevel.Mirror(src_rp, dest_rp, 1, RSI, None) def mirror_check_paths(self, rpin, rpout): """Check paths and return rpin, rpout""" @@ -193,7 +215,6 @@ rdiff-backup with the --force option if you want to mirror anyway.""" % SetConnections.BackupInitConnections(rpin.conn, rpout.conn) self.backup_init_select(rpin, rpout) self.backup_init_dirs(rpin, rpout) - Time.setcurtime(Globals.current_time) RSI = Globals.backup_writer.Resume.ResumeCheck() if self.prevtime: Time.setprevtime(self.prevtime) @@ -206,8 +227,9 @@ rdiff-backup with the --force option if you want to mirror anyway.""" % def backup_init_select(self, rpin, rpout): """Create Select objects on source and dest connections""" - rpin.conn.Globals.set_select(1, rpin, self.select_opts) - rpout.conn.Globals.set_select(None, rpout, self.select_mirror_opts) + rpin.conn.Globals.set_select(DSRPath(1, rpin), self.select_opts) + rpout.conn.Globals.set_select(DSRPath(None, rpout), + self.select_mirror_opts, 1) def backup_init_dirs(self, rpin, rpout): """Make sure rpin and rpout are valid, init data dir and logging""" @@ -267,9 +289,8 @@ may need to use the --exclude option.""" % (rpout.path, rpin.path), 2) def backup_get_mirrorrps(self): """Return list of current_mirror rps""" if not self.datadir.isdir(): return [] - mirrorfiles = filter(lambda f: f.startswith("current_mirror."), - self.datadir.listdir()) - mirrorrps = map(lambda x: self.datadir.append(x), mirrorfiles) + mirrorrps = [self.datadir.append(fn) for fn in self.datadir.listdir() + if fn.startswith("current_mirror.")] return filter(lambda rp: rp.isincfile(), mirrorrps) def backup_get_mirrortime(self): @@ -299,22 +320,45 @@ went wrong during your last backup? Using """ + mirrorrps[-1].path, 2) def Restore(self, src_rp, dest_rp = None): - """Main restoring function - take src_path to dest_path""" - Log("Starting Restore", 5) + """Main restoring function + + Here src_rp should be an increment file, and if dest_rp is + missing it defaults to the base of the increment. + + """ rpin, rpout = self.restore_check_paths(src_rp, dest_rp) - self.restore_init_select(rpin, rpout) - inc_tup = self.restore_get_inctup(rpin) - mirror_base, mirror_rel_index = self.restore_get_mirror(rpin) - rtime = Time.stringtotime(rpin.getinctime()) + time = Time.stringtotime(rpin.getinctime()) + self.restore_common(rpin, rpout, time) + + def RestoreAsOf(self, rpin, target): + """Secondary syntax for restore operation + + rpin - RPath of mirror file to restore (not nec. 
with correct index) + target - RPath of place to put restored file + + """ + self.restore_check_paths(rpin, target, 1) + try: time = Time.genstrtotime(self.restore_timestr) + except TimeError, exp: Log.FatalError(str(exp)) + self.restore_common(rpin, target, time) + + def restore_common(self, rpin, target, time): + """Restore operation common to Restore and RestoreAsOf""" + Log("Starting Restore", 5) + mirror_root, index = self.restore_get_root(rpin) + mirror = mirror_root.new_index(index) + inc_rpath = self.datadir.append_path('increments', index) + self.restore_init_select(mirror_root, target) Log.open_logfile(self.datadir.append("restore.log")) - HighLevel.Restore(rtime, mirror_base, mirror_rel_index, inc_tup, rpout) + Restore.Restore(inc_rpath, mirror, target, time) - def restore_check_paths(self, rpin, rpout): + def restore_check_paths(self, rpin, rpout, restoreasof = None): """Check paths and return pair of corresponding rps""" - if not rpin.lstat(): - Log.FatalError("Increment file %s does not exist" % rpin.path) - if not rpin.isincfile(): - Log.FatalError("""File %s does not look like an increment file. + if not restoreasof: + if not rpin.lstat(): + Log.FatalError("Source file %s does not exist" % rpin.path) + elif not rpin.isincfile(): + Log.FatalError("""File %s does not look like an increment file. Try restoring from an increment file (the filenames look like "foobar.2001-09-01T04:49:04-07:00.diff").""" % rpin.path) @@ -322,8 +366,8 @@ Try restoring from an increment file (the filenames look like if not rpout: rpout = RPath(Globals.local_connection, rpin.getincbase_str()) if rpout.lstat(): - Log.FatalError("Restore target %s already exists. " - "Will not overwrite." % rpout.path) + Log.FatalError("Restore target %s already exists," + "and will not be overwritten." % rpout.path) return rpin, rpout def restore_init_select(self, rpin, rpout): @@ -334,81 +378,64 @@ Try restoring from an increment file (the filenames look like the restore operation isn't. """ - Globals.set_select(1, rpin, self.select_mirror_opts) - Globals.set_select(None, rpout, self.select_opts) - - def restore_get_inctup(self, rpin): - """Return increment tuple (incrp, list of incs)""" - rpin_dir = rpin.dirsplit()[0] - if not rpin_dir: rpin_dir = "/" - rpin_dir_rp = RPath(rpin.conn, rpin_dir) - incbase = rpin.getincbase() - incbasename = incbase.dirsplit()[1] - inclist = filter(lambda rp: rp.isincfile() and - rp.getincbase_str() == incbasename, - map(rpin_dir_rp.append, rpin_dir_rp.listdir())) - return IndexedTuple((), (incbase, inclist)) - - def restore_get_mirror(self, rpin): - """Return (mirror file, relative index) and set the data dir + Globals.set_select(DSRPath(1, rpin), self.select_mirror_opts) + Globals.set_select(DSRPath(None, rpout), self.select_opts) + + def restore_get_root(self, rpin): + """Return (mirror root, index) and set the data dir The idea here is to keep backing up on the path until we find - something named "rdiff-backup-data". Then use that as a - reference to calculate the oldfile. This could fail if the - increment file is pointed to in a funny way, using symlinks or - somesuch. + a directory that contains "rdiff-backup-data". That is the + mirror root. If the path from there starts + "rdiff-backup-data/increments*", then the index is the + remainder minus that. Otherwise the index is just the path + minus the root. - The mirror file will have index (), so also return the index - relative to the rootrp. 
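The docstring above (it continues just below) spells out the root-finding rule: walk upward from the given path until a directory containing "rdiff-backup-data" appears, treat that as the mirror root, and derive the index from whatever path components remain. A rough translation into plain os.path terms (hypothetical helper, no RPath machinery):

    import os

    def find_mirror_root(path):
        """Return (mirror root, index tuple) for a path inside a backup area."""
        comps = os.path.abspath(path).split("/")
        i = len(comps)
        while i >= 2:
            candidate = "/".join(comps[:i])
            if os.path.isdir(candidate) and \
               "rdiff-backup-data" in os.listdir(candidate): break
            i = i - 1
        else:
            raise OSError("no rdiff-backup-data directory above %s" % path)
        rest = tuple(comps[i:])
        if rest[:1] == ("rdiff-backup-data",):
            return candidate, rest[2:]   # strip "rdiff-backup-data/increments"
        return candidate, rest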
+ All this could fail if the increment file is pointed to in a + funny way, using symlinks or somesuch. """ - pathcomps = os.path.join(rpin.conn.os.getcwd(), - rpin.getincbase().path).split("/") - for i in range(1, len(pathcomps)): - datadirrp = RPath(rpin.conn, "/".join(pathcomps[:i+1])) - if pathcomps[i] == "rdiff-backup-data" and datadirrp.isdir(): - break - else: Log.FatalError("Unable to find rdiff-backup-data dir") - - Globals.rbdir = self.datadir = datadirrp - rootrp = RPath(rpin.conn, "/".join(pathcomps[:i])) - if not rootrp.lstat(): - Log.FatalError("Root of mirror area %s does not exist" % - rootrp.path) - else: Log("Using root mirror %s" % rootrp.path, 6) - - from_datadir = pathcomps[i+1:] - if not from_datadir: raise RestoreError("Problem finding mirror file") - rel_index = tuple(from_datadir[1:]) - mirrorrp = RPath(rootrp.conn, - apply(os.path.join, (rootrp.path,) + rel_index)) - Log("Using mirror file %s" % mirrorrp.path, 6) - return (mirrorrp, rel_index) - - - def ListIncrements(self, rootrp): - """Print out a summary of the increments and their times""" - datadir = self.li_getdatadir(rootrp, - """Unable to open rdiff-backup-data dir. - -The argument to rdiff-backup -l or rdiff-backup --list-increments -should be the root of the target backup directory, of which -rdiff-backup-data is a subdirectory. So, if you ran - -rdiff-backup /home/foo /mnt/back/bar + if rpin.isincfile(): relpath = rpin.getincbase().path + else: relpath = rpin.path + pathcomps = os.path.join(rpin.conn.os.getcwd(), relpath).split("/") + assert len(pathcomps) >= 2 # path should be relative to / + + i = len(pathcomps) + while i >= 2: + parent_dir = RPath(rpin.conn, "/".join(pathcomps[:i])) + if (parent_dir.isdir() and + "rdiff-backup-data" in parent_dir.listdir()): break + i = i-1 + else: Log.FatalError("Unable to find rdiff-backup-data directory") + + self.rootrp = rootrp = parent_dir + Log("Using mirror root directory %s" % rootrp.path, 6) + + self.datadir = rootrp.append_path("rdiff-backup-data") + SetConnections.UpdateGlobal('rbdir', self.datadir) + if not self.datadir.isdir(): + Log.FatalError("Unable to read rdiff-backup-data directory %s" % + self.datadir.path) -earlier, try: + from_datadir = tuple(pathcomps[i:]) + if not from_datadir or from_datadir[0] != "rdiff-backup-data": + return (rootrp, from_datadir) # in mirror, not increments + assert from_datadir[1] == "increments" + return (rootrp, from_datadir[2:]) -rdiff-backup -l /mnt/back/bar -""") - print Manage.describe_root_incs(datadir) - def li_getdatadir(self, rootrp, errormsg): - """Return data dir if can find it, otherwise use errormsg""" - datadir = rootrp.append("rdiff-backup-data") - if not datadir.lstat() or not datadir.isdir(): - Log.FatalError(errormsg) - return datadir + def ListIncrements(self, rp): + """Print out a summary of the increments and their times""" + mirror_root, index = self.restore_get_root(rp) + Globals.rbdir = datadir = \ + mirror_root.append_path("rdiff-backup-data") + mirrorrp = mirror_root.new_index(index) + inc_rpath = datadir.append_path('increments', index) + incs = Restore.get_inclist(inc_rpath) + mirror_time = Restore.get_mirror_time() + if Globals.parsable_output: + print Manage.describe_incs_parsable(incs, mirror_time, mirrorrp) + else: print Manage.describe_incs_human(incs, mirror_time, mirrorrp) def RemoveOlderThan(self, rootrp): @@ -417,7 +444,8 @@ rdiff-backup -l /mnt/back/bar """Unable to open rdiff-backup-data dir. 
Try finding the increments first using --list-increments.""") - time = self.rot_get_earliest_time() + try: time = Time.genstrtotime(self.remove_older_than_string) + except TimeError, exp: Log.FatalError(str(exp)) timep = Time.timetopretty(time) Log("Deleting increment(s) before %s" % timep, 4) incobjs = filter(lambda x: x.time < time, Manage.get_incobjs(datadir)) @@ -433,11 +461,6 @@ Try finding the increments first using --list-increments.""") incobjs_time), 3) Manage.delete_earlier_than(datadir, time) - def rot_get_earliest_time(self): - """Return earliest time in seconds that will not be deleted""" - seconds = Time.intstringtoseconds(self.remove_older_than_string) - return time.time() - seconds - if __name__ == "__main__" and not globals().has_key('__no_execute__'): diff --git a/rdiff-backup/src/manage.py b/rdiff-backup/src/manage.py index c0f4a85..0c08872 100644 --- a/rdiff-backup/src/manage.py +++ b/rdiff-backup/src/manage.py @@ -12,37 +12,53 @@ class Manage: """Return Increments objects given the rdiff-backup data directory""" return map(IncObj, Manage.find_incrps_with_base(datadir, "increments")) - def find_incrps_with_base(dir_rp, basename): - """Return list of incfiles with given basename in dir_rp""" - rps = map(dir_rp.append, dir_rp.listdir()) - incrps = filter(RPath.isincfile, rps) - result = filter(lambda rp: rp.getincbase_str() == basename, incrps) - Log("find_incrps_with_base: found %d incs" % len(result), 6) - return result + def get_file_type(rp): + """Returns one of "regular", "directory", "missing", or "special".""" + if not rp.lstat(): return "missing" + elif rp.isdir(): return "directory" + elif rp.isreg(): return "regular" + else: return "special" - def describe_root_incs(datadir): + def get_inc_type(inc): + """Return file type increment represents""" + assert inc.isincfile() + type = inc.getinctype() + if type == "dir": return "directory" + elif type == "diff": return "regular" + elif type == "missing": return "missing" + elif type == "snapshot": return Manage.get_file_type(inc) + else: assert None, "Unknown type %s" % (type,) + + def describe_incs_parsable(incs, mirror_time, mirrorrp): + """Return a string parsable by computer describing the increments + + Each line is a time in seconds of the increment, and then the + type of the file. It will be sorted oldest to newest. 
For example: + + 10000 regular + 20000 directory + 30000 special + 40000 missing + 50000 regular <- last will be the current mirror + + """ + incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs] + incpairs.sort() + result = ["%s %s" % (time, Manage.get_inc_type(inc)) + for time, inc in incpairs] + result.append("%s %s" % (mirror_time, Manage.get_file_type(mirrorrp))) + return "\n".join(result) + + def describe_incs_human(incs, mirror_time, mirrorrp): """Return a string describing all the the root increments""" - result = [] - currentrps = Manage.find_incrps_with_base(datadir, "current_mirror") - if not currentrps: - Log("Warning: no current mirror marker found", 1) - elif len(currentrps) > 1: - Log("Warning: multiple mirror markers found", 1) - for rp in currentrps: - result.append("Found mirror marker %s" % rp.path) - result.append("Indicating latest mirror taken at %s" % - Time.stringtopretty(rp.getinctime())) - result.append("---------------------------------------------" - "-------------") - - # Sort so they are in reverse order by time - time_w_incobjs = map(lambda io: (-io.time, io), - Manage.get_incobjs(datadir)) - time_w_incobjs.sort() - incobjs = map(lambda x: x[1], time_w_incobjs) - result.append("Found %d increments:" % len(incobjs)) - result.append("\n------------------------------------------\n".join( - map(IncObj.full_description, incobjs))) + incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs] + incpairs.sort() + + result = ["Found %d increments:" % len(incpairs)] + for time, inc in incpairs: + result.append(" %s %s" % + (inc.dirsplit()[1], Time.timetopretty(time))) + result.append("Current mirror: %s" % Time.timetopretty(mirror_time)) return "\n".join(result) def delete_earlier_than(baserp, time): @@ -53,6 +69,11 @@ class Manage: rdiff-backup-data directory should be the root of the tree. """ + baserp.conn.Manage.delete_earlier_than_local(baserp, time) + + def delete_earlier_than_local(baserp, time): + """Like delete_earlier_than, but run on local connection for speed""" + assert baserp.conn is Globals.local_connection def yield_files(rp): yield rp if rp.isdir(): diff --git a/rdiff-backup/src/restore.py b/rdiff-backup/src/restore.py index dcba7f3..0faa9b2 100644 --- a/rdiff-backup/src/restore.py +++ b/rdiff-backup/src/restore.py @@ -24,32 +24,78 @@ class Restore: same index as mirror. 
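The rewritten restore path below revolves around two times: the date of the current mirror and the requested restore time, which get_rest_time snaps to an increment that actually exists (a request falling between two increments means the older one is what the mirror held at that moment). A small sketch of that snapping rule (hypothetical helper, not the patch code):

    def snap_rest_time(requested, increment_times):
        """Pick the increment time a requested restore time corresponds to."""
        older = [t for t in increment_times if t <= requested]
        if not older: return requested   # nothing recorded that early
        return max(older)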
""" - if not isinstance(mirror, DSRPath): - mirror = DSRPath(source = 1, mirror) - if not isinstance(target, DSRPath): - target = DSRPath(source = None, target) + if not isinstance(mirror, DSRPath): mirror = DSRPath(1, mirror) + if not isinstance(target, DSRPath): target = DSRPath(None, target) + + mirror_time = Restore.get_mirror_time() + rest_time = Restore.get_rest_time(rest_time) + inc_list = Restore.get_inclist(inc_rpath) + rid = RestoreIncrementData(inc_rpath.index, inc_rpath, inc_list) + rid.sortincseq(rest_time, mirror_time) + Restore.check_hardlinks(rest_time) + Restore.restore_recursive(inc_rpath.index, mirror, rid, target, + rest_time, mirror_time) + + def get_mirror_time(): + """Return the time (in seconds) of latest mirror""" + current_mirror_incs = \ + Restore.get_inclist(Globals.rbdir.append("current_mirror")) + if not current_mirror_incs: + Log.FatalError("Could not get time of current mirror") + elif len(current_mirror_incs) > 1: + Log("Warning, two different dates for current mirror found", 2) + return Time.stringtotime(current_mirror_incs[0].getinctime()) + + def get_rest_time(old_rest_time): + """If old_rest_time is between two increments, return older time + + There is a slightly tricky reason for doing this: The rest of + the code just ignores increments that are older than + rest_time. But sometimes we want to consider the very next + increment older than rest time, because rest_time will be + between two increments, and what was actually on the mirror + side will correspond to the older one. + + So here we assume all rdiff-backup events were recorded in + "increments" increments, and if its in-between we pick the + older one here. + """ + base_incs = Restore.get_inclist(Globals.rbdir.append("increments")) + if not base_incs: return old_rest_time + inctimes = [Time.stringtotime(inc.getinctime()) for inc in base_incs] + return max(filter(lambda time: time <= old_rest_time, inctimes)) + + def get_inclist(inc_rpath): + """Returns increments with given base""" dirname, basename = inc_rpath.dirsplit() parent_dir = RPath(inc_rpath.conn, dirname, ()) index = inc_rpath.index - if inc_rpath.index: + if index: get_inc_ext = lambda filename: \ RPath(inc_rpath.conn, inc_rpath.base, inc_rpath.index[:-1] + (filename,)) else: get_inc_ext = lambda filename: \ - RPath(inc_rpath.conn, os.join(dirname, filename)) + RPath(inc_rpath.conn, os.path.join(dirname, filename)) inc_list = [] for filename in parent_dir.listdir(): inc = get_inc_ext(filename) - if inc.getincbase_str() == basename: inc_list.append(inc) - - rid = RestoreIncrementData(index, inc_rpath, inc_list) - rid.sortincseq(rest_time) - Restore.restore_recursive(index, mirror, rid, target, rest_time) - - def restore_recursive(index, mirror, rid, target, time): + if inc.isincfile() and inc.getincbase_str() == basename: + inc_list.append(inc) + return inc_list + + def check_hardlinks(rest_time): + """Check for hard links and enable hard link support if found""" + if (Globals.preserve_hardlinks != 0 and + Hardlink.retrieve_final(rest_time)): + Log("Hard link information found, attempting to preserve " + "hard links.", 5) + SetConnections.UpdateGlobal('preserve_hardlinks', 1) + else: SetConnections.UpdateGlobal('preserve_hardlinks', None) + + def restore_recursive(index, mirror, rid, target, time, mirror_time): """Recursive restore function. 
rid is a RestoreIncrementData object whose inclist is already @@ -66,14 +112,15 @@ class Restore: mirror_finalizer = DestructiveSteppingFinalizer() target_finalizer = DestructiveSteppingFinalizer() - for rcd in Restore.yield_rcds(rid.index, mirror, rid, target, time): + for rcd in Restore.yield_rcds(rid.index, mirror, rid, + target, time, mirror_time): rcd.RestoreFile() - if rcd.mirror: mirror_finalizer(rcd.mirror) - target_finalizer(rcd.target) + if rcd.mirror: mirror_finalizer(rcd.index, rcd.mirror) + target_finalizer(rcd.target.index, rcd.target) target_finalizer.Finish() mirror_finalizer.Finish() - def yield_rcds(index, mirrorrp, rid, target, rest_time): + def yield_rcds(index, mirrorrp, rid, target, rest_time, mirror_time): """Iterate RestoreCombinedData objects starting with given args rid is a RestoreCombinedData object. target is an rpath where @@ -91,9 +138,10 @@ class Restore: mirrorrp = None rcd = RestoreCombinedData(rid, mirrorrp, target) - if mirrorrp and mirrorrp.isdir() or rid and rid.inc_rpath.isdir(): + if mirrorrp and mirrorrp.isdir() or \ + rid and rid.inc_rpath and rid.inc_rpath.isdir(): sub_rcds = Restore.yield_sub_rcds(index, mirrorrp, rid, - target, rest_time) + target, rest_time, mirror_time) else: sub_rcds = None if select_result == 1: @@ -108,35 +156,39 @@ class Restore: yield first for sub_rcd in sub_rcds: yield sub_rcd - def yield_collated_tuples_dir(index, mirrorrp, rid, target, rest_time): + def yield_sub_rcds(index, mirrorrp, rid, target, rest_time, mirror_time): """Yield collated tuples from inside given args""" - if not Restore.check_dir_exists(mirrorrp, inc_tup): return + if not Restore.check_dir_exists(mirrorrp, rid): return mirror_iter = Restore.yield_mirrorrps(mirrorrp) - rid_iter = Restore.get_rids(rid, rest_time) + rid_iter = Restore.yield_rids(rid, rest_time, mirror_time) for indexed_tup in RORPIter.CollateIterators(mirror_iter, rid_iter): index = indexed_tup.index new_mirrorrp, new_rid = indexed_tup - for rcd in Restore.yield_collated_tuples(index, new_mirrorrp, - new_rid, target.new_index(index), rest_time): + for rcd in Restore.yield_rcds(index, new_mirrorrp, + new_rid, target.append(index[-1]), rest_time, mirror_time): yield rcd - def check_dir_exists(mirrorrp, inc_tuple): + def check_dir_exists(mirrorrp, rid): """Return true if target should be a directory""" - if inc_tuple and inc_tuple[1]: + if rid and rid.inc_list: # Incs say dir if last (earliest) one is a dir increment - return inc_tuple[1][-1].getinctype() == "dir" + return rid.inc_list[-1].getinctype() == "dir" elif mirrorrp: return mirrorrp.isdir() # if no incs, copy mirror else: return None def yield_mirrorrps(mirrorrp): """Yield mirrorrps underneath given mirrorrp""" if mirrorrp and mirrorrp.isdir(): - dirlist = mirrorrp.listdir() - dirlist.sort() - for filename in dirlist: yield mirrorrp.append(filename) - - def yield_rids(rid, rest_time): + if Globals.quoting_enabled: + for rp in FilenameMapping.get_quoted_dir_children(mirrorrp): + yield rp + else: + dirlist = mirrorrp.listdir() + dirlist.sort() + for filename in dirlist: yield mirrorrp.append(filename) + + def yield_rids(rid, rest_time, mirror_time): """Yield RestoreIncrementData objects within given rid dir If the rid doesn't correspond to a directory, don't yield any @@ -148,16 +200,19 @@ class Restore: if not rid or not rid.inc_rpath or not rid.inc_rpath.isdir(): return rid_dict = {} # dictionary of basenames:rids dirlist = rid.inc_rpath.listdir() + if Globals.quoting_enabled: + dirlist = [FilenameMapping.unquote(fn) for fn in 
dirlist] def affirm_dict_indexed(basename): """Make sure the rid dictionary has given basename as key""" - if not inc_list_dict.has_key(basename): + if not rid_dict.has_key(basename): rid_dict[basename] = RestoreIncrementData( rid.index + (basename,), None, []) # init with empty rid def add_to_dict(filename): """Add filename to the inc tuple dictionary""" rp = rid.inc_rpath.append(filename) + if Globals.quoting_enabled: rp.quote_path() if rp.isincfile(): basename = rp.getincbase_str() affirm_dict_indexed(basename) @@ -167,14 +222,14 @@ class Restore: rid_dict[filename].inc_rpath = rp for filename in dirlist: add_to_dict(filename) - keys = inc_list_dict.keys() + keys = rid_dict.keys() keys.sort() # sortincseq now to avoid descending .missing directories later for key in keys: rid = rid_dict[key] if rid.inc_rpath or rid.inc_list: - rid.sortincseq(rest_time) + rid.sortincseq(rest_time, mirror_time) yield rid MakeStatic(Restore) @@ -192,26 +247,36 @@ class RestoreIncrementData: self.inc_rpath = inc_rpath self.inc_list = inc_list - def sortincseq(self, rest_time): + def sortincseq(self, rest_time, mirror_time): """Sort self.inc_list sequence, throwing away irrelevant increments""" - incpairs = map(lambda rp: (Time.stringtotime(rp.getinctime()), rp), - self.inc_list) - # Only consider increments at or after the time being restored - incpairs = filter(lambda pair: pair[0] >= rest_time, incpairs) + if not self.inc_list or rest_time >= mirror_time: + self.inc_list = [] + return - # Now throw away older unnecessary increments - incpairs.sort() + newer_incs = self.get_newer_incs(rest_time, mirror_time) i = 0 - while(i < len(incpairs)): + while(i < len(newer_incs)): # Only diff type increments require later versions - if incpairs[i][1].getinctype() != "diff": break + if newer_incs[i].getinctype() != "diff": break i = i+1 - incpairs = incpairs[:i+1] + self.inc_list = newer_incs[:i+1] + self.inc_list.reverse() # return in reversed order (latest first) + + def get_newer_incs(self, rest_time, mirror_time): + """Return list of newer incs sorted by time (increasing) - # Return increments in reversed order (latest first) - incpairs.reverse() - self.inc_list = map(lambda pair: pair[1], incpairs) + Also discard increments older than rest_time (rest_time we are + assuming is the exact time rdiff-backup was run, so no need to + consider the next oldest increment or any of that) + """ + incpairs = [] + for inc in self.inc_list: + time = Time.stringtotime(inc.getinctime()) + if time >= rest_time: incpairs.append((time, inc)) + incpairs.sort() + return [pair[1] for pair in incpairs] + class RestoreCombinedData: """Combine index information from increment and mirror directories @@ -235,9 +300,12 @@ class RestoreCombinedData: if mirror: self.mirror = mirror assert mirror.index == self.index + else: self.mirror = None elif mirror: self.index = mirror.index self.mirror = mirror + self.inc_list = [] + self.inc_rpath = None else: assert None, "neither rid nor mirror given" self.target = target @@ -249,15 +317,15 @@ class RestoreCombinedData: if self.restore_hardlink(): return - if not inclist or inclist[0].getinctype() == "diff": + if not self.inc_list or self.inc_list[0].getinctype() == "diff": assert self.mirror and self.mirror.lstat(), \ "No base to go with incs for %s" % self.target.path RPath.copy_with_attribs(self.mirror, self.target) for inc in self.inc_list: self.applyinc(inc, self.target) - def log(self) + def log(self): """Log current restore action""" - inc_string = ','.join(map(lambda x: x.path, 
self.inc_list)) + inc_string = ','.join([inc.path for inc in self.inc_list]) Log("Restoring %s with increments %s to %s" % (self.mirror and self.mirror.path, inc_string, self.target.path), 5) @@ -266,7 +334,7 @@ class RestoreCombinedData: """Hard link target and return true if hard linking appropriate""" if (Globals.preserve_hardlinks and Hardlink.restore_link(self.index, self.target)): - RPath.copy_attribs(self.inc_list and inc_list[-1] or + RPath.copy_attribs(self.inc_list and self.inc_list[-1] or self.mirror, self.target) return 1 return None diff --git a/rdiff-backup/src/robust.py b/rdiff-backup/src/robust.py index 22f35b9..a71eabc 100644 --- a/rdiff-backup/src/robust.py +++ b/rdiff-backup/src/robust.py @@ -252,7 +252,7 @@ class TempFile(RPath): if self.isdir() and not rp_dest.isdir(): # Cannot move a directory directly over another file rp_dest.delete() - if (isinstance(rp_dest, DSRPath) and rp_dest.perms_delayed + if (isinstance(rp_dest, DSRPath) and rp_dest.delay_perms and not self.hasfullperms()): # If we are moving to a delayed perm directory, delay # permission change on destination. @@ -531,7 +531,7 @@ class Resume: Log("Last backup dated %s was aborted, but we aren't " "resuming it." % Time.timetopretty(si.time), 2) return None - assert 0 + assert None MakeClass(Resume) diff --git a/rdiff-backup/src/rpath.py b/rdiff-backup/src/rpath.py index 0089bf6..110b89f 100644 --- a/rdiff-backup/src/rpath.py +++ b/rdiff-backup/src/rpath.py @@ -168,6 +168,17 @@ class RPathStatic: rp_dest.data = rp_source.data rp_source.data = {'type': None} + # If we are moving to a DSRPath, assume that the current times + # are the intended ones. We need to save them now in case + # they are changed later. + if isinstance(rp_dest, DSRPath): + if rp_dest.delay_mtime: + if 'mtime' in rp_dest.data: + rp_dest.setmtime(rp_dest.data['mtime']) + if rp_dest.delay_atime: + if 'atime' in rp_dest.data: + rp_dest.setatime(rp_dest.data['atime']) + def tupled_lstat(filename): """Like os.lstat, but return only a tuple, or None if os.error @@ -413,7 +424,7 @@ class RPath(RORPath): self.base = base self.path = apply(os.path.join, (base,) + self.index) self.file = None - if data: self.data = data + if data or base is None: self.data = data else: self.setdata() def __str__(self): @@ -493,6 +504,12 @@ class RPath(RORPath): s = self.conn.reval("lambda path: os.lstat(path).st_rdev", self.path) return (s >> 8, s & 0xff) + def quote_path(self): + """Set path from quoted version of index""" + quoted_list = [FilenameMapping.quote(path) for path in self.index] + self.path = apply(os.path.join, [self.base] + quoted_list) + self.setdata() + def chmod(self, permissions): """Wrapper around os.chmod""" self.conn.os.chmod(self.path, permissions) @@ -594,7 +611,8 @@ class RPath(RORPath): if not self.lstat(): return # must have been deleted in meantime elif self.isdir(): itm = RpathDeleter() - for dsrp in Select(self, None).set_iter(): itm(dsrp.index, dsrp) + for dsrp in Select(DSRPath(None, self)).set_iter(): + itm(dsrp.index, dsrp) itm.Finish() else: self.conn.os.unlink(self.path) self.setdata() @@ -616,7 +634,7 @@ class RPath(RORPath): self.path.split("/"))) if self.path[0] == "/": newpath = "/" + newpath elif not newpath: newpath = "." 
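The quote_path method above relies on FilenameMapping, which this changeset only references (it lives in the new filename_mapping.py and is not shown here). Assuming the chars_to_quote and quoting_char globals added at the top of this patch, a minimal quote/unquote pair could look like the following; the three-digit ASCII encoding is an assumption, not taken from this diff:

    import re

    chars_to_quote = ":"          # e.g. what --windows-mode sets
    quoting_char = ";"

    _quote_re = re.compile("[%s%s]" % (re.escape(chars_to_quote),
                                       re.escape(quoting_char)))
    _unquote_re = re.compile("%s[0-9]{3}" % re.escape(quoting_char))

    def quote(name):
        # replace each special character with the quote char + 3-digit code
        return _quote_re.sub(
            lambda m: "%s%03d" % (quoting_char, ord(m.group(0))), name)

    def unquote(name):
        return _unquote_re.sub(lambda m: chr(int(m.group(0)[1:])), name)

    assert quote("C:file") == "C;058file"
    assert unquote(quote("C:file")) == "C:file"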
- return self.__class__(self.conn, newpath, ()) + return self.newpath(newpath) def dirsplit(self): """Returns a tuple of strings (dirname, basename) @@ -635,10 +653,20 @@ class RPath(RORPath): comps = normed.path.split("/") return "/".join(comps[:-1]), comps[-1] + def newpath(self, newpath, index = ()): + """Return new RPath with the same connection but different path""" + return self.__class__(self.conn, newpath, index) + def append(self, ext): """Return new RPath with same connection by adjoing ext""" return self.__class__(self.conn, self.base, self.index + (ext,)) + def append_path(self, ext, new_index = ()): + """Like append, but add ext to path instead of to index""" + assert not self.index # doesn't make sense if index isn't () + return self.__class__(self.conn, os.path.join(self.base, ext), + new_index) + def new_index(self, index): """Return similar RPath but with new index""" return self.__class__(self.conn, self.base, index) diff --git a/rdiff-backup/src/selection.py b/rdiff-backup/src/selection.py index aaa8639..cae6db3 100644 --- a/rdiff-backup/src/selection.py +++ b/rdiff-backup/src/selection.py @@ -60,21 +60,20 @@ class Select: # This re should not match normal filenames, but usually just globs glob_re = re.compile("(.*[*?[]|ignorecase\\:)", re.I | re.S) - def __init__(self, rpath, source): - """DSRPIterator initializer. + def __init__(self, dsrpath, quoted_filenames = None): + """DSRPIterator initializer. dsrp is the root directory - rpath is the root dir. Source is true if rpath is the root of - the source directory, and false for the mirror directory + When files have quoted characters in them, quoted_filenames + should be true. Then RPath's index will be the unquoted + version. """ - assert isinstance(rpath, RPath) + assert isinstance(dsrpath, DSRPath) self.selection_functions = [] - self.source = source - if isinstance(rpath, DSRPath): self.dsrpath = rpath - else: self.dsrpath = DSRPath(rpath.conn, rpath.base, - rpath.index, rpath.data) + self.dsrpath = dsrpath self.prefix = self.dsrpath.path - + self.quoting_on = Globals.quoting_enabled and quoted_filenames + def set_iter(self, starting_index = None, sel_func = None): """Initialize more variables, get ready to iterate @@ -106,7 +105,7 @@ class Select: """ s = sel_func(dsrpath) - if s === 0: return + if s == 0: return elif s == 1: # File is included yield dsrpath if dsrpath.isdir(): @@ -122,11 +121,15 @@ class Select: def iterate_in_dir(self, dsrpath, rec_func, sel_func): """Iterate the dsrps in directory dsrpath.""" - dir_listing = dsrpath.listdir() - dir_listing.sort() - for filename in dir_listing: - for dsrp in rec_func(dsrpath.append(filename), rec_func, sel_func): - yield dsrp + if self.quoting_on: + for subdir in FilenameMapping.get_quoted_dir_children(dsrpath): + for dsrp in rec_func(subdir, rec_func, sel_func): yield dsrp + else: + dir_listing = dsrpath.listdir() + dir_listing.sort() + for filename in dir_listing: + for dsrp in rec_func(dsrpath.append(filename), + rec_func, sel_func): yield dsrp def iterate_starting_from(self, dsrpath, rec_func, sel_func): """Like Iterate, but only yield indicies > self.starting_index""" diff --git a/rdiff-backup/src/ttime.py b/rdiff-backup/src/ttime.py index f6d041c..166c3f8 100644 --- a/rdiff-backup/src/ttime.py +++ b/rdiff-backup/src/ttime.py @@ -12,6 +12,7 @@ class Time: """Functions which act on the time""" _interval_conv_dict = {"s": 1, "m": 60, "h": 3600, "D": 86400, "W": 7*86400, "M": 30*86400, "Y": 365*86400} + _integer_regexp = re.compile("^[0-9]+$") _interval_regexp = 
re.compile("^([0-9]+)([smhDWMY])") _genstr_date_regexp1 = re.compile("^(?P<year>[0-9]{4})[-/]" "(?P<month>[0-9]{1,2})[-/](?P<day>[0-9]{1,2})$") @@ -70,7 +71,7 @@ class Time: utc_in_secs = time.mktime(timetuple) - time.altzone else: utc_in_secs = time.mktime(timetuple) - time.timezone - return utc_in_secs + cls.tzdtoseconds(timestring[19:]) + return long(utc_in_secs) + cls.tzdtoseconds(timestring[19:]) except (TypeError, ValueError, AssertionError): return None def timetopretty(cls, timeinseconds): @@ -155,8 +156,10 @@ strings, like "2002-04-26T04:22:01-07:00" (strings like "2002-04-26T04:22:01" are also acceptable - rdiff-backup will use the current time zone), or ordinary dates like 2/4/1997 or 2001-04-23 (various combinations are acceptable, but the month always precedes -the day). -""" % timestr) +the day).""" % timestr) + + # Test for straight integer + if cls._integer_regexp.search(timestr): return int(timestr) # Test for w3-datetime format, possibly missing tzd t = cls.stringtotime(timestr) or cls.stringtotime(timestr+cls.gettzd()) |
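For reference, the _interval_conv_dict and _interval_regexp above back the interval strings (such as "3D" or "5W") accepted by --remove-older-than, while the new _integer_regexp lets genstrtotime take a bare number as seconds since the epoch. A loose sketch of reducing an interval string to seconds (the real parser may differ, for instance in how it anchors or combines units):

    import re

    _conv = {"s": 1, "m": 60, "h": 3600, "D": 86400,
             "W": 7*86400, "M": 30*86400, "Y": 365*86400}
    _interval_re = re.compile("([0-9]+)([smhDWMY])")

    def interval_to_seconds(s):
        total = 0
        for num, unit in _interval_re.findall(s):
            total += int(num) * _conv[unit]
        return total

    assert interval_to_seconds("3D") == 3 * 86400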