Diffstat (limited to 'rdiff-backup/rdiff_backup')
-rw-r--r--  rdiff-backup/rdiff_backup/Main.py            |   8
-rw-r--r--  rdiff-backup/rdiff_backup/Security.py        |   7
-rw-r--r--  rdiff-backup/rdiff_backup/SetConnections.py  |   4
-rw-r--r--  rdiff-backup/rdiff_backup/backup.py          | 121
-rw-r--r--  rdiff-backup/rdiff_backup/connection.py      |   2
-rw-r--r--  rdiff-backup/rdiff_backup/increment.py       |   3
-rw-r--r--  rdiff-backup/rdiff_backup/regress.py         |  83
-rw-r--r--  rdiff-backup/rdiff_backup/restore.py         |   2
-rw-r--r--  rdiff-backup/rdiff_backup/rpath.py           |  55
9 files changed, 191 insertions(+), 94 deletions(-)
diff --git a/rdiff-backup/rdiff_backup/Main.py b/rdiff-backup/rdiff_backup/Main.py
index 859bf97..27ef56c 100644
--- a/rdiff-backup/rdiff_backup/Main.py
+++ b/rdiff-backup/rdiff_backup/Main.py
@@ -562,17 +562,19 @@ def ListChangedSince(rp):
def CheckDest(dest_rp):
"""Check the destination directory, """
+ if Globals.rbdir is None:
+ SetConnections.UpdateGlobal('rbdir',
+ dest_rp.append_path("rdiff-backup-data"))
need_check = checkdest_need_check(dest_rp)
if need_check is None:
Log.FatalError("No destination dir found at %s" % (dest_rp.path,))
elif need_check == 0:
Log.FatalError("Destination dir %s does not need checking" %
(dest_rp.path,))
- regress.Regress(dest_rp)
+ dest_rp.conn.regress.Regress(dest_rp)
def checkdest_need_check(dest_rp):
"""Return None if no dest dir found, 1 if dest dir needs check, 0 o/w"""
- assert dest_rp.conn is Globals.rbdir.conn
if not dest_rp.isdir() or not Globals.rbdir.isdir(): return None
curmirroot = Globals.rbdir.append("current_mirror")
curmir_incs = restore.get_inclist(curmirroot)
@@ -600,4 +602,4 @@ def checkdest_if_necessary(dest_rp):
need_check = checkdest_need_check(dest_rp)
if need_check == 1:
Log("Previous backup seems to have failed, checking now.", 2)
- regress.Regress(dest_rp)
+ dest_rp.conn.regress.Regress(dest_rp)
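The two Main.py hunks above route the regress call through dest_rp.conn, so Regress() executes on whichever connection actually owns the destination directory instead of always running locally. A minimal sketch of that dispatch idea, using hypothetical LocalConn/RemoteConn classes rather than rdiff-backup's real connection.py:

    import importlib

    class LocalConn(object):
        # conn.module.func(...) resolves to an ordinary local call
        def __getattr__(self, module_name):
            return importlib.import_module(module_name)

    class RemoteConn(object):
        # conn.module.func(...) is forwarded to the remote process; the
        # channel object and its request() method are assumptions here
        def __init__(self, channel):
            self.channel = channel
        def __getattr__(self, module_name):
            channel = self.channel
            class _ModuleProxy(object):
                def __getattr__(self, func_name):
                    def call(*args):
                        return channel.request(module_name, func_name, args)
                    return call
            return _ModuleProxy()

    # dest_rp.conn is one of these, so dest_rp.conn.regress.Regress(dest_rp)
    # runs wherever the mirror directory actually lives.
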
diff --git a/rdiff-backup/rdiff_backup/Security.py b/rdiff-backup/rdiff_backup/Security.py
index a10330c..9a136e2 100644
--- a/rdiff-backup/rdiff_backup/Security.py
+++ b/rdiff-backup/rdiff_backup/Security.py
@@ -57,8 +57,9 @@ def set_security_level(action, cmdpairs):
if Globals.server: return
cp1 = cmdpairs[0]
if len(cmdpairs) > 1: cp2 = cmdpairs[1]
+ else: cp2 = cp1
- if action == "backup":
+ if action == "backup" or action == "check-destination-dir":
if bothlocal(cp1, cp2) or bothremote(cp1, cp2):
sec_level = "minimal"
rdir = tempfile.gettempdir()
@@ -140,7 +141,9 @@ def set_allowed_requests(sec_level):
"backup.DestinationStruct.patch_and_finalize",
"backup.DestinationStruct.patch_increment_and_finalize",
"Main.backup_touch_curmirror_local",
- "Globals.ITRB.increment_stat"])
+ "Globals.ITRB.increment_stat",
+ "statistics.record_error",
+ "log.ErrorLog.write_if_open"])
if Globals.server:
allowed_requests.extend(
["SetConnections.init_connection_remote",
diff --git a/rdiff-backup/rdiff_backup/SetConnections.py b/rdiff-backup/rdiff_backup/SetConnections.py
index b74aec5..62cff00 100644
--- a/rdiff-backup/rdiff_backup/SetConnections.py
+++ b/rdiff-backup/rdiff_backup/SetConnections.py
@@ -151,7 +151,9 @@ Couldn't start up the remote connection by executing
Remember that, under the default settings, rdiff-backup must be
installed in the PATH on the remote system. See the man page for more
-information.""" % (exception, remote_cmd))
+information on this. This message may also be displayed if the remote
+version of rdiff-backup is quite different from the local version (%s)."""
+ % (exception, remote_cmd, Globals.version))
if remote_version != Globals.version:
Log("Warning: Local version %s does not match remote version %s."
diff --git a/rdiff-backup/rdiff_backup/backup.py b/rdiff-backup/rdiff_backup/backup.py
index 6e87048..9e24e56 100644
--- a/rdiff-backup/rdiff_backup/backup.py
+++ b/rdiff-backup/rdiff_backup/backup.py
@@ -20,6 +20,7 @@
"""High level functions for mirroring and mirror+incrementing"""
from __future__ import generators
+import errno
import Globals, metadata, rorpiter, TempFile, Hardlink, robust, increment, \
rpath, static, log, selection, Time, Rdiff, statistics
@@ -153,7 +154,7 @@ class DestinationStruct:
Hardlink.islinked(src_rorp or dest_rorp)):
dest_sig = rpath.RORPath(index)
dest_sig.flaglinked(Hardlink.get_link_index(dest_sig))
- elif dest_rorp:
+ elif dest_rorp:
dest_sig = dest_rorp.getRORPath()
if dest_rorp.isreg():
dest_rp = dest_base_rpath.new_index(index)
@@ -196,10 +197,11 @@ class CacheCollatedPostProcess:
receives.
2. The metadata must match what is stored in the destination
- directory. If there is an error we do not update the dest
- directory for that file, and the old metadata is used. Thus
- we cannot write any metadata until we know the file has been
- procesed correctly.
+ directory. If there is an error, either we do not update the
+ dest directory for that file and the old metadata is used, or
+ the file is deleted on the other end. Thus we cannot write
+ any metadata until we know the file has been processed
+ correctly.
The class caches older source_rorps and dest_rps so the patch
function can retrieve them if necessary. The patch function can
@@ -218,8 +220,9 @@ class CacheCollatedPostProcess:
# the following should map indicies to lists [source_rorp,
# dest_rorp, changed_flag, success_flag] where changed_flag
# should be true if the rorps are different, and success_flag
- # should be true if dest_rorp has been successfully updated to
- # source_rorp. They both default to false.
+ # should be 1 if dest_rorp has been successfully updated to
+ # source_rorp, and 2 if the destination file is deleted
+ # entirely. They both default to false (0).
self.cache_dict = {}
self.cache_indicies = []
@@ -268,17 +271,26 @@ class CacheCollatedPostProcess:
if not changed or success:
if source_rorp: self.statfileobj.add_source_file(source_rorp)
if dest_rorp: self.statfileobj.add_dest_file(dest_rorp)
- if success:
+ if success == 0: metadata_rorp = dest_rorp
+ elif success == 1:
self.statfileobj.add_changed(source_rorp, dest_rorp)
metadata_rorp = source_rorp
- else: metadata_rorp = dest_rorp
+ else: metadata_rorp = None
if metadata_rorp and metadata_rorp.lstat():
metadata.WriteMetadata(metadata_rorp)
+ def in_cache(self, index):
+ """Return true if given index is cached"""
+ return self.cache_dict.has_key(index)
+
def flag_success(self, index):
"""Signal that the file with given index was updated successfully"""
self.cache_dict[index][3] = 1
+ def flag_deleted(self, index):
+ """Signal that the destination file was deleted"""
+ self.cache_dict[index][3] = 2
+
def flag_changed(self, index):
"""Signal that the file with given index has changed"""
self.cache_dict[index][2] = 1
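With the three-way success flag, the metadata written for each file follows directly from how its update ended: 0 keeps the old destination metadata, 1 records the new source metadata, and 2 (destination file deleted) records nothing. A tiny sketch of that mapping, assuming rorp objects as in the hunk above:

    def pick_metadata(source_rorp, dest_rorp, success):
        # success: 0 = dest not updated, 1 = updated to source, 2 = deleted
        if success == 0:
            return dest_rorp      # keep whatever metadata was already there
        if success == 1:
            return source_rorp    # mirror now matches the source file
        return None               # deleted on the destination: no entry
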
@@ -291,6 +303,10 @@ class CacheCollatedPostProcess:
"""Retrieve source_rorp with given index from cache"""
return self.cache_dict[index][0]
+ def get_mirror_rorp(self, index):
+ """Retrieve mirror_rorp with given index from cache"""
+ return self.cache_dict[index][1]
+
def close(self):
"""Process the remaining elements in the cache"""
while self.cache_indicies: self.shorten_cache()
@@ -335,9 +351,12 @@ class PatchITRB(rorpiter.ITRBranch):
rp = self.get_rp_from_root(index)
tf = TempFile.new(rp)
if self.patch_to_temp(rp, diff_rorp, tf):
- if tf.lstat(): rpath.rename(tf, rp)
- elif rp.lstat(): rp.delete()
- self.CCPP.flag_success(index)
+ if tf.lstat():
+ rpath.rename(tf, rp)
+ self.CCPP.flag_success(index)
+ elif rp.lstat():
+ rp.delete()
+ self.CCPP.flag_deleted(index)
else:
tf.setdata()
if tf.lstat(): tf.delete()
@@ -355,7 +374,23 @@ class PatchITRB(rorpiter.ITRBranch):
if robust.check_common_error(self.error_handler,
Rdiff.patch_local, (basis_rp, diff_rorp, new)) == 0: return 0
if new.lstat(): rpath.copy_attribs(diff_rorp, new)
- return 1
+ return self.matches_cached_rorp(diff_rorp, new)
+
+ def matches_cached_rorp(self, diff_rorp, new_rp):
+ """Return true if new_rp matches cached src rorp
+
+ This is a final check to make sure the temp file just written
+ matches the stats which we got earlier. If it doesn't it
+ could confuse the regress operation. This is only necessary
+ for regular files.
+
+ """
+ if not new_rp.isreg(): return 1
+ cached_rorp = self.CCPP.get_source_rorp(diff_rorp.index)
+ if cached_rorp.equal_loose(new_rp): return 1
+ log.ErrorLog.write_if_open("UpdateError", diff_rorp, "Updated mirror "
+ "temp file %s does not match source" % (new_rp.path,))
+ return 0
def write_special(self, diff_rorp, new):
"""Write diff_rorp (which holds special file) to new"""
@@ -370,7 +405,8 @@ class PatchITRB(rorpiter.ITRBranch):
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir() or not base_rp.index
if diff_rorp.isdir(): self.prepare_dir(diff_rorp, base_rp)
- else: self.set_dir_replacement(diff_rorp, base_rp)
+ elif self.set_dir_replacement(diff_rorp, base_rp):
+ self.CCPP.flag_success(index)
def set_dir_replacement(self, diff_rorp, base_rp):
"""Set self.dir_replacement, which holds data until done with dir
@@ -380,8 +416,15 @@ class PatchITRB(rorpiter.ITRBranch):
"""
assert diff_rorp.get_attached_filetype() == 'snapshot'
self.dir_replacement = TempFile.new(base_rp)
- rpath.copy_with_attribs(diff_rorp, self.dir_replacement)
+ if not self.patch_to_temp(None, diff_rorp, self.dir_replacement):
+ if self.dir_replacement.lstat(): self.dir_replacement.delete()
+ # Was an error, so now restore original directory
+ rpath.copy_with_attribs(self.CCPP.get_mirror_rorp(diff_rorp.index),
+ self.dir_replacement)
+ success = 0
+ else: success = 1
if base_rp.isdir(): base_rp.chmod(0700)
+ return success
def prepare_dir(self, diff_rorp, base_rp):
"""Prepare base_rp to turn into a directory"""
@@ -389,6 +432,10 @@ class PatchITRB(rorpiter.ITRBranch):
if not base_rp.isdir():
if base_rp.lstat(): base_rp.delete()
base_rp.mkdir()
+ self.CCPP.flag_success(diff_rorp.index)
+ else: # maybe no change, so query CCPP before tagging success
+ if self.CCPP.in_cache(diff_rorp.index):
+ self.CCPP.flag_success(diff_rorp.index)
base_rp.chmod(0700)
def end_process(self):
@@ -401,7 +448,6 @@ class PatchITRB(rorpiter.ITRBranch):
self.base_rp.rmdir()
if self.dir_replacement.lstat():
rpath.rename(self.dir_replacement, self.base_rp)
- self.CCPP.flag_success(self.base_rp.index)
class IncrementITRB(PatchITRB):
@@ -421,25 +467,48 @@ class IncrementITRB(PatchITRB):
self.cached_incrp = self.inc_root_rp.new_index(index)
return self.cached_incrp
+ def inc_with_checking(self, new, old, inc_rp):
+ """Produce increment taking new to old checking for errors"""
+ try: inc = increment.Increment(new, old, inc_rp)
+ except OSError, exc:
+ if (errno.errorcode.has_key(exc[0]) and
+ errno.errorcode[exc[0]] == 'ENAMETOOLONG'):
+ self.error_handler(exc, old)
+ return None
+ else: raise
+ return inc
+
def fast_process(self, index, diff_rorp):
"""Patch base_rp with diff_rorp and write increment (neither is dir)"""
rp = self.get_rp_from_root(index)
tf = TempFile.new(rp)
- self.patch_to_temp(rp, diff_rorp, tf)
- increment.Increment(tf, rp, self.get_incrp(index))
- if tf.lstat(): rpath.rename(tf, rp)
- else: rp.delete()
- self.CCPP.flag_success(index)
+ if self.patch_to_temp(rp, diff_rorp, tf):
+ inc = self.inc_with_checking(tf, rp, self.get_incrp(index))
+ if inc is not None:
+ if inc.isreg():
+ inc.fsync_with_dir() # Write inc before rp changed
+ if tf.lstat():
+ rpath.rename(tf, rp)
+ self.CCPP.flag_success(index)
+ elif rp.lstat():
+ rp.delete()
+ self.CCPP.flag_deleted(index)
+ return # normal return, otherwise error occurred
+ tf.setdata()
+ if tf.lstat(): tf.delete()
def start_process(self, index, diff_rorp):
"""Start processing directory"""
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir()
if diff_rorp.isdir():
- increment.Increment(diff_rorp, base_rp, self.get_incrp(index))
+ inc = self.inc_with_checking(diff_rorp, base_rp,
+ self.get_incrp(index))
+ if inc and inc.isreg():
+ inc.fsync_with_dir() # must write inc before rp changed
self.prepare_dir(diff_rorp, base_rp)
- else:
- self.set_dir_replacement(diff_rorp, base_rp)
- increment.Increment(self.dir_replacement, base_rp,
- self.get_incrp(index))
+ elif (self.set_dir_replacement(diff_rorp, base_rp) and
+ self.inc_with_checking(self.dir_replacement, base_rp,
+ self.get_incrp(index))):
+ self.CCPP.flag_success(index)
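The new fast_process keeps the mirror recoverable by writing and syncing the increment before the mirror file itself changes, then renaming the patched temp file into place (or deleting the mirror copy if the source file went away). The ordering matters: if the backup dies in between, regress can still find the increment. A runnable illustration of that write, fsync, then rename ordering, with made-up paths and plain os calls standing in for fsync_with_dir and rpath.rename:

    import os

    def durable_replace(tmp_path, dest_path, inc_path, inc_data):
        with open(inc_path, "wb") as inc:
            inc.write(inc_data)
            inc.flush()
            os.fsync(inc.fileno())          # increment safely on disk first
        dirfd = os.open(os.path.dirname(inc_path) or ".", os.O_RDONLY)
        try:
            os.fsync(dirfd)                 # ...and its directory entry
        finally:
            os.close(dirfd)
        os.rename(tmp_path, dest_path)      # only now change the mirror file
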
diff --git a/rdiff-backup/rdiff_backup/connection.py b/rdiff-backup/rdiff_backup/connection.py
index 8b0da50..c1d2f70 100644
--- a/rdiff-backup/rdiff_backup/connection.py
+++ b/rdiff-backup/rdiff_backup/connection.py
@@ -512,7 +512,7 @@ class VirtualFile:
import Globals, Time, Rdiff, Hardlink, FilenameMapping, C, Security, \
Main, rorpiter, selection, increment, statistics, manage, lazy, \
iterfile, rpath, robust, restore, manage, backup, connection, \
- TempFile, SetConnections, librsync, log
+ TempFile, SetConnections, librsync, log, regress
Globals.local_connection = LocalConnection()
Globals.connections.append(Globals.local_connection)
diff --git a/rdiff-backup/rdiff_backup/increment.py b/rdiff-backup/rdiff_backup/increment.py
index 8df04dc..21612f8 100644
--- a/rdiff-backup/rdiff_backup/increment.py
+++ b/rdiff-backup/rdiff_backup/increment.py
@@ -33,9 +33,6 @@ def Increment(new, mirror, incpref):
file to incpref.
"""
- if not (new and new.lstat() or mirror.lstat()):
- return None # Files deleted in meantime, do nothing
-
log.Log("Incrementing mirror file " + mirror.path, 5)
if ((new and new.isdir()) or mirror.isdir()) and not incpref.isdir():
incpref.mkdir()
diff --git a/rdiff-backup/rdiff_backup/regress.py b/rdiff-backup/rdiff_backup/regress.py
index bfd5b62..1180d2a 100644
--- a/rdiff-backup/rdiff_backup/regress.py
+++ b/rdiff-backup/rdiff_backup/regress.py
@@ -34,7 +34,7 @@ recovered.
"""
from __future__ import generators
-import Globals, restore, log, rorpiter, journal, TempFile, metadata, rpath
+import Globals, restore, log, rorpiter, TempFile, metadata, rpath, C, Time
# regress_time should be set to the time we want to regress back to
# (usually the time of the last successful backup)
@@ -43,10 +43,6 @@ regress_time = None
# This should be set to the latest unsuccessful backup time
unsuccessful_backup_time = None
-# This is set by certain tests and allows overriding of global time
-# variables.
-time_override_mode = None
-
class RegressException(Exception):
"""Raised on any exception in regress process"""
@@ -71,6 +67,7 @@ def Regress(mirror_rp):
ITR = rorpiter.IterTreeReducer(RegressITRB, [])
for rf in iterate_meta_rfs(mirror_rp, inc_rpath): ITR(rf.index, rf)
ITR.Finish()
+ remove_rbdir_increments()
def set_regress_time():
"""Set global regress_time to previous sucessful backup
@@ -80,18 +77,14 @@ def set_regress_time():
"""
global regress_time, unsuccessful_backup_time
- if time_override_mode:
- assert regress_time and unsuccessful_backup_time
- return
-
curmir_incs = restore.get_inclist(Globals.rbdir.append("current_mirror"))
assert len(curmir_incs) == 2, \
"Found %s current_mirror flags, expected 2" % len(curmir_incs)
inctimes = [inc.getinctime() for inc in curmir_incs]
inctimes.sort()
regress_time = inctimes[0]
- unsucessful_backup_time = inctimes[-1]
- log.Log("Regressing to " + Time.timetopretty(regress_time), 5)
+ unsuccessful_backup_time = inctimes[-1]
+ log.Log("Regressing to " + Time.timetopretty(regress_time), 4)
def set_restore_times():
"""Set _rest_time and _mirror_time in the restore module
@@ -103,6 +96,20 @@ def set_restore_times():
restore._mirror_time = unsuccessful_backup_time
restore._rest_time = regress_time
+def remove_rbdir_increments():
+ """Delete the increments in the rdiff-backup-data directory"""
+ old_current_mirror = None
+ for filename in Globals.rbdir.listdir():
+ rp = Globals.rbdir.append(filename)
+ if rp.isincfile() and rp.getinctime() == unsuccessful_backup_time:
+ if rp.getincbase_str() == "current_mirror": old_current_mirror = rp
+ else:
+ log.Log("Removing rdiff-backup-data increment " + rp.path, 5)
+ rp.delete()
+ if old_current_mirror:
+ C.sync() # Sync first, since we are marking dest dir as good now
+ old_current_mirror.delete()
+
def iterate_raw_rfs(mirror_rp, inc_rp):
"""Iterate all RegressFile objects in mirror/inc directory"""
root_rf = RegressFile(mirror_rp, inc_rp, restore.get_inclist(inc_rp))
@@ -132,6 +139,11 @@ def iterate_meta_rfs(mirror_rp, inc_rp):
raw_rfs = iterate_raw_rfs(mirror_rp, inc_rp)
collated = rorpiter.Collate2Iters(raw_rfs, yield_metadata())
for raw_rf, metadata_rorp in collated:
+ if not raw_rf:
+ log.Log("Warning, metadata file has entry for %s,\n"
+ "but there are no associated files." %
+ (metadata_rorp.get_indexpath(),), 2)
+ continue
raw_rf.set_metadata_rorp(metadata_rorp)
yield raw_rf
@@ -146,11 +158,8 @@ class RegressFile(restore.RestoreFile):
"""
def __init__(self, mirror_rp, inc_rp, inc_list):
restore.RestoreFile.__init__(self, mirror_rp, inc_rp, inc_list)
- assert len(self.relevant_incs) <= 2, "Too many incs"
- if len(self.relevant_incs) == 2:
- self.regress_inc = self.relevant_incs[-1]
- else: self.regress_inc = None
-
+ self.set_regress_inc()
+
def set_metadata_rorp(self, metadata_rorp):
"""Set self.metadata_rorp, creating empty if given None"""
if metadata_rorp: self.metadata_rorp = metadata_rorp
@@ -161,6 +170,13 @@ class RegressFile(restore.RestoreFile):
return ((self.metadata_rorp and self.metadata_rorp.isdir()) or
(self.mirror_rp and self.mirror_rp.isdir()))
+ def set_regress_inc(self):
+ """Set self.regress_inc to increment to be removed (or None)"""
+ newer_incs = self.get_newer_incs()
+ assert len(newer_incs) <= 1, "Too many recent increments"
+ if newer_incs: self.regress_inc = newer_incs[0] # first is mirror_rp
+ else: self.regress_inc = None
+
class RegressITRB(rorpiter.ITRBranch):
"""Turn back state of dest directory (use with IterTreeReducer)
@@ -168,7 +184,7 @@ class RegressITRB(rorpiter.ITRBranch):
The arguments to the ITR will be RegressFiles. There are two main
assumptions this procedure makes (besides those mentioned above):
- 1. The mirror_rp and the metadata_rorp cmp_attribs correctly iff
+ 1. The mirror_rp and the metadata_rorp equal_loose correctly iff
they contain the same data. If this is the case, then the inc
file is unnecessary and we can delete it.
@@ -189,13 +205,16 @@ class RegressITRB(rorpiter.ITRBranch):
def fast_process(self, index, rf):
"""Process when nothing is a directory"""
- if (not rf.metadata_rorp.lstat() or not rf.mirror_rp.lstat() or
- not rpath.cmp_attribs(rf.metadata_rorp, rf.mirror_rp)):
+ if not rf.metadata_rorp.equal_loose(rf.mirror_rp):
+ log.Log("Regressing file %s" %
+ (rf.metadata_rorp.get_indexpath()), 5)
if rf.metadata_rorp.isreg(): self.restore_orig_regfile(rf)
else:
if rf.mirror_rp.lstat(): rf.mirror_rp.delete()
rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
- if rf.regress_inc: rf.regress_inc.delete()
+ if rf.regress_inc:
+ log.Log("Deleting increment " + rf.regress_inc.path, 5)
+ rf.regress_inc.delete()
def restore_orig_regfile(self, rf):
"""Restore original regular file
@@ -233,29 +252,21 @@ class RegressITRB(rorpiter.ITRBranch):
rf = self.rf
if rf.metadata_rorp.isdir():
if rf.mirror_rp.isdir():
- if not rpath.cmp_attribs(rf.metadata_rorp, rf.mirror_rp):
+ rf.mirror_rp.setdata()
+ if not rf.metadata_rorp.equal_loose(rf.mirror_rp):
+ log.Log("Regressing attributes of " + rf.mirror_rp.path, 5)
rpath.copy_attribs(rf.metadata_rorp, rf.mirror_rp)
else:
rf.mirror_rp.delete()
+ log.Log("Regressing file " + rf.mirror_rp.path, 5)
rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
else: # replacing a dir with some other kind of file
assert rf.mirror_rp.isdir()
+ log.Log("Replacing directory " + rf.mirror_rp.path, 5)
if rf.metadata_rorp.isreg(): self.restore_orig_regfile(rf)
else:
rf.mirror_rp.delete()
rpath.copy_with_attribs(rf.metadata_rorp, rf.mirror_rp)
- if rf.regress_inc: rf.regress_inc.delete()
-
- def on_error(self, exc, *args):
- """This is run on any exception, raises RegressException
-
- RegressException should be fatal. We don't want to tolerate
- the kinds of errors we would when backing up.
-
- """
- if args and args[0] and isinstance(args[0], tuple):
- filename = "/".join(args[0])
- elif self.index: filename = "/".join(*self.index)
- else: filename = "."
- log.Log("Error '%s' processing %s" % (exc, filename), 2)
- raise RegressException("Error during Regress")
+ if rf.regress_inc:
+ log.Log("Deleting increment " + rf.regress_inc.path, 5)
+ rf.regress_inc.delete()
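remove_rbdir_increments finishes the regress by deleting the rdiff-backup-data entries stamped with the failed session's time, saving the old current_mirror marker for last: removing that marker is what declares the destination consistent again, so everything is flushed to disk first. A simplified sketch with plain filesystem calls (inctime_of is a stand-in for rp.getinctime()):

    import os

    def remove_failed_increments(rbdir, failed_time, inctime_of):
        old_current_mirror = None
        for name in os.listdir(rbdir):
            path = os.path.join(rbdir, name)
            if inctime_of(path) != failed_time:
                continue                      # belongs to an older, good session
            if name.startswith("current_mirror"):
                old_current_mirror = path     # keep the marker until the end
            else:
                os.remove(path)
        if old_current_mirror:
            os.sync()                         # Python 3; the real code calls C.sync()
            os.remove(old_current_mirror)
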
diff --git a/rdiff-backup/rdiff_backup/restore.py b/rdiff-backup/rdiff_backup/restore.py
index 67b8bad..62c1af3 100644
--- a/rdiff-backup/rdiff_backup/restore.py
+++ b/rdiff-backup/rdiff_backup/restore.py
@@ -369,7 +369,7 @@ class RestoreFile:
def get_restore_fp(self):
"""Return file object of restored data"""
- assert self.relevant_incs[-1].isreg(), "Not a regular file"
+ assert self.relevant_incs[-1].isreg()
current_fp = self.get_first_fp()
for inc_diff in self.relevant_incs[1:]:
log.Log("Applying patch %s" % (inc_diff.get_indexpath(),), 7)
diff --git a/rdiff-backup/rdiff_backup/rpath.py b/rdiff-backup/rdiff_backup/rpath.py
index 16cc577..b689a85 100644
--- a/rdiff-backup/rdiff_backup/rpath.py
+++ b/rdiff-backup/rdiff_backup/rpath.py
@@ -185,22 +185,6 @@ def copy_with_attribs(rpin, rpout, compress = 0):
copy(rpin, rpout, compress)
if rpin.lstat(): copy_attribs(rpin, rpout)
-def quick_cmp_with_attribs(rp1, rp2):
- """Quicker version of cmp_with_attribs
-
- Instead of reading all of each file, assume that regular files
- are the same if the attributes compare.
-
- """
- if not cmp_attribs(rp1, rp2): return None
- if rp1.isreg() and rp2.isreg() and (rp1.getlen() == rp2.getlen()):
- return 1
- return cmp(rp1, rp2)
-
-def cmp_with_attribs(rp1, rp2):
- """Combine cmp and cmp_attribs"""
- return cmp_attribs(rp1, rp2) and cmp(rp1, rp2)
-
def rename(rp_source, rp_dest):
"""Rename rp_source to rp_dest"""
assert rp_source.conn is rp_dest.conn
@@ -209,15 +193,14 @@ def rename(rp_source, rp_dest):
if not rp_source.lstat(): rp_dest.delete()
else:
if rp_dest.lstat() and rp_source.getinode() == rp_dest.getinode():
- assert 0, ("Rename over same inode: %s to %s" %
- (rp_source.path, rp_dest.path))
+ log.Log("Warning: Attempt to rename over same inode: %s to %s"
+ % (rp_source.path, rp_dest.path), 2)
# You can't rename one hard linked file over another
rp_source.delete()
else: rp_source.conn.os.rename(rp_source.path, rp_dest.path)
rp_dest.data = rp_source.data
rp_source.data = {'type': None}
-
def tupled_lstat(filename):
"""Like os.lstat, but return only a tuple, or None if os.error
@@ -273,6 +256,8 @@ class RORPath:
self.data = {'type': None}
self.file = None
+ def __nonzero__(self): return 1
+
def __eq__(self, other):
"""True iff the two rorpaths are equivalent"""
if self.index != other.index: return None
@@ -284,12 +269,40 @@ class RORPath:
elif key == 'atime' and not Globals.preserve_atime: pass
elif key == 'devloc' or key == 'nlink': pass
elif key == 'size' and not self.isreg(): pass
- elif key == 'inode' and (not self.isreg() or
- not Globals.compare_inode): pass
+ elif (key == 'inode' and
+ (not self.isreg() or self.getnumlinks() == 1 or
+ not Globals.compare_inode)): pass
elif (not other.data.has_key(key) or
self.data[key] != other.data[key]): return None
return 1
+ def equal_loose(self, other):
+ """True iff the two rorpaths are kinda equivalent
+
+ Sometimes because of missing permissions, a file cannot be
+ replicated exactly on the remote side. This function tells
+ you whether the two files are close enough. self must be the
+ file with more information.
+
+ """
+ for key in self.data.keys(): # compare dicts key by key
+ if ((key == 'uid' or key == 'gid') and
+ (self.issym() or not Globals.change_ownership)):
+ # Don't compare gid/uid for symlinks, and only root
+ # can change ownership
+ pass
+ elif (key == 'type' and self.isspecial() and
+ other.isreg() and other.getsize() == 0):
+ # Special files may be replaced with 0 len regular files
+ pass
+ elif key == 'atime' and not Globals.preserve_atime: pass
+ elif key == 'devloc' or key == 'nlink': pass
+ elif key == 'size' and not self.isreg(): pass
+ elif key == 'inode': pass
+ elif (not other.data.has_key(key) or
+ self.data[key] != other.data[key]): return 0
+ return 1
+
def equal_verbose(self, other, check_index = 1,
compare_inodes = 0, compare_ownership = 0):
"""Like __eq__, but log more information. Useful when testing"""