author     bescoto <bescoto@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109>  2002-12-23 06:53:18 +0000
committer  bescoto <bescoto@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109>  2002-12-23 06:53:18 +0000
commit     9a0da726e2172321cdc1dcd21441f4ffc41e7931 (patch)
tree       7f25f848386ca501b7f08c08c21af16f0d71330c /rdiff-backup/rdiff_backup
parent     e95a61773adb2f98499cf13ff543f4249ee38226 (diff)
download   rdiff-backup-9a0da726e2172321cdc1dcd21441f4ffc41e7931.tar.gz
Major refactoring - avoid use of 'from XXX import *' in favor of the more
normal 'import XXX' syntax. The previous style was an artifact of earlier
versions, where the whole program fit in one file.

git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup/trunk@252 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
Diffstat (limited to 'rdiff-backup/rdiff_backup')
-rw-r--r--  rdiff-backup/rdiff_backup/FilenameMapping.py      |  26
-rw-r--r--  rdiff-backup/rdiff_backup/Globals.py               |   8
-rw-r--r--  rdiff-backup/rdiff_backup/Hardlink.py              |  24
-rw-r--r--  rdiff-backup/rdiff_backup/Main.py                  |  74
-rw-r--r--  rdiff-backup/rdiff_backup/MiscStats.py             |  23
-rw-r--r--  rdiff-backup/rdiff_backup/Rdiff.py                 |  26
-rw-r--r--  rdiff-backup/rdiff_backup/Security.py              |  77
-rw-r--r--  rdiff-backup/rdiff_backup/SetConnections.py        |  18
-rw-r--r--  rdiff-backup/rdiff_backup/connection.py            |  53
-rw-r--r--  rdiff-backup/rdiff_backup/destructive_stepping.py  |  64
-rw-r--r--  rdiff-backup/rdiff_backup/filelist.py              |   3
-rw-r--r--  rdiff-backup/rdiff_backup/highlevel.py             |  89
-rw-r--r--  rdiff-backup/rdiff_backup/increment.py             | 253
-rw-r--r--  rdiff-backup/rdiff_backup/iterfile.py              |   9
-rw-r--r--  rdiff-backup/rdiff_backup/lazy.py                  | 168
-rw-r--r--  rdiff-backup/rdiff_backup/log.py                   |   4
-rw-r--r--  rdiff-backup/rdiff_backup/manage.py                | 157
-rw-r--r--  rdiff-backup/rdiff_backup/restore.py               | 438
-rw-r--r--  rdiff-backup/rdiff_backup/robust.py                | 531
-rw-r--r--  rdiff-backup/rdiff_backup/rorpiter.py              | 747
-rw-r--r--  rdiff-backup/rdiff_backup/rpath.py                 | 449
-rw-r--r--  rdiff-backup/rdiff_backup/selection.py             |  40
-rw-r--r--  rdiff-backup/rdiff_backup/statistics.py            |  18
23 files changed, 1628 insertions(+), 1671 deletions(-)
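
The pattern applied throughout the diffs below is uniform: drop the wildcard import, import the module itself, and qualify each use at the call site. A minimal before/after sketch using names that recur in these hunks (Log from log, RPath from rpath; conn and base are placeholders for an existing connection and path):

# Before: wildcard imports hide each name's origin and forced the
# bottom-of-file "kludge" imports removed in several files below.
from log import *
from rpath import *

Log("Touching mirror marker", 6)
rp = RPath(conn, base)

# After: qualified imports make every name's module explicit and
# sidestep the import-order problems of the wildcard form.
import log, rpath

log.Log("Touching mirror marker", 6)
rp = rpath.RPath(conn, base)
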
diff --git a/rdiff-backup/rdiff_backup/FilenameMapping.py b/rdiff-backup/rdiff_backup/FilenameMapping.py
index c160bed..e305f68 100644
--- a/rdiff-backup/rdiff_backup/FilenameMapping.py
+++ b/rdiff-backup/rdiff_backup/FilenameMapping.py
@@ -27,9 +27,7 @@ them over the usual 255 character limit.
"""
import re
-from log import *
-from robust import *
-import Globals
+import Globals, log
max_filename_length = 255
@@ -55,8 +53,8 @@ def set_init_quote_vals_local():
global chars_to_quote, quoting_char
chars_to_quote = Globals.chars_to_quote
if len(Globals.quoting_char) != 1:
- Log.FatalError("Expected single character for quoting char,"
- "got '%s' instead" % (Globals.quoting_char,))
+ log.Log.FatalError("Expected single character for quoting char,"
+ "got '%s' instead" % (Globals.quoting_char,))
quoting_char = Globals.quoting_char
init_quoting_regexps()
@@ -68,8 +66,8 @@ def init_quoting_regexps():
re.compile("[%s%s]" % (chars_to_quote, quoting_char), re.S)
unquoting_regexp = re.compile("%s[0-9]{3}" % quoting_char, re.S)
except re.error:
- Log.FatalError("Error '%s' when processing char quote list %s" %
- (re.error, chars_to_quote))
+ log.Log.FatalError("Error '%s' when processing char quote list %s" %
+ (re.error, chars_to_quote))
def quote(path):
"""Return quoted version of given path
@@ -95,18 +93,4 @@ def unquote_single(match):
assert len(match.group()) == 4
return chr(int(match.group()[1:]))
-def get_quoted_dir_children(rpath):
- """For rpath directory, return list of quoted children in dir"""
- if not rpath.isdir(): return []
- dir_pairs = [(unquote(filename), filename)
- for filename in Robust.listrp(rpath)]
- dir_pairs.sort() # sort by real index, not quoted part
- child_list = []
- for unquoted, filename in dir_pairs:
- childrp = rpath.append(unquoted)
- childrp.quote_path()
- child_list.append(childrp)
- return child_list
-
-
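
The hunks above leave the quoting scheme itself untouched: each forbidden character becomes the quoting character followed by its three-digit ASCII code, which the [0-9]{3} unquoting regexp and the chr(int(...)) in unquote_single undo. A round-trip sketch, assuming ';' as the quoting char and that quote's substitution helper looks like quote_single below (its body is not shown in this hunk):

import re

quoting_char = ";"
chars_to_quote = "A-Z"  # e.g. quote uppercase for case-insensitive filesystems
quoting_regexp = re.compile("[%s%s]" % (chars_to_quote, quoting_char), re.S)
unquoting_regexp = re.compile("%s[0-9]{3}" % quoting_char, re.S)

def quote_single(match):
    # ';' plus zero-padded ASCII code: 'F' -> ';070' (hypothetical helper)
    return "%s%03d" % (quoting_char, ord(match.group()))

def unquote_single(match):
    assert len(match.group()) == 4  # quoting char + three digits
    return chr(int(match.group()[1:]))

quoted = quoting_regexp.sub(quote_single, "Foo")          # ';070oo'
assert unquoting_regexp.sub(unquote_single, quoted) == "Foo"
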
diff --git a/rdiff-backup/rdiff_backup/Globals.py b/rdiff-backup/rdiff_backup/Globals.py
index 1ba7490..50271d3 100644
--- a/rdiff-backup/rdiff_backup/Globals.py
+++ b/rdiff-backup/rdiff_backup/Globals.py
@@ -246,7 +246,7 @@ def postset_regexp_local(name, re_string, flags):
if flags: globals()[name] = re.compile(re_string, flags)
else: globals()[name] = re.compile(re_string)
-def set_select(source, rpath, tuplelist, quote_mode, *filelists):
+def set_select(source, Sel_Obj, rpath, tuplelist, quote_mode, *filelists):
"""Initialize select object using tuplelist
Note that each list in filelists must each be passed as
@@ -256,12 +256,8 @@ def set_select(source, rpath, tuplelist, quote_mode, *filelists):
"""
global select_source, select_mirror
- sel = Select(rpath, quote_mode)
+ sel = Sel_Obj(rpath, quote_mode)
sel.ParseArgs(tuplelist, filelists)
if source: select_source = sel
else: select_mirror = sel
-
-from rpath import * # kludge to avoid circularity - not needed in this module
-from log import * # another kludge
-from selection import *
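
The three wildcard imports deleted here were, per their own comments, kludges to avoid circular imports. Two devices replace them: set_select now receives the Select class as its Sel_Obj parameter, so Globals.py never has to import selection, and ordinary `import module` statements elsewhere defer attribute lookup to call time, which dissolves the remaining cycles. A toy two-module sketch of that deferral (hypothetical files a.py and b.py, not from this tree):

# a.py
import b            # binds the module object only; b's attributes are
                    # resolved when a function body actually runs
def use_b():
    return b.helper()

# b.py
import a            # safe despite the cycle: by the time use_a() is
                    # called, both modules have finished initializing
def helper():
    return "ok"
def use_a():
    return a.use_b()
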
diff --git a/rdiff-backup/rdiff_backup/Hardlink.py b/rdiff-backup/rdiff_backup/Hardlink.py
index 38dcad8..ec375fd 100644
--- a/rdiff-backup/rdiff_backup/Hardlink.py
+++ b/rdiff-backup/rdiff_backup/Hardlink.py
@@ -32,7 +32,7 @@ side. The source side should only transmit inode information.
from __future__ import generators
import cPickle
-
+import Globals, Time, TempFile, rpath, log, robust
# In all of these lists of indicies are the values. The keys in
# _inode_ ones are (inode, devloc) pairs.
@@ -138,8 +138,8 @@ def restore_link(index, rpath):
for linked_index in _src_index_indicies[index]:
if linked_index in _restore_index_path:
srcpath = _restore_index_path[linked_index]
- Log("Restoring %s by hard linking to %s" %
- (rpath.path, srcpath), 6)
+ log.Log("Restoring %s by hard linking to %s" %
+ (rpath.path, srcpath), 6)
rpath.hardlink(srcpath)
return 1
_restore_index_path[index] = rpath.path
@@ -148,8 +148,8 @@ def restore_link(index, rpath):
def link_rp(src_rorp, dest_rpath, dest_root = None):
"""Make dest_rpath into a link analogous to that of src_rorp"""
if not dest_root: dest_root = dest_rpath # use base of dest_rpath
- dest_link_rpath = RPath(dest_root.conn, dest_root.base,
- get_indicies(src_rorp, 1)[0])
+ dest_link_rpath = rpath.RPath(dest_root.conn, dest_root.base,
+ get_indicies(src_rorp, 1)[0])
dest_rpath.hardlink(dest_link_rpath.path)
def write_linkdict(rpath, dict, compress = None):
@@ -161,13 +161,13 @@ def write_linkdict(rpath, dict, compress = None):
"""
assert (Globals.isbackup_writer and
rpath.conn is Globals.local_connection)
- tf = TempFileManager.new(rpath)
+ tf = TempFile.new(rpath)
def init():
fp = tf.open("wb", compress)
cPickle.dump(dict, fp)
assert not fp.close()
tf.setdata()
- Robust.make_tf_robustaction(init, (tf,), (rpath,)).execute()
+ robust.make_tf_robustaction(init, (tf,), (rpath,)).execute()
def get_linkrp(data_rpath, time, prefix):
"""Return RPath of linkdata, or None if cannot find"""
@@ -191,7 +191,7 @@ def final_writedata():
"""Write final checkpoint data to rbdir after successful backup"""
global final_inc
if _src_index_indicies:
- Log("Writing hard link data", 6)
+ log.Log("Writing hard link data", 6)
if Globals.compression:
final_inc = Globals.rbdir.append("hardlink_data.%s.data.gz" %
Time.curtimestr)
@@ -218,7 +218,7 @@ def final_checkpoint(data_rpath):
after every 20 seconds or whatever, but just at the end.
"""
- Log("Writing intermediate hard link data to disk", 2)
+ log.Log("Writing intermediate hard link data to disk", 2)
src_inode_rp = data_rpath.append("hardlink_source_inode_checkpoint."
"%s.data" % Time.curtimestr)
src_index_rp = data_rpath.append("hardlink_source_index_checkpoint."
@@ -251,7 +251,7 @@ def retrieve_checkpoint(data_rpath, time):
dest_index = get_linkdata(data_rpath, time,
"hardlink_dest_index_checkpoint")
except cPickle.UnpicklingError:
- Log("Unpickling Error", 2)
+ log.Log("Unpickling Error", 2)
return None
if (src_inode is None or src_index is None or
dest_inode is None or dest_index is None): return None
@@ -271,7 +271,3 @@ def remove_all_checkpoints():
rp.delete()
-from log import *
-from robust import *
-from rpath import *
-import Globals, Time
diff --git a/rdiff-backup/rdiff_backup/Main.py b/rdiff-backup/rdiff_backup/Main.py
index f130875..d6b977c 100644
--- a/rdiff-backup/rdiff_backup/Main.py
+++ b/rdiff-backup/rdiff_backup/Main.py
@@ -20,16 +20,10 @@
"""Start (and end) here - read arguments, set global settings, etc."""
from __future__ import generators
-import getopt, sys, re
-from log import *
-from lazy import *
-from connection import *
-from rpath import *
-from robust import *
-from restore import *
-from highlevel import *
-from manage import *
-import Globals, Time, SetConnections
+import getopt, sys, re, os
+from log import Log
+import Globals, Time, SetConnections, selection, robust, rpath, \
+ manage, highlevel, connection, restore, FilenameMapping, Security
action = None
@@ -164,7 +158,7 @@ def set_action():
if l == 0: commandline_error("No arguments given")
elif l == 1: action = "restore"
elif l == 2:
- if RPath(Globals.local_connection, args[0]).isincfile():
+ if rpath.RPath(Globals.local_connection, args[0]).isincfile():
action = "restore"
else: action = "backup"
else: commandline_error("Too many arguments given")
@@ -207,13 +201,14 @@ def misc_setup(rps):
Globals.postset_regexp('no_compression_regexp',
Globals.no_compression_regexp_string)
- for conn in Globals.connections: Robust.install_signal_handlers()
+ for conn in Globals.connections: robust.install_signal_handlers()
def take_action(rps):
"""Do whatever action says"""
- if action == "server": PipeConnection(sys.stdin, sys.stdout).Server()
+ if action == "server":
+ connection.PipeConnection(sys.stdin, sys.stdout).Server()
elif action == "backup": Backup(rps[0], rps[1])
- elif action == "restore": restore(*rps)
+ elif action == "restore": Restore(*rps)
elif action == "restore-as-of": RestoreAsOf(rps[0], rps[1])
elif action == "test-server": SetConnections.TestConnections()
elif action == "list-changed-since": ListChangedSince(rps[0])
@@ -247,14 +242,16 @@ def Backup(rpin, rpout):
backup_init_dirs(rpin, rpout)
if prevtime:
Time.setprevtime(prevtime)
- HighLevel.Mirror_and_increment(rpin, rpout, incdir)
- else: HighLevel.Mirror(rpin, rpout, incdir)
+ highlevel.HighLevel.Mirror_and_increment(rpin, rpout, incdir)
+ else: highlevel.HighLevel.Mirror(rpin, rpout, incdir)
rpout.conn.Main.backup_touch_curmirror_local(rpin, rpout)
def backup_init_select(rpin, rpout):
"""Create Select objects on source and dest connections"""
- rpin.conn.Globals.set_select(1, rpin, select_opts, None, *select_files)
- rpout.conn.Globals.set_select(0, rpout, select_mirror_opts, 1)
+ rpin.conn.Globals.set_select(1, selection.Select,
+ rpin, select_opts, None, *select_files)
+ rpout.conn.Globals.set_select(0, selection.Select,
+ rpout, select_mirror_opts, 1)
def backup_init_dirs(rpin, rpout):
"""Make sure rpin and rpout are valid, init data dir and logging"""
@@ -273,7 +270,7 @@ def backup_init_dirs(rpin, rpout):
datadir = rpout.append("rdiff-backup-data")
SetConnections.UpdateGlobal('rbdir', datadir)
- incdir = RPath(rpout.conn, os.path.join(datadir.path, "increments"))
+ incdir = rpath.RPath(rpout.conn, os.path.join(datadir.path, "increments"))
prevtime = backup_get_mirrortime()
if rpout.lstat():
@@ -336,14 +333,14 @@ def backup_touch_curmirror_local(rpin, rpout):
"""
datadir = Globals.rbdir
- map(RPath.delete, backup_get_mirrorrps())
+ map(rpath.RPath.delete, backup_get_mirrorrps())
mirrorrp = datadir.append("current_mirror.%s.%s" % (Time.curtimestr,
"data"))
Log("Touching mirror marker %s" % mirrorrp.path, 6)
mirrorrp.touch()
- RPath.copy_attribs(rpin, rpout)
+ rpath.copy_attribs(rpin, rpout)
-def restore(src_rp, dest_rp = None):
+def Restore(src_rp, dest_rp = None):
"""Main restoring function
Here src_rp should be an increment file, and if dest_rp is
@@ -373,7 +370,7 @@ def restore_common(rpin, target, time):
inc_rpath = datadir.append_path('increments', index)
restore_init_select(mirror_root, target)
restore_start_log(rpin, target, time)
- Restore.Restore(inc_rpath, mirror, target, time)
+ restore.Restore(inc_rpath, mirror, target, time)
Log("Restore ended", 4)
def restore_start_log(rpin, target, time):
@@ -398,8 +395,8 @@ def restore_check_paths(rpin, rpout, restoreasof = None):
Try restoring from an increment file (the filenames look like
"foobar.2001-09-01T04:49:04-07:00.diff").""" % rpin.path)
- if not rpout: rpout = RPath(Globals.local_connection,
- rpin.getincbase_str())
+ if not rpout: rpout = rpath.RPath(Globals.local_connection,
+ rpin.getincbase_str())
if rpout.lstat():
Log.FatalError("Restore target %s already exists, "
"and will not be overwritten." % rpout.path)
@@ -413,8 +410,9 @@ def restore_init_select(rpin, rpout):
the restore operation isn't.
"""
- Globals.set_select(1, rpin, select_mirror_opts, None)
- Globals.set_select(0, rpout, select_opts, None, *select_files)
+ Globals.set_select(1, selection.Select, rpin, select_mirror_opts, None)
+ Globals.set_select(0, selection.Select,
+ rpout, select_opts, None, *select_files)
def restore_get_root(rpin):
"""Return (mirror root, index) and set the data dir
@@ -438,7 +436,7 @@ def restore_get_root(rpin):
i = len(pathcomps)
while i >= 2:
- parent_dir = RPath(rpin.conn, "/".join(pathcomps[:i]))
+ parent_dir = rpath.RPath(rpin.conn, "/".join(pathcomps[:i]))
if (parent_dir.isdir() and
"rdiff-backup-data" in parent_dir.listdir()): break
i = i-1
@@ -467,11 +465,11 @@ def ListIncrements(rp):
mirror_root.append_path("rdiff-backup-data")
mirrorrp = mirror_root.new_index(index)
inc_rpath = datadir.append_path('increments', index)
- incs = Restore.get_inclist(inc_rpath)
- mirror_time = Restore.get_mirror_time()
+ incs = restore.get_inclist(inc_rpath)
+ mirror_time = restore.get_mirror_time()
if Globals.parsable_output:
- print Manage.describe_incs_parsable(incs, mirror_time, mirrorrp)
- else: print Manage.describe_incs_human(incs, mirror_time, mirrorrp)
+ print manage.describe_incs_parsable(incs, mirror_time, mirrorrp)
+ else: print manage.describe_incs_human(incs, mirror_time, mirrorrp)
def CalculateAverage(rps):
@@ -495,7 +493,7 @@ def RemoveOlderThan(rootrp):
Log("Deleting increment(s) before %s" % timep, 4)
times_in_secs = map(lambda inc: Time.stringtotime(inc.getinctime()),
- Restore.get_inclist(datadir.append("increments")))
+ restore.get_inclist(datadir.append("increments")))
times_in_secs = filter(lambda t: t < time, times_in_secs)
if not times_in_secs:
Log.FatalError("No increments older than %s found" % timep)
@@ -510,7 +508,7 @@ def RemoveOlderThan(rootrp):
if len(times_in_secs) == 1:
Log("Deleting increment at time:\n" + inc_pretty_time, 3)
else: Log("Deleting increments at times:\n" + inc_pretty_time, 3)
- Manage.delete_earlier_than(datadir, time)
+ manage.delete_earlier_than(datadir, time)
def ListChangedSince(rp):
@@ -519,12 +517,12 @@ def ListChangedSince(rp):
except Time.TimeException, exc: Log.FatalError(str(exc))
mirror_root, index = restore_get_root(rp)
Globals.rbdir = datadir = mirror_root.append_path("rdiff-backup-data")
- mirror_time = Restore.get_mirror_time()
+ mirror_time = restore.get_mirror_time()
def get_rids_recursive(rid):
"""Yield all the rids under rid that have inc newer than rest_time"""
yield rid
- for sub_rid in Restore.yield_rids(rid, rest_time, mirror_time):
+ for sub_rid in restore.yield_rids(rid, rest_time, mirror_time):
for sub_sub_rid in get_rids_recursive(sub_rid): yield sub_sub_rid
def determineChangeType(incList):
@@ -538,8 +536,8 @@ def ListChangedSince(rp):
else: return "Unknown!"
inc_rpath = datadir.append_path('increments', index)
- inc_list = Restore.get_inclist(inc_rpath)
- root_rid = RestoreIncrementData(index, inc_rpath, inc_list)
+ inc_list = restore.get_inclist(inc_rpath)
+ root_rid = restore.RestoreIncrementData(index, inc_rpath, inc_list)
for rid in get_rids_recursive(root_rid):
if rid.inc_list:
if not rid.index: path = "."
diff --git a/rdiff-backup/rdiff_backup/MiscStats.py b/rdiff-backup/rdiff_backup/MiscStats.py
index ff02ff3..75a7bf9 100644
--- a/rdiff-backup/rdiff_backup/MiscStats.py
+++ b/rdiff-backup/rdiff_backup/MiscStats.py
@@ -19,8 +19,8 @@
"""Misc statistics methods, pertaining to dir and session stat files"""
-from statistics import *
-
+import time
+import Globals, Hardlink, increment, log, statistics, Time
# This is the RPath of the directory statistics file, and the
# associated open file. It will hold a line of statistics for
@@ -34,7 +34,7 @@ _dir_stats_header = """# rdiff-backup directory statistics file
#
# Each line is in the following format:
# RelativeDirName %s
-""" % " ".join(StatsObj.stat_file_attrs)
+""" % " ".join(statistics.StatsObj.stat_file_attrs)
def open_dir_stats_file():
"""Open directory statistics file, write header"""
@@ -43,12 +43,12 @@ def open_dir_stats_file():
if Globals.compression: suffix = "data.gz"
else: suffix = "data"
- _dir_stats_rp = Inc.get_inc(Globals.rbdir.append("directory_statistics"),
- Time.curtime, suffix)
+ _dir_stats_rp = increment.get_inc(
+ Globals.rbdir.append("directory_statistics"), Time.curtime, suffix)
if _dir_stats_rp.lstat():
- Log("Warning, statistics file %s already exists, appending" %
- _dir_stats_rp.path, 2)
+ log.Log("Warning, statistics file %s already exists, appending" %
+ _dir_stats_rp.path, 2)
_dir_stats_fp = _dir_stats_rp.open("ab", Globals.compression)
else: _dir_stats_fp = _dir_stats_rp.open("wb", Globals.compression)
_dir_stats_fp.write(_dir_stats_header)
@@ -68,8 +68,8 @@ def close_dir_stats_file():
def write_session_statistics(statobj):
"""Write session statistics into file, log"""
- stat_inc = Inc.get_inc(Globals.rbdir.append("session_statistics"),
- Time.curtime, "data")
+ stat_inc = increment.get_inc(
+ Globals.rbdir.append("session_statistics"), Time.curtime, "data")
statobj.StartTime = Time.curtime
statobj.EndTime = time.time()
@@ -85,9 +85,8 @@ def write_session_statistics(statobj):
statobj.write_stats_to_rp(stat_inc)
if Globals.print_statistics:
message = statobj.get_stats_logstring("Session statistics")
- Log.log_to_file(message)
+ log.Log.log_to_file(message)
Globals.client_conn.sys.stdout.write(message)
-from increment import *
-import Hardlink
+
diff --git a/rdiff-backup/rdiff_backup/Rdiff.py b/rdiff-backup/rdiff_backup/Rdiff.py
index cc12cfc..23bfda3 100644
--- a/rdiff-backup/rdiff_backup/Rdiff.py
+++ b/rdiff-backup/rdiff_backup/Rdiff.py
@@ -25,10 +25,10 @@ RobustAction and the like.
"""
import os, librsync
+from log import Log
+import robust, TempFile, Globals
-class RdiffException(Exception): pass
-
def get_signature(rp):
"""Take signature of rpin file and return in file object"""
Log("Getting signature of %s" % rp.path, 7)
@@ -52,9 +52,9 @@ def write_delta_action(basis, new, delta, compress = None):
before written to delta.
"""
- delta_tf = TempFileManager.new(delta)
+ delta_tf = TempFile.new(delta)
def init(): write_delta(basis, new, delta_tf, compress)
- return Robust.make_tf_robustaction(init, delta_tf, delta)
+ return robust.make_tf_robustaction(init, delta_tf, delta)
def write_delta(basis, new, delta, compress = None):
"""Write rdiff delta which brings basis to new"""
@@ -74,12 +74,12 @@ def patch_action(rp_basis, rp_delta, rp_out = None, out_tf = None,
"""
if not rp_out: rp_out = rp_basis
- if not out_tf: out_tf = TempFileManager.new(rp_out)
+ if not out_tf: out_tf = TempFile.new(rp_out)
def init():
rp_basis.conn.Rdiff.patch_local(rp_basis, rp_delta,
out_tf, delta_compressed)
out_tf.setdata()
- return Robust.make_tf_robustaction(init, out_tf, rp_out)
+ return robust.make_tf_robustaction(init, out_tf, rp_out)
def patch_local(rp_basis, rp_delta, outrp, delta_compressed = None):
"""Patch routine that must be run on rp_basis.conn
@@ -99,20 +99,20 @@ def patch_local(rp_basis, rp_delta, outrp, delta_compressed = None):
def patch_with_attribs_action(rp_basis, rp_delta, rp_out = None):
"""Like patch_action, but also transfers attributs from rp_delta"""
if not rp_out: rp_out = rp_basis
- tf = TempFileManager.new(rp_out)
- return Robust.chain_nested(patch_action(rp_basis, rp_delta, rp_out, tf),
- Robust.copy_attribs_action(rp_delta, tf))
+ tf = TempFile.new(rp_out)
+ return robust.chain_nested(patch_action(rp_basis, rp_delta, rp_out, tf),
+ robust.copy_attribs_action(rp_delta, tf))
def copy_action(rpin, rpout):
"""Use rdiff to copy rpin to rpout, conserving bandwidth"""
if not rpin.isreg() or not rpout.isreg() or rpin.conn is rpout.conn:
# rdiff not applicable, fallback to regular copying
- return Robust.copy_action(rpin, rpout)
+ return robust.copy_action(rpin, rpout)
Log("Rdiff copying %s to %s" % (rpin.path, rpout.path), 6)
- out_tf = TempFileManager.new(rpout)
+ out_tf = TempFile.new(rpout)
def init(): rpout.conn.Rdiff.copy_local(rpin, rpout, out_tf)
- return Robust.make_tf_robustaction(init, out_tf, rpout)
+ return robust.make_tf_robustaction(init, out_tf, rpout)
def copy_local(rpin, rpout, rpnew):
"""Write rpnew == rpin using rpout as basis. rpout and rpnew local"""
@@ -122,6 +122,4 @@ def copy_local(rpin, rpout, rpnew):
rpnew.write_from_fileobj(librsync.PatchedFile(rpout.open("rb"), deltafile))
-from log import *
-from robust import *
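
copy_action above falls back to a plain copy unless both sides are regular files; otherwise it runs the rdiff cycle that patch_local's librsync.PatchedFile call completes: signature of the basis, delta against the new file, patch. A sketch of the whole cycle with ordinary file objects, assuming SigFile and DeltaFile wrap files the same way PatchedFile demonstrably does in the last hunk:

import librsync

sig = librsync.SigFile(open("basis", "rb"))                 # 1. signature of basis
delta = librsync.DeltaFile(sig, open("new", "rb"))          # 2. delta basis -> new
patched = librsync.PatchedFile(open("basis", "rb"), delta)  # 3. apply delta
out = open("new.copy", "wb")
while 1:                        # stream the patched result out in chunks
    buf = patched.read(65536)
    if not buf: break
    out.write(buf)
out.close()
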
diff --git a/rdiff-backup/rdiff_backup/Security.py b/rdiff-backup/rdiff_backup/Security.py
index 24923ef..9760041 100644
--- a/rdiff-backup/rdiff_backup/Security.py
+++ b/rdiff-backup/rdiff_backup/Security.py
@@ -20,8 +20,7 @@
"""Functions to make sure remote requests are kosher"""
import sys, tempfile
-import Globals, Main
-from rpath import *
+import Globals, Main, rpath
class Violation(Exception):
"""Exception that indicates an improper request has been received"""
@@ -76,8 +75,8 @@ def set_security_level(action, cmdpairs):
rdir = tempfile.gettempdir()
elif islocal(cp1):
sec_level = "read-only"
- rdir = Main.restore_get_root(RPath(Globals.local_connection,
- getpath(cp1)))[0].path
+ rdir = Main.restore_get_root(rpath.RPath(Globals.local_connection,
+ getpath(cp1)))[0].path
else:
assert islocal(cp2)
sec_level = "all"
@@ -101,8 +100,8 @@ def set_security_level(action, cmdpairs):
else: assert 0, "Unknown action %s" % action
Globals.security_level = sec_level
- Globals.restrict_path = RPath(Globals.local_connection,
- rdir).normalize().path
+ Globals.restrict_path = rpath.RPath(Globals.local_connection,
+ rdir).normalize().path
def set_allowed_requests(sec_level):
"""Set the allowed requests list using the security level"""
@@ -111,44 +110,46 @@ def set_allowed_requests(sec_level):
allowed_requests = ["VirtualFile.readfromid", "VirtualFile.closebyid",
"Globals.get", "Globals.is_not_None",
"Globals.get_dict_val",
- "Log.open_logfile_allconn",
- "Log.close_logfile_allconn",
+ "log.Log.open_logfile_allconn",
+ "log.Log.close_logfile_allconn",
"SetConnections.add_redirected_conn",
"RedirectedRun",
"sys.stdout.write"]
if sec_level == "minimal": pass
elif sec_level == "read-only" or sec_level == "update-only":
- allowed_requests.extend(["C.make_file_dict",
- "os.getuid",
- "os.listdir",
- "Time.setcurtime_local",
- "Resume.ResumeCheck",
- "HLSourceStruct.split_initial_dsiter",
- "HLSourceStruct.get_diffs_and_finalize",
- "RPathStatic.gzip_open_local_read",
- "RPathStatic.open_local_read"])
+ allowed_requests.extend(
+ ["C.make_file_dict",
+ "os.getuid",
+ "os.listdir",
+ "Time.setcurtime_local",
+ "robust.Resume.ResumeCheck",
+ "highlevel.HLSourceStruct.split_initial_dsiter",
+ "highlevel.HLSourceStruct.get_diffs_and_finalize",
+ "rpath.gzip_open_local_read",
+ "rpath.open_local_read"])
if sec_level == "update-only":
- allowed_requests. \
- extend(["Log.open_logfile_local", "Log.close_logfile_local",
- "Log.close_logfile_allconn", "Log.log_to_file",
- "SaveState.init_filenames",
- "SaveState.touch_last_file",
- "HLDestinationStruct.get_sigs",
- "HLDestinationStruct.patch_w_datadir_writes",
- "HLDestinationStruct.patch_and_finalize",
- "HLDestinationStruct.patch_increment_and_finalize",
- "Main.backup_touch_curmirror_local",
- "Globals.ITRB.increment_stat"])
+ allowed_requests.extend(
+ ["Log.open_logfile_local", "Log.close_logfile_local",
+ "Log.close_logfile_allconn", "Log.log_to_file",
+ "robust.SaveState.init_filenames",
+ "robust.SaveState.touch_last_file",
+ "highlevel.HLDestinationStruct.get_sigs",
+ "highlevel.HLDestinationStruct.patch_w_datadir_writes",
+ "highlevel.HLDestinationStruct.patch_and_finalize",
+ "highlevel.HLDestinationStruct.patch_increment_and_finalize",
+ "Main.backup_touch_curmirror_local",
+ "Globals.ITRB.increment_stat"])
if Globals.server:
- allowed_requests.extend(["SetConnections.init_connection_remote",
- "Log.setverbosity",
- "Log.setterm_verbosity",
- "Time.setprevtime_local",
- "FilenameMapping.set_init_quote_vals_local",
- "Globals.postset_regexp_local",
- "Globals.set_select",
- "HLSourceStruct.set_session_info",
- "HLDestinationStruct.set_session_info"])
+ allowed_requests.extend(
+ ["SetConnections.init_connection_remote",
+ "Log.setverbosity",
+ "Log.setterm_verbosity",
+ "Time.setprevtime_local",
+ "FilenameMapping.set_init_quote_vals_local",
+ "Globals.postset_regexp_local",
+ "Globals.set_select",
+ "highlevel.HLSourceStruct.set_session_info",
+ "highlevel.HLDestinationStruct.set_session_info"])
def vet_request(request, arglist):
"""Examine request for security violations"""
@@ -156,7 +157,7 @@ def vet_request(request, arglist):
security_level = Globals.security_level
if Globals.restrict_path:
for arg in arglist:
- if isinstance(arg, RPath): vet_rpath(arg)
+ if isinstance(arg, rpath.RPath): vet_rpath(arg)
if security_level == "all": return
if request.function_string in allowed_requests: return
if request.function_string == "Globals.set":
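
The vet_request hunk above condenses to a whitelist check layered under the path restriction. A self-contained restatement of the control flow shown (the real function also vets RPath arguments when restrict_path is set, and special-cases "Globals.set" before raising):

class Violation(Exception): pass   # as defined at the top of Security.py

def vet_request_outline(function_string, allowed_requests, security_level):
    # Unrestricted level passes everything; otherwise the request's
    # function string must appear on the allowed_requests whitelist.
    if security_level == "all": return
    if function_string in allowed_requests: return
    raise Violation("Security violation for request %s" % function_string)
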
diff --git a/rdiff-backup/rdiff_backup/SetConnections.py b/rdiff-backup/rdiff_backup/SetConnections.py
index 3bdc36f..495aa87 100644
--- a/rdiff-backup/rdiff_backup/SetConnections.py
+++ b/rdiff-backup/rdiff_backup/SetConnections.py
@@ -25,6 +25,10 @@ the related connections.
"""
+import os
+from log import Log
+import Globals, FilenameMapping, connection, rpath
+
# This is the schema that determines how rdiff-backup will open a
# pipe to the remote system. If the file is given as A::B, %s will
# be substituted with A in the schema.
@@ -68,7 +72,7 @@ def cmdpair2rp(cmd_pair):
cmd, filename = cmd_pair
if cmd: conn = init_connection(cmd)
else: conn = Globals.local_connection
- return RPath(conn, filename).normalize()
+ return rpath.RPath(conn, filename).normalize()
def desc2cmd_pairs(desc_pair):
"""Return pair (remote_cmd, filename) from desc_pair"""
@@ -127,7 +131,7 @@ def init_connection(remote_cmd):
Log("Executing " + remote_cmd, 4)
stdin, stdout = os.popen2(remote_cmd)
conn_number = len(Globals.connections)
- conn = PipeConnection(stdout, stdin, conn_number)
+ conn = connection.PipeConnection(stdout, stdin, conn_number)
check_connection_version(conn, remote_cmd)
Log("Registering connection %d" % conn_number, 7)
@@ -138,7 +142,7 @@ def init_connection(remote_cmd):
def check_connection_version(conn, remote_cmd):
"""Log warning if connection has different version"""
try: remote_version = conn.Globals.get('version')
- except ConnectionReadError, exception:
+ except connection.ConnectionReadError, exception:
Log.FatalError("""%s
Couldn't start up the remote connection by executing
@@ -184,7 +188,7 @@ def init_connection_remote(conn_number):
def add_redirected_conn(conn_number):
"""Run on server side - tell about redirected connection"""
Globals.connection_dict[conn_number] = \
- RedirectedConnection(conn_number)
+ connection.RedirectedConnection(conn_number)
def UpdateGlobal(setting_name, val):
"""Update value of global variable across all connections"""
@@ -230,9 +234,3 @@ Local version: %s
Remote version: %s""" % (Globals.version, version)
else: print "Server OK"
-
-from log import *
-from rpath import *
-from connection import *
-import Globals, FilenameMapping
-
diff --git a/rdiff-backup/rdiff_backup/connection.py b/rdiff-backup/rdiff_backup/connection.py
index 09e0a92..dc4fb1e 100644
--- a/rdiff-backup/rdiff_backup/connection.py
+++ b/rdiff-backup/rdiff_backup/connection.py
@@ -20,7 +20,7 @@
"""Support code for remote execution and data transfer"""
from __future__ import generators
-import types, os, tempfile, cPickle, shutil, traceback, pickle, socket
+import types, os, tempfile, cPickle, shutil, traceback, pickle, socket, sys
class ConnectionError(Exception): pass
@@ -121,11 +121,13 @@ class LowLevelPipeConnection(Connection):
"""Put an object into the pipe (will send raw if string)"""
Log.conn("sending", obj, req_num)
if type(obj) is types.StringType: self._putbuf(obj, req_num)
- elif isinstance(obj, Connection): self._putconn(obj, req_num)
- elif isinstance(obj, TempFile): self._puttempfile(obj, req_num)
- elif isinstance(obj, DSRPath): self._putdsrpath(obj, req_num)
- elif isinstance(obj, RPath): self._putrpath(obj, req_num)
- elif isinstance(obj, RORPath): self._putrorpath(obj, req_num)
+ elif isinstance(obj, connection.Connection):self._putconn(obj, req_num)
+ elif isinstance(obj, TempFile.TempFile):
+ self._puttempfile(obj, req_num)
+ elif isinstance(obj, destructive_stepping.DSRPath):
+ self._putdsrpath(obj, req_num)
+ elif isinstance(obj, rpath.RPath): self._putrpath(obj, req_num)
+ elif isinstance(obj, rpath.RORPath): self._putrorpath(obj, req_num)
elif ((hasattr(obj, "read") or hasattr(obj, "write"))
and hasattr(obj, "close")): self._putfile(obj, req_num)
elif hasattr(obj, "next"): self._putiter(obj, req_num)
@@ -146,7 +148,7 @@ class LowLevelPipeConnection(Connection):
def _putiter(self, iterator, req_num):
"""Put an iterator through the pipe"""
- self._write("i", str(VirtualFile.new(RORPIter.ToFile(iterator))),
+ self._write("i", str(VirtualFile.new(rorpiter.ToFile(iterator))),
req_num)
def _puttempfile(self, tempfile, req_num):
@@ -239,8 +241,8 @@ class LowLevelPipeConnection(Connection):
elif format_string == "b": result = data
elif format_string == "f": result = VirtualFile(self, int(data))
elif format_string == "i":
- result = RORPIter.FromFile(BufferedRead(VirtualFile(self,
- int(data))))
+ result = rorpiter.FromFile(iterfile.BufferedRead(
+ VirtualFile(self, int(data))))
elif format_string == "t": result = self._gettempfile(data)
elif format_string == "r": result = self._getrorpath(data)
elif format_string == "R": result = self._getrpath(data)
@@ -254,23 +256,25 @@ class LowLevelPipeConnection(Connection):
def _getrorpath(self, raw_rorpath_buf):
"""Reconstruct RORPath object from raw data"""
index, data = cPickle.loads(raw_rorpath_buf)
- return RORPath(index, data)
+ return rpath.RORPath(index, data)
def _gettempfile(self, raw_tf_buf):
"""Return TempFile object indicated by raw_tf_buf"""
conn_number, base, index, data = cPickle.loads(raw_tf_buf)
- return TempFile(Globals.connection_dict[conn_number],
- base, index, data)
+ return TempFile.TempFile(Globals.connection_dict[conn_number],
+ base, index, data)
def _getrpath(self, raw_rpath_buf):
"""Return RPath object indicated by raw_rpath_buf"""
conn_number, base, index, data = cPickle.loads(raw_rpath_buf)
- return RPath(Globals.connection_dict[conn_number], base, index, data)
+ return rpath.RPath(Globals.connection_dict[conn_number],
+ base, index, data)
def _getdsrpath(self, raw_dsrpath_buf):
"""Return DSRPath object indicated by buf"""
conn_number, state_dict = cPickle.loads(raw_dsrpath_buf)
- empty_dsrp = DSRPath("bypass", Globals.local_connection, None)
+ empty_dsrp = destructive_stepping.DSRPath("bypass",
+ Globals.local_connection, None)
empty_dsrp.__setstate__(state_dict)
empty_dsrp.conn = Globals.connection_dict[conn_number]
empty_dsrp.file = None
@@ -538,22 +542,11 @@ class VirtualFile:
# everything has to be available here for remote connection's use, but
# put at bottom to reduce circularities.
-import Globals, Time, Rdiff, Hardlink, FilenameMapping, C, Security, Main
-from static import *
-from lazy import *
-from log import *
-from iterfile import *
-from connection import *
-from rpath import *
-from robust import *
-from rorpiter import *
-from selection import *
-from statistics import *
-from increment import *
-from restore import *
-from manage import *
-from highlevel import *
-
+import Globals, Time, Rdiff, Hardlink, FilenameMapping, C, Security, \
+ Main, rorpiter, selection, increment, statistics, manage, lazy, \
+ iterfile, rpath, robust, restore, manage, highlevel, connection, \
+ TempFile, destructive_stepping, SetConnections
+from log import Log
Globals.local_connection = LocalConnection()
Globals.connections.append(Globals.local_connection)
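
The _put dispatch and the format_string branches of _get above form a small tagged protocol: 'b' raw buffer, 'f' virtual file, 'i' iterator, 't' tempfile, 'r' RORPath, 'R' RPath. A toy sketch of the pattern with two of those tags; the real framing also carries a request number and byte length, elided here:

import cPickle

def put(pipe, obj):
    # 'b' = raw string buffer; 'r' = pickled (index, data) pair, as in
    # _putrorpath/_getrorpath above. pipe is a plain list standing in
    # for the real file descriptors.
    if type(obj) is str: pipe.append(("b", obj))
    else: pipe.append(("r", cPickle.dumps((obj.index, obj.data))))

def get(pipe):
    tag, payload = pipe.pop(0)
    if tag == "b": return payload
    if tag == "r": return cPickle.loads(payload)   # -> (index, data)
    raise ValueError("unknown format tag %r" % tag)
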
diff --git a/rdiff-backup/rdiff_backup/destructive_stepping.py b/rdiff-backup/rdiff_backup/destructive_stepping.py
index fdce815..6dc77e7 100644
--- a/rdiff-backup/rdiff_backup/destructive_stepping.py
+++ b/rdiff-backup/rdiff_backup/destructive_stepping.py
@@ -1,3 +1,4 @@
+
# Copyright 2002 Ben Escoto
#
# This file is part of rdiff-backup.
@@ -21,14 +22,14 @@
from __future__ import generators
import types
-from rpath import *
-from lazy import *
+import Globals, rpath, log
+
class DSRPPermError(Exception):
"""Exception used when a DSRPath can't get sufficient permissions"""
pass
-class DSRPath(RPath):
+class DSRPath(rpath.RPath):
"""Destructive Stepping RPath
Sometimes when we traverse the directory tree, even when we just
@@ -59,11 +60,11 @@ class DSRPath(RPath):
"""
if base == 0:
- assert isinstance(conn_or_rp, RPath)
- RPath.__init__(self, conn_or_rp.conn,
- conn_or_rp.base, conn_or_rp.index)
+ assert isinstance(conn_or_rp, rpath.RPath)
+ rpath.RPath.__init__(self, conn_or_rp.conn,
+ conn_or_rp.base, conn_or_rp.index)
self.path = conn_or_rp.path # conn_or_rp may be quoted
- else: RPath.__init__(self, conn_or_rp, base, index)
+ else: rpath.RPath.__init__(self, conn_or_rp, base, index)
if source != "bypass":
# "bypass" val is used when unpackaging over connection
@@ -107,8 +108,8 @@ class DSRPath(RPath):
if not self.hasfullperms(): self.chmod_bypass(0700)
def warn(self, err):
- Log("Received error '%s' when dealing with file %s, skipping..."
- % (err, self.path), 1)
+ log.Log("Received error '%s' when dealing with file %s, skipping..."
+ % (err, self.path), 1)
raise DSRPPermError(self.path)
def __getstate__(self):
@@ -136,7 +137,7 @@ class DSRPath(RPath):
def chmod(self, permissions):
"""Change permissions, delaying if self.perms_delayed is set"""
if self.delay_perms: self.newperms = self.data['perms'] = permissions
- else: RPath.chmod(self, permissions)
+ else: rpath.RPath.chmod(self, permissions)
def getperms(self):
"""Return dsrp's intended permissions"""
@@ -148,7 +149,7 @@ class DSRPath(RPath):
"""Change permissions without updating the data dictionary"""
self.delay_perms = 1
if self.newperms is None: self.newperms = self.getperms()
- Log("DSRP: Perm bypass %s to %o" % (self.path, permissions), 8)
+ log.Log("DSRP: Perm bypass %s to %o" % (self.path, permissions), 8)
self.conn.os.chmod(self.path, permissions)
def settime(self, accesstime, modtime):
@@ -157,12 +158,12 @@ class DSRPath(RPath):
if self.delay_mtime: self.newmtime = self.data['mtime'] = modtime
if not self.delay_atime or not self.delay_mtime:
- RPath.settime(self, accesstime, modtime)
+ rpath.RPath.settime(self, accesstime, modtime)
def setmtime(self, modtime):
"""Change mtime, delaying if self.times_delayed is set"""
if self.delay_mtime: self.newmtime = self.data['mtime'] = modtime
- else: RPath.setmtime(self, modtime)
+ else: rpath.RPath.setmtime(self, modtime)
def getmtime(self):
"""Return dsrp's intended modification time"""
@@ -181,18 +182,18 @@ class DSRPath(RPath):
if not self.lstat(): return # File has been deleted in meantime
if self.delay_perms and self.newperms is not None:
- Log("Finalizing permissions of dsrp %s to %s" %
- (self.path, self.newperms), 8)
- RPath.chmod(self, self.newperms)
+ log.Log("Finalizing permissions of dsrp %s to %s" %
+ (self.path, self.newperms), 8)
+ rpath.RPath.chmod(self, self.newperms)
do_atime = self.delay_atime and self.newatime is not None
do_mtime = self.delay_mtime and self.newmtime is not None
if do_atime and do_mtime:
- RPath.settime(self, self.newatime, self.newmtime)
+ rpath.RPath.settime(self, self.newatime, self.newmtime)
elif do_atime and not do_mtime:
- RPath.settime(self, self.newatime, self.getmtime())
+ rpath.RPath.settime(self, self.newatime, self.getmtime())
elif not do_atime and do_mtime:
- RPath.setmtime(self, self.newmtime)
+ rpath.RPath.setmtime(self, self.newmtime)
def newpath(self, newpath, index = ()):
"""Return similar DSRPath but with new path"""
@@ -208,29 +209,4 @@ class DSRPath(RPath):
return self.__class__(self.source, self.conn, self.base, index)
-class DestructiveSteppingFinalizer(ITRBranch):
- """Finalizer that can work on an iterator of dsrpaths
-
- The reason we have to use an IterTreeReducer is that some files
- should be updated immediately, but for directories we sometimes
- need to update all the files in the directory before finally
- coming back to it.
-
- """
- dsrpath = None
- def start_process(self, index, dsrpath):
- self.dsrpath = dsrpath
-
- def end_process(self):
- if self.dsrpath: self.dsrpath.write_changes()
-
- def can_fast_process(self, index, dsrpath):
- return not self.dsrpath.isdir()
-
- def fast_process(self, index, dsrpath):
- if self.dsrpath: self.dsrpath.write_changes()
-
-from log import *
-from robust import *
-import Globals
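
The chmod/settime/setmtime overrides above share one pattern: while a delay flag is set, record the requested value in the data dictionary instead of touching the filesystem, and let write_changes replay it once the subtree is finished. The same idea in miniature (hypothetical class; real_chmod stands in for RPath.chmod):

class DelayedPermsSketch:
    def __init__(self, real_chmod):
        self.delay_perms, self.newperms = 1, None
        self.real_chmod = real_chmod
    def chmod(self, permissions):
        # Record rather than apply while traversal still needs access.
        if self.delay_perms: self.newperms = permissions
        else: self.real_chmod(permissions)
    def write_changes(self):
        # Replay at finalization, as DSRPath.write_changes does above.
        if self.delay_perms and self.newperms is not None:
            self.real_chmod(self.newperms)
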
diff --git a/rdiff-backup/rdiff_backup/filelist.py b/rdiff-backup/rdiff_backup/filelist.py
index bfce82f..a969047 100644
--- a/rdiff-backup/rdiff_backup/filelist.py
+++ b/rdiff-backup/rdiff_backup/filelist.py
@@ -1,6 +1,5 @@
from __future__ import generators
-from manage import *
-from rpath import *
+import rpath, manage
#######################################################################
#
diff --git a/rdiff-backup/rdiff_backup/highlevel.py b/rdiff-backup/rdiff_backup/highlevel.py
index bcb07d6..f93388f 100644
--- a/rdiff-backup/rdiff_backup/highlevel.py
+++ b/rdiff-backup/rdiff_backup/highlevel.py
@@ -20,17 +20,8 @@
"""High level functions for mirroring, mirror & inc, etc."""
from __future__ import generators
-from static import *
-
-class SkipFileException(Exception):
- """Signal that the current file should be skipped but then continue
-
- This exception will often be raised when there is problem reading
- an individual file, but it makes sense for the rest of the backup
- to keep going.
-
- """
- pass
+import Globals, MiscStats, metadata, rorpiter, TempFile, \
+ Hardlink, robust, increment, rpath, lazy, static, log
class HighLevel:
@@ -48,8 +39,8 @@ class HighLevel:
Otherwise only mirror and don't create any extra files.
"""
- SourceS = src_rpath.conn.HLSourceStruct
- DestS = dest_rpath.conn.HLDestinationStruct
+ SourceS = src_rpath.conn.highlevel.HLSourceStruct
+ DestS = dest_rpath.conn.highlevel.HLDestinationStruct
src_init_dsiter = SourceS.split_initial_dsiter()
dest_sigiter = DestS.get_sigs(dest_rpath, src_init_dsiter)
@@ -61,8 +52,8 @@ class HighLevel:
def Mirror_and_increment(src_rpath, dest_rpath, inc_rpath,
session_info = None):
"""Mirror + put increments in tree based at inc_rpath"""
- SourceS = src_rpath.conn.HLSourceStruct
- DestS = dest_rpath.conn.HLDestinationStruct
+ SourceS = src_rpath.conn.highlevel.HLSourceStruct
+ DestS = dest_rpath.conn.highlevel.HLDestinationStruct
src_init_dsiter = SourceS.split_initial_dsiter()
dest_sigiter = DestS.get_sigs(dest_rpath, src_init_dsiter)
@@ -72,7 +63,7 @@ class HighLevel:
dest_rpath.setdata()
inc_rpath.setdata()
-MakeStatic(HighLevel)
+static.MakeStatic(HighLevel)
class HLSourceStruct:
@@ -80,7 +71,7 @@ class HLSourceStruct:
def split_initial_dsiter(cls):
"""Set iterators of all dsrps from rpath, returning one"""
dsiter = Globals.select_source.set_iter()
- initial_dsiter1, cls.initial_dsiter2 = Iter.multiplex(dsiter, 2)
+ initial_dsiter1, cls.initial_dsiter2 = lazy.Iter.multiplex(dsiter, 2)
return initial_dsiter1
def get_diffs_and_finalize(cls, sigiter):
@@ -90,10 +81,10 @@ class HLSourceStruct:
dissimilar files.
"""
- collated = RORPIter.CollateIterators(cls.initial_dsiter2, sigiter)
+ collated = rorpiter.CollateIterators(cls.initial_dsiter2, sigiter)
def error_handler(exc, dest_sig, rp):
- Log("Error %s producing a diff of %s" %
- (exc, rp and rp.path), 2)
+ log.Log("Error %s producing a diff of %s" %
+ (exc, rp and rp.path), 2)
return None
def diffs():
@@ -101,12 +92,12 @@ class HLSourceStruct:
if dest_sig:
if dest_sig.isplaceholder(): yield dest_sig
else:
- diff = Robust.check_common_error(
- error_handler, RORPIter.diffonce, [dest_sig, rp])
+ diff = robust.check_common_error(
+ error_handler, rorpiter.diffonce, [dest_sig, rp])
if diff: yield diff
return diffs()
-MakeClass(HLSourceStruct)
+static.MakeClass(HLSourceStruct)
class HLDestinationStruct:
@@ -115,7 +106,7 @@ class HLDestinationStruct:
def split_initial_dsiter(cls):
"""Set initial_dsiters (iteration of all rps from rpath)"""
result, cls.initial_dsiter2 = \
- Iter.multiplex(Globals.select_mirror.set_iter(), 2)
+ lazy.Iter.multiplex(Globals.select_mirror.set_iter(), 2)
return result
def get_dissimilar(cls, baserp, src_init_iter, dest_init_iter):
@@ -134,14 +125,14 @@ class HLDestinationStruct:
will depend on the Globals.conn_bufsize value.
"""
- collated = RORPIter.CollateIterators(src_init_iter, dest_init_iter)
+ collated = rorpiter.CollateIterators(src_init_iter, dest_init_iter)
def compare(src_rorp, dest_dsrp):
"""Return dest_dsrp if they are different, None if the same"""
if not dest_dsrp:
dest_dsrp = cls.get_dsrp(baserp, src_rorp.index)
if dest_dsrp.lstat():
- Log("Warning: Found unexpected destination file %s, "
- "not processing it." % dest_dsrp.path, 2)
+ log.Log("Warning: Found unexpected destination file %s, "
+ "not processing it." % dest_dsrp.path, 2)
return None
elif (src_rorp and src_rorp == dest_dsrp and
(not Globals.preserve_hardlinks or
@@ -162,7 +153,7 @@ class HLDestinationStruct:
counter = 0
yield dsrp
elif counter == 20:
- placeholder = RORPath(src_rorp.index)
+ placeholder = rpath.RORPath(src_rorp.index)
placeholder.make_placeholder()
counter = 0
yield placeholder
@@ -185,11 +176,11 @@ class HLDestinationStruct:
metadata.CloseMetadata()
dup = duplicate_with_write(src_init_iter)
dissimilars = cls.get_dissimilar(baserp, dup, dest_iters1)
- return RORPIter.Signatures(dissimilars)
+ return rorpiter.Signatures(dissimilars)
def get_dsrp(cls, dest_rpath, index):
"""Return initialized rpath based on dest_rpath with given index"""
- rp = RPath(dest_rpath.conn, dest_rpath.base, index)
+ rp = rpath.RPath(dest_rpath.conn, dest_rpath.base, index)
if Globals.quoting_enabled: rp.quote_path()
return rp
@@ -197,14 +188,16 @@ class HLDestinationStruct:
"""Return finalizer, starting from session info if necessary"""
old_finalizer = cls._session_info and cls._session_info.finalizer
if old_finalizer: return old_finalizer
- else: return IterTreeReducer(DestructiveSteppingFinalizer, [])
+ else: return rorpiter.IterTreeReducer(
+ rorpiter.DestructiveSteppingFinalizer, [])
def get_ITR(cls, inc_rpath):
"""Return ITR, starting from state if necessary"""
if cls._session_info and cls._session_info.ITR:
return cls._session_info.ITR
else:
- iitr = IterTreeReducer(IncrementITRB, [inc_rpath])
+ iitr = rorpiter.IterTreeReducer(increment.IncrementITRB,
+ [inc_rpath])
iitr.root_branch.override_changed()
Globals.ITRB = iitr.root_branch
iitr.root_branch.Errors = 0
@@ -214,38 +207,38 @@ class HLDestinationStruct:
"""Return MirrorITR, starting from state if available"""
if cls._session_info and cls._session_info.ITR:
return cls._session_info.ITR
- ITR = IterTreeReducer(MirrorITRB, [inc_rpath])
+ ITR = rorpiter.IterTreeReducer(increment.MirrorITRB, [inc_rpath])
Globals.ITRB = ITR.root_branch
ITR.root_branch.Errors = 0
return ITR
def patch_and_finalize(cls, dest_rpath, diffs):
"""Apply diffs and finalize"""
- collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2)
+ collated = rorpiter.CollateIterators(diffs, cls.initial_dsiter2)
#finalizer = cls.get_finalizer()
diff_rorp, rp = None, None
def patch(diff_rorp, dsrp):
if not dsrp: dsrp = cls.get_dsrp(dest_rpath, diff_rorp.index)
if diff_rorp and not diff_rorp.isplaceholder():
- RORPIter.patchonce_action(None, dsrp, diff_rorp).execute()
+ rorpiter.patchonce_action(None, dsrp, diff_rorp).execute()
return dsrp
def error_handler(exc, diff_rorp, dsrp):
filename = dsrp and dsrp.path or os.path.join(*diff_rorp.index)
- Log("Error: %s processing file %s" % (exc, filename), 2)
+ log.Log("Error: %s processing file %s" % (exc, filename), 2)
for indexed_tuple in collated:
- Log(lambda: "Processing %s" % str(indexed_tuple), 7)
+ log.Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
- dsrp = Robust.check_common_error(error_handler, patch,
+ dsrp = robust.check_common_error(error_handler, patch,
[diff_rorp, dsrp])
#finalizer(dsrp.index, dsrp)
#finalizer.Finish()
def patch_w_datadir_writes(cls, dest_rpath, diffs, inc_rpath):
"""Apply diffs and finalize, with checkpointing and statistics"""
- collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2)
+ collated = rorpiter.CollateIterators(diffs, cls.initial_dsiter2)
#finalizer, ITR = cls.get_finalizer(), cls.get_MirrorITR(inc_rpath)
finalizer, ITR = None, cls.get_MirrorITR(inc_rpath)
MiscStats.open_dir_stats_file()
@@ -253,7 +246,7 @@ class HLDestinationStruct:
try:
for indexed_tuple in collated:
- Log(lambda: "Processing %s" % str(indexed_tuple), 7)
+ log.Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
if not dsrp: dsrp = cls.get_dsrp(dest_rpath, diff_rorp.index)
if diff_rorp and diff_rorp.isplaceholder(): diff_rorp = None
@@ -270,7 +263,7 @@ class HLDestinationStruct:
def patch_increment_and_finalize(cls, dest_rpath, diffs, inc_rpath):
"""Apply diffs, write increment if necessary, and finalize"""
- collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2)
+ collated = rorpiter.CollateIterators(diffs, cls.initial_dsiter2)
#finalizer, ITR = cls.get_finalizer(), cls.get_ITR(inc_rpath)
finalizer, ITR = None, cls.get_ITR(inc_rpath)
MiscStats.open_dir_stats_file()
@@ -278,7 +271,7 @@ class HLDestinationStruct:
try:
for indexed_tuple in collated:
- Log(lambda: "Processing %s" % str(indexed_tuple), 7)
+ log.Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
index = indexed_tuple.index
if not dsrp: dsrp = cls.get_dsrp(dest_rpath, index)
@@ -296,18 +289,12 @@ class HLDestinationStruct:
def handle_last_error(cls, dsrp, finalizer, ITR):
"""If catch fatal error, try to checkpoint before exiting"""
- Log.exception(1, 2)
- TracebackArchive.log()
+ log.Log.exception(1, 2)
+ robust.TracebackArchive.log()
#SaveState.checkpoint(ITR, finalizer, dsrp, 1)
#if Globals.preserve_hardlinks: Hardlink.final_checkpoint(Globals.rbdir)
#SaveState.touch_last_file_definitive()
raise
-MakeClass(HLDestinationStruct)
+static.MakeClass(HLDestinationStruct)
-from log import *
-from rpath import *
-from robust import *
-from increment import *
-from rorpiter import *
-import Globals, Hardlink, MiscStats, metadata
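
Mirror and Mirror_and_increment (the first two hunks of this file) run the same four-stage pipeline split across the two connections; only the final patching call differs. In outline, with bodies elided and the choice of patch variant simplified from what these hunks show:

def mirror_outline(SourceS, DestS, dest_rpath, inc_rpath=None):
    # SourceS/DestS are the connections' HLSourceStruct/HLDestinationStruct.
    src_init_dsiter = SourceS.split_initial_dsiter()
    dest_sigiter = DestS.get_sigs(dest_rpath, src_init_dsiter)
    diffiter = SourceS.get_diffs_and_finalize(dest_sigiter)
    if inc_rpath is None: DestS.patch_and_finalize(dest_rpath, diffiter)
    else: DestS.patch_increment_and_finalize(dest_rpath, diffiter, inc_rpath)
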
diff --git a/rdiff-backup/rdiff_backup/increment.py b/rdiff-backup/rdiff_backup/increment.py
index 5040c40..46afd42 100644
--- a/rdiff-backup/rdiff_backup/increment.py
+++ b/rdiff-backup/rdiff_backup/increment.py
@@ -17,119 +17,119 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
-"""Provides Inc and *ITR classes, which relate to writing increment files"""
+"""Provides functions and *ITR classes, for writing increment files"""
import traceback
-from static import *
-from statistics import *
-from lazy import *
+from log import Log
+import Globals, Time, MiscStats, rorpiter, TempFile, robust, \
+ statistics, rpath, static, lazy, Rdiff, Hardlink
-class Inc:
- """Class containing increment functions"""
- def Increment_action(new, mirror, incpref):
- """Main file incrementing function, returns RobustAction
- new is the file on the active partition,
- mirror is the mirrored file from the last backup,
- incpref is the prefix of the increment file.
+def Increment_action(new, mirror, incpref):
+ """Main file incrementing function, returns robust.Action
- This function basically moves the information about the mirror
- file to incpref.
+ new is the file on the active partition,
+ mirror is the mirrored file from the last backup,
+ incpref is the prefix of the increment file.
- The returned RobustAction when executed should return the name
- of the incfile, or None if none was created.
+ This function basically moves the information about the mirror
+ file to incpref.
- """
- if not (new and new.lstat() or mirror.lstat()):
- return Robust.null_action # Files deleted in meantime, do nothing
-
- Log("Incrementing mirror file " + mirror.path, 5)
- if ((new and new.isdir()) or mirror.isdir()) and not incpref.isdir():
- incpref.mkdir()
-
- if not mirror.lstat(): return Inc.makemissing_action(incpref)
- elif mirror.isdir(): return Inc.makedir_action(mirror, incpref)
- elif new.isreg() and mirror.isreg():
- return Inc.makediff_action(new, mirror, incpref)
- else: return Inc.makesnapshot_action(mirror, incpref)
-
- def Increment(new, mirror, incpref):
- return Inc.Increment_action(new, mirror, incpref).execute()
-
- def makemissing_action(incpref):
- """Signify that mirror file was missing"""
- def final(init_val):
- incrp = Inc.get_inc_ext(incpref, "missing")
- incrp.touch()
- return incrp
- return RobustAction(None, final, None)
-
- def makesnapshot_action(mirror, incpref):
- """Copy mirror to incfile, since new is quite different"""
- if (mirror.isreg() and Globals.compression and
- not Globals.no_compression_regexp.match(mirror.path)):
- snapshotrp = Inc.get_inc_ext(incpref, "snapshot.gz")
- return Robust.copy_with_attribs_action(mirror, snapshotrp, 1)
- else:
- snapshotrp = Inc.get_inc_ext(incpref, "snapshot")
- return Robust.copy_with_attribs_action(mirror, snapshotrp, None)
-
- def makediff_action(new, mirror, incpref):
- """Make incfile which is a diff new -> mirror"""
- if (Globals.compression and
- not Globals.no_compression_regexp.match(mirror.path)):
- diff = Inc.get_inc_ext(incpref, "diff.gz")
- compress = 1
- else:
- diff = Inc.get_inc_ext(incpref, "diff")
- compress = None
-
- diff_tf = TempFileManager.new(diff)
- def init():
- Rdiff.write_delta(new, mirror, diff_tf, compress)
- RPath.copy_attribs(mirror, diff_tf)
- return diff
- return Robust.make_tf_robustaction(init, diff_tf, diff)
-
- def makedir_action(mirrordir, incpref):
- """Make file indicating directory mirrordir has changed"""
- dirsign = Inc.get_inc_ext(incpref, "dir")
- tf = TempFileManager.new(dirsign)
- def init():
- tf.touch()
- RPath.copy_attribs(mirrordir, tf)
- return dirsign
- return Robust.make_tf_robustaction(init, tf, dirsign)
-
- def get_inc(rp, time, typestr):
- """Return increment like rp but with time and typestr suffixes"""
- addtostr = lambda s: "%s.%s.%s" % (s, Time.timetostring(time), typestr)
- if rp.index:
- incrp = rp.__class__(rp.conn, rp.base, rp.index[:-1] +
- (addtostr(rp.index[-1]),))
- else: incrp = rp.__class__(rp.conn, addtostr(rp.base), rp.index)
- if Globals.quoting_enabled: incrp.quote_path()
- return incrp
-
- def get_inc_ext(rp, typestr):
- """Return increment with specified type and correct time
+ The returned robust.Action when executed should return the name
+ of the incfile, or None if none was created.
- If the file exists, then probably a previous backup has been
- aborted. We then keep asking FindTime to get a time later
- than the one that already has an inc file.
-
- """
- inctime = 0
- while 1:
- inctime = Resume.FindTime(rp.index, inctime)
- incrp = Inc.get_inc(rp, inctime, typestr)
- if not incrp.lstat(): break
+ """
+ if not (new and new.lstat() or mirror.lstat()):
+ return robust.null_action # Files deleted in meantime, do nothing
+
+ Log("Incrementing mirror file " + mirror.path, 5)
+ if ((new and new.isdir()) or mirror.isdir()) and not incpref.isdir():
+ incpref.mkdir()
+
+ if not mirror.lstat(): return makemissing_action(incpref)
+ elif mirror.isdir(): return makedir_action(mirror, incpref)
+ elif new.isreg() and mirror.isreg():
+ return makediff_action(new, mirror, incpref)
+ else: return makesnapshot_action(mirror, incpref)
+
+def Increment(new, mirror, incpref):
+ return Increment_action(new, mirror, incpref).execute()
+
+def makemissing_action(incpref):
+ """Signify that mirror file was missing"""
+ def final(init_val):
+ incrp = get_inc_ext(incpref, "missing")
+ incrp.touch()
return incrp
+ return robust.Action(None, final, None)
+
+def makesnapshot_action(mirror, incpref):
+ """Copy mirror to incfile, since new is quite different"""
+ if (mirror.isreg() and Globals.compression and
+ not Globals.no_compression_regexp.match(mirror.path)):
+ snapshotrp = get_inc_ext(incpref, "snapshot.gz")
+ return robust.copy_with_attribs_action(mirror, snapshotrp, 1)
+ else:
+ snapshotrp = get_inc_ext(incpref, "snapshot")
+ return robust.copy_with_attribs_action(mirror, snapshotrp, None)
+
+def makediff_action(new, mirror, incpref):
+ """Make incfile which is a diff new -> mirror"""
+ if (Globals.compression and
+ not Globals.no_compression_regexp.match(mirror.path)):
+ diff = get_inc_ext(incpref, "diff.gz")
+ compress = 1
+ else:
+ diff = get_inc_ext(incpref, "diff")
+ compress = None
+
+ diff_tf = TempFile.new(diff)
+ def init():
+ Rdiff.write_delta(new, mirror, diff_tf, compress)
+ rpath.copy_attribs(mirror, diff_tf)
+ return diff
+ return robust.make_tf_robustaction(init, diff_tf, diff)
+
+def makedir_action(mirrordir, incpref):
+ """Make file indicating directory mirrordir has changed"""
+ dirsign = get_inc_ext(incpref, "dir")
+ tf = TempFile.new(dirsign)
+ def init():
+ tf.touch()
+ rpath.copy_attribs(mirrordir, tf)
+ return dirsign
+ return robust.make_tf_robustaction(init, tf, dirsign)
+
+def get_inc(rp, time, typestr):
+ """Return increment like rp but with time and typestr suffixes"""
+ addtostr = lambda s: "%s.%s.%s" % (s, Time.timetostring(time), typestr)
+ if rp.index:
+ incrp = rp.__class__(rp.conn, rp.base, rp.index[:-1] +
+ (addtostr(rp.index[-1]),))
+ else: incrp = rp.__class__(rp.conn, addtostr(rp.base), rp.index)
+ if Globals.quoting_enabled: incrp.quote_path()
+ return incrp
+
+def get_inc_ext(rp, typestr):
+ """Return increment with specified type and correct time
+
+ If the file exists, then probably a previous backup has been
+ aborted. We then keep asking FindTime to get a time later
+ than the one that already has an inc file.
-MakeStatic(Inc)
+ """
+ inctime = 0
+ while 1:
+ #inctime = robust.Resume.FindTime(rp.index, inctime)
+ inctime = Time.prevtime
+ incrp = get_inc(rp, inctime, typestr)
+ if not incrp.lstat(): break
+ else:
+ assert 0, "Inc file already present"
+ return incrp
-class IncrementITRB(StatsITRB):
+class IncrementITRB(statistics.ITRB):
"""Patch and increment mirror directory
This has to be an ITR because directories that have files in them
@@ -159,7 +159,7 @@ class IncrementITRB(StatsITRB):
def __init__(self, inc_rpath):
"""Set inc_rpath, an rpath of the base of the tree"""
self.inc_rpath = inc_rpath
- StatsITRB.__init__(self)
+ statistics.ITRB.__init__(self)
def start_process(self, index, diff_rorp, dsrp):
"""Initial processing of file
@@ -209,12 +209,12 @@ class IncrementITRB(StatsITRB):
"""
if not (incpref.lstat() and incpref.isdir()): incpref.mkdir()
if diff_rorp and diff_rorp.isreg() and diff_rorp.file:
- tf = TempFileManager.new(dsrp)
+ tf = TempFile.new(dsrp)
def init():
- RPathStatic.copy_with_attribs(diff_rorp, tf)
+ rpath.copy_with_attribs(diff_rorp, tf)
tf.set_attached_filetype(diff_rorp.get_attached_filetype())
def error(exc, ran_init, init_val): tf.delete()
- RobustAction(init, None, error).execute()
+ robust.Action(init, None, error).execute()
self.directory_replacement = tf
def init_non_dir(self, dsrp, diff_rorp, incpref):
@@ -223,16 +223,16 @@ class IncrementITRB(StatsITRB):
if diff_rorp.isreg() and (dsrp.isreg() or diff_rorp.isflaglinked()):
# Write updated mirror to temp file so we can compute
# reverse diff locally
- mirror_tf = TempFileManager.new(dsrp)
- old_dsrp_tf = TempFileManager.new(dsrp)
+ mirror_tf = TempFile.new(dsrp)
+ old_dsrp_tf = TempFile.new(dsrp)
def init_thunk():
if diff_rorp.isflaglinked():
Hardlink.link_rp(diff_rorp, mirror_tf, dsrp)
else: Rdiff.patch_with_attribs_action(dsrp, diff_rorp,
mirror_tf).execute()
- self.incrp = Inc.Increment_action(mirror_tf, dsrp,
+ self.incrp = Increment_action(mirror_tf, dsrp,
incpref).execute()
- if dsrp.lstat(): RPathStatic.rename(dsrp, old_dsrp_tf)
+ if dsrp.lstat(): rpath.rename(dsrp, old_dsrp_tf)
mirror_tf.rename(dsrp)
def final(init_val): old_dsrp_tf.delete()
@@ -243,10 +243,10 @@ class IncrementITRB(StatsITRB):
if self.incrp: self.incrp.delete()
mirror_tf.delete()
- RobustAction(init_thunk, final, error).execute()
- else: self.incrp = Robust.chain(
- Inc.Increment_action(diff_rorp, dsrp, incpref),
- RORPIter.patchonce_action(None, dsrp, diff_rorp)).execute()[0]
+ robust.Action(init_thunk, final, error).execute()
+ else: self.incrp = robust.chain(
+ Increment_action(diff_rorp, dsrp, incpref),
+ rorpiter.patchonce_action(None, dsrp, diff_rorp)).execute()[0]
self.changed = 1
@@ -257,14 +257,14 @@ class IncrementITRB(StatsITRB):
or self.directory_replacement):
if self.directory_replacement:
tf = self.directory_replacement
- self.incrp = Robust.chain(
- Inc.Increment_action(tf, dsrp, incpref),
- RORPIter.patchonce_action(None, dsrp, tf)).execute()[0]
+ self.incrp = robust.chain(
+ Increment_action(tf, dsrp, incpref),
+ rorpiter.patchonce_action(None, dsrp, tf)).execute()[0]
tf.delete()
else:
- self.incrp = Inc.Increment(diff_rorp, dsrp, incpref)
+ self.incrp = Increment(diff_rorp, dsrp, incpref)
if diff_rorp:
- RORPIter.patchonce_action(None, dsrp, diff_rorp).execute()
+ rorpiter.patchonce_action(None, dsrp, diff_rorp).execute()
self.end_stats(diff_rorp, dsrp, self.incrp)
if self.mirror_isdirectory or dsrp.isdir():
@@ -276,7 +276,7 @@ class IncrementITRB(StatsITRB):
def fast_process(self, index, diff_rorp, dsrp):
"""Just update statistics"""
- StatsITRB.fast_process(self, dsrp)
+ statistics.ITRB.fast_process(self, dsrp)
def branch_process(self, branch):
"""Update statistics, and the has_changed flag if change in branch"""
@@ -285,14 +285,14 @@ class IncrementITRB(StatsITRB):
self.add_file_stats(branch)
-class MirrorITRB(StatsITRB):
+class MirrorITRB(statistics.ITRB):
"""Like IncrementITR, but only patch mirror directory, don't increment"""
# This is always None since no increments will be created
incrp = None
def __init__(self, inc_rpath):
"""Set inc_rpath, an rpath of the base of the inc tree"""
self.inc_rpath = inc_rpath
- StatsITRB.__init__(self)
+ statistics.ITRB.__init__(self)
def start_process(self, index, diff_rorp, mirror_dsrp):
"""Initialize statistics and do actual writing to mirror"""
@@ -305,7 +305,7 @@ class MirrorITRB(StatsITRB):
mirror_dsrp.delete()
mirror_dsrp.mkdir()
elif diff_rorp and not diff_rorp.isplaceholder():
- RORPIter.patchonce_action(None, mirror_dsrp, diff_rorp).execute()
+ rorpiter.patchonce_action(None, mirror_dsrp, diff_rorp).execute()
self.incpref = self.inc_rpath.new_index(index)
self.diff_rorp, self.mirror_dsrp = diff_rorp, mirror_dsrp
@@ -314,7 +314,7 @@ class MirrorITRB(StatsITRB):
"""Update statistics when leaving"""
self.end_stats(self.diff_rorp, self.mirror_dsrp)
if self.mirror_dsrp.isdir():
- RPathStatic.copy_attribs(self.diff_rorp, self.mirror_dsrp)
+ rpath.copy_attribs(self.diff_rorp, self.mirror_dsrp)
MiscStats.write_dir_stats_line(self, self.mirror_dsrp.index)
def can_fast_process(self, index, diff_rorp, mirror_dsrp):
@@ -323,7 +323,7 @@ class MirrorITRB(StatsITRB):
def fast_process(self, index, diff_rorp, mirror_dsrp):
"""Just update statistics"""
- StatsITRB.fast_process(self, mirror_dsrp)
+ statistics.ITRB.fast_process(self, mirror_dsrp)
def branch_process(self, branch):
"""Update statistics with subdirectory results"""
@@ -331,9 +331,4 @@ class MirrorITRB(StatsITRB):
self.add_file_stats(branch)
-from log import *
-from rpath import *
-from robust import *
-from rorpiter import *
-import Globals, Time, MiscStats
diff --git a/rdiff-backup/rdiff_backup/iterfile.py b/rdiff-backup/rdiff_backup/iterfile.py
index f95b4e8..84be7cc 100644
--- a/rdiff-backup/rdiff_backup/iterfile.py
+++ b/rdiff-backup/rdiff_backup/iterfile.py
@@ -20,7 +20,7 @@
"""Convert an iterator to a file object and vice-versa"""
import cPickle, array
-import Globals, C
+import Globals, C, robust, log
class IterFileException(Exception): pass
@@ -200,7 +200,7 @@ class FileWrappingIter:
def addfromfile(self):
"""Read a chunk from the current file and return it"""
# Check file read for errors, buf = "" if find one
- buf = Robust.check_common_error(self.read_error_handler,
+ buf = robust.check_common_error(self.read_error_handler,
self.currently_in_file.read,
[Globals.blocksize])
if not buf:
@@ -210,7 +210,7 @@ class FileWrappingIter:
def read_error_handler(self, exc, blocksize):
"""Log error when reading from file"""
- Log("Error '%s' reading from fileobj, truncating" % (str(exc),), 2)
+ log.Log("Error '%s' reading from fileobj, truncating" % (str(exc),), 2)
return ""
def _l2s_old(self, l):
@@ -253,5 +253,4 @@ class BufferedRead:
def close(self): return self.file.close()
-from log import *
-from robust import *
+
diff --git a/rdiff-backup/rdiff_backup/lazy.py b/rdiff-backup/rdiff_backup/lazy.py
index 7fa80fe..fda7dc2 100644
--- a/rdiff-backup/rdiff_backup/lazy.py
+++ b/rdiff-backup/rdiff_backup/lazy.py
@@ -21,7 +21,8 @@
from __future__ import generators
import os, stat, types
-from static import *
+import static
+
class Iter:
"""Hold static methods for the manipulation of lazy iterators"""
@@ -163,7 +164,7 @@ class Iter:
return tuple(map(make_iterator, range(num_of_forks)))
-MakeStatic(Iter)
+static.MakeStatic(Iter)
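static.MakeStatic is used here the same way the old bottom-of-file MakeStatic(Iter) call was. A minimal sketch of what such a helper does, assuming it simply wraps a class's public functions in staticmethod so they can be called without an instance (the real static.py may differ in detail):

    def MakeStatic(cls):
        # Wrap each public function so cls.foo(x) works without an instance.
        for name, value in list(cls.__dict__.items()):
            if callable(value) and not name.startswith("_"):
                setattr(cls, name, staticmethod(value))

    class Example(object):
        def double(x): return 2 * x

    MakeStatic(Example)
    assert Example.double(3) == 6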
class IterMultiplex2:
@@ -200,166 +201,3 @@ class IterMultiplex2:
else: elem = buf.pop(0) # a is in front, subtract an element
self.a_leading_by -= 1
yield elem
-
-
-class IterTreeReducer:
- """Tree style reducer object for iterator
-
-	The indices of a RORPIter form a tree-type structure.  This class
- can be used on each element of an iter in sequence and the result
- will be as if the corresponding tree was reduced. This tries to
- bridge the gap between the tree nature of directories, and the
- iterator nature of the connection between hosts and the temporal
- order in which the files are processed.
-
- """
- def __init__(self, branch_class, branch_args):
- """ITR initializer"""
- self.branch_class = branch_class
- self.branch_args = branch_args
- self.index = None
- self.root_branch = branch_class(*branch_args)
- self.branches = [self.root_branch]
-
- def finish_branches(self, index):
- """Run Finish() on all branches index has passed
-
- When we pass out of a branch, delete it and process it with
- the parent. The innermost branches will be the last in the
- list. Return None if we are out of the entire tree, and 1
- otherwise.
-
- """
- branches = self.branches
- while 1:
- to_be_finished = branches[-1]
- base_index = to_be_finished.base_index
- if base_index != index[:len(base_index)]:
- # out of the tree, finish with to_be_finished
- to_be_finished.call_end_proc()
- del branches[-1]
- if not branches: return None
- branches[-1].branch_process(to_be_finished)
- else: return 1
-
- def add_branch(self, index):
- """Return branch of type self.branch_class, add to branch list"""
- branch = self.branch_class(*self.branch_args)
- branch.base_index = index
- self.branches.append(branch)
- return branch
-
- def process_w_branch(self, branch, args):
- """Run start_process on latest branch"""
- Robust.check_common_error(branch.on_error,
- branch.start_process, args)
- if not branch.caught_exception: branch.start_successful = 1
-
- def Finish(self):
- """Call at end of sequence to tie everything up"""
- while 1:
- to_be_finished = self.branches.pop()
- to_be_finished.call_end_proc()
- if not self.branches: break
- self.branches[-1].branch_process(to_be_finished)
-
- def __call__(self, *args):
- """Process args, where args[0] is current position in iterator
-
- Returns true if args successfully processed, false if index is
- not in the current tree and thus the final result is
- available.
-
- Also note below we set self.index after doing the necessary
- start processing, in case there is a crash in the middle.
-
- """
- index = args[0]
- if self.index is None:
- self.root_branch.base_index = index
- self.process_w_branch(self.root_branch, args)
- self.index = index
- return 1
-
- if index <= self.index:
- Log("Warning: oldindex %s >= newindex %s" % (self.index, index), 2)
- return 1
-
- if self.finish_branches(index) is None:
- return None # We are no longer in the main tree
- last_branch = self.branches[-1]
- if last_branch.start_successful:
- if last_branch.can_fast_process(*args):
- last_branch.fast_process(*args)
- else:
- branch = self.add_branch(index)
- self.process_w_branch(branch, args)
- else: last_branch.log_prev_error(index)
-
- self.index = index
- return 1
-
-
-class ITRBranch:
- """Helper class for IterTreeReducer below
-
- There are five stub functions below: start_process, end_process,
- branch_process, can_fast_process, and fast_process. A class that
- subclasses this one will probably fill in these functions to do
- more.
-
-	It is important that this class be picklable, so keep that in mind
- when subclassing (this is used to resume failed sessions).
-
- """
- base_index = index = None
- finished = None
- caught_exception = start_successful = None
-
- def call_end_proc(self):
- """Runs the end_process on self, checking for errors"""
- if self.finished or not self.start_successful:
- self.caught_exception = 1
- if self.caught_exception: self.log_prev_error(self.base_index)
- else: Robust.check_common_error(self.on_error, self.end_process)
- self.finished = 1
-
- def start_process(self, *args):
- """Do some initial processing (stub)"""
- pass
-
- def end_process(self):
- """Do any final processing before leaving branch (stub)"""
- pass
-
- def branch_process(self, branch):
- """Process a branch right after it is finished (stub)"""
- assert branch.finished
- pass
-
- def can_fast_process(self, *args):
- """True if object can be processed without new branch (stub)"""
- return None
-
- def fast_process(self, *args):
- """Process args without new child branch (stub)"""
- pass
-
- def on_error(self, exc, *args):
- """This is run on any exception in start/end-process"""
- self.caught_exception = 1
- if args and args[0] and isinstance(args[0], tuple):
- filename = os.path.join(*args[0])
- elif self.index: filename = os.path.join(*self.index)
- else: filename = "."
- Log("Error '%s' processing %s" % (exc, filename), 2)
-
- def log_prev_error(self, index):
- """Call function if no pending exception"""
- Log("Skipping %s because of previous error" %
- (os.path.join(*index),), 2)
-
-
-# Put at bottom to prevent (viciously) circular module dependencies
-from robust import *
-from log import *
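IterTreeReducer and ITRBranch are not deleted outright; the rorpiter.IterTreeReducer(...) calls elsewhere in this patch show they now live in rorpiter.py. The core idea reduces to a self-contained miniature (toy code, not the rdiff-backup classes): feed tuple indices in depth-first order, and each subtree's result is folded into its parent the moment iteration moves past that subtree.

    class TreeReducer:
        """Toy version of IterTreeReducer: indices are tuples fed in
        depth-first order; a branch value is folded into its parent
        once an index outside that branch arrives."""
        def __init__(self, make_branch):
            self.make_branch = make_branch
            self.stack = []                    # [(base_index, value)]
        def __call__(self, index):
            # Close any branches that index has moved past
            while self.stack and \
                  self.stack[-1][0] != index[:len(self.stack[-1][0])]:
                base, value = self.stack.pop()
                parent_base, parent_val = self.stack[-1]
                self.stack[-1] = (parent_base, parent_val + value)
            self.stack.append((index, self.make_branch(index)))
        def finish(self):
            while len(self.stack) > 1:
                base, value = self.stack.pop()
                parent_base, parent_val = self.stack[-1]
                self.stack[-1] = (parent_base, parent_val + value)
            return self.stack[0][1]

    r = TreeReducer(lambda index: 1)           # each entry counts as 1
    for index in [(), ("a",), ("a", "b"), ("c",)]: r(index)
    assert r.finish() == 4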
diff --git a/rdiff-backup/rdiff_backup/log.py b/rdiff-backup/rdiff_backup/log.py
index 5c03b27..0f9c4f3 100644
--- a/rdiff-backup/rdiff_backup/log.py
+++ b/rdiff-backup/rdiff_backup/log.py
@@ -20,6 +20,7 @@
"""Manage logging, displaying and recording messages with required verbosity"""
import time, sys, traceback, types
+import Globals
class LoggerError(Exception): pass
@@ -151,6 +152,7 @@ class Logger:
def FatalError(self, message):
self("Fatal Error: " + message, 1)
+ import Main
Main.cleanup()
sys.exit(1)
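Moving the Main import into FatalError (and dropping the old bottom-of-file "import Globals, Main") is the standard deferred-import fix for a circular dependency: Main imports log, so log cannot import Main at load time. A minimal sketch of the pattern, with hypothetical module names:

    # main.py (hypothetical)
    # import log            # main loads log at startup

    # log.py (hypothetical)
    def fatal(message):
        import main          # deferred: resolved at call time, after both
        main.cleanup()       # modules have finished loading
        raise SystemExit(1)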
@@ -180,4 +182,4 @@ class Logger:
logging_func(self.exception_to_string(), verbosity)
Log = Logger()
-import Globals, Main
+
diff --git a/rdiff-backup/rdiff_backup/manage.py b/rdiff-backup/rdiff_backup/manage.py
index 2e1d7b6..f974147 100644
--- a/rdiff-backup/rdiff_backup/manage.py
+++ b/rdiff-backup/rdiff_backup/manage.py
@@ -20,91 +20,86 @@
"""list, delete, and otherwise manage increments"""
from __future__ import generators
-from static import *
-from log import *
-import Globals, Time
+from log import Log
+import Globals, Time, static, manage
class ManageException(Exception): pass
-class Manage:
- def get_file_type(rp):
- """Returns one of "regular", "directory", "missing", or "special"."""
- if not rp.lstat(): return "missing"
- elif rp.isdir(): return "directory"
- elif rp.isreg(): return "regular"
- else: return "special"
-
- def get_inc_type(inc):
- """Return file type increment represents"""
- assert inc.isincfile()
- type = inc.getinctype()
- if type == "dir": return "directory"
- elif type == "diff": return "regular"
- elif type == "missing": return "missing"
- elif type == "snapshot": return Manage.get_file_type(inc)
- else: assert None, "Unknown type %s" % (type,)
-
- def describe_incs_parsable(incs, mirror_time, mirrorrp):
- """Return a string parsable by computer describing the increments
-
- Each line is a time in seconds of the increment, and then the
- type of the file. It will be sorted oldest to newest. For example:
-
- 10000 regular
- 20000 directory
- 30000 special
- 40000 missing
- 50000 regular <- last will be the current mirror
-
- """
- incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs]
- incpairs.sort()
- result = ["%s %s" % (time, Manage.get_inc_type(inc))
- for time, inc in incpairs]
- result.append("%s %s" % (mirror_time, Manage.get_file_type(mirrorrp)))
- return "\n".join(result)
-
- def describe_incs_human(incs, mirror_time, mirrorrp):
- """Return a string describing all the the root increments"""
- incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs]
- incpairs.sort()
-
- result = ["Found %d increments:" % len(incpairs)]
- for time, inc in incpairs:
- result.append(" %s %s" %
- (inc.dirsplit()[1], Time.timetopretty(time)))
- result.append("Current mirror: %s" % Time.timetopretty(mirror_time))
- return "\n".join(result)
-
- def delete_earlier_than(baserp, time):
- """Deleting increments older than time in directory baserp
-
- time is in seconds. It will then delete any empty directories
- in the tree. To process the entire backup area, the
- rdiff-backup-data directory should be the root of the tree.
-
- """
- baserp.conn.Manage.delete_earlier_than_local(baserp, time)
-
- def delete_earlier_than_local(baserp, time):
- """Like delete_earlier_than, but run on local connection for speed"""
- assert baserp.conn is Globals.local_connection
- def yield_files(rp):
- yield rp
- if rp.isdir():
- for filename in rp.listdir():
- for sub_rp in yield_files(rp.append(filename)):
- yield sub_rp
-
- for rp in yield_files(baserp):
- if ((rp.isincfile() and
- Time.stringtotime(rp.getinctime()) < time) or
- (rp.isdir() and not rp.listdir())):
- Log("Deleting increment file %s" % rp.path, 5)
- rp.delete()
-
-MakeStatic(Manage)
+def get_file_type(rp):
+ """Returns one of "regular", "directory", "missing", or "special"."""
+ if not rp.lstat(): return "missing"
+ elif rp.isdir(): return "directory"
+ elif rp.isreg(): return "regular"
+ else: return "special"
+
+def get_inc_type(inc):
+ """Return file type increment represents"""
+ assert inc.isincfile()
+ type = inc.getinctype()
+ if type == "dir": return "directory"
+ elif type == "diff": return "regular"
+ elif type == "missing": return "missing"
+ elif type == "snapshot": return get_file_type(inc)
+ else: assert None, "Unknown type %s" % (type,)
+
+def describe_incs_parsable(incs, mirror_time, mirrorrp):
+ """Return a string parsable by computer describing the increments
+
+ Each line is a time in seconds of the increment, and then the
+ type of the file. It will be sorted oldest to newest. For example:
+
+ 10000 regular
+ 20000 directory
+ 30000 special
+ 40000 missing
+ 50000 regular <- last will be the current mirror
+
+ """
+ incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs]
+ incpairs.sort()
+ result = ["%s %s" % (time, get_inc_type(inc)) for time, inc in incpairs]
+ result.append("%s %s" % (mirror_time, get_file_type(mirrorrp)))
+ return "\n".join(result)
+
+def describe_incs_human(incs, mirror_time, mirrorrp):
+ """Return a string describing all the the root increments"""
+ incpairs = [(Time.stringtotime(inc.getinctime()), inc) for inc in incs]
+ incpairs.sort()
+
+ result = ["Found %d increments:" % len(incpairs)]
+ for time, inc in incpairs:
+ result.append(" %s %s" %
+ (inc.dirsplit()[1], Time.timetopretty(time)))
+ result.append("Current mirror: %s" % Time.timetopretty(mirror_time))
+ return "\n".join(result)
+
+def delete_earlier_than(baserp, time):
+ """Deleting increments older than time in directory baserp
+
+ time is in seconds. It will then delete any empty directories
+ in the tree. To process the entire backup area, the
+ rdiff-backup-data directory should be the root of the tree.
+
+ """
+ baserp.conn.manage.delete_earlier_than_local(baserp, time)
+
+def delete_earlier_than_local(baserp, time):
+ """Like delete_earlier_than, but run on local connection for speed"""
+ assert baserp.conn is Globals.local_connection
+ def yield_files(rp):
+ yield rp
+ if rp.isdir():
+ for filename in rp.listdir():
+ for sub_rp in yield_files(rp.append(filename)):
+ yield sub_rp
+
+ for rp in yield_files(baserp):
+ if ((rp.isincfile() and
+ Time.stringtotime(rp.getinctime()) < time) or
+ (rp.isdir() and not rp.listdir())):
+ Log("Deleting increment file %s" % rp.path, 5)
+ rp.delete()
class IncObj:
diff --git a/rdiff-backup/rdiff_backup/restore.py b/rdiff-backup/rdiff_backup/restore.py
index 9ca279e..40720a4 100644
--- a/rdiff-backup/rdiff_backup/restore.py
+++ b/rdiff-backup/rdiff_backup/restore.py
@@ -20,238 +20,237 @@
"""Read increment files and restore to original"""
from __future__ import generators
-import tempfile
-from static import *
+import tempfile, os
+from log import Log
+import Globals, Time, Rdiff, Hardlink, FilenameMapping, SetConnections, \
+ rorpiter, selection, destructive_stepping, rpath, lazy
class RestoreError(Exception): pass
-class Restore:
- def Restore(inc_rpath, mirror, target, rest_time):
- """Recursively restore inc_rpath and mirror to target at rest_time
+def Restore(inc_rpath, mirror, target, rest_time):
+ """Recursively restore inc_rpath and mirror to target at rest_time
-		Like restore_recursive below, but with a more friendly
- interface (it converts to DSRPaths if necessary, finds the inc
- files with the appropriate base, and makes rid).
+	Like restore_recursive below, but with a more friendly
+ interface (it converts to DSRPaths if necessary, finds the inc
+ files with the appropriate base, and makes rid).
- rest_time is the time in seconds to restore to;
+ rest_time is the time in seconds to restore to;
- inc_rpath should not be the name of an increment file, but the
- increment file shorn of its suffixes and thus should have the
- same index as mirror.
+ inc_rpath should not be the name of an increment file, but the
+ increment file shorn of its suffixes and thus should have the
+ same index as mirror.
- """
- if not isinstance(target, DSRPath): target = DSRPath(None, target)
-
- mirror_time = Restore.get_mirror_time()
- rest_time = Restore.get_rest_time(rest_time, mirror_time)
- inc_list = Restore.get_inclist(inc_rpath)
- rid = RestoreIncrementData(inc_rpath.index, inc_rpath, inc_list)
- rid.sortincseq(rest_time, mirror_time)
- Restore.check_hardlinks(rest_time)
- Restore.restore_recursive(inc_rpath.index, mirror, rid, target,
- rest_time, mirror_time)
-
- def get_mirror_time():
- """Return the time (in seconds) of latest mirror"""
- current_mirror_incs = \
- Restore.get_inclist(Globals.rbdir.append("current_mirror"))
- if not current_mirror_incs:
- Log.FatalError("Could not get time of current mirror")
- elif len(current_mirror_incs) > 1:
- Log("Warning, two different dates for current mirror found", 2)
- return Time.stringtotime(current_mirror_incs[0].getinctime())
-
- def get_rest_time(old_rest_time, mirror_time):
- """If old_rest_time is between two increments, return older time
-
- There is a slightly tricky reason for doing this: The rest of
- the code just ignores increments that are older than
- rest_time. But sometimes we want to consider the very next
- increment older than rest time, because rest_time will be
- between two increments, and what was actually on the mirror
- side will correspond to the older one.
-
- So here we assume all rdiff-backup events were recorded in
- "increments" increments, and if its in-between we pick the
- older one here.
+ """
+ if not isinstance(target, destructive_stepping.DSRPath):
+ target = destructive_stepping.DSRPath(None, target)
+
+ mirror_time = get_mirror_time()
+ rest_time = get_rest_time(rest_time, mirror_time)
+ inc_list = get_inclist(inc_rpath)
+ rid = RestoreIncrementData(inc_rpath.index, inc_rpath, inc_list)
+ rid.sortincseq(rest_time, mirror_time)
+ check_hardlinks(rest_time)
+ restore_recursive(inc_rpath.index, mirror, rid, target,
+ rest_time, mirror_time)
+
+def get_mirror_time():
+ """Return the time (in seconds) of latest mirror"""
+ current_mirror_incs = get_inclist(Globals.rbdir.append("current_mirror"))
+ if not current_mirror_incs:
+ Log.FatalError("Could not get time of current mirror")
+ elif len(current_mirror_incs) > 1:
+ Log("Warning, two different dates for current mirror found", 2)
+ return Time.stringtotime(current_mirror_incs[0].getinctime())
+
+def get_rest_time(old_rest_time, mirror_time):
+ """If old_rest_time is between two increments, return older time
+
+ There is a slightly tricky reason for doing this: The rest of
+ the code just ignores increments that are older than
+ rest_time. But sometimes we want to consider the very next
+ increment older than rest time, because rest_time will be
+ between two increments, and what was actually on the mirror
+ side will correspond to the older one.
+
+ So here we assume all rdiff-backup events were recorded in
+ "increments" increments, and if its in-between we pick the
+ older one here.
- """
- base_incs = Restore.get_inclist(Globals.rbdir.append("increments"))
- if not base_incs: return old_rest_time
- inctimes = [Time.stringtotime(inc.getinctime()) for inc in base_incs]
- inctimes.append(mirror_time)
- older_times = filter(lambda time: time <= old_rest_time, inctimes)
- if older_times: return max(older_times)
- else: # restore time older than oldest increment, just return that
- return min(inctimes)
-
- def get_inclist(inc_rpath):
- """Returns increments with given base"""
- dirname, basename = inc_rpath.dirsplit()
- parent_dir = RPath(inc_rpath.conn, dirname, ())
- if not parent_dir.isdir(): return [] # inc directory not created yet
- index = inc_rpath.index
-
- if index:
- get_inc_ext = lambda filename: \
- RPath(inc_rpath.conn, inc_rpath.base,
- inc_rpath.index[:-1] + (filename,))
- else: get_inc_ext = lambda filename: \
- RPath(inc_rpath.conn, os.path.join(dirname, filename))
-
- inc_list = []
- for filename in parent_dir.listdir():
- inc = get_inc_ext(filename)
- if inc.isincfile() and inc.getincbase_str() == basename:
- inc_list.append(inc)
- return inc_list
-
- def check_hardlinks(rest_time):
- """Check for hard links and enable hard link support if found"""
- if (Globals.preserve_hardlinks != 0 and
- Hardlink.retrieve_final(rest_time)):
- Log("Hard link information found, attempting to preserve "
- "hard links.", 5)
- SetConnections.UpdateGlobal('preserve_hardlinks', 1)
- else: SetConnections.UpdateGlobal('preserve_hardlinks', None)
-
- def restore_recursive(index, mirror, rid, target, time, mirror_time):
- """Recursive restore function.
-
- rid is a RestoreIncrementData object whose inclist is already
- sortedincseq'd, and target is the dsrp to restore to.
-
- Note that target may have a different index than mirror and
- rid, because we may be restoring a file whose index is, say
- ('foo','bar') to a target whose path does not contain
- "foo/bar".
+ """
+ base_incs = get_inclist(Globals.rbdir.append("increments"))
+ if not base_incs: return old_rest_time
+ inctimes = [Time.stringtotime(inc.getinctime()) for inc in base_incs]
+ inctimes.append(mirror_time)
+ older_times = filter(lambda time: time <= old_rest_time, inctimes)
+ if older_times: return max(older_times)
+ else: # restore time older than oldest increment, just return that
+ return min(inctimes)
+
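A quick worked check of the rounding rule above, with made-up times: if increments exist at 10000, 20000 and 30000 and the user asks for 25000, the mirror actually corresponded to the 20000 increment, so that is what gets returned.

    def pick_rest_time(old_rest_time, inctimes):
        # inctimes already includes mirror_time, as in get_rest_time()
        older = [t for t in inctimes if t <= old_rest_time]
        if older: return max(older)
        return min(inctimes)    # older than everything: oldest increment

    assert pick_rest_time(25000, [10000, 20000, 30000]) == 20000
    assert pick_rest_time(5000,  [10000, 20000, 30000]) == 10000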
+def get_inclist(inc_rpath):
+ """Returns increments with given base"""
+ dirname, basename = inc_rpath.dirsplit()
+ parent_dir = rpath.RPath(inc_rpath.conn, dirname, ())
+ if not parent_dir.isdir(): return [] # inc directory not created yet
+ index = inc_rpath.index
+
+ if index:
+ get_inc_ext = lambda filename: \
+ rpath.RPath(inc_rpath.conn, inc_rpath.base,
+ inc_rpath.index[:-1] + (filename,))
+ else: get_inc_ext = lambda filename: \
+ rpath.RPath(inc_rpath.conn, os.path.join(dirname, filename))
+
+ inc_list = []
+ for filename in parent_dir.listdir():
+ inc = get_inc_ext(filename)
+ if inc.isincfile() and inc.getincbase_str() == basename:
+ inc_list.append(inc)
+ return inc_list
+
+def check_hardlinks(rest_time):
+ """Check for hard links and enable hard link support if found"""
+ if (Globals.preserve_hardlinks != 0 and
+ Hardlink.retrieve_final(rest_time)):
+ Log("Hard link information found, attempting to preserve "
+ "hard links.", 5)
+ SetConnections.UpdateGlobal('preserve_hardlinks', 1)
+ else: SetConnections.UpdateGlobal('preserve_hardlinks', None)
+
+def restore_recursive(index, mirror, rid, target, time, mirror_time):
+ """Recursive restore function.
+
+ rid is a RestoreIncrementData object whose inclist is already
+	sortincseq'd, and target is the dsrp to restore to.
+
+ Note that target may have a different index than mirror and
+ rid, because we may be restoring a file whose index is, say
+ ('foo','bar') to a target whose path does not contain
+ "foo/bar".
- """
- assert isinstance(target, DSRPath)
- assert mirror.index == rid.index
+ """
+ assert isinstance(target, destructive_stepping.DSRPath)
+ assert mirror.index == rid.index
- target_finalizer = IterTreeReducer(DestructiveSteppingFinalizer, ())
- for rcd in Restore.yield_rcds(rid.index, mirror, rid,
- target, time, mirror_time):
- rcd.RestoreFile()
- #if rcd.mirror: mirror_finalizer(rcd.index, rcd.mirror)
- target_finalizer(rcd.target.index, rcd.target)
- target_finalizer.Finish()
+ target_finalizer = rorpiter.IterTreeReducer(
+ rorpiter.DestructiveSteppingFinalizer, ())
+ for rcd in yield_rcds(rid.index, mirror, rid, target, time, mirror_time):
+ rcd.RestoreFile()
+ #if rcd.mirror: mirror_finalizer(rcd.index, rcd.mirror)
+ target_finalizer(rcd.target.index, rcd.target)
+ target_finalizer.Finish()
- def yield_rcds(index, mirrorrp, rid, target, rest_time, mirror_time):
- """Iterate RestoreCombinedData objects starting with given args
+def yield_rcds(index, mirrorrp, rid, target, rest_time, mirror_time):
+ """Iterate RestoreCombinedData objects starting with given args
- rid is a RestoreCombinedData object. target is an rpath where
- the created file should go.
+	rid is a RestoreIncrementData object.  target is an rpath where
+ the created file should go.
- In this case the "mirror" directory is treated as the source,
- and we are actually copying stuff onto what Select considers
- the source directory.
+ In this case the "mirror" directory is treated as the source,
+ and we are actually copying stuff onto what Select considers
+ the source directory.
- """
- select_result = Globals.select_mirror.Select(target)
- if select_result == 0: return
+ """
+ select_result = Globals.select_mirror.Select(target)
+ if select_result == 0: return
+
+ if mirrorrp and not Globals.select_source.Select(mirrorrp):
+ mirrorrp = None
+ rcd = RestoreCombinedData(rid, mirrorrp, target)
+
+ if mirrorrp and mirrorrp.isdir() or \
+ rid and rid.inc_rpath and rid.inc_rpath.isdir():
+ sub_rcds = yield_sub_rcds(index, mirrorrp, rid,
+ target, rest_time, mirror_time)
+ else: sub_rcds = None
+
+ if select_result == 1:
+ yield rcd
+ if sub_rcds:
+ for sub_rcd in sub_rcds: yield sub_rcd
+ elif select_result == 2:
+ if sub_rcds:
+ try: first = sub_rcds.next()
+ except StopIteration: return # no tuples found inside, skip
+ yield rcd
+ yield first
+ for sub_rcd in sub_rcds: yield sub_rcd
+
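The select_result values above follow the selection machinery's convention: 0 excludes the file outright, 1 includes it, and 2 includes a directory only if something beneath it is included. A self-contained sketch of that "2 means look inside first" logic (the tree and selector here are made up):

    def filtered(node, select, children):
        result = select(node)
        if result == 0: return                 # excluded outright
        subs = (x for c in children(node)
                  for x in filtered(c, select, children))
        if result == 1:
            yield node
            for s in subs: yield s
        else:                                  # result == 2
            first = next(subs, None)
            if first is None: return           # nothing inside matched
            yield node
            yield first
            for s in subs: yield s

    tree = {"root": ["a", "b"], "a": [], "b": []}
    sel = lambda n: 2 if n == "root" else (1 if n == "a" else 0)
    assert list(filtered("root", sel, lambda n: tree[n])) == ["root", "a"]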
+def yield_sub_rcds(index, mirrorrp, rid, target, rest_time, mirror_time):
+ """Yield collated tuples from inside given args"""
+ if not check_dir_exists(mirrorrp, rid): return
+ mirror_iter = yield_mirrorrps(mirrorrp)
+ rid_iter = yield_rids(rid, rest_time, mirror_time)
+
+ for indexed_tup in rorpiter.CollateIterators(mirror_iter, rid_iter):
+ index = indexed_tup.index
+ new_mirrorrp, new_rid = indexed_tup
+ for rcd in yield_rcds(index, new_mirrorrp, new_rid,
+ target.append(index[-1]), rest_time, mirror_time):
+ yield rcd
- if mirrorrp and not Globals.select_source.Select(mirrorrp):
- mirrorrp = None
- rcd = RestoreCombinedData(rid, mirrorrp, target)
+def check_dir_exists(mirrorrp, rid):
+ """Return true if target should be a directory"""
+ if rid and rid.inc_list:
+ # Incs say dir if last (earliest) one is a dir increment
+ return rid.inc_list[-1].getinctype() == "dir"
+ elif mirrorrp: return mirrorrp.isdir() # if no incs, copy mirror
+ else: return None
+
+def yield_mirrorrps(mirrorrp):
+ """Yield mirrorrps underneath given mirrorrp"""
+ if mirrorrp and mirrorrp.isdir():
+ if Globals.quoting_enabled:
+ for rp in selection.get_quoted_dir_children(mirrorrp):
+ yield rp
+ else:
+ dirlist = mirrorrp.listdir()
+ dirlist.sort()
+ for filename in dirlist: yield mirrorrp.append(filename)
- if mirrorrp and mirrorrp.isdir() or \
- rid and rid.inc_rpath and rid.inc_rpath.isdir():
- sub_rcds = Restore.yield_sub_rcds(index, mirrorrp, rid,
- target, rest_time, mirror_time)
- else: sub_rcds = None
+def yield_rids(rid, rest_time, mirror_time):
+ """Yield RestoreIncrementData objects within given rid dir
- if select_result == 1:
- yield rcd
- if sub_rcds:
- for sub_rcd in sub_rcds: yield sub_rcd
- elif select_result == 2:
- if sub_rcds:
- try: first = sub_rcds.next()
- except StopIteration: return # no tuples found inside, skip
- yield rcd
- yield first
- for sub_rcd in sub_rcds: yield sub_rcd
-
- def yield_sub_rcds(index, mirrorrp, rid, target, rest_time, mirror_time):
- """Yield collated tuples from inside given args"""
- if not Restore.check_dir_exists(mirrorrp, rid): return
- mirror_iter = Restore.yield_mirrorrps(mirrorrp)
- rid_iter = Restore.yield_rids(rid, rest_time, mirror_time)
-
- for indexed_tup in RORPIter.CollateIterators(mirror_iter, rid_iter):
- index = indexed_tup.index
- new_mirrorrp, new_rid = indexed_tup
- for rcd in Restore.yield_rcds(index, new_mirrorrp,
- new_rid, target.append(index[-1]), rest_time, mirror_time):
- yield rcd
-
- def check_dir_exists(mirrorrp, rid):
- """Return true if target should be a directory"""
- if rid and rid.inc_list:
- # Incs say dir if last (earliest) one is a dir increment
- return rid.inc_list[-1].getinctype() == "dir"
- elif mirrorrp: return mirrorrp.isdir() # if no incs, copy mirror
- else: return None
-
- def yield_mirrorrps(mirrorrp):
- """Yield mirrorrps underneath given mirrorrp"""
- if mirrorrp and mirrorrp.isdir():
- if Globals.quoting_enabled:
- for rp in FilenameMapping.get_quoted_dir_children(mirrorrp):
- yield rp
- else:
- dirlist = mirrorrp.listdir()
- dirlist.sort()
- for filename in dirlist: yield mirrorrp.append(filename)
-
- def yield_rids(rid, rest_time, mirror_time):
- """Yield RestoreIncrementData objects within given rid dir
-
- If the rid doesn't correspond to a directory, don't yield any
- elements. If there are increments whose corresponding base
- doesn't exist, the first element will be None. All the rpaths
- involved correspond to files in the increment directory.
+ If the rid doesn't correspond to a directory, don't yield any
+ elements. If there are increments whose corresponding base
+ doesn't exist, the first element will be None. All the rpaths
+ involved correspond to files in the increment directory.
- """
- if not rid or not rid.inc_rpath or not rid.inc_rpath.isdir(): return
- rid_dict = {} # dictionary of basenames:rids
- dirlist = rid.inc_rpath.listdir()
- if Globals.quoting_enabled:
- dirlist = [FilenameMapping.unquote(fn) for fn in dirlist]
-
- def affirm_dict_indexed(basename):
- """Make sure the rid dictionary has given basename as key"""
- if not rid_dict.has_key(basename):
- rid_dict[basename] = RestoreIncrementData(
- rid.index + (basename,), None, []) # init with empty rid
-
- def add_to_dict(filename):
- """Add filename to the inc tuple dictionary"""
- rp = rid.inc_rpath.append(filename)
- if Globals.quoting_enabled: rp.quote_path()
- if rp.isincfile() and rp.getinctype() != 'data':
- basename = rp.getincbase_str()
- affirm_dict_indexed(basename)
- rid_dict[basename].inc_list.append(rp)
- elif rp.isdir():
- affirm_dict_indexed(filename)
- rid_dict[filename].inc_rpath = rp
-
- for filename in dirlist: add_to_dict(filename)
- keys = rid_dict.keys()
- keys.sort()
-
- # sortincseq now to avoid descending .missing directories later
- for key in keys:
- rid = rid_dict[key]
- if rid.inc_rpath or rid.inc_list:
- rid.sortincseq(rest_time, mirror_time)
- yield rid
-
-MakeStatic(Restore)
+ """
+ if not rid or not rid.inc_rpath or not rid.inc_rpath.isdir(): return
+ rid_dict = {} # dictionary of basenames:rids
+ dirlist = rid.inc_rpath.listdir()
+ if Globals.quoting_enabled:
+ dirlist = [FilenameMapping.unquote(fn) for fn in dirlist]
+
+ def affirm_dict_indexed(basename):
+ """Make sure the rid dictionary has given basename as key"""
+ if not rid_dict.has_key(basename):
+ rid_dict[basename] = RestoreIncrementData(
+ rid.index + (basename,), None, []) # init with empty rid
+
+ def add_to_dict(filename):
+ """Add filename to the inc tuple dictionary"""
+ rp = rid.inc_rpath.append(filename)
+ if Globals.quoting_enabled: rp.quote_path()
+ if rp.isincfile() and rp.getinctype() != 'data':
+ basename = rp.getincbase_str()
+ affirm_dict_indexed(basename)
+ rid_dict[basename].inc_list.append(rp)
+ elif rp.isdir():
+ affirm_dict_indexed(filename)
+ rid_dict[filename].inc_rpath = rp
+
+ for filename in dirlist: add_to_dict(filename)
+ keys = rid_dict.keys()
+ keys.sort()
+
+ # sortincseq now to avoid descending .missing directories later
+ for key in keys:
+ rid = rid_dict[key]
+ if rid.inc_rpath or rid.inc_list:
+ rid.sortincseq(rest_time, mirror_time)
+ yield rid
class RestoreIncrementData:
@@ -339,7 +338,7 @@ class RestoreCombinedData:
if not self.inc_list or self.inc_list[0].getinctype() == "diff":
assert self.mirror and self.mirror.lstat(), \
"No base to go with incs for %s" % self.target.path
- RPath.copy_with_attribs(self.mirror, self.target)
+ rpath.copy_with_attribs(self.mirror, self.target)
for inc in self.inc_list: self.applyinc(inc, self.target)
def log(self):
@@ -353,7 +352,7 @@ class RestoreCombinedData:
"""Hard link target and return true if hard linking appropriate"""
if (Globals.preserve_hardlinks and
Hardlink.restore_link(self.index, self.target)):
- RPath.copy_attribs(self.inc_list and self.inc_list[-1] or
+ rpath.copy_attribs(self.inc_list and self.inc_list[-1] or
self.mirror, self.target)
return 1
return None
@@ -377,13 +376,8 @@ class RestoreCombinedData:
elif inctype == "snapshot":
if inc.isinccompressed():
target.write_from_fileobj(inc.open("rb", compress = 1))
- else: RPath.copy(inc, target)
+ else: rpath.copy(inc, target)
else: raise RestoreError("Unknown inctype %s" % inctype)
- RPath.copy_attribs(inc, target)
+ rpath.copy_attribs(inc, target)
-from log import *
-from destructive_stepping import *
-from rpath import *
-from rorpiter import *
-import Globals, Time, Rdiff, Hardlink, FilenameMapping, SetConnections
diff --git a/rdiff-backup/rdiff_backup/robust.py b/rdiff-backup/rdiff_backup/robust.py
index be7f1e8..67f32be 100644
--- a/rdiff-backup/rdiff_backup/robust.py
+++ b/rdiff-backup/rdiff_backup/robust.py
@@ -46,13 +46,16 @@ able to narrow down the possibilities.
"""
-import tempfile, errno, signal, cPickle, C
-from static import *
+import os, time
+from log import Log
+import Time, librsync, errno, signal, cPickle, C, \
+ Hardlink, TempFile, static, rpath, Globals
-class RobustAction:
+
+class Action:
"""Represents a file operation to be accomplished later"""
def __init__(self, init_thunk, final_func, error_handler):
- """RobustAction initializer
+ """Action initializer
All the thunks are functions whose return value will be
ignored. init_thunk should not make any irreversible changes
@@ -96,217 +99,212 @@ class RobustAction:
def default_error_handler(self, exc, ran_init, init_val): pass
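The class body above only shows the constructor and the default error handler, but the execute() calls throughout this patch imply the contract: run init_thunk, then final_func on its return value, and hand any exception to error_handler. A runnable stand-in built on that assumption (a sketch, not the real robust.Action):

    class ActionSketch:
        def __init__(self, init_thunk, final_func, error_handler):
            self.init_thunk = init_thunk or (lambda: None)
            self.final_func = final_func or (lambda init_val: init_val)
            self.error_handler = error_handler or (lambda e, ran, val: None)
        def execute(self):
            ran_init, init_val = 0, None
            try:
                init_val = self.init_thunk()
                ran_init = 1
                return self.final_func(init_val)
            except Exception as exc:
                self.error_handler(exc, ran_init, init_val)
                raise                          # handler cleans up, then re-raise

    trace = []
    ActionSketch(lambda: trace.append("init"),
                 lambda v: trace.append("final"), None).execute()
    assert trace == ["init", "final"]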
-class Robust:
- """Contains various methods designed to make things safer"""
- null_action = RobustAction(None, None, None)
- def chain(*robust_action_list):
- """Return chain tying together a number of robust actions
-
- The whole chain will be aborted if some error occurs in
- initialization stage of any of the component actions.
-
- """
- ras_with_started_inits, init_return_vals = [], []
- def init():
- for ra in robust_action_list:
- ras_with_started_inits.append(ra)
- init_return_vals.append(ra.init_thunk())
- return init_return_vals
- def final(init_return_vals):
- final_vals = []
- for ra, init_val in zip(robust_action_list, init_return_vals):
- final_vals.append(ra.final_func(init_val))
- return final_vals
- def error(exc, ran_init, init_val):
- for ra, init_val in zip(ras_with_started_inits, init_return_vals):
- ra.error_handler(exc, 1, init_val)
- for ra in ras_with_started_inits[len(init_return_vals):]:
- ra.error_handler(exc, None, None)
- return RobustAction(init, final, error)
-
- def chain_nested(*robust_action_list):
- """Like chain but final actions performed in reverse order"""
- ras_with_started_inits, init_vals = [], []
- def init():
- for ra in robust_action_list:
- ras_with_started_inits.append(ra)
- init_vals.append(ra.init_thunk())
- return init_vals
- def final(init_vals):
- ras_and_inits = zip(robust_action_list, init_vals)
- ras_and_inits.reverse()
- final_vals = []
- for ra, init_val in ras_and_inits:
- final_vals.append(ra.final_func(init_val))
- return final_vals
- def error(exc, ran_init, init_val):
- for ra, init_val in zip(ras_with_started_inits, init_vals):
- ra.error_handler(exc, 1, init_val)
- for ra in ras_with_started_inits[len(init_vals):]:
- ra.error_handler(exc, None, None)
- return RobustAction(init, final, error)
-
- def make_tf_robustaction(init_thunk, tempfiles, final_renames = None):
- """Shortcut RobustAction creator when only tempfiles involved
-
- Often the robust action will just consist of some initial
- stage, renaming tempfiles in the final stage, and deleting
- them if there is an error. This function makes it easier to
- create RobustActions of that type.
-
- """
- if isinstance(tempfiles, TempFile): tempfiles = (tempfiles,)
- if isinstance(final_renames, RPath): final_renames = (final_renames,)
- if final_renames is None: final_renames = [None] * len(tempfiles)
- assert len(tempfiles) == len(final_renames)
-
- def final(init_val): # rename tempfiles to final positions
- for tempfile, destination in zip(tempfiles, final_renames):
- if destination:
- if destination.isdir(): # Cannot rename over directory
- destination.delete()
- tempfile.rename(destination)
- return init_val
- def error(exc, ran_init, init_val):
- for tf in tempfiles: tf.delete()
- return RobustAction(init_thunk, final, error)
-
- def copy_action(rorpin, rpout):
- """Return robust action copying rorpin to rpout
-
- The source can be a rorp or an rpath. Does not recurse. If
- directories copied, then just exit (output directory not
- overwritten).
-
- """
- tfl = [None] # Need some mutable state to hold tf value
- def init():
- if not (rorpin.isdir() and rpout.isdir()): # already a dir
- tfl[0] = tf = TempFileManager.new(rpout)
- if rorpin.isreg(): tf.write_from_fileobj(rorpin.open("rb"))
- else: RPath.copy(rorpin, tf)
- return tf
- else: return None
- def final(tf):
- if tf and tf.lstat():
- if rpout.isdir(): rpout.delete()
- tf.rename(rpout)
- return rpout
- def error(exc, ran_init, init_val):
- if tfl[0]: tfl[0].delete()
- return RobustAction(init, final, error)
-
- def copy_with_attribs_action(rorpin, rpout, compress = None):
- """Like copy_action but also copy attributes"""
- tfl = [None] # Need some mutable state for error handler
- def init():
- if not (rorpin.isdir() and rpout.isdir()): # already a dir
- tfl[0] = tf = TempFileManager.new(rpout)
- if rorpin.isreg():
- tf.write_from_fileobj(rorpin.open("rb"), compress)
- else: RPath.copy(rorpin, tf)
- if tf.lstat(): # Some files, like sockets, won't be created
- RPathStatic.copy_attribs(rorpin, tf)
- return tf
- else: return None
- def final(tf):
- if rorpin.isdir() and rpout.isdir():
- RPath.copy_attribs(rorpin, rpout)
- elif tf and tf.lstat():
- if rpout.isdir(): rpout.delete() # can't rename over dir
- tf.rename(rpout)
- return rpout
- def error(exc, ran_init, init_val):
- if tfl[0]: tfl[0].delete()
- return RobustAction(init, final, error)
-
- def copy_attribs_action(rorpin, rpout):
- """Return action which just copies attributes
-
- Copying attributes is already pretty atomic, so just run
- normal sequence.
-
- """
- def final(init_val):
- RPath.copy_attribs(rorpin, rpout)
- return rpout
- return RobustAction(None, final, None)
-
- def symlink_action(rpath, linktext):
- """Return symlink action by moving one file over another"""
- tf = TempFileManager.new(rpath)
- def init(): tf.symlink(linktext)
- return Robust.make_tf_robustaction(init, tf, rpath)
-
- def destructive_write_action(rp, s):
- """Return action writing string s to rpath rp in robust way
-
- This will overwrite any data currently in rp.
-
- """
- tf = TempFileManager.new(rp)
- def init():
- fp = tf.open("wb")
- fp.write(s)
- fp.close()
- tf.setdata()
- return Robust.make_tf_robustaction(init, tf, rp)
+null_action = Action(None, None, None)
+def chain(*robust_action_list):
+ """Return chain tying together a number of robust actions
+
+ The whole chain will be aborted if some error occurs in
+ initialization stage of any of the component actions.
+
+ """
+ ras_with_started_inits, init_return_vals = [], []
+ def init():
+ for ra in robust_action_list:
+ ras_with_started_inits.append(ra)
+ init_return_vals.append(ra.init_thunk())
+ return init_return_vals
+ def final(init_return_vals):
+ final_vals = []
+ for ra, init_val in zip(robust_action_list, init_return_vals):
+ final_vals.append(ra.final_func(init_val))
+ return final_vals
+ def error(exc, ran_init, init_val):
+ for ra, init_val in zip(ras_with_started_inits, init_return_vals):
+ ra.error_handler(exc, 1, init_val)
+ for ra in ras_with_started_inits[len(init_return_vals):]:
+ ra.error_handler(exc, None, None)
+ return Action(init, final, error)
+
+def chain_nested(*robust_action_list):
+ """Like chain but final actions performed in reverse order"""
+ ras_with_started_inits, init_vals = [], []
+ def init():
+ for ra in robust_action_list:
+ ras_with_started_inits.append(ra)
+ init_vals.append(ra.init_thunk())
+ return init_vals
+ def final(init_vals):
+ ras_and_inits = zip(robust_action_list, init_vals)
+ ras_and_inits.reverse()
+ final_vals = []
+ for ra, init_val in ras_and_inits:
+ final_vals.append(ra.final_func(init_val))
+ return final_vals
+ def error(exc, ran_init, init_val):
+ for ra, init_val in zip(ras_with_started_inits, init_vals):
+ ra.error_handler(exc, 1, init_val)
+ for ra in ras_with_started_inits[len(init_vals):]:
+ ra.error_handler(exc, None, None)
+ return Action(init, final, error)
+
+def make_tf_robustaction(init_thunk, tempfiles, final_renames = None):
+ """Shortcut Action creator when only tempfiles involved
+
+ Often the robust action will just consist of some initial
+ stage, renaming tempfiles in the final stage, and deleting
+ them if there is an error. This function makes it easier to
+ create Actions of that type.
+
+ """
+ if isinstance(tempfiles, TempFile.TempFile): tempfiles = (tempfiles,)
+ if isinstance(final_renames, rpath.RPath): final_renames = (final_renames,)
+ if final_renames is None: final_renames = [None] * len(tempfiles)
+ assert len(tempfiles) == len(final_renames)
+
+ def final(init_val): # rename tempfiles to final positions
+ for tempfile, destination in zip(tempfiles, final_renames):
+ if destination:
+ if destination.isdir(): # Cannot rename over directory
+ destination.delete()
+ tempfile.rename(destination)
+ return init_val
+ def error(exc, ran_init, init_val):
+ for tf in tempfiles: tf.delete()
+ return Action(init_thunk, final, error)
+
+def copy_action(rorpin, rpout):
+ """Return robust action copying rorpin to rpout
+
+ The source can be a rorp or an rpath. Does not recurse. If
+ directories copied, then just exit (output directory not
+ overwritten).
+
+ """
+ tfl = [None] # Need some mutable state to hold tf value
+ def init():
+ if not (rorpin.isdir() and rpout.isdir()): # already a dir
+ tfl[0] = tf = TempFile.new(rpout)
+ if rorpin.isreg(): tf.write_from_fileobj(rorpin.open("rb"))
+ else: rpath.copy(rorpin, tf)
+ return tf
+ else: return None
+ def final(tf):
+ if tf and tf.lstat():
+ if rpout.isdir(): rpout.delete()
+ tf.rename(rpout)
+ return rpout
+ def error(exc, ran_init, init_val):
+ if tfl[0]: tfl[0].delete()
+ return Action(init, final, error)
+
+def copy_with_attribs_action(rorpin, rpout, compress = None):
+ """Like copy_action but also copy attributes"""
+ tfl = [None] # Need some mutable state for error handler
+ def init():
+ if not (rorpin.isdir() and rpout.isdir()): # already a dir
+ tfl[0] = tf = TempFile.new(rpout)
+ if rorpin.isreg():
+ tf.write_from_fileobj(rorpin.open("rb"), compress)
+ else: rpath.copy(rorpin, tf)
+ if tf.lstat(): # Some files, like sockets, won't be created
+ rpath.copy_attribs(rorpin, tf)
+ return tf
+ else: return None
+ def final(tf):
+ if rorpin.isdir() and rpout.isdir():
+ rpath.copy_attribs(rorpin, rpout)
+ elif tf and tf.lstat():
+ if rpout.isdir(): rpout.delete() # can't rename over dir
+ tf.rename(rpout)
+ return rpout
+ def error(exc, ran_init, init_val):
+ if tfl[0]: tfl[0].delete()
+ return Action(init, final, error)
+
+def copy_attribs_action(rorpin, rpout):
+ """Return action which just copies attributes
+
+ Copying attributes is already pretty atomic, so just run
+ normal sequence.
+
+ """
+ def final(init_val):
+ rpath.copy_attribs(rorpin, rpout)
+ return rpout
+ return Action(None, final, None)
+
+def symlink_action(rpath, linktext):
+ """Return symlink action by moving one file over another"""
+ tf = TempFile.new(rpath)
+ def init(): tf.symlink(linktext)
+ return make_tf_robustaction(init, tf, rpath)
+
+def destructive_write_action(rp, s):
+ """Return action writing string s to rpath rp in robust way
+
+ This will overwrite any data currently in rp.
+
+ """
+ tf = TempFile.new(rp)
+ def init():
+ fp = tf.open("wb")
+ fp.write(s)
+ fp.close()
+ tf.setdata()
+ return make_tf_robustaction(init, tf, rp)
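destructive_write_action is the classic write-then-rename idiom: stage the new contents in a temp file, then rename over the target so readers see either the old data or the new, never a torn write. The same pattern with plain library calls (the temp-file name is illustrative only):

    import os

    def atomic_write(path, data):
        tmp = path + ".tmp"          # illustrative; must be on the same fs
        f = open(tmp, "wb")
        try: f.write(data)
        finally: f.close()
        os.rename(tmp, path)         # atomic on POSIX filesystems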
- def check_common_error(error_handler, function, args = []):
- """Apply function to args, if error, run error_handler on exception
+def check_common_error(error_handler, function, args = []):
+ """Apply function to args, if error, run error_handler on exception
- This uses the catch_error predicate below to only catch
-		certain exceptions which seem innocent enough
+ This uses the catch_error predicate below to only catch
+	certain exceptions which seem innocent enough
- """
- try: return function(*args)
- except Exception, exc:
- TracebackArchive.add([function] + list(args))
- if Robust.catch_error(exc):
- Log.exception()
- conn = Globals.backup_writer
- if conn is not None: # increment error count
- ITRB_exists = conn.Globals.is_not_None('ITRB')
- if ITRB_exists: conn.Globals.ITRB.increment_stat('Errors')
- if error_handler: return error_handler(exc, *args)
- else: return
- Log.exception(1, 2)
- raise
-
- def catch_error(exc):
- """Return true if exception exc should be caught"""
- for exception_class in (SkipFileException, DSRPPermError,
- RPathException, Rdiff.RdiffException,
- librsync.librsyncError,
- C.UnknownFileTypeError):
- if isinstance(exc, exception_class): return 1
- if (isinstance(exc, EnvironmentError) and
- errno.errorcode[exc[0]] in ('EPERM', 'ENOENT', 'EACCES', 'EBUSY',
- 'EEXIST', 'ENOTDIR', 'ENAMETOOLONG',
- 'EINTR', 'ENOTEMPTY', 'EIO', 'ETXTBSY',
- 'ESRCH', 'EINVAL')):
- return 1
- return 0
-
- def listrp(rp):
- """Like rp.listdir() but return [] if error, and sort results"""
- def error_handler(exc):
- Log("Error listing directory %s" % rp.path, 2)
- return []
- dir_listing = Robust.check_common_error(error_handler, rp.listdir)
- dir_listing.sort()
- return dir_listing
-
- def signal_handler(signum, frame):
- """This is called when signal signum is caught"""
- raise SignalException(signum)
-
- def install_signal_handlers():
- """Install signal handlers on current connection"""
- for signum in [signal.SIGQUIT, signal.SIGHUP, signal.SIGTERM]:
- signal.signal(signum, Robust.signal_handler)
-
-MakeStatic(Robust)
+ """
+ try: return function(*args)
+ except Exception, exc:
+ TracebackArchive.add([function] + list(args))
+ if catch_error(exc):
+ Log.exception()
+ conn = Globals.backup_writer
+ if conn is not None: # increment error count
+ ITRB_exists = conn.Globals.is_not_None('ITRB')
+ if ITRB_exists: conn.Globals.ITRB.increment_stat('Errors')
+ if error_handler: return error_handler(exc, *args)
+ else: return
+ Log.exception(1, 2)
+ raise
+
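listrp() below is the canonical caller, but the wrapper is easy to exercise on its own. A stripped-down miniature, with EnvironmentError standing in for the full catch_error() predicate:

    def check_common_error_sketch(error_handler, function, args=[]):
        try:
            return function(*args)
        except EnvironmentError as exc:    # stand-in for catch_error()
            if error_handler: return error_handler(exc, *args)
            return None

    def read_or_empty(path):
        def read(p):
            f = open(p, "rb")
            try: return f.read()
            finally: f.close()
        # On failure, degrade to empty contents instead of aborting
        return check_common_error_sketch(lambda exc, p: "", read, [path])

    assert read_or_empty("/no/such/file/anywhere") == ""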
+def catch_error(exc):
+ """Return true if exception exc should be caught"""
+
+ for exception_class in (rpath.SkipFileException, rpath.RPathException,
+ librsync.librsyncError, C.UnknownFileTypeError):
+ if isinstance(exc, exception_class): return 1
+ if (isinstance(exc, EnvironmentError) and
+ errno.errorcode[exc[0]] in ('EPERM', 'ENOENT', 'EACCES', 'EBUSY',
+ 'EEXIST', 'ENOTDIR', 'ENAMETOOLONG',
+ 'EINTR', 'ENOTEMPTY', 'EIO', 'ETXTBSY',
+ 'ESRCH', 'EINVAL')):
+ return 1
+ return 0
+
+def listrp(rp):
+ """Like rp.listdir() but return [] if error, and sort results"""
+ def error_handler(exc):
+ Log("Error listing directory %s" % rp.path, 2)
+ return []
+ dir_listing = check_common_error(error_handler, rp.listdir)
+ dir_listing.sort()
+ return dir_listing
+
+def signal_handler(signum, frame):
+ """This is called when signal signum is caught"""
+ raise SignalException(signum)
+
+def install_signal_handlers():
+ """Install signal handlers on current connection"""
+ for signum in [signal.SIGQUIT, signal.SIGHUP, signal.SIGTERM]:
+ signal.signal(signum, signal_handler)
class SignalException(Exception):
@@ -335,91 +333,7 @@ class TracebackArchive:
"-------------------------------------------" %
("\n".join(cls._traceback_strings),), 3)
-MakeClass(TracebackArchive)
-
-
-class TempFileManager:
- """Manage temp files"""
-
- # This is a connection-specific list of temp files, to be cleaned
- # up before rdiff-backup exits.
- _tempfiles = []
-
- # To make collisions less likely, this gets put in the file name
- # and incremented whenever a new file is requested.
- _tfindex = 0
-
- def new(cls, rp_base, same_dir = 1):
- """Return new tempfile that isn't in use.
-
- If same_dir, tempfile will be in same directory as rp_base.
- Otherwise, use tempfile module to get filename.
-
- """
- conn = rp_base.conn
- if conn is not Globals.local_connection:
- return conn.TempFileManager.new(rp_base, same_dir)
-
- def find_unused(conn, dir):
- """Find an unused tempfile with connection conn in directory dir"""
- while 1:
- if cls._tfindex > 100000000:
- Log("Resetting index", 2)
- cls._tfindex = 0
- tf = TempFile(conn, os.path.join(dir,
- "rdiff-backup.tmp.%d" % cls._tfindex))
- cls._tfindex = cls._tfindex+1
- if not tf.lstat(): return tf
-
- if same_dir: tf = find_unused(conn, rp_base.dirsplit()[0])
- else: tf = TempFile(conn, tempfile.mktemp())
- cls._tempfiles.append(tf)
- return tf
-
- def remove_listing(cls, tempfile):
- """Remove listing of tempfile"""
- if Globals.local_connection is not tempfile.conn:
- tempfile.conn.TempFileManager.remove_listing(tempfile)
- elif tempfile in cls._tempfiles: cls._tempfiles.remove(tempfile)
-
- def delete_all(cls):
- """Delete all remaining tempfiles"""
- for tf in cls._tempfiles[:]: tf.delete()
-
-MakeClass(TempFileManager)
-
-
-from rpath import *
-
-class TempFile(RPath):
- """Like an RPath, but keep track of which ones are still here"""
- def rename(self, rp_dest):
- """Rename temp file to permanent location, possibly overwriting"""
- if self.isdir() and not rp_dest.isdir():
- # Cannot move a directory directly over another file
- rp_dest.delete()
- if (isinstance(rp_dest, DSRPath) and rp_dest.delay_perms
- and not self.hasfullperms()):
- # If we are moving to a delayed perm directory, delay
- # permission change on destination.
- rp_dest.chmod(self.getperms())
- self.chmod(0700)
- RPathStatic.rename(self, rp_dest)
-
- # Sometimes this just seems to fail silently, as in one
- # hardlinked twin is moved over the other. So check to make
- # sure below.
- self.setdata()
- if self.lstat():
- rp_dest.delete()
- RPathStatic.rename(self, rp_dest)
- self.setdata()
- if self.lstat(): raise OSError("Cannot rename tmp file correctly")
- TempFileManager.remove_listing(self)
-
- def delete(self):
- RPath.delete(self)
- TempFileManager.remove_listing(self)
+static.MakeClass(TracebackArchive)
class SaveState:
@@ -470,9 +384,8 @@ class SaveState:
if last_file_rorp:
symtext = apply(os.path.join,
('increments',) + last_file_rorp.index)
- return Robust.symlink_action(cls._last_file_sym, symtext)
- else: return RobustAction(None, lambda init_val: cls.touch_last_file(),
- None)
+ return symlink_action(cls._last_file_sym, symtext)
+ else: return Action(None, lambda init_val: cls.touch_last_file(), None)
def checkpoint(cls, ITR, finalizer, last_file_rorp, override = None):
"""Save states of tree reducer and finalizer during inc backup
@@ -486,9 +399,8 @@ class SaveState:
cls._last_checkpoint_time = time.time()
Log("Writing checkpoint time %s" % cls._last_checkpoint_time, 7)
state_string = cPickle.dumps((ITR, finalizer))
- Robust.chain(Robust.destructive_write_action(cls._checkpoint_rp,
- state_string),
- cls.record_last_file_action(last_file_rorp)).execute()
+ chain(destructive_write_action(cls._checkpoint_rp, state_string),
+ cls.record_last_file_action(last_file_rorp)).execute()
def checkpoint_needed(cls):
"""Returns true if another checkpoint is called for"""
@@ -500,7 +412,7 @@ class SaveState:
for rp in Resume.get_relevant_rps(): rp.delete()
if Globals.preserve_hardlinks: Hardlink.remove_all_checkpoints()
-MakeClass(SaveState)
+static.MakeClass(SaveState)
class ResumeException(Exception):
@@ -527,8 +439,8 @@ class Resume:
for si in cls.get_sis_covering_index(index):
if si.time > later_than: return si.time
- raise SkipFileException("Index %s already covered, skipping" %
- str(index))
+ raise rpath.SkipFileException("Index %s already covered, skipping" %
+ str(index))
def get_sis_covering_index(cls, index):
"""Return sorted list of SessionInfos which may cover index
@@ -667,7 +579,7 @@ class Resume:
return None
assert None
-MakeClass(Resume)
+static.MakeClass(Resume)
class ResumeSessionInfo:
@@ -691,8 +603,3 @@ class ResumeSessionInfo:
self.ITR, self.finalizer, = ITR, finalizer
-from log import *
-from destructive_stepping import *
-import Time, Rdiff, librsync
-from highlevel import *
-
diff --git a/rdiff-backup/rdiff_backup/rorpiter.py b/rdiff-backup/rdiff_backup/rorpiter.py
index 2e9bd06..875ab1e 100644
--- a/rdiff-backup/rdiff_backup/rorpiter.py
+++ b/rdiff-backup/rdiff_backup/rorpiter.py
@@ -17,248 +17,240 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
-"""Operations on Iterators of Read Only Remote Paths"""
+"""Operations on Iterators of Read Only Remote Paths
+
+The main structure will be an iterator that yields RORPaths.
+Every RORPath has a "raw" form that makes it more amenable to
+being turned into a file. The raw form of the iterator yields
+each RORPath in the form of the tuple (index, data_dictionary,
+files), where files is the number of files attached (usually 1 or
+0). After that, if a file is attached, it yields that file.
+
+"""
from __future__ import generators
-import tempfile, UserList, types, librsync
-from static import *
-from log import *
-from rpath import *
-from robust import *
-from iterfile import *
-import Globals, Rdiff, Hardlink
+import tempfile, UserList, types, librsync, Globals, Rdiff, \
+ Hardlink, robust, log, static, rpath, iterfile, TempFile
+
class RORPIterException(Exception): pass
-class RORPIter:
- """Functions relating to iterators of Read Only RPaths
+def ToRaw(rorp_iter):
+ """Convert a rorp iterator to raw form"""
+ for rorp in rorp_iter:
+ if rorp.file:
+ yield (rorp.index, rorp.data, 1)
+ yield rorp.file
+ else: yield (rorp.index, rorp.data, 0)
+
+def FromRaw(raw_iter):
+ """Convert raw rorp iter back to standard form"""
+ for index, data, num_files in raw_iter:
+ rorp = rpath.RORPath(index, data)
+ if num_files:
+ assert num_files == 1, "Only one file accepted right now"
+ rorp.setfile(getnext(raw_iter))
+ yield rorp
+
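The raw form described in the module docstring is easy to check with a round trip. A self-contained sketch using a stand-in object with just the fields ToRaw/FromRaw touch:

    class FakeRORP:
        """Stand-in with only index, data and file attributes."""
        def __init__(self, index, data, file=None):
            self.index, self.data, self.file = index, data, file
        def setfile(self, f): self.file = f

    def to_raw(rorp_iter):
        for rorp in rorp_iter:
            if rorp.file:
                yield (rorp.index, rorp.data, 1)
                yield rorp.file                # file follows its triple
            else: yield (rorp.index, rorp.data, 0)

    def from_raw(raw_iter):
        for index, data, num_files in raw_iter:
            rorp = FakeRORP(index, data)
            if num_files: rorp.setfile(next(raw_iter))
            yield rorp

    orig = [FakeRORP(("a",), {"type": "reg"}, "FILEOBJ"),
            FakeRORP(("b",), {"type": "dir"})]
    back = list(from_raw(to_raw(orig)))
    assert back[0].file == "FILEOBJ" and back[1].file is None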
+def ToFile(rorp_iter):
+ """Return file version of iterator"""
+ return iterfile.FileWrappingIter(ToRaw(rorp_iter))
+
+def FromFile(fileobj):
+ """Recover rorp iterator from file interface"""
+ return FromRaw(iterfile.IterWrappingFile(fileobj))
+
+def IterateRPaths(base_rp):
+ """Return an iterator yielding RPaths with given base rp"""
+ yield base_rp
+ if base_rp.isdir():
+ dirlisting = base_rp.listdir()
+ dirlisting.sort()
+ for filename in dirlisting:
+ for rp in IterateRPaths(base_rp.append(filename)):
+ yield rp
+
+def Signatures(rp_iter):
+ """Yield signatures of rpaths in given rp_iter"""
+ def error_handler(exc, rp):
+ log.Log("Error generating signature for %s" % rp.path)
+ return None
+
+ for rp in rp_iter:
+ if rp.isplaceholder(): yield rp
+ else:
+ rorp = rp.getRORPath()
+ if rp.isreg():
+ if rp.isflaglinked(): rorp.flaglinked()
+ else:
+ fp = robust.check_common_error(
+ error_handler, Rdiff.get_signature, (rp,))
+ if fp: rorp.setfile(fp)
+ else: continue
+ yield rorp
+
+def GetSignatureIter(base_rp):
+	"""Return a signature iterator recursing over the base_rp"""
+ return Signatures(IterateRPaths(base_rp))
+
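
# A minimal sketch (not part of the patch) of the handler pattern Signatures
# relies on; check_common_error here is an illustration of the robust helper,
# not its real implementation:
def check_common_error(error_handler, function, args):
	try: return function(*args)
	except EnvironmentError, exc:	# recoverable, per-file errors only
		return error_handler(exc, *args)

def handler(exc, path): return None	# real handler logs, caller then skips

assert check_common_error(handler, open, ("/nonexistent/file",)) is None
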
+def CollateIterators(*rorp_iters):
+ """Collate RORPath iterators by index
- The main structure will be an iterator that yields RORPaths.
- Every RORPath has a "raw" form that makes it more amenable to
- being turned into a file. The raw form of the iterator yields
- each RORPath in the form of the tuple (index, data_dictionary,
- files), where files is the number of files attached (usually 1 or
- 0). After that, if a file is attached, it yields that file.
+	It takes two or more iterators of rorps and returns an
+	iterator yielding tuples like (rorp1, rorp2) with the same
+	index; if an iterator lacks that index, its slot is None.
"""
- def ToRaw(rorp_iter):
- """Convert a rorp iterator to raw form"""
- for rorp in rorp_iter:
- if rorp.file:
- yield (rorp.index, rorp.data, 1)
- yield rorp.file
- else: yield (rorp.index, rorp.data, 0)
-
- def FromRaw(raw_iter):
- """Convert raw rorp iter back to standard form"""
- for index, data, num_files in raw_iter:
- rorp = RORPath(index, data)
- if num_files:
- assert num_files == 1, "Only one file accepted right now"
- rorp.setfile(RORPIter.getnext(raw_iter))
- yield rorp
+ # overflow[i] means that iter[i] has been exhausted
+ # rorps[i] is None means that it is time to replenish it.
+ iter_num = len(rorp_iters)
+ if iter_num == 2:
+ return Collate2Iters(rorp_iters[0], rorp_iters[1])
+ overflow = [None] * iter_num
+ rorps = overflow[:]
+
+ def setrorps(overflow, rorps):
+ """Set the overflow and rorps list"""
+ for i in range(iter_num):
+ if not overflow[i] and rorps[i] is None:
+ try: rorps[i] = rorp_iters[i].next()
+ except StopIteration:
+ overflow[i] = 1
+ rorps[i] = None
- def ToFile(rorp_iter):
- """Return file version of iterator"""
- return FileWrappingIter(RORPIter.ToRaw(rorp_iter))
-
- def FromFile(fileobj):
- """Recover rorp iterator from file interface"""
- return RORPIter.FromRaw(IterWrappingFile(fileobj))
-
- def IterateRPaths(base_rp):
- """Return an iterator yielding RPaths with given base rp"""
- yield base_rp
- if base_rp.isdir():
- dirlisting = base_rp.listdir()
- dirlisting.sort()
- for filename in dirlisting:
- for rp in RORPIter.IterateRPaths(base_rp.append(filename)):
- yield rp
-
- def Signatures(rp_iter):
- """Yield signatures of rpaths in given rp_iter"""
- def error_handler(exc, rp):
- Log("Error generating signature for %s" % rp.path)
- return None
-
- for rp in rp_iter:
- if rp.isplaceholder(): yield rp
- else:
- rorp = rp.getRORPath()
- if rp.isreg():
- if rp.isflaglinked(): rorp.flaglinked()
- else:
- fp = Robust.check_common_error(
- error_handler, Rdiff.get_signature, (rp,))
- if fp: rorp.setfile(fp)
- else: continue
- yield rorp
-
- def GetSignatureIter(base_rp):
- """Return a signature iterator recurring over the base_rp"""
- return RORPIter.Signatures(RORPIter.IterateRPaths(base_rp))
-
- def CollateIterators(*rorp_iters):
- """Collate RORPath iterators by index
-
- So it takes two or more iterators of rorps and returns an
- iterator yielding tuples like (rorp1, rorp2) with the same
- index. If one or the other lacks that index, it will be None
+ def getleastindex(rorps):
+ """Return the first index in rorps, assuming rorps isn't empty"""
+ return min(map(lambda rorp: rorp.index,
+ filter(lambda x: x, rorps)))
- """
- # overflow[i] means that iter[i] has been exhausted
- # rorps[i] is None means that it is time to replenish it.
- iter_num = len(rorp_iters)
- if iter_num == 2:
- return RORPIter.Collate2Iters(rorp_iters[0], rorp_iters[1])
- overflow = [None] * iter_num
- rorps = overflow[:]
-
- def setrorps(overflow, rorps):
- """Set the overflow and rorps list"""
- for i in range(iter_num):
- if not overflow[i] and rorps[i] is None:
- try: rorps[i] = rorp_iters[i].next()
- except StopIteration:
- overflow[i] = 1
- rorps[i] = None
-
- def getleastindex(rorps):
- """Return the first index in rorps, assuming rorps isn't empty"""
- return min(map(lambda rorp: rorp.index,
- filter(lambda x: x, rorps)))
-
- def yield_tuples(iter_num, overflow, rorps):
- while 1:
- setrorps(overflow, rorps)
- if not None in overflow: break
-
- index = getleastindex(rorps)
- yieldval = []
- for i in range(iter_num):
- if rorps[i] and rorps[i].index == index:
- yieldval.append(rorps[i])
- rorps[i] = None
- else: yieldval.append(None)
- yield IndexedTuple(index, yieldval)
- return yield_tuples(iter_num, overflow, rorps)
-
- def Collate2Iters(riter1, riter2):
- """Special case of CollateIterators with 2 arguments
-
- This does the same thing but is faster because it doesn't have
- to consider the >2 iterator case. Profiler says speed is
- important here.
-
- """
- relem1, relem2 = None, None
+ def yield_tuples(iter_num, overflow, rorps):
while 1:
- if not relem1:
- try: relem1 = riter1.next()
- except StopIteration:
- if relem2: yield IndexedTuple(index2, (None, relem2))
- for relem2 in riter2:
- yield IndexedTuple(relem2.index, (None, relem2))
- break
- index1 = relem1.index
- if not relem2:
- try: relem2 = riter2.next()
- except StopIteration:
- if relem1: yield IndexedTuple(index1, (relem1, None))
- for relem1 in riter1:
- yield IndexedTuple(relem1.index, (relem1, None))
- break
- index2 = relem2.index
-
- if index1 < index2:
- yield IndexedTuple(index1, (relem1, None))
- relem1 = None
- elif index1 == index2:
- yield IndexedTuple(index1, (relem1, relem2))
- relem1, relem2 = None, None
- else: # index2 is less
- yield IndexedTuple(index2, (None, relem2))
- relem2 = None
-
- def getnext(iter):
- """Return the next element of an iterator, raising error if none"""
- try: next = iter.next()
- except StopIteration: raise RORPIterException("Unexpected end to iter")
- return next
-
- def GetDiffIter(sig_iter, new_iter):
- """Return delta iterator from sig_iter to new_iter
-
- The accompanying file for each will be a delta as produced by
- rdiff, unless the destination file does not exist, in which
- case it will be the file in its entirety.
-
- sig_iter may be composed of rorps, but new_iter should have
- full RPaths.
+ setrorps(overflow, rorps)
+			if None not in overflow: break
- """
- collated_iter = RORPIter.CollateIterators(sig_iter, new_iter)
- for rorp, rp in collated_iter: yield RORPIter.diffonce(rorp, rp)
-
- def diffonce(sig_rorp, new_rp):
- """Return one diff rorp, based from signature rorp and orig rp"""
- if sig_rorp and Globals.preserve_hardlinks and sig_rorp.isflaglinked():
- if new_rp: diff_rorp = new_rp.getRORPath()
- else: diff_rorp = RORPath(sig_rorp.index)
- diff_rorp.flaglinked()
- return diff_rorp
- elif sig_rorp and sig_rorp.isreg() and new_rp and new_rp.isreg():
- diff_rorp = new_rp.getRORPath()
- #fp = sig_rorp.open("rb")
- #print "---------------------", fp
- #tmp_sig_rp = RPath(Globals.local_connection, "/tmp/sig")
- #tmp_sig_rp.delete()
- #tmp_sig_rp.write_from_fileobj(fp)
- #diff_rorp.setfile(Rdiff.get_delta_sigfileobj(tmp_sig_rp.open("rb"),
- # new_rp))
- diff_rorp.setfile(Rdiff.get_delta_sigfileobj(sig_rorp.open("rb"),
- new_rp))
- diff_rorp.set_attached_filetype('diff')
+ index = getleastindex(rorps)
+ yieldval = []
+ for i in range(iter_num):
+ if rorps[i] and rorps[i].index == index:
+ yieldval.append(rorps[i])
+ rorps[i] = None
+ else: yieldval.append(None)
+ yield IndexedTuple(index, yieldval)
+ return yield_tuples(iter_num, overflow, rorps)
+
+def Collate2Iters(riter1, riter2):
+ """Special case of CollateIterators with 2 arguments
+
+ This does the same thing but is faster because it doesn't have
+ to consider the >2 iterator case. Profiler says speed is
+ important here.
+
+ """
+ relem1, relem2 = None, None
+ while 1:
+ if not relem1:
+ try: relem1 = riter1.next()
+ except StopIteration:
+ if relem2: yield IndexedTuple(index2, (None, relem2))
+ for relem2 in riter2:
+ yield IndexedTuple(relem2.index, (None, relem2))
+ break
+ index1 = relem1.index
+ if not relem2:
+ try: relem2 = riter2.next()
+ except StopIteration:
+ if relem1: yield IndexedTuple(index1, (relem1, None))
+ for relem1 in riter1:
+ yield IndexedTuple(relem1.index, (relem1, None))
+ break
+ index2 = relem2.index
+
+ if index1 < index2:
+ yield IndexedTuple(index1, (relem1, None))
+ relem1 = None
+ elif index1 == index2:
+ yield IndexedTuple(index1, (relem1, relem2))
+ relem1, relem2 = None, None
+ else: # index2 is less
+ yield IndexedTuple(index2, (None, relem2))
+ relem2 = None
+
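
# A minimal sketch (not part of the patch) of the collation contract, as a
# simplified two-pointer merge over index-only stand-in objects:
class Item:
	def __init__(self, index): self.index = index

def collate2(iter1, iter2):
	elem1, elem2 = next(iter1, None), next(iter2, None)
	while elem1 is not None or elem2 is not None:
		if elem2 is None or (elem1 is not None and elem1.index < elem2.index):
			yield (elem1.index, (elem1, None)); elem1 = next(iter1, None)
		elif elem1 is None or elem2.index < elem1.index:
			yield (elem2.index, (None, elem2)); elem2 = next(iter2, None)
		else:	# present on both sides
			yield (elem1.index, (elem1, elem2))
			elem1, elem2 = next(iter1, None), next(iter2, None)

pairs = list(collate2(iter([Item((1,)), Item((3,))]),
					  iter([Item((1,)), Item((2,))])))
assert [index for index, pair in pairs] == [(1,), (2,), (3,)]
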
+def getnext(iter):
+ """Return the next element of an iterator, raising error if none"""
+ try: next = iter.next()
+ except StopIteration: raise RORPIterException("Unexpected end to iter")
+ return next
+
+def GetDiffIter(sig_iter, new_iter):
+ """Return delta iterator from sig_iter to new_iter
+
+ The accompanying file for each will be a delta as produced by
+ rdiff, unless the destination file does not exist, in which
+ case it will be the file in its entirety.
+
+ sig_iter may be composed of rorps, but new_iter should have
+ full RPaths.
+
+ """
+ collated_iter = CollateIterators(sig_iter, new_iter)
+ for rorp, rp in collated_iter: yield diffonce(rorp, rp)
+
+def diffonce(sig_rorp, new_rp):
+ """Return one diff rorp, based from signature rorp and orig rp"""
+ if sig_rorp and Globals.preserve_hardlinks and sig_rorp.isflaglinked():
+ if new_rp: diff_rorp = new_rp.getRORPath()
+ else: diff_rorp = rpath.RORPath(sig_rorp.index)
+ diff_rorp.flaglinked()
+ return diff_rorp
+ elif sig_rorp and sig_rorp.isreg() and new_rp and new_rp.isreg():
+ diff_rorp = new_rp.getRORPath()
+ diff_rorp.setfile(Rdiff.get_delta_sigfileobj(sig_rorp.open("rb"),
+ new_rp))
+ diff_rorp.set_attached_filetype('diff')
+ return diff_rorp
+ else:
+		# Just send over original if diff isn't appropriate
+ if sig_rorp: sig_rorp.close_if_necessary()
+ if not new_rp: return rpath.RORPath(sig_rorp.index)
+ elif new_rp.isreg():
+ diff_rorp = new_rp.getRORPath(1)
+ diff_rorp.set_attached_filetype('snapshot')
return diff_rorp
- else:
- # Just send over originial if diff isn't appropriate
- if sig_rorp: sig_rorp.close_if_necessary()
- if not new_rp: return RORPath(sig_rorp.index)
- elif new_rp.isreg():
- diff_rorp = new_rp.getRORPath(1)
- diff_rorp.set_attached_filetype('snapshot')
- return diff_rorp
- else: return new_rp.getRORPath()
-
- def PatchIter(base_rp, diff_iter):
- """Patch the appropriate rps in basis_iter using diff_iter"""
- basis_iter = RORPIter.IterateRPaths(base_rp)
- collated_iter = RORPIter.CollateIterators(basis_iter, diff_iter)
- for basisrp, diff_rorp in collated_iter:
- RORPIter.patchonce_action(base_rp, basisrp, diff_rorp).execute()
-
- def patchonce_action(base_rp, basisrp, diff_rorp):
- """Return action patching basisrp using diff_rorp"""
- assert diff_rorp, "Missing diff index %s" % basisrp.index
- if not diff_rorp.lstat():
- return RobustAction(None, lambda init_val: basisrp.delete(), None)
-
- if Globals.preserve_hardlinks and diff_rorp.isflaglinked():
- if not basisrp: basisrp = base_rp.new_index(diff_rorp.index)
- tf = TempFileManager.new(basisrp)
- def init(): Hardlink.link_rp(diff_rorp, tf, basisrp)
- return Robust.make_tf_robustaction(init, tf, basisrp)
- elif basisrp and basisrp.isreg() and diff_rorp.isreg():
- if diff_rorp.get_attached_filetype() != 'diff':
- raise RPathException("File %s appears to have changed during"
- " processing, skipping" % (basisrp.path,))
- return Rdiff.patch_with_attribs_action(basisrp, diff_rorp)
- else: # Diff contains whole file, just copy it over
- if not basisrp: basisrp = base_rp.new_index(diff_rorp.index)
- return Robust.copy_with_attribs_action(diff_rorp, basisrp)
-
-MakeStatic(RORPIter)
+ else: return new_rp.getRORPath()
+
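
# A minimal sketch (not part of the patch) of diffonce's decision (the
# hardlink shortcut omitted), using dummies that expose only isreg():
class Dummy:
	def __init__(self, kind): self.kind = kind
	def isreg(self): return self.kind == 'reg'

def choose_transfer(sig, new):
	if sig and sig.isreg() and new and new.isreg(): return 'diff'
	elif new and new.isreg(): return 'snapshot'
	else: return 'metadata-only'

assert choose_transfer(Dummy('reg'), Dummy('reg')) == 'diff'	# rdiff delta
assert choose_transfer(None, Dummy('reg')) == 'snapshot'		# whole file
assert choose_transfer(Dummy('reg'), Dummy('dir')) == 'metadata-only'
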
+def PatchIter(base_rp, diff_iter):
+ """Patch the appropriate rps in basis_iter using diff_iter"""
+ basis_iter = IterateRPaths(base_rp)
+ collated_iter = CollateIterators(basis_iter, diff_iter)
+ for basisrp, diff_rorp in collated_iter:
+ patchonce_action(base_rp, basisrp, diff_rorp).execute()
+
+def patchonce_action(base_rp, basisrp, diff_rorp):
+ """Return action patching basisrp using diff_rorp"""
+ assert diff_rorp, "Missing diff index %s" % basisrp.index
+ if not diff_rorp.lstat():
+ return robust.Action(None, lambda init_val: basisrp.delete(), None)
+
+ if Globals.preserve_hardlinks and diff_rorp.isflaglinked():
+ if not basisrp: basisrp = base_rp.new_index(diff_rorp.index)
+ tf = TempFile.new(basisrp)
+ def init(): Hardlink.link_rp(diff_rorp, tf, basisrp)
+ return robust.make_tf_robustaction(init, tf, basisrp)
+ elif basisrp and basisrp.isreg() and diff_rorp.isreg():
+ if diff_rorp.get_attached_filetype() != 'diff':
+ raise rpath.RPathException("File %s appears to have changed during"
+ " processing, skipping" % (basisrp.path,))
+ return Rdiff.patch_with_attribs_action(basisrp, diff_rorp)
+ else: # Diff contains whole file, just copy it over
+ if not basisrp: basisrp = base_rp.new_index(diff_rorp.index)
+ return robust.copy_with_attribs_action(diff_rorp, basisrp)
class IndexedTuple(UserList.UserList):
@@ -299,3 +291,300 @@ class IndexedTuple(UserList.UserList):
def __str__(self):
return "(%s).%s" % (", ".join(map(str, self.data)), self.index)
+
+
+class DirHandler:
+ """Handle directories when entering and exiting in mirror
+
+ The problem is that we may need to write to a directory that may
+ have only read and exec permissions. Also, when leaving a
+ directory tree, we may have modified the directory and thus
+ changed the mod and access times. These need to be updated when
+ leaving.
+
+ """
+ def __init__(self, rootrp):
+ """DirHandler initializer - call with root rpath of mirror dir"""
+ self.rootrp = rootrp
+ assert rootrp.index == ()
+ self.cur_dir_index = None # Current directory we have descended into
+ self.last_index = None # last index processed
+
+		# This dictionary maps indices to (rpath, (atime, mtime),
+ # perms) triples. Either or both of the time pair and perms
+ # can be None, which means not to update the times or the
+ # perms when leaving. We don't have to update the perms if we
+ # didn't have to change them in the first place. If a
+ # directory is explicitly given, then we don't have to update
+ # anything because it will be done by the normal process.
+ self.index_dict = {}
+
+ def process_old_directories(self, new_dir_index):
+ """Update times/permissions for directories we are leaving
+
+		Return the length of the longest prefix that new_dir_index
+		shares with the previous directory index (everything up to
+		that depth has been seen before and needs no new-dir setup).
+
+ """
+ if self.cur_dir_index is None: return -1 # no previous directory
+
+ i = len(self.cur_dir_index)
+ while 1:
+ if new_dir_index[:i] == self.cur_dir_index[:i]:
+ return i
+ self.process_old_dir(self.cur_dir_index[:i])
+			i -= 1
+
+ def process_old_dir(self, dir_index):
+ """Process outstanding changes for given dir index"""
+ rpath, times, perms = self.index_dict[dir_index]
+ if times: apply(rpath.settime, times)
+ if perms: rpath.chmod(perms)
+
+ def init_new_dirs(self, rpath, new_dir_index, common_dir_index):
+ """Initialize any new directories
+
+ Record the time, and change permissions if no write access.
+ Use rpath if it is given to access permissions and times.
+
+ """
+ for i in range(common_dir_index, len(new_dir_index)):
+ process_index = new_dir_index[:i]
+ if rpath.index == process_index:
+ self.index_dict[process_index] = (None, None, None)
+ else:
+ new_rpath = self.rootrp.new_index(process_index)
+ if new_rpath.hasfullperms(): perms = None
+ else: perms = new_rpath.getperms()
+ times = (new_rpath.getatime(), new_rpath.getmtime())
+ self.index_dict[process_index] = new_rpath, times, perms
+
+ def __call__(self, rpath):
+ """Given rpath, process containing directories"""
+ if rpath.isdir(): new_dir_index = rpath.index
+ elif not rpath.index: return # no directory contains root
+ else: new_dir_index = rpath.index[:-1]
+
+ common_dir_index = self.process_old_directories(new_dir_index)
+ self.init_new_dirs(rpath, new_dir_index, common_dir_index)
+ self.cur_dir_index = new_dir_index
+
+ def Finish(self):
+ """Process any remaining directories"""
+		indices = self.index_dict.keys()
+		indices.sort()
+		assert len(indices) >= 1, indices
+		indices.reverse()
+		map(self.process_old_dir, indices)
+
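
# A minimal sketch (not part of the patch) of the two chores DirHandler
# automates, as plain local os calls on a hypothetical directory path:
import os, stat

def enter_dir(path):
	"""Save times/perms; add owner-write if the directory isn't writable"""
	st = os.stat(path)
	saved = (st.st_atime, st.st_mtime, stat.S_IMODE(st.st_mode))
	if not os.access(path, os.W_OK):
		os.chmod(path, saved[2] | stat.S_IWUSR)
	return saved

def leave_dir(path, saved):
	"""Undo any temporary write bit, then restore the disturbed times"""
	atime, mtime, perms = saved
	os.chmod(path, perms)
	os.utime(path, (atime, mtime))
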
+
+def FillInIter(rpiter, rootrp):
+	"""Given ordered rpiter and rootrp, fill in missing indices with rpaths
+
+	For instance, suppose rpiter contains rpaths with indices (),
+ (1,2), (2,5). Then return iter with rpaths (), (1,), (1,2), (2,),
+ (2,5). This is used when we need to process directories before or
+ after processing a file in that directory.
+
+ """
+ # Handle first element as special case
+ first_rp = rpiter.next() # StopIteration gets passed upwards
+ cur_index = first_rp.index
+ for i in range(len(cur_index)):
+ yield rootrp.new_index(cur_index[:i])
+ yield first_rp
+ del first_rp
+ old_index = cur_index
+
+	# Now do the others, filling in ancestors when the index prefix changes
+ for rp in rpiter:
+ cur_index = rp.index
+ if not cur_index[:-1] == old_index[:-1]: # Handle special case quickly
+ for i in range(1, len(cur_index)): # i==0 case already handled
+ if cur_index[:i] != old_index[:i]:
+ yield rootrp.new_index(cur_index[:i])
+ yield rp
+ old_index = cur_index
+
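
# A minimal sketch (not part of the patch) of FillInIter's contract on bare
# index tuples, reproducing the docstring's example (it assumes, as there,
# that the sequence starts at the root index ()):
def fill_in(indices):
	old, out = (), []
	for cur in indices:
		for i in range(len(cur)):
			if cur[:i] != old[:i]: out.append(cur[:i])	# missing ancestor
		out.append(cur)
		old = cur
	return out

assert fill_in([(), (1, 2), (2, 5)]) == [(), (1,), (1, 2), (2,), (2, 5)]
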
+
+class IterTreeReducer:
+ """Tree style reducer object for iterator
+
+	The indices of a RORPIter form a tree-like structure.  This class
+ can be used on each element of an iter in sequence and the result
+ will be as if the corresponding tree was reduced. This tries to
+ bridge the gap between the tree nature of directories, and the
+ iterator nature of the connection between hosts and the temporal
+ order in which the files are processed.
+
+ """
+ def __init__(self, branch_class, branch_args):
+ """ITR initializer"""
+ self.branch_class = branch_class
+ self.branch_args = branch_args
+ self.index = None
+ self.root_branch = branch_class(*branch_args)
+ self.branches = [self.root_branch]
+
+ def finish_branches(self, index):
+ """Run Finish() on all branches index has passed
+
+ When we pass out of a branch, delete it and process it with
+ the parent. The innermost branches will be the last in the
+ list. Return None if we are out of the entire tree, and 1
+ otherwise.
+
+ """
+ branches = self.branches
+ while 1:
+ to_be_finished = branches[-1]
+ base_index = to_be_finished.base_index
+ if base_index != index[:len(base_index)]:
+ # out of the tree, finish with to_be_finished
+ to_be_finished.call_end_proc()
+ del branches[-1]
+ if not branches: return None
+ branches[-1].branch_process(to_be_finished)
+ else: return 1
+
+ def add_branch(self, index):
+ """Return branch of type self.branch_class, add to branch list"""
+ branch = self.branch_class(*self.branch_args)
+ branch.base_index = index
+ self.branches.append(branch)
+ return branch
+
+ def process_w_branch(self, branch, args):
+ """Run start_process on latest branch"""
+ robust.check_common_error(branch.on_error,
+ branch.start_process, args)
+ if not branch.caught_exception: branch.start_successful = 1
+
+ def Finish(self):
+ """Call at end of sequence to tie everything up"""
+ while 1:
+ to_be_finished = self.branches.pop()
+ to_be_finished.call_end_proc()
+ if not self.branches: break
+ self.branches[-1].branch_process(to_be_finished)
+
+ def __call__(self, *args):
+ """Process args, where args[0] is current position in iterator
+
+ Returns true if args successfully processed, false if index is
+ not in the current tree and thus the final result is
+ available.
+
+ Also note below we set self.index after doing the necessary
+ start processing, in case there is a crash in the middle.
+
+ """
+ index = args[0]
+ if self.index is None:
+ self.root_branch.base_index = index
+ self.process_w_branch(self.root_branch, args)
+ self.index = index
+ return 1
+
+ if index <= self.index:
+ log.Log("Warning: oldindex %s >= newindex %s" %
+ (self.index, index), 2)
+ return 1
+
+ if self.finish_branches(index) is None:
+ return None # We are no longer in the main tree
+ last_branch = self.branches[-1]
+ if last_branch.start_successful:
+ if last_branch.can_fast_process(*args):
+ last_branch.fast_process(*args)
+ else:
+ branch = self.add_branch(index)
+ self.process_w_branch(branch, args)
+ else: last_branch.log_prev_error(index)
+
+ self.index = index
+ return 1
+
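
# A minimal sketch (not part of the patch) of what "reducing the
# corresponding tree" means, in direct recursive form over (index, size)
# pairs; IterTreeReducer computes the same per-subtree results incrementally
# from a flat iterator sorted in this index order:
tree = {(): 0, (1,): 0, (1, 1): 4, (1, 2): 6, (2,): 3}

def subtree_total(index):
	return sum([size for idx, size in tree.items()
				if idx[:len(index)] == index])

assert subtree_total((1,)) == 10	# (1,) plus children (1,1) and (1,2)
assert subtree_total(()) == 13		# the whole tree
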
+
+class ITRBranch:
+ """Helper class for IterTreeReducer below
+
+ There are five stub functions below: start_process, end_process,
+ branch_process, can_fast_process, and fast_process. A class that
+ subclasses this one will probably fill in these functions to do
+ more.
+
+	It is important that this class be picklable, so keep that in mind
+ when subclassing (this is used to resume failed sessions).
+
+ """
+ base_index = index = None
+ finished = None
+ caught_exception = start_successful = None
+
+ def call_end_proc(self):
+ """Runs the end_process on self, checking for errors"""
+ if self.finished or not self.start_successful:
+ self.caught_exception = 1
+ if self.caught_exception: self.log_prev_error(self.base_index)
+ else: robust.check_common_error(self.on_error, self.end_process)
+ self.finished = 1
+
+ def start_process(self, *args):
+ """Do some initial processing (stub)"""
+ pass
+
+ def end_process(self):
+ """Do any final processing before leaving branch (stub)"""
+ pass
+
+ def branch_process(self, branch):
+ """Process a branch right after it is finished (stub)"""
+ assert branch.finished
+
+ def can_fast_process(self, *args):
+ """True if object can be processed without new branch (stub)"""
+ return None
+
+ def fast_process(self, *args):
+ """Process args without new child branch (stub)"""
+ pass
+
+ def on_error(self, exc, *args):
+ """This is run on any exception in start/end-process"""
+ self.caught_exception = 1
+ if args and args[0] and isinstance(args[0], tuple):
+ filename = os.path.join(*args[0])
+ elif self.index: filename = os.path.join(*self.index)
+ else: filename = "."
+ log.Log("Error '%s' processing %s" % (exc, filename), 2)
+
+ def log_prev_error(self, index):
+		"""Log that the index is being skipped because of an earlier error"""
+ log.Log("Skipping %s because of previous error" %
+ (os.path.join(*index),), 2)
+
+
+class DestructiveSteppingFinalizer(ITRBranch):
+ """Finalizer that can work on an iterator of dsrpaths
+
+ The reason we have to use an IterTreeReducer is that some files
+ should be updated immediately, but for directories we sometimes
+ need to update all the files in the directory before finally
+ coming back to it.
+
+ """
+ dsrpath = None
+ def start_process(self, index, dsrpath):
+ self.dsrpath = dsrpath
+
+ def end_process(self):
+ if self.dsrpath: self.dsrpath.write_changes()
+
+ def can_fast_process(self, index, dsrpath):
+ return not self.dsrpath.isdir()
+
+ def fast_process(self, index, dsrpath):
+ if self.dsrpath: self.dsrpath.write_changes()
+
diff --git a/rdiff-backup/rdiff_backup/rpath.py b/rdiff-backup/rdiff_backup/rpath.py
index 5773dd7..98914ec 100644
--- a/rdiff-backup/rdiff_backup/rpath.py
+++ b/rdiff-backup/rdiff_backup/rpath.py
@@ -35,217 +35,215 @@ are dealing with are local or remote.
"""
-import os, stat, re, sys, shutil, gzip, socket
-from static import *
+import os, stat, re, sys, shutil, gzip, socket, time
+import Globals, FilenameMapping, Time, static, log
+class SkipFileException(Exception):
+	"""Signal that the current file should be skipped, but processing should continue
+
+	This exception will often be raised when there is a problem reading
+ an individual file, but it makes sense for the rest of the backup
+ to keep going.
+
+ """
+ pass
+
class RPathException(Exception): pass
-class RPathStatic:
- """Contains static methods for use with RPaths"""
- def copyfileobj(inputfp, outputfp):
- """Copies file inputfp to outputfp in blocksize intervals"""
- blocksize = Globals.blocksize
- while 1:
- inbuf = inputfp.read(blocksize)
- if not inbuf: break
- outputfp.write(inbuf)
-
- def cmpfileobj(fp1, fp2):
- """True if file objects fp1 and fp2 contain same data"""
- blocksize = Globals.blocksize
- while 1:
- buf1 = fp1.read(blocksize)
- buf2 = fp2.read(blocksize)
- if buf1 != buf2: return None
- elif not buf1: return 1
-
- def check_for_files(*rps):
- """Make sure that all the rps exist, raise error if not"""
- for rp in rps:
- if not rp.lstat():
- raise RPathException("File %s does not exist" % rp.path)
-
- def move(rpin, rpout):
- """Move rpin to rpout, renaming if possible"""
- try: RPath.rename(rpin, rpout)
- except os.error:
- RPath.copy(rpin, rpout)
- rpin.delete()
-
- def copy(rpin, rpout):
- """Copy RPath rpin to rpout. Works for symlinks, dirs, etc."""
- Log("Regular copying %s to %s" % (rpin.index, rpout.path), 6)
- if not rpin.lstat():
- raise RPathException, ("File %s does not exist" % rpin.index)
-
- if rpout.lstat():
- if rpin.isreg() or not RPath.cmp(rpin, rpout):
- rpout.delete() # easier to write that compare
- else: return
-
- if rpin.isreg(): RPath.copy_reg_file(rpin, rpout)
- elif rpin.isdir(): rpout.mkdir()
- elif rpin.issym(): rpout.symlink(rpin.readlink())
- elif rpin.ischardev():
- major, minor = rpin.getdevnums()
- rpout.makedev("c", major, minor)
- elif rpin.isblkdev():
- major, minor = rpin.getdevnums()
- rpout.makedev("b", major, minor)
- elif rpin.isfifo(): rpout.mkfifo()
- elif rpin.issock(): rpout.mksock()
- else: raise RPathException("File %s has unknown type" % rpin.path)
-
- def copy_reg_file(rpin, rpout):
- """Copy regular file rpin to rpout, possibly avoiding connection"""
- try:
- if rpout.conn is rpin.conn:
- rpout.conn.shutil.copyfile(rpin.path, rpout.path)
- rpout.setdata()
- return
- except AttributeError: pass
- rpout.write_from_fileobj(rpin.open("rb"))
-
- def cmp(rpin, rpout):
- """True if rpin has the same data as rpout
-
- cmp does not compare file ownership, permissions, or times, or
- examine the contents of a directory.
+def copyfileobj(inputfp, outputfp):
+ """Copies file inputfp to outputfp in blocksize intervals"""
+ blocksize = Globals.blocksize
+ while 1:
+ inbuf = inputfp.read(blocksize)
+ if not inbuf: break
+ outputfp.write(inbuf)
+
+def cmpfileobj(fp1, fp2):
+ """True if file objects fp1 and fp2 contain same data"""
+ blocksize = Globals.blocksize
+ while 1:
+ buf1 = fp1.read(blocksize)
+ buf2 = fp2.read(blocksize)
+ if buf1 != buf2: return None
+ elif not buf1: return 1
+
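
# A minimal sketch (not part of the patch): copyfileobj accepts any file-like
# objects, so StringIO can stand in for rpath file objects here, and the
# block size below stands in for Globals.blocksize:
from StringIO import StringIO

inputfp, outputfp = StringIO("some file data"), StringIO()
blocksize = 64 * 1024
while 1:
	inbuf = inputfp.read(blocksize)
	if not inbuf: break
	outputfp.write(inbuf)
assert outputfp.getvalue() == "some file data"
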
+def check_for_files(*rps):
+ """Make sure that all the rps exist, raise error if not"""
+ for rp in rps:
+ if not rp.lstat():
+ raise RPathException("File %s does not exist" % rp.path)
+
+def move(rpin, rpout):
+ """Move rpin to rpout, renaming if possible"""
+ try: rename(rpin, rpout)
+ except os.error:
+ copy(rpin, rpout)
+ rpin.delete()
+
+def copy(rpin, rpout):
+ """Copy RPath rpin to rpout. Works for symlinks, dirs, etc."""
+ log.Log("Regular copying %s to %s" % (rpin.index, rpout.path), 6)
+ if not rpin.lstat():
+		raise RPathException("File %s does not exist" % rpin.index)
+
+ if rpout.lstat():
+ if rpin.isreg() or not cmp(rpin, rpout):
+			rpout.delete() # easier to write than compare
+ else: return
+
+ if rpin.isreg(): copy_reg_file(rpin, rpout)
+ elif rpin.isdir(): rpout.mkdir()
+ elif rpin.issym(): rpout.symlink(rpin.readlink())
+ elif rpin.ischardev():
+ major, minor = rpin.getdevnums()
+ rpout.makedev("c", major, minor)
+ elif rpin.isblkdev():
+ major, minor = rpin.getdevnums()
+ rpout.makedev("b", major, minor)
+ elif rpin.isfifo(): rpout.mkfifo()
+ elif rpin.issock(): rpout.mksock()
+ else: raise RPathException("File %s has unknown type" % rpin.path)
+
+def copy_reg_file(rpin, rpout):
+ """Copy regular file rpin to rpout, possibly avoiding connection"""
+ try:
+ if rpout.conn is rpin.conn:
+ rpout.conn.shutil.copyfile(rpin.path, rpout.path)
+ rpout.setdata()
+ return
+ except AttributeError: pass
+ rpout.write_from_fileobj(rpin.open("rb"))
+
+def cmp(rpin, rpout):
+ """True if rpin has the same data as rpout
+
+ cmp does not compare file ownership, permissions, or times, or
+ examine the contents of a directory.
- """
- RPath.check_for_files(rpin, rpout)
- if rpin.isreg():
- if not rpout.isreg(): return None
- fp1, fp2 = rpin.open("rb"), rpout.open("rb")
- result = RPathStatic.cmpfileobj(fp1, fp2)
- if fp1.close() or fp2.close():
- raise RPathException("Error closing file")
- return result
- elif rpin.isdir(): return rpout.isdir()
- elif rpin.issym():
- return rpout.issym() and (rpin.readlink() == rpout.readlink())
- elif rpin.ischardev():
- return rpout.ischardev() and \
- (rpin.getdevnums() == rpout.getdevnums())
- elif rpin.isblkdev():
- return rpout.isblkdev() and \
- (rpin.getdevnums() == rpout.getdevnums())
- elif rpin.isfifo(): return rpout.isfifo()
- elif rpin.issock(): return rpout.issock()
- else: raise RPathException("File %s has unknown type" % rpin.path)
-
- def copy_attribs(rpin, rpout):
- """Change file attributes of rpout to match rpin
-
- Only changes the chmoddable bits, uid/gid ownership, and
- timestamps, so both must already exist.
+ """
+ check_for_files(rpin, rpout)
+ if rpin.isreg():
+ if not rpout.isreg(): return None
+ fp1, fp2 = rpin.open("rb"), rpout.open("rb")
+ result = cmpfileobj(fp1, fp2)
+ if fp1.close() or fp2.close():
+ raise RPathException("Error closing file")
+ return result
+ elif rpin.isdir(): return rpout.isdir()
+ elif rpin.issym():
+ return rpout.issym() and (rpin.readlink() == rpout.readlink())
+ elif rpin.ischardev():
+ return rpout.ischardev() and \
+ (rpin.getdevnums() == rpout.getdevnums())
+ elif rpin.isblkdev():
+ return rpout.isblkdev() and \
+ (rpin.getdevnums() == rpout.getdevnums())
+ elif rpin.isfifo(): return rpout.isfifo()
+ elif rpin.issock(): return rpout.issock()
+ else: raise RPathException("File %s has unknown type" % rpin.path)
+
+def copy_attribs(rpin, rpout):
+ """Change file attributes of rpout to match rpin
+
+ Only changes the chmoddable bits, uid/gid ownership, and
+ timestamps, so both must already exist.
- """
- Log("Copying attributes from %s to %s" % (rpin.index, rpout.path), 7)
- RPath.check_for_files(rpin, rpout)
- if rpin.issym(): return # symlinks have no valid attributes
- if Globals.change_ownership: apply(rpout.chown, rpin.getuidgid())
- rpout.chmod(rpin.getperms())
- if not rpin.isdev(): rpout.setmtime(rpin.getmtime())
+ """
+ log.Log("Copying attributes from %s to %s" %
+ (rpin.index, rpout.path), 7)
+ check_for_files(rpin, rpout)
+ if rpin.issym(): return # symlinks have no valid attributes
+ if Globals.change_ownership: apply(rpout.chown, rpin.getuidgid())
+ rpout.chmod(rpin.getperms())
+ if not rpin.isdev(): rpout.setmtime(rpin.getmtime())
- def cmp_attribs(rp1, rp2):
- """True if rp1 has the same file attributes as rp2
+def cmp_attribs(rp1, rp2):
+ """True if rp1 has the same file attributes as rp2
- Does not compare file access times. If not changing
- ownership, do not check user/group id.
+ Does not compare file access times. If not changing
+ ownership, do not check user/group id.
- """
- RPath.check_for_files(rp1, rp2)
- if Globals.change_ownership and rp1.getuidgid() != rp2.getuidgid():
- result = None
- elif rp1.getperms() != rp2.getperms(): result = None
- elif rp1.issym() and rp2.issym(): # Don't check times for some types
- result = 1
- elif rp1.isblkdev() and rp2.isblkdev(): result = 1
- elif rp1.ischardev() and rp2.ischardev(): result = 1
- else: result = (rp1.getmtime() == rp2.getmtime())
- Log("Compare attribs %s and %s: %s" % (rp1.path, rp2.path, result), 7)
- return result
+ """
+ check_for_files(rp1, rp2)
+ if Globals.change_ownership and rp1.getuidgid() != rp2.getuidgid():
+ result = None
+ elif rp1.getperms() != rp2.getperms(): result = None
+ elif rp1.issym() and rp2.issym(): # Don't check times for some types
+ result = 1
+ elif rp1.isblkdev() and rp2.isblkdev(): result = 1
+ elif rp1.ischardev() and rp2.ischardev(): result = 1
+ else: result = (rp1.getmtime() == rp2.getmtime())
+ log.Log("Compare attribs of %s and %s: %s" %
+ (rp1.path, rp2.path, result), 7)
+ return result
+
+def copy_with_attribs(rpin, rpout):
+ """Copy file and then copy over attributes"""
+ copy(rpin, rpout)
+ copy_attribs(rpin, rpout)
+
+def quick_cmp_with_attribs(rp1, rp2):
+ """Quicker version of cmp_with_attribs
+
+ Instead of reading all of each file, assume that regular files
+ are the same if the attributes compare.
- def copy_with_attribs(rpin, rpout):
- """Copy file and then copy over attributes"""
- RPath.copy(rpin, rpout)
- RPath.copy_attribs(rpin, rpout)
+ """
+ if not cmp_attribs(rp1, rp2): return None
+ if rp1.isreg() and rp2.isreg() and (rp1.getlen() == rp2.getlen()):
+ return 1
+ return cmp(rp1, rp2)
- def quick_cmp_with_attribs(rp1, rp2):
- """Quicker version of cmp_with_attribs
+def cmp_with_attribs(rp1, rp2):
+ """Combine cmp and cmp_attribs"""
+ return cmp_attribs(rp1, rp2) and cmp(rp1, rp2)
- Instead of reading all of each file, assume that regular files
- are the same if the attributes compare.
+def rename(rp_source, rp_dest):
+ """Rename rp_source to rp_dest"""
+ assert rp_source.conn is rp_dest.conn
+ log.Log(lambda: "Renaming %s to %s" %
+ (rp_source.path, rp_dest.path), 7)
+ rp_source.conn.os.rename(rp_source.path, rp_dest.path)
+ rp_dest.data = rp_source.data
+ rp_source.data = {'type': None}
- """
- if not RPath.cmp_attribs(rp1, rp2): return None
- if rp1.isreg() and rp2.isreg() and (rp1.getlen() == rp2.getlen()):
- return 1
- return RPath.cmp(rp1, rp2)
-
- def cmp_with_attribs(rp1, rp2):
- """Combine cmp and cmp_attribs"""
- return RPath.cmp_attribs(rp1, rp2) and RPath.cmp(rp1, rp2)
-
- def rename(rp_source, rp_dest):
- """Rename rp_source to rp_dest"""
- assert rp_source.conn is rp_dest.conn
- Log(lambda: "Renaming %s to %s" % (rp_source.path, rp_dest.path), 7)
- rp_source.conn.os.rename(rp_source.path, rp_dest.path)
- rp_dest.data = rp_source.data
- rp_source.data = {'type': None}
-
- # If we are moving to a DSRPath, assume that the current times
- # are the intended ones. We need to save them now in case
- # they are changed later.
- if isinstance(rp_dest, DSRPath):
- if rp_dest.delay_mtime:
- if 'mtime' in rp_dest.data:
- rp_dest.setmtime(rp_dest.data['mtime'])
- if rp_dest.delay_atime:
- if 'atime' in rp_dest.data:
- rp_dest.setatime(rp_dest.data['atime'])
-
- def tupled_lstat(filename):
- """Like os.lstat, but return only a tuple, or None if os.error
-
- Later versions of os.lstat return a special lstat object,
- which can confuse the pickler and cause errors in remote
- operations. This has been fixed in Python 2.2.1.
+def tupled_lstat(filename):
+ """Like os.lstat, but return only a tuple, or None if os.error
- """
- try: return tuple(os.lstat(filename))
- except os.error: return None
+ Later versions of os.lstat return a special lstat object,
+ which can confuse the pickler and cause errors in remote
+ operations. This has been fixed in Python 2.2.1.
- def make_socket_local(rpath):
- """Make a local socket at the given path
+ """
+ try: return tuple(os.lstat(filename))
+ except os.error: return None
- This takes an rpath so that it will be checked by Security.
- (Miscellaneous strings will not be.)
+def make_socket_local(rpath):
+ """Make a local socket at the given path
- """
- assert rpath.conn is Globals.local_connection
- s = socket.socket(socket.AF_UNIX)
- try: s.bind(rpath.path)
- except socket.error, exc:
- raise SkipFileException("Socket error: " + str(exc))
+ This takes an rpath so that it will be checked by Security.
+ (Miscellaneous strings will not be.)
- def gzip_open_local_read(rpath):
- """Return open GzipFile. See security note directly above"""
- assert rpath.conn is Globals.local_connection
- return gzip.GzipFile(rpath.path, "rb")
+ """
+ assert rpath.conn is Globals.local_connection
+ s = socket.socket(socket.AF_UNIX)
+ try: s.bind(rpath.path)
+ except socket.error, exc:
+ raise SkipFileException("Socket error: " + str(exc))
- def open_local_read(rpath):
- """Return open file (provided for security reasons)"""
- assert rpath.conn is Globals.local_connection
- return open(rpath.path, "rb")
+def gzip_open_local_read(rpath):
+ """Return open GzipFile. See security note directly above"""
+ assert rpath.conn is Globals.local_connection
+ return gzip.GzipFile(rpath.path, "rb")
-MakeStatic(RPathStatic)
+def open_local_read(rpath):
+ """Return open file (provided for security reasons)"""
+ assert rpath.conn is Globals.local_connection
+ return open(rpath.path, "rb")
-class RORPath(RPathStatic):
+class RORPath:
"""Read Only RPath - carry information about a path
These contain information about a file, and possibly the file's
@@ -280,7 +278,7 @@ class RORPath(RPathStatic):
def equal_verbose(self, other):
"""Like __eq__, but log more information. Useful when testing"""
if self.index != other.index:
- Log("Index %s != index %s" % (self.index, other.index), 2)
+ log.Log("Index %s != index %s" % (self.index, other.index), 2)
return None
for key in self.data.keys(): # compare dicts key by key
@@ -289,16 +287,16 @@ class RORPath(RPathStatic):
# Don't compare gid/uid for symlinks or if not change_ownership
pass
elif key == 'mtime':
- Log("%s differs only in mtime, skipping" % (self.path,), 2)
+ log.Log("%s differs only in mtime, skipping" % (self.path,), 2)
elif key == 'atime' and not Globals.preserve_atime: pass
elif key == 'devloc' or key == 'inode' or key == 'nlink': pass
elif key == 'size' and not self.isreg(): pass
elif (not other.data.has_key(key) or
self.data[key] != other.data[key]):
if not other.data.has_key(key):
- Log("Second is missing key %s" % (key,), 2)
- else: Log("Value of %s differs: %s vs %s" %
- (key, self.data[key], other.data[key]), 2)
+ log.Log("Second is missing key %s" % (key,), 2)
+ else: log.Log("Value of %s differs: %s vs %s" %
+ (key, self.data[key], other.data[key]), 2)
return None
return 1
@@ -548,7 +546,7 @@ class RPath(RORPath):
def make_file_dict_old(self):
"""Create the data dictionary"""
- statblock = self.conn.RPathStatic.tupled_lstat(self.path)
+ statblock = self.conn.rpath.tupled_lstat(self.path)
if statblock is None:
return {'type':None}
data = {}
@@ -614,14 +612,14 @@ class RPath(RORPath):
def settime(self, accesstime, modtime):
"""Change file modification times"""
- Log("Setting time of %s to %d" % (self.path, modtime), 7)
+ log.Log("Setting time of %s to %d" % (self.path, modtime), 7)
self.conn.os.utime(self.path, (accesstime, modtime))
self.data['atime'] = accesstime
self.data['mtime'] = modtime
def setmtime(self, modtime):
"""Set only modtime (access time to present)"""
- Log(lambda: "Setting time of %s to %d" % (self.path, modtime), 7)
+ log.Log(lambda: "Setting time of %s to %d" % (self.path, modtime), 7)
self.conn.os.utime(self.path, (time.time(), modtime))
self.data['mtime'] = modtime
@@ -632,12 +630,12 @@ class RPath(RORPath):
self.data['gid'] = gid
def mkdir(self):
- Log("Making directory " + self.path, 6)
+ log.Log("Making directory " + self.path, 6)
self.conn.os.mkdir(self.path)
self.setdata()
def rmdir(self):
- Log("Removing directory " + self.path, 6)
+ log.Log("Removing directory " + self.path, 6)
self.conn.os.rmdir(self.path)
self.data = {'type': None}
@@ -664,13 +662,13 @@ class RPath(RORPath):
def mksock(self):
"""Make a socket at self.path"""
- self.conn.RPathStatic.make_socket_local(self)
+ self.conn.rpath.make_socket_local(self)
self.setdata()
assert self.issock()
def touch(self):
"""Make sure file at self.path exists"""
- Log("Touching " + self.path, 7)
+ log.Log("Touching " + self.path, 7)
self.conn.open(self.path, "w").close()
self.setdata()
assert self.isreg()
@@ -704,15 +702,14 @@ class RPath(RORPath):
def delete(self):
"""Delete file at self.path. Recursively deletes directories."""
- Log("Deleting %s" % self.path, 7)
+ log.Log("Deleting %s" % self.path, 7)
self.setdata()
if not self.lstat():
- Log("Warning: %s does not exist---deleted in meantime?"
+ log.Log("Warning: %s does not exist---deleted in meantime?"
% (self.path,), 2)
elif self.isdir():
- itm = IterTreeReducer(RpathDeleter, [])
- for rp in Select(self).set_iter(): itm(rp.index, rp)
- itm.Finish()
+ try: self.rmdir()
+			except os.error: self.conn.shutil.rmtree(self.path)
else: self.conn.os.unlink(self.path)
self.setdata()
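
# A minimal sketch (not part of the patch) of the delete fallback above:
# rmdir is cheap but only succeeds on an empty directory, so a populated
# tree falls through to the recursive removal.
import os, shutil

def delete_dir(path):
	try: os.rmdir(path)			# fast path: directory already empty
	except os.error: shutil.rmtree(path)	# recursive delete otherwise
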
@@ -784,11 +781,11 @@ class RPath(RORPath):
if compress:
if mode == "r" or mode == "rb":
- return self.conn.RPathStatic.gzip_open_local_read(self)
+ return self.conn.rpath.gzip_open_local_read(self)
else: return self.conn.gzip.GzipFile(self.path, mode)
else:
if mode == "r" or mode == "rb":
- return self.conn.RPathStatic.open_local_read(self)
+ return self.conn.rpath.open_local_read(self)
else: return self.conn.open(self.path, mode)
def write_from_fileobj(self, fp, compress = None):
@@ -798,10 +795,10 @@ class RPath(RORPath):
written to self.
"""
- Log("Writing file object to " + self.path, 7)
+ log.Log("Writing file object to " + self.path, 7)
assert not self.lstat(), "File %s already exists" % self.path
outfp = self.open("wb", compress = compress)
- RPath.copyfileobj(fp, outfp)
+ copyfileobj(fp, outfp)
if fp.close() or outfp.close():
raise RPathException("Error closing file")
self.setdata()
@@ -890,20 +887,20 @@ class RPathFileHook:
# Import these late to avoid circular dependencies
-import FilenameMapping
-from lazy import *
-from selection import *
-from highlevel import *
-
-class RpathDeleter(ITRBranch):
- """Delete a directory. Called by RPath.delete()"""
- def start_process(self, index, rp):
- self.rp = rp
-
- def end_process(self):
- if self.rp.isdir(): self.rp.rmdir()
- else: self.rp.delete()
-
- def can_fast_process(self, index, rp): return not rp.isdir()
- def fast_process(self, index, rp): rp.delete()
+#import FilenameMapping
+#from lazy import *
+#from selection import *
+#from highlevel import *
+
+#class RpathDeleter(ITRBranch):
+# """Delete a directory. Called by RPath.delete()"""
+# def start_process(self, index, rp):
+# self.rp = rp
+#
+# def end_process(self):
+# if self.rp.isdir(): self.rp.rmdir()
+# else: self.rp.delete()
+#
+# def can_fast_process(self, index, rp): return not rp.isdir()
+# def fast_process(self, index, rp): rp.delete()
diff --git a/rdiff-backup/rdiff_backup/selection.py b/rdiff-backup/rdiff_backup/selection.py
index 70203f4..4ca6863 100644
--- a/rdiff-backup/rdiff_backup/selection.py
+++ b/rdiff-backup/rdiff_backup/selection.py
@@ -26,9 +26,8 @@ documentation on what this code does can be found on the man page.
from __future__ import generators
import re
-from log import *
-from robust import *
-import FilenameMapping
+from log import Log
+import FilenameMapping, robust, rpath, Globals
class SelectError(Exception):
@@ -81,7 +80,7 @@ class Select:
# This re should not match normal filenames, but usually just globs
glob_re = re.compile("(.*[*?[]|ignorecase\\:)", re.I | re.S)
- def __init__(self, rpath, quoted_filenames = None):
+ def __init__(self, rootrp, quoted_filenames = None):
"""Select initializer. rpath is the root directory
When files have quoted characters in them, quoted_filenames
@@ -89,9 +88,9 @@ class Select:
version.
"""
- assert isinstance(rpath, RPath)
+ assert isinstance(rootrp, rpath.RPath)
self.selection_functions = []
- self.rpath = rpath
+ self.rpath = rootrp
self.prefix = self.rpath.path
self.quoting_on = Globals.quoting_enabled and quoted_filenames
@@ -141,8 +140,8 @@ class Select:
and should be included iff something inside is included.
"""
- for filename in Robust.listrp(rpath):
- new_rpath = Robust.check_common_error(error_handler,
+ for filename in robust.listrp(rpath):
+ new_rpath = robust.check_common_error(error_handler,
rpath.append, (filename,))
if new_rpath:
s = sel_func(new_rpath)
@@ -204,12 +203,12 @@ class Select:
return None
if self.quoting_on:
- for subdir in FilenameMapping.get_quoted_dir_children(rpath):
+ for subdir in get_quoted_dir_children(rpath):
for rp in rec_func(subdir, rec_func, sel_func):
yield rp
else:
- for filename in Robust.listrp(rpath):
- new_rp = Robust.check_common_error(
+ for filename in robust.listrp(rpath):
+ new_rp = robust.check_common_error(
error_handler, rpath.append, [filename])
if new_rp:
for rp in rec_func(new_rp, rec_func, sel_func):
@@ -646,3 +645,22 @@ probably isn't what you meant.""" %
return res
+def get_quoted_dir_children(rpath):
+ """For rpath directory, return list of quoted children in dir
+
+ This used to be in FilenameMapping, but was moved because it
+ depends on the robust.listrp routine.
+
+ """
+ if not rpath.isdir(): return []
+ dir_pairs = [(FilenameMapping.unquote(filename), filename)
+ for filename in robust.listrp(rpath)]
+ dir_pairs.sort() # sort by real index, not quoted part
+ child_list = []
+ for unquoted, filename in dir_pairs:
+ childrp = rpath.append(unquoted)
+ childrp.quote_path()
+ child_list.append(childrp)
+ return child_list
+
+
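
# A minimal sketch (not part of the patch) of why dir_pairs sorts on the
# unquoted name, assuming the ";ddd" numeric quoting scheme with ';' as the
# quoting character: children must come back in real index order even though
# their on-disk, quoted spellings sort differently.
import re

def unquote(name):	# illustrative stand-in for FilenameMapping.unquote
	return re.sub(";([0-9]{3})", lambda m: chr(int(m.group(1))), name)

names = [";065b", "aa", ";066"]		# on-disk (quoted) spellings
dir_pairs = [(unquote(n), n) for n in names]
dir_pairs.sort()					# sort by the real, unquoted index
assert [u for u, q in dir_pairs] == ["Ab", "B", "aa"]
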
diff --git a/rdiff-backup/rdiff_backup/statistics.py b/rdiff-backup/rdiff_backup/statistics.py
index 261b2f3..c1bf55b 100644
--- a/rdiff-backup/rdiff_backup/statistics.py
+++ b/rdiff-backup/rdiff_backup/statistics.py
@@ -19,9 +19,8 @@
"""Generate and process aggregated backup information"""
-from lazy import *
-import re
-
+import re, os
+import Globals, TempFile, robust, Time, rorpiter
class StatsException(Exception): pass
@@ -216,12 +215,12 @@ class StatsObj:
def write_stats_to_rp(self, rp):
"""Write statistics string to given rpath"""
- tf = TempFileManager.new(rp)
+ tf = TempFile.new(rp)
def init_thunk():
fp = tf.open("w")
fp.write(self.get_stats_string())
fp.close()
- Robust.make_tf_robustaction(init_thunk, (tf,), (rp,)).execute()
+ robust.make_tf_robustaction(init_thunk, (tf,), (rp,)).execute()
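
# A minimal sketch (not part of the patch) of the write-to-temp-then-commit
# pattern that TempFile.new plus make_tf_robustaction implement, reduced to
# plain os calls on a hypothetical local path:
import os, tempfile

def atomic_write(path, data):
	dirname = os.path.dirname(path) or "."
	fd, tmp = tempfile.mkstemp(dir = dirname)	# temp file on the same fs
	try: os.write(fd, data)
	finally: os.close(fd)
	os.rename(tmp, path)	# commit step; atomic rename on POSIX
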
def read_stats_from_rp(self, rp):
"""Set statistics from rpath, return self for convenience"""
@@ -264,7 +263,7 @@ class StatsObj:
return s
-class StatsITRB(ITRBranch, StatsObj):
+class ITRB(rorpiter.ITRBranch, StatsObj):
"""Keep track of per directory statistics
This is subclassed by the mirroring and incrementing ITRs.
@@ -339,7 +338,6 @@ class StatsITRB(ITRBranch, StatsObj):
self.__dict__[attr] += branch.__dict__[attr]
-from log import *
-from increment import *
-from robust import *
-import Globals
+
+
+