commit:    ca4ace407c938d58c7fe33cb872b0705635b39cf
tree:      fc404794ca9ec272acaaa84fdb83433c79296596 /rdiff-backup/rdiff_backup
parent:    7d34f23699cc540bd1986cb3ae62d52952ede596
author:    ben <ben@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109>  2002-06-16 07:12:39 +0000
committer: ben <ben@2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109>  2002-06-16 07:12:39 +0000
Adapted everything to new exploded format
git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup/trunk@130 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
Diffstat (limited to 'rdiff-backup/rdiff_backup')
-rw-r--r--  rdiff-backup/rdiff_backup/FilenameMapping.py        94
-rw-r--r--  rdiff-backup/rdiff_backup/Globals.py                226
-rw-r--r--  rdiff-backup/rdiff_backup/Hardlink.py               262
-rw-r--r--  rdiff-backup/rdiff_backup/Main.py                   487
-rwxr-xr-x  rdiff-backup/rdiff_backup/Make.old                   38
-rw-r--r--  rdiff-backup/rdiff_backup/MiscStats.py               72
-rw-r--r--  rdiff-backup/rdiff_backup/Rdiff.py                  181
-rw-r--r--  rdiff-backup/rdiff_backup/SetConnections.py         219
-rw-r--r--  rdiff-backup/rdiff_backup/Time.py                   199
-rw-r--r--  rdiff-backup/rdiff_backup/connection.py              40
-rw-r--r--  rdiff-backup/rdiff_backup/destructive_stepping.py     6
-rw-r--r--  rdiff-backup/rdiff_backup/highlevel.py               21
-rw-r--r--  rdiff-backup/rdiff_backup/increment.py               15
-rw-r--r--  rdiff-backup/rdiff_backup/iterfile.py                 2
-rw-r--r--  rdiff-backup/rdiff_backup/lazy.py                     6
-rw-r--r--  rdiff-backup/rdiff_backup/log.py                      6
-rw-r--r--  rdiff-backup/rdiff_backup/manage.py                   5
-rw-r--r--  rdiff-backup/rdiff_backup/restore.py                  9
-rw-r--r--  rdiff-backup/rdiff_backup/robust.py                  14
-rw-r--r--  rdiff-backup/rdiff_backup/rorpiter.py                 9
-rw-r--r--  rdiff-backup/rdiff_backup/rpath.py                    6
-rw-r--r--  rdiff-backup/rdiff_backup/selection.py                6
-rw-r--r--  rdiff-backup/rdiff_backup/static.py                   2
-rw-r--r--  rdiff-backup/rdiff_backup/statistics.py              76
24 files changed, 1893 insertions(+), 108 deletions(-)
diff --git a/rdiff-backup/rdiff_backup/FilenameMapping.py b/rdiff-backup/rdiff_backup/FilenameMapping.py
new file mode 100644
index 0000000..104519d
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/FilenameMapping.py
@@ -0,0 +1,94 @@
+import re
+from log import *
+import Globals
+
+#######################################################################
+#
+# filename_mapping - used to coordinate related filenames
+#
+# For instance, some source filenames may contain characters not
+# allowed on the mirror end. Also, if a source filename is very long
+# (say 240 characters), the extra characters added to related
+# increments may put them over the usual 255 character limit.
+#
+
+"""Contains class methods which coordinate related filenames"""
+max_filename_length = 255
+
+# If set, character quoting is enabled; the string's characters are
+# interpreted as a regex-style range of characters to quote.
+chars_to_quote = None
+
+# These compiled regular expressions are used in quoting and unquoting
+chars_to_quote_regexp = None
+unquoting_regexp = None
+
+# Use given char to quote. Default is set in Globals.
+quoting_char = None
+
+
+def set_init_quote_vals():
+ """Set quoting value from Globals on all conns"""
+ for conn in Globals.connections:
+ conn.FilenameMapping.set_init_quote_vals_local()
+
+def set_init_quote_vals_local():
+ """Set value on local connection, initialize regexps"""
+ global chars_to_quote, quoting_char
+ chars_to_quote = Globals.chars_to_quote
+ if len(Globals.quoting_char) != 1:
+ Log.FatalError("Expected single character for quoting char,"
+ "got '%s' instead" % (Globals.quoting_char,))
+ quoting_char = Globals.quoting_char
+ init_quoting_regexps()
+
+def init_quoting_regexps():
+ """Compile quoting regular expressions"""
+ global chars_to_quote_regexp, unquoting_regexp
+ try:
+ chars_to_quote_regexp = \
+ re.compile("[%s%s]" % (chars_to_quote, quoting_char), re.S)
+ unquoting_regexp = re.compile("%s[0-9]{3}" % quoting_char, re.S)
+ except re.error, e:
+ Log.FatalError("Error '%s' when processing char quote list %s" %
+ (e, chars_to_quote))
+
+def quote(path):
+ """Return quoted version of given path
+
+ Any characters quoted will be replaced by the quoting char and
+ the ascii number of the character. For instance, "10:11:12"
+ would go to "10;05811;05812" if ":" were quoted and ";" were
+ the quoting character.
+
+ """
+ return chars_to_quote_regexp.sub(quote_single, path)
+
+def quote_single(match):
+ """Return replacement for a single character"""
+ return "%s%03d" % (quoting_char, ord(match.group()))
+
+def unquote(path):
+ """Return original version of quoted filename"""
+ return unquoting_regexp.sub(unquote_single, path)
+
+def unquote_single(match):
+ """Unquote a single quoted character"""
+ assert len(match.group()) == 4
+ return chr(int(match.group()[1:]))
+
+def get_quoted_dir_children(rpath):
+ """For rpath directory, return list of quoted children in dir"""
+ if not rpath.isdir(): return []
+ dir_pairs = [(unquote(filename), filename)
+ for filename in Robust.listrp(rpath)]
+ dir_pairs.sort() # sort by real index, not quoted part
+ child_list = []
+ for unquoted, filename in dir_pairs:
+ childrp = rpath.append(unquoted)
+ childrp.quote_path()
+ child_list.append(childrp)
+ return child_list
+
+
+
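The quoting scheme above replaces each character in chars_to_quote, plus the quoting char itself, with the quoting char followed by the character's three-digit ASCII code. A minimal standalone sketch of the same round trip, using ':' quoted by ';' as in the docstring's example:

import re

quoting_char = ";"
chars_to_quote = ":"
quote_re = re.compile("[%s%s]" % (chars_to_quote, quoting_char), re.S)
unquote_re = re.compile("%s[0-9]{3}" % quoting_char, re.S)

def quote(path):
    # quoting the quoting char as well keeps the mapping reversible
    return quote_re.sub(lambda m: "%s%03d" % (quoting_char, ord(m.group())), path)

def unquote(path):
    return unquote_re.sub(lambda m: chr(int(m.group()[1:])), path)

assert quote("10:11:12") == "10;05811;05812"    # ord(":") == 58
assert unquote(quote("10:11:12")) == "10:11:12"
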
diff --git a/rdiff-backup/rdiff_backup/Globals.py b/rdiff-backup/rdiff_backup/Globals.py
new file mode 100644
index 0000000..ca6e8d1
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/Globals.py
@@ -0,0 +1,226 @@
+import re, os
+
+# The current version of rdiff-backup
+version = "0.8.0"
+
+# If this is set, use this value in seconds as the current time
+# instead of reading it from the clock.
+current_time = None
+
+# This determines how many bytes to read at a time when copying
+blocksize = 32768
+
+# This is used by the BufferedRead class to determine how many
+# bytes to request from the underlying file per read(). Larger
+# values may save on connection overhead and latency.
+conn_bufsize = 98304
+
+# True if script is running as a server
+server = None
+
+# uid and gid of the owner of the rdiff-backup process. This can
+# vary depending on the connection.
+process_uid = os.getuid()
+process_gid = os.getgid()
+
+# If true, when copying attributes, also change target's uid/gid
+change_ownership = None
+
+# If true, change the permissions of unwriteable mirror files
+# (such as directories) so that they can be written, and then
+# change them back. This defaults to 1 just in case the process
+# is not running as root (root doesn't need to change
+# permissions).
+change_mirror_perms = (process_uid != 0)
+
+# If true, temporarily change permissions of unreadable files in
+# the source directory to make sure we can read all files.
+change_source_perms = None
+
+# If true, try to reset the atimes of the source partition.
+preserve_atime = None
+
+# This will be set as soon as the LocalConnection class loads
+local_connection = None
+
+# All connections should be added to the following list, so
+# further global changes can be propagated to the remote systems.
+# The first element should be Globals.local_connection. For a
+# server, the second is the connection to the client.
+connections = []
+
+# Each process should have a connection number unique to the
+# session. The client has connection number 0.
+connection_number = 0
+
+# Dictionary pairing connection numbers with connections. Set in
+# SetConnections for all connections.
+connection_dict = {}
+
+# True if the script is the end that reads the source directory
+# for backups. It is true for purely local sessions.
+isbackup_reader = None
+
+# Connection of the real backup reader (for which isbackup_reader
+# is true)
+backup_reader = None
+
+# True if the script is the end that writes to the increment and
+# mirror directories. True for purely local sessions.
+isbackup_writer = None
+
+# Connection of the backup writer
+backup_writer = None
+
+# True if this process is the client invoked by the user
+isclient = None
+
+# Connection of the client
+client_conn = None
+
+# This list is used by the set function below. When a new
+# connection is created with init_connection, its Globals class
+# will match this one for all the variables mentioned in this
+# list.
+changed_settings = []
+
+# rdiff-backup will try to checkpoint its state every
+# checkpoint_interval seconds. Then when resuming, at most this
+# amount of time is lost.
+checkpoint_interval = 20
+
+# The RPath of the rdiff-backup-data directory.
+rbdir = None
+
+# Indicates if a resume or a lack of resume is forced. This
+# should be None for the default. 0 means don't resume, and 1
+# means resume.
+resume = None
+
+# If there has been an aborted backup fewer than this many seconds
+# ago, attempt to resume it where it left off instead of starting
+# a new one.
+resume_window = 7200
+
+# This string is used when recognizing and creating time strings.
+# If the time_separator is ":", then W3 datetime strings like
+# 2001-12-07T04:22:01-07:00 are produced. It can be set to "_" to
+# make filenames that don't contain colons, which aren't allowed
+# under MS windows NT.
+time_separator = ":"
+
+# quoting_enabled is true if we should quote certain characters in
+# filenames on the source side (see FilenameMapping for more
+# info). chars_to_quote is a string whose characters should be
+# quoted, and quoting_char is the character to quote with.
+quoting_enabled = None
+chars_to_quote = ""
+quoting_char = ';'
+
+# If true, emit output intended to be easily readable by a
+# computer. False means output is intended for humans.
+parsable_output = None
+
+# If true, then hardlinks will be preserved to mirror and recorded
+# in the increments directory. There is also a difference here
+# between None and 0. When restoring, None or 1 means to preserve
+# hardlinks iff can find a hardlink dictionary. 0 means ignore
+# hardlink information regardless.
+preserve_hardlinks = 1
+
+# If this is false, then rdiff-backup will not compress any
+# increments. Default is to compress based on regexp below.
+compression = 1
+
+# Increments based on files whose names match this
+# case-insensitive regular expression won't be compressed (applies
+# to .snapshots and .diffs). The second below will be the
+# compiled version of the first.
+no_compression_regexp_string = "(?i).*\\.(gz|z|bz|bz2|tgz|zip|rpm|deb|" \
+ "jpg|gif|png|jp2|mp3|ogg|avi|wmv|mpeg|mpg|rm|mov)$"
+no_compression_regexp = None
+
+# If true, filelists and directory statistics will be split on
+# nulls instead of newlines.
+null_separator = None
+
+# Determines whether or not ssh will be run with the -C switch
+ssh_compression = 1
+
+# If true, print statistics after successful backup
+print_statistics = None
+
+# On the reader and writer connections, the following will be
+# replaced by the source and mirror Select objects respectively.
+select_source, select_mirror = None, None
+
+# On the backup writer connection, holds the main incrementing
+# function. Access is provided to increment error counts.
+ITR = None
+
+def get(name):
+ """Return the value of something in this module"""
+ return globals()[name]
+
+def is_not_None(name):
+ """Returns true if value is not None"""
+ return globals()[name] is not None
+
+def set(name, val):
+ """Set the value of something in this module
+
+ Use this instead of writing the values directly if the setting
+ matters to remote sides. This function updates the
+ changed_settings list, so other connections know to copy the
+ changes.
+
+ """
+ changed_settings.append(name)
+ globals()[name] = val
+
+def set_integer(name, val):
+ """Like set, but make sure val is an integer"""
+ try: intval = int(val)
+ except ValueError:
+ Log.FatalError("Variable %s must be set to an integer -\n"
+ "received %s instead." % (name, val))
+ set(name, intval)
+
+def get_dict_val(name, key):
+ """Return val from dictionary in this class"""
+ return globals()[name][key]
+
+def set_dict_val(name, key, val):
+ """Set value for dictionary in this class"""
+ globals()[name][key] = val
+
+def postset_regexp(name, re_string, flags = None):
+ """Compile re_string on all existing connections, set to name"""
+ for conn in connections:
+ conn.Globals.postset_regexp_local(name, re_string, flags)
+
+def postset_regexp_local(name, re_string, flags):
+ """Set name to compiled re_string locally"""
+ if flags: globals()[name] = re.compile(re_string, flags)
+ else: globals()[name] = re.compile(re_string)
+
+def set_select(dsrpath, tuplelist, quote_mode, *filelists):
+ """Initialize select object using tuplelist
+
+ Note that each list in filelists must each be passed as
+ separate arguments, so each is recognized as a file by the
+ connection. Otherwise we will get an error because a list
+ containing files can't be pickled.
+
+ """
+ global select_source, select_mirror
+ if dsrpath.source:
+ select_source = Select(dsrpath, quote_mode)
+ select_source.ParseArgs(tuplelist, filelists)
+ else:
+ select_mirror = Select(dsrpath, quote_mode)
+ select_mirror.ParseArgs(tuplelist, filelists)
+
+
+from rpath import * # kludge to avoid circularity - not needed in this module
+from selection import *
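Note how set() above appends each changed name to changed_settings: init_connection_settings in SetConnections.py (later in this commit) replays exactly those assignments on every connection opened afterwards. A self-contained sketch of that record-and-replay pattern, with a hypothetical FakeConn standing in for a real connection proxy:

changed_settings = []
compression = 1                      # one example setting

def set(name, val):
    changed_settings.append(name)    # remember for late-joining connections
    globals()[name] = val

def get(name):
    return globals()[name]

class FakeConn:
    # a real PipeConnection would forward these calls to the remote side
    def __init__(self):
        self.remote = {}
    def set(self, name, val):
        self.remote[name] = val

set("compression", None)             # e.g. the effect of --no-compression

conn = FakeConn()
for name in changed_settings:        # what init_connection_settings does
    conn.set(name, get(name))
assert conn.remote == {"compression": None}
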
diff --git a/rdiff-backup/rdiff_backup/Hardlink.py b/rdiff-backup/rdiff_backup/Hardlink.py
new file mode 100644
index 0000000..9389b6f
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/Hardlink.py
@@ -0,0 +1,262 @@
+from __future__ import generators
+import cPickle
+
+#######################################################################
+#
+# hardlink - code for preserving and restoring hardlinks
+#
+# If the preserve_hardlinks option is selected, linked files in the
+# source directory will be linked in the mirror directory. Linked
+# files are treated like any other with respect to incrementing, but a
+# database of all links will be recorded at each session, so linked
+# files can still be restored from the increments.
+#
+
+"""Hardlink class methods and data
+
+All these functions are meant to be executed on the destination
+side. The source side should only transmit inode information.
+
+"""
+
+# In all of these dictionaries, lists of indices are the values. The
+# keys in the _inode_ ones are (inode, devloc) pairs.
+_src_inode_indicies = {}
+_dest_inode_indicies = {}
+
+# The keys for these two are just indices. They share values
+# with the earlier dictionaries.
+_src_index_indicies = {}
+_dest_index_indicies = {}
+
+# When a linked file is restored, its path is added to this dict,
+# so it can be found when later paths being restored are linked to
+# it.
+_restore_index_path = {}
+
+def get_inode_key(rorp):
+ """Return rorp's key for _inode_ dictionaries"""
+ return (rorp.getinode(), rorp.getdevloc())
+
+def get_indicies(rorp, source):
+ """Return a list of similarly linked indicies, using rorp's index"""
+ if source: dict = _src_index_indicies
+ else: dict = _dest_index_indicies
+ try: return dict[rorp.index]
+ except KeyError: return []
+
+def add_rorp(rorp, source):
+ """Process new rorp and update hard link dictionaries
+
+ First enter it into src_inode_indicies. If we have already
+ seen all the hard links, then we can delete the entry.
+ Everything must stay recorded in src_index_indicies though.
+
+ """
+ if not rorp.isreg() or rorp.getnumlinks() < 2: return
+
+ if source:
+ inode_dict, index_dict = _src_inode_indicies, _src_index_indicies
+ else: inode_dict, index_dict = _dest_inode_indicies, _dest_index_indicies
+
+ rp_inode_key = get_inode_key(rorp)
+ if inode_dict.has_key(rp_inode_key):
+ index_list = inode_dict[rp_inode_key]
+ index_list.append(rorp.index)
+ if len(index_list) == rorp.getnumlinks():
+ del inode_dict[rp_inode_key]
+ else: # make new entry in both src dicts
+ index_list = [rorp.index]
+ inode_dict[rp_inode_key] = index_list
+ index_dict[rorp.index] = index_list
+
+def add_rorp_iter(iter, source):
+ """Return new rorp iterator like iter that add_rorp's first"""
+ for rorp in iter:
+ add_rorp(rorp, source)
+ yield rorp
+
+def rorp_eq(src_rorp, dest_rorp):
+ """Compare hardlinked for equality
+
+ Two files may otherwise seem equal but be hardlinked in
+ different ways. This function considers them equal enough if
+ they have been hardlinked correctly to the previously seen
+ indices.
+
+ """
+ assert src_rorp.index == dest_rorp.index
+ if (not src_rorp.isreg() or not dest_rorp.isreg() or
+ src_rorp.getnumlinks() == dest_rorp.getnumlinks() == 1):
+ return 1 # Hard links don't apply
+
+ src_index_list = get_indicies(src_rorp, 1)
+ dest_index_list = get_indicies(dest_rorp, None)
+
+ # If a list only has one element, then it is only hardlinked
+ # to itself so far, so that is not a genuine difference yet.
+ if not src_index_list or len(src_index_list) == 1:
+ return not dest_index_list or len(dest_index_list) == 1
+ if not dest_index_list or len(dest_index_list) == 1: return None
+
+ # Both index lists exist and are non-empty
+ return src_index_list == dest_index_list # they are always sorted
+
+def islinked(rorp):
+ """True if rorp's index is already linked to something on src side"""
+ return len(get_indicies(rorp, 1)) >= 2
+
+def restore_link(index, rpath):
+ """Restores a linked file by linking it
+
+ When restoring, all the hardlink data is already present, so
+ we can only link to something already written. If no
+ already-restored file is available to link to, add this file's
+ path to the _restore_index_path dict, so we know later that
+ the file is available for hard linking.
+
+ Returns true if succeeded in creating rpath, false if must
+ restore rpath normally.
+
+ """
+ if index not in _src_index_indicies: return None
+ for linked_index in _src_index_indicies[index]:
+ if linked_index in _restore_index_path:
+ srcpath = _restore_index_path[linked_index]
+ Log("Restoring %s by hard linking to %s" %
+ (rpath.path, srcpath), 6)
+ rpath.hardlink(srcpath)
+ return 1
+ _restore_index_path[index] = rpath.path
+ return None
+
+def link_rp(src_rorp, dest_rpath, dest_root = None):
+ """Make dest_rpath into a link analogous to that of src_rorp"""
+ if not dest_root: dest_root = dest_rpath # use base of dest_rpath
+ dest_link_rpath = RPath(dest_root.conn, dest_root.base,
+ get_indicies(src_rorp, 1)[0])
+ dest_rpath.hardlink(dest_link_rpath.path)
+
+def write_linkdict(rpath, dict, compress = None):
+ """Write link data to the rbdata dir
+
+ It is stored as a big pickled dictionary, dated to match
+ the current hardlinks.
+
+ """
+ assert (Globals.isbackup_writer and
+ rpath.conn is Globals.local_connection)
+ tf = TempFileManager.new(rpath)
+ def init():
+ fp = tf.open("wb", compress)
+ cPickle.dump(dict, fp)
+ assert not fp.close()
+ tf.setdata()
+ Robust.make_tf_robustaction(init, (tf,), (rpath,)).execute()
+
+def get_linkrp(data_rpath, time, prefix):
+ """Return RPath of linkdata, or None if cannot find"""
+ for rp in map(data_rpath.append, data_rpath.listdir()):
+ if (rp.isincfile() and rp.getincbase_str() == prefix and
+ (rp.getinctype() == 'snapshot' or rp.getinctype() == 'data')
+ and Time.stringtotime(rp.getinctime()) == time):
+ return rp
+ return None
+
+def get_linkdata(data_rpath, time, prefix = 'hardlink_data'):
+ """Return index dictionary written by write_linkdata at time"""
+ rp = get_linkrp(data_rpath, time, prefix)
+ if not rp: return None
+ fp = rp.open("rb", rp.isinccompressed())
+ index_dict = cPickle.load(fp)
+ assert not fp.close()
+ return index_dict
+
+def final_writedata():
+ """Write final checkpoint data to rbdir after successful backup"""
+ global final_inc
+ if _src_index_indicies:
+ Log("Writing hard link data", 6)
+ if Globals.compression:
+ final_inc = Globals.rbdir.append("hardlink_data.%s.data.gz" %
+ Time.curtimestr)
+ else: final_inc = Globals.rbdir.append("hardlink_data.%s.data" %
+ Time.curtimestr)
+ write_linkdict(final_inc, _src_index_indicies, Globals.compression)
+ else: # no hardlinks, so writing unnecessary
+ final_inc = None
+
+def retrieve_final(time):
+ """Set source index dictionary from hardlink_data file if avail"""
+ global _src_index_indicies
+ hd = get_linkdata(Globals.rbdir, time)
+ if hd is None: return None
+ _src_index_indicies = hd
+ return 1
+
+def final_checkpoint(data_rpath):
+ """Write contents of the four dictionaries to the data dir
+
+ If rdiff-backup receives a fatal error, it may still be able
+ to save the contents of the four hard link dictionaries.
+ Because these dictionaries may be big, they are not saved
+ after every 20 seconds or whatever, but just at the end.
+
+ """
+ Log("Writing intermediate hard link data to disk", 2)
+ src_inode_rp = data_rpath.append("hardlink_source_inode_checkpoint."
+ "%s.data" % Time.curtimestr)
+ src_index_rp = data_rpath.append("hardlink_source_index_checkpoint."
+ "%s.data" % Time.curtimestr)
+ dest_inode_rp = data_rpath.append("hardlink_dest_inode_checkpoint."
+ "%s.data" % Time.curtimestr)
+ dest_index_rp = data_rpath.append("hardlink_dest_index_checkpoint."
+ "%s.data" % Time.curtimestr)
+ for (rp, dict) in ((src_inode_rp, _src_inode_indicies),
+ (src_index_rp, _src_index_indicies),
+ (dest_inode_rp, _dest_inode_indicies),
+ (dest_index_rp, _dest_index_indicies)):
+ write_linkdict(rp, dict)
+
+def retrieve_checkpoint(data_rpath, time):
+ """Retrieve hardlink data from final checkpoint
+
+ Return true if the retrieval worked, false otherwise.
+
+ """
+ global _src_inode_indicies, _src_index_indicies
+ global _dest_inode_indicies, _dest_index_indicies
+ try:
+ src_inode = get_linkdata(data_rpath, time,
+ "hardlink_source_inode_checkpoint")
+ src_index = get_linkdata(data_rpath, time,
+ "hardlink_source_index_checkpoint")
+ dest_inode = get_linkdata(data_rpath, time,
+ "hardlink_dest_inode_checkpoint")
+ dest_index = get_linkdata(data_rpath, time,
+ "hardlink_dest_index_checkpoint")
+ except cPickle.UnpicklingError:
+ Log("Unpickling Error", 2)
+ return None
+ if (src_inode is None or src_index is None or
+ dest_inode is None or dest_index is None): return None
+ _src_inode_indicies, _src_index_indicies = src_inode, src_index
+ _dest_inode_indicies, _dest_index_indicies = dest_inode, dest_index
+ return 1
+
+def remove_all_checkpoints():
+ """Remove all hardlink checkpoint information from directory"""
+ prefix_list = ["hardlink_source_inode_checkpoint",
+ "hardlink_source_index_checkpoint",
+ "hardlink_dest_inode_checkpoint",
+ "hardlink_dest_index_checkpoint"]
+ for rp in map(Globals.rbdir.append, Globals.rbdir.listdir()):
+ if (rp.isincfile() and rp.getincbase_str() in prefix_list and
+ (rp.getinctype() == 'snapshot' or rp.getinctype() == 'data')):
+ rp.delete()
+
+
+from log import *
+from robust import *
+from rpath import *
+import Globals, Time
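The bookkeeping in add_rorp above hinges on one shared list per hardlink group: the (inode, devloc) key accumulates indices until every link has been seen, and each index keeps a reference to the same list, so the group stays reachable even after the inode entry is deleted. A minimal sketch of that structure (add_file and the plain-string indices are illustrative stand-ins for rorps):

inode_indices = {}   # (inode, devloc) -> group list, while links remain unseen
index_indices = {}   # index -> the same shared group list, kept all session

def add_file(index, inode_key, numlinks):
    if numlinks < 2:
        return                        # not hardlinked, nothing to track
    if inode_key in inode_indices:
        group = inode_indices[inode_key]
        group.append(index)
        if len(group) == numlinks:    # every link seen; inode entry done
            del inode_indices[inode_key]
    else:
        group = [index]
        inode_indices[inode_key] = group
    index_indices[index] = group      # all links share one list object

add_file("a", (1234, 0), 2)
add_file("b", (1234, 0), 2)
assert index_indices["a"] is index_indices["b"]
assert index_indices["b"] == ["a", "b"]
assert inode_indices == {}            # group complete, inode key dropped
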
diff --git a/rdiff-backup/rdiff_backup/Main.py b/rdiff-backup/rdiff_backup/Main.py
new file mode 100644
index 0000000..94ca04a
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/Main.py
@@ -0,0 +1,487 @@
+import getopt, sys, re
+from log import *
+from lazy import *
+from connection import *
+from rpath import *
+from destructive_stepping import *
+from robust import *
+from restore import *
+from highlevel import *
+from manage import *
+import Globals, Time, SetConnections
+
+#######################################################################
+#
+# main - Start here: Read arguments, set global settings, etc.
+#
+action = None
+remote_cmd, remote_schema = None, None
+force = None
+select_opts, select_mirror_opts = [], []
+select_files = []
+
+def parse_cmdlineoptions(arglist):
+ """Parse argument list and set global preferences"""
+ global args, action, force, restore_timestr, remote_cmd, remote_schema
+ global remove_older_than_string
+ def sel_fl(filename):
+ """Helper function for including/excluding filelists below"""
+ try: return open(filename, "r")
+ except IOError: Log.FatalError("Error opening file %s" % filename)
+
+ try: optlist, args = getopt.getopt(arglist, "blmr:sv:V",
+ ["backup-mode", "calculate-average",
+ "change-source-perms", "chars-to-quote=",
+ "checkpoint-interval=", "current-time=", "exclude=",
+ "exclude-device-files", "exclude-filelist=",
+ "exclude-filelist-stdin", "exclude-mirror=",
+ "exclude-regexp=", "force", "include=",
+ "include-filelist=", "include-filelist-stdin",
+ "include-regexp=", "list-increments", "mirror-only",
+ "no-compression", "no-compression-regexp=",
+ "no-hard-links", "no-resume", "null-separator",
+ "parsable-output", "print-statistics", "quoting-char=",
+ "remote-cmd=", "remote-schema=", "remove-older-than=",
+ "restore-as-of=", "resume", "resume-window=", "server",
+ "ssh-no-compression", "terminal-verbosity=",
+ "test-server", "verbosity", "version", "windows-mode",
+ "windows-time-format"])
+ except getopt.error, e:
+ commandline_error("Bad commandline options: %s" % str(e))
+
+ for opt, arg in optlist:
+ if opt == "-b" or opt == "--backup-mode": action = "backup"
+ elif opt == "--calculate-average": action = "calculate-average"
+ elif opt == "--change-source-perms":
+ Globals.set('change_source_perms', 1)
+ elif opt == "--chars-to-quote":
+ Globals.set('chars_to_quote', arg)
+ Globals.set('quoting_enabled', 1)
+ elif opt == "--checkpoint-interval":
+ Globals.set_integer('checkpoint_interval', arg)
+ elif opt == "--current-time":
+ Globals.set_integer('current_time', arg)
+ elif opt == "--exclude": select_opts.append((opt, arg))
+ elif opt == "--exclude-device-files": select_opts.append((opt, arg))
+ elif opt == "--exclude-filelist":
+ select_opts.append((opt, arg))
+ select_files.append(sel_fl(arg))
+ elif opt == "--exclude-filelist-stdin":
+ select_opts.append(("--exclude-filelist", "standard input"))
+ select_files.append(sys.stdin)
+ elif opt == "--exclude-mirror":
+ select_mirror_opts.append(("--exclude", arg))
+ elif opt == "--exclude-regexp": select_opts.append((opt, arg))
+ elif opt == "--force": force = 1
+ elif opt == "--include": select_opts.append((opt, arg))
+ elif opt == "--include-filelist":
+ select_opts.append((opt, arg))
+ select_files.append(sel_fl(arg))
+ elif opt == "--include-filelist-stdin":
+ select_opts.append(("--include-filelist", "standard input"))
+ select_files.append(sys.stdin)
+ elif opt == "--include-regexp": select_opts.append((opt, arg))
+ elif opt == "-l" or opt == "--list-increments":
+ action = "list-increments"
+ elif opt == "-m" or opt == "--mirror-only": action = "mirror"
+ elif opt == "--no-compression": Globals.set("compression", None)
+ elif opt == "--no-compression-regexp":
+ Globals.set("no_compression_regexp_string", arg)
+ elif opt == "--no-hard-links": Globals.set('preserve_hardlinks', 0)
+ elif opt == '--no-resume': Globals.resume = 0
+ elif opt == "--null-separator": Globals.set("null_separator", 1)
+ elif opt == "-r" or opt == "--restore-as-of":
+ restore_timestr, action = arg, "restore-as-of"
+ elif opt == "--parsable-output": Globals.set('parsable_output', 1)
+ elif opt == "--print-statistics":
+ Globals.set('print_statistics', 1)
+ elif opt == "--quoting-char":
+ Globals.set('quoting_char', arg)
+ Globals.set('quoting_enabled', 1)
+ elif opt == "--remote-cmd": remote_cmd = arg
+ elif opt == "--remote-schema": remote_schema = arg
+ elif opt == "--remove-older-than":
+ remove_older_than_string = arg
+ action = "remove-older-than"
+ elif opt == '--resume': Globals.resume = 1
+ elif opt == '--resume-window':
+ Globals.set_integer('resume_window', arg)
+ elif opt == "-s" or opt == "--server": action = "server"
+ elif opt == "--ssh-no-compression":
+ Globals.set('ssh_compression', None)
+ elif opt == "--terminal-verbosity": Log.setterm_verbosity(arg)
+ elif opt == "--test-server": action = "test-server"
+ elif opt == "-V" or opt == "--version":
+ print "rdiff-backup " + Globals.version
+ sys.exit(0)
+ elif opt == "-v" or opt == "--verbosity": Log.setverbosity(arg)
+ elif opt == "--windows-mode":
+ Globals.set('time_separator', "_")
+ Globals.set('chars_to_quote', ":")
+ Globals.set('quoting_enabled', 1)
+ elif opt == '--windows-time-format':
+ Globals.set('time_separator', "_")
+ else: Log.FatalError("Unknown option %s" % opt)
+
+def set_action():
+ """Check arguments and try to set action"""
+ global action
+ l = len(args)
+ if not action:
+ if l == 0: commandline_error("No arguments given")
+ elif l == 1: action = "restore"
+ elif l == 2:
+ if RPath(Globals.local_connection, args[0]).isincfile():
+ action = "restore"
+ else: action = "backup"
+ else: commandline_error("Too many arguments given")
+
+ if l == 0 and action != "server" and action != "test-server":
+ commandline_error("No arguments given")
+ if l > 0 and action == "server":
+ commandline_error("Too many arguments given")
+ if l < 2 and (action == "backup" or action == "mirror" or
+ action == "restore-as-of"):
+ commandline_error("Two arguments are required (source, destination).")
+ if l == 2 and (action == "list-increments" or
+ action == "remove-older-than"):
+ commandline_error("Only use one argument, "
+ "the root of the backup directory")
+ if l > 2 and action != "calculate-average":
+ commandline_error("Too many arguments given")
+
+def commandline_error(message):
+ sys.stderr.write("Error: %s\n" % message)
+ sys.stderr.write("See the rdiff-backup manual page for instructions\n")
+ sys.exit(1)
+
+def misc_setup(rps):
+ """Set default change ownership flag, umask, relay regexps"""
+ if ((len(rps) == 2 and rps[1].conn.os.getuid() == 0) or
+ (len(rps) < 2 and os.getuid() == 0)):
+ # Allow change_ownership if destination connection is root
+ for conn in Globals.connections:
+ conn.Globals.set('change_ownership', 1)
+ for rp in rps: rp.setdata() # Update with userinfo
+
+ os.umask(077)
+ Time.setcurtime(Globals.current_time)
+ FilenameMapping.set_init_quote_vals()
+ Globals.set("isclient", 1)
+ SetConnections.UpdateGlobal("client_conn", Globals.local_connection)
+
+ # This is because I originally didn't think compiled regexps
+ # could be pickled, and so must be compiled on remote side.
+ Globals.postset_regexp('no_compression_regexp',
+ Globals.no_compression_regexp_string)
+
+ for conn in Globals.connections: Robust.install_signal_handlers()
+
+def take_action(rps):
+ """Do whatever action says"""
+ if action == "server": PipeConnection(sys.stdin, sys.stdout).Server()
+ elif action == "backup": Backup(rps[0], rps[1])
+ elif action == "restore": restore(*rps)
+ elif action == "restore-as-of": RestoreAsOf(rps[0], rps[1])
+ elif action == "mirror": Mirror(rps[0], rps[1])
+ elif action == "test-server": SetConnections.TestConnections()
+ elif action == "list-increments": ListIncrements(rps[0])
+ elif action == "remove-older-than": RemoveOlderThan(rps[0])
+ elif action == "calculate-average": CalculateAverage(rps)
+ else: raise AssertionError("Unknown action " + action)
+
+def cleanup():
+ """Do any last minute cleaning before exiting"""
+ Log("Cleaning up", 6)
+ Log.close_logfile()
+ if not Globals.server: SetConnections.CloseConnections()
+
+def Main(arglist):
+ """Start everything up!"""
+ parse_cmdlineoptions(arglist)
+ set_action()
+ rps = SetConnections.InitRPs(args, remote_schema, remote_cmd)
+ misc_setup(rps)
+ take_action(rps)
+ cleanup()
+
+
+def Mirror(src_rp, dest_rp):
+ """Turn dest_path into a copy of src_path"""
+ Log("Mirroring %s to %s" % (src_rp.path, dest_rp.path), 5)
+ mirror_check_paths(src_rp, dest_rp)
+ # Since no "rdiff-backup-data" dir, use root of destination.
+ SetConnections.UpdateGlobal('rbdir', dest_rp)
+ SetConnections.BackupInitConnections(src_rp.conn, dest_rp.conn)
+ HighLevel.Mirror(src_rp, dest_rp)
+
+def mirror_check_paths(rpin, rpout):
+ """Check paths and return rpin, rpout"""
+ if not rpin.lstat():
+ Log.FatalError("Source directory %s does not exist" % rpin.path)
+ if rpout.lstat() and not force: Log.FatalError(
+"""Destination %s exists so continuing could mess it up. Run
+rdiff-backup with the --force option if you want to mirror anyway.""" %
+ rpout.path)
+
+
+def Backup(rpin, rpout):
+ """Backup, possibly incrementally, src_path to dest_path."""
+ SetConnections.BackupInitConnections(rpin.conn, rpout.conn)
+ backup_init_select(rpin, rpout)
+ backup_init_dirs(rpin, rpout)
+ RSI = Globals.backup_writer.Resume.ResumeCheck()
+ SaveState.init_filenames()
+ if prevtime:
+ Time.setprevtime(prevtime)
+ HighLevel.Mirror_and_increment(rpin, rpout, incdir, RSI)
+ else: HighLevel.Mirror(rpin, rpout, incdir, RSI)
+ backup_touch_curmirror(rpin, rpout)
+
+def backup_init_select(rpin, rpout):
+ """Create Select objects on source and dest connections"""
+ rpin.conn.Globals.set_select(DSRPath(1, rpin), select_opts,
+ None, *select_files)
+ rpout.conn.Globals.set_select(DSRPath(None, rpout), select_mirror_opts, 1)
+
+def backup_init_dirs(rpin, rpout):
+ """Make sure rpin and rpout are valid, init data dir and logging"""
+ global datadir, incdir, prevtime
+ if rpout.lstat() and not rpout.isdir():
+ if not force: Log.FatalError("Destination %s exists and is not a "
+ "directory" % rpout.path)
+ else:
+ Log("Deleting %s" % rpout.path, 3)
+ rpout.delete()
+
+ if not rpin.lstat():
+ Log.FatalError("Source directory %s does not exist" % rpin.path)
+ elif not rpin.isdir():
+ Log.FatalError("Source %s is not a directory" % rpin.path)
+
+ datadir = rpout.append("rdiff-backup-data")
+ SetConnections.UpdateGlobal('rbdir', datadir)
+ incdir = RPath(rpout.conn, os.path.join(datadir.path, "increments"))
+ prevtime = backup_get_mirrortime()
+
+ if rpout.lstat():
+ if rpout.isdir() and not rpout.listdir(): # rpout is empty dir
+ rpout.chmod(0700) # just make sure permissions aren't too lax
+ elif not datadir.lstat() and not force: Log.FatalError(
+"""Destination directory %s exists, but does not look like a
+rdiff-backup directory. Running rdiff-backup like this could mess up
+what is currently in it. If you want to overwrite it, run
+rdiff-backup with the --force option.""" % rpout.path)
+
+ if not rpout.lstat():
+ try: rpout.mkdir()
+ except os.error:
+ Log.FatalError("Unable to create directory %s" % rpout.path)
+ if not datadir.lstat(): datadir.mkdir()
+ if Log.verbosity > 0:
+ Log.open_logfile(datadir.append("backup.log"))
+ backup_warn_if_infinite_regress(rpin, rpout)
+
+def backup_warn_if_infinite_regress(rpin, rpout):
+ """Warn user if destination area contained in source area"""
+ if rpout.conn is rpin.conn: # it's meaningful to compare paths
+ if ((len(rpout.path) > len(rpin.path)+1 and
+ rpout.path[:len(rpin.path)] == rpin.path and
+ rpout.path[len(rpin.path)] == '/') or
+ (rpin.path == "." and rpout.path[0] != '/' and
+ rpout.path[:2] != '..')):
+ # Just a few heuristics, we don't have to get every case
+ if Globals.backup_reader.Globals.select_source.Select(rpout): Log(
+"""Warning: The destination directory '%s' may be contained in the
+source directory '%s'. This could cause an infinite regress. You
+may need to use the --exclude option.""" % (rpout.path, rpin.path), 2)
+
+def backup_get_mirrorrps():
+ """Return list of current_mirror rps"""
+ if not datadir.isdir(): return []
+ mirrorrps = [datadir.append(fn) for fn in datadir.listdir()
+ if fn.startswith("current_mirror.")]
+ return filter(lambda rp: rp.isincfile(), mirrorrps)
+
+def backup_get_mirrortime():
+ """Return time in seconds of previous mirror, or None if cannot"""
+ mirrorrps = backup_get_mirrorrps()
+ if not mirrorrps: return None
+ if len(mirrorrps) > 1:
+ Log(
+"""Warning: duplicate current_mirror files found. Perhaps something
+went wrong during your last backup? Using """ + mirrorrps[-1].path, 2)
+
+ timestr = mirrorrps[-1].getinctime()
+ return Time.stringtotime(timestr)
+
+def backup_touch_curmirror(rpin, rpout):
+ """Make a file like current_mirror.time.data to record time
+
+ Also updates rpout so mod times don't get messed up.
+
+ """
+ map(RPath.delete, backup_get_mirrorrps())
+ mirrorrp = datadir.append("current_mirror.%s.%s" % (Time.curtimestr,
+ "data"))
+ Log("Touching mirror marker %s" % mirrorrp.path, 6)
+ mirrorrp.touch()
+ RPath.copy_attribs(rpin, rpout)
+
+
+def restore(src_rp, dest_rp = None):
+ """Main restoring function
+
+ Here src_rp should be an increment file, and if dest_rp is
+ missing it defaults to the base of the increment.
+
+ """
+ rpin, rpout = restore_check_paths(src_rp, dest_rp)
+ time = Time.stringtotime(rpin.getinctime())
+ restore_common(rpin, rpout, time)
+
+def RestoreAsOf(rpin, target):
+ """Secondary syntax for restore operation
+
+ rpin - RPath of mirror file to restore (not nec. with correct index)
+ target - RPath of place to put restored file
+
+ """
+ restore_check_paths(rpin, target, 1)
+ try: time = Time.genstrtotime(restore_timestr)
+ except Time.TimeException, exc: Log.FatalError(str(exc))
+ restore_common(rpin, target, time)
+
+def restore_common(rpin, target, time):
+ """Restore operation common to Restore and RestoreAsOf"""
+ Log("Starting Restore", 5)
+ mirror_root, index = restore_get_root(rpin)
+ mirror = mirror_root.new_index(index)
+ inc_rpath = datadir.append_path('increments', index)
+ restore_init_select(mirror_root, target)
+ Log.open_logfile(datadir.append("restore.log"))
+ Restore.Restore(inc_rpath, mirror, target, time)
+
+def restore_check_paths(rpin, rpout, restoreasof = None):
+ """Check paths and return pair of corresponding rps"""
+ if not restoreasof:
+ if not rpin.lstat():
+ Log.FatalError("Source file %s does not exist" % rpin.path)
+ elif not rpin.isincfile():
+ Log.FatalError("""File %s does not look like an increment file.
+
+Try restoring from an increment file (the filenames look like
+"foobar.2001-09-01T04:49:04-07:00.diff").""" % rpin.path)
+
+ if not rpout: rpout = RPath(Globals.local_connection,
+ rpin.getincbase_str())
+ if rpout.lstat():
+ Log.FatalError("Restore target %s already exists, "
+ "and will not be overwritten." % rpout.path)
+ return rpin, rpout
+
+def restore_init_select(rpin, rpout):
+ """Initialize Select
+
+ Unlike the backup selections, here they are on the local
+ connection, because the backup operation is pipelined in a way
+ the restore operation isn't.
+
+ """
+ Globals.set_select(DSRPath(1, rpin), select_mirror_opts, None)
+ Globals.set_select(DSRPath(None, rpout), select_opts, None, *select_files)
+
+def restore_get_root(rpin):
+ """Return (mirror root, index) and set the data dir
+
+ The idea here is to keep backing up on the path until we find
+ a directory that contains "rdiff-backup-data". That is the
+ mirror root. If the path from there starts
+ "rdiff-backup-data/increments*", then the index is the
+ remainder minus that. Otherwise the index is just the path
+ minus the root.
+
+ All this could fail if the increment file is pointed to in a
+ funny way, using symlinks or somesuch.
+
+ """
+ global datadir
+ if rpin.isincfile(): relpath = rpin.getincbase().path
+ else: relpath = rpin.path
+ pathcomps = os.path.join(rpin.conn.os.getcwd(), relpath).split("/")
+ assert len(pathcomps) >= 2 # path should be relative to /
+
+ i = len(pathcomps)
+ while i >= 2:
+ parent_dir = RPath(rpin.conn, "/".join(pathcomps[:i]))
+ if (parent_dir.isdir() and
+ "rdiff-backup-data" in parent_dir.listdir()): break
+ i = i-1
+ else: Log.FatalError("Unable to find rdiff-backup-data directory")
+
+ rootrp = parent_dir
+ Log("Using mirror root directory %s" % rootrp.path, 6)
+
+ datadir = rootrp.append_path("rdiff-backup-data")
+ SetConnections.UpdateGlobal('rbdir', datadir)
+ if not datadir.isdir():
+ Log.FatalError("Unable to read rdiff-backup-data directory %s" %
+ datadir.path)
+
+ from_datadir = tuple(pathcomps[i:])
+ if not from_datadir or from_datadir[0] != "rdiff-backup-data":
+ return (rootrp, from_datadir) # in mirror, not increments
+ assert from_datadir[1] == "increments"
+ return (rootrp, from_datadir[2:])
+
+
+def ListIncrements(rp):
+ """Print out a summary of the increments and their times"""
+ mirror_root, index = restore_get_root(rp)
+ Globals.rbdir = datadir = \
+ mirror_root.append_path("rdiff-backup-data")
+ mirrorrp = mirror_root.new_index(index)
+ inc_rpath = datadir.append_path('increments', index)
+ incs = Restore.get_inclist(inc_rpath)
+ mirror_time = Restore.get_mirror_time()
+ if Globals.parsable_output:
+ print Manage.describe_incs_parsable(incs, mirror_time, mirrorrp)
+ else: print Manage.describe_incs_human(incs, mirror_time, mirrorrp)
+
+
+def CalculateAverage(rps):
+ """Print out the average of the given statistics files"""
+ statobjs = map(lambda rp: StatsObj().read_stats_from_rp(rp), rps)
+ average_stats = StatsObj().set_to_average(statobjs)
+ print average_stats.get_stats_logstring(
+ "Average of %d stat files" % len(rps))
+
+
+def RemoveOlderThan(rootrp):
+ """Remove all increment files older than a certain time"""
+ datadir = rootrp.append("rdiff-backup-data")
+ if not datadir.lstat() or not datadir.isdir():
+ Log.FatalError("Unable to open rdiff-backup-data dir %s" %
+ (datadir.path,))
+
+ try: time = Time.genstrtotime(remove_older_than_string)
+ except Time.TimeException, exc: Log.FatalError(str(exc))
+ timep = Time.timetopretty(time)
+ Log("Deleting increment(s) before %s" % timep, 4)
+
+ itimes = [Time.stringtopretty(inc.getinctime())
+ for inc in Restore.get_inclist(datadir.append("increments"))
+ if Time.stringtotime(inc.getinctime()) < time]
+
+ if not itimes:
+ Log.FatalError("No increments older than %s found" % timep)
+ inc_pretty_time = "\n".join(itimes)
+ if len(itimes) > 1 and not force:
+ Log.FatalError("Found %d relevant increments, dated:\n%s"
+ "\nIf you want to delete multiple increments in this way, "
+ "use the --force." % (len(itimes), inc_pretty_time))
+
+ Log("Deleting increment%sat times:\n%s" %
+ (len(itimes) == 1 and " " or "s ", inc_pretty_time), 3)
+ Manage.delete_earlier_than(datadir, time)
+
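restore_get_root above climbs the path one component at a time until it finds an ancestor containing an rdiff-backup-data directory; that ancestor is the mirror root, and the leftover components are the index. A local-filesystem sketch of the same walk (find_mirror_root is an illustrative name; the real function works through an RPath on a possibly remote connection):

import os

def find_mirror_root(path):
    # return (root, index_components) for the deepest ancestor of path
    # holding an rdiff-backup-data directory, or None if there is none
    comps = os.path.abspath(path).split("/")
    for i in range(len(comps), 1, -1):
        parent = "/".join(comps[:i])
        if os.path.isdir(os.path.join(parent, "rdiff-backup-data")):
            return parent, tuple(comps[i:])
    return None
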
diff --git a/rdiff-backup/rdiff_backup/Make.old b/rdiff-backup/rdiff_backup/Make.old
new file mode 100755
index 0000000..2b79ffe
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/Make.old
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+"""Read component files of rdiff-backup, and glue them together after
+removing unnecessary bits."""
+
+import os
+
+def mystrip(filename):
+ """Open filename, read input, strip appropriately, and return contents"""
+ fp = open(filename, "r")
+ lines = fp.readlines()
+ fp.close()
+
+ i = 0
+ while(lines[i][:60] !=
+ "############################################################"):
+ i = i+1
+
+ return "".join(lines[i:]).strip() + "\n\n\n"
+
+
+
+files = ["globals.py", "static.py", "lazy.py", "log.py", "ttime.py",
+ "iterfile.py", "rdiff.py", "connection.py", "rpath.py",
+ "hardlink.py", "robust.py", "rorpiter.py",
+ "destructive_stepping.py", "selection.py",
+ "filename_mapping.py", "statistics.py", "increment.py",
+ "restore.py", "manage.py", "highlevel.py",
+ "setconnections.py", "main.py"]
+
+os.system("cp header.py rdiff-backup")
+
+outfp = open("rdiff-backup", "a")
+for file in files:
+ outfp.write(mystrip(file))
+outfp.close()
+
+os.system("chmod 755 rdiff-backup")
diff --git a/rdiff-backup/rdiff_backup/MiscStats.py b/rdiff-backup/rdiff_backup/MiscStats.py
new file mode 100644
index 0000000..cd62dd6
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/MiscStats.py
@@ -0,0 +1,72 @@
+from statistics import *
+
+"""Misc statistics methods, pertaining to dir and session stat files"""
+# This is the RPath of the directory statistics file, and the
+# associated open file. It will hold a line of statistics for
+# each directory that is backed up.
+_dir_stats_rp = None
+_dir_stats_fp = None
+
+# This goes at the beginning of the directory statistics file and
+# explains the format.
+_dir_stats_header = """# rdiff-backup directory statistics file
+#
+# Each line is in the following format:
+# RelativeDirName %s
+""" % " ".join(StatsObj.stat_file_attrs)
+
+def open_dir_stats_file():
+ """Open directory statistics file, write header"""
+ global _dir_stats_fp, _dir_stats_rp
+ assert not _dir_stats_fp, "Directory file already open"
+
+ if Globals.compression: suffix = "data.gz"
+ else: suffix = "data"
+ _dir_stats_rp = Inc.get_inc(Globals.rbdir.append("directory_statistics"),
+ Time.curtime, suffix)
+
+ if _dir_stats_rp.lstat():
+ Log("Warning, statistics file %s already exists, appending" %
+ _dir_stats_rp.path, 2)
+ _dir_stats_fp = _dir_stats_rp.open("ab", Globals.compression)
+ else: _dir_stats_fp = _dir_stats_rp.open("wb", Globals.compression)
+ _dir_stats_fp.write(_dir_stats_header)
+
+def write_dir_stats_line(statobj, index):
+ """Write info from statobj about rpath to statistics file"""
+ if Globals.null_separator:
+ _dir_stats_fp.write(statobj.get_stats_line(index, None) + "\0")
+ else: _dir_stats_fp.write(statobj.get_stats_line(index) + "\n")
+
+def close_dir_stats_file():
+ """Close directory statistics file if its open"""
+ global _dir_stats_fp
+ if _dir_stats_fp:
+ _dir_stats_fp.close()
+ _dir_stats_fp = None
+
+def write_session_statistics(statobj):
+ """Write session statistics into file, log"""
+ stat_inc = Inc.get_inc(Globals.rbdir.append("session_statistics"),
+ Time.curtime, "data")
+ statobj.StartTime = Time.curtime
+ statobj.EndTime = time.time()
+
+ # include hardlink data and dir stats in size of increments
+ if Globals.preserve_hardlinks and Hardlink.final_inc:
+ # include hardlink data in size of increments
+ statobj.IncrementFiles += 1
+ statobj.IncrementFileSize += Hardlink.final_inc.getsize()
+ if _dir_stats_rp and _dir_stats_rp.lstat():
+ statobj.IncrementFiles += 1
+ statobj.IncrementFileSize += _dir_stats_rp.getsize()
+
+ statobj.write_stats_to_rp(stat_inc)
+ if Globals.print_statistics:
+ message = statobj.get_stats_logstring("Session statistics")
+ Log.log_to_file(message)
+ Globals.client_conn.sys.stdout.write(message)
+
+
+from increment import *
+import Hardlink
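Each backed-up directory gets one line in the statistics file: the directory's name followed by the values of StatsObj.stat_file_attrs, with lines separated by newlines or nulls per the null_separator setting. A sketch of emitting such a line (stats_line and the two attribute names are illustrative; the real formatting lives in StatsObj.get_stats_line):

stat_file_attrs = ["SourceFiles", "SourceFileSize"]   # illustrative subset

def stats_line(dirname, stats, null_separator = None):
    fields = [dirname] + [str(stats[attr]) for attr in stat_file_attrs]
    return " ".join(fields) + (null_separator and "\0" or "\n")

line = stats_line("some/dir", {"SourceFiles": 3, "SourceFileSize": 4096})
assert line == "some/dir 3 4096\n"
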
diff --git a/rdiff-backup/rdiff_backup/Rdiff.py b/rdiff-backup/rdiff_backup/Rdiff.py
new file mode 100644
index 0000000..c9895cb
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/Rdiff.py
@@ -0,0 +1,181 @@
+import os, popen2
+
+#######################################################################
+#
+# rdiff - Invoke rdiff utility to make signatures, deltas, or patch
+#
+# All these operations should be done in a relatively safe manner
+# using RobustAction and the like.
+
+class RdiffException(Exception): pass
+
+def get_signature(rp):
+ """Take signature of rpin file and return in file object"""
+ Log("Getting signature of %s" % rp.path, 7)
+ return rp.conn.Rdiff.Popen(['rdiff', 'signature', rp.path])
+
+def get_delta_sigfileobj(sig_fileobj, rp_new):
+ """Like get_delta but signature is in a file object"""
+ sig_tf = TempFileManager.new(rp_new, None)
+ sig_tf.write_from_fileobj(sig_fileobj)
+ rdiff_popen_obj = get_delta_sigrp(sig_tf, rp_new)
+ rdiff_popen_obj.set_thunk(sig_tf.delete)
+ return rdiff_popen_obj
+
+def get_delta_sigrp(rp_signature, rp_new):
+ """Take signature rp and new rp, return delta file object"""
+ assert rp_signature.conn is rp_new.conn
+ Log("Getting delta of %s with signature %s" %
+ (rp_new.path, rp_signature.path), 7)
+ return rp_new.conn.Rdiff.Popen(['rdiff', 'delta',
+ rp_signature.path, rp_new.path])
+
+def write_delta_action(basis, new, delta, compress = None):
+ """Return action writing delta which brings basis to new
+
+ If compress is true, the output of rdiff will be gzipped
+ before written to delta.
+
+ """
+ sig_tf = TempFileManager.new(new, None)
+ delta_tf = TempFileManager.new(delta)
+ def init(): write_delta(basis, new, delta_tf, compress, sig_tf)
+ return Robust.make_tf_robustaction(init, (sig_tf, delta_tf),
+ (None, delta))
+
+def write_delta(basis, new, delta, compress = None, sig_tf = None):
+ """Write rdiff delta which brings basis to new"""
+ Log("Writing delta %s from %s -> %s" %
+ (basis.path, new.path, delta.path), 7)
+ if not sig_tf: sig_tf = TempFileManager.new(new, None)
+ sig_tf.write_from_fileobj(get_signature(basis))
+ delta.write_from_fileobj(get_delta_sigrp(sig_tf, new), compress)
+ sig_tf.delete()
+
+def patch_action(rp_basis, rp_delta, rp_out = None,
+ out_tf = None, delta_compressed = None):
+ """Return RobustAction which patches rp_basis with rp_delta
+
+ If rp_out is None, put output in rp_basis. Will use TempFile
+ out_tf it is specified. If delta_compressed is true, the
+ delta file will be decompressed before processing with rdiff.
+
+ """
+ if not rp_out: rp_out = rp_basis
+ else: assert rp_out.conn is rp_basis.conn
+ if (delta_compressed or
+ not (isinstance(rp_delta, RPath) and isinstance(rp_basis, RPath)
+ and rp_basis.conn is rp_delta.conn)):
+ if delta_compressed:
+ assert isinstance(rp_delta, RPath)
+ return patch_fileobj_action(rp_basis, rp_delta.open('rb', 1),
+ rp_out, out_tf)
+ else: return patch_fileobj_action(rp_basis, rp_delta.open('rb'),
+ rp_out, out_tf)
+
+ # Files are uncompressed on same connection, run rdiff
+ if out_tf is None: out_tf = TempFileManager.new(rp_out)
+ def init():
+ Log("Patching %s using %s to %s via %s" %
+ (rp_basis.path, rp_delta.path, rp_out.path, out_tf.path), 7)
+ cmdlist = ["rdiff", "patch", rp_basis.path,
+ rp_delta.path, out_tf.path]
+ return_val = rp_basis.conn.os.spawnvp(os.P_WAIT, 'rdiff', cmdlist)
+ out_tf.setdata()
+ if return_val != 0 or not out_tf.lstat():
+ raise RdiffException("Error running %s" % cmdlist)
+ return Robust.make_tf_robustaction(init, (out_tf,), (rp_out,))
+
+def patch_fileobj_action(rp_basis, delta_fileobj, rp_out = None,
+ out_tf = None, delta_compressed = None):
+ """Like patch_action but diff is given in fileobj form
+
+ Nest a writing of a tempfile with the actual patching to
+ create a new action. We have to nest so that the tempfile
+ will be around until the patching finishes.
+
+ """
+ if not rp_out: rp_out = rp_basis
+ delta_tf = TempFileManager.new(rp_out, None)
+ def init(): delta_tf.write_from_fileobj(delta_fileobj)
+ def final(init_val): delta_tf.delete()
+ def error(exc, ran_init, init_val): delta_tf.delete()
+ write_delta_action = RobustAction(init, final, error)
+ return Robust.chain(write_delta_action, patch_action(rp_basis, delta_tf,
+ rp_out, out_tf))
+
+def patch_with_attribs_action(rp_basis, rp_delta, rp_out = None):
+ """Like patch_action, but also transfers attributs from rp_delta"""
+ if not rp_out: rp_out = rp_basis
+ tf = TempFileManager.new(rp_out)
+ return Robust.chain_nested(patch_action(rp_basis, rp_delta, rp_out, tf),
+ Robust.copy_attribs_action(rp_delta, tf))
+
+def copy_action(rpin, rpout):
+ """Use rdiff to copy rpin to rpout, conserving bandwidth"""
+ if not rpin.isreg() or not rpout.isreg() or rpin.conn is rpout.conn:
+ # rdiff not applicable, fallback to regular copying
+ return Robust.copy_action(rpin, rpout)
+
+ Log("Rdiff copying %s to %s" % (rpin.path, rpout.path), 6)
+ delta_tf = TempFileManager.new(rpout, None)
+ return Robust.chain(write_delta_action(rpout, rpin, delta_tf),
+ patch_action(rpout, delta_tf),
+ RobustAction(lambda: None, delta_tf.delete,
+ lambda exc: delta_tf.delete))
+
+
+class Popen:
+ """Spawn process and treat stdout as file object
+
+ Instead of using popen, which evaluates arguments with the shell
+ and thus may lead to security holes (thanks to Jamie Heilman for
+ this point), use the popen2 class and discard stdin.
+
+ When closed, this object checks to make sure the process exited
+ cleanly, and executes closing_thunk.
+
+ """
+ def __init__(self, cmdlist, closing_thunk = None):
+ """RdiffFilehook initializer
+
+ fileobj is the file we are emulating
+ thunk is called with no parameters right after the file is closed
+
+ """
+ assert type(cmdlist) is types.ListType
+ self.p3obj = popen2.Popen3(cmdlist)
+ self.fileobj = self.p3obj.fromchild
+ self.closing_thunk = closing_thunk
+ self.cmdlist = cmdlist
+
+ def set_thunk(self, closing_thunk):
+ """Set closing_thunk if not already"""
+ assert not self.closing_thunk
+ self.closing_thunk = closing_thunk
+
+ def read(self, length = -1): return self.fileobj.read(length)
+
+ def close(self):
+ closeval = self.fileobj.close()
+ if self.closing_thunk: self.closing_thunk()
+ exitval = self.p3obj.poll()
+ if exitval == 0: return closeval
+ elif exitval == 256:
+ Log("Failure probably because %s couldn't be found in PATH."
+ % self.cmdlist[0], 2)
+ assert 0, "rdiff not found"
+ elif exitval == -1:
+ # There may be a race condition where a process closes
+ # but doesn't provide its exitval fast enough.
+ Log("Waiting for process to close", 8)
+ time.sleep(0.2)
+ exitval = self.p3obj.poll()
+ if exitval == 0: return closeval
+ raise RdiffException("%s exited with non-zero value %d" %
+ (self.cmdlist, exitval))
+
+
+from log import *
+from robust import *
+
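Everything in this module shells out to the rdiff(1) binary: signature summarizes the basis file into block checksums, delta diffs the new file against that summary, and patch applies the delta to the basis. A bare sketch of that pipeline using subprocess in place of the popen2/spawnvp plumbing above (run and rdiff_copy are illustrative names; temp files and the RobustAction machinery are omitted):

import subprocess

def run(cmdlist):
    # pass an argument list, never a shell string -- the same safety
    # point the Popen wrapper above makes
    if subprocess.call(cmdlist) != 0:
        raise OSError("Error running %s" % cmdlist)

def rdiff_copy(basis, new, sig, delta, out):
    # reproduce `new` at `out`, transferring only signature and delta
    run(["rdiff", "signature", basis, sig])
    run(["rdiff", "delta", sig, new, delta])
    run(["rdiff", "patch", basis, delta, out])
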
diff --git a/rdiff-backup/rdiff_backup/SetConnections.py b/rdiff-backup/rdiff_backup/SetConnections.py
new file mode 100644
index 0000000..be3fdfd
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/SetConnections.py
@@ -0,0 +1,219 @@
+#######################################################################
+#
+# setconnections - Parse initial arguments and establish connections
+#
+
+"""Parse args and setup connections
+
+The methods in this class are used once by Main to parse file
+descriptions like bescoto@folly.stanford.edu::/usr/bin/ls and to
+set up the related connections.
+
+"""
+
+class SetConnectionsException(Exception): pass
+
+
+# This is the schema that determines how rdiff-backup will open a
+# pipe to the remote system. If the file is given as A::B, %s will
+# be substituted with A in the schema.
+__cmd_schema = 'ssh -C %s rdiff-backup --server'
+__cmd_schema_no_compress = 'ssh %s rdiff-backup --server'
+
+# This is a list of remote commands used to start the connections.
+# The first is None because it is the local connection.
+__conn_remote_cmds = [None]
+
+def InitRPs(arglist, remote_schema = None, remote_cmd = None):
+ """Map the given file descriptions into rpaths and return list"""
+ global __cmd_schema
+ if remote_schema: __cmd_schema = remote_schema
+ elif not Globals.ssh_compression: __cmd_schema = __cmd_schema_no_compress
+
+ if not arglist: return []
+ desc_pairs = map(parse_file_desc, arglist)
+
+ if filter(lambda x: x[0], desc_pairs): # True if any host_info found
+ if remote_cmd:
+ Log.FatalError("The --remote-cmd flag is not compatible "
+ "with remote file descriptions.")
+ elif remote_schema:
+ Log("Remote schema option ignored - no remote file "
+ "descriptions.", 2)
+
+ cmd_pairs = map(desc2cmd_pairs, desc_pairs)
+ if remote_cmd: # last file description gets remote_cmd
+ cmd_pairs[-1] = (remote_cmd, cmd_pairs[-1][1])
+ return map(cmdpair2rp, cmd_pairs)
+
+def cmdpair2rp(cmd_pair):
+ """Return RPath from cmd_pair (remote_cmd, filename)"""
+ cmd, filename = cmd_pair
+ if cmd: conn = init_connection(cmd)
+ else: conn = Globals.local_connection
+ return RPath(conn, filename)
+
+def desc2cmd_pairs(desc_pair):
+ """Return pair (remote_cmd, filename) from desc_pair"""
+ host_info, filename = desc_pair
+ if not host_info: return (None, filename)
+ else: return (fill_schema(host_info), filename)
+
+def parse_file_desc(file_desc):
+ """Parse file description returning pair (host_info, filename)
+
+ In other words, bescoto@folly.stanford.edu::/usr/bin/ls =>
+ ("bescoto@folly.stanford.edu", "/usr/bin/ls"). The
+ complication is to allow for quoting of : by a \. If the
+ string is not separated by :, then the host_info is None.
+
+ """
+ def check_len(i):
+ if i >= len(file_desc):
+ raise SetConnectionsException(
+ "Unexpected end to file description %s" % file_desc)
+
+ host_info_list, i, last_was_quoted = [], 0, None
+ while 1:
+ if i == len(file_desc):
+ return (None, file_desc)
+
+ if file_desc[i] == '\\':
+ i = i+1
+ check_len(i)
+ last_was_quoted = 1
+ elif (file_desc[i] == ":" and i > 0 and file_desc[i-1] == ":"
+ and not last_was_quoted):
+ host_info_list.pop() # Remove last colon from name
+ break
+ else: last_was_quoted = None
+ host_info_list.append(file_desc[i])
+ i = i+1
+
+ check_len(i+1)
+ return ("".join(host_info_list), file_desc[i+1:])
+
+def fill_schema(host_info):
+ """Fills host_info into the schema and returns remote command"""
+ return __cmd_schema % host_info
+
+def init_connection(remote_cmd):
+ """Run remote_cmd, register connection, and then return it
+
+ If remote_cmd is None, then the local connection will be
+ returned. This also updates some settings on the remote side,
+ like global settings, its connection number, and verbosity.
+
+ """
+ if not remote_cmd: return Globals.local_connection
+
+ Log("Executing " + remote_cmd, 4)
+ stdin, stdout = os.popen2(remote_cmd)
+ conn_number = len(Globals.connections)
+ conn = PipeConnection(stdout, stdin, conn_number)
+
+ check_connection_version(conn, remote_cmd)
+ Log("Registering connection %d" % conn_number, 7)
+ init_connection_routing(conn, conn_number, remote_cmd)
+ init_connection_settings(conn)
+ return conn
+
+def check_connection_version(conn, remote_cmd):
+ """Log warning if connection has different version"""
+ try: remote_version = conn.Globals.get('version')
+ except ConnectionReadError, exception:
+ Log.FatalError("""%s
+
+Couldn't start up the remote connection by executing
+
+ %s
+
+Remember that, under the default settings, rdiff-backup must be
+installed in the PATH on the remote system. See the man page for more
+information.""" % (exception, remote_cmd))
+
+ if remote_version != Globals.version:
+ Log("Warning: Local version %s does not match remote version %s."
+ % (Globals.version, remote_version), 2)
+
+def init_connection_routing(conn, conn_number, remote_cmd):
+ """Called by init_connection, establish routing, conn dict"""
+ Globals.connection_dict[conn_number] = conn
+
+ conn.SetConnections.init_connection_remote(conn_number)
+ for other_remote_conn in Globals.connections[1:]:
+ conn.SetConnections.add_redirected_conn(
+ other_remote_conn.conn_number)
+ other_remote_conn.SetConnections.add_redirected_conn(conn_number)
+
+ Globals.connections.append(conn)
+ __conn_remote_cmds.append(remote_cmd)
+
+def init_connection_settings(conn):
+ """Tell new conn about log settings and updated globals"""
+ conn.Log.setverbosity(Log.verbosity)
+ conn.Log.setterm_verbosity(Log.term_verbosity)
+ for setting_name in Globals.changed_settings:
+ conn.Globals.set(setting_name, Globals.get(setting_name))
+
+def init_connection_remote(conn_number):
+ """Run on server side to tell self that have given conn_number"""
+ Globals.connection_number = conn_number
+ Globals.local_connection.conn_number = conn_number
+ Globals.connection_dict[0] = Globals.connections[1]
+ Globals.connection_dict[conn_number] = Globals.local_connection
+
+def add_redirected_conn(conn_number):
+ """Run on server side - tell about redirected connection"""
+ Globals.connection_dict[conn_number] = \
+ RedirectedConnection(conn_number)
+
+def UpdateGlobal(setting_name, val):
+ """Update value of global variable across all connections"""
+ for conn in Globals.connections:
+ conn.Globals.set(setting_name, val)
+
+def BackupInitConnections(reading_conn, writing_conn):
+ """Backup specific connection initialization"""
+ reading_conn.Globals.set("isbackup_reader", 1)
+ writing_conn.Globals.set("isbackup_writer", 1)
+ UpdateGlobal("backup_reader", reading_conn)
+ UpdateGlobal("backup_writer", writing_conn)
+
+def CloseConnections():
+ """Close all connections. Run by client"""
+ assert not Globals.server
+ for conn in Globals.connections: conn.quit()
+ del Globals.connections[1:] # Only leave local connection
+ Globals.connection_dict = {0: Globals.local_connection}
+ Globals.backup_reader = Globals.isbackup_reader = \
+ Globals.backup_writer = Globals.isbackup_writer = None
+
+def TestConnections():
+ """Test connections, printing results"""
+ if len(Globals.connections) == 1: print "No remote connections specified"
+ else:
+ for i in range(1, len(Globals.connections)): test_connection(i)
+
+def test_connection(conn_number):
+ """Test connection. conn_number 0 is the local connection"""
+ print "Testing server started by: ", __conn_remote_cmds[conn_number]
+ conn = Globals.connections[conn_number]
+ try:
+ assert conn.pow(2,3) == 8
+ assert conn.os.path.join("a", "b") == "a/b"
+ version = conn.reval("lambda: Globals.version")
+ except:
+ sys.stderr.write("Server tests failed\n")
+ raise
+ if not version == Globals.version:
+ print """Server may work, but there is a version mismatch:
+Local version: %s
+Remote version: %s""" % (Globals.version, version)
+ else: print "Server OK"
+
+
+from log import *
+from rpath import *
+from connection import *
+import Globals
diff --git a/rdiff-backup/rdiff_backup/Time.py b/rdiff-backup/rdiff_backup/Time.py
new file mode 100644
index 0000000..4eb2107
--- /dev/null
+++ b/rdiff-backup/rdiff_backup/Time.py
@@ -0,0 +1,199 @@
+import time, types, re
+import Globals
+
+#######################################################################
+#
+# Time - Provides time-related functions.
+#
+
+class TimeException(Exception): pass
+
+_interval_conv_dict = {"s": 1, "m": 60, "h": 3600, "D": 86400,
+ "W": 7*86400, "M": 30*86400, "Y": 365*86400}
+_integer_regexp = re.compile("^[0-9]+$")
+_interval_regexp = re.compile("^([0-9]+)([smhDWMY])")
+_genstr_date_regexp1 = re.compile("^(?P<year>[0-9]{4})[-/]"
+ "(?P<month>[0-9]{1,2})[-/](?P<day>[0-9]{1,2})$")
+_genstr_date_regexp2 = re.compile("^(?P<month>[0-9]{1,2})[-/]"
+ "(?P<day>[0-9]{1,2})[-/](?P<year>[0-9]{4})$")
+curtime = curtimestr = None
+prevtime = prevtimestr = None # set by setprevtime_local
+
+def setcurtime(curtime = None):
+ """Sets the current time in curtime and curtimestr on all systems"""
+ t = curtime or time.time()
+ for conn in Globals.connections:
+ conn.Time.setcurtime_local(t, timetostring(t))
+
+def setcurtime_local(timeinseconds, timestr):
+ """Only set the current time locally"""
+ global curtime, curtimestr
+ curtime, curtimestr = timeinseconds, timestr
+
+def setprevtime(timeinseconds):
+ """Sets the previous inc time in prevtime and prevtimestr"""
+ assert timeinseconds > 0, timeinseconds
+ timestr = timetostring(timeinseconds)
+ for conn in Globals.connections:
+ conn.Time.setprevtime_local(timeinseconds, timestr)
+
+def setprevtime_local(timeinseconds, timestr):
+ """Like setprevtime but only set the local version"""
+ global prevtime, prevtimestr
+ prevtime, prevtimestr = timeinseconds, timestr
+
+def timetostring(timeinseconds):
+ """Return w3 datetime compliant listing of timeinseconds"""
+ return time.strftime("%Y-%m-%dT%H" + Globals.time_separator +
+ "%M" + Globals.time_separator + "%S",
+ time.localtime(timeinseconds)) + gettzd()
+
+def stringtotime(timestring):
+ """Return time in seconds from w3 timestring
+
+ If there is an error parsing the string, or it doesn't look
+ like a w3 datetime string, return None.
+
+ """
+ try:
+ date, daytime = timestring[:19].split("T")
+ year, month, day = map(int, date.split("-"))
+ hour, minute, second = map(int,
+ daytime.split(Globals.time_separator))
+ assert 1900 < year < 2100, year
+ assert 1 <= month <= 12
+ assert 1 <= day <= 31
+ assert 0 <= hour <= 23
+ assert 0 <= minute <= 59
+ assert 0 <= second <= 61 # leap seconds
+ timetuple = (year, month, day, hour, minute, second, -1, -1, -1)
+ if time.daylight:
+ utc_in_secs = time.mktime(timetuple) - time.altzone
+ else: utc_in_secs = time.mktime(timetuple) - time.timezone
+
+ return long(utc_in_secs) + tzdtoseconds(timestring[19:])
+ except (TypeError, ValueError, AssertionError): return None
+
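
timetostring and stringtotime are intended to be inverses; a quick sketch (assuming the default ':' time separator, DST corner cases aside):

    import time, Time

    t = long(time.time())
    s = Time.timetostring(t)          # e.g. "2002-06-16T07:12:39-07:00"
    assert Time.stringtotime(s) == t  # parses back to the same epoch value
    assert Time.stringtotime("not a datetime") is None
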
+def timetopretty(timeinseconds):
+ """Return pretty version of time"""
+ return time.asctime(time.localtime(timeinseconds))
+
+def stringtopretty(timestring):
+ """Return pretty version of time given w3 time string"""
+ return timetopretty(stringtotime(timestring))
+
+def inttopretty(seconds):
+ """Convert num of seconds to readable string like "2 hours"."""
+ partlist = []
+ hours, seconds = divmod(seconds, 3600)
+ if hours > 1: partlist.append("%d hours" % hours)
+ elif hours == 1: partlist.append("1 hour")
+
+ minutes, seconds = divmod(seconds, 60)
+ if minutes > 1: partlist.append("%d minutes" % minutes)
+ elif minutes == 1: partlist.append("1 minute")
+
+ if seconds == 1: partlist.append("1 second")
+ elif not partlist or seconds > 1:
+ if isinstance(seconds, int) or isinstance(seconds, long):
+ partlist.append("%s seconds" % seconds)
+ else: partlist.append("%.2f seconds" % seconds)
+ return " ".join(partlist)
+
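
For instance, a sketch of the expected strings:

    import Time

    assert Time.inttopretty(3661) == "1 hour 1 minute 1 second"
    assert Time.inttopretty(7325) == "2 hours 2 minutes 5 seconds"
    assert Time.inttopretty(0.5) == "0.50 seconds"
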
+def intstringtoseconds(interval_string):
+ """Convert a string expressing an interval (e.g. "4D2s") to seconds"""
+ def error():
+ raise TimeException("""Bad interval string "%s"
+
+Intervals are specified like 2Y (2 years) or 2h30m (2.5 hours). The
+allowed special characters are s, m, h, D, W, M, and Y. See the man
+page for more information.
+""" % interval_string)
+ if len(interval_string) < 2: error()
+
+ total = 0
+ while interval_string:
+ match = _interval_regexp.match(interval_string)
+ if not match: error()
+ num, ext = int(match.group(1)), match.group(2)
+ if not ext in _interval_conv_dict or num < 0: error()
+ total += num*_interval_conv_dict[ext]
+ interval_string = interval_string[match.end(0):]
+ return total
+
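
A sketch of how the interval syntax adds up:

    import Time

    assert Time.intstringtoseconds("4D2s") == 4*86400 + 2
    assert Time.intstringtoseconds("2h30m") == 2*3600 + 30*60
    try:
        Time.intstringtoseconds("5x")  # unknown suffix: raises TimeException
    except Time.TimeException:
        pass
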
+def gettzd():
+ """Return w3's timezone identification string.
+
+ Expressed as [+/-]hh:mm.  For instance, PST is -08:00.  The zone
+ coincides with what localtime(), etc., use.
+
+ """
+ if time.daylight: offset = -1 * time.altzone/60
+ else: offset = -1 * time.timezone/60
+ if offset > 0: prefix = "+"
+ elif offset < 0: prefix = "-"
+ else: return "Z" # time is already in UTC
+
+ # abs() before divmod: floor division would mangle negative
+ # fractional-hour offsets such as -03:30
+ hours, minutes = divmod(abs(offset), 60)
+ assert 0 <= hours <= 23
+ assert 0 <= minutes <= 59
+ return "%s%02d%s%02d" % (prefix, hours,
+ Globals.time_separator, minutes)
+
+def tzdtoseconds(tzd):
+ """Given w3 compliant TZD, return how far ahead UTC is"""
+ if tzd == "Z": return 0
+ assert len(tzd) == 6 # only accept forms like +08:00 for now
+ assert (tzd[0] == "-" or tzd[0] == "+") and \
+ tzd[3] == Globals.time_separator
+ return -60 * (60 * int(tzd[:3]) + int(tzd[4:]))
+
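
gettzd and tzdtoseconds mirror each other; a sketch of how they relate to the stdlib offsets (assuming the default ':' separator):

    import time, Time

    tzd = Time.gettzd()              # e.g. "-07:00", "+05:30", or "Z" for UTC
    offset = Time.tzdtoseconds(tzd)  # seconds by which UTC is ahead of local
    if time.daylight:
        assert offset == time.altzone
    else:
        assert offset == time.timezone
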
+def cmp(time1, time2):
+ """Compare time1 and time2 and return -1, 0, or 1"""
+ if type(time1) is types.StringType:
+ time1 = stringtotime(time1)
+ assert time1 is not None
+ if type(time2) is types.StringType:
+ time2 = stringtotime(time2)
+ assert time2 is not None
+
+ if time1 < time2: return -1
+ elif time1 == time2: return 0
+ else: return 1
+
+def genstrtotime(timestr, curtime = None):
+ """Convert a generic time string to a time in seconds"""
+ if curtime is None: curtime = globals()['curtime']
+ if timestr == "now": return curtime
+
+ def error():
+ raise TimeException("""Bad time string "%s"
+
+The acceptable time strings are intervals (like "3D64s"), w3-datetime
+strings, like "2002-04-26T04:22:01-07:00" (strings like
+"2002-04-26T04:22:01" are also acceptable - rdiff-backup will use the
+current time zone), or ordinary dates like 2/4/1997 or 2001-04-23
+(various combinations are acceptable, but the month always precedes
+the day).""" % timestr)
+
+ # Test for straight integer
+ if _integer_regexp.search(timestr): return int(timestr)
+
+ # Test for w3-datetime format, possibly missing tzd
+ t = stringtotime(timestr) or stringtotime(timestr+gettzd())
+ if t: return t
+
+ try: # test for an interval string like "2D", i.e. two days ago
+ return curtime - intstringtoseconds(timestr)
+ except TimeException: pass
+
+ # Now check for dates like 2001/3/23
+ match = _genstr_date_regexp1.search(timestr) or \
+ _genstr_date_regexp2.search(timestr)
+ if not match: error()
+ timestr = "%s-%02d-%02dT00:00:00%s" % (match.group('year'),
+ int(match.group('month')), int(match.group('day')), gettzd())
+ t = stringtotime(timestr)
+ if t: return t
+ else: error()
+
+
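
Putting it together, a sketch of the formats genstrtotime accepts (results depend on the local zone and on curtime; setcurtime_local is used here so the sketch needs no live connections):

    import time, Time

    t = time.time()
    Time.setcurtime_local(t, Time.timetostring(t))
    assert Time.genstrtotime("now") == t
    assert Time.genstrtotime("3D") == t - 3*86400     # interval: 3 days ago
    Time.genstrtotime("2002-04-26T04:22:01-07:00")    # w3 datetime with tzd
    Time.genstrtotime("2001-04-23")                   # date, midnight localtime
    Time.genstrtotime("4/23/2001")                    # month precedes day
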
diff --git a/rdiff-backup/rdiff_backup/connection.py b/rdiff-backup/rdiff_backup/connection.py
index deff577..74d413d 100644
--- a/rdiff-backup/rdiff_backup/connection.py
+++ b/rdiff-backup/rdiff_backup/connection.py
@@ -1,5 +1,4 @@
from __future__ import generators
-execfile("rdiff.py")
import types, os, tempfile, cPickle, shutil, traceback
#######################################################################
@@ -38,10 +37,9 @@ class LocalConnection(Connection):
self.conn_number = 0 # changed by SetConnections for server
def __getattr__(self, name):
- try: return globals()[name]
- except KeyError:
- try: return __builtins__.__dict__[name]
- except KeyError: raise NameError, name
+ if name in globals(): return globals()[name]
+ elif isinstance(__builtins__, dict): return __builtins__[name]
+ else: return __builtins__.__dict__[name]
def __setattr__(self, name, value):
globals()[name] = value
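
The extra isinstance check matters because CPython exposes __builtins__ as a module in the __main__ script but as a plain dict inside imported modules; a minimal illustration:

    # In an imported module __builtins__ is a dict; in __main__ it is a module.
    if isinstance(__builtins__, dict):
        builtin_len = __builtins__['len']
    else:
        builtin_len = __builtins__.__dict__['len']
    assert builtin_len is len
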
@@ -56,11 +54,6 @@ class LocalConnection(Connection):
def quit(self): pass
-Globals.local_connection = LocalConnection()
-Globals.connections.append(Globals.local_connection)
-# Following changed by server in SetConnections
-Globals.connection_dict[0] = Globals.local_connection
-
class ConnectionRequest:
"""Simple wrapper around a PipeConnection request"""
@@ -493,3 +486,30 @@ class VirtualFile:
line = self.readline()
if not line: break
yield line
+
+
+# everything has to be available here for remote connection's use, but
+# put at bottom to reduce circularities.
+import Globals, Time, Rdiff, Hardlink, FilenameMapping
+from static import *
+from lazy import *
+from log import *
+from iterfile import *
+from connection import *
+from rpath import *
+from robust import *
+from rorpiter import *
+from destructive_stepping import *
+from selection import *
+from statistics import *
+from increment import *
+from restore import *
+from manage import *
+from highlevel import *
+
+
+Globals.local_connection = LocalConnection()
+Globals.connections.append(Globals.local_connection)
+# Following changed by server in SetConnections
+Globals.connection_dict[0] = Globals.local_connection
+
diff --git a/rdiff-backup/rdiff_backup/destructive_stepping.py b/rdiff-backup/rdiff_backup/destructive_stepping.py
index 7dfde11..a64ecbc 100644
--- a/rdiff-backup/rdiff_backup/destructive_stepping.py
+++ b/rdiff-backup/rdiff_backup/destructive_stepping.py
@@ -1,6 +1,7 @@
from __future__ import generators
import types
-execfile("rorpiter.py")
+from rpath import *
+from lazy import *
#######################################################################
#
@@ -206,3 +207,6 @@ class DestructiveSteppingFinalizer(ErrorITR):
if self.dsrpath: self.dsrpath.write_changes()
+from log import *
+from robust import *
+import Globals
diff --git a/rdiff-backup/rdiff_backup/highlevel.py b/rdiff-backup/rdiff_backup/highlevel.py
index 8c95a1f..0b477d8 100644
--- a/rdiff-backup/rdiff_backup/highlevel.py
+++ b/rdiff-backup/rdiff_backup/highlevel.py
@@ -1,5 +1,12 @@
from __future__ import generators
-execfile("manage.py")
+from static import *
+from log import *
+from rpath import *
+from robust import *
+from increment import *
+from destructive_stepping import *
+from rorpiter import *
+import Globals, Hardlink, MiscStats
#######################################################################
#
@@ -248,7 +255,7 @@ class HLDestinationStruct:
"""Apply diffs and finalize, with checkpointing and statistics"""
collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2)
finalizer, ITR = cls.get_finalizer(), cls.get_MirrorITR(inc_rpath)
- Stats.open_dir_stats_file()
+ MiscStats.open_dir_stats_file()
dsrp, finished_dsrp = None, None
try:
@@ -266,15 +273,15 @@ class HLDestinationStruct:
except: cls.handle_last_error(finished_dsrp, finalizer, ITR)
if Globals.preserve_hardlinks: Hardlink.final_writedata()
- Stats.close_dir_stats_file()
- Stats.write_session_statistics(ITR)
+ MiscStats.close_dir_stats_file()
+ MiscStats.write_session_statistics(ITR)
SaveState.checkpoint_remove()
def patch_increment_and_finalize(cls, dest_rpath, diffs, inc_rpath):
"""Apply diffs, write increment if necessary, and finalize"""
collated = RORPIter.CollateIterators(diffs, cls.initial_dsiter2)
finalizer, ITR = cls.get_finalizer(), cls.get_ITR(inc_rpath)
- Stats.open_dir_stats_file()
+ MiscStats.open_dir_stats_file()
dsrp, finished_dsrp = None, None
try:
@@ -293,8 +300,8 @@ class HLDestinationStruct:
except: cls.handle_last_error(finished_dsrp, finalizer, ITR)
if Globals.preserve_hardlinks: Hardlink.final_writedata()
- Stats.close_dir_stats_file()
- Stats.write_session_statistics(ITR)
+ MiscStats.close_dir_stats_file()
+ MiscStats.write_session_statistics(ITR)
SaveState.checkpoint_remove()
def handle_last_error(cls, dsrp, finalizer, ITR):
diff --git a/rdiff-backup/rdiff_backup/increment.py b/rdiff-backup/rdiff_backup/increment.py
index b03b464..d5543a0 100644
--- a/rdiff-backup/rdiff_backup/increment.py
+++ b/rdiff-backup/rdiff_backup/increment.py
@@ -1,5 +1,7 @@
import traceback
-execfile("statistics.py")
+from static import *
+from statistics import *
+from lazy import *
#######################################################################
#
@@ -256,7 +258,7 @@ class IncrementITR(ErrorITR, StatsITR):
self.end_stats(diff_rorp, dsrp, self.incrp)
if self.mirror_isdirectory or dsrp.isdir():
- Stats.write_dir_stats_line(self, dsrp.index)
+ MiscStats.write_dir_stats_line(self, dsrp.index)
def branch_process(self, subinstance):
"""Update statistics, and the has_changed flag if change in branch"""
@@ -286,8 +288,15 @@ class MirrorITR(ErrorITR, StatsITR):
"""Update statistics when leaving"""
self.end_stats(self.diff_rorp, self.mirror_dsrp)
if self.mirror_dsrp.isdir():
- Stats.write_dir_stats_line(self, self.mirror_dsrp.index)
+ MiscStats.write_dir_stats_line(self, self.mirror_dsrp.index)
def branch_process(self, subinstance):
"""Update statistics with subdirectory results"""
self.add_file_stats(subinstance)
+
+
+from log import *
+from rpath import *
+from robust import *
+from rorpiter import *
+import Globals, Time, MiscStats
diff --git a/rdiff-backup/rdiff_backup/iterfile.py b/rdiff-backup/rdiff_backup/iterfile.py
index 21629b2..26cc952 100644
--- a/rdiff-backup/rdiff_backup/iterfile.py
+++ b/rdiff-backup/rdiff_backup/iterfile.py
@@ -1,5 +1,5 @@
-execfile("ttime.py")
import cPickle
+import Globals
#######################################################################
#
diff --git a/rdiff-backup/rdiff_backup/lazy.py b/rdiff-backup/rdiff_backup/lazy.py
index 425a9c4..1eb0211 100644
--- a/rdiff-backup/rdiff_backup/lazy.py
+++ b/rdiff-backup/rdiff_backup/lazy.py
@@ -1,6 +1,6 @@
from __future__ import generators
-execfile("static.py")
import os, stat, types
+from static import *
#######################################################################
#
@@ -324,3 +324,7 @@ class ErrorITR(IterTreeReducer):
Log("Error '%s' processing %s" % (exc, filename), 2)
+# Put at bottom to prevent (viciously) circular module dependencies
+from robust import *
+from log import *
+
diff --git a/rdiff-backup/rdiff_backup/log.py b/rdiff-backup/rdiff_backup/log.py
index 6b4ba06..aade607 100644
--- a/rdiff-backup/rdiff_backup/log.py
+++ b/rdiff-backup/rdiff_backup/log.py
@@ -1,5 +1,4 @@
-import time, sys, traceback
-execfile("lazy.py")
+import time, sys, traceback, types
#######################################################################
#
@@ -132,7 +131,7 @@ class Logger:
def FatalError(self, message):
self("Fatal Error: " + message, 1)
- Globals.Main.cleanup()
+ Main.cleanup()
sys.exit(1)
def exception_to_string(self):
@@ -158,3 +157,4 @@ class Logger:
logging_func(self.exception_to_string(), verbosity)
Log = Logger()
+import Globals, Main
diff --git a/rdiff-backup/rdiff_backup/manage.py b/rdiff-backup/rdiff_backup/manage.py
index 0c08872..4dad8b1 100644
--- a/rdiff-backup/rdiff_backup/manage.py
+++ b/rdiff-backup/rdiff_backup/manage.py
@@ -1,4 +1,7 @@
-execfile("restore.py")
+from __future__ import generators
+from static import *
+from log import *
+import Globals, Time
#######################################################################
#
diff --git a/rdiff-backup/rdiff_backup/restore.py b/rdiff-backup/rdiff_backup/restore.py
index 30820b8..5202854 100644
--- a/rdiff-backup/rdiff_backup/restore.py
+++ b/rdiff-backup/rdiff_backup/restore.py
@@ -1,6 +1,6 @@
from __future__ import generators
-execfile("increment.py")
import tempfile
+from static import *
#######################################################################
#
@@ -362,3 +362,10 @@ class RestoreCombinedData:
else: RPath.copy(inc, target)
else: raise RestoreError("Unknown inctype %s" % inctype)
RPath.copy_attribs(inc, target)
+
+
+from log import *
+from destructive_stepping import *
+from rpath import *
+from rorpiter import *
+import Globals, Time, Rdiff, Hardlink, FilenameMapping, SetConnections
diff --git a/rdiff-backup/rdiff_backup/robust.py b/rdiff-backup/rdiff_backup/robust.py
index e539827..3c9851c 100644
--- a/rdiff-backup/rdiff_backup/robust.py
+++ b/rdiff-backup/rdiff_backup/robust.py
@@ -1,5 +1,5 @@
-import tempfile, errno, signal
-execfile("hardlink.py")
+import tempfile, errno, signal, cPickle
+from static import *
#######################################################################
#
@@ -243,7 +243,7 @@ class Robust:
"""
try: return function(*args)
except (EnvironmentError, SkipFileException, DSRPPermError,
- RPathException, RdiffException), exc:
+ RPathException, Rdiff.RdiffException), exc:
TracebackArchive.add()
if (not isinstance(exc, EnvironmentError) or
(errno.errorcode[exc[0]] in
@@ -356,6 +356,8 @@ class TempFileManager:
MakeClass(TempFileManager)
+from rpath import *
+
class TempFile(RPath):
"""Like an RPath, but keep track of which ones are still here"""
def rename(self, rp_dest):
@@ -642,3 +644,9 @@ class ResumeSessionInfo:
self.last_index = last_index
self.last_definitive = last_definitive
self.ITR, self.finalizer, = ITR, finalizer
+
+
+from log import *
+from destructive_stepping import *
+import Time, Rdiff
+from highlevel import *
diff --git a/rdiff-backup/rdiff_backup/rorpiter.py b/rdiff-backup/rdiff_backup/rorpiter.py
index efa0303..03705aa 100644
--- a/rdiff-backup/rdiff_backup/rorpiter.py
+++ b/rdiff-backup/rdiff_backup/rorpiter.py
@@ -1,6 +1,11 @@
-execfile("robust.py")
from __future__ import generators
-import tempfile, UserList
+import tempfile, UserList, types
+from static import *
+from log import *
+from rpath import *
+from robust import *
+from iterfile import *
+import Globals, Rdiff, Hardlink
#######################################################################
#
diff --git a/rdiff-backup/rdiff_backup/rpath.py b/rdiff-backup/rdiff_backup/rpath.py
index c6bcca6..73910be 100644
--- a/rdiff-backup/rdiff_backup/rpath.py
+++ b/rdiff-backup/rdiff_backup/rpath.py
@@ -1,5 +1,5 @@
-execfile("connection.py")
import os, stat, re, sys, shutil, gzip
+from static import *
#######################################################################
#
@@ -778,6 +778,10 @@ class RPathFileHook:
self.closing_thunk()
return result
+# Import these late to avoid circular dependencies
+from lazy import *
+from selection import *
+from destructive_stepping import *
class RpathDeleter(IterTreeReducer):
"""Delete a directory. Called by RPath.delete()"""
diff --git a/rdiff-backup/rdiff_backup/selection.py b/rdiff-backup/rdiff_backup/selection.py
index 3d1f0e2..4fee9ee 100644
--- a/rdiff-backup/rdiff_backup/selection.py
+++ b/rdiff-backup/rdiff_backup/selection.py
@@ -1,6 +1,9 @@
from __future__ import generators
-execfile("destructive_stepping.py")
import re
+from log import *
+from robust import *
+from destructive_stepping import *
+
#######################################################################
#
@@ -521,3 +524,4 @@ probably isn't what you meant.""" %
else: res = res + re.escape(c)
return res
+
diff --git a/rdiff-backup/rdiff_backup/static.py b/rdiff-backup/rdiff_backup/static.py
index 2e97cd0..0355f44 100644
--- a/rdiff-backup/rdiff_backup/static.py
+++ b/rdiff-backup/rdiff_backup/static.py
@@ -1,5 +1,3 @@
-execfile("globals.py")
-
#######################################################################
#
# static - MakeStatic and MakeClass
diff --git a/rdiff-backup/rdiff_backup/statistics.py b/rdiff-backup/rdiff_backup/statistics.py
index a91a681..16dd881 100644
--- a/rdiff-backup/rdiff_backup/statistics.py
+++ b/rdiff-backup/rdiff_backup/statistics.py
@@ -1,4 +1,4 @@
-execfile("filename_mapping.py")
+from lazy import *
#######################################################################
#
@@ -277,73 +277,7 @@ class StatsITR(IterTreeReducer, StatsObj):
self.__dict__[attr] += subinstance.__dict__[attr]
-class Stats:
- """Misc statistics methods, pertaining to dir and session stat files"""
- # This is the RPath of the directory statistics file, and the
- # associated open file. It will hold a line of statistics for
- # each directory that is backed up.
- _dir_stats_rp = None
- _dir_stats_fp = None
-
- # This goes at the beginning of the directory statistics file and
- # explains the format.
- _dir_stats_header = """# rdiff-backup directory statistics file
-#
-# Each line is in the following format:
-# RelativeDirName %s
-""" % " ".join(StatsObj.stat_file_attrs)
-
- def open_dir_stats_file(cls):
- """Open directory statistics file, write header"""
- assert not cls._dir_stats_fp, "Directory file already open"
-
- if Globals.compression: suffix = "data.gz"
- else: suffix = "data"
- cls._dir_stats_rp = Inc.get_inc(Globals.rbdir.append(
- "directory_statistics"), Time.curtime, suffix)
-
- if cls._dir_stats_rp.lstat():
- Log("Warning, statistics file %s already exists, appending" %
- cls._dir_stats_rp.path, 2)
- cls._dir_stats_fp = cls._dir_stats_rp.open("ab",
- Globals.compression)
- else: cls._dir_stats_fp = \
- cls._dir_stats_rp.open("wb", Globals.compression)
- cls._dir_stats_fp.write(cls._dir_stats_header)
-
- def write_dir_stats_line(cls, statobj, index):
- """Write info from statobj about rpath to statistics file"""
- if Globals.null_separator:
- cls._dir_stats_fp.write(statobj.get_stats_line(index, None) + "\0")
- else: cls._dir_stats_fp.write(statobj.get_stats_line(index) + "\n")
-
- def close_dir_stats_file(cls):
- """Close directory statistics file if its open"""
- if cls._dir_stats_fp:
- cls._dir_stats_fp.close()
- cls._dir_stats_fp = None
-
- def write_session_statistics(cls, statobj):
- """Write session statistics into file, log"""
- stat_inc = Inc.get_inc(Globals.rbdir.append("session_statistics"),
- Time.curtime, "data")
- statobj.StartTime = Time.curtime
- statobj.EndTime = time.time()
-
- # include hardlink data and dir stats in size of increments
- if Globals.preserve_hardlinks and Hardlink.final_inc:
- # include hardlink data in size of increments
- statobj.IncrementFiles += 1
- statobj.IncrementFileSize += Hardlink.final_inc.getsize()
- if cls._dir_stats_rp and cls._dir_stats_rp.lstat():
- statobj.IncrementFiles += 1
- statobj.IncrementFileSize += cls._dir_stats_rp.getsize()
-
- statobj.write_stats_to_rp(stat_inc)
- if Globals.print_statistics:
- message = statobj.get_stats_logstring("Session statistics")
- Log.log_to_file(message)
- Globals.client_conn.sys.stdout.write(message)
-
-MakeClass(Stats)
-
+from log import *
+from increment import *
+from robust import *
+import Globals