Diffstat (limited to 'git/index')
-rw-r--r--   git/index/__init__.py |   2
-rw-r--r--   git/index/base.py     | 222
-rw-r--r--   git/index/fun.py      | 131
-rw-r--r--   git/index/typ.py      |  34
-rw-r--r--   git/index/util.py     |  19
5 files changed, 209 insertions, 199 deletions
diff --git a/git/index/__init__.py b/git/index/__init__.py index fe4a7f59..4a495c33 100644 --- a/git/index/__init__.py +++ b/git/index/__init__.py @@ -1,4 +1,4 @@ """Initialize the index package""" from base import * -from typ import *
\ No newline at end of file +from typ import * diff --git a/git/index/base.py b/git/index/base.py index c2b90218..c200f05f 100644 --- a/git/index/base.py +++ b/git/index/base.py @@ -13,86 +13,87 @@ from cStringIO import StringIO from stat import S_ISLNK from typ import ( - BaseIndexEntry, - IndexEntry, - ) + BaseIndexEntry, + IndexEntry, +) from util import ( - TemporaryFileSwap, - post_clear_cache, - default_index, - git_working_dir - ) + TemporaryFileSwap, + post_clear_cache, + default_index, + git_working_dir +) import git.objects import git.diff as diff from git.exc import ( - GitCommandError, - CheckoutError - ) + GitCommandError, + CheckoutError +) from git.objects import ( - Blob, - Submodule, - Tree, - Object, - Commit, - ) + Blob, + Submodule, + Tree, + Object, + Commit, +) from git.objects.util import Serializable from git.util import ( - IndexFileSHA1Writer, - LazyMixin, - LockedFD, - join_path_native, - file_contents_ro, - to_native_path_linux, - to_native_path - ) + IndexFileSHA1Writer, + LazyMixin, + LockedFD, + join_path_native, + file_contents_ro, + to_native_path_linux, + to_native_path +) from fun import ( - entry_key, - write_cache, - read_cache, - aggressive_tree_merge, - write_tree_from_cache, - stat_mode_to_index_mode, - S_IFGITLINK - ) + entry_key, + write_cache, + read_cache, + aggressive_tree_merge, + write_tree_from_cache, + stat_mode_to_index_mode, + S_IFGITLINK +) from git.base import IStream from git.util import to_bin_sha from itertools import izip -__all__ = ( 'IndexFile', 'CheckoutError' ) +__all__ = ('IndexFile', 'CheckoutError') class IndexFile(LazyMixin, diff.Diffable, Serializable): + """ Implements an Index that can be manipulated using a native implementation in order to save git command function calls wherever possible. - + It provides custom merging facilities allowing to merge without actually changing your index or your working tree. This way you can perform own test-merges based on the index only without having to deal with the working copy. This is useful in case of partial working trees. ``Entries`` - + The index contains an entries dict whose keys are tuples of type IndexEntry to facilitate access. You may read the entries dict or manipulate it using IndexEntry instance, i.e.:: - + index.entries[index.entry_key(index_entry_instance)] = index_entry_instance - + Make sure you use index.write() once you are done manipulating the index directly before operating on it using the git command""" __slots__ = ("repo", "version", "entries", "_extension_data", "_file_path") _VERSION = 2 # latest version we support - S_IFGITLINK = S_IFGITLINK # a submodule + S_IFGITLINK = S_IFGITLINK # a submodule def __init__(self, repo, file_path=None): """Initialize this Index instance, optionally from the given ``file_path``. @@ -119,14 +120,14 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): return # END exception handling - # Here it comes: on windows in python 2.5, memory maps aren't closed properly - # Hence we are in trouble if we try to delete a file that is memory mapped, + # Here it comes: on windows in python 2.5, memory maps aren't closed properly + # Hence we are in trouble if we try to delete a file that is memory mapped, # which happens during read-tree. # In this case, we will just read the memory in directly. # Its insanely bad ... I am disappointed ! 
- allow_mmap = (os.name != 'nt' or sys.version_info[1] > 5) + allow_mmap = (os.name != 'nt' or sys.version_info[1] > 5) stream = file_contents_ro(fd, stream=True, allow_mmap=allow_mmap) - + try: self._deserialize(stream) finally: @@ -153,30 +154,29 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): pass # END exception handling - #{ Serializable Interface + #{ Serializable Interface def _deserialize(self, stream): """Initialize this instance with index values read from the given stream""" self.version, self.entries, self._extension_data, conten_sha = read_cache(stream) return self - + def _entries_sorted(self): """:return: list of entries, in a sorted fashion, first by path, then by stage""" entries_sorted = self.entries.values() entries_sorted.sort(key=lambda e: (e.path, e.stage)) # use path/stage as sort key return entries_sorted - + def _serialize(self, stream, ignore_tree_extension_data=False): entries = self._entries_sorted() write_cache(entries, stream, - (ignore_tree_extension_data and None) or self._extension_data) + (ignore_tree_extension_data and None) or self._extension_data) return self - - + #} END serializable interface - def write(self, file_path = None, ignore_tree_extension_data=False): + def write(self, file_path=None, ignore_tree_extension_data=False): """Write the current state to our file path or to the given one :param file_path: @@ -197,14 +197,14 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): :return: self""" # make sure we have our entries read before getting a write lock - # else it would be done when streaming. This can happen + # else it would be done when streaming. This can happen # if one doesn't change the index, but writes it right away self.entries lfd = LockedFD(file_path or self._file_path) stream = lfd.open(write=True, stream=True) - + self._serialize(stream, ignore_tree_extension_data) - + lfd.commit() # make sure we represent what we have written @@ -263,16 +263,15 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): If you intend to write such a merged Index, supply an alternate file_path to its 'write' method.""" base_entries = aggressive_tree_merge(repo.odb, [to_bin_sha(str(t)) for t in tree_sha]) - + inst = cls(repo) # convert to entries dict - entries = dict(izip(((e.path, e.stage) for e in base_entries), + entries = dict(izip(((e.path, e.stage) for e in base_entries), (IndexEntry.from_base(e) for e in base_entries))) - + inst.entries = entries return inst - @classmethod def from_tree(cls, repo, *treeish, **kwargs): """Merge the given treeish revisions into a new index which is returned. 
@@ -313,7 +312,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): arg_list = list() # ignore that working tree and index possibly are out of date - if len(treeish)>1: + if len(treeish) > 1: # drop unmerged entries when reading our index and merging arg_list.append("--reset") # handle non-trivial cases the way a real merge does @@ -322,7 +321,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # tmp file created in git home directory to be sure renaming # works - /tmp/ dirs could be on another device - tmp_index = tempfile.mktemp('','',repo.git_dir) + tmp_index = tempfile.mktemp('', '', repo.git_dir) arg_list.append("--index-output=%s" % tmp_index) arg_list.extend(treeish) @@ -379,8 +378,8 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # END path exception handling # END for each path - def _write_path_to_stdin(self, proc, filepath, item, fmakeexc, fprogress, - read_from_stdout=True): + def _write_path_to_stdin(self, proc, filepath, item, fmakeexc, fprogress, + read_from_stdout=True): """Write path to proc.stdin and make sure it processes the item, including progress. :return: stdout string @@ -409,7 +408,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): fprogress(filepath, True, item) return rval - def iter_blobs(self, predicate = lambda t: True): + def iter_blobs(self, predicate=lambda t: True): """ :return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob) @@ -418,7 +417,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): iterator. A default filter, the BlobFilter, allows you to yield blobs only if they match a given list of paths. """ for entry in self.entries.itervalues(): - # TODO: is it necessary to convert the mode ? We did that when adding + # TODO: is it necessary to convert the mode ? We did that when adding # it to the index, right ? mode = stat_mode_to_index_mode(entry.mode) blob = entry.to_blob(self.repo) @@ -471,13 +470,13 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): for blob in iter_blobs: stage_null_key = (blob.path, 0) if stage_null_key in self.entries: - raise ValueError( "Path %r already exists at stage 0" % blob.path ) + raise ValueError("Path %r already exists at stage 0" % blob.path) # END assert blob is not stage 0 already # delete all possible stages for stage in (1, 2, 3): try: - del( self.entries[(blob.path, stage)]) + del(self.entries[(blob.path, stage)]) except KeyError: pass # END ignore key errors @@ -502,7 +501,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): def write_tree(self): """Writes this index to a corresponding Tree object into the repository's object database and return it. - + :return: Tree object representing this index :note: The tree will be written even if one or more objects the tree refers to does not yet exist in the object database. 
This could happen if you added @@ -516,17 +515,16 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): mdb = git.db.py.mem.PureMemoryDB() entries = self._entries_sorted() binsha, tree_items = write_tree_from_cache(entries, mdb, slice(0, len(entries))) - + # copy changed trees only mdb.stream_copy(mdb.sha_iter(), self.repo.odb) - - + # note: additional deserialization could be saved if write_tree_from_cache # would return sorted tree entries root_tree = Tree(self.repo, binsha, path='') root_tree._cache = tree_items return root_tree - + def _process_diff_args(self, args): try: args.pop(args.index(self)) @@ -540,9 +538,9 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): if it is not within our git direcotory""" if not os.path.isabs(path): return path - relative_path = path.replace(self.repo.working_tree_dir+os.sep, "") + relative_path = path.replace(self.repo.working_tree_dir + os.sep, "") if relative_path == path: - raise ValueError("Absolute path %r is not in git repository at %r" % (path,self.repo.working_tree_dir)) + raise ValueError("Absolute path %r is not in git repository at %r" % (path, self.repo.working_tree_dir)) return relative_path def _preprocess_add_items(self, items): @@ -563,8 +561,8 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): return (paths, entries) @git_working_dir - def add(self, items, force=True, fprogress=lambda *args: None, path_rewriter=None, - write=True): + def add(self, items, force=True, fprogress=lambda *args: None, path_rewriter=None, + write=True): """Add files from the working tree, specific blobs or BaseIndexEntries to the index. @@ -639,7 +637,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): :param write: If True, the index will be written once it was altered. Otherwise the changes only exist in memory and are not available to git commands. - + :return: List(BaseIndexEntries) representing the entries just actually added. 
@@ -656,16 +654,15 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): if paths and path_rewriter: for path in paths: abspath = os.path.abspath(path) - gitrelative_path = abspath[len(self.repo.working_tree_dir)+1:] - blob = Blob(self.repo, Blob.NULL_BIN_SHA, - stat_mode_to_index_mode(os.stat(abspath).st_mode), + gitrelative_path = abspath[len(self.repo.working_tree_dir) + 1:] + blob = Blob(self.repo, Blob.NULL_BIN_SHA, + stat_mode_to_index_mode(os.stat(abspath).st_mode), to_native_path_linux(gitrelative_path)) entries.append(BaseIndexEntry.from_blob(blob)) # END for each path del(paths[:]) # END rewrite paths - def store_path(filepath): """Store file at filepath in the database and return the base index entry""" st = os.lstat(filepath) # handles non-symlinks as well @@ -678,11 +675,10 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): fprogress(filepath, False, filepath) istream = self.repo.odb.store(IStream(Blob.type, st.st_size, stream)) fprogress(filepath, True, filepath) - return BaseIndexEntry((stat_mode_to_index_mode(st.st_mode), - istream.binsha, 0, to_native_path_linux(filepath))) + return BaseIndexEntry((stat_mode_to_index_mode(st.st_mode), + istream.binsha, 0, to_native_path_linux(filepath))) # END utility method - # HANDLE PATHS if paths: assert len(entries_added) == 0 @@ -692,22 +688,22 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # END for each filepath # END path handling - # HANDLE ENTRIES if entries: - null_mode_entries = [ e for e in entries if e.mode == 0 ] + null_mode_entries = [e for e in entries if e.mode == 0] if null_mode_entries: - raise ValueError("At least one Entry has a null-mode - please use index.remove to remove files for clarity") + raise ValueError( + "At least one Entry has a null-mode - please use index.remove to remove files for clarity") # END null mode should be remove # HANLDE ENTRY OBJECT CREATION # create objects if required, otherwise go with the existing shas - null_entries_indices = [ i for i,e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA ] + null_entries_indices = [i for i, e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA] if null_entries_indices: for ei in null_entries_indices: null_entry = entries[ei] new_entry = store_path(null_entry.path) - + # update null entry entries[ei] = BaseIndexEntry((null_entry.mode, new_entry.binsha, null_entry.stage, null_entry.path)) # END for each entry index @@ -717,7 +713,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # If we have to rewrite the entries, do so now, after we have generated # all object sha's if path_rewriter: - for i,e in enumerate(entries): + for i, e in enumerate(entries): entries[i] = BaseIndexEntry((e.mode, e.binsha, e.stage, path_rewriter(e))) # END for each entry # END handle path rewriting @@ -737,11 +733,11 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # add the new entries to this instance for entry in entries_added: self.entries[(entry.path, 0)] = IndexEntry.from_base(entry) - + if write: self.write() # END handle write - + return entries_added def _items_to_rela_paths(self, items): @@ -749,7 +745,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): may be absolute or relative paths, entries or blobs""" paths = list() for item in items: - if isinstance(item, (BaseIndexEntry,(Blob, Submodule))): + if isinstance(item, (BaseIndexEntry, (Blob, Submodule))): paths.append(self._to_relative_path(item.path)) elif isinstance(item, basestring): paths.append(self._to_relative_path(item)) @@ -807,7 +803,7 
@@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # process output to gain proper paths # rm 'path' - return [ p[4:-1] for p in removed_paths ] + return [p[4:-1] for p in removed_paths] @post_clear_cache @default_index @@ -853,7 +849,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # parse result - first 0:n/2 lines are 'checking ', the remaining ones # are the 'renaming' ones which we parse - for ln in xrange(len(mvlines)/2, len(mvlines)): + for ln in xrange(len(mvlines) / 2, len(mvlines)): tokens = mvlines[ln].split(' to ') assert len(tokens) == 2, "Too many tokens in %s" % mvlines[ln] @@ -867,7 +863,6 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): return out # END handle dryrun - # now apply the actual operation kwargs.pop('dry_run') self.repo.git.mv(args, paths, **kwargs) @@ -888,7 +883,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): return Commit.create_from_tree(self.repo, tree, message, parent_commits, head) @classmethod - def _flush_stdin_and_wait(cls, proc, ignore_stdout = False): + def _flush_stdin_and_wait(cls, proc, ignore_stdout=False): proc.stdin.flush() proc.stdin.close() stdout = '' @@ -902,7 +897,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): def checkout(self, paths=None, force=False, fprogress=lambda *args: None, **kwargs): """Checkout the given paths or all files from the version known to the index into the working tree. - + :note: Be sure you have written pending changes using the ``write`` method in case you have altered the enties dictionary directly @@ -935,7 +930,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): ( as opposed to the original git command who ignores them ). Raise GitCommandError if error lines could not be parsed - this truly is an exceptional state - + .. note:: The checkout is limited to checking out the files in the index. Files which are not in the index anymore and exist in the working tree will not be deleted. This behaviour is fundamentally @@ -987,10 +982,10 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): raise GitCommandError(("git-checkout-index", ), 128, stderr) if failed_files: valid_files = list(set(iter_checked_out_files) - set(failed_files)) - raise CheckoutError("Some files could not be checked out from the index due to local modifications", failed_files, valid_files, failed_reasons) + raise CheckoutError( + "Some files could not be checked out from the index due to local modifications", failed_files, valid_files, failed_reasons) # END stderr handler - if paths is None: args.append("--all") kwargs['as_process'] = 1 @@ -998,7 +993,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): proc = self.repo.git.checkout_index(*args, **kwargs) proc.wait() fprogress(None, True, None) - rval_iter = ( e.path for e in self.entries.itervalues() ) + rval_iter = (e.path for e in self.entries.itervalues()) handle_stderr(proc, rval_iter) return rval_iter else: @@ -1006,15 +1001,15 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): paths = [paths] # make sure we have our entries loaded before we start checkout_index - # which will hold a lock on it. We try to get the lock as well during + # which will hold a lock on it. 
We try to get the lock as well during # our entries initialization self.entries - + args.append("--stdin") kwargs['as_process'] = True kwargs['istream'] = subprocess.PIPE proc = self.repo.git.checkout_index(args, **kwargs) - make_exc = lambda : GitCommandError(("git-checkout-index",)+tuple(args), 128, proc.stderr.read()) + make_exc = lambda: GitCommandError(("git-checkout-index",) + tuple(args), 128, proc.stderr.read()) checked_out_files = list() for path in paths: @@ -1031,8 +1026,8 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): for entry in self.entries.itervalues(): if entry.path.startswith(dir): p = entry.path - self._write_path_to_stdin(proc, p, p, make_exc, - fprogress, read_from_stdout=False) + self._write_path_to_stdin(proc, p, p, make_exc, + fprogress, read_from_stdout=False) checked_out_files.append(p) path_is_directory = True # END if entry is in directory @@ -1040,8 +1035,8 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # END path exception handlnig if not path_is_directory: - self._write_path_to_stdin(proc, co_path, path, make_exc, - fprogress, read_from_stdout=False) + self._write_path_to_stdin(proc, co_path, path, make_exc, + fprogress, read_from_stdout=False) checked_out_files.append(co_path) # END path is a file # END for each path @@ -1067,11 +1062,11 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): If False, the working tree will not be touched Please note that changes to the working copy will be discarded without warning ! - + :param head: If True, the head will be set to the given commit. This is False by default, but if True, this method behaves like HEAD.reset. - + :param paths: if given as an iterable of absolute or repository-relative paths, only these will be reset to their state at the given commit'ish. The paths need to exist at the commit, otherwise an exception will be @@ -1079,7 +1074,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): :param kwargs: Additional keyword arguments passed to git-reset - + .. note:: IndexFile.reset, as opposed to HEAD.reset, will not delete anyfiles in order to maintain a consistent working tree. Instead, it will just checkout the files according to their state in the index. 
@@ -1109,11 +1104,11 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # END for each path # END handle paths self.write() - + if working_tree: self.checkout(paths=paths, force=True) # END handle working tree - + if head: self.repo.head.set_commit(self.repo.commit(commit), logmsg="%s: Updating HEAD" % commit) # END handle head change @@ -1151,8 +1146,7 @@ class IndexFile(LazyMixin, diff.Diffable, Serializable): # if other is not None here, something is wrong if other is not None: - raise ValueError( "other must be None, Diffable.Index, a Tree or Commit, was %r" % other ) + raise ValueError("other must be None, Diffable.Index, a Tree or Commit, was %r" % other) # diff against working copy - can be handled by superclass natively return super(IndexFile, self).diff(other, paths, create_patch, **kwargs) - diff --git a/git/index/fun.py b/git/index/fun.py index 390bb269..b3ad98a4 100644 --- a/git/index/fun.py +++ b/git/index/fun.py @@ -2,14 +2,14 @@ # more versatile # NOTE: Autodoc hates it if this is a docstring from stat import ( - S_IFDIR, - S_IFLNK, - S_ISLNK, - S_IFDIR, - S_ISDIR, - S_IFMT, - S_IFREG, - ) + S_IFDIR, + S_IFLNK, + S_ISLNK, + S_IFDIR, + S_ISDIR, + S_IFMT, + S_IFREG, +) S_IFGITLINK = S_IFLNK | S_IFDIR # a submodule @@ -18,29 +18,29 @@ from cStringIO import StringIO from git.util import IndexFileSHA1Writer from git.exc import UnmergedEntriesError from git.objects.fun import ( - tree_to_stream, - traverse_tree_recursive, - traverse_trees_recursive - ) + tree_to_stream, + traverse_tree_recursive, + traverse_trees_recursive +) from typ import ( - BaseIndexEntry, - IndexEntry, - CE_NAMEMASK, - CE_STAGESHIFT - ) + BaseIndexEntry, + IndexEntry, + CE_NAMEMASK, + CE_STAGESHIFT +) CE_NAMEMASK_INV = ~CE_NAMEMASK -from util import ( - pack, - unpack - ) +from util import ( + pack, + unpack +) from git.base import IStream from git.typ import str_tree_type -__all__ = ('write_cache', 'read_cache', 'write_tree_from_cache', 'entry_key', - 'stat_mode_to_index_mode', 'S_IFGITLINK') +__all__ = ('write_cache', 'read_cache', 'write_tree_from_cache', 'entry_key', + 'stat_mode_to_index_mode', 'S_IFGITLINK') def stat_mode_to_index_mode(mode): @@ -55,19 +55,19 @@ def stat_mode_to_index_mode(mode): def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1Writer): """Write the cache represented by entries to a stream - + :param entries: **sorted** list of entries :param stream: stream to wrap into the AdapterStreamCls - it is used for final output. - + :param ShaStreamCls: Type to use when writing to the stream. 
It produces a sha while writing to it, before the data is passed on to the wrapped stream - + :param extension_data: any kind of data to write as a trailer, it must begin a 4 byte identifier, followed by its size ( 4 bytes )""" # wrap the stream into a compatible writer stream = ShaStreamCls(stream) - + tell = stream.tell write = stream.write @@ -86,7 +86,7 @@ def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1 assert plen == len(path), "Path %s too long to fit into index" % entry[3] flags = plen | (entry[2] & CE_NAMEMASK_INV) # clear possible previous values write(pack(">LLLLLL20sH", entry[6], entry[7], entry[0], - entry[8], entry[9], entry[10], entry[1], flags)) + entry[8], entry[9], entry[10], entry[1], flags)) write(path) real_size = ((tell() - beginoffset + 8) & ~7) write("\0" * ((beginoffset + real_size) - tell())) @@ -98,17 +98,19 @@ def write_cache(entries, stream, extension_data=None, ShaStreamCls=IndexFileSHA1 # write the sha over the content stream.write_sha() - + + def read_header(stream): - """Return tuple(version_long, num_entries) from the given stream""" - type_id = stream.read(4) - if type_id != "DIRC": - raise AssertionError("Invalid index file header: %r" % type_id) - version, num_entries = unpack(">LL", stream.read(4 * 2)) - - # TODO: handle version 3: extended data, see read-cache.c - assert version in (1, 2) - return version, num_entries + """Return tuple(version_long, num_entries) from the given stream""" + type_id = stream.read(4) + if type_id != "DIRC": + raise AssertionError("Invalid index file header: %r" % type_id) + version, num_entries = unpack(">LL", stream.read(4 * 2)) + + # TODO: handle version 3: extended data, see read-cache.c + assert version in (1, 2) + return version, num_entries + def entry_key(*entry): """:return: Key suitable to be used for the index.entries dictionary @@ -119,6 +121,7 @@ def entry_key(*entry): return tuple(entry) # END handle entry + def read_cache(stream): """Read a cache file from the given stream :return: tuple(version, entries_dict, extension_data, content_sha) @@ -130,7 +133,7 @@ def read_cache(stream): version, num_entries = read_header(stream) count = 0 entries = dict() - + read = stream.read tell = stream.tell while count < num_entries: @@ -141,7 +144,7 @@ def read_cache(stream): unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2)) path_size = flags & CE_NAMEMASK path = read(path_size) - + real_size = ((tell() - beginoffset + 8) & ~7) data = read((beginoffset + real_size) - tell()) entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size)) @@ -157,19 +160,21 @@ def read_cache(stream): # 4 bytes length of chunk # repeated 0 - N times extension_data = stream.read(~0) - assert len(extension_data) > 19, "Index Footer was not at least a sha on content as it was only %i bytes in size" % len(extension_data) + assert len(extension_data) > 19, "Index Footer was not at least a sha on content as it was only %i bytes in size" % len( + extension_data) content_sha = extension_data[-20:] # truncate the sha in the end as we will dynamically create it anyway extension_data = extension_data[:-20] - + return (version, entries, extension_data, content_sha) - + + def write_tree_from_cache(entries, odb, sl, si=0): """Create a tree from the given sorted list of entries and put the respective trees into the given object database - + :param entries: **sorted** list of IndexEntries :param odb: object database to store the trees in :param si: start index at which we should start creating subtrees @@ 
-202,28 +207,30 @@ def write_tree_from_cache(entries, odb, sl, si=0): # END abort on base mismatch xi += 1 # END find common base - + # enter recursion # ci - 1 as we want to count our current item as well - sha, tree_entry_list = write_tree_from_cache(entries, odb, slice(ci-1, xi), rbound+1) + sha, tree_entry_list = write_tree_from_cache(entries, odb, slice(ci - 1, xi), rbound + 1) tree_items_append((sha, S_IFDIR, base)) - + # skip ahead ci = xi - # END handle bounds + # END handle bounds # END for each entry - + # finally create the tree sio = StringIO() tree_to_stream(tree_items, sio.write) sio.seek(0) - + istream = odb.store(IStream(str_tree_type, len(sio.getvalue()), sio)) return (istream.binsha, tree_items) - + + def _tree_entry_to_baseindexentry(tree_entry, stage): - return BaseIndexEntry((tree_entry[1], tree_entry[0], stage <<CE_STAGESHIFT, tree_entry[2])) - + return BaseIndexEntry((tree_entry[1], tree_entry[0], stage << CE_STAGESHIFT, tree_entry[2])) + + def aggressive_tree_merge(odb, tree_shas): """ :return: list of BaseIndexEntries representing the aggressive merge of the given @@ -235,16 +242,16 @@ def aggressive_tree_merge(odb, tree_shas): If 3 are given, a 3 way merge is performed""" out = list() out_append = out.append - + # one and two way is the same for us, as we don't have to handle an existing # index, instrea - if len(tree_shas) in (1,2): + if len(tree_shas) in (1, 2): for entry in traverse_tree_recursive(odb, tree_shas[-1], ''): out_append(_tree_entry_to_baseindexentry(entry, 0)) # END for each entry return out - # END handle single tree - + # END handle single tree + if len(tree_shas) > 3: raise ValueError("Cannot handle %i trees at once" % len(tree_shas)) @@ -259,7 +266,7 @@ def aggressive_tree_merge(odb, tree_shas): # its a conflict, otherwise we take the changed version # This should be the most common branch, so it comes first if( base[0] != ours[0] and base[0] != theirs[0] and ours[0] != theirs[0] ) or \ - ( base[1] != ours[1] and base[1] != theirs[1] and ours[1] != theirs[1] ): + (base[1] != ours[1] and base[1] != theirs[1] and ours[1] != theirs[1]): # changed by both out_append(_tree_entry_to_baseindexentry(base, 1)) out_append(_tree_entry_to_baseindexentry(ours, 2)) @@ -271,11 +278,11 @@ def aggressive_tree_merge(odb, tree_shas): # either nobody changed it, or they did. 
In either # case, use theirs out_append(_tree_entry_to_baseindexentry(theirs, 0)) - # END handle modification + # END handle modification else: - + if ours[0] != base[0] or ours[1] != base[1]: - # they deleted it, we changed it, conflict + # they deleted it, we changed it, conflict out_append(_tree_entry_to_baseindexentry(base, 1)) out_append(_tree_entry_to_baseindexentry(ours, 2)) # else: @@ -293,7 +300,7 @@ def aggressive_tree_merge(odb, tree_shas): out_append(_tree_entry_to_baseindexentry(base, 1)) out_append(_tree_entry_to_baseindexentry(theirs, 3)) # END theirs changed - #else: + # else: # theirs didnt change # pass # END handle theirs diff --git a/git/index/typ.py b/git/index/typ.py index 7f27d869..0e64d28c 100644 --- a/git/index/typ.py +++ b/git/index/typ.py @@ -1,13 +1,13 @@ """Module with additional types used by the index""" from util import ( - pack, - unpack - ) + pack, + unpack +) from binascii import ( - b2a_hex, - ) + b2a_hex, +) from git.objects import Blob __all__ = ('BlobFilter', 'BaseIndexEntry', 'IndexEntry') @@ -21,7 +21,9 @@ CE_STAGESHIFT = 12 #} END invariants + class BlobFilter(object): + """ Predicate to be used by iter_blobs allowing to filter only return blobs which match the given list of directories or files. @@ -47,6 +49,7 @@ class BlobFilter(object): class BaseIndexEntry(tuple): + """Small Brother of an index entry which can be created to describe changes done to the index in which case plenty of additional information is not requried. @@ -56,7 +59,7 @@ class BaseIndexEntry(tuple): def __str__(self): return "%o %s %i\t%s" % (self.mode, self.hexsha, self.stage, self.path) - + def __repr__(self): return "(%o, %s, %i, %s)" % (self.mode, self.hexsha, self.stage, self.path) @@ -69,7 +72,7 @@ class BaseIndexEntry(tuple): def binsha(self): """binary sha of the blob """ return self[1] - + @property def hexsha(self): """hex version of our sha""" @@ -78,12 +81,12 @@ class BaseIndexEntry(tuple): @property def stage(self): """Stage of the entry, either: - + * 0 = default stage * 1 = stage before a merge or common ancestor entry in case of a 3 way merge * 2 = stage of entries from the 'left' side of the merge * 3 = stage of entries from the right side of the merge - + :note: For more information, see http://www.kernel.org/pub/software/scm/git/docs/git-read-tree.html """ return (self[2] & CE_STAGEMASK) >> CE_STAGESHIFT @@ -99,16 +102,17 @@ class BaseIndexEntry(tuple): return self[2] @classmethod - def from_blob(cls, blob, stage = 0): + def from_blob(cls, blob, stage=0): """:return: Fully equipped BaseIndexEntry at the given stage""" return cls((blob.mode, blob.binsha, stage << CE_STAGESHIFT, blob.path)) - + def to_blob(self, repo): """:return: Blob using the information of this index entry""" - return Blob(repo, self.binsha, self.mode, self.path) + return Blob(repo, self.binsha, self.mode, self.path) class IndexEntry(BaseIndexEntry): + """Allows convenient access to IndexEntry data without completely unpacking it. 
Attributes usully accessed often are cached in the tuple whereas others are @@ -152,7 +156,7 @@ class IndexEntry(BaseIndexEntry): def size(self): """:return: Uncompressed size of the blob """ return self[10] - + @classmethod def from_base(cls, base): """ @@ -165,9 +169,7 @@ class IndexEntry(BaseIndexEntry): return IndexEntry((base.mode, base.binsha, base.flags, base.path, time, time, 0, 0, 0, 0, 0)) @classmethod - def from_blob(cls, blob, stage = 0): + def from_blob(cls, blob, stage=0): """:return: Minimal entry resembling the given blob object""" time = pack(">LL", 0, 0) return IndexEntry((blob.mode, blob.binsha, stage << CE_STAGESHIFT, blob.path, time, time, 0, 0, 0, 0, blob.size)) - - diff --git a/git/index/util.py b/git/index/util.py index 59f8d591..97f4c5e5 100644 --- a/git/index/util.py +++ b/git/index/util.py @@ -3,9 +3,9 @@ import struct import tempfile import os -__all__ = ( 'TemporaryFileSwap', 'post_clear_cache', 'default_index', 'git_working_dir' ) +__all__ = ('TemporaryFileSwap', 'post_clear_cache', 'default_index', 'git_working_dir') -#{ Aliases +#{ Aliases pack = struct.pack unpack = struct.unpack @@ -13,13 +13,14 @@ unpack = struct.unpack #} END aliases class TemporaryFileSwap(object): + """Utility class moving a file to a temporary location within the same directory and moving it back on to where on object deletion.""" __slots__ = ("file_path", "tmp_file_path") def __init__(self, file_path): self.file_path = file_path - self.tmp_file_path = self.file_path + tempfile.mktemp('','','') + self.tmp_file_path = self.file_path + tempfile.mktemp('', '', '') # it may be that the source does not exist try: os.rename(self.file_path, self.tmp_file_path) @@ -34,7 +35,7 @@ class TemporaryFileSwap(object): # END temp file exists -#{ Decorators +#{ Decorators def post_clear_cache(func): """Decorator for functions that alter the index using the git command. This would @@ -45,6 +46,7 @@ def post_clear_cache(func): This decorator will not be required once all functions are implemented natively which in fact is possible, but probably not feasible performance wise. """ + def post_clear_cache_if_not_raised(self, *args, **kwargs): rval = func(self, *args, **kwargs) self._delete_entries_cache() @@ -54,22 +56,27 @@ def post_clear_cache(func): post_clear_cache_if_not_raised.__name__ = func.__name__ return post_clear_cache_if_not_raised + def default_index(func): """Decorator assuring the wrapped method may only run if we are the default repository index. This is as we rely on git commands that operate on that index only. """ + def check_default_index(self, *args, **kwargs): if self._file_path != self._index_path(): - raise AssertionError( "Cannot call %r on indices that do not represent the default git index" % func.__name__ ) + raise AssertionError( + "Cannot call %r on indices that do not represent the default git index" % func.__name__) return func(self, *args, **kwargs) # END wrpaper method check_default_index.__name__ = func.__name__ return check_default_index + def git_working_dir(func): """Decorator which changes the current working dir to the one of the git repository in order to assure relative paths are handled correctly""" + def set_git_working_dir(self, *args, **kwargs): cur_wd = os.getcwd() os.chdir(self.repo.working_tree_dir) @@ -79,7 +86,7 @@ def git_working_dir(func): os.chdir(cur_wd) # END handle working dir # END wrapper - + set_git_working_dir.__name__ = func.__name__ return set_git_working_dir |
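For context on the API these files implement, here is a minimal usage sketch of IndexFile as documented in the diff above: the entries dict keyed by (path, stage), entry_key, iter_blobs with a BlobFilter predicate, and the from_tree merge that builds an index without touching the working tree. It is a sketch only, not part of the change: the repository path, the README/lib names and the revision names are placeholder assumptions, and it targets the Python 2 era GitPython shown here.

# Minimal usage sketch of the IndexFile API touched by the diff above.
# Paths, file names and revision names are placeholders.
from git import Repo
from git.index import IndexFile, BlobFilter

repo = Repo("/path/to/repo")           # placeholder repository path
index = repo.index                     # the default IndexFile of the repository

# entries is a dict keyed by (path, stage); stage 0 is the regular, non-conflict stage.
entry = index.entries[("README", 0)]   # assumes a tracked file named README
print entry.hexsha, entry.mode, entry.stage, entry.path

# The pattern from the IndexFile docstring: manipulate the entries dict directly,
# keyed through entry_key, then write() before letting git commands read the index.
index.entries[index.entry_key(entry)] = entry
index.write()

# Iterate (stage, Blob) pairs, restricted to a subdirectory via a BlobFilter predicate.
for stage, blob in index.iter_blobs(BlobFilter(["lib"])):
    print stage, blob.path

# Merge several tree-ish revisions into a fresh in-memory index without touching
# the working tree; as the docstring advises, write it to an alternate file path.
merged = IndexFile.from_tree(repo, "HEAD~1", "HEAD", "feature")   # placeholder revisions
merged.write(file_path="/tmp/merged_index")
# write_tree() stores the merge result as a Tree object; it raises
# UnmergedEntriesError if the merge left conflicting stages behind.
tree = merged.write_tree()
print tree.hexsha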