Diffstat (limited to 'hgext/largefiles')
-rw-r--r--  hgext/largefiles/CONTRIBUTORS       4
-rw-r--r--  hgext/largefiles/__init__.py      102
-rw-r--r--  hgext/largefiles/basestore.py     195
-rw-r--r--  hgext/largefiles/lfcommands.py    549
-rw-r--r--  hgext/largefiles/lfutil.py        467
-rw-r--r--  hgext/largefiles/localstore.py     82
-rw-r--r--  hgext/largefiles/overrides.py    1080
-rw-r--r--  hgext/largefiles/proto.py         173
-rw-r--r--  hgext/largefiles/remotestore.py   110
-rw-r--r--  hgext/largefiles/reposetup.py     475
-rw-r--r--  hgext/largefiles/uisetup.py       167
-rw-r--r--  hgext/largefiles/wirestore.py      37
12 files changed, 0 insertions, 3441 deletions
diff --git a/hgext/largefiles/CONTRIBUTORS b/hgext/largefiles/CONTRIBUTORS
deleted file mode 100644
index 9bef457..0000000
--- a/hgext/largefiles/CONTRIBUTORS
+++ /dev/null
@@ -1,4 +0,0 @@
-Greg Ward, author of the original bfiles extension
-Na'Tosha Bard of Unity Technologies
-Fog Creek Software
-Special thanks to the University of Toronto and the UCOSP program
diff --git a/hgext/largefiles/__init__.py b/hgext/largefiles/__init__.py
deleted file mode 100644
index 12c80fa..0000000
--- a/hgext/largefiles/__init__.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''track large binary files
-
-Large binary files tend to be not very compressible, not very
-diffable, and not at all mergeable. Such files are not handled
-efficiently by Mercurial's storage format (revlog), which is based on
-compressed binary deltas; storing large binary files as regular
-Mercurial files wastes bandwidth and disk space and increases
-Mercurial's memory usage. The largefiles extension addresses these
-problems by adding a centralized client-server layer on top of
-Mercurial: largefiles live in a *central store* out on the network
-somewhere, and you only fetch the revisions that you need when you
-need them.
-
-largefiles works by maintaining a "standin file" in .hglf/ for each
-largefile. The standins are small (41 bytes: an SHA-1 hash plus
-newline) and are tracked by Mercurial. Largefile revisions are
-identified by the SHA-1 hash of their contents, which is written to
-the standin. largefiles uses that revision ID to get/put largefile
-revisions from/to the central store. This saves both disk space and
-bandwidth, since you don't need to retrieve all historical revisions
-of large files when you clone or pull.
-
-To start a new repository or add new large binary files, just add
---large to your :hg:`add` command. For example::
-
- $ dd if=/dev/urandom of=randomdata count=2000
- $ hg add --large randomdata
- $ hg commit -m 'add randomdata as a largefile'
-
-When you push a changeset that adds/modifies largefiles to a remote
-repository, its largefile revisions will be uploaded along with it.
-Note that the remote Mercurial must also have the largefiles extension
-enabled for this to work.
-
-When you pull a changeset that affects largefiles from a remote
-repository, Mercurial behaves as normal. However, when you update to
-such a revision, any largefiles needed by that revision are downloaded
-and cached (if they have never been downloaded before). This means
-that network access may be required to update to changesets you have
-not previously updated to.
-
-If you already have large files tracked by Mercurial without the
-largefiles extension, you will need to convert your repository in
-order to benefit from largefiles. This is done with the
-:hg:`lfconvert` command::
-
- $ hg lfconvert --size 10 oldrepo newrepo
-
-In repositories that already have largefiles in them, any new file
-over 10MB will automatically be added as a largefile. To change this
-threshold, set ``largefiles.minsize`` in your Mercurial config file
-to the minimum size in megabytes to track as a largefile, or use the
---lfsize option to the add command (also in megabytes)::
-
- [largefiles]
- minsize = 2
-
- $ hg add --lfsize 2
-
-The ``largefiles.patterns`` config option allows you to specify a list
-of filename patterns (see :hg:`help patterns`) that should always be
-tracked as largefiles::
-
- [largefiles]
- patterns =
- *.jpg
- re:.*\.(png|bmp)$
- library.zip
- content/audio/*
-
-Files that match one of these patterns will be added as largefiles
-regardless of their size.
-
-The ``largefiles.minsize`` and ``largefiles.patterns`` config options
-will be ignored for any repositories not already containing a
-largefile. To add the first largefile to a repository, you must
-explicitly do so with the --large flag passed to the :hg:`add`
-command.
-'''
-
-from mercurial import commands
-
-import lfcommands
-import reposetup
-import uisetup
-
-testedwith = 'internal'
-
-reposetup = reposetup.reposetup
-uisetup = uisetup.uisetup
-
-commands.norepo += " lfconvert"
-
-cmdtable = lfcommands.cmdtable
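The standin format described in the docstring above is simple enough to reproduce outside the extension. A minimal sketch (standincontent is a hypothetical helper; it uses the standard-library hashlib rather than the util.sha1 wrapper the extension goes through) of how the 41-byte standin body is derived from a largefile's contents::

    import hashlib

    def standincontent(path):
        '''Return the 41-byte standin body for the largefile at path:
        the SHA-1 hex digest of its contents plus a trailing newline.'''
        h = hashlib.sha1()
        fp = open(path, 'rb')
        try:
            for chunk in iter(lambda: fp.read(128 * 1024), b''):
                h.update(chunk)
        finally:
            fp.close()
        return h.hexdigest() + '\n'  # 40 hex characters + newline

That hex digest doubles as the revision ID used to address the file in the central store.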
diff --git a/hgext/largefiles/basestore.py b/hgext/largefiles/basestore.py
deleted file mode 100644
index 55aa4a0..0000000
--- a/hgext/largefiles/basestore.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''base class for store implementations and store-related utility code'''
-
-import binascii
-import re
-
-from mercurial import util, node, hg
-from mercurial.i18n import _
-
-import lfutil
-
-class StoreError(Exception):
- '''Raised when there is a problem getting files from or putting
- files to a central store.'''
- def __init__(self, filename, hash, url, detail):
- self.filename = filename
- self.hash = hash
- self.url = url
- self.detail = detail
-
- def longmessage(self):
- if self.url:
- return ('%s: %s\n'
- '(failed URL: %s)\n'
- % (self.filename, self.detail, self.url))
- else:
- return ('%s: %s\n'
- '(no default or default-push path set in hgrc)\n'
- % (self.filename, self.detail))
-
- def __str__(self):
- return "%s: %s" % (self.url, self.detail)
-
-class basestore(object):
- def __init__(self, ui, repo, url):
- self.ui = ui
- self.repo = repo
- self.url = url
-
- def put(self, source, hash):
- '''Put source file into the store under <filename>/<hash>.'''
- raise NotImplementedError('abstract method')
-
- def exists(self, hashes):
- '''Check to see if the store contains the given hashes.'''
- raise NotImplementedError('abstract method')
-
- def get(self, files):
- '''Get the specified largefiles from the store and write to local
- files under repo.root. files is a list of (filename, hash)
- tuples. Return (success, missing), lists of files successfully
- downloaded and those not found in the store. success is a list
- of (filename, hash) tuples; missing is a list of filenames that
- we could not get. (The detailed error message will already have
- been presented to the user, so missing is just supplied as a
- summary.)'''
- success = []
- missing = []
- ui = self.ui
-
- at = 0
- for filename, hash in files:
- ui.progress(_('getting largefiles'), at, unit='lfile',
- total=len(files))
- at += 1
- ui.note(_('getting %s:%s\n') % (filename, hash))
-
- storefilename = lfutil.storepath(self.repo, hash)
- tmpfile = util.atomictempfile(storefilename,
- createmode=self.repo.store.createmode)
-
- try:
- hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
- except StoreError, err:
- ui.warn(err.longmessage())
- hhash = ""
-
- if hhash != hash:
- if hhash != "":
- ui.warn(_('%s: data corruption (expected %s, got %s)\n')
- % (filename, hash, hhash))
- tmpfile.discard() # no-op if it's already closed
- missing.append(filename)
- continue
-
- tmpfile.close()
- lfutil.linktousercache(self.repo, hash)
- success.append((filename, hhash))
-
- ui.progress(_('getting largefiles'), None)
- return (success, missing)
-
- def verify(self, revs, contents=False):
- '''Verify the existence (and, optionally, contents) of every big
- file revision referenced by every changeset in revs.
- Return 0 if all is well, non-zero on any errors.'''
- write = self.ui.write
- failed = False
-
- write(_('searching %d changesets for largefiles\n') % len(revs))
- verified = set() # set of (filename, filenode) tuples
-
- for rev in revs:
- cctx = self.repo[rev]
- cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
-
- failed = util.any(self._verifyfile(
- cctx, cset, contents, standin, verified) for standin in cctx) or failed
-
- numrevs = len(verified)
- numlfiles = len(set([fname for (fname, fnode) in verified]))
- if contents:
- write(_('verified contents of %d revisions of %d largefiles\n')
- % (numrevs, numlfiles))
- else:
- write(_('verified existence of %d revisions of %d largefiles\n')
- % (numrevs, numlfiles))
-
- return int(failed)
-
- def _getfile(self, tmpfile, filename, hash):
- '''Fetch one revision of one file from the store and write it
- to tmpfile. Compute the hash of the file on-the-fly as it
- downloads and return the binary hash. Close tmpfile. Raise
- StoreError if unable to download the file (e.g. it does not
- exist in the store).'''
- raise NotImplementedError('abstract method')
-
- def _verifyfile(self, cctx, cset, contents, standin, verified):
- '''Perform the actual verification of a file in the store.
- '''
- raise NotImplementedError('abstract method')
-
-import localstore, wirestore
-
-_storeprovider = {
- 'file': [localstore.localstore],
- 'http': [wirestore.wirestore],
- 'https': [wirestore.wirestore],
- 'ssh': [wirestore.wirestore],
- }
-
-_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
-
-# During clone this function is passed the src's ui object
-# but it needs the dest's ui object so it can read out of
-# the config file. Use repo.ui instead.
-def _openstore(repo, remote=None, put=False):
- ui = repo.ui
-
- if not remote:
- lfpullsource = getattr(repo, 'lfpullsource', None)
- if lfpullsource:
- path = ui.expandpath(lfpullsource)
- else:
- path = ui.expandpath('default-push', 'default')
-
- # ui.expandpath() leaves 'default-push' and 'default' alone if
- # they cannot be expanded: fallback to the empty string,
- # meaning the current directory.
- if path == 'default-push' or path == 'default':
- path = ''
- remote = repo
- else:
- remote = hg.peer(repo, {}, path)
-
- # The path could be a scheme so use Mercurial's normal functionality
- # to resolve the scheme to a repository and use its path
- path = util.safehasattr(remote, 'url') and remote.url() or remote.path
-
- match = _scheme_re.match(path)
- if not match: # regular filesystem path
- scheme = 'file'
- else:
- scheme = match.group(1)
-
- try:
- storeproviders = _storeprovider[scheme]
- except KeyError:
- raise util.Abort(_('unsupported URL scheme %r') % scheme)
-
- for classobj in storeproviders:
- try:
- return classobj(ui, repo, remote)
- except lfutil.storeprotonotcapable:
- pass
-
- raise util.Abort(_('%s does not appear to be a largefile store') % path)
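The dispatch at the end of basestore.py keys off the URL scheme. A self-contained sketch of that resolution step (schemeof is a hypothetical name; the real code then maps the scheme through _storeprovider to a store class)::

    import re

    _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

    def schemeof(path):
        '''Return the URL scheme of path; plain filesystem paths that
        match no scheme fall back to 'file'.'''
        m = _scheme_re.match(path)
        return m.group(1) if m else 'file'

    assert schemeof('https://hg.example.com/repo') == 'https'
    assert schemeof('/srv/hg/repo') == 'file'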
diff --git a/hgext/largefiles/lfcommands.py b/hgext/largefiles/lfcommands.py
deleted file mode 100644
index de42edd..0000000
--- a/hgext/largefiles/lfcommands.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''High-level command function for lfconvert, plus the cmdtable.'''
-
-import os
-import shutil
-
-from mercurial import util, match as match_, hg, node, context, error, \
- cmdutil, scmutil
-from mercurial.i18n import _
-from mercurial.lock import release
-
-import lfutil
-import basestore
-
-# -- Commands ----------------------------------------------------------
-
-def lfconvert(ui, src, dest, *pats, **opts):
- '''convert a normal repository to a largefiles repository
-
- Convert repository SOURCE to a new repository DEST, identical to
- SOURCE except that certain files will be converted as largefiles:
- specifically, any file that matches any PATTERN *or* whose size is
- above the minimum size threshold is converted as a largefile. The
- size used to determine whether or not to track a file as a
- largefile is the size of the first version of the file. The
- minimum size can be specified either with --size or in
- configuration as ``largefiles.size``.
-
- After running this command you will need to make sure that
- largefiles is enabled anywhere you intend to push the new
- repository.
-
- Use --to-normal to convert largefiles back to normal files; after
- this, the DEST repository can be used without largefiles at all.'''
-
- if opts['to_normal']:
- tolfile = False
- else:
- tolfile = True
- size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
-
- if not hg.islocal(src):
- raise util.Abort(_('%s is not a local Mercurial repo') % src)
- if not hg.islocal(dest):
- raise util.Abort(_('%s is not a local Mercurial repo') % dest)
-
- rsrc = hg.repository(ui, src)
- ui.status(_('initializing destination %s\n') % dest)
- rdst = hg.repository(ui, dest, create=True)
-
- success = False
- dstwlock = dstlock = None
- try:
- # Lock destination to prevent modification while it is converted to.
- # Don't need to lock src because we are just reading from its history
- # which can't change.
- dstwlock = rdst.wlock()
- dstlock = rdst.lock()
-
- # Get a list of all changesets in the source. The easy way to do this
- # is to simply walk the changelog, using changelog.nodesbetween().
- # Take a look at mercurial/revlog.py:639 for more details.
- # Use a generator instead of a list to decrease memory usage
- ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
- rsrc.heads())[0])
- revmap = {node.nullid: node.nullid}
- if tolfile:
- lfiles = set()
- normalfiles = set()
- if not pats:
- pats = ui.configlist(lfutil.longname, 'patterns', default=[])
- if pats:
- matcher = match_.match(rsrc.root, '', list(pats))
- else:
- matcher = None
-
- lfiletohash = {}
- for ctx in ctxs:
- ui.progress(_('converting revisions'), ctx.rev(),
- unit=_('revision'), total=rsrc['tip'].rev())
- _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
- lfiles, normalfiles, matcher, size, lfiletohash)
- ui.progress(_('converting revisions'), None)
-
- if os.path.exists(rdst.wjoin(lfutil.shortname)):
- shutil.rmtree(rdst.wjoin(lfutil.shortname))
-
- for f in lfiletohash.keys():
- if os.path.isfile(rdst.wjoin(f)):
- os.unlink(rdst.wjoin(f))
- try:
- os.removedirs(os.path.dirname(rdst.wjoin(f)))
- except OSError:
- pass
-
- # If there were any files converted to largefiles, add largefiles
- # to the destination repository's requirements.
- if lfiles:
- rdst.requirements.add('largefiles')
- rdst._writerequirements()
- else:
- for ctx in ctxs:
- ui.progress(_('converting revisions'), ctx.rev(),
- unit=_('revision'), total=rsrc['tip'].rev())
- _addchangeset(ui, rsrc, rdst, ctx, revmap)
-
- ui.progress(_('converting revisions'), None)
- success = True
- finally:
- rdst.dirstate.clear()
- release(dstlock, dstwlock)
- if not success:
- # we failed, remove the new directory
- shutil.rmtree(rdst.root)
-
-def _addchangeset(ui, rsrc, rdst, ctx, revmap):
- # Convert src parents to dst parents
- parents = _convertparents(ctx, revmap)
-
- # Generate list of changed files
- files = _getchangedfiles(ctx, parents)
-
- def getfilectx(repo, memctx, f):
- if lfutil.standin(f) in files:
- # if the file isn't in the manifest then it was removed
- # or renamed, raise IOError to indicate this
- try:
- fctx = ctx.filectx(lfutil.standin(f))
- except error.LookupError:
- raise IOError
- renamed = fctx.renamed()
- if renamed:
- renamed = lfutil.splitstandin(renamed[0])
-
- hash = fctx.data().strip()
- path = lfutil.findfile(rsrc, hash)
- ### TODO: What if the file is not cached?
- data = ''
- fd = None
- try:
- fd = open(path, 'rb')
- data = fd.read()
- finally:
- if fd:
- fd.close()
- return context.memfilectx(f, data, 'l' in fctx.flags(),
- 'x' in fctx.flags(), renamed)
- else:
- return _getnormalcontext(repo.ui, ctx, f, revmap)
-
- dstfiles = []
- for file in files:
- if lfutil.isstandin(file):
- dstfiles.append(lfutil.splitstandin(file))
- else:
- dstfiles.append(file)
- # Commit
- _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
-
-def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
- matcher, size, lfiletohash):
- # Convert src parents to dst parents
- parents = _convertparents(ctx, revmap)
-
- # Generate list of changed files
- files = _getchangedfiles(ctx, parents)
-
- dstfiles = []
- for f in files:
- if f not in lfiles and f not in normalfiles:
- islfile = _islfile(f, ctx, matcher, size)
- # If this file was renamed or copied then copy
- # the lfileness of its predecessor
- if f in ctx.manifest():
- fctx = ctx.filectx(f)
- renamed = fctx.renamed()
- renamedlfile = renamed and renamed[0] in lfiles
- islfile |= renamedlfile
- if 'l' in fctx.flags():
- if renamedlfile:
- raise util.Abort(
- _('renamed/copied largefile %s becomes symlink')
- % f)
- islfile = False
- if islfile:
- lfiles.add(f)
- else:
- normalfiles.add(f)
-
- if f in lfiles:
- dstfiles.append(lfutil.standin(f))
- # largefile in manifest if it has not been removed/renamed
- if f in ctx.manifest():
- fctx = ctx.filectx(f)
- if 'l' in fctx.flags():
- renamed = fctx.renamed()
- if renamed and renamed[0] in lfiles:
- raise util.Abort(_('largefile %s becomes symlink') % f)
-
- # largefile was modified, update standins
- fullpath = rdst.wjoin(f)
- util.makedirs(os.path.dirname(fullpath))
- m = util.sha1('')
- m.update(ctx[f].data())
- hash = m.hexdigest()
- if f not in lfiletohash or lfiletohash[f] != hash:
- # open() outside the try so a failed open cannot leave fd unbound
- fd = open(fullpath, 'wb')
- try:
- fd.write(ctx[f].data())
- finally:
- fd.close()
- executable = 'x' in ctx[f].flags()
- os.chmod(fullpath, lfutil.getmode(executable))
- lfutil.writestandin(rdst, lfutil.standin(f), hash,
- executable)
- lfiletohash[f] = hash
- else:
- # normal file
- dstfiles.append(f)
-
- def getfilectx(repo, memctx, f):
- if lfutil.isstandin(f):
- # if the file isn't in the manifest then it was removed
- # or renamed, raise IOError to indicate this
- srcfname = lfutil.splitstandin(f)
- try:
- fctx = ctx.filectx(srcfname)
- except error.LookupError:
- raise IOError
- renamed = fctx.renamed()
- if renamed:
- # standin is always a largefile because largefile-ness
- # doesn't change after rename or copy
- renamed = lfutil.standin(renamed[0])
-
- return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
- fctx.flags(), 'x' in fctx.flags(), renamed)
- else:
- return _getnormalcontext(repo.ui, ctx, f, revmap)
-
- # Commit
- _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
-
-def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
- mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
- getfilectx, ctx.user(), ctx.date(), ctx.extra())
- ret = rdst.commitctx(mctx)
- rdst.setparents(ret)
- revmap[ctx.node()] = rdst.changelog.tip()
-
-# Generate list of changed files
-def _getchangedfiles(ctx, parents):
- files = set(ctx.files())
- if node.nullid not in parents:
- mc = ctx.manifest()
- mp1 = ctx.parents()[0].manifest()
- mp2 = ctx.parents()[1].manifest()
- files |= (set(mp1) | set(mp2)) - set(mc)
- for f in mc:
- if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
- files.add(f)
- return files
-
-# Convert src parents to dst parents
-def _convertparents(ctx, revmap):
- parents = []
- for p in ctx.parents():
- parents.append(revmap[p.node()])
- while len(parents) < 2:
- parents.append(node.nullid)
- return parents
-
-# Get memfilectx for a normal file
-def _getnormalcontext(ui, ctx, f, revmap):
- try:
- fctx = ctx.filectx(f)
- except error.LookupError:
- raise IOError
- renamed = fctx.renamed()
- if renamed:
- renamed = renamed[0]
-
- data = fctx.data()
- if f == '.hgtags':
- data = _converttags(ui, revmap, data)
- return context.memfilectx(f, data, 'l' in fctx.flags(),
- 'x' in fctx.flags(), renamed)
-
-# Remap tag data using a revision map
-def _converttags(ui, revmap, data):
- newdata = []
- for line in data.splitlines():
- try:
- id, name = line.split(' ', 1)
- except ValueError:
- ui.warn(_('skipping incorrectly formatted tag %s\n')
- % line)
- continue
- try:
- newid = node.bin(id)
- except TypeError:
- ui.warn(_('skipping incorrectly formatted id %s\n')
- % id)
- continue
- try:
- newdata.append('%s %s\n' % (node.hex(revmap[newid]),
- name))
- except KeyError:
- ui.warn(_('no mapping for id %s\n') % id)
- continue
- return ''.join(newdata)
-
-def _islfile(file, ctx, matcher, size):
- '''Return true if file should be considered a largefile, i.e.
- matcher matches it or it is larger than size.'''
- # never store special .hg* files as largefiles
- if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
- return False
- if matcher and matcher(file):
- return True
- try:
- return ctx.filectx(file).size() >= size * 1024 * 1024
- except error.LookupError:
- return False
-
-def uploadlfiles(ui, rsrc, rdst, files):
- '''upload largefiles to the central store'''
-
- if not files:
- return
-
- store = basestore._openstore(rsrc, rdst, put=True)
-
- at = 0
- ui.debug("sending statlfile command for %d largefiles\n" % len(files))
- retval = store.exists(files)
- files = filter(lambda h: not retval[h], files)
- ui.debug("%d largefiles need to be uploaded\n" % len(files))
-
- for hash in files:
- ui.progress(_('uploading largefiles'), at, unit='largefile',
- total=len(files))
- source = lfutil.findfile(rsrc, hash)
- if not source:
- raise util.Abort(_('largefile %s missing from store'
- ' (needs to be uploaded)') % hash)
- # XXX check for errors here
- store.put(source, hash)
- at += 1
- ui.progress(_('uploading largefiles'), None)
-
-def verifylfiles(ui, repo, all=False, contents=False):
- '''Verify that every big file revision in the current changeset
- exists in the central store. With --contents, also verify that
- the contents of each big file revision are correct (SHA-1 hash
- matches the revision ID). With --all, check every changeset in
- this repository.'''
- if all:
- # Pass a list to the function rather than an iterator because we know a
- # list will work.
- revs = range(len(repo))
- else:
- revs = ['.']
-
- store = basestore._openstore(repo)
- return store.verify(revs, contents=contents)
-
-def cachelfiles(ui, repo, node, filelist=None):
- '''cachelfiles ensures that all largefiles needed by the specified revision
- are present in the repository's largefile cache.
-
- returns a tuple (cached, missing). cached is the list of files downloaded
- by this operation; missing is the list of files that were needed but could
- not be found.'''
- lfiles = lfutil.listlfiles(repo, node)
- if filelist:
- lfiles = set(lfiles) & set(filelist)
- toget = []
-
- for lfile in lfiles:
- # If we are mid-merge, then we have to trust the standin that is in the
- # working copy to have the correct hash value. This is because the
- # original hg.merge() already updated the standin as part of the normal
- # merge process -- we just have to update the largefile to match.
- if (getattr(repo, "_ismerging", False) and
- os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
- expectedhash = lfutil.readstandin(repo, lfile)
- else:
- expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
-
- # if it exists and its hash matches, it might have been locally
- # modified before updating and the user chose 'local'. in this case,
- # it will not be in any store, so don't look for it.
- if ((not os.path.exists(repo.wjoin(lfile)) or
- expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
- not lfutil.findfile(repo, expectedhash)):
- toget.append((lfile, expectedhash))
-
- if toget:
- store = basestore._openstore(repo)
- ret = store.get(toget)
- return ret
-
- return ([], [])
-
-def downloadlfiles(ui, repo, rev=None):
- matchfn = scmutil.match(repo[None],
- [repo.wjoin(lfutil.shortname)], {})
- def prepare(ctx, fns):
- pass
- totalsuccess = 0
- totalmissing = 0
- for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
- prepare):
- success, missing = cachelfiles(ui, repo, ctx.node())
- totalsuccess += len(success)
- totalmissing += len(missing)
- ui.status(_("%d additional largefiles cached\n") % totalsuccess)
- if totalmissing > 0:
- ui.status(_("%d largefiles failed to download\n") % totalmissing)
- return totalsuccess, totalmissing
-
-def updatelfiles(ui, repo, filelist=None, printmessage=True):
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
-
- if filelist is not None:
- lfiles = [f for f in lfiles if f in filelist]
-
- printed = False
- if printmessage and lfiles:
- ui.status(_('getting changed largefiles\n'))
- printed = True
- cachelfiles(ui, repo, '.', lfiles)
-
- updated, removed = 0, 0
- for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
- # increment the appropriate counter according to _updatelfile's
- # return value
- updated += i > 0 and i or 0
- removed -= i < 0 and i or 0
- if printmessage and (removed or updated) and not printed:
- ui.status(_('getting changed largefiles\n'))
- printed = True
-
- lfdirstate.write()
- if printed and printmessage:
- ui.status(_('%d largefiles updated, %d removed\n') % (updated,
- removed))
- finally:
- wlock.release()
-
-def _updatelfile(repo, lfdirstate, lfile):
- '''updates a single largefile and copies the state of its standin from
- the repository's dirstate to its state in the lfdirstate.
-
- returns 1 if the file was modified, -1 if the file was removed, 0 if the
- file was unchanged, and None if the needed largefile was missing from the
- cache.'''
- ret = 0
- abslfile = repo.wjoin(lfile)
- absstandin = repo.wjoin(lfutil.standin(lfile))
- if os.path.exists(absstandin):
- if os.path.exists(absstandin+'.orig'):
- shutil.copyfile(abslfile, abslfile+'.orig')
- expecthash = lfutil.readstandin(repo, lfile)
- if (expecthash != '' and
- (not os.path.exists(abslfile) or
- expecthash != lfutil.hashfile(abslfile))):
- if not lfutil.copyfromcache(repo, expecthash, lfile):
- # use normallookup() to allocate an entry in the largefiles
- # dirstate, because its absence misleads lfilesrepo.status()
- # into reporting such cache-missing files as REMOVED.
- lfdirstate.normallookup(lfile)
- return None # don't try to set the mode
- else:
- # Synchronize largefile dirstate to the last modified time of
- # the file
- lfdirstate.normal(lfile)
- ret = 1
- mode = os.stat(absstandin).st_mode
- if mode != os.stat(abslfile).st_mode:
- os.chmod(abslfile, mode)
- ret = 1
- else:
- # Remove lfiles for which the standin is deleted, unless the
- # lfile is added to the repository again. This happens when a
- # largefile is converted back to a normal file: the standin
- # disappears, but a new (normal) file appears as the lfile.
- if os.path.exists(abslfile) and lfile not in repo[None]:
- util.unlinkpath(abslfile)
- ret = -1
- state = repo.dirstate[lfutil.standin(lfile)]
- if state == 'n':
- # When rebasing, we need to synchronize the standin and the largefile,
- # because otherwise the largefile will get reverted. But for commit's
- # sake, we have to mark the file as unclean.
- if getattr(repo, "_isrebasing", False):
- lfdirstate.normallookup(lfile)
- else:
- lfdirstate.normal(lfile)
- elif state == 'r':
- lfdirstate.remove(lfile)
- elif state == 'a':
- lfdirstate.add(lfile)
- elif state == '?':
- lfdirstate.drop(lfile)
- return ret
-
-def catlfile(repo, lfile, rev, filename):
- hash = lfutil.readstandin(repo, lfile, rev)
- if not lfutil.inusercache(repo.ui, hash):
- store = basestore._openstore(repo)
- success, missing = store.get([(lfile, hash)])
- if len(success) != 1:
- raise util.Abort(
- _('largefile %s is not in cache and could not be downloaded')
- % lfile)
- path = lfutil.usercachepath(repo.ui, hash)
- fpout = cmdutil.makefileobj(repo, filename)
- fpin = open(path, "rb")
- fpout.write(fpin.read())
- fpout.close()
- fpin.close()
- return 0
-
-# -- hg commands declarations ------------------------------------------------
-
-cmdtable = {
- 'lfconvert': (lfconvert,
- [('s', 'size', '',
- _('minimum size (MB) for files to be converted '
- 'as largefiles'),
- 'SIZE'),
- ('', 'to-normal', False,
- _('convert from a largefiles repo to a normal repo')),
- ],
- _('hg lfconvert SOURCE DEST [FILE ...]')),
- }
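_converttags above remaps tag nodes in their binary form via node.bin and node.hex; the remapping idea itself can be shown on plain hex strings. A simplified sketch (converttags here is a hypothetical stand-in that silently drops the malformed lines the real code warns about)::

    def converttags(revmap, data):
        '''Rewrite '<hex node> <name>' tag lines through revmap, a dict
        mapping source hex nodes to destination hex nodes.'''
        out = []
        for line in data.splitlines():
            try:
                oldid, name = line.split(' ', 1)
            except ValueError:
                continue  # incorrectly formatted tag line: skip it
            if oldid in revmap:
                out.append('%s %s\n' % (revmap[oldid], name))
        return ''.join(out)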
diff --git a/hgext/largefiles/lfutil.py b/hgext/largefiles/lfutil.py
deleted file mode 100644
index 6a64d89..0000000
--- a/hgext/largefiles/lfutil.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''largefiles utility code: must not import other modules in this package.'''
-
-import os
-import errno
-import platform
-import shutil
-import stat
-
-from mercurial import dirstate, httpconnection, match as match_, util, scmutil
-from mercurial.i18n import _
-
-shortname = '.hglf'
-longname = 'largefiles'
-
-
-# -- Portability wrappers ----------------------------------------------
-
-def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
- return dirstate.walk(matcher, [], unknown, ignored)
-
-def repoadd(repo, list):
- add = repo[None].add
- return add(list)
-
-def reporemove(repo, list, unlink=False):
- def remove(list, unlink):
- wlock = repo.wlock()
- try:
- if unlink:
- for f in list:
- try:
- util.unlinkpath(repo.wjoin(f))
- except OSError, inst:
- if inst.errno != errno.ENOENT:
- raise
- repo[None].forget(list)
- finally:
- wlock.release()
- return remove(list, unlink=unlink)
-
-def repoforget(repo, list):
- forget = repo[None].forget
- return forget(list)
-
-def findoutgoing(repo, remote, force):
- from mercurial import discovery
- common, _anyinc, _heads = discovery.findcommonincoming(repo,
- remote.peer(), force=force)
- return repo.changelog.findmissing(common)
-
-# -- Private worker functions ------------------------------------------
-
-def getminsize(ui, assumelfiles, opt, default=10):
- lfsize = opt
- if not lfsize and assumelfiles:
- lfsize = ui.config(longname, 'minsize', default=default)
- if lfsize:
- try:
- lfsize = float(lfsize)
- except ValueError:
- raise util.Abort(_('largefiles: size must be a number (not %s)\n')
- % lfsize)
- if lfsize is None:
- raise util.Abort(_('minimum size for largefiles must be specified'))
- return lfsize
-
-def link(src, dest):
- try:
- util.oslink(src, dest)
- except OSError:
- # if hardlinks fail, fallback on atomic copy
- dst = util.atomictempfile(dest)
- for chunk in util.filechunkiter(open(src, 'rb')):
- dst.write(chunk)
- dst.close()
- os.chmod(dest, os.stat(src).st_mode)
-
-def usercachepath(ui, hash):
- path = ui.configpath(longname, 'usercache', None)
- if path:
- path = os.path.join(path, hash)
- else:
- if os.name == 'nt':
- appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
- if appdata:
- path = os.path.join(appdata, longname, hash)
- elif platform.system() == 'Darwin':
- home = os.getenv('HOME')
- if home:
- path = os.path.join(home, 'Library', 'Caches',
- longname, hash)
- elif os.name == 'posix':
- path = os.getenv('XDG_CACHE_HOME')
- if path:
- path = os.path.join(path, longname, hash)
- else:
- home = os.getenv('HOME')
- if home:
- path = os.path.join(home, '.cache', longname, hash)
- else:
- raise util.Abort(_('unknown operating system: %s\n') % os.name)
- return path
-
-def inusercache(ui, hash):
- path = usercachepath(ui, hash)
- return path and os.path.exists(path)
-
-def findfile(repo, hash):
- if instore(repo, hash):
- repo.ui.note(_('found %s in store\n') % hash)
- return storepath(repo, hash)
- elif inusercache(repo.ui, hash):
- repo.ui.note(_('found %s in system cache\n') % hash)
- path = storepath(repo, hash)
- util.makedirs(os.path.dirname(path))
- link(usercachepath(repo.ui, hash), path)
- return path
- return None
-
-class largefilesdirstate(dirstate.dirstate):
- def __getitem__(self, key):
- return super(largefilesdirstate, self).__getitem__(unixpath(key))
- def normal(self, f):
- return super(largefilesdirstate, self).normal(unixpath(f))
- def remove(self, f):
- return super(largefilesdirstate, self).remove(unixpath(f))
- def add(self, f):
- return super(largefilesdirstate, self).add(unixpath(f))
- def drop(self, f):
- return super(largefilesdirstate, self).drop(unixpath(f))
- def forget(self, f):
- return super(largefilesdirstate, self).forget(unixpath(f))
- def normallookup(self, f):
- return super(largefilesdirstate, self).normallookup(unixpath(f))
-
-def openlfdirstate(ui, repo):
- '''
- Return a dirstate object that tracks largefiles: i.e. its root is
- the repo root, but it is saved in .hg/largefiles/dirstate.
- '''
- admin = repo.join(longname)
- opener = scmutil.opener(admin)
- lfdirstate = largefilesdirstate(opener, ui, repo.root,
- repo.dirstate._validate)
-
- # If the largefiles dirstate does not exist, populate and create
- # it. This ensures that we create it on the first meaningful
- # largefiles operation in a new clone.
- if not os.path.exists(os.path.join(admin, 'dirstate')):
- util.makedirs(admin)
- matcher = getstandinmatcher(repo)
- for standin in dirstatewalk(repo.dirstate, matcher):
- lfile = splitstandin(standin)
- hash = readstandin(repo, lfile)
- lfdirstate.normallookup(lfile)
- try:
- if hash == hashfile(repo.wjoin(lfile)):
- lfdirstate.normal(lfile)
- except OSError, err:
- if err.errno != errno.ENOENT:
- raise
- return lfdirstate
-
-def lfdirstatestatus(lfdirstate, repo, rev):
- match = match_.always(repo.root, repo.getcwd())
- s = lfdirstate.status(match, [], False, False, False)
- unsure, modified, added, removed, missing, unknown, ignored, clean = s
- for lfile in unsure:
- if repo[rev][standin(lfile)].data().strip() != \
- hashfile(repo.wjoin(lfile)):
- modified.append(lfile)
- else:
- clean.append(lfile)
- lfdirstate.normal(lfile)
- return (modified, added, removed, missing, unknown, ignored, clean)
-
-def listlfiles(repo, rev=None, matcher=None):
- '''return a list of largefiles in the working copy or the
- specified changeset'''
-
- if matcher is None:
- matcher = getstandinmatcher(repo)
-
- # ignore unknown files in working directory
- return [splitstandin(f)
- for f in repo[rev].walk(matcher)
- if rev is not None or repo.dirstate[f] != '?']
-
-def instore(repo, hash):
- return os.path.exists(storepath(repo, hash))
-
-def storepath(repo, hash):
- return repo.join(os.path.join(longname, hash))
-
-def copyfromcache(repo, hash, filename):
- '''Copy the specified largefile from the repo or system cache to
- filename in the repository. Return true on success or false if the
- file was not found in either cache (which should not happen:
- this is meant to be called only after ensuring that the needed
- largefile exists in the cache).'''
- path = findfile(repo, hash)
- if path is None:
- return False
- util.makedirs(os.path.dirname(repo.wjoin(filename)))
- # The write may fail before the file is fully written, but we
- # don't use atomic writes in the working copy.
- shutil.copy(path, repo.wjoin(filename))
- return True
-
-def copytostore(repo, rev, file, uploaded=False):
- hash = readstandin(repo, file)
- if instore(repo, hash):
- return
- copytostoreabsolute(repo, repo.wjoin(file), hash)
-
-def copyalltostore(repo, node):
- '''Copy all largefiles in a given revision to the store'''
-
- ctx = repo[node]
- for filename in ctx.files():
- if isstandin(filename) and filename in ctx.manifest():
- realfile = splitstandin(filename)
- copytostore(repo, ctx.node(), realfile)
-
-
-def copytostoreabsolute(repo, file, hash):
- util.makedirs(os.path.dirname(storepath(repo, hash)))
- if inusercache(repo.ui, hash):
- link(usercachepath(repo.ui, hash), storepath(repo, hash))
- else:
- dst = util.atomictempfile(storepath(repo, hash),
- createmode=repo.store.createmode)
- for chunk in util.filechunkiter(open(file, 'rb')):
- dst.write(chunk)
- dst.close()
- linktousercache(repo, hash)
-
-def linktousercache(repo, hash):
- path = usercachepath(repo.ui, hash)
- if path:
- util.makedirs(os.path.dirname(path))
- link(storepath(repo, hash), path)
-
-def getstandinmatcher(repo, pats=[], opts={}):
- '''Return a match object that applies pats to the standin directory'''
- standindir = repo.pathto(shortname)
- if pats:
- # patterns supplied: search standin directory relative to current dir
- cwd = repo.getcwd()
- if os.path.isabs(cwd):
- # cwd is an absolute path for hg -R <reponame>
- # work relative to the repository root in this case
- cwd = ''
- pats = [os.path.join(standindir, cwd, pat) for pat in pats]
- elif os.path.isdir(standindir):
- # no patterns: relative to repo root
- pats = [standindir]
- else:
- # no patterns and no standin dir: return matcher that matches nothing
- match = match_.match(repo.root, None, [], exact=True)
- match.matchfn = lambda f: False
- return match
- return getmatcher(repo, pats, opts, showbad=False)
-
-def getmatcher(repo, pats=[], opts={}, showbad=True):
- '''Wrapper around scmutil.match() that adds showbad: if false,
- neuter the match object's bad() method so it does not print any
- warnings about missing files or directories.'''
- match = scmutil.match(repo[None], pats, opts)
-
- if not showbad:
- match.bad = lambda f, msg: None
- return match
-
-def composestandinmatcher(repo, rmatcher):
- '''Return a matcher that accepts standins corresponding to the
- files accepted by rmatcher. Pass the list of files in the matcher
- as the paths specified by the user.'''
- smatcher = getstandinmatcher(repo, rmatcher.files())
- isstandin = smatcher.matchfn
- def composedmatchfn(f):
- return isstandin(f) and rmatcher.matchfn(splitstandin(f))
- smatcher.matchfn = composedmatchfn
-
- return smatcher
-
-def standin(filename):
- '''Return the repo-relative path to the standin for the specified big
- file.'''
- # Notes:
- # 1) Most callers want an absolute path, but _createstandin() needs
- # it repo-relative so lfadd() can pass it to repoadd(). So leave
- # it up to the caller to use repo.wjoin() to get an absolute path.
- # 2) Join with '/' because that's what dirstate always uses, even on
- # Windows. Change existing separator to '/' first in case we are
- # passed filenames from an external source (like the command line).
- return shortname + '/' + util.pconvert(filename)
-
-def isstandin(filename):
- '''Return true if filename is a big file standin. filename must be
- in Mercurial's internal form (slash-separated).'''
- return filename.startswith(shortname + '/')
-
-def splitstandin(filename):
- # Split on / because that's what dirstate always uses, even on Windows.
- # Change local separator to / first just in case we are passed filenames
- # from an external source (like the command line).
- bits = util.pconvert(filename).split('/', 1)
- if len(bits) == 2 and bits[0] == shortname:
- return bits[1]
- else:
- return None
-
-def updatestandin(repo, standin):
- file = repo.wjoin(splitstandin(standin))
- if os.path.exists(file):
- hash = hashfile(file)
- executable = getexecutable(file)
- writestandin(repo, standin, hash, executable)
-
-def readstandin(repo, filename, node=None):
- '''read hex hash from standin for filename at given node, or working
- directory if no node is given'''
- return repo[node][standin(filename)].data().strip()
-
-def writestandin(repo, standin, hash, executable):
- '''write hash to <repo.root>/<standin>'''
- writehash(hash, repo.wjoin(standin), executable)
-
-def copyandhash(instream, outfile):
- '''Read bytes from instream (iterable) and write them to outfile,
- computing the SHA-1 hash of the data along the way. Close outfile
- when done and return the binary hash.'''
- hasher = util.sha1('')
- for data in instream:
- hasher.update(data)
- outfile.write(data)
-
- # Blecch: closing a file that somebody else opened is rude and
- # wrong. But it's so darn convenient and practical! After all,
- # outfile was opened just to copy and hash.
- outfile.close()
-
- return hasher.digest()
-
-def hashrepofile(repo, file):
- return hashfile(repo.wjoin(file))
-
-def hashfile(file):
- if not os.path.exists(file):
- return ''
- hasher = util.sha1('')
- fd = open(file, 'rb')
- for data in blockstream(fd):
- hasher.update(data)
- fd.close()
- return hasher.hexdigest()
-
-class limitreader(object):
- def __init__(self, f, limit):
- self.f = f
- self.limit = limit
-
- def read(self, length):
- if self.limit == 0:
- return ''
- length = length > self.limit and self.limit or length
- self.limit -= length
- return self.f.read(length)
-
- def close(self):
- pass
-
-def blockstream(infile, blocksize=128 * 1024):
- """Generator that yields blocks of data from infile and closes infile."""
- while True:
- data = infile.read(blocksize)
- if not data:
- break
- yield data
- # same blecch as copyandhash() above
- infile.close()
-
-def writehash(hash, filename, executable):
- util.makedirs(os.path.dirname(filename))
- util.writefile(filename, hash + '\n')
- os.chmod(filename, getmode(executable))
-
-def getexecutable(filename):
- mode = os.stat(filename).st_mode
- return ((mode & stat.S_IXUSR) and
- (mode & stat.S_IXGRP) and
- (mode & stat.S_IXOTH))
-
-def getmode(executable):
- if executable:
- return 0755
- else:
- return 0644
-
-def urljoin(first, second, *arg):
- def join(left, right):
- if not left.endswith('/'):
- left += '/'
- if right.startswith('/'):
- right = right[1:]
- return left + right
-
- url = join(first, second)
- for a in arg:
- url = join(url, a)
- return url
-
-def hexsha1(data):
- """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
- object data"""
- h = util.sha1()
- for chunk in util.filechunkiter(data):
- h.update(chunk)
- return h.hexdigest()
-
-def httpsendfile(ui, filename):
- return httpconnection.httpsendfile(ui, filename, 'rb')
-
-def unixpath(path):
- '''Return a version of path normalized for use with the lfdirstate.'''
- return util.pconvert(os.path.normpath(path))
-
-def islfilesrepo(repo):
- return ('largefiles' in repo.requirements and
- util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
-
-class storeprotonotcapable(Exception):
- def __init__(self, storetypes):
- self.storetypes = storetypes
-
-def getcurrentheads(repo):
- branches = repo.branchmap()
- heads = []
- for branch in branches:
- newheads = repo.branchheads(branch)
- heads = heads + newheads
- return heads
-
-def getstandinsstate(repo):
- standins = []
- matcher = getstandinmatcher(repo)
- for standin in dirstatewalk(repo.dirstate, matcher):
- lfile = splitstandin(standin)
- standins.append((lfile, readstandin(repo, lfile)))
- return standins
-
-def getlfilestoupdate(oldstandins, newstandins):
- changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
- filelist = []
- for f in changedstandins:
- if f[0] not in filelist:
- filelist.append(f[0])
- return filelist
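usercachepath above spreads the per-platform cache location over several branches. Condensed into one standalone function (defaultusercache is a hypothetical name; it consults the same environment variables but returns None where the extension would raise util.Abort)::

    import os
    import platform

    def defaultusercache(name='largefiles'):
        '''Return the platform default user cache directory for name,
        or None if no suitable environment variable is set.'''
        if os.name == 'nt':
            base = os.getenv('LOCALAPPDATA') or os.getenv('APPDATA')
            return base and os.path.join(base, name)
        if platform.system() == 'Darwin':
            home = os.getenv('HOME')
            return home and os.path.join(home, 'Library', 'Caches', name)
        if os.name == 'posix':
            base = os.getenv('XDG_CACHE_HOME')
            if base:
                return os.path.join(base, name)
            home = os.getenv('HOME')
            return home and os.path.join(home, '.cache', name)
        return None  # unknown OS: the extension aborts here instead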
diff --git a/hgext/largefiles/localstore.py b/hgext/largefiles/localstore.py
deleted file mode 100644
index 4995743..0000000
--- a/hgext/largefiles/localstore.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''store class for local filesystem'''
-
-import os
-
-from mercurial import util
-from mercurial.i18n import _
-
-import lfutil
-import basestore
-
-class localstore(basestore.basestore):
- '''localstore first attempts to grab files out of the store in the remote
- Mercurial repository. Failing that, it attempts to grab the files from
- the user cache.'''
-
- def __init__(self, ui, repo, remote):
- url = os.path.join(remote.local().path, '.hg', lfutil.longname)
- super(localstore, self).__init__(ui, repo, util.expandpath(url))
- self.remote = remote.local()
-
- def put(self, source, hash):
- util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
- if lfutil.instore(self.remote, hash):
- return
- lfutil.link(lfutil.storepath(self.repo, hash),
- lfutil.storepath(self.remote, hash))
-
- def exists(self, hash):
- return lfutil.instore(self.remote, hash)
-
- def _getfile(self, tmpfile, filename, hash):
- if lfutil.instore(self.remote, hash):
- path = lfutil.storepath(self.remote, hash)
- elif lfutil.inusercache(self.ui, hash):
- path = lfutil.usercachepath(self.ui, hash)
- else:
- raise basestore.StoreError(filename, hash, '',
- _("can't get file locally"))
- fd = open(path, 'rb')
- try:
- return lfutil.copyandhash(fd, tmpfile)
- finally:
- fd.close()
-
- def _verifyfile(self, cctx, cset, contents, standin, verified):
- filename = lfutil.splitstandin(standin)
- if not filename:
- return False
- fctx = cctx[standin]
- key = (filename, fctx.filenode())
- if key in verified:
- return False
-
- expecthash = fctx.data()[0:40]
- verified.add(key)
- if not lfutil.instore(self.remote, expecthash):
- self.ui.warn(
- _('changeset %s: %s missing\n'
- ' (looked for hash %s)\n')
- % (cset, filename, expecthash))
- return True # failed
-
- if contents:
- storepath = lfutil.storepath(self.remote, expecthash)
- actualhash = lfutil.hashfile(storepath)
- if actualhash != expecthash:
- self.ui.warn(
- _('changeset %s: %s: contents differ\n'
- ' (%s:\n'
- ' expected hash %s,\n'
- ' but got %s)\n')
- % (cset, filename, storepath, expecthash, actualhash))
- return True # failed
- return False
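localstore.put links files between the repository store and the remote store through lfutil.link, which tries a hardlink and falls back to copying. The strategy in isolation (linkorcopy is a hypothetical helper; the real fallback writes through util.atomictempfile rather than shutil)::

    import os
    import shutil

    def linkorcopy(src, dest):
        '''Hardlink src to dest where the filesystem allows it;
        otherwise copy the bytes and preserve the permission bits.'''
        try:
            os.link(src, dest)
        except OSError:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)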
diff --git a/hgext/largefiles/overrides.py b/hgext/largefiles/overrides.py
deleted file mode 100644
index 3b42695..0000000
--- a/hgext/largefiles/overrides.py
+++ /dev/null
@@ -1,1080 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''Overridden Mercurial commands and functions for the largefiles extension'''
-
-import os
-import copy
-
-from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
- node, archival, error, merge
-from mercurial.i18n import _
-from mercurial.node import hex
-from hgext import rebase
-
-import lfutil
-import lfcommands
-
-# -- Utility functions: commonly/repeatedly needed functionality ---------------
-
-def installnormalfilesmatchfn(manifest):
- '''overrides scmutil.match so that the matcher it returns will ignore all
- largefiles'''
- oldmatch = None # for the closure
- def overridematch(ctx, pats=[], opts={}, globbed=False,
- default='relpath'):
- match = oldmatch(ctx, pats, opts, globbed, default)
- m = copy.copy(match)
- notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
- manifest)
- m._files = filter(notlfile, m._files)
- m._fmap = set(m._files)
- origmatchfn = m.matchfn
- m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
- return m
- oldmatch = installmatchfn(overridematch)
-
-def installmatchfn(f):
- oldmatch = scmutil.match
- setattr(f, 'oldmatch', oldmatch)
- scmutil.match = f
- return oldmatch
-
-def restorematchfn():
- '''restores scmutil.match to what it was before installnormalfilesmatchfn
- was called. no-op if scmutil.match is its original function.
-
- Note that n calls to installnormalfilesmatchfn will require n calls to
- restore matchfn to reverse'''
- scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
-
-def addlargefiles(ui, repo, *pats, **opts):
- large = opts.pop('large', None)
- lfsize = lfutil.getminsize(
- ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
-
- lfmatcher = None
- if lfutil.islfilesrepo(repo):
- lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
- if lfpats:
- lfmatcher = match_.match(repo.root, '', list(lfpats))
-
- lfnames = []
- m = scmutil.match(repo[None], pats, opts)
- m.bad = lambda x, y: None
- wctx = repo[None]
- for f in repo.walk(m):
- exact = m.exact(f)
- lfile = lfutil.standin(f) in wctx
- nfile = f in wctx
- exists = lfile or nfile
-
- # Don't warn the user when they attempt to add a normal tracked file.
- # The normal add code will do that for us.
- if exact and exists:
- if lfile:
- ui.warn(_('%s already a largefile\n') % f)
- continue
-
- if (exact or not exists) and not lfutil.isstandin(f):
- wfile = repo.wjoin(f)
-
- # In case the file was removed previously, but not committed
- # (issue3507)
- if not os.path.exists(wfile):
- continue
-
- abovemin = (lfsize and
- os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
- if large or abovemin or (lfmatcher and lfmatcher(f)):
- lfnames.append(f)
- if ui.verbose or not exact:
- ui.status(_('adding %s as a largefile\n') % m.rel(f))
-
- bad = []
- standins = []
-
- # Need to lock, otherwise there could be a race condition between
- # when standins are created and added to the repo.
- wlock = repo.wlock()
- try:
- if not opts.get('dry_run'):
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for f in lfnames:
- standinname = lfutil.standin(f)
- lfutil.writestandin(repo, standinname, hash='',
- executable=lfutil.getexecutable(repo.wjoin(f)))
- standins.append(standinname)
- if lfdirstate[f] == 'r':
- lfdirstate.normallookup(f)
- else:
- lfdirstate.add(f)
- lfdirstate.write()
- bad += [lfutil.splitstandin(f)
- for f in lfutil.repoadd(repo, standins)
- if f in m.files()]
- finally:
- wlock.release()
- return bad
-
-def removelargefiles(ui, repo, *pats, **opts):
- after = opts.get('after')
- if not pats and not after:
- raise util.Abort(_('no files specified'))
- m = scmutil.match(repo[None], pats, opts)
- try:
- repo.lfstatus = True
- s = repo.status(match=m, clean=True)
- finally:
- repo.lfstatus = False
- manifest = repo[None].manifest()
- modified, added, deleted, clean = [[f for f in list
- if lfutil.standin(f) in manifest]
- for list in [s[0], s[1], s[3], s[6]]]
-
- def warn(files, reason):
- for f in files:
- ui.warn(_('not removing %s: %s (use forget to undo)\n')
- % (m.rel(f), reason))
-
- if after:
- remove, forget = deleted, []
- warn(modified + added + clean, _('file still exists'))
- else:
- remove, forget = deleted + clean, []
- warn(modified, _('file is modified'))
- warn(added, _('file has been marked for add'))
-
- for f in sorted(remove + forget):
- if ui.verbose or not m.exact(f):
- ui.status(_('removing %s\n') % m.rel(f))
-
- # Need to lock because standin files are deleted then removed from the
- # repository and we could race in between.
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for f in remove:
- if not after:
- # If this is being called by addremove, notify the user that we
- # are removing the file.
- if getattr(repo, "_isaddremove", False):
- ui.status(_('removing %s\n') % f)
- if os.path.exists(repo.wjoin(f)):
- util.unlinkpath(repo.wjoin(f))
- lfdirstate.remove(f)
- lfdirstate.write()
- forget = [lfutil.standin(f) for f in forget]
- remove = [lfutil.standin(f) for f in remove]
- lfutil.repoforget(repo, forget)
- # If this is being called by addremove, let the original addremove
- # function handle this.
- if not getattr(repo, "_isaddremove", False):
- lfutil.reporemove(repo, remove, unlink=True)
- else:
- lfutil.reporemove(repo, remove, unlink=False)
- finally:
- wlock.release()
-
-# For overriding mercurial.hgweb.webcommands so that largefiles will
-# appear at their right place in the manifests.
-def decodepath(orig, path):
- return lfutil.splitstandin(path) or path
-
-# -- Wrappers: modify existing commands --------------------------------
-
-# Add works by going through the files that the user wanted to add and
-# checking if they should be added as largefiles. Then it makes a new
-# matcher which matches only the normal files and runs the original
-# version of add.
-def overrideadd(orig, ui, repo, *pats, **opts):
- normal = opts.pop('normal')
- if normal:
- if opts.get('large'):
- raise util.Abort(_('--normal cannot be used with --large'))
- return orig(ui, repo, *pats, **opts)
- bad = addlargefiles(ui, repo, *pats, **opts)
- installnormalfilesmatchfn(repo[None].manifest())
- result = orig(ui, repo, *pats, **opts)
- restorematchfn()
-
- return (result == 1 or bad) and 1 or 0
-
-def overrideremove(orig, ui, repo, *pats, **opts):
- installnormalfilesmatchfn(repo[None].manifest())
- orig(ui, repo, *pats, **opts)
- restorematchfn()
- removelargefiles(ui, repo, *pats, **opts)
-
-def overridestatusfn(orig, repo, rev2, **opts):
- try:
- repo._repo.lfstatus = True
- return orig(repo, rev2, **opts)
- finally:
- repo._repo.lfstatus = False
-
-def overridestatus(orig, ui, repo, *pats, **opts):
- try:
- repo.lfstatus = True
- return orig(ui, repo, *pats, **opts)
- finally:
- repo.lfstatus = False
-
-def overridedirty(orig, repo, ignoreupdate=False):
- try:
- repo._repo.lfstatus = True
- return orig(repo, ignoreupdate)
- finally:
- repo._repo.lfstatus = False
-
-def overridelog(orig, ui, repo, *pats, **opts):
- try:
- repo.lfstatus = True
- orig(ui, repo, *pats, **opts)
- finally:
- repo.lfstatus = False
-
-def overrideverify(orig, ui, repo, *pats, **opts):
- large = opts.pop('large', False)
- all = opts.pop('lfa', False)
- contents = opts.pop('lfc', False)
-
- result = orig(ui, repo, *pats, **opts)
- if large:
- result = result or lfcommands.verifylfiles(ui, repo, all, contents)
- return result
-
-# Override needs to refresh standins so that update's normal merge
-# will go through properly. Then the other update hook (overriding repo.update)
- # will get the new files. Filemerge is also overridden so that the merge
-# will merge standins correctly.
-def overrideupdate(orig, ui, repo, *pats, **opts):
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
- False, False)
- (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
-
- # Need to lock between the standins getting updated and their
- # largefiles getting updated
- wlock = repo.wlock()
- try:
- if opts['check']:
- mod = len(modified) > 0
- for lfile in unsure:
- standin = lfutil.standin(lfile)
- if repo['.'][standin].data().strip() != \
- lfutil.hashfile(repo.wjoin(lfile)):
- mod = True
- else:
- lfdirstate.normal(lfile)
- lfdirstate.write()
- if mod:
- raise util.Abort(_('uncommitted local changes'))
- # XXX handle removed differently
- if not opts['clean']:
- for lfile in unsure + modified + added:
- lfutil.updatestandin(repo, lfutil.standin(lfile))
- finally:
- wlock.release()
- return orig(ui, repo, *pats, **opts)
-
-# Before starting the manifest merge, merge.updates will call
-# _checkunknown to check if there are any files in the merged-in
-# changeset that collide with unknown files in the working copy.
-#
-# The largefiles are seen as unknown, so this prevents us from merging
-# in a file 'foo' if we already have a largefile with the same name.
-#
-# The overridden function filters the unknown files by removing any
-# largefiles. This makes the merge proceed and we can then handle this
-# case further in the overridden manifestmerge function below.
-def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
- if lfutil.standin(f) in wctx:
- return False
- return origfn(repo, wctx, mctx, f)
-
-# The manifest merge handles conflicts on the manifest level. We want
-# to handle changes in largefile-ness of files at this level too.
-#
-# The strategy is to run the original manifestmerge and then process
-# the action list it outputs. There are two cases we need to deal with:
-#
-# 1. Normal file in p1, largefile in p2. Here the largefile is
-# detected via its standin file, which will enter the working copy
-# with a "get" action. It is not "merge" since the standin is all
-# Mercurial is concerned with at this level -- the link to the
-# existing normal file is not relevant here.
-#
-# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
-# since the largefile will be present in the working copy and
-# different from the normal file in p2. Mercurial therefore
-# triggers a merge action.
-#
-# In both cases, we prompt the user and emit new actions to either
-# remove the standin (if the normal file was kept) or to remove the
-# normal file and get the standin (if the largefile was kept). The
-# default prompt answer is to use the largefile version since it was
-# presumably changed on purpose.
-#
-# Finally, the merge.applyupdates function will then take care of
-# writing the files into the working copy and lfcommands.updatelfiles
-# will update the largefiles.
-def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
- actions = origfn(repo, p1, p2, pa, overwrite, partial)
- processed = []
-
- for action in actions:
- if overwrite:
- processed.append(action)
- continue
- f, m = action[:2]
-
- choices = (_('&Largefile'), _('&Normal file'))
- if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
- # Case 1: normal file in the working copy, largefile in
- # the second parent
- lfile = lfutil.splitstandin(f)
- standin = f
- msg = _('%s has been turned into a largefile\n'
- 'use (l)argefile or keep as (n)ormal file?') % lfile
- if repo.ui.promptchoice(msg, choices, 0) == 0:
- processed.append((lfile, "r"))
- processed.append((standin, "g", p2.flags(standin)))
- else:
- processed.append((standin, "r"))
- elif m == "g" and lfutil.standin(f) in p1 and f in p2:
- # Case 2: largefile in the working copy, normal file in
- # the second parent
- standin = lfutil.standin(f)
- lfile = f
- msg = _('%s has been turned into a normal file\n'
- 'keep as (l)argefile or use (n)ormal file?') % lfile
- if repo.ui.promptchoice(msg, choices, 0) == 0:
- processed.append((lfile, "r"))
- else:
- processed.append((standin, "r"))
- processed.append((lfile, "g", p2.flags(lfile)))
- else:
- processed.append(action)
-
- return processed
-
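-# A minimal sketch of the rewriting above (file names hypothetical; action
-# tuples are (path, action-code[, flags]) pairs as emitted by
-# manifestmerge): for case 1, keeping the largefile removes the normal
-# file and gets the standin, while declining removes the standin instead.
-def _sketchcase1(keeplargefile):
-    standin = '.hglf/big.dat'   # hypothetical standin path
-    lfile = 'big.dat'           # the colliding normal file
-    if keeplargefile:
-        return [(lfile, 'r'), (standin, 'g', '')]
-    return [(standin, 'r')]
-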
-# Override filemerge to prompt the user about how they wish to merge
-# largefiles. This will handle identical edits, and copy/rename +
-# edit without prompting the user.
-def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
- # Use better variable names here. Because this is a wrapper we cannot
- # change the variable names in the function declaration.
- fcdest, fcother, fcancestor = fcd, fco, fca
- if not lfutil.isstandin(orig):
- return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
- else:
- if not fcother.cmp(fcdest): # files identical?
- return None
-
- # backwards merge: use the working dir parent as the ancestor
- if fcancestor == fcother:
- fcancestor = fcdest.parents()[0]
-
- if orig != fcother.path():
- repo.ui.status(_('merging %s and %s to %s\n')
- % (lfutil.splitstandin(orig),
- lfutil.splitstandin(fcother.path()),
- lfutil.splitstandin(fcdest.path())))
- else:
- repo.ui.status(_('merging %s\n')
- % lfutil.splitstandin(fcdest.path()))
-
- if fcancestor.path() != fcother.path() and fcother.data() == \
- fcancestor.data():
- return 0
- if fcancestor.path() != fcdest.path() and fcdest.data() == \
- fcancestor.data():
- repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
- return 0
-
- if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
- 'keep (l)ocal or take (o)ther?') %
- lfutil.splitstandin(orig),
- (_('&Local'), _('&Other')), 0) == 0:
- return 0
- else:
- repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
- return 0
-
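-# A minimal sketch of the short-circuit logic above, phrased on raw
-# contents rather than file contexts (helper name hypothetical): identical
-# largefiles need no merge, a side that still matches the ancestor yields
-# to the other side, and only a genuine three-way conflict prompts.
-def _mergeoutcome(dest, other, ancestor):
-    if dest == other:
-        return 'identical'    # overridefilemerge returns None
-    if other == ancestor:
-        return 'keep-local'   # returns 0 without writing
-    if dest == ancestor:
-        return 'take-other'   # writes the other data, returns 0
-    return 'prompt-user'
-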
-# Copy first changes the matchers to match standins instead of
-# largefiles. Then it overrides util.copyfile in that function it
-# checks if the destination largefile already exists. It also keeps a
-# list of copied files so that the largefiles can be copied and the
-# dirstate updated.
-def overridecopy(orig, ui, repo, pats, opts, rename=False):
- # doesn't remove largefile on rename
- if len(pats) < 2:
- # this isn't legal, let the original function deal with it
- return orig(ui, repo, pats, opts, rename)
-
- def makestandin(relpath):
- path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
- return repo.wjoin(lfutil.standin(path))
-
- fullpats = scmutil.expandpats(pats)
- dest = fullpats[-1]
-
- if os.path.isdir(dest):
- if not os.path.isdir(makestandin(dest)):
- os.makedirs(makestandin(dest))
- # This could copy both lfiles and normal files in one command,
- # but we don't want to do that. First replace their matcher to
- # only match normal files and run it, then replace it to just
- # match largefiles and run it again.
- nonormalfiles = False
- nolfiles = False
- try:
- try:
- installnormalfilesmatchfn(repo[None].manifest())
- result = orig(ui, repo, pats, opts, rename)
- except util.Abort, e:
- if str(e) != _('no files to copy'):
- raise e
- else:
- nonormalfiles = True
- result = 0
- finally:
- restorematchfn()
-
- # The first rename can cause our current working directory to be removed.
- # In that case there is nothing left to copy/rename so just quit.
- try:
- repo.getcwd()
- except OSError:
- return result
-
- try:
- try:
- # When we call orig below it creates the standins, but we don't add
- # them to the dirstate until later, so lock during that time.
- wlock = repo.wlock()
-
- manifest = repo[None].manifest()
- oldmatch = None # for the closure
- def overridematch(ctx, pats=[], opts={}, globbed=False,
- default='relpath'):
- newpats = []
- # The patterns were previously mangled to add the standin
- # directory; we need to remove that now
- for pat in pats:
- if match_.patkind(pat) is None and lfutil.shortname in pat:
- newpats.append(pat.replace(lfutil.shortname, ''))
- else:
- newpats.append(pat)
- match = oldmatch(ctx, newpats, opts, globbed, default)
- m = copy.copy(match)
- lfile = lambda f: lfutil.standin(f) in manifest
- m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
- m._fmap = set(m._files)
- origmatchfn = m.matchfn
- m.matchfn = lambda f: (lfutil.isstandin(f) and
- (f in manifest) and
- origmatchfn(lfutil.splitstandin(f)) or
- None)
- return m
- oldmatch = installmatchfn(overridematch)
- listpats = []
- for pat in pats:
- if match_.patkind(pat) is not None:
- listpats.append(pat)
- else:
- listpats.append(makestandin(pat))
-
- try:
- origcopyfile = util.copyfile
- copiedfiles = []
- def overridecopyfile(src, dest):
- if (lfutil.shortname in src and
- dest.startswith(repo.wjoin(lfutil.shortname))):
- destlfile = dest.replace(lfutil.shortname, '')
- if not opts['force'] and os.path.exists(destlfile):
- raise IOError('',
- _('destination largefile already exists'))
- copiedfiles.append((src, dest))
- origcopyfile(src, dest)
-
- util.copyfile = overridecopyfile
- result += orig(ui, repo, listpats, opts, rename)
- finally:
- util.copyfile = origcopyfile
-
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for (src, dest) in copiedfiles:
- if (lfutil.shortname in src and
- dest.startswith(repo.wjoin(lfutil.shortname))):
- srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
- destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
- destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
- if not os.path.isdir(destlfiledir):
- os.makedirs(destlfiledir)
- if rename:
- os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
- lfdirstate.remove(srclfile)
- else:
- util.copyfile(repo.wjoin(srclfile),
- repo.wjoin(destlfile))
-
- lfdirstate.add(destlfile)
- lfdirstate.write()
- except util.Abort, e:
- if str(e) != _('no files to copy'):
- raise e
- else:
- nolfiles = True
- finally:
- restorematchfn()
- wlock.release()
-
- if nolfiles and nonormalfiles:
- raise util.Abort(_('no files to copy'))
-
- return result
-
-# When the user calls revert, we have to be careful to not revert any
-# changes to other largefiles accidentally. This means we have to keep
-# track of the largefiles that are being reverted so we only pull down
-# the necessary largefiles.
-#
-# Standins are only updated (to match the hash of largefiles) before
-# commits. Update the standins then run the original revert, changing
-# the matcher to hit standins instead of largefiles. Based on the
-# resulting standins update the largefiles. Then return the standins
-# to their proper state
-def overriderevert(orig, ui, repo, *pats, **opts):
- # Because we put the standins in a bad state (by updating them)
- # and then return them to a correct state we need to lock to
- # prevent others from changing them in their incorrect state.
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- (modified, added, removed, missing, unknown, ignored, clean) = \
- lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
- for lfile in modified:
- lfutil.updatestandin(repo, lfutil.standin(lfile))
- for lfile in missing:
- if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
- os.unlink(repo.wjoin(lfutil.standin(lfile)))
-
- try:
- ctx = scmutil.revsingle(repo, opts.get('rev'))
- oldmatch = None # for the closure
- def overridematch(ctx, pats=[], opts={}, globbed=False,
- default='relpath'):
- match = oldmatch(ctx, pats, opts, globbed, default)
- m = copy.copy(match)
- def tostandin(f):
- if lfutil.standin(f) in ctx:
- return lfutil.standin(f)
- elif lfutil.standin(f) in repo[None]:
- return None
- return f
- m._files = [tostandin(f) for f in m._files]
- m._files = [f for f in m._files if f is not None]
- m._fmap = set(m._files)
- origmatchfn = m.matchfn
- def matchfn(f):
- if lfutil.isstandin(f):
- # We need to keep track of what largefiles are being
- # matched so we know which ones to update later --
- # otherwise we accidentally revert changes to other
- # largefiles. This is repo-specific, so duckpunch the
- # repo object to keep the list of largefiles for us
- # later.
- if origmatchfn(lfutil.splitstandin(f)) and \
- (f in repo[None] or f in ctx):
- lfileslist = getattr(repo, '_lfilestoupdate', [])
- lfileslist.append(lfutil.splitstandin(f))
- repo._lfilestoupdate = lfileslist
- return True
- else:
- return False
- return origmatchfn(f)
- m.matchfn = matchfn
- return m
- oldmatch = installmatchfn(overridematch)
- matches = overridematch(repo[None], pats, opts)
- orig(ui, repo, *pats, **opts)
- finally:
- restorematchfn()
- lfileslist = getattr(repo, '_lfilestoupdate', [])
- lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
- printmessage=False)
-
- # empty out the largefiles list so we start fresh next time
- repo._lfilestoupdate = []
- for lfile in modified:
- if lfile in lfileslist:
- if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
- in repo['.']:
- lfutil.writestandin(repo, lfutil.standin(lfile),
- repo['.'][lfile].data().strip(),
- 'x' in repo['.'][lfile].flags())
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for lfile in added:
- standin = lfutil.standin(lfile)
- if standin not in ctx and (standin in matches or opts.get('all')):
- if lfile in lfdirstate:
- lfdirstate.drop(lfile)
- util.unlinkpath(repo.wjoin(standin))
- lfdirstate.write()
- finally:
- wlock.release()
-
-def hgupdate(orig, repo, node):
- # Only call updatelfiles on the standins that have changed, to save time
- oldstandins = lfutil.getstandinsstate(repo)
- result = orig(repo, node)
- newstandins = lfutil.getstandinsstate(repo)
- filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
- lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
- return result
-
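-# A minimal sketch of the diffing above, assuming getstandinsstate returns
-# (largefile, hash) pairs: only files whose standin hash changed across
-# the update need their largefile refreshed.
-def _changedlfiles(oldstandins, newstandins):
-    changed = set(oldstandins).symmetric_difference(set(newstandins))
-    return sorted(set(f for f, sha in changed))
-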
-def hgclean(orig, repo, node, show_stats=True):
- result = orig(repo, node, show_stats)
- lfcommands.updatelfiles(repo.ui, repo)
- return result
-
-def hgmerge(orig, repo, node, force=None, remind=True):
- # Mark the repo as being in the middle of a merge, so that
- # updatelfiles() will know that it needs to trust the standins in
- # the working copy, not in the standins in the current node
- repo._ismerging = True
- try:
- result = orig(repo, node, force, remind)
- lfcommands.updatelfiles(repo.ui, repo)
- finally:
- repo._ismerging = False
- return result
-
-# When we rebase a repository with remotely changed largefiles, we need to
-# take some extra care so that the largefiles are correctly updated in the
-# working copy
-def overridepull(orig, ui, repo, source=None, **opts):
- revsprepull = len(repo)
- if opts.get('rebase', False):
- repo._isrebasing = True
- try:
- if opts.get('update'):
- del opts['update']
- ui.debug('--update and --rebase are not compatible, ignoring '
- 'the update flag\n')
- del opts['rebase']
- cmdutil.bailifchanged(repo)
- origpostincoming = commands.postincoming
- def _dummy(*args, **kwargs):
- pass
- commands.postincoming = _dummy
- repo.lfpullsource = source
- if not source:
- source = 'default'
- try:
- result = commands.pull(ui, repo, source, **opts)
- finally:
- commands.postincoming = origpostincoming
- revspostpull = len(repo)
- if revspostpull > revsprepull:
- result = result or rebase.rebase(ui, repo)
- finally:
- repo._isrebasing = False
- else:
- repo.lfpullsource = source
- if not source:
- source = 'default'
- oldheads = lfutil.getcurrentheads(repo)
- result = orig(ui, repo, source, **opts)
- # If we do not have the new largefiles for any new heads we pulled, we
- # will run into a problem later if we try to merge or rebase with one of
- # these heads, so cache the largefiles now directly into the system
- # cache.
- ui.status(_("caching new largefiles\n"))
- numcached = 0
- heads = lfutil.getcurrentheads(repo)
- newheads = set(heads).difference(set(oldheads))
- for head in newheads:
- (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
- numcached += len(cached)
- ui.status(_("%d largefiles cached\n") % numcached)
- if opts.get('all_largefiles'):
- revspostpull = len(repo)
- revs = []
- for rev in xrange(revsprepull, revspostpull):
- revs.append(repo[rev].rev())
- lfcommands.downloadlfiles(ui, repo, revs)
- return result
-
-def overrideclone(orig, ui, source, dest=None, **opts):
- if dest is None:
- dest = hg.defaultdest(source)
- if opts.get('all_largefiles') and not hg.islocal(dest):
- raise util.Abort(_(
- '--all-largefiles is incompatible with non-local destination %s')
- % dest)
- result = hg.clone(ui, opts, source, dest,
- pull=opts.get('pull'),
- stream=opts.get('uncompressed'),
- rev=opts.get('rev'),
- update=True, # required for successful walkchangerevs
- branch=opts.get('branch'))
- if result is None:
- return True
- if opts.get('all_largefiles'):
- sourcerepo, destrepo = result
- success, missing = lfcommands.downloadlfiles(ui, destrepo.local(), None)
- return missing != 0
- return result is None
-
-def overriderebase(orig, ui, repo, **opts):
- repo._isrebasing = True
- try:
- orig(ui, repo, **opts)
- finally:
- repo._isrebasing = False
-
-def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
- prefix=None, mtime=None, subrepos=None):
- # No need to lock because we are only reading history and
- # largefile caches, neither of which are modified.
- lfcommands.cachelfiles(repo.ui, repo, node)
-
- if kind not in archival.archivers:
- raise util.Abort(_("unknown archive type '%s'") % kind)
-
- ctx = repo[node]
-
- if kind == 'files':
- if prefix:
- raise util.Abort(
- _('cannot give prefix when archiving to files'))
- else:
- prefix = archival.tidyprefix(dest, kind, prefix)
-
- def write(name, mode, islink, getdata):
- if matchfn and not matchfn(name):
- return
- data = getdata()
- if decode:
- data = repo.wwritedata(name, data)
- archiver.addfile(prefix + name, mode, islink, data)
-
- archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
-
- if repo.ui.configbool("ui", "archivemeta", True):
- def metadata():
- base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
- hex(repo.changelog.node(0)), hex(node), ctx.branch())
-
- tags = ''.join('tag: %s\n' % t for t in ctx.tags()
- if repo.tagtype(t) == 'global')
- if not tags:
- repo.ui.pushbuffer()
- opts = {'template': '{latesttag}\n{latesttagdistance}',
- 'style': '', 'patch': None, 'git': None}
- cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
- ltags, dist = repo.ui.popbuffer().split('\n')
- tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
- tags += 'latesttagdistance: %s\n' % dist
-
- return base + tags
-
- write('.hg_archival.txt', 0644, False, metadata)
-
- for f in ctx:
- ff = ctx.flags(f)
- getdata = ctx[f].data
- if lfutil.isstandin(f):
- path = lfutil.findfile(repo, getdata().strip())
- if path is None:
- raise util.Abort(
- _('largefile %s not found in repo store or system cache')
- % lfutil.splitstandin(f))
- f = lfutil.splitstandin(f)
-
- def getdatafn():
- fd = None
- try:
- fd = open(path, 'rb')
- return fd.read()
- finally:
- if fd:
- fd.close()
-
- getdata = getdatafn
- write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
-
- if subrepos:
- for subpath in ctx.substate:
- sub = ctx.sub(subpath)
- submatch = match_.narrowmatcher(subpath, matchfn)
- sub.archive(repo.ui, archiver, prefix, submatch)
-
- archiver.done()
-
-def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
- rev = repo._state[1]
- ctx = repo._repo[rev]
-
- lfcommands.cachelfiles(ui, repo._repo, ctx.node())
-
- def write(name, mode, islink, getdata):
- # At this point, the standin has been replaced with the largefile name,
- # so the normal matcher works here without the lfutil variants.
- if match and not match(name):
- return
- data = getdata()
-
- archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
-
- for f in ctx:
- ff = ctx.flags(f)
- getdata = ctx[f].data
- if lfutil.isstandin(f):
- path = lfutil.findfile(repo._repo, getdata().strip())
- if path is None:
- raise util.Abort(
- _('largefile %s not found in repo store or system cache')
- % lfutil.splitstandin(f))
- f = lfutil.splitstandin(f)
-
- def getdatafn():
- fd = None
- try:
- fd = open(os.path.join(prefix, path), 'rb')
- return fd.read()
- finally:
- if fd:
- fd.close()
-
- getdata = getdatafn
-
- write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
-
- for subpath in ctx.substate:
- sub = ctx.sub(subpath)
- submatch = match_.narrowmatcher(subpath, match)
- sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
- submatch)
-
-# If a largefile is modified, the change is not reflected in its
-# standin until a commit. cmdutil.bailifchanged() raises an exception
-# if the repo has uncommitted changes. Wrap it to also check if
-# largefiles were changed. This is used by bisect and backout.
-def overridebailifchanged(orig, repo):
- orig(repo)
- repo.lfstatus = True
- modified, added, removed, deleted = repo.status()[:4]
- repo.lfstatus = False
- if modified or added or removed or deleted:
- raise util.Abort(_('outstanding uncommitted changes'))
-
-# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
-def overridefetch(orig, ui, repo, *pats, **opts):
- repo.lfstatus = True
- modified, added, removed, deleted = repo.status()[:4]
- repo.lfstatus = False
- if modified or added or removed or deleted:
- raise util.Abort(_('outstanding uncommitted changes'))
- return orig(ui, repo, *pats, **opts)
-
-def overrideforget(orig, ui, repo, *pats, **opts):
- installnormalfilesmatchfn(repo[None].manifest())
- orig(ui, repo, *pats, **opts)
- restorematchfn()
- m = scmutil.match(repo[None], pats, opts)
-
- try:
- repo.lfstatus = True
- s = repo.status(match=m, clean=True)
- finally:
- repo.lfstatus = False
- forget = sorted(s[0] + s[1] + s[3] + s[6])
- forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
-
- for f in forget:
- if lfutil.standin(f) not in repo.dirstate and not \
- os.path.isdir(m.rel(lfutil.standin(f))):
- ui.warn(_('not removing %s: file is already untracked\n')
- % m.rel(f))
-
- for f in forget:
- if ui.verbose or not m.exact(f):
- ui.status(_('removing %s\n') % m.rel(f))
-
- # Need to lock because standin files are deleted then removed from the
- # repository and we could race in between.
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- for f in forget:
- if lfdirstate[f] == 'a':
- lfdirstate.drop(f)
- else:
- lfdirstate.remove(f)
- lfdirstate.write()
- lfutil.reporemove(repo, [lfutil.standin(f) for f in forget],
- unlink=True)
- finally:
- wlock.release()
-
-def getoutgoinglfiles(ui, repo, dest=None, **opts):
- dest = ui.expandpath(dest or 'default-push', dest or 'default')
- dest, branches = hg.parseurl(dest, opts.get('branch'))
- revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
- if revs:
- revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
-
- try:
- remote = hg.peer(repo, opts, dest)
- except error.RepoError:
- return None
- o = lfutil.findoutgoing(repo, remote, False)
- if not o:
- return None
- o = repo.changelog.nodesbetween(o, revs)[0]
- if opts.get('newest_first'):
- o.reverse()
-
- toupload = set()
- for n in o:
- parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
- ctx = repo[n]
- files = set(ctx.files())
- if len(parents) == 2:
- mc = ctx.manifest()
- mp1 = ctx.parents()[0].manifest()
- mp2 = ctx.parents()[1].manifest()
- for f in mp1:
- if f not in mc:
- files.add(f)
- for f in mp2:
- if f not in mc:
- files.add(f)
- for f in mc:
- if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
- files.add(f)
- toupload = toupload.union(
- set([f for f in files if lfutil.isstandin(f) and f in ctx]))
- return toupload
-
-def overrideoutgoing(orig, ui, repo, dest=None, **opts):
- orig(ui, repo, dest, **opts)
-
- if opts.pop('large', None):
- toupload = getoutgoinglfiles(ui, repo, dest, **opts)
- if toupload is None:
- ui.status(_('largefiles: no remote repo\n'))
- else:
- ui.status(_('largefiles to upload:\n'))
- for file in toupload:
- ui.status(lfutil.splitstandin(file) + '\n')
- ui.status('\n')
-
-def overridesummary(orig, ui, repo, *pats, **opts):
- try:
- repo.lfstatus = True
- orig(ui, repo, *pats, **opts)
- finally:
- repo.lfstatus = False
-
- if opts.pop('large', None):
- toupload = getoutgoinglfiles(ui, repo, None, **opts)
- if toupload is None:
- ui.status(_('largefiles: no remote repo\n'))
- else:
- ui.status(_('largefiles: %d to upload\n') % len(toupload))
-
-def overrideaddremove(orig, ui, repo, *pats, **opts):
- if not lfutil.islfilesrepo(repo):
- return orig(ui, repo, *pats, **opts)
- # Get the list of missing largefiles so we can remove them
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
- False, False)
- (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
-
- # Call into the normal remove code, but leave the removal of the standins
- # to the original addremove. Monkey patching here makes sure we don't
- # remove the standins in the largefiles code, preventing a very confused
- # state later.
- if missing:
- m = [repo.wjoin(f) for f in missing]
- repo._isaddremove = True
- removelargefiles(ui, repo, *m, **opts)
- repo._isaddremove = False
- # Call into the normal add code, and any files that *should* be added as
- # largefiles will be
- addlargefiles(ui, repo, *pats, **opts)
- # Now that we've handled largefiles, hand off to the original addremove
- # function to take care of the rest. Make sure it doesn't do anything with
- # largefiles by installing a matcher that will ignore them.
- installnormalfilesmatchfn(repo[None].manifest())
- result = orig(ui, repo, *pats, **opts)
- restorematchfn()
- return result
-
-# Calling purge with --all will cause the largefiles to be deleted.
-# Override repo.status to prevent this from happening.
-def overridepurge(orig, ui, repo, *dirs, **opts):
- oldstatus = repo.status
- def overridestatus(node1='.', node2=None, match=None, ignored=False,
- clean=False, unknown=False, listsubrepos=False):
- r = oldstatus(node1, node2, match, ignored, clean, unknown,
- listsubrepos)
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- modified, added, removed, deleted, unknown, ignored, clean = r
- unknown = [f for f in unknown if lfdirstate[f] == '?']
- ignored = [f for f in ignored if lfdirstate[f] == '?']
- return modified, added, removed, deleted, unknown, ignored, clean
- repo.status = overridestatus
- orig(ui, repo, *dirs, **opts)
- repo.status = oldstatus
-
-def overriderollback(orig, ui, repo, **opts):
- result = orig(ui, repo, **opts)
- merge.update(repo, node=None, branchmerge=False, force=True,
- partial=lfutil.isstandin)
- wlock = repo.wlock()
- try:
- lfdirstate = lfutil.openlfdirstate(ui, repo)
- lfiles = lfutil.listlfiles(repo)
- oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
- for file in lfiles:
- if file in oldlfiles:
- lfdirstate.normallookup(file)
- else:
- lfdirstate.add(file)
- lfdirstate.write()
- finally:
- wlock.release()
- return result
-
-def overridetransplant(orig, ui, repo, *revs, **opts):
- try:
- oldstandins = lfutil.getstandinsstate(repo)
- repo._istransplanting = True
- result = orig(ui, repo, *revs, **opts)
- newstandins = lfutil.getstandinsstate(repo)
- filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
- lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
- printmessage=True)
- finally:
- repo._istransplanting = False
- return result
-
-def overridecat(orig, ui, repo, file1, *pats, **opts):
- ctx = scmutil.revsingle(repo, opts.get('rev'))
- if lfutil.standin(file1) not in ctx:
- result = orig(ui, repo, file1, *pats, **opts)
- return result
- return lfcommands.catlfile(repo, file1, ctx.rev(), opts.get('output'))
diff --git a/hgext/largefiles/proto.py b/hgext/largefiles/proto.py
deleted file mode 100644
index de89e32..0000000
--- a/hgext/largefiles/proto.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2011 Fog Creek Software
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import os
-import urllib2
-
-from mercurial import error, httppeer, util, wireproto
-from mercurial.wireproto import batchable, future
-from mercurial.i18n import _
-
-import lfutil
-
-LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
- '\n\nPlease enable it in your Mercurial config '
- 'file.\n')
-
-def putlfile(repo, proto, sha):
- '''Put a largefile into a repository's local store and into the
- user cache.'''
- proto.redirect()
-
- path = lfutil.storepath(repo, sha)
- util.makedirs(os.path.dirname(path))
- tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
-
- try:
- try:
- proto.getfile(tmpfp)
- tmpfp._fp.seek(0)
- if sha != lfutil.hexsha1(tmpfp._fp):
- raise IOError(0, _('largefile contents do not match hash'))
- tmpfp.close()
- lfutil.linktousercache(repo, sha)
- except IOError, e:
- repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
- (sha, e.strerror))
- return wireproto.pushres(1)
- finally:
- tmpfp.discard()
-
- return wireproto.pushres(0)
-
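-# A minimal sketch of the client-side check (helper name hypothetical):
-# the pushres value above travels back as a stringified integer, so
-# anything non-zero means the put failed.
-def _putlfileok(wirevalue):
-    return int(wirevalue) == 0
-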
-def getlfile(repo, proto, sha):
- '''Retrieve a largefile from the repository-local cache or system
- cache.'''
- filename = lfutil.findfile(repo, sha)
- if not filename:
- raise util.Abort(_('requested largefile %s not present in cache') % sha)
- f = open(filename, 'rb')
- length = os.fstat(f.fileno()).st_size
-
- # Since we can't set an HTTP content-length header here, and
- # Mercurial core provides no way to give the length of a streamres
- # (and reading the entire file into RAM would be ill-advised), we
- # just send the length on the first line of the response, like the
- # ssh proto does for string responses.
- def generator():
- yield '%d\n' % length
- for chunk in f:
- yield chunk
- return wireproto.streamres(generator())
-
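-# A minimal sketch of the matching client side (helper name hypothetical):
-# read the length line first, then exactly that many payload bytes, as
-# lfileswirerepository.getlfile below expects.
-def _readlfilestream(stream, out, chunksize=131072):
-    length = int(stream.readline())
-    remaining = length
-    while remaining > 0:
-        chunk = stream.read(min(remaining, chunksize))
-        if not chunk:
-            raise IOError(0, 'premature end of largefile stream')
-        out.write(chunk)
-        remaining -= len(chunk)
-    return length
-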
-def statlfile(repo, proto, sha):
- '''Return '2\n' if the largefile is missing, '1\n' if it has a
- mismatched checksum, or '0\n' if it is in good condition'''
- filename = lfutil.findfile(repo, sha)
- if not filename:
- return '2\n'
- fd = None
- try:
- fd = open(filename, 'rb')
- return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
- finally:
- if fd:
- fd.close()
-
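-# A minimal sketch decoding those codes on the client (helper name
-# hypothetical):
-def _statmessage(code):
-    return {0: 'ok', 1: 'contents differ', 2: 'missing'}.get(
-        code, 'unexpected statlfile response')
-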
-def wirereposetup(ui, repo):
- class lfileswirerepository(repo.__class__):
- def putlfile(self, sha, fd):
- # unfortunately, httprepository._callpush tries to convert its
- # file-like input into a bundle before sending it, so we can't use
- # it ...
- if issubclass(self.__class__, httppeer.httppeer):
- res = None
- try:
- res = self._call('putlfile', data=fd, sha=sha,
- headers={'content-type':'application/mercurial-0.1'})
- d, output = res.split('\n', 1)
- for l in output.splitlines(True):
- self.ui.warn(_('remote: '), l, '\n')
- return int(d)
- except (ValueError, urllib2.HTTPError):
- self.ui.warn(_('unexpected putlfile response: %s') % res)
- return 1
- # ... but we can't use sshrepository._call because the data=
- # argument won't get sent, and _callpush does exactly what we want
- # in this case: send the data straight through
- else:
- try:
- ret, output = self._callpush("putlfile", fd, sha=sha)
- if ret == "":
- raise error.ResponseError(_('putlfile failed:'),
- output)
- return int(ret)
- except IOError:
- return 1
- except ValueError:
- raise error.ResponseError(
- _('putlfile failed (unexpected response):'), ret)
-
- def getlfile(self, sha):
- stream = self._callstream("getlfile", sha=sha)
- length = stream.readline()
- try:
- length = int(length)
- except ValueError:
- self._abort(error.ResponseError(_("unexpected response:"),
- length))
- return (length, stream)
-
- @batchable
- def statlfile(self, sha):
- f = future()
- result = {'sha': sha}
- yield result, f
- try:
- yield int(f.value)
- except (ValueError, urllib2.HTTPError):
- # If the server returns anything but an integer followed by a
- # newline, it's not speaking our language; if we get
- # an HTTP error, we can't be sure the largefile is present;
- # either way, consider it missing.
- yield 2
-
- repo.__class__ = lfileswirerepository
-
-# advertise the largefiles=serve capability
-def capabilities(repo, proto):
- return capabilitiesorig(repo, proto) + ' largefiles=serve'
-
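-# A minimal sketch of the client side (helper name hypothetical): peers
-# expose key=value capabilities, so capable('largefiles') returns the
-# value part ('serve' here), which wirestore splits on ',' to find the
-# supported store types.
-def _storetypes(capvalue):
-    return capvalue.split(',') if capvalue else []
-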
-# duplicate what Mercurial's new out-of-band errors mechanism does, because
-# old and new clients alike handle it well
-def webprotorefuseclient(self, message):
- self.req.header([('Content-Type', 'application/hg-error')])
- return message
-
-def sshprotorefuseclient(self, message):
- self.ui.write_err('%s\n-\n' % message)
- self.fout.write('\n')
- self.fout.flush()
-
- return ''
-
-def heads(repo, proto):
- if lfutil.islfilesrepo(repo):
- return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
- return wireproto.heads(repo, proto)
-
-def sshrepocallstream(self, cmd, **args):
- if cmd == 'heads' and self.capable('largefiles'):
- cmd = 'lheads'
- if cmd == 'batch' and self.capable('largefiles'):
- args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
- return ssholdcallstream(self, cmd, **args)
-
-def httprepocallstream(self, cmd, **args):
- if cmd == 'heads' and self.capable('largefiles'):
- cmd = 'lheads'
- if cmd == 'batch' and self.capable('largefiles'):
- args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
- return httpoldcallstream(self, cmd, **args)
diff --git a/hgext/largefiles/remotestore.py b/hgext/largefiles/remotestore.py
deleted file mode 100644
index 6c3d371..0000000
--- a/hgext/largefiles/remotestore.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''remote largefile store; the base class for wirestore'''
-
-import urllib2
-
-from mercurial import util
-from mercurial.i18n import _
-from mercurial.wireproto import remotebatch
-
-import lfutil
-import basestore
-
-class remotestore(basestore.basestore):
- '''a largefile store accessed over a network'''
- def __init__(self, ui, repo, url):
- super(remotestore, self).__init__(ui, repo, url)
-
- def put(self, source, hash):
- if self.sendfile(source, hash):
- raise util.Abort(
- _('remotestore: could not put %s to remote store %s')
- % (source, self.url))
- self.ui.debug(
- _('remotestore: put %s to remote store %s') % (source, self.url))
-
- def exists(self, hashes):
- return self._verify(hashes)
-
- def sendfile(self, filename, hash):
- self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
- fd = None
- try:
- try:
- fd = lfutil.httpsendfile(self.ui, filename)
- except IOError, e:
- raise util.Abort(
- _('remotestore: could not open file %s: %s')
- % (filename, str(e)))
- return self._put(hash, fd)
- finally:
- if fd:
- fd.close()
-
- def _getfile(self, tmpfile, filename, hash):
- # quit if the largefile isn't there
- stat = self._stat(hash)
- if stat == 1:
- raise util.Abort(_('remotestore: largefile %s is invalid') % hash)
- elif stat == 2:
- raise util.Abort(_('remotestore: largefile %s is missing') % hash)
-
- try:
- length, infile = self._get(hash)
- except urllib2.HTTPError, e:
- # 401s get converted to util.Aborts; everything else is fine being
- # turned into a StoreError
- raise basestore.StoreError(filename, hash, self.url, str(e))
- except urllib2.URLError, e:
- # This usually indicates a connection problem, so don't
- # keep trying with the other files... they will probably
- # all fail too.
- raise util.Abort('%s: %s' % (self.url, e.reason))
- except IOError, e:
- raise basestore.StoreError(filename, hash, self.url, str(e))
-
- # Mercurial does not close its SSH connections after writing a stream
- if length is not None:
- infile = lfutil.limitreader(infile, length)
- return lfutil.copyandhash(lfutil.blockstream(infile), tmpfile)
-
- def _verify(self, hashes):
- return self._stat(hashes)
-
- def _verifyfile(self, cctx, cset, contents, standin, verified):
- filename = lfutil.splitstandin(standin)
- if not filename:
- return False
- fctx = cctx[standin]
- key = (filename, fctx.filenode())
- if key in verified:
- return False
-
- verified.add(key)
-
- # the standin content (minus the newline) is the expected hash
- expecthash = fctx.data()[0:40]
- stat = self._stat(expecthash)
- if not stat:
- return False
- elif stat == 1:
- self.ui.warn(
- _('changeset %s: %s: contents differ\n')
- % (cset, filename))
- return True # failed
- elif stat == 2:
- self.ui.warn(
- _('changeset %s: %s missing\n')
- % (cset, filename))
- return True # failed
- else:
- raise RuntimeError('verify failed: unexpected response from '
- 'statlfile (%r)' % stat)
-
- def batch(self):
- '''Support for remote batching.'''
- return remotebatch(self)
-
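-# A minimal sketch of using exists() above, assuming _stat maps each hash
-# to a truthy present-and-intact flag (as wirestore's implementation does):
-def _missinghashes(store, hashes):
-    present = store.exists(hashes)
-    return [sha for sha in hashes if not present.get(sha)]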
diff --git a/hgext/largefiles/reposetup.py b/hgext/largefiles/reposetup.py
deleted file mode 100644
index 04ab704..0000000
--- a/hgext/largefiles/reposetup.py
+++ /dev/null
@@ -1,475 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''setup for largefiles repositories: reposetup'''
-import copy
-import types
-import os
-
-from mercurial import context, error, manifest, match as match_, util
-from mercurial import node as node_
-from mercurial.i18n import _
-
-import lfcommands
-import proto
-import lfutil
-
-def reposetup(ui, repo):
- # wire repositories should be given new wireproto functions but not the
- # other largefiles modifications
- if not repo.local():
- return proto.wirereposetup(ui, repo)
-
- for name in ('status', 'commitctx', 'commit', 'push'):
- method = getattr(repo, name)
- if (isinstance(method, types.FunctionType) and
- method.func_name == 'wrap'):
- ui.warn(_('largefiles: repo method %r appears to have already been'
- ' wrapped by another extension: '
- 'largefiles may behave incorrectly\n')
- % name)
-
- class lfilesrepo(repo.__class__):
- lfstatus = False
- def status_nolfiles(self, *args, **kwargs):
- return super(lfilesrepo, self).status(*args, **kwargs)
-
- # When lfstatus is set, return a context that gives the names
- # of largefiles instead of their corresponding standins and
- # identifies the largefiles as always binary, regardless of
- # their actual contents.
- def __getitem__(self, changeid):
- ctx = super(lfilesrepo, self).__getitem__(changeid)
- if self.lfstatus:
- class lfilesmanifestdict(manifest.manifestdict):
- def __contains__(self, filename):
- if super(lfilesmanifestdict,
- self).__contains__(filename):
- return True
- return super(lfilesmanifestdict,
- self).__contains__(lfutil.standin(filename))
- class lfilesctx(ctx.__class__):
- def files(self):
- filenames = super(lfilesctx, self).files()
- return [lfutil.splitstandin(f) or f for f in filenames]
- def manifest(self):
- man1 = super(lfilesctx, self).manifest()
- man1.__class__ = lfilesmanifestdict
- return man1
- def filectx(self, path, fileid=None, filelog=None):
- try:
- if filelog is not None:
- result = super(lfilesctx, self).filectx(
- path, fileid, filelog)
- else:
- result = super(lfilesctx, self).filectx(
- path, fileid)
- except error.LookupError:
- # Adding a null character will cause Mercurial to
- # identify this as a binary file.
- if filelog is not None:
- result = super(lfilesctx, self).filectx(
- lfutil.standin(path), fileid, filelog)
- else:
- result = super(lfilesctx, self).filectx(
- lfutil.standin(path), fileid)
- olddata = result.data
- result.data = lambda: olddata() + '\0'
- return result
- ctx.__class__ = lfilesctx
- return ctx
-
- # Figure out the status of big files and insert them into the
- # appropriate list in the result. Also remove standin files
- # from the listing. Revert to the original status if
- # self.lfstatus is False.
- def status(self, node1='.', node2=None, match=None, ignored=False,
- clean=False, unknown=False, listsubrepos=False):
- listignored, listclean, listunknown = ignored, clean, unknown
- if not self.lfstatus:
- return super(lfilesrepo, self).status(node1, node2, match,
- listignored, listclean, listunknown, listsubrepos)
- else:
- # some calls in this function rely on the old version of status
- self.lfstatus = False
- if isinstance(node1, context.changectx):
- ctx1 = node1
- else:
- ctx1 = repo[node1]
- if isinstance(node2, context.changectx):
- ctx2 = node2
- else:
- ctx2 = repo[node2]
- working = ctx2.rev() is None
- parentworking = working and ctx1 == self['.']
-
- def inctx(file, ctx):
- try:
- if ctx.rev() is None:
- return file in ctx.manifest()
- ctx[file]
- return True
- except KeyError:
- return False
-
- if match is None:
- match = match_.always(self.root, self.getcwd())
-
- # First check if there were files specified on the
- # command line. If there were, and none of them were
- # largefiles, we should just bail here and let super
- # handle it -- thus gaining a big performance boost.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- if match.files() and not match.anypats():
- for f in lfdirstate:
- if match(f):
- break
- else:
- return super(lfilesrepo, self).status(node1, node2,
- match, listignored, listclean,
- listunknown, listsubrepos)
-
- # Create a copy of match that matches standins instead
- # of largefiles.
- def tostandins(files):
- if not working:
- return files
- newfiles = []
- dirstate = repo.dirstate
- for f in files:
- sf = lfutil.standin(f)
- if sf in dirstate:
- newfiles.append(sf)
- elif sf in dirstate.dirs():
- # Directory entries could be regular or
- # standin, check both
- newfiles.extend((f, sf))
- else:
- newfiles.append(f)
- return newfiles
-
- # Create a function that we can use to override what is
- # normally the ignore matcher. We've already checked
- # for ignored files on the first dirstate walk, and
- # unnecessarily re-checking here causes a huge performance
- # hit because lfdirstate only knows about largefiles
- def _ignoreoverride(self):
- return False
-
- m = copy.copy(match)
- m._files = tostandins(m._files)
-
- # Get ignored files here even if we weren't asked for them; we
- # must use the result here for filtering later
- result = super(lfilesrepo, self).status(node1, node2, m,
- True, clean, unknown, listsubrepos)
- if working:
- try:
- # Any non-largefiles that were explicitly listed must be
- # taken out or lfdirstate.status will report an error.
- # The status of these files was already computed using
- # super's status.
- # Override lfdirstate's ignore matcher to not do
- # anything
- origignore = lfdirstate._ignore
- lfdirstate._ignore = _ignoreoverride
-
- def sfindirstate(f):
- sf = lfutil.standin(f)
- dirstate = repo.dirstate
- return sf in dirstate or sf in dirstate.dirs()
- match._files = [f for f in match._files
- if sfindirstate(f)]
- # Don't waste time getting the ignored and unknown
- # files again; we already have them
- s = lfdirstate.status(match, [], False,
- listclean, False)
- (unsure, modified, added, removed, missing, unknown,
- ignored, clean) = s
- # Replace the list of ignored and unknown files with
- # the previously calculated lists, and strip out the
- # largefiles
- lfiles = set(lfdirstate._map)
- ignored = set(result[5]).difference(lfiles)
- unknown = set(result[4]).difference(lfiles)
- if parentworking:
- for lfile in unsure:
- standin = lfutil.standin(lfile)
- if standin not in ctx1:
- # from second parent
- modified.append(lfile)
- elif ctx1[standin].data().strip() \
- != lfutil.hashfile(self.wjoin(lfile)):
- modified.append(lfile)
- else:
- clean.append(lfile)
- lfdirstate.normal(lfile)
- else:
- tocheck = unsure + modified + added + clean
- modified, added, clean = [], [], []
-
- for lfile in tocheck:
- standin = lfutil.standin(lfile)
- if inctx(standin, ctx1):
- if ctx1[standin].data().strip() != \
- lfutil.hashfile(self.wjoin(lfile)):
- modified.append(lfile)
- else:
- clean.append(lfile)
- else:
- added.append(lfile)
- finally:
- # Replace the original ignore function
- lfdirstate._ignore = origignore
-
- for standin in ctx1.manifest():
- if not lfutil.isstandin(standin):
- continue
- lfile = lfutil.splitstandin(standin)
- if not match(lfile):
- continue
- if lfile not in lfdirstate:
- removed.append(lfile)
-
- # Filter result lists
- result = list(result)
-
- # Largefiles are not really removed when they're
- # still in the normal dirstate. Likewise, normal
- # files are not really removed if they're still in
- # lfdirstate. This happens in merges where files
- # change type.
- removed = [f for f in removed if f not in repo.dirstate]
- result[2] = [f for f in result[2] if f not in lfdirstate]
-
- # Unknown files
- unknown = set(unknown).difference(ignored)
- result[4] = [f for f in unknown
- if (repo.dirstate[f] == '?' and
- not lfutil.isstandin(f))]
- # Ignored files were calculated earlier by the dirstate,
- # and we already stripped out the largefiles from the list
- result[5] = ignored
- # combine normal files and largefiles
- normals = [[fn for fn in filelist
- if not lfutil.isstandin(fn)]
- for filelist in result]
- lfiles = (modified, added, removed, missing, [], [], clean)
- result = [sorted(list1 + list2)
- for (list1, list2) in zip(normals, lfiles)]
- else:
- def toname(f):
- if lfutil.isstandin(f):
- return lfutil.splitstandin(f)
- return f
- result = [[toname(f) for f in items] for items in result]
-
- if not listunknown:
- result[4] = []
- if not listignored:
- result[5] = []
- if not listclean:
- result[6] = []
- self.lfstatus = True
- return result
-
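- # A worked example of the recombination near the end of status above
- # (hypothetical data): normal-file and largefile result lists are
- # zipped position by position (modified, added, ...) and re-sorted:
- #
- #   normals = [['a.txt'], [], [], [], [], [], []]
- #   lfiles  = [['big.dat'], [], [], [], [], [], []]
- #   result  = [sorted(l1 + l2) for (l1, l2) in zip(normals, lfiles)]
- #   result[0] == ['a.txt', 'big.dat']
-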
- # As part of committing, copy all of the largefiles into the
- # cache.
- def commitctx(self, *args, **kwargs):
- node = super(lfilesrepo, self).commitctx(*args, **kwargs)
- lfutil.copyalltostore(self, node)
- return node
-
- # Before commit, largefile standins have not had their
- # contents updated to reflect the hash of their largefile.
- # Do that here.
- def commit(self, text="", user=None, date=None, match=None,
- force=False, editor=False, extra={}):
- orig = super(lfilesrepo, self).commit
-
- wlock = repo.wlock()
- try:
- # Case 0: Rebase or Transplant
- # We have to take the time to pull down the new largefiles now.
- # Otherwise, any largefiles that were modified in the
- # destination changesets get overwritten, either by the rebase
- # or in the first commit after the rebase or transplant.
- # updatelfiles will update the dirstate to mark any pulled
- # largefiles as modified
- if getattr(repo, "_isrebasing", False) or \
- getattr(repo, "_istransplanting", False):
- lfcommands.updatelfiles(repo.ui, repo, filelist=None,
- printmessage=False)
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
- return result
- # Case 1: user calls commit with no specific files or
- # include/exclude patterns: refresh and commit all files that
- # are "dirty".
- if ((match is None) or
- (not match.anypats() and not match.files())):
- # Spend a bit of time here to get a list of files we know
- # are modified so we can compare only against those.
- # It can otherwise cost a lot of time (several seconds)
- # to update all standins if the largefiles are large.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- dirtymatch = match_.always(repo.root, repo.getcwd())
- s = lfdirstate.status(dirtymatch, [], False, False, False)
- modifiedfiles = []
- for i in s:
- modifiedfiles.extend(i)
- lfiles = lfutil.listlfiles(self)
- # this only loops through largefiles that exist (not
- # removed/renamed)
- for lfile in lfiles:
- if lfile in modifiedfiles:
- if os.path.exists(
- self.wjoin(lfutil.standin(lfile))):
- # this handles the case where a rebase is being
- # performed and the working copy is not updated
- # yet.
- if os.path.exists(self.wjoin(lfile)):
- lfutil.updatestandin(self,
- lfutil.standin(lfile))
- lfdirstate.normal(lfile)
-
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
-
- if result is not None:
- for lfile in lfdirstate:
- if lfile in modifiedfiles:
- if (not os.path.exists(repo.wjoin(
- lfutil.standin(lfile)))) or \
- (not os.path.exists(repo.wjoin(lfile))):
- lfdirstate.drop(lfile)
-
- # This needs to be after commit; otherwise precommit hooks
- # get the wrong status
- lfdirstate.write()
- return result
-
- for f in match.files():
- if lfutil.isstandin(f):
- raise util.Abort(
- _('file "%s" is a largefile standin') % f,
- hint=('commit the largefile itself instead'))
-
- # Case 2: user calls commit with specified patterns: refresh
- # any matching big files.
- smatcher = lfutil.composestandinmatcher(self, match)
- standins = lfutil.dirstatewalk(self.dirstate, smatcher)
-
- # No matching big files: get out of the way and pass control to
- # the usual commit() method.
- if not standins:
- return orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
-
- # Refresh all matching big files. It's possible that the
- # commit will end up failing, in which case the big files will
- # stay refreshed. No harm done: the user modified them and
- # asked to commit them, so sooner or later we're going to
- # refresh the standins. Might as well leave them refreshed.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- for standin in standins:
- lfile = lfutil.splitstandin(standin)
- if lfdirstate[lfile] != 'r':
- lfutil.updatestandin(self, standin)
- lfdirstate.normal(lfile)
- else:
- lfdirstate.drop(lfile)
-
- # Cook up a new matcher that only matches regular files or
- # standins corresponding to the big files requested by the
- # user. Have to modify _files to prevent commit() from
- # complaining "not tracked" for big files.
- lfiles = lfutil.listlfiles(repo)
- match = copy.copy(match)
- origmatchfn = match.matchfn
-
- # Check both the list of largefiles and the list of
- # standins because if a largefile was removed, it
- # won't be in the list of largefiles at this point
- match._files += sorted(standins)
-
- actualfiles = []
- for f in match._files:
- fstandin = lfutil.standin(f)
-
- # ignore known largefiles and standins
- if f in lfiles or fstandin in standins:
- continue
-
- # append directory separator to avoid collisions
- if not fstandin.endswith(os.sep):
- fstandin += os.sep
-
- actualfiles.append(f)
- match._files = actualfiles
-
- def matchfn(f):
- if origmatchfn(f):
- return f not in lfiles
- else:
- return f in standins
-
- match.matchfn = matchfn
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
- # This needs to be after commit; otherwise precommit hooks
- # get the wrong status
- lfdirstate.write()
- return result
- finally:
- wlock.release()
-
- def push(self, remote, force=False, revs=None, newbranch=False):
- o = lfutil.findoutgoing(repo, remote, force)
- if o:
- toupload = set()
- o = repo.changelog.nodesbetween(o, revs)[0]
- for n in o:
- parents = [p for p in repo.changelog.parents(n)
- if p != node_.nullid]
- ctx = repo[n]
- files = set(ctx.files())
- if len(parents) == 2:
- mc = ctx.manifest()
- mp1 = ctx.parents()[0].manifest()
- mp2 = ctx.parents()[1].manifest()
- for f in mp1:
- if f not in mc:
- files.add(f)
- for f in mp2:
- if f not in mc:
- files.add(f)
- for f in mc:
- if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
- None):
- files.add(f)
-
- toupload = toupload.union(
- set([ctx[f].data().strip()
- for f in files
- if lfutil.isstandin(f) and f in ctx]))
- lfcommands.uploadlfiles(ui, self, remote, toupload)
- return super(lfilesrepo, self).push(remote, force, revs,
- newbranch)
-
- repo.__class__ = lfilesrepo
-
- def checkrequireslfiles(ui, repo, **kwargs):
- if 'largefiles' not in repo.requirements and util.any(
- lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
- repo.requirements.add('largefiles')
- repo._writerequirements()
-
- ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
- ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
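-
-# A minimal sketch of the hook's test above (store entries hypothetical;
-# the real code reads them from repo.store.datafiles()):
-def _needslfiles(datafiles, shortname='.hglf'):
-    return any(shortname + '/' in f for f in datafiles)
-
-# _needslfiles(['data/.hglf/big.dat.i', 'data/a.txt.i']) -> True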
diff --git a/hgext/largefiles/uisetup.py b/hgext/largefiles/uisetup.py
deleted file mode 100644
index e50190b..0000000
--- a/hgext/largefiles/uisetup.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''setup for largefiles extension: uisetup'''
-
-from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
- httppeer, localrepo, merge, sshpeer, sshserver, wireproto
-from mercurial.i18n import _
-from mercurial.hgweb import hgweb_mod, protocol, webcommands
-from mercurial.subrepo import hgsubrepo
-
-import overrides
-import proto
-
-def uisetup(ui):
- # Disable auto-status for some commands which assume that all
- # files in the result are under Mercurial's control
-
- entry = extensions.wrapcommand(commands.table, 'add',
- overrides.overrideadd)
- addopt = [('', 'large', None, _('add as largefile')),
- ('', 'normal', None, _('add as normal file')),
- ('', 'lfsize', '', _('add all files above this size '
- '(in megabytes) as largefiles '
- '(default: 10)'))]
- entry[1].extend(addopt)
-
- entry = extensions.wrapcommand(commands.table, 'addremove',
- overrides.overrideaddremove)
- entry = extensions.wrapcommand(commands.table, 'remove',
- overrides.overrideremove)
- entry = extensions.wrapcommand(commands.table, 'forget',
- overrides.overrideforget)
-
- # Subrepos call status function
- entry = extensions.wrapcommand(commands.table, 'status',
- overrides.overridestatus)
- entry = extensions.wrapfunction(hgsubrepo, 'status',
- overrides.overridestatusfn)
-
- entry = extensions.wrapcommand(commands.table, 'log',
- overrides.overridelog)
- entry = extensions.wrapcommand(commands.table, 'rollback',
- overrides.overriderollback)
- entry = extensions.wrapcommand(commands.table, 'verify',
- overrides.overrideverify)
-
- verifyopt = [('', 'large', None, _('verify largefiles')),
- ('', 'lfa', None,
- _('verify all revisions of largefiles not just current')),
- ('', 'lfc', None,
- _('verify largefile contents not just existence'))]
- entry[1].extend(verifyopt)
-
- entry = extensions.wrapcommand(commands.table, 'outgoing',
- overrides.overrideoutgoing)
- outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
- entry[1].extend(outgoingopt)
- entry = extensions.wrapcommand(commands.table, 'summary',
- overrides.overridesummary)
- summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
- entry[1].extend(summaryopt)
-
- entry = extensions.wrapcommand(commands.table, 'update',
- overrides.overrideupdate)
- entry = extensions.wrapcommand(commands.table, 'pull',
- overrides.overridepull)
- pullopt = [('', 'all-largefiles', None,
- _('download all pulled versions of largefiles'))]
- entry[1].extend(pullopt)
- entry = extensions.wrapcommand(commands.table, 'clone',
- overrides.overrideclone)
- cloneopt = [('', 'all-largefiles', None,
- _('download all versions of all largefiles'))]
-
- entry[1].extend(cloneopt)
- entry = extensions.wrapcommand(commands.table, 'cat',
- overrides.overridecat)
- entry = extensions.wrapfunction(merge, '_checkunknownfile',
- overrides.overridecheckunknownfile)
- entry = extensions.wrapfunction(merge, 'manifestmerge',
- overrides.overridemanifestmerge)
- entry = extensions.wrapfunction(filemerge, 'filemerge',
- overrides.overridefilemerge)
- entry = extensions.wrapfunction(cmdutil, 'copy',
- overrides.overridecopy)
-
- # Summary calls dirty on the subrepos
- entry = extensions.wrapfunction(hgsubrepo, 'dirty',
- overrides.overridedirty)
-
- # Backout calls revert so we need to override both the command and the
- # function
- entry = extensions.wrapcommand(commands.table, 'revert',
- overrides.overriderevert)
- entry = extensions.wrapfunction(commands, 'revert',
- overrides.overriderevert)
-
- # clone uses hg._update instead of hg.update even though they are the
- # same function... so wrap both of them
- extensions.wrapfunction(hg, 'update', overrides.hgupdate)
- extensions.wrapfunction(hg, '_update', overrides.hgupdate)
- extensions.wrapfunction(hg, 'clean', overrides.hgclean)
- extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
-
- extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
- extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
- extensions.wrapfunction(cmdutil, 'bailifchanged',
- overrides.overridebailifchanged)
-
- # create the new wireproto commands ...
- wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
- wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
- wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
-
- # ... and wrap some existing ones
- wireproto.commands['capabilities'] = (proto.capabilities, '')
- wireproto.commands['heads'] = (proto.heads, '')
- wireproto.commands['lheads'] = (wireproto.heads, '')
-
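- # A note on the registrations above: each wireproto.commands entry maps
- # a command name to a (handler, argument-spec) pair; 'sha' means the
- # client passes a single sha argument and '' means no arguments at all.
- # 'lheads' keeps the original heads handler reachable for
- # largefiles-aware clients, since 'heads' itself is wrapped to reject
- # clients that lack the extension.
-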
- # make putlfile behave the same as push and {get,stat}lfile behave
- # the same as pull w.r.t. permissions checks
- hgweb_mod.perms['putlfile'] = 'push'
- hgweb_mod.perms['getlfile'] = 'pull'
- hgweb_mod.perms['statlfile'] = 'pull'
-
- extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
-
- # the hello wireproto command uses wireproto.capabilities, so it won't see
- # our largefiles capability unless we replace the actual function as well.
- proto.capabilitiesorig = wireproto.capabilities
- wireproto.capabilities = proto.capabilities
-
- # these let us reject non-largefiles clients and make them display
- # our error messages
- protocol.webproto.refuseclient = proto.webprotorefuseclient
- sshserver.sshserver.refuseclient = proto.sshprotorefuseclient
-
- # can't do this in reposetup because it needs to have happened before
- # wirerepo.__init__ is called
- proto.ssholdcallstream = sshpeer.sshpeer._callstream
- proto.httpoldcallstream = httppeer.httppeer._callstream
- sshpeer.sshpeer._callstream = proto.sshrepocallstream
- httppeer.httppeer._callstream = proto.httprepocallstream
-
- # don't die on seeing a repo with the largefiles requirement
- localrepo.localrepository.supported |= set(['largefiles'])
-
- # override some extensions' stuff as well
- for name, module in extensions.extensions():
- if name == 'fetch':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
- overrides.overridefetch)
- if name == 'purge':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
- overrides.overridepurge)
- if name == 'rebase':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
- overrides.overriderebase)
- if name == 'transplant':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
- overrides.overridetransplant)
diff --git a/hgext/largefiles/wirestore.py b/hgext/largefiles/wirestore.py
deleted file mode 100644
index a394cf0..0000000
--- a/hgext/largefiles/wirestore.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2010-2011 Fog Creek Software
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''largefile store working over Mercurial's wire protocol'''
-
-import lfutil
-import remotestore
-
-class wirestore(remotestore.remotestore):
- def __init__(self, ui, repo, remote):
- cap = remote.capable('largefiles')
- if not cap:
- raise lfutil.storeprotonotcapable([])
- storetypes = cap.split(',')
- if 'serve' not in storetypes:
- raise lfutil.storeprotonotcapable(storetypes)
- self.remote = remote
- super(wirestore, self).__init__(ui, repo, remote.url())
-
- def _put(self, hash, fd):
- return self.remote.putlfile(hash, fd)
-
- def _get(self, hash):
- return self.remote.getlfile(hash)
-
- def _stat(self, hashes):
- batch = self.remote.batch()
- futures = {}
- for hash in hashes:
- futures[hash] = batch.statlfile(hash)
- batch.submit()
- retval = {}
- for hash in hashes:
- retval[hash] = not futures[hash].value
- return retval