summaryrefslogtreecommitdiff
path: root/mercurial/discovery.py
diff options
context:
space:
mode:
Diffstat (limited to 'mercurial/discovery.py')
-rw-r--r--mercurial/discovery.py448
1 files changed, 135 insertions, 313 deletions
diff --git a/mercurial/discovery.py b/mercurial/discovery.py
index 0eb27a8..88a7475 100644
--- a/mercurial/discovery.py
+++ b/mercurial/discovery.py
@@ -7,7 +7,7 @@
from node import nullid, short
from i18n import _
-import util, setdiscovery, treediscovery, phases, obsolete
+import util, setdiscovery, treediscovery
def findcommonincoming(repo, remote, heads=None, force=False):
"""Return a tuple (common, anyincoming, heads) used to identify the common
@@ -46,324 +46,146 @@ def findcommonincoming(repo, remote, heads=None, force=False):
common, anyinc, srvheads = res
return (list(common), anyinc, heads or list(srvheads))
-class outgoing(object):
- '''Represents the set of nodes present in a local repo but not in a
- (possibly) remote one.
+def findcommonoutgoing(repo, other, onlyheads=None, force=False, commoninc=None):
+ '''Return a tuple (common, anyoutgoing, heads) used to identify the set
+ of nodes present in repo but not in other.
- Members:
+ If onlyheads is given, only nodes ancestral to nodes in onlyheads (inclusive)
+ are included. If you already know the local repo's heads, passing them in
+ onlyheads is faster than letting them be recomputed here.
- missing is a list of all nodes present in local but not in remote.
- common is a list of all nodes shared between the two repos.
- excluded is the list of missing changeset that shouldn't be sent remotely.
- missingheads is the list of heads of missing.
- commonheads is the list of heads of common.
-
- The sets are computed on demand from the heads, unless provided upfront
- by discovery.'''
-
- def __init__(self, revlog, commonheads, missingheads):
- self.commonheads = commonheads
- self.missingheads = missingheads
- self._revlog = revlog
- self._common = None
- self._missing = None
- self.excluded = []
-
- def _computecommonmissing(self):
- sets = self._revlog.findcommonmissing(self.commonheads,
- self.missingheads)
- self._common, self._missing = sets
-
- @util.propertycache
- def common(self):
- if self._common is None:
- self._computecommonmissing()
- return self._common
-
- @util.propertycache
- def missing(self):
- if self._missing is None:
- self._computecommonmissing()
- return self._missing
-
-def findcommonoutgoing(repo, other, onlyheads=None, force=False,
- commoninc=None, portable=False):
- '''Return an outgoing instance to identify the nodes present in repo but
- not in other.
-
- If onlyheads is given, only nodes ancestral to nodes in onlyheads
- (inclusive) are included. If you already know the local repo's heads,
- passing them in onlyheads is faster than letting them be recomputed here.
-
- If commoninc is given, it must be the result of a prior call to
+ If commoninc is given, it must be the result of a prior call to
findcommonincoming(repo, other, force) to avoid recomputing it here.
- If portable is given, compute more conservative common and missingheads,
- to make bundles created from the instance more portable.'''
- # declare an empty outgoing object to be filled later
- og = outgoing(repo.changelog, None, None)
-
- # get common set if not provided
- if commoninc is None:
- commoninc = findcommonincoming(repo, other, force=force)
- og.commonheads, _any, _hds = commoninc
-
- # compute outgoing
- mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
- if not mayexclude:
- og.missingheads = onlyheads or repo.heads()
- elif onlyheads is None:
- # use visible heads as it should be cached
- og.missingheads = visibleheads(repo)
- og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
- else:
- # compute common, missing and exclude secret stuff
- sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
- og._common, allmissing = sets
- og._missing = missing = []
- og.excluded = excluded = []
- for node in allmissing:
- ctx = repo[node]
- if ctx.phase() >= phases.secret or ctx.extinct():
- excluded.append(node)
- else:
- missing.append(node)
- if len(missing) == len(allmissing):
- missingheads = onlyheads
- else: # update missing heads
- missingheads = phases.newheads(repo, onlyheads, excluded)
- og.missingheads = missingheads
- if portable:
- # recompute common and missingheads as if -r<rev> had been given for
- # each head of missing, and --base <rev> for each head of the proper
- # ancestors of missing
- og._computecommonmissing()
- cl = repo.changelog
- missingrevs = set(cl.rev(n) for n in og._missing)
- og._common = set(cl.ancestors(missingrevs)) - missingrevs
- commonheads = set(og.commonheads)
- og.missingheads = [h for h in og.missingheads if h not in commonheads]
-
- return og
-
-def _headssummary(repo, remote, outgoing):
- """compute a summary of branch and heads status before and after push
-
- return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
-
- - branch: the branch name
- - remoteheads: the list of remote heads known locally
- None if the branch is new
- - newheads: the new remote heads (known locally) with outgoing pushed
- - unsyncedheads: the list of remote heads unknown locally.
- """
- cl = repo.changelog
- headssum = {}
- # A. Create set of branches involved in the push.
- branches = set(repo[n].branch() for n in outgoing.missing)
- remotemap = remote.branchmap()
- newbranches = branches - set(remotemap)
- branches.difference_update(newbranches)
-
- # A. register remote heads
- remotebranches = set()
- for branch, heads in remote.branchmap().iteritems():
- remotebranches.add(branch)
- known = []
- unsynced = []
- for h in heads:
- if h in cl.nodemap:
- known.append(h)
- else:
- unsynced.append(h)
- headssum[branch] = (known, list(known), unsynced)
- # B. add new branch data
- missingctx = list(repo[n] for n in outgoing.missing)
- touchedbranches = set()
- for ctx in missingctx:
- branch = ctx.branch()
- touchedbranches.add(branch)
- if branch not in headssum:
- headssum[branch] = (None, [], [])
-
- # C drop data about untouched branches:
- for branch in remotebranches - touchedbranches:
- del headssum[branch]
-
- # D. Update newmap with outgoing changes.
- # This will possibly add new heads and remove existing ones.
- newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
- if heads[0] is not None)
- repo._updatebranchcache(newmap, missingctx)
- for branch, newheads in newmap.iteritems():
- headssum[branch][1][:] = newheads
- return headssum
-
-def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
- """Compute branchmapsummary for repo without branchmap support"""
+ The returned tuple is meant to be passed to changelog.findmissing.'''
+ common, _any, _hds = commoninc or findcommonincoming(repo, other, force=force)
+ return (common, onlyheads or repo.heads())
+
+def prepush(repo, remote, force, revs, newbranch):
+ '''Analyze the local and remote repositories and determine which
+ changesets need to be pushed to the remote. Return value depends
+ on circumstances:
+
+ If we are not going to push anything, return a tuple (None,
+ outgoing) where outgoing is 0 if there are no outgoing
+ changesets and 1 if there are, but we refuse to push them
+ (e.g. would create new remote heads).
+
+ Otherwise, return a tuple (changegroup, remoteheads), where
+ changegroup is a readable file-like object whose read() returns
+ successive changegroup chunks ready to be sent over the wire and
+ remoteheads is the list of remote heads.'''
+ commoninc = findcommonincoming(repo, remote, force=force)
+ common, revs = findcommonoutgoing(repo, remote, onlyheads=revs,
+ commoninc=commoninc, force=force)
+ _common, inc, remoteheads = commoninc
cl = repo.changelog
- # 1-4b. old servers: Check for new topological heads.
- # Construct {old,new}map with branch = None (topological branch).
- # (code based on _updatebranchcache)
- oldheads = set(h for h in remoteheads if h in cl.nodemap)
- # all nodes in outgoing.missing are children of either:
- # - an element of oldheads
- # - another element of outgoing.missing
- # - nullrev
- # This explains why the new head are very simple to compute.
- r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
- newheads = list(c.node() for c in r)
- unsynced = inc and set([None]) or set()
- return {None: (oldheads, newheads, unsynced)}
+ outg = cl.findmissing(common, revs)
+
+ if not outg:
+ repo.ui.status(_("no changes found\n"))
+ return None, 1
+
+ if not force and remoteheads != [nullid]:
+ if remote.capable('branchmap'):
+ # Check for each named branch if we're creating new remote heads.
+ # To be a remote head after push, node must be either:
+ # - unknown locally
+ # - a local outgoing head descended from update
+ # - a remote head that's known locally and not
+ # ancestral to an outgoing head
+
+ # 1. Create set of branches involved in the push.
+ branches = set(repo[n].branch() for n in outg)
+
+ # 2. Check for new branches on the remote.
+ remotemap = remote.branchmap()
+ newbranches = branches - set(remotemap)
+ if newbranches and not newbranch: # new branch requires --new-branch
+ branchnames = ', '.join(sorted(newbranches))
+ raise util.Abort(_("push creates new remote branches: %s!")
+ % branchnames,
+ hint=_("use 'hg push --new-branch' to create"
+ " new remote branches"))
+ branches.difference_update(newbranches)
+
+ # 3. Construct the initial oldmap and newmap dicts.
+ # They contain information about the remote heads before and
+ # after the push, respectively.
+ # Heads not found locally are not included in either dict,
+ # since they won't be affected by the push.
+ # unsynced contains all branches with incoming changesets.
+ oldmap = {}
+ newmap = {}
+ unsynced = set()
+ for branch in branches:
+ remotebrheads = remotemap[branch]
+ prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
+ oldmap[branch] = prunedbrheads
+ newmap[branch] = list(prunedbrheads)
+ if len(remotebrheads) > len(prunedbrheads):
+ unsynced.add(branch)
+
+ # 4. Update newmap with outgoing changes.
+ # This will possibly add new heads and remove existing ones.
+ ctxgen = (repo[n] for n in outg)
+ repo._updatebranchcache(newmap, ctxgen)
-def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
- """Check that a push won't add any outgoing head
-
- raise Abort error and display ui message as needed.
- """
- # Check for each named branch if we're creating new remote heads.
- # To be a remote head after push, node must be either:
- # - unknown locally
- # - a local outgoing head descended from update
- # - a remote head that's known locally and not
- # ancestral to an outgoing head
- if remoteheads == [nullid]:
- # remote is empty, nothing to check.
- return
-
- if remote.capable('branchmap'):
- headssum = _headssummary(repo, remote, outgoing)
- else:
- headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
- newbranches = [branch for branch, heads in headssum.iteritems()
- if heads[0] is None]
- # 1. Check for new branches on the remote.
- if newbranches and not newbranch: # new branch requires --new-branch
- branchnames = ', '.join(sorted(newbranches))
- raise util.Abort(_("push creates new remote branches: %s!")
- % branchnames,
- hint=_("use 'hg push --new-branch' to create"
- " new remote branches"))
-
- # 2 compute newly pushed bookmarks. We
- # don't warn about bookmarked heads.
- localbookmarks = repo._bookmarks
- remotebookmarks = remote.listkeys('bookmarks')
- bookmarkedheads = set()
- for bm in localbookmarks:
- rnode = remotebookmarks.get(bm)
- if rnode and rnode in repo:
- lctx, rctx = repo[bm], repo[rnode]
- if rctx == lctx.ancestor(rctx):
- bookmarkedheads.add(lctx.node())
-
- # 3. Check for new heads.
- # If there are more heads after the push than before, a suitable
- # error message, depending on unsynced status, is displayed.
- error = None
- unsynced = False
- allmissing = set(outgoing.missing)
- for branch, heads in headssum.iteritems():
- if heads[0] is None:
- # Maybe we should abort if we push more that one head
- # for new branches ?
- continue
- if heads[2]:
- unsynced = True
- oldhs = set(heads[0])
- candidate_newhs = set(heads[1])
- # add unsynced data
- oldhs.update(heads[2])
- candidate_newhs.update(heads[2])
- dhs = None
- if repo.obsstore:
- # remove future heads which are actually obsolete by another
- # pushed element:
- #
- # XXX There are several cases this code does not handle properly
- #
- # (1) if <nh> is public, it won't be affected by obsolete marker
- # and a new is created
- #
- # (2) if the new heads have ancestors which are not obsolete and
- # not ancestors of any other heads we will have a new head too.
- #
- # These two cases will be easy to handle for known changesets but much
- # more tricky for unsynced changes.
- newhs = set()
- for nh in candidate_newhs:
- for suc in obsolete.anysuccessors(repo.obsstore, nh):
- if suc != nh and suc in allmissing:
- break
- else:
- newhs.add(nh)
else:
- newhs = candidate_newhs
- if len(newhs) > len(oldhs):
- # strip updates to existing remote heads from the new heads list
- dhs = list(newhs - bookmarkedheads - oldhs)
- if dhs:
- if error is None:
- if branch not in ('default', None):
- error = _("push creates new remote head %s "
- "on branch '%s'!") % (short(dhs[0]), branch)
- else:
- error = _("push creates new remote head %s!"
- ) % short(dhs[0])
- if heads[2]: # unsynced
- hint = _("you should pull and merge or "
- "use push -f to force")
- else:
- hint = _("did you forget to merge? "
- "use push -f to force")
- if branch is not None:
- repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
- for h in dhs:
- repo.ui.note(_("new remote head %s\n") % short(h))
- if error:
- raise util.Abort(error, hint=hint)
-
- # 6. Check for unsynced changes on involved branches.
- if unsynced:
- repo.ui.warn(_("note: unsynced remote changes!\n"))
-
-def visibleheads(repo):
- """return the set of visible head of this repo"""
- # XXX we want a cache on this
- sroots = repo._phasecache.phaseroots[phases.secret]
- if sroots or repo.obsstore:
- # XXX very slow revset. storing heads or secret "boundary"
- # would help.
- revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
-
- vheads = [ctx.node() for ctx in revset]
- if not vheads:
- vheads.append(nullid)
- else:
- vheads = repo.heads()
- return vheads
-
-
-def visiblebranchmap(repo):
- """return a branchmap for the visible set"""
- # XXX Recomputing this data on the fly is very slow. We should build a
- # XXX cached version while computing the standard branchmap version.
- sroots = repo._phasecache.phaseroots[phases.secret]
- if sroots or repo.obsstore:
- vbranchmap = {}
- for branch, nodes in repo.branchmap().iteritems():
- # search for secret heads.
- for n in nodes:
- if repo[n].phase() >= phases.secret:
- nodes = None
- break
- # if secret heads were found we must compute them again
- if nodes is None:
- s = repo.set('heads(branch(%s) - secret() - extinct())',
- branch)
- nodes = [c.node() for c in s]
- vbranchmap[branch] = nodes
+ # 1-4b. old servers: Check for new topological heads.
+ # Construct {old,new}map with branch = None (topological branch).
+ # (code based on _updatebranchcache)
+ oldheads = set(h for h in remoteheads if h in cl.nodemap)
+ newheads = oldheads.union(outg)
+ if len(newheads) > 1:
+ for latest in reversed(outg):
+ if latest not in newheads:
+ continue
+ minhrev = min(cl.rev(h) for h in newheads)
+ reachable = cl.reachable(latest, cl.node(minhrev))
+ reachable.remove(latest)
+ newheads.difference_update(reachable)
+ branches = set([None])
+ newmap = {None: newheads}
+ oldmap = {None: oldheads}
+ unsynced = inc and branches or set()
+
+ # 5. Check for new heads.
+ # If there are more heads after the push than before, a suitable
+ # error message, depending on unsynced status, is displayed.
+ error = None
+ for branch in branches:
+ newhs = set(newmap[branch])
+ oldhs = set(oldmap[branch])
+ if len(newhs) > len(oldhs):
+ dhs = list(newhs - oldhs)
+ if error is None:
+ if branch != 'default':
+ error = _("push creates new remote head %s "
+ "on branch '%s'!") % (short(dhs[0]), branch)
+ else:
+ error = _("push creates new remote head %s!"
+ ) % short(dhs[0])
+ if branch in unsynced:
+ hint = _("you should pull and merge or "
+ "use push -f to force")
+ else:
+ hint = _("did you forget to merge? "
+ "use push -f to force")
+ repo.ui.note("new remote heads on branch '%s'\n" % branch)
+ for h in dhs:
+ repo.ui.note("new remote head %s\n" % short(h))
+ if error:
+ raise util.Abort(error, hint=hint)
+
+ # 6. Check for unsynced changes on involved branches.
+ if unsynced:
+ repo.ui.warn(_("note: unsynced remote changes!\n"))
+
+ if revs is None:
+ # use the fast path, no race possible on push
+ cg = repo._changegroup(outg, 'push')
else:
- vbranchmap = repo.branchmap()
- return vbranchmap
+ cg = repo.getbundle('push', heads=revs, common=common)
+ return cg, remoteheads