path: root/pp_sort.c
author     Tomasz Konojacki <me@xenu.pl>             2020-03-03 00:45:02 +0100
committer  Karl Williamson <khw@cpan.org>            2020-03-09 07:55:49 -0600
commit     7ea738a97baf02df1e454884eb4239bf3aadf5e7 (patch)
tree       c42a91534bcc0dd6e26757f59ea463017e0cc4b8 /pp_sort.c
parent     f1e99d0d8751081b78b97ac6bb774d9655a256fb (diff)
download   perl-7ea738a97baf02df1e454884eb4239bf3aadf5e7.tar.gz
pp_sort.c: normalize indentation
According to perlhack, code should be indented with four spaces. This commit doesn't contain any functional changes. If you're seeing it in "git blame" output, try the -w switch, which hides whitespace-only changes.
Diffstat (limited to 'pp_sort.c')
-rw-r--r--  pp_sort.c | 946
1 file changed, 473 insertions(+), 473 deletions(-)
diff --git a/pp_sort.c b/pp_sort.c
index 1cea9baeed..a65f28f182 100644
--- a/pp_sort.c
+++ b/pp_sort.c
@@ -34,7 +34,7 @@
#define sv_cmp_locale_static Perl_sv_cmp_locale
#ifndef SMALLSORT
-#define SMALLSORT (200)
+#define SMALLSORT (200)
#endif
/* Flags for sortsv_flags */
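
Context for this hunk: SMALLSORT is the cutoff below which Perl_sortsv_flags keeps its merge scratch space in a stack buffer rather than heap-allocating it (see the Newx/Safefree calls further down in this diff). A minimal sketch of that pattern, with hypothetical names, using plain malloc/free where Perl uses Newx/Safefree:

    #include <stdlib.h>

    /* Sketch only: stack scratch for small inputs, heap otherwise. */
    static void sort_with_scratch(void **base, size_t nmemb)
    {
        void *small[200];                 /* mirrors SMALLSORT == 200 */
        void **aux = small;
        if (nmemb > 200)
            aux = malloc(nmemb * sizeof(void *));   /* Perl: Newx() */
        (void)base;
        /* ... merge base using aux as the auxiliary array ... */
        if (aux != small)
            free(aux);                    /* free iff heap-allocated */
    }
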
@@ -60,8 +60,8 @@
*/
-typedef char * aptr; /* pointer for arithmetic on sizes */
-typedef SV * gptr; /* pointers in our lists */
+typedef char * aptr; /* pointer for arithmetic on sizes */
+typedef SV * gptr; /* pointers in our lists */
/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
@@ -74,31 +74,31 @@ typedef SV * gptr; /* pointers in our lists */
/* Pointer types for arithmetic and storage and convenience casts */
-#define APTR(P) ((aptr)(P))
-#define GPTP(P) ((gptr *)(P))
+#define APTR(P) ((aptr)(P))
+#define GPTP(P) ((gptr *)(P))
#define GPPP(P) ((gptr **)(P))
/* byte offset from pointer P to (larger) pointer Q */
-#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
+#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
#define PSIZE sizeof(gptr)
/* If PSIZE is power of 2, make PSHIFT that power, if that helps */
-#ifdef PSHIFT
-#define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT))
-#define PNBYTE(N) ((N) << (PSHIFT))
-#define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N)))
+#ifdef PSHIFT
+#define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT))
+#define PNBYTE(N) ((N) << (PSHIFT))
+#define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
-#define PNELEM(P, Q) (GPTP(Q) - GPTP(P))
-#define PNBYTE(N) ((N) * (PSIZE))
-#define PINDEX(P, N) (GPTP(P) + (N))
+#define PNELEM(P, Q) (GPTP(Q) - GPTP(P))
+#define PNBYTE(N) ((N) * (PSIZE))
+#define PINDEX(P, N) (GPTP(P) + (N))
#endif
/* Pointer into other corresponding to pointer into this */
-#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
+#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))
#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
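
An aside on the macros reindented above: PNELEM, PNBYTE and PINDEX convert between element counts and byte offsets (via a shift when PSIZE is a known power of two), and FROMTOUPTO is a do/while copy loop, so it always moves at least one element and callers must guarantee src < lim on entry. A small self-contained illustration:

    #include <assert.h>

    typedef void *gptr;

    int main(void)
    {
        gptr buf[8] = {0}, dstbuf[8] = {0};
        gptr *p = buf, *q = buf + 5;

        assert(q - p == 5);          /* PNELEM(p, q): elements between */
        assert(p + 5 == q);          /* PINDEX(p, 5): p advanced by 5  */

        /* FROMTOUPTO(src, dst, lim) expands to this do/while; note
         * that it copies at least once before testing the limit. */
        gptr *src = buf, *dst = dstbuf, *lim = buf + 3;
        do { *dst++ = *src++; } while (src < lim);
        assert(src == lim && dst == dstbuf + 3);
        return 0;
    }
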
@@ -109,7 +109,7 @@ typedef SV * gptr; /* pointers in our lists */
** NEXT is used as an lvalue, too.
*/
-#define NEXT(P) (*GPPP(P))
+#define NEXT(P) (*GPPP(P))
/* PTHRESH is the minimum number of pairs with the same sense to justify
@@ -117,7 +117,7 @@ typedef SV * gptr; /* pointers in our lists */
** not just elements, so PTHRESH == 8 means a run of 16.
*/
-#define PTHRESH (8)
+#define PTHRESH (8)
/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
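
Background on NEXT (unchanged in meaning by this commit): run boundaries are threaded through the auxiliary array as a linked list, with the slot at P holding a pointer to the end of the current run, which is why NEXT is used as an lvalue too. A hedged sketch of walking such a list (count_runs is a hypothetical helper, not in pp_sort.c):

    typedef void *gptr;
    #define NEXT(P) (*(gptr **)(P))      /* same shape as above */

    /* Sketch: count runs by hopping from boundary to boundary. */
    static size_t count_runs(gptr *p2, gptr *last)
    {
        size_t runs = 0;
        while (p2 < last) {
            p2 = NEXT(p2);               /* slot points at run's end */
            ++runs;
        }
        return runs;
    }
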
@@ -166,12 +166,12 @@ typedef SV * gptr; /* pointers in our lists */
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run. b <= q < p <= r <= t.
-** b through q is a run (perhaps trivial)
-** q through p are uninteresting pairs
-** p through r is a run
+** b through q is a run (perhaps trivial)
+** q through p are uninteresting pairs
+** p through r is a run
**
** 2) Long run. b < r <= q < t.
-** b through q is a run (of length >= 2 * PTHRESH)
+** b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
@@ -191,63 +191,63 @@ dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
last = PINDEX(b, nmemb);
sense = (cmp(aTHX_ *b, *(b+1)) > 0);
for (p2 = list2; b < last; ) {
- /* We just started, or just reversed sense.
- ** Set t at end of pairs with the prevailing sense.
- */
- for (p = b+2, t = p; ++p < last; t = ++p) {
- if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
- }
- q = b;
- /* Having laid out the playing field, look for long runs */
- do {
- p = r = b + (2 * PTHRESH);
- if (r >= t) p = r = t; /* too short to care about */
- else {
- while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
- ((p -= 2) > q)) {}
- if (p <= q) {
- /* b through r is a (long) run.
- ** Extend it as far as possible.
- */
- p = q = r;
- while (((p += 2) < t) &&
- ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
- r = p = q + 2; /* no simple pairs, no after-run */
- }
- }
- if (q > b) { /* run of greater than 2 at b */
- gptr *savep = p;
-
- p = q += 2;
- /* pick up singleton, if possible */
- if ((p == t) &&
- ((t + 1) == last) &&
- ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
- savep = r = p = q = last;
- p2 = NEXT(p2) = p2 + (p - b); ++runs;
- if (sense)
- while (b < --p) {
- const gptr c = *b;
- *b++ = *p;
- *p = c;
- }
- p = savep;
- }
- while (q < p) { /* simple pairs */
- p2 = NEXT(p2) = p2 + 2; ++runs;
- if (sense) {
- const gptr c = *q++;
- *(q-1) = *q;
- *q++ = c;
- } else q += 2;
- }
- if (((b = p) == t) && ((t+1) == last)) {
- NEXT(p2) = p2 + 1; ++runs;
- b++;
- }
- q = r;
- } while (b < t);
- sense = !sense;
+ /* We just started, or just reversed sense.
+ ** Set t at end of pairs with the prevailing sense.
+ */
+ for (p = b+2, t = p; ++p < last; t = ++p) {
+ if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
+ }
+ q = b;
+ /* Having laid out the playing field, look for long runs */
+ do {
+ p = r = b + (2 * PTHRESH);
+ if (r >= t) p = r = t; /* too short to care about */
+ else {
+ while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
+ ((p -= 2) > q)) {}
+ if (p <= q) {
+ /* b through r is a (long) run.
+ ** Extend it as far as possible.
+ */
+ p = q = r;
+ while (((p += 2) < t) &&
+ ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
+ r = p = q + 2; /* no simple pairs, no after-run */
+ }
+ }
+ if (q > b) { /* run of greater than 2 at b */
+ gptr *savep = p;
+
+ p = q += 2;
+ /* pick up singleton, if possible */
+ if ((p == t) &&
+ ((t + 1) == last) &&
+ ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
+ savep = r = p = q = last;
+ p2 = NEXT(p2) = p2 + (p - b); ++runs;
+ if (sense)
+ while (b < --p) {
+ const gptr c = *b;
+ *b++ = *p;
+ *p = c;
+ }
+ p = savep;
+ }
+ while (q < p) { /* simple pairs */
+ p2 = NEXT(p2) = p2 + 2; ++runs;
+ if (sense) {
+ const gptr c = *q++;
+ *(q-1) = *q;
+ *q++ = c;
+ } else q += 2;
+ }
+ if (((b = p) == t) && ((t+1) == last)) {
+ NEXT(p2) = p2 + 1; ++runs;
+ b++;
+ }
+ q = r;
+ } while (b < t);
+ sense = !sense;
}
return runs;
}
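
To summarize the body above: dynprep scans adjacent pairs, grows ascending or descending runs, reverses descending runs in place so every run comes out ascending, records each run's end via NEXT in list2, and returns the run count. The in-place reversal it performs when sense is set is the classic two-pointer swap:

    /* Sketch: reverse the descending run [b, p) in place, the same
     * shape as the "if (sense) while (b < --p)" loop above. */
    static void reverse_run(void **b, void **p)
    {
        while (b < --p) {
            void *c = *b;
            *b++ = *p;
            *p = c;
        }
    }
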
@@ -334,9 +334,9 @@ dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
*/
typedef struct {
- IV offset; /* offset of 1st of 2 runs at this level */
- IV runs; /* how many runs must be combined into 1 */
-} off_runs; /* pseudo-stack element */
+ IV offset; /* offset of 1st of 2 runs at this level */
+ IV runs; /* how many runs must be combined into 1 */
+} off_runs; /* pseudo-stack element */
static I32
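
How the off_runs pseudo-stack is fed (see the "while (runs > 2)" loop further down): at each level roughly half the runs are stacked for later and the rest are processed first, so no recursion is needed. A small standalone demo of that halving, assuming 7 initial runs:

    #include <stdio.h>

    int main(void)
    {
        long runs = 7, level = 0;
        while (runs > 2) {
            long stacked = runs / 2;       /* second half, deferred */
            runs -= stacked;               /* first half, done now  */
            printf("level %ld: defer %ld, keep %ld\n",
                   ++level, stacked, runs);
        }
        return 0;  /* level 1: defer 3, keep 4; level 2: defer 2, keep 2 */
    }
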
@@ -370,16 +370,16 @@ Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
SVCOMPARE_t savecmp = NULL;
PERL_ARGS_ASSERT_SORTSV_FLAGS;
- if (nmemb <= 1) return; /* sorted trivially */
+ if (nmemb <= 1) return; /* sorted trivially */
if ((flags & SORTf_DESC) != 0) {
- savecmp = PL_sort_RealCmp; /* Save current comparison routine, if any */
- PL_sort_RealCmp = cmp; /* Put comparison routine where cmp_desc can find it */
- cmp = cmp_desc;
+ savecmp = PL_sort_RealCmp; /* Save current comparison routine, if any */
+ PL_sort_RealCmp = cmp; /* Put comparison routine where cmp_desc can find it */
+ cmp = cmp_desc;
}
- if (nmemb <= SMALLSORT) aux = small; /* use stack for aux array */
- else { Newx(aux,nmemb,gptr); } /* allocate auxiliary array */
+ if (nmemb <= SMALLSORT) aux = small; /* use stack for aux array */
+ else { Newx(aux,nmemb,gptr); } /* allocate auxiliary array */
level = 0;
stackp = stack;
stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
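
The SORTf_DESC branch above works by parking the caller's comparator in PL_sort_RealCmp and substituting cmp_desc. cmp_desc's body is not part of this diff; a sign-flipping shim is the plausible shape (hypothetical code, generic pointers for self-containment):

    /* Sketch (hypothetical): descending order via a shim that
     * inverts whatever the real comparator says. */
    typedef int (*cmp_fn)(const void *, const void *);
    static cmp_fn real_cmp;

    static int cmp_desc_shim(const void *a, const void *b)
    {
        return -real_cmp(a, b);            /* flip the sign */
    }
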
@@ -387,184 +387,184 @@ Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
which[0] = which[2] = base;
which[1] = aux;
for (;;) {
- /* On levels where both runs have be constructed (stackp->runs == 0),
- * merge them, and note the offset of their end, in case the offset
- * is needed at the next level up. Hop up a level, and,
- * as long as stackp->runs is 0, keep merging.
- */
- IV runs = stackp->runs;
- if (runs == 0) {
- gptr *list1, *list2;
- iwhich = level & 1;
- list1 = which[iwhich]; /* area where runs are now */
- list2 = which[++iwhich]; /* area for merged runs */
- do {
- gptr *l1, *l2, *tp2;
- offset = stackp->offset;
- f1 = p1 = list1 + offset; /* start of first run */
- p = tp2 = list2 + offset; /* where merged run will go */
- t = NEXT(p); /* where first run ends */
- f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
- t = NEXT(t); /* where second runs ends */
- l2 = POTHER(t, list2, list1); /* ... on the other side */
- offset = PNELEM(list2, t);
- while (f1 < l1 && f2 < l2) {
- /* If head 1 is larger than head 2, find ALL the elements
- ** in list 2 strictly less than head1, write them all,
- ** then head 1. Then compare the new heads, and repeat,
- ** until one or both lists are exhausted.
- **
- ** In all comparisons (after establishing
- ** which head to merge) the item to merge
- ** (at pointer q) is the first operand of
- ** the comparison. When we want to know
- ** if "q is strictly less than the other",
- ** we can't just do
- ** cmp(q, other) < 0
- ** because stability demands that we treat equality
- ** as high when q comes from l2, and as low when
- ** q was from l1. So we ask the question by doing
- ** cmp(q, other) <= sense
- ** and make sense == 0 when equality should look low,
- ** and -1 when equality should look high.
- */
-
- gptr *q;
- if (cmp(aTHX_ *f1, *f2) <= 0) {
- q = f2; b = f1; t = l1;
- sense = -1;
- } else {
- q = f1; b = f2; t = l2;
- sense = 0;
- }
-
-
- /* ramp up
- **
- ** Leave t at something strictly
- ** greater than q (or at the end of the list),
- ** and b at something strictly less than q.
- */
- for (i = 1, run = 0 ;;) {
- if ((p = PINDEX(b, i)) >= t) {
- /* off the end */
- if (((p = PINDEX(t, -1)) > b) &&
- (cmp(aTHX_ *q, *p) <= sense))
- t = p;
- else b = p;
- break;
- } else if (cmp(aTHX_ *q, *p) <= sense) {
- t = p;
- break;
- } else b = p;
- if (++run >= RTHRESH) i += i;
- }
-
-
- /* q is known to follow b and must be inserted before t.
- ** Increment b, so the range of possibilities is [b,t).
- ** Round binary split down, to favor early appearance.
- ** Adjust b and t until q belongs just before t.
- */
-
- b++;
- while (b < t) {
- p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
- if (cmp(aTHX_ *q, *p) <= sense) {
- t = p;
- } else b = p + 1;
- }
-
-
- /* Copy all the strictly low elements */
-
- if (q == f1) {
- FROMTOUPTO(f2, tp2, t);
- *tp2++ = *f1++;
- } else {
- FROMTOUPTO(f1, tp2, t);
- *tp2++ = *f2++;
- }
- }
-
-
- /* Run out remaining list */
- if (f1 == l1) {
- if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
- } else FROMTOUPTO(f1, tp2, l1);
- p1 = NEXT(p1) = POTHER(tp2, list2, list1);
-
- if (--level == 0) goto done;
- --stackp;
- t = list1; list1 = list2; list2 = t; /* swap lists */
- } while ((runs = stackp->runs) == 0);
- }
-
-
- stackp->runs = 0; /* current run will finish level */
- /* While there are more than 2 runs remaining,
- * turn them into exactly 2 runs (at the "other" level),
- * each made up of approximately half the runs.
- * Stack the second half for later processing,
- * and set about producing the first half now.
- */
- while (runs > 2) {
- ++level;
- ++stackp;
- stackp->offset = offset;
- runs -= stackp->runs = runs / 2;
- }
- /* We must construct a single run from 1 or 2 runs.
- * All the original runs are in which[0] == base.
- * The run we construct must end up in which[level&1].
- */
- iwhich = level & 1;
- if (runs == 1) {
- /* Constructing a single run from a single run.
- * If it's where it belongs already, there's nothing to do.
- * Otherwise, copy it to where it belongs.
- * A run of 1 is either a singleton at level 0,
- * or the second half of a split 3. In neither event
- * is it necessary to set offset. It will be set by the merge
- * that immediately follows.
- */
- if (iwhich) { /* Belongs in aux, currently in base */
- f1 = b = PINDEX(base, offset); /* where list starts */
- f2 = PINDEX(aux, offset); /* where list goes */
- t = NEXT(f2); /* where list will end */
- offset = PNELEM(aux, t); /* offset thereof */
- t = PINDEX(base, offset); /* where it currently ends */
- FROMTOUPTO(f1, f2, t); /* copy */
- NEXT(b) = t; /* set up parallel pointer */
- } else if (level == 0) goto done; /* single run at level 0 */
- } else {
- /* Constructing a single run from two runs.
- * The merge code at the top will do that.
- * We need only make sure the two runs are in the "other" array,
- * so they'll end up in the correct array after the merge.
- */
- ++level;
- ++stackp;
- stackp->offset = offset;
- stackp->runs = 0; /* take care of both runs, trigger merge */
- if (!iwhich) { /* Merged runs belong in aux, copy 1st */
- f1 = b = PINDEX(base, offset); /* where first run starts */
- f2 = PINDEX(aux, offset); /* where it will be copied */
- t = NEXT(f2); /* where first run will end */
- offset = PNELEM(aux, t); /* offset thereof */
- p = PINDEX(base, offset); /* end of first run */
- t = NEXT(t); /* where second run will end */
- t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
- FROMTOUPTO(f1, f2, t); /* copy both runs */
- NEXT(b) = p; /* paralleled pointer for 1st */
- NEXT(p) = t; /* ... and for second */
- }
- }
+ /* On levels where both runs have be constructed (stackp->runs == 0),
+ * merge them, and note the offset of their end, in case the offset
+ * is needed at the next level up. Hop up a level, and,
+ * as long as stackp->runs is 0, keep merging.
+ */
+ IV runs = stackp->runs;
+ if (runs == 0) {
+ gptr *list1, *list2;
+ iwhich = level & 1;
+ list1 = which[iwhich]; /* area where runs are now */
+ list2 = which[++iwhich]; /* area for merged runs */
+ do {
+ gptr *l1, *l2, *tp2;
+ offset = stackp->offset;
+ f1 = p1 = list1 + offset; /* start of first run */
+ p = tp2 = list2 + offset; /* where merged run will go */
+ t = NEXT(p); /* where first run ends */
+ f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
+ t = NEXT(t); /* where second runs ends */
+ l2 = POTHER(t, list2, list1); /* ... on the other side */
+ offset = PNELEM(list2, t);
+ while (f1 < l1 && f2 < l2) {
+ /* If head 1 is larger than head 2, find ALL the elements
+ ** in list 2 strictly less than head1, write them all,
+ ** then head 1. Then compare the new heads, and repeat,
+ ** until one or both lists are exhausted.
+ **
+ ** In all comparisons (after establishing
+ ** which head to merge) the item to merge
+ ** (at pointer q) is the first operand of
+ ** the comparison. When we want to know
+ ** if "q is strictly less than the other",
+ ** we can't just do
+ ** cmp(q, other) < 0
+ ** because stability demands that we treat equality
+ ** as high when q comes from l2, and as low when
+ ** q was from l1. So we ask the question by doing
+ ** cmp(q, other) <= sense
+ ** and make sense == 0 when equality should look low,
+ ** and -1 when equality should look high.
+ */
+
+ gptr *q;
+ if (cmp(aTHX_ *f1, *f2) <= 0) {
+ q = f2; b = f1; t = l1;
+ sense = -1;
+ } else {
+ q = f1; b = f2; t = l2;
+ sense = 0;
+ }
+
+
+ /* ramp up
+ **
+ ** Leave t at something strictly
+ ** greater than q (or at the end of the list),
+ ** and b at something strictly less than q.
+ */
+ for (i = 1, run = 0 ;;) {
+ if ((p = PINDEX(b, i)) >= t) {
+ /* off the end */
+ if (((p = PINDEX(t, -1)) > b) &&
+ (cmp(aTHX_ *q, *p) <= sense))
+ t = p;
+ else b = p;
+ break;
+ } else if (cmp(aTHX_ *q, *p) <= sense) {
+ t = p;
+ break;
+ } else b = p;
+ if (++run >= RTHRESH) i += i;
+ }
+
+
+ /* q is known to follow b and must be inserted before t.
+ ** Increment b, so the range of possibilities is [b,t).
+ ** Round binary split down, to favor early appearance.
+ ** Adjust b and t until q belongs just before t.
+ */
+
+ b++;
+ while (b < t) {
+ p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
+ if (cmp(aTHX_ *q, *p) <= sense) {
+ t = p;
+ } else b = p + 1;
+ }
+
+
+ /* Copy all the strictly low elements */
+
+ if (q == f1) {
+ FROMTOUPTO(f2, tp2, t);
+ *tp2++ = *f1++;
+ } else {
+ FROMTOUPTO(f1, tp2, t);
+ *tp2++ = *f2++;
+ }
+ }
+
+
+ /* Run out remaining list */
+ if (f1 == l1) {
+ if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
+ } else FROMTOUPTO(f1, tp2, l1);
+ p1 = NEXT(p1) = POTHER(tp2, list2, list1);
+
+ if (--level == 0) goto done;
+ --stackp;
+ t = list1; list1 = list2; list2 = t; /* swap lists */
+ } while ((runs = stackp->runs) == 0);
+ }
+
+
+ stackp->runs = 0; /* current run will finish level */
+ /* While there are more than 2 runs remaining,
+ * turn them into exactly 2 runs (at the "other" level),
+ * each made up of approximately half the runs.
+ * Stack the second half for later processing,
+ * and set about producing the first half now.
+ */
+ while (runs > 2) {
+ ++level;
+ ++stackp;
+ stackp->offset = offset;
+ runs -= stackp->runs = runs / 2;
+ }
+ /* We must construct a single run from 1 or 2 runs.
+ * All the original runs are in which[0] == base.
+ * The run we construct must end up in which[level&1].
+ */
+ iwhich = level & 1;
+ if (runs == 1) {
+ /* Constructing a single run from a single run.
+ * If it's where it belongs already, there's nothing to do.
+ * Otherwise, copy it to where it belongs.
+ * A run of 1 is either a singleton at level 0,
+ * or the second half of a split 3. In neither event
+ * is it necessary to set offset. It will be set by the merge
+ * that immediately follows.
+ */
+ if (iwhich) { /* Belongs in aux, currently in base */
+ f1 = b = PINDEX(base, offset); /* where list starts */
+ f2 = PINDEX(aux, offset); /* where list goes */
+ t = NEXT(f2); /* where list will end */
+ offset = PNELEM(aux, t); /* offset thereof */
+ t = PINDEX(base, offset); /* where it currently ends */
+ FROMTOUPTO(f1, f2, t); /* copy */
+ NEXT(b) = t; /* set up parallel pointer */
+ } else if (level == 0) goto done; /* single run at level 0 */
+ } else {
+ /* Constructing a single run from two runs.
+ * The merge code at the top will do that.
+ * We need only make sure the two runs are in the "other" array,
+ * so they'll end up in the correct array after the merge.
+ */
+ ++level;
+ ++stackp;
+ stackp->offset = offset;
+ stackp->runs = 0; /* take care of both runs, trigger merge */
+ if (!iwhich) { /* Merged runs belong in aux, copy 1st */
+ f1 = b = PINDEX(base, offset); /* where first run starts */
+ f2 = PINDEX(aux, offset); /* where it will be copied */
+ t = NEXT(f2); /* where first run will end */
+ offset = PNELEM(aux, t); /* offset thereof */
+ p = PINDEX(base, offset); /* end of first run */
+ t = NEXT(t); /* where second run will end */
+ t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
+ FROMTOUPTO(f1, f2, t); /* copy both runs */
+ NEXT(b) = p; /* paralleled pointer for 1st */
+ NEXT(p) = t; /* ... and for second */
+ }
+ }
}
done:
- if (aux != small) Safefree(aux); /* free iff allocated */
+ if (aux != small) Safefree(aux); /* free iff allocated */
if (savecmp != NULL) {
- PL_sort_RealCmp = savecmp; /* Restore current comparison routine, if any */
+ PL_sort_RealCmp = savecmp; /* Restore current comparison routine, if any */
}
return;
}
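
The long comment in this hunk about cmp(q, other) <= sense is worth restating with values: sense is 0 when q came from the earlier run (ties go to q, preserving input order) and -1 when q came from the later run (ties keep the earlier run's element in front). A minimal check of both cases:

    #include <assert.h>

    /* "q belongs strictly before p", as asked in the merge above. */
    static int belongs_before(int cmp_result, int sense)
    {
        return cmp_result <= sense;
    }

    int main(void)
    {
        assert(belongs_before(0,  0) == 1);  /* tie, q from run 1: q first */
        assert(belongs_before(0, -1) == 0);  /* tie, q from run 2: q waits */
        assert(belongs_before(-1, -1) == 1); /* strictly less always wins  */
        return 0;
    }
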
@@ -616,165 +616,165 @@ PP(pp_sort)
I32 all_SIVs = 1;
if ((priv & OPpSORT_DESCEND) != 0)
- sort_flags |= SORTf_DESC;
+ sort_flags |= SORTf_DESC;
if ((priv & OPpSORT_STABLE) != 0)
- sort_flags |= SORTf_STABLE;
+ sort_flags |= SORTf_STABLE;
if ((priv & OPpSORT_UNSTABLE) != 0)
- sort_flags |= SORTf_UNSTABLE;
+ sort_flags |= SORTf_UNSTABLE;
if (gimme != G_ARRAY) {
- SP = MARK;
- EXTEND(SP,1);
- RETPUSHUNDEF;
+ SP = MARK;
+ EXTEND(SP,1);
+ RETPUSHUNDEF;
}
ENTER;
SAVEVPTR(PL_sortcop);
if (flags & OPf_STACKED) {
- if (flags & OPf_SPECIAL) {
+ if (flags & OPf_SPECIAL) {
OP *nullop = OpSIBLING(cLISTOP->op_first); /* pass pushmark */
assert(nullop->op_type == OP_NULL);
- PL_sortcop = nullop->op_next;
- }
- else {
- GV *autogv = NULL;
- HV *stash;
- cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
- check_cv:
- if (cv && SvPOK(cv)) {
- const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
- if (proto && strEQ(proto, "$$")) {
- hasargs = TRUE;
- }
- }
- if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
- is_xsub = 1;
- }
- else if (!(cv && CvROOT(cv))) {
- if (gv) {
- goto autoload;
- }
- else if (!CvANON(cv) && (gv = CvGV(cv))) {
- if (cv != GvCV(gv)) cv = GvCV(gv);
- autoload:
- if (!autogv && (
- autogv = gv_autoload_pvn(
- GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
- GvNAMEUTF8(gv) ? SVf_UTF8 : 0
- )
- )) {
- cv = GvCVu(autogv);
- goto check_cv;
- }
- else {
- SV *tmpstr = sv_newmortal();
- gv_efullname3(tmpstr, gv, NULL);
- DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
- SVfARG(tmpstr));
- }
- }
- else {
- DIE(aTHX_ "Undefined subroutine in sort");
- }
- }
-
- if (is_xsub)
- PL_sortcop = (OP*)cv;
- else
- PL_sortcop = CvSTART(cv);
- }
+ PL_sortcop = nullop->op_next;
+ }
+ else {
+ GV *autogv = NULL;
+ HV *stash;
+ cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
+ check_cv:
+ if (cv && SvPOK(cv)) {
+ const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
+ if (proto && strEQ(proto, "$$")) {
+ hasargs = TRUE;
+ }
+ }
+ if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
+ is_xsub = 1;
+ }
+ else if (!(cv && CvROOT(cv))) {
+ if (gv) {
+ goto autoload;
+ }
+ else if (!CvANON(cv) && (gv = CvGV(cv))) {
+ if (cv != GvCV(gv)) cv = GvCV(gv);
+ autoload:
+ if (!autogv && (
+ autogv = gv_autoload_pvn(
+ GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
+ GvNAMEUTF8(gv) ? SVf_UTF8 : 0
+ )
+ )) {
+ cv = GvCVu(autogv);
+ goto check_cv;
+ }
+ else {
+ SV *tmpstr = sv_newmortal();
+ gv_efullname3(tmpstr, gv, NULL);
+ DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
+ SVfARG(tmpstr));
+ }
+ }
+ else {
+ DIE(aTHX_ "Undefined subroutine in sort");
+ }
+ }
+
+ if (is_xsub)
+ PL_sortcop = (OP*)cv;
+ else
+ PL_sortcop = CvSTART(cv);
+ }
}
else {
- PL_sortcop = NULL;
+ PL_sortcop = NULL;
}
/* optimiser converts "@a = sort @a" to "sort \@a". In this case,
* push (@a) onto stack, then assign result back to @a at the end of
* this function */
if (priv & OPpSORT_INPLACE) {
- assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
- (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
- av = MUTABLE_AV((*SP));
+ assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
+ (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
+ av = MUTABLE_AV((*SP));
if (SvREADONLY(av))
Perl_croak_no_modify();
- max = AvFILL(av) + 1;
+ max = AvFILL(av) + 1;
MEXTEND(SP, max);
- if (SvMAGICAL(av)) {
- for (i=0; i < max; i++) {
- SV **svp = av_fetch(av, i, FALSE);
- *SP++ = (svp) ? *svp : NULL;
- }
- }
+ if (SvMAGICAL(av)) {
+ for (i=0; i < max; i++) {
+ SV **svp = av_fetch(av, i, FALSE);
+ *SP++ = (svp) ? *svp : NULL;
+ }
+ }
else {
SV **svp = AvARRAY(av);
assert(svp || max == 0);
- for (i = 0; i < max; i++)
+ for (i = 0; i < max; i++)
*SP++ = *svp++;
- }
+ }
SP--;
p1 = p2 = SP - (max-1);
}
else {
- p2 = MARK+1;
- max = SP - MARK;
- }
+ p2 = MARK+1;
+ max = SP - MARK;
+ }
/* shuffle stack down, removing optional initial cv (p1!=p2), plus
* any nulls; also stringify or converting to integer or number as
* required any args */
copytmps = cBOOL(PL_sortcop);
for (i=max; i > 0 ; i--) {
- if ((*p1 = *p2++)) { /* Weed out nulls. */
- if (copytmps && SvPADTMP(*p1)) {
- *p1 = sv_mortalcopy(*p1);
+ if ((*p1 = *p2++)) { /* Weed out nulls. */
+ if (copytmps && SvPADTMP(*p1)) {
+ *p1 = sv_mortalcopy(*p1);
+ }
+ SvTEMP_off(*p1);
+ if (!PL_sortcop) {
+ if (priv & OPpSORT_NUMERIC) {
+ if (priv & OPpSORT_INTEGER) {
+ if (!SvIOK(*p1))
+ (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
+ }
+ else {
+ if (!SvNSIOK(*p1))
+ (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
+ if (all_SIVs && !SvSIOK(*p1))
+ all_SIVs = 0;
+ }
+ }
+ else {
+ if (!SvPOK(*p1))
+ (void)sv_2pv_flags(*p1, 0,
+ SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
+ }
+ if (SvAMAGIC(*p1))
+ overloading = 1;
}
- SvTEMP_off(*p1);
- if (!PL_sortcop) {
- if (priv & OPpSORT_NUMERIC) {
- if (priv & OPpSORT_INTEGER) {
- if (!SvIOK(*p1))
- (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
- }
- else {
- if (!SvNSIOK(*p1))
- (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
- if (all_SIVs && !SvSIOK(*p1))
- all_SIVs = 0;
- }
- }
- else {
- if (!SvPOK(*p1))
- (void)sv_2pv_flags(*p1, 0,
- SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
- }
- if (SvAMAGIC(*p1))
- overloading = 1;
- }
- p1++;
- }
- else
- max--;
+ p1++;
+ }
+ else
+ max--;
}
if (max > 1) {
- SV **start;
- if (PL_sortcop) {
- PERL_CONTEXT *cx;
- const bool oldcatch = CATCH_GET;
+ SV **start;
+ if (PL_sortcop) {
+ PERL_CONTEXT *cx;
+ const bool oldcatch = CATCH_GET;
I32 old_savestack_ix = PL_savestack_ix;
- SAVEOP();
-
- CATCH_SET(TRUE);
- PUSHSTACKi(PERLSI_SORT);
- if (!hasargs && !is_xsub) {
- SAVEGENERICSV(PL_firstgv);
- SAVEGENERICSV(PL_secondgv);
- PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
- gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
- ));
- PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
- gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
- ));
+ SAVEOP();
+
+ CATCH_SET(TRUE);
+ PUSHSTACKi(PERLSI_SORT);
+ if (!hasargs && !is_xsub) {
+ SAVEGENERICSV(PL_firstgv);
+ SAVEGENERICSV(PL_secondgv);
+ PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
+ gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
+ ));
+ PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
+ gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
+ ));
/* make sure the GP isn't removed out from under us for
* the SAVESPTR() */
save_gp(PL_firstgv, 0);
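
The PL_firstgv/PL_secondgv setup above exists so each comparison can alias its operands into $a and $b without fresh symbol lookups. S_sortcv's body is outside this diff; a heavily simplified sketch of the mechanism (stack and refcount handling elided):

    /* Sketch only: how a comparison reaches the sort block's $a/$b. */
    static I32 sortcv_sketch(pTHX_ SV *const a, SV *const b)
    {
        GvSV(PL_firstgv)  = a;     /* alias $a */
        GvSV(PL_secondgv) = b;     /* alias $b */
        /* ... run PL_sortcop, then read the numeric result the
         * block left on the stack ... */
        return 0;                  /* placeholder */
    }
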
@@ -782,86 +782,86 @@ PP(pp_sort)
/* we don't want modifications localized */
GvINTRO_off(PL_firstgv);
GvINTRO_off(PL_secondgv);
- SAVEGENERICSV(GvSV(PL_firstgv));
- SvREFCNT_inc(GvSV(PL_firstgv));
- SAVEGENERICSV(GvSV(PL_secondgv));
- SvREFCNT_inc(GvSV(PL_secondgv));
- }
+ SAVEGENERICSV(GvSV(PL_firstgv));
+ SvREFCNT_inc(GvSV(PL_firstgv));
+ SAVEGENERICSV(GvSV(PL_secondgv));
+ SvREFCNT_inc(GvSV(PL_secondgv));
+ }
gimme = G_SCALAR;
- cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
- if (!(flags & OPf_SPECIAL)) {
- cx->cx_type = CXt_SUB|CXp_MULTICALL;
- cx_pushsub(cx, cv, NULL, hasargs);
- if (!is_xsub) {
- PADLIST * const padlist = CvPADLIST(cv);
-
- if (++CvDEPTH(cv) >= 2)
- pad_push(padlist, CvDEPTH(cv));
- PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));
-
- if (hasargs) {
- /* This is mostly copied from pp_entersub */
- AV * const av = MUTABLE_AV(PAD_SVl(0));
-
- cx->blk_sub.savearray = GvAV(PL_defgv);
- GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
- }
-
- }
- }
+ cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
+ if (!(flags & OPf_SPECIAL)) {
+ cx->cx_type = CXt_SUB|CXp_MULTICALL;
+ cx_pushsub(cx, cv, NULL, hasargs);
+ if (!is_xsub) {
+ PADLIST * const padlist = CvPADLIST(cv);
+
+ if (++CvDEPTH(cv) >= 2)
+ pad_push(padlist, CvDEPTH(cv));
+ PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));
+
+ if (hasargs) {
+ /* This is mostly copied from pp_entersub */
+ AV * const av = MUTABLE_AV(PAD_SVl(0));
+
+ cx->blk_sub.savearray = GvAV(PL_defgv);
+ GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
+ }
+
+ }
+ }
- start = p1 - max;
- sortsvp(aTHX_ start, max,
- (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
- sort_flags);
+ start = p1 - max;
+ sortsvp(aTHX_ start, max,
+ (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
+ sort_flags);
/* Reset cx, in case the context stack has been reallocated. */
cx = CX_CUR();
- PL_stack_sp = PL_stack_base + cx->blk_oldsp;
+ PL_stack_sp = PL_stack_base + cx->blk_oldsp;
CX_LEAVE_SCOPE(cx);
- if (!(flags & OPf_SPECIAL)) {
+ if (!(flags & OPf_SPECIAL)) {
assert(CxTYPE(cx) == CXt_SUB);
cx_popsub(cx);
- }
+ }
else
assert(CxTYPE(cx) == CXt_NULL);
/* there isn't a POPNULL ! */
- cx_popblock(cx);
+ cx_popblock(cx);
CX_POP(cx);
- POPSTACK;
- CATCH_SET(oldcatch);
- }
- else {
- MEXTEND(SP, 20); /* Can't afford stack realloc on signal. */
- start = ORIGMARK+1;
- sortsvp(aTHX_ start, max,
- (priv & OPpSORT_NUMERIC)
- ? ( ( ( priv & OPpSORT_INTEGER) || all_SIVs)
- ? ( overloading ? S_amagic_i_ncmp : S_sv_i_ncmp)
- : ( overloading ? S_amagic_ncmp : S_sv_ncmp ) )
- : (
+ POPSTACK;
+ CATCH_SET(oldcatch);
+ }
+ else {
+ MEXTEND(SP, 20); /* Can't afford stack realloc on signal. */
+ start = ORIGMARK+1;
+ sortsvp(aTHX_ start, max,
+ (priv & OPpSORT_NUMERIC)
+ ? ( ( ( priv & OPpSORT_INTEGER) || all_SIVs)
+ ? ( overloading ? S_amagic_i_ncmp : S_sv_i_ncmp)
+ : ( overloading ? S_amagic_ncmp : S_sv_ncmp ) )
+ : (
#ifdef USE_LOCALE_COLLATE
IN_LC_RUNTIME(LC_COLLATE)
- ? ( overloading
- ? (SVCOMPARE_t)S_amagic_cmp_locale
- : (SVCOMPARE_t)sv_cmp_locale_static)
+ ? ( overloading
+ ? (SVCOMPARE_t)S_amagic_cmp_locale
+ : (SVCOMPARE_t)sv_cmp_locale_static)
:
#endif
- ( overloading ? (SVCOMPARE_t)S_amagic_cmp : (SVCOMPARE_t)sv_cmp_static)),
- sort_flags);
- }
- if ((priv & OPpSORT_REVERSE) != 0) {
- SV **q = start+max-1;
- while (start < q) {
- SV * const tmp = *start;
- *start++ = *q;
- *q-- = tmp;
- }
- }
+ ( overloading ? (SVCOMPARE_t)S_amagic_cmp : (SVCOMPARE_t)sv_cmp_static)),
+ sort_flags);
+ }
+ if ((priv & OPpSORT_REVERSE) != 0) {
+ SV **q = start+max-1;
+ while (start < q) {
+ SV * const tmp = *start;
+ *start++ = *q;
+ *q-- = tmp;
+ }
+ }
}
if (av) {
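
For orientation, the comparator chosen by the sortsvp call above follows this decision tree (the names are the comparators defined in this file):

    numeric sort?
      yes: integer sort, or every value already a simple IV?
             yes: overloading ? S_amagic_i_ncmp     : S_sv_i_ncmp
             no:  overloading ? S_amagic_ncmp       : S_sv_ncmp
      no:  locale collation in effect?
             yes: overloading ? S_amagic_cmp_locale : sv_cmp_locale_static
             no:  overloading ? S_amagic_cmp        : sv_cmp_static
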
@@ -956,22 +956,22 @@ S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
PERL_ARGS_ASSERT_SORTCV_STACKED;
if (AvREAL(av)) {
- av_clear(av);
- AvREAL_off(av);
- AvREIFY_on(av);
+ av_clear(av);
+ AvREAL_off(av);
+ AvREIFY_on(av);
}
if (AvMAX(av) < 1) {
- SV **ary = AvALLOC(av);
- if (AvARRAY(av) != ary) {
- AvMAX(av) += AvARRAY(av) - AvALLOC(av);
- AvARRAY(av) = ary;
- }
- if (AvMAX(av) < 1) {
- Renew(ary,2,SV*);
- AvMAX(av) = 1;
- AvARRAY(av) = ary;
- AvALLOC(av) = ary;
- }
+ SV **ary = AvALLOC(av);
+ if (AvARRAY(av) != ary) {
+ AvMAX(av) += AvARRAY(av) - AvALLOC(av);
+ AvARRAY(av) = ary;
+ }
+ if (AvMAX(av) < 1) {
+ Renew(ary,2,SV*);
+ AvMAX(av) = 1;
+ AvARRAY(av) = ary;
+ AvALLOC(av) = ary;
+ }
}
AvFILLp(av) = 1;
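
What the AvMAX juggling above achieves: the pad @_ of a ($$)-prototyped comparator is recycled on every call, guaranteed room for two slots, and kept non-real so its elements are aliased rather than reference-counted. The step just past this hunk is the aliasing itself, which (as a sketch, since it is not shown here) looks like:

    /* Sketch of what follows AvFILLp(av) = 1 above: the two operands
     * become $_[0] and $_[1] without copying. */
    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
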
@@ -1028,8 +1028,8 @@ S_sv_ncmp(pTHX_ SV *const a, SV *const b)
PERL_ARGS_ASSERT_SV_NCMP;
if (cmp == 2) {
- if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
- return 0;
+ if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
+ return 0;
}
return cmp;
@@ -1048,8 +1048,8 @@ S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
#define tryCALL_AMAGICbin(left,right,meth) \
(SvAMAGIC(left)||SvAMAGIC(right)) \
- ? amagic_call(left, right, meth, 0) \
- : NULL;
+ ? amagic_call(left, right, meth, 0) \
+ : NULL;
#define SORT_NORMAL_RETURN_VALUE(val) (((val) > 0) ? 1 : ((val) ? -1 : 0))
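
SORT_NORMAL_RETURN_VALUE simply clamps an arbitrary comparison result into {-1, 0, 1}. A self-contained check:

    #include <assert.h>

    /* Same definition as above. */
    #define SORT_NORMAL_RETURN_VALUE(val) (((val) > 0) ? 1 : ((val) ? -1 : 0))

    int main(void)
    {
        assert(SORT_NORMAL_RETURN_VALUE(42) ==  1);
        assert(SORT_NORMAL_RETURN_VALUE(0)  ==  0);
        assert(SORT_NORMAL_RETURN_VALUE(-7) == -1);
        return 0;
    }
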
@@ -1065,10 +1065,10 @@ S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
const I32 i = SvIVX(tmpsv);
return SORT_NORMAL_RETURN_VALUE(i);
}
- else {
- const NV d = SvNV(tmpsv);
- return SORT_NORMAL_RETURN_VALUE(d);
- }
+ else {
+ const NV d = SvNV(tmpsv);
+ return SORT_NORMAL_RETURN_VALUE(d);
+ }
}
return S_sv_ncmp(aTHX_ a, b);
}
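
All four S_amagic_* comparators in this file share the shape visible in these hunks: try the overloaded operator via tryCALL_AMAGICbin, normalize whichever numeric SV it returned, and otherwise fall back to the plain comparison. Reassembled as a sketch (the SvIOK guard on the integer branch is inferred, since the hunks omit it):

    SV * const tmpsv = tryCALL_AMAGICbin(a, b, ncmp_amg);
    if (tmpsv) {
        if (SvIOK(tmpsv)) {                /* inferred guard */
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
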
@@ -1085,10 +1085,10 @@ S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
const I32 i = SvIVX(tmpsv);
return SORT_NORMAL_RETURN_VALUE(i);
}
- else {
- const NV d = SvNV(tmpsv);
- return SORT_NORMAL_RETURN_VALUE(d);
- }
+ else {
+ const NV d = SvNV(tmpsv);
+ return SORT_NORMAL_RETURN_VALUE(d);
+ }
}
return S_sv_i_ncmp(aTHX_ a, b);
}
@@ -1105,10 +1105,10 @@ S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
const I32 i = SvIVX(tmpsv);
return SORT_NORMAL_RETURN_VALUE(i);
}
- else {
- const NV d = SvNV(tmpsv);
- return SORT_NORMAL_RETURN_VALUE(d);
- }
+ else {
+ const NV d = SvNV(tmpsv);
+ return SORT_NORMAL_RETURN_VALUE(d);
+ }
}
return sv_cmp(str1, str2);
}
@@ -1127,10 +1127,10 @@ S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
const I32 i = SvIVX(tmpsv);
return SORT_NORMAL_RETURN_VALUE(i);
}
- else {
- const NV d = SvNV(tmpsv);
- return SORT_NORMAL_RETURN_VALUE(d);
- }
+ else {
+ const NV d = SvNV(tmpsv);
+ return SORT_NORMAL_RETURN_VALUE(d);
+ }
}
return sv_cmp_locale(str1, str2);
}