1N/A/* pp_sort.c
1N/A *
1N/A * Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
1N/A * 2000, 2001, 2002, 2003, 2004, by Larry Wall and others
1N/A *
1N/A * You may distribute under the terms of either the GNU General Public
1N/A * License or the Artistic License, as specified in the README file.
1N/A *
1N/A */
1N/A
1N/A/*
1N/A * ...they shuffled back towards the rear of the line. 'No, not at the
1N/A * rear!' the slave-driver shouted. 'Three files up. And stay there...
1N/A */
1N/A
1N/A#include "EXTERN.h"
1N/A#define PERL_IN_PP_SORT_C
1N/A#include "perl.h"
1N/A
1N/A#if defined(UNDER_CE)
1N/A/* looks like 'small' is reserved word for WINCE (or somesuch)*/
1N/A#define small xsmall
1N/A#endif
1N/A
1N/Astatic I32 sortcv(pTHX_ SV *a, SV *b);
1N/Astatic I32 sortcv_stacked(pTHX_ SV *a, SV *b);
1N/Astatic I32 sortcv_xsub(pTHX_ SV *a, SV *b);
1N/Astatic I32 sv_ncmp(pTHX_ SV *a, SV *b);
1N/Astatic I32 sv_i_ncmp(pTHX_ SV *a, SV *b);
1N/Astatic I32 amagic_ncmp(pTHX_ SV *a, SV *b);
1N/Astatic I32 amagic_i_ncmp(pTHX_ SV *a, SV *b);
1N/Astatic I32 amagic_cmp(pTHX_ SV *a, SV *b);
1N/Astatic I32 amagic_cmp_locale(pTHX_ SV *a, SV *b);
1N/A
1N/A#define sv_cmp_static Perl_sv_cmp
1N/A#define sv_cmp_locale_static Perl_sv_cmp_locale
1N/A
1N/A#define SORTHINTS(hintsv) \
1N/A (((hintsv) = GvSV(gv_fetchpv("sort::hints", GV_ADDMULTI, SVt_IV))), \
1N/A (SvIOK(hintsv) ? ((I32)SvIV(hintsv)) : 0))
1N/A
1N/A#ifndef SMALLSORT
1N/A#define SMALLSORT (200)
1N/A#endif
1N/A
1N/A/*
1N/A * The mergesort implementation is by Peter M. Mcilroy <pmcilroy@lucent.com>.
1N/A *
1N/A * The original code was written in conjunction with BSD Computer Software
1N/A * Research Group at University of California, Berkeley.
1N/A *
1N/A * See also: "Optimistic Merge Sort" (SODA '92)
1N/A *
1N/A * The integration to Perl is by John P. Linderman <jpl@research.att.com>.
1N/A *
1N/A * The code can be distributed under the same terms as Perl itself.
1N/A *
1N/A */
1N/A
1N/A
1N/Atypedef char * aptr; /* pointer for arithmetic on sizes */
1N/Atypedef SV * gptr; /* pointers in our lists */
1N/A
1N/A/* Binary merge internal sort, with a few special mods
1N/A** for the special perl environment it now finds itself in.
1N/A**
1N/A** Things that were once options have been hotwired
1N/A** to values suitable for this use. In particular, we'll always
1N/A** initialize looking for natural runs, we'll always produce stable
1N/A** output, and we'll always do Peter McIlroy's binary merge.
1N/A*/
1N/A
1N/A/* Pointer types for arithmetic and storage and convenience casts */
1N/A
1N/A#define APTR(P) ((aptr)(P))
1N/A#define GPTP(P) ((gptr *)(P))
1N/A#define GPPP(P) ((gptr **)(P))
1N/A
1N/A
1N/A/* byte offset from pointer P to (larger) pointer Q */
1N/A#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))
1N/A
1N/A#define PSIZE sizeof(gptr)
1N/A
1N/A/* If PSIZE is power of 2, make PSHIFT that power, if that helps */
1N/A
1N/A#ifdef PSHIFT
1N/A#define PNELEM(P, Q) (BYTEOFF(P,Q) >> (PSHIFT))
1N/A#define PNBYTE(N) ((N) << (PSHIFT))
1N/A#define PINDEX(P, N) (GPTP(APTR(P) + PNBYTE(N)))
1N/A#else
1N/A/* Leave optimization to compiler */
1N/A#define PNELEM(P, Q) (GPTP(Q) - GPTP(P))
1N/A#define PNBYTE(N) ((N) * (PSIZE))
1N/A#define PINDEX(P, N) (GPTP(P) + (N))
1N/A#endif
1N/A
/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))

/* Copy elements [src, lim) to dst, advancing both src and dst.
** NOTE: the do/while form always copies at least one element, so
** callers must guarantee src < lim on entry.
*/
#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
1N/A
1N/A
/* Runs are identified by a pointer in the auxiliary list.
1N/A** The pointer is at the start of the list,
1N/A** and it points to the start of the next list.
1N/A** NEXT is used as an lvalue, too.
1N/A*/
1N/A
1N/A#define NEXT(P) (*GPPP(P))
1N/A
1N/A
1N/A/* PTHRESH is the minimum number of pairs with the same sense to justify
1N/A** checking for a run and extending it. Note that PTHRESH counts PAIRS,
1N/A** not just elements, so PTHRESH == 8 means a run of 16.
1N/A*/
1N/A
1N/A#define PTHRESH (8)
1N/A
1N/A/* RTHRESH is the number of elements in a run that must compare low
1N/A** to the low element from the opposing run before we justify
1N/A** doing a binary rampup instead of single stepping.
1N/A** In random input, N in a row low should only happen with
1N/A** probability 2^(1-N), so we can risk that we are dealing
1N/A** with orderly input without paying much when we aren't.
1N/A*/
1N/A
1N/A#define RTHRESH (6)
1N/A
1N/A
1N/A/*
1N/A** Overview of algorithm and variables.
1N/A** The array of elements at list1 will be organized into runs of length 2,
1N/A** or runs of length >= 2 * PTHRESH. We only try to form long runs when
1N/A** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
1N/A**
1N/A** Unless otherwise specified, pair pointers address the first of two elements.
1N/A**
1N/A** b and b+1 are a pair that compare with sense ``sense''.
1N/A** b is the ``bottom'' of adjacent pairs that might form a longer run.
1N/A**
1N/A** p2 parallels b in the list2 array, where runs are defined by
1N/A** a pointer chain.
1N/A**
1N/A** t represents the ``top'' of the adjacent pairs that might extend
1N/A** the run beginning at b. Usually, t addresses a pair
1N/A** that compares with opposite sense from (b,b+1).
1N/A** However, it may also address a singleton element at the end of list1,
1N/A** or it may be equal to ``last'', the first element beyond list1.
1N/A**
1N/A** r addresses the Nth pair following b. If this would be beyond t,
1N/A** we back it off to t. Only when r is less than t do we consider the
1N/A** run long enough to consider checking.
1N/A**
1N/A** q addresses a pair such that the pairs at b through q already form a run.
1N/A** Often, q will equal b, indicating we only are sure of the pair itself.
1N/A** However, a search on the previous cycle may have revealed a longer run,
1N/A** so q may be greater than b.
1N/A**
1N/A** p is used to work back from a candidate r, trying to reach q,
1N/A** which would mean b through r would be a run. If we discover such a run,
1N/A** we start q at r and try to push it further towards t.
1N/A** If b through r is NOT a run, we detect the wrong order at (p-1,p).
1N/A** In any event, after the check (if any), we have two main cases.
1N/A**
1N/A** 1) Short run. b <= q < p <= r <= t.
1N/A** b through q is a run (perhaps trivial)
1N/A** q through p are uninteresting pairs
1N/A** p through r is a run
1N/A**
1N/A** 2) Long run. b < r <= q < t.
1N/A** b through q is a run (of length >= 2 * PTHRESH)
1N/A**
1N/A** Note that degenerate cases are not only possible, but likely.
1N/A** For example, if the pair following b compares with opposite sense,
1N/A** then b == q < p == r == t.
1N/A*/
1N/A
1N/A
/* dynprep() - first pass of the mergesort.
**
** Scan list1 for natural runs (ascending or descending), following the
** policy described in the overview comment above: pairs are taken as
** trivial runs of 2 unless at least PTHRESH consecutive pairs compare
** with the same sense, in which case a long run is recognized and
** extended as far as possible.  Descending runs are reversed in place;
** since a "descending" pair compares strictly (> 0), no equal elements
** are ever exchanged, so stability is preserved.
**
** Run boundaries are recorded as a pointer chain in the parallel array
** list2 (see NEXT()).  Returns the number of runs found; the caller
** then needs about log2(runs) merge levels to finish the sort.
*/
static IV
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, SVCOMPARE_t cmp)
{
    I32 sense;
    register gptr *b, *p, *q, *t, *p2;
    register gptr c, *last, *r;
    gptr *savep;
    IV runs = 0;

    b = list1;
    last = PINDEX(b, nmemb);
    /* sense of the first pair: 1 if descending, 0 if ascending */
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }
        q = b;
        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                /* work back from r toward q; if we reach q, b..r is a run */
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q));
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                savep = p;
                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                /* record the run boundary in the parallel array */
                p2 = NEXT(p2) = p2 + (p - b); ++runs;
                /* reverse a descending run in place so all recorded runs
                ** ascend; strict descent means no equal pairs are swapped */
                if (sense) while (b < --p) {
                    c = *b;
                    *b++ = *p;
                    *p = c;
                }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                } else q += 2;
            }
            /* a trailing singleton becomes a run of 1 */
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1; ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}
1N/A
1N/A
1N/A/* The original merge sort, in use since 5.7, was as fast as, or faster than,
1N/A * qsort on many platforms, but slower than qsort, conspicuously so,
1N/A * on others. The most likely explanation was platform-specific
1N/A * differences in cache sizes and relative speeds.
1N/A *
1N/A * The quicksort divide-and-conquer algorithm guarantees that, as the
1N/A * problem is subdivided into smaller and smaller parts, the parts
1N/A * fit into smaller (and faster) caches. So it doesn't matter how
1N/A * many levels of cache exist, quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
1N/A *
1N/A * By contrast, consider how the original mergesort algorithm worked.
1N/A * Suppose we have five runs (each typically of length 2 after dynprep).
1N/A *
1N/A * pass base aux
1N/A * 0 1 2 3 4 5
1N/A * 1 12 34 5
1N/A * 2 1234 5
1N/A * 3 12345
1N/A * 4 12345
1N/A *
1N/A * Adjacent pairs are merged in "grand sweeps" through the input.
1N/A * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
1N/A * runs 3 and 4 are merged and the runs from run 5 have been copied.
1N/A * The only cache that matters is one large enough to hold *all* the input.
1N/A * On some platforms, this may be many times slower than smaller caches.
1N/A *
1N/A * The following pseudo-code uses the same basic merge algorithm,
1N/A * but in a divide-and-conquer way.
1N/A *
1N/A * # merge $runs runs at offset $offset of list $list1 into $list2.
1N/A * # all unmerged runs ($runs == 1) originate in list $base.
1N/A * sub mgsort2 {
1N/A * my ($offset, $runs, $base, $list1, $list2) = @_;
1N/A *
1N/A * if ($runs == 1) {
1N/A * if ($list1 is $base) copy run to $list2
1N/A * return offset of end of list (or copy)
1N/A * } else {
1N/A * $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
1N/A * mgsort2($off2, $runs/2, $base, $list2, $list1)
1N/A * merge the adjacent runs at $offset of $list1 into $list2
1N/A * return the offset of the end of the merged runs
1N/A * }
1N/A * }
1N/A * mgsort2(0, $runs, $base, $aux, $base);
1N/A *
1N/A * For our 5 runs, the tree of calls looks like
1N/A *
1N/A * 5
1N/A * 3 2
1N/A * 2 1 1 1
1N/A * 1 1
1N/A *
1N/A * 1 2 3 4 5
1N/A *
1N/A * and the corresponding activity looks like
1N/A *
1N/A * copy runs 1 and 2 from base to aux
1N/A * merge runs 1 and 2 from aux to base
1N/A * (run 3 is where it belongs, no copy needed)
1N/A * merge runs 12 and 3 from base to aux
1N/A * (runs 4 and 5 are where they belong, no copy needed)
1N/A * merge runs 4 and 5 from base to aux
1N/A * merge runs 123 and 45 from aux to base
1N/A *
1N/A * Note that we merge runs 1 and 2 immediately after copying them,
1N/A * while they are still likely to be in fast cache. Similarly,
1N/A * run 3 is merged with run 12 while it still may be lingering in cache.
1N/A * This implementation should therefore enjoy much of the cache-friendly
1N/A * behavior that quicksort does. In addition, it does less copying
1N/A * than the original mergesort implementation (only runs 1 and 2 are copied)
1N/A * and the "balancing" of merges is better (merged runs comprise more nearly
1N/A * equal numbers of original runs).
1N/A *
1N/A * The actual cache-friendly implementation will use a pseudo-stack
1N/A * to avoid recursion, and will unroll processing of runs of length 2,
1N/A * but it is otherwise similar to the recursive implementation.
1N/A */
1N/A
/* Pseudo-stack element for the iterative divide-and-conquer merge:
** each entry describes a group of adjacent runs awaiting combination
** into one (see the recursive pseudo-code above).
*/
typedef struct {
    IV offset;          /* offset of 1st of 2 runs at this level */
    IV runs;            /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */
1N/A
/* S_mergesortsv() - stable, cache-friendly mergesort of nmemb SV
** pointers at base, ordered by cmp.
**
** dynprep() first carves the input into sorted runs (chained through
** the auxiliary array); runs are then merged pairwise in the
** divide-and-conquer order described in the long comment above, driven
** iteratively by a pseudo-stack of off_runs entries rather than by
** recursion, so recently-copied data is re-merged while still likely
** in cache.  The auxiliary array lives on the C stack for inputs of
** at most SMALLSORT elements, otherwise on the heap (freed at "done").
*/
STATIC void
S_mergesortsv(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp)
{
    IV i, run, runs, offset;
    I32 sense, level;
    int iwhich;
    register gptr *f1, *f2, *t, *b, *p, *tp2, *l1, *l2, *q;
    gptr *aux, *list1, *list2;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;

    if (nmemb <= 1) return;                     /* sorted trivially */
    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { New(799,aux,nmemb,gptr); }           /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    /* which[] maps (level & 1) to the array holding that level's runs;
    ** which[2] duplicates which[0] so which[++iwhich] below never
    ** indexes out of range. */
    which[0] = which[2] = base;
    which[1] = aux;
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up.  Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        if ((runs = stackp->runs) == 0) {
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                offset = stackp->offset;
                f1 = p1 = list1 + offset;       /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1.  Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison.  When we want to know
                    ** if ``q is strictly less than the other'',
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1.  So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */

                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    } else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }

                    /* ramp up
                    **
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    ** Step size i doubles (galloping) only after RTHRESH
                    ** single steps in a row go low.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                 t = p;
                            else b = p;
                            break;
                        } else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        } else b = p;
                        if (++run >= RTHRESH) i += i;
                    }

                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */

                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        } else b = p + 1;
                    }

                    /* Copy all the strictly low elements */

                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    } else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }

                /* Run out remaining list */
                if (f1 == l1) {
                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                } else FROMTOUPTO(f1, tp2, l1);
                /* chain the merged run's end in the parallel array */
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                --stackp;
                t = list1; list1 = list2; list2 = t;    /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }


        stackp->runs = 0;       /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
        while (runs > 2) {
            ++level;
            ++stackp;
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        }
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
        iwhich = level & 1;
        if (runs == 1) {
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3.  In neither event
             * is it necessary to set offset.  It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {       /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            } else if (level == 0) goto done;   /* single run at level 0 */
        } else {
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
            ++level;
            ++stackp;
            stackp->offset = offset;
            stackp->runs = 0;   /* take care of both runs, trigger merge */
            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
                f1 = b = PINDEX(base, offset);  /* where first run starts */
                f2 = PINDEX(aux, offset);       /* where it will be copied */
                t = NEXT(f2);                   /* where first run will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                p = PINDEX(base, offset);       /* end of first run */
                t = NEXT(t);                    /* where second run will end */
                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
                FROMTOUPTO(f1, f2, t);          /* copy both runs */
                NEXT(b) = p;                    /* parallel pointer for 1st */
                NEXT(p) = t;                    /* ... and for second */
            }
        }
    }
done:
    if (aux != small) Safefree(aux);    /* free iff allocated */
    return;
}
1N/A
1N/A/*
1N/A * The quicksort implementation was derived from source code contributed
1N/A * by Tom Horsley.
1N/A *
1N/A * NOTE: this code was derived from Tom Horsley's qsort replacement
1N/A * and should not be confused with the original code.
1N/A */
1N/A
1N/A/* Copyright (C) Tom Horsley, 1997. All rights reserved.
1N/A
1N/A Permission granted to distribute under the same terms as perl which are
1N/A (briefly):
1N/A
1N/A This program is free software; you can redistribute it and/or modify
1N/A it under the terms of either:
1N/A
1N/A a) the GNU General Public License as published by the Free
1N/A Software Foundation; either version 1, or (at your option) any
1N/A later version, or
1N/A
1N/A b) the "Artistic License" which comes with this Kit.
1N/A
1N/A Details on the perl license can be found in the perl source code which
1N/A may be located via the www.perl.com web page.
1N/A
1N/A This is the most wonderfulest possible qsort I can come up with (and
1N/A still be mostly portable) My (limited) tests indicate it consistently
1N/A does about 20% fewer calls to compare than does the qsort in the Visual
1N/A C++ library, other vendors may vary.
1N/A
1N/A Some of the ideas in here can be found in "Algorithms" by Sedgewick,
1N/A others I invented myself (or more likely re-invented since they seemed
1N/A pretty obvious once I watched the algorithm operate for a while).
1N/A
1N/A Most of this code was written while watching the Marlins sweep the Giants
1N/A in the 1997 National League Playoffs - no Braves fans allowed to use this
1N/A code (just kidding :-).
1N/A
1N/A I realize that if I wanted to be true to the perl tradition, the only
1N/A comment in this file would be something like:
1N/A
1N/A ...they shuffled back towards the rear of the line. 'No, not at the
1N/A rear!' the slave-driver shouted. 'Three files up. And stay there...
1N/A
1N/A However, I really needed to violate that tradition just so I could keep
1N/A track of what happens myself, not to mention some poor fool trying to
1N/A understand this years from now :-).
1N/A*/
1N/A
1N/A/* ********************************************************** Configuration */
1N/A
1N/A#ifndef QSORT_ORDER_GUESS
1N/A#define QSORT_ORDER_GUESS 2 /* Select doubling version of the netBSD trick */
1N/A#endif
1N/A
1N/A/* QSORT_MAX_STACK is the largest number of partitions that can be stacked up for
1N/A future processing - a good max upper bound is log base 2 of memory size
1N/A (32 on 32 bit machines, 64 on 64 bit machines, etc). In reality can
1N/A safely be smaller than that since the program is taking up some space and
1N/A most operating systems only let you grab some subset of contiguous
1N/A memory (not to mention that you are normally sorting data larger than
1N/A 1 byte element size :-).
1N/A*/
1N/A#ifndef QSORT_MAX_STACK
1N/A#define QSORT_MAX_STACK 32
1N/A#endif
1N/A
1N/A/* QSORT_BREAK_EVEN is the size of the largest partition we should insertion sort.
1N/A Anything bigger and we use qsort. If you make this too small, the qsort
1N/A will probably break (or become less efficient), because it doesn't expect
1N/A the middle element of a partition to be the same as the right or left -
1N/A you have been warned).
1N/A*/
1N/A#ifndef QSORT_BREAK_EVEN
1N/A#define QSORT_BREAK_EVEN 6
1N/A#endif
1N/A
1N/A/* QSORT_PLAY_SAFE is the size of the largest partition we're willing
1N/A to go quadratic on. We innoculate larger partitions against
1N/A quadratic behavior by shuffling them before sorting. This is not
1N/A an absolute guarantee of non-quadratic behavior, but it would take
1N/A staggeringly bad luck to pick extreme elements as the pivot
1N/A from randomized data.
1N/A*/
1N/A#ifndef QSORT_PLAY_SAFE
1N/A#define QSORT_PLAY_SAFE 255
1N/A#endif
1N/A
1N/A/* ************************************************************* Data Types */
1N/A
1N/A/* hold left and right index values of a partition waiting to be sorted (the
1N/A partition includes both left and right - right is NOT one past the end or
1N/A anything like that).
1N/A*/
1N/Astruct partition_stack_entry {
1N/A int left;
1N/A int right;
1N/A#ifdef QSORT_ORDER_GUESS
1N/A int qsort_break_even;
1N/A#endif
1N/A};
1N/A
1N/A/* ******************************************************* Shorthand Macros */
1N/A
1N/A/* Note that these macros will be used from inside the qsort function where
1N/A we happen to know that the variable 'elt_size' contains the size of an
1N/A array element and the variable 'temp' points to enough space to hold a
1N/A temp element and the variable 'array' points to the array being sorted
1N/A and 'compare' is the pointer to the compare routine.
1N/A
1N/A Also note that there are very many highly architecture specific ways
1N/A these might be sped up, but this is simply the most generally portable
1N/A code I could think of.
1N/A*/
1N/A
1N/A/* Return < 0 == 0 or > 0 as the value of elt1 is < elt2, == elt2, > elt2
1N/A*/
1N/A#define qsort_cmp(elt1, elt2) \
1N/A ((*compare)(aTHX_ array[elt1], array[elt2]))
1N/A
1N/A#ifdef QSORT_ORDER_GUESS
1N/A#define QSORT_NOTICE_SWAP swapped++;
1N/A#else
1N/A#define QSORT_NOTICE_SWAP
1N/A#endif
1N/A
1N/A/* swaps contents of array elements elt1, elt2.
1N/A*/
1N/A#define qsort_swap(elt1, elt2) \
1N/A STMT_START { \
1N/A QSORT_NOTICE_SWAP \
1N/A temp = array[elt1]; \
1N/A array[elt1] = array[elt2]; \
1N/A array[elt2] = temp; \
1N/A } STMT_END
1N/A
1N/A/* rotate contents of elt1, elt2, elt3 such that elt1 gets elt2, elt2 gets
1N/A elt3 and elt3 gets elt1.
1N/A*/
1N/A#define qsort_rotate(elt1, elt2, elt3) \
1N/A STMT_START { \
1N/A QSORT_NOTICE_SWAP \
1N/A temp = array[elt1]; \
1N/A array[elt1] = array[elt2]; \
1N/A array[elt2] = array[elt3]; \
1N/A array[elt3] = temp; \
1N/A } STMT_END
1N/A
1N/A/* ************************************************************ Debug stuff */
1N/A
1N/A#ifdef QSORT_DEBUG
1N/A
/* Debug-only no-op: a convenient target for a debugger breakpoint
** when a qsort_assert() fails (see the macro below).
**
** Fix: declare the parameter list as (void) -- an empty () is an
** old-style non-prototype declaration in C and disables argument
** checking.  No caller passes arguments, so this is fully compatible.
*/
static void
break_here(void)
{
    return;     /* good place to set a breakpoint */
}
1N/A
/* Debug assertion: on failure, call break_here() (a breakpoint anchor)
** instead of aborting, then continue. */
#define qsort_assert(t) (void)( (t) || (break_here(), 0) )

/* doqsort_all_asserts() - verify partition invariants during a debug
** qsort run.  Given the pivot chunk [pc_left, pc_right] and the
** uncompared-region boundaries u_right (left side) / u_left (right
** side), check that every already-placed element left of the chunk
** compares strictly low, the chunk itself is all-equal, and every
** already-placed element right of the chunk compares strictly high.
** See the partition diagram in S_qsortsvu() for the layout.
*/
static void
doqsort_all_asserts(
    void * array,
    size_t num_elts,
    size_t elt_size,
    int (*compare)(const void * elt1, const void * elt2),
    int pc_left, int pc_right, int u_left, int u_right)
{
    int i;

    qsort_assert(pc_left <= pc_right);
    qsort_assert(u_right < pc_left);
    qsort_assert(pc_right < u_left);
    for (i = u_right + 1; i < pc_left; ++i) {
        /* settled elements below the chunk are strictly less */
        qsort_assert(qsort_cmp(i, pc_left) < 0);
    }
    for (i = pc_left; i < pc_right; ++i) {
        /* the pivot chunk is all-equal */
        qsort_assert(qsort_cmp(i, pc_right) == 0);
    }
    for (i = pc_right + 1; i < u_left; ++i) {
        /* settled elements above the chunk are strictly greater */
        qsort_assert(qsort_cmp(pc_right, i) < 0);
    }
}
1N/A
1N/A#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) \
1N/A doqsort_all_asserts(array, num_elts, elt_size, compare, \
1N/A PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT)
1N/A
1N/A#else
1N/A
1N/A#define qsort_assert(t) ((void)0)
1N/A
1N/A#define qsort_all_asserts(PC_LEFT, PC_RIGHT, U_LEFT, U_RIGHT) ((void)0)
1N/A
1N/A#endif
1N/A
1N/A/* ****************************************************************** qsort */
1N/A
1N/ASTATIC void /* the standard unstable (u) quicksort (qsort) */
1N/AS_qsortsvu(pTHX_ SV ** array, size_t num_elts, SVCOMPARE_t compare)
1N/A{
1N/A register SV * temp;
1N/A
1N/A struct partition_stack_entry partition_stack[QSORT_MAX_STACK];
1N/A int next_stack_entry = 0;
1N/A
1N/A int part_left;
1N/A int part_right;
1N/A#ifdef QSORT_ORDER_GUESS
1N/A int qsort_break_even;
1N/A int swapped;
1N/A#endif
1N/A
1N/A /* Make sure we actually have work to do.
1N/A */
1N/A if (num_elts <= 1) {
1N/A return;
1N/A }
1N/A
1N/A /* Innoculate large partitions against quadratic behavior */
1N/A if (num_elts > QSORT_PLAY_SAFE) {
1N/A register size_t n, j;
1N/A register SV **q;
1N/A for (n = num_elts, q = array; n > 1; ) {
1N/A j = (size_t)(n-- * Drand01());
1N/A temp = q[j];
1N/A q[j] = q[n];
1N/A q[n] = temp;
1N/A }
1N/A }
1N/A
1N/A /* Setup the initial partition definition and fall into the sorting loop
1N/A */
1N/A part_left = 0;
1N/A part_right = (int)(num_elts - 1);
1N/A#ifdef QSORT_ORDER_GUESS
1N/A qsort_break_even = QSORT_BREAK_EVEN;
1N/A#else
1N/A#define qsort_break_even QSORT_BREAK_EVEN
1N/A#endif
1N/A for ( ; ; ) {
1N/A if ((part_right - part_left) >= qsort_break_even) {
1N/A /* OK, this is gonna get hairy, so lets try to document all the
1N/A concepts and abbreviations and variables and what they keep
1N/A track of:
1N/A
1N/A pc: pivot chunk - the set of array elements we accumulate in the
1N/A middle of the partition, all equal in value to the original
1N/A pivot element selected. The pc is defined by:
1N/A
1N/A pc_left - the leftmost array index of the pc
1N/A pc_right - the rightmost array index of the pc
1N/A
1N/A we start with pc_left == pc_right and only one element
1N/A in the pivot chunk (but it can grow during the scan).
1N/A
1N/A u: uncompared elements - the set of elements in the partition
1N/A we have not yet compared to the pivot value. There are two
1N/A uncompared sets during the scan - one to the left of the pc
1N/A and one to the right.
1N/A
1N/A u_right - the rightmost index of the left side's uncompared set
1N/A u_left - the leftmost index of the right side's uncompared set
1N/A
1N/A The leftmost index of the left sides's uncompared set
1N/A doesn't need its own variable because it is always defined
1N/A by the leftmost edge of the whole partition (part_left). The
1N/A same goes for the rightmost edge of the right partition
1N/A (part_right).
1N/A
1N/A We know there are no uncompared elements on the left once we
1N/A get u_right < part_left and no uncompared elements on the
1N/A right once u_left > part_right. When both these conditions
1N/A are met, we have completed the scan of the partition.
1N/A
1N/A Any elements which are between the pivot chunk and the
1N/A uncompared elements should be less than the pivot value on
1N/A the left side and greater than the pivot value on the right
1N/A side (in fact, the goal of the whole algorithm is to arrange
1N/A for that to be true and make the groups of less-than and
1N/A greater-then elements into new partitions to sort again).
1N/A
1N/A As you marvel at the complexity of the code and wonder why it
1N/A has to be so confusing. Consider some of the things this level
1N/A of confusion brings:
1N/A
1N/A Once I do a compare, I squeeze every ounce of juice out of it. I
1N/A never do compare calls I don't have to do, and I certainly never
1N/A do redundant calls.
1N/A
1N/A I also never swap any elements unless I can prove there is a
1N/A good reason. Many sort algorithms will swap a known value with
1N/A an uncompared value just to get things in the right place (or
1N/A avoid complexity :-), but that uncompared value, once it gets
1N/A compared, may then have to be swapped again. A lot of the
1N/A complexity of this code is due to the fact that it never swaps
1N/A anything except compared values, and it only swaps them when the
1N/A compare shows they are out of position.
1N/A */
1N/A int pc_left, pc_right;
1N/A int u_right, u_left;
1N/A
1N/A int s;
1N/A
1N/A pc_left = ((part_left + part_right) / 2);
1N/A pc_right = pc_left;
1N/A u_right = pc_left - 1;
1N/A u_left = pc_right + 1;
1N/A
1N/A /* Qsort works best when the pivot value is also the median value
1N/A in the partition (unfortunately you can't find the median value
1N/A without first sorting :-), so to give the algorithm a helping
1N/A hand, we pick 3 elements and sort them and use the median value
1N/A of that tiny set as the pivot value.
1N/A
1N/A Some versions of qsort like to use the left middle and right as
1N/A the 3 elements to sort so they can insure the ends of the
1N/A partition will contain values which will stop the scan in the
1N/A compare loop, but when you have to call an arbitrarily complex
1N/A routine to do a compare, its really better to just keep track of
1N/A array index values to know when you hit the edge of the
1N/A partition and avoid the extra compare. An even better reason to
1N/A avoid using a compare call is the fact that you can drop off the
1N/A edge of the array if someone foolishly provides you with an
1N/A unstable compare function that doesn't always provide consistent
1N/A results.
1N/A
1N/A So, since it is simpler for us to compare the three adjacent
1N/A elements in the middle of the partition, those are the ones we
1N/A pick here (conveniently pointed at by u_right, pc_left, and
1N/A u_left). The values of the left, center, and right elements
1N/A are refered to as l c and r in the following comments.
1N/A */
1N/A
1N/A#ifdef QSORT_ORDER_GUESS
1N/A swapped = 0;
1N/A#endif
1N/A s = qsort_cmp(u_right, pc_left);
1N/A if (s < 0) {
1N/A /* l < c */
1N/A s = qsort_cmp(pc_left, u_left);
1N/A /* if l < c, c < r - already in order - nothing to do */
1N/A if (s == 0) {
1N/A /* l < c, c == r - already in order, pc grows */
1N/A ++pc_right;
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else if (s > 0) {
1N/A /* l < c, c > r - need to know more */
1N/A s = qsort_cmp(u_right, u_left);
1N/A if (s < 0) {
1N/A /* l < c, c > r, l < r - swap c & r to get ordered */
1N/A qsort_swap(pc_left, u_left);
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else if (s == 0) {
1N/A /* l < c, c > r, l == r - swap c&r, grow pc */
1N/A qsort_swap(pc_left, u_left);
1N/A --pc_left;
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else {
1N/A /* l < c, c > r, l > r - make lcr into rlc to get ordered */
1N/A qsort_rotate(pc_left, u_right, u_left);
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A }
1N/A }
1N/A } else if (s == 0) {
1N/A /* l == c */
1N/A s = qsort_cmp(pc_left, u_left);
1N/A if (s < 0) {
1N/A /* l == c, c < r - already in order, grow pc */
1N/A --pc_left;
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else if (s == 0) {
1N/A /* l == c, c == r - already in order, grow pc both ways */
1N/A --pc_left;
1N/A ++pc_right;
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else {
1N/A /* l == c, c > r - swap l & r, grow pc */
1N/A qsort_swap(u_right, u_left);
1N/A ++pc_right;
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A }
1N/A } else {
1N/A /* l > c */
1N/A s = qsort_cmp(pc_left, u_left);
1N/A if (s < 0) {
1N/A /* l > c, c < r - need to know more */
1N/A s = qsort_cmp(u_right, u_left);
1N/A if (s < 0) {
1N/A /* l > c, c < r, l < r - swap l & c to get ordered */
1N/A qsort_swap(u_right, pc_left);
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else if (s == 0) {
1N/A /* l > c, c < r, l == r - swap l & c, grow pc */
1N/A qsort_swap(u_right, pc_left);
1N/A ++pc_right;
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else {
1N/A /* l > c, c < r, l > r - rotate lcr into crl to order */
1N/A qsort_rotate(u_right, pc_left, u_left);
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A }
1N/A } else if (s == 0) {
1N/A /* l > c, c == r - swap ends, grow pc */
1N/A qsort_swap(u_right, u_left);
1N/A --pc_left;
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A } else {
1N/A /* l > c, c > r - swap ends to get in order */
1N/A qsort_swap(u_right, u_left);
1N/A qsort_all_asserts(pc_left, pc_right, u_left + 1, u_right - 1);
1N/A }
1N/A }
1N/A /* We now know the 3 middle elements have been compared and
1N/A arranged in the desired order, so we can shrink the uncompared
1N/A sets on both sides
1N/A */
1N/A --u_right;
1N/A ++u_left;
1N/A qsort_all_asserts(pc_left, pc_right, u_left, u_right);
1N/A
1N/A /* The above massive nested if was the simple part :-). We now have
1N/A the middle 3 elements ordered and we need to scan through the
1N/A uncompared sets on either side, swapping elements that are on
1N/A the wrong side or simply shuffling equal elements around to get
1N/A all equal elements into the pivot chunk.
1N/A */
1N/A
1N/A for ( ; ; ) {
1N/A int still_work_on_left;
1N/A int still_work_on_right;
1N/A
1N/A /* Scan the uncompared values on the left. If I find a value
1N/A equal to the pivot value, move it over so it is adjacent to
1N/A the pivot chunk and expand the pivot chunk. If I find a value
1N/A less than the pivot value, then just leave it - its already
1N/A on the correct side of the partition. If I find a greater
1N/A value, then stop the scan.
1N/A */
1N/A while ((still_work_on_left = (u_right >= part_left))) {
1N/A s = qsort_cmp(u_right, pc_left);
1N/A if (s < 0) {
1N/A --u_right;
1N/A } else if (s == 0) {
1N/A --pc_left;
1N/A if (pc_left != u_right) {
1N/A qsort_swap(u_right, pc_left);
1N/A }
1N/A --u_right;
1N/A } else {
1N/A break;
1N/A }
1N/A qsort_assert(u_right < pc_left);
1N/A qsort_assert(pc_left <= pc_right);
1N/A qsort_assert(qsort_cmp(u_right + 1, pc_left) <= 0);
1N/A qsort_assert(qsort_cmp(pc_left, pc_right) == 0);
1N/A }
1N/A
1N/A /* Do a mirror image scan of uncompared values on the right
1N/A */
1N/A while ((still_work_on_right = (u_left <= part_right))) {
1N/A s = qsort_cmp(pc_right, u_left);
1N/A if (s < 0) {
1N/A ++u_left;
1N/A } else if (s == 0) {
1N/A ++pc_right;
1N/A if (pc_right != u_left) {
1N/A qsort_swap(pc_right, u_left);
1N/A }
1N/A ++u_left;
1N/A } else {
1N/A break;
1N/A }
1N/A qsort_assert(u_left > pc_right);
1N/A qsort_assert(pc_left <= pc_right);
1N/A qsort_assert(qsort_cmp(pc_right, u_left - 1) <= 0);
1N/A qsort_assert(qsort_cmp(pc_left, pc_right) == 0);
1N/A }
1N/A
1N/A if (still_work_on_left) {
1N/A /* I know I have a value on the left side which needs to be
1N/A on the right side, but I need to know more to decide
1N/A exactly the best thing to do with it.
1N/A */
1N/A if (still_work_on_right) {
1N/A /* I know I have values on both side which are out of
1N/A position. This is a big win because I kill two birds
1N/A with one swap (so to speak). I can advance the
1N/A uncompared pointers on both sides after swapping both
1N/A of them into the right place.
1N/A */
1N/A qsort_swap(u_right, u_left);
1N/A --u_right;
1N/A ++u_left;
1N/A qsort_all_asserts(pc_left, pc_right, u_left, u_right);
1N/A } else {
1N/A /* I have an out of position value on the left, but the
1N/A right is fully scanned, so I "slide" the pivot chunk
1N/A and any less-than values left one to make room for the
1N/A greater value over on the right. If the out of position
1N/A value is immediately adjacent to the pivot chunk (there
1N/A are no less-than values), I can do that with a swap,
1N/A otherwise, I have to rotate one of the less than values
1N/A into the former position of the out of position value
1N/A and the right end of the pivot chunk into the left end
1N/A (got all that?).
1N/A */
1N/A --pc_left;
1N/A if (pc_left == u_right) {
1N/A qsort_swap(u_right, pc_right);
1N/A qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1);
1N/A } else {
1N/A qsort_rotate(u_right, pc_left, pc_right);
1N/A qsort_all_asserts(pc_left, pc_right-1, u_left, u_right-1);
1N/A }
1N/A --pc_right;
1N/A --u_right;
1N/A }
1N/A } else if (still_work_on_right) {
1N/A /* Mirror image of complex case above: I have an out of
1N/A position value on the right, but the left is fully
1N/A scanned, so I need to shuffle things around to make room
1N/A for the right value on the left.
1N/A */
1N/A ++pc_right;
1N/A if (pc_right == u_left) {
1N/A qsort_swap(u_left, pc_left);
1N/A qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right);
1N/A } else {
1N/A qsort_rotate(pc_right, pc_left, u_left);
1N/A qsort_all_asserts(pc_left+1, pc_right, u_left+1, u_right);
1N/A }
1N/A ++pc_left;
1N/A ++u_left;
1N/A } else {
1N/A /* No more scanning required on either side of partition,
1N/A break out of loop and figure out next set of partitions
1N/A */
1N/A break;
1N/A }
1N/A }
1N/A
1N/A /* The elements in the pivot chunk are now in the right place. They
1N/A will never move or be compared again. All I have to do is decide
1N/A what to do with the stuff to the left and right of the pivot
1N/A chunk.
1N/A
1N/A Notes on the QSORT_ORDER_GUESS ifdef code:
1N/A
1N/A 1. If I just built these partitions without swapping any (or
1N/A very many) elements, there is a chance that the elements are
1N/A already ordered properly (being properly ordered will
1N/A certainly result in no swapping, but the converse can't be
1N/A proved :-).
1N/A
1N/A 2. A (properly written) insertion sort will run faster on
1N/A already ordered data than qsort will.
1N/A
1N/A 3. Perhaps there is some way to make a good guess about
1N/A switching to an insertion sort earlier than partition size 6
1N/A (for instance - we could save the partition size on the stack
1N/A and increase the size each time we find we didn't swap, thus
1N/A switching to insertion sort earlier for partitions with a
1N/A history of not swapping).
1N/A
1N/A 4. Naturally, if I just switch right away, it will make
1N/A artificial benchmarks with pure ascending (or descending)
1N/A data look really good, but is that a good reason in general?
1N/A Hard to say...
1N/A */
1N/A
1N/A#ifdef QSORT_ORDER_GUESS
1N/A if (swapped < 3) {
1N/A#if QSORT_ORDER_GUESS == 1
1N/A qsort_break_even = (part_right - part_left) + 1;
1N/A#endif
1N/A#if QSORT_ORDER_GUESS == 2
1N/A qsort_break_even *= 2;
1N/A#endif
1N/A#if QSORT_ORDER_GUESS == 3
1N/A int prev_break = qsort_break_even;
1N/A qsort_break_even *= qsort_break_even;
1N/A if (qsort_break_even < prev_break) {
1N/A qsort_break_even = (part_right - part_left) + 1;
1N/A }
1N/A#endif
1N/A } else {
1N/A qsort_break_even = QSORT_BREAK_EVEN;
1N/A }
1N/A#endif
1N/A
1N/A if (part_left < pc_left) {
1N/A /* There are elements on the left which need more processing.
1N/A Check the right as well before deciding what to do.
1N/A */
1N/A if (pc_right < part_right) {
1N/A /* We have two partitions to be sorted. Stack the biggest one
1N/A and process the smallest one on the next iteration. This
	      minimizes the stack height by ensuring that any additional
1N/A stack entries must come from the smallest partition which
1N/A (because it is smallest) will have the fewest
1N/A opportunities to generate additional stack entries.
1N/A */
1N/A if ((part_right - pc_right) > (pc_left - part_left)) {
1N/A /* stack the right partition, process the left */
1N/A partition_stack[next_stack_entry].left = pc_right + 1;
1N/A partition_stack[next_stack_entry].right = part_right;
1N/A#ifdef QSORT_ORDER_GUESS
1N/A partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
1N/A#endif
1N/A part_right = pc_left - 1;
1N/A } else {
1N/A /* stack the left partition, process the right */
1N/A partition_stack[next_stack_entry].left = part_left;
1N/A partition_stack[next_stack_entry].right = pc_left - 1;
1N/A#ifdef QSORT_ORDER_GUESS
1N/A partition_stack[next_stack_entry].qsort_break_even = qsort_break_even;
1N/A#endif
1N/A part_left = pc_right + 1;
1N/A }
1N/A qsort_assert(next_stack_entry < QSORT_MAX_STACK);
1N/A ++next_stack_entry;
1N/A } else {
1N/A /* The elements on the left are the only remaining elements
1N/A that need sorting, arrange for them to be processed as the
1N/A next partition.
1N/A */
1N/A part_right = pc_left - 1;
1N/A }
1N/A } else if (pc_right < part_right) {
1N/A /* There is only one chunk on the right to be sorted, make it
1N/A the new partition and loop back around.
1N/A */
1N/A part_left = pc_right + 1;
1N/A } else {
1N/A /* This whole partition wound up in the pivot chunk, so
1N/A we need to get a new partition off the stack.
1N/A */
1N/A if (next_stack_entry == 0) {
1N/A /* the stack is empty - we are done */
1N/A break;
1N/A }
1N/A --next_stack_entry;
1N/A part_left = partition_stack[next_stack_entry].left;
1N/A part_right = partition_stack[next_stack_entry].right;
1N/A#ifdef QSORT_ORDER_GUESS
1N/A qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
1N/A#endif
1N/A }
1N/A } else {
1N/A /* This partition is too small to fool with qsort complexity, just
1N/A do an ordinary insertion sort to minimize overhead.
1N/A */
1N/A int i;
1N/A /* Assume 1st element is in right place already, and start checking
1N/A at 2nd element to see where it should be inserted.
1N/A */
1N/A for (i = part_left + 1; i <= part_right; ++i) {
1N/A int j;
1N/A /* Scan (backwards - just in case 'i' is already in right place)
1N/A through the elements already sorted to see if the ith element
1N/A belongs ahead of one of them.
1N/A */
1N/A for (j = i - 1; j >= part_left; --j) {
1N/A if (qsort_cmp(i, j) >= 0) {
1N/A /* i belongs right after j
1N/A */
1N/A break;
1N/A }
1N/A }
1N/A ++j;
1N/A if (j != i) {
1N/A /* Looks like we really need to move some things
1N/A */
1N/A int k;
1N/A temp = array[i];
1N/A for (k = i - 1; k >= j; --k)
1N/A array[k + 1] = array[k];
1N/A array[j] = temp;
1N/A }
1N/A }
1N/A
1N/A /* That partition is now sorted, grab the next one, or get out
1N/A of the loop if there aren't any more.
1N/A */
1N/A
1N/A if (next_stack_entry == 0) {
1N/A /* the stack is empty - we are done */
1N/A break;
1N/A }
1N/A --next_stack_entry;
1N/A part_left = partition_stack[next_stack_entry].left;
1N/A part_right = partition_stack[next_stack_entry].right;
1N/A#ifdef QSORT_ORDER_GUESS
1N/A qsort_break_even = partition_stack[next_stack_entry].qsort_break_even;
1N/A#endif
1N/A }
1N/A }
1N/A
1N/A /* Believe it or not, the array is sorted at this point! */
1N/A}
1N/A
1N/A/* Stabilize what is, presumably, an otherwise unstable sort method.
1N/A * We do that by allocating (or having on hand) an array of pointers
1N/A * that is the same size as the original array of elements to be sorted.
1N/A * We initialize this parallel array with the addresses of the original
1N/A * array elements. This indirection can make you crazy.
1N/A * Some pictures can help. After initializing, we have
1N/A *
1N/A * indir list1
1N/A * +----+ +----+
1N/A * | | --------------> | | ------> first element to be sorted
1N/A * +----+ +----+
1N/A * | | --------------> | | ------> second element to be sorted
1N/A * +----+ +----+
1N/A * | | --------------> | | ------> third element to be sorted
1N/A * +----+ +----+
1N/A * ...
1N/A * +----+ +----+
1N/A * | | --------------> | | ------> n-1st element to be sorted
1N/A * +----+ +----+
1N/A * | | --------------> | | ------> n-th element to be sorted
1N/A * +----+ +----+
1N/A *
1N/A * During the sort phase, we leave the elements of list1 where they are,
1N/A * and sort the pointers in the indirect array in the same order determined
1N/A * by the original comparison routine on the elements pointed to.
1N/A * Because we don't move the elements of list1 around through
1N/A * this phase, we can break ties on elements that compare equal
 * using their address in the list1 array, ensuring stability.
1N/A * This leaves us with something looking like
1N/A *
1N/A * indir list1
1N/A * +----+ +----+
1N/A * | | --+ +---> | | ------> first element to be sorted
1N/A * +----+ | | +----+
1N/A * | | --|-------|---> | | ------> second element to be sorted
1N/A * +----+ | | +----+
1N/A * | | --|-------+ +-> | | ------> third element to be sorted
1N/A * +----+ | | +----+
1N/A * ...
1N/A * +----+ | | | | +----+
1N/A * | | ---|-+ | +--> | | ------> n-1st element to be sorted
1N/A * +----+ | | +----+
1N/A * | | ---+ +----> | | ------> n-th element to be sorted
1N/A * +----+ +----+
1N/A *
1N/A * where the i-th element of the indirect array points to the element
1N/A * that should be i-th in the sorted array. After the sort phase,
1N/A * we have to put the elements of list1 into the places
1N/A * dictated by the indirect array.
1N/A */
1N/A
1N/A
1N/Astatic I32
1N/Acmpindir(pTHX_ gptr a, gptr b)
1N/A{
1N/A I32 sense;
1N/A gptr *ap = (gptr *)a;
1N/A gptr *bp = (gptr *)b;
1N/A
1N/A if ((sense = PL_sort_RealCmp(aTHX_ *ap, *bp)) == 0)
1N/A sense = (ap > bp) ? 1 : ((ap < bp) ? -1 : 0);
1N/A return sense;
1N/A}
1N/A
/* Sort list1 in place with qsort.  When the "stable" sort hint is in
 * effect, sort indirectly through an array of pointers so ties can be
 * broken by original position (see the big diagram comment above);
 * otherwise hand the array straight to the unstable S_qsortsvu.
 *
 * list1 - array of elements to sort in place
 * nmemb - number of elements
 * cmp   - element comparison routine
 */
STATIC void
S_qsortsv(pTHX_ gptr *list1, size_t nmemb, SVCOMPARE_t cmp)
{
    SV *hintsv;

    if (SORTHINTS(hintsv) & HINT_SORT_STABLE) {
	 register gptr **pp, *q;
	 register size_t n, j, i;
	 gptr *small[SMALLSORT], **indir, tmp;
	 SVCOMPARE_t savecmp;
	 if (nmemb <= 1) return;     /* sorted trivially */

	 /* Small arrays can use the stack, big ones must be allocated */
	 if (nmemb <= SMALLSORT) indir = small;
	 else { New(1799, indir, nmemb, gptr *); }

	 /* Copy pointers to original array elements into indirect array */
	 for (n = nmemb, pp = indir, q = list1; n--; ) *pp++ = q++;

	 savecmp = PL_sort_RealCmp;	/* Save current comparison routine, if any */
	 PL_sort_RealCmp = cmp;	/* Put comparison routine where cmpindir can find it */

	 /* sort, with indirection */
	 S_qsortsvu(aTHX_ (gptr *)indir, nmemb, cmpindir);

	 /* Now apply the permutation recorded in indir to list1 itself,
	  * following each cycle so every element moves at most once. */
	 pp = indir;
	 q = list1;
	 for (n = nmemb; n--; ) {
	      /* Assert A: all elements of q with index > n are already
	       * in place.  This is vacuously true at the start, and we
	       * put element n where it belongs below (if it wasn't
	       * already where it belonged).  Assert B: we only move
	       * elements that aren't where they belong,
	       * so, by A, we never tamper with elements above n.
	       */
	      j = pp[n] - q;	/* This sets j so that q[j] is
				 * at pp[n].  *pp[j] belongs in
				 * q[j], by construction.
				 */
	      if (n != j) {	/* all's well if n == j */
		   tmp = q[j];	/* save what's in q[j] */
		   do {
			q[j] = *pp[j];	/* put *pp[j] where it belongs */
			i = pp[j] - q;	/* the index in q of the element
					 * just moved */
			pp[j] = q + j;	/* this is ok now */
		   } while ((j = i) != n);
		   /* There are only finitely many (nmemb) addresses
		    * in the pp array.
		    * So we must eventually revisit an index we saw before.
		    * Suppose the first revisited index is k != n.
		    * An index is visited because something else belongs there.
		    * If we visit k twice, then two different elements must
		    * belong in the same place, which cannot be.
		    * So j must get back to n, the loop terminates,
		    * and we put the saved element where it belongs.
		    */
		   q[n] = tmp;	/* put what belongs into
				 * the n-th element */
	      }
	 }

	 /* free iff allocated */
	 if (indir != small) { Safefree(indir); }
	 /* restore prevailing comparison routine */
	 PL_sort_RealCmp = savecmp;
    } else {
	 S_qsortsvu(aTHX_ list1, nmemb, cmp);
    }
}
1N/A
1N/A/*
1N/A=head1 Array Manipulation Functions
1N/A
1N/A=for apidoc sortsv
1N/A
1N/ASort an array. Here is an example:
1N/A
1N/A sortsv(AvARRAY(av), av_len(av)+1, Perl_sv_cmp_locale);
1N/A
1N/ASee lib/sort.pm for details about controlling the sorting algorithm.
1N/A
1N/A=cut
1N/A*/
1N/A
1N/Avoid
1N/APerl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
1N/A{
1N/A void (*sortsvp)(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp) =
1N/A S_mergesortsv;
1N/A SV *hintsv;
1N/A I32 hints;
1N/A
1N/A /* Sun's Compiler (cc: WorkShop Compilers 4.2 30 Oct 1996 C 4.2) used
1N/A to miscompile this function under optimization -O. If you get test
1N/A errors related to picking the correct sort() function, try recompiling
1N/A this file without optimiziation. -- A.D. 4/2002.
1N/A */
1N/A hints = SORTHINTS(hintsv);
1N/A if (hints & HINT_SORT_QUICKSORT) {
1N/A sortsvp = S_qsortsv;
1N/A }
1N/A else {
1N/A /* The default as of 5.8.0 is mergesort */
1N/A sortsvp = S_mergesortsv;
1N/A }
1N/A
1N/A sortsvp(aTHX_ array, nmemb, cmp);
1N/A}
1N/A
/* pp_sort: runtime implementation of the sort() builtin.  Handles the
 * plain string sort, the optimizer-recognized numeric sorts, explicit
 * comparison blocks and named subs (including XSUBs and subs with a
 * "$$" prototype that take their arguments in @_), plus the in-place
 * "@a = sort @a" optimization and the reverse-sort flag. */
PP(pp_sort)
{
    dSP; dMARK; dORIGMARK;
    register SV **p1 = ORIGMARK+1, **p2;
    register I32 max, i;
    AV* av = Nullav;
    HV *stash;
    GV *gv;
    CV *cv = 0;
    I32 gimme = GIMME;
    OP* nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    I32 is_xsub = 0;
    I32 sorting_av = 0;

    /* sort in non-list context returns undef and sorts nothing */
    if (gimme != G_ARRAY) {
	SP = MARK;
	RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (PL_op->op_flags & OPf_STACKED) {
	if (PL_op->op_flags & OPf_SPECIAL) {
	    /* sort BLOCK: dig the block's first op out of the optree */
	    OP *kid = cLISTOP->op_first->op_sibling;	/* pass pushmark */
	    kid = kUNOP->op_first;			/* pass rv2gv */
	    kid = kUNOP->op_first;			/* pass leave */
	    PL_sortcop = kid->op_next;
	    stash = CopSTASH(PL_curcop);
	}
	else {
	    /* sort SUBNAME or sort $subref: resolve the comparison CV */
	    cv = sv_2cv(*++MARK, &stash, &gv, 0);
	    if (cv && SvPOK(cv)) {
		STRLEN n_a;
		char *proto = SvPV((SV*)cv, n_a);
		/* "$$" prototype means args are passed in @_, not $a/$b */
		if (proto && strEQ(proto, "$$")) {
		    hasargs = TRUE;
		}
	    }
	    if (!(cv && CvROOT(cv))) {
		if (cv && CvXSUB(cv)) {
		    is_xsub = 1;
		}
		else if (gv) {
		    SV *tmpstr = sv_newmortal();
		    gv_efullname3(tmpstr, gv, Nullch);
		    DIE(aTHX_ "Undefined sort subroutine \"%"SVf"\" called",
			tmpstr);
		}
		else {
		    DIE(aTHX_ "Undefined subroutine in sort");
		}
	    }

	    if (is_xsub)
		PL_sortcop = (OP*)cv;
	    else {
		PL_sortcop = CvSTART(cv);
		/* Null out the sub's root op so the comparison runs as a
		 * plain op stream rather than re-entering the sub. */
		SAVEVPTR(CvROOT(cv)->op_ppaddr);
		CvROOT(cv)->op_ppaddr = PL_ppaddr[OP_NULL];

		PAD_SET_CUR(CvPADLIST(cv), 1);
	    }
	}
    }
    else {
	PL_sortcop = Nullop;
	stash = CopSTASH(PL_curcop);
    }

    /* optimiser converts "@a = sort @a" to "sort \@a";
     * in case of tied @a, pessimise: push (@a) onto stack, then assign
     * result back to @a at the end of this function */
    if (PL_op->op_private & OPpSORT_INPLACE) {
	assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
	(void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
	av = (AV*)(*SP);
	max = AvFILL(av) + 1;
	if (SvMAGICAL(av)) {
	    MEXTEND(SP, max);
	    p2 = SP;
	    for (i=0; i < (U32)max; i++) {
		SV **svp = av_fetch(av, i, FALSE);
		*SP++ = (svp) ? *svp : Nullsv;
	    }
	}
	else {
	    /* untied array: sort its element buffer directly */
	    p1 = p2 = AvARRAY(av);
	    sorting_av = 1;
	}
    }
    else {
	p2 = MARK+1;
	max = SP - MARK;
    }

    /* shuffle stack down, removing optional initial cv (p1!=p2), plus any
     * nulls; also stringify any args */
    for (i=max; i > 0 ; i--) {
	if ((*p1 = *p2++)) {			/* Weed out nulls. */
	    SvTEMP_off(*p1);
	    if (!PL_sortcop && !SvPOK(*p1)) {
		STRLEN n_a;
		if (SvAMAGIC(*p1))
		    overloading = 1;
		else
		    (void)sv_2pv(*p1, &n_a);
	    }
	    p1++;
	}
	else
	    max--;
    }
    if (sorting_av)
	AvFILLp(av) = max-1;

    if (max > 1) {
	if (PL_sortcop) {
	    /* user-supplied comparison: set up a pseudo sub call frame */
	    PERL_CONTEXT *cx;
	    SV** newsp;
	    bool oldcatch = CATCH_GET;

	    SAVETMPS;
	    SAVEOP();

	    CATCH_SET(TRUE);
	    PUSHSTACKi(PERLSI_SORT);
	    if (!hasargs && !is_xsub) {
		/* cache the $a/$b globs for the caller's package */
		if (PL_sortstash != stash || !PL_firstgv || !PL_secondgv) {
		    SAVESPTR(PL_firstgv);
		    SAVESPTR(PL_secondgv);
		    PL_firstgv = gv_fetchpv("a", TRUE, SVt_PV);
		    PL_secondgv = gv_fetchpv("b", TRUE, SVt_PV);
		    PL_sortstash = stash;
		}
#ifdef USE_5005THREADS
		sv_lock((SV *)PL_firstgv);
		sv_lock((SV *)PL_secondgv);
#endif
		SAVESPTR(GvSV(PL_firstgv));
		SAVESPTR(GvSV(PL_secondgv));
	    }

	    PUSHBLOCK(cx, CXt_NULL, PL_stack_base);
	    if (!(PL_op->op_flags & OPf_SPECIAL)) {
		cx->cx_type = CXt_SUB;
		cx->blk_gimme = G_SCALAR;
		PUSHSUB(cx);
	    }
	    PL_sortcxix = cxstack_ix;

	    if (hasargs && !is_xsub) {
		/* This is mostly copied from pp_entersub */
		AV *av = (AV*)PAD_SVl(0);

#ifndef USE_5005THREADS
		cx->blk_sub.savearray = GvAV(PL_defgv);
		GvAV(PL_defgv) = (AV*)SvREFCNT_inc(av);
#endif /* USE_5005THREADS */
		CX_CURPAD_SAVE(cx->blk_sub);
		cx->blk_sub.argarray = av;
	    }
	    sortsv(p1-max, max,
		   is_xsub ? sortcv_xsub : hasargs ? sortcv_stacked : sortcv);

	    POPBLOCK(cx,PL_curpm);
	    PL_stack_sp = newsp;
	    POPSTACK;
	    CATCH_SET(oldcatch);
	}
	else {
	    /* builtin comparison: pick the routine matching the op's
	     * numeric/integer/locale flags and any operand overloading */
	    MEXTEND(SP, 20);	/* Can't afford stack realloc on signal. */
	    sortsv(sorting_av ? AvARRAY(av) : ORIGMARK+1, max,
		   (PL_op->op_private & OPpSORT_NUMERIC)
			? ( (PL_op->op_private & OPpSORT_INTEGER)
			    ? ( overloading ? amagic_i_ncmp : sv_i_ncmp)
			    : ( overloading ? amagic_ncmp : sv_ncmp))
			: ( IN_LOCALE_RUNTIME
			    ? ( overloading
				? amagic_cmp_locale
				: sv_cmp_locale_static)
			    : ( overloading ? amagic_cmp : sv_cmp_static)));
	    if (PL_op->op_private & OPpSORT_REVERSE) {
		/* "reverse sort" optimization: reverse the result in place */
		SV **p = sorting_av ? AvARRAY(av) : ORIGMARK+1;
		SV **q = p+max-1;
		while (p < q) {
		    SV *tmp = *p;
		    *p++ = *q;
		    *q-- = tmp;
		}
	    }
	}
    }
    if (av && !sorting_av) {
	/* simulate pp_aassign of tied AV */
	SV *sv;
	SV** base, **didstore;
	for (base = ORIGMARK+1, i=0; i < max; i++) {
	    sv = NEWSV(28,0);
	    sv_setsv(sv, base[i]);
	    base[i] = sv;
	}
	av_clear(av);
	av_extend(av, max);
	for (i=0; i < max; i++) {
	    sv = base[i];
	    didstore = av_store(av, i, sv);
	    if (SvSMAGICAL(sv))
		mg_set(sv);
	    if (!didstore)
		sv_2mortal(sv);
	}
    }
    LEAVE;
    PL_stack_sp = ORIGMARK + (sorting_av ? 0 : max);
    return nextop;
}
1N/A
/* Comparison routine for a sort block/sub that communicates via the
 * package globals $a and $b.  Runs the op stream in PL_sortcop directly
 * via CALLRUNOPS and requires it to leave exactly one numeric value on
 * the stack; croaks otherwise. */
static I32
sortcv(pTHX_ SV *a, SV *b)
{
    I32 oldsaveix = PL_savestack_ix;
    I32 oldscopeix = PL_scopestack_ix;
    I32 result;
    GvSV(PL_firstgv) = a;	/* alias $a to the first element */
    GvSV(PL_secondgv) = b;	/* alias $b to the second element */
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    if (PL_stack_sp != PL_stack_base + 1)
	Perl_croak(aTHX_ "Sort subroutine didn't return single value");
    if (!SvNIOKp(*PL_stack_sp))
	Perl_croak(aTHX_ "Sort subroutine didn't return a numeric value");
    result = SvIV(*PL_stack_sp);
    /* unwind any scopes/savestack entries the comparison left behind */
    while (PL_scopestack_ix > oldscopeix) {
	LEAVE;
    }
    leave_scope(oldsaveix);
    return result;
}
1N/A
/* Comparison routine for sort subs with a "$$" prototype: arguments are
 * passed in @_ rather than via $a/$b.  Ensures @_ can hold two elements,
 * stores a and b there, then runs the sub like sortcv does. */
static I32
sortcv_stacked(pTHX_ SV *a, SV *b)
{
    I32 oldsaveix = PL_savestack_ix;
    I32 oldscopeix = PL_scopestack_ix;
    I32 result;
    AV *av;

#ifdef USE_5005THREADS
    av = (AV*)PAD_SVl(0);
#else
    av = GvAV(PL_defgv);
#endif

    /* Grow @_ to at least two slots, first reclaiming any slack that
     * sits before AvARRAY within the allocated buffer. */
    if (AvMAX(av) < 1) {
	SV** ary = AvALLOC(av);
	if (AvARRAY(av) != ary) {
	    AvMAX(av) += AvARRAY(av) - AvALLOC(av);
	    SvPVX(av) = (char*)ary;
	}
	if (AvMAX(av) < 1) {
	    AvMAX(av) = 1;
	    Renew(ary,2,SV*);
	    SvPVX(av) = (char*)ary;
	}
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;		/* $_[0] */
    AvARRAY(av)[1] = b;		/* $_[1] */
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    if (PL_stack_sp != PL_stack_base + 1)
	Perl_croak(aTHX_ "Sort subroutine didn't return single value");
    if (!SvNIOKp(*PL_stack_sp))
	Perl_croak(aTHX_ "Sort subroutine didn't return a numeric value");
    result = SvIV(*PL_stack_sp);
    /* unwind any scopes/savestack entries the comparison left behind */
    while (PL_scopestack_ix > oldscopeix) {
	LEAVE;
    }
    leave_scope(oldsaveix);
    return result;
}
1N/A
/* Comparison routine for an XSUB comparison function: push a and b as
 * ordinary call arguments and invoke the XSUB directly (PL_sortcop
 * holds the CV in this case).  The XSUB must return a single numeric
 * value; croaks otherwise. */
static I32
sortcv_xsub(pTHX_ SV *a, SV *b)
{
    dSP;
    I32 oldsaveix = PL_savestack_ix;
    I32 oldscopeix = PL_scopestack_ix;
    I32 result;
    CV *cv=(CV*)PL_sortcop;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    if (PL_stack_sp != PL_stack_base + 1)
	Perl_croak(aTHX_ "Sort subroutine didn't return single value");
    if (!SvNIOKp(*PL_stack_sp))
	Perl_croak(aTHX_ "Sort subroutine didn't return a numeric value");
    result = SvIV(*PL_stack_sp);
    /* unwind any scopes/savestack entries the comparison left behind */
    while (PL_scopestack_ix > oldscopeix) {
	LEAVE;
    }
    leave_scope(oldsaveix);
    return result;
}
1N/A
1N/A
1N/Astatic I32
1N/Asv_ncmp(pTHX_ SV *a, SV *b)
1N/A{
1N/A NV nv1 = SvNV(a);
1N/A NV nv2 = SvNV(b);
1N/A return nv1 < nv2 ? -1 : nv1 > nv2 ? 1 : 0;
1N/A}
1N/A
1N/Astatic I32
1N/Asv_i_ncmp(pTHX_ SV *a, SV *b)
1N/A{
1N/A IV iv1 = SvIV(a);
1N/A IV iv2 = SvIV(b);
1N/A return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
1N/A}
/* Try an overloaded (amagic) binary operation "meth" on left/right.
 * On success *svp holds the result SV; otherwise *svp is Nullsv.
 * Overloading is attempted only if any overloading is active at all
 * (PL_amagic_generation nonzero) and at least one operand is
 * overloaded. */
#define tryCALL_AMAGICbin(left,right,meth,svp) STMT_START { \
	  *svp = Nullsv; \
	  if (PL_amagic_generation) { \
	    if (SvAMAGIC(left)||SvAMAGIC(right))\
		*svp = amagic_call(left, \
				   right, \
				   CAT2(meth,_amg), \
				   0); \
	    } \
	  } STMT_END
1N/A
1N/Astatic I32
1N/Aamagic_ncmp(pTHX_ register SV *a, register SV *b)
1N/A{
1N/A SV *tmpsv;
1N/A tryCALL_AMAGICbin(a,b,ncmp,&tmpsv);
1N/A if (tmpsv) {
1N/A NV d;
1N/A
1N/A if (SvIOK(tmpsv)) {
1N/A I32 i = SvIVX(tmpsv);
1N/A if (i > 0)
1N/A return 1;
1N/A return i? -1 : 0;
1N/A }
1N/A d = SvNV(tmpsv);
1N/A if (d > 0)
1N/A return 1;
1N/A return d? -1 : 0;
1N/A }
1N/A return sv_ncmp(aTHX_ a, b);
1N/A}
1N/A
1N/Astatic I32
1N/Aamagic_i_ncmp(pTHX_ register SV *a, register SV *b)
1N/A{
1N/A SV *tmpsv;
1N/A tryCALL_AMAGICbin(a,b,ncmp,&tmpsv);
1N/A if (tmpsv) {
1N/A NV d;
1N/A
1N/A if (SvIOK(tmpsv)) {
1N/A I32 i = SvIVX(tmpsv);
1N/A if (i > 0)
1N/A return 1;
1N/A return i? -1 : 0;
1N/A }
1N/A d = SvNV(tmpsv);
1N/A if (d > 0)
1N/A return 1;
1N/A return d? -1 : 0;
1N/A }
1N/A return sv_i_ncmp(aTHX_ a, b);
1N/A}
1N/A
1N/Astatic I32
1N/Aamagic_cmp(pTHX_ register SV *str1, register SV *str2)
1N/A{
1N/A SV *tmpsv;
1N/A tryCALL_AMAGICbin(str1,str2,scmp,&tmpsv);
1N/A if (tmpsv) {
1N/A NV d;
1N/A
1N/A if (SvIOK(tmpsv)) {
1N/A I32 i = SvIVX(tmpsv);
1N/A if (i > 0)
1N/A return 1;
1N/A return i? -1 : 0;
1N/A }
1N/A d = SvNV(tmpsv);
1N/A if (d > 0)
1N/A return 1;
1N/A return d? -1 : 0;
1N/A }
1N/A return sv_cmp(str1, str2);
1N/A}
1N/A
1N/Astatic I32
1N/Aamagic_cmp_locale(pTHX_ register SV *str1, register SV *str2)
1N/A{
1N/A SV *tmpsv;
1N/A tryCALL_AMAGICbin(str1,str2,scmp,&tmpsv);
1N/A if (tmpsv) {
1N/A NV d;
1N/A
1N/A if (SvIOK(tmpsv)) {
1N/A I32 i = SvIVX(tmpsv);
1N/A if (i > 0)
1N/A return 1;
1N/A return i? -1 : 0;
1N/A }
1N/A d = SvNV(tmpsv);
1N/A if (d > 0)
1N/A return 1;
1N/A return d? -1 : 0;
1N/A }
1N/A return sv_cmp_locale(str1, str2);
1N/A}