summary | refs | log | tree | commit | diff
path: root/hv.c
diff options
context:
space:
mode:
author    Nicholas Clark <nick@ccl4.org> 2005-05-31 10:40:01 +0000
committer Nicholas Clark <nick@ccl4.org> 2005-05-31 10:40:01 +0000
commit    0298d7b92741692bcf2e34c418a564332bb034e6 (patch)
tree      dbaa1d2159a34e12fe44741e02e643fd5f7e56bf /hv.c
parent    9e720f71ce602dbb0ba425fccdc5b863b0188ec2 (diff)
download  perl-0298d7b92741692bcf2e34c418a564332bb034e6.tar.gz
Avoid updating a variable in a loop.
Only calculate the number of links in a hash bucket chain if we really need it.

p4raw-id: //depot/perl@24648
Diffstat (limited to 'hv.c')
-rw-r--r--  hv.c | 40 ++++++++++++++++++++---------------
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/hv.c b/hv.c
index 215bcc3fc4..521d1ff137 100644
--- a/hv.c
+++ b/hv.c
@@ -413,7 +413,6 @@ S_hv_fetch_common(pTHX_ HV *hv, SV *keysv, const char *key, STRLEN klen,
{
dVAR;
XPVHV* xhv;
- U32 n_links;
HE *entry;
HE **oentry;
SV *sv;
@@ -654,7 +653,6 @@ S_hv_fetch_common(pTHX_ HV *hv, SV *keysv, const char *key, STRLEN klen,
}
masked_flags = (flags & HVhek_MASK);
- n_links = 0;
#ifdef DYNAMIC_ENV_FETCH
if (!HvARRAY(hv)) entry = Null(HE*);
@@ -663,7 +661,7 @@ S_hv_fetch_common(pTHX_ HV *hv, SV *keysv, const char *key, STRLEN klen,
{
entry = (HvARRAY(hv))[hash & (I32) HvMAX(hv)];
}
- for (; entry; ++n_links, entry = HeNEXT(entry)) {
+ for (; entry; entry = HeNEXT(entry)) {
if (HeHASH(entry) != hash) /* strings can't be equal */
continue;
if (HeKLEN(entry) != (I32)klen)
@@ -798,18 +796,30 @@ S_hv_fetch_common(pTHX_ HV *hv, SV *keysv, const char *key, STRLEN klen,
if (masked_flags & HVhek_ENABLEHVKFLAGS)
HvHASKFLAGS_on(hv);
- xhv->xhv_keys++; /* HvKEYS(hv)++ */
- if (!n_links) { /* initial entry? */
- xhv->xhv_fill++; /* HvFILL(hv)++ */
- } else if ((xhv->xhv_keys > (IV)xhv->xhv_max)
- || ((n_links > HV_MAX_LENGTH_BEFORE_SPLIT) && !HvREHASH(hv))) {
- /* Use only the old HvKEYS(hv) > HvMAX(hv) condition to limit bucket
- splits on a rehashed hash, as we're not going to split it again,
- and if someone is lucky (evil) enough to get all the keys in one
- list they could exhaust our memory as we repeatedly double the
- number of buckets on every entry. Linear search feels a less worse
- thing to do. */
- hsplit(hv);
+ {
+ const HE *counter = HeNEXT(entry);
+
+ xhv->xhv_keys++; /* HvKEYS(hv)++ */
+ if (!counter) { /* initial entry? */
+ xhv->xhv_fill++; /* HvFILL(hv)++ */
+ } else if (xhv->xhv_keys > (IV)xhv->xhv_max) {
+ hsplit(hv);
+ } else if(!HvREHASH(hv)) {
+ U32 n_links = 1;
+
+ while ((counter = HeNEXT(counter)))
+ n_links++;
+
+ if (n_links > HV_MAX_LENGTH_BEFORE_SPLIT) {
+ /* Use only the old HvKEYS(hv) > HvMAX(hv) condition to limit
+ bucket splits on a rehashed hash, as we're not going to
+ split it again, and if someone is lucky (evil) enough to
+ get all the keys in one list they could exhaust our memory
+ as we repeatedly double the number of buckets on every
+ entry. Linear search feels a less worse thing to do. */
+ hsplit(hv);
+ }
+ }
}
return entry;