Diffstat (limited to 'subversion/libsvn_subr/cache-membuffer.c')
-rw-r--r--  subversion/libsvn_subr/cache-membuffer.c | 1377
1 file changed, 956 insertions(+), 421 deletions(-)
diff --git a/subversion/libsvn_subr/cache-membuffer.c b/subversion/libsvn_subr/cache-membuffer.c
index 6122945..7aa90cd 100644
--- a/subversion/libsvn_subr/cache-membuffer.c
+++ b/subversion/libsvn_subr/cache-membuffer.c
@@ -23,6 +23,8 @@
#include <assert.h>
#include <apr_md5.h>
+#include <apr_thread_rwlock.h>
+
#include "svn_pools.h"
#include "svn_checksum.h"
#include "md5.h"
@@ -30,11 +32,13 @@
#include "cache.h"
#include "svn_string.h"
#include "private/svn_dep_compat.h"
+#include "private/svn_mutex.h"
+#include "private/svn_pseudo_md5.h"
/*
* This svn_cache__t implementation actually consists of two parts:
* a shared (per-process) singleton membuffer cache instance and shallow
- * svn_cache__t frontend instances that each use different key spaces.
+ * svn_cache__t front-end instances that each use different key spaces.
* For data management, they all forward to the singleton membuffer cache.
*
* A membuffer cache consists of two parts:
@@ -55,14 +59,14 @@
* The data buffer usage information is implicitly given by the directory
* entries. Every USED entry has a reference to the previous and the next
* used dictionary entry and this double-linked list is ordered by the
- * offsets of their item data within the the data buffer. So removing data,
+ * offsets of their item data within the data buffer. So removing data,
* for instance, is done simply by unlinking it from the chain, implicitly
* marking the entry as well as the data buffer section previously
* associated to it as unused.
*
* Insertion can occur at only one, sliding position. It is marked by its
* offset in the data buffer plus the index of the first used entry at or
- * behind that position. If this gap is too small to accomodate the new
+ * behind that position. If this gap is too small to accommodate the new
* item, the insertion window is extended as described below. The new entry
* will always be inserted at the bottom end of the window and since the
* next used entry is known, properly sorted insertion is possible.
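 * As an illustrative sketch (not part of this change, using names that
 * appear further down in this file), the "does the new item fit?" test
 * behind that window logic boils down to roughly:
 *
 *   end  = cache->next == NO_INDEX
 *        ? cache->data_size
 *        : get_entry(cache, cache->next)->offset;
 *   fits = end - cache->current_data >= size;
 *
 * If the item does not fit, entries at the window's end are either dropped
 * or moved behind the window until the gap is large enough (see
 * ensure_data_insertable).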
@@ -76,7 +80,7 @@
* get evicted, it is moved to the begin of that window and the window is
* moved.
*
- * Moreover, the entry's hits get halfed to make that entry more likely to
+ * Moreover, the entry's hits get halved to make that entry more likely to
* be removed the next time the sliding insertion / removal window comes by.
* As a result, frequently used entries are likely not to be dropped until
* they get not used for a while. Also, even a cache thrashing situation
@@ -97,33 +101,61 @@
* on their hash key.
*/
-/* A 4-way associative cache seems to be the best compromise between
- * performance (worst-case lookups) and efficiency-loss due to collisions.
+/* APR's read-write lock implementation on Windows is horribly inefficient.
+ * Even with very low contention, a runtime overhead of 35% has been
+ * measured for 'svn-bench null-export' over ra_serf.
*
- * This value may be changed to any positive integer.
+ * Use a simple mutex on Windows. Because there is one mutex per segment,
+ * large machines should (and usually can) be configured with large caches
+ * such that read contention is kept low. This is basically the situation
+ * we had before 1.8.
*/
-#define GROUP_SIZE 4
+#ifdef WIN32
+# define USE_SIMPLE_MUTEX 1
+#else
+# define USE_SIMPLE_MUTEX 0
+#endif
-/* We use MD5 for digest size and speed (SHA1 is >2x slower, for instance).
+/* A 16-way associative cache seems to be a good compromise between
+ * performance (worst-case lookups) and efficiency-loss due to collisions.
+ *
+ * This value may be changed to any positive integer.
*/
-#define KEY_SIZE APR_MD5_DIGESTSIZE
+#define GROUP_SIZE 16
-/* For more efficient copy operations, let'a align all data items properly.
+/* For more efficient copy operations, let's align all data items properly.
* Must be a power of 2.
*/
#define ITEM_ALIGNMENT 16
-/* Don't create cache segments smaller than this value unless the total
- * cache size itself is smaller.
+/* By default, don't create cache segments smaller than this value unless
+ * the total cache size itself is smaller.
*/
-#define MIN_SEGMENT_SIZE 0x2000000ull
+#define DEFAULT_MIN_SEGMENT_SIZE APR_UINT64_C(0x2000000)
+
+/* The minimum segment size we will allow for multi-segmented caches
+ */
+#define MIN_SEGMENT_SIZE APR_UINT64_C(0x10000)
+
+/* The maximum number of segments allowed. Larger numbers reduce the size
+ * of each segment, in turn reducing the max size of a cachable item.
+ * Also, each segment gets its own lock object. The actual number supported
+ * by the OS may therefore be lower and svn_cache__membuffer_cache_create
+ * may return an error.
+ */
+#define MAX_SEGMENT_COUNT 0x10000
+
+/* As of today, APR won't allocate chunks of 4GB or more. So, limit the
+ * segment size to slightly below that.
+ */
+#define MAX_SEGMENT_SIZE APR_UINT64_C(0xffff0000)
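/* Illustrative arithmetic (not part of this change): taken together, the
 * limits above cap a single membuffer at
 *
 *   MAX_SEGMENT_COUNT * MAX_SEGMENT_SIZE = 0x10000 * 0xffff0000
 *                                        = 0xffff00000000 bytes (~256 TB)
 *
 * while keeping each individual segment just below 4GB so that APR can
 * still allocate it as one chunk.
 */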
/* We don't mark the initialization status for every group but initialize
* a number of groups at once. That will allow for a very small init flags
* vector that is likely to fit into the CPU caches even for fairly large
- * caches. For instance, the default of 32 means 8x32 groups per byte, i.e.
- * 8 flags/byte x 32 groups/flag x 4 entries/group x 40 index bytes/entry
- * x 16 cache bytes/index byte = 1kB init vector / 640MB cache.
+ * membuffer caches. For instance, the default of 32 means 8x32 groups per
+ * byte, i.e. 8 flags/byte x 32 groups/flag x 8 entries/group x 40 index
+ * bytes/entry x 8 cache bytes/index byte = 1kB init vector / 640MB cache.
*/
#define GROUP_INIT_GRANULARITY 32
@@ -131,9 +163,18 @@
*/
#define NO_INDEX APR_UINT32_MAX
-/* Invalid buffer offset reference value. Equivalent to APR_UINT32_T(-1)
+/* To save space in our group structure, we only use 32 bit size values
+ * and, therefore, limit the size of each entry to just below 4GB.
+ * Supporting larger items is not a good idea as the data transfer
+ * to and from the cache would block other threads for a very long time.
*/
-#define NO_OFFSET APR_UINT64_MAX
+#define MAX_ITEM_SIZE ((apr_uint32_t)(0 - ITEM_ALIGNMENT))
+
+/* A 16 byte key type. We use that to identify cache entries.
+ * The notation as just two integer values will cause many compilers
+ * to create better code.
+ */
+typedef apr_uint64_t entry_key_t[2];
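/* Illustrative sketch (not part of this change): MAX_ITEM_SIZE relies on
 * well-defined unsigned wrap-around; with ITEM_ALIGNMENT == 16,
 *
 *   (apr_uint32_t)(0 - ITEM_ALIGNMENT) == 0xfffffff0
 *
 * i.e. 4GB minus one alignment unit. Likewise, treating the 16 byte key
 * as two 64 bit words reduces a full key comparison to two integer
 * compares, as used in find_entry below:
 *
 *   match = lhs[0] == rhs[0] && lhs[1] == rhs[1];
 */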
/* Debugging / corruption detection support.
* If you define this macro, the getter functions will performed expensive
@@ -194,25 +235,25 @@ static void get_prefix_tail(const char *prefix, char *prefix_tail)
/* Initialize all members of TAG except for the content hash.
*/
-static svn_error_t* store_key_part(entry_tag_t *tag,
- unsigned char *prefix_hash,
+static svn_error_t *store_key_part(entry_tag_t *tag,
+ entry_key_t prefix_hash,
char *prefix_tail,
const void *key,
- apr_size_t size,
+ apr_size_t key_len,
apr_pool_t *pool)
{
svn_checksum_t *checksum;
SVN_ERR(svn_checksum(&checksum,
svn_checksum_md5,
key,
- size,
+ key_len,
pool));
memcpy(tag->prefix_hash, prefix_hash, sizeof(tag->prefix_hash));
memcpy(tag->key_hash, checksum->digest, sizeof(tag->key_hash));
memcpy(tag->prefix_tail, prefix_tail, sizeof(tag->prefix_tail));
- tag->key_len = size;
+ tag->key_len = key_len;
return SVN_NO_ERROR;
}
@@ -255,16 +296,17 @@ static svn_error_t* assert_equal_tags(const entry_tag_t *lhs,
return SVN_NO_ERROR;
}
-/* Reoccuring code snippets.
+/* Reoccurring code snippets.
*/
#define DEBUG_CACHE_MEMBUFFER_TAG_ARG entry_tag_t *tag,
-#define DEBUG_CACHE_MEMBUFFER_TAG &tag,
+#define DEBUG_CACHE_MEMBUFFER_TAG tag,
#define DEBUG_CACHE_MEMBUFFER_INIT_TAG \
- entry_tag_t tag; \
- SVN_ERR(store_key_part(&tag, \
+ entry_tag_t _tag; \
+ entry_tag_t *tag = &_tag; \
+ SVN_ERR(store_key_part(tag, \
cache->prefix, \
cache->prefix_tail, \
key, \
@@ -286,24 +328,22 @@ static svn_error_t* assert_equal_tags(const entry_tag_t *lhs,
/* A single dictionary entry. Since all entries will be allocated once
* during cache creation, those entries might be either used or unused.
* An entry is used if and only if it is contained in the doubly-linked
- * list of used entries. An entry is unused if and only if its OFFSET
- * member is NO_OFFSET.
+ * list of used entries.
*/
typedef struct entry_t
{
/* Identifying the data item. Only valid for used entries.
*/
- unsigned char key [KEY_SIZE];
+ entry_key_t key;
- /* If NO_OFFSET, the entry is not in used. Otherwise, it is the offset
- * of the cached item's serialized data within the data buffer.
+ /* The offset of the cached item's serialized data within the data buffer.
*/
apr_uint64_t offset;
/* Size of the serialized item data. May be 0.
* Only valid for used entries.
*/
- apr_uint32_t size;
+ apr_size_t size;
/* Number of (read) hits for this entry. Will be reset upon write.
* Only valid for used entries.
@@ -332,9 +372,16 @@ typedef struct entry_t
#endif
} entry_t;
-/* We group dictionary entries to make this GROUP-SIZE-way assicative.
+/* We group dictionary entries to make this GROUP-SIZE-way associative.
*/
-typedef entry_t entry_group_t[GROUP_SIZE];
+typedef struct entry_group_t
+{
+ /* number of entries used [0 .. USED-1] */
+ apr_uint32_t used;
+
+ /* the actual entries */
+ entry_t entries[GROUP_SIZE];
+} entry_group_t;
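/* Illustrative sketch (not part of this change): the used entries of a
 * group are kept packed at the front, i.e. ENTRIES[0 .. USED-1] are valid
 * and the rest is free. A lookup therefore never scans more than USED
 * slots, roughly:
 *
 *   for (i = 0; i < group->used; ++i)
 *     if (   to_find[0] == group->entries[i].key[0]
 *         && to_find[1] == group->entries[i].key[1])
 *       return &group->entries[i];
 *
 * drop_entry below preserves this invariant by moving the last used entry
 * into the slot being freed.
 */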
/* The cache header structure.
*/
@@ -390,13 +437,18 @@ struct svn_membuffer_t
*/
apr_uint64_t current_data;
- /* Total number of data buffer bytes in use. This is for statistics only.
+ /* Total number of data buffer bytes in use.
*/
apr_uint64_t data_used;
+ /* Largest entry size that we would accept. For total cache sizes
+ * less than 4TB (sic!), this is determined by the total cache size.
+ */
+ apr_uint64_t max_entry_size;
+
/* Number of used dictionary entries, i.e. number of cached items.
- * In conjunction with hit_cout, this is used calculate the average
+ * In conjunction with hit_count, this is used to calculate the average
* hit count as part of the randomized LFU algorithm.
*/
apr_uint32_t used_entries;
@@ -428,7 +480,17 @@ struct svn_membuffer_t
* the cache's creator doesn't feel the cache needs to be
* thread-safe.
*/
- apr_thread_mutex_t *mutex;
+# if USE_SIMPLE_MUTEX
+ svn_mutex__t *lock;
+# else
+ apr_thread_rwlock_t *lock;
+# endif
+
+ /* If set, write accesses will wait until they get exclusive access.
+ * Otherwise, they will become no-ops if the segment is currently
+ * read-locked. Only used when LOCK is an r/w lock.
+ */
+ svn_boolean_t allow_blocking_writes;
#endif
};
@@ -440,50 +502,158 @@ struct svn_membuffer_t
*/
#define ALIGN_POINTER(pointer) ((void*)ALIGN_VALUE((apr_size_t)(char*)(pointer)))
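/* Illustrative sketch (not part of this change): ALIGN_VALUE is not
 * visible in this hunk; it presumably rounds a size up to the next
 * multiple of ITEM_ALIGNMENT along the lines of
 *
 *   #define ALIGN_VALUE(value) \
 *     (((value) + ITEM_ALIGNMENT - 1) & ~(apr_size_t)(ITEM_ALIGNMENT - 1))
 *
 * and ALIGN_POINTER above merely applies the same rounding to an address.
 */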
-/* Acquire the cache mutex, if necessary.
+/* If locking is supported for CACHE, acquire a read lock for it.
*/
static svn_error_t *
-lock_cache(svn_membuffer_t *cache)
+read_lock_cache(svn_membuffer_t *cache)
{
#if APR_HAS_THREADS
- if (cache->mutex)
+# if USE_SIMPLE_MUTEX
+ return svn_mutex__lock(cache->lock);
+# else
+ if (cache->lock)
{
- apr_status_t status = apr_thread_mutex_lock(cache->mutex);
+ apr_status_t status = apr_thread_rwlock_rdlock(cache->lock);
if (status)
return svn_error_wrap_apr(status, _("Can't lock cache mutex"));
}
+# endif
+#endif
+ return SVN_NO_ERROR;
+}
+
+/* If locking is supported for CACHE, acquire a write lock for it.
+ */
+static svn_error_t *
+write_lock_cache(svn_membuffer_t *cache, svn_boolean_t *success)
+{
+#if APR_HAS_THREADS
+# if USE_SIMPLE_MUTEX
+
+ return svn_mutex__lock(cache->lock);
+
+# else
+
+ if (cache->lock)
+ {
+ apr_status_t status;
+ if (cache->allow_blocking_writes)
+ {
+ status = apr_thread_rwlock_wrlock(cache->lock);
+ }
+ else
+ {
+ status = apr_thread_rwlock_trywrlock(cache->lock);
+ if (SVN_LOCK_IS_BUSY(status))
+ {
+ *success = FALSE;
+ status = APR_SUCCESS;
+ }
+ }
+
+ if (status)
+ return svn_error_wrap_apr(status,
+ _("Can't write-lock cache mutex"));
+ }
+
+# endif
#endif
+ return SVN_NO_ERROR;
+}
+
+/* If locking is supported for CACHE, acquire an unconditional write lock
+ * for it.
+ */
+static svn_error_t *
+force_write_lock_cache(svn_membuffer_t *cache)
+{
+#if APR_HAS_THREADS
+# if USE_SIMPLE_MUTEX
+
+ return svn_mutex__lock(cache->lock);
+
+# else
+ apr_status_t status = apr_thread_rwlock_wrlock(cache->lock);
+ if (status)
+ return svn_error_wrap_apr(status,
+ _("Can't write-lock cache mutex"));
+
+# endif
+#endif
return SVN_NO_ERROR;
}
-/* Release the cache mutex, if necessary.
+/* If locking is supported for CACHE, release the current lock
+ * (read or write).
*/
static svn_error_t *
unlock_cache(svn_membuffer_t *cache, svn_error_t *err)
{
#if APR_HAS_THREADS
- if (cache->mutex)
+# if USE_SIMPLE_MUTEX
+
+ return svn_mutex__unlock(cache->lock, err);
+
+# else
+
+ if (cache->lock)
{
- apr_status_t status = apr_thread_mutex_unlock(cache->mutex);
+ apr_status_t status = apr_thread_rwlock_unlock(cache->lock);
if (err)
return err;
if (status)
return svn_error_wrap_apr(status, _("Can't unlock cache mutex"));
}
-#endif
+# endif
+#endif
return err;
}
+/* If supported, guard the execution of EXPR with a read lock to cache.
+ * Macro has been modeled after SVN_MUTEX__WITH_LOCK.
+ */
+#define WITH_READ_LOCK(cache, expr) \
+do { \
+ SVN_ERR(read_lock_cache(cache)); \
+ SVN_ERR(unlock_cache(cache, (expr))); \
+} while (0)
+
+/* If supported, guard the execution of EXPR with a write lock to cache.
+ * Macro has been modeled after SVN_MUTEX__WITH_LOCK.
+ *
+ * The write lock process is complicated if we don't allow waiting for
+ * the lock: If we didn't get the lock, we may still need to remove an
+ * existing entry for the given key because that content is now stale.
+ * Once we have discovered such an entry, we unconditionally do a blocking
+ * wait for the write lock. In case no old content could be found, a
+ * failing lock attempt is simply a no-op and we exit the macro.
+ */
+#define WITH_WRITE_LOCK(cache, expr) \
+do { \
+ svn_boolean_t got_lock = TRUE; \
+ SVN_ERR(write_lock_cache(cache, &got_lock)); \
+ if (!got_lock) \
+ { \
+ svn_boolean_t exists; \
+ SVN_ERR(entry_exists(cache, group_index, key, &exists)); \
+ if (exists) \
+ SVN_ERR(force_write_lock_cache(cache)); \
+ else \
+ break; \
+ } \
+ SVN_ERR(unlock_cache(cache, (expr))); \
+} while (0)
+
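/* Usage sketch (not part of this change): WITH_WRITE_LOCK expects the
 * variables GROUP_INDEX and KEY to be in scope at the call site, e.g. as
 * in membuffer_cache_set below:
 *
 *   WITH_WRITE_LOCK(cache,
 *                   membuffer_cache_set_internal(cache, key, group_index,
 *                                                buffer, size,
 *                                                DEBUG_CACHE_MEMBUFFER_TAG
 *                                                scratch_pool));
 */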
/* Resolve a dictionary entry reference, i.e. return the entry
* for the given IDX.
*/
static APR_INLINE entry_t *
get_entry(svn_membuffer_t *cache, apr_uint32_t idx)
{
- return &cache->directory[idx / GROUP_SIZE][idx % GROUP_SIZE];
+ return &cache->directory[idx / GROUP_SIZE].entries[idx % GROUP_SIZE];
}
/* Get the entry references for the given ENTRY.
@@ -491,7 +661,11 @@ get_entry(svn_membuffer_t *cache, apr_uint32_t idx)
static APR_INLINE apr_uint32_t
get_index(svn_membuffer_t *cache, entry_t *entry)
{
- return (apr_uint32_t)(entry - (entry_t *)cache->directory);
+ apr_size_t group_index
+ = ((char *)entry - (char *)cache->directory) / sizeof(entry_group_t);
+
+ return (apr_uint32_t)group_index * GROUP_SIZE
+ + (apr_uint32_t)(entry - cache->directory[group_index].entries);
}
/* Remove the used ENTRY from the CACHE, i.e. make it "unused".
@@ -500,11 +674,16 @@ get_index(svn_membuffer_t *cache, entry_t *entry)
static void
drop_entry(svn_membuffer_t *cache, entry_t *entry)
{
+ /* the group that ENTRY belongs to plus a number of useful index values
+ */
apr_uint32_t idx = get_index(cache, entry);
+ apr_uint32_t group_index = idx / GROUP_SIZE;
+ entry_group_t *group = &cache->directory[group_index];
+ apr_uint32_t last_in_group = group_index * GROUP_SIZE + group->used - 1;
/* Only valid to be called for used entries.
*/
- assert(entry->offset != NO_OFFSET);
+ assert(idx <= last_in_group);
/* update global cache usage counters
*/
@@ -526,7 +705,7 @@ drop_entry(svn_membuffer_t *cache, entry_t *entry)
/* remove the first entry -> insertion may start at pos 0, now */
cache->current_data = 0;
}
- else
+ else
{
/* insertion may start right behind the previous entry */
entry_t *previous = get_entry(cache, entry->previous);
@@ -547,9 +726,35 @@ drop_entry(svn_membuffer_t *cache, entry_t *entry)
else
get_entry(cache, entry->next)->previous = entry->previous;
- /* Mark the entry as unused.
+ /* Move last entry into hole (if the removed one is not the last used).
+ * We need to do this since all used entries are at the beginning of
+ * the group's entries array.
*/
- entry->offset = NO_OFFSET;
+ if (idx < last_in_group)
+ {
+ /* copy the last used entry to the removed entry's index
+ */
+ *entry = group->entries[group->used-1];
+
+ /* update foreign links to new index
+ */
+ if (last_in_group == cache->next)
+ cache->next = idx;
+
+ if (entry->previous == NO_INDEX)
+ cache->first = idx;
+ else
+ get_entry(cache, entry->previous)->next = idx;
+
+ if (entry->next == NO_INDEX)
+ cache->last = idx;
+ else
+ get_entry(cache, entry->next)->previous = idx;
+ }
+
+ /* Update the number of used entries.
+ */
+ group->used--;
}
/* Insert ENTRY into the chain of used dictionary entries. The entry's
@@ -559,21 +764,28 @@ drop_entry(svn_membuffer_t *cache, entry_t *entry)
static void
insert_entry(svn_membuffer_t *cache, entry_t *entry)
{
+ /* the group that ENTRY belongs to plus a number of useful index values
+ */
apr_uint32_t idx = get_index(cache, entry);
+ apr_uint32_t group_index = idx / GROUP_SIZE;
+ entry_group_t *group = &cache->directory[group_index];
entry_t *next = cache->next == NO_INDEX
? NULL
: get_entry(cache, cache->next);
/* The entry must start at the beginning of the insertion window.
+ * It must also be the first unused entry in the group.
*/
assert(entry->offset == cache->current_data);
+ assert(idx == group_index * GROUP_SIZE + group->used);
cache->current_data = ALIGN_VALUE(entry->offset + entry->size);
- /* update global cache usage counters
+ /* update usage counters
*/
cache->used_entries++;
cache->data_used += entry->size;
entry->hit_count = 0;
+ group->used++;
/* update entry chain
*/
@@ -608,47 +820,28 @@ insert_entry(svn_membuffer_t *cache, entry_t *entry)
else
cache->first = idx;
}
+
+ /* The current insertion position must never point outside our
+ * data buffer.
+ */
+ assert(cache->current_data <= cache->data_size);
}
-/* Map a KEY of length LEN to the CACHE and group that shall contain the
- * respective item. Return the hash value in TO_FIND. Returns -1 upon error.
+/* Map a KEY of 16 bytes to the CACHE and group that shall contain the
+ * respective item.
*/
static apr_uint32_t
get_group_index(svn_membuffer_t **cache,
- const void *key,
- apr_size_t len,
- unsigned char *to_find,
- apr_pool_t *pool)
+ entry_key_t key)
{
- apr_uint32_t hash = 0;
- int i;
-
- /* calculate a hash value for the key */
- svn_checksum_t *checksum;
- svn_error_t *err;
-
- err = svn_checksum(&checksum, svn_checksum_md5, key, len, pool);
- if (err != NULL)
- {
- svn_error_clear(err);
- return NO_INDEX;
- }
-
- memcpy(to_find, checksum->digest, APR_MD5_DIGESTSIZE);
-
- /* select the cache segment to use */
- *cache = &(*cache)[to_find[0] & ((*cache)->segment_count -1)];
-
- /* Get the group that *must* contain the entry. Fold the hash value
- * just to be sure (it should not be necessary for perfect hashes).
- */
- for (i = 0; i < sizeof(to_find) / sizeof(apr_uint32_t); ++i)
- hash += ((apr_uint32_t*)to_find)[i] ^ ((hash >> 19) || (hash << 13));
+ svn_membuffer_t *segment0 = *cache;
- return hash % (*cache)->group_count;
+ /* select the cache segment to use. they all have the same group_count */
+ *cache = &segment0[key[0] & (segment0->segment_count -1)];
+ return key[1] % segment0->group_count;
}
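/* Worked example (not part of this change), with illustrative numbers of
 * 4 segments and 1009 groups per segment:
 *
 *   segment index = key[0] & (4 - 1)     ... low bits of the first word
 *   group index   = key[1] % 1009        ... second word modulo group count
 *
 * The bit mask is only correct because svn_cache__membuffer_cache_create
 * below forces the segment count to a power of two.
 */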
-/* Reduce the hit count of ENTRY and update the accumunated hit info
+/* Reduce the hit count of ENTRY and update the accumulated hit info
* in CACHE accordingly.
*/
static APR_INLINE void
@@ -660,26 +853,28 @@ let_entry_age(svn_membuffer_t *cache, entry_t *entry)
entry->hit_count -= hits_removed;
}
-/* Returns 0 if the entry group idenified by GROUP_INDEX in CACHE has not
- * been intialized, yet. In that case, this group can not data. Otherwise,
+/* Returns 0 if the entry group identified by GROUP_INDEX in CACHE has not
+ * been initialized yet. In that case, this group cannot contain data. Otherwise,
* a non-zero value is returned.
*/
static APR_INLINE unsigned char
is_group_initialized(svn_membuffer_t *cache, apr_uint32_t group_index)
{
- unsigned char flags = cache->group_initialized
- [group_index / (8 * GROUP_INIT_GRANULARITY)];
- unsigned char bit_mask = 1 << ((group_index / GROUP_INIT_GRANULARITY) % 8);
+ unsigned char flags
+ = cache->group_initialized[group_index / (8 * GROUP_INIT_GRANULARITY)];
+ unsigned char bit_mask
+ = (unsigned char)(1 << ((group_index / GROUP_INIT_GRANULARITY) % 8));
+
return flags & bit_mask;
}
/* Initializes the section of the directory in CACHE that contains
- * the entry group indentified by GROUP_INDEX. */
+ * the entry group identified by GROUP_INDEX. */
static void
initialize_group(svn_membuffer_t *cache, apr_uint32_t group_index)
{
unsigned char bit_mask;
- apr_uint32_t i, j;
+ apr_uint32_t i;
/* range of groups to initialize due to GROUP_INIT_GRANULARITY */
apr_uint32_t first_index =
@@ -689,11 +884,11 @@ initialize_group(svn_membuffer_t *cache, apr_uint32_t group_index)
last_index = cache->group_count;
for (i = first_index; i < last_index; ++i)
- for (j = 0; j < GROUP_SIZE; j++)
- cache->directory[i][j].offset = NO_OFFSET;
+ cache->directory[i].used = 0;
/* set the "initialized" bit for these groups */
- bit_mask = 1 << ((group_index / GROUP_INIT_GRANULARITY) % 8);
+ bit_mask
+ = (unsigned char)(1 << ((group_index / GROUP_INIT_GRANULARITY) % 8));
cache->group_initialized[group_index / (8 * GROUP_INIT_GRANULARITY)]
|= bit_mask;
}
@@ -713,16 +908,16 @@ initialize_group(svn_membuffer_t *cache, apr_uint32_t group_index)
static entry_t *
find_entry(svn_membuffer_t *cache,
apr_uint32_t group_index,
- unsigned char *to_find,
+ const apr_uint64_t to_find[2],
svn_boolean_t find_empty)
{
- entry_t *group;
+ entry_group_t *group;
entry_t *entry = NULL;
- int i;
+ apr_size_t i;
/* get the group that *must* contain the entry
*/
- group = &cache->directory[group_index][0];
+ group = &cache->directory[group_index];
/* If the entry group has not been initialized, yet, there is no data.
*/
@@ -731,10 +926,11 @@ find_entry(svn_membuffer_t *cache,
if (find_empty)
{
initialize_group(cache, group_index);
- entry = group;
+ entry = &group->entries[0];
/* initialize entry for the new key */
- memcpy(entry->key, to_find, KEY_SIZE);
+ entry->key[0] = to_find[0];
+ entry->key[1] = to_find[1];
}
return entry;
@@ -742,54 +938,53 @@ find_entry(svn_membuffer_t *cache,
/* try to find the matching entry
*/
- for (i = 0; i < GROUP_SIZE; ++i)
- if (group[i].offset != NO_OFFSET &&
- !memcmp(to_find, group[i].key, KEY_SIZE))
+ for (i = 0; i < group->used; ++i)
+ if ( to_find[0] == group->entries[i].key[0]
+ && to_find[1] == group->entries[i].key[1])
{
/* found it
*/
- entry = &group[i];
+ entry = &group->entries[i];
if (find_empty)
drop_entry(cache, entry);
-
- return entry;
+ else
+ return entry;
}
/* None found. Are we looking for a free entry?
*/
if (find_empty)
{
- /* look for an empty entry and return that ...
- */
- for (i = 0; i < GROUP_SIZE; ++i)
- if (group[i].offset == NO_OFFSET)
- {
- entry = &group[i];
- break;
- }
-
- /* ... or, if none is empty, delete the oldest entry
+ /* if there is no empty entry, delete the oldest entry
*/
- if (entry == NULL)
+ if (group->used == GROUP_SIZE)
{
- entry = &group[0];
+ /* every entry gets the same chance of being removed.
+ * Otherwise, we free the first entry, fill it and
+ * remove it again on the next occasion without considering
+ * the other entries in this group.
+ */
+ entry = &group->entries[rand() % GROUP_SIZE];
for (i = 1; i < GROUP_SIZE; ++i)
- if (entry->hit_count > group[i].hit_count)
- entry = &group[i];
+ if (entry->hit_count > group->entries[i].hit_count)
+ entry = &group->entries[i];
/* for the entries that don't have been removed,
- * reduce their hitcounts to put them at a relative
+ * reduce their hit counts to put them at a relative
* disadvantage the next time.
*/
for (i = 0; i < GROUP_SIZE; ++i)
- if (entry != &group[i])
+ if (entry != &group->entries[i])
let_entry_age(cache, entry);
drop_entry(cache, entry);
}
- /* initialize entry for the new key */
- memcpy(entry->key, to_find, KEY_SIZE);
+ /* initialize entry for the new key
+ */
+ entry = &group->entries[group->used];
+ entry->key[0] = to_find[0];
+ entry->key[1] = to_find[1];
}
return entry;
@@ -811,7 +1006,7 @@ move_entry(svn_membuffer_t *cache, entry_t *entry)
/* Move the entry to the start of the empty / insertion section
* (if it isn't there already). Size-aligned moves are legal
- * since all offsets and block sizes share this same aligment.
+ * since all offsets and block sizes share this same alignment.
* Size-aligned moves tend to be faster than non-aligned ones
* because no "odd" bytes at the end need to special treatment.
*/
@@ -827,6 +1022,11 @@ move_entry(svn_membuffer_t *cache, entry_t *entry)
*/
cache->current_data = entry->offset + size;
cache->next = entry->next;
+
+ /* The current insertion position must never point outside our
+ * data buffer.
+ */
+ assert(cache->current_data <= cache->data_size);
}
/* If necessary, enlarge the insertion window until it is at least
@@ -868,7 +1068,7 @@ ensure_data_insertable(svn_membuffer_t *cache, apr_size_t size)
/* leave function as soon as the insertion window is large enough
*/
- if (end - cache->current_data >= size)
+ if (end >= size + cache->current_data)
return TRUE;
/* Don't be too eager to cache data. Smaller items will fit into
@@ -910,22 +1110,38 @@ ensure_data_insertable(svn_membuffer_t *cache, apr_size_t size)
}
else
{
- /* Roll the dice and determine a threshold somewhere from 0 up
- * to 2 times the average hit count.
- */
- average_hit_value = cache->hit_count / cache->used_entries;
- threshold = (average_hit_value+1) * (rand() % 4096) / 2048;
+ svn_boolean_t keep;
- /* Drop the entry from the end of the insertion window, if it
- * has been hit less than the threshold. Otherwise, keep it and
- * move the insertion window one entry further.
- */
- if (entry->hit_count >= threshold)
+ if (cache->hit_count > cache->used_entries)
+ {
+ /* Roll the dice and determine a threshold somewhere from 0 up
+ * to 2 times the average hit count.
+ */
+ average_hit_value = cache->hit_count / cache->used_entries;
+ threshold = (average_hit_value+1) * (rand() % 4096) / 2048;
+
+ keep = entry->hit_count >= threshold;
+ }
+ else
+ {
+ /* general hit count is low. Keep everything that got hit
+ * at all and assign some 50% survival chance to everything
+ * else.
+ */
+ keep = (entry->hit_count > 0) || (rand() & 1);
+ }
+
+ /* keepers or destroyers? */
+ if (keep)
{
move_entry(cache, entry);
}
else
{
+ /* Drop the entry from the end of the insertion window, if it
+ * has been hit less than the threshold. Otherwise, keep it and
+ * move the insertion window one entry further.
+ */
drop_size += entry->size;
drop_entry(cache, entry);
}
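/* Illustrative arithmetic (not part of this change): in the first branch
 * above, the threshold is drawn uniformly from about 0 .. 2*(average+1):
 *
 *   threshold = (average_hit_value + 1) * (rand() % 4096) / 2048;
 *
 * so an entry hit about as often as the average survives roughly every
 * second time, while rarely hit entries are dropped with correspondingly
 * higher probability.
 */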
@@ -959,55 +1175,82 @@ static void* secure_aligned_alloc(apr_pool_t *pool,
return memory;
}
-/* Create a new membuffer cache instance. If the TOTAL_SIZE of the
- * memory i too small to accomodate the DICTIONARY_SIZE, the latte
- * will be resized automatically. Also, a minumum size is assured
- * for the DICTIONARY_SIZE. THREAD_SAFE may be FALSE, if there will
- * be no concurrent acccess to the CACHE returned.
- *
- * All allocations, in particular the data buffer and dictionary will
- * be made from POOL.
- */
svn_error_t *
svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
apr_size_t total_size,
apr_size_t directory_size,
+ apr_size_t segment_count,
svn_boolean_t thread_safe,
+ svn_boolean_t allow_blocking_writes,
apr_pool_t *pool)
{
svn_membuffer_t *c;
- apr_uint32_t segment_count_shift = 0;
- apr_uint32_t segment_count = 1;
-
apr_uint32_t seg;
apr_uint32_t group_count;
apr_uint32_t group_init_size;
apr_uint64_t data_size;
+ apr_uint64_t max_entry_size;
- /* Determine a reasonable number of cache segments. Segmentation is
- * only useful for multi-threaded / multi-core servers as it reduces
- * lock contention on these systems.
- *
- * But on these systems, we can assume that ample memory has been
- * allocated to this cache. Smaller caches should not be segmented
- * as this severely limites the maximum size of cachable items.
- *
- * Segments should not be smaller than 32MB and max. cachable item
- * size should grow as fast as segmentation.
+ /* Limit the total size (only relevant if we can address > 4GB)
*/
- while (((2 * MIN_SEGMENT_SIZE) << (2 * segment_count_shift)) < total_size)
- ++segment_count_shift;
+#if APR_SIZEOF_VOIDP > 4
+ if (total_size > MAX_SEGMENT_SIZE * MAX_SEGMENT_COUNT)
+ total_size = MAX_SEGMENT_SIZE * MAX_SEGMENT_COUNT;
+#endif
- segment_count = 1 << segment_count_shift;
+ /* Limit the segment count
+ */
+ if (segment_count > MAX_SEGMENT_COUNT)
+ segment_count = MAX_SEGMENT_COUNT;
+ if (segment_count * MIN_SEGMENT_SIZE > total_size)
+ segment_count = total_size / MIN_SEGMENT_SIZE;
+
+ /* The segment count must be a power of two. Round it down as necessary.
+ */
+ while ((segment_count & (segment_count-1)) != 0)
+ segment_count &= segment_count-1;
+
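/* Illustrative example (not part of this change): the loop above clears
 * the lowest set bit until a single bit remains, e.g.
 *
 *   12 (1100b) -> 8 (1000b),   7 (0111b) -> 6 -> 4,   5 (0101b) -> 4
 *
 * so segment_count becomes the largest power of two not exceeding the
 * requested value.
 */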
+ /* if the caller hasn't provided a reasonable segment count or the above
+ * limitations set it to 0, derive one from the absolute cache size
+ */
+ if (segment_count < 1)
+ {
+ /* Determine a reasonable number of cache segments. Segmentation is
+ * only useful for multi-threaded / multi-core servers as it reduces
+ * lock contention on these systems.
+ *
+ * But on these systems, we can assume that ample memory has been
+ * allocated to this cache. Smaller caches should not be segmented
+ * as this severely limits the maximum size of cachable items.
+ *
+ * Segments should not be smaller than 32MB and max. cachable item
+ * size should grow as fast as segmentation.
+ */
+
+ apr_uint32_t segment_count_shift = 0;
+ while (((2 * DEFAULT_MIN_SEGMENT_SIZE) << (2 * segment_count_shift))
+ < total_size)
+ ++segment_count_shift;
+
+ segment_count = (apr_size_t)1 << segment_count_shift;
+ }
+
+ /* If we have an extremely large cache (>512 GB), the default segment
+ * size may exceed the amount allocatable as one chunk. In that case,
+ * increase segmentation until we are under the threshold.
+ */
+ while ( total_size / segment_count > MAX_SEGMENT_SIZE
+ && segment_count < MAX_SEGMENT_COUNT)
+ segment_count *= 2;
/* allocate cache as an array of segments / cache objects */
c = apr_palloc(pool, segment_count * sizeof(*c));
/* Split total cache size into segments of equal size
*/
- total_size >>= segment_count_shift;
- directory_size >>= segment_count_shift;
+ total_size /= segment_count;
+ directory_size /= segment_count;
/* prevent pathological conditions: ensure a certain minimum cache size
*/
@@ -1024,24 +1267,36 @@ svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
/* limit the data size to what we can address.
* Note that this cannot overflow since all values are of size_t.
+ * Also, make it a multiple of the item placement granularity to
+ * prevent subtle overflows.
*/
- data_size = total_size - directory_size;
+ data_size = ALIGN_VALUE(total_size - directory_size + 1) - ITEM_ALIGNMENT;
- /* to keep the entries small, we use 32 bit indices only
- * -> we need to ensure that no more then 4G entries exist
+ /* For cache sizes > 4TB, individual cache segments will be larger
+ * than 16GB allowing for >4GB entries. But caching chunks larger
+ * than 4GB is simply not supported.
*/
- group_count = directory_size / sizeof(entry_group_t);
- if (group_count >= (APR_UINT32_MAX / GROUP_SIZE))
- {
- group_count = (APR_UINT32_MAX / GROUP_SIZE) - 1;
- }
+ max_entry_size = data_size / 4 > MAX_ITEM_SIZE
+ ? MAX_ITEM_SIZE
+ : data_size / 4;
+
+ /* to keep the entries small, we use 32 bit indexes only
+ * -> we need to ensure that no more than 4G entries exist.
+ *
+ * Note that this limit could only be exceeded in a very
+ * theoretical setup with about 1EB of cache.
+ */
+ group_count = directory_size / sizeof(entry_group_t)
+ >= (APR_UINT32_MAX / GROUP_SIZE)
+ ? (APR_UINT32_MAX / GROUP_SIZE) - 1
+ : (apr_uint32_t)(directory_size / sizeof(entry_group_t));
group_init_size = 1 + group_count / (8 * GROUP_INIT_GRANULARITY);
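/* Worked example (not part of this change), with illustrative numbers:
 * for a 64MB segment with a 4MB directory,
 *
 *   data_size      ~= 60MB
 *   max_entry_size  = MIN(data_size / 4, MAX_ITEM_SIZE) = 15MB
 *
 * i.e. far below the ~4GB MAX_ITEM_SIZE cap, which only takes effect for
 * segments larger than 16GB.
 */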
for (seg = 0; seg < segment_count; ++seg)
{
/* allocate buffers and initialize cache members
*/
- c[seg].segment_count = segment_count;
+ c[seg].segment_count = (apr_uint32_t)segment_count;
c[seg].group_count = group_count;
c[seg].directory = apr_pcalloc(pool,
@@ -1059,6 +1314,7 @@ svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
c[seg].data = secure_aligned_alloc(pool, (apr_size_t)data_size, FALSE);
c[seg].current_data = 0;
c[seg].data_used = 0;
+ c[seg].max_entry_size = max_entry_size;
c[seg].used_entries = 0;
c[seg].hit_count = 0;
@@ -1073,27 +1329,34 @@ svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
{
/* We are OOM. There is no need to proceed with "half a cache".
*/
- return svn_error_wrap_apr(APR_ENOMEM, _("OOM"));
+ return svn_error_wrap_apr(APR_ENOMEM, "OOM");
}
#if APR_HAS_THREADS
/* A lock for intra-process synchronization to the cache, or NULL if
* the cache's creator doesn't feel the cache needs to be
- * thread-safe. */
+ * thread-safe.
+ */
+# if USE_SIMPLE_MUTEX
+
+ SVN_ERR(svn_mutex__init(&c[seg].lock, thread_safe, pool));
+
+# else
- c[seg].mutex = NULL;
+ c[seg].lock = NULL;
if (thread_safe)
{
apr_status_t status =
- apr_thread_mutex_create(&(c[seg].mutex),
- APR_THREAD_MUTEX_DEFAULT,
- pool);
+ apr_thread_rwlock_create(&(c[seg].lock), pool);
if (status)
return svn_error_wrap_apr(status, _("Can't create cache mutex"));
}
-#else
- if (thread_safe)
- return svn_error_wrap_apr(APR_ENOTIMPL, _("APR doesn't support threads"));
+
+# endif
+
+ /* Select the behavior of write operations.
+ */
+ c[seg].allow_blocking_writes = allow_blocking_writes;
#endif
}
@@ -1103,55 +1366,101 @@ svn_cache__membuffer_cache_create(svn_membuffer_t **cache,
return SVN_NO_ERROR;
}
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND and set *FOUND accordingly.
+ *
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call entry_exists instead.
+ */
+static svn_error_t *
+entry_exists_internal(svn_membuffer_t *cache,
+ apr_uint32_t group_index,
+ entry_key_t to_find,
+ svn_boolean_t *found)
+{
+ *found = find_entry(cache, group_index, to_find, FALSE) != NULL;
+ return SVN_NO_ERROR;
+}
+
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND and set *FOUND accordingly.
+ */
+static svn_error_t *
+entry_exists(svn_membuffer_t *cache,
+ apr_uint32_t group_index,
+ entry_key_t to_find,
+ svn_boolean_t *found)
+{
+ WITH_READ_LOCK(cache,
+ entry_exists_internal(cache,
+ group_index,
+ to_find,
+ found));
-/* Try to insert the ITEM and use the KEY to unqiuely identify it.
+ return SVN_NO_ERROR;
+}
+
+
+/* Try to insert the serialized item given in BUFFER with SIZE into
+ * the group GROUP_INDEX of CACHE and uniquely identify it by hash
+ * value TO_FIND.
+ *
* However, there is no guarantee that it will actually be put into
- * the cache. If there is already some data associated to the KEY,
+ * the cache. If there is already some data associated with TO_FIND,
* it will be removed from the cache even if the new data cannot
* be inserted.
*
- * The SERIALIZER is called to transform the ITEM into a single,
- * flat data buffer. Temporary allocations may be done in POOL.
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_set instead.
*/
static svn_error_t *
-membuffer_cache_set(svn_membuffer_t *cache,
- const void *key,
- apr_size_t key_len,
- void *item,
- svn_cache__serialize_func_t serializer,
- DEBUG_CACHE_MEMBUFFER_TAG_ARG
- apr_pool_t *scratch_pool)
+membuffer_cache_set_internal(svn_membuffer_t *cache,
+ entry_key_t to_find,
+ apr_uint32_t group_index,
+ char *buffer,
+ apr_size_t size,
+ DEBUG_CACHE_MEMBUFFER_TAG_ARG
+ apr_pool_t *scratch_pool)
{
- apr_uint32_t group_index;
- unsigned char to_find[KEY_SIZE];
- entry_t *entry;
- char *buffer;
- apr_size_t size;
+ /* first, look for a previous entry for the given key */
+ entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
- /* find the entry group that will hold the key.
- */
- group_index = get_group_index(&cache, key, key_len, to_find, scratch_pool);
- if (group_index == NO_INDEX)
- return SVN_NO_ERROR;
+ /* if there is an old version of that entry and the new data fits into
+ * the old spot, just re-use that space. */
+ if (entry && ALIGN_VALUE(entry->size) >= size && buffer)
+ {
+ /* Careful! We need to cast SIZE to the full width of CACHE->DATA_USED
+ * lest we run into trouble with 32 bit underflow *not* treated as a
+ * negative value.
+ */
+ cache->data_used += (apr_uint64_t)size - entry->size;
+ entry->size = size;
- /* Serialize data data.
- */
- if (item)
- SVN_ERR(serializer(&buffer, &size, item, scratch_pool));
+#ifdef SVN_DEBUG_CACHE_MEMBUFFER
- /* The actual cache data access needs to sync'ed
- */
- SVN_ERR(lock_cache(cache));
+ /* Remember original content, type and key (hashes)
+ */
+ SVN_ERR(store_content_part(tag, buffer, size, scratch_pool));
+ memcpy(&entry->tag, tag, sizeof(*tag));
+
+#endif
+
+ if (size)
+ memcpy(cache->data + entry->offset, buffer, size);
+
+ cache->total_writes++;
+ return SVN_NO_ERROR;
+ }
/* if necessary, enlarge the insertion window.
*/
- if ( item != NULL
- && cache->data_size / 4 > size
+ if ( buffer != NULL
+ && cache->max_entry_size >= size
&& ensure_data_insertable(cache, size))
{
/* Remove old data for this key, if that exists.
* Get an unused entry for the key and and initialize it with
- * the serialized item's (future) posion within data buffer.
+ * the serialized item's (future) position within data buffer.
*/
entry = find_entry(cache, group_index, to_find, TRUE);
entry->size = size;
@@ -1166,60 +1475,94 @@ membuffer_cache_set(svn_membuffer_t *cache,
#endif
+ /* Link the entry properly.
+ */
+ insert_entry(cache, entry);
+
/* Copy the serialized item data into the cache.
*/
if (size)
memcpy(cache->data + entry->offset, buffer, size);
- /* Link the entry properly.
- */
- insert_entry(cache, entry);
cache->total_writes++;
}
else
{
/* if there is already an entry for this key, drop it.
+ * Since ensure_data_insertable may have removed entries from
+ * ENTRY's group, re-do the lookup.
*/
- find_entry(cache, group_index, to_find, TRUE);
+ entry = find_entry(cache, group_index, to_find, FALSE);
+ if (entry)
+ drop_entry(cache, entry);
}
- /* done here -> unlock the cache
- */
- return unlock_cache(cache, SVN_NO_ERROR);
+ return SVN_NO_ERROR;
}
-/* Look for the *ITEM identified by KEY. If no item has been stored
- * for KEY, *ITEM will be NULL. Otherwise, the DESERIALIZER is called
- * re-construct the proper object from the serialized data.
- * Allocations will be done in POOL.
+/* Try to insert the ITEM and use the KEY to uniquely identify it.
+ * However, there is no guarantee that it will actually be put into
+ * the cache. If there is already some data associated to the KEY,
+ * it will be removed from the cache even if the new data cannot
+ * be inserted.
+ *
+ * The SERIALIZER is called to transform the ITEM into a single,
+ * flat data buffer. Temporary allocations may be done in POOL.
*/
static svn_error_t *
-membuffer_cache_get(svn_membuffer_t *cache,
- const void *key,
- apr_size_t key_len,
- void **item,
- svn_cache__deserialize_func_t deserializer,
+membuffer_cache_set(svn_membuffer_t *cache,
+ entry_key_t key,
+ void *item,
+ svn_cache__serialize_func_t serializer,
DEBUG_CACHE_MEMBUFFER_TAG_ARG
- apr_pool_t *result_pool)
+ apr_pool_t *scratch_pool)
{
apr_uint32_t group_index;
- unsigned char to_find[KEY_SIZE];
- entry_t *entry;
- char *buffer;
- apr_size_t size;
+ void *buffer = NULL;
+ apr_size_t size = 0;
/* find the entry group that will hold the key.
*/
- group_index = get_group_index(&cache, key, key_len, to_find, result_pool);
- if (group_index == NO_INDEX)
- {
- /* Some error occured, return "item not found".
- */
- *item = NULL;
- return SVN_NO_ERROR;
- }
+ group_index = get_group_index(&cache, key);
- SVN_ERR(lock_cache(cache));
+ /* Serialize the data.
+ */
+ if (item)
+ SVN_ERR(serializer(&buffer, &size, item, scratch_pool));
+
+ /* The actual cache data access needs to be sync'ed
+ */
+ WITH_WRITE_LOCK(cache,
+ membuffer_cache_set_internal(cache,
+ key,
+ group_index,
+ buffer,
+ size,
+ DEBUG_CACHE_MEMBUFFER_TAG
+ scratch_pool));
+ return SVN_NO_ERROR;
+}
+
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND. If no item has been stored for KEY,
+ * *BUFFER will be NULL. Otherwise, return a copy of the serialized
+ * data in *BUFFER and return its size in *ITEM_SIZE. Allocations will
+ * be done in POOL.
+ *
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_get instead.
+ */
+static svn_error_t *
+membuffer_cache_get_internal(svn_membuffer_t *cache,
+ apr_uint32_t group_index,
+ entry_key_t to_find,
+ char **buffer,
+ apr_size_t *item_size,
+ DEBUG_CACHE_MEMBUFFER_TAG_ARG
+ apr_pool_t *result_pool)
+{
+ entry_t *entry;
+ apr_size_t size;
/* The actual cache data access needs to sync'ed
*/
@@ -1229,13 +1572,15 @@ membuffer_cache_get(svn_membuffer_t *cache,
{
/* no such entry found.
*/
- *item = NULL;
- return unlock_cache(cache, SVN_NO_ERROR);
+ *buffer = NULL;
+ *item_size = 0;
+
+ return SVN_NO_ERROR;
}
size = ALIGN_VALUE(entry->size);
- buffer = ALIGN_POINTER(apr_palloc(result_pool, size + ITEM_ALIGNMENT-1));
- memcpy(buffer, (const char*)cache->data + entry->offset, size);
+ *buffer = ALIGN_POINTER(apr_palloc(result_pool, size + ITEM_ALIGNMENT-1));
+ memcpy(*buffer, (const char*)cache->data + entry->offset, size);
#ifdef SVN_DEBUG_CACHE_MEMBUFFER
@@ -1247,7 +1592,7 @@ membuffer_cache_get(svn_membuffer_t *cache,
/* Compare original content, type and key (hashes)
*/
- SVN_ERR(store_content_part(tag, buffer, entry->size, result_pool));
+ SVN_ERR(store_content_part(tag, *buffer, entry->size, result_pool));
SVN_ERR(assert_equal_tags(&entry->tag, tag));
#endif
@@ -1258,45 +1603,81 @@ membuffer_cache_get(svn_membuffer_t *cache,
cache->hit_count++;
cache->total_hits++;
- SVN_ERR(unlock_cache(cache, SVN_NO_ERROR));
+ *item_size = entry->size;
- /* re-construct the original data object from its serialized form.
- */
- return deserializer(item, buffer, entry->size, result_pool);
+ return SVN_NO_ERROR;
}
-/* Look for the cache entry identified by KEY and KEY_LEN. FOUND indicates
- * whether that entry exists. If not found, *ITEM will be NULL. Otherwise,
- * the DESERIALIZER is called with that entry and the BATON provided
- * and will extract the desired information. The result is set in *ITEM.
+/* Look for the *ITEM identified by KEY. If no item has been stored
+ * for KEY, *ITEM will be NULL. Otherwise, the DESERIALIZER is called
+ * to re-construct the proper object from the serialized data.
* Allocations will be done in POOL.
*/
static svn_error_t *
-membuffer_cache_get_partial(svn_membuffer_t *cache,
- const void *key,
- apr_size_t key_len,
- void **item,
- svn_boolean_t *found,
- svn_cache__partial_getter_func_t deserializer,
- void *baton,
- DEBUG_CACHE_MEMBUFFER_TAG_ARG
- apr_pool_t *result_pool)
+membuffer_cache_get(svn_membuffer_t *cache,
+ entry_key_t key,
+ void **item,
+ svn_cache__deserialize_func_t deserializer,
+ DEBUG_CACHE_MEMBUFFER_TAG_ARG
+ apr_pool_t *result_pool)
{
apr_uint32_t group_index;
- unsigned char to_find[KEY_SIZE];
- entry_t *entry;
- svn_error_t *err = SVN_NO_ERROR;
+ char *buffer;
+ apr_size_t size;
- group_index = get_group_index(&cache, key, key_len, to_find, result_pool);
+ /* find the entry group that will hold the key.
+ */
+ group_index = get_group_index(&cache, key);
+ WITH_READ_LOCK(cache,
+ membuffer_cache_get_internal(cache,
+ group_index,
+ key,
+ &buffer,
+ &size,
+ DEBUG_CACHE_MEMBUFFER_TAG
+ result_pool));
- SVN_ERR(lock_cache(cache));
+ /* re-construct the original data object from its serialized form.
+ */
+ if (buffer == NULL)
+ {
+ *item = NULL;
+ return SVN_NO_ERROR;
+ }
- entry = find_entry(cache, group_index, to_find, FALSE);
+ return deserializer(item, buffer, size, result_pool);
+}
+
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND. FOUND indicates whether that entry exists.
+ * If not found, *ITEM will be NULL.
+ *
+ * Otherwise, the DESERIALIZER is called with that entry and the BATON
+ * provided and will extract the desired information. The result is set
+ * in *ITEM. Allocations will be done in POOL.
+ *
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_get_partial instead.
+ */
+static svn_error_t *
+membuffer_cache_get_partial_internal(svn_membuffer_t *cache,
+ apr_uint32_t group_index,
+ entry_key_t to_find,
+ void **item,
+ svn_boolean_t *found,
+ svn_cache__partial_getter_func_t deserializer,
+ void *baton,
+ DEBUG_CACHE_MEMBUFFER_TAG_ARG
+ apr_pool_t *result_pool)
+{
+ entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
cache->total_reads++;
if (entry == NULL)
{
*item = NULL;
*found = FALSE;
+
+ return SVN_NO_ERROR;
}
else
{
@@ -1324,50 +1705,71 @@ membuffer_cache_get_partial(svn_membuffer_t *cache,
#endif
- err = deserializer(item,
- (const char*)cache->data + entry->offset,
- entry->size,
- baton,
- result_pool);
+ return deserializer(item,
+ (const char*)cache->data + entry->offset,
+ entry->size,
+ baton,
+ result_pool);
}
-
- /* done here -> unlock the cache
- */
- return unlock_cache(cache, err);
}
-/* Look for the cache entry identified by KEY and KEY_LEN. If no entry
- * has been found, the function returns without modifying the cache.
- * Otherwise, FUNC is called with that entry and the BATON provided
- * and may modify the cache entry. Allocations will be done in POOL.
+/* Look for the cache entry identified by KEY. FOUND indicates
+ * whether that entry exists. If not found, *ITEM will be NULL. Otherwise,
+ * the DESERIALIZER is called with that entry and the BATON provided
+ * and will extract the desired information. The result is set in *ITEM.
+ * Allocations will be done in POOL.
*/
static svn_error_t *
-membuffer_cache_set_partial(svn_membuffer_t *cache,
- const void *key,
- apr_size_t key_len,
- svn_cache__partial_setter_func_t func,
+membuffer_cache_get_partial(svn_membuffer_t *cache,
+ entry_key_t key,
+ void **item,
+ svn_boolean_t *found,
+ svn_cache__partial_getter_func_t deserializer,
void *baton,
DEBUG_CACHE_MEMBUFFER_TAG_ARG
- apr_pool_t *scratch_pool)
+ apr_pool_t *result_pool)
{
- apr_uint32_t group_index;
- unsigned char to_find[KEY_SIZE];
- entry_t *entry;
- svn_error_t *err = SVN_NO_ERROR;
+ apr_uint32_t group_index = get_group_index(&cache, key);
- /* cache item lookup
- */
- group_index = get_group_index(&cache, key, key_len, to_find, scratch_pool);
+ WITH_READ_LOCK(cache,
+ membuffer_cache_get_partial_internal
+ (cache, group_index, key, item, found,
+ deserializer, baton, DEBUG_CACHE_MEMBUFFER_TAG
+ result_pool));
- SVN_ERR(lock_cache(cache));
+ return SVN_NO_ERROR;
+}
- entry = find_entry(cache, group_index, to_find, FALSE);
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND. If no entry has been found, the function
+ * returns without modifying the cache.
+ *
+ * Otherwise, FUNC is called with that entry and the BATON provided
+ * and may modify the cache entry. Allocations will be done in POOL.
+ *
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_set_partial instead.
+ */
+static svn_error_t *
+membuffer_cache_set_partial_internal(svn_membuffer_t *cache,
+ apr_uint32_t group_index,
+ entry_key_t to_find,
+ svn_cache__partial_setter_func_t func,
+ void *baton,
+ DEBUG_CACHE_MEMBUFFER_TAG_ARG
+ apr_pool_t *scratch_pool)
+{
+ /* cache item lookup
+ */
+ entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
cache->total_reads++;
/* this function is a no-op if the item is not in cache
*/
if (entry != NULL)
{
+ svn_error_t *err;
+
/* access the serialized cache item */
char *data = (char*)cache->data + entry->offset;
char *orig_data = data;
@@ -1392,9 +1794,9 @@ membuffer_cache_set_partial(svn_membuffer_t *cache,
#endif
- /* modify it, preferrably in-situ.
+ /* modify it, preferably in-situ.
*/
- err = func(&data, &size, baton, scratch_pool);
+ err = func((void **)&data, &size, baton, scratch_pool);
if (err)
{
@@ -1414,11 +1816,12 @@ membuffer_cache_set_partial(svn_membuffer_t *cache,
/* Remove the old entry and try to make space for the new one.
*/
drop_entry(cache, entry);
- if ( (cache->data_size / 4 > size)
+ if ( (cache->max_entry_size >= size)
&& ensure_data_insertable(cache, size))
{
/* Write the new entry.
*/
+ entry = find_entry(cache, group_index, to_find, TRUE);
entry->size = size;
entry->offset = cache->current_data;
if (size)
@@ -1427,23 +1830,48 @@ membuffer_cache_set_partial(svn_membuffer_t *cache,
/* Link the entry properly.
*/
insert_entry(cache, entry);
+ }
+ }
#ifdef SVN_DEBUG_CACHE_MEMBUFFER
- /* Remember original content, type and key (hashes)
- */
- SVN_ERR(store_content_part(tag, data, size, scratch_pool));
- memcpy(&entry->tag, tag, sizeof(*tag));
+ /* Remember original content, type and key (hashes)
+ */
+ SVN_ERR(store_content_part(tag, data, size, scratch_pool));
+ memcpy(&entry->tag, tag, sizeof(*tag));
#endif
- }
- }
}
}
+ return SVN_NO_ERROR;
+}
+
+/* Look for the cache entry identified by KEY. If no entry
+ * has been found, the function returns without modifying the cache.
+ * Otherwise, FUNC is called with that entry and the BATON provided
+ * and may modify the cache entry. Allocations will be done in POOL.
+ */
+static svn_error_t *
+membuffer_cache_set_partial(svn_membuffer_t *cache,
+ entry_key_t key,
+ svn_cache__partial_setter_func_t func,
+ void *baton,
+ DEBUG_CACHE_MEMBUFFER_TAG_ARG
+ apr_pool_t *scratch_pool)
+{
+ /* cache item lookup
+ */
+ apr_uint32_t group_index = get_group_index(&cache, key);
+ WITH_WRITE_LOCK(cache,
+ membuffer_cache_set_partial_internal
+ (cache, group_index, key, func, baton,
+ DEBUG_CACHE_MEMBUFFER_TAG
+ scratch_pool));
+
+ /* done here -> the cache has already been unlocked by WITH_WRITE_LOCK
*/
- return unlock_cache(cache, err);
+ return SVN_NO_ERROR;
}
/* Implement the svn_cache__t interface on top of a shared membuffer cache.
@@ -1451,11 +1879,11 @@ membuffer_cache_set_partial(svn_membuffer_t *cache,
* Because membuffer caches tend to be very large, there will be rather few
* of them (usually only one). Thus, the same instance shall be used as the
* backend to many application-visible svn_cache__t instances. This should
- * also achive global resource usage fairness.
+ * also achieve global resource usage fairness.
*
- * To accomodate items from multiple resources, the individual keys must be
- * unique over all sources. This is achived by simply adding a prefix key
- * that unambigously identifies the item's context (e.g. path to the
+ * To accommodate items from multiple resources, the individual keys must be
+ * unique over all sources. This is achieved by simply adding a prefix key
+ * that unambiguously identifies the item's context (e.g. path to the
* respective repository). The prefix will be set upon construction of the
* svn_cache__t instance.
*/
@@ -1482,7 +1910,7 @@ typedef struct svn_membuffer_cache_t
* This makes (very likely) our keys different from all keys used
* by other svn_membuffer_cache_t instances.
*/
- unsigned char prefix [APR_MD5_DIGESTSIZE];
+ entry_key_t prefix;
/* A copy of the unmodified prefix. It is being used as a user-visible
* ID for this cache instance.
@@ -1494,6 +1922,10 @@ typedef struct svn_membuffer_cache_t
*/
apr_ssize_t key_len;
+ /* Temporary buffer containing the hash key for the current access
+ */
+ entry_key_t combined_key;
+
/* a pool for temporary allocations during get() and set()
*/
apr_pool_t *pool;
@@ -1503,6 +1935,9 @@ typedef struct svn_membuffer_cache_t
*/
int alloc_counter;
+ /* if enabled, this will serialize the access to this instance.
+ */
+ svn_mutex__t *mutex;
#ifdef SVN_DEBUG_CACHE_MEMBUFFER
/* Invariant tag info for all items stored by this cache instance.
@@ -1518,29 +1953,48 @@ typedef struct svn_membuffer_cache_t
#define ALLOCATIONS_PER_POOL_CLEAR 10
-/* Basically concatenate PREFIX and KEY and return the result in FULL_KEY.
- * Allocations will be made in POOL.
+/* Basically calculate a hash value for KEY of length KEY_LEN, combine it
+ * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
*/
static void
-combine_key(const void *prefix,
- apr_size_t prefix_len,
+combine_key(svn_membuffer_cache_t *cache,
const void *key,
- apr_ssize_t key_len,
- void **full_key,
- apr_size_t *full_key_len,
- apr_pool_t *pool)
+ apr_ssize_t key_len)
{
if (key_len == APR_HASH_KEY_STRING)
key_len = strlen((const char *) key);
- *full_key_len = prefix_len + key_len;
- *full_key = apr_palloc(pool, *full_key_len);
+ if (key_len < 16)
+ {
+ apr_uint32_t data[4] = { 0 };
+ memcpy(data, key, key_len);
+
+ svn__pseudo_md5_15((apr_uint32_t *)cache->combined_key, data);
+ }
+ else if (key_len < 32)
+ {
+ apr_uint32_t data[8] = { 0 };
+ memcpy(data, key, key_len);
+
+ svn__pseudo_md5_31((apr_uint32_t *)cache->combined_key, data);
+ }
+ else if (key_len < 64)
+ {
+ apr_uint32_t data[16] = { 0 };
+ memcpy(data, key, key_len);
+
+ svn__pseudo_md5_63((apr_uint32_t *)cache->combined_key, data);
+ }
+ else
+ {
+ apr_md5((unsigned char*)cache->combined_key, key, key_len);
+ }
- memcpy(*full_key, prefix, prefix_len);
- memcpy((char *)*full_key + prefix_len, key, key_len);
+ cache->combined_key[0] ^= cache->prefix[0];
+ cache->combined_key[1] ^= cache->prefix[1];
}
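/* Illustrative sketch (not part of this change; the helper below is
 * hypothetical): since the prefix is XORed into the fingerprint last,
 * two front-end caches sharing one membuffer but using different
 * prefixes produce different combined keys for the same user key.
 */
static svn_boolean_t
example_combined_keys_differ(svn_membuffer_cache_t *a,
                             svn_membuffer_cache_t *b,
                             const char *user_key)
{
  combine_key(a, user_key, APR_HASH_KEY_STRING);
  combine_key(b, user_key, APR_HASH_KEY_STRING);

  return a->combined_key[0] != b->combined_key[0]
      || a->combined_key[1] != b->combined_key[1];
}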
-/* Implement svn_cache__vtable_t.get
+/* Implement svn_cache__vtable_t.get (not thread-safe)
*/
static svn_error_t *
svn_membuffer_cache_get(void **value_p,
@@ -1551,47 +2005,36 @@ svn_membuffer_cache_get(void **value_p,
{
svn_membuffer_cache_t *cache = cache_void;
+ DEBUG_CACHE_MEMBUFFER_INIT_TAG
+
+ /* special case */
+ if (key == NULL)
+ {
+ *value_p = NULL;
+ *found = FALSE;
+
+ return SVN_NO_ERROR;
+ }
+
/* construct the full, i.e. globally unique, key by adding
* this cache instances' prefix
*/
- void *full_key;
- apr_size_t full_key_len;
-
- DEBUG_CACHE_MEMBUFFER_INIT_TAG
-
- combine_key(cache->prefix,
- sizeof(cache->prefix),
- key,
- cache->key_len,
- &full_key,
- &full_key_len,
- cache->pool);
+ combine_key(cache, key, cache->key_len);
/* Look the item up. */
SVN_ERR(membuffer_cache_get(cache->membuffer,
- full_key,
- full_key_len,
+ cache->combined_key,
value_p,
cache->deserializer,
DEBUG_CACHE_MEMBUFFER_TAG
result_pool));
- /* We don't need more the key anymore.
- * But since we allocate only small amounts of data per get() call and
- * apr_pool_clear is somewhat expensive, we clear it only now and then.
- */
- if (++cache->alloc_counter > ALLOCATIONS_PER_POOL_CLEAR)
- {
- apr_pool_clear(cache->pool);
- cache->alloc_counter = 0;
- }
-
/* return result */
*found = *value_p != NULL;
return SVN_NO_ERROR;
}
-/* Implement svn_cache__vtable_t.set
+/* Implement svn_cache__vtable_t.set (not thread-safe)
*/
static svn_error_t *
svn_membuffer_cache_set(void *cache_void,
@@ -1601,38 +2044,32 @@ svn_membuffer_cache_set(void *cache_void,
{
svn_membuffer_cache_t *cache = cache_void;
- void *full_key;
- apr_size_t full_key_len;
-
DEBUG_CACHE_MEMBUFFER_INIT_TAG
+ /* special case */
+ if (key == NULL)
+ return SVN_NO_ERROR;
+
/* we do some allocations below, so increase the allocation counter
* by a slightly larger amount. Free allocated memory every now and then.
*/
cache->alloc_counter += 3;
if (cache->alloc_counter > ALLOCATIONS_PER_POOL_CLEAR)
{
- apr_pool_clear(cache->pool);
+ svn_pool_clear(cache->pool);
cache->alloc_counter = 0;
}
/* construct the full, i.e. globally unique, key by adding
* this cache instances' prefix
*/
- combine_key(cache->prefix,
- sizeof(cache->prefix),
- key,
- cache->key_len,
- &full_key,
- &full_key_len,
- cache->pool);
+ combine_key(cache, key, cache->key_len);
/* (probably) add the item to the cache. But there is no real guarantee
* that the item will actually be cached afterwards.
*/
return membuffer_cache_set(cache->membuffer,
- full_key,
- full_key_len,
+ cache->combined_key,
value,
cache->serializer,
DEBUG_CACHE_MEMBUFFER_TAG
@@ -1652,6 +2089,8 @@ svn_membuffer_cache_iter(svn_boolean_t *completed,
_("Can't iterate a membuffer-based cache"));
}
+/* Implement svn_cache__vtable_t.get_partial (not thread-safe)
+ */
static svn_error_t *
svn_membuffer_cache_get_partial(void **value_p,
svn_boolean_t *found,
@@ -1663,28 +2102,19 @@ svn_membuffer_cache_get_partial(void **value_p,
{
svn_membuffer_cache_t *cache = cache_void;
- void *full_key;
- apr_size_t full_key_len;
-
DEBUG_CACHE_MEMBUFFER_INIT_TAG
- if (++cache->alloc_counter > ALLOCATIONS_PER_POOL_CLEAR)
+ if (key == NULL)
{
- apr_pool_clear(cache->pool);
- cache->alloc_counter = 0;
- }
+ *value_p = NULL;
+ *found = FALSE;
- combine_key(cache->prefix,
- sizeof(cache->prefix),
- key,
- cache->key_len,
- &full_key,
- &full_key_len,
- cache->pool);
+ return SVN_NO_ERROR;
+ }
+ combine_key(cache, key, cache->key_len);
SVN_ERR(membuffer_cache_get_partial(cache->membuffer,
- full_key,
- full_key_len,
+ cache->combined_key,
value_p,
found,
func,
@@ -1695,6 +2125,8 @@ svn_membuffer_cache_get_partial(void **value_p,
return SVN_NO_ERROR;
}
+/* Implement svn_cache__vtable_t.set_partial (not thread-safe)
+ */
static svn_error_t *
svn_membuffer_cache_set_partial(void *cache_void,
const void *key,
@@ -1704,30 +2136,24 @@ svn_membuffer_cache_set_partial(void *cache_void,
{
svn_membuffer_cache_t *cache = cache_void;
- void *full_key;
- apr_size_t full_key_len;
-
DEBUG_CACHE_MEMBUFFER_INIT_TAG
- combine_key(cache->prefix,
- sizeof(cache->prefix),
- key,
- cache->key_len,
- &full_key,
- &full_key_len,
- scratch_pool);
-
- SVN_ERR(membuffer_cache_set_partial(cache->membuffer,
- full_key,
- full_key_len,
- func,
- baton,
- DEBUG_CACHE_MEMBUFFER_TAG
- scratch_pool));
-
+ if (key != NULL)
+ {
+ combine_key(cache, key, cache->key_len);
+ SVN_ERR(membuffer_cache_set_partial(cache->membuffer,
+ cache->combined_key,
+ func,
+ baton,
+ DEBUG_CACHE_MEMBUFFER_TAG
+ scratch_pool));
+ }
return SVN_NO_ERROR;
}
+/* Implement svn_cache__vtable_t.is_cachable
+ * (thread-safe even without mutex)
+ */
static svn_boolean_t
svn_membuffer_cache_is_cachable(void *cache_void, apr_size_t size)
{
@@ -1736,10 +2162,29 @@ svn_membuffer_cache_is_cachable(void *cache_void, apr_size_t size)
* must be small enough to be stored in a 32 bit value.
*/
svn_membuffer_cache_t *cache = cache_void;
- return (size < cache->membuffer->data_size / 4)
- && (size < APR_UINT32_MAX - ITEM_ALIGNMENT);
+ return size <= cache->membuffer->max_entry_size;
}
+/* Add statistics of SEGMENT to INFO.
+ */
+static svn_error_t *
+svn_membuffer_get_segment_info(svn_membuffer_t *segment,
+ svn_cache__info_t *info)
+{
+ info->data_size += segment->data_size;
+ info->used_size += segment->data_used;
+ info->total_size += segment->data_size +
+ segment->group_count * GROUP_SIZE * sizeof(entry_t);
+
+ info->used_entries += segment->used_entries;
+ info->total_entries += segment->group_count * GROUP_SIZE;
+
+ return SVN_NO_ERROR;
+}
+
+/* Implement svn_cache__vtable_t.get_info
+ * (thread-safe even without mutex)
+ */
static svn_error_t *
svn_membuffer_cache_get_info(void *cache_void,
svn_cache__info_t *info,
@@ -1749,11 +2194,11 @@ svn_membuffer_cache_get_info(void *cache_void,
svn_membuffer_cache_t *cache = cache_void;
apr_uint32_t i;
- /* cache frontend specific data */
+ /* cache front-end specific data */
info->id = apr_pstrdup(result_pool, cache->full_prefix);
- /* collect info from shared cache backend */
+ /* collect info from shared cache back-end */
info->data_size = 0;
info->used_size = 0;
@@ -1765,25 +2210,15 @@ svn_membuffer_cache_get_info(void *cache_void,
for (i = 0; i < cache->membuffer->segment_count; ++i)
{
svn_membuffer_t *segment = cache->membuffer + i;
-
- SVN_ERR(lock_cache(segment));
-
- info->data_size += segment->data_size;
- info->used_size += segment->data_used;
- info->total_size += segment->data_size +
- segment->group_count * GROUP_SIZE * sizeof(entry_t);
-
- info->used_entries += segment->used_entries;
- info->total_entries += segment->group_count * GROUP_SIZE;
-
- SVN_ERR(unlock_cache(segment, SVN_NO_ERROR));
+ WITH_READ_LOCK(segment,
+ svn_membuffer_get_segment_info(segment, info));
}
return SVN_NO_ERROR;
}
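
After this change the per-segment accounting happens inside svn_membuffer_get_segment_info() under WITH_READ_LOCK, and get_info() merely loops over the segments. A caller that wants a quick utilization report only needs the aggregated fields seen in this hunk; the sketch below assumes the counters are integers wide enough to cast to unsigned long long and that svn_cache__info_t comes from private/svn_cache.h.

    /* Sketch only: dump the statistics collected by the get_info vtable
     * entry.  Needs <stdio.h> plus the svn_cache__info_t declaration; it
     * relies solely on the members used in the hunk above. */
    static void
    print_cache_info(const svn_cache__info_t *info)
    {
      printf("cache '%s':\n", info->id);
      printf("  data   : %llu of %llu bytes used\n",
             (unsigned long long)info->used_size,
             (unsigned long long)info->data_size);
      printf("  total  : %llu bytes incl. the entry directory\n",
             (unsigned long long)info->total_size);
      printf("  entries: %llu of %llu used\n",
             (unsigned long long)info->used_entries,
             (unsigned long long)info->total_entries);
    }
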
-/* the v-table for membuffer-based caches
+/* the v-table for membuffer-based caches (single-threaded access)
*/
static svn_cache__vtable_t membuffer_cache_vtable = {
svn_membuffer_cache_get,
@@ -1795,11 +2230,105 @@ static svn_cache__vtable_t membuffer_cache_vtable = {
svn_membuffer_cache_get_info
};
+/* Implement svn_cache__vtable_t.get and serialize all cache access.
+ */
+static svn_error_t *
+svn_membuffer_cache_get_synced(void **value_p,
+ svn_boolean_t *found,
+ void *cache_void,
+ const void *key,
+ apr_pool_t *result_pool)
+{
+ svn_membuffer_cache_t *cache = cache_void;
+ SVN_MUTEX__WITH_LOCK(cache->mutex,
+ svn_membuffer_cache_get(value_p,
+ found,
+ cache_void,
+ key,
+ result_pool));
+
+ return SVN_NO_ERROR;
+}
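
Each *_synced wrapper only serializes the call: it runs its unsynchronized counterpart under SVN_MUTEX__WITH_LOCK on the per-cache mutex. The macro's own definition is not part of this diff; the sketch below shows the generic "acquire, evaluate, release even on failure, propagate the error" pattern it stands for, using plain pthreads and a made-up DEMO_WITH_LOCK rather than the real private/svn_mutex.h API.

    /* Illustration only -- NOT the real SVN_MUTEX__WITH_LOCK.  A generic
     * "evaluate EXPR while holding MUTEX, then release it and propagate
     * any failure" wrapper, written against plain pthreads. */
    #include <pthread.h>

    typedef int demo_error_t;       /* 0 == success, akin to SVN_NO_ERROR */

    #define DEMO_WITH_LOCK(mutex, expr)         \
      do                                        \
        {                                       \
          demo_error_t demo_err__;              \
          pthread_mutex_lock(mutex);            \
          demo_err__ = (expr);                  \
          pthread_mutex_unlock(mutex);          \
          if (demo_err__)                       \
            return demo_err__;                  \
        }                                       \
      while (0)

    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int demo_counter;

    static demo_error_t
    demo_increment(void)
    {
      ++demo_counter;
      return 0;
    }

    /* Synced front-end, mirroring the svn_membuffer_cache_*_synced() shape. */
    static demo_error_t
    demo_increment_synced(void)
    {
      DEMO_WITH_LOCK(&demo_mutex, demo_increment());
      return 0;
    }

    int
    main(void)
    {
      return demo_increment_synced();
    }
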
+
+/* Implement svn_cache__vtable_t.set and serialize all cache access.
+ */
+static svn_error_t *
+svn_membuffer_cache_set_synced(void *cache_void,
+ const void *key,
+ void *value,
+ apr_pool_t *scratch_pool)
+{
+ svn_membuffer_cache_t *cache = cache_void;
+ SVN_MUTEX__WITH_LOCK(cache->mutex,
+ svn_membuffer_cache_set(cache_void,
+ key,
+ value,
+ scratch_pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* Implement svn_cache__vtable_t.get_partial and serialize all cache access.
+ */
+static svn_error_t *
+svn_membuffer_cache_get_partial_synced(void **value_p,
+ svn_boolean_t *found,
+ void *cache_void,
+ const void *key,
+ svn_cache__partial_getter_func_t func,
+ void *baton,
+ apr_pool_t *result_pool)
+{
+ svn_membuffer_cache_t *cache = cache_void;
+ SVN_MUTEX__WITH_LOCK(cache->mutex,
+ svn_membuffer_cache_get_partial(value_p,
+ found,
+ cache_void,
+ key,
+ func,
+ baton,
+ result_pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* Implement svn_cache__vtable_t.set_partial and serialize all cache access.
+ */
+static svn_error_t *
+svn_membuffer_cache_set_partial_synced(void *cache_void,
+ const void *key,
+ svn_cache__partial_setter_func_t func,
+ void *baton,
+ apr_pool_t *scratch_pool)
+{
+ svn_membuffer_cache_t *cache = cache_void;
+ SVN_MUTEX__WITH_LOCK(cache->mutex,
+ svn_membuffer_cache_set_partial(cache_void,
+ key,
+ func,
+ baton,
+ scratch_pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* the v-table for membuffer-based caches (with multi-threading support)
+ */
+static svn_cache__vtable_t membuffer_cache_synced_vtable = {
+ svn_membuffer_cache_get_synced,
+ svn_membuffer_cache_set_synced,
+ svn_membuffer_cache_iter, /* no sync required */
+ svn_membuffer_cache_is_cachable, /* no sync required */
+ svn_membuffer_cache_get_partial_synced,
+ svn_membuffer_cache_set_partial_synced,
+ svn_membuffer_cache_get_info /* no sync required */
+};
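
Whether a front-end gets the plain or the synced v-table is decided once, at creation time, via the new thread_safe flag on svn_cache__create_membuffer_cache() (its signature change and the vtable selection appear further down in this diff). The fragment below is a hedged usage sketch, not a complete translation unit: the leading arguments (result pointer, shared membuffer, serializer/deserializer) and the prefix string are assumptions taken from the surrounding API, not something this hunk spells out.

    /* Usage sketch (fragment): create a thread-safe front-end on an
     * existing shared membuffer.  `membuffer`, `my_serialize` and
     * `my_deserialize` are assumed to exist in the caller. */
    svn_cache__t *cache;

    SVN_ERR(svn_cache__create_membuffer_cache(&cache,
                                              membuffer,
                                              my_serialize,
                                              my_deserialize,
                                              APR_HASH_KEY_STRING, /* NUL-terminated keys */
                                              "rev-node-cache:",   /* made-up prefix */
                                              TRUE,                /* thread_safe */
                                              pool));
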
+
/* standard serialization function for svn_stringbuf_t items.
* Implements svn_cache__serialize_func_t.
*/
static svn_error_t *
-serialize_svn_stringbuf(char **buffer,
+serialize_svn_stringbuf(void **buffer,
apr_size_t *buffer_size,
void *item,
apr_pool_t *result_pool)
@@ -1817,12 +2346,14 @@ serialize_svn_stringbuf(char **buffer,
*/
static svn_error_t *
deserialize_svn_stringbuf(void **item,
- char *buffer,
+ void *buffer,
apr_size_t buffer_size,
apr_pool_t *result_pool)
{
- svn_string_t *value_str = apr_palloc(result_pool, sizeof(svn_string_t));
+ svn_stringbuf_t *value_str = apr_palloc(result_pool, sizeof(svn_stringbuf_t));
+ value_str->pool = result_pool;
+ value_str->blocksize = buffer_size;
value_str->data = buffer;
value_str->len = buffer_size-1;
*item = value_str;
@@ -1839,6 +2370,7 @@ svn_cache__create_membuffer_cache(svn_cache__t **cache_p,
svn_cache__deserialize_func_t deserializer,
apr_ssize_t klen,
const char *prefix,
+ svn_boolean_t thread_safe,
apr_pool_t *pool)
{
svn_checksum_t *checksum;
@@ -1862,6 +2394,8 @@ svn_cache__create_membuffer_cache(svn_cache__t **cache_p,
cache->pool = svn_pool_create(pool);
cache->alloc_counter = 0;
+ SVN_ERR(svn_mutex__init(&cache->mutex, thread_safe, pool));
+
/* for performance reasons, we don't actually store the full prefix but a
* hash value of it
*/
@@ -1882,7 +2416,8 @@ svn_cache__create_membuffer_cache(svn_cache__t **cache_p,
/* initialize the generic cache wrapper
*/
- wrapper->vtable = &membuffer_cache_vtable;
+ wrapper->vtable = thread_safe ? &membuffer_cache_synced_vtable
+ : &membuffer_cache_vtable;
wrapper->cache_internal = cache;
wrapper->error_handler = 0;
wrapper->error_baton = 0;