diff options
author | Ramon Fernandez <ramon@mongodb.com> | 2016-08-26 18:28:48 -0400 |
---|---|---|
committer | Ramon Fernandez <ramon@mongodb.com> | 2016-08-26 18:28:48 -0400 |
commit | f2a613a41d6ad7b5a1b66087e386380d38e50599 (patch) | |
tree | 4843fb7b6a835e72046142046e9364f7d7dda992 /src/third_party/wiredtiger/src/conn/conn_cache_pool.c | |
parent | 7614c0eb2449eb4ec22d21b677177124d61f1888 (diff) | |
download | mongo-f2a613a41d6ad7b5a1b66087e386380d38e50599.tar.gz |
Import wiredtiger: 2566118fc68b0124187e806bed52eb7cdbcb1be0 from branch mongodb-3.4
ref: 34182ad..2566118fc6
for: 3.3.12
WT-2631 nullptr is passed for parameters marked with attribute non-null
WT-2638 ftruncate may not be supported
WT-2645 wt dump: push the complexity of collecting metadata into a dump cursor
WT-2678 The metadata should not imply that an empty value is true
WT-2695 Integrate s390x accelerated crc32c support
WT-2719 add fuzz testing for WiredTiger options and reconfiguration.
WT-2734 Improve documentation of eviction behavior
WT-2766 Don't count eviction of lookaside file pages for the purpose of checking stuck cache
WT-2783 wtperf multi-btree.wtperf dumps core on Mac
WT-2787 Include src/include/wiredtiger_ext.h is problematic
WT-2795 Update documentation around read-only configuration
WT-2807 Switch Jenkins performance tests to tcmalloc
WT-2813 small cache usage stuck even with large cache
WT-2814 Enhance wtperf to support single-op truncate mode
WT-2816 Improve WiredTiger eviction performance
WT-2817 Investigate performance regression in develop, add workload to wtperf/runners
WT-2818 The page visibility check when queuing pages for eviction is overly restrictive
WT-2820 add gcc warn_unused_result attribute
WT-2822 panic mutex and other functions that cannot fail
WT-2823 support file handles without a truncate method
WT-2826 clang38 false positive on uninitialized variable.
WT-2827 checkpoint log_size configuration improvements
WT-2828 Make long wtperf tests reflect mongoDB usage
WT-2829 Switch automated testing to use enable-strict configure option
WT-2832 Python test uses hard-coded temporary directory
WT-2834 Join cursor: discrepancy with bloom filters
WT-2835 WT_CONNECTION.leak-memory can skip memory map and cache cleanup
WT-2838 Don't free session handles on close if leak memory is configured
WT-2839 lint: Ignoring return value of function
WT-2840 clang analysis: garbage values
WT-2841 Jenkins Valgrind runner is reporting errors in test wt2719_reconfig
WT-2843 Fix a bug in recovery if there is no filesystem truncate support
WT-2846 Several bugs related to reconfiguring eviction server at runtime
WT-2847 Merge fair locks into read/write locks.
WT-2850 clang 4.1 attribute warnings when building
WT-2853 Multi threaded reader writer example shows temporary slowdown or lockup
WT-2857 POSIX ftruncate calls should be #ifdef'd HAVE_FTRUNCATE
WT-2862 Fix lint error in test case for forced eviction with multiple cursors
WT-2863 Support UTF-8 paths on Windows
Diffstat (limited to 'src/third_party/wiredtiger/src/conn/conn_cache_pool.c')
-rw-r--r-- | src/third_party/wiredtiger/src/conn/conn_cache_pool.c | 68 |
1 files changed, 32 insertions, 36 deletions
diff --git a/src/third_party/wiredtiger/src/conn/conn_cache_pool.c b/src/third_party/wiredtiger/src/conn/conn_cache_pool.c index 75ecb6b3b4a..15517f37b6a 100644 --- a/src/third_party/wiredtiger/src/conn/conn_cache_pool.c +++ b/src/third_party/wiredtiger/src/conn/conn_cache_pool.c @@ -108,8 +108,8 @@ __wt_cache_pool_config(WT_SESSION_IMPL *session, const char **cfg) "cache pool server", false, &cp->cache_pool_cond)); __wt_process.cache_pool = cp; - WT_ERR(__wt_verbose(session, - WT_VERB_SHARED_CACHE, "Created cache pool %s", cp->name)); + __wt_verbose(session, + WT_VERB_SHARED_CACHE, "Created cache pool %s", cp->name); } else if (!updating && strcmp(__wt_process.cache_pool->name, pool_name) != 0) /* Only a single cache pool is supported. */ @@ -212,12 +212,12 @@ __wt_cache_pool_config(WT_SESSION_IMPL *session, const char **cfg) /* Wake up the cache pool server so any changes are noticed. */ if (updating) - WT_ERR(__wt_cond_signal( - session, __wt_process.cache_pool->cache_pool_cond)); + __wt_cond_signal( + session, __wt_process.cache_pool->cache_pool_cond); - WT_ERR(__wt_verbose(session, WT_VERB_SHARED_CACHE, + __wt_verbose(session, WT_VERB_SHARED_CACHE, "Configured cache pool %s. Size: %" PRIu64 - ", chunk size: %" PRIu64, cp->name, cp->size, cp->chunk)); + ", chunk size: %" PRIu64, cp->name, cp->size, cp->chunk); F_SET(conn, WT_CONN_CACHE_POOL); err: __wt_spin_unlock(session, &__wt_process.spinlock); @@ -267,8 +267,8 @@ __wt_conn_cache_pool_open(WT_SESSION_IMPL *session) TAILQ_INSERT_TAIL(&cp->cache_pool_qh, conn, cpq); __wt_spin_unlock(session, &cp->cache_pool_lock); - WT_RET(__wt_verbose(session, WT_VERB_SHARED_CACHE, - "Added %s to cache pool %s", conn->home, cp->name)); + __wt_verbose(session, WT_VERB_SHARED_CACHE, + "Added %s to cache pool %s", conn->home, cp->name); /* * Each connection participating in the cache pool starts a manager @@ -282,7 +282,7 @@ __wt_conn_cache_pool_open(WT_SESSION_IMPL *session) __wt_cache_pool_server, cache->cp_session)); /* Wake up the cache pool server to get our initial chunk. */ - WT_RET(__wt_cond_signal(session, cp->cache_pool_cond)); + __wt_cond_signal(session, cp->cache_pool_cond); return (0); } @@ -324,8 +324,8 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session) * queue. We did increment the reference count, so proceed regardless. */ if (found) { - WT_TRET(__wt_verbose(session, WT_VERB_SHARED_CACHE, - "Removing %s from cache pool", entry->home)); + __wt_verbose(session, WT_VERB_SHARED_CACHE, + "Removing %s from cache pool", entry->home); TAILQ_REMOVE(&cp->cache_pool_qh, entry, cpq); /* Give the connection's resources back to the pool. */ @@ -341,7 +341,7 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session) cp_locked = false; F_CLR(cache, WT_CACHE_POOL_RUN); - WT_TRET(__wt_cond_signal(session, cp->cache_pool_cond)); + __wt_cond_signal(session, cp->cache_pool_cond); WT_TRET(__wt_thread_join(session, cache->cp_tid)); wt_session = &cache->cp_session->iface; @@ -372,8 +372,8 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session) } if (!F_ISSET(cp, WT_CACHE_POOL_ACTIVE)) { - WT_TRET(__wt_verbose( - session, WT_VERB_SHARED_CACHE, "Destroying cache pool")); + __wt_verbose( + session, WT_VERB_SHARED_CACHE, "Destroying cache pool"); __wt_spin_lock(session, &__wt_process.spinlock); /* * We have been holding the pool lock - no connections could @@ -401,8 +401,8 @@ __wt_conn_cache_pool_destroy(WT_SESSION_IMPL *session) /* Notify other participants if we were managing */ if (F_ISSET(cache, WT_CACHE_POOL_MANAGER)) { cp->pool_managed = 0; - WT_TRET(__wt_verbose(session, WT_VERB_SHARED_CACHE, - "Shutting down shared cache manager connection")); + __wt_verbose(session, WT_VERB_SHARED_CACHE, + "Shutting down shared cache manager connection"); } } @@ -538,14 +538,14 @@ __cache_pool_assess(WT_SESSION_IMPL *session, uint64_t *phighest) if (cache->cp_pass_pressure > highest) highest = cache->cp_pass_pressure; - WT_RET(__wt_verbose(session, WT_VERB_SHARED_CACHE, + __wt_verbose(session, WT_VERB_SHARED_CACHE, "Assess entry. reads: %" PRIu64 ", app evicts: %" PRIu64 ", app waits: %" PRIu64 ", pressure: %" PRIu64, - reads, app_evicts, app_waits, cache->cp_pass_pressure)); + reads, app_evicts, app_waits, cache->cp_pass_pressure); } - WT_RET(__wt_verbose(session, WT_VERB_SHARED_CACHE, + __wt_verbose(session, WT_VERB_SHARED_CACHE, "Highest eviction count: %" PRIu64 ", entries: %" PRIu64, - highest, entries)); + highest, entries); *phighest = highest; return (0); @@ -577,10 +577,10 @@ __cache_pool_adjust(WT_SESSION_IMPL *session, highest_percentile = (highest / 100) + 1; if (WT_VERBOSE_ISSET(session, WT_VERB_SHARED_CACHE)) { - WT_RET(__wt_verbose(session, - WT_VERB_SHARED_CACHE, "Cache pool distribution: ")); - WT_RET(__wt_verbose(session, WT_VERB_SHARED_CACHE, - "\t" "cache (MB), pressure, skips, busy, %% full:")); + __wt_verbose(session, + WT_VERB_SHARED_CACHE, "Cache pool distribution: "); + __wt_verbose(session, WT_VERB_SHARED_CACHE, + "\t" "cache (MB), pressure, skips, busy, %% full:"); } for (entry = forward ? TAILQ_FIRST(&cp->cache_pool_qh) : @@ -602,10 +602,10 @@ __cache_pool_adjust(WT_SESSION_IMPL *session, pressure = cache->cp_pass_pressure / highest_percentile; busy = __wt_eviction_needed(entry->default_session, &pct_full); - WT_RET(__wt_verbose(session, WT_VERB_SHARED_CACHE, + __wt_verbose(session, WT_VERB_SHARED_CACHE, "\t%5" PRIu64 ", %3" PRIu64 ", %2" PRIu32 ", %d, %2u", entry->cache_size >> 20, pressure, cache->cp_skip_count, - busy, pct_full)); + busy, pct_full); /* Allow to stabilize after changes. */ if (cache->cp_skip_count > 0 && --cache->cp_skip_count > 0) @@ -699,9 +699,9 @@ __cache_pool_adjust(WT_SESSION_IMPL *session, entry->cache_size -= adjustment; cp->currently_used -= adjustment; } - WT_RET(__wt_verbose(session, WT_VERB_SHARED_CACHE, + __wt_verbose(session, WT_VERB_SHARED_CACHE, "Allocated %s%" PRId64 " to %s", - grow ? "" : "-", adjustment, entry->home)); + grow ? "" : "-", adjustment, entry->home); /* * TODO: Add a loop waiting for connection to give up @@ -721,7 +721,6 @@ __wt_cache_pool_server(void *arg) { WT_CACHE *cache; WT_CACHE_POOL *cp; - WT_DECL_RET; WT_SESSION_IMPL *session; bool forward; @@ -734,8 +733,8 @@ __wt_cache_pool_server(void *arg) while (F_ISSET(cp, WT_CACHE_POOL_ACTIVE) && F_ISSET(cache, WT_CACHE_POOL_RUN)) { if (cp->currently_used <= cp->size) - WT_ERR(__wt_cond_wait(session, - cp->cache_pool_cond, WT_MILLION)); + __wt_cond_wait( + session, cp->cache_pool_cond, WT_MILLION); /* * Re-check pool run flag - since we want to avoid getting the @@ -748,8 +747,8 @@ __wt_cache_pool_server(void *arg) /* Try to become the managing thread */ if (__wt_atomic_cas8(&cp->pool_managed, 0, 1)) { F_SET(cache, WT_CACHE_POOL_MANAGER); - WT_ERR(__wt_verbose(session, WT_VERB_SHARED_CACHE, - "Cache pool switched manager thread")); + __wt_verbose(session, WT_VERB_SHARED_CACHE, + "Cache pool switched manager thread"); } /* @@ -762,8 +761,5 @@ __wt_cache_pool_server(void *arg) } } - if (0) { -err: WT_PANIC_MSG(session, ret, "cache pool manager server error"); - } return (WT_THREAD_RET_VALUE); } |