summary | refs | log | tree | commit | diff
path: root/sql/threadpool_unix.cc
diff options
context:
space:
mode:
author    Sergey Vojtovich <svoj@mariadb.org>  2013-11-05 09:18:59 +0400
committer Sergey Vojtovich <svoj@mariadb.org>  2013-11-05 09:18:59 +0400
commit    fd9f1638ea7401e9bc72653882fded4c0dee15d8 (patch)
tree      930887a8e4febd36cc7a2539a0c225376d0924d8 /sql/threadpool_unix.cc
parent    52dea41052bc1be1efbe59c320046d7e62cfd0a6 (diff)
download  mariadb-git-fd9f1638ea7401e9bc72653882fded4c0dee15d8.tar.gz
MDEV-5205 - MariaDB does not start if more than 128 cpu's are available
- thread_pool_size command line option upper limit increased to 100 000 (same as for max_connections)
- thread_pool_size system variable upper limit is maximum of 128 or the value given at command line
- thread groups are now allocated dynamically

Different limit for command line option and system variable was done to avoid additional mutex for all_groups and threadpool_max_size.
Diffstat (limited to 'sql/threadpool_unix.cc')
-rw-r--r--  sql/threadpool_unix.cc  22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/sql/threadpool_unix.cc b/sql/threadpool_unix.cc
index dc2d8d999ef..717fb468769 100644
--- a/sql/threadpool_unix.cc
+++ b/sql/threadpool_unix.cc
@@ -147,7 +147,7 @@ struct thread_group_t
} MY_ALIGNED(512);
-static thread_group_t all_groups[MAX_THREAD_GROUPS];
+static thread_group_t *all_groups;
static uint group_count;
/**
@@ -517,7 +517,7 @@ static void* timer_thread(void *param)
timer->current_microtime= microsecond_interval_timer();
/* Check stalls in thread groups */
- for(i=0; i< array_elements(all_groups);i++)
+ for (i= 0; i < threadpool_max_size; i++)
{
if(all_groups[i].connection_count)
check_stall(&all_groups[i]);
@@ -907,6 +907,7 @@ int thread_group_init(thread_group_t *thread_group, pthread_attr_t* thread_attr)
thread_group->pollfd= -1;
thread_group->shutdown_pipe[0]= -1;
thread_group->shutdown_pipe[1]= -1;
+ thread_group->queue.empty();
DBUG_RETURN(0);
}
@@ -1510,10 +1511,18 @@ static void *worker_main(void *param)
bool tp_init()
{
DBUG_ENTER("tp_init");
+ threadpool_max_size= max(threadpool_size, 128);
+ all_groups= (thread_group_t *)
+ my_malloc(sizeof(thread_group_t) * threadpool_max_size, MYF(MY_WME|MY_ZEROFILL));
+ if (!all_groups)
+ {
+ threadpool_max_size= 0;
+ DBUG_RETURN(1);
+ }
threadpool_started= true;
scheduler_init();
- for(uint i=0; i < array_elements(all_groups); i++)
+ for (uint i= 0; i < threadpool_max_size; i++)
{
thread_group_init(&all_groups[i], get_connection_attrib());
}
@@ -1542,10 +1551,11 @@ void tp_end()
DBUG_VOID_RETURN;
stop_timer(&pool_timer);
- for(uint i=0; i< array_elements(all_groups); i++)
+ for (uint i= 0; i < threadpool_max_size; i++)
{
thread_group_close(&all_groups[i]);
}
+ my_free(all_groups);
threadpool_started= false;
DBUG_VOID_RETURN;
}
@@ -1604,9 +1614,7 @@ void tp_set_threadpool_stall_limit(uint limit)
int tp_get_idle_thread_count()
{
int sum=0;
- for(uint i= 0;
- i< array_elements(all_groups) && (all_groups[i].pollfd >= 0);
- i++)
+ for (uint i= 0; i < threadpool_max_size && all_groups[i].pollfd >= 0; i++)
{
sum+= (all_groups[i].thread_count - all_groups[i].active_thread_count);
}