-rwxr-xr-x  Build-tools/Do-compile                   14
-rw-r--r--  Docs/manual.texi                         52
-rw-r--r--  client/mysqlshow.c                      171
-rw-r--r--  mysql-test/r/create.result                4
-rw-r--r--  mysql-test/t/create.test                 12
-rw-r--r--  scripts/make_binary_distribution.sh      55
-rw-r--r--  sql-bench/Comments/postgres.benchmark     1
-rw-r--r--  sql-bench/server-cfg.sh                   2
8 files changed, 251 insertions, 60 deletions
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index 3ef9ba614f9..a82d045f880 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -4,10 +4,10 @@ use Getopt::Long;
$opt_distribution=$opt_user=$opt_result=$opt_config_options=$opt_config_env="";
$opt_dbd_options=$opt_perl_options=$opt_suffix="";
$opt_tmp=$version_suffix="";
-$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=0;
+$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=$opt_no_strip=0;
$opt_innodb=$opt_bdb=0;
-GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution") || usage();
+GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution","no-strip") || usage();
usage() if ($opt_help || $opt_Information);
usage() if (!$opt_distribution);
@@ -19,7 +19,7 @@ if ($opt_innodb || $opt_bdb)
chomp($host=`hostname`);
$full_host_name=$host;
-print "$host: Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n" if ($opt_debug);
+info("Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n");
$connect_option= ($opt_tcpip ? "--host=$host" : "");
$host =~ /^([^.-]*)/;
$host=$1 . $opt_suffix;
@@ -146,10 +146,13 @@ if ($opt_stage <= 2)
#
if ($opt_stage <= 3)
{
+ my ($flags);
log_system("rm -fr mysql-3* mysql-4* $pwd/$host/*.tar.gz");
log_system("nm -n sql/mysqld | gzip -9 -v 2>&1 > sql/mysqld.sym.gz | cat");
- log_system("strip sql/mysqld extra/comp_err client/mysql sql/mysqld client/mysqlshow extra/replace isam/isamchk client/mysqladmin client/mysqldump extra/perror");
- check_system("scripts/make_binary_distribution $opt_tmp $opt_suffix",".tar.gz created");
+
+ $flags="";
+ $flags.="--no-strip" if ($opt_no_strip);
+ check_system("scripts/make_binary_distribution --tmp=$opt_tmp --suffix=$opt_suffix $flags",".tar.gz created");
safe_system("mv mysql*.tar.gz $pwd/$host");
safe_system("cp client/mysqladmin $pwd/$host/bin");
safe_system("$make clean") if ($opt_with_small_disk);
@@ -174,6 +177,7 @@ if ($opt_stage <= 4 && !$opt_no_test)
$tar_file =~ /(mysql-[^\/]*)\.tar/;
$ver=$1;
$test_dir="$pwd/$host/test/$ver";
+$ENV{"LD_LIBRARY_PATH"}= "$testdir/lib:" . $ENV{"LD_LIBRARY_PATH"};
if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
{
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 078c09aed12..eba352092a1 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -7472,6 +7472,9 @@ Configure @strong{MySQL} with the @code{--with-named-z-libs=no} option.
@node Solaris x86, SunOS, Solaris 2.7, Source install system issues
@subsection Solaris x86 Notes
+On Solaris 2.8 on x86, @strong{mysqld} will core dump if you run
+'strip' on it.
+
If you are using @code{gcc} or @code{egcs} on Solaris x86 and you
experience problems with core dumps under load, you should use the
following @code{configure} command:
@@ -7530,6 +7533,11 @@ Linux version that doesn't have @code{glibc2}, you must install
LinuxThreads before trying to compile @strong{MySQL}. You can get
LinuxThreads at @uref{http://www.mysql.com/Downloads/Linux}.
+@strong{NOTE:} We have seen some strange problems with Linux 2.2.14 and
+@strong{MySQL} on SMP systems; if you have an SMP system, we recommend
+that you upgrade to Linux 2.4 ASAP! Your system will be faster and more
+stable after doing this.
+
Note that @code{glibc} versions before and including Version 2.1.1 have
a fatal bug in @code{pthread_mutex_timedwait} handling, which is used
when you do @code{INSERT DELAYED}. We recommend you to not use
@@ -43627,15 +43635,15 @@ application. If you need speed, @strong{MySQL} is probably your best
choice. If you need some of the extra features that only @code{PostgreSQL}
can offer, you should use @code{PostgreSQL}.
-@cindex PostgreSQL/MySQL, goals
+@cindex PostgreSQL/MySQL, strategies
@menu
-* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development goals
+* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies
* MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL
* MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL
@end menu
@node MySQL-PostgreSQL goals, MySQL-PostgreSQL features, Compare PostgreSQL, Compare PostgreSQL
-@subsection MySQL and PostgreSQL development goals
+@subsection MySQL and PostgreSQL development strategies
When adding things to MySQL we take pride to do an optimal, definite
solution. The code should be so good that we shouldn't have any need to
@@ -43718,7 +43726,8 @@ you never have to run any cleanups on @code{MySQL}. PostgreSQL doesn't
yet support 24/7 systems because you have to run @code{vacuum()}
once in a while to reclaim space from @code{UPDATE} and @code{DELETE}
commands and to perform statistics analyzes that are critical to get
-good performance with PostgreSQL. On a busy system with lots of changes
+good performance with PostgreSQL. Vacuum is also needed after adding
+a lot of new rows to a table. On a busy system with lots of changes
vacuum must be run very frequently, in the worst cases even many times a
day. During the @code{vacuum()} run, which may take hours if the
database is big, the database is from a production standpoint
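The vacuum requirement described above is why the benchmark scripts (see the
sql-bench/server-cfg.sh change further down in this commit) now issue an
explicit "vacuum analyze" pass. As a minimal sketch, assuming a psql client
and placeholder database and table names, such a maintenance pass looks
roughly like:

  # illustrative only; "test" and "bench1" are placeholder names
  psql -d test -c 'vacuum analyze bench1'   # rebuild planner statistics for one table
  psql -d test -c 'vacuum'                  # reclaim space database-wide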
@@ -43809,7 +43818,7 @@ Tools to repair and optimize @strong{MyISAM} tables (the most common
physical corruption of a data file happens, usually from a hardware
failure. It allows a majority of the data to be recovered.
@item
-Upgrading @strong{MySQL} is painless. When you upgrading @strong{MySQL},
+Upgrading @strong{MySQL} is painless. When you are upgrading @strong{MySQL},
you don't need to dump/restore your data, as you have to do with most
PostgreSQL upgrades.
@end itemize
@@ -43907,7 +43916,7 @@ We have many times asked the PostgreSQL developers and some PostgreSQL
users to help us extend this benchmark to make the definitive benchmark
for databases, but unfortunately we haven't got any feedback for this.
-We, the @strong{MySQL} developers, has because of this spent a lot of
+We, the @strong{MySQL} developers, have, because of this, spent a lot of
hours to get maximum performance from PostgreSQL for the benchmarks, but
because we don't know PostgreSQL intimately we are sure that there are
things that we have missed. We have on the benchmark page documented
@@ -44002,8 +44011,8 @@ optimized indexes boost performance by some margin". Our benchmarks
clearly indicates that the difference in running a lot of selects on a
database with and without vacuum() can easily differ by a factor of 10.
@item
-The test results where also strange; The ASPAP3 test benchmark
-documentation mentions that the test does:
+The test results were also strange; the AS3AP test documentation
+mentions that the test does:
"selections, simple joins, projections, aggregates, one-tuple updates,
and bulk updates"
@@ -44031,13 +44040,13 @@ be regarded as fair play. They should have done two tests with and
without ODBC to provide the right facts (after having got experts to tune
all involved databases of course).
@item
-They refer to the TCP-C tests, but doesn't anywhere mention that the
-tests they did where not a true TCP-C test and they where not even
-allowed to call it a TCP-C test. A TCP-C test can only be conducted by
-the rules approved by the @uref{http://www.tpc.org,TCP-council}. Great
-Bridge didn't do that. By doing this they have both violated the TCP
+They refer to the TPC-C tests, but don't anywhere mention that the
+tests they did were not a true TPC-C test and they were not even
+allowed to call it a TPC-C test. A TPC-C test can only be conducted
+under the rules approved by the @uref{http://www.tpc.org,TPC-council}. Great
+Bridge didn't do that. By doing this they have both violated the TPC
trademark and miscredited their own benchmarks. The rules set by the
-TCP-council are very strict to ensure that no one can produce false
+TPC-council are very strict to ensure that no one can produce false
results or make unprovable statements. Apparently Great Bridge wasn't
interested in doing this.
@item
@@ -44054,7 +44063,7 @@ standard binary (used by 80% of our users), which was statically linked
with a fixed glibc library.
According to what we know, Great Bridge did nothing to ensure that the
-other databases was setup correctly to run good in their test
+other databases were set up correctly to run well in their test
environment. We are sure however that they didn't contact Oracle or
Microsoft to ask for their advice in this matter ;)
@item
@@ -44095,7 +44104,7 @@ The only benchmarks that exist today that anyone can download and run
against @strong{MySQL} and PostgreSQL are the MySQL benchmarks. We here
at @strong{MySQL} believe that open source databases should be tested
with open source tools! This is the only way to ensure that no one
-does tests that none can reproduce and use this to claim that a
+does tests that nobody can reproduce and use this to claim that a
database is better than another. Without knowing all the facts it's
impossible to answer the claims of the tester.
@@ -44110,8 +44119,9 @@ going!
For more information about our benchmarks suite see @xref{MySQL
Benchmarks}.
-We are working on even better benchmarks including much better
-documentation (the current is lacking).
+We are working on an even better benchmark suite, including much better
+documentation of what the individual tests really do and how to add more
+tests to the suite.
@cindex internals
@cindex threads
@@ -46347,8 +46357,10 @@ not yet 100% confident in this code.
@appendixsubsec Changes in release 3.23.39
@itemize @bullet
@item
-If one dropped and added an @code{auto_increment} column, the
-@code{auto_increment} value wasn't reset.
+If one dropped and added an @code{AUTO_INCREMENT} column, the
+@code{AUTO_INCREMENT} sequence wasn't reset.
+@item
+@code{CREATE ... SELECT} now creates non-unique indexes delayed.
@item
Fixed problem where @code{LOCK TABLES table_name READ} followed by
@code{FLUSH TABLES} put an exclusive lock on the table.
diff --git a/client/mysqlshow.c b/client/mysqlshow.c
index 8fffe02a52f..199318abc2f 100644
--- a/client/mysqlshow.c
+++ b/client/mysqlshow.c
@@ -16,7 +16,7 @@
/* Show databases, tables or columns */
-#define SHOW_VERSION "8.2"
+#define SHOW_VERSION "8.3"
#include <global.h>
#include <my_sys.h>
@@ -30,6 +30,7 @@
static my_string host=0,opt_password=0,user=0;
static my_bool opt_show_keys=0,opt_compress=0,opt_status=0;
+static uint opt_verbose=0;
static void get_options(int *argc,char ***argv);
static uint opt_mysql_port=0;
@@ -140,6 +141,7 @@ static struct option long_options[] =
#ifndef DONT_ALLOW_USER_CHANGE
{"user", required_argument, 0, 'u'},
#endif
+ {"verbose", no_argument, 0, 'v'},
{"version", no_argument, 0, 'V'},
{0, 0, 0, 0}
};
@@ -181,6 +183,8 @@ static void usage(void)
-u, --user=# user for login if not current user\n");
#endif
printf("\
+ -v, --verbose more verbose output; You can use this multiple times\n\
+ to get even more verbose output.\n\
-V, --version output version information and exit\n");
puts("\n\
@@ -200,7 +204,7 @@ get_options(int *argc,char ***argv)
int c,option_index;
my_bool tty_password=0;
- while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?VWi",long_options,
+ while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?vVWi",long_options,
&option_index)) != EOF)
{
switch(c) {
@@ -210,6 +214,9 @@ get_options(int *argc,char ***argv)
case 'c':
charsets_dir= optarg;
break;
+ case 'v':
+ opt_verbose++;
+ break;
case 'h':
host = optarg;
break;
@@ -277,10 +284,13 @@ static int
list_dbs(MYSQL *mysql,const char *wild)
{
const char *header;
- uint length;
+ uint length, counter = 0;
+ ulong rowcount = 0L;
+ char tables[NAME_LEN+1], rows[NAME_LEN+1];
+ char query[255];
MYSQL_FIELD *field;
MYSQL_RES *result;
- MYSQL_ROW row;
+ MYSQL_ROW row, trow, rrow;
if (!(result=mysql_list_dbs(mysql,wild)))
{
@@ -297,10 +307,79 @@ list_dbs(MYSQL *mysql,const char *wild)
if (length < field->max_length)
length=field->max_length;
- print_header(header,length,NullS);
+ if (!opt_verbose)
+ print_header(header,length,NullS);
+ else if (opt_verbose == 1)
+ print_header(header,length,"Tables",6,NullS);
+ else
+ print_header(header,length,"Tables",6,"Total Rows",12,NullS);
+
while ((row = mysql_fetch_row(result)))
- print_row(row[0],length,0);
- print_trailer(length,0);
+ {
+ counter++;
+
+ if (opt_verbose)
+ {
+ /*
+ * Original code by MG16373; Slightly modified by Monty.
+ * Print now the count of tables and rows for each database.
+ */
+
+ if (!(mysql_select_db(mysql,row[0])))
+ {
+ MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL);
+ if (mysql_affected_rows(mysql) > 0)
+ {
+ sprintf(tables,"%6lu",(ulong) mysql_affected_rows(mysql));
+ rowcount = 0;
+ if (opt_verbose > 1)
+ {
+ while ((trow = mysql_fetch_row(tresult)))
+ {
+ sprintf(query,"SELECT COUNT(*) FROM `%s`",trow[0]);
+ if (!(mysql_query(mysql,query)))
+ {
+ MYSQL_RES *rresult;
+ if ((rresult = mysql_store_result(mysql)))
+ {
+ rrow = mysql_fetch_row(rresult);
+ rowcount += (ulong) strtoull(rrow[0], (char**) 0, 10);
+ mysql_free_result(rresult);
+ }
+ }
+ }
+ sprintf(rows,"%12lu",rowcount);
+ }
+ }
+ else
+ {
+ sprintf(tables,"%6d",0);
+ sprintf(rows,"%12d",0);
+ }
+ mysql_free_result(tresult);
+ }
+ else
+ {
+ strmov(tables,"N/A");
+ strmov(rows,"N/A");
+ }
+ }
+
+ if (!opt_verbose)
+ print_row(row[0],length,0);
+ else if (opt_verbose == 1)
+ print_row(row[0],length,tables,6,NullS);
+ else
+ print_row(row[0],length,tables,6,rows,12,NullS);
+ }
+
+ print_trailer(length,
+ (opt_verbose > 0 ? 6 : 0),
+ (opt_verbose > 1 ? 12 :0),
+ 0);
+
+ if (counter && opt_verbose)
+ printf("%u row%s in set.\n",counter,(counter > 1) ? "s" : "");
mysql_free_result(result);
return 0;
}
@@ -310,10 +389,11 @@ static int
list_tables(MYSQL *mysql,const char *db,const char *table)
{
const char *header;
- uint head_length;
+ uint head_length, counter = 0;
+ char query[255], rows[64], fields[16];
MYSQL_FIELD *field;
MYSQL_RES *result;
- MYSQL_ROW row;
+ MYSQL_ROW row, rrow;
if (mysql_select_db(mysql,db))
{
@@ -338,14 +418,81 @@ list_tables(MYSQL *mysql,const char *db,const char *table)
if (head_length < field->max_length)
head_length=field->max_length;
- print_header(header,head_length,NullS);
+ if (!opt_verbose)
+ print_header(header,head_length,NullS);
+ else if (opt_verbose == 1)
+ print_header(header,head_length,"Columns",8,NullS);
+ else
+ print_header(header,head_length,"Columns",8, "Total Rows",10,NullS);
+
while ((row = mysql_fetch_row(result)))
- print_row(row[0],head_length,0);
- print_trailer(head_length,0);
+ {
+ /*
+ * Modified by MG16373
+ * Print now the count of rows for each table.
+ */
+ counter++;
+ if (opt_verbose > 0)
+ {
+ if (!(mysql_select_db(mysql,db)))
+ {
+ MYSQL_RES *rresult = mysql_list_fields(mysql,row[0],NULL);
+ ulong rowcount=0L;
+ if (!rresult)
+ {
+ strmov(fields,"N/A");
+ strmov(rows,"N/A");
+ }
+ else
+ {
+ sprintf(fields,"%8u",(uint) mysql_num_fields(rresult));
+ mysql_free_result(rresult);
+
+ if (opt_verbose > 1)
+ {
+ sprintf(query,"SELECT COUNT(*) FROM `%s`",row[0]);
+ if (!(mysql_query(mysql,query)))
+ {
+ if ((rresult = mysql_store_result(mysql)))
+ {
+ rrow = mysql_fetch_row(rresult);
+ rowcount += (unsigned long) strtoull(rrow[0], (char**) 0, 10);
+ mysql_free_result(rresult);
+ }
+ sprintf(rows,"%10lu",rowcount);
+ }
+ else
+ sprintf(rows,"%10d",0);
+ }
+ }
+ }
+ else
+ {
+ strmov(fields,"N/A");
+ strmov(rows,"N/A");
+ }
+ }
+ if (!opt_verbose)
+ print_row(row[0],head_length,NullS);
+ else if (opt_verbose == 1)
+ print_row(row[0],head_length, fields,8, NullS);
+ else
+ print_row(row[0],head_length, fields,8, rows,10, NullS);
+ }
+
+ print_trailer(head_length,
+ (opt_verbose > 0 ? 8 : 0),
+ (opt_verbose > 1 ? 10 :0),
+ 0);
+
+ if (counter && opt_verbose)
+ printf("%u row%s in set.\n\n",counter,(counter > 1) ? "s" : "");
+
mysql_free_result(result);
return 0;
}
+
static int
list_table_status(MYSQL *mysql,const char *db,const char *wild)
{
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index f32c9b0bc80..7940d51868a 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -8,3 +8,7 @@ b
1 10000000001
a$1 $b c$
1 2 3
+table type possible_keys key key_len ref rows Extra
+t2 ref B B 21 const 1 where used
+a B
+3 world
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index a5224cd0318..d45d013c9fb 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -2,6 +2,7 @@
# Check some special create statements.
#
+drop table if exists t1,t2;
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@@ -57,3 +58,14 @@ select a$1, $b, c$ from test_$1.$test1;
create table test_$1.test2$ (a int);
drop table test_$1.test2$;
drop database test_$1;
+
+#
+# Test of CREATE ... SELECT with indexes
+#
+
+create table t1 (a int auto_increment not null primary key, B CHAR(20));
+insert into t1 (b) values ("hello"),("my"),("world");
+create table t2 (key (b)) select * from t1;
+explain select * from t2 where b="world";
+select * from t2 where b="world";
+drop table t1,t2;
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 84dac59018b..40c48188346 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -11,30 +11,29 @@ export machine system version
SOURCE=`pwd`
CP="cp -p"
-# Debug option must come first
+STRIP=1
DEBUG=0
-if test x$1 = x"--debug"
-then
- DEBUG=1
- shift 1
-fi
-
-# Save temporary distribution here (must be full path)
+SILENT=0
TMP=/tmp
-if test $# -gt 0
-then
- TMP=$1
- shift 1
-fi
-
-# Get optional suffix for distribution
SUFFIX=""
-if test $# -gt 0
-then
- SUFFIX=$1
- shift 1
-fi
+parse_arguments() {
+ for arg do
+ case "$arg" in
+ --debug) DEBUG=1;;
+ --tmp=*) TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;;
+ --suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;;
+ --no-strip) STRIP=0 ;;
+ --silent) SILENT=1 ;;
+ *)
+ echo "Unknown argument '$arg'"
+ exit 1
+ ;;
+ esac
+ done
+}
+
+parse_arguments "$@"
#make
@@ -68,14 +67,18 @@ for i in extra/comp_err extra/replace extra/perror extra/resolveip \
client/mysql sql/mysqld client/mysqlshow client/mysqlcheck \
client/mysqladmin client/mysqldump client/mysqlimport client/mysqltest \
client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \
- client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest
+ client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest \
+ client/.libs/mysqlcheck
do
if [ -f $i ]
then
$CP $i $BASE/bin
fi
done
-strip $BASE/bin/*
+
+if [ x$STRIP = x1 ] ; then
+ strip $BASE/bin/*
+fi
for i in sql/mysqld.sym.gz
do
@@ -190,7 +193,13 @@ fi
echo "Using $tar to create archive"
cd $TMP
-$tar cvf $SOURCE/$NEW_NAME.tar $NEW_NAME
+
+OPT=cvf
+if [ x$SILENT = x1 ] ; then
+ OPT=cf
+fi
+
+$tar $OPT $SOURCE/$NEW_NAME.tar $NEW_NAME
cd $SOURCE
echo "Compressing archive"
gzip -9 $NEW_NAME.tar
diff --git a/sql-bench/Comments/postgres.benchmark b/sql-bench/Comments/postgres.benchmark
index 6fadf20755e..b25a9931f9e 100644
--- a/sql-bench/Comments/postgres.benchmark
+++ b/sql-bench/Comments/postgres.benchmark
@@ -84,6 +84,7 @@ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --u
# the database between each major update of the tables:
# vacuum table
# or
+# vacuum analyze
# vacuum
# The time for vacuum() is accounted for in the book-keeping() column, not
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index 86b891d8856..a8d992bfdce 100644
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -811,6 +811,7 @@ sub vacuum
{
foreach $table (@tables)
{
+ $dbh->do("vacuum analyze $table") || die "Got error: $DBI::errstr when executing 'vacuum analyze $table'\n";
$dbh->do("vacuum $table") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
}
}
@@ -818,6 +819,7 @@ sub vacuum
{
# $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
# $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
+ $dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum analyze'\n";
$dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
}
$end_time=new Benchmark;