author     unknown <monty@hundin.mysql.fi>  2001-06-03 12:26:24 +0300
committer  unknown <monty@hundin.mysql.fi>  2001-06-03 12:26:24 +0300
commit     cf42a95562695b0596bdbbcc941e86f7eab6682c
tree       a8649f73d8dc4f4b298871c75089fd3e584adb6e
parent     1b4d4338d4b6697cb9958d4bc4903a395724b0e0
download   mariadb-git-cf42a95562695b0596bdbbcc941e86f7eab6682c.tar.gz
Added ABS() to make tests more portable.
New postgresql crash-me file.
Increased blob size in benchmarks from 65K to 1M.
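A note on why ABS() makes the test portable: SUM(price)/COUNT(price) and AVG(price) are mathematically identical, but they are computed through different code paths, so their difference is a tiny floating-point residue whose sign can vary between platforms and compilers. Taking ABS() of that residue keeps the recorded test output stable. The C sketch below reproduces the effect with two equivalent ways of computing a mean; the sample values and the running-mean formulation are illustrative assumptions, not taken from the MySQL sources.

#include <math.h>
#include <stdio.h>

int main(void)
{
  /* Illustrative prices only; any data with rounding error will do. */
  double price[] = {123456.789, 98765.4321, 55555.5555, 31415.9265};
  int n = sizeof(price) / sizeof(price[0]);

  double sum = 0.0;
  for (int i = 0; i < n; i++)
    sum += price[i];
  double mean_direct = sum / n;          /* like SUM(price)/COUNT(price) */

  double mean_running = 0.0;             /* like AVG(price): a different code path */
  for (int i = 0; i < n; i++)
    mean_running += (price[i] - mean_running) / (i + 1);

  double diff = mean_direct - mean_running;
  /* diff is a tiny residue; its sign depends on rounding, so a test that
     prints it raw is not portable, while fabs(diff) removes the sign
     dependence. */
  printf("raw diff:  %.17g\n", diff);
  printf("abs(diff): %.17g\n", fabs(diff));
  return 0;
}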
mysql-test/t/select.test:
Added ABS() to make tests more portable
mysys/tree.c:
Added missing call to tree->free (MySQL itself didn't use this code path); see the C sketch after this file list
sql-bench/Comments/postgres.benchmark:
Updated documentation
sql-bench/bench-init.pl.sh:
Updated version number (changed blob size)
sql-bench/limits/pg.cfg:
New postgres results
sql-bench/server-cfg.sh:
Updated to PostgreSQL 7.1.1
sql-bench/test-connect.sh:
Changed select_big -> select_big_str
tests/fork_big.pl:
Added count(distinct) test
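The mysys/tree.c change above is worth spelling out: the tree keeps an optional free callback that owns the memory behind each stored element, and tree_delete() released the node itself but never invoked that callback, so any user that set tree->free leaked the element's payload (the MySQL server did not set it, which is why the leak went unnoticed). A simplified C sketch of the corrected deletion path follows; the struct layout and names here are illustrative stand-ins, not the real TREE declaration from the MySQL sources.

#include <stdlib.h>

/* Illustrative stand-in for the mysys TREE: just enough structure to
   show where the missing callback belongs. */
typedef void (*tree_element_free)(void *key);

typedef struct st_node
{
  struct st_node *left, *right;
  void *key;                       /* payload owned by the tree's user */
} NODE;

typedef struct st_tree
{
  NODE *root;
  tree_element_free free;          /* optional: frees what key points to */
  unsigned int elements_in_tree;
} TREE;

/* Sketch of the fixed removal path: rebalancing is omitted, the point is
   that the element's payload must be released before the node itself. */
void delete_element(TREE *tree, NODE *element)
{
  /* ... unlink the node and rebalance the red-black tree (omitted) ... */
  if (tree->free)
    (*tree->free)(element->key);   /* the call the patch adds */
  free(element);                   /* the node itself was always freed */
  tree->elements_in_tree--;
}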
-rw-r--r--  mysql-test/t/select.test               |   2
-rw-r--r--  mysys/tree.c                           |   2
-rw-r--r--  sql-bench/Comments/postgres.benchmark  |  47
-rw-r--r--  sql-bench/bench-init.pl.sh             |   2
-rw-r--r--  sql-bench/limits/pg.cfg                |  79
-rw-r--r--  sql-bench/server-cfg.sh                | 110
-rw-r--r--  sql-bench/test-connect.sh              |   2
-rwxr-xr-x  tests/fork_big.pl                      |  30

8 files changed, 153 insertions(+), 121 deletions(-)
diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test
index 10079ba2549..cdb6ee57e0f 100644
--- a/mysql-test/t/select.test
+++ b/mysql-test/t/select.test
@@ -1609,7 +1609,7 @@ select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 grou
 #
 select sum(Period)/count(*) from t1;
-select companynr,count(price) as "count",sum(price) as "sum" ,sum(price)/count(price)-avg(price) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
+select companynr,count(price) as "count",sum(price) as "sum" ,abs(sum(price)/count(price)-avg(price)) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
 select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg;
 #
diff --git a/mysys/tree.c b/mysys/tree.c
index a36fd06f3f1..002062d9f0e 100644
--- a/mysys/tree.c
+++ b/mysys/tree.c
@@ -251,6 +251,8 @@ int tree_delete(TREE *tree, void *key)
   }
   if (remove_colour == BLACK)
     rb_delete_fixup(tree,parent);
+  if (tree->free)
+    (*tree->free)(ELEMENT_KEY(tree,element));
   my_free((gptr) element,MYF(0));
   tree->elements_in_tree--;
   return 0;
diff --git a/sql-bench/Comments/postgres.benchmark b/sql-bench/Comments/postgres.benchmark
index a51752a5023..365a73c6727 100644
--- a/sql-bench/Comments/postgres.benchmark
+++ b/sql-bench/Comments/postgres.benchmark
@@ -18,39 +18,43 @@
 # corresponding file. If you are using csh, use 'setenv'.
 #
-export POSTGRES_INCLUDE=/usr/local/pgsql/include
-export POSTGRES_LIB=/usr/local/pgsql/lib
+export POSTGRES_INCLUDE=/usr/local/pg/include
+export POSTGRES_LIB=/usr/local/pg/lib
-PATH=$PATH:/usr/local/pgsql/bin
-MANPATH=$MANPATH:/usr/local/pgsql/man
+PATH=$PATH:/usr/local/pg/bin
+MANPATH=$MANPATH:/usr/local/pg/man
 #
 # Add the following line to /etc/ld.so.conf:
 #
-/usr/local/pgsql/lib
+/usr/local/pg/lib
 and run ldconfig.
-#
-# untar the postgres source distribution and cd to src/
-# run the following commands:
-#
+# untar the postgres source distribution, cd to postgresql-*
+# and run the following commands:
-./configure
+CFLAGS=-O3 ./configure
 gmake
 gmake install
-mkdir /usr/local/pgsql/data
-chown postgres /usr/local/pgsql/data
+mkdir /usr/local/pg/data
+chown postgres /usr/local/pg/data
 su - postgres
-/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data
-su postgres -c "/usr/local/pgsql/bin/postmaster -o -F -D /usr/local/pgsql/data" &
-su postgres -c "/usr/local/pgsql/bin/createdb test"
+/usr/local/pg/bin/initdb -D /usr/local/pg/data
+/usr/local/pg/bin/postmaster -o -F -D /usr/local/pg/data &
+/usr/local/pg/bin/createdb test
+exit
 #
-# Second, install packages DBD-Pg-0.95.tar.gz and DBI-1.14.tar.gz,
+# Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.14.tar.gz,
 # available from http://www.perl.com/CPAN/
-#
+
+export POSTGRES_LIB=/usr/local/pg/lib/
+export POSTGRES_INCLUDE=/usr/local/pg/include/postgresql
+perl Makefile.PL
+make
+make install
 #
 # Now we run the test that can be found in the sql-bench directory in the
@@ -59,17 +63,16 @@ su postgres -c "/usr/local/pgsql/bin/createdb test"
 # We did run two tests:
 # The standard test
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
 # and a test where we do a vacuum() after each update.
 # (The time for vacuum() is counted in the book-keeping() column)
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
 # If you want to store the results in a output/RUN-xxx file, you should
 # repeate the benchmark with the extra option --log --use-old-result
 # This will create a the RUN file based of the previous results
-#
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
-run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
+run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512MG, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
diff --git a/sql-bench/bench-init.pl.sh b/sql-bench/bench-init.pl.sh
index c316bae1e4d..2e0b3a9a51d 100644
--- a/sql-bench/bench-init.pl.sh
+++ b/sql-bench/bench-init.pl.sh
@@ -31,7 +31,7 @@
 # $server Object for current server
 # $limits Hash reference to limits for benchmark
-$benchmark_version="2.12";
+$benchmark_version="2.13";
 use Getopt::Long;
 require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n";
diff --git a/sql-bench/limits/pg.cfg b/sql-bench/limits/pg.cfg
index 7e4d20b052a..ed1c2eaa63f 100644
--- a/sql-bench/limits/pg.cfg
+++ b/sql-bench/limits/pg.cfg
@@ -1,10 +1,10 @@
-#This file is automaticly generated by crash-me 1.54
+#This file is automaticly generated by crash-me 1.56
 NEG=yes # update of column= -column
 Need_cast_for_null=no # Need to cast NULL for arithmetic
 alter_add_col=yes # Alter table add column
-alter_add_constraint=no # Alter table add constraint
-alter_add_foreign_key=yes # Alter table add foreign key
+alter_add_constraint=yes # Alter table add constraint
+alter_add_foreign_key=no # Alter table add foreign key
 alter_add_multi_col=no # Alter table add many columns
 alter_add_primary_key=no # Alter table add primary key
 alter_add_unique=no # Alter table add unique
@@ -29,21 +29,22 @@ columns_in_order_by=+64 # number of columns in order by
 comment_#=no # # as comment
 comment_--=yes # -- as comment (ANSI)
 comment_/**/=yes # /* */ as comment
-comment_//=no # // as comment (ANSI)
+comment_//=no # // as comment
 compute=no # Compute
 connections=32 # Simultaneous connections (installation default)
 constraint_check=yes # Column constraints
 constraint_check_table=yes # Table constraints
 constraint_null=yes # NULL constraint (SyBase style)
 crash_me_safe=yes # crash me safe
-crash_me_version=1.54 # crash me version
+crash_me_version=1.56 # crash me version
 create_default=yes # default value for column
-create_default_func=no # default value function for column
+create_default_func=yes # default value function for column
 create_if_not_exists=no # create table if not exists
 create_index=yes # create index
 create_schema=no # Create SCHEMA
 create_table_select=with AS # create table from select
 cross_join=yes # cross join (same as from a,b)
+date_as_string=yes # String functions on date columns
 date_infinity=no # Supports 'infinity dates
 date_last=yes # Supports 9999-12-31 dates
 date_one=yes # Supports 0001-01-01 dates
@@ -58,16 +59,16 @@ drop_requires_cascade=no # drop table require cascade/restrict
 drop_restrict=no # drop table with cascade/restrict
 end_colon=yes # allows end ';'
 except=yes # except
-except_all=no # except all
+except_all=yes # except all
 except_all_incompat=no # except all (incompatible lists)
 except_incompat=no # except (incompatible lists)
 float_int_expr=yes # mixing of integer and float in expression
 foreign_key=yes # foreign keys
 foreign_key_syntax=yes # foreign key syntax
-full_outer_join=no # full outer join
+full_outer_join=yes # full outer join
 func_extra_!=no # Function NOT as '!' in SELECT
 func_extra_%=yes # Function MOD as %
-func_extra_&=no # Function & (bitwise and)
+func_extra_&=yes # Function & (bitwise and)
 func_extra_&&=no # Function AND as '&&'
 func_extra_<>=yes # Function <> in SELECT
 func_extra_==yes # Function =
@@ -79,12 +80,12 @@ func_extra_atn2=no # Function ATN2
 func_extra_auto_num2string=no # Function automatic num->string convert
 func_extra_auto_string2num=yes # Function automatic string->num convert
 func_extra_between=yes # Function BETWEEN in SELECT
-func_extra_binary_shifts=no # Function << and >> (bitwise shifts)
+func_extra_binary_shifts=yes # Function << and >> (bitwise shifts)
 func_extra_bit_count=no # Function BIT_COUNT
 func_extra_ceil=yes # Function CEIL
 func_extra_charindex=no # Function CHARINDEX
-func_extra_chr=no # Function CHR
-func_extra_concat_as_+=no # Function concatenation with +
+func_extra_chr=yes # Function CHR
+func_extra_concat_as_+=error # Function concatenation with +
 func_extra_concat_list=no # Function CONCAT(list)
 func_extra_convert=no # Function CONVERT
 func_extra_cosh=no # Function COSH
@@ -103,7 +104,7 @@ func_extra_getdate=no # Function GETDATE
 func_extra_greatest=no # Function GREATEST
 func_extra_if=no # Function IF
 func_extra_in_num=yes # Function IN on numbers in SELECT
-func_extra_in_str=no # Function IN on strings in SELECT
+func_extra_in_str=yes # Function IN on strings in SELECT
 func_extra_initcap=yes # Function INITCAP
 func_extra_instr=no # Function LOCATE as INSTR
 func_extra_instr_oracle=no # Function INSTR (Oracle syntax)
@@ -114,7 +115,7 @@ func_extra_last_insert_id=no # Function LAST_INSERT_ID
 func_extra_least=no # Function LEAST
 func_extra_lengthb=no # Function LENGTHB
 func_extra_like=yes # Function LIKE in SELECT
-func_extra_like_escape=no # Function LIKE ESCAPE in SELECT
+func_extra_like_escape=yes # Function LIKE ESCAPE in SELECT
 func_extra_ln=no # Function LN
 func_extra_log(m_n)=yes # Function LOG(m,n)
 func_extra_logn=no # Function LOGN
@@ -160,7 +161,7 @@ func_extra_unix_timestamp=no # Function UNIX_TIMESTAMP
 func_extra_userenv=no # Function USERENV
 func_extra_version=yes # Function VERSION
 func_extra_weekday=no # Function WEEKDAY
-func_extra_|=no # Function | (bitwise or)
+func_extra_|=yes # Function | (bitwise or)
 func_extra_||=no # Function OR as '||'
 func_extra_~*=yes # Function ~* (case insensitive compare)
 func_odbc_abs=yes # Function ABS
@@ -192,7 +193,7 @@ func_odbc_ifnull=no # Function IFNULL
 func_odbc_insert=no # Function INSERT
 func_odbc_lcase=no # Function LCASE
 func_odbc_left=no # Function LEFT
-func_odbc_length=no # Function REAL LENGTH
+func_odbc_length=yes # Function REAL LENGTH
 func_odbc_length_without_space=no # Function ODBC LENGTH
 func_odbc_locate_2=no # Function LOCATE(2 arg)
 func_odbc_locate_3=no # Function LOCATE(3 arg)
@@ -220,7 +221,7 @@ func_odbc_sin=yes # Function SIN
 func_odbc_soundex=no # Function SOUNDEX
 func_odbc_space=no # Function SPACE
 func_odbc_sqrt=no # Function SQRT
-func_odbc_substring=no # Function ODBC SUBSTRING
+func_odbc_substring=yes # Function ODBC SUBSTRING
 func_odbc_tan=yes # Function TAN
 func_odbc_timestampadd=no # Function TIMESTAMPADD
 func_odbc_timestampdiff=no # Function TIMESTAMPDIFF
@@ -246,8 +247,8 @@ func_sql_localtime=no # Function LOCALTIME
 func_sql_localtimestamp=no # Function LOCALTIMESTAMP
 func_sql_lower=yes # Function LOWER
 func_sql_nullif_num=yes # Function NULLIF with numbers
-func_sql_nullif_string=no # Function NULLIF with strings
-func_sql_octet_length=no # Function OCTET_LENGTH
+func_sql_nullif_string=yes # Function NULLIF with strings
+func_sql_octet_length=yes # Function OCTET_LENGTH
 func_sql_position=yes # Function POSITION
 func_sql_searched_case=yes # Function searched CASE
 func_sql_session_user=yes # Function SESSION_USER
@@ -264,7 +265,7 @@ func_where_eq_some=yes # Function = SOME
 func_where_exists=yes # Function EXISTS
 func_where_in_num=yes # Function IN on numbers
 func_where_like=yes # Function LIKE
-func_where_like_escape=no # Function LIKE ESCAPE
+func_where_like_escape=yes # Function LIKE ESCAPE
 func_where_match=no # Function MATCH
 func_where_match_unique=no # Function MATCH UNIQUE
 func_where_matches=no # Function MATCHES
@@ -283,8 +284,8 @@ group_func_extra_bit_and=no # Group function BIT_AND
 group_func_extra_bit_or=no # Group function BIT_OR
 group_func_extra_count_distinct_list=no # Group function COUNT(DISTINCT expr,expr,...)
 group_func_extra_std=no # Group function STD
-group_func_extra_stddev=no # Group function STDDEV
-group_func_extra_variance=no # Group function VARIANCE
+group_func_extra_stddev=yes # Group function STDDEV
+group_func_extra_variance=yes # Group function VARIANCE
 group_func_sql_any=no # Group function ANY
 group_func_sql_avg=yes # Group function AVG
 group_func_sql_count_*=yes # Group function COUNT (*)
@@ -315,37 +316,37 @@ insert_multi_value=no # INSERT with Value lists
 insert_select=yes # insert INTO ... SELECT ...
 insert_with_set=no # INSERT with set syntax
 intersect=yes # intersect
-intersect_all=no # intersect all
+intersect_all=yes # intersect all
 intersect_all_incompat=no # intersect all (incompatible lists)
 intersect_incompat=no # intersect (incompatible lists)
 join_tables=+64 # tables in join
-left_outer_join=no # left outer join
-left_outer_join_using=no # left outer join using
+left_outer_join=yes # left outer join
+left_outer_join_using=yes # left outer join using
 like_with_column=yes # column LIKE column
 like_with_number=yes # LIKE on numbers
 lock_tables=yes # lock table
 logical_value=1 # Value of logical operation (1=1)
 max_big_expressions=10 # big expressions
-max_char_size=8104 # max char() size
+max_char_size=+8000000 # max char() size
 max_column_name=+512 # column name length
 max_columns=1600 # Columns in table
 max_conditions=19994 # OR and AND in WHERE
 max_expressions=9999 # simple expressions
 max_index=+64 # max index
-max_index_length=2704 # index length
+max_index_length=+8192 # index length
 max_index_name=+512 # index name length
-max_index_part_length=2704 # max index part length
+max_index_part_length=235328 # max index part length
 max_index_parts=16 # index parts
-max_index_varchar_part_length=2704 # index varchar part length
-max_row_length=7949 # max table row length (without blobs)
-max_row_length_with_null=7949 # table row length with nulls (without blobs)
+max_index_varchar_part_length=235328 # index varchar part length
+max_row_length=64519 # max table row length (without blobs)
+max_row_length_with_null=64519 # table row length with nulls (without blobs)
 max_select_alias_name=+512 # select alias name length
 max_stack_expression=+2000 # stacked expressions
 max_table_alias_name=+512 # table alias name length
 max_table_name=+512 # table name length
-max_text_size=8104 # max text or blob size
+max_text_size=+8000000 # max text or blob size
 max_unique_index=+64 # unique indexes
-max_varchar_size=8104 # max varchar() size
+max_varchar_size=+8000000 # max varchar() size
 minus=no # minus
 minus_incompat=no # minus (incompatible lists)
 minus_neg=no # Calculate 1--1
@@ -356,7 +357,7 @@ multi_table_delete=no # DELETE FROM table1,table2...
 multi_table_update=no # Update with many tables
 natural_join=yes # natural join
 natural_join_incompat=yes # natural join (incompatible lists)
-natural_left_outer_join=no # natural left outer join
+natural_left_outer_join=yes # natural left outer join
 no_primary_key=yes # Tables without primary key
 null_concat_expr=yes # Is 'a' || NULL = NULL
 null_in_index=yes # null in index
@@ -364,7 +365,7 @@ null_in_unique=yes # null in unique index
 null_num_expr=yes # Is 1+NULL = NULL
 nulls_in_unique=yes # null combination in unique index
 odbc_left_outer_join=no # left outer join odbc style
-operating_system=Linux 2.2.14-5.0 i686 # crash-me tested on
+operating_system=Linux 2.4.0-64GB-SMP i686 # crash-me tested on
 order_by=yes # Order by
 order_by_alias=yes # Order by alias
 order_by_function=yes # Order by function
@@ -386,7 +387,7 @@ remember_end_space=no # Remembers end space in char()
 remember_end_space_varchar=yes # Remembers end space in varchar()
 rename_table=no # rename table
 repeat_string_size=+8000000 # return string size from function
-right_outer_join=no # right outer join
+right_outer_join=yes # right outer join
 rowid=oid # Type for row id
 select_constants=yes # Select constants
 select_limit=with LIMIT # LIMIT number of rows
@@ -394,7 +395,7 @@ select_limit2=yes # SELECT with LIMIT #,#
 select_string_size=16777207 # constant string size in SELECT
 select_table_update=yes # Update with sub select
 select_without_from=yes # SELECT without FROM
-server_version=PostgreSQL version 7.0.2 # server version
+server_version=PostgreSQL version 7.1.1 # server version
 simple_joins=yes # ANSI SQL simple joins
 storage_of_float=round # Storage of float values
 subqueries=yes # subqueries
@@ -466,7 +467,7 @@ type_extra_timespan=yes # Type timespan
 type_extra_uint=no # Type uint
 type_extra_varchar2(1_arg)=no # Type varchar2(1 arg)
 type_extra_year=no # Type year
-type_odbc_bigint=no # Type bigint
+type_odbc_bigint=yes # Type bigint
 type_odbc_binary(1_arg)=no # Type binary(1 arg)
 type_odbc_datetime=yes # Type datetime
 type_odbc_tinyint=no # Type tinyint
@@ -519,4 +520,4 @@ union_incompat=yes # union (incompatible lists)
 unique_in_create=yes # unique in create table
 unique_null_in_create=yes # unique null in create
 views=yes # views
-where_string_size=16777182 # constant string size in where
+where_string_size=16777181 # constant string size in where
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index d87966db5f0..1983b2ce01b 100644
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -121,53 +121,49 @@ sub new
   $self->{'vacuum'} = 1; # When using with --fast
   $self->{'drop_attr'} = "";
-  $limits{'max_conditions'} = 9999; # (Actually not a limit)
-  $limits{'max_columns'} = 2000; # Max number of columns in table
-  # Windows can't handle that many files in one directory
-  $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
-  $limits{'max_text_size'} = 65000; # Max size with default buffers.
-  $limits{'query_size'} = 1000000; # Max size with default buffers.
-  $limits{'max_index'} = 16; # Max number of keys
-  $limits{'max_index_parts'} = 16; # Max segments/key
-  $limits{'max_column_name'} = 64; # max table and column name
-
-  $limits{'join_optimizer'} = 1; # Can optimize FROM tables
-  $limits{'load_data_infile'} = 1; # Has load data infile
-  $limits{'lock_tables'} = 1; # Has lock tables
-  $limits{'functions'} = 1; # Has simple functions (+/-)
-  $limits{'group_functions'} = 1; # Have group functions
-  $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
-  $limits{'group_distinct_functions'}= 1; # Have count(distinct)
-  $limits{'select_without_from'}= 1; # Can do 'select 1';
-  $limits{'multi_drop'} = 1; # Drop table can take many tables
-  $limits{'subqueries'} = 0; # Doesn't support sub-queries.
-  $limits{'left_outer_join'} = 1; # Supports left outer joins
-  $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
-  $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
-  $limits{'having_with_group'} = 1; # Can use group functions in HAVING
-  $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
-  $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
-  $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
-  $limits{'alter_table'} = 1; # Have ALTER TABLE
+  $limits{'NEG'} = 1; # Supports -id
   $limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int;
+  $limits{'alter_table'} = 1; # Have ALTER TABLE
   $limits{'alter_table_dropcol'}= 1; # Have ALTER TABLE DROP column
-  $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
-
-  $limits{'group_func_extra_std'} = 1; # Have group function std().
-
-  $limits{'func_odbc_mod'} = 1; # Have function mod.
+  $limits{'column_alias'} = 1; # Alias for fields in select statement.
   $limits{'func_extra_%'} = 1; # Has % as alias for mod()
-  $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
   $limits{'func_extra_if'} = 1; # Have function if.
-  $limits{'column_alias'} = 1; # Alias for fields in select statement.
-  $limits{'NEG'} = 1; # Supports -id
   $limits{'func_extra_in_num'} = 1; # Has function in
-  $limits{'limit'} = 1; # supports the limit attribute
-  $limits{'unique_index'} = 1; # Unique index works or not
+  $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
+  $limits{'func_odbc_mod'} = 1; # Have function mod.
+  $limits{'functions'} = 1; # Has simple functions (+/-)
+  $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
+  $limits{'group_distinct_functions'}= 1; # Have count(distinct)
+  $limits{'group_func_extra_std'} = 1; # Have group function std().
+  $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
+  $limits{'group_functions'} = 1; # Have group functions
+  $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
+  $limits{'having_with_group'} = 1; # Can use group functions in HAVING
+  $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
   $limits{'insert_select'} = 1;
-  $limits{'working_blobs'} = 1; # If big varchar/blobs works
+  $limits{'join_optimizer'} = 1; # Can optimize FROM tables
+  $limits{'left_outer_join'} = 1; # Supports left outer joins
+  $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
+  $limits{'limit'} = 1; # supports the limit attribute
+  $limits{'load_data_infile'} = 1; # Has load data infile
+  $limits{'lock_tables'} = 1; # Has lock tables
+  $limits{'max_column_name'} = 64; # max table and column name
+  $limits{'max_columns'} = 2000; # Max number of columns in table
+  $limits{'max_conditions'} = 9999; # (Actually not a limit)
+  $limits{'max_index'} = 16; # Max number of keys
+  $limits{'max_index_parts'} = 16; # Max segments/key
+  $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
+  $limits{'max_text_size'} = 1000000; # Good enough for tests
+  $limits{'multi_drop'} = 1; # Drop table can take many tables
+  $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
   $limits{'order_by_unused'} = 1;
+  $limits{'query_size'} = 1000000; # Max size with default buffers.
+  $limits{'select_without_from'}= 1; # Can do 'select 1';
+  $limits{'subqueries'} = 0; # Doesn't support sub-queries.
+  $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
+  $limits{'unique_index'} = 1; # Unique index works or not
   $limits{'working_all_fields'} = 1;
+  $limits{'working_blobs'} = 1; # If big varchar/blobs works
   $smds{'time'} = 1;
   $smds{'q1'} = 'b'; # with time not supp by mysql ('')
@@ -568,12 +564,12 @@ sub new
   $self->{'drop_attr'} = "";
   $self->{"vacuum"} = 1;
   $limits{'join_optimizer'} = 1; # Can optimize FROM tables
-  $limits{'load_data_infile'} = 0; # Is this true ?
+  $limits{'load_data_infile'} = 0;
-  $limits{'NEG'} = 1; # Can't handle -id
-  $limits{'alter_table'} = 1; # alter ??
+  $limits{'NEG'} = 1;
   $limits{'alter_add_multi_col'}= 0; # alter_add_multi_col ?
-  $limits{'alter_table_dropcol'}= 0; # alter_drop_col ?
+  $limits{'alter_table'} = 1;
+  $limits{'alter_table_dropcol'}= 0;
   $limits{'column_alias'} = 1;
   $limits{'func_extra_%'} = 1;
   $limits{'func_extra_if'} = 0;
@@ -582,33 +578,33 @@ sub new
   $limits{'func_odbc_mod'} = 1; # Has %
   $limits{'functions'} = 1;
   $limits{'group_by_position'} = 1;
+  $limits{'group_distinct_functions'}= 1; # Have count(distinct)
   $limits{'group_func_extra_std'} = 0;
   $limits{'group_func_sql_min_str'}= 1; # Can execute MIN() and MAX() on strings
   $limits{'group_functions'} = 1;
-  $limits{'group_distinct_functions'}= 1; # Have count(distinct)
   $limits{'having_with_alias'} = 0;
   $limits{'having_with_group'} = 1;
-  $limits{'left_outer_join'} = 0;
+  $limits{'insert_select'} = 1;
+  $limits{'left_outer_join'} = 1;
   $limits{'like_with_column'} = 1;
   $limits{'lock_tables'} = 0; # in ATIS gives this a problem
+  $limits{'max_column_name'} = 128;
+  $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
+  $limits{'max_conditions'} = 9999; # This makes Pg real slow
+  $limits{'max_index'} = 64; # Big enough
+  $limits{'max_index_parts'} = 16;
+  $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
+  $limits{'max_text_size'} = 65000; # Good enough for test
   $limits{'multi_drop'} = 1;
   $limits{'order_by_position'} = 1;
+  $limits{'order_by_unused'} = 1;
+  $limits{'query_size'} = 16777216;
   $limits{'select_without_from'}= 1;
   $limits{'subqueries'} = 1;
   $limits{'table_wildcard'} = 1;
-  $limits{'max_column_name'} = 32; # Is this true
-  $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
-  $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
-  $limits{'max_conditions'} = 30; # This makes Pg real slow
-  $limits{'max_index'} = 64; # Is this true ?
-  $limits{'max_index_parts'} = 16; # Is this true ?
-  $limits{'max_text_size'} = 7000; # 8000 crashes pg 6.3
-  $limits{'query_size'} = 16777216;
   $limits{'unique_index'} = 1; # Unique index works or not
-  $limits{'insert_select'} = 1;
-  $limits{'working_blobs'} = 1; # If big varchar/blobs works
-  $limits{'order_by_unused'} = 1;
   $limits{'working_all_fields'} = 1;
+  $limits{'working_blobs'} = 1; # If big varchar/blobs works
   # the different cases per query ...
   $smds{'q1'} = 'b'; # with time
@@ -639,7 +635,7 @@ sub new
 sub version
 {
   my ($version,$dir);
-  foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/my/local/pgsql/")
+  foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/usr/local/pg/data")
   {
     if ($dir && -e "$dir/PG_VERSION")
     {
diff --git a/sql-bench/test-connect.sh b/sql-bench/test-connect.sh
index cddb32e2775..862161e3a03 100644
--- a/sql-bench/test-connect.sh
+++ b/sql-bench/test-connect.sh
@@ -266,7 +266,7 @@ for ($i=0 ; $i < $opt_loop_count ; $i++)
 }
 $end_time=new Benchmark;
-print "Time to select_big ($opt_loop_count): " .
+print "Time to select_big_str ($opt_loop_count): " .
   timestr(timediff($end_time, $loop_time),"all") . "\n\n";
 $sth = $dbh->do("drop table bench1" . $server->{'drop_attr'})
diff --git a/tests/fork_big.pl b/tests/fork_big.pl
index 8f16db74793..4009a9da71b 100755
--- a/tests/fork_big.pl
+++ b/tests/fork_big.pl
@@ -88,6 +88,7 @@ for ($i=0 ; $i < $opt_threads ; $i ++)
 {
   test_select() if (($pid=fork()) == 0); $work{$pid}="select_key";
 }
+test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";
 test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
 test_update() if (($pid=fork()) == 0); $work{$pid}="update";
 test_flush() if (($pid=fork()) == 0); $work{$pid}= "flush";
@@ -214,6 +215,35 @@ sub test_select
 }
 #
+# Do big select count(distinct..) over the table
+#
+
+sub test_select_count
+{
+  my ($dbh, $i, $j, $count, $loop);
+
+  $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
+                      $opt_user, $opt_password,
+                      { PrintError => 0}) || die $DBI::errstr;
+
+  $count=0;
+  $i=0;
+  while (!test_if_abort($dbh))
+  {
+    for ($j=0 ; $j < $numtables ; $j++)
+    {
+      my ($table)= $testtables[$j]->[0];
+      simple_query($dbh, "select count(distinct marker),count(distinct id),count(distinct info) from $table");
+      $count++;
+    }
+    sleep(20); # This query is quite slow
+  }
+  $dbh->disconnect; $dbh=0;
+  print "Test_select: Executed $count select count(distinct) queries\n";
+  exit(0);
+}
+
+#
 # Delete 1-5 rows from the first 2 tables.
 # Test ends when the number of rows for table 3 didn't change during
 # one loop