author     Sergei Golubchik <sergii@pisem.net>  2011-01-14 11:58:45 +0100
committer  Sergei Golubchik <sergii@pisem.net>  2011-01-14 11:58:45 +0100
commit     6554977fe577507378290c44d9d3476d1d9d107d (patch)
tree       dd4bc7b57b53ff196b3d5c749ffece2e6e7a359b /sql
parent     f5ea301acc593b51b13303899246a38efff92c1a (diff)
Added ha_write_tmp_row() for slightly faster write_row for internal temp tables.
This will also enable us in the future to collect statistics for writes to internal tmp tables.

sql/handler.h:
  Added ha_write_tmp_row()
sql/opt_subselect.cc:
  ha_write_row -> ha_write_tmp_row
sql/sql_class.h:
  Added ha_write_tmp_row()
sql/sql_select.cc:
  ha_write_row -> ha_write_tmp_row
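As the sql/sql_class.h hunk below shows, the new entry point is a thin inline wrapper: it bumps the SSV::ha_write_count statistic and then delegates to write_row(). The following is a minimal, self-contained sketch of that pattern, plus a hypothetical separate counter for tmp-table writes of the kind the commit message anticipates; TinyHandler, StatusVars, write_count and tmp_write_count are illustrative names only, not MariaDB identifiers.

// Standalone sketch (not MariaDB code): a thin inline wrapper that updates a
// statistics counter and then delegates to the real write path, mirroring the
// shape of ha_write_tmp_row() in this patch. tmp_write_count is a hypothetical
// future counter, not something this commit adds.
#include <cstdio>

struct StatusVars
{
  unsigned long write_count= 0;      // stands in for SSV::ha_write_count
  unsigned long tmp_write_count= 0;  // hypothetical tmp-table-only counter
};

struct TinyHandler
{
  StatusVars stats;

  int write_row(const unsigned char *buf)        // "real" engine write
  {
    (void) buf;
    return 0;                                    // 0 == success
  }

  int ha_write_row(const unsigned char *buf)     // regular-table path
  {
    stats.write_count++;
    return write_row(buf);
  }

  int ha_write_tmp_row(const unsigned char *buf) // internal-tmp-table path
  {
    stats.write_count++;
    stats.tmp_write_count++;                     // extra bookkeeping now possible
    return write_row(buf);
  }
};

int main()
{
  TinyHandler h;
  unsigned char row[8]= {0};
  h.ha_write_row(row);
  h.ha_write_tmp_row(row);
  std::printf("writes=%lu tmp_writes=%lu\n",
              h.stats.write_count, h.stats.tmp_write_count);
  return 0;
}

The point of giving tmp-table writes their own entry point is exactly this: the callers in opt_subselect.cc and sql_select.cc no longer share ha_write_row(), so tmp-table-specific accounting can later be added in one place without touching the callers again.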
Diffstat (limited to 'sql')
-rw-r--r--  sql/handler.h         1
-rw-r--r--  sql/opt_subselect.cc  2
-rw-r--r--  sql/sql_class.h       5
-rw-r--r--  sql/sql_select.cc     16
4 files changed, 15 insertions, 9 deletions
diff --git a/sql/handler.h b/sql/handler.h
index 760754b21e6..3beda9f7c88 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -2415,6 +2415,7 @@ public:
/* XXX to be removed, see ha_partition::partition_ht() */
virtual handlerton *partition_ht() const
{ return ht; }
+ inline int ha_write_tmp_row(uchar *buf);
};
#include "multi_range_read.h"
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 675717573c6..7fbbdf03c38 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -2907,7 +2907,7 @@ int do_sj_dups_weedout(THD *thd, SJ_TMP_TABLE *sjtbl)
}
}
- error= sjtbl->tmp_table->file->ha_write_row(sjtbl->tmp_table->record[0]);
+ error= sjtbl->tmp_table->file->ha_write_tmp_row(sjtbl->tmp_table->record[0]);
if (error)
{
/* create_internal_tmp_table_from_heap will generate error if needed */
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 4b49fbcd3ff..c6bc3d0c649 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -3639,5 +3639,10 @@ inline int handler::ha_read_first_row(uchar *buf, uint primary_key)
return error;
}
+inline int handler::ha_write_tmp_row(uchar *buf)
+{
+ increment_statistics(&SSV::ha_write_count);
+ return write_row(buf);
+}
#endif /* MYSQL_SERVER */
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index b047a33a61b..8a8946cd1fa 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7515,7 +7515,7 @@ end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
fill_record(thd, table->field, sjm->sjm_table_cols, TRUE, FALSE);
if (thd->is_error())
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if ((error= table->file->ha_write_row(table->record[0])))
+ if ((error= table->file->ha_write_tmp_row(table->record[0])))
{
/* create_myisam_from_heap will generate error if needed */
if (table->file->is_fatal_error(error, HA_CHECK_DUP) &&
@@ -12833,13 +12833,13 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
*/
while (!table->file->ha_rnd_next(new_table.record[1]))
{
- write_err= new_table.file->ha_write_row(new_table.record[1]);
+ write_err= new_table.file->ha_write_tmp_row(new_table.record[1]);
DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
if (write_err)
goto err;
}
/* copy row that filled HEAP table */
- if ((write_err=new_table.file->ha_write_row(table->record[0])))
+ if ((write_err=new_table.file->ha_write_tmp_row(table->record[0])))
{
if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
!ignore_last_dupp_key_error)
@@ -14725,7 +14725,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
join->found_records++;
- if ((error= table->file->ha_write_row(table->record[0])))
+ if ((error= table->file->ha_write_tmp_row(table->record[0])))
{
if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
goto end;
@@ -14814,7 +14814,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
init_tmptable_sum_functions(join->sum_funcs);
if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if ((error= table->file->ha_write_row(table->record[0])))
+ if ((error= table->file->ha_write_tmp_row(table->record[0])))
{
if (create_internal_tmp_table_from_heap(join->thd, table,
join->tmp_table_param.start_recinfo,
@@ -14857,7 +14857,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if (!(error= table->file->ha_write_row(table->record[0])))
+ if (!(error= table->file->ha_write_tmp_row(table->record[0])))
join->send_records++; // New group
else
{
@@ -14917,7 +14917,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->sum_funcs_end[send_group_parts]);
if (!join->having || join->having->val_int())
{
- int error= table->file->ha_write_row(table->record[0]);
+ int error= table->file->ha_write_tmp_row(table->record[0]);
if (error &&
create_internal_tmp_table_from_heap(join->thd, table,
join->tmp_table_param.start_recinfo,
@@ -18592,7 +18592,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
item->save_in_result_field(1);
}
copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
- if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
+ if ((write_error= table_arg->file->ha_write_tmp_row(table_arg->record[0])))
{
if (create_internal_tmp_table_from_heap(thd, table_arg,
tmp_table_param.start_recinfo,