path: root/sql/ha_heap.cc
Diffstat (limited to 'sql/ha_heap.cc')
-rw-r--r--  sql/ha_heap.cc | 133
1 file changed, 97 insertions(+), 36 deletions(-)
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 9c680daaf91..79d4575ff1b 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -23,12 +23,48 @@
#include <myisampack.h>
#include "ha_heap.h"
+handlerton heap_hton= {
+ "MEMORY",
+ SHOW_OPTION_YES,
+ "Hash based, stored in memory, useful for temporary tables",
+ DB_TYPE_HEAP,
+ NULL,
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_CAN_RECREATE
+};
+
/*****************************************************************************
** HEAP tables
*****************************************************************************/
+ha_heap::ha_heap(TABLE *table_arg)
+ :handler(&heap_hton, table_arg), file(0), records_changed(0),
+ key_stat_version(0)
+{}
+
+
+static const char *ha_heap_exts[] = {
+ NullS
+};
+
const char **ha_heap::bas_ext() const
-{ static const char *ext[1]= { NullS }; return ext; }
+{
+ return ha_heap_exts;
+}
/*
Hash index statistics is updated (copied from HP_KEYDEF::hash_buckets to
@@ -98,16 +134,17 @@ int ha_heap::close(void)
void ha_heap::set_keys_for_scanning(void)
{
btree_keys.clear_all();
- for (uint i= 0 ; i < table->keys ; i++)
+ for (uint i= 0 ; i < table->s->keys ; i++)
{
if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE)
btree_keys.set_bit(i);
}
}
+
void ha_heap::update_key_stats()
{
- for (uint i= 0; i < table->keys; i++)
+ for (uint i= 0; i < table->s->keys; i++)
{
KEY *key=table->key_info+i;
if (!key->rec_per_key)
@@ -131,17 +168,18 @@ void ha_heap::update_key_stats()
key_stat_version= file->s->key_stat_version;
}
+
int ha_heap::write_row(byte * buf)
{
int res;
- statistic_increment(ha_write_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
update_auto_increment();
res= heap_write(file,buf);
- if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
- file->s->records)
+ if (!res && (++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
+ file->s->records))
{
/*
We can perform this safely since only one writer at the time is
@@ -155,7 +193,7 @@ int ha_heap::write_row(byte * buf)
int ha_heap::update_row(const byte * old_data, byte * new_data)
{
int res;
- statistic_increment(ha_update_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
res= heap_update(file,old_data,new_data);
@@ -174,9 +212,9 @@ int ha_heap::update_row(const byte * old_data, byte * new_data)
int ha_heap::delete_row(const byte * buf)
{
int res;
- statistic_increment(ha_delete_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
res= heap_delete(file,buf);
- if (!res && table->tmp_table == NO_TMP_TABLE &&
+ if (!res && table->s->tmp_table == NO_TMP_TABLE &&
++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
{
/*
@@ -192,7 +230,8 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
enum ha_rkey_function find_flag)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(ha_read_key_count, &LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_key_count,
+ &LOCK_status);
int error = heap_rkey(file,buf,active_index, key, key_len, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;
@@ -201,7 +240,8 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(ha_read_key_count, &LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_key_count,
+ &LOCK_status);
int error= heap_rkey(file, buf, active_index, key, key_len,
HA_READ_PREFIX_LAST);
table->status= error ? STATUS_NOT_FOUND : 0;
@@ -211,7 +251,8 @@ int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len)
int ha_heap::index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
- statistic_increment(ha_read_key_count, &LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_key_count,
+ &LOCK_status);
int error = heap_rkey(file, buf, index, key, key_len, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
return error;
@@ -220,7 +261,8 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key,
int ha_heap::index_next(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(ha_read_next_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_next_count,
+ &LOCK_status);
int error=heap_rnext(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -229,7 +271,8 @@ int ha_heap::index_next(byte * buf)
int ha_heap::index_prev(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(ha_read_prev_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_prev_count,
+ &LOCK_status);
int error=heap_rprev(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -238,7 +281,8 @@ int ha_heap::index_prev(byte * buf)
int ha_heap::index_first(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(ha_read_first_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_first_count,
+ &LOCK_status);
int error=heap_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -247,7 +291,8 @@ int ha_heap::index_first(byte * buf)
int ha_heap::index_last(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
- statistic_increment(ha_read_last_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_last_count,
+ &LOCK_status);
int error=heap_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -260,7 +305,8 @@ int ha_heap::rnd_init(bool scan)
int ha_heap::rnd_next(byte *buf)
{
- statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
int error=heap_scan(file, buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
@@ -270,7 +316,8 @@ int ha_heap::rnd_pos(byte * buf, byte *pos)
{
int error;
HEAP_PTR position;
- statistic_increment(ha_read_rnd_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_count,
+ &LOCK_status);
memcpy_fixed((char*) &position,pos,sizeof(HEAP_PTR));
error=heap_rrnd(file, buf, position);
table->status=error ? STATUS_NOT_FOUND: 0;
@@ -314,7 +361,7 @@ int ha_heap::extra(enum ha_extra_function operation)
int ha_heap::delete_all_rows()
{
heap_clear(file);
- if (table->tmp_table == NO_TMP_TABLE)
+ if (table->s->tmp_table == NO_TMP_TABLE)
{
/*
We can perform this safely since only one writer at the time is
@@ -493,23 +540,25 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
int ha_heap::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
- uint key, parts, mem_per_row= 0;
+ uint key, parts, mem_per_row= 0, keys= table_arg->s->keys;
uint auto_key= 0, auto_key_type= 0;
ha_rows max_rows;
HP_KEYDEF *keydef;
HA_KEYSEG *seg;
char buff[FN_REFLEN];
int error;
+ TABLE_SHARE *share= table_arg->s;
+ bool found_real_auto_increment= 0;
- for (key= parts= 0; key < table_arg->keys; key++)
+ for (key= parts= 0; key < keys; key++)
parts+= table_arg->key_info[key].key_parts;
- if (!(keydef= (HP_KEYDEF*) my_malloc(table_arg->keys * sizeof(HP_KEYDEF) +
+ if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
parts * sizeof(HA_KEYSEG),
MYF(MY_WME))))
return my_errno;
- seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + table_arg->keys);
- for (key= 0; key < table_arg->keys; key++)
+ seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + keys);
+ for (key= 0; key < keys; key++)
{
KEY *pos= table_arg->key_info+key;
KEY_PART_INFO *key_part= pos->key_part;
@@ -532,22 +581,26 @@ int ha_heap::create(const char *name, TABLE *table_arg,
default:
DBUG_ASSERT(0); // cannot happen
}
- keydef[key].algorithm= ((pos->algorithm == HA_KEY_ALG_UNDEF) ?
- HA_KEY_ALG_HASH : pos->algorithm);
for (; key_part != key_part_end; key_part++, seg++)
{
Field *field= key_part->field;
+
if (pos->algorithm == HA_KEY_ALG_BTREE)
seg->type= field->key_type();
else
{
- if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT)
+ if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT &&
+ seg->type != HA_KEYTYPE_VARTEXT1 &&
+ seg->type != HA_KEYTYPE_VARTEXT2 &&
+ seg->type != HA_KEYTYPE_VARBINARY1 &&
+ seg->type != HA_KEYTYPE_VARBINARY2)
seg->type= HA_KEYTYPE_BINARY;
}
seg->start= (uint) key_part->offset;
seg->length= (uint) key_part->length;
- seg->flag = 0;
+ seg->flag= key_part->key_part_flag;
+
seg->charset= field->charset();
if (field->null_ptr)
{
@@ -561,7 +614,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
}
if (field->flags & AUTO_INCREMENT_FLAG &&
table_arg->found_next_number_field &&
- key == table_arg->next_number_index)
+ key == share->next_number_index)
{
/*
Store key number and type for found auto_increment key
@@ -572,21 +625,29 @@ int ha_heap::create(const char *name, TABLE *table_arg,
}
}
}
- mem_per_row+= MY_ALIGN(table_arg->reclength + 1, sizeof(char*));
+ mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
+ max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size /
+ mem_per_row);
+ if (table_arg->found_next_number_field)
+ {
+ keydef[share->next_number_index].flag|= HA_AUTO_KEY;
+ found_real_auto_increment= share->next_number_key_offset == 0;
+ }
HP_CREATE_INFO hp_create_info;
hp_create_info.auto_key= auto_key;
hp_create_info.auto_key_type= auto_key_type;
hp_create_info.auto_increment= (create_info->auto_increment_value ?
create_info->auto_increment_value - 1 : 0);
hp_create_info.max_table_size=current_thd->variables.max_heap_table_size;
+ hp_create_info.with_auto_increment= found_real_auto_increment;
max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row);
error= heap_create(fn_format(buff,name,"","",
MY_REPLACE_EXT|MY_UNPACK_FILENAME),
- table_arg->keys,keydef, table_arg->reclength,
- (ulong) ((table_arg->max_rows < max_rows &&
- table_arg->max_rows) ?
- table_arg->max_rows : max_rows),
- (ulong) table_arg->min_rows, &hp_create_info);
+ keys, keydef, share->reclength,
+ (ulong) ((share->max_rows < max_rows &&
+ share->max_rows) ?
+ share->max_rows : max_rows),
+ (ulong) share->min_rows, &hp_create_info);
my_free((gptr) keydef, MYF(0));
if (file)
info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
@@ -601,7 +662,7 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
create_info->auto_increment_value= auto_increment_value;
}
-longlong ha_heap::get_auto_increment()
+ulonglong ha_heap::get_auto_increment()
{
ha_heap::info(HA_STATUS_AUTO);
return auto_increment_value;