Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_innodb.cc      |  32
-rw-r--r--  sql/ha_ndbcluster.cc  | 121
-rw-r--r--  sql/item.cc           |  39
-rw-r--r--  sql/item.h            |  17
-rw-r--r--  sql/item_func.cc      |  13
-rw-r--r--  sql/sql_base.cc       |   4
-rw-r--r--  sql/sql_class.h       |  47
-rw-r--r--  sql/sql_parse.cc      |   8
-rw-r--r--  sql/sql_prepare.cc    |  15
-rw-r--r--  sql/sql_union.cc      |  24
-rw-r--r--  sql/table.h           |   1
11 files changed, 211 insertions(+), 110 deletions(-)
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 04a1c6a9014..8d9ecb95fc0 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -2461,9 +2461,10 @@ ha_innobase::write_row(
/* If the insert did not succeed we restore the value of
the auto-inc counter we used; note that this behavior was
introduced only in version 4.0.4.
- NOTE that a REPLACE command handles a duplicate key error
+ NOTE that a REPLACE command and LOAD DATA INFILE REPLACE
+ handles a duplicate key error
itself, and we must not decrement the autoinc counter
- if we are performing a REPLACE statement.
+ if we are performing one of those statements.
NOTE 2: if there was an error, for example a deadlock,
which caused InnoDB to roll back the whole transaction
already in the call of row_insert_for_mysql(), we may no
@@ -2475,7 +2476,9 @@ ha_innobase::write_row(
if (error == DB_DUPLICATE_KEY
&& (user_thd->lex->sql_command == SQLCOM_REPLACE
|| user_thd->lex->sql_command
- == SQLCOM_REPLACE_SELECT)) {
+ == SQLCOM_REPLACE_SELECT
+ || (user_thd->lex->sql_command == SQLCOM_LOAD
+ && user_thd->lex->duplicates == DUP_REPLACE))) {
skip_auto_inc_decr= TRUE;
}
@@ -4334,6 +4337,8 @@ ha_innobase::info(
ha_rows rec_per_key;
ulong j;
ulong i;
+ char path[FN_REFLEN];
+ os_file_stat_t stat_info;
DBUG_ENTER("info");
@@ -4371,6 +4376,26 @@ ha_innobase::info(
prebuilt->trx->op_info = (char*)
"returning various info to MySQL";
+
+ if (ib_table->space != 0) {
+ my_snprintf(path, sizeof(path), "%s/%s%s",
+ mysql_data_home, ib_table->name,
+ ".ibd");
+ unpack_filename(path,path);
+ } else {
+ my_snprintf(path, sizeof(path), "%s/%s%s",
+ mysql_data_home, ib_table->name,
+ reg_ext);
+
+ unpack_filename(path,path);
+ }
+
+ /* Note that we do not know the access time of the table,
+ nor the CHECK TABLE time, nor the UPDATE or INSERT time. */
+
+ if (os_file_get_status(path,&stat_info)) {
+ create_time = stat_info.ctime;
+ }
}
if (flag & HA_STATUS_VARIABLE) {
@@ -5501,6 +5526,7 @@ innobase_query_is_replace(void)
thd = (THD *)innobase_current_thd();
if ( thd->lex->sql_command == SQLCOM_REPLACE ||
+ thd->lex->sql_command == SQLCOM_REPLACE_SELECT ||
( thd->lex->sql_command == SQLCOM_LOAD &&
thd->lex->duplicates == DUP_REPLACE )) {
return true;
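
The create_time addition to ha_innobase::info() above approximates the table's creation time from the change time of its on-disk file: the .ibd file when the table has its own tablespace (space != 0), the .frm file (reg_ext) otherwise. Below is a minimal stand-alone sketch of the same idea; plain POSIX stat() stands in for InnoDB's os_file_get_status() wrapper, and the path is hypothetical.

    #include <sys/stat.h>
    #include <cstdio>

    int main(int argc, char **argv)
    {
      /* Hypothetical data file path; the server builds it from
         mysql_data_home and the table name as shown in the hunk above. */
      const char *path= argc > 1 ? argv[1] : "./test/t1.ibd";

      struct stat st;
      if (stat(path, &st) == 0)
        std::printf("create_time approximated by file ctime: %ld\n",
                    (long) st.st_ctime);
      else
        std::printf("stat failed; create_time is left unset\n");
      return 0;
    }
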
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index e1050a73b7a..145fd23ff43 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1403,6 +1403,52 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
DBUG_RETURN(0);
}
+inline
+int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
+{
+ uint i;
+ THD *thd= current_thd;
+ NdbConnection *trans= m_active_trans;
+
+ DBUG_ENTER("define_read_attrs");
+
+ // Define attributes to read
+ for (i= 0; i < table->fields; i++)
+ {
+ Field *field= table->field[i];
+ if ((thd->query_id == field->query_id) ||
+ (field->flags & PRI_KEY_FLAG) ||
+ retrieve_all_fields)
+ {
+ if (get_ndb_value(op, field, i, buf))
+ ERR_RETURN(op->getNdbError());
+ }
+ else
+ {
+ m_value[i].ptr= NULL;
+ }
+ }
+
+ if (table->primary_key == MAX_KEY)
+ {
+ DBUG_PRINT("info", ("Getting hidden key"));
+ // Scanning table with no primary key
+ int hidden_no= table->fields;
+#ifndef DBUG_OFF
+ const NDBTAB *tab= (const NDBTAB *) m_table;
+ if (!tab->getColumn(hidden_no))
+ DBUG_RETURN(1);
+#endif
+ if (get_ndb_value(op, NULL, hidden_no, NULL))
+ ERR_RETURN(op->getNdbError());
+ }
+
+ if (execute_no_commit(this,trans) != 0)
+ DBUG_RETURN(ndb_err(trans));
+ DBUG_PRINT("exit", ("Scan started successfully"));
+ DBUG_RETURN(next_result(buf));
+}
+
/*
Start ordered index scan in NDB
*/
@@ -1573,53 +1619,6 @@ int ha_ndbcluster::full_table_scan(byte *buf)
DBUG_RETURN(define_read_attrs(buf, op));
}
-inline
-int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
-{
- uint i;
- THD *thd= current_thd;
- NdbConnection *trans= m_active_trans;
-
- DBUG_ENTER("define_read_attrs");
-
- // Define attributes to read
- for (i= 0; i < table->fields; i++)
- {
- Field *field= table->field[i];
- if ((thd->query_id == field->query_id) ||
- (field->flags & PRI_KEY_FLAG) ||
- retrieve_all_fields)
- {
- if (get_ndb_value(op, field, i, buf))
- ERR_RETURN(op->getNdbError());
- }
- else
- {
- m_value[i].ptr= NULL;
- }
- }
-
- if (table->primary_key == MAX_KEY)
- {
- DBUG_PRINT("info", ("Getting hidden key"));
- // Scanning table with no primary key
- int hidden_no= table->fields;
-#ifndef DBUG_OFF
- const NDBTAB *tab= (const NDBTAB *) m_table;
- if (!tab->getColumn(hidden_no))
- DBUG_RETURN(1);
-#endif
- if (get_ndb_value(op, NULL, hidden_no, NULL))
- ERR_RETURN(op->getNdbError());
- }
-
- if (execute_no_commit(this,trans) != 0)
- DBUG_RETURN(ndb_err(trans));
- DBUG_PRINT("exit", ("Scan started successfully"));
- DBUG_RETURN(next_result(buf));
-}
-
-
/*
Insert one record into NDB
*/
@@ -2315,20 +2314,6 @@ int ha_ndbcluster::index_last(byte *buf)
}
-int ha_ndbcluster::read_range_first(const key_range *start_key,
- const key_range *end_key,
- bool eq_range, bool sorted)
-{
- byte* buf= table->record[0];
- DBUG_ENTER("ha_ndbcluster::read_range_first");
-
- DBUG_RETURN(read_range_first_to_buf(start_key,
- end_key,
- eq_range,
- sorted,
- buf));
-}
-
inline
int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
const key_range *end_key,
@@ -2373,6 +2358,20 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
}
+int ha_ndbcluster::read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range, bool sorted)
+{
+ byte* buf= table->record[0];
+ DBUG_ENTER("ha_ndbcluster::read_range_first");
+
+ DBUG_RETURN(read_range_first_to_buf(start_key,
+ end_key,
+ eq_range,
+ sorted,
+ buf));
+}
+
int ha_ndbcluster::read_range_next()
{
DBUG_ENTER("ha_ndbcluster::read_range_next");
diff --git a/sql/item.cc b/sql/item.cc
index 58143f52aff..d3d2206d02c 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -348,17 +348,39 @@ Item_field::Item_field(Field *f)
:Item_ident(NullS, f->table_name, f->field_name)
{
set_field(f);
- collation.set(DERIVATION_IMPLICIT);
- fixed= 1;
+ /*
+ field_name and table_name should not point to garbage
+ if this item is to be reused
+ */
+ orig_table_name= orig_field_name= "";
}
Item_field::Item_field(THD *thd, Field *f)
- :Item_ident(NullS, thd->strdup(f->table_name),
- thd->strdup(f->field_name))
+ :Item_ident(f->table->table_cache_key, f->table_name, f->field_name)
{
+ /*
+ We always need to provide Item_field with a fully qualified field
+ name to avoid ambiguity when executing prepared statements like
+ SELECT * from d1.t1, d2.t1; (assuming d1.t1 and d2.t1 have columns
+ with same names).
+ This is because prepared statements never deal with wildcards in
+ select list ('*') and always fix fields using fully specified path
+ (i.e. db.table.column).
+ No check for OOM: if db_name is NULL, we'll just get
+ "Field not found" error.
+ We need to copy db_name, table_name and field_name because they must
+ be allocated in the statement memory, not in table memory (the table
+ structure can go away and pop up again between subsequent executions
+ of a prepared statement).
+ */
+ if (thd->current_arena->is_stmt_prepare())
+ {
+ if (db_name)
+ orig_db_name= thd->strdup(db_name);
+ orig_table_name= thd->strdup(table_name);
+ orig_field_name= thd->strdup(field_name);
+ }
set_field(f);
- collation.set(DERIVATION_IMPLICIT);
- fixed= 1;
}
// Constructor need to process subselect with temporary tables (see Item)
@@ -381,6 +403,7 @@ void Item_field::set_field(Field *field_par)
db_name=field_par->table->table_cache_key;
unsigned_flag=test(field_par->flags & UNSIGNED_FLAG);
collation.set(field_par->charset(), DERIVATION_IMPLICIT);
+ fixed= 1;
}
const char *Item_ident::full_name() const
@@ -1374,8 +1397,8 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
field->query_id=thd->query_id;
table->used_fields++;
table->used_keys.intersect(field->part_of_key);
+ fixed= 1;
}
- fixed= 1;
return 0;
}
@@ -2120,7 +2143,6 @@ bool Item_default_value::fix_fields(THD *thd,
def_field->move_field(def_field->table->default_values -
def_field->table->record[0]);
set_field(def_field);
- fixed= 1;
return 0;
}
@@ -2178,7 +2200,6 @@ bool Item_insert_value::fix_fields(THD *thd,
set_field(new Field_null(0, 0, Field::NONE, tmp_field->field_name,
tmp_field->table, &my_charset_bin));
}
- fixed= 1;
return 0;
}
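
The comment added to Item_field(THD *thd, Field *f) above is about object lifetimes: the TABLE structure that owns the name strings can be closed and reopened between executions of a prepared statement, so the item must keep its own copies in statement memory. A minimal stand-alone sketch of that failure mode, with hypothetical buffers rather than the server's structures:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main()
    {
      /* Name string owned by the (simulated) TABLE structure. */
      char *table_mem= static_cast<char*>(std::malloc(16));
      std::strcpy(table_mem, "col1");

      /* Copy made in (simulated) statement memory, as thd->strdup() does. */
      char stmt_mem[16];
      std::strcpy(stmt_mem, table_mem);

      /* The table structure goes away between executions... */
      std::free(table_mem);

      /* ...so only the statement-owned copy may be used afterwards. */
      std::printf("%s\n", stmt_mem);
      return 0;
    }
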
diff --git a/sql/item.h b/sql/item.h
index 68fa013647c..b3142ec4b06 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -310,6 +310,7 @@ public:
class st_select_lex;
class Item_ident :public Item
{
+protected:
/*
We have to store initial values of db_name, table_name and field_name
to be able to restore them during cleanup() because they can be
@@ -347,7 +348,6 @@ public:
class Item_field :public Item_ident
{
- void set_field(Field *field);
public:
Field *field,*result_field;
@@ -356,13 +356,21 @@ public:
:Item_ident(db_par,table_name_par,field_name_par),
field(0), result_field(0)
{ collation.set(DERIVATION_IMPLICIT); }
- // Constructor need to process subselect with temporary tables (see Item)
+ /*
+ Constructor needed to process subselect with temporary tables (see Item)
+ */
Item_field(THD *thd, Item_field *item);
/*
- Constructor used inside setup_wild(), ensures that field and table
- names will live as long as Item_field (important in prep. stmt.)
+ Constructor used inside setup_wild(), ensures that field, table,
+ and database names will live as long as Item_field (this is important
+ in prepared statements).
*/
Item_field(THD *thd, Field *field);
+ /*
+ If this constructor is used, fix_fields() won't work, because
+ db_name, table_name and column_name are unknown. It's necessary to call
+ set_field() before fix_fields() for all fields created this way.
+ */
Item_field(Field *field);
enum Type type() const { return FIELD_ITEM; }
bool eq(const Item *item, bool binary_cmp) const;
@@ -373,6 +381,7 @@ public:
longlong val_int_result();
String *str_result(String* tmp);
bool send(Protocol *protocol, String *str_arg);
+ void set_field(Field *field);
bool fix_fields(THD *, struct st_table_list *, Item **);
void make_field(Send_field *tmp_field);
int save_in_field(Field *field,bool no_conversions);
diff --git a/sql/item_func.cc b/sql/item_func.cc
index f20d69bf2ad..17cf8642ce5 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -651,11 +651,11 @@ void Item_func_int_div::fix_length_and_dec()
double Item_func_mod::val()
{
DBUG_ASSERT(fixed == 1);
- double value= floor(args[0]->val()+0.5);
- double val2=floor(args[1]->val()+0.5);
- if ((null_value=val2 == 0.0 || args[0]->null_value || args[1]->null_value))
+ double x= args[0]->val();
+ double y= args[1]->val();
+ if ((null_value= (y == 0.0) || args[0]->null_value || args[1]->null_value))
return 0.0; /* purecov: inspected */
- return fmod(value,val2);
+ return fmod(x, y);
}
longlong Item_func_mod::val_int()
@@ -670,10 +670,7 @@ longlong Item_func_mod::val_int()
void Item_func_mod::fix_length_and_dec()
{
- max_length=args[1]->max_length;
- decimals=0;
- maybe_null=1;
- find_num_type();
+ Item_num_op::fix_length_and_dec();
}
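
The Item_func_mod::val() change above stops rounding the operands before calling fmod(), so MOD() on fractional values now returns a fractional remainder. A small sketch of the behavioral difference, using illustrative values:

    #include <cmath>
    #include <cstdio>

    int main()
    {
      double x= 34.5, y= 3.0;

      /* Old behaviour: both operands rounded to the nearest integer first. */
      double old_result= std::fmod(std::floor(x + 0.5),
                                   std::floor(y + 0.5));   /* fmod(35, 3)   == 2   */

      /* New behaviour: operands passed through unchanged. */
      double new_result= std::fmod(x, y);                   /* fmod(34.5, 3) == 1.5 */

      std::printf("old MOD(34.5, 3)=%g  new MOD(34.5, 3)=%g\n",
                  old_result, new_result);
      return 0;
    }
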
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 2fc0aa62f19..cd7b643e146 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2655,8 +2655,8 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds)
strlen(t1_field_name), 0, 0,
&not_used_field_index)))
{
- Item_func_eq *tmp=new Item_func_eq(new Item_field(*t1_field),
- new Item_field(t2_field));
+ Item_func_eq *tmp=new Item_func_eq(new Item_field(thd, *t1_field),
+ new Item_field(thd, t2_field));
if (!tmp)
goto err;
/* Mark field used for table cache */
diff --git a/sql/sql_class.h b/sql/sql_class.h
index c30e1c321b0..e73b35966a9 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -464,6 +464,8 @@ public:
inline bool is_stmt_prepare() const { return (int)state < (int)PREPARED; }
inline bool is_first_stmt_execute() const { return state == PREPARED; }
+ inline bool is_stmt_execute() const
+ { return state == PREPARED || state == EXECUTED; }
inline bool is_conventional_execution() const
{ return state == CONVENTIONAL_EXECUTION; }
inline gptr alloc(unsigned int size) { return alloc_root(&mem_root,size); }
@@ -1112,6 +1114,13 @@ public:
unit= u;
return 0;
}
+ /*
+ Because of peculiarities of the prepared statements protocol
+ we need to know the number of columns in the result set (if
+ there is a result set) apart from sending the column metadata.
+ */
+ virtual uint field_count(List<Item> &fields) const
+ { return fields.elements; }
virtual bool send_fields(List<Item> &list,uint flag)=0;
virtual bool send_data(List<Item> &items)=0;
virtual bool initialize_tables (JOIN *join=0) { return 0; }
@@ -1126,6 +1135,20 @@ public:
};
+/*
+ Base class for select_result descendants which intercept and
+ transform result set rows. As the rows are not sent to the client,
+ sending of result set metadata should be suppressed as well.
+*/
+
+class select_result_interceptor: public select_result
+{
+public:
+ uint field_count(List<Item> &fields) const { return 0; }
+ bool send_fields(List<Item> &fields, uint flag) { return FALSE; }
+};
+
+
class select_send :public select_result {
public:
select_send() {}
@@ -1135,7 +1158,7 @@ public:
};
-class select_to_file :public select_result {
+class select_to_file :public select_result_interceptor {
protected:
sql_exchange *exchange;
File file;
@@ -1147,7 +1170,6 @@ public:
select_to_file(sql_exchange *ex) :exchange(ex), file(-1),row_count(0L)
{ path[0]=0; }
~select_to_file();
- bool send_fields(List<Item> &list, uint flag) { return 0; }
void send_error(uint errcode,const char *err);
bool send_eof();
void cleanup();
@@ -1174,7 +1196,7 @@ public:
};
-class select_insert :public select_result {
+class select_insert :public select_result_interceptor {
public:
TABLE *table;
List<Item> *fields;
@@ -1190,8 +1212,6 @@ class select_insert :public select_result {
}
~select_insert();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list, uint flag)
- { return 0; }
bool send_data(List<Item> &items);
void send_error(uint errcode,const char *err);
bool send_eof();
@@ -1274,7 +1294,7 @@ public:
}
};
-class select_union :public select_result {
+class select_union :public select_result_interceptor {
public:
TABLE *table;
COPY_INFO info;
@@ -1283,8 +1303,6 @@ class select_union :public select_result {
select_union(TABLE *table_par);
~select_union();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list, uint flag)
- { return 0; }
bool send_data(List<Item> &items);
bool send_eof();
bool flush();
@@ -1292,13 +1310,12 @@ class select_union :public select_result {
};
/* Base subselect interface class */
-class select_subselect :public select_result
+class select_subselect :public select_result_interceptor
{
protected:
Item_subselect *item;
public:
select_subselect(Item_subselect *item);
- bool send_fields(List<Item> &list, uint flag) { return 0; };
bool send_data(List<Item> &items)=0;
bool send_eof() { return 0; };
};
@@ -1435,7 +1452,7 @@ public:
};
-class multi_delete :public select_result
+class multi_delete :public select_result_interceptor
{
TABLE_LIST *delete_tables, *table_being_deleted;
Unique **tempfiles;
@@ -1448,8 +1465,6 @@ public:
multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables);
~multi_delete();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list,
- uint flag) { return 0; }
bool send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
void send_error(uint errcode,const char *err);
@@ -1458,7 +1473,7 @@ public:
};
-class multi_update :public select_result
+class multi_update :public select_result_interceptor
{
TABLE_LIST *all_tables, *update_tables, *table_being_updated;
THD *thd;
@@ -1477,7 +1492,6 @@ public:
List<Item> *values, enum_duplicates handle_duplicates);
~multi_update();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list, uint flag) { return 0; }
bool send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
void send_error(uint errcode,const char *err);
@@ -1486,7 +1500,7 @@ public:
};
-class select_dumpvar :public select_result {
+class select_dumpvar :public select_result_interceptor {
ha_rows row_count;
public:
List<LEX_STRING> var_list;
@@ -1494,7 +1508,6 @@ public:
select_dumpvar(void) { var_list.empty(); vars.empty(); row_count=0;}
~select_dumpvar() {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list, uint flag) {return 0;}
bool send_data(List<Item> &items);
bool send_eof();
void cleanup();
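
The sql_class.h changes above replace a repeated `send_fields() { return 0; }` override in every row-intercepting sink with a common select_result_interceptor base that suppresses both the metadata and the reported column count. A minimal sketch of the pattern, with illustrative class names rather than the server's:

    #include <cstdio>

    struct result_sink
    {
      virtual ~result_sink() {}
      /* Client-bound sinks report the real column count and send metadata. */
      virtual unsigned field_count(unsigned n) const { return n; }
      virtual void send_fields(unsigned n)
      { std::printf("metadata: %u columns\n", n); }
    };

    /* Rows are consumed internally, so no metadata and no columns are reported. */
    struct interceptor : public result_sink
    {
      unsigned field_count(unsigned) const { return 0; }
      void send_fields(unsigned) {}
    };

    int main()
    {
      result_sink to_client;
      interceptor to_file;
      to_client.send_fields(to_client.field_count(3));  /* prints "metadata: 3 columns" */
      to_file.send_fields(to_file.field_count(3));      /* suppressed */
      return 0;
    }
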
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 9dd18f0f152..8ee27cdc21f 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -501,12 +501,17 @@ void free_max_user_conn(void)
/*
Mark all commands that somehow changes a table
This is used to check number of updates / hour
+
+ sql_command is actually set to SQLCOM_END sometimes
+ so we need the +1 to include it in the array.
*/
-char uc_update_queries[SQLCOM_END];
+char uc_update_queries[SQLCOM_END+1];
void init_update_queries(void)
{
+ bzero((gptr) &uc_update_queries, sizeof(uc_update_queries));
+
uc_update_queries[SQLCOM_CREATE_TABLE]=1;
uc_update_queries[SQLCOM_CREATE_INDEX]=1;
uc_update_queries[SQLCOM_ALTER_TABLE]=1;
@@ -531,6 +536,7 @@ void init_update_queries(void)
bool is_update_query(enum enum_sql_command command)
{
+ DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
return uc_update_queries[command];
}
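
The `SQLCOM_END+1` change above is a plain off-by-one fix: since sql_command can itself hold the sentinel value SQLCOM_END, an array sized SQLCOM_END would be indexed one element past its end. A minimal sketch with an illustrative enum, not the real enum_sql_command:

    #include <cstring>

    enum enum_sql_command { SQLCOM_SELECT, SQLCOM_UPDATE, SQLCOM_END };

    /* Sized SQLCOM_END+1 so that the sentinel itself is a valid index. */
    char uc_update_queries[SQLCOM_END + 1];

    bool is_update_query(enum_sql_command command)
    {
      /* With a char[SQLCOM_END] array, command == SQLCOM_END would read
         one byte past the end of the array. */
      return uc_update_queries[command] != 0;
    }

    int main()
    {
      std::memset(uc_update_queries, 0, sizeof(uc_update_queries));
      uc_update_queries[SQLCOM_UPDATE]= 1;
      return is_update_query(SQLCOM_END) ? 1 : 0;  /* safely reads the sentinel slot */
    }
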
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 239ff13f261..12f526c5566 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1064,6 +1064,12 @@ static int mysql_test_select(Prepared_statement *stmt,
DBUG_RETURN(1);
#endif
+ if (!lex->result && !(lex->result= new (&stmt->mem_root) select_send))
+ {
+ send_error(thd);
+ goto err;
+ }
+
if (open_and_lock_tables(thd, tables))
{
send_error(thd);
@@ -1087,8 +1093,13 @@ static int mysql_test_select(Prepared_statement *stmt,
}
else
{
- if (send_prep_stmt(stmt, lex->select_lex.item_list.elements) ||
- thd->protocol_simple.send_fields(&lex->select_lex.item_list, 0)
+ List<Item> &fields= lex->select_lex.item_list;
+ /*
+ We can use lex->result as it should've been
+ prepared in unit->prepare call above.
+ */
+ if (send_prep_stmt(stmt, lex->result->field_count(fields)) ||
+ lex->result->send_fields(fields, 0)
#ifndef EMBEDDED_LIBRARY
|| net_flush(&thd->net)
#endif
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index fc2d2a3a5e4..cd1127f9683 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -266,14 +266,14 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
if (first_select->next_select())
{
-
- // it is not single select
+ /* This is not a single select */
/*
Check that it was possible to aggregate
all collations together for UNION.
*/
List_iterator_fast<Item> tp(types);
+ Item_arena *arena= thd->current_arena;
Item *type;
while ((type= tp++))
{
@@ -305,7 +305,11 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
thd_arg->lex->current_select= lex_select_save;
if (!item_list.elements)
{
- Item_arena *arena= thd->current_arena, backup;
+ /*
+ We're in statement prepare or in execution
+ of a conventional statement.
+ */
+ Item_arena backup;
if (arena->is_stmt_prepare())
thd->set_n_backup_item_arena(arena, &backup);
Field **field;
@@ -345,6 +349,20 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
fake_select_lex->table_list.empty();
}
}
+ else if (arena->is_stmt_execute())
+ {
+ /*
+ We're in execution of a prepared statement: reset field items
+ to point at fields from the created temporary table.
+ */
+ List_iterator_fast<Item> it(item_list);
+ for (Field **field= table->field; *field; field++)
+ {
+ Item_field *item_field= (Item_field*) it++;
+ DBUG_ASSERT(item_field);
+ item_field->set_field(*field);
+ }
+ }
}
else
first_select->braces= 0; // remove our changes
diff --git a/sql/table.h b/sql/table.h
index f25b172a0d9..2eb854f553d 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -164,6 +164,7 @@ struct st_table {
MEM_ROOT mem_root;
GRANT_INFO grant;
+ /* A pair "database_name\0table_name\0", widely used as simply a db name */
char *table_cache_key;
char *table_name,*real_name,*path;
uint key_length; /* Length of key */
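
The new table.h comment describes table_cache_key as the concatenation "database_name\0table_name\0"; because the buffer begins with the NUL-terminated database name, a pointer to it also works as a plain db-name string. A small sketch of that layout (the exact key_length accounting used by the server is an assumption here, not taken from this patch):

    #include <cstdio>
    #include <cstring>

    int main()
    {
      /* "test\0t1\0" -- the trailing NUL comes from the string literal itself. */
      const char key[]= "test\0t1";
      size_t key_length= sizeof(key);                /* 8 bytes: both names, both NULs */

      const char *db= key;                           /* "test": why the key doubles as a db name */
      const char *table= key + std::strlen(db) + 1;  /* "t1" */

      std::printf("db=%s table=%s key_length=%zu\n", db, table, key_length);
      return 0;
    }
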