diff options
author | Monty <monty@mariadb.org> | 2017-04-23 19:39:57 +0300 |
---|---|---|
committer | Monty <monty@mariadb.org> | 2017-04-23 22:35:46 +0300 |
commit | 5a759d31f766087d5e135e1d3d3d987693bc9b88 (patch) | |
tree | 93c7359e8b211e269bfa73e5f595f34b9dca575a /storage | |
parent | cba84469eb96481568a9f4ddf3f2989c49c9294c (diff) | |
download | mariadb-git-5a759d31f766087d5e135e1d3d3d987693bc9b88.tar.gz |
Changing field::field_name and Item::name to LEX_CSTRING
Benefits of this patch:
- Removed a lot of calls to strlen(), especially for field_string
- Strings generated by parser are now const strings, less chance of
accidentally changing a string
- Removed a lot of calls with LEX_STRING as parameter (changed to pointer)
- More uniform code
- Item::name_length was not kept up to date. Now fixed
- Several bugs found and fixed (Access to null pointers,
access of freed memory, wrong arguments to printf like functions)
- Removed a lot of casts from (const char*) to (char*)
Changes:
- This caused some ABI changes
- lex_string_set now uses LEX_CSTRING
- Some functions are now taking const char* instead of char*
- Create_field::change and after changed to LEX_CSTRING
- handler::connect_string, comment and engine_name() changed to LEX_CSTRING
- Checked printf() related calls to find bugs. Found and fixed several
errors in old code.
- A lot of changes from LEX_STRING to LEX_CSTRING, especially related to
parsing and events.
- Some changes from LEX_STRING and LEX_STRING & to LEX_CSTRING*
- Some changes for char* to const char*
- Added printf argument checking for my_snprintf()
- Introduced null_clex_str, star_clex_string, temp_lex_str to simplify
code
- Added item_empty_name and item_used_name to be able to distinguish between
items that were given an empty name and items that were not given a name
This is used in sql_yacc.yy to know when to give an item a name.
- select table_name."*" is no longer the same as table_name.*
- removed not used function Item::rename()
- Added comparison of item->name_length before some calls to
my_strcasecmp() to speed up comparison
- Moved Item_sp_variable::make_field() from item.h to item.cc
- Some minimal code changes to avoid copying to const char *
- Fixed wrong error message in wsrep_mysql_parse()
- Fixed wrong code in find_field_in_natural_join() where real_item() was
set when it shouldn't
- ER_ERROR_ON_RENAME was used with extra arguments.
- Removed some (wrong) ER_OUTOFMEMORY, as alloc_root will already
give the error.
TODO:
- Check possible unsafe casts in plugin/auth_examples/qa_auth_interface.c
- Change code to not modify LEX_CSTRING for database name
(as part of lower_case_table_names)
Diffstat (limited to 'storage')
52 files changed, 421 insertions, 409 deletions
diff --git a/storage/archive/azio.c b/storage/archive/azio.c index cc2140e838e..8bf90e700d4 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -913,7 +913,7 @@ int azread_frm(azio_stream *s, uchar *blob) /* Simple comment field */ -int azwrite_comment(azio_stream *s, char *blob, unsigned int length) +int azwrite_comment(azio_stream *s, const char *blob, unsigned int length) { if (s->mode == 'r') return 1; diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h index 2971705b2f1..d9318002901 100644 --- a/storage/archive/azlib.h +++ b/storage/archive/azlib.h @@ -336,7 +336,8 @@ extern int azclose(azio_stream *file); extern int azwrite_frm (azio_stream *s, const uchar *blob, unsigned int length); extern int azread_frm (azio_stream *s, uchar *blob); -extern int azwrite_comment (azio_stream *s, char *blob, unsigned int length); +extern int azwrite_comment (azio_stream *s, const char *blob, + unsigned int length); extern int azread_comment (azio_stream *s, char *blob); #ifdef __cplusplus diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc index f95922088ff..ff758a57872 100644 --- a/storage/cassandra/ha_cassandra.cc +++ b/storage/cassandra/ha_cassandra.cc @@ -411,7 +411,7 @@ int ha_cassandra::check_field_options(Field **fields) { if (dyncol_set || (*field)->type() != MYSQL_TYPE_BLOB) { - my_error(ER_WRONG_FIELD_SPEC, MYF(0), (*field)->field_name); + my_error(ER_WRONG_FIELD_SPEC, MYF(0), (*field)->field_name.str); DBUG_RETURN(HA_WRONG_CREATE_OPTION); } dyncol_set= 1; @@ -1497,14 +1497,14 @@ bool ha_cassandra::setup_field_converters(Field **field_arg, uint n_fields) for (field= field_arg + 1, i= 1; *field; field++, i++) { if ((!dyncol_set || dyncol_field != i) && - !strcmp((*field)->field_name, col_name)) + !strcmp((*field)->field_name.str, col_name)) { n_mapped++; ColumnDataConverter **conv= field_converters + (*field)->field_index; if (!(*conv= map_field_to_validator(*field, col_type))) { se->print_error("Failed to 
map column %s to datatype %s", - (*field)->field_name, col_type); + (*field)->field_name.str, col_type); my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str()); DBUG_RETURN(true); } @@ -1543,7 +1543,7 @@ bool ha_cassandra::setup_field_converters(Field **field_arg, uint n_fields) DBUG_ASSERT(first_unmapped); se->print_error("Field `%s` could not be mapped to any field in Cassandra", - first_unmapped->field_name); + first_unmapped->field_name.str); my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str()); DBUG_RETURN(true); } @@ -1552,14 +1552,14 @@ bool ha_cassandra::setup_field_converters(Field **field_arg, uint n_fields) Setup type conversion for row_key. */ se->get_rowkey_type(&col_name, &col_type); - if (col_name && strcmp(col_name, (*field_arg)->field_name)) + if (col_name && strcmp(col_name, (*field_arg)->field_name.str)) { se->print_error("PRIMARY KEY column must match Cassandra's name '%s'", col_name); my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str()); DBUG_RETURN(true); } - if (!col_name && strcmp("rowkey", (*field_arg)->field_name)) + if (!col_name && strcmp("rowkey", (*field_arg)->field_name.str)) { se->print_error("target column family has no key_alias defined, " "PRIMARY KEY column must be named 'rowkey'"); @@ -1742,14 +1742,14 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) { uint fieldnr= (*field)->field_index; if ((!dyncol_set || dyncol_field != fieldnr) && - !strcmp((*field)->field_name, cass_name)) + !strcmp((*field)->field_name.str, cass_name)) { found= 1; (*field)->set_notnull(); if (field_converters[fieldnr]->cassandra_to_mariadb(cass_value, cass_value_len)) { - print_conversion_error((*field)->field_name, cass_value, + print_conversion_error((*field)->field_name.str, cass_value, cass_value_len); res=1; goto err; @@ -1770,7 +1770,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) se->print_error("Unable to convert value for field `%s`" " from Cassandra's data format. 
Name" " length exceed limit of %u: '%s'", - table->field[dyncol_field]->field_name, + table->field[dyncol_field]->field_name.str, (uint)MAX_NAME_LENGTH, cass_name); my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str()); res=1; @@ -1782,7 +1782,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) se->print_error("Unable to convert value for field `%s`" " from Cassandra's data format. Sum of all names" " length exceed limit of %lu", - table->field[dyncol_field]->field_name, + table->field[dyncol_field]->field_name.str, cass_name, (uint)MAX_TOTAL_NAME_LENGTH); my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str()); res=1; @@ -1841,7 +1841,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) se->get_read_rowkey(&cass_value, &cass_value_len); if (rowkey_converter->cassandra_to_mariadb(cass_value, cass_value_len)) { - print_conversion_error((*field)->field_name, cass_value, cass_value_len); + print_conversion_error((*field)->field_name.str, cass_value, cass_value_len); res=1; goto err; } @@ -1953,7 +1953,7 @@ int ha_cassandra::write_row(uchar *buf) if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len)) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), - rowkey_converter->field->field_name, insert_lineno); + rowkey_converter->field->field_name.str, insert_lineno); dbug_tmp_restore_column_map(table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } @@ -1987,11 +1987,11 @@ int ha_cassandra::write_row(uchar *buf) &cass_data_len)) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), - field_converters[i]->field->field_name, insert_lineno); + field_converters[i]->field->field_name.str, insert_lineno); dbug_tmp_restore_column_map(table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - se->add_insert_column(field_converters[i]->field->field_name, 0, + se->add_insert_column(field_converters[i]->field->field_name.str, 0, cass_data, cass_data_len); } } @@ -2074,7 +2074,7 @@ int ha_cassandra::rnd_init(bool scan) { se->clear_read_columns(); for 
(uint i= 1; i < table->s->fields; i++) - se->add_read_column(table->field[i]->field_name); + se->add_read_column(table->field[i]->field_name.str); } se->read_batch_size= THDVAR(table->in_use, rnd_batch_size); @@ -2355,7 +2355,7 @@ public: if (idx == obj->table->s->fields) return NULL; else - return obj->table->field[idx++]->field_name; + return obj->table->field[idx++]->field_name.str; } }; @@ -2386,7 +2386,7 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data) if (rowkey_converter->mariadb_to_cassandra(&new_key, &new_key_len)) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), - rowkey_converter->field->field_name, insert_lineno); + rowkey_converter->field->field_name.str, insert_lineno); dbug_tmp_restore_column_map(table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } @@ -2449,11 +2449,11 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data) if (field_converters[i]->mariadb_to_cassandra(&cass_data, &cass_data_len)) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), - field_converters[i]->field->field_name, insert_lineno); + field_converters[i]->field->field_name.str, insert_lineno); dbug_tmp_restore_column_map(table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - se->add_insert_column(field_converters[i]->field->field_name, 0, + se->add_insert_column(field_converters[i]->field->field_name.str, 0, cass_data, cass_data_len); } } diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index e655655fb9c..09681d4daa4 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -241,7 +241,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, /****************************************************************************/ /* Return str as a zero terminated string. 
*/ /****************************************************************************/ -static char *strz(PGLOBAL g, LEX_STRING &ls) +static char *strz(PGLOBAL g, LEX_CSTRING &ls) { char *str= (char*)PlugSubAlloc(g, NULL, ls.length + 1); @@ -1211,7 +1211,7 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) PTOS options= GetTableOptionStruct(); if (!stricmp(opname, "Connect")) { - LEX_STRING cnc= (tshp) ? tshp->connect_string + LEX_CSTRING cnc= (tshp) ? tshp->connect_string : table->s->connect_string; if (cnc.length) @@ -1391,7 +1391,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) pcf->Flags= 0; // Now get column information - pcf->Name= (char*)fp->field_name; + pcf->Name= (char*)fp->field_name.str; if (fop && fop->special) { pcf->Fieldfmt= (char*)fop->special; @@ -1588,7 +1588,7 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) // Get the the key parts info for (int k= 0; (unsigned)k < kp.user_defined_key_parts; k++) { - pn= (char*)kp.key_part[k].field->field_name; + pn= (char*)kp.key_part[k].field->field_name.str; name= PlugDup(g, pn); // Allocate the key part description block @@ -1704,7 +1704,7 @@ int ha_connect::GetColNameLen(Field *fp) if (fop && fop->special) n= strlen(fop->special) + 1; else - n= strlen(fp->field_name); + n= fp->field_name.length; return n; } // end of GetColNameLen @@ -1716,7 +1716,7 @@ char *ha_connect::GetColName(Field *fp) { PFOS fop= GetFieldOptionStruct(fp); - return (fop && fop->special) ? fop->special : (char*)fp->field_name; + return (fop && fop->special) ? 
fop->special : (char*)fp->field_name.str; } // end of GetColName /****************************************************************************/ @@ -1731,7 +1731,7 @@ void ha_connect::AddColName(char *cp, Field *fp) // The prefix * mark the column as "special" strcat(strcpy(cp, "*"), strupr(fop->special)); else - strcpy(cp, (char*)fp->field_name); + strcpy(cp, fp->field_name.str); } // end of AddColName #endif // 0 @@ -1818,12 +1818,12 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) for (field= table->field; fp= *field; field++) { if (bitmap_is_set(map, fp->field_index)) { - n1+= (strlen(fp->field_name) + 1); + n1+= (fp->field_name.length + 1); k1++; } // endif if (ump && bitmap_is_set(ump, fp->field_index)) { - n2+= (strlen(fp->field_name) + 1); + n2+= (fp->field_name.length + 1); k2++; } // endif @@ -1834,8 +1834,8 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) for (field= table->field; fp= *field; field++) if (bitmap_is_set(map, fp->field_index)) { - strcpy(p, (char*)fp->field_name); - p+= (strlen(p) + 1); + strcpy(p, fp->field_name.str); + p+= (fp->field_name.length + 1); } // endif used field *p= '\0'; // mark end of list @@ -1846,7 +1846,7 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) for (field= table->field; fp= *field; field++) if (bitmap_is_set(ump, fp->field_index)) { - strcpy(p, (char*)fp->field_name); + strcpy(p, fp->field_name.str); if (part_id && bitmap_is_set(part_id, fp->field_index)) { // Trying to update a column used for partitioning @@ -1911,9 +1911,9 @@ bool ha_connect::CheckColumnList(PGLOBAL g) if ((rc= setjmp(g->jumper[++g->jump_level])) == 0) { for (field= table->field; fp= *field; field++) if (bitmap_is_set(map, fp->field_index)) { - if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name, 0))) { + if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name.str, 0))) { sprintf(g->Message, "Column %s not found in %s", - fp->field_name, tdbp->GetName()); + fp->field_name.str, tdbp->GetName()); brc= true; goto fin; } // endif colp @@ -2003,14 +2003,14 @@ 
int ha_connect::MakeRecord(char *buf) // This is a used field, fill the buffer with value for (colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) if ((!mrr || colp->GetKcol()) && - !stricmp(colp->GetName(), (char*)fp->field_name)) + !stricmp(colp->GetName(), fp->field_name.str)) break; if (!colp) { if (mrr) continue; - htrc("Column %s not found\n", fp->field_name); + htrc("Column %s not found\n", fp->field_name.str); dbug_tmp_restore_column_map(table->write_set, org_bitmap); DBUG_RETURN(HA_ERR_WRONG_IN_RECORD); } // endif colp @@ -2066,7 +2066,7 @@ int ha_connect::MakeRecord(char *buf) sprintf(buf, "Out of range value %.140s for column '%s' at row %ld", value->GetCharString(val), - fp->field_name, + fp->field_name.str, thd->get_stmt_da()->current_row_for_warning()); push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, buf); @@ -2125,11 +2125,11 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) && tdbp->GetAmType() != TYPE_AM_JDBC) || bitmap_is_set(table->write_set, fp->field_index)) { for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext()) - if (!stricmp(colp->GetName(), fp->field_name)) + if (!stricmp(colp->GetName(), fp->field_name.str)) break; if (!colp) { - htrc("Column %s not found\n", fp->field_name); + htrc("Column %s not found\n", fp->field_name.str); rc= HA_ERR_WRONG_IN_RECORD; goto err; } else @@ -2321,10 +2321,10 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, if (q) { oom|= qry->Append(q); - oom|= qry->Append((PSZ)fp->field_name); + oom|= qry->Append((PSZ)fp->field_name.str); oom|= qry->Append(q); } else - oom|= qry->Append((PSZ)fp->field_name); + oom|= qry->Append((PSZ)fp->field_name.str); switch (ranges[i]->flag) { case HA_READ_KEY_EXACT: @@ -2581,7 +2581,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) return NULL; if (pField->field->table != table || - !(colp[i]= tdbp->ColDB(g, (PSZ)pField->field->field_name, 0))) + !(colp[i]= tdbp->ColDB(g, (PSZ)pField->field->field_name.str, 0))) return NULL; // 
Column does not belong to this table // These types are not yet implemented (buggy) @@ -2599,7 +2599,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) if (trace) { htrc("Field index=%d\n", pField->field->field_index); - htrc("Field name=%s\n", pField->field->field_name); + htrc("Field name=%s\n", pField->field->field_name.str); } // endif trace } else { @@ -2856,7 +2856,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) } else { bool h; - fnm = filp->Chk(pField->field->field_name, &h); + fnm = filp->Chk(pField->field->field_name.str, &h); if (h && i && !ishav) return NULL; // Having should be col VOP arg @@ -2867,7 +2867,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) if (trace) { htrc("Field index=%d\n", pField->field->field_index); - htrc("Field name=%s\n", pField->field->field_name); + htrc("Field name=%s\n", pField->field->field_name.str); htrc("Field type=%d\n", pField->field->type()); htrc("Field_type=%d\n", args[i]->field_type()); } // endif trace @@ -4169,7 +4169,7 @@ int ha_connect::delete_all_rows() } // end of delete_all_rows -bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) +bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool quick) { const char *db= (dbn && *dbn) ? 
dbn : NULL; TABTYPE type=GetRealType(options); @@ -5380,7 +5380,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } // endif p } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) - tab= table_s->table_name.str; // Default value + tab= (char*) table_s->table_name.str; // Default value } // endif tab @@ -6221,7 +6221,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (fp->flags & (BLOB_FLAG | ENUM_FLAG | SET_FLAG)) { sprintf(g->Message, "Unsupported type for column %s", - fp->field_name); + fp->field_name.str); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); rc= HA_ERR_INTERNAL_ERROR; DBUG_RETURN(rc); @@ -6257,11 +6257,11 @@ int ha_connect::create(const char *name, TABLE *table_arg, case MYSQL_TYPE_STRING: if (!fp->field_length) { sprintf(g->Message, "Unsupported 0 length for column %s", - fp->field_name); + fp->field_name.str); rc= HA_ERR_INTERNAL_ERROR; my_printf_error(ER_UNKNOWN_ERROR, "Unsupported 0 length for column %s", - MYF(0), fp->field_name); + MYF(0), fp->field_name.str); DBUG_RETURN(rc); } // endif fp @@ -6276,12 +6276,12 @@ int ha_connect::create(const char *name, TABLE *table_arg, case MYSQL_TYPE_BLOB: case MYSQL_TYPE_GEOMETRY: default: -// fprintf(stderr, "Unsupported type column %s\n", fp->field_name); +// fprintf(stderr, "Unsupported type column %s\n", fp->field_name.str); sprintf(g->Message, "Unsupported type for column %s", - fp->field_name); + fp->field_name.str); rc= HA_ERR_INTERNAL_ERROR; my_printf_error(ER_UNKNOWN_ERROR, "Unsupported type for column %s", - MYF(0), fp->field_name); + MYF(0), fp->field_name.str); DBUG_RETURN(rc); break; } // endswitch type @@ -6296,12 +6296,12 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (dbf) { bool b= false; - if ((b= strlen(fp->field_name) > 10)) + if ((b= fp->field_name.length > 10)) sprintf(g->Message, "DBF: Column name '%s' is too long (max=10)", - fp->field_name); + fp->field_name.str); else if ((b= fp->field_length > 255)) 
sprintf(g->Message, "DBF: Column length too big for '%s' (max=255)", - fp->field_name); + fp->field_name.str); if (b) { my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h index 4757f6edfe1..de735668133 100644 --- a/storage/connect/ha_connect.h +++ b/storage/connect/ha_connect.h @@ -503,7 +503,7 @@ private: DsMrr_impl ds_mrr; protected: - bool check_privileges(THD *thd, PTOS options, char *dbn, bool quick=false); + bool check_privileges(THD *thd, PTOS options, const char *dbn, bool quick=false); MODE CheckMode(PGLOBAL g, THD *thd, MODE newmode, bool *chk, bool *cras); char *GetDBfromName(const char *name); diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index f07a1ac818f..360d0d1a82a 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1500,7 +1500,8 @@ static PSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) if (args->arg_count > (unsigned)i) { int j = 0, n = args->attribute_lengths[i]; my_bool b; // true if attribute is zero terminated - PSZ p, s = args->attributes[i]; + PSZ p; + const char *s = args->attributes[i]; if (s && *s && (n || *s == '\'')) { if ((b = (!n || !s[n]))) @@ -1519,7 +1520,7 @@ static PSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) } // endif *s if (n < 1) - return "Key"; + return (char*) "Key"; if (!b) { p = (PSZ)PlugSubAlloc(g, NULL, n + 1); @@ -1530,10 +1531,10 @@ static PSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) } // endif s - return s; + return (char*) s; } // endif count - return "Key"; + return (char*) "Key"; } // end of MakeKey /*********************************************************************************/ diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp index 0bf3f6beb43..9570fbf882a 100644 --- a/storage/connect/tabtbl.cpp +++ b/storage/connect/tabtbl.cpp @@ -228,7 +228,7 @@ bool TDBTBL::InitTableList(PGLOBAL g) { int n; uint sln; - char *scs; + const char *scs; PTABLE tp, tabp; PCOL colp; 
PTBLDEF tdp = (PTBLDEF)To_Def; diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp index 762c61bd1a1..ad939db86be 100644 --- a/storage/connect/tabutil.cpp +++ b/storage/connect/tabutil.cpp @@ -181,7 +181,7 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db, // Get column name crp = qrp->Colresp; // Column_Name - colname = (char *)fp->field_name; + colname = (char *)fp->field_name.str; crp->Kdata->SetValue(colname, i); chset = (char *)fp->charset()->name; @@ -262,7 +262,7 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db, crp = crp->Next; // Remark // For Valgrind - if (fp->comment.length > 0 && (fld = fp->comment.str)) + if (fp->comment.length > 0 && (fld = (char*) fp->comment.str)) crp->Kdata->SetValue(fld, fp->comment.length, i); else crp->Kdata->Reset(i); diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc index 13062c0bf7c..657f5cb9d01 100644 --- a/storage/example/ha_example.cc +++ b/storage/example/ha_example.cc @@ -896,7 +896,7 @@ int ha_example::create(const char *name, TABLE *table_arg, ha_field_option_struct *field_options= (*field)->option_struct; DBUG_ASSERT(field_options); DBUG_PRINT("info", ("field: %s complex: '%-.64s'", - (*field)->field_name, + (*field)->field_name.str, (field_options->complex_param_to_parse_it_in_engine ? 
field_options->complex_param_to_parse_it_in_engine : "<NULL>"))); @@ -975,7 +975,7 @@ ha_example::check_if_supported_inplace_alter(TABLE* altered_table, { push_warning_printf(ha_thd(), Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR, "EXAMPLE DEBUG: Field %`s COMPLEX '%s' -> '%s'", - table->s->field[i]->field_name, + table->s->field[i]->field_name.str, f_old->complex_param_to_parse_it_in_engine, f_new->complex_param_to_parse_it_in_engine); } diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index b4b781ca534..f22bb6cb758 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -419,7 +419,7 @@ static int federated_rollback(handlerton *hton, THD *thd, bool all); /* Federated storage engine handlerton */ -static handler *federated_create_handler(handlerton *hton, +static handler *federated_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root) { @@ -753,9 +753,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table, share->table_name++; share->table_name_length= (uint) strlen(share->table_name); - DBUG_PRINT("info", + DBUG_PRINT("info", ("internal format, parsed table_name share->connection_string \ - %s share->table_name %s", + %s share->table_name %s", share->connection_string, share->table_name)); /* @@ -777,9 +777,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table, */ share->table_name= strmake_root(mem_root, table->s->table_name.str, (share->table_name_length= table->s->table_name.length)); - DBUG_PRINT("info", + DBUG_PRINT("info", ("internal format, default table_name share->connection_string \ - %s share->table_name %s", + %s share->table_name %s", share->connection_string, share->table_name)); } @@ -971,8 +971,8 @@ uint ha_federated::convert_row_to_internal_format(uchar *record, static bool emit_key_part_name(String *to, KEY_PART_INFO *part) { DBUG_ENTER("emit_key_part_name"); - if (append_ident(to, 
part->field->field_name, - strlen(part->field->field_name), ident_quote_char)) + if (append_ident(to, part->field->field_name.str, + part->field->field_name.length, ident_quote_char)) DBUG_RETURN(1); // Out of memory DBUG_RETURN(0); } @@ -1234,7 +1234,7 @@ read_range_first: start_key 3 end_key 3 Summary: -* If the start key flag is 0 the max key flag shouldn't even be set, +* If the start key flag is 0 the max key flag shouldn't even be set, and if it is, the query produced would be invalid. * Multipart keys, even if containing some or all numeric columns, are treated the same as non-numeric keys @@ -1533,8 +1533,8 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) query.append(STRING_WITH_LEN("SELECT ")); for (field= table->field; *field; field++) { - append_ident(&query, (*field)->field_name, - strlen((*field)->field_name), ident_quote_char); + append_ident(&query, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); query.append(STRING_WITH_LEN(", ")); } /* chops off trailing comma */ @@ -1542,7 +1542,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) query.append(STRING_WITH_LEN(" FROM ")); - append_ident(&query, tmp_share.table_name, + append_ident(&query, tmp_share.table_name, tmp_share.table_name_length, ident_quote_char); if (!(share= (FEDERATED_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) || @@ -1762,7 +1762,7 @@ bool ha_federated::append_stmt_insert(String *query) insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO ")); else insert_string.append(STRING_WITH_LEN("INSERT INTO ")); - append_ident(&insert_string, share->table_name, share->table_name_length, + append_ident(&insert_string, share->table_name, share->table_name_length, ident_quote_char); tmp_length= insert_string.length(); insert_string.append(STRING_WITH_LEN(" (")); @@ -1776,8 +1776,8 @@ bool ha_federated::append_stmt_insert(String *query) if (bitmap_is_set(table->write_set, 
(*field)->field_index)) { /* append the field name */ - append_ident(&insert_string, (*field)->field_name, - strlen((*field)->field_name), ident_quote_char); + append_ident(&insert_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); /* append commas between both fields and fieldnames */ /* @@ -1926,11 +1926,11 @@ int ha_federated::write_row(uchar *buf) if (bulk_insert.length == 0) { char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - String insert_string(insert_buffer, sizeof(insert_buffer), + String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin); insert_string.length(0); append_stmt_insert(&insert_string); - dynstr_append_mem(&bulk_insert, insert_string.ptr(), + dynstr_append_mem(&bulk_insert, insert_string.ptr(), insert_string.length()); } else @@ -2068,7 +2068,7 @@ int ha_federated::optimize(THD* thd, HA_CHECK_OPT* check_opt) query.set_charset(system_charset_info); query.append(STRING_WITH_LEN("OPTIMIZE TABLE ")); - append_ident(&query, share->table_name, share->table_name_length, + append_ident(&query, share->table_name, share->table_name_length, ident_quote_char); if (real_query(query.ptr(), query.length())) @@ -2090,7 +2090,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) query.set_charset(system_charset_info); query.append(STRING_WITH_LEN("REPAIR TABLE ")); - append_ident(&query, share->table_name, share->table_name_length, + append_ident(&query, share->table_name, share->table_name_length, ident_quote_char); if (check_opt->flags & T_QUICK) query.append(STRING_WITH_LEN(" QUICK")); @@ -2190,8 +2190,8 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data) { if (bitmap_is_set(table->write_set, (*field)->field_index)) { - size_t field_name_length= strlen((*field)->field_name); - append_ident(&update_string, (*field)->field_name, field_name_length, + append_ident(&update_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); 
update_string.append(STRING_WITH_LEN(" = ")); @@ -2216,8 +2216,8 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data) if (bitmap_is_set(table->read_set, (*field)->field_index)) { - size_t field_name_length= strlen((*field)->field_name); - append_ident(&where_string, (*field)->field_name, field_name_length, + append_ident(&where_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); if (field_in_record_is_null(table, *field, (char*) old_data)) where_string.append(STRING_WITH_LEN(" IS NULL ")); @@ -2299,8 +2299,8 @@ int ha_federated::delete_row(const uchar *buf) found++; if (bitmap_is_set(table->read_set, cur_field->field_index)) { - append_ident(&delete_string, (*field)->field_name, - strlen((*field)->field_name), ident_quote_char); + append_ident(&delete_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); data_string.length(0); if (cur_field->is_null()) { diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index a7b1edd0086..17f228c1209 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -892,8 +892,8 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, static bool emit_key_part_name(String *to, KEY_PART_INFO *part) { DBUG_ENTER("emit_key_part_name"); - if (append_ident(to, part->field->field_name, - strlen(part->field->field_name), ident_quote_char)) + if (append_ident(to, part->field->field_name.str, + part->field->field_name.length, ident_quote_char)) DBUG_RETURN(1); // Out of memory DBUG_RETURN(0); } @@ -1595,8 +1595,8 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table) query.append(STRING_WITH_LEN("SELECT ")); for (field= table->field; *field; field++) { - append_ident(&query, (*field)->field_name, - strlen((*field)->field_name), ident_quote_char); + append_ident(&query, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); 
query.append(STRING_WITH_LEN(", ")); } /* chops off trailing comma */ @@ -1604,7 +1604,7 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table) query.append(STRING_WITH_LEN(" FROM ")); - append_ident(&query, tmp_share.table_name, + append_ident(&query, tmp_share.table_name, tmp_share.table_name_length, ident_quote_char); if (!(share= (FEDERATEDX_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) || @@ -1900,7 +1900,7 @@ bool ha_federatedx::append_stmt_insert(String *query) insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO ")); else insert_string.append(STRING_WITH_LEN("INSERT INTO ")); - append_ident(&insert_string, share->table_name, share->table_name_length, + append_ident(&insert_string, share->table_name, share->table_name_length, ident_quote_char); tmp_length= insert_string.length(); insert_string.append(STRING_WITH_LEN(" (")); @@ -1914,8 +1914,8 @@ bool ha_federatedx::append_stmt_insert(String *query) if (bitmap_is_set(table->write_set, (*field)->field_index)) { /* append the field name */ - append_ident(&insert_string, (*field)->field_name, - strlen((*field)->field_name), ident_quote_char); + append_ident(&insert_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); /* append commas between both fields and fieldnames */ /* @@ -2212,7 +2212,7 @@ int ha_federatedx::optimize(THD* thd, HA_CHECK_OPT* check_opt) query.set_charset(system_charset_info); query.append(STRING_WITH_LEN("OPTIMIZE TABLE ")); - append_ident(&query, share->table_name, share->table_name_length, + append_ident(&query, share->table_name, share->table_name_length, ident_quote_char); DBUG_ASSERT(txn == get_txn(thd)); @@ -2238,7 +2238,7 @@ int ha_federatedx::repair(THD* thd, HA_CHECK_OPT* check_opt) query.set_charset(system_charset_info); query.append(STRING_WITH_LEN("REPAIR TABLE ")); - append_ident(&query, share->table_name, share->table_name_length, + append_ident(&query, share->table_name, share->table_name_length, 
ident_quote_char); if (check_opt->flags & T_QUICK) query.append(STRING_WITH_LEN(" QUICK")); @@ -2342,8 +2342,8 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) { if (bitmap_is_set(table->write_set, (*field)->field_index)) { - uint field_name_length= strlen((*field)->field_name); - append_ident(&update_string, (*field)->field_name, field_name_length, + append_ident(&update_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); update_string.append(STRING_WITH_LEN(" = ")); @@ -2368,8 +2368,8 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) if (bitmap_is_set(table->read_set, (*field)->field_index)) { - uint field_name_length= strlen((*field)->field_name); - append_ident(&where_string, (*field)->field_name, field_name_length, + append_ident(&where_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); if (field_in_record_is_null(table, *field, (char*) old_data)) where_string.append(STRING_WITH_LEN(" IS NULL ")); @@ -2455,8 +2455,8 @@ int ha_federatedx::delete_row(const uchar *buf) found++; if (bitmap_is_set(table->read_set, cur_field->field_index)) { - append_ident(&delete_string, (*field)->field_name, - strlen((*field)->field_name), ident_quote_char); + append_ident(&delete_string, (*field)->field_name.str, + (*field)->field_name.length, ident_quote_char); data_string.length(0); if (cur_field->is_null()) { diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index b351ac00c1a..2022ae4a4c1 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -3376,7 +3376,7 @@ innobase_query_caching_of_table_permitted( THD* thd, /*!< in: thd of the user who is trying to store a result to the query cache or retrieve it */ - char* full_name, /*!< in: normalized path to the table */ + const char* full_name, /*!< in: normalized path to the table */ uint full_name_len, /*!< in: length of the 
normalized path to the table */ ulonglong *unused) /*!< unused for this engine */ @@ -6218,7 +6218,7 @@ innobase_build_v_templ( name = dict_table_get_v_col_name(ib_table, z); } - ut_ad(!ut_strcmp(name, field->field_name)); + ut_ad(!ut_strcmp(name, field->field_name.str)); #endif const dict_v_col_t* vcol; @@ -6253,7 +6253,7 @@ innobase_build_v_templ( const char* name = dict_table_get_col_name( ib_table, j); - ut_ad(!ut_strcmp(name, field->field_name)); + ut_ad(!ut_strcmp(name, field->field_name.str)); #endif s_templ->vtempl[j] = static_cast< @@ -7957,7 +7957,7 @@ build_template_field( ib::info() << "MySQL table " << table->s->table_name.str << " field " << j << " name " - << table->field[j]->field_name; + << table->field[j]->field_name.str; } ib::error() << "Clustered record field for column " << i @@ -9080,7 +9080,7 @@ calc_row_difference( if (field_mysql_type == MYSQL_TYPE_LONGLONG && prebuilt->table->fts && innobase_strcasecmp( - field->field_name, FTS_DOC_ID_COL_NAME) == 0) { + field->field_name.str, FTS_DOC_ID_COL_NAME) == 0) { doc_id = (doc_id_t) mach_read_from_n_little_endian( n_ptr, 8); if (doc_id == 0) { @@ -11477,7 +11477,7 @@ create_table_check_doc_id_col( col_len = field->pack_length(); - if (innobase_strcasecmp(field->field_name, + if (innobase_strcasecmp(field->field_name.str, FTS_DOC_ID_COL_NAME) == 0) { /* Note the name is case sensitive due to @@ -11485,7 +11485,7 @@ create_table_check_doc_id_col( if (col_type == DATA_INT && !field->real_maybe_null() && col_len == sizeof(doc_id_t) - && (strcmp(field->field_name, + && (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME) == 0)) { *doc_id_col = i; } else { @@ -11497,7 +11497,7 @@ create_table_check_doc_id_col( " of BIGINT NOT NULL type, and named" " in all capitalized characters"); my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); *doc_id_col = ULINT_UNDEFINED; } @@ -11568,7 +11568,7 @@ innodb_base_col_setup( for (z = 0; z < table->n_cols; z++) { const char* name = 
dict_table_get_col_name(table, z); if (!innobase_strcasecmp(name, - base_field->field_name)) { + base_field->field_name.str)) { break; } } @@ -11609,7 +11609,7 @@ innodb_base_col_setup_for_stored( const char* name = dict_table_get_col_name( table, z); if (!innobase_strcasecmp( - name, base_field->field_name)) { + name, base_field->field_name.str)) { break; } } @@ -11753,7 +11753,7 @@ create_table_info_t::create_table_def() " column type and try to re-create" " the table with an appropriate" " column type.", - table->name.m_name, field->field_name); + table->name.m_name, field->field_name.str); goto err_col; } @@ -11816,9 +11816,9 @@ create_table_info_t::create_table_def() /* First check whether the column to be added has a system reserved name. */ - if (dict_col_name_is_reserved(field->field_name)){ + if (dict_col_name_is_reserved(field->field_name.str)){ my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); err_col: dict_mem_table_free(table); mem_heap_free(heap); @@ -11830,7 +11830,7 @@ err_col: if (!is_virtual) { dict_mem_table_add_col(table, heap, - field->field_name, col_type, + field->field_name.str, col_type, dtype_form_prtype( (ulint) field->type() | nulls_allowed | unsigned_type @@ -11839,7 +11839,7 @@ err_col: col_len); } else { dict_mem_table_add_v_col(table, heap, - field->field_name, col_type, + field->field_name.str, col_type, dtype_form_prtype( (ulint) field->type() | nulls_allowed | unsigned_type @@ -12022,7 +12022,7 @@ create_index( } dict_mem_index_add_field( - index, key_part->field->field_name, 0); + index, key_part->field->field_name.str, 0); } DBUG_RETURN(convert_error_code_to_mysql( @@ -12074,7 +12074,7 @@ create_index( if (field == NULL) ut_error; - const char* field_name = key_part->field->field_name; + const char* field_name = key_part->field->field_name.str; col_type = get_innobase_type_from_mysql_type( &is_unsigned, key_part->field); @@ -12100,7 +12100,7 @@ create_index( " inappropriate data type. 
Table" " name %s, column name %s.", table_name, - key_part->field->field_name); + key_part->field->field_name.str); prefix_len = 0; } @@ -12731,7 +12731,7 @@ create_table_info_t::innobase_table_flags() /* Do a pre-check on FTS DOC ID index */ if (!(key->flags & HA_NOSAME) || strcmp(key->name, FTS_DOC_ID_INDEX_NAME) - || strcmp(key->key_part[0].field->field_name, + || strcmp(key->key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { fts_doc_id_index_bad = key->name; } @@ -15818,8 +15818,8 @@ get_foreign_key_info( char tmp_buff[NAME_LEN+1]; char name_buff[NAME_LEN+1]; const char* ptr; - LEX_STRING* referenced_key_name; - LEX_STRING* name = NULL; + LEX_CSTRING* referenced_key_name; + LEX_CSTRING* name = NULL; ptr = dict_remove_db_name(foreign->id); f_key_info.foreign_id = thd_make_lex_string( @@ -17935,7 +17935,7 @@ my_bool ha_innobase::register_query_cache_table( /*====================================*/ THD* thd, /*!< in: user thread handle */ - char* table_key, /*!< in: normalized path to the + const char* table_key, /*!< in: normalized path to the table */ uint key_length, /*!< in: length of the normalized path to the table */ diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 429c6eb0b86..297b73ed02f 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -288,7 +288,7 @@ public: */ my_bool register_query_cache_table( THD* thd, - char* table_key, + const char* table_key, uint key_length, qc_engine_callback* call_back, ulonglong* engine_data); diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index a5b919b424d..dcfe72b0531 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -523,8 +523,8 @@ check_v_col_in_order( } if (my_strcasecmp(system_charset_info, - field->field_name, - new_field->field_name) != 0) { + field->field_name.str, + new_field->field_name.str) != 0) { /* 
different column */ return(false); } else { @@ -869,7 +869,7 @@ ha_innobase::check_if_supported_inplace_alter( && innobase_fulltext_exist(altered_table) && !my_strcasecmp( system_charset_info, - key_part->field->field_name, + key_part->field->field_name.str, FTS_DOC_ID_COL_NAME)) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS); @@ -966,7 +966,7 @@ ha_innobase::check_if_supported_inplace_alter( if (!my_strcasecmp( system_charset_info, - (*fp)->field_name, + (*fp)->field_name.str, FTS_DOC_ID_COL_NAME)) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS); @@ -1046,7 +1046,7 @@ innobase_init_foreign( /*==================*/ dict_foreign_t* foreign, /*!< in/out: structure to initialize */ - char* constraint_name, /*!< in/out: constraint name if + const char* constraint_name, /*!< in/out: constraint name if exists */ dict_table_t* table, /*!< in: foreign table */ dict_index_t* index, /*!< in: foreign key index */ @@ -1254,7 +1254,7 @@ no_match: } if (innobase_strcasecmp(col_names[j], - key_part.field->field_name)) { + key_part.field->field_name.str)) { /* Name mismatch */ goto no_match; } @@ -2045,7 +2045,7 @@ name_ok: } my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB", - field->field_name); + field->field_name.str); return(ER_WRONG_KEY_COLUMN); } @@ -2061,7 +2061,7 @@ name_ok: } my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB", - field->field_name); + field->field_name.str); return(ER_WRONG_KEY_COLUMN); } } @@ -2291,20 +2291,20 @@ innobase_fts_check_doc_id_col( } if (my_strcasecmp(system_charset_info, - field->field_name, FTS_DOC_ID_COL_NAME)) { + field->field_name.str, FTS_DOC_ID_COL_NAME)) { continue; } - if (strcmp(field->field_name, FTS_DOC_ID_COL_NAME)) { + if (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME)) { my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); } else if (field->type() != MYSQL_TYPE_LONGLONG 
|| field->pack_length() != 8 || field->real_maybe_null() || !(field->flags & UNSIGNED_FLAG) || innobase_is_v_fld(field)) { my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, MYF(0), - field->field_name); + field->field_name.str); } else { *fts_doc_col_no = i - *num_v; } @@ -2377,7 +2377,7 @@ innobase_fts_check_doc_id_index( if ((key.flags & HA_NOSAME) && key.user_defined_key_parts == 1 && !strcmp(key.name, FTS_DOC_ID_INDEX_NAME) - && !strcmp(key.key_part[0].field->field_name, + && !strcmp(key.key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { if (fts_doc_col_no) { *fts_doc_col_no = ULINT_UNDEFINED; @@ -2456,7 +2456,7 @@ innobase_fts_check_doc_id_index_in_def( if (!(key->flags & HA_NOSAME) || key->user_defined_key_parts != 1 || strcmp(key->name, FTS_DOC_ID_INDEX_NAME) - || strcmp(key->key_part[0].field->field_name, + || strcmp(key->key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { return(FTS_INCORRECT_DOC_ID_INDEX); } @@ -2960,7 +2960,7 @@ innobase_check_foreigns( if (!new_field || (new_field->flags & NOT_NULL_FLAG)) { if (innobase_check_foreigns_low( user_table, drop_fk, n_drop_fk, - (*fp)->field_name, !new_field)) { + (*fp)->field_name.str, !new_field)) { return(true); } } @@ -3255,7 +3255,7 @@ innobase_get_col_names( } if (new_field->field == table->field[old_i]) { - cols[old_i - num_v] = new_field->field_name; + cols[old_i - num_v] = new_field->field_name.str; break; } } @@ -3543,7 +3543,7 @@ innobase_check_gis_columns( ulint col_nr = dict_table_has_column( table, - key_part.field->field_name, + key_part.field->field_name.str, key_part.fieldnr); ut_ad(col_nr != table->n_def); dict_col_t* col = &table->cols[col_nr]; @@ -3666,7 +3666,7 @@ prepare_inplace_add_virtual( if (charset_no > MAX_CHAR_COLL_NUM) { my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB", - field->field_name); + field->field_name.str); return(true); } } else { @@ -3697,7 +3697,7 @@ prepare_inplace_add_virtual( ctx->add_vcol[j].m_col.ind = i - 1; ctx->add_vcol[j].num_base = 0; - 
ctx->add_vcol_name[j] = field->field_name; + ctx->add_vcol_name[j] = field->field_name.str; ctx->add_vcol[j].base_col = NULL; ctx->add_vcol[j].v_pos = ctx->old_table->n_v_cols - ctx->num_to_drop_vcol + j; @@ -3785,7 +3785,7 @@ prepare_inplace_drop_virtual( if (charset_no > MAX_CHAR_COLL_NUM) { my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB", - field->field_name); + field->field_name.str); return(true); } } else { @@ -3816,7 +3816,7 @@ prepare_inplace_drop_virtual( ctx->drop_vcol[j].m_col.ind = i; - ctx->drop_vcol_name[j] = field->field_name; + ctx->drop_vcol_name[j] = field->field_name.str; dict_v_col_t* v_col = dict_table_get_nth_v_col_mysql( ctx->old_table, i); @@ -4631,7 +4631,7 @@ prepare_inplace_alter_table_dict( dict_mem_table_free( ctx->new_table); my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB", - field->field_name); + field->field_name.str); goto new_clustered_failed; } } else { @@ -4665,17 +4665,17 @@ prepare_inplace_alter_table_dict( col_len = DATA_POINT_LEN; } - if (dict_col_name_is_reserved(field->field_name)) { + if (dict_col_name_is_reserved(field->field_name.str)) { dict_mem_table_free(ctx->new_table); my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); goto new_clustered_failed; } if (is_virtual) { dict_mem_table_add_v_col( ctx->new_table, ctx->heap, - field->field_name, + field->field_name.str, col_type, dtype_form_prtype( field_type, charset_no) @@ -4684,7 +4684,7 @@ prepare_inplace_alter_table_dict( } else { dict_mem_table_add_col( ctx->new_table, ctx->heap, - field->field_name, + field->field_name.str, col_type, dtype_form_prtype( field_type, charset_no), @@ -5655,7 +5655,7 @@ err_exit_no_heap: cf_it.rewind(); while (Create_field* cf = cf_it++) { if (cf->field == *fp) { - name = cf->field_name; + name = cf->field_name.str; goto check_if_ok_to_rename; } } @@ -5665,7 +5665,7 @@ check_if_ok_to_rename: /* Prohibit renaming a column from FTS_DOC_ID if full-text indexes exist. 
*/ if (!my_strcasecmp(system_charset_info, - (*fp)->field_name, + (*fp)->field_name.str, FTS_DOC_ID_COL_NAME) && innobase_fulltext_exist(altered_table)) { my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, @@ -7107,8 +7107,8 @@ innobase_rename_columns_try( if (innobase_rename_column_try( ctx->old_table, trx, table_name, col_n, - cf->field->field_name, - cf->field_name, + cf->field->field_name.str, + cf->field_name.str, ctx->need_rebuild(), is_virtual)) { return(true); @@ -7332,8 +7332,8 @@ innobase_rename_or_enlarge_columns_cache( if ((*fp)->flags & FIELD_IS_RENAMED) { dict_mem_table_col_rename( user_table, col_n, - cf->field->field_name, - cf->field_name, is_virtual); + cf->field->field_name.str, + cf->field_name.str, is_virtual); } break; @@ -7384,7 +7384,7 @@ commit_set_autoinc( const Field* ai = old_table->found_next_number_field; ut_ad(!strcmp(dict_table_get_col_name(ctx->old_table, innodb_col_no(ai)), - ai->field_name)); + ai->field_name.str)); ib_uint64_t autoinc = ha_alter_info->create_info->auto_increment_value; diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index adf32b4f68c..548c4a2112e 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -2268,7 +2268,7 @@ bool ha_maria::check_and_repair(THD *thd) if (!file->state->del && (maria_recover_options & HA_RECOVER_QUICK)) check_opt.flags |= T_QUICK; - thd->set_query(table->s->table_name.str, + thd->set_query((char*) table->s->table_name.str, (uint) table->s->table_name.length, system_charset_info); if (!(crashed= maria_is_crashed(file))) @@ -3403,7 +3403,7 @@ bool maria_show_status(handlerton *hton, stat_print_fn *print, enum ha_stat_type stat) { - const LEX_STRING *engine_name= hton_name(hton); + const LEX_CSTRING *engine_name= hton_name(hton); switch (stat) { case HA_ENGINE_LOGS: { @@ -3643,7 +3643,7 @@ static int ha_maria_init(void *p) @retval FALSE An error occurred */ -my_bool ha_maria::register_query_cache_table(THD *thd, char *table_name, +my_bool 
ha_maria::register_query_cache_table(THD *thd, const char *table_name, uint table_name_len, qc_engine_callback *engine_callback, diff --git a/storage/maria/ha_maria.h b/storage/maria/ha_maria.h index 49ee2afc827..51438462787 100644 --- a/storage/maria/ha_maria.h +++ b/storage/maria/ha_maria.h @@ -162,7 +162,7 @@ public: int net_read_dump(NET * net); #endif #ifdef HAVE_QUERY_CACHE - my_bool register_query_cache_table(THD *thd, char *table_key, + my_bool register_query_cache_table(THD *thd, const char *table_key, uint key_length, qc_engine_callback *engine_callback, diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp index ba3d910f936..2c83c3f891e 100644 --- a/storage/mroonga/ha_mroonga.cpp +++ b/storage/mroonga/ha_mroonga.cpp @@ -3213,7 +3213,7 @@ int ha_mroonga::storage_create(const char *name, TABLE *table, int key_parts = KEY_N_KEY_PARTS(key_info); if (key_parts == 1) { Field *pkey_field = key_info->key_part[0].field; - const char *column_name = pkey_field->field_name; + const char *column_name = pkey_field->field_name.str; is_id = (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0); grn_builtin_type gtype = mrn_grn_type_from_field(ctx, pkey_field, false); @@ -3307,7 +3307,7 @@ int ha_mroonga::storage_create(const char *name, TABLE *table, uint n_columns = table->s->fields; for (uint i = 0; i < n_columns; i++) { Field *field = table->s->field[i]; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; int column_name_size = strlen(column_name); if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { @@ -3374,7 +3374,7 @@ int ha_mroonga::storage_create_validate_pseudo_column(TABLE *table) n_columns = table->s->fields; for (i = 0; i < n_columns; i++) { Field *field = table->s->field[i]; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { switch (field->type()) { case MYSQL_TYPE_TINY : @@ -3422,17 +3422,17 @@ 
bool ha_mroonga::storage_create_foreign_key(TABLE *table, } List_iterator<Key_part_spec> key_part_col_iterator(key->columns); Key_part_spec *key_part_col = key_part_col_iterator++; - LEX_STRING field_name = key_part_col->field_name; + LEX_CSTRING field_name = key_part_col->field_name; DBUG_PRINT("info", ("mroonga: field_name=%s", field_name.str)); - DBUG_PRINT("info", ("mroonga: field->field_name=%s", field->field_name)); - if (strcmp(field->field_name, field_name.str)) + DBUG_PRINT("info", ("mroonga: field->field_name=%s", field->field_name.str)); + if (strcmp(field->field_name.str, field_name.str)) { continue; } Foreign_key *fk = (Foreign_key *) key; List_iterator<Key_part_spec> key_part_ref_col_iterator(fk->ref_columns); Key_part_spec *key_part_ref_col = key_part_ref_col_iterator++; - LEX_STRING ref_field_name = key_part_ref_col->field_name; + LEX_CSTRING ref_field_name = key_part_ref_col->field_name; DBUG_PRINT("info", ("mroonga: ref_field_name=%s", ref_field_name.str)); #ifdef MRN_FOREIGN_KEY_USE_CONST_STRING LEX_CSTRING ref_db_name = fk->ref_db; @@ -3537,7 +3537,7 @@ bool ha_mroonga::storage_create_foreign_key(TABLE *table, DBUG_RETURN(false); } Field *ref_field = &ref_key_info->key_part->field[0]; - if (strcmp(ref_field->field_name, ref_field_name.str)) { + if (strcmp(ref_field->field_name.str, ref_field_name.str)) { mrn_open_mutex_lock(table->s); mrn_free_tmp_table_share(tmp_ref_table_share); mrn_open_mutex_unlock(table->s); @@ -3554,8 +3554,8 @@ bool ha_mroonga::storage_create_foreign_key(TABLE *table, mrn_free_tmp_table_share(tmp_ref_table_share); mrn_open_mutex_unlock(table->s); grn_obj_flags col_flags = GRN_OBJ_PERSISTENT; - column = grn_column_create(ctx, table_obj, field->field_name, - strlen(field->field_name), + column = grn_column_create(ctx, table_obj, field->field_name.str, + field->field_name.length, NULL, col_flags, grn_table_ref); if (ctx->rc) { grn_obj_unlink(ctx, grn_table_ref); @@ -3564,7 +3564,7 @@ bool 
ha_mroonga::storage_create_foreign_key(TABLE *table, DBUG_RETURN(false); } - mrn::IndexColumnName index_column_name(grn_table_name, field->field_name); + mrn::IndexColumnName index_column_name(grn_table_name, field->field_name.str); grn_obj_flags ref_col_flags = GRN_OBJ_COLUMN_INDEX | GRN_OBJ_PERSISTENT; column_ref = grn_column_create(ctx, grn_table_ref, index_column_name.c_str(), @@ -3618,7 +3618,7 @@ int ha_mroonga::storage_create_validate_index(TABLE *table) continue; } Field *field = key_info->key_part[0].field; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { if (key_info->algorithm == HA_KEY_ALG_HASH) { continue; // hash index is ok @@ -3755,8 +3755,8 @@ int ha_mroonga::storage_create_index(TABLE *table, const char *grn_table_name, bool is_multiple_column_index = KEY_N_KEY_PARTS(key_info) > 1; if (!is_multiple_column_index) { Field *field = key_info->key_part[0].field; - column_name = field->field_name; - column_name_size = strlen(column_name); + column_name = field->field_name.str; + column_name_size = field->field_name.length; if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { // skipping _id virtual column DBUG_RETURN(0); @@ -3813,8 +3813,8 @@ int ha_mroonga::storage_create_index(TABLE *table, const char *grn_table_name, int j, n_key_parts = KEY_N_KEY_PARTS(key_info); for (j = 0; j < n_key_parts; j++) { Field *field = key_info->key_part[j].field; - const char *column_name = field->field_name; - int column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + int column_name_size = field->field_name.length; grn_obj *source_column = grn_obj_column(ctx, grn_table, column_name, column_name_size); grn_id source_id = grn_obj_id(ctx, source_column); @@ -4113,8 +4113,8 @@ int ha_mroonga::wrapper_open_indexes(const char *name) /* just for backward compatibility before 1.0. 
*/ Field *field = key_info->key_part[0].field; grn_index_columns[i] = grn_obj_column(ctx, grn_index_tables[i], - field->field_name, - strlen(field->field_name)); + field->field_name.str, + field->field_name.length); } if (ctx->rc) { @@ -4294,8 +4294,8 @@ int ha_mroonga::storage_open_columns(void) for (int i = 0; i < n_columns; i++) { Field *field = table->field[i]; - const char *column_name = field->field_name; - int column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + int column_name_size = field->field_name.length; if (table_share->blob_fields) { blob_buffers[i].set_charset(field->charset()); @@ -4409,8 +4409,8 @@ int ha_mroonga::storage_open_indexes(const char *name) /* just for backward compatibility before 1.0. */ Field *field = key_info->key_part[0].field; grn_index_columns[i] = grn_obj_column(ctx, grn_index_tables[i], - field->field_name, - strlen(field->field_name)); + field->field_name.str, + field->field_name.length); } } } @@ -5401,7 +5401,7 @@ int ha_mroonga::storage_write_row(uchar *buf) mrn::DebugColumnAccess debug_column_access(table, table->read_set); for (i = 0; i < n_columns; i++) { Field *field = table->field[i]; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; if (field->is_null()) continue; @@ -5495,7 +5495,7 @@ int ha_mroonga::storage_write_row(uchar *buf) GRN_VOID_INIT(&colbuf); for (i = 0; i < n_columns; i++) { Field *field = table->field[i]; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; if (field->is_null()) continue; @@ -6010,7 +6010,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, for (i = 0; i < n_columns; i++) { Field *field = table->field[i]; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; if (bitmap_is_set(table->write_set, field->field_index)) { if (field->is_null()) continue; @@ -6047,7 +6047,7 @@ int 
ha_mroonga::storage_update_row(const uchar *old_data, GRN_VOID_INIT(&colbuf); for (i = 0; i < n_columns; i++) { Field *field = table->field[i]; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; if (bitmap_is_set(table->write_set, field->field_index)) { mrn::DebugColumnAccess debug_column_access(table, table->read_set); DBUG_PRINT("info", ("mroonga: update column %d(%d)",i,field->field_index)); @@ -6068,7 +6068,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, bool have_pkey = false; for (j = 0; j < KEY_N_KEY_PARTS(pkey_info); j++) { Field *pkey_field = pkey_info->key_part[j].field; - if (strcmp(pkey_field->field_name, column_name) == 0) { + if (strcmp(pkey_field->field_name.str, column_name) == 0) { if (!replacing_) { char message[MRN_BUFFER_SIZE]; snprintf(message, MRN_BUFFER_SIZE, @@ -6738,7 +6738,7 @@ ha_rows ha_mroonga::storage_records_in_range(uint key_nr, key_range *range_min, DBUG_RETURN(row_count); } else { Field *field = key_info->key_part[0].field; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; mrn_change_encoding(ctx, field->charset()); if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { @@ -7030,7 +7030,7 @@ int ha_mroonga::storage_index_read_map(uchar *buf, const uchar *key, DBUG_RETURN(error); if (find_flag == HA_READ_KEY_EXACT) { - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; key_min = key_min_entity; key_max = key_min_entity; @@ -7582,7 +7582,7 @@ int ha_mroonga::storage_read_range_first(const key_range *start_key, } } else { Field *field = key_info->key_part[0].field; - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; error = mrn_change_encoding(ctx, field->charset()); if (error) DBUG_RETURN(error); @@ -9112,7 +9112,7 @@ grn_obj *ha_mroonga::find_column_type(Field *field, MRN_SHARE *mrn_share, int i, char error_message[MRN_BUFFER_SIZE]; 
snprintf(error_message, MRN_BUFFER_SIZE, "unknown custom Groonga type name for <%s> column: <%s>", - field->field_name, grn_type_name); + field->field_name.str, grn_type_name); GRN_LOG(ctx, GRN_LOG_ERROR, "%s", error_message); my_message(error_code, error_message, MYF(0)); @@ -9790,8 +9790,8 @@ bool ha_mroonga::is_primary_key_field(Field *field) const DBUG_RETURN(false); } - if (strcmp(field->field_name, - key_info->key_part[0].field->field_name) == 0) { + if (strcmp(field->field_name.str, + key_info->key_part[0].field->field_name.str) == 0) { DBUG_RETURN(true); } else { DBUG_RETURN(false); @@ -9894,8 +9894,8 @@ void ha_mroonga::check_fast_order_limit(grn_table_sort_key **sort_keys, if (item->type() == Item::FIELD_ITEM) { Field *field = static_cast<Item_field *>(item)->field; - const char *column_name = field->field_name; - int column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + int column_name_size = field->field_name.length; if (should_normalize(field)) { @@ -10988,11 +10988,11 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id) if (bitmap_is_set(table->read_set, field->field_index) || bitmap_is_set(table->write_set, field->field_index)) { - const char *column_name = field->field_name; + const char *column_name = field->field_name.str; if (ignoring_no_key_columns) { KEY *key_info = &(table->s->key_info[active_index]); - if (strcmp(key_info->key_part[0].field->field_name, column_name)) { + if (strcmp(key_info->key_part[0].field->field_name.str, column_name)) { continue; } } @@ -11005,7 +11005,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id) field->set_notnull(); field->store((int)record_id); } else if (primary_key_field && - strcmp(primary_key_field->field_name, column_name) == 0) { + strcmp(primary_key_field->field_name.str, column_name) == 0) { // for primary key column storage_store_field_column(field, true, i, record_id); } else { @@ -11468,7 +11468,8 @@ int 
ha_mroonga::storage_encode_key_set(Field *field, const uchar *key, MRN_DBUG_ENTER_METHOD(); int error = 0; Field_set unpacker((uchar *)key, field->field_length, (uchar *)(key - 1), - field->null_bit, field->unireg_check, field->field_name, + field->null_bit, field->unireg_check, + &field->field_name, field->pack_length(), static_cast<Field_set*>(field)->typelib, static_cast<Field_set*>(field)->charset()); @@ -12903,8 +12904,8 @@ int ha_mroonga::storage_rename_foreign_key(MRN_SHARE *tmp_share, MRN_DBUG_ENTER_METHOD(); for (i = 0; i < n_columns; ++i) { Field *field = tmp_table_share->field[i]; - const char *column_name = field->field_name; - uint column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + uint column_name_size = field->field_name.length; if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { continue; @@ -14570,8 +14571,8 @@ bool ha_mroonga::storage_inplace_alter_table_add_column( } Field *field = altered_table->s->field[i]; - const char *column_name = field->field_name; - int column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + int column_name_size = field->field_name.length; int error = mrn_add_column_param(tmp_share, field, i); if (error) { @@ -14651,8 +14652,8 @@ bool ha_mroonga::storage_inplace_alter_table_drop_column( continue; } - const char *column_name = field->field_name; - int column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + int column_name_size = field->field_name.length; grn_obj *column_obj; column_obj = grn_obj_column(ctx, table_obj, column_name, column_name_size); @@ -14692,7 +14693,8 @@ bool ha_mroonga::storage_inplace_alter_table_rename_column( continue; } - const char *new_name = NULL; + LEX_CSTRING new_name; + new_name.str= 0; List_iterator_fast<Create_field> create_fields(alter_info->create_list); while (Create_field *create_field = create_fields++) { if (create_field->field == field) { @@ -14701,15 +14703,16 @@ bool 
ha_mroonga::storage_inplace_alter_table_rename_column( } } - if (!new_name) { + if (!new_name.str) { continue; } - const char *old_name = field->field_name; + const char *old_name = field->field_name.str; grn_obj *column_obj; - column_obj = grn_obj_column(ctx, table_obj, old_name, strlen(old_name)); + column_obj = grn_obj_column(ctx, table_obj, old_name, + field->field_name.length); if (column_obj) { - grn_column_rename(ctx, column_obj, new_name, strlen(new_name)); + grn_column_rename(ctx, column_obj, new_name.str, new_name.length); if (ctx->rc) { int error = ER_WRONG_COLUMN_NAME; my_message(error, ctx->errbuf, MYF(0)); @@ -15878,8 +15881,8 @@ char *ha_mroonga::storage_get_foreign_key_create_info() create_info_str.length(0); for (i = 0; i < n_columns; ++i) { Field *field = table_share->field[i]; - const char *column_name = field->field_name; - uint column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + uint column_name_size = field->field_name.length; if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { continue; @@ -15953,8 +15956,8 @@ char *ha_mroonga::storage_get_foreign_key_create_info() uint ref_pkey_nr = tmp_ref_table_share->primary_key; KEY *ref_key_info = &tmp_ref_table_share->key_info[ref_pkey_nr]; Field *ref_field = &ref_key_info->key_part->field[0]; - append_identifier(ha_thd(), &create_info_str, ref_field->field_name, - strlen(ref_field->field_name)); + append_identifier(ha_thd(), &create_info_str, ref_field->field_name.str, + ref_field->field_name.length); mrn_open_mutex_lock(table_share); mrn_free_tmp_table_share(tmp_ref_table_share); mrn_open_mutex_unlock(table_share); @@ -16085,8 +16088,8 @@ int ha_mroonga::storage_get_foreign_key_list(THD *thd, MRN_DBUG_ENTER_METHOD(); for (i = 0; i < n_columns; ++i) { Field *field = table_share->field[i]; - const char *column_name = field->field_name; - uint column_name_size = strlen(column_name); + const char *column_name = field->field_name.str; + uint column_name_size = 
field->field_name.length; if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { continue; @@ -16132,7 +16135,7 @@ int ha_mroonga::storage_get_foreign_key_list(THD *thd, f_key_info.delete_method = FK_OPTION_RESTRICT; f_key_info.referenced_key_name = thd_make_lex_string(thd, NULL, "PRIMARY", 7, TRUE); - LEX_STRING *field_name = thd_make_lex_string(thd, NULL, column_name, + LEX_CSTRING *field_name = thd_make_lex_string(thd, NULL, column_name, column_name_size, TRUE); f_key_info.foreign_fields.push_back(field_name); @@ -16157,9 +16160,9 @@ int ha_mroonga::storage_get_foreign_key_list(THD *thd, uint ref_pkey_nr = tmp_ref_table_share->primary_key; KEY *ref_key_info = &tmp_ref_table_share->key_info[ref_pkey_nr]; Field *ref_field = &ref_key_info->key_part->field[0]; - LEX_STRING *ref_col_name = thd_make_lex_string(thd, NULL, - ref_field->field_name, - strlen(ref_field->field_name), + LEX_CSTRING *ref_col_name = thd_make_lex_string(thd, NULL, + ref_field->field_name.str, + ref_field->field_name.length, TRUE); f_key_info.referenced_fields.push_back(ref_col_name); mrn_open_mutex_lock(table_share); @@ -16432,7 +16435,7 @@ void ha_mroonga::rebind_psi() #endif my_bool ha_mroonga::wrapper_register_query_cache_table(THD *thd, - char *table_key, + const char *table_key, uint key_length, qc_engine_callback *engine_callback, @@ -16453,7 +16456,7 @@ my_bool ha_mroonga::wrapper_register_query_cache_table(THD *thd, } my_bool ha_mroonga::storage_register_query_cache_table(THD *thd, - char *table_key, + const char *table_key, uint key_length, qc_engine_callback *engine_callback, @@ -16469,7 +16472,7 @@ my_bool ha_mroonga::storage_register_query_cache_table(THD *thd, } my_bool ha_mroonga::register_query_cache_table(THD *thd, - char *table_key, + const char *table_key, uint key_length, qc_engine_callback *engine_callback, diff --git a/storage/mroonga/ha_mroonga.hpp b/storage/mroonga/ha_mroonga.hpp index 579107f9465..6416513f0eb 100644 --- a/storage/mroonga/ha_mroonga.hpp +++ 
b/storage/mroonga/ha_mroonga.hpp @@ -192,7 +192,7 @@ extern "C" { # define MRN_HAVE_HTON_ALTER_TABLE_FLAGS #endif -#if MYSQL_VERSION_ID >= 50706 && !defined(MRN_MARIADB_P) +#if MYSQL_VERSION_ID >= 50706 # define MRN_FOREIGN_KEY_USE_CONST_STRING #endif @@ -559,7 +559,7 @@ protected: void rebind_psi(); #endif my_bool register_query_cache_table(THD *thd, - char *table_key, + const char *table_key, uint key_length, qc_engine_callback *engine_callback, ulonglong *engine_data); @@ -1219,13 +1219,13 @@ private: void storage_rebind_psi(); #endif my_bool wrapper_register_query_cache_table(THD *thd, - char *table_key, + const char *table_key, uint key_length, qc_engine_callback *engine_callback, ulonglong *engine_data); my_bool storage_register_query_cache_table(THD *thd, - char *table_key, + const char *table_key, uint key_length, qc_engine_callback *engine_callback, diff --git a/storage/mroonga/lib/mrn_condition_converter.cpp b/storage/mroonga/lib/mrn_condition_converter.cpp index 1527a546938..cd739b15e51 100644 --- a/storage/mroonga/lib/mrn_condition_converter.cpp +++ b/storage/mroonga/lib/mrn_condition_converter.cpp @@ -28,8 +28,8 @@ # define MRN_ITEM_FIELD_GET_NAME(item) ((item)->item_name.ptr()) # define MRN_ITEM_FIELD_GET_NAME_LENGTH(item) ((item)->item_name.length()) #else -# define MRN_ITEM_FIELD_GET_NAME(item) ((item)->name) -# define MRN_ITEM_FIELD_GET_NAME_LENGTH(item) (strlen((item)->name)) +# define MRN_ITEM_FIELD_GET_NAME(item) ((item)->name.str) +# define MRN_ITEM_FIELD_GET_NAME_LENGTH(item) ((item)->name.length) #endif namespace mrn { diff --git a/storage/mroonga/mrn_table.cpp b/storage/mroonga/mrn_table.cpp index 96f24ff2e00..629c96d3e1a 100644 --- a/storage/mroonga/mrn_table.cpp +++ b/storage/mroonga/mrn_table.cpp @@ -466,7 +466,7 @@ int mrn_parse_table_param(MRN_SHARE *share, TABLE *table) if (share->engine) { - LEX_STRING engine_name; + LEX_CSTRING engine_name; if ( ( share->engine_length == MRN_DEFAULT_LEN && @@ -1064,9 +1064,9 @@ TABLE_SHARE 
*mrn_create_tmp_table_share(TABLE_LIST *table_list, const char *path void mrn_free_tmp_table_share(TABLE_SHARE *tmp_table_share) { MRN_DBUG_ENTER_FUNCTION(); - char *normalized_path = tmp_table_share->normalized_path.str; + const char *normalized_path = tmp_table_share->normalized_path.str; free_table_share(tmp_table_share); - my_free(normalized_path); + my_free((char*) normalized_path); DBUG_VOID_RETURN; } diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 99073a2caa1..210111d89ff 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -1741,7 +1741,7 @@ bool ha_myisam::check_and_repair(THD *thd) sql_print_warning("Checking table: '%s'",table->s->path.str); const CSET_STRING query_backup= thd->query_string; - thd->set_query(table->s->table_name.str, + thd->set_query((char*) table->s->table_name.str, (uint) table->s->table_name.length, system_charset_info); if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt)) @@ -2575,7 +2575,7 @@ maria_declare_plugin_end; @retval FALSE An error occurred */ -my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name, +my_bool ha_myisam::register_query_cache_table(THD *thd, const char *table_name, uint table_name_len, qc_engine_callback *engine_callback, diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 531c96baacc..804963f5efc 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -144,7 +144,7 @@ class ha_myisam: public handler Alter_inplace_info *alter_info); bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); #ifdef HAVE_QUERY_CACHE - my_bool register_query_cache_table(THD *thd, char *table_key, + my_bool register_query_cache_table(THD *thd, const char *table_key, uint key_length, qc_engine_callback *engine_callback, diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 8f4997cf795..dbcd7a0cb3b 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ 
b/storage/myisammrg/ha_myisammrg.cc @@ -1609,7 +1609,7 @@ void ha_myisammrg::append_create_info(String *packet) for (first= open_table= children_l;; open_table= open_table->next_global) { - LEX_STRING db= { open_table->db, open_table->db_length }; + LEX_CSTRING db= { open_table->db, open_table->db_length }; if (open_table != first) packet->append(','); @@ -1646,7 +1646,7 @@ bool ha_myisammrg::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info) { char tmp_path[FN_REFLEN]; - char *name= table->s->normalized_path.str; + const char *name= table->s->normalized_path.str; DBUG_ENTER("ha_myisammrg::inplace_alter_table"); fn_format(tmp_path, name, "", MYRG_NAME_TMPEXT, MY_UNPACK_FILENAME | MY_APPEND_EXT); int res= create_mrg(tmp_path, ha_alter_info->create_info); diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc index 2380ab8157e..4b858d468f5 100644 --- a/storage/oqgraph/ha_oqgraph.cc +++ b/storage/oqgraph/ha_oqgraph.cc @@ -297,7 +297,7 @@ int ha_oqgraph::oqgraph_check_table_structure (TABLE *table_arg) Field **field= table_arg->field; for (i= 0; *field && skel[i].colname; i++, field++) { - DBUG_PRINT( "oq-debug", ("Column %d: name='%s', expected '%s'; type=%d, expected %d.", i, (*field)->field_name, skel[i].colname, (*field)->type(), skel[i].coltype)); + DBUG_PRINT( "oq-debug", ("Column %d: name='%s', expected '%s'; type=%d, expected %d.", i, (*field)->field_name.str, skel[i].colname, (*field)->type(), skel[i].coltype)); bool badColumn = false; bool isLatchColumn = strcmp(skel[i].colname, "latch")==0; bool isStringLatch = true; @@ -346,7 +346,7 @@ int ha_oqgraph::oqgraph_check_table_structure (TABLE *table_arg) push_warning_printf( current_thd, Sql_condition::WARN_LEVEL_WARN, HA_WRONG_CREATE_OPTION, "Column %d must be NULL.", i); } /* Check the column name */ - if (!badColumn) if (strcmp(skel[i].colname,(*field)->field_name)) { + if (!badColumn) if (strcmp(skel[i].colname,(*field)->field_name.str)) { badColumn = true; 
push_warning_printf( current_thd, Sql_condition::WARN_LEVEL_WARN, HA_WRONG_CREATE_OPTION, "Column %d must be named '%s'.", i, skel[i].colname); } @@ -577,11 +577,10 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked) size_t tlen= strlen(options->table_name); size_t plen= (int)(p - name) + tlen + 1; - share->path.str= (char*)alloc_root(&share->mem_root, plen + 1); // MDEV-5996 space for trailing zero - // it seems there was a misunderstanding of why there is a separate length field in the String object - strmov(strnmov(share->path.str, name, (int)(p - name) + 1), options->table_name); - - share->path.str[plen] = 0; // MDEV-5996 Make sure the pointer is zero terminated. I really think this needs refactoring, soon... + share->path.str= (char*)alloc_root(&share->mem_root, plen + 1); + strmov(strnmov((char*) share->path.str, name, (int)(p - name) + 1), + options->table_name); + DBUG_ASSERT(strlen(share->path.str) == plen); share->normalized_path.str= share->path.str; share->path.length= share->normalized_path.length= plen; @@ -655,7 +654,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked) for (Field **field= edges->field; *field; ++field) { - if (strcmp(options->origid, (*field)->field_name)) + if (strcmp(options->origid, (*field)->field_name.str)) continue; if ((*field)->cmp_type() != INT_RESULT || !((*field)->flags & NOT_NULL_FLAG)) @@ -680,7 +679,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked) for (Field **field= edges->field; *field; ++field) { - if (strcmp(options->destid, (*field)->field_name)) + if (strcmp(options->destid, (*field)->field_name.str)) continue; if ((*field)->type() != origid->type() || !((*field)->flags & NOT_NULL_FLAG)) @@ -703,7 +702,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked) } // Make sure origid column != destid column - if (strcmp( origid->field_name, destid->field_name)==0) { + if (strcmp( origid->field_name.str, destid->field_name.str)==0) 
{ fprint_error("Invalid OQGRAPH backing store ('%s.destid' attribute set to same column as origid attribute)", p+1, options->table_name); closefrm(edges); free_table_share(share); @@ -712,7 +711,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked) for (Field **field= edges->field; options->weight && *field; ++field) { - if (strcmp(options->weight, (*field)->field_name)) + if (strcmp(options->weight, (*field)->field_name.str)) continue; if ((*field)->result_type() != REAL_RESULT || !((*field)->flags & NOT_NULL_FLAG)) diff --git a/storage/oqgraph/ha_oqgraph.h b/storage/oqgraph/ha_oqgraph.h index ad0cdd61256..f06db8bbf14 100644 --- a/storage/oqgraph/ha_oqgraph.h +++ b/storage/oqgraph/ha_oqgraph.h @@ -118,7 +118,7 @@ public: virtual const char *table_type() const { return hton_name(ht)->str; } #endif - my_bool register_query_cache_table(THD *thd, char *table_key, + my_bool register_query_cache_table(THD *thd, const char *table_key, uint key_length, qc_engine_callback *engine_callback, diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h index 62996e12fe0..988caae2de3 100644 --- a/storage/perfschema/ha_perfschema.h +++ b/storage/perfschema/ha_perfschema.h @@ -188,7 +188,8 @@ public: { return HA_CACHE_TBL_NOCACHE; } virtual my_bool register_query_cache_table - (THD *, char *, uint , qc_engine_callback *engine_callback, ulonglong *) + (THD *, const char *, uint , qc_engine_callback *engine_callback, + ulonglong *) { *engine_callback= 0; return FALSE; diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc index b58862d4824..58704c87b74 100644 --- a/storage/perfschema/pfs.cc +++ b/storage/perfschema/pfs.cc @@ -1236,7 +1236,7 @@ static enum_operation_type socket_operation_map[]= @param [out] output_length Length of the resulting output string. 
@return 0 for success, non zero for errors */ -static int build_prefix(const LEX_STRING *prefix, const char *category, +static int build_prefix(const LEX_CSTRING *prefix, const char *category, char *output, int *output_length) { int len= strlen(category); diff --git a/storage/perfschema/pfs_column_values.cc b/storage/perfschema/pfs_column_values.cc index 65d0ae7171b..9c4dee89af5 100644 --- a/storage/perfschema/pfs_column_values.cc +++ b/storage/perfschema/pfs_column_values.cc @@ -22,29 +22,29 @@ #include "my_global.h" #include "pfs_column_values.h" -LEX_STRING PERFORMANCE_SCHEMA_str= -{ C_STRING_WITH_LEN("performance_schema") }; +LEX_CSTRING PERFORMANCE_SCHEMA_str= +{ STRING_WITH_LEN("performance_schema") }; -LEX_STRING mutex_instrument_prefix= -{ C_STRING_WITH_LEN("wait/synch/mutex/") }; +LEX_CSTRING mutex_instrument_prefix= +{ STRING_WITH_LEN("wait/synch/mutex/") }; -LEX_STRING rwlock_instrument_prefix= -{ C_STRING_WITH_LEN("wait/synch/rwlock/") }; +LEX_CSTRING rwlock_instrument_prefix= +{ STRING_WITH_LEN("wait/synch/rwlock/") }; -LEX_STRING cond_instrument_prefix= -{ C_STRING_WITH_LEN("wait/synch/cond/") }; +LEX_CSTRING cond_instrument_prefix= +{ STRING_WITH_LEN("wait/synch/cond/") }; -LEX_STRING thread_instrument_prefix= -{ C_STRING_WITH_LEN("thread/") }; +LEX_CSTRING thread_instrument_prefix= +{ STRING_WITH_LEN("thread/") }; -LEX_STRING file_instrument_prefix= -{ C_STRING_WITH_LEN("wait/io/file/") }; +LEX_CSTRING file_instrument_prefix= +{ STRING_WITH_LEN("wait/io/file/") }; -LEX_STRING stage_instrument_prefix= -{ C_STRING_WITH_LEN("stage/") }; +LEX_CSTRING stage_instrument_prefix= +{ STRING_WITH_LEN("stage/") }; -LEX_STRING statement_instrument_prefix= -{ C_STRING_WITH_LEN("statement/") }; +LEX_CSTRING statement_instrument_prefix= +{ STRING_WITH_LEN("statement/") }; -LEX_STRING socket_instrument_prefix= -{ C_STRING_WITH_LEN("wait/io/socket/") }; +LEX_CSTRING socket_instrument_prefix= +{ STRING_WITH_LEN("wait/io/socket/") }; diff --git 
a/storage/perfschema/pfs_column_values.h b/storage/perfschema/pfs_column_values.h index 204d5230ddf..952230043af 100644 --- a/storage/perfschema/pfs_column_values.h +++ b/storage/perfschema/pfs_column_values.h @@ -25,23 +25,23 @@ */ /** String, "PERFORMANCE_SCHEMA". */ -extern LEX_STRING PERFORMANCE_SCHEMA_str; +extern LEX_CSTRING PERFORMANCE_SCHEMA_str; /** String prefix for all mutex instruments. */ -extern LEX_STRING mutex_instrument_prefix; +extern LEX_CSTRING mutex_instrument_prefix; /** String prefix for all rwlock instruments. */ -extern LEX_STRING rwlock_instrument_prefix; +extern LEX_CSTRING rwlock_instrument_prefix; /** String prefix for all cond instruments. */ -extern LEX_STRING cond_instrument_prefix; +extern LEX_CSTRING cond_instrument_prefix; /** String prefix for all thread instruments. */ -extern LEX_STRING thread_instrument_prefix; +extern LEX_CSTRING thread_instrument_prefix; /** String prefix for all file instruments. */ -extern LEX_STRING file_instrument_prefix; +extern LEX_CSTRING file_instrument_prefix; /** String prefix for all stage instruments. */ -extern LEX_STRING stage_instrument_prefix; +extern LEX_CSTRING stage_instrument_prefix; /** String prefix for all statement instruments. 
*/ -extern LEX_STRING statement_instrument_prefix; -extern LEX_STRING socket_instrument_prefix; +extern LEX_CSTRING statement_instrument_prefix; +extern LEX_CSTRING socket_instrument_prefix; #endif diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc index 579d3ccd74e..5f37d43acf1 100644 --- a/storage/perfschema/pfs_engine_table.cc +++ b/storage/perfschema/pfs_engine_table.cc @@ -1372,7 +1372,7 @@ end: } int pfs_discover_table_names(handlerton *hton __attribute__((unused)), - LEX_STRING *db, + LEX_CSTRING *db, MY_DIR *dir __attribute__((unused)), handlerton::discovered_list *result) { diff --git a/storage/perfschema/pfs_engine_table.h b/storage/perfschema/pfs_engine_table.h index bae27172810..e12ab36cd61 100644 --- a/storage/perfschema/pfs_engine_table.h +++ b/storage/perfschema/pfs_engine_table.h @@ -457,7 +457,7 @@ struct PFS_triple_index bool pfs_show_status(handlerton *hton, THD *thd, stat_print_fn *print, enum ha_stat_type stat); -int pfs_discover_table_names(handlerton *hton, LEX_STRING *db, +int pfs_discover_table_names(handlerton *hton, LEX_CSTRING *db, MY_DIR *dir, handlerton::discovered_list *result); diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 1344bdc497f..c280e14daf1 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4977,7 +4977,7 @@ int ha_rocksdb::create_cfs( ER_UNKNOWN_ERROR, "Unsupported collation on string indexed " "column %s.%s Use binary collation (%s).", MYF(0), tbl_def_arg->full_tablename().c_str(), - table_arg->key_info[i].key_part[part].field->field_name, + table_arg->key_info[i].key_part[part].field->field_name.str, collation_err.c_str()); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -5181,8 +5181,8 @@ int ha_rocksdb::compare_key_parts(const KEY *const old_key, /* Check to see that key parts themselves match */ for (uint i = 0; i < old_key->user_defined_key_parts; i++) { - if (strcmp(old_key->key_part[i].field->field_name, - 
new_key->key_part[i].field->field_name) != 0) { + if (strcmp(old_key->key_part[i].field->field_name.str, + new_key->key_part[i].field->field_name.str) != 0) { DBUG_RETURN(HA_EXIT_FAILURE); } } diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 1ac48111636..27885d50eb5 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -1139,7 +1139,7 @@ public: enum thr_lock_type lock_type) override MY_ATTRIBUTE((__warn_unused_result__)); - my_bool register_query_cache_table(THD *const thd, char *const table_key, + my_bool register_query_cache_table(THD *const thd, const char *table_key, uint key_length, qc_engine_callback *const engine_callback, ulonglong *const engine_data) override { diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 43b67210a12..2bfb6f9be7c 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -2833,7 +2833,7 @@ bool Rdb_validate_tbls::check_frm_file(const std::string &fullpath, the connection handle as we don't have one here. 
*/ char eng_type_buf[NAME_CHAR_LEN+1]; - LEX_STRING eng_type_str = {eng_type_buf, 0}; + LEX_CSTRING eng_type_str = {eng_type_buf, 0}; bool is_sequence; enum Table_type type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type_str, &is_sequence); if (type == TABLE_TYPE_UNKNOWN) { diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc index ad617c79a5a..3e56303973d 100644 --- a/storage/sequence/sequence.cc +++ b/storage/sequence/sequence.cc @@ -418,7 +418,7 @@ create_group_by_handler(THD *thd, Query *query) if (field->table != query->from->table) return 0; /* Check that we are using a SUM() on the primary key */ - if (strcmp(field->field_name, "seq")) + if (strcmp(field->field_name.str, "seq")) return 0; } diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index d65f5ac900a..e9eeb802568 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -1029,7 +1029,7 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate ) for ( int i=0; i<share->m_iTableFields; i++ ) { - share->m_sTableField[i] = sphDup ( table->field[i]->field_name ); + share->m_sTableField[i] = sphDup ( table->field[i]->field_name.str ); share->m_eTableFieldType[i] = table->field[i]->type(); } } @@ -2331,7 +2331,7 @@ int ha_sphinx::write_row ( byte * ) for ( Field ** ppField = table->field; *ppField; ppField++ ) { - sQuery.append ( (*ppField)->field_name ); + sQuery.append ( (*ppField)->field_name.str ); if ( ppField[1] ) sQuery.append ( ", " ); } @@ -3427,7 +3427,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT ) { my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float", - name, i+1, table->field[i]->field_name ); + name, i+1, table->field[i]->field_name.str ); break; } } @@ -3439,10 +3439,10 @@ int 
ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) if ( table->s->keys!=1 || table->key_info[0].user_defined_key_parts!=1 || - strcasecmp ( table->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) ) + strcasecmp ( table->key_info[0].key_part[0].field->field_name.str, table->field[2]->field_name.str ) ) { my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column", - name, table->field[2]->field_name ); + name, table->field[2]->field_name.str ); break; } @@ -3457,7 +3457,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) sError[0] = '\0'; // check that 1st column is id, is of int type, and has an index - if ( strcmp ( table->field[0]->field_name, "id" ) ) + if ( strcmp ( table->field[0]->field_name.str, "id" ) ) { my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name ); break; @@ -3473,7 +3473,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) if ( table->s->keys!=1 || table->key_info[0].user_defined_key_parts!=1 || - strcasecmp ( table->key_info[0].key_part[0].field->field_name, "id" ) ) + strcasecmp ( table->key_info[0].key_part[0].field->field_name.str, "id" ) ) { my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name ); break; @@ -3486,7 +3486,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT ) { my_snprintf ( sError, sizeof(sError), "%s: column %d(%s) is of unsupported type (use int/bigint/timestamp/varchar/float)", - name, i+1, table->field[i]->field_name ); + name, i+1, table->field[i]->field_name.str ); break; } } diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index fef18e0e652..a87abfb8073 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -7790,7 +7790,8 @@ int ha_spider::cmp_ref( ) 
{ if ((ret = (*field)->cmp_binary_offset(ptr_diff))) { - DBUG_PRINT("info",("spider different at %s", (*field)->field_name)); + DBUG_PRINT("info",("spider different at %s", + (*field)->field_name.str)); break; } } diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 5b2071b5f1e..96d323a3492 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -1665,7 +1665,7 @@ int spider_db_append_key_where_internal( if (sql_kind == SPIDER_SQL_KIND_HANDLER) { - char *key_name = key_info->name; + const char *key_name = key_info->name; key_name_length = strlen(key_name); if (str->reserve(SPIDER_SQL_READ_LEN + /* SPIDER_SQL_NAME_QUOTE_LEN */ 2 + key_name_length)) @@ -2844,7 +2844,7 @@ int spider_db_fetch_row( ) { int error_num; DBUG_ENTER("spider_db_fetch_row"); - DBUG_PRINT("info", ("spider field_name %s", field->field_name)); + DBUG_PRINT("info", ("spider field_name %s", field->field_name.str)); DBUG_PRINT("info", ("spider fieldcharset %s", field->charset()->csname)); field->move_field_offset(ptr_diff); error_num = row->store_to_field(field, share->access_charset); @@ -2967,7 +2967,8 @@ int spider_db_fetch_table( my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->write_set); #endif - DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); + DBUG_PRINT("info", ("spider bitmap is set %s", + (*field)->field_name.str)); if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); @@ -3138,7 +3139,7 @@ int spider_db_fetch_key( my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->write_set); #endif - DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name)); + DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name.str)); if ((error_num = spider_db_fetch_row(share, field, row, ptr_diff))) DBUG_RETURN(error_num); @@ -3252,7 +3253,8 @@ int spider_db_fetch_minimum_columns( my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->write_set); 
#endif - DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); + DBUG_PRINT("info", ("spider bitmap is set %s", + (*field)->field_name.str)); if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF @@ -5114,7 +5116,8 @@ int spider_db_seek_tmp_table( my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->write_set); #endif - DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); + DBUG_PRINT("info", ("spider bitmap is set %s", + (*field)->field_name.str)); if ((error_num = spider_db_fetch_row(spider->share, *field, row, ptr_diff))) DBUG_RETURN(error_num); @@ -5201,7 +5204,7 @@ int spider_db_seek_tmp_key( my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->write_set); #endif - DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name)); + DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name.str)); if ((error_num = spider_db_fetch_row(spider->share, field, row, ptr_diff))) DBUG_RETURN(error_num); @@ -5291,7 +5294,8 @@ int spider_db_seek_tmp_minimum_columns( my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->write_set); #endif - DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); + DBUG_PRINT("info", ("spider bitmap is set %s", + (*field)->field_name.str)); if ((error_num = spider_db_fetch_row(spider->share, *field, row, ptr_diff))) DBUG_RETURN(error_num); @@ -5303,7 +5307,7 @@ int spider_db_seek_tmp_minimum_columns( else if (bitmap_is_set(table->read_set, (*field)->field_index)) { DBUG_PRINT("info", ("spider bitmap is cleared %s", - (*field)->field_name)); + (*field)->field_name.str)); bitmap_clear_bit(table->read_set, (*field)->field_index); } } @@ -8059,10 +8063,7 @@ int spider_db_open_item_ident( } if (str) { - if (item_ident->field_name) - field_name_length = strlen(item_ident->field_name); - else - field_name_length = 0; + field_name_length = item_ident->field_name.length; if (share->access_charset->cset == 
system_charset_info->cset) { if (str->reserve(alias_length + @@ -8072,7 +8073,7 @@ int spider_db_open_item_ident( } str->q_append(alias, alias_length); if ((error_num = spider_dbton[dbton_id].db_util-> - append_name(str, item_ident->field_name, field_name_length))) + append_name(str, item_ident->field_name.str, field_name_length))) { DBUG_RETURN(error_num); } @@ -8081,7 +8082,7 @@ int spider_db_open_item_ident( DBUG_RETURN(HA_ERR_OUT_OF_MEM); str->q_append(alias, alias_length); if ((error_num = spider_dbton[dbton_id].db_util-> - append_name_with_charset(str, item_ident->field_name, + append_name_with_charset(str, item_ident->field_name.str, field_name_length, system_charset_info))) { DBUG_RETURN(error_num); @@ -8140,18 +8141,18 @@ int spider_db_open_item_ref( (*(item_ref->ref))->type() != Item::CACHE_ITEM && item_ref->ref_type() != Item_ref::VIEW_REF && !item_ref->table_name && - item_ref->name && + item_ref->name.str && item_ref->alias_name_used ) { if (str) { - uint length = strlen(item_ref->name); + uint length = item_ref->name.length; if (str->reserve(length + /* SPIDER_SQL_NAME_QUOTE_LEN */ 2)) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } if ((error_num = spider_dbton[dbton_id].db_util-> - append_name(str, item_ref->name, length))) + append_name(str, item_ref->name.str, length))) { DBUG_RETURN(error_num); } @@ -9621,7 +9622,7 @@ int spider_db_udf_copy_key_row( int error_num; DBUG_ENTER("spider_db_udf_copy_key_row"); if ((error_num = spider_db_append_name_with_quote_str(str, - (char *) field->field_name, dbton_id))) + (char *) field->field_name.str, dbton_id))) DBUG_RETURN(error_num); if (str->reserve(joint_length + *length + SPIDER_SQL_AND_LEN)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); diff --git a/storage/spider/spd_db_handlersocket.cc b/storage/spider/spd_db_handlersocket.cc index 47f22b04116..8f031acdbde 100644 --- a/storage/spider/spd_db_handlersocket.cc +++ b/storage/spider/spd_db_handlersocket.cc @@ -3904,7 +3904,7 @@ int 
spider_handlersocket_share::create_column_name_str() str->init_calc_mem(202); str->set_charset(spider_share->access_charset); if ((error_num = spider_db_append_name_with_quote_str(str, - (char *) (*field)->field_name, dbton_id))) + (char *) (*field)->field_name.str, dbton_id))) goto error; } DBUG_RETURN(0); diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 1c1c440c2ed..f902508e9c4 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -2707,7 +2707,7 @@ void spider_db_mysql::set_dup_key_idx( uint roop_count, pk_idx = table->s->primary_key; int key_name_length; int max_length = 0; - char *key_name; + const char *key_name; DBUG_ENTER("spider_db_mysql::set_dup_key_idx"); DBUG_PRINT("info",("spider this=%p", this)); DBUG_PRINT("info",("spider error_str=%s", conn->error_str)); @@ -4560,7 +4560,7 @@ int spider_mysql_share::create_column_name_str() str->init_calc_mem(89); str->set_charset(spider_share->access_charset); if ((error_num = spider_db_append_name_with_quote_str(str, - (char *) (*field)->field_name, dbton_id))) + (char *) (*field)->field_name.str, dbton_id))) goto error; } DBUG_RETURN(0); @@ -11882,7 +11882,7 @@ int spider_mysql_copy_table::append_table_columns( DBUG_RETURN(HA_ERR_OUT_OF_MEM); sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) (*field)->field_name, spider_dbton_mysql.dbton_id))) + (char *) (*field)->field_name.str, spider_dbton_mysql.dbton_id))) DBUG_RETURN(error_num); if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + SPIDER_SQL_COMMA_LEN)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -12011,7 +12011,7 @@ int spider_mysql_copy_table::append_key_order_str( DBUG_RETURN(HA_ERR_OUT_OF_MEM); sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) field->field_name, spider_dbton_mysql.dbton_id))) + (char *) field->field_name.str, 
spider_dbton_mysql.dbton_id))) DBUG_RETURN(error_num); if (key_part->key_part_flag & HA_REVERSE_SORT) { @@ -12041,7 +12041,7 @@ int spider_mysql_copy_table::append_key_order_str( DBUG_RETURN(HA_ERR_OUT_OF_MEM); sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) field->field_name, spider_dbton_mysql.dbton_id))) + (char *) field->field_name.str, spider_dbton_mysql.dbton_id))) DBUG_RETURN(error_num); if (key_part->key_part_flag & HA_REVERSE_SORT) { @@ -12173,7 +12173,7 @@ int spider_mysql_copy_table::copy_key_row( DBUG_RETURN(HA_ERR_OUT_OF_MEM); sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) field->field_name, spider_dbton_mysql.dbton_id))) + (char *) field->field_name.str, spider_dbton_mysql.dbton_id))) DBUG_RETURN(error_num); if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + joint_length + *length + SPIDER_SQL_AND_LEN)) diff --git a/storage/spider/spd_db_oracle.cc b/storage/spider/spd_db_oracle.cc index c3dfe8b8cf2..5e6c89b10d0 100644 --- a/storage/spider/spd_db_oracle.cc +++ b/storage/spider/spd_db_oracle.cc @@ -4361,7 +4361,7 @@ int spider_oracle_share::create_column_name_str() str->init_calc_mem(196); str->set_charset(spider_share->access_charset); if ((error_num = spider_db_append_name_with_quote_str(str, - (char *) (*field)->field_name, dbton_id))) + (char *) (*field)->field_name.str, dbton_id))) goto error; } DBUG_RETURN(0); @@ -12031,7 +12031,7 @@ int spider_oracle_copy_table::append_table_columns( DBUG_RETURN(HA_ERR_OUT_OF_MEM); sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) (*field)->field_name, spider_dbton_oracle.dbton_id))) + (char *) (*field)->field_name.str, spider_dbton_oracle.dbton_id))) DBUG_RETURN(error_num); if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + SPIDER_SQL_COMMA_LEN)) 
DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -12177,7 +12177,7 @@ int spider_oracle_copy_table::append_key_order_str( sql_part.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql_part, - (char *) field->field_name, spider_dbton_oracle.dbton_id))) + (char *) field->field_name.str, spider_dbton_oracle.dbton_id))) DBUG_RETURN(error_num); if (key_part->key_part_flag & HA_REVERSE_SORT) { @@ -12211,7 +12211,7 @@ int spider_oracle_copy_table::append_key_order_str( sql_part.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql_part, - (char *) field->field_name, spider_dbton_oracle.dbton_id))) + (char *) field->field_name.str, spider_dbton_oracle.dbton_id))) DBUG_RETURN(error_num); if (key_part->key_part_flag & HA_REVERSE_SORT) { @@ -12275,7 +12275,7 @@ int spider_oracle_copy_table::append_key_order_str( sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) field->field_name, spider_dbton_oracle.dbton_id))) + (char *) field->field_name.str, spider_dbton_oracle.dbton_id))) DBUG_RETURN(error_num); if (key_part->key_part_flag & HA_REVERSE_SORT) { @@ -12308,7 +12308,7 @@ int spider_oracle_copy_table::append_key_order_str( sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) field->field_name, spider_dbton_oracle.dbton_id))) + (char *) field->field_name.str, spider_dbton_oracle.dbton_id))) DBUG_RETURN(error_num); if (key_part->key_part_flag & HA_REVERSE_SORT) { @@ -12516,7 +12516,7 @@ int spider_oracle_copy_table::copy_key_row( DBUG_RETURN(HA_ERR_OUT_OF_MEM); sql.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); if ((error_num = spider_db_append_name_with_quote_str(&sql, - (char *) field->field_name, spider_dbton_oracle.dbton_id))) + (char *) field->field_name.str, 
spider_dbton_oracle.dbton_id))) DBUG_RETURN(error_num); if (sql.reserve(SPIDER_SQL_NAME_QUOTE_LEN + joint_length + *length + SPIDER_SQL_AND_LEN)) diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc index e1e81394ce8..a140fba7915 100644 --- a/storage/spider/spd_sys_table.cc +++ b/storage/spider/spd_sys_table.cc @@ -2386,10 +2386,11 @@ TABLE *spider_mk_sys_tmp_table( Item_field *i_field; List<Item> i_list; TABLE *tmp_table; + LEX_CSTRING name= { field_name, strlen(field_name) }; DBUG_ENTER("spider_mk_sys_tmp_table"); if (!(field = new Field_blob( - 4294967295U, FALSE, field_name, cs, TRUE))) + 4294967295U, FALSE, &name, cs, TRUE))) goto error_alloc_field; field->init(table); @@ -2444,10 +2445,13 @@ TABLE *spider_mk_sys_tmp_table_for_result( Item_field *i_field1, *i_field2, *i_field3; List<Item> i_list; TABLE *tmp_table; + LEX_CSTRING name1= { field_name1, strlen(field_name1) }; + LEX_CSTRING name2= { field_name2, strlen(field_name2) }; + LEX_CSTRING name3= { field_name3, strlen(field_name3) }; DBUG_ENTER("spider_mk_sys_tmp_table_for_result"); if (!(field1 = new Field_blob( - 4294967295U, FALSE, field_name1, cs, TRUE))) + 4294967295U, FALSE, &name1, cs, TRUE))) goto error_alloc_field1; field1->init(table); @@ -2463,7 +2467,7 @@ TABLE *spider_mk_sys_tmp_table_for_result( goto error_push_item1; if (!(field2 = new (thd->mem_root) Field_blob( - 4294967295U, FALSE, field_name2, cs, TRUE))) + 4294967295U, FALSE, &name2, cs, TRUE))) goto error_alloc_field2; field2->init(table); @@ -2479,7 +2483,7 @@ TABLE *spider_mk_sys_tmp_table_for_result( goto error_push_item2; if (!(field3 = new (thd->mem_root) Field_blob( - 4294967295U, FALSE, field_name3, cs, TRUE))) + 4294967295U, FALSE, &name3, cs, TRUE))) goto error_alloc_field3; field3->init(table); diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index ceab0b29d61..9db14dbb443 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -6830,7 +6830,7 @@ void 
ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) { // during drop table, we do not attempt to remove already dropped // indexes because we did not keep status.tokudb in sync with list of indexes. // -int ha_tokudb::remove_key_name_from_status(DB* status_block, char* key_name, DB_TXN* txn) { +int ha_tokudb::remove_key_name_from_status(DB* status_block, const char* key_name, DB_TXN* txn) { int error; uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)]; HA_METADATA_KEY md_key = hatoku_key_name; @@ -6856,7 +6856,8 @@ int ha_tokudb::remove_key_name_from_status(DB* status_block, char* key_name, DB_ // writes the key name in status.tokudb, so that we may later delete or rename // the dictionary associated with key_name // -int ha_tokudb::write_key_name_to_status(DB* status_block, char* key_name, DB_TXN* txn) { +int ha_tokudb::write_key_name_to_status(DB* status_block, const char* key_name, + DB_TXN* txn) { int error; uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)]; HA_METADATA_KEY md_key = hatoku_key_name; @@ -6895,7 +6896,7 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) { TOKUDB_HANDLER_TRACE( "field:%d:%s:type=%d:flags=%x", i, - field->field_name, + field->field_name.str, field->type(), field->flags); } @@ -6915,7 +6916,7 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) { i, p, key_part->length, - field->field_name, + field->field_name.str, field->type(), field->flags); } @@ -7247,7 +7248,7 @@ int ha_tokudb::create( "This is probably due to an alter table engine=TokuDB. 
To load this " "table, do a dump and load", name, - field->field_name + field->field_name.str ); error = HA_ERR_UNSUPPORTED; goto cleanup; diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h index d240cf1169d..7db00339b87 100644 --- a/storage/tokudb/ha_tokudb.h +++ b/storage/tokudb/ha_tokudb.h @@ -681,8 +681,8 @@ private: int remove_metadata(DB* db, void* key_data, uint key_size, DB_TXN* transaction); int update_max_auto_inc(DB* db, ulonglong val); - int remove_key_name_from_status(DB* status_block, char* key_name, DB_TXN* txn); - int write_key_name_to_status(DB* status_block, char* key_name, DB_TXN* txn); + int remove_key_name_from_status(DB* status_block, const char* key_name, DB_TXN* txn); + int write_key_name_to_status(DB* status_block, const char* key_name, DB_TXN* txn); int write_auto_inc_create(DB* db, ulonglong val, DB_TXN* txn); void init_auto_increment(); bool can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk); diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc index ba1afbf091a..b4eccf17b57 100644 --- a/storage/tokudb/ha_tokudb_alter_56.cc +++ b/storage/tokudb/ha_tokudb_alter_56.cc @@ -113,7 +113,7 @@ void ha_tokudb::print_alter_info( TOKUDB_TRACE( "name: %s, types: %u %u, nullable: %d, null_offset: %d, is_null_field: " "%d, is_null %d, pack_length %u", - curr_field->field_name, + curr_field->field_name.str, curr_field->real_type(), mysql_to_toku_type(curr_field), curr_field->null_bit, @@ -132,7 +132,7 @@ void ha_tokudb::print_alter_info( TOKUDB_TRACE( "name: %s, types: %u %u, nullable: %d, null_offset: %d, " "is_null_field: %d, is_null %d, pack_length %u", - curr_field->field_name, + curr_field->field_name.str, curr_field->real_type(), mysql_to_toku_type(curr_field), curr_field->null_bit, @@ -398,7 +398,7 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter( TOKUDB_TRACE( "Added column: index %d, name %s", curr_added_index, - 
curr_added_field->field_name); + curr_added_field->field_name.str); } } result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK; @@ -427,7 +427,7 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter( TOKUDB_TRACE( "Dropped column: index %d, name %s", curr_dropped_index, - curr_dropped_field->field_name); + curr_dropped_field->field_name.str); } } result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK; @@ -1125,7 +1125,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets( static bool field_in_key(KEY *key, Field *field) { for (uint i = 0; i < key->user_defined_key_parts; i++) { KEY_PART_INFO *key_part = &key->key_part[i]; - if (strcmp(key_part->field->field_name, field->field_name) == 0) + if (strcmp(key_part->field->field_name.str, field->field_name.str) == 0) return true; } return false; diff --git a/storage/tokudb/ha_tokudb_alter_common.cc b/storage/tokudb/ha_tokudb_alter_common.cc index d41a676de1f..17f9a4daa39 100644 --- a/storage/tokudb/ha_tokudb_alter_common.cc +++ b/storage/tokudb/ha_tokudb_alter_common.cc @@ -697,8 +697,8 @@ static int find_changed_columns( sql_print_error( "Two fields that were supposedly the same are not: %s in " "original, %s in new", - curr_field_in_orig->field_name, - curr_field_in_new->field_name); + curr_field_in_orig->field_name.str, + curr_field_in_new->field_name.str); retval = 1; goto cleanup; } diff --git a/storage/tokudb/ha_tokudb_update.cc b/storage/tokudb/ha_tokudb_update.cc index 23de81f3d8a..a6c72506448 100644 --- a/storage/tokudb/ha_tokudb_update.cc +++ b/storage/tokudb/ha_tokudb_update.cc @@ -91,7 +91,7 @@ static void dump_item(Item* item) { ":field=%s.%s.%s", field_item->db_name, field_item->table_name, - field_item->field_name); + field_item->field_name.str); break; } case Item::COND_ITEM: { @@ -141,7 +141,7 @@ static Field* find_field_by_name(TABLE* table, Item* item) { Field *found_field = NULL; for (uint i = 0; i < table->s->fields; i++) { Field *test_field = table->s->field[i]; - if (strcmp(field_item->field_name, 
test_field->field_name) == 0) { + if (strcmp(field_item->field_name.str, test_field->field_name.str) == 0) { found_field = test_field; break; } @@ -290,7 +290,7 @@ static bool check_insert_value(Item* item, const char* field_name) { if (value_item->arg->type() != Item::FIELD_ITEM) return false; Item_field* arg = static_cast<Item_field*>(value_item->arg); - if (strcmp(field_name, arg->field_name) != 0) + if (strcmp(field_name, arg->field_name.str) != 0) return false; return true; } @@ -315,7 +315,7 @@ static bool check_x_op_constant( if (arguments[0]->type() != Item::FIELD_ITEM) return false; Item_field* arg0 = static_cast<Item_field*>(arguments[0]); - if (strcmp(field_name, arg0->field_name) != 0) + if (strcmp(field_name, arg0->field_name.str) != 0) return false; if (!check_int_result(arguments[1])) if (!(allow_insert_value && @@ -359,11 +359,11 @@ static bool check_decr_floor_expression(Field* lhs_field, Item* item) { uint n = item_func->argument_count(); if (n != 3) return false; - if (!check_x_equal_0(lhs_field->field_name, arguments[0])) + if (!check_x_equal_0(lhs_field->field_name.str, arguments[0])) return false; if (arguments[1]->type() != Item::INT_ITEM || arguments[1]->val_int() != 0) return false; - if (!check_x_minus_1(lhs_field->field_name, arguments[2])) + if (!check_x_minus_1(lhs_field->field_name.str, arguments[2])) return false; if (!(lhs_field->flags & UNSIGNED_FLAG)) return false; @@ -394,14 +394,14 @@ static bool check_update_expression( return true; Item* item_constant; if (check_x_op_constant( - lhs_field->field_name, + lhs_field->field_name.str, rhs_item, "+", &item_constant, allow_insert_value)) return true; if (check_x_op_constant( - lhs_field->field_name, + lhs_field->field_name.str, rhs_item, "-", &item_constant, @@ -455,7 +455,7 @@ static bool full_field_in_key(TABLE* table, Field* field) { KEY* key = &table->s->key_info[table->s->primary_key]; for (uint i = 0; i < key->user_defined_key_parts; i++) { KEY_PART_INFO* key_part = 
&key->key_part[i]; - if (strcmp(field->field_name, key_part->field->field_name) == 0) { + if (strcmp(field->field_name.str, key_part->field->field_name.str) == 0) { return key_part->length == field->field_length; } } diff --git a/storage/tokudb/hatoku_cmp.cc b/storage/tokudb/hatoku_cmp.cc index cb02e4ff13a..ee57e064fbf 100644 --- a/storage/tokudb/hatoku_cmp.cc +++ b/storage/tokudb/hatoku_cmp.cc @@ -3031,7 +3031,7 @@ static uint32_t pack_key_from_desc( } static bool fields_have_same_name(Field* a, Field* b) { - return strcmp(a->field_name, b->field_name) == 0; + return strcmp(a->field_name.str, b->field_name.str) == 0; } static bool fields_are_same_type(Field* a, Field* b) { diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index c3fb6afed73..4fa40c4e45b 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -101,7 +101,7 @@ static int tokudb_discover3( THD* thd, const char* db, const char* name, - char* path, + const char* path, uchar** frmblob, size_t* frmlen); handlerton* tokudb_hton; @@ -1207,7 +1207,7 @@ static int tokudb_discover3( THD* thd, const char* db, const char* name, - char* path, + const char* path, uchar** frmblob, size_t* frmlen) { diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 4a9cfb975fe..b89bd419876 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -3359,7 +3359,7 @@ innobase_query_caching_of_table_permitted( THD* thd, /*!< in: thd of the user who is trying to store a result to the query cache or retrieve it */ - char* full_name, /*!< in: normalized path to the table */ + const char* full_name, /*!< in: normalized path to the table */ uint full_name_len, /*!< in: length of the normalized path to the table */ ulonglong *unused) /*!< unused for this engine */ @@ -6212,7 +6212,7 @@ ha_innobase::innobase_initialize_autoinc() ut_a(prebuilt->trx == thd_to_trx(user_thd)); - col_name = field->field_name; + col_name 
= field->field_name.str; index = innobase_get_index(table->s->next_number_index); /* Execute SELECT MAX(col_name) FROM TABLE; */ @@ -8121,7 +8121,7 @@ build_template_field( "MySQL table %s field %lu name %s", table->s->table_name.str, j, - table->field[j]->field_name); + table->field[j]->field_name.str); } ib_logf(IB_LOG_LEVEL_ERROR, @@ -9125,7 +9125,7 @@ calc_row_difference( if (field_mysql_type == MYSQL_TYPE_LONGLONG && prebuilt->table->fts && innobase_strcasecmp( - field->field_name, FTS_DOC_ID_COL_NAME) == 0) { + field->field_name.str, FTS_DOC_ID_COL_NAME) == 0) { doc_id = (doc_id_t) mach_read_from_n_little_endian( n_ptr, 8); if (doc_id == 0) { @@ -11310,7 +11310,7 @@ create_table_check_doc_id_col( col_len = field->pack_length(); - if (innobase_strcasecmp(field->field_name, + if (innobase_strcasecmp(field->field_name.str, FTS_DOC_ID_COL_NAME) == 0) { /* Note the name is case sensitive due to @@ -11318,7 +11318,7 @@ create_table_check_doc_id_col( if (col_type == DATA_INT && !field->real_maybe_null() && col_len == sizeof(doc_id_t) - && (strcmp(field->field_name, + && (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME) == 0)) { *doc_id_col = i; } else { @@ -11330,7 +11330,7 @@ create_table_check_doc_id_col( "of BIGINT NOT NULL type, and named " "in all capitalized characters"); my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); *doc_id_col = ULINT_UNDEFINED; } @@ -11476,7 +11476,7 @@ create_table_def( "column type and try to re-create " "the table with an appropriate " "column type.", - table->name, field->field_name); + table->name, field->field_name.str); goto err_col; } @@ -11526,9 +11526,9 @@ create_table_def( /* First check whether the column to be added has a system reserved name. 
*/ - if (dict_col_name_is_reserved(field->field_name)){ + if (dict_col_name_is_reserved(field->field_name.str)){ my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); err_col: dict_mem_table_free(table); mem_heap_free(heap); @@ -11539,7 +11539,7 @@ err_col: } dict_mem_table_add_col(table, heap, - field->field_name, + field->field_name.str, col_type, dtype_form_prtype( (ulint) field->type() @@ -11617,7 +11617,7 @@ create_index( for (ulint i = 0; i < key->user_defined_key_parts; i++) { KEY_PART_INFO* key_part = key->key_part + i; dict_mem_index_add_field( - index, key_part->field->field_name, 0); + index, key_part->field->field_name.str, 0); } DBUG_RETURN(convert_error_code_to_mysql( @@ -11668,8 +11668,8 @@ create_index( field = form->field[j]; if (0 == innobase_strcasecmp( - field->field_name, - key_part->field->field_name)) { + field->field_name.str, + key_part->field->field_name.str)) { /* Found the corresponding column */ goto found; @@ -11702,7 +11702,7 @@ found: "inappropriate data type. 
Table " "name %s, column name %s.", table_name, - key_part->field->field_name); + key_part->field->field_name.str); prefix_len = 0; } @@ -11713,7 +11713,7 @@ found: field_lengths[i] = key_part->length; dict_mem_index_add_field( - index, key_part->field->field_name, prefix_len); + index, key_part->field->field_name.str, prefix_len); } ut_ad(key->flags & HA_FULLTEXT || !(index->type & DICT_FTS)); @@ -12188,7 +12188,7 @@ innobase_table_flags( /* Do a pre-check on FTS DOC ID index */ if (!(key->flags & HA_NOSAME) || strcmp(key->name, FTS_DOC_ID_INDEX_NAME) - || strcmp(key->key_part[0].field->field_name, + || strcmp(key->key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { fts_doc_id_index_bad = key->name; } @@ -16976,7 +16976,7 @@ my_bool ha_innobase::register_query_cache_table( /*====================================*/ THD* thd, /*!< in: user thread handle */ - char* table_key, /*!< in: normalized path to the + const char* table_key, /*!< in: normalized path to the table */ uint key_length, /*!< in: length of the normalized path to the table */ diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h index d1ec566b043..f6f2f1b0eee 100644 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@ -235,7 +235,7 @@ class ha_innobase: public handler /* ask handler about permission to cache table during query registration */ - my_bool register_query_cache_table(THD *thd, char *table_key, + my_bool register_query_cache_table(THD *thd, const char *table_key, uint key_length, qc_engine_callback *call_back, ulonglong *engine_data); diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index a521db3ce25..b73ed019c6f 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -533,7 +533,7 @@ ha_innobase::check_if_supported_inplace_alter( && innobase_fulltext_exist(altered_table) && !my_strcasecmp( system_charset_info, - 
key_part->field->field_name, + key_part->field->field_name.str, FTS_DOC_ID_COL_NAME)) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS); @@ -590,7 +590,7 @@ ha_innobase::check_if_supported_inplace_alter( if (!my_strcasecmp( system_charset_info, - (*fp)->field_name, + (*fp)->field_name.str, FTS_DOC_ID_COL_NAME)) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS); @@ -859,7 +859,7 @@ no_match: } if (innobase_strcasecmp(col_names[j], - key_part.field->field_name)) { + key_part.field->field_name.str)) { /* Name mismatch */ goto no_match; } @@ -1515,7 +1515,7 @@ name_ok: } my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB", - field->field_name); + field->field_name.str); return(ER_WRONG_KEY_COLUMN); } @@ -1531,7 +1531,7 @@ name_ok: } my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB", - field->field_name); + field->field_name.str); return(ER_WRONG_KEY_COLUMN); } } @@ -1570,7 +1570,7 @@ innobase_create_index_field_def( ut_a(field); index_field->col_no = key_part->fieldnr; - index_field->col_name = altered_table ? field->field_name : fields[key_part->fieldnr]->field_name; + index_field->col_name = altered_table ? 
field->field_name.str : fields[key_part->fieldnr]->field_name.str; col_type = get_innobase_type_from_mysql_type(&is_unsigned, field); @@ -1695,19 +1695,19 @@ innobase_fts_check_doc_id_col( stored_in_db())) sql_idx++; if (my_strcasecmp(system_charset_info, - field->field_name, FTS_DOC_ID_COL_NAME)) { + field->field_name.str, FTS_DOC_ID_COL_NAME)) { continue; } - if (strcmp(field->field_name, FTS_DOC_ID_COL_NAME)) { + if (strcmp(field->field_name.str, FTS_DOC_ID_COL_NAME)) { my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); } else if (field->type() != MYSQL_TYPE_LONGLONG || field->pack_length() != 8 || field->real_maybe_null() || !(field->flags & UNSIGNED_FLAG)) { my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, MYF(0), - field->field_name); + field->field_name.str); } else { *fts_doc_col_no = i; } @@ -1778,7 +1778,7 @@ innobase_fts_check_doc_id_index( if ((key.flags & HA_NOSAME) && key.user_defined_key_parts == 1 && !strcmp(key.name, FTS_DOC_ID_INDEX_NAME) - && !strcmp(key.key_part[0].field->field_name, + && !strcmp(key.key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { if (fts_doc_col_no) { *fts_doc_col_no = ULINT_UNDEFINED; @@ -1857,7 +1857,7 @@ innobase_fts_check_doc_id_index_in_def( if (!(key->flags & HA_NOSAME) || key->user_defined_key_parts != 1 || strcmp(key->name, FTS_DOC_ID_INDEX_NAME) - || strcmp(key->key_part[0].field->field_name, + || strcmp(key->key_part[0].field->field_name.str, FTS_DOC_ID_COL_NAME)) { return(FTS_INCORRECT_DOC_ID_INDEX); } @@ -2464,7 +2464,7 @@ innobase_check_foreigns( if (!new_field || (new_field->flags & NOT_NULL_FLAG)) { if (innobase_check_foreigns_low( user_table, drop_fk, n_drop_fk, - (*fp)->field_name, !new_field)) { + (*fp)->field_name.str, !new_field)) { return(true); } } @@ -2693,7 +2693,7 @@ innobase_get_col_names( for (uint old_i = 0; table->field[old_i]; old_i++) { if (new_field->field == table->field[old_i]) { - cols[old_i] = new_field->field_name; + cols[old_i] = 
new_field->field_name.str; break; } } @@ -2980,7 +2980,7 @@ prepare_inplace_alter_table_dict( dict_mem_table_free( ctx->new_table); my_error(ER_WRONG_KEY_COLUMN, MYF(0), - field->field_name); + field->field_name.str); goto new_clustered_failed; } } else { @@ -3007,16 +3007,16 @@ prepare_inplace_alter_table_dict( } } - if (dict_col_name_is_reserved(field->field_name)) { + if (dict_col_name_is_reserved(field->field_name.str)) { dict_mem_table_free(ctx->new_table); my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field->field_name.str); goto new_clustered_failed; } dict_mem_table_add_col( ctx->new_table, ctx->heap, - field->field_name, + field->field_name.str, col_type, dtype_form_prtype(field_type, charset_no), col_len); @@ -3619,7 +3619,7 @@ err_exit_no_heap: cf_it.rewind(); while (Create_field* cf = cf_it++) { if (cf->field == *fp) { - name = cf->field_name; + name = cf->field_name.str; goto check_if_ok_to_rename; } } @@ -3629,7 +3629,7 @@ check_if_ok_to_rename: /* Prohibit renaming a column from FTS_DOC_ID if full-text indexes exist. 
*/ if (!my_strcasecmp(system_charset_info, - (*fp)->field_name, + (*fp)->field_name.str, FTS_DOC_ID_COL_NAME) && innobase_fulltext_exist(altered_table)) { my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, @@ -4806,8 +4806,8 @@ innobase_rename_columns_try( if (cf->field == *fp) { if (innobase_rename_column_try( ctx->old_table, trx, table_name, i, - cf->field->field_name, - cf->field_name, + cf->field->field_name.str, + cf->field_name.str, ctx->need_rebuild())) { return(true); } @@ -4854,8 +4854,8 @@ innobase_rename_columns_cache( while (Create_field* cf = cf_it++) { if (cf->field == *fp) { dict_mem_table_col_rename(user_table, i, - cf->field->field_name, - cf->field_name); + cf->field->field_name.str, + cf->field_name.str); goto processed_field; } } @@ -4917,7 +4917,7 @@ commit_get_autoinc( dict_table_autoinc_lock(ctx->old_table); err = row_search_max_autoinc( - index, autoinc_field->field_name, &max_value_table); + index, autoinc_field->field_name.str, &max_value_table); if (err != DB_SUCCESS) { ut_ad(0); |