diff options
author     Monty <monty@mariadb.org>  2018-05-24 15:19:55 +0300
committer  Monty <monty@mariadb.org>  2018-05-26 12:49:25 +0300
commit     5a16fe0e6f33f1b123bbe9422126dd3b0fdf5ed1 (patch)
tree       2d61e0a0269126419f660c0578e85d3d92e5550d
parent     29dbb23fb7ac3b10e70e4c5f99dcedab91af85ba (diff)
download   mariadb-git-5a16fe0e6f33f1b123bbe9422126dd3b0fdf5ed1.tar.gz
Fixed compiler warnings
When merging this with 10.2 and later, one can just use the 10.2 or later code
-rw-r--r--  storage/connect/ha_connect.cc |  2
-rw-r--r--  storage/sphinx/ha_sphinx.cc   | 87
2 files changed, 43 insertions(+), 46 deletions(-)
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index e6bfa97f327..af329c0c8df 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -432,7 +432,7 @@ handlerton *connect_hton= NULL;
 uint GetTraceValue(void) {return (uint)(connect_hton ? THDVAR(current_thd, xtrace) : 0);}
 bool ExactInfo(void) {return THDVAR(current_thd, exact_info);}
-bool CondPushEnabled(void) {return THDVAR(current_thd, cond_push);}
+static bool CondPushEnabled(void) {return THDVAR(current_thd, cond_push);}
 USETEMP UseTemp(void) {return (USETEMP)THDVAR(current_thd, use_tempfile);}
 int GetConvSize(void) {return THDVAR(current_thd, conv_size);}
 TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);}
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 5308f780e0e..3f6770b5d26 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -17,7 +17,7 @@
 #pragma implementation // gcc: Class implementation
 #endif
-#if _MSC_VER>=1400
+#if defined(_MSC_VER) && _MSC_VER>=1400
 #define _CRT_SECURE_NO_DEPRECATE 1
 #define _CRT_NONSTDC_NO_DEPRECATE 1
 #endif
@@ -64,7 +64,7 @@
 #define MSG_WAITALL 0
 #endif
-#if _MSC_VER>=1400
+#if defined(_MSC_VER) && _MSC_VER>=1400
 #pragma warning(push,4)
 #endif
@@ -1041,8 +1041,8 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 	bool bOk = true;
 	bool bQL = false;
 	char * sScheme = NULL;
-	char * sHost = SPHINXAPI_DEFAULT_HOST;
-	char * sIndex = SPHINXAPI_DEFAULT_INDEX;
+	char * sHost = (char*) SPHINXAPI_DEFAULT_HOST;
+	char * sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 	int iPort = SPHINXAPI_DEFAULT_PORT;

 	// parse connection string, if any
@@ -1068,12 +1068,12 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 			sHost--; // reuse last slash
 			iPort = 0;
 			if (!( sIndex = strrchr ( sHost, ':' ) ))
-				sIndex = SPHINXAPI_DEFAULT_INDEX;
+				sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 			else
 			{
 				*sIndex++ = '\0';
 				if ( !*sIndex )
-					sIndex = SPHINXAPI_DEFAULT_INDEX;
+					sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 			}
 			bOk = true;
 			break;
@@ -1095,7 +1095,7 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 			if ( sIndex )
 				*sIndex++ = '\0';
 			else
-				sIndex = SPHINXAPI_DEFAULT_INDEX;
+				sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;

 			iPort = atoi(sPort);
 			if ( !iPort )
@@ -1107,7 +1107,7 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 				if ( sIndex )
 					*sIndex++ = '\0';
 				else
-					sIndex = SPHINXAPI_DEFAULT_INDEX;
+					sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 			}
 			bOk = true;
 			break;
@@ -1303,8 +1303,8 @@ CSphSEQuery::CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex
 	, m_sGeoLongAttr ( "" )
 	, m_fGeoLatitude ( 0.0f )
 	, m_fGeoLongitude ( 0.0f )
-	, m_sComment ( "" )
-	, m_sSelect ( "*" )
+	, m_sComment ( (char*) "" )
+	, m_sSelect ( (char*) "*" )
 	, m_pBuf ( NULL )
 	, m_pCur ( NULL )
@@ -1738,7 +1738,7 @@ bool CSphSEQuery::ParseField ( char * sField )
 		}
 	} else if ( !strcmp ( sName, "override" ) ) // name,type,id:value,id:value,...
 	{
-		char * sName = NULL;
+		sName = NULL;
 		int iType = 0;
 		CSphSEQuery::Override_t * pOverride = NULL;
@@ -1794,7 +1794,7 @@ bool CSphSEQuery::ParseField ( char * sField )
 			*sRest++ = '\0';
 			if (!( sRest - sId ))
 				break;
-			char * sValue = sRest;
+			sValue = sRest;
 			if ( ( sRest = strchr ( sRest, ',' ) )!=NULL )
 				*sRest++ = '\0';
 			if ( !*sValue )
@@ -2213,7 +2213,7 @@ int ha_sphinx::Connect ( const char * sHost, ushort uPort )
 	}

 	char sError[512];
-	int iSocket = socket ( iDomain, SOCK_STREAM, 0 );
+	int iSocket = (int) socket ( iDomain, SOCK_STREAM, 0 );

 	if ( iSocket<0 )
 	{
@@ -2538,12 +2538,6 @@ char * ha_sphinx::UnpackString ()
 }

-static inline const char * FixNull ( const char * s )
-{
-	return s ? s : "(null)";
-}
-
 bool ha_sphinx::UnpackSchema ()
 {
 	SPH_ENTER_METHOD();
@@ -2674,7 +2668,7 @@ bool ha_sphinx::UnpackStats ( CSphSEStats * pStats )
 	assert ( pStats );

 	char * pCurSave = m_pCur;
-	for ( uint i=0; i<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
+	for ( uint m=0; m<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); m++ ) // NOLINT
 	{
 		m_pCur += m_bId64 ? 12 : 8; // skip id+weight
 		for ( uint32 i=0; i<m_iAttrs && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
@@ -3159,7 +3153,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
 				}
 			}

-			af->store ( sBuf, pCur-sBuf, &my_charset_bin );
+			af->store ( sBuf, uint(pCur-sBuf), &my_charset_bin );
 		}
 		break;
@@ -3386,39 +3380,39 @@ ha_rows ha_sphinx::records_in_range ( uint, key_range *, key_range * )
 // currently provided for doing that.
 //
 // Called from handle.cc by ha_create_table().
-int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
+int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
 {
 	SPH_ENTER_METHOD();

 	char sError[256];

 	CSphSEShare tInfo;
-	if ( !ParseUrl ( &tInfo, table, true ) )
+	if ( !ParseUrl ( &tInfo, table_arg, true ) )
 		SPH_RET(-1);

 	// check SphinxAPI table
 	for ( ; !tInfo.m_bSphinxQL; )
 	{
 		// check system fields (count and types)
-		if ( table->s->fields<SPHINXSE_SYSTEM_COLUMNS )
+		if ( table_arg->s->fields<SPHINXSE_SYSTEM_COLUMNS )
 		{
 			my_snprintf ( sError, sizeof(sError), "%s: there MUST be at least %d columns",
 				name, SPHINXSE_SYSTEM_COLUMNS );
 			break;
 		}

-		if ( !IsIDField ( table->field[0] ) )
+		if ( !IsIDField ( table_arg->field[0] ) )
 		{
 			my_snprintf ( sError, sizeof(sError), "%s: 1st column (docid) MUST be unsigned integer or bigint", name );
 			break;
 		}

-		if ( !IsIntegerFieldType ( table->field[1]->type() ) )
+		if ( !IsIntegerFieldType ( table_arg->field[1]->type() ) )
 		{
 			my_snprintf ( sError, sizeof(sError), "%s: 2nd column (weight) MUST be integer or bigint", name );
 			break;
 		}

-		enum_field_types f2 = table->field[2]->type();
+		enum_field_types f2 = table_arg->field[2]->type();
 		if ( f2!=MYSQL_TYPE_VARCHAR
 			&& f2!=MYSQL_TYPE_BLOB && f2!=MYSQL_TYPE_MEDIUM_BLOB && f2!=MYSQL_TYPE_LONG_BLOB && f2!=MYSQL_TYPE_TINY_BLOB )
 		{
@@ -3428,28 +3422,28 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 		// check attributes
 		int i;
-		for ( i=3; i<(int)table->s->fields; i++ )
+		for ( i=3; i<(int)table_arg->s->fields; i++ )
 		{
-			enum_field_types eType = table->field[i]->type();
+			enum_field_types eType = table_arg->field[i]->type();
 			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
 			{
 				my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float",
-					name, i+1, table->field[i]->field_name );
+					name, i+1, table_arg->field[i]->field_name );
 				break;
 			}
 		}

-		if ( i!=(int)table->s->fields )
+		if ( i!=(int)table_arg->s->fields )
 			break;

 		// check index
 		if (
-			table->s->keys!=1 ||
-			table->key_info[0].user_defined_key_parts!=1 ||
-			strcasecmp ( table->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
+			table_arg->s->keys!=1 ||
+			table_arg->key_info[0].user_defined_key_parts!=1 ||
+			strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
 		{
 			my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
-				name, table->field[2]->field_name );
+				name, table_arg->field[2]->field_name );
 			break;
 		}
@@ -3464,13 +3458,13 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 		sError[0] = '\0';

 		// check that 1st column is id, is of int type, and has an index
-		if ( strcmp ( table->field[0]->field_name, "id" ) )
+		if ( strcmp ( table_arg->field[0]->field_name, "id" ) )
 		{
 			my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name );
 			break;
 		}

-		if ( !IsIDField ( table->field[0] ) )
+		if ( !IsIDField ( table_arg->field[0] ) )
 		{
 			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be INT UNSIGNED or BIGINT", name );
 			break;
@@ -3478,22 +3472,22 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 		// check index
 		if (
-			table->s->keys!=1 ||
-			table->key_info[0].user_defined_key_parts!=1 ||
-			strcasecmp ( table->key_info[0].key_part[0].field->field_name, "id" ) )
+			table_arg->s->keys!=1 ||
+			table_arg->key_info[0].user_defined_key_parts!=1 ||
+			strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name, "id" ) )
 		{
 			my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
 			break;
 		}

 		// check column types
-		for ( int i=1; i<(int)table->s->fields; i++ )
+		for ( int i=1; i<(int)table_arg->s->fields; i++ )
 		{
-			enum_field_types eType = table->field[i]->type();
+			enum_field_types eType = table_arg->field[i]->type();
 			if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
 			{
 				my_snprintf ( sError, sizeof(sError), "%s: column %d(%s) is of unsupported type (use int/bigint/timestamp/varchar/float)",
-					name, i+1, table->field[i]->field_name );
+					name, i+1, table_arg->field[i]->field_name );
 				break;
 			}
 		}
@@ -3507,8 +3501,11 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 	// report and bail
 	if ( sError[0] )
 	{
-		my_error ( ER_CANT_CREATE_TABLE, MYF(0),
-			table->s->db.str, table->s->table_name, sError );
+		my_printf_error(ER_CANT_CREATE_TABLE,
+			"Can\'t create table %s.%s (Error: %s)",
+			MYF(0),
+			table_arg->s->db.str,
+			table_arg->s->table_name.str, sError);
 		SPH_RET(-1);
 	}