author    | unknown <ndbdev@dl145b.mysql.com> | 2005-06-07 12:50:45 +0200
committer | unknown <ndbdev@dl145b.mysql.com> | 2005-06-07 12:50:45 +0200
commit    | bb00abb03a5da743432fef81fcba4cf043bac262 (patch)
tree      | 0228961a3c99a34b7c80a47d81e41d23fd7a3a1d
parent    | a4e96888aa10fcaafdeaf1d6305aa7769cd85e91 (diff)
parent    | c7787f8af1be3b64e22fd00659b453972b94476c (diff)
download  | mariadb-git-bb00abb03a5da743432fef81fcba4cf043bac262.tar.gz
Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.0
into dl145b.mysql.com:/home/ndbdev/tomas/mysql-5.1
configure.in:
Auto merged
libmysqld/Makefile.am:
Auto merged
scripts/Makefile.am:
Auto merged
sql/log.cc:
Auto merged
sql/mysql_priv.h:
Auto merged
sql/mysqld.cc:
Auto merged
sql/sql_class.h:
Auto merged
storage/ndb/include/transporter/TransporterRegistry.hpp:
Auto merged
storage/ndb/src/common/transporter/SCI_Transporter.cpp:
Auto merged
storage/ndb/src/common/transporter/SCI_Transporter.hpp:
Auto merged
storage/ndb/src/common/transporter/SHM_Buffer.hpp:
Auto merged
storage/ndb/src/common/transporter/SHM_Transporter.cpp:
Auto merged
storage/ndb/src/common/transporter/SHM_Transporter.hpp:
Auto merged
storage/ndb/src/common/transporter/SendBuffer.cpp:
Auto merged
storage/ndb/src/common/transporter/SendBuffer.hpp:
Auto merged
storage/ndb/src/common/transporter/TCP_Transporter.cpp:
Auto merged
storage/ndb/src/common/transporter/TCP_Transporter.hpp:
Auto merged
storage/ndb/src/common/transporter/Transporter.hpp:
Auto merged
storage/ndb/src/common/transporter/TransporterRegistry.cpp:
Auto merged
storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
Auto merged
storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp:
Auto merged
storage/ndb/src/mgmsrv/ConfigInfo.cpp:
Auto merged
storage/ndb/src/ndbapi/NdbOperationExec.cpp:
Auto merged
storage/ndb/test/ndbapi/testNdbApi.cpp:
Auto merged
storage/ndb/test/run-test/daily-basic-tests.txt:
Auto merged
storage/ndb/test/run-test/ndb-autotest.sh:
Auto merged
storage/ndb/test/tools/hugoLoad.cpp:
Auto merged
storage/ndb/test/tools/hugoPkUpdate.cpp:
Auto merged
storage/ndb/test/tools/hugoScanRead.cpp:
Auto merged
storage/ndb/test/tools/hugoScanUpdate.cpp:
Auto merged
storage/ndb/tools/Makefile.am:
Auto merged
storage/ndb/tools/restore/Restore.cpp:
Auto merged
support-files/mysql.spec.sh:
Auto merged
storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp:
Auto merged
33 files changed, 468 insertions, 148 deletions
diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test
index 3c7b99da25a..0173fabd46f 100644
--- a/mysql-test/t/ndb_restore.test
+++ b/mysql-test/t/ndb_restore.test
@@ -214,4 +214,4 @@ drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
 #
 # Test BUG#10287
 #
---exec $NDB_TOOLS_DIR/ndb_select_all -d sys -D , SYSTAB_0 | grep 520093696
+--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 48e748f8cf0..79764ab63cc 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -32,8 +32,6 @@ bin_SCRIPTS = @server_scripts@ \
 		mysqldumpslow \
 		mysql_explain_log \
 		mysqld_multi \
-		make_win_src_distribution \
-		make_win_binary_distribution \
 		mysql_create_system_tables
 
 EXTRA_SCRIPTS = make_binary_distribution.sh \
@@ -84,6 +82,7 @@ CLEANFILES = @server_scripts@ \
 	mysqldumpslow \
 	mysqld_multi \
 	make_win_src_distribution \
+	make_win_binary_distribution \
 	mysql_create_system_tables
 
 DISTCLEANFILES = mysqlbug
diff --git a/sql/log.cc b/sql/log.cc
index 918da9b95cd..b1b558a1915 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2372,7 +2372,7 @@ void sql_print_information(const char *format, ...)
   DBUG_VOID_RETURN;
 }
 
-#ifdef HAVE_MMAP
+
 /********* transaction coordinator log for 2pc - mmap() based solution *******/
 
 /*
@@ -2410,13 +2410,17 @@ void sql_print_information(const char *format, ...)
   new xid is added into it.  Removing a xid from a page does not make it
   dirty - we don't sync removals to disk.
 */
+
+ulong tc_log_page_waits= 0;
+
+#ifdef HAVE_MMAP
+
 #define TC_LOG_HEADER_SIZE (sizeof(tc_log_magic)+1)
 
 static const char tc_log_magic[]={(char) 254, 0x23, 0x05, 0x74};
 
 ulong opt_tc_log_size= TC_LOG_MIN_SIZE;
-ulong tc_log_max_pages_used=0, tc_log_page_size=0,
-      tc_log_page_waits=0, tc_log_cur_pages_used=0;
+ulong tc_log_max_pages_used=0, tc_log_page_size=0, tc_log_cur_pages_used=0;
 
 int TC_LOG_MMAP::open(const char *opt_name)
 {
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 40a63556788..c5753a7f114 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1087,6 +1087,8 @@ extern ulong rpl_recovery_rank, thread_cache_size;
 extern ulong back_log;
 extern ulong specialflag, current_pid;
 extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter;
+extern ulong opt_tc_log_size, tc_log_max_pages_used, tc_log_page_size;
+extern ulong tc_log_page_waits;
 extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb;
 extern uint test_flags,select_errors,ha_open_options;
 extern uint protocol_version, mysqld_port, dropping_tables;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index ea9c95d13f3..69aba3abd42 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -4673,9 +4673,11 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
   "more than one storage engine, when binary log is disabled)",
   (gptr*) &opt_tc_log_file, (gptr*) &opt_tc_log_file, 0, GET_STR,
   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#ifdef HAVE_MMAP
  {"log-tc-size", OPT_LOG_TC_SIZE, "Size of transaction coordinator log.",
   (gptr*) &opt_tc_log_size, (gptr*) &opt_tc_log_size, 0, GET_ULONG,
   REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, ~0L, 0, TC_LOG_PAGE_SIZE, 0},
+#endif
  {"log-update", OPT_UPDATE_LOG,
   "The update log is deprecated since version 5.0, is replaced by the binary \
log and this option justs turns on --log-bin instead.",
@@ -5817,9 +5819,11 @@ struct show_var_st status_vars[]= {
 #endif /* HAVE_OPENSSL */
   {"Table_locks_immediate", (char*) &locks_immediate, SHOW_LONG},
   {"Table_locks_waited",    (char*) &locks_waited, SHOW_LONG},
+#ifdef HAVE_MMAP
   {"Tc_log_max_pages_used", (char*) &tc_log_max_pages_used, SHOW_LONG},
   {"Tc_log_page_size",      (char*) &tc_log_page_size, SHOW_LONG},
   {"Tc_log_page_waits",     (char*) &tc_log_page_waits, SHOW_LONG},
+#endif
   {"Threads_cached",        (char*) &cached_thread_count, SHOW_LONG_CONST},
   {"Threads_connected",     (char*) &thread_count, SHOW_INT_CONST},
   {"Threads_created",       (char*) &thread_created, SHOW_LONG_CONST},
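[Editor's note: the sql/log.cc and sql/mysqld.cc hunks above move tc_log_page_waits out of the HAVE_MMAP region so the status counter exists even when the mmap-based transaction coordinator log is compiled out. For readers unfamiliar with the mechanism the log.cc comment describes, here is a minimal, hypothetical C++ sketch of an mmap()-backed log page, assuming POSIX mmap/msync; it is not MySQL's TC_LOG_MMAP code, only an illustration of why adding an xid must sync while removing one need not.]

// Sketch of an mmap()-based page log (assumed names, not MySQL code).
#include <cstddef>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static const size_t PAGE_SIZE = 8192;   // same page size the TC log uses

int main()
{
  int fd = open("/tmp/tclog-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
  if (fd < 0 || ftruncate(fd, PAGE_SIZE) != 0)
    return 1;

  unsigned long long *page = (unsigned long long*)
    mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (page == MAP_FAILED)
    return 1;

  // Adding an xid dirties the page: it must be on disk before the commit
  // is acknowledged, hence the synchronous msync().
  page[0] = 0x1122334455667788ULL;
  msync(page, PAGE_SIZE, MS_SYNC);

  // Removing an xid does NOT sync: a stale xid left on disk after a crash
  // is harmless, recovery merely sees a transaction that already committed.
  page[0] = 0;

  munmap(page, PAGE_SIZE);
  close(fd);
  unlink("/tmp/tclog-demo");
  return 0;
}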
diff --git a/sql/sql_class.h b/sql/sql_class.h
index b81595d21a1..0511f327615 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -45,10 +45,6 @@ extern const char **errmesg;
 
 #define TC_LOG_PAGE_SIZE 8192
 #define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE)
-extern ulong opt_tc_log_size;
-extern ulong tc_log_max_pages_used;
-extern ulong tc_log_page_size;
-extern ulong tc_log_page_waits;
 
 #define TC_HEURISTIC_RECOVER_COMMIT  1
 #define TC_HEURISTIC_RECOVER_ROLLBACK 2
diff --git a/storage/ndb/include/transporter/TransporterRegistry.hpp b/storage/ndb/include/transporter/TransporterRegistry.hpp
index 363cdabe10a..1ae8a4068c4 100644
--- a/storage/ndb/include/transporter/TransporterRegistry.hpp
+++ b/storage/ndb/include/transporter/TransporterRegistry.hpp
@@ -203,6 +203,13 @@ public:
   bool createSCITransporter(struct TransporterConfiguration * config);
   bool createSHMTransporter(struct TransporterConfiguration * config);
   bool createOSETransporter(struct TransporterConfiguration * config);
+
+  /**
+   * Get free buffer space
+   *
+   * Get #free bytes in send buffer for <em>node</node>
+   */
+  Uint32 get_free_buffer(Uint32 node) const ;
 
   /**
    * prepareSend
diff --git a/storage/ndb/src/common/transporter/SCI_Transporter.cpp b/storage/ndb/src/common/transporter/SCI_Transporter.cpp
index 506140a887f..c96f84a4f4f 100644
--- a/storage/ndb/src/common/transporter/SCI_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/SCI_Transporter.cpp
@@ -1025,7 +1025,8 @@ SCI_Transporter::initSCI() {
   DBUG_RETURN(true);
 }
 
-
-
-
-
+Uint32
+SCI_Transporter::get_free_buffer() const
+{
+  return (m_TargetSegm[m_ActiveAdapterId].writer)->get_free_buffer();
+}
diff --git a/storage/ndb/src/common/transporter/SCI_Transporter.hpp b/storage/ndb/src/common/transporter/SCI_Transporter.hpp
index 8d263f32a57..cb42e437118 100644
--- a/storage/ndb/src/common/transporter/SCI_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/SCI_Transporter.hpp
@@ -133,7 +133,8 @@ public:
    * remote segment is mapped. Otherwize false.
    */
   bool getConnectionStatus();
-
+
+  virtual Uint32 get_free_buffer() const;
 private:
   SCI_Transporter(TransporterRegistry &t_reg,
                   const char *local_host,
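[Editor's note: the transporter hunks above and below all serve one new query path — TransporterRegistry::get_free_buffer(node) dispatches through a virtual Transporter::get_free_buffer() to whatever transport (TCP, SHM, SCI) backs the node, with the pure-virtual declaration added to Transporter.hpp further down. A condensed, self-contained sketch of that shape; the class and member names here are invented stand-ins for the real NDB classes.]

#include <cstdio>

typedef unsigned int Uint32;

struct Transporter {
  // Pure virtual query, as declared in Transporter.hpp below.
  virtual Uint32 get_free_buffer() const = 0;
  virtual ~Transporter() {}
};

struct TcpLike : Transporter {
  Uint32 remaining;                     // stands in for SendBuffer state
  TcpLike(Uint32 r) : remaining(r) {}
  Uint32 get_free_buffer() const { return remaining; }
};

struct Registry {
  static const Uint32 MAX_NODES = 4;
  Transporter* transporters[MAX_NODES];

  // Mirrors TransporterRegistry::get_free_buffer(): 0 for unknown nodes.
  Uint32 get_free_buffer(Uint32 node) const
  {
    Transporter* t;
    if (node < MAX_NODES && (t = transporters[node]) != 0)
      return t->get_free_buffer();
    return 0;
  }
};

int main()
{
  TcpLike tcp(4096);
  Registry reg = { { 0, &tcp, 0, 0 } };
  std::printf("node 1 free: %u, node 2 free: %u\n",
              reg.get_free_buffer(1), reg.get_free_buffer(2));
  return 0;
}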
diff --git a/storage/ndb/src/common/transporter/SHM_Buffer.hpp b/storage/ndb/src/common/transporter/SHM_Buffer.hpp
index f49b4fe73cb..27321a3191f 100644
--- a/storage/ndb/src/common/transporter/SHM_Buffer.hpp
+++ b/storage/ndb/src/common/transporter/SHM_Buffer.hpp
@@ -157,6 +157,7 @@ public:
   inline Uint32 getWriteIndex() const { return m_writeIndex;}
   inline Uint32 getBufferSize() const { return m_bufferSize;}
+  inline Uint32 get_free_buffer() const;
 
   inline void copyIndexes(SHM_Writer * standbyWriter);
 
@@ -212,5 +213,21 @@ SHM_Writer::updateWritePtr(Uint32 sz){
   m_writeIndex = tWriteIndex;
   * m_sharedWriteIndex = tWriteIndex;
 }
+
+inline
+Uint32
+SHM_Writer::get_free_buffer() const
+{
+  Uint32 tReadIndex = * m_sharedReadIndex;
+  Uint32 tWriteIndex = m_writeIndex;
+
+  Uint32 free;
+  if(tReadIndex <= tWriteIndex){
+    free = m_bufferSize + tReadIndex - tWriteIndex;
+  } else {
+    free = tReadIndex - tWriteIndex;
+  }
+  return free;
+}
 
 #endif
diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.cpp
index e2d23cf94e2..a225988d37f 100644
--- a/storage/ndb/src/common/transporter/SHM_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/SHM_Transporter.cpp
@@ -365,3 +365,9 @@ SHM_Transporter::doSend()
     kill(m_remote_pid, g_ndb_shm_signum);
   }
 }
+
+Uint32
+SHM_Transporter::get_free_buffer() const
+{
+  return writer->get_free_buffer();
+}
diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.hpp b/storage/ndb/src/common/transporter/SHM_Transporter.hpp
index 677bd6efc37..e7a76225471 100644
--- a/storage/ndb/src/common/transporter/SHM_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/SHM_Transporter.hpp
@@ -139,6 +139,8 @@ protected:
   int m_remote_pid;
   Uint32 m_last_signal;
   Uint32 m_signal_threshold;
+
+  virtual Uint32 get_free_buffer() const;
 
 private:
   bool _shmSegCreated;
diff --git a/storage/ndb/src/common/transporter/SendBuffer.cpp b/storage/ndb/src/common/transporter/SendBuffer.cpp
index 58cad96931f..8f69eb4bd40 100644
--- a/storage/ndb/src/common/transporter/SendBuffer.cpp
+++ b/storage/ndb/src/common/transporter/SendBuffer.cpp
@@ -60,7 +60,7 @@ SendBuffer::bufferSize() {
 }
 
 Uint32
-SendBuffer::bufferSizeRemaining() {
+SendBuffer::bufferSizeRemaining() const {
   return (sizeOfBuffer - dataSize);
 }
 
diff --git a/storage/ndb/src/common/transporter/SendBuffer.hpp b/storage/ndb/src/common/transporter/SendBuffer.hpp
index 63a01f3de24..7ebeb6d890e 100644
--- a/storage/ndb/src/common/transporter/SendBuffer.hpp
+++ b/storage/ndb/src/common/transporter/SendBuffer.hpp
@@ -51,7 +51,7 @@ public:
   bool initBuffer(Uint32 aRemoteNodeId);
 
   // Number of bytes remaining in the buffer
-  Uint32 bufferSizeRemaining();
+  Uint32 bufferSizeRemaining() const;
 
   // Number of bytes of data in the buffer
   int bufferSize();
diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp
index fd71cf71cd9..5db12d3985c 100644
--- a/storage/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/TCP_Transporter.cpp
@@ -253,6 +253,11 @@ TCP_Transporter::sendIsPossible(struct timeval * timeout) {
 #endif
 }
 
+Uint32
+TCP_Transporter::get_free_buffer() const
+{
+  return m_sendBuffer.bufferSizeRemaining();
+}
 
 Uint32 *
 TCP_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio){
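[Editor's note: the new SHM_Writer::get_free_buffer() above computes the free space of a circular shared-memory buffer from a reader index and a writer index. A stand-alone version of the same arithmetic with a few worked cases; note that in this convention equal indexes mean an empty buffer, so a completely empty buffer reports its full size as free.]

#include <cassert>

typedef unsigned int Uint32;

Uint32 free_bytes(Uint32 bufferSize, Uint32 readIndex, Uint32 writeIndex)
{
  if (readIndex <= writeIndex)
    return bufferSize + readIndex - writeIndex; // used span is write-read, so free is size-(write-read)
  return readIndex - writeIndex;                // writer wrapped and is behind the reader
}

int main()
{
  // 100-byte buffer, reader at 10, writer at 30: 20 bytes in flight, 80 free.
  assert(free_bytes(100, 10, 30) == 80);
  // Writer wrapped around: reader at 90, writer at 20: 30 in flight, 70 free.
  assert(free_bytes(100, 90, 20) == 70);
  // Empty buffer (indexes equal) reports everything free.
  assert(free_bytes(100, 0, 0) == 100);
  return 0;
}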
diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.hpp b/storage/ndb/src/common/transporter/TCP_Transporter.hpp
index 9cd174150c1..df4149531b4 100644
--- a/storage/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/storage/ndb/src/common/transporter/TCP_Transporter.hpp
@@ -101,6 +101,7 @@ private:
    */
   virtual void updateReceiveDataPtr(Uint32 bytesRead);
 
+  virtual Uint32 get_free_buffer() const;
 protected:
   /**
    * Setup client/server and perform connect/accept
diff --git a/storage/ndb/src/common/transporter/Transporter.hpp b/storage/ndb/src/common/transporter/Transporter.hpp
index 53414f1179d..8c5e96226a3 100644
--- a/storage/ndb/src/common/transporter/Transporter.hpp
+++ b/storage/ndb/src/common/transporter/Transporter.hpp
@@ -86,6 +86,8 @@ public:
     m_socket_client->set_port(port);
   };
 
+  virtual Uint32 get_free_buffer() const = 0;
+
 protected:
   Transporter(TransporterRegistry &,
 	      TransporterType,
diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
index f331b1660c1..5ffd3ac334e 100644
--- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -558,6 +558,18 @@ TransporterRegistry::removeTransporter(NodeId nodeId) {
   theTransporters[nodeId] = NULL;
 }
 
+Uint32
+TransporterRegistry::get_free_buffer(Uint32 node) const
+{
+  Transporter *t;
+  if(likely((t = theTransporters[node]) != 0))
+  {
+    return t->get_free_buffer();
+  }
+  return 0;
+}
+
+
 SendStatus
 TransporterRegistry::prepareSend(const SignalHeader * const signalHeader,
 				 Uint8 prio,
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index b2ed7acd347..03309f3ac67 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -5072,6 +5072,7 @@ Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, TabRecordPtr tabPtr)
        * And reset nextLcp
        */
       replicaPtr.p->nextLcp = 0;
+      replicaPtr.p->noCrashedReplicas = 0;
     }//if
   }//for
 }//for
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
index 6527864135b..6a478bea917 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
@@ -133,6 +133,9 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
                              Uint32 ToutBufIndex,
                              const Operationrec * const regOperPtr)
 {
+  if(ToutBufIndex == 0)
+    return;
+
   const BlockReference recBlockref = regOperPtr->recBlockref;
   const Uint32 sig0 = regOperPtr->tcOperationPtr;
   const Uint32 sig1 = regOperPtr->transid1;
diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
index 67bf09fab10..b1fe0735612 100644
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -1668,7 +1668,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     false,
     ConfigInfo::CI_INT,
     "256K",
-    "16K",
+    "64K",
     STR_VALUE(MAX_INT_RNIL) },
 
   {
@@ -1856,7 +1856,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     false,
     ConfigInfo::CI_INT,
     "1M",
-    "4K",
+    "64K",
    STR_VALUE(MAX_INT_RNIL) },
 
   {
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index baf5c7e5c83..6ecaf54b888 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -1605,6 +1605,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
   bool haveAutoIncrement = false;
   Uint64 autoIncrementValue = 0;
+  Uint32 distKeys= 0;
 
   for(i = 0; i<sz; i++){
     const NdbColumnImpl * col = impl.m_columns[i];
     if(col == 0)
@@ -1616,7 +1617,9 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
       }
       haveAutoIncrement = true;
       autoIncrementValue = col->m_autoIncrementInitialValue;
-    }
+    }
+    if (col->m_distributionKey)
+      distKeys++;
   }
 
   // Check max length of frm data
@@ -1649,7 +1652,10 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
     abort();
   }
 
-  int distKeys= impl.m_noOfDistributionKeys;
+  if (distKeys == impl.m_noOfKeys)
+    distKeys= 0;
+  impl.m_noOfDistributionKeys= distKeys;
+
   for(i = 0; i<sz; i++){
     const NdbColumnImpl * col = impl.m_columns[i];
     if(col == 0)
@@ -1661,7 +1667,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
     tmpAttr.AttributeId = i;
     tmpAttr.AttributeKeyFlag = col->m_pk;
     tmpAttr.AttributeNullableFlag = col->m_nullable;
-    tmpAttr.AttributeDKey = col->m_distributionKey;
+    tmpAttr.AttributeDKey = distKeys ? col->m_distributionKey : 0;
     tmpAttr.AttributeExtType =
       (Uint32)col->m_type;
     tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF);
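[Editor's note: the NdbDictionaryImpl.cpp hunks above count the columns flagged as distribution keys while scanning and, when every key column carries the flag, reset the count to zero — distributing on all primary-key columns is the default and then needs no per-column AttributeDKey flags. A small sketch of that normalization rule, assuming (as the hunk suggests) that impl.m_noOfKeys is the number of primary-key columns; the function name here is invented.]

#include <cassert>
#include <vector>

struct Col { bool pk; bool distributionKey; };

unsigned normalized_dist_keys(const std::vector<Col>& cols)
{
  unsigned distKeys = 0, noOfKeys = 0;
  for (size_t i = 0; i < cols.size(); i++) {
    if (cols[i].pk) noOfKeys++;
    if (cols[i].distributionKey) distKeys++;
  }
  if (distKeys == noOfKeys)   // "distribute on all keys" == the default
    distKeys = 0;
  return distKeys;
}

int main()
{
  Col flagged = {true, true};
  Col plain   = {true, false};

  // Both pk columns flagged -> same as default, flags dropped.
  std::vector<Col> all(2, flagged);
  assert(normalized_dist_keys(all) == 0);

  // Only one of two pk columns flagged -> explicit distribution kept.
  std::vector<Col> partial;
  partial.push_back(flagged);
  partial.push_back(plain);
  assert(normalized_dist_keys(partial) == 1);
  return 0;
}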
diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
index 4200300615d..58a816e3c1a 100644
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -104,8 +104,9 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
 {
   Uint32 tTransId1, tTransId2;
   Uint32 tReqInfo;
-  Uint32 tInterpretInd = theInterpretIndicator;
-
+  Uint8 tInterpretInd = theInterpretIndicator;
+  Uint8 tDirtyIndicator = theDirtyIndicator;
+  Uint32 tTotalCurrAI_Len = theTotalCurrAI_Len;
   theErrorLine = 0;
 
   if (tInterpretInd != 1) {
@@ -123,7 +124,13 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
     if (tStatus != GetValue) {
       setErrorCodeAbort(4116);
       return -1;
-    }//if
+    }
+    else if(unlikely(tDirtyIndicator && tTotalCurrAI_Len == 0))
+    {
+      getValue(NdbDictionary::Column::FRAGMENT);
+      tTotalCurrAI_Len = theTotalCurrAI_Len;
+      assert(theTotalCurrAI_Len);
+    }
   } else {
     setErrorCodeAbort(4005);
     return -1;
@@ -132,6 +139,7 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
     if (prepareSendInterpreted() == -1) {
       return -1;
     }//if
+    tTotalCurrAI_Len = theTotalCurrAI_Len;
   }//if
 
 //-------------------------------------------------------------
@@ -140,7 +148,6 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
 //-------------------------------------------------------------
   TcKeyReq * const tcKeyReq = CAST_PTR(TcKeyReq, theTCREQ->getDataPtrSend());
 
-  Uint32 tTotalCurrAI_Len = theTotalCurrAI_Len;
   Uint32 tTableId = m_currentTable->m_tableId;
   Uint32 tSchemaVersion = m_currentTable->m_version;
 
@@ -188,7 +195,6 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
   tcKeyReq->setStartFlag(tReqInfo, tStartIndicator);
   tcKeyReq->setInterpretedFlag(tReqInfo, tInterpretIndicator);
 
-  Uint8 tDirtyIndicator = theDirtyIndicator;
   OperationType tOperationType = theOperationType;
   Uint32 tTupKeyLen = theTupKeyLen;
   Uint8 abortOption =
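[Editor's note: the NdbOperationExec.cpp change above handles a dirty/committed read that requested no columns: with theTotalCurrAI_Len == 0 the TCKEYREQ would carry an empty attribute list, so the code forces a read of the FRAGMENT pseudo-column. A toy model of just that control flow; the struct and names here are invented, only the guard mirrors the hunk.]

#include <cassert>
#include <string>
#include <vector>

struct Op {
  bool dirty;                            // dirty/committed read requested
  std::vector<std::string> readColumns;  // columns requested via getValue()

  void prepare()
  {
    // A dirty read with no requested columns would yield an empty attribute
    // list, so request a harmless pseudo-column instead (the real code reads
    // NdbDictionary::Column::FRAGMENT).
    if (dirty && readColumns.empty())
      readColumns.push_back("FRAGMENT");
    assert(!dirty || !readColumns.empty());
  }
};

int main()
{
  Op op;
  op.dirty = true;
  op.prepare();
  return op.readColumns.size() == 1 ? 0 : 1;
}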
diff --git a/storage/ndb/test/ndbapi/testNdbApi.cpp b/storage/ndb/test/ndbapi/testNdbApi.cpp
index 4867ea11a9a..ad1b1462ae7 100644
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp
@@ -866,6 +866,112 @@ int runUpdateWithoutKeys(NDBT_Context* ctx, NDBT_Step* step){
 
   return result;
 }
+
+int runReadWithoutGetValue(NDBT_Context* ctx, NDBT_Step* step){
+  int result = NDBT_OK;
+  const NdbDictionary::Table* pTab = ctx->getTab();
+
+  HugoOperations hugoOps(*pTab);
+
+  Ndb* pNdb = GETNDB(step);
+  Uint32 lm;
+
+  for(Uint32 cm= 0; cm < 2; cm++)
+  {
+    for(lm= 0; lm <= NdbOperation::LM_CommittedRead; lm++)
+    {
+      NdbConnection* pCon = pNdb->startTransaction();
+      if (pCon == NULL){
+	pNdb->closeTransaction(pCon);
+	return NDBT_FAILED;
+      }
+
+      NdbOperation* pOp = pCon->getNdbOperation(pTab->getName());
+      if (pOp == NULL){
+	ERR(pCon->getNdbError());
+	pNdb->closeTransaction(pCon);
+	return NDBT_FAILED;
+      }
+
+      if (pOp->readTuple((NdbOperation::LockMode)lm) != 0){
+	pNdb->closeTransaction(pCon);
+	ERR(pOp->getNdbError());
+	return NDBT_FAILED;
+      }
+
+      for(int a = 0; a<pTab->getNoOfColumns(); a++){
+	if (pTab->getColumn(a)->getPrimaryKey() == true){
+	  if(hugoOps.equalForAttr(pOp, a, 1) != 0){
+	    ERR(pCon->getNdbError());
+	    pNdb->closeTransaction(pCon);
+	    return NDBT_FAILED;
+	  }
+	}
+      }
+
+      // Dont' call any getValues
+
+      // Execute should work
+      int check = pCon->execute(cm == 0 ? NoCommit : Commit);
+      if (check == 0){
+	ndbout << "execute worked" << endl;
+      } else {
+	ERR(pCon->getNdbError());
+	result = NDBT_FAILED;
+      }
+
+      pNdb->closeTransaction(pCon);
+    }
+  }
+
+  /**
+   * Now test scans
+   */
+  for(lm= 0; lm <= NdbOperation::LM_CommittedRead; lm++)
+  {
+    NdbConnection* pCon = pNdb->startTransaction();
+    if (pCon == NULL){
+      pNdb->closeTransaction(pCon);
+      return NDBT_FAILED;
+    }
+
+    NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
+    if (pOp == NULL){
+      ERR(pCon->getNdbError());
+      pNdb->closeTransaction(pCon);
+      return NDBT_FAILED;
+    }
+
+    if ((pOp->readTuples((NdbOperation::LockMode)lm)) != 0){
+      pNdb->closeTransaction(pCon);
+      ERR(pOp->getNdbError());
+      return NDBT_FAILED;
+    }
+
+    // Dont' call any getValues
+
+    // Execute should work
+    int check = pCon->execute(NoCommit);
+    if (check == 0){
+      ndbout << "execute worked" << endl;
+    } else {
+      ERR(pCon->getNdbError());
+      result = NDBT_FAILED;
+    }
+
+    int res;
+    while((res = pOp->nextResult()) == 0);
+    pNdb->closeTransaction(pCon);
+
+    if(res != 1)
+      result = NDBT_FAILED;
+  }
+
+  return result;
+}
+
+
 int runCheckGetNdbErrorOperation(NDBT_Context* ctx, NDBT_Step* step){
   int result = NDBT_OK;
   const NdbDictionary::Table* pTab = ctx->getTab();
@@ -1000,6 +1106,12 @@ TESTCASE("NdbErrorOperation",
 	 "Test that NdbErrorOperation is properly set"){
   INITIALIZER(runCheckGetNdbErrorOperation);
 }
+TESTCASE("ReadWithoutGetValue",
+	 "Test that it's possible to perform read wo/ getvalue's\n"){
+  INITIALIZER(runLoadTable);
+  INITIALIZER(runReadWithoutGetValue);
+  FINALIZER(runClearTable);
+}
 NDBT_TESTSUITE_END(testNdbApi);
 
 int main(int argc, const char** argv){
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index b2d809ef6be..e3d7501e6f7 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -512,6 +512,10 @@ max-time: 500
 cmd: testNdbApi
 args: -n UpdateWithoutValues T6
 
+max-time: 500
+cmd: testNdbApi
+args: -n ReadWithoutGetValue
+
 #max-time: 500
 #cmd: testInterpreter
 #args: T1
diff --git a/storage/ndb/test/run-test/ndb-autotest.sh b/storage/ndb/test/run-test/ndb-autotest.sh
index f1c83f079cd..3ba4d1928d5 100755
--- a/storage/ndb/test/run-test/ndb-autotest.sh
+++ b/storage/ndb/test/run-test/ndb-autotest.sh
@@ -1,10 +1,20 @@
 #!/bin/sh
+#############################################################
+# This script created by Jonas does the following          #
+# Cleans up clones and pevious builds, pulls new clones,   #
+# builds, deploys, configures the tests and launches ATRT  #
+#############################################################
+
+###############
+#Script setup #
+##############
 
 save_args=$*
 VERSION="ndb-autotest.sh version 1.04"
 
 DATE=`date '+%Y-%m-%d'`
-export DATE
+HOST=`hostname -s`
+export DATE HOST
 
 set -e
 ulimit -Sc unlimited
@@ -14,21 +24,33 @@ echo "`date` starting: $*"
 RSYNC_RSH=ssh
 export RSYNC_RSH
 
+verbose=0
 do_clone=yes
 build=yes
 deploy=yes
+run_test=yes
+config=yes
+report=yes
 
 clone=5.0-ndb
 RUN="daily-basic daily-devel"
 conf=autotest.conf
 
+############################
+# Read command line entries#
+############################
+
 while [ "$1" ]
 do
 	case "$1" in
 		--no-clone) do_clone="";;
 		--no-build) build="";;
 		--no-deploy) deploy="";;
-		--clone=*) clone=`echo $1 | sed s/--clone=//`;;
+		--no-test) run_test="";;
+		--no-config) config="";;
+		--no-report) report="";;
+		--verbose) verbose=`expr $verbose + 1`;;
+		--clone=*) clone=`echo $1 | sed s/--clone=//`;;
 		--conf=*) conf=`echo $1 | sed s/--conf=//`;;
 		--version) echo $VERSION; exit;;
 		*) RUN=$*;;
@@ -36,6 +58,12 @@ do
 	shift
 done
 
+#################################
+#Make sure the configfile exists#
+#if it does not exit. if it does#
+# (.) load it                   #
+#################################
+
 if [ -f $conf ]
 then
 	. $conf
# +####################################### + if [ -f $LOCK ] then echo "Lock file exists: $LOCK" exit 1 fi +####################################### +# If the lock file does not exist then# +# create it with date and run info # +####################################### + echo "$DATE $RUN" > $LOCK + +############################# +#If any errors here down, we# +# trap them, and remove the # +# Lock file before exit # +############################# + trap "rm -f $LOCK" ERR +# You can add more to this path# +################################ + dst_place=${build_dir}/clone-mysql-$clone-$DATE +######################################### +# Delete source and pull down the latest# +######################################### + if [ "$do_clone" ] then rm -rf $dst_place bk clone $src_clone $dst_place fi +########################################## +# Build the source, make installs, and # +# create the database to be rsynced # +########################################## + if [ "$build" ] then cd $dst_place - rm -rf $run_dir/* - aclocal; autoheader; autoconf; automake - if [ -d storage ] + rm -rf $install_dir/* + if [ -x BUILD/autorun.sh ] then - (cd storage/innobase; aclocal; autoheader; autoconf; automake) - (cd storage/bdb/dist; sh s_all) + ./BUILD/autorun.sh else - (cd innobase; aclocal; autoheader; autoconf; automake) - (cd bdb/dist; sh s_all) + aclocal; autoheader; autoconf; automake + if [ -d storage ] + then + (cd storage/innobase; aclocal; autoheader; autoconf; automake) + (cd storage/bdb/dist; sh s_all) + else + (cd innobase; aclocal; autoheader; autoconf; automake) + (cd bdb/dist; sh s_all) + fi fi - eval $configure --prefix=$run_dir + eval $configure --prefix=$install_dir make make install - (cd $run_dir; ./bin/mysql_install_db) + (cd $install_dir; ./bin/mysql_install_db) # This will be rsynced to all fi -### -# check script version -# -script=$run_dir/mysql-test/ndb/ndb-autotest.sh +################################ +# check script version. If the # +# version is old, replace it # +# and restart # +################################ + +script=$install_dir/mysql-test/ndb/ndb-autotest.sh if [ -x $script ] then $script --version > /tmp/version.$$ @@ -100,21 +196,34 @@ rm -f /tmp/version.$$ if [ $match -eq 0 ] then echo "Incorrect script version...restarting" - cp $run_dir/mysql-test/ndb/ndb-autotest.sh /tmp/at.$$.sh - rm -rf $run_dir $dst_place + cp $install_dir/mysql-test/ndb/ndb-autotest.sh /tmp/at.$$.sh + rm -rf $install_dir $dst_place sh /tmp/at.$$.sh $save_args exit fi -# Check that all interesting files are present -test_dir=$run_dir/mysql-test/ndb +############################################### +# Check that all interesting files are present# +############################################### + +test_dir=$install_dir/mysql-test/ndb atrt=$test_dir/atrt html=$test_dir/make-html-reports.sh -mkconfig=$run_dir/mysql-test/ndb/make-config.sh +mkconfig=$install_dir/mysql-test/ndb/make-config.sh -PATH=$run_dir/bin:$test_dir:$PATH +########################## +#Setup bin and test paths# +########################## + +PATH=$install_dir/bin:$test_dir:$PATH export PATH +########################### +# This will filter out all# +# the host that did not # +# respond. 
@@ -100,21 +196,34 @@ rm -f /tmp/version.$$
 if [ $match -eq 0 ]
 then
 	echo "Incorrect script version...restarting"
-	cp $run_dir/mysql-test/ndb/ndb-autotest.sh /tmp/at.$$.sh
-	rm -rf $run_dir $dst_place
+	cp $install_dir/mysql-test/ndb/ndb-autotest.sh /tmp/at.$$.sh
+	rm -rf $install_dir $dst_place
 	sh /tmp/at.$$.sh $save_args
 	exit
 fi
 
-# Check that all interesting files are present
-test_dir=$run_dir/mysql-test/ndb
+###############################################
+# Check that all interesting files are present#
+###############################################
+
+test_dir=$install_dir/mysql-test/ndb
 atrt=$test_dir/atrt
 html=$test_dir/make-html-reports.sh
-mkconfig=$run_dir/mysql-test/ndb/make-config.sh
+mkconfig=$install_dir/mysql-test/ndb/make-config.sh
 
-PATH=$run_dir/bin:$test_dir:$PATH
+##########################
+#Setup bin and test paths#
+##########################
+
+PATH=$install_dir/bin:$test_dir:$PATH
 export PATH
 
+###########################
+# This will filter out all#
+# the host that did not   #
+# respond. Called below   #
+###########################
+
 filter(){
 	neg=$1
 	shift
@@ -125,18 +234,22 @@ filter(){
 	done
 }
 
-###
-# check ndb_cpcc fail hosts
-#
+############################
+# check ndb_cpcc fail hosts#
+############################
+
 ndb_cpcc $hosts | awk '{ if($1=="Failed"){ print;}}' > /tmp/failed.$DATE
 filter /tmp/failed.$DATE $hosts > /tmp/hosts.$DATE
 hosts=`cat /tmp/hosts.$DATE`
 
+#############################
+# Push bin and test to hosts#
+#############################
+
 if [ "$deploy" ]
 then
     for i in $hosts
-	do
-	rsync -a --delete --force --ignore-errors $run_dir/ $i:$run_dir
+    do
+	rsync -a --delete --force --ignore-errors $install_dir/ $i:$install_dir
 	ok=$?
 	if [ $ok -ne 0 ]
 	then
@@ -145,7 +258,6 @@ then
 	fi
     done
 fi
-rm -f /tmp/build.$DATE.tgz
 
 ###
 # handle scp failed hosts
@@ -154,9 +266,11 @@ filter /tmp/failed.$DATE $hosts > /tmp/hosts.$DATE
 hosts=`cat /tmp/hosts.$DATE`
 cat /tmp/failed.$DATE > /tmp/filter_hosts.$$
 
-###
-# functions for running atrt
-#
+#############################
+# Function for replacing the#
+# choose host with real host#
+# names. Note $$ = PID      #
+#############################
+
 choose(){
 	SRC=$1
 	TMP1=/tmp/choose.$$
@@ -177,16 +291,25 @@ choose(){
 }
 
 choose_conf(){
-    host=`hostname -s`
-    if [ -f $test_dir/conf-$1-$host.txt ]
-    then
+    if [ -f $test_dir/conf-$1-$HOST.txt ]
+    then
+	echo "$test_dir/conf-$1-$HOST.txt"
 	echo "$test_dir/conf-$1-$host.txt"
     elif [ -f $test_dir/conf-$1.txt ]
     then
 	echo "$test_dir/conf-$1.txt"
+    else
+	echo "Unable to find conf file looked for" 1>&2
+	echo "$testdir/conf-$1-host.txt and" 1>&2
+	echo "$testdir/conf-$1.txt" 1>&2
+	exit
     fi
 }
-
+######################################
+# Starts ATRT and gives it the right #
+# command line options. after it     #
+# Gathers results and moves them     #
+######################################
+
 start(){
 	rm -rf report.txt result* log.txt
 	$atrt -v -v -r -R --log-file=log.txt --testcase-file=$test_dir/$2-tests.txt &
@@ -202,17 +325,31 @@ start(){
 	cd ..
 	p2=`pwd`
 	cd ..
-	tar cfz /tmp/res.$$.tgz `basename $p2`/$DATE
-	scp /tmp/res.$$.tgz $result_host:$result_path/res.$DATE.`hostname -s`.$2.$$.tgz
-	rm -f /tmp/res.$$.tgz
+	if [ "$report" ]
+	then
+		tar cfz /tmp/res.$2.$$.tgz `basename $p2`/$DATE
+		scp /tmp/res.$2.$$.tgz \
+		    $result_host:$result_path/res.$DATE.$HOST.$2.$$.tgz
+		rm -f /tmp/res.$2.$$.tgz
+	fi
 }
 
+#########################################
+# Count how many computers we have ready#
+#########################################
+
 count_hosts(){
-    cnt=`grep "CHOOSE_host" $1 |
-	awk '{for(i=1; i<=NF;i++) if(match($i, "CHOOSE_host") > 0) print $i;}' |
-	sort | uniq | wc -l`
+    cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \
+        if(match($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l`
     echo $cnt
 }
+
+#######################################################
+# Calls:	Choose                                #
+#		Choose_host                           #
+#		Count_host                            #
+#		start                                 #
+# for each directory in the $RUN variable             #
+#######################################################
+
 
 p=`pwd`
 for dir in $RUN
@@ -223,26 +360,36 @@ do
 	res_dir=$base_dir/result-$dir-mysql-$clone-$target/$DATE
 	mkdir -p $run_dir $res_dir
-	rm -rf $res_dir/* $run_dir/*
-
-	conf=`choose_conf $dir`
-	count=`count_hosts $conf`
-	avail_hosts=`filter /tmp/filter_hosts.$$ $hosts`
-	avail=`echo $avail_hosts | wc -w`
-	if [ $count -gt $avail ]
+	rm -rf $res_dir/*
+	cd $run_dir
+
+	if [ "$config" ]
 	then
+		rm -rf $run_dir/*
+
+		conf=`choose_conf $dir`
+		count=`count_hosts $conf`
+		avail_hosts=`filter /tmp/filter_hosts.$$ $hosts`
+		avail=`echo $avail_hosts | wc -w`
+		if [ $count -gt $avail ]
+		then
 			echo "Not enough hosts"
 			echo "Needs: $count available: $avail ($avail_hosts)"
 			break;
-	fi
+		fi
 
-	run_hosts=`echo $avail_hosts|awk '{for(i=1;i<='$count';i++)print $i;}'`
-	echo $run_hosts >> /tmp/filter_hosts.$$
+		run_hosts=`echo $avail_hosts| \
+			awk '{for(i=1;i<='$count';i++)print $i;}'`
+		echo $run_hosts >> /tmp/filter_hosts.$$
 
-	cd $run_dir
-	choose $conf $run_hosts > d.tmp
-	$mkconfig d.tmp
-	start $dir-mysql-$clone-$target $dir $res_dir &
+		choose $conf $run_hosts > d.tmp
+		$mkconfig d.tmp
+	fi
+
+	if [ "$run_test" ]
+	then
+		start $dir-mysql-$clone-$target $dir $res_dir &
+	fi
 done
 cd $p
 rm /tmp/filter_hosts.$$
diff --git a/storage/ndb/test/tools/hugoLoad.cpp b/storage/ndb/test/tools/hugoLoad.cpp
index 7d9d0dafaff..1a229169650 100644
--- a/storage/ndb/test/tools/hugoLoad.cpp
+++ b/storage/ndb/test/tools/hugoLoad.cpp
@@ -30,10 +30,12 @@ int main(int argc, const char** argv){
   const char* _tabname = NULL;
   int _help = 0;
   int _batch = 512;
-
+  const char* db = 0;
+
   struct getargs args[] = {
     { "records", 'r', arg_integer, &_records, "Number of records", "recs" },
     { "batch", 'b', arg_integer, &_batch, "Number of operations in each transaction", "batch" },
+    { "database", 'd', arg_string, &db, "Database", "" },
     { "usage", '?', arg_flag, &_help, "Print help", "" }
   };
   int num_args = sizeof(args) / sizeof(args[0]);
@@ -59,7 +61,7 @@ int main(int argc, const char** argv){
   {
     return NDBT_ProgramExit(NDBT_FAILED);
   }
-  Ndb MyNdb(&con, "TEST_DB" );
+  Ndb MyNdb( &con, db ? db : "TEST_DB" );
 
   if(MyNdb.init() != 0){
     ERR(MyNdb.getNdbError());
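[Editor's note: the hugoLoad.cpp hunk above and the hugoPkUpdate/hugoScanRead/hugoScanUpdate hunks below all add the same optional database argument and fall back with "db ? db : \"TEST_DB\"". The same defaulting pattern in a stand-alone form, using plain argv scanning instead of NDB's internal getargs parser; the option names mirror the patch.]

#include <cstdio>
#include <cstring>

int main(int argc, char** argv)
{
  const char* db = 0;                    // stays unset unless -d/--database given
  for (int i = 1; i < argc - 1; i++)
    if (!std::strcmp(argv[i], "-d") || !std::strcmp(argv[i], "--database"))
      db = argv[i + 1];

  // Exactly the expression the tools use: explicit value, else the default.
  std::printf("connecting to database: %s\n", db ? db : "TEST_DB");
  return 0;
}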
diff --git a/storage/ndb/test/tools/hugoPkUpdate.cpp b/storage/ndb/test/tools/hugoPkUpdate.cpp
index 6e7ff39f903..7d46ae95c29 100644
--- a/storage/ndb/test/tools/hugoPkUpdate.cpp
+++ b/storage/ndb/test/tools/hugoPkUpdate.cpp
@@ -33,7 +33,7 @@ int main(int argc, const char** argv){
   int _loops = 1;
   int _abort = 0;
   int _batch = 0;
-  const char* _tabname = NULL;
+  const char* _tabname = NULL, *db = 0;
   int _help = 0;
 
   struct getargs args[] = {
@@ -41,7 +41,8 @@ int main(int argc, const char** argv){
     { "loops", 'l', arg_integer, &_loops, "number of times to run this program(0=infinite loop)", "loops" },
     // { "batch", 'b', arg_integer, &_batch, "batch value", "batch" },
     { "records", 'r', arg_integer, &_records, "Number of records", "records" },
-    { "usage", '?', arg_flag, &_help, "Print help", "" }
+    { "usage", '?', arg_flag, &_help, "Print help", "" },
+    { "database", 'd', arg_string, &db, "Database", "" }
   };
   int num_args = sizeof(args) / sizeof(args[0]);
   int optind = 0;
@@ -62,7 +63,7 @@ int main(int argc, const char** argv){
   {
     return NDBT_ProgramExit(NDBT_FAILED);
   }
-  Ndb MyNdb(&con, "TEST_DB" );
+  Ndb MyNdb( &con, db ? db : "TEST_DB" );
 
   if(MyNdb.init() != 0){
     ERR(MyNdb.getNdbError());
diff --git a/storage/ndb/test/tools/hugoScanRead.cpp b/storage/ndb/test/tools/hugoScanRead.cpp
index 4f76362ecab..a345bb88d0e 100644
--- a/storage/ndb/test/tools/hugoScanRead.cpp
+++ b/storage/ndb/test/tools/hugoScanRead.cpp
@@ -33,7 +33,7 @@ int main(int argc, const char** argv){
   int _loops = 1;
   int _abort = 0;
   int _parallelism = 1;
-  const char* _tabname = NULL;
+  const char* _tabname = NULL, *db = 0;
   int _help = 0;
   int lock = NdbOperation::LM_Read;
   int sorted = 0;
@@ -45,7 +45,8 @@ int main(int argc, const char** argv){
     { "records", 'r', arg_integer, &_records, "Number of records", "recs" },
     { "usage", '?', arg_flag, &_help, "Print help", "" },
     { "lock", 'm', arg_integer, &lock, "lock mode", "" },
-    { "sorted", 's', arg_flag, &sorted, "sorted", "" }
+    { "sorted", 's', arg_flag, &sorted, "sorted", "" },
+    { "database", 'd', arg_string, &db, "Database", "" }
   };
   int num_args = sizeof(args) / sizeof(args[0]);
   int optind = 0;
@@ -66,7 +67,7 @@ int main(int argc, const char** argv){
   {
     return NDBT_ProgramExit(NDBT_FAILED);
   }
-  Ndb MyNdb(&con, "TEST_DB" );
+  Ndb MyNdb( &con, db ? db : "TEST_DB" );
 
   if(MyNdb.init() != 0){
     ERR(MyNdb.getNdbError());
diff --git a/storage/ndb/test/tools/hugoScanUpdate.cpp b/storage/ndb/test/tools/hugoScanUpdate.cpp
index 88c343f8fd3..6960fa44b96 100644
--- a/storage/ndb/test/tools/hugoScanUpdate.cpp
+++ b/storage/ndb/test/tools/hugoScanUpdate.cpp
@@ -33,7 +33,7 @@ int main(int argc, const char** argv){
   int _loops = 1;
   int _parallelism = 1;
   int _ver2 = 0;
-  const char* _tabname = NULL;
+  const char* _tabname = NULL, *db = 0;
   int _help = 0;
 
   struct getargs args[] = {
@@ -42,7 +42,8 @@ int main(int argc, const char** argv){
     { "records", 'r', arg_integer, &_records, "Number of records", "recs" },
     { "ver2", '2', arg_flag, &_ver2, "Use version 2 of scanUpdateRecords", "" },
     { "ver2", '1', arg_negative_flag, &_ver2, "Use version 1 of scanUpdateRecords (default)", "" },
-    { "usage", '?', arg_flag, &_help, "Print help", "" }
+    { "usage", '?', arg_flag, &_help, "Print help", "" },
+    { "database", 'd', arg_string, &db, "Database", "" }
   };
   int num_args = sizeof(args) / sizeof(args[0]);
   int optind = 0;
@@ -63,7 +64,7 @@ int main(int argc, const char** argv){
   {
     return NDBT_ProgramExit(NDBT_FAILED);
   }
-  Ndb MyNdb(&con, "TEST_DB" );
+  Ndb MyNdb( &con, db ? db : "TEST_DB" );
 
   if(MyNdb.init() != 0){
     ERR(MyNdb.getNdbError());
@@ -100,6 +101,7 @@ int main(int argc, const char** argv){
       return NDBT_ProgramExit(NDBT_FAILED);
     }
     i++;
+    //NdbSleep_MilliSleep(300);
   }
 
   return NDBT_ProgramExit(NDBT_OK);
diff --git a/storage/ndb/tools/Makefile.am b/storage/ndb/tools/Makefile.am
index 958f28bae02..db6037df037 100644
--- a/storage/ndb/tools/Makefile.am
+++ b/storage/ndb/tools/Makefile.am
@@ -30,7 +30,8 @@ ndb_restore_SOURCES = restore/restore_main.cpp \
 			restore/consumer.cpp \
 			restore/consumer_restore.cpp \
 			restore/consumer_printer.cpp \
-			restore/Restore.cpp
+			restore/Restore.cpp \
+			../test/src/NDBT_ResultRow.cpp $(tools_common_sources)
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index fa616ee8fee..81a8d4aba41 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -925,19 +925,12 @@ operator<<(NdbOut& ndbout, const LogEntry& logE)
   return ndbout;
 }
 
+#include <NDBT.hpp>
 NdbOut &
 operator<<(NdbOut& ndbout, const TableS & table){
-  ndbout << endl << "Table: " << table.getTableName() << endl;
-  for (int j = 0; j < table.getNoOfAttributes(); j++)
-  {
-    const AttributeDesc * desc = table[j];
-    ndbout << desc->m_column->getName() << ": "
-	   << (Uint32) desc->m_column->getType();
-    ndbout << " key: " << (Uint32) desc->m_column->getPrimaryKey();
-    ndbout << " array: " << desc->arraySize;
-    ndbout << " size: " << desc->size << endl;
-  } // for
+
+  ndbout << (* (NDBT_Table*)table.m_dictTable) << endl;
+
   return ndbout;
 }
 
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index 7b7c0c258d9..e1d1a2209ce 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -4,8 +4,10 @@
 %else
 %define release 0.glibc23
 %endif
+%define license GPL
 %define mysqld_user mysql
 %define server_suffix -standard
+%define mysqldatadir /var/lib/mysql
 
 # We don't package all files installed into the build root by intention -
 # See BUG#998 for details.
@@ -16,11 +18,9 @@
 Name: MySQL
 Summary: MySQL: a very fast and reliable SQL database server
 Group: Applications/Databases
-Summary(pt_BR): MySQL: Um servidor SQL rápido e confiável.
-Group(pt_BR): Aplicações/Banco_de_Dados
 Version: @MYSQL_NO_DASH_VERSION@
 Release: %{release}
-License: GPL
+License: %{license}
 Source: http://www.mysql.com/Downloads/MySQL-@MYSQL_BASE_VERSION@/mysql-%{mysql_version}.tar.gz
 URL: http://www.mysql.com/
 Packager: Lenz Grimmer <build@mysql.com>
@@ -54,11 +54,8 @@ news and information about the MySQL software. Also please see the
 documentation and the manual for more information.
 
 %package server
-Release: %{release}
 Summary: MySQL: a very fast and reliable SQL database server
 Group: Applications/Databases
-Summary(pt_BR): MySQL: Um servidor SQL rápido e confiável.
-Group(pt_BR): Aplicações/Banco_de_Dados
 Requires: fileutils sh-utils
 Provides: msqlormysql mysql-server mysql MySQL
 Obsoletes: MySQL mysql mysql-server
@@ -88,11 +85,8 @@ If you want to access and work with the database, you have to install
 package "MySQL-client" as well!
 
 %package client
-Release: %{release}
 Summary: MySQL - Client
 Group: Applications/Databases
-Summary(pt_BR): MySQL - Cliente
-Group(pt_BR): Aplicações/Banco_de_Dados
 Obsoletes: mysql-client
 Provides: mysql-client
 
@@ -101,11 +95,7 @@ This package contains the standard MySQL clients and administration tools.
 
 %{see_base}
 
-%description client -l pt_BR
-Este pacote contém os clientes padrão para o MySQL.
-
 %package ndb-storage
-Release: %{release}
 Summary: MySQL - ndbcluster storage engine
 Group: Applications/Databases
 
@@ -119,7 +109,6 @@ with the MySQL Max server.
 %{see_base}
 
 %package ndb-management
-Release: %{release}
 Summary: MySQL - ndbcluster storage engine management
 Group: Applications/Databases
 
@@ -131,7 +120,6 @@ one computer in the cluster.
 %{see_base}
 
 %package ndb-tools
-Release: %{release}
 Summary: MySQL - ndbcluster storage engine basic tools
 Group: Applications/Databases
 
@@ -141,7 +129,6 @@ This package contains ndbcluster storage engine basic tools.
 %{see_base}
 
 %package ndb-extra
-Release: %{release}
 Summary: MySQL - ndbcluster storage engine extra tools
 Group: Applications/Databases
 
@@ -152,12 +139,9 @@ They should be used with caution.
 %{see_base}
 
 %package bench
-Release: %{release}
 Requires: %{name}-client perl-DBI perl
 Summary: MySQL - Benchmarks and test system
 Group: Applications/Databases
-Summary(pt_BR): MySQL - Medições de desempenho
-Group(pt_BR): Aplicações/Banco_de_Dados
 Provides: mysql-bench
 Obsoletes: mysql-bench
 
@@ -166,15 +150,9 @@ This package contains MySQL benchmark scripts and data.
 
 %{see_base}
 
-%description bench -l pt_BR
-Este pacote contém medições de desempenho de scripts e dados do MySQL.
-
 %package devel
-Release: %{release}
 Summary: MySQL - Development header files and libraries
 Group: Applications/Databases
-Summary(pt_BR): MySQL - Medições de desempenho
-Group(pt_BR): Aplicações/Banco_de_Dados
 Provides: mysql-devel
 Obsoletes: mysql-devel
 
@@ -184,12 +162,7 @@ necessary to develop MySQL client applications.
 
 %{see_base}
 
-%description devel -l pt_BR
-Este pacote contém os arquivos de cabeçalho (header files) e bibliotecas
-necessárias para desenvolver aplicações clientes do MySQL.
-
 %package shared
-Release: %{release}
 Summary: MySQL - Shared libraries
 Group: Applications/Databases
 
@@ -198,7 +171,6 @@ This package contains the shared libraries (*.so*) which certain
 languages and applications need to dynamically load and use MySQL.
 
 %package Max
-Release: %{release}
 Summary: MySQL - server with extended functionality
 Group: Applications/Databases
 Provides: mysql-Max
@@ -222,12 +194,9 @@ the standard MySQL package.
 
 Please note that this is a dynamically linked binary!
 
 %package embedded
-Release: %{release}
 Requires: %{name}-devel
 Summary: MySQL - embedded library
 Group: Applications/Databases
-Summary(pt_BR): MySQL - Medições de desempenho
-Group(pt_BR): Aplicações/Banco_de_Dados
 Obsoletes: mysql-embedded
 
 %description embedded
@@ -271,7 +240,7 @@ sh -c  "PATH=\"${MYSQL_BUILD_PATH:-$PATH}\" \
 	--libdir=%{_libdir} \
 	--sysconfdir=%{_sysconfdir} \
 	--datadir=%{_datadir} \
-	--localstatedir=/var/lib/mysql \
+	--localstatedir=%{mysqldatadir} \
 	--infodir=%{_infodir} \
 	--includedir=%{_includedir} \
 	--mandir=%{_mandir} \
@@ -311,7 +280,7 @@ mkdir -p $RBR%{_libdir}/mysql
 PATH=${MYSQL_BUILD_PATH:-/bin:/usr/bin}
 export PATH
 
-# Build the 4.0 Max binary (includes BDB and UDFs and therefore
+# Build the Max binary (includes BDB and UDFs and therefore
 # cannot be linked statically against the patched glibc)
 
 # Use gcc for C and C++ code (to avoid a dependency on libstdc++ and
@@ -336,8 +305,7 @@ BuildMySQL "--enable-shared \
 		--with-comment=\"MySQL Community Edition - Max (GPL)\" \
 		--with-server-suffix='-Max'"
 
-# Save everything for debug
-# tar cf $RBR/all.tar .
+make test
 
 # Save mysqld-max
 mv sql/mysqld sql/mysqld-max
@@ -387,13 +355,15 @@ BuildMySQL "--disable-shared \
 		--without-openssl"
 nm --numeric-sort sql/mysqld > sql/mysqld.sym
 
+make test
+
 %install
 RBR=$RPM_BUILD_ROOT
 MBD=$RPM_BUILD_DIR/mysql-%{mysql_version}
 
 # Ensure that needed directories exists
 install -d $RBR%{_sysconfdir}/{logrotate.d,init.d}
-install -d $RBR/var/lib/mysql/mysql
+install -d $RBR%{mysqldatadir}/mysql
 install -d $RBR%{_datadir}/{sql-bench,mysql-test}
 install -d $RBR%{_includedir}
 install -d $RBR%{_libdir}
@@ -447,7 +417,7 @@ then
 fi
 
 %post server
-mysql_datadir=/var/lib/mysql
+mysql_datadir=%{mysqldatadir}
 
 # Create data directory if needed
 if test ! -d $mysql_datadir; then mkdir -m755 $mysql_datadir; fi
@@ -467,17 +437,17 @@ fi
 
 # Create a MySQL user. Do not report any problems if it already
 # exists. This is redhat specific and should be handled more portable
-useradd -M -r -d $mysql_datadir -s /bin/bash -c "MySQL server" mysql 2> /dev/null || true
+useradd -M -r -d $mysql_datadir -s /bin/bash -c "MySQL server" %{mysqld_user} 2> /dev/null || true
 
 # Change permissions so that the user that will run the MySQL daemon
 # owns all database files.
-chown -R mysql $mysql_datadir
+chown -R %{mysqld_user} $mysql_datadir
 
 # Initiate databases
-mysql_install_db --rpm --user=mysql
+%{_bindir}/mysql_install_db --rpm --user=%{mysqld_user}
 
 # Change permissions again to fix any new files.
-chown -R mysql $mysql_datadir
+chown -R %{mysqld_user} $mysql_datadir
 
 # Fix permissions for the permission database so that only the user
 # can read them.
@@ -671,6 +641,7 @@ fi
 %defattr(-, root, root, 0755)
 %attr(-, root, root) %{_datadir}/sql-bench
 %attr(-, root, root) %{_datadir}/mysql-test
+%attr(755, rott, root) %{_bindir}/mysql_client_test
 %attr(755, root, root) %{_bindir}/mysqltestmanager
 %attr(755, root, root) %{_bindir}/mysqltestmanager-pwgen
 %attr(755, root, root) %{_bindir}/mysqltestmanagerc
@@ -688,6 +659,17 @@ fi
 # itself - note that they must be ordered by date (important when
 # merging BK trees)
 %changelog
+* Mon Jun 06 2005 Lenz Grimmer <lenz@mysql.com>
+
+- added mysql_client_test to the "bench" subpackage (BUG 10676)
+
+* Wed Jun 01 2005 Lenz Grimmer <lenz@mysql.com>
+
+- use "mysqldatadir" variable instead of hard-coding the path multiple times
+- use the "mysqld_user" variable on all occasions a user name is referenced
+- removed (incomplete) Brazilian translations
+- removed redundant release tags from the subpackage descriptions
+
 * Wed May 25 2005 Joerg Bruehe <joerg@mysql.com>
 
 - Added a "make clean" between separate calls to "BuildMySQL".