Fix a bug where dumping too many files to disk prevented merging
and corrupted the RdbBase::m_files[] array and its associated
parallel arrays.
This commit is contained in:
Matt Wells
2015-11-17 09:52:41 -08:00
committed by Brian Rasmusson
parent f10589ed4e
commit 185dc25631
3 changed files with 19 additions and 5 deletions

14
Rdb.cpp

@ -1521,6 +1521,20 @@ bool Rdb::dumpCollLoop ( ) {
"available secondary id for titledb: %s." ,
mstrerror(g_errno) );
}
// if we add too many files then we can not merge, because the merge op
// needs to add a file and it calls addNewFile() too
static int32_t s_flag = 0;
if ( base->m_numFiles + 1 >= MAX_RDB_FILES ) {
if ( s_flag < 10 )
log("db: could not dump tree to disk for cn="
"%i %s because it has %"INT32" files on disk. "
"Need to wait for merge operation.",
(int)m_dumpCollnum,m_dbname,base->m_numFiles);
s_flag++;
goto loop;
}
// this file must not exist already, we are dumping the tree into it
m_fn = base->addNewFile ( id2 ) ;
if ( m_fn < 0 ) return log(LOG_LOGIC,"db: rdb: Failed to add new file "

@ -1833,7 +1833,7 @@ void RdbBase::gotTokenForMerge ( ) {
// sanity check
if ( n <= 1 ) {
log(LOG_LOGIC,"merge: attemptMerge: Resuming. bad "
"engineer");
"engineer for %s coll=%s",m_dbname,m_coll);
//g_msg35.releaseToken();
return false;
}

@ -338,10 +338,10 @@ class RdbBase {
// . older files are listed first (lower fileIds)
// . filenames should include the directory (full filenames)
// . TODO: RdbMgr should control what rdb gets merged?
BigFile *m_files [ MAX_RDB_FILES ];
int32_t m_fileIds [ MAX_RDB_FILES ];
int32_t m_fileIds2 [ MAX_RDB_FILES ]; // for titledb/tfndb linking
RdbMap *m_maps [ MAX_RDB_FILES ];
BigFile *m_files [ MAX_RDB_FILES+1 ];
int32_t m_fileIds [ MAX_RDB_FILES+1 ];
int32_t m_fileIds2 [ MAX_RDB_FILES+1 ]; // for titledb/tfndb linking
RdbMap *m_maps [ MAX_RDB_FILES+1 ];
int32_t m_numFiles;
// this class contains a ptr to us