forked from Mirrors/privacore-open-source-search-engine
Bring back max mem control into master controls.

- It is useful to limit per-process memory usage to keep the OOM killer away, because we cannot save our data if we get killed.
- Overhaul DiskPageCache to just use RdbCache. Much simpler and faster, but disabled for now until it is debugged more.
- Reduce the minimum files to merge for crawlbot collections so they stay more tightly merged and conserve fds and memory.
- Improve logDebugDisk messages.
- Overhaul the File.cpp fd pool. It is now much faster, much simpler, and uses no extra memory. It could be sped up a little by using a linked list, but that is probably not significant enough to warrant doing right now.
- Increase the mem pointer table from 3M to 8M slots. It should really be made dynamic, though.
- Fix a core from a NULL msg20s[0]->m_r.
- Only call attemptMergeAll once every 60 seconds, and do not attempt a merge if one is already in progress.
Files changed: BigFile.cpp, BigFile.h, Blaster.cpp, Cachedb.cpp, Collectiondb.cpp, Conf.cpp, Conf.h, File.cpp, File.h, HttpServer.cpp, Indexdb.cpp, Linkdb.cpp, Mem.cpp, Mem.h, Monitordb.cpp, Msg40.cpp, OldDiskPageCache.cpp, OldDiskPageCache.h, Parms.cpp, PingServer.cpp, Posdb.cpp, Rdb.cpp, RdbBase.cpp, RdbCache.h, RdbDump.cpp, RdbMap.cpp, RdbScan.cpp, Spider.cpp, Threads.cpp, main.cpp
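The per-process cap matters because the kernel's OOM killer gives no chance to save state. Below is a minimal sketch of the kind of check this enables (editor's illustration with simplified, hypothetical names; the real accounting is in operator new and Mem::gbmalloc in the Mem.cpp diff further down, which compare m_used against g_conf.m_maxMem):

    // Illustrative only: refuse an allocation instead of letting the
    // process blow past its budget and get OOM-killed mid-save.
    #include <cstdio>
    #include <cstdlib>

    static long long s_maxMem = 8000000000LL; // plays the role of g_conf.m_maxMem
    static long long s_used   = 0;

    void *cappedMalloc ( size_t size ) {
            if ( s_used + (long long)size >= s_maxMem ) {
                    fprintf ( stderr , "mem: malloc(%zu): out of memory\n" , size );
                    return NULL; // caller sees a normal failure; the process survives
            }
            void *p = malloc ( size );
            if ( p ) s_used += (long long)size;
            return p;
    }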
BigFile.cpp (126 changed lines)
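The BigFile::readwrite() hunks below switch the read path to the simplified cache call m_pc->getPages ( m_vfd , offset , size ), which either hands back a buffer covering the whole request or returns NULL. A rough sketch of that contract (editor's illustration; getCachedPages and readViaCache are stand-in names, not project functions):

    #include <cstdint>

    // stand-in for the RdbCache-backed lookup: a full hit returns a heap
    // buffer holding [offset, offset+size), any miss returns NULL.
    // this stub always misses.
    static char *getCachedPages ( int64_t vfd , int64_t offset , int32_t size ) {
            (void)vfd; (void)offset; (void)size;
            return NULL;
    }

    // mirrors the shape of the new read path: try the cache first and only
    // fall through to a disk read when the whole range is not cached.
    // a hit is what fstate->m_inPageCache = true records in the diff below.
    static bool readViaCache ( int64_t vfd , int64_t offset , int32_t size ,
                               char **out ) {
            char *buf = getCachedPages ( vfd , offset , size );
            if ( ! buf ) return false; // miss: caller does the real read
            *out = buf;                // hit: skip disk entirely
            return true;
    }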
@ -176,6 +176,8 @@ bool BigFile::doesPartExist ( int32_t n ) {
|
||||
return exists;
|
||||
}
|
||||
|
||||
static int64_t s_vfd = 0;
|
||||
|
||||
// . overide File::open so we can set m_numParts
|
||||
// . set maxFileSize when opening a new file for writing and using
|
||||
// DiskPageCache
|
||||
@ -192,10 +194,10 @@ bool BigFile::open ( int flags , class DiskPageCache *pc ,
|
||||
// . this returns our "virtual fd", not the same as File::m_vfd
|
||||
// . returns -1 and sets g_errno on failure
|
||||
// . we pass m_vfd to getPages() and addPages()
|
||||
if ( m_pc ) {
|
||||
if ( maxFileSize == -1 ) maxFileSize = getFileSize();
|
||||
m_vfd = m_pc->getVfd ( maxFileSize, m_vfdAllowed );
|
||||
g_errno = 0;
|
||||
if ( m_pc && m_vfd == -1 ) {
|
||||
//if ( maxFileSize == -1 ) maxFileSize = getFileSize();
|
||||
m_vfd = ++s_vfd;
|
||||
//g_errno = 0;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -221,7 +223,7 @@ void BigFile::makeFilename_r ( char *baseFilename ,
|
||||
|
||||
// . get the fd of the nth file
|
||||
// . will try to open the file if it hasn't yet been opened
|
||||
int BigFile::getfd ( int32_t n , bool forReading , int32_t *vfd ) {
|
||||
int BigFile::getfd ( int32_t n , bool forReading ) { // , int64_t *vfd ) {
|
||||
// boundary check
|
||||
if ( n >= MAX_PART_FILES )
|
||||
return log("disk: Part number %"INT32" > %"INT32". fd not available.",
|
||||
@ -237,14 +239,14 @@ int BigFile::getfd ( int32_t n , bool forReading , int32_t *vfd ) {
|
||||
f = m_files[n];
|
||||
}
|
||||
// open it if not opened
|
||||
if ( ! f->isOpen() ) {
|
||||
if ( ! f->calledOpen() ) {
|
||||
if ( ! f->open ( m_flags , m_permissions ) ) {
|
||||
log("disk: Failed to open file part #%"INT32".",n);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
// set it virtual fd, too
|
||||
if ( vfd ) *vfd = f->m_vfd;
|
||||
//if ( vfd ) *vfd = f->m_vfd;
|
||||
// get it's file descriptor
|
||||
int fd = f->getfd ( ) ;
|
||||
if ( fd >= -1 ) return fd;
|
||||
@ -406,29 +408,32 @@ bool BigFile::readwrite ( void *buf ,
|
||||
int32_t allocSize;
|
||||
// reset this
|
||||
fstate->m_errno = 0;
|
||||
fstate->m_inPageCache = false;
|
||||
// . try to get as much as we can from page cache first
|
||||
// . the vfd of the big file will be the vfd of its last File class
|
||||
if ( ! doWrite && m_pc && allowPageCache ) {
|
||||
int32_t oldOff = offset;
|
||||
//int32_t oldOff = offset;
|
||||
// we have to set these so RdbScan doesn't freak out if we
|
||||
// have it all cached and return without hitting disk
|
||||
fstate->m_bytesDone = size;
|
||||
fstate->m_bytesToGo = size;
|
||||
// sanity
|
||||
if ( m_vfd == -1 ) { char *xx=NULL;*xx=0; }
|
||||
//log("getting pages off=%"INT64" size=%"INT32"",offset,size);
|
||||
// now we pass in a ptr to the buf ptr, because if buf is NULL
|
||||
// this will allocate one for us if it has some pages in the
|
||||
// cache that we can use.
|
||||
m_pc->getPages (m_vfd,(char **)&buf,size,offset,&size,&offset,
|
||||
&allocBuf,&allocSize,allocOff);
|
||||
char *readBuf = m_pc->getPages ( m_vfd, offset, size );
|
||||
//log("got pages off=%"INT64" size=%"INT32"",offset,size);
|
||||
bufOff = offset - oldOff;
|
||||
//bufOff = offset - oldOff;
|
||||
// comment out for test
|
||||
if ( size == 0 ) {
|
||||
if ( readBuf ) {
|
||||
// let caller/RdbScan know about the newly alloc'd buf
|
||||
fstate->m_buf = (char *)buf;
|
||||
fstate->m_allocBuf = allocBuf;
|
||||
fstate->m_allocSize = allocSize;
|
||||
fstate->m_allocOff = allocOff;
|
||||
fstate->m_buf = (char *)readBuf;
|
||||
fstate->m_allocBuf = readBuf;
|
||||
fstate->m_allocSize = size;
|
||||
fstate->m_allocOff = 0;
|
||||
fstate->m_inPageCache = true;
|
||||
return true;
|
||||
}
|
||||
// check
|
||||
@ -494,8 +499,8 @@ bool BigFile::readwrite ( void *buf ,
|
||||
// &fstate->m_vfd2);
|
||||
fstate->m_fd1 = -3;
|
||||
fstate->m_fd2 = -3;
|
||||
fstate->m_vfd1 = -3;
|
||||
fstate->m_vfd2 = -3;
|
||||
// fstate->m_vfd1 = -3;
|
||||
// fstate->m_vfd2 = -3;
|
||||
// . if we are writing, prevent these fds from being closed on us
|
||||
// by File::closedLeastUsed(), because the fd could then be re-opened
|
||||
// by someone else doing a write and we end up writing to THAT FILE!
|
||||
@ -504,14 +509,12 @@ bool BigFile::readwrite ( void *buf ,
|
||||
if ( doWrite ) {
|
||||
// actually have to do the open here for writing so it
|
||||
// can prevent the fds from being closed on us
|
||||
fstate->m_fd1 = getfd ( fstate->m_filenum1 , !doWrite,
|
||||
&fstate->m_vfd1);
|
||||
fstate->m_fd2 = getfd ( fstate->m_filenum2 , !doWrite,
|
||||
&fstate->m_vfd2);
|
||||
fstate->m_fd1 = getfd ( fstate->m_filenum1 , !doWrite);
|
||||
fstate->m_fd2 = getfd ( fstate->m_filenum2 , !doWrite);
|
||||
//File *f1 = m_files [ fstate->m_filenum1 ];
|
||||
//File *f2 = m_files [ fstate->m_filenum2 ];
|
||||
enterWriteMode( fstate->m_vfd1 );
|
||||
enterWriteMode( fstate->m_vfd2 );
|
||||
enterWriteMode( fstate->m_fd1 );
|
||||
enterWriteMode( fstate->m_fd2 );
|
||||
fstate->m_closeCount1 = getCloseCount_r ( fstate->m_fd1 );
|
||||
fstate->m_closeCount2 = getCloseCount_r ( fstate->m_fd2 );
|
||||
}
|
||||
@ -603,10 +606,8 @@ bool BigFile::readwrite ( void *buf ,
|
||||
// come here if we haven't spawned a thread
|
||||
skipThread:
|
||||
// if there was no room in the thread queue, then we must do this here
|
||||
fstate->m_fd1 = getfd ( fstate->m_filenum1 , !doWrite ,
|
||||
&fstate->m_vfd1);
|
||||
fstate->m_fd2 = getfd ( fstate->m_filenum2 , !doWrite ,
|
||||
&fstate->m_vfd2);
|
||||
fstate->m_fd1 = getfd ( fstate->m_filenum1 , !doWrite );
|
||||
fstate->m_fd2 = getfd ( fstate->m_filenum2 , !doWrite );
|
||||
fstate->m_closeCount1 = getCloseCount_r ( fstate->m_fd1 );
|
||||
fstate->m_closeCount2 = getCloseCount_r ( fstate->m_fd2 );
|
||||
// clear g_errno from the failed thread spawn
|
||||
@ -720,8 +721,8 @@ bool BigFile::readwrite ( void *buf ,
|
||||
//File *f2 = m_files [ fstate->m_filenum2 ];
|
||||
//f1->exitWriteMode();
|
||||
//f2->exitWriteMode();
|
||||
exitWriteMode( fstate->m_vfd1 );
|
||||
exitWriteMode( fstate->m_vfd2 );
|
||||
exitWriteMode( fstate->m_fd1 );
|
||||
exitWriteMode( fstate->m_fd2 );
|
||||
}
|
||||
|
||||
// set this up here
|
||||
@ -765,9 +766,9 @@ bool BigFile::readwrite ( void *buf ,
|
||||
// store read/written pages into page cache
|
||||
if ( ! g_errno && fstate->m_pc )
|
||||
fstate->m_pc->addPages ( fstate->m_vfd ,
|
||||
fstate->m_buf ,
|
||||
fstate->m_bytesDone ,
|
||||
fstate->m_offset ,
|
||||
fstate->m_bytesDone ,
|
||||
fstate->m_buf ,
|
||||
fstate->m_niceness );
|
||||
// now log our stuff here
|
||||
if ( g_errno && g_errno != EBADENGINEER )
|
||||
@ -823,8 +824,8 @@ void doneWrapper ( void *state , ThreadEntry *t ) {
|
||||
//File *f2 = THIS->m_files [ fstate->m_filenum2 ];
|
||||
//f1->exitWriteMode();
|
||||
//f2->exitWriteMode();
|
||||
exitWriteMode( fstate->m_vfd1 );
|
||||
exitWriteMode( fstate->m_vfd2 );
|
||||
exitWriteMode( fstate->m_fd1 );
|
||||
exitWriteMode( fstate->m_fd2 );
|
||||
}
|
||||
// if it read less than 8MB/s bitch
|
||||
int64_t took = fstate->m_doneTime - fstate->m_startTime;
|
||||
@ -849,9 +850,9 @@ void doneWrapper ( void *state , ThreadEntry *t ) {
|
||||
// reference it...
|
||||
if ( ! g_errno && fstate->m_pc )
|
||||
fstate->m_pc->addPages ( fstate->m_vfd ,
|
||||
fstate->m_buf ,
|
||||
fstate->m_bytesDone ,
|
||||
fstate->m_offset ,
|
||||
fstate->m_bytesDone ,
|
||||
fstate->m_buf ,
|
||||
fstate->m_niceness );
|
||||
|
||||
// add the stat
|
||||
@ -908,12 +909,13 @@ void doneWrapper ( void *state , ThreadEntry *t ) {
|
||||
if ( g_errno && g_errno != EDISKSTUCK ) {
|
||||
//int fd1 = fstate->m_fd1;
|
||||
//int fd2 = fstate->m_fd2;
|
||||
int vfd1 = fstate->m_vfd1;
|
||||
int vfd2 = fstate->m_vfd2;
|
||||
int ofd1 = getfdFromVfd(vfd1);
|
||||
int ofd2 = getfdFromVfd(vfd2);
|
||||
log(tt,"disk: vfd1=%i s_fds[%i]=%i.",vfd1,vfd1,ofd1);
|
||||
log(tt,"disk: vfd2=%i s_fds[%i]=%i.",vfd2,vfd2,ofd2);
|
||||
//int vfd1 = fstate->m_vfd1;
|
||||
//int vfd2 = fstate->m_vfd2;
|
||||
//int ofd1 = getfdFromVfd(vfd1);
|
||||
//int ofd2 = getfdFromVfd(vfd2);
|
||||
//log(tt,"disk: vfd1=%i s_fds[%i].",vfd1,vfd1);//,ofd1);
|
||||
//log(tt,"disk: vfd2=%i s_fds[%i].",vfd2,vfd2);//,ofd2);
|
||||
log("disk: nondstuckerr=%s",mstrerror(g_errno));
|
||||
}
|
||||
// . this EBADENGINEER can happen right after a merge if
|
||||
// the file is renamed because the fd may have changed from
|
||||
@ -1179,16 +1181,27 @@ bool readwrite_r ( FileState *fstate , ThreadEntry *t ) {
|
||||
g_lastDiskReadCompleted = g_now; // gettimeofdayInMilliseconds_r();
|
||||
|
||||
// debug msg
|
||||
//char *s = "read";
|
||||
//if ( fstate->m_doWrite ) s = "wrote";
|
||||
//char *t = "no"; // are we blocking?
|
||||
//if ( fstate->m_this->getFlags() & O_NONBLOCK ) t = "yes";
|
||||
// this is bad for real-time threads cuz our unlink() routine may
|
||||
// have been called by RdbMerge and our m_files may be altered
|
||||
//log("disk::readwrite: %s %"INT32" bytes from %s(nonBlock=%s)",s,n,
|
||||
// m_files[filenum]->getFilename(),t);
|
||||
//log("disk::readwrite_r: %s %"INT32" bytes (nonBlock=%s)", s,n,t);
|
||||
//log("disk::readwrite_r: did %"INT32" bytes", n);
|
||||
if ( g_conf.m_logDebugDisk ) {
|
||||
char *s = "read";
|
||||
if ( fstate->m_doWrite ) s = "wrote";
|
||||
char *t = "no"; // are we blocking?
|
||||
if ( fstate->m_this->getFlags() & O_NONBLOCK ) t = "yes";
|
||||
// this is bad for real-time threads cuz our unlink() routine
|
||||
// may have been called by RdbMerge and our m_files may be
|
||||
// altered
|
||||
log("disk::readwrite: %s %i bytes from %s(nonBlock=%s) fd %i "
|
||||
"cc1=%i=?%i cc2=%i=?%i",
|
||||
s,n,
|
||||
fstate->m_this->getFilename(),
|
||||
t,fd,
|
||||
(int)fstate->m_closeCount1 ,
|
||||
(int)getCloseCount_r ( fstate->m_fd1 ) ,
|
||||
(int)fstate->m_closeCount2 ,
|
||||
(int)getCloseCount_r ( fstate->m_fd2 ) );
|
||||
//log("disk::readwrite_r: %s %"INT32" bytes (nonBlock=%s)",
|
||||
//s,n,t);
|
||||
//log("disk::readwrite_r: did %"INT32" bytes", n);
|
||||
}
|
||||
|
||||
// . if n is 0 that's strange!!
|
||||
// . i think the fd will have been closed and re-opened on us if this
|
||||
@ -1442,7 +1455,8 @@ bool BigFile::unlinkRename ( // non-NULL for renames, NULL for unlinks
|
||||
if ( m_isUnlink && part == -1 ) {
|
||||
// release it first, cuz the removeThreads() below
|
||||
// may call QUICKPOLL() and we end up reading from same file!
|
||||
if ( m_pc ) m_pc->rmVfd ( m_vfd );
|
||||
// this is no longer needed since we use rdbcache basically now
|
||||
//if ( m_pc ) m_pc->rmVfd ( m_vfd );
|
||||
// remove all queued threads that point to us that have not
|
||||
// yet been launched
|
||||
g_threads.m_threadQueues[DISK_THREAD].removeThreads(this);
|
||||
@ -1658,15 +1672,15 @@ bool BigFile::close ( ) {
|
||||
// the done wrapper, sending back an error reply, shutting down the
|
||||
// udp server, calling main.cpp::resetAll(), which resets the Rdb and
|
||||
// free this big file
|
||||
DiskPageCache *pc = m_pc;
|
||||
int32_t vfd = m_vfd;
|
||||
//DiskPageCache *pc = m_pc;
|
||||
//int32_t vfd = m_vfd;
|
||||
|
||||
// remove all queued threads that point to us that have not
|
||||
// yet been launched
|
||||
g_threads.m_threadQueues[DISK_THREAD].removeThreads(this);
|
||||
// release our pages from the DiskPageCache
|
||||
//if ( m_pc ) m_pc->rmVfd ( m_vfd );
|
||||
if ( pc ) pc->rmVfd ( vfd );
|
||||
//if ( pc ) pc->rmVfd ( vfd );
|
||||
return true;
|
||||
}
|
||||
|
||||
|
BigFile.h (14 changed lines)
@ -24,6 +24,10 @@ ssize_t gbpwrite(int fd, const void *buf, size_t count, off_t offset);
|
||||
|
||||
// have enough part files to do a 2048gig file
|
||||
#define MAX_PART_FILES (((2048LL*1000LL*1000LL*1000LL)/MAX_PART_SIZE)+1LL)
|
||||
|
||||
// HACK to save mem. support a 128GB file
|
||||
//#define MAX_PART_FILES (((128LL*1000LL*1000LL*1000LL)/MAX_PART_SIZE)+1LL)
|
||||
|
||||
// debug define
|
||||
//#define MAX_PART_FILES 100
|
||||
|
||||
@ -53,6 +57,8 @@ public:
|
||||
void (*m_callback) ( void *state ) ;
|
||||
// goes from 0 to 1, the lower the niceness, the higher the priority
|
||||
int32_t m_niceness;
|
||||
// was it found in the disk page cache?
|
||||
char m_inPageCache;
|
||||
// . if signal is still pending we need to know if BigFile got deleted
|
||||
// . m_files must be NULL terminated
|
||||
//class BigFile **m_files;
|
||||
@ -76,15 +82,15 @@ public:
|
||||
class DiskPageCache *m_pc;
|
||||
// this is just used for accessing the DiskPageCache, m_pc, it is
|
||||
// a "virtual fd" for this whole file
|
||||
int32_t m_vfd;
|
||||
int64_t m_vfd;
|
||||
// test parms
|
||||
//int32_t m_osize;
|
||||
//char *m_obuf;
|
||||
// for avoiding unlink/reopens while doing a threaded read
|
||||
int32_t m_closeCount1 ;
|
||||
int32_t m_closeCount2 ;
|
||||
int32_t m_vfd1;
|
||||
int32_t m_vfd2;
|
||||
//int32_t m_vfd1;
|
||||
//int32_t m_vfd2;
|
||||
|
||||
//char m_baseFilename[32];
|
||||
int32_t m_flags;
|
||||
@ -217,7 +223,7 @@ class BigFile {
|
||||
|
||||
// . opens the nth file if necessary to get it's fd
|
||||
// . returns -1 if none, >=0 on success
|
||||
int getfd ( int32_t n , bool forReading , int32_t *vfd = NULL );
|
||||
int getfd ( int32_t n , bool forReading );//, int32_t *vfd = NULL );
|
||||
|
||||
// public for wrapper to call
|
||||
//bool readwrite_r ( FileState *fstate );
|
||||
Blaster.cpp
@ -40,7 +40,7 @@ bool Blaster::init(){
log("blaster::hashinit failed" ); return 0; }

// init the memory class after conf since it gets maxMem from Conf
if ( ! g_mem.init ( 200000000 ) ) {
if ( ! g_mem.init ( ) ) {//200000000 ) ) {
log("blaster::Mem init failed" ); return 0; }
// start up log file
if ( ! g_log.init( "/tmp/blasterLog" ) ) {
Cachedb.cpp
@ -41,9 +41,7 @@ bool Cachedb::init ( ) {
if ( ! m_pc.init ( m_name ,
m_rdbId, // RDB_CACHEDB,
pcmem ,
pageSize ,
true , // use shared mem?
false )) // minimizeDiskSeeks?
pageSize ))
return log("db: %s init failed.",m_name);
// init the rdb
if ( ! m_rdb.init ( g_hostdb.m_dir ,
Collectiondb.cpp
@ -1987,6 +1987,15 @@ bool CollectionRec::load ( char *coll , int32_t i ) {
// always turn off gigabits so &s=1000 can do summary skipping
if ( m_isCustomCrawl ) m_docsToScanForTopics = 0;

// make min to merge smaller than normal since most collections are
// small and we want to reduce the # of vfds (files) we have
if ( m_isCustomCrawl ) {
m_posdbMinFilesToMerge = 6;
m_titledbMinFilesToMerge = 4;
m_linkdbMinFilesToMerge = 3;
m_tagdbMinFilesToMerge = 2;
}

// always turn on distributed spider locking because otherwise
// we end up calling Msg50 which calls Msg25 for the same root url
// at the same time, thereby wasting massive resources. it is also
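Keeping crawlbot collections merged down to fewer files matters mostly because of scale: every on-disk Rdb file carries its own File/BigFile bookkeeping (the File.h hunk later in this commit notes MAX_NUM_VFDS was already bumped to 200k because of crawlbot) plus an in-memory map. A back-of-the-envelope sketch with made-up figures (editor's illustration, hypothetical numbers and function name):

    #include <cstdio>

    // rough count of file objects implied by collections x rdbs x files-per-rdb
    static long long estimateFileObjects ( long long collections ,
                                           long long rdbsPerCollection ,
                                           long long filesPerRdb ) {
            return collections * rdbsPerCollection * filesPerRdb;
    }

    int main ( ) {
            // e.g. 10,000 crawlbot collections with 4 rdbs each:
            printf ( "min-to-merge 12: %lld files\n" ,
                     estimateFileObjects ( 10000 , 4 , 12 ) ); // 480,000
            printf ( "min-to-merge  6: %lld files\n" ,
                     estimateFileObjects ( 10000 , 4 ,  6 ) ); // 240,000
            return 0;
    }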
Conf.cpp (9 changed lines)
@ -12,6 +12,8 @@ Conf g_conf;
Conf::Conf ( ) {
m_save = true;
m_doingCommandLine = false;
// set max mem to 16GB at least until we load on disk
m_maxMem = 16000000000;
}

// . does this requester have ROOT admin privledges???
@ -285,9 +287,12 @@ bool Conf::init ( char *dir ) { // , int32_t hostId ) {
//}

// make sure g_mem.maxMem is big enough temporarily
if ( g_mem.m_maxMem < 10000000 ) g_mem.m_maxMem = 10000000;
g_conf.m_maxMem = 8000000000; // 8gb temp

bool status = g_parms.setFromFile ( this , fname , NULL , OBJ_CONF );

if ( g_conf.m_maxMem < 10000000 ) g_conf.m_maxMem = 10000000;

// if not there, create it!
if ( ! status ) {
log("gb: Creating %s from defaults.",fname);
@ -323,7 +328,7 @@ bool Conf::init ( char *dir ) { // , int32_t hostId ) {

// update g_mem
//g_mem.m_maxMem = g_conf.m_maxMem;
if ( ! g_mem.init ( g_conf.m_maxMem ) ) return false;
if ( ! g_mem.init ( ) ) return false;
// always turn this off
g_conf.m_testMem = false;
// and this, in case you forgot to turn it off
Conf.h (1 changed line)
@ -653,6 +653,7 @@ class Conf {
bool m_logDebugDb ;
bool m_logDebugDirty ;
bool m_logDebugDisk ;
bool m_logDebugDiskPageCache;
bool m_logDebugDns ;
bool m_logDebugDownloads;
bool m_logDebugFacebook;
File.cpp (416 changed lines)
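The rewritten pool below drops the virtual-fd indirection: the bookkeeping is a handful of flat arrays indexed by the real fd (s_open, s_writing, s_unlinking, s_timestamps, s_filePtrs, s_closeCounts), so no per-file allocation is needed. When the process is at its open-file budget, closeLeastUsed() scans for the oldest fd that is open, not mid-write, and not mid-unlink, and closes it. A condensed sketch of that scan (editor's paraphrase of the logic in the diff, not a drop-in replacement; the array size here is a placeholder, the real MAX_NUM_FDS comes from Loop.h):

    #define MAX_NUM_FDS 1024   // placeholder; real value is defined in Loop.h

    static char      s_open       [ MAX_NUM_FDS ];
    static char      s_writing    [ MAX_NUM_FDS ];
    static char      s_unlinking  [ MAX_NUM_FDS ];
    static long long s_timestamps [ MAX_NUM_FDS ];

    // pick the least-recently-used fd that is safe to close; -1 if none.
    // the commit message notes a linked list could avoid the linear scan,
    // but that was judged not worth doing right now.
    static int pickLeastUsedFd ( long long now ) {
            int       mini = -1;
            long long min  = 0;
            for ( int i = 0 ; i < MAX_NUM_FDS ; i++ ) {
                    if ( ! s_open[i]    ) continue;
                    if ( s_writing[i]   ) continue; // never steal a write fd
                    if ( s_unlinking[i] ) continue; // already being closed
                    if ( s_timestamps[i] >= now - 1 ) continue; // too young
                    if ( mini == -1 || s_timestamps[i] < min ) {
                            min = s_timestamps[i]; mini = i;
                    }
            }
            return mini; // caller ::close()s it and clears s_open[mini]
    }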
@ -11,16 +11,20 @@
|
||||
// if someone is using a file we must make sure this is true...
|
||||
static int s_isInitialized = false;
|
||||
|
||||
/*
|
||||
// We have up to 5k virtual descriptors, each is mapped to a real descriptor
|
||||
// or -1. We gotta store the filename to re-open one if it was closed.
|
||||
// 5 ints = 20 bytes = 20k
|
||||
static int s_fds [ MAX_NUM_VFDS ]; // the real fd
|
||||
// -1 means not opened
|
||||
// -2 means available
|
||||
*/
|
||||
//static char *s_filenames [ MAX_NUM_VFDS ]; // in case we gotta re-open
|
||||
static int64_t s_timestamps [ MAX_NUM_VFDS ]; // when was it last accessed
|
||||
static char s_writing [ MAX_NUM_VFDS ]; // is it being written to?
|
||||
static char s_unlinking [ MAX_NUM_VFDS ]; // is being unlinked/renamed
|
||||
static int64_t s_timestamps [ MAX_NUM_FDS ]; // when was it last accessed
|
||||
static char s_writing [ MAX_NUM_FDS ]; // is it being written to?
|
||||
static char s_unlinking [ MAX_NUM_FDS ]; // is being unlinked/renamed
|
||||
static char s_open [ MAX_NUM_FDS ]; // is opened?
|
||||
static File *s_filePtrs [ MAX_NUM_FDS ];
|
||||
|
||||
// . how many open files are we allowed?? hardcode it!
|
||||
// . rest are used for sockets
|
||||
@ -49,9 +53,26 @@ static int s_numOpenFiles = 0;
|
||||
#include "Loop.h" // MAX_NUM_FDS
|
||||
static int32_t s_closeCounts [ MAX_NUM_FDS ];
|
||||
|
||||
void sanityCheck ( ) {
|
||||
if ( ! g_conf.m_logDebugDisk ) {
|
||||
log("disk: sanity check called but not in debug mode");
|
||||
return;
|
||||
}
|
||||
int32_t openCount = 0;
|
||||
for ( int i = 0 ; i < MAX_NUM_FDS ; i++ )
|
||||
if ( s_open[i] ) openCount++;
|
||||
if ( openCount != s_numOpenFiles ) { char *xx=NULL;*xx=0; }
|
||||
}
|
||||
|
||||
|
||||
// for avoiding unlink/opens that mess up our threaded read
|
||||
int32_t getCloseCount_r ( int fd ) {
|
||||
if ( fd < 0 ) return 0;
|
||||
if ( fd >= MAX_NUM_FDS ) {
|
||||
log("disk: got fd of %i out of bounds 2 of %i",
|
||||
(int)fd,(int)MAX_NUM_FDS);
|
||||
return 0;
|
||||
}
|
||||
return s_closeCounts [ fd ];
|
||||
}
|
||||
|
||||
@ -66,7 +87,7 @@ void File::incCloseCount_r ( ) {
|
||||
*/
|
||||
|
||||
File::File ( ) {
|
||||
m_vfd = -1;
|
||||
m_fd = -1;
|
||||
// initialize m_maxFileSize and the virtual fd table
|
||||
if ( ! s_isInitialized ) initialize ();
|
||||
// we are not being renamed
|
||||
@ -74,12 +95,19 @@ File::File ( ) {
|
||||
// threaded unlink sets this to true before spawning thread so we
|
||||
// do not try to open it!
|
||||
//m_gone = 0;
|
||||
m_nextActive = NULL;
|
||||
m_prevActive = NULL;
|
||||
// m_nextActive = NULL;
|
||||
// m_prevActive = NULL;
|
||||
m_calledOpen = false;
|
||||
if ( g_conf.m_logDebugDisk )
|
||||
log("disk: constructor fd %i this=0x%"PTRFMT,
|
||||
(int)m_fd,(PTRTYPE)this);
|
||||
}
|
||||
|
||||
|
||||
File::~File ( ) {
|
||||
if ( g_conf.m_logDebugDisk )
|
||||
log("disk: destructor fd %i this=0x%"PTRFMT,
|
||||
(int)m_fd,(PTRTYPE)this);
|
||||
close ();
|
||||
}
|
||||
|
||||
@ -110,7 +138,7 @@ void File::set ( char *filename ) {
|
||||
return;
|
||||
}
|
||||
// if we already had another file open then we must close it first.
|
||||
if ( m_vfd >= 0 ) close();
|
||||
if ( m_fd >= 0 ) close();
|
||||
// copy into m_filename and NULL terminate
|
||||
gbmemcpy ( m_filename , filename , len );
|
||||
m_filename [ len ] = '\0';
|
||||
@ -131,7 +159,7 @@ bool File::rename ( char *newFilename ) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
static File *s_activeHead = NULL;
|
||||
static File *s_activeTail = NULL;
|
||||
|
||||
@ -174,12 +202,13 @@ void promoteInLinkedList ( File *f ) {
|
||||
rmFileFromLinkedList ( f );
|
||||
addFileToLinkedList ( f );
|
||||
}
|
||||
*/
|
||||
|
||||
// . open the file
|
||||
// . only call once per File after calling set()
|
||||
bool File::open ( int flags , int permissions ) {
|
||||
// if we already had another file open then we must close it first.
|
||||
if ( m_vfd >= 0 ) {
|
||||
if ( m_fd >= 0 ) {
|
||||
log(LOG_LOGIC,
|
||||
"disk: Open already called. Closing and re-opening.");
|
||||
close();
|
||||
@ -187,6 +216,7 @@ bool File::open ( int flags , int permissions ) {
|
||||
// save these in case we need to reopen in getfd()
|
||||
m_flags = flags;
|
||||
m_permissions = permissions;
|
||||
m_calledOpen = true;
|
||||
// sanity check
|
||||
//int32_t ss = 0;
|
||||
//for ( int32_t i = 0 ; i < MAX_NUM_VFDS ; i++ )
|
||||
@ -196,28 +226,30 @@ bool File::open ( int flags , int permissions ) {
|
||||
// we must assign this to a virtual descriptor
|
||||
// scan down our list looking for an m_fd of -2 (available) [-1 means
|
||||
// used but but not really open]
|
||||
int i;
|
||||
for ( i = 0 ; i < MAX_NUM_VFDS ; i++ ) if (s_fds [ i ] == -2 ) break;
|
||||
//int i;
|
||||
//for ( i = 0 ; i < MAX_NUM_VFDS ; i++ ) if (s_fds [ i ] == -2 ) break;
|
||||
// can these fools use all 5k fd's?
|
||||
if ( i >= MAX_NUM_VFDS ) {
|
||||
g_errno = EBADENGINEER;
|
||||
return log (
|
||||
"disk: All %"INT32" virtual fd's are in use. Panic.",
|
||||
(int32_t)MAX_NUM_VFDS);
|
||||
}
|
||||
// if ( i >= MAX_NUM_VFDS ) {
|
||||
// g_errno = EBADENGINEER;
|
||||
// return log (
|
||||
// "disk: All %"INT32" virtual fd's are in use. Panic.",
|
||||
// (int32_t)MAX_NUM_VFDS);
|
||||
// }
|
||||
// remember OUR virtual file descriptor for successive calls to
|
||||
// read/write/...
|
||||
m_vfd = i;
|
||||
//m_vfd = i;
|
||||
// we are not open at this point, but no longer available at least
|
||||
s_fds [ m_vfd ] = -1;
|
||||
//s_fds [ m_vfd ] = -1;
|
||||
// open for real, return true on success
|
||||
if ( getfd () >= 0 ) return true;
|
||||
// log the error
|
||||
log("disk: open: %s",mstrerror(g_errno));
|
||||
// . close the virtual fd so we can call open again
|
||||
// . sets s_fds [ m_vfd ] to -2 (available)
|
||||
// . and sets our m_vfd to -1
|
||||
close();
|
||||
// otherwise bitch and return false
|
||||
return log("disk: open: %s",mstrerror(g_errno));
|
||||
return false;
|
||||
}
|
||||
|
||||
// . returns number of bytes written
|
||||
@ -290,16 +322,16 @@ int File::read ( void *buf ,
|
||||
|
||||
// uses lseek to get file's current position
|
||||
int32_t File::getCurrentPos ( ) {
|
||||
return (int32_t) ::lseek (s_fds[m_vfd] , 0 , SEEK_CUR );
|
||||
return (int32_t) ::lseek ( m_fd , 0 , SEEK_CUR );
|
||||
}
|
||||
|
||||
bool File::isNonBlocking () {
|
||||
// return true if never opened!
|
||||
if ( m_vfd < 0 ) return false;
|
||||
//if ( m_vfd < 0 ) return false;
|
||||
// what was the actual file descriptor it represented?
|
||||
int fd = s_fds [ m_vfd ];
|
||||
//int fd = s_fds [ m_vfd ];
|
||||
// always block on a close
|
||||
int flags = fcntl ( fd , F_GETFL ) ;
|
||||
int flags = fcntl ( m_fd , F_GETFL ) ;
|
||||
// return true if non-blocking
|
||||
return ( flags & O_NONBLOCK );
|
||||
}
|
||||
@ -322,34 +354,33 @@ bool File::isNonBlocking () {
|
||||
void File::close1_r ( ) {
|
||||
// assume no close
|
||||
m_closedIt = false;
|
||||
// debug
|
||||
log(LOG_DEBUG,"disk: close1_r: Closing vfd=%i after unlink/rename.",
|
||||
m_vfd);
|
||||
|
||||
// debug. don't log in thread - might hurt us
|
||||
log(LOG_DEBUG,"disk: close1_r: Closing fd %i for %s after "
|
||||
"unlink/rename.",m_fd,m_filename);
|
||||
|
||||
// problem. this could be a closed map file, m_vfd=-1.
|
||||
if ( m_vfd < 0 ) {
|
||||
if ( m_fd < 0 ) {
|
||||
// -1 just means it was already closed, probably this is
|
||||
// from unlinking and RdbMap file which is closed after we
|
||||
// read it in at startup.
|
||||
log(LOG_DEBUG,"disk: close1_r: m_vfd=%i < 0",m_vfd);
|
||||
log(LOG_DEBUG,"disk: close1_r: fd %i < 0",m_fd);
|
||||
return ;
|
||||
}
|
||||
// panic!
|
||||
if ( s_writing [ m_vfd ] ) {
|
||||
if ( s_writing [ m_fd ] ) {
|
||||
log(LOG_LOGIC,"disk: close1_r: In write mode and closing.");
|
||||
return;
|
||||
}
|
||||
// if already being unlinked, skip
|
||||
if ( s_unlinking [ m_vfd ] ) {
|
||||
if ( s_unlinking [ m_fd ] ) {
|
||||
log(LOG_LOGIC,"disk: close1_r: In unlink mode and closing.");
|
||||
return;
|
||||
}
|
||||
// this is < 0 if invalid
|
||||
int fd = s_fds [ m_vfd ];
|
||||
// debug. don't log in thread - might hurt us
|
||||
log(LOG_DEBUG,"disk: close1_r: Closing fd=%i for %s after "
|
||||
"unlink/rename.",fd,m_filename);
|
||||
//int fd = s_fds [ m_vfd ];
|
||||
|
||||
if ( fd < 0 ) return ;
|
||||
if ( m_fd < 0 ) return ;
|
||||
// . do not allow closeLeastUsed to close this fd as well
|
||||
// . that can really mess us up:
|
||||
// . 1. we close this fd being unlinked/renamed
|
||||
@ -357,11 +388,17 @@ void File::close1_r ( ) {
|
||||
// . 3. closeLeastUsed closes it again and sets our s_fds[m_vfd] to -1
|
||||
// this leaving the other file with a seemingly valid fd that
|
||||
// always gives EBADF errors cuz it was closed.
|
||||
s_unlinking [ m_vfd ] = 1;
|
||||
s_unlinking [ m_fd ] = 1;
|
||||
again:
|
||||
if ( fd == 0 ) log("disk: closing1 fd of 0");
|
||||
if ( ::close(fd) == 0 ) { m_closedIt = true; return; }
|
||||
log("disk: close(%i): %s.",fd,strerror(errno));
|
||||
if ( m_fd == 0 ) log("disk: closing1 fd of 0");
|
||||
if ( ::close(m_fd) == 0 ) {
|
||||
m_closedIt = true;
|
||||
// close2() needs to see m_fd so it can set flags...
|
||||
// so m_fd MUST be intact
|
||||
//m_fd = -1;
|
||||
return;
|
||||
}
|
||||
log("disk: close(%i): %s.",m_fd,strerror(errno));
|
||||
if ( errno == EINTR ) goto again;
|
||||
}
|
||||
|
||||
@ -369,36 +406,78 @@ void File::close1_r ( ) {
|
||||
// . BigFile.cpp calls this when done unlinking/renaming this file
|
||||
void File::close2 ( ) {
|
||||
// if already gone, bail. this could be a closed map file, m_vfd=-1.
|
||||
if ( m_vfd < 0 ) {
|
||||
if ( m_fd < 0 ) {
|
||||
// -1 just means it was already closed, probably this is
|
||||
// from unlinking and RdbMap file which is closed after we
|
||||
// read it in at startup.
|
||||
log(LOG_INFO,"disk: close2: m_vfd=%i < 0",m_vfd);
|
||||
log(LOG_INFO,"disk: close2: fd %i < 0",m_fd);
|
||||
return;
|
||||
}
|
||||
// clear for later
|
||||
s_unlinking [ m_vfd ] = 0;
|
||||
// return if we did not actually do a close
|
||||
|
||||
// clear for later, but only if nobody else got our fd when opening
|
||||
// a file... because we called close() in a thread in close1_r()
|
||||
if ( s_filePtrs [ m_fd ] == this )
|
||||
s_unlinking [ m_fd ] = 0;
|
||||
|
||||
// return if we did not actually do a close in close1_r()
|
||||
if ( ! m_closedIt ) {
|
||||
// this can happen if the fd was always -1 before call to
|
||||
// close1_r(), like when deleting a map file... so we never
|
||||
// needed to call ::close() in close1_r().
|
||||
return;
|
||||
/*
|
||||
int fd = -3;
|
||||
if ( m_vfd >= 0 ) fd = s_fds[m_vfd];
|
||||
log(LOG_LOGIC,"disk: close2: "
|
||||
"closeLeastUsed() or someone else beat us to the close. "
|
||||
"This should never happen. vfd=%i fd=%i.", m_vfd,fd);
|
||||
return;
|
||||
*/
|
||||
}
|
||||
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
|
||||
// excise from linked list of active files
|
||||
//rmFileFromLinkedList ( this );
|
||||
// mark this virtual file descriptor as available.
|
||||
s_fds [ m_vfd ] = -2;
|
||||
// no more virtual file descriptor
|
||||
m_vfd = -1;
|
||||
//s_closeCounts [ fd ]++;
|
||||
//s_fds [ m_vfd ] = -2;
|
||||
|
||||
// save this for stuff below
|
||||
int fd = m_fd;
|
||||
|
||||
// now it is closed. do not try to re-close in destructor's call to
|
||||
// close() so set m_fd to -1
|
||||
m_fd = -1;
|
||||
|
||||
// mark it as closed
|
||||
// CAUTION: since we closed the fd in a thread in close1_r() it may
|
||||
// have been returned for another file, so check here. make sure we are
|
||||
// still considered the 'owner'. if not then we were supplanted in
|
||||
// File::getfd() and s_numOpenFiles-- was called there as well so
|
||||
// we should skip everything below here.
|
||||
if ( s_filePtrs [ fd ] != this ) return;
|
||||
|
||||
s_open [ fd ] = 0;
|
||||
s_filePtrs [ fd ] = NULL;
|
||||
// i guess there is no need to do this close count inc
|
||||
// if we lost our fd already shortly after our thread closed
|
||||
// the fd, otherwise we'll falsely mess up the new owner
|
||||
// and he will do a re-read.
|
||||
s_closeCounts [ fd ]++;
|
||||
|
||||
// to keep our sanityCheck() from coring, only decrement this
|
||||
// if we owned it still
|
||||
s_numOpenFiles--;
|
||||
// no more virtual file descriptor
|
||||
//m_vfd = -1;
|
||||
//s_closeCounts [ fd ]++;
|
||||
// debug log
|
||||
if ( g_conf.m_logDebugDisk )
|
||||
log("disk: close2 fd %i for %s #openfiles=%i "
|
||||
"this=0x%"PTRFMT,
|
||||
fd,m_filename,(int)s_numOpenFiles,(PTRTYPE)this);
|
||||
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
}
|
||||
|
||||
// . return -2 on error
|
||||
@ -407,34 +486,34 @@ void File::close2 ( ) {
|
||||
// . closes the file for real!
|
||||
// . analogous to a reset() routine
|
||||
bool File::close ( ) {
|
||||
// return true if never opened!
|
||||
if ( m_vfd < 0 ) return true;
|
||||
// return true if not open
|
||||
if ( m_fd < 0 ) return true;
|
||||
// flush any changes
|
||||
//flush ( );
|
||||
// what was the actual file descriptor it represented?
|
||||
int fd = s_fds [ m_vfd ];
|
||||
//int fd = s_fds [ m_vfd ];
|
||||
// mark this virtual file descriptor as available.
|
||||
s_fds [ m_vfd ] = -2;
|
||||
//s_fds [ m_vfd ] = -2;
|
||||
// save
|
||||
int32_t vfd = m_vfd;
|
||||
//int32_t vfd = m_vfd;
|
||||
//s_filenames [ m_vfd ] = NULL;
|
||||
// no more virtual file descriptor
|
||||
m_vfd = -1;
|
||||
//m_vfd = -1;
|
||||
// if it was already closed or available then return true
|
||||
if ( fd < 0 ) return true;
|
||||
//if ( fd < 0 ) return true;
|
||||
// panic!
|
||||
if ( s_writing [ vfd ] )
|
||||
if ( s_writing [ m_fd ] )
|
||||
return log(LOG_LOGIC,"disk: In write mode and closing 2.");
|
||||
// if already being unlinked, skip
|
||||
if ( s_unlinking [ vfd ] )
|
||||
if ( s_unlinking [ m_fd ] )
|
||||
return log(LOG_LOGIC,"disk: In unlink mode and closing 2.");
|
||||
// always block on a close
|
||||
int flags = fcntl ( fd , F_GETFL ) ;
|
||||
int flags = fcntl ( m_fd , F_GETFL ) ;
|
||||
// turn off these 2 flags on fd to make sure
|
||||
flags &= ~( O_NONBLOCK | O_ASYNC );
|
||||
// return false on error
|
||||
retry26:
|
||||
if ( fcntl ( fd, F_SETFL, flags ) < 0 ) {
|
||||
if ( fcntl ( m_fd, F_SETFL, flags ) < 0 ) {
|
||||
// valgrind
|
||||
if ( errno == EINTR ) goto retry26;
|
||||
// copy errno to g_errno
|
||||
@ -448,9 +527,12 @@ bool File::close ( ) {
|
||||
// before that open will know it!
|
||||
//s_closeCounts [ fd ]++;
|
||||
// otherwise we gotta really close it
|
||||
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
|
||||
again:
|
||||
if ( fd == 0 ) log("disk: closing2 fd of 0");
|
||||
int status = ::close ( fd );
|
||||
if ( m_fd == 0 ) log("disk: closing2 fd of 0");
|
||||
int status = ::close ( m_fd );
|
||||
if ( status == -1 && errno == EINTR ) goto again;
|
||||
// there was a closing error if status is non-zero. --- not checking
|
||||
// the error may lead to silent loss of data --- see "man 2 close"
|
||||
@ -458,18 +540,33 @@ bool File::close ( ) {
|
||||
log("disk: close(%s) : %s" , m_filename,mstrerrno(g_errno));
|
||||
return false;
|
||||
}
|
||||
// sanity
|
||||
if ( ! s_open[m_fd] ) { char *xx=NULL;*xx=0; }
|
||||
// mark it as closed
|
||||
s_open [ m_fd ] = 0;
|
||||
s_filePtrs [ m_fd ] = NULL;
|
||||
s_closeCounts [ m_fd ]++;
|
||||
// otherwise decrease the # of open files
|
||||
s_numOpenFiles--;
|
||||
// debug log
|
||||
if ( g_conf.m_logDebugDisk )
|
||||
log("disk: close0 fd %i for %s #openfiles=%i",
|
||||
m_fd,m_filename,(int)s_numOpenFiles);
|
||||
// set this to -1 to indicate closed
|
||||
m_fd = -1;
|
||||
// excise from linked list of active files
|
||||
//rmFileFromLinkedList ( this );
|
||||
// return true blue
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
return true;
|
||||
}
|
||||
|
||||
int File::getfdNoOpen ( ) {
|
||||
if ( m_vfd < 0 ) return -1;
|
||||
// this is -1 if not open
|
||||
return m_fd;
|
||||
//if ( m_vfd < 0 ) return -1;
|
||||
// this is < 0 if invalid
|
||||
return s_fds [ m_vfd ];
|
||||
//return s_fds [ m_vfd ];
|
||||
}
|
||||
|
||||
// . get the fd of this file
|
||||
@ -482,7 +579,7 @@ int File::getfdNoOpen ( ) {
|
||||
// . otherwise, return the file descriptor
|
||||
int File::getfd () {
|
||||
// if m_vfd is -1 it's never been opened
|
||||
if ( m_vfd < 0 ) {
|
||||
if ( ! m_calledOpen ) { // m_vfd < 0 ) {
|
||||
g_errno = EBADENGINEER;
|
||||
log(LOG_LOGIC,"disk: getfd: Must call open() first.");
|
||||
char *xx=NULL; *xx=0;
|
||||
@ -492,27 +589,31 @@ int File::getfd () {
|
||||
// . no caller should call open/getfd after unlink was queued for thred
|
||||
//if ( m_gone ) { char *xx = NULL; *xx = 0; }
|
||||
// get the real fd from the virtual fd
|
||||
int fd = s_fds [ m_vfd ];
|
||||
//int fd = s_fds [ m_vfd ];
|
||||
// return true if it's already opened
|
||||
if ( fd >= 0 ) {
|
||||
if ( m_fd >= 0 ) {
|
||||
// debug msg
|
||||
log(LOG_DEBUG,"disk: Opened vfd #%"INT32" of %"INT32".",
|
||||
(int32_t)m_vfd,(int32_t)s_fds[m_vfd]);
|
||||
log(LOG_DEBUG,"disk: returning existing fd %i for %s",
|
||||
(int)m_fd,m_filename);
|
||||
if ( m_fd >= MAX_NUM_FDS ) { char *xx=NULL;*xx=0; }
|
||||
// but update the timestamp to reduce chance it closes on us
|
||||
//s_timestamps [ m_vfd ] = getTime();
|
||||
s_timestamps [ m_vfd ] = gettimeofdayInMillisecondsLocal();
|
||||
return fd;
|
||||
s_timestamps [ m_fd ] = gettimeofdayInMillisecondsLocal();
|
||||
return m_fd;
|
||||
}
|
||||
// if fd is -2 it's marked as available
|
||||
if ( fd != -1 ) {
|
||||
g_errno = EBADENGINEER;
|
||||
log (LOG_LOGIC, "disk: getfd: fd is available?!?!" );
|
||||
return -2;
|
||||
}
|
||||
// if ( fd != -1 ) {
|
||||
// g_errno = EBADENGINEER;
|
||||
// log (LOG_LOGIC, "disk: getfd: fd is available?!?!" );
|
||||
// return -2;
|
||||
// }
|
||||
// . a real fd of -1 means it's been closed and we gotta reopen it
|
||||
// . we have to close someone if we don't have enough room
|
||||
while ( s_numOpenFiles >= s_maxNumOpenFiles )
|
||||
while ( s_numOpenFiles >= s_maxNumOpenFiles ) {
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
if ( ! closeLeastUsed() ) return -1;
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
}
|
||||
// what was the filename/mode of this timed-out fd?
|
||||
//char *filename = s_filenames [ m_vfd ];
|
||||
// time the calls to open just in case they are hurting us
|
||||
@ -523,6 +624,7 @@ int File::getfd () {
|
||||
// t1 = gettimeofdayInMilliseconds();
|
||||
// fd = ::open ( m_oldFilename , m_flags , m_permissions );
|
||||
//}
|
||||
int fd = -1;
|
||||
// then try to open the new name
|
||||
if ( fd == -1 ) {
|
||||
t1 = gettimeofdayInMilliseconds();
|
||||
@ -537,6 +639,32 @@ int File::getfd () {
|
||||
fd = ::open ( m_filename , m_flags , m_permissions );
|
||||
if ( fd == 0 )
|
||||
log("disk: Got fd of 0 when opening2 %s.",m_filename);
|
||||
if ( fd >= MAX_NUM_FDS )
|
||||
log("disk: got fd of %i out of bounds 1 of %i",
|
||||
(int)fd,(int)MAX_NUM_FDS);
|
||||
|
||||
// if we got someone else's fd that called close1_r() in a
|
||||
// thread but did not have time to call close2() to fix
|
||||
// up these member vars, then do it here. close2() will
|
||||
// see that s_filePtrs[fd] does not equal the file ptr any more
|
||||
// and it will not update s_numOpenFiles in that case.
|
||||
if ( fd >= 0 && s_open [ fd ] ) {
|
||||
File *f = s_filePtrs [ fd ];
|
||||
if ( g_conf.m_logDebugDisk )
|
||||
log("disk: swiping fd %i from %s before "
|
||||
"his close thread returned",fd,
|
||||
f->m_filename);
|
||||
// he only incs/decs his counters if he owns it so in
|
||||
// close2() so dec this global counter here
|
||||
s_numOpenFiles--;
|
||||
s_open[fd] = 0;
|
||||
s_filePtrs[fd] = NULL;
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
}
|
||||
|
||||
// sanity. how can we get an fd already opened?
|
||||
// because it was closed in a thread in close1_r()
|
||||
if ( fd >= 0 && s_open[fd] ) { char *xx=NULL;*xx=0; }
|
||||
// . now inc that count in case there was someone reading on
|
||||
// that fd right before it was closed and we got it
|
||||
// . ::close() call can now happen in a thread, so we
|
||||
@ -562,23 +690,38 @@ int File::getfd () {
|
||||
"%"INT64" ms.",m_filename,dt);
|
||||
}
|
||||
// copy errno to g_errno
|
||||
if ( fd == -1 ) {
|
||||
if ( fd <= -1 ) {
|
||||
g_errno = errno;
|
||||
log("disk: error open(%s) : %s",m_filename,strerror(g_errno));
|
||||
log("disk: error open(%s) : %s fd %i",
|
||||
m_filename,strerror(g_errno),(int)fd);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
|
||||
// we're another open file
|
||||
s_numOpenFiles++;
|
||||
|
||||
// debug log
|
||||
if ( g_conf.m_logDebugDisk )
|
||||
log("disk: opened1 fd %i for %s #openfiles=%i this=0x%"PTRFMT,
|
||||
(int)fd,m_filename,(int)s_numOpenFiles,(PTRTYPE)this);
|
||||
|
||||
// set this file descriptor, the other stuff remains the same
|
||||
s_fds [ m_vfd ] = fd;
|
||||
//s_fds [ m_vfd ] = fd;
|
||||
m_fd = fd;
|
||||
// 0 means stdout, right? why am i seeing it get assigned???
|
||||
if ( fd == 0 )
|
||||
log("disk: Found fd of 0 when opening %s.",m_filename);
|
||||
// reset
|
||||
s_writing [ m_vfd ] = 0;
|
||||
s_unlinking [ m_vfd ] = 0;
|
||||
s_writing [ fd ] = 0;
|
||||
s_unlinking [ fd ] = 0;
|
||||
// update the time stamp
|
||||
s_timestamps [ m_vfd ] = gettimeofdayInMillisecondsLocal();
|
||||
s_timestamps [ fd ] = gettimeofdayInMillisecondsLocal();
|
||||
s_open [ fd ] = true;
|
||||
s_filePtrs [ fd ] = this;
|
||||
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
// add file to linked list of active files
|
||||
//addFileToLinkedList ( this );
|
||||
return fd;
|
||||
@ -593,27 +736,33 @@ bool File::closeLeastUsed () {
|
||||
int64_t now = gettimeofdayInMillisecondsLocal();
|
||||
|
||||
|
||||
int32_t notopen = 0;
|
||||
int32_t writing = 0;
|
||||
int32_t unlinking = 0;
|
||||
int32_t young = 0;
|
||||
|
||||
// get the least used of all the actively opened file descriptors.
|
||||
// we can't get files that were opened for writing!!!
|
||||
int i;
|
||||
for ( i = 0 ; i < MAX_NUM_VFDS ; i++ ) {
|
||||
if ( s_fds [ i ] < 0 ) continue;
|
||||
for ( i = 0 ; i < MAX_NUM_FDS ; i++ ) {
|
||||
//if ( s_fds [ i ] < 0 ) continue;
|
||||
if ( ! s_open[i] ) { notopen++; continue; }
|
||||
// fds opened for writing are not candidates, because if
|
||||
// we close on a threaded write, that fd may be used to
|
||||
// re-open another file which gets garbled!
|
||||
if ( s_writing [ i ] ) continue;
|
||||
if ( s_writing [ i ] ) { writing++; continue; }
|
||||
// do not close guys being unlinked they are in the middle
|
||||
// of being closed ALREADY in close1_r(). There should only be
|
||||
// like one unlink thread allowed to be active at a time so we
|
||||
// don't have to worry about it hogging all the fds.
|
||||
if ( s_unlinking [ i ] ) continue;
|
||||
if ( s_unlinking [ i ] ) { unlinking++; continue; }
|
||||
// when we got like 1000 reads queued up, it uses a *lot* of
|
||||
// memory and we can end up never being able to complete a
|
||||
// read because the descriptors are always getting closed on us
|
||||
// so do a hack fix and do not close descriptors that are
|
||||
// about .5 seconds old on avg.
|
||||
if ( s_timestamps [ i ] == now ) continue;
|
||||
if ( s_timestamps [ i ] == now - 1 ) continue;
|
||||
if ( s_timestamps [ i ] == now ) { young++; continue; }
|
||||
if ( s_timestamps [ i ] == now - 1 ) { young++; continue; }
|
||||
if ( mini == -1 || s_timestamps [ i ] < min ) {
|
||||
min = s_timestamps [ i ];
|
||||
mini = i;
|
||||
@ -663,14 +812,19 @@ bool File::closeLeastUsed () {
|
||||
return log("File: closeLeastUsed: failed. All %"INT32" "
|
||||
"descriptors "
|
||||
"are unavailable to be closed and re-used to read "
|
||||
"from another file.",(int32_t)s_maxNumOpenFiles);
|
||||
"from another file. notopen=%i writing=%i "
|
||||
"unlinking=%i young=%i"
|
||||
,(int32_t)s_maxNumOpenFiles
|
||||
,notopen
|
||||
,writing
|
||||
,unlinking
|
||||
,young );
|
||||
|
||||
// debug msg
|
||||
logf(LOG_DEBUG,"disk: Closing vfd #%i of %"INT32". delta=%"INT64"",
|
||||
mini,(int32_t)s_fds[mini],now-s_timestamps[mini]);
|
||||
|
||||
int fd = mini;
|
||||
|
||||
// always block on close
|
||||
int fd = s_fds[mini];
|
||||
//int fd = s_fds[mini];
|
||||
int flags = fcntl ( fd , F_GETFL ) ;
|
||||
// turn off these 2 flags on fd to make sure
|
||||
flags &= ~( O_NONBLOCK | O_ASYNC );
|
||||
@ -698,13 +852,36 @@ bool File::closeLeastUsed () {
|
||||
|
||||
// -1 means can be reopened because File::close() wasn't called.
|
||||
// we're just conserving file descriptors
|
||||
s_fds [ mini ] = -1;
|
||||
|
||||
|
||||
//s_fds [ mini ] = -1;
|
||||
|
||||
// if the real close was successful then decrement the # of open files
|
||||
if ( status == 0 ) {
|
||||
// it's not open
|
||||
s_open [ fd ] = 0;
|
||||
// if someone is trying to read on this let them know
|
||||
s_closeCounts [ fd ]++;
|
||||
|
||||
s_numOpenFiles--;
|
||||
|
||||
File *f = s_filePtrs [ fd ];
|
||||
// don't let him use the stolen fd
|
||||
f->m_fd = -1 ;
|
||||
|
||||
// debug msg
|
||||
if ( g_conf.m_logDebugDisk ) {
|
||||
File *f = s_filePtrs [ fd ];
|
||||
char *fname = "";
|
||||
if ( f ) fname = f->m_filename;
|
||||
logf(LOG_DEBUG,"disk: force closed fd %i for"
|
||||
" %s. age=%"INT64" #openfiles=%i this=0x%"PTRFMT,
|
||||
mini,fname,now-s_timestamps[mini],
|
||||
(int)s_numOpenFiles,
|
||||
(PTRTYPE)this);
|
||||
}
|
||||
|
||||
// no longer the owner
|
||||
s_filePtrs [ fd ] = NULL;
|
||||
|
||||
// excise from linked list of active files
|
||||
//rmFileFromLinkedList ( f );
|
||||
// getfd() may not execute in time to ince the closeCount
|
||||
@ -717,6 +894,8 @@ bool File::closeLeastUsed () {
|
||||
if ( status == -1 )
|
||||
return log("disk: close(%i) : %s", fd , strerror(errno));
|
||||
|
||||
if ( g_conf.m_logDebugDisk ) sanityCheck();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -843,10 +1022,10 @@ bool File::unlink ( ) {
|
||||
}
|
||||
|
||||
bool File::flush ( ) {
|
||||
int fd =s_fds[m_vfd];
|
||||
if ( fd < 0 ) return false;
|
||||
//int fd =s_fds[m_vfd];
|
||||
if ( m_fd < 0 ) return false;
|
||||
//return log("file::flush(%s): no fd", m_filename );
|
||||
int status = fsync ( fd );
|
||||
int status = fsync ( m_fd );
|
||||
if ( status == 0 ) return true;
|
||||
// copy errno to g_errno
|
||||
g_errno = errno;
|
||||
@ -856,7 +1035,7 @@ bool File::flush ( ) {
|
||||
// a wrapper for lseek
|
||||
int32_t File::lseek ( int32_t offset , int whence ) {
|
||||
|
||||
int32_t position = (int32_t) ::lseek (s_fds [ m_vfd ] , offset , whence );
|
||||
int32_t position = (int32_t) ::lseek ( m_fd , offset , whence );
|
||||
|
||||
if ( position >= 0 ) return position;
|
||||
|
||||
@ -864,7 +1043,7 @@ int32_t File::lseek ( int32_t offset , int whence ) {
|
||||
g_errno = errno;
|
||||
|
||||
log("disk: lseek ( %s(%i) , %"INT32" , whence ): %s" , m_filename ,
|
||||
s_fds [m_vfd], offset , strerror ( g_errno ) );
|
||||
m_fd, offset , strerror ( g_errno ) );
|
||||
|
||||
return -1;
|
||||
}
|
||||
@ -877,16 +1056,19 @@ bool File::initialize ( ) {
|
||||
// log ( 0 , "file::initialize: running");
|
||||
|
||||
// reset all the virtual file descriptos
|
||||
for ( int i = 0 ; i < MAX_NUM_VFDS ; i++ ) {
|
||||
s_fds [ i ] = -2; // -2 means vfd #i is available
|
||||
for ( int i = 0 ; i < MAX_NUM_FDS ; i++ ) {
|
||||
//s_fds [ i ] = -2; // -2 means vfd #i is available
|
||||
//s_filenames [ i ] = NULL;
|
||||
s_timestamps [ i ] = 0LL;
|
||||
s_writing [ i ] = 0;
|
||||
s_unlinking [ i ] = 0;
|
||||
s_open [ i ] = 0;
|
||||
s_closeCounts [ i ] = 0;
|
||||
s_filePtrs [ i ] = NULL;
|
||||
}
|
||||
|
||||
for ( int32_t i = 0 ; i < MAX_NUM_FDS ; i++ )
|
||||
s_closeCounts[i] = 0;
|
||||
// for ( int32_t i = 0 ; i < MAX_NUM_FDS ; i++ )
|
||||
// s_closeCounts[i] = 0;
|
||||
|
||||
s_isInitialized = true;
|
||||
|
||||
@ -912,17 +1094,17 @@ char *File::getExtension ( ) {
|
||||
// and the merge guy gets a new fd which happens to be the old fd of the
|
||||
// dump, so when the dump thread lets its write go it writes into the merge
|
||||
// file.
|
||||
void enterWriteMode ( int32_t vfd ) {
|
||||
if ( vfd >= 0 ) s_writing [ vfd ] = 1;
|
||||
void enterWriteMode ( int fd ) {
|
||||
if ( fd >= 0 ) s_writing [ fd ] = 1;
|
||||
}
|
||||
void exitWriteMode ( int32_t vfd ) {
|
||||
if ( vfd >= 0 ) s_writing [ vfd ] = 0;
|
||||
void exitWriteMode ( int fd ) {
|
||||
if ( fd >= 0 ) s_writing [ fd ] = 0;
|
||||
}
|
||||
// error correction routine used by BigFile.cpp
|
||||
void releaseVfd ( int32_t vfd ) {
|
||||
if ( vfd >= 0 && s_fds [ vfd ] >= 0 ) s_fds [ vfd ] = -1;
|
||||
}
|
||||
int getfdFromVfd ( int32_t vfd ) {
|
||||
if ( vfd <= 0 ) return -1;
|
||||
return s_fds [ vfd ];
|
||||
}
|
||||
// void releaseVfd ( int32_t vfd ) {
|
||||
// if ( vfd >= 0 && s_fds [ vfd ] >= 0 ) s_fds [ vfd ] = -1;
|
||||
// }
|
||||
// int getfdFromVfd ( int32_t vfd ) {
|
||||
// if ( vfd <= 0 ) return -1;
|
||||
// return s_fds [ vfd ];
|
||||
// }
|
||||
|
File.h (25 changed lines)
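One subtlety this header keeps exposing is getCloseCount_r(): every fd has a counter that is bumped whenever that fd is really closed. BigFile snapshots the counts before handing a read to a thread (fstate->m_closeCount1/2) and compares them afterwards; a change means the fd was closed, and possibly re-opened on some other file, mid-read, so the result is treated as suspect. A small self-contained sketch of the idea (editor's illustration with sketch names; the real declaration is in this header):

    #include <cstdint>

    #define MAX_NUM_FDS 1024                     // placeholder size
    static int32_t s_closeCounts [ MAX_NUM_FDS ];

    // File.cpp bumps s_closeCounts[fd] every time that fd is really closed
    static int32_t getCloseCountSketch ( int fd ) {
            if ( fd < 0 || fd >= MAX_NUM_FDS ) return 0;
            return s_closeCounts [ fd ];
    }

    // snapshot before the threaded read, verify after it completes;
    // a mismatch means the buffer may belong to a different file now
    static bool fdStillOurs ( int fd , int32_t snapshot ) {
            return getCloseCountSketch ( fd ) == snapshot;
    }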
@ -23,7 +23,8 @@
|
||||
// . man, chris has 958 files, lets crank it up from 2k to 5k
|
||||
// . boost up to 50,000 since we are hitting this limit with crawlbot
|
||||
// . we are hitting again with crawlbot, boost to 200k from 50k
|
||||
#define MAX_NUM_VFDS (200*1024)
|
||||
// . TODO: make this dynamically allocate based on need
|
||||
//#define MAX_NUM_VFDS (1024*1024)
|
||||
|
||||
#include <sys/types.h> // for open/lseek
|
||||
#include <sys/stat.h> // for open
|
||||
@ -38,11 +39,11 @@ int64_t getFileSize ( char *filename ) ;
|
||||
int32_t getCloseCount_r ( int fd );
|
||||
|
||||
// prevent fd from being closed on us when we are writing
|
||||
void enterWriteMode ( int32_t vfd ) ;
|
||||
void exitWriteMode ( int32_t vfd ) ;
|
||||
void enterWriteMode ( int fd ) ;
|
||||
void exitWriteMode ( int fd ) ;
|
||||
// error correction routine used by BigFile.cpp
|
||||
void releaseVfd ( int32_t vfd ) ;
|
||||
int getfdFromVfd ( int32_t vfd ) ;
|
||||
//void releaseVfd ( int32_t vfd ) ;
|
||||
//int getfdFromVfd ( int32_t vfd ) ;
|
||||
|
||||
class File {
|
||||
|
||||
@ -66,8 +67,7 @@ class File {
|
||||
// returns false and sets errno on error, returns true on success
|
||||
bool rename ( char *newFilename );
|
||||
|
||||
// if m_vfd is negative it's never been opened
|
||||
bool isOpen () { return ( m_vfd >= 0 ); };
|
||||
bool calledOpen () { return m_calledOpen; };
|
||||
|
||||
bool isNonBlocking () ;
|
||||
|
||||
@ -174,18 +174,23 @@ class File {
|
||||
bool closeLeastUsed ( );
|
||||
|
||||
// THIS file's VIRTUAL descriptor
|
||||
int m_vfd;
|
||||
//int m_vfd;
|
||||
|
||||
// now just the real fd. is -1 if not opened
|
||||
int m_fd;
|
||||
|
||||
// save the permission and flag sets in case of re-opening
|
||||
int m_flags;
|
||||
int m_permissions;
|
||||
|
||||
char m_calledOpen;
|
||||
|
||||
time_t m_st_mtime; // file last mod date
|
||||
int32_t m_st_size; // file size
|
||||
time_t getLastModifiedDate ( ) ;
|
||||
|
||||
class File *m_nextActive;
|
||||
class File *m_prevActive;
|
||||
//class File *m_nextActive;
|
||||
//class File *m_prevActive;
|
||||
};
|
||||
|
||||
|
||||
HttpServer.cpp
@ -1543,7 +1543,7 @@ bool HttpServer::sendReply ( TcpSocket *s , HttpRequest *r , bool isAdmin) {
// case, as it is only set to true in TcpServer::readSocketWrapper()
// which should never be called by TcpServer::sendMsg() above.
// so let cleanUp know it is no longer valid
if ( ! f->isOpen() ) f->open( O_RDONLY );
if ( ! f->calledOpen() ) f->open( O_RDONLY );
int fd = f->getfd();
cleanUp ( f , NULL/*TcpSocket */ );
// . AND we need to do this ourselves here
Indexdb.cpp
@ -112,9 +112,7 @@ bool Indexdb::init ( ) {
if ( ! m_pc.init ( "indexdb",
RDB_INDEXDB,
pcmem ,
pageSize ,
true , // use RAM disk?
false )) // minimize disk seeks?
pageSize ))
return log("db: Indexdb init failed.");

// . set our own internal rdb
Linkdb.cpp
@ -120,9 +120,7 @@ bool Linkdb::init ( ) {
if ( ! m_pc.init ( "linkdb" ,
RDB_LINKDB,
pcmem ,
pageSize ,
true , // use shared mem?
false )) // minimizeDiskSeeks?
pageSize ))
return log("db: Linkdb init failed.");
// init the rdb
return m_rdb.init ( g_hostdb.m_dir ,
Mem.cpp (42 changed lines)
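Two things to note in the Mem.cpp hunks below: the cap check now reads g_conf.m_maxMem directly instead of a cached Mem::m_maxMem, and the allocation-tracking table grows from 3000*1024 to 8194*1024 slots. Every live allocation occupies one slot, so the slot count bounds how many allocations can be outstanding at once. A tiny sketch of that kind of table (editor's illustration, loosely modeled on Mem::addMem but not the actual implementation; SLOTS and trackAlloc are made-up names):

    #include <cstdint>

    static const int32_t SLOTS = 8194 * 1024;   // was 3000 * 1024
    static void *s_ptrs [ SLOTS ];              // one slot per tracked allocation

    // open-addressed insert keyed on the pointer value; returns false when
    // the table is full, which is exactly the failure the bump avoids
    static bool trackAlloc ( void *mem ) {
            uint64_t h = (uint64_t)(uintptr_t)mem >> 4;
            uint32_t b = (uint32_t)( h % SLOTS );
            for ( int32_t i = 0 ; i < SLOTS ; i++ ) {
                    uint32_t s = ( b + i ) % SLOTS;
                    if ( ! s_ptrs[s] ) { s_ptrs[s] = mem; return true; }
            }
            return false;
    }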
@ -246,9 +246,13 @@ void * operator new (size_t size) throw (std::bad_alloc) {
|
||||
//if ( ! g_stats.m_gotLock || g_threads.amThread() ) mutexLock();
|
||||
//else unlock = false;
|
||||
|
||||
// hack so hostid #0 can use more mem
|
||||
int64_t max = g_conf.m_maxMem;
|
||||
//if ( g_hostdb.m_hostId == 0 ) max += 2000000000;
|
||||
|
||||
// don't go over max
|
||||
if ( g_mem.m_used + (int32_t)size >= g_mem.m_maxMem &&
|
||||
g_mem.m_maxMem > 1000000 ) {
|
||||
if ( g_mem.m_used + (int32_t)size >= max &&
|
||||
g_conf.m_maxMem > 1000000 ) {
|
||||
log("mem: new(%"UINT32"): Out of memory.", (uint32_t)size );
|
||||
//if ( unlock ) mutexUnlock();
|
||||
throw std::bad_alloc();
|
||||
@ -334,9 +338,13 @@ void * operator new [] (size_t size) throw (std::bad_alloc) {
|
||||
// // return NULL; }
|
||||
//}
|
||||
|
||||
// hack so hostid #0 can use more mem
|
||||
int64_t max = g_conf.m_maxMem;
|
||||
//if ( g_hostdb.m_hostId == 0 ) max += 2000000000;
|
||||
|
||||
// don't go over max
|
||||
if ( g_mem.m_used + (int32_t)size >= g_mem.m_maxMem &&
|
||||
g_mem.m_maxMem > 1000000 ) {
|
||||
if ( g_mem.m_used + (int32_t)size >= max &&
|
||||
g_conf.m_maxMem > 1000000 ) {
|
||||
log("mem: new(%"UINT32"): Out of memory.", (uint32_t)size );
|
||||
throw std::bad_alloc();
|
||||
//throw 1;
|
||||
@ -406,7 +414,7 @@ newmemloop:
|
||||
Mem::Mem() {
|
||||
m_used = 0;
|
||||
// assume large max until this gets set for real
|
||||
m_maxMem = 50000000;
|
||||
//m_maxMem = 50000000;
|
||||
m_numAllocated = 0;
|
||||
m_numTotalAllocated = 0;
|
||||
m_maxAlloc = 0;
|
||||
@ -447,17 +455,16 @@ pid_t Mem::getPid() {
|
||||
return s_pid;
|
||||
}
|
||||
|
||||
bool Mem::init ( int64_t maxMem ) {
|
||||
bool Mem::init ( ) { // int64_t maxMem ) {
|
||||
// set main process pid
|
||||
s_pid = getpid();
|
||||
|
||||
// . don't swap our memory out, man...
|
||||
// . damn, linux 2.4.17 seems to crash the kernel sometimes w/ this
|
||||
//if ( mlockall( MCL_CURRENT | MCL_FUTURE ) == -1 ) {
|
||||
// log("Mem::init: mlockall: %s" , strerror(errno) );
|
||||
// errno = 0;
|
||||
//}
|
||||
m_maxMem = maxMem;
|
||||
//m_maxMem = maxMem;
|
||||
// set it
|
||||
//struct rlimit lim;
|
||||
//lim.rlim_max = maxMem;
|
||||
@ -542,7 +549,8 @@ void Mem::addMem ( void *mem , int32_t size , const char *note , char isnew ) {
|
||||
if ( ! s_initialized ) {
|
||||
//m_memtablesize = m_maxMem / 6510;
|
||||
// support 1.2M ptrs for now. good for about 8GB
|
||||
m_memtablesize = 3000*1024;//m_maxMem / 6510;
|
||||
// raise from 3000 to 8194 to fix host #1
|
||||
m_memtablesize = 8194*1024;//m_maxMem / 6510;
|
||||
//if ( m_maxMem < 8000000000 ) { char *xx=NULL;*xx=0; }
|
||||
}
|
||||
|
||||
@ -1338,8 +1346,13 @@ void *Mem::gbmalloc ( int size , const char *note ) {
|
||||
}
|
||||
|
||||
retry:
|
||||
|
||||
// hack so hostid #0 can use more mem
|
||||
int64_t max = g_conf.m_maxMem;
|
||||
//if ( g_hostdb.m_hostId == 0 ) max += 2000000000;
|
||||
|
||||
// don't go over max
|
||||
if ( m_used + size + UNDERPAD + OVERPAD >= m_maxMem ) {
|
||||
if ( m_used + size + UNDERPAD + OVERPAD >= max ) {
|
||||
// try to free temp mem. returns true if it freed some.
|
||||
if ( freeCacheMem() ) goto retry;
|
||||
g_errno = ENOMEM;
|
||||
@ -1398,7 +1411,7 @@ mallocmemloop:
|
||||
static int64_t s_lastTime;
|
||||
static int32_t s_missed = 0;
|
||||
int64_t now = gettimeofdayInMillisecondsLocal();
|
||||
int64_t avail = (int64_t)m_maxMem -
|
||||
int64_t avail = (int64_t)g_conf.m_maxMem -
|
||||
(int64_t)m_used;
|
||||
if ( now - s_lastTime >= 1000LL ) {
|
||||
log("mem: system malloc(%i,%s) availShouldBe=%"INT64": "
|
||||
@ -1502,8 +1515,13 @@ void *Mem::gbrealloc ( void *ptr , int oldSize , int newSize ,
|
||||
// return NULL;
|
||||
//}
|
||||
retry:
|
||||
|
||||
// hack so hostid #0 can use more mem
|
||||
int64_t max = g_conf.m_maxMem;
|
||||
//if ( g_hostdb.m_hostId == 0 ) max += 2000000000;
|
||||
|
||||
// don't go over max
|
||||
if ( m_used + newSize - oldSize >= m_maxMem ) {
|
||||
if ( m_used + newSize - oldSize >= max ) {
|
||||
// try to free temp mem. returns true if it freed some.
|
||||
if ( freeCacheMem() ) goto retry;
|
||||
g_errno = ENOMEM;
|
||||
|
Mem.h (4 changed lines)
@ -81,7 +81,7 @@ class Mem {
Mem();
~Mem();

bool init ( int64_t maxMem );
bool init ( );//int64_t maxMem );

void setPid();
pid_t getPid();
@ -161,7 +161,7 @@ class Mem {
int64_t m_maxAlloced; // at any one time
int64_t m_maxAlloc; // the biggest single alloc ever done
const char *m_maxAllocBy; // the biggest single alloc ever done
int64_t m_maxMem;
//int64_t m_maxMem;

// shared mem used
int64_t m_sharedUsed;
Monitordb.cpp
@ -26,9 +26,7 @@ bool Monitordb::init ( ) {
if ( ! m_pc.init ( "monitordb" ,
RDB_MONITORDB,
pcmem ,
pageSize ,
true , // use shared mem?
false )) // minimizeDiskSeeks?
pageSize ))
return log("db: Monitordb init failed.");
// init the rdb
return m_rdb.init ( g_hostdb.m_dir ,
Msg40.cpp
@ -5971,7 +5971,7 @@ bool Msg40::printCSVHeaderRow ( SafeBuf *sb ) {
SafeBuf nameBuf (tmp2, 1024);

int32_t ct = 0;
if ( msg20s[0] ) ct = msg20s[0]->m_r->m_contentType;
if ( msg20s[0] && msg20s[0]->m_r ) ct = msg20s[0]->m_r->m_contentType;

CollectionRec *cr =g_collectiondb.getRec(m_firstCollnum);
|
||||
|
||||
|
@ -34,7 +34,8 @@

// how many BigFiles can be using the same DiskPageCache?
#include "File.h"
#define MAX_NUM_VFDS2 MAX_NUM_VFDS
//#define MAX_NUM_VFDS2 MAX_NUM_VFDS
#define MAX_NUM_VFDS2 100*1024

extern void freeAllSharedMem ( int32_t max );
Parms.cpp (34 changed lines)
@ -5082,18 +5082,6 @@ void Parms::init ( ) {
|
||||
m++;
|
||||
*/
|
||||
|
||||
m->m_title = "max mem";
|
||||
m->m_desc = "Mem available to this process. May be exceeded due "
|
||||
"to fragmentation.";
|
||||
m->m_off = (char *)&g_conf.m_maxMem - g;
|
||||
m->m_def = "8000000000";
|
||||
m->m_cgi = "maxmem";
|
||||
m->m_obj = OBJ_CONF;
|
||||
m->m_page = PAGE_NONE;
|
||||
m->m_type = TYPE_LONG_LONG;
|
||||
m->m_flags = PF_NOAPI;
|
||||
m++;
|
||||
|
||||
/*
|
||||
m->m_title = "indexdb split";
|
||||
m->m_desc = "Number of times to split indexdb across groups. "
|
||||
@ -9918,6 +9906,18 @@ void Parms::init ( ) {
|
||||
m->m_obj = OBJ_CONF;
|
||||
m++;
|
||||
|
||||
m->m_title = "max mem";
|
||||
m->m_desc = "Mem available to this process. May be exceeded due "
|
||||
"to fragmentation.";
|
||||
m->m_cgi = "maxmem";
|
||||
m->m_off = (char *)&g_conf.m_maxMem - g;
|
||||
m->m_def = "8000000000";
|
||||
m->m_obj = OBJ_CONF;
|
||||
m->m_page = PAGE_MASTER; // PAGE_NONE;
|
||||
m->m_type = TYPE_LONG_LONG;
|
||||
//m->m_flags = PF_NOAPI;
|
||||
m++;
|
||||
|
||||
|
||||
m->m_title = "max total spiders";
|
||||
m->m_desc = "What is the maximum number of web "
|
||||
@ -19356,6 +19356,16 @@ void Parms::init ( ) {
|
||||
m->m_obj = OBJ_CONF;
|
||||
m++;
|
||||
|
||||
m->m_title = "log debug disk page cache";
|
||||
m->m_cgi = "ldpc";
|
||||
m->m_off = (char *)&g_conf.m_logDebugDiskPageCache - g;
|
||||
m->m_type = TYPE_BOOL;
|
||||
m->m_def = "0";
|
||||
m->m_priv = 1;
|
||||
m->m_page = PAGE_LOG;
|
||||
m->m_obj = OBJ_CONF;
|
||||
m++;
|
||||
|
||||
m->m_title = "log debug dns messages";
|
||||
m->m_cgi = "lddns";
|
||||
m->m_off = (char *)&g_conf.m_logDebugDns - g;
|
||||
|
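The "max mem" entry above is the same offset-based parm record as before, just moved from the hidden PAGE_NONE page onto the master controls page, so its value maps to g_conf.m_maxMem through m_off and its default through m_def. A hedged illustration of how such an offset/default pair resolves to the config field (ConfSketch, ParmSketch and applyDefault are illustrative names, not Parms code):

#include <cstdint>
#include <cstdlib>

struct ConfSketch { int64_t m_maxMem; };   // stand-in for g_conf
static ConfSketch g_confSketch;

// minimal stand-in for one parm table row: a byte offset into the conf
// object plus a default stored as a string, like m_off / m_def
struct ParmSketch { int32_t m_off; const char *m_def; };

// resolve the offset back to the int64_t field and apply the default,
// roughly what happens for a TYPE_LONG_LONG parm like "max mem"
static void applyDefault ( ParmSketch *m ) {
	char *g = (char *)&g_confSketch;
	int64_t *valp = (int64_t *)( g + m->m_off );
	*valp = strtoll ( m->m_def , NULL , 10 );
}

// usage: the offset is computed the same way the diff does it
// ParmSketch p = { (int32_t)((char *)&g_confSketch.m_maxMem -
//                            (char *)&g_confSketch) , "8000000000" };
// applyDefault ( &p );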
@ -3253,6 +3253,10 @@ void doneGettingNotifyUrlWrapper ( void *state , TcpSocket *sock ) {
// or maxToProcess limitation.
bool sendNotification ( EmailInfo *ei ) {

// disable for now
//log("ping: NOT SENDING NOTIFICATION -- DEBUG!!");
//return true;

if ( ei->m_inUse ) { char *xx=NULL;*xx=0; }

// caller must set this, as well as m_finalCallback/m_finalState
@ -148,9 +148,7 @@ bool Posdb::init ( ) {
if ( ! m_pc.init ( "posdb",
RDB_POSDB,
pcmem ,
pageSize ,
true , // use RAM disk?
false )) // minimize disk seeks?
pageSize ))
return log("db: Posdb init failed.");

// . set our own internal rdb
18
Rdb.cpp
@ -1726,6 +1726,16 @@ void Rdb::doneDumping ( ) {
// this should be called every few seconds by the sleep callback, too
void attemptMergeAll ( int fd , void *state ) {

// if fd is MAX_NUM_FDS that means it is from the sleep callback
if ( fd != 0 ) {
static int s_count = -1;
s_count++;
// instead of every 2 seconds, try every 60
if ( s_count == 30 ) s_count = 0;
if ( s_count != 0 ) return;
log("rdb: attempting to merge all files from sleep callback");
}

// wait an additional 1ms for every collection we have lest this
// slows things down since it is called every 2 seconds. so if
// we have 20,000 collections, wait an extra 20000 ms = 20 seconds.
@ -1735,6 +1745,9 @@ void attemptMergeAll ( int fd , void *state ) {
// if ( nowms - s_lastTry < extraWait ) return;
// s_lastTry = nowms;

// wait for any current merge to stop!
if ( g_merge.isMerging() ) return;


if ( state && g_conf.m_logDebugDb ) state = NULL;
//g_checksumdb.getRdb()->attemptMerge ( 1 , false , !state);
@ -1778,6 +1791,9 @@ void attemptMergeAll ( int fd , void *state ) {
// called by main.cpp
void Rdb::attemptMerge ( int32_t niceness , bool forced , bool doLog ) {

// wait for any current merge to stop!
if ( g_merge.isMerging() ) return;

for ( int32_t i = 0 ; i < getNumBases() ; i++ ) {
// we need this quickpoll for when we got 20,000+ collections
QUICKPOLL ( niceness );
@ -1797,6 +1813,8 @@ void Rdb::attemptMerge ( int32_t niceness , bool forced , bool doLog ) {
// lest we have 2000 collections all trying to merge tagdb
// at the same time!!!! this happened once...
if ( g_numThreads > 0 ) break;
// if we started a merge, stop checking then
if ( g_merge.isMerging() ) break;
}
}
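The attemptMergeAll() change relies on the sleep callback firing roughly every 2 seconds, so letting only every 30th timer-driven call through works out to about one merge attempt per minute, and the isMerging() guard keeps it from piling onto a merge that is already running. A small self-contained sketch of that counter throttle (isMergingNow() and tryMerges() are placeholders, not the engine's own names):

// placeholders for g_merge.isMerging() and the per-rdb merge attempts
static bool isMergingNow ( ) { return false; }
static void tryMerges   ( ) { }

// called by a repeating ~2 second sleep callback; fd == 0 means a direct,
// non-timer invocation that should never be throttled
void attemptMergeAllSketch ( int fd ) {
	if ( fd != 0 ) {
		static int s_count = -1;
		s_count++;
		// 30 ticks x 2 seconds = act only about once every 60 seconds
		if ( s_count == 30 ) s_count = 0;
		if ( s_count != 0 ) return;
	}
	// do not attempt a merge if one is already in progress
	if ( isMergingNow() ) return;
	tryMerges();
}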
26
RdbBase.cpp
@ -341,6 +341,11 @@ bool RdbBase::init ( char *dir ,
// load any saved tree
//if ( ! loadTree ( ) ) return false;

// now diskpagecache is much simpler, just basically rdbcache...
return true;

/*

// . init BigFile::m_fileSize and m_lastModifiedTime
// . m_lastModifiedTime is now used by the merge to select older
// titledb files to merge
@ -424,6 +429,7 @@ bool RdbBase::init ( char *dir ,
//int32_t n = f.write ( buf , 128*1024*5+10 , 0 );
//fprintf(stderr,"n=%"INT32"\n",n);
return true;
*/
}

// . move all files into trash subdir
@ -727,12 +733,12 @@ int32_t RdbBase::addFile ( int32_t id , bool isNew , int32_t mergeNum , int32_t
// HACK: skip to avoid a OOM lockup. if RdbBase cannot dump
// its data to disk it can backlog everyone and memory will
// never get freed up.
int64_t mm = g_mem.m_maxMem;
g_mem.m_maxMem = 0x0fffffffffffffffLL;
int64_t mm = g_conf.m_maxMem;
g_conf.m_maxMem = 0x0fffffffffffffffLL;
BigFile *f ;
try { f = new (BigFile); }
catch ( ... ) {
g_mem.m_maxMem = mm;
g_conf.m_maxMem = mm;
g_errno = ENOMEM;
log("RdbBase: new(%i): %s",
(int)sizeof(BigFile),mstrerror(g_errno));
@ -742,7 +748,7 @@ int32_t RdbBase::addFile ( int32_t id , bool isNew , int32_t mergeNum , int32_t
RdbMap *m ;
try { m = new (RdbMap); }
catch ( ... ) {
g_mem.m_maxMem = mm;
g_conf.m_maxMem = mm;
g_errno = ENOMEM;
log("RdbBase: new(%i): %s",
(int)sizeof(RdbMap),mstrerror(g_errno));
@ -752,7 +758,7 @@ int32_t RdbBase::addFile ( int32_t id , bool isNew , int32_t mergeNum , int32_t
}
mnew ( m , sizeof(RdbMap) , "RdbBMap" );
// reinstate the memory limit
g_mem.m_maxMem = mm;
g_conf.m_maxMem = mm;
// sanity check
if ( id2 < 0 && m_isTitledb ) { char *xx = NULL; *xx = 0; }
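The addFile() hunks keep the pre-existing trick, just retargeted at g_conf.m_maxMem: stash the cap, raise it to an effectively unlimited value so the BigFile and RdbMap needed to dump data to disk can always be allocated, then restore the old cap on every exit path. A hedged RAII-style sketch of the same idea (the ScopedMemCapLift name and the bare int64_t global are illustrative, not from the source):

#include <cstdint>

static int64_t g_maxMemCap = 8000000000LL;   // stand-in for g_conf.m_maxMem

// temporarily lift the per-process mem cap so a critical allocation
// cannot be refused, and restore the saved cap automatically on every
// exit path, including exceptions
class ScopedMemCapLift {
public:
	ScopedMemCapLift ( ) {
		m_saved = g_maxMemCap;
		g_maxMemCap = 0x0fffffffffffffffLL;
	}
	~ScopedMemCapLift ( ) { g_maxMemCap = m_saved; }
private:
	int64_t m_saved;
};

// usage sketch:
// {
//	ScopedMemCapLift lift;
//	BigFile *f = new BigFile();  // no longer subject to the cap
// }                                 // cap restored here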
@ -2497,10 +2503,12 @@ void RdbBase::saveMaps ( bool useThread ) {

void RdbBase::verifyDiskPageCache ( ) {
if ( !m_pc ) return;
for ( int32_t i = 0; i < m_numFiles; i++ ){
BigFile *f = m_files[i];
m_pc->verifyData(f);
}
// disable for now
return;
// for ( int32_t i = 0; i < m_numFiles; i++ ){
// BigFile *f = m_files[i];
// m_pc->verifyData(f);
// }
}

bool RdbBase::verifyFileSharding ( ) {
@ -28,8 +28,8 @@
// allocating if the record size is 256k or more. Copying 256k only
// takes .1 ms on the P4 2.60CGHz. This is on the TODO list.

#ifndef _RDBCACHE_H_
#define _RDBCACHE_H_
#ifndef RDBCACHE_H
#define RDBCACHE_H

// . TODO:
// . if size of added rec is ABOVE this, then don't use our memory buffer
@ -678,10 +678,11 @@ bool RdbDump::doneDumpingList ( bool addToMap ) {
// note it
log(LOG_LOGIC,"db: setting fd for vfd to -1.");
// mark our fd as not there...
int32_t i = (m_offset - m_bytesToWrite) / MAX_PART_SIZE;
//int32_t i=(m_offset-m_bytesToWrite) / MAX_PART_SIZE;
// sets s_fds[vfd] to -1
if ( m_file->m_files[i] )
releaseVfd ( m_file->m_files[i]->m_vfd );
// MDW: no, can't do this now
// if ( m_file->m_files[i] )
// releaseVfd ( m_file->m_files[i]->m_vfd );
}
//log("RdbDump::doneDumpingList: retrying.");
return dumpList ( m_list , m_niceness , true );
@ -349,7 +349,10 @@ bool RdbMap::verifyMap2 ( ) {
log("db: %s",cmd.getBufStart() );
gbsystem ( cmd.getBufStart() );

exit(0);
//exit(0);
// make the bash shell restart us by returning a 1 error code
exit(1);

//char *xx=NULL;*xx=0;
// was k too small?
//if ( i + 1 < m_numPages && lastKey <= getKey(i+1) ) {
26
RdbScan.cpp
@ -203,6 +203,7 @@ void gotListWrapper ( void *state ) {

void RdbScan::gotList ( ) {
char *allocBuf = m_fstate.m_allocBuf;
int32_t allocOff = m_fstate.m_allocOff; //buf=allocBuf+allocOff
int32_t allocSize = m_fstate.m_allocSize;
// do not free the allocated buf for when the actual thread
// does the read and finally completes in this case. we free it
@ -226,7 +227,6 @@ void RdbScan::gotList ( ) {
if ( m_fstate.m_allocBuf ) {
// get the buffer info for setting the list
//char *allocBuf = m_fstate.m_allocBuf;
int32_t allocOff = m_fstate.m_allocOff; //buf=allocBuf+allocOff
//int32_t allocSize = m_fstate.m_allocSize;
int32_t bytesDone = m_fstate.m_bytesDone;
// sanity checks
@ -248,16 +248,21 @@ void RdbScan::gotList ( ) {
m_useHalfKeys ,
m_ks );
}

// this was bitching a lot when running on a multinode cluster,
// so i effectively disabled it by changing to _GBSANITYCHECK2_
#ifdef GBSANITYCHECK2
//#ifdef GBSANITYCHECK2
// this first test, tests to make sure the read from cache worked
DiskPageCache *pc = m_file->getDiskPageCache();
if ( pc && ! g_errno ) {
if ( pc &&
! g_errno &&
g_conf.m_logDebugDiskPageCache &&
// if we got it from the page cache, verify with disk
m_fstate.m_inPageCache ) {
// ensure threads disabled
bool on = ! g_threads.areThreadsDisabled();
if ( on ) g_threads.disableThreads();
pc->disableCache();
//pc->disableCache();
FileState fstate;
// ensure we don't mess around
fstate.m_allocBuf = NULL;
@ -274,7 +279,7 @@ void RdbScan::gotList ( ) {
NULL , // callback state
gotListWrapper , // FAKE callback
MAX_NICENESS , // niceness
false, // m_allowPageCache ,
false, // m_allowPageCache ,... not for test!
m_hitDisk ,
16 + m_off );
//char *allocBuf = fstate.m_allocBuf;
@ -289,16 +294,21 @@ void RdbScan::gotList ( ) {
if ( m_bytesToRead != m_list->getListSize() ) {
char *xx = NULL; *xx = 0; }
}
// compare
if ( memcmp ( allocBuf+allocOff, bb , m_bytesToRead ) ) {
log("db: failed diskpagecache verify");
char *xx=NULL;*xx=0;
}
//mfree ( allocBuf , allocSize , "RS" );
mfree ( bb , m_bytesToRead , "RS" );
if ( on ) g_threads.enableThreads();
pc->enableCache();
//pc->enableCache();
// . this test tests to make sure the page stores worked
// . go through each page in page cache and verify on disk
pc->verifyData ( m_file );
//pc->verifyData ( m_file );
}
skip:
#endif
//#endif
// assume we did not shift it
m_shifted = 0;//false;
// if we were doing a cache only read, and got nothing, bail now
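The RdbScan changes gate the sanity check on the new logDebugDiskPageCache parm and on m_fstate.m_inPageCache, and the check itself boils down to re-reading the same byte range straight from disk and memcmp'ing it against what the page cache returned. A stripped-down sketch of that verification (readRangeFromDisk() is a placeholder for the second BigFile read, not a function in the source):

#include <cstdint>
#include <cstring>
#include <cstdio>
#include <cstdlib>

// placeholder for re-reading the same byte range directly from disk with
// the page cache bypassed; the real code issues a second BigFile read
static bool readRangeFromDisk ( char *dst , int32_t nbytes ) {
	(void)dst; (void)nbytes; return true;
}

// compare what the page cache handed back against what is on disk
static void verifyAgainstDisk ( char *cachedBuf , int32_t nbytes ) {
	char *bb = (char *)malloc ( nbytes );
	if ( ! bb ) return;
	if ( readRangeFromDisk ( bb , nbytes ) &&
	     memcmp ( cachedBuf , bb , nbytes ) != 0 )
		// the real check logs this and intentionally cores
		fprintf ( stderr , "db: failed diskpagecache verify\n" );
	free ( bb );
}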
12
Spider.cpp
@ -652,9 +652,7 @@ bool Spiderdb::init ( ) {
if ( ! m_pc.init ( "spiderdb",
RDB_SPIDERDB ,
pcmem ,
pageSize ,
false , // use shared mem?
false )) // minimizeDiskSeeks?
pageSize ))
return log(LOG_INIT,"spiderdb: Init failed.");

// initialize our own internal rdb
@ -854,9 +852,7 @@ bool Doledb::init ( ) {
if ( ! m_pc.init ( "doledb" ,
RDB_DOLEDB ,
pcmem ,
pageSize ,
true , // use shared mem?
false )) // minimizeDiskSeeks?
pageSize ))
return log(LOG_INIT,"doledb: Init failed.");

// initialize our own internal rdb
@ -7534,7 +7530,7 @@ bool SpiderLoop::spiderUrl9 ( SpiderRequest *sreq ,
// this causes us to dead lock when spiders use up all the mem, and
// file merge operation can not get any, and spiders need to add to
// titledb but can not until the merge completes!!
if ( g_mem.m_maxMem - g_mem.m_used < 25*1024*1024 ) {
if ( g_conf.m_maxMem - g_mem.m_used < 25*1024*1024 ) {
static int32_t s_lastTime = 0;
static int32_t s_missed = 0;
s_missed++;
@ -7543,7 +7539,7 @@ bool SpiderLoop::spiderUrl9 ( SpiderRequest *sreq ,
if ( now - s_lastTime > 10 ) {
log("spider: Need 25MB of free mem to launch spider, "
"only have %"INT64". Failed to launch %"INT32" times so "
"far.", g_mem.m_maxMem - g_mem.m_used , s_missed );
"far.", g_conf.m_maxMem - g_mem.m_used , s_missed );
s_lastTime = now;
}
}
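The spiderUrl9() hunk is the same gate as before, just reading the cap from g_conf: require 25MB of headroom under the configured limit before launching another spider, count the misses, and log at most once every 10 seconds so a memory-starved loop cannot flood the log. A small self-contained sketch of that throttled gate (nowSeconds() stands in for the engine's own clock call):

#include <cstdint>
#include <cstdio>
#include <ctime>

static int64_t g_maxMem = 8000000000LL;   // stand-in for g_conf.m_maxMem
static int64_t g_used   = 0;              // stand-in for g_mem.m_used

static int32_t nowSeconds ( ) { return (int32_t)time(NULL); }

// return true only if at least 25MB of headroom is left under the cap;
// otherwise count the miss and log it at most once every 10 seconds
bool canLaunchSpider ( ) {
	if ( g_maxMem - g_used >= 25*1024*1024 ) return true;
	static int32_t s_lastTime = 0;
	static int32_t s_missed   = 0;
	s_missed++;
	int32_t now = nowSeconds();
	if ( now - s_lastTime > 10 ) {
		fprintf ( stderr ,
			  "spider: need 25MB free, only have %lld. "
			  "failed to launch %d times so far.\n" ,
			  (long long)(g_maxMem - g_used) , (int)s_missed );
		s_lastTime = now;
	}
	return false;
}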
@ -1984,8 +1984,8 @@ bool ThreadQueue::launchThread2 ( ThreadEntry *te ) {
// . we know the stored File is still around because of that
bool doWrite = fs->m_doWrite;
BigFile *bb = fs->m_this;
fs->m_fd1 = bb->getfd (fs->m_filenum1, !doWrite, &fs->m_vfd1);
fs->m_fd2 = bb->getfd (fs->m_filenum2, !doWrite, &fs->m_vfd2);
fs->m_fd1 = bb->getfd (fs->m_filenum1,!doWrite);//&fs->m_vfd1);
fs->m_fd2 = bb->getfd (fs->m_filenum2,!doWrite);//&fs->m_vfd2);
// is this bad?
if ( fs->m_fd1 < 0 ) log("disk: fd1 is %i for %s",
fs->m_fd1,bb->m_baseFilename);
21
main.cpp
@ -1805,7 +1805,7 @@ int main2 ( int argc , char *argv[] ) {
// Load categories and generate country table
char structureFile[256];
g_conf.m_maxMem = 1000000000LL; // 1G
g_mem.m_maxMem = 1000000000LL; // 1G
//g_mem.m_maxMem = 1000000000LL; // 1G
sprintf(structureFile, "%scatdb/gbdmoz.structure.dat", g_hostdb.m_dir);
g_categories = &g_categories1;
if (g_categories->loadCategories(structureFile) != 0) {
@ -2396,7 +2396,7 @@ int main2 ( int argc , char *argv[] ) {
if ( strcmp ( cmd , "freecache" ) == 0 ) {
int32_t max = 7000000;
if ( cmdarg + 1 < argc ) max = atoi ( argv[cmdarg+1] );
freeAllSharedMem( max );
//freeAllSharedMem( max );
return true;
}

@ -10727,7 +10727,7 @@ bool gbgunzip (char *filename) {
// time speed of inserts into RdbTree for indexdb
bool bucketstest ( char* dbname ) {
g_conf.m_maxMem = 2000000000LL; // 2G
g_mem.m_maxMem = 2000000000LL; // 2G
//g_mem.m_maxMem = 2000000000LL; // 2G


if ( dbname ) {
@ -12224,7 +12224,7 @@ void dumpTagdb (char *coll,int32_t startFileNum,int32_t numFiles,

bool parseTest ( char *coll , int64_t docId , char *query ) {
g_conf.m_maxMem = 2000000000LL; // 2G
g_mem.m_maxMem = 2000000000LL; // 2G
//g_mem.m_maxMem = 2000000000LL; // 2G
//g_conf.m_checksumdbMaxDiskPageCacheMem = 0;
//g_conf.m_spiderdbMaxDiskPageCacheMem = 0;
g_conf.m_tfndbMaxDiskPageCacheMem = 0;
@ -14547,7 +14547,8 @@ int injectFile ( char *filename , char *ips ,
int64_t startDocId = 0LL;
int64_t endDocId = MAX_DOCID;

g_mem.init ( 4000000000LL );
g_conf.m_maxMem = 4000000000LL;
g_mem.init ( );//4000000000LL );

// set up the loop
if ( ! g_loop.init() ) return log("build: inject: Loop init "
@ -16325,8 +16326,8 @@ bool memTest() {
// if ( ! g_log.init( "./memlog" ) ) {//g_hostdb.m_logFilename ) ) {
// fprintf (stderr,"db: Log file init failed.\n" ); return 1; }
//g_mem.init(0xffffffff);
g_mem.m_maxMem = 0xffffffffLL;
g_mem.init( g_mem.m_maxMem );
g_conf.m_maxMem = 0xffffffffLL;
g_mem.init( );//g_mem.m_maxMem );


fprintf(stderr, "memtest: Testing memory bus bandwidth.\n");
@ -16344,7 +16345,7 @@ bool memTest() {
membustest ( 8000 , 100000 , true );

fprintf(stderr, "memtest: Allocating up to %"INT64" bytes\n",
g_mem.m_maxMem);
g_conf.m_maxMem);
for (i=0;i<4096;i++) {
ptrs[numPtrs] = mmalloc(1024*1024, "memtest");
if (!ptrs[numPtrs]) break;
@ -16354,7 +16355,7 @@ bool memTest() {
fprintf(stderr, "memtest: Was able to allocate %"INT64" bytes of a "
"total of "
"%"INT64" bytes of memory attempted.\n",
g_mem.m_used,g_mem.m_maxMem);
g_mem.m_used,g_conf.m_maxMem);

return true;

@ -16484,7 +16485,7 @@ void membustest ( int32_t nb , int32_t loops , bool readf ) {
bool cacheTest() {

g_conf.m_maxMem = 2000000000LL; // 2G
g_mem.m_maxMem = 2000000000LL; // 2G
//g_mem.m_maxMem = 2000000000LL; // 2G

hashinit();