fix pesky memory leak finally

Matt Wells 2015-07-13 17:47:34 -06:00
parent c03594034d
commit 1ba57f9278
12 changed files with 48 additions and 6 deletions

@@ -170,6 +170,10 @@ int32_t Highlight::set ( SafeBuf *sb ,
//m_bufLen = bufLen;
//m_bufPtr = buf;
m_sb = sb;
// label it
m_sb->setLabel ("highw");
// save room for terminating \0
//m_bufEnd = m_buf + m_bufLen - 1;

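Most of the hunks in this commit add a setLabel() call right after a SafeBuf is set up ("highw" here, and "apisb", "qswbuf", "sisbuf1"-"sisbuf3", "syswbuf", "stelnks1"/"stelnks2" below). The label exists so the per-note accounting in Mem::addMem can attribute outstanding bytes to a specific buffer, which is how a leak like this one gets pinned down. A minimal sketch of the idea, with a hypothetical LabeledBuf class and g_outstanding table standing in for the real SafeBuf/Mem code:

#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

static std::map<std::string, long> g_outstanding; // label -> bytes still allocated

class LabeledBuf {
public:
    LabeledBuf() : m_buf(NULL), m_size(0), m_label("unlabeled") {}
    ~LabeledBuf() { purge(); }
    void setLabel(const char *label) { m_label = label; }
    bool reserve(long size) {
        purge();
        m_buf = (char *)malloc(size);
        if (!m_buf) return false;
        m_size = size;
        g_outstanding[m_label] += size;   // charge the allocation to our label
        return true;
    }
    void purge() {
        if (!m_buf) return;
        g_outstanding[m_label] -= m_size; // give the bytes back
        free(m_buf);
        m_buf = NULL;
        m_size = 0;
    }
private:
    char *m_buf;
    long m_size;
    std::string m_label;
};

int main() {
    LabeledBuf a, b;
    a.setLabel("highw");    // same idea as m_sb->setLabel("highw") above
    b.setLabel("qswbuf");
    a.reserve(4096);
    b.reserve(1024);
    a.purge();              // freed correctly
    // b is never purged, so "qswbuf" still shows 1024 outstanding bytes here
    for (std::map<std::string, long>::const_iterator it = g_outstanding.begin();
         it != g_outstanding.end(); ++it)
        printf("%-10s %ld bytes outstanding\n", it->first.c_str(), it->second);
    return 0;
}

Dumping such a table after a request finishes makes the leaking label obvious: its byte count keeps climbing while the others return to zero.
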
@@ -530,6 +530,11 @@ void Mem::addMem ( void *mem , int32_t size , const char *note , char isnew ) {
//validate();
// if ( note && note[0] == 'S' && note[1] == 'a' &&
// note[2] == 'f' && size == 13371521 )
// log("mem: got mystery safebuf");
//m_memtablesize = 0;//DMEMTABLESIZE;
// 4G/x = 600*1024 -> x = 4000000000.0/(600*1024) = 6510
// crap, g_hostdb.init() is called in main.cpp before

@@ -413,6 +413,8 @@ bool Msg2::getLists ( ) {
// mem. we should also report the size of each termlist
// in bytes in the query info header.
//int32_t minRecSizes = DEFAULT_POSDB_READSIZE;
// MDW TODO fix this later we go oom too easily for queries
// like 'www.disney.nl'
int32_t minRecSizes = -1;
// start up the read. thread will wait in thread queue to

@@ -380,7 +380,9 @@ bool Msg3a::gotCacheReply ( ) {
rs = DEFAULT_POSDB_READSIZE;//90000000; // 90MB!
// it is better to go oom than leave users scratching their
// heads as to why some results are not being returned.
rs = -1;
// no, because we are going out of mem for queries like
// 'www.disney.nl' etc.
//rs = -1;
// if section stats, limit to 1MB
//if ( m_r->m_getSectionStats ) rs = 1000000;
// get the jth query term

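The Msg2 and Msg3a hunks both argue over the same knob: a posdb read size of -1 means "read the whole termlist", while a positive cap such as DEFAULT_POSDB_READSIZE (90MB) truncates huge lists so common-host queries like 'www.disney.nl' do not run the process out of memory, at the cost of possibly dropping results. A small sketch of that sentinel, using a hypothetical clampReadSize() helper rather than the real Msg2/Msg3a code:

#include <cstdio>
#include <algorithm>

// returns how many bytes of a termlist we will actually read
long clampReadSize(long listBytes, long minRecSizes) {
    if (minRecSizes < 0) return listBytes;       // -1: no cap, risk of oom
    return std::min(listBytes, minRecSizes);     // capped: may truncate results
}

int main() {
    const long DEFAULT_POSDB_READSIZE = 90000000; // the 90MB cap mentioned above
    long hugeList = 400000000;                    // e.g. a 400MB termlist
    printf("uncapped : %ld bytes\n", clampReadSize(hugeList, -1));
    printf("capped   : %ld bytes\n", clampReadSize(hugeList, DEFAULT_POSDB_READSIZE));
    return 0;
}
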
@@ -3519,6 +3519,7 @@ bool printApiForPage ( SafeBuf *sb , int32_t PAGENUM , CollectionRec *cr ) {
if ( pageNum != PAGENUM ) continue;
SafeBuf tmp;
tmp.setLabel("apisb");
char diff = 0;
bool printVal = false;
if ( parm->m_type != TYPE_CMD &&

@@ -28,6 +28,7 @@ void Query::constructor ( ) {
//m_bmap = NULL;
m_bitScores = NULL;
m_qwords = NULL;
m_numWords = 0;
//m_expressions = NULL;
m_qwordsAllocSize = 0;
//m_expressionsAllocSize = 0;
@@ -68,6 +69,11 @@ void Query::reset ( ) {
qt->m_facetIndexBuf.purge();
}
for ( int32_t i = 0 ; i < m_numWords ; i++ ) {
QueryWord *qw = &m_qwords[i];
qw->destructor();
}
m_sb.purge();
m_osb.purge();
m_docIdRestriction = 0LL;
@@ -86,6 +92,7 @@ void Query::reset ( ) {
//if ( m_bitScores && m_bitScoresSize ) // != m_bsbuf )
// mfree ( m_bitScores , m_bitScoresSize , "Query2" );
//m_bmap = NULL;
m_bitScores = NULL;
//m_bmapSize = 0;
m_bitScoresSize = 0;
@@ -1402,6 +1409,7 @@ bool Query::setQTerms ( Words &words , Phrases &phrases ) {
// sanity
if ( naids > MAX_SYNS ) { char *xx=NULL;*xx=0; }
// now make the buffer to hold them for us
qw->m_synWordBuf.setLabel("qswbuf");
qw->m_synWordBuf.safeMemcpy ( &syn.m_synWordBuf );
// get the term for this word
QueryTerm *origTerm = qw->m_queryWordTerm;
@@ -2057,6 +2065,9 @@ bool Query::setQWords ( char boolFlag ,
return log("query: Could not allocate mem for query.");
m_qwordsAllocSize = need;
}
// reset safebuf in there
for ( int32_t i = 0 ; i < m_numWords ; i++ )
m_qwords[i].constructor();
// is all alpha chars in query in upper case? caps lock on?
bool allUpper = true;
@@ -5655,3 +5666,11 @@ int64_t Query::getQueryHash() {
}
return qh;
}
void QueryWord::constructor () {
m_synWordBuf.constructor();
}
void QueryWord::destructor () {
m_synWordBuf.purge();
}

@@ -274,6 +274,9 @@ class QueryWord {
if ( is_wspace_utf8 ( p ) ) return true;
return false;
};
void constructor ();
void destructor ();
//UCScript wordScript() {
// UChar*foo;
// return ucGetScript(utf16Decode((UChar*)(m_word),&foo));

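The Query.cpp/Query.h hunks above are the actual leak fix: m_qwords is a malloc'd array of QueryWord objects, so the SafeBuf inside each one (m_synWordBuf) was never constructed or purged automatically. The commit gives QueryWord explicit constructor()/destructor() methods, calls constructor() on every element right after the allocation in setQWords(), and destructor() on every element in reset(). The pattern in miniature, with hypothetical Word/Buf stand-ins instead of the real classes:

#include <cstdlib>

struct Buf {                        // stand-in for SafeBuf
    char *m_ptr;
    void constructor() { m_ptr = NULL; }
    void purge() { free(m_ptr); m_ptr = NULL; }
    void grow(size_t n) { m_ptr = (char *)realloc(m_ptr, n); }
};

struct Word {                       // stand-in for QueryWord
    Buf m_synWordBuf;
    void constructor() { m_synWordBuf.constructor(); }
    void destructor()  { m_synWordBuf.purge(); }
};

int main() {
    int n = 8;
    // raw allocation: no constructors run, members hold garbage
    Word *words = (Word *)malloc(n * sizeof(Word));
    for (int i = 0; i < n; i++) words[i].constructor();   // like setQWords() now does

    for (int i = 0; i < n; i++) words[i].m_synWordBuf.grow(64);

    // without this loop the 64-byte buffers leak when the array is freed,
    // which is the leak Query::reset() now plugs
    for (int i = 0; i < n; i++) words[i].destructor();
    free(words);
    return 0;
}

Forgetting either loop reproduces the bug: the array itself is freed, but every per-word buffer it owned stays allocated.
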
@@ -522,7 +522,8 @@ int32_t SafeBuf::safeSave (char *filename ) {
}
int32_t SafeBuf::fillFromFile(char *dir,char *filename) {
int32_t SafeBuf::fillFromFile(char *dir,char *filename,char *label) {
m_label = label;
char buf[1024];
if ( dir ) snprintf(buf,1024,"%s/%s",dir,filename);
else snprintf(buf,1024,"%s",filename);

@@ -73,8 +73,9 @@ public:
int32_t safeSave (char *filename );
int32_t fillFromFile(char *filename);
int32_t fillFromFile(char *dir,char *filename);
int32_t load(char *dir,char *fname) { return fillFromFile(dir,fname);};
int32_t fillFromFile(char *dir,char *filename, char *label=NULL);
int32_t load(char *dir,char *fname,char *label = NULL) {
return fillFromFile(dir,fname,label);};
int32_t load(char *fname) { return fillFromFile(fname);};
void filterTags();

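The SafeBuf.cpp/SafeBuf.h hunks thread an optional label argument through fillFromFile() and load() so file-backed buffers get attributed too, with a NULL default keeping existing call sites source-compatible. A simplified sketch of that plumbing (hypothetical FileBuf class; only the signatures shown in the diff come from the real SafeBuf):

#include <cstdio>
#include <cstdlib>

class FileBuf {
public:
    FileBuf() : m_buf(NULL), m_len(0), m_label("unlabeled") {}
    ~FileBuf() { free(m_buf); }
    void setLabel(const char *label) { if (label) m_label = label; }

    // optional label defaults to NULL so existing callers need no change
    long fillFromFile(const char *dir, const char *filename, const char *label = NULL) {
        setLabel(label);                   // record the label before allocating
        char path[1024];
        if (dir) snprintf(path, sizeof(path), "%s/%s", dir, filename);
        else     snprintf(path, sizeof(path), "%s", filename);
        FILE *f = fopen(path, "rb");
        if (!f) return -1;
        fseek(f, 0, SEEK_END);
        long size = ftell(f);
        fseek(f, 0, SEEK_SET);
        m_buf = (char *)malloc(size + 1);  // a real system would charge this
        if (!m_buf) { fclose(f); return -1; } // allocation to m_label
        m_len = (long)fread(m_buf, 1, size, f);
        m_buf[m_len] = '\0';
        fclose(f);
        return m_len;
    }
    // load() is just a thin wrapper that forwards the label
    long load(const char *dir, const char *fname, const char *label = NULL) {
        return fillFromFile(dir, fname, label);
    }
private:
    char *m_buf;
    long m_len;
    const char *m_label;
};

int main() {
    FileBuf b;
    // mirrors m_siteBuf1.load(g_hostdb.m_dir,"sitelinks1.dat","stelnks1") below
    long n = b.load(".", "sitelinks1.dat", "stelnks1");
    if (n < 0) printf("could not read sitelinks1.dat\n");
    else       printf("read %ld bytes\n", n);
    return 0;
}
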
@@ -825,6 +825,9 @@ bool SearchInput::setQueryBuffers ( HttpRequest *hr ) {
m_sbuf2.safeStrcpy(" AND ");
}
}
m_sbuf1.setLabel("sisbuf1");
m_sbuf2.setLabel("sisbuf2");
m_sbuf3.setLabel("sisbuf3");
// append the natural query
if ( m_query && m_query[0] ) {
//if ( p > pstart ) *p++ = ' ';

@@ -12,6 +12,7 @@
#include "Wiktionary.h"
Synonyms::Synonyms() {
m_synWordBuf.setLabel("syswbuf");
}
Synonyms::~Synonyms() {

@@ -5049,8 +5049,8 @@ bool Tagdb::loadMinSiteInlinksBuffer2 ( ) {
// use 4 bytes for the first 130,000 entries or so to hold
// # of site inlinks. then we only need 1 byte since the remaining
// 25M are <256 sitenuminlinksuniquecblocks
m_siteBuf1.load(g_hostdb.m_dir,"sitelinks1.dat");
m_siteBuf2.load(g_hostdb.m_dir,"sitelinks2.dat");
m_siteBuf1.load(g_hostdb.m_dir,"sitelinks1.dat","stelnks1");
m_siteBuf2.load(g_hostdb.m_dir,"sitelinks2.dat","stelnks2");
m_siteBuf1.setLabel("sitelnks");
m_siteBuf2.setLabel("sitelnks");