log msg cleanups

mwells
2014-05-11 21:55:44 -07:00
parent a9dc18c866
commit 45b8bb3421
7 changed files with 32 additions and 31 deletions
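
The pattern throughout is the same: the subsystem prefix ("db:", "host:", "wiki:") stays lowercase and the first word of the message after it is capitalized, while a few noisy startup logs are commented out. A minimal standalone sketch of the convention follows; logmsg() here is a hypothetical stand-in, not the repo's actual log() (which, judging from call sites like "return log(...)" in the diff below, returns false so callers can log and bail in one statement), and the LOG_INFO value is a placeholder:

// Hypothetical stand-in for Gigablast's log(type, fmt, ...):
// prints a printf-style message and returns false so callers
// can write `return logmsg(...)` from a bool function.
#include <cstdio>
#include <cstdarg>

#define LOG_INFO 1 // placeholder value, not the repo's

static bool logmsg(int /*type*/, const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    fputc('\n', stderr);
    va_end(ap);
    return false;
}

int main() {
    const char *coll = "main";
    long collnum = 0;
    // Before: message body lowercase after the "db:" prefix.
    logmsg(LOG_INFO, "db: loading conf for collection %s (%li)", coll, collnum);
    // After this commit: prefix stays lowercase, message body capitalized.
    logmsg(LOG_INFO, "db: Loading conf for collection %s (%li)", coll, collnum);
    return 0;
}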

@@ -1656,7 +1656,7 @@ bool CollectionRec::load ( char *coll , long i ) {
strcpy ( m_coll , coll );
if ( ! g_conf.m_doingCommandLine )
log(LOG_INFO,"db: loading conf for collection %s (%li)",coll,
log(LOG_INFO,"db: Loading conf for collection %s (%li)",coll,
(long)m_collnum);
// collection name HACK for backwards compatibility
@@ -1688,7 +1688,7 @@ bool CollectionRec::load ( char *coll , long i ) {
// LOAD LOCAL
snprintf ( tmp1 , 1023, "%scoll.%s.%li/localcrawlinfo.dat",
g_hostdb.m_dir , m_coll , (long)m_collnum );
log(LOG_DEBUG,"db: loading %s",tmp1);
log(LOG_DEBUG,"db: Loading %s",tmp1);
m_localCrawlInfo.reset();
SafeBuf sb;
// fillfromfile returns 0 if does not exist, -1 on read error
@@ -1699,7 +1699,7 @@ bool CollectionRec::load ( char *coll , long i ) {
if ( ! g_conf.m_doingCommandLine )
log("coll: loaded %s (%li) local hasurlsready=%li",
log("coll: Loaded %s (%li) local hasurlsready=%li",
m_coll,
(long)m_collnum,
(long)m_localCrawlInfo.m_hasUrlsReadyToSpider);
@@ -1737,7 +1737,7 @@ bool CollectionRec::load ( char *coll , long i ) {
// LOAD GLOBAL
snprintf ( tmp1 , 1023, "%scoll.%s.%li/globalcrawlinfo.dat",
g_hostdb.m_dir , m_coll , (long)m_collnum );
log(LOG_DEBUG,"db: loading %s",tmp1);
log(LOG_DEBUG,"db: Loading %s",tmp1);
m_globalCrawlInfo.reset();
sb.reset();
if ( sb.fillFromFile ( tmp1 ) > 0 )
@@ -1746,7 +1746,7 @@ bool CollectionRec::load ( char *coll , long i ) {
memcpy ( &m_globalCrawlInfo , sb.getBufStart(),sb.length() );
if ( ! g_conf.m_doingCommandLine )
log("coll: loaded %s (%li) global hasurlsready=%li",
log("coll: Loaded %s (%li) global hasurlsready=%li",
m_coll,
(long)m_collnum,
(long)m_globalCrawlInfo.m_hasUrlsReadyToSpider);
@@ -2775,7 +2775,7 @@ void nukeDoledb ( collnum_t collnum );
bool CollectionRec::rebuildUrlFilters ( ) {
if ( ! g_conf.m_doingCommandLine )
log("coll: rebuilding url filters for %s ufp=%li",m_coll,
log("coll: Rebuilding url filters for %s ufp=%li",m_coll,
(long)m_urlFiltersProfile);
// if not a custom crawl, and no expressions, add a default one

@@ -95,11 +95,11 @@ bool HttpServer::init ( short port,
m_ssltcp.reset();
}
// log an innocent msg
log(LOG_INIT,"http: listening on TCP port %i with sd=%i",
log(LOG_INIT,"http: Listening on TCP port %i with sd=%i",
port, m_tcp.m_sock );
// log for https
if (m_ssltcp.m_ready)
log(LOG_INIT,"https: listening on TCP port %i with sd=%i",
log(LOG_INIT,"https: Listening on TCP port %i with sd=%i",
sslPort, m_ssltcp.m_sock );
return true;

@@ -1105,8 +1105,8 @@ bool Rdb::loadTree ( ) {
return log("db: Could not load saved buckets.");
long numKeys = m_buckets.getNumKeys();
log("db: Loaded %li recs from %s's buckets on disk.",
numKeys, m_dbname);
// log("db: Loaded %li recs from %s's buckets on disk.",
// numKeys, m_dbname);
if(!m_buckets.testAndRepair()) {
log("db: unrepairable buckets, "

@@ -594,9 +594,9 @@ bool RdbBuckets::set ( long fixedDataSize , long maxMem,
return false;
}
log("init: Successfully initialized buckets for %s, "
"keysize is %li, max mem is %li, datasize is %li",
m_dbname, (long)m_ks, m_maxMem, m_fixedDataSize);
// log("init: Successfully initialized buckets for %s, "
// "keysize is %li, max mem is %li, datasize is %li",
// m_dbname, (long)m_ks, m_maxMem, m_fixedDataSize);
/*
@@ -719,12 +719,12 @@ bool RdbBuckets::resizeTable(long numNeeded) {
g_errno = ENOMEM;
return false;
}
-log(LOG_INFO,
-"db: scaling down request for buckets. "
-"Currently have %li "
-"buckets, asked for %li, max number of buckets"
-" for %li bytes is %li.",
-m_maxBuckets, numNeeded, m_maxMem, m_maxBucketsCapacity);
+// log(LOG_INFO,
+// "db: scaling down request for buckets. "
+// "Currently have %li "
+// "buckets, asked for %li, max number of buckets"
+// " for %li bytes is %li.",
+// m_maxBuckets, numNeeded, m_maxMem, m_maxBucketsCapacity);
numNeeded = m_maxBucketsCapacity;
}
@@ -1114,6 +1114,7 @@ bool RdbBuckets::selfTest(bool thorough, bool core) {
last = kk;
lastcoll = b->getCollnum();
}
+if ( totalNumKeys != m_numKeysApprox )
log(LOG_WARN, "db have %li keys, should have %li. "
"%li buckets in %li colls for db %s",
totalNumKeys, m_numKeysApprox, m_numBuckets,

@@ -52,14 +52,14 @@ bool Wiki::load() {
m_txtSize = stats1.st_size;
// just use the .dat if we got it
if ( ! errno2 ) {
log(LOG_INFO,"wiki: loading %s",ff2);
log(LOG_INFO,"wiki: Loading %s",ff2);
// "dir" is NULL since already included in ff2
return m_ht.load ( NULL , ff2 );
}
// if we got a newer binary version, use that
// add in 10 seconds i guess
if ( ! errno2 && ! errno1 && stats2.st_mtime +10> stats1.st_mtime ) {
log(LOG_INFO,"wiki: loading %s",ff2);
log(LOG_INFO,"wiki: Loading %s",ff2);
// "dir" is NULL since already included in ff2
return m_ht.load ( NULL , ff2 );
}
@@ -82,12 +82,12 @@ bool Wiki::loadText ( long fileSize ) {
SafeBuf sb;
char ff1[256];
sprintf(ff1, "%swikititles.txt.part1", g_hostdb.m_dir);
log(LOG_INFO,"wiki: loading %s",ff1);
log(LOG_INFO,"wiki: Loading %s",ff1);
if ( ! sb.fillFromFile(ff1) ) return false;
char ff2[256];
sprintf(ff2, "%swikititles.txt.part2", g_hostdb.m_dir);
log(LOG_INFO,"wiki: loading %s",ff2);
log(LOG_INFO,"wiki: Loading %s",ff2);
if ( ! sb.catFile(ff2) ) return false;

@@ -261,10 +261,10 @@ bool Wiktionary::load() {
( errno1 || stats3.st_mtime > stats1.st_mtime )
//&& ( errno2 || stats3.st_mtime > stats2.st_mtime )
) {
log(LOG_INFO,"wikt: loading %s",ff3);
log(LOG_INFO,"wikt: Loading %s",ff3);
if ( ! m_synTable .load ( NULL , ff3 ) )
return false;
log(LOG_INFO,"wikt: loading %s",ff4);
log(LOG_INFO,"wikt: Loading %s",ff4);
if ( m_synBuf.fillFromFile ( NULL , ff4 ) <= 0 )
return false;
@@ -517,7 +517,7 @@ bool Wiktionary::generateHashTableFromWiktionaryTxt ( long sizen ) {
//
char ff1[256];
sprintf(ff1, "%swiktionary.txt.aa", g_hostdb.m_dir);
log(LOG_INFO,"wikt: loading %s",ff1);
log(LOG_INFO,"wikt: Loading %s",ff1);
int fd1 = open ( ff1 , O_RDONLY );
if ( fd1 < 0 ) {
log("wikt: open %s : %s",ff1,mstrerror(errno));
@@ -558,7 +558,7 @@ bool Wiktionary::generateHashTableFromWiktionaryTxt ( long sizen ) {
round++;
offset = 0;
sprintf(ff1,"%swiktionary.txt.ab",g_hostdb.m_dir);
log(LOG_INFO,"wikt: loading %s",ff1);
log(LOG_INFO,"wikt: Loading %s",ff1);
int fd1 = open ( ff1 , O_RDONLY );
if ( fd1 < 0 ) {
log("wikt: open %s : %s",ff1,mstrerror(errno));

@@ -2872,17 +2872,17 @@ int main2 ( int argc , char *argv[] ) {
g_log.m_logTimestamps = true;
// show current working dir
log("host: working directory is %s",workingDir);
log("host: Working directory is %s",workingDir);
log("host: using %shosts.conf",g_hostdb.m_dir);
log("host: Using %shosts.conf",g_hostdb.m_dir);
// from Hostdb.cpp
ips = getLocalIps();
for ( ; ips && *ips ; ips++ )
log("host: detected local ip %s",iptoa(*ips));
log("host: Detected local ip %s",iptoa(*ips));
// show it
log("host: running as host id #%li",g_hostdb.m_hostId );
log("host: Running as host id #%li",g_hostdb.m_hostId );
if (!ucInit(g_hostdb.m_dir, true)) {