fix problem of saving the RdbMap when coring inside a malloc/free.

Matt Wells
2015-08-16 22:14:53 -07:00
parent be1ebfbcd0
commit 28644f127e
4 changed files with 19 additions and 1 deletion

@@ -165,6 +165,18 @@ bool BigFile::addPart ( int32_t n ) {
char *xx=NULL;*xx=0;}
// how much more mem do we need?
int32_t delta = need - m_fileBuf.getLength();
// avoid a malloc for small files.
// this way we can save in-memory RdbMaps upon a core, even on
// malloc/free-related cores, because we won't have to do a malloc to save!
if ( delta <= LITTLEBUFSIZE && ! m_fileBuf.m_buf ) {
m_fileBuf.m_usingStack = true;
m_fileBuf.m_buf = m_littleBuf;
m_fileBuf.m_capacity = LITTLEBUFSIZE;
m_fileBuf.m_length = 0;
// do not call reserve() below:
delta = 0;
}
// . make sure our CAPACITY is increased by what we need
// . SafeBuf::reserve() ADDS this much to current capacity
// . true = clear new mem so File::m_calledSet is false for Files
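
The fast path above is essentially a small-buffer optimization on SafeBuf: when the part array fits in LITTLEBUFSIZE bytes and nothing has been allocated yet, the SafeBuf is pointed at the BigFile's own m_littleBuf member instead of heap memory. The sketch below is a minimal, self-contained illustration of that idea, not the real classes; only the field names (m_buf, m_capacity, m_length, m_usingStack, m_littleBuf, LITTLEBUFSIZE) come from this diff, and the reserve()/destructor behavior shown is an assumption about how SafeBuf handles the borrowed buffer.

#include <cstdlib>
#include <cstring>
#include <cstdint>

#define LITTLEBUFSIZE 80

// Minimal stand-in for SafeBuf with only the members the diff touches.
struct MiniSafeBuf {
	char   *m_buf        = nullptr;
	int32_t m_capacity   = 0;
	int32_t m_length     = 0;
	bool    m_usingStack = false;   // true when m_buf is not heap memory

	int32_t getLength() const { return m_length; }

	// Grow capacity by 'delta' bytes using the heap.
	bool reserve(int32_t delta) {
		int32_t newCap = m_capacity + delta;
		char *nb = (char *)realloc(m_usingStack ? nullptr : m_buf, newCap);
		if (!nb) return false;
		// if we were borrowing a member/stack buffer, copy its contents over
		if (m_usingStack && m_buf) memcpy(nb, m_buf, m_length);
		m_buf        = nb;
		m_capacity   = newCap;
		m_usingStack = false;
		return true;
	}

	// Only free memory we actually own on the heap.
	~MiniSafeBuf() { if (!m_usingStack && m_buf) free(m_buf); }
};

struct MiniBigFile {
	MiniSafeBuf m_fileBuf;             // holds the array of File parts
	char m_littleBuf[LITTLEBUFSIZE];   // inline storage for small files

	bool addPartBytes(int32_t need) {
		int32_t delta = need - m_fileBuf.getLength();
		// small file and nothing allocated yet: point the SafeBuf at the
		// member buffer so no malloc is needed (safe to run while coring
		// inside malloc/free)
		if (delta <= LITTLEBUFSIZE && !m_fileBuf.m_buf) {
			m_fileBuf.m_usingStack = true;
			m_fileBuf.m_buf        = m_littleBuf;
			m_fileBuf.m_capacity   = LITTLEBUFSIZE;
			m_fileBuf.m_length     = 0;
			delta = 0;                 // skip the reserve() below
		}
		if (delta > 0 && !m_fileBuf.reserve(delta)) return false;
		return true;
	}
};

int main() {
	MiniBigFile bf;
	bf.addPartBytes(3 * sizeof(void *));   // small: stays on the member buffer
	bf.addPartBytes(64 * sizeof(void *));  // larger: falls back to the heap
	return 0;
}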

@@ -31,6 +31,8 @@ ssize_t gbpwrite(int fd, const void *buf, size_t count, off_t offset);
// debug define
//#define MAX_PART_FILES 100
#define LITTLEBUFSIZE 80
// use this state class for doing non-blocking reads/writes
#ifdef ASYNCIO
#include <aio.h> // TODO: use kaio, uses only 4 threads
@@ -263,6 +265,8 @@ class BigFile {
// to hold the array of Files
SafeBuf m_fileBuf;
char m_littleBuf[LITTLEBUFSIZE];
// ptrs to the part files
//File *m_files ;//[ MAX_PART_FILES ];
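
LITTLEBUFSIZE is 80 bytes; if m_fileBuf stores File* pointers for the part files (an assumption suggested by the retired m_files member above, not something this diff states), that covers ten parts on a 64-bit build. A hypothetical compile-time guard along those lines:

// Hypothetical check: assuming the little buffer holds File* pointers, make
// sure it covers a whole number of them and at least one part file.
#define LITTLEBUFSIZE 80

struct File;   // forward declaration stand-in; only the pointer size matters

static_assert(LITTLEBUFSIZE % sizeof(File *) == 0,
              "little buffer should hold a whole number of part pointers");
static_assert(LITTLEBUFSIZE / sizeof(File *) >= 1,
              "little buffer must cover at least one part file");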

@@ -93,6 +93,8 @@ void RdbMap::reset ( ) {
m_lastLogTime = 0;
m_badKeys = 0;
m_needVerify = false;
m_file.reset();
}
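
The reason a malloc-free save path matters: if the process is coring because it crashed inside malloc or free, the allocator can no longer be trusted, so the crash handler that saves the in-memory RdbMaps must not allocate. The sketch below only illustrates that constraint; the handler and saveAllRdbMapsNoAlloc() are invented names, not gb's actual signal-handling code.

#include <csignal>
#include <unistd.h>

// Invented stand-in for the real save routine; the important property is
// that it performs no heap allocation (write(2) on pre-opened state only).
static void saveAllRdbMapsNoAlloc() {
	const char msg[] = "saving in-memory maps without malloc\n";
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
}

// If the crash happened inside malloc/free, the allocator's lock or internal
// state may be unusable, so the handler must not allocate.
static void onFatalSignal(int sig) {
	saveAllRdbMapsNoAlloc();
	// restore the default action and re-raise so a core is still dumped
	signal(sig, SIG_DFL);
	raise(sig);
}

int main() {
	signal(SIGSEGV, onFatalSignal);
	signal(SIGABRT, onFatalSignal);
	// ... run the server; a later fault will go through onFatalSignal ...
	return 0;
}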

@@ -388,7 +388,7 @@ public:
public:
int32_t m_capacity;
int32_t m_length;
protected:
//protected:
char *m_buf;
public:
char *m_label;
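
Un-protecting m_buf is what lets BigFile::addPart() point the SafeBuf at m_littleBuf directly. A hypothetical alternative, sketched below and not part of this commit, would keep m_buf protected behind a small setter; either way, whatever eventually frees m_buf presumably has to check m_usingStack so a member/stack buffer is never passed to free().

#include <cstdint>

// Hypothetical alternative kept out of the commit: a setter that points a
// SafeBuf-like class at caller-owned storage while keeping m_buf protected.
class SafeBufLike {
public:
	void setExternalBuf(char *buf, int32_t capacity) {
		m_buf        = buf;
		m_capacity   = capacity;
		m_length     = 0;
		m_usingStack = true;   // never free() caller-owned memory
	}
	int32_t m_capacity = 0;
	int32_t m_length   = 0;
protected:
	char *m_buf        = nullptr;
	bool  m_usingStack = false;
};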