optimize tfile

yihaoDeng 2022-01-04 15:59:54 +08:00
parent 4079bd2b55
commit 4bdac1fdbf
4 changed files with 13 additions and 14 deletions

View File

@@ -397,7 +397,7 @@ static void indexDestroyTempResult(SArray* result) {
 }
 int indexFlushCacheTFile(SIndex* sIdx, void* cache) {
   if (sIdx == NULL) { return -1; }
-  indexWarn("suid %" PRIu64 " merge cache into tindex", sIdx->suid);
+  indexInfo("suid %" PRIu64 " merge cache into tindex", sIdx->suid);
   IndexCache* pCache = (IndexCache*)cache;
   TFileReader* pReader = tfileGetReaderByCol(sIdx->tindex, pCache->suid, pCache->colName);

View File

@@ -20,7 +20,7 @@
 #define MAX_INDEX_KEY_LEN 256  // test only, change later
-#define MEM_TERM_LIMIT 5 * 10000
+#define MEM_TERM_LIMIT 10 * 10000
 // ref index_cache.h:22
 //#define CACHE_KEY_LEN(p) \
 //  (sizeof(int32_t) + sizeof(uint16_t) + sizeof(p->colType) + sizeof(p->nColVal) + p->nColVal + sizeof(uint64_t) +
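
For orientation, MEM_TERM_LIMIT is only visible here as a raised constant. A plausible reading (an assumption, not something this diff confirms) is that it bounds how many terms the in-memory cache buffers before spilling into a tfile, so doubling it from 5 * 10000 to 10 * 10000 roughly halves the flush frequency at the cost of more memory. A minimal sketch under that assumption, with hypothetical names (MemCache, flushToTFile, cachePut):

#include <string>
#include <vector>

#define MEM_TERM_LIMIT (10 * 10000)

struct MemCache {
  std::vector<std::string> terms;  // terms buffered in memory
};

// Stand-in for the real cache-to-tfile merge; illustrative only.
static void flushToTFile(MemCache* c) { c->terms.clear(); }

static void cachePut(MemCache* c, const std::string& term) {
  c->terms.push_back(term);
  if ((int)c->terms.size() >= MEM_TERM_LIMIT) {
    flushToTFile(c);  // spill once the in-memory limit is reached
  }
}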

View File

@@ -264,7 +264,8 @@ int tfileWriterPut(TFileWriter* tw, void* data, bool order) {
   // ugly code, refactor later
   for (size_t i = 0; i < sz; i++) {
     TFileValue* v = taosArrayGetP((SArray*)data, i);
-    // taosArrayRemoveDuplicate(v->tablId, tfileUidCompare, NULL);
+    taosArraySort(v->tableId, tfileUidCompare);
+    taosArrayRemoveDuplicate(v->tableId, tfileUidCompare, NULL);
     int32_t tbsz = taosArrayGetSize(v->tableId);
     fstOffset += TF_TABLE_TATOAL_SIZE(tbsz);
   }
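
The new sort-then-dedupe step above orders each value's table-id list so duplicate uids become adjacent and can be removed in one pass (taosArraySort followed by taosArrayRemoveDuplicate). A minimal standalone sketch of the same idea on a plain vector of uids, using std::sort and std::unique instead of the project's SArray helpers:

#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative only: mirrors the sort-then-dedupe applied to v->tableId above.
static void dedupTableIds(std::vector<uint64_t>& tableId) {
  std::sort(tableId.begin(), tableId.end());                  // duplicates become adjacent
  tableId.erase(std::unique(tableId.begin(), tableId.end()),  // drop adjacent duplicates
                tableId.end());
}
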
@@ -545,8 +546,8 @@ static int tfileReaderLoadHeader(TFileReader* reader) {
     indexError("actual Read: %d, to read: %d, errno: %d, filefd: %d, filename: %s", (int)(nread), (int)sizeof(buf),
                errno, reader->ctx->file.fd, reader->ctx->file.buf);
   } else {
-    indexError("actual Read: %d, to read: %d, errno: %d, filefd: %d, filename: %s", (int)(nread), (int)sizeof(buf),
-               errno, reader->ctx->file.fd, reader->ctx->file.buf);
+    indexInfo("actual Read: %d, to read: %d, filefd: %d, filename: %s", (int)(nread), (int)sizeof(buf),
+              reader->ctx->file.fd, reader->ctx->file.buf);
   }
   // assert(nread == sizeof(buf));
   memcpy(&reader->header, buf, sizeof(buf));
@@ -562,8 +563,8 @@ static int tfileReaderLoadFst(TFileReader* reader) {
   WriterCtx* ctx = reader->ctx;
   int32_t nread = ctx->readFrom(ctx, buf, FST_MAX_SIZE, reader->header.fstOffset);
-  indexError("nread = %d, and fst offset=%d, filename: %s, size: %d ", nread, reader->header.fstOffset, ctx->file.buf,
-             ctx->file.size);
+  indexInfo("nread = %d, and fst offset=%d, filename: %s, size: %d ", nread, reader->header.fstOffset, ctx->file.buf,
+            ctx->file.size);
   // we assuse fst size less than FST_MAX_SIZE
   assert(nread > 0 && nread < FST_MAX_SIZE);
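
For context, tfileReaderLoadFst reads the serialized FST region starting at the offset recorded in the file header and asserts that it fits the read buffer. A rough standalone sketch of that read step using pread (loadFstBlob and its parameters are illustrative, not the project's API):

#include <cassert>
#include <unistd.h>

// Read up to fstMaxSize bytes of the fst blob that starts at fstOffset.
static ssize_t loadFstBlob(int fd, int fstOffset, char* buf, size_t fstMaxSize) {
  ssize_t nread = pread(fd, buf, fstMaxSize, fstOffset);
  assert(nread > 0 && (size_t)nread < fstMaxSize);  // whole fst must fit in the buffer
  return nread;
}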

View File

@@ -701,7 +701,8 @@ class IndexObj {
     int64_t s = taosGetTimestampUs();
     if (Search(mq, result) == 0) {
       int64_t e = taosGetTimestampUs();
-      std::cout << "search one successfully and time cost:" << e - s << std::endl;
+      std::cout << "search one successfully and time cost:" << e - s << "\tquery col:" << colName
+                << "\t val: " << colVal << "\t size:" << taosArrayGetSize(result) << std::endl;
     } else {
     }
     int sz = taosArrayGetSize(result);
@@ -834,11 +835,8 @@ static void write_and_search(IndexObj* idx) {
   std::string colName("tag1"), colVal("Hello");
   int target = idx->SearchOne("tag1", "Hello");
   std::cout << "search: " << target << std::endl;
-  target = idx->SearchOne("tag2", "Test");
-  std::cout << "search: " << target << std::endl;
-  idx->PutOne(colName, colVal);
+  // idx->PutOne(colName, colVal);
 }
 TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) {
   std::string path = "/tmp/cache_and_tfile";
@@ -847,8 +845,8 @@ TEST_F(IndexEnv2, testIndex_serarch_cache_and_tfile) {
   }
   index->PutOne("tag1", "Hello");
   index->PutOne("tag2", "Test");
-  index->WriteMultiMillonData("tag1", "Hello", 50 * 10000);
-  index->WriteMultiMillonData("tag2", "Test", 10 * 10000);
+  index->WriteMultiMillonData("tag1", "Hello", 100 * 10000);
+  index->WriteMultiMillonData("tag2", "Test", 100 * 10000);
   std::thread threads[NUM_OF_THREAD];
   for (int i = 0; i < NUM_OF_THREAD; i++) {
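
The test above then fans out NUM_OF_THREAD search workers against the freshly written data (the hunk is cut off at this point). A hedged sketch of that launch-and-join pattern, with the thread body reduced to a single SearchOne call and NUM_OF_THREAD given an arbitrary value for illustration:

#include <thread>

#define NUM_OF_THREAD 5  // assumption: the real value is defined elsewhere in the test

template <typename Index>
void runConcurrentSearch(Index* index) {
  std::thread threads[NUM_OF_THREAD];
  for (int i = 0; i < NUM_OF_THREAD; i++) {
    threads[i] = std::thread([index] { index->SearchOne("tag1", "Hello"); });
  }
  for (int i = 0; i < NUM_OF_THREAD; i++) {
    threads[i].join();  // wait for all search workers to finish
  }
}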