more

parent a581d9b356
commit 2434e71118
@@ -639,7 +639,7 @@ void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno) {
   if (pgno == 0 && pCache->nRefSum) {
     sqlite3_pcache_page *pPage1;
     pPage1 = pcache2.xFetch(pCache->pCache, 1, 0);
-    if (ALWAYS(pPage1)) { /* Page 1 is always available in cache, because
+    if (pPage1) { /* Page 1 is always available in cache, because
                           ** pCache->nRefSum>0 */
       memset(pPage1->pBuf, 0, pCache->szPage);
       pgno = 1;
@@ -712,7 +712,7 @@ static PgHdr *pcacheSortDirtyList(PgHdr *pIn) {
     p = pIn;
     pIn = p->pDirty;
     p->pDirty = 0;
-    for (i = 0; ALWAYS(i < N_SORT_BUCKET - 1); i++) {
+    for (i = 0; i < N_SORT_BUCKET - 1; i++) {
      if (a[i] == 0) {
        a[i] = p;
        break;
@@ -721,7 +721,7 @@ static PgHdr *pcacheSortDirtyList(PgHdr *pIn) {
        a[i] = 0;
      }
    }
-    if (NEVER(i == N_SORT_BUCKET - 1)) {
+    if (i == N_SORT_BUCKET - 1) {
      /* To get here, there need to be 2^(N_SORT_BUCKET) elements in
      ** the input list. But that is impossible.
      */
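The ALWAYS() and NEVER() hints dropped in the three hunks above are SQLite's branch-hint helpers: in release builds they reduce to the bare condition, and in debug builds they assert that the branch really is (or is not) taken. A minimal sketch of their shape, for context; the exact preprocessor guards in upstream sqliteInt.h differ slightly, so treat this as an approximation rather than the upstream definition.

#include <assert.h>

#if defined(SQLITE_COVERAGE_TEST)
#define ALWAYS(X) (1)                        /* coverage builds: branch assumed taken */
#define NEVER(X)  (0)                        /* coverage builds: branch assumed not taken */
#elif !defined(NDEBUG)
#define ALWAYS(X) ((X) ? 1 : (assert(0), 0)) /* debug: abort if the hint is ever wrong */
#define NEVER(X)  ((X) ? (assert(0), 1) : 0)
#else
#define ALWAYS(X) (X)                        /* release: just the condition */
#define NEVER(X)  (X)
#endif

Removing the wrappers, as done here, keeps the release-build behaviour and drops only the debug-build cross-checks.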
@@ -228,18 +228,9 @@ static struct PCacheGlobal {
   int bUnderPressure; /* True if low on PAGECACHE memory */
 } pcache1;
 
-/*
-** Macros to enter and leave the PCache LRU mutex.
-*/
-#if !defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE == 0
-#define pcache1EnterMutex(X) assert((X)->mutex == 0)
-#define pcache1LeaveMutex(X) assert((X)->mutex == 0)
-#define PCACHE1_MIGHT_USE_GROUP_MUTEX 0
-#else
-#define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex)
-#define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex)
+#define pcache1EnterMutex(X) pthread_mutex_lock(&((X)->mutex))
+#define pcache1LeaveMutex(X) pthread_mutex_unlock(&((X)->mutex))
 #define PCACHE1_MIGHT_USE_GROUP_MUTEX 1
-#endif
 
 /******************************************************************************/
 /******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/
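The replacement macros assume that the mutex field they receive is an embedded pthread_mutex_t (hence the &((X)->mutex)), rather than upstream's sqlite3_mutex pointer. A hypothetical usage sketch under that assumption; ExampleGroup and example_bump stand in for the real PGroup code and are not part of the patch.

#include <pthread.h>

#define pcache1EnterMutex(X) pthread_mutex_lock(&((X)->mutex))
#define pcache1LeaveMutex(X) pthread_mutex_unlock(&((X)->mutex))

/* Illustrative stand-in for PGroup: the mutex lives inside the struct. */
typedef struct ExampleGroup {
  pthread_mutex_t mutex; /* must be initialized, e.g. PTHREAD_MUTEX_INITIALIZER */
  unsigned nPurgeable;   /* example field guarded by the mutex */
} ExampleGroup;

static ExampleGroup grp = {PTHREAD_MUTEX_INITIALIZER, 0};

static void example_bump(void) {
  pcache1EnterMutex(&grp); /* expands to pthread_mutex_lock(&grp.mutex) */
  grp.nPurgeable++;
  pcache1LeaveMutex(&grp); /* expands to pthread_mutex_unlock(&grp.mutex) */
}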
@@ -285,7 +276,7 @@ static int pcache1InitBulk(PCache1 *pCache) {
   if (pcache1.nInitPage == 0) return 0;
   /* Do not bother with a bulk allocation if the cache size very small */
   if (pCache->nMax < 3) return 0;
-  sqlite3BeginBenignMalloc();
+  // sqlite3BeginBenignMalloc();
   if (pcache1.nInitPage > 0) {
     szBulk = pCache->szAlloc * (i64)pcache1.nInitPage;
   } else {
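sqlite3BeginBenignMalloc() only marks the allocations that follow as ones whose failure is harmless and should not be reported as an error; the bulk allocation below already treats a NULL return as "skip the optimization", so the call can be stubbed out. If the markers are ever wanted back without pulling in SQLite's fault.c, no-op definitions in the spirit of SQLite's own SQLITE_UNTESTABLE build would do; the two lines below are a sketch, not part of the patch.

#define sqlite3BeginBenignMalloc() ((void)0) /* no-op stand-in */
#define sqlite3EndBenignMalloc()   ((void)0) /* no-op stand-in */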
@@ -294,10 +285,10 @@ static int pcache1InitBulk(PCache1 *pCache) {
   if (szBulk > pCache->szAlloc * (i64)pCache->nMax) {
     szBulk = pCache->szAlloc * (i64)pCache->nMax;
   }
-  zBulk = pCache->pBulk = sqlite3Malloc(szBulk);
-  sqlite3EndBenignMalloc();
+  zBulk = pCache->pBulk = malloc(szBulk);
+  // sqlite3EndBenignMalloc();
   if (zBulk) {
-    int nBulk = sqlite3MallocSize(zBulk) / pCache->szAlloc;
+    int nBulk = szBulk / pCache->szAlloc;
     do {
       PgHdr1 *pX = (PgHdr1 *)&zBulk[pCache->szPage];
       pX->page.pBuf = zBulk;
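sqlite3MallocSize() reports the actual size of an allocation, which can exceed the requested szBulk because of allocator rounding, so dividing by the requested size yields a slightly conservative page count. Where that lost capacity matters, a usable-size query can approximate the old behaviour; the helper below is a sketch, bulk_slot_count is a made-up name, and the malloc_usable_size() path is glibc-specific.

#include <stdlib.h>
#ifdef __GLIBC__
#include <malloc.h> /* malloc_usable_size() */
#endif

/* Hypothetical helper: how many page slots fit in a bulk allocation. */
static int bulk_slot_count(void *zBulk, size_t szBulk, size_t szAlloc) {
  if (zBulk == NULL) return 0;
#ifdef __GLIBC__
  return (int)(malloc_usable_size(zBulk) / szAlloc); /* actual capacity */
#else
  return (int)(szBulk / szAlloc); /* requested size: conservative lower bound */
#endif
}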
@@ -414,7 +405,7 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc) {
   PgHdr1 *p = 0;
   void * pPg;
 
-  assert(sqlite3_mutex_held(pCache->pGroup->mutex));
+  // assert(sqlite3_mutex_held(pCache->pGroup->mutex));
   if (pCache->pFree || (pCache->nPage == 0 && pcache1InitBulk(pCache))) {
     assert(pCache->pFree != 0);
     p = pCache->pFree;
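This hunk and the ones that follow comment out the sqlite3_mutex_held() assertions because pthreads offers no portable way to ask whether the calling thread currently holds a mutex. If those checks are wanted in debug builds, one common workaround is to record the owner next to the mutex; DebugMutex and the dbg_* helpers below are a hypothetical sketch for debug use only, not code from this patch.

#include <assert.h>
#include <pthread.h>

typedef struct DebugMutex {
  pthread_mutex_t mutex;
  pthread_t owner;  /* meaningful only while `locked` is nonzero */
  int locked;       /* set while some thread holds the mutex */
} DebugMutex;

static void dbg_lock(DebugMutex *m) {
  pthread_mutex_lock(&m->mutex);
  m->owner = pthread_self();
  m->locked = 1;
}

static void dbg_unlock(DebugMutex *m) {
  m->locked = 0;
  pthread_mutex_unlock(&m->mutex);
}

/* Debug-only check, usable the way sqlite3_mutex_held() was in asserts. */
static int dbg_mutex_held(DebugMutex *m) {
  return m->locked && pthread_equal(m->owner, pthread_self());
}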
@@ -470,7 +461,7 @@ static void pcache1FreePage(PgHdr1 *p) {
   PCache1 *pCache;
   assert(p != 0);
   pCache = p->pCache;
-  assert(sqlite3_mutex_held(p->pCache->pGroup->mutex));
+  // assert(sqlite3_mutex_held(p->pCache->pGroup->mutex));
   if (p->isBulkLocal) {
     p->pNext = pCache->pFree;
     pCache->pFree = p;
@@ -536,7 +527,7 @@ static void pcache1ResizeHash(PCache1 *p) {
   unsigned int nNew;
   unsigned int i;
 
-  assert(sqlite3_mutex_held(p->pGroup->mutex));
+  // assert(sqlite3_mutex_held(p->pGroup->mutex));
 
   nNew = p->nHash * 2;
   if (nNew < 256) {
@@ -547,7 +538,7 @@ static void pcache1ResizeHash(PCache1 *p) {
   if (p->nHash) {
     sqlite3BeginBenignMalloc();
   }
-  apNew = (PgHdr1 **)sqlite3MallocZero(sizeof(PgHdr1 *) * nNew);
+  apNew = (PgHdr1 **)calloc(nNew, sizeof(PgHdr1 *));
   if (p->nHash) {
     sqlite3EndBenignMalloc();
   }
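sqlite3MallocZero() returns zero-filled memory, so calloc() is a like-for-like substitute both here and in pcache1Create() further down; as a side benefit, calloc(nNew, sizeof(PgHdr1 *)) checks the count-times-size multiplication for overflow, which the hand-written sizeof(PgHdr1 *) * nNew did not. A minimal sketch of the equivalence; zero_alloc is an illustrative name only.

#include <stdlib.h>
#include <string.h>

static void *zero_alloc(size_t sz) {
  return calloc(1, sz); /* zero-filled, like sqlite3MallocZero(sz) */
}

/* The same effect spelled out with malloc(): */
static void *zero_alloc_explicit(size_t sz) {
  void *p = malloc(sz);
  if (p) memset(p, 0, sz); /* calloc() performs this zeroing itself */
  return p;
}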
@@ -581,7 +572,7 @@ static PgHdr1 *pcache1PinPage(PgHdr1 *pPage) {
   assert(PAGE_IS_UNPINNED(pPage));
   assert(pPage->pLruNext);
   assert(pPage->pLruPrev);
-  assert(sqlite3_mutex_held(pPage->pCache->pGroup->mutex));
+  // assert(sqlite3_mutex_held(pPage->pCache->pGroup->mutex));
   pPage->pLruPrev->pLruNext = pPage->pLruNext;
   pPage->pLruNext->pLruPrev = pPage->pLruPrev;
   pPage->pLruNext = 0;
@@ -605,7 +596,7 @@ static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag) {
   PCache1 * pCache = pPage->pCache;
   PgHdr1 ** pp;
 
-  assert(sqlite3_mutex_held(pCache->pGroup->mutex));
+  // assert(sqlite3_mutex_held(pCache->pGroup->mutex));
   h = pPage->iKey % pCache->nHash;
   for (pp = &pCache->apHash[h]; (*pp) != pPage; pp = &(*pp)->pNext)
     ;
@@ -622,7 +613,7 @@ static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag) {
 static void pcache1EnforceMaxPage(PCache1 *pCache) {
   PGroup *pGroup = pCache->pGroup;
   PgHdr1 *p;
-  assert(sqlite3_mutex_held(pGroup->mutex));
+  // assert(sqlite3_mutex_held(pGroup->mutex));
   while (pGroup->nPurgeable > pGroup->nMaxPage && (p = pGroup->lru.pLruPrev)->isAnchor == 0) {
     assert(p->pCache->pGroup == pGroup);
     assert(PAGE_IS_UNPINNED(p));
@@ -647,7 +638,7 @@ static void pcache1TruncateUnsafe(PCache1 * pCache, /* The cache to truncate
 ) {
   TESTONLY(int nPage = 0;) /* To assert pCache->nPage is correct */
   unsigned int h, iStop;
-  assert(sqlite3_mutex_held(pCache->pGroup->mutex));
+  // assert(sqlite3_mutex_held(pCache->pGroup->mutex));
   assert(pCache->iMaxKey >= iLimit);
   assert(pCache->nHash > 0);
   if (pCache->iMaxKey - iLimit < pCache->nHash) {
@@ -760,7 +751,7 @@ static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable) {
   assert(szExtra < 300);
 
   sz = sizeof(PCache1) + sizeof(PGroup) * pcache1.separateCache;
-  pCache = (PCache1 *)sqlite3MallocZero(sz);
+  pCache = (PCache1 *)calloc(1, sz);
   if (pCache) {
     if (pcache1.separateCache) {
       pGroup = (PGroup *)&pCache[1];
@@ -17,6 +17,7 @@
 #include <pthread.h>
 #include <stddef.h>
 #include <stdint.h>
+#include <stdlib.h>
 #include <string.h>
 
 #ifndef SQLITEINT_H