do GC before table array is allocated

CPunch 2020-11-17 14:32:20 -06:00
parent 329d34aa43
commit 7182f5ccd1
3 changed files with 17 additions and 14 deletions


@@ -27,10 +27,7 @@ void *cosmoM_reallocate(CState* state, void *buf, size_t oldSize, size_t newSize
     }
 #endif
 #else
-    // if the state isn't frozen && we've reached the GC event
-    if (!(cosmoM_isFrozen(state)) && state->allocatedBytes > state->nextGC) {
-        cosmoM_collectGarbage(state); // cya lol
-    }
+    cosmoM_checkGarbage(state, 0);
 #endif

     // otherwise just use realloc to do all the heavy lifting
@@ -44,6 +41,16 @@ void *cosmoM_reallocate(CState* state, void *buf, size_t oldSize, size_t newSize
     return newBuf;
 }

+COSMO_API bool cosmoM_checkGarbage(CState *state, size_t needed) {
+    if (!(cosmoM_isFrozen(state)) && state->allocatedBytes + needed > state->nextGC) {
+        cosmoM_collectGarbage(state); // cya lol
+        return true;
+    }
+
+    return false;
+}
+
 void markObject(CState *state, CObj *obj);
 void markValue(CState *state, CValue val);
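Roughly, the new needed parameter lets a caller count bytes it is about to request toward the threshhold check, so a collection can fire for an allocation that has not happened yet. With made-up numbers (a 1 MiB nextGC and 900 KiB currently allocated; neither figure comes from the codebase):

    cosmoM_checkGarbage(state, 0);            // 900 KiB > 1024 KiB is false: no collection, returns false
    cosmoM_checkGarbage(state, 256 * 1024);   // 900 KiB + 256 KiB > 1024 KiB: collects, returns true

cosmoM_reallocate itself passes 0 for needed (in the non-GC_STRESS build shown above), which keeps its old behaviour of only comparing the bytes already allocated.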


@@ -46,6 +46,7 @@
 #endif

 COSMO_API void *cosmoM_reallocate(CState *state, void *buf, size_t oldSize, size_t newSize);
+COSMO_API bool cosmoM_checkGarbage(CState *state, size_t needed); // returns true if GC event was triggered
 COSMO_API void cosmoM_collectGarbage(CState *state);
 COSMO_API void cosmoM_updateThreshhold(CState *state);
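As a usage sketch (not part of this commit): a caller that is about to make a large allocation checks first, so a collection can run before the new block exists. The helper below and its growth policy are hypothetical; only the cosmoM_* functions, the cosmoM_freearray macro, and the CState/CValue types are taken from the diffs on this page, and the include path is assumed.

    #include "cmem.h" // assumed include; provides CState and the cosmoM_* API declared above

    // hypothetical helper: grow a plain CValue array owned by some GC-rooted object
    static CValue *growValueArray(CState *state, CValue *old, size_t oldCapacity, size_t newCapacity) {
        size_t size = sizeof(CValue) * newCapacity;

        // give the collector a chance to run *before* the new block is requested,
        // counting the upcoming size bytes toward the threshhold
        cosmoM_checkGarbage(state, size);

        CValue *fresh = cosmoM_xmalloc(state, size);
        for (size_t i = 0; i < oldCapacity; i++)
            fresh[i] = old[i]; // values in old must still be reachable from a GC root during the collection

        cosmoM_freearray(state, CValue, old, oldCapacity);
        return fresh;
    }

The resizeTbl() change below follows the same pattern: compute the size, call cosmoM_checkGarbage() with it, and only then cosmoM_xmalloc() the new entries array.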


@@ -108,25 +108,19 @@ static CTableEntry *findEntry(CTableEntry *entries, int mask, CValue key) {
 }

 static void resizeTbl(CState *state, CTable *tbl, size_t newCapacity) {
-    CTableEntry *entries = cosmoM_xmalloc(state, sizeof(CTableEntry) * newCapacity);
-
-    /* Before someone asks, no we shouldn't move the tombstone check to before the entries allocation.
-        The garbage collector is threshhold based, based on the currently allocated bytes. There's an
-        edgecase where if GC_STRESS is not enabled the GC will really only be called on growth of the
-        string interning table (this.) However the new size of the table is accounted for in the next threshhold
-        cycle, causing allocations to become less and less frequent until your computer develops dementia.
-    */
+    size_t size = sizeof(CTableEntry) * newCapacity;
+    cosmoM_checkGarbage(state, size);

     // if count > 8 and active entries < tombstones
     if (tbl->count > MIN_TABLE_CAPACITY && tbl->count - tbl->tombstones < tbl->tombstones) {
-        cosmoM_freearray(state, CTableEntry, entries, newCapacity);
         int tombs = tbl->tombstones;
-        tbl->tombstones = 0;
+        tbl->tombstones = 0; // set this to 0 so in our recursive call to resizeTbl() this branch isn't run again
         resizeTbl(state, tbl, nextPow2((tbl->capacity - tombs) * GROW_FACTOR));
         cosmoM_updateThreshhold(state); // force a threshhold update since this *could* be such a huge memory difference
         return;
     }

+    CTableEntry *entries = cosmoM_xmalloc(state, size);
     int newCount = 0;

     // set all nodes as NIL : NIL
@@ -154,6 +148,7 @@ static void resizeTbl(CState *state, CTable *tbl, size_t newCapacity) {
     tbl->table = entries;
     tbl->capacity = newCapacity;
     tbl->count = newCount;
+    tbl->tombstones = 0;
 }

 // returns a pointer to the allocated value
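Taken together, the reordering in resizeTbl() has three visible effects. Traced with assumed numbers (a 512 KiB entries array against a 1 MiB threshhold; the figures are illustrative only):

    old order:  cosmoM_xmalloc() the entries array first; the threshhold check inside
                cosmoM_reallocate() only sees the bytes already allocated, and in the
                tombstone-heavy branch the freshly allocated array is immediately freed
                again before the recursive call.
    new order:  cosmoM_checkGarbage(state, size) counts the 512 KiB toward the 1 MiB
                threshhold before any memory is requested, the tombstone-heavy branch
                recurses without ever allocating, and cosmoM_xmalloc() only runs on the
                path that actually keeps the array.

The added tbl->tombstones = 0 at the end records that the rebuilt array carries no tombstones, so the shrink heuristic starts from a clean count after every resize.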