diff --git a/src/cmem.c b/src/cmem.c
index e7eb763..3e0e032 100644
--- a/src/cmem.c
+++ b/src/cmem.c
@@ -27,10 +27,7 @@ void *cosmoM_reallocate(CState* state, void *buf, size_t oldSize, size_t newSize
     }
 #endif
 #else
-    // if the state isn't frozen && we've reached the GC event
-    if (!(cosmoM_isFrozen(state)) && state->allocatedBytes > state->nextGC) {
-        cosmoM_collectGarbage(state); // cya lol
-    }
+    cosmoM_checkGarbage(state, 0);
 #endif
 
     // otherwise just use realloc to do all the heavy lifting
@@ -44,6 +41,16 @@ void *cosmoM_reallocate(CState* state, void *buf, size_t oldSize, size_t newSize
     return newBuf;
 }
 
+COSMO_API bool cosmoM_checkGarbage(CState *state, size_t needed) {
+    if (!(cosmoM_isFrozen(state)) && state->allocatedBytes + needed > state->nextGC) {
+        cosmoM_collectGarbage(state); // cya lol
+        return true;
+    }
+
+    return false;
+}
+
+
 void markObject(CState *state, CObj *obj);
 void markValue(CState *state, CValue val);
 
diff --git a/src/cmem.h b/src/cmem.h
index bf1e537..354662f 100644
--- a/src/cmem.h
+++ b/src/cmem.h
@@ -46,6 +46,7 @@
 #endif
 
 COSMO_API void *cosmoM_reallocate(CState *state, void *buf, size_t oldSize, size_t newSize);
+COSMO_API bool cosmoM_checkGarbage(CState *state, size_t needed); // returns true if GC event was triggered
 COSMO_API void cosmoM_collectGarbage(CState *state);
 COSMO_API void cosmoM_updateThreshhold(CState *state);
 
diff --git a/src/ctable.c b/src/ctable.c
index 1adef66..51256b0 100644
--- a/src/ctable.c
+++ b/src/ctable.c
@@ -108,25 +108,19 @@ static CTableEntry *findEntry(CTableEntry *entries, int mask, CValue key) {
 }
 
 static void resizeTbl(CState *state, CTable *tbl, size_t newCapacity) {
-    CTableEntry *entries = cosmoM_xmalloc(state, sizeof(CTableEntry) * newCapacity);
-
-    /* Before someone asks, no we shouldn't move the tombstone check to before the entries allocation.
-        The garbage collector is threshhold based, based on the currently allocated bytes. There's an
-        edgecase where if GC_STRESS is not enabled the GC will really only be called on growth of the
-        string interning table (this.) However the new size of the table is accounted for in the next threshhold
-        cycle, causing allocations to become less and less frequent until your computer develops dementia.
-    */
+    size_t size = sizeof(CTableEntry) * newCapacity;
+    cosmoM_checkGarbage(state, size);
 
     // if count > 8 and active entries < tombstones
     if (tbl->count > MIN_TABLE_CAPACITY && tbl->count - tbl->tombstones < tbl->tombstones) {
-        cosmoM_freearray(state, CTableEntry, entries, newCapacity);
         int tombs = tbl->tombstones;
-        tbl->tombstones = 0;
+        tbl->tombstones = 0; // set this to 0 so in our recursive call to resizeTbl() this branch isn't run again
         resizeTbl(state, tbl, nextPow2((tbl->capacity - tombs) * GROW_FACTOR));
         cosmoM_updateThreshhold(state); // force a threshhold update since this *could* be such a huge memory difference
         return;
     }
 
+    CTableEntry *entries = cosmoM_xmalloc(state, size);
     int newCount = 0;
 
     // set all nodes as NIL : NIL
@@ -154,6 +148,7 @@ static void resizeTbl(CState *state, CTable *tbl, size_t newCapacity) {
     tbl->table = entries;
    tbl->capacity = newCapacity;
     tbl->count = newCount;
+    tbl->tombstones = 0;
 }
 
 // returns a pointer to the allocated value
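
Note (not part of the diff): below is a minimal sketch of the calling pattern the new API enables, assuming only what the hunks above show — cosmoM_checkGarbage() as declared in cmem.h, plus the existing cosmoM_xmalloc()/cosmoM_freearray() helpers. The header paths and the growEntries() helper are illustrative, not part of this change. The idea is that a caller about to make a large allocation passes the expected byte count so the GC threshold check accounts for memory not yet reflected in state->allocatedBytes; passing 0 (as cosmoM_reallocate() now does) reduces to the old allocatedBytes > nextGC check.

    /* sketch only: header paths and growEntries() are illustrative, not part of this change */
    #include "cmem.h"   /* cosmoM_checkGarbage, cosmoM_xmalloc, cosmoM_freearray */
    #include "ctable.h" /* CTableEntry (assumed to be declared here) */

    static CTableEntry *growEntries(CState *state, CTableEntry *old, size_t oldCap, size_t newCap) {
        size_t size = sizeof(CTableEntry) * newCap;

        /* collect *before* allocating, counting the bytes we are about to request;
           this mirrors what resizeTbl() above now does */
        cosmoM_checkGarbage(state, size);

        CTableEntry *buf = cosmoM_xmalloc(state, size);
        /* ... rehash/copy the live entries from old into buf ... */
        cosmoM_freearray(state, CTableEntry, old, oldCap);
        return buf;
    }

As I read the change, checking before the allocation (with the pending size included) is what lets the collector fire at the right time in resizeTbl(), instead of only counting the new table's size toward the next threshold cycle — the drift the removed comment warned about.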