mirror of https://github.com/CPunch/Cosmo.git
commit 7182f5ccd1
parent 329d34aa43

    do GC before table array is allocated
 src/cmem.c | 15 +++++++++++----

--- a/src/cmem.c
+++ b/src/cmem.c
@@ -27,10 +27,7 @@ void *cosmoM_reallocate(CState* state, void *buf, size_t oldSize, size_t newSize
     }
 #endif
 #else
-    // if the state isn't frozen && we've reached the GC event
-    if (!(cosmoM_isFrozen(state)) && state->allocatedBytes > state->nextGC) {
-        cosmoM_collectGarbage(state); // cya lol
-    }
+    cosmoM_checkGarbage(state, 0);
 #endif
 
     // otherwise just use realloc to do all the heavy lifting
@@ -44,6 +41,16 @@ void *cosmoM_reallocate(CState* state, void *buf, size_t oldSize, size_t newSize
     return newBuf;
 }
 
+COSMO_API bool cosmoM_checkGarbage(CState *state, size_t needed) {
+    if (!(cosmoM_isFrozen(state)) && state->allocatedBytes + needed > state->nextGC) {
+        cosmoM_collectGarbage(state); // cya lol
+        return true;
+    }
+
+    return false;
+}
+
+
 void markObject(CState *state, CObj *obj);
 void markValue(CState *state, CValue val);
 
 src/cmem.h | 1 +

--- a/src/cmem.h
+++ b/src/cmem.h
@@ -46,6 +46,7 @@
 #endif
 
 COSMO_API void *cosmoM_reallocate(CState *state, void *buf, size_t oldSize, size_t newSize);
+COSMO_API bool cosmoM_checkGarbage(CState *state, size_t needed); // returns true if GC event was triggered
 COSMO_API void cosmoM_collectGarbage(CState *state);
 COSMO_API void cosmoM_updateThreshhold(CState *state);
 
 src/ctable.c | 15 +++++----------

--- a/src/ctable.c
+++ b/src/ctable.c
@@ -108,25 +108,19 @@ static CTableEntry *findEntry(CTableEntry *entries, int mask, CValue key) {
 }
 
 static void resizeTbl(CState *state, CTable *tbl, size_t newCapacity) {
-    CTableEntry *entries = cosmoM_xmalloc(state, sizeof(CTableEntry) * newCapacity);
 
-    /* Before someone asks, no we shouldn't move the tombstone check to before the entries allocation.
-       The garbage collector is threshhold based, based on the currently allocated bytes. There's an
-       edgecase where if GC_STRESS is not enabled the GC will really only be called on growth of the
-       string interning table (this.) However the new size of the table is accounted for in the next threshhold
-       cycle, causing allocations to become less and less frequent until your computer develops dementia.
-    */
+    size_t size = sizeof(CTableEntry) * newCapacity;
+    cosmoM_checkGarbage(state, size);
 
     // if count > 8 and active entries < tombstones
     if (tbl->count > MIN_TABLE_CAPACITY && tbl->count - tbl->tombstones < tbl->tombstones) {
-        cosmoM_freearray(state, CTableEntry, entries, newCapacity);
         int tombs = tbl->tombstones;
-        tbl->tombstones = 0;
+        tbl->tombstones = 0; // set this to 0 so in our recursive call to resizeTbl() this branch isn't run again
         resizeTbl(state, tbl, nextPow2((tbl->capacity - tombs) * GROW_FACTOR));
-        cosmoM_updateThreshhold(state); // force a threshhold update since this *could* be such a huge memory difference
         return;
     }
 
+    CTableEntry *entries = cosmoM_xmalloc(state, size);
     int newCount = 0;
 
     // set all nodes as NIL : NIL
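The rationale comment removed above is worth a worked example: under the old order the table array was allocated before any GC decision, so each new threshold was derived from a heap already inflated by the fresh array, and collections grew rarer over time. A toy arithmetic sketch of the two orders (all byte counts and the x2 threshold factor are assumptions for illustration, not Cosmo's real numbers):

    #include <stdio.h>

    int main(void) {
        size_t allocated = 1000, tableSize = 4096;

        // old order: allocate first, account for the GC afterwards; the next
        // threshold is computed from a heap that already contains the array
        size_t oldNextGC = (allocated + tableSize) * 2; // 10192 -> GC gets rarer

        // new order: cosmoM_checkGarbage(state, size) runs before the
        // allocation, so collection and threshold see the pre-growth heap
        size_t newNextGC = allocated * 2;               // 2000

        printf("old nextGC: %zu, new nextGC: %zu\n", oldNextGC, newNextGC);
        return 0;
    }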
@@ -154,6 +148,7 @@ static void resizeTbl(CState *state, CTable *tbl, size_t newCapacity) {
     tbl->table = entries;
     tbl->capacity = newCapacity;
     tbl->count = newCount;
+    tbl->tombstones = 0;
 }
 
 // returns a pointer to the allocated value
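For reference, a self-contained sketch of the capacity math in the shrink branch above, which rebuilds the table sized for its live entries once tombstones outnumber them. nextPow2() here is a hypothetical reimplementation, and the literal values stand in for MIN_TABLE_CAPACITY and GROW_FACTOR:

    #include <stdio.h>

    // hypothetical reimplementation of ctable.c's nextPow2(): round up to
    // the next power of two
    static unsigned nextPow2(unsigned x) {
        if (x <= 1) return 1;
        x--;
        x |= x >> 1; x |= x >> 2; x |= x >> 4;
        x |= x >> 8; x |= x >> 16;
        return x + 1;
    }

    int main(void) {
        // a table with 20 entries, 14 of them tombstones: live entries (6)
        // are outnumbered, so the shrink branch fires
        int count = 20, tombstones = 14, capacity = 32, growFactor = 2;
        if (count > 8 /* MIN_TABLE_CAPACITY */ && count - tombstones < tombstones) {
            unsigned newCap = nextPow2((unsigned)((capacity - tombstones) * growFactor));
            printf("rebuild at capacity %u\n", newCap); // (32 - 14) * 2 = 36 -> 64
        }
        return 0;
    }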