fixed GC bug

This commit is contained in:
CPunch 2020-11-17 03:10:55 -06:00
parent 204bec3d0a
commit 1329b72fcd
5 changed files with 56 additions and 9 deletions

View File

@ -66,7 +66,7 @@ void tableRemoveWhite(CState *state, CTable *tbl) {
for (int i = 0; i < tbl->capacity; i++) {
CTableEntry *entry = &tbl->table[i];
if (IS_OBJ(entry->key) && !(entry->key.val.obj)->isMarked) { // if the key is a object and it's white (unmarked), remove it from the table
cosmoT_remove(tbl, entry->key);
cosmoT_remove(state, tbl, entry->key);
}
}
}
@ -224,6 +224,7 @@ COSMO_API void cosmoM_collectGarbage(CState *state) {
printf("-- GC start\n");
size_t start = state->allocatedBytes;
#endif
cosmoM_freezeGC(state); // we don't want a recursive garbage collection event!
markRoots(state);
@ -232,10 +233,16 @@ COSMO_API void cosmoM_collectGarbage(CState *state) {
sweep(state);
// set our next GC event
state->nextGC = state->allocatedBytes * HEAP_GROW_FACTOR;
cosmoM_updateThreshhold(state);
cosmoM_unfreezeGC(state);
#ifdef GC_DEBUG
printf("-- GC end, reclaimed %ld bytes (started at %ld, ended at %ld), next garbage collection scheduled at %ld bytes\n",
start - state->allocatedBytes, start, state->allocatedBytes, state->nextGC);
getchar(); // pauses execution
#endif
}
// Recomputes the GC trigger point: the next collection fires once
// state->allocatedBytes grows past the current live byte count times
// HEAP_GROW_FACTOR. (NOTE(review): "Threshhold" spelling is part of the
// public API name and is kept as-is.)
COSMO_API void cosmoM_updateThreshhold(CState *state) {
state->nextGC = state->allocatedBytes * HEAP_GROW_FACTOR;
}

View File

@ -45,8 +45,9 @@
state->freezeGC--
#endif
COSMO_API void *cosmoM_reallocate(CState* state, void *buf, size_t oldSize, size_t newSize);
COSMO_API void cosmoM_collectGarbage(CState* state);
COSMO_API void *cosmoM_reallocate(CState *state, void *buf, size_t oldSize, size_t newSize);
COSMO_API void cosmoM_collectGarbage(CState *state);
COSMO_API void cosmoM_updateThreshhold(CState *state);
/*
wrapper for cosmoM_reallocate so we can track our memory usage (it's also safer :P)

View File

@ -39,7 +39,7 @@ void cosmoO_free(CState *state, CObj* obj) {
switch(obj->type) {
case COBJ_STRING: {
CObjString *objStr = (CObjString*)obj;
cosmoM_freearray(state, char, objStr->str, objStr->length);
cosmoM_freearray(state, char, objStr->str, objStr->length + 1);
cosmoM_free(state, CObjString, objStr);
break;
}

View File

@ -6,10 +6,27 @@
#include <string.h>
#define MAX_TABLE_FILL 0.75
// at 30% capacity with capacity > ARRAY_START, shrink the array
#define MIN_TABLE_CAPACITY ARRAY_START
// bit-twiddling hacks: rounds x up to the next power of 2, clamped to a
// minimum of ARRAY_START. Note: for x > 2^31 the true next power of 2 does
// not fit in 32 bits and the result wraps to the clamp — callers are
// expected to stay far below that.
#ifndef ARRAY_START
#define ARRAY_START 8 /* fallback so this unit stands alone; real value comes from the table header */
#endif
unsigned int nextPow2(unsigned int x) {
    if (x <= ARRAY_START - 1) return ARRAY_START; // sanity check: clamp small inputs
    x--;
    // unsigned: left-shifting a signed int past bit 30 is undefined behavior,
    // and the value is compared/returned in unsigned context anyway
    unsigned int power = 2;
    while (x >>= 1) power <<= 1; // one doubling per significant bit of x-1
    if (power < ARRAY_START)
        return ARRAY_START;
    return power;
}
void cosmoT_initTable(CState *state, CTable *tbl, int startCap) {
tbl->capacity = startCap != 0 ? startCap : ARRAY_START; // sanity check :P
tbl->count = 0;
tbl->tombstones = 0;
tbl->table = NULL; // to let out GC know we're initalizing
tbl->table = cosmoM_xmalloc(state, sizeof(CTableEntry) * tbl->capacity);
@ -90,8 +107,26 @@ static CTableEntry *findEntry(CTableEntry *entries, int mask, CValue key) {
}
}
static void growTbl(CState *state, CTable *tbl, size_t newCapacity) {
static void resizeTbl(CState *state, CTable *tbl, size_t newCapacity) {
CTableEntry *entries = cosmoM_xmalloc(state, sizeof(CTableEntry) * newCapacity);
/* Before someone asks, no we shouldn't move the tombstone check to before the entries allocation.
The garbage collector is threshhold based, based on the currently allocated bytes. There's an
edgecase where if GC_STRESS is not enabled the GC will really only be called on growth of the
string interning table (this.) However the new size of the table is accounted for in the next threshhold
cycle, causing allocations to become less and less frequent until your computer develops dementia.
*/
// if count > 8 and active entries < tombstones
if (tbl->count > MIN_TABLE_CAPACITY && tbl->count - tbl->tombstones < tbl->tombstones) {
cosmoM_freearray(state, CTableEntry, entries, newCapacity);
int tombs = tbl->tombstones;
tbl->tombstones = 0;
resizeTbl(state, tbl, nextPow2((tbl->count - tombs) * GROW_FACTOR));
cosmoM_updateThreshhold(state); // force a threshhold update since this *could* be such a huge memory difference
return;
}
int newCount = 0;
// set all nodes as NIL : NIL
@ -119,14 +154,16 @@ static void growTbl(CState *state, CTable *tbl, size_t newCapacity) {
tbl->table = entries;
tbl->capacity = newCapacity;
tbl->count = newCount;
tbl->tombstones = 0;
}
// returns a pointer to the allocated value
COSMO_API CValue* cosmoT_insert(CState *state, CTable *tbl, CValue key) {
// make sure we have enough space allocated
if (tbl->count + 1 > (int)(tbl->capacity * MAX_TABLE_FILL)) {
// grow table
int newCap = tbl->capacity * GROW_FACTOR;
growTbl(state, tbl, newCap);
resizeTbl(state, tbl, newCap);
}
// insert into the table
@ -151,7 +188,7 @@ bool cosmoT_get(CTable *tbl, CValue key, CValue *val) {
return !(IS_NIL(entry->key));
}
bool cosmoT_remove(CTable *tbl, CValue key) {
bool cosmoT_remove(CState* state, CTable *tbl, CValue key) {
if (tbl->count == 0) return 0; // sanity check
CTableEntry *entry = findEntry(tbl->table, tbl->capacity - 1, key);
@ -161,6 +198,7 @@ bool cosmoT_remove(CTable *tbl, CValue key) {
// crafts tombstone
entry->key = cosmoV_newNil(); // this has to be nil
entry->val = cosmoV_newBoolean(false); // doesn't reall matter what this is, as long as it isn't nil
tbl->tombstones++;
return true;
}

View File

@ -12,6 +12,7 @@ typedef struct CTableEntry {
// Open-addressed hash table; entries are probed with (capacity - 1) as a
// bitmask, so capacity must be kept a power of two (see nextPow2/resizeTbl).
typedef struct CTable {
int count; // occupied slots, INCLUDING tombstones (resizeTbl computes active entries as count - tombstones)
int capacity; // allocated slot count; power of two so findEntry can mask with capacity - 1
int tombstones; // deleted-entry markers left by cosmoT_remove; lets resizeTbl decide when to shrink
CTableEntry *table; // slot array; NULL transiently during cosmoT_initTable so the GC can tell
} CTable;
@ -22,7 +23,7 @@ COSMO_API CValue *cosmoT_insert(CState *state, CTable *tbl, CValue key);
CObjString *cosmoT_lookupString(CTable *tbl, const char *str, size_t length, uint32_t hash);
bool cosmoT_get(CTable *tbl, CValue key, CValue *val);
bool cosmoT_remove(CTable *tbl, CValue key);
bool cosmoT_remove(CState *state, CTable *tbl, CValue key);
void cosmoT_printTable(CTable *tbl, const char *name);