char** (*config_args)(int*, char**);
void (*embed_libs)(Gwion);
void (*embed_scripts)(Gwion);
+ uint32_t thread_count;
+ uint32_t queue_size;
bool loop;
bool quit;
bool urc;
m_uint sz;
} HMapInfo;
+ANN static inline void dict_alloc(MemPool mp, const M_Object o, const m_uint sz, const m_uint capacity) {
+ HMap *const hmap = (struct HMap*)o->data;
+ hmap->data = (m_bit*)mp_calloc2(mp, sz * capacity);
+ hmap->state = (m_bit*)mp_calloc2(mp, sizeof(HState) * capacity);
+ hmap->capacity = capacity;
+}
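+/* Usage sketch: this mirrors the call site in the Dict constructor
+ * instruction further down, where `hinfo` comes from the type's class
+ * data and `instr->m_val` is the requested capacity:
+ *
+ *   const HMapInfo *hinfo = (HMapInfo*)t->nspc->class_data;
+ *   dict_alloc(shred->info->mp, o, hinfo->sz, instr->m_val);
+ */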
#endif
};
enum tflag {
- tflag_none = 1 << 0,
- tflag_scan0 = 1 << 1, //
- tflag_scan1 = 1 << 2, //
- tflag_scan2 = 1 << 3, //
- tflag_check = 1 << 4, //
- tflag_emit = 1 << 5, //
- tflag_infer = 1 << 6,
- tflag_empty = 1 << 7,
- tflag_ftmpl = 1 << 8,
- tflag_ntmpl = 1 << 9, // do NOT need types
- tflag_udef = 1 << 10,
- tflag_cdef = 1 << 11,
- tflag_struct = 1 << 12,
- tflag_ctor = 1 << 13,
- tflag_dtor = 1 << 14,
- tflag_tmpl = 1 << 15,
- tflag_typedef = 1 << 16,
- tflag_distinct = 1 << 17,
- tflag_noret = 1 << 18,
- tflag_contract = 1 << 19,
- tflag_float = 1 << 20,
- tflag_union = 1 << 21,
- tflag_ref = 1 << 22,
- tflag_packed = 1 << 23,
- tflag_compound = 1 << 24,
- tflag_release = 1 << 25, // mark structs that need release
+ tflag_none = 1 << 0,
+ tflag_scan0 = 1 << 1, //
+ tflag_scan1 = 1 << 2, //
+ tflag_scan2 = 1 << 3, //
+ tflag_check = 1 << 4, //
+ tflag_emit = 1 << 5, //
+ tflag_infer = 1 << 6,
+ tflag_empty = 1 << 7,
+ tflag_ftmpl = 1 << 8,
+ tflag_ntmpl = 1 << 9, // do NOT need types
+ tflag_udef = 1 << 10,
+ tflag_cdef = 1 << 11,
+ tflag_struct = 1 << 12,
+ tflag_ctor = 1 << 13,
+ tflag_dtor = 1 << 14,
+ tflag_tmpl = 1 << 15,
+ tflag_typedef = 1 << 16,
+ tflag_distinct = 1 << 17,
+ tflag_noret = 1 << 18,
+ tflag_contract = 1 << 19,
+ tflag_float = 1 << 20,
+ tflag_union = 1 << 21,
+ tflag_ref = 1 << 22,
+ tflag_packed = 1 << 23,
+ tflag_compound = 1 << 24,
+ tflag_release = 1 << 25, // mark structs that need release
+ tflag_primitive = 1 << 26, // mark primitive types
} __attribute__((packed));
struct Type_ {
ANN static inline void defined_here(const Value v) {
if (v->from->filename) { // TODO: check why this from->filename check is needed
- char c[256] = {[255] = '\0'};
+ char c[256];
+ c[255] = '\0';
snprintf(c, 256, _("%.*s defined here"), 240, v->name);
gwerr_secondary(c, v->from->filename, v->from->loc);
}
ANN void gwion_set_debug(const Gwion gwion, const bool dbg);
ANN void gwion_set_dump(const Gwion gwion, const bool dump);
ANN void gwion_set_cdoc(const Gwion gwion, const bool dbg);
+
+ANN static inline void shred_pool_run(const VM_Shred shred, void (*fun)(void*), void *arg) {
+ shreduler_remove(shred->tick->shreduler, shred, false);
+ threadpool_add(shred->info->vm->gwion->data->tpool, fun, arg);
+}
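+/* A minimal worker sketch (my_job/do_blocking_work are hypothetical):
+ * the shred is detached from the shreduler first so the audio thread
+ * won't tick it while the job runs on a pool thread; the job can then
+ * reschedule the shred itself once the blocking work is done:
+ *
+ *   static void my_job(void *data) {
+ *     const VM_Shred shred = data; // passed as `arg`
+ *     do_blocking_work(shred);
+ *     shredule(shred->tick->shreduler, shred, GWION_EPSILON);
+ *   }
+ *   // elsewhere: shred_pool_run(shred, my_job, shred);
+ */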
+
#endif
typedef struct GwionData_ {
struct Map_ freearg;
struct Map_ id;
- MUTEX_TYPE mutex;
+ gwtlock_t mutex;
struct Vector_ child;
struct Vector_ child2;
struct Passes_ *passes;
Plugs *plugs;
+ threadpool_t *tpool;
bool cdoc;
bool errored;
} GwionData;
-ANN GwionData *new_gwiondata(const MemPool);
+ANN GwionData *new_gwiondata(const MemPool, const uint32_t thread_count, const uint32_t queue_size);
ANN GwionData *cpy_gwiondata(MemPool, const GwionData *);
ANN void free_gwiondata(const Gwion);
ANN void free_gwiondata_cpy(const MemPool, GwionData *);
#ifndef __IMPORT
#define __IMPORT
-typedef void (*f_xtor)(const M_Object, const m_bit *, const VM_Shred);
-typedef void (*f_mfun)(const M_Object, const m_bit *, const VM_Shred);
-typedef void (*f_sfun)(const m_bit *, const m_bit *, const VM_Shred);
-typedef void (*f_gack)(const Type, const m_bit *, const VM_Shred);
+typedef void (*f_xtor)(const M_Object, m_bit *const, const VM_Shred);
+typedef void (*f_mfun)(const M_Object, m_bit *const, const VM_Shred);
+typedef void (*f_sfun)(const m_bit *, m_bit *const, const VM_Shred);
+typedef void (*f_gack)(const Type, m_bit *const, const VM_Shred);
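+/* Note: the byte-buffer argument is now `m_bit *const` (mutable bytes
+ * behind a fixed pointer) rather than `const m_bit *`, so builtins can
+ * write their result through RETURN/VALUE without casting away const. */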
typedef void (*f_xfun)();
typedef struct Gwi_ *Gwi;
#define MFUN(a) \
- ANN void a(const M_Object o NUSED, const m_bit *RETURN NUSED, \
+ ANN void a(const M_Object o NUSED, m_bit *const RETURN NUSED, \
const VM_Shred shred NUSED)
#define SFUN(a) \
- ANN void a(const M_Object o NUSED, const m_bit *RETURN NUSED, \
+ ANN void a(const M_Object o NUSED, m_bit *const RETURN NUSED, \
const VM_Shred shred NUSED)
#define CTOR(a) \
- ANN void a(const M_Object o NUSED, const m_bit *_ NUSED, \
+ ANN void a(const M_Object o NUSED, m_bit *const _ NUSED, \
const VM_Shred shred NUSED)
#define DTOR(a) \
- ANN void a(const M_Object o NUSED, const m_bit *_ NUSED, \
+ ANN void a(const M_Object o NUSED, m_bit *const _ NUSED, \
const VM_Shred shred NUSED)
#define GACK(a) \
ANN2(2) \
- void a(const Type t NUSED, const m_bit *VALUE NUSED, \
+ void a(const Type t NUSED, m_bit *const VALUE NUSED, \
const VM_Shred shred NUSED)
#define OP_CHECK(a) ANN Type a(const Env env NUSED, void *data NUSED)
#define OP_EMIT(a) ANN m_bool a(const Emitter emit NUSED, void *data NUSED)
struct ShredTick_ *list;
struct ShredTick_ *curr;
struct Vector_ active_shreds;
- MUTEX_TYPE mutex;
+ gwtlock_t mutex;
size_t shred_ids;
bool loop;
};
size_t pc;
struct ShredTick_ *tick;
struct ShredInfo_ *info;
- MUTEX_TYPE mutex;
+ gwtlock_t mutex;
};
REF_FUNC(VM_Code, vmcode)
ANN2(1, 4)
DEBUG,
DUMP,
CDOC,
+ THREAD,
+ QUEUE,
NOPTIONS
};
&opt[DUMP]);
cmdapp_set(app, 'H', "cdoc", CMDOPT_MAYTAKEARG, NULL, "set/unset cdoc mode", "bool",
&opt[CDOC]);
+ cmdapp_set(app, 't', "thread_count", CMDOPT_TAKESARG, NULL, "set number of threads", "integer",
+ &opt[THREAD]);
+ cmdapp_set(app, 'q', "queue_size", CMDOPT_TAKESARG, NULL, "set threadpool queue_size", "integer",
+ &opt[QUEUE]);
}
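+/* Example invocation using the new options (defaults are 4 threads and a
+ * queue of 16, matching the Arg initializers further down):
+ *   gwion -t 8 -q 32 file.gw */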
static inline void add2arg(CliArg *const arg, const char *data,
case 'H':
get_cdoc(arg_int->gwion, option->value);
break;
+ case 't':
+ _arg->thread_count = (uint32_t)ARG2INT(option->value);
+ break;
+ case 'q':
+ _arg->queue_size = (uint32_t)ARG2INT(option->value);
+ break;
}
}
}
for (m_uint i = 0; i < vector_size(&gwion->data->passes->vec); ++i) {
const compilation_pass pass =
(compilation_pass)vector_at(&gwion->data->passes->vec, i);
- CHECK_BB(pass(gwion->env, &c->ast));
+ if(pass(gwion->env, &c->ast) < 0) {
+ gwion->data->errored = true;
+ return GW_ERROR;
+ }
}
return GW_OK;
}
ANN static m_uint compile(struct Gwion_ *gwion, struct Compiler *c) {
compiler_name(c);
- MUTEX_LOCK(gwion->data->mutex);
+ gwt_lock(&gwion->data->mutex);
const m_uint ret = _compile(gwion, c);
- MUTEX_UNLOCK(gwion->data->mutex);
+ gwt_unlock(&gwion->data->mutex);
compiler_clean(c);
return ret;
}
gwion->ppa = mp_calloc(gwion->mp, PPArg);
pparg_ini(gwion->mp, gwion->ppa);
gwion_core(gwion);
- gwion->data = new_gwiondata(gwion->mp);
+ gwion->data = new_gwiondata(gwion->mp, arg->thread_count, arg->queue_size);
gwion->type = (Type *)xcalloc(MAX_TYPE, sizeof(struct Type_ *));
arg->si = gwion->vm->bbq->si = new_soundinfo(gwion->mp);
new_passes(gwion);
ANN static inline GwionData *gwiondata(MemPool mp) {
struct GwionData_ *data = mp_calloc(mp, GwionData);
- MUTEX_SETUP(data->mutex);
+ gwt_lock_ini(&data->mutex);
return data;
}
-ANN GwionData *new_gwiondata(const MemPool mp) {
+ANN GwionData *new_gwiondata(const MemPool mp, const uint32_t thread_count,
+ const uint32_t queue_size) {
GwionData *data = gwiondata(mp);
map_init(&data->freearg);
map_init(&data->id);
+ data->tpool = new_threadpool(thread_count, queue_size);
return data;
}
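+/* Ownership note: cpy_gwiondata (below) shares the same tpool pointer and
+ * free_gwiondata_cpy does not release it; only free_gwiondata calls
+ * free_threadpool, so the pool is created and freed exactly once. */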
data->id = src->id;
data->plugs = src->plugs;
data->passes = src->passes;
+ data->tpool = src->tpool;
return data;
}
ANN void free_gwiondata_cpy(const MemPool mp, GwionData *data) {
- MUTEX_CLEANUP(data->mutex);
+ gwt_lock_end(&data->mutex);
mp_free(mp, GwionData, data);
}
mp_free(gwion->mp, SpecialId, (struct SpecialId_ *)map_at(&data->id, i));
map_release(&data->id);
free_passes(gwion->mp, data->passes);
+ free_threadpool(data->tpool);
free_gwiondata_cpy(gwion->mp, data);
}
const Type t = new_type(env->gwion->mp, name, NULL);
t->size = sz;
t->actual_size = size;
+ set_tflag(t, tflag_primitive);
scan_prim_op(env, t);
scan_prim_op2(env, t);
if(size < SZ_INT) {
const Type actual = fdef->base->func->value_ref->type;
set_fbflag(fdef->base, fbflag_lambda);
Var_Decl vd = bin->rhs->d.exp_decl.vd;
+ exp_setvar(bin->rhs, true);
return vd.value->type = bin->rhs->type = bin->rhs->d.exp_decl.type = actual;
}
ERR_N(bin->lhs->pos, "invalid {G+}function{0} {+}:=>{0} {+G}function{0} assignment");
if (bin->lhs->exp_type == ae_exp_td)
ERR_N(bin->lhs->pos, "can't use {/}type decl expressions{0} in auto function pointer declarations");
- if(!bin->lhs->type->info->func)
+ if(!bin->lhs->type->info->func || !strncmp(bin->lhs->type->name, "partial:", 8))
return partial2auto(env, bin);
+
// create a matching signature
 // TODO: we could first check if there is a matching existing one
+ // we could maybe add it to the lhs type's function namespace;
+ // that would make it easy enough to search
Func_Base *const fbase =
cpy_func_base(env->gwion->mp, bin->lhs->type->info->func->def->base);
const Fptr_Def fptr_def = new_fptr_def(env->gwion->mp, fbase);
const Type t = (Type)instr->m_val2;
const M_Object o = new_object(shred->info->mp, t);
const HMapInfo *hinfo = (HMapInfo*)t->nspc->class_data;
- HMap *a = &*(struct HMap*)o->data;
- a->data = (m_bit*)mp_calloc2(shred->info->mp, hinfo->sz * instr->m_val);
- a->state = (m_bit*)mp_calloc2(shred->info->mp, sizeof(HState) * instr->m_val);
- a->capacity = instr->m_val;
+ dict_alloc(shred->info->mp, o, hinfo->sz, instr->m_val);
shred->reg += SZ_INT;
*(M_Object*)REG(-SZ_INT) = o;
}
free_vm_shred(s);
}
-static MFUN(shred_lock) { if(ME(o)->tick) MUTEX_LOCK(ME(o)->mutex); }
+static MFUN(shred_lock) { if(ME(o)->tick) gwt_lock(&ME(o)->mutex); }
-static MFUN(shred_unlock) { if(ME(o)->tick) MUTEX_UNLOCK(ME(o)->mutex); }
+static MFUN(shred_unlock) { if(ME(o)->tick) gwt_unlock(&ME(o)->mutex); }
static void stop(const M_Object o) {
VM *vm = ME(o)->info->vm;
- MUTEX_LOCK(vm->shreduler->mutex);
- MUTEX_LOCK(ME(o)->mutex);
+ gwt_lock(&vm->shreduler->mutex);
+ gwt_lock(&ME(o)->mutex);
vm->shreduler->bbq->is_running = 0;
*(m_int *)(o->data + o_shred_cancel) = 1;
- MUTEX_UNLOCK(ME(o)->mutex);
- MUTEX_UNLOCK(vm->shreduler->mutex);
+ *(m_int *)(o->data + o_fork_done) = 1; // mark the fork done too, so join()/waiters see it as finished
+ gwt_unlock(&ME(o)->mutex);
+ gwt_unlock(&vm->shreduler->mutex);
}
static inline void join(const M_Object o) {
if (FORK_THREAD(o)) {
THREAD_JOIN(FORK_THREAD(o));
FORK_THREAD(o) = 0;
}
static DTOR(fork_dtor) {
VM *parent = ME(o)->info->vm->parent;
-// MUTEX_LOCK(parent->shreduler->mutex);
- MUTEX_LOCK(ME(o)->mutex);
+ gwt_lock(&ME(o)->mutex);
*(m_int *)(o->data + o_fork_done) = 1;
- MUTEX_UNLOCK(ME(o)->mutex);
+ gwt_unlock(&ME(o)->mutex);
stop(o);
join(o);
-// MUTEX_UNLOCK(parent->shreduler->mutex);
- MUTEX_LOCK(parent->shreduler->mutex);
-// MUTEX_LOCK(ME(o)->mutex);
+ gwt_lock(&parent->shreduler->mutex);
if (parent->gwion->data->child.ptr) {
const m_int idx = vector_find(&parent->gwion->data->child, (vtype)o);
if (idx > -1) VPTR(&parent->gwion->data->child, idx) = 0;
if (!parent->gwion->data->child2.ptr)
vector_init(&parent->gwion->data->child2);
vector_add(&parent->gwion->data->child2, (vtype)ME(o)->info->vm->gwion);
-// MUTEX_UNLOCK(ME(o)->mutex);
- MUTEX_UNLOCK(parent->shreduler->mutex);
+ gwt_unlock(&parent->shreduler->mutex);
vmcode_remref(ME(o)->code, ME(o)->info->vm->gwion);
}
static MFUN(shred_cancel) {
if(!ME(o)->tick)return;
-// vm_lock(ME(o)->info->vm);
- MUTEX_LOCK(ME(o)->mutex);
+ gwt_lock(&ME(o)->mutex);
*(m_int *)(o->data + o_shred_cancel) = *(m_int *)MEM(SZ_INT);
- MUTEX_UNLOCK(ME(o)->mutex);
-// vm_unlock(ME(o)->info->vm);
+ gwt_unlock(&ME(o)->mutex);
}
static MFUN(shred_test_cancel) {
- MUTEX_LOCK(ME(o)->mutex);
+ gwt_lock(&ME(o)->mutex);
if (*(m_int *)(o->data + o_shred_cancel)) {
- MUTEX_UNLOCK(ME(o)->mutex);
+ gwt_unlock(&ME(o)->mutex);
vm_shred_exit(ME(o));
- } else
- MUTEX_UNLOCK(ME(o)->mutex);
+ } else gwt_unlock(&ME(o)->mutex);
}
static MFUN(fork_test_cancel) {
VM *parent = ME(o)->info->vm;
- MUTEX_LOCK(parent->shreduler->mutex);
+ gwt_lock(&parent->shreduler->mutex);
if (*(m_int *)(o->data + o_shred_cancel)) {
- MUTEX_UNLOCK(parent->shreduler->mutex);
+ gwt_unlock(&parent->shreduler->mutex);
stop(o);
join(o);
_release(o, ME(o));
vm_shred_exit(ME(o));
- } else
- MUTEX_UNLOCK(parent->shreduler->mutex);
+ } else gwt_unlock(&parent->shreduler->mutex);
}
static MFUN(shred_now) {
VM *vm = ME(o)->info->vm;
while (vm->parent) vm = vm->parent;
- MUTEX_LOCK(vm->shreduler->mutex);
+ gwt_lock(&vm->shreduler->mutex);
*(m_float *)RETURN = vm->bbq->pos;
- MUTEX_UNLOCK(vm->shreduler->mutex);
+ gwt_unlock(&vm->shreduler->mutex);
}
static MFUN(shred_blackhole) {
}
struct ThreadLauncher {
- MUTEX_TYPE mutex;
- THREAD_COND_TYPE cond;
+ gwtlock_t *mutex;
+ gwtcond_t *cond;
VM * vm;
};
static inline int fork_running(VM *vm, const M_Object o) {
- MUTEX_LOCK(ME(o)->mutex);
+ gwt_lock(&ME(o)->mutex);
const int cancel = *(m_int *)(o->data + o_shred_cancel);
- MUTEX_UNLOCK(ME(o)->mutex);
+ gwt_unlock(&ME(o)->mutex);
if(cancel)return false;
- MUTEX_LOCK(vm->shreduler->mutex);
+ gwt_lock(&vm->shreduler->mutex);
const int ret = vm->bbq->is_running;
- MUTEX_UNLOCK(vm->shreduler->mutex);
+ gwt_unlock(&vm->shreduler->mutex);
return ret;
}
static ANN THREAD_FUNC(fork_run) {
struct ThreadLauncher *tl = data;
VM * vm = tl->vm;
- MUTEX_TYPE mutex = tl->mutex;
+ gwtlock_t * mutex = tl->mutex;
const M_Object me = vm->shreduler->list->self->info->me;
- MUTEX_COND_LOCK(mutex);
- THREAD_COND_SIGNAL(tl->cond);
- MUTEX_COND_UNLOCK(mutex);
-// THREAD_COND_CLEANUP(tl->cond);
-// MUTEX_CLEANUP(tl->mutex);
+ gwt_lock(mutex);
+ gwt_signal(tl->cond);
+ gwt_unlock(mutex);
while (fork_running(vm, me)) {
vm_run_audio(vm);
++vm->bbq->pos;
}
gwion_end_child(ME(me), vm->gwion);
-vm_lock(vm);
-// MUTEX_LOCK(vm->shreduler->mutex);
-// MUTEX_LOCK(vm->parent->shreduler->mutex);
- MUTEX_LOCK(ME(me)->mutex);
+ vm_lock(vm);
+ gwt_lock(&ME(me)->mutex);
if (!*(m_int *)(me->data + o_shred_cancel) &&
me->type_ref != vm->gwion->type[et_fork])
memcpy(me->data + vm->gwion->type[et_fork]->nspc->offset, ME(me)->reg,
((Type)vector_front(&me->type_ref->info->tuple->types))->size);
*(m_int *)(me->data + o_fork_done) = 1;
- MUTEX_UNLOCK(ME(me)->mutex);
+ gwt_unlock(&ME(me)->mutex);
if (!*(m_int *)(me->data + o_shred_cancel))
broadcast(*(M_Object *)(me->data + o_fork_ev));
-vm_unlock(vm);
-// MUTEX_UNLOCK(vm->parent->shreduler->mutex);
-// MUTEX_UNLOCK(vm->shreduler->mutex);
+ vm_unlock(vm);
THREAD_RETURN(0);
}
ANN void fork_launch(const M_Object o) {
- MUTEX_TYPE mutex;
- MUTEX_SETUP(mutex);
- THREAD_COND_TYPE cond;
- THREAD_COND_SETUP(cond);
+ gwtlock_t mutex;
+ gwt_lock_ini(&mutex);
+ gwtcond_t cond;
+ gwt_cond_ini(&cond);
struct ThreadLauncher tl = {
- .mutex = mutex, .cond = cond, .vm = ME(o)->info->vm};
+ .mutex = &mutex, .cond = &cond, .vm = ME(o)->info->vm};
++o->ref;
- MUTEX_COND_LOCK(mutex);
- THREAD_CREATE(FORK_THREAD(o), fork_run, &tl);
- THREAD_COND_WAIT(cond, mutex);
- MUTEX_COND_UNLOCK(mutex);
- THREAD_COND_CLEANUP(cond);
- MUTEX_CLEANUP(mutex);
+ gwt_lock(&mutex);
+ gwt_create(&FORK_THREAD(o), fork_run, &tl);
+ gwt_wait(&cond, &mutex);
+ gwt_unlock(&mutex);
+ gwt_cond_end(&cond);
+ gwt_lock_end(&mutex);
}
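+/* The condition-variable handshake above is load-bearing: `tl`, `mutex`
+ * and `cond` all live on this stack frame, so fork_launch must block in
+ * gwt_wait until fork_run has signalled (i.e. read what it needs from
+ * `tl`) before the frame is torn down. */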
ANN void fork_clean(const VM_Shred shred, const Vector v) {
gwi_class_xtor(gwi, NULL, fork_dtor);
gwi->gwion->type[et_fork] = t_fork;
o_fork_thread = t_fork->nspc->offset;
- t_fork->nspc->offset += SZ_INT;
+ t_fork->nspc->offset += sizeof(gwtthread_t*); // reserve room for a real thread handle instead of assuming SZ_INT
gwi_item_ini(gwi, "int", "is_done");
GWI_BB((o_fork_done = gwi_item_end(gwi, ae_flag_const, num, 0)))
}
int main(int argc, char **argv) {
- Arg arg = {};
+ Arg arg = {
+ .thread_count = 4,
+ .queue_size = 16
+ };
gwion_ini(&gwion, &arg);
arg_release(&arg);
afl_run(&gwion);
.config_args = gwion_config_args,
.embed_libs = gwion_embed_libs,
.embed_scripts = gwion_embed_scripts,
+ .thread_count = 4,
+ .queue_size = 16
};
const m_bool ini = gwion_ini(&gwion, &arg);
arg_release(&arg);
}
ANN VM_Shred shreduler_get(const Shreduler s) {
- MUTEX_LOCK(s->mutex);
+ gwt_lock(&s->mutex);
Driver *const bbq = s->bbq;
struct ShredTick_ *const tk = s->list;
if (tk) {
if ((s->list = tk->next)) s->list->prev = NULL;
tk->next = tk->prev = NULL;
s->curr = tk;
- MUTEX_UNLOCK(s->mutex);
+ gwt_unlock(&s->mutex);
return tk->self;
}
}
if (!s->loop && !vector_size(&s->active_shreds)) bbq->is_running = 0;
- MUTEX_UNLOCK(s->mutex);
+ gwt_unlock(&s->mutex);
return NULL;
}
struct ShredTick_ *const tk) {
const VM_Shred shred = tk->self;
if (tk->child.ptr) child(s, &tk->child);
- MUTEX_LOCK(shred->mutex);
+ gwt_lock(&shred->mutex);
tk->prev = (struct ShredTick_*)-1;
- MUTEX_UNLOCK(shred->mutex);
+ gwt_unlock(&shred->mutex);
const m_uint size =
shred->info->frame.ptr ? vector_size(&shred->info->frame) : 0;
if(size) unwind(shred, (Symbol)-1, size);
ANN void shreduler_remove(const Shreduler s, const VM_Shred out,
const bool erase) {
- MUTEX_LOCK(s->mutex);
+ gwt_lock(&s->mutex);
struct ShredTick_ *const tk = out->tick;
tk_remove(s, tk);
if (likely(!erase)) tk->prev = tk->next = NULL;
if (tk->parent) vector_rem2(&tk->parent->child, (vtype)out);
shreduler_erase(s, tk);
}
- MUTEX_UNLOCK(s->mutex);
+ gwt_unlock(&s->mutex);
}
ANN static void _shredule(const Shreduler s, struct ShredTick_ *tk,
ANN void shredule(const Shreduler s, const VM_Shred shred,
const m_float wake_time) {
struct ShredTick_ *tk = shred->tick;
- MUTEX_LOCK(s->mutex);
+ gwt_lock(&s->mutex);
_shredule(s, tk, wake_time);
- MUTEX_UNLOCK(s->mutex);
+ gwt_unlock(&s->mutex);
}
ANN void shreduler_ini(const Shreduler s, const VM_Shred shred) {
ANN void shreduler_add(const Shreduler s, const VM_Shred shred) {
shreduler_ini(s, shred);
shred->tick->xid = ++s->shred_ids;
- MUTEX_LOCK(s->mutex);
+ gwt_lock(&s->mutex);
vector_add(&s->active_shreds, (vtype)shred);
_shredule(s, shred->tick, GWION_EPSILON);
- MUTEX_UNLOCK(s->mutex);
+ gwt_unlock(&s->mutex);
}
ANN Shreduler new_shreduler(const MemPool mp) {
Shreduler s = (Shreduler)mp_calloc(mp, Shreduler);
vector_init(&s->active_shreds);
- MUTEX_SETUP(s->mutex);
+ gwt_lock_ini(&s->mutex);
return s;
}
ANN void free_shreduler(const MemPool mp, const Shreduler s) {
vector_release(&s->active_shreds);
- MUTEX_CLEANUP(s->mutex);
+ gwt_lock_end(&s->mutex);
mp_free(mp, Shreduler, s);
}
ANN void vm_lock(VM const *vm) {
if (vm->parent) vm_lock(vm->parent);
- MUTEX_LOCK(vm->shreduler->mutex);
+ gwt_lock(&vm->shreduler->mutex);
}
ANN void vm_unlock(VM const *vm) {
- do MUTEX_UNLOCK(vm->shreduler->mutex);
+ do gwt_unlock(&vm->shreduler->mutex);
while ((vm = vm->parent));
}
// remove me
ANN void next_bbq_pos(const VM *vm) {
Driver *const di = vm->bbq;
- MUTEX_LOCK(vm->shreduler->mutex);
+ gwt_lock(&vm->shreduler->mutex);
if(++di->pos == 16777216-1) {
const Vector v = &vm->shreduler->active_shreds;
for(m_uint i = 0; i < vector_size(v); i++) {
}
di->pos = 0;
}
- MUTEX_UNLOCK(vm->shreduler->mutex);
+ gwt_unlock(&vm->shreduler->mutex);
}
ANN void vm_run_audio(const VM *vm) {
shred->reg = (m_bit *)shred + sizeof(struct VM_Shred_);
shred->base = shred->mem = shred->reg + SIZEOF_REG;
shred->info = new_shredinfo(p, c);
- MUTEX_SETUP(shred->mutex);
+ gwt_lock_ini(&shred->mutex);
return shred;
}
const MemPool mp = shred->info->mp;
mp_free(mp, ShredTick, shred->tick);
free_shredinfo(mp, shred->info);
- MUTEX_CLEANUP(shred->mutex);
+ gwt_lock_end(&shred->mutex);
mp_free(mp, Stack, shred);
}