-Subproject commit 54f6475d7a9686929d553695180bb6bd0be262bf
+Subproject commit 22dd7634ccd580fa2265cd554dbbaf572a8489be
size_t curr_offset;
struct Vector_ stack;
struct Vector_ defer;
+ struct Map_ handlers;
+ m_uint try_top;
} Frame;
typedef struct Code_ {
eGackType,
eGackEnd,
eGack,
+ eTryIni,
+ eTryEnd,
+ eHandleEffect,
eNoOp,
eEOC,
eUnroll2,
#define GackType (f_instr)eGackType
#define GackEnd (f_instr)eGackEnd
#define Gack (f_instr)eGack
+#define TryIni (f_instr)eTryIni
+#define TryEnd (f_instr)eTryEnd
+#define HandleEffect (f_instr)eHandleEffect
#define NoOp (f_instr)eNoOp
#define EOC (f_instr)eEOC
#define Unroll2 (f_instr)eUnroll2
void* memoize;
Closure *closure;
};
+ struct Map_ handlers;
m_str name;
uint16_t ref;
ae_flag flag;
Vector args;
MemPool mp;
VM_Code orig;
+ struct Map_ frame;
};
struct ShredTick_ {
GackType
GackEnd
Gack
+TryIni
+TryEnd
+HandleEffect
NoOp
EOC
Unroll2
clean_case_list(a, b->next);
}
+// Clean pass: walk each effect handler of a try statement.
+// A handler body lives in its own nested scope, so the scope depth is
+// bumped around its statement; the list is traversed by tail recursion.
+ANN static void clean_handler_list(Clean *a, Handler_List b) {
+ ++a->scope;
+ clean_stmt(a, b->stmt);
+ --a->scope;
+ if(b->next)
+ clean_handler_list(a, b->next);
+}
+// Clean pass for a try statement: the protected body gets its own scope,
+// then every attached handler is cleaned in turn.
+ANN static void clean_stmt_try(Clean *a, Stmt_Try b) {
+ ++a->scope;
+ clean_stmt(a, b->stmt);
+ --a->scope;
+ clean_handler_list(a, b->handler);
+}
+
ANN static void clean_stmt_match(Clean *a, Stmt_Match b) {
++a->scope;
clean_exp(a, b->cond);
#define clean_stmt_pp clean_dummy
#define clean_stmt_break clean_dummy
#define clean_stmt_continue clean_dummy
+#define clean_stmt_resume clean_dummy
DECL_STMT_FUNC(clean, void, Clean*)
ANN static void clean_stmt(Clean *a, Stmt b) {
mp_free(p, Local, (Local*)vector_at(&a->stack, i - 1));
vector_release(&a->stack);
vector_release(&a->defer);
+ if(a->handlers.ptr)
+ map_release(&a->handlers);
mp_free(p, Frame, a);
}
return GW_OK;
}
+
+// add a Goto; its index is later set to the one of the happy path
+// maybe this and the function above could share machinery with returns or breaks
+// Emit a Goto whose target is not known yet and remember the instruction
+// in `v`; try_goto_indexes() patches all of them to the happy-path pc later.
+ANN static inline void emit_try_goto(const restrict Emitter emit, const Vector v) {
+ const Instr instr = emit_add_instr(emit, Goto);
+ vector_add(v, (m_uint)instr);
+}
+
+// set the Goto indexes to the one of the happy path
+// Back-patch every Goto collected by emit_try_goto(): set its m_val
+// (the jump target) to `pc`, the program counter of the happy path.
+ANN static inline void try_goto_indexes(const Vector v, const m_uint pc) {
+ for(m_uint i = 0; i < vector_size(v); i++) {
+ const Instr instr = (Instr)vector_at(v, i);
+ instr->m_val = pc;
+ }
+}
+
+// Emit the handler chain of a try statement.
+// For each handler: a HandleEffect instruction whose m_val2 holds the
+// handled effect symbol (xid), then the handler body in its own scope.
+// Inner handlers are emitted first (recursion before the goto), then a
+// Goto to the happy path is queued in `v`; finally m_val is set to the
+// current code size, i.e. the pc just past this handler's body.
+// NOTE(review): m_val is later rebased by the pc-relocation pass that
+// also registers it in code->handlers — confirm against setpc().
+ANN static inline m_bool emit_handler_list(const restrict Emitter emit, const Handler_List handler,
+ const Vector v) {
+ const Instr instr = emit_add_instr(emit, HandleEffect);
+ instr->m_val2 = (m_uint)handler->xid;
+ CHECK_BB(scoped_stmt(emit, handler->stmt, 1))
+ if(handler->next)
+ CHECK_BB(emit_handler_list(emit, handler->next, v))
+ emit_try_goto(emit, v);
+ instr->m_val = emit_code_size(emit);
+ return GW_OK;
+}
+
+// Emit a try statement:
+//   TryIni ... <protected body> ... Goto(happy) <handlers> TryEnd
+// frame->try_top records the pc of the innermost TryIni so that `resume`
+// can jump back to it; the previous value is saved and restored to
+// support nested try statements. The frame->handlers map is created
+// lazily on the first try emitted for this code frame.
+ANN static inline m_bool emit_stmt_try(const restrict Emitter emit, const Stmt_Try stmt) {
+ const m_uint top = emit->code->frame->try_top;
+ emit->code->frame->try_top = emit_code_size(emit);
+ (void)emit_add_instr(emit, TryIni);
+ struct Vector_ v; // store Gotos to the happy path
+ vector_init(&v);
+ CHECK_BB(scoped_stmt(emit, stmt->stmt, 1))
+ emit_try_goto(emit, &v);
+ if(!emit->code->frame->handlers.ptr)
+ map_init(&emit->code->frame->handlers);
+ CHECK_BB(emit_handler_list(emit, stmt->handler, &v))
+ try_goto_indexes(&v, emit_code_size(emit));
+ vector_release(&v);
+ emit->code->frame->try_top = top;
+ (void)emit_add_instr(emit, TryEnd);
+ return GW_OK;
+}
+
ANN static m_bool emit_stmt_exp(const Emitter emit, const struct Stmt_Exp_* exp) {
return exp->val ? emit_exp(emit, exp->val) : GW_OK;
}
return GW_OK;
}
+// Emit a `resume` statement: an unconditional Goto back to the pc of the
+// innermost enclosing try (frame->try_top, set by emit_stmt_try).
+ANN static m_bool emit_stmt_resume(const Emitter emit, const struct Stmt_Index_* stmt NUSED) {
+ const Instr instr = emit_add_instr(emit, Goto);
+ instr->m_val = emit->code->frame->try_top;
+ return GW_OK;
+}
+
#define emit_stmt_while emit_stmt_flow
#define emit_stmt_until emit_stmt_flow
DECL_STMT_FUNC(emit, m_bool , Emitter)
ANN void load_context(const Context context, const Env env) {
  context_addref((env->context = context));
  vector_add(&env->scope->nspc_stack, (vtype)env->curr);
+ // also propagate the context's name to the env (new with this patch)
+ env->name = context->name;
  context->nspc->parent = env->curr;
  env->curr = context->nspc;
}
}
ANN static m_bool fptr_tmpl_push(const Env env, struct FptrInfo *info) {
- if(safe_tflag(info->rhs->value_ref->from->owner_class, tflag_tmpl))
+// if(safe_tflag(info->rhs->value_ref->from->owner_class, tflag_tmpl))
if(!info->rhs->def->base->tmpl)
return GW_OK;
nspc_push_type(env->gwion->mp, env->curr);
ANN static m_bool _check_lambda(const Env env, Exp_Lambda *l, const Func_Def def) {
//if(l->def->base->func)return GW_OK;
- if(safe_tflag(def->base->func->value_ref->from->owner_class, tflag_tmpl))
+ const bool is_tmpl = safe_tflag(def->base->func->value_ref->from->owner_class, tflag_tmpl);
+ if(is_tmpl)
template_push_types(env, def->base->func->value_ref->from->owner_class->info->cdef->base.tmpl);
Arg_List base = def->base->args, arg = l->def->base->args;
while(base && arg) {
base = base->next;
arg = arg->next;
}
+ l->def->base->td = type2td(env->gwion, known_type(env, def->base->td), exp_self(l)->pos);
+ if(is_tmpl)
+ nspc_pop_type(env->gwion->mp, env->curr);
if(base || arg) // beware, error between pops
ERR_B(exp_self(l)->pos, _("argument number does not match for lambda"))
l->def->base->flag = def->base->flag;
// if(GET_FLAG(def->base, global) && !l->owner && def->base->func->value_ref->from->owner_class)
UNSET_FLAG(l->def->base, global);
- l->def->base->td = type2td(env->gwion, known_type(env, def->base->td), exp_self(l)->pos);
l->def->base->values = env->curr->info->value;
- if(safe_tflag(def->base->func->value_ref->from->owner_class, tflag_tmpl))
- nspc_pop_type(env->gwion->mp, env->curr);
const m_uint scope = env->scope->depth;
// if(GET_FLAG(def->base, global) && !l->owner && def->base->func->value_ref->from->owner_class)
//env_push(env, NULL, env->context->nspc);
#undef insert_symbol
ANN void exception(const VM_Shred shred, const m_str c) {
+//handle(shred);
gw_err("{+}%s{0}: shred[{-}id=%" UINT_F "{0}:%s], PC=[{-}%" UINT_F "{0}]\n",
c, shred->tick->xid, shred->info->name, shred->pc - 1);
vm_shred_exit(shred);
return GW_OK;
}
+
+// Type-check the handler chain; tail recursion means the last-declared
+// handler is checked first, each in its own namespace (RET_NSPC).
+ANN static inline m_bool check_handler_list(const restrict Env env, const Handler_List handler) {
+ if(handler->next)
+ CHECK_BB(check_handler_list(env, handler->next))
+ RET_NSPC(check_stmt(env, handler->stmt))
+}
+
+// Type-check a try statement: handlers first, then the protected body
+// in its own namespace.
+ANN static inline m_bool check_stmt_try(const restrict Env env, const Stmt_Try stmt) {
+ CHECK_BB(check_handler_list(env, stmt->handler))
+ RET_NSPC(check_stmt(env, stmt->stmt))
+}
+
ANN static m_bool _check_stmt_match(const Env env, const Stmt_Match stmt) {
CHECK_OB(check_exp(env, stmt->cond))
MATCH_INI(env->scope)
return check_stmt(env, stmt->stmt);
}
+#define check_stmt_resume dummy_func
DECL_STMT_FUNC(check, m_bool , Env)
ANN m_bool check_stmt(const Env env, const Stmt stmt) {
RET_NSPC(_scan1_stmt_match(env, stmt))
}
+
+// scan1 pass over the handler chain — same shape as the check pass:
+// tail handlers first, each handler body in its own namespace.
+ANN static inline m_bool scan1_handler_list(const restrict Env env, const Handler_List handler) {
+ if(handler->next)
+ CHECK_BB(scan1_handler_list(env, handler->next))
+ RET_NSPC(scan1_stmt(env, handler->stmt))
+}
+
+// scan1 pass for a try statement: handlers, then the protected body.
+ANN static inline m_bool scan1_stmt_try(const restrict Env env, const Stmt_Try stmt) {
+ CHECK_BB(scan1_handler_list(env, stmt->handler))
+ RET_NSPC(scan1_stmt(env, stmt->stmt))
+}
+
ANN static inline m_bool stmt_each_defined(const restrict Env env, const Stmt_Each stmt) {
if(nspc_lookup_value1(env->curr, stmt->sym))
ERR_B(stmt_self(stmt)->pos, _("foreach value '%s' is already defined"), s_name(stmt->sym))
#define scan1_stmt_while scan1_stmt_flow
#define scan1_stmt_until scan1_stmt_flow
-#define scan1_stmt_continue (void*)dummy_func
-#define scan1_stmt_break (void*)dummy_func
+#define scan1_stmt_continue dummy_func
+#define scan1_stmt_break dummy_func
#define scan1_stmt_return scan1_stmt_exp
+#define scan1_stmt_resume dummy_func
ANN static m_bool scan1_stmt_pp(const Env env, const Stmt_PP stmt) {
if(stmt->pp_type == ae_pp_include)
return GW_OK;
}
+// scan2 pass over the handler chain — mirrors scan1/check: tail handlers
+// first, each handler body scanned in its own namespace.
+ANN static inline m_bool scan2_handler_list(const restrict Env env, const Handler_List handler) {
+ if(handler->next)
+ CHECK_BB(scan2_handler_list(env, handler->next))
+ RET_NSPC(scan2_stmt(env, handler->stmt))
+}
+
+// scan2 pass for a try statement: handlers, then the protected body.
+ANN static inline m_bool scan2_stmt_try(const restrict Env env, const Stmt_Try stmt) {
+ CHECK_BB(scan2_handler_list(env, stmt->handler))
+ RET_NSPC(scan2_stmt(env, stmt->stmt))
+}
+
ANN static inline m_bool scan2_stmt_match(const restrict Env env, const Stmt_Match stmt) {
CHECK_BB(scan2_exp(env, stmt->cond))
RET_NSPC(_scan2_stmt_match(env, stmt))
#define scan2_stmt_while scan2_stmt_flow
#define scan2_stmt_until scan2_stmt_flow
-#define scan2_stmt_continue (void*)dummy_func
-#define scan2_stmt_break (void*)dummy_func
+#define scan2_stmt_continue dummy_func
+#define scan2_stmt_break dummy_func
#define scan2_stmt_return scan2_stmt_exp
+#define scan2_stmt_resume dummy_func
ANN static m_bool scan2_stmt_pp(const Env env, const Stmt_PP stmt) {
if(stmt->pp_type == ae_pp_include)
return ME(o);
}
-#define TEST0(t, pos) if(!*(t*)(reg-pos)){ shred->pc = PC; exception(shred, "ZeroDivideException"); break; }
+// Search the shred's active-try frames for a handler of `effect`.
+// Returns true when no handler is found (caller should kill the shred),
+// false when the shred was rescheduled at the handler's pc.
+// shred->info->frame maps TryIni pc -> saved reg pointer (see try_ini in
+// the VM loop); code->handlers maps HandleEffect instruction indexes to
+// their jump targets (filled by the pc-relocation pass).
+ANN static bool unwind(VM_Shred shred, const Symbol effect) {
+ // no frame recorded: there is no handler at all
+ if(!map_size(&shred->info->frame))
+ return true;
+ if(shred->code->handlers.ptr) {
+ // innermost try of the current code frame
+ const m_uint start = VKEY(&shred->info->frame, VLEN(&shred->info->frame) - 1);
+ if(start > shred->pc)
+ return true;
+ const Map m = &shred->code->handlers;
+ m_uint pc = 0;
+ // find the first HandleEffect past the current pc that matches the
+ // effect (m_val2 == 0 means a catch-all handler)
+ for(m_uint i = 0; i < map_size(m); i++) {
+ if(start > shred->pc)
+ break;
+ if(start < shred->pc && VKEY(m, i) > shred->pc) {
+ const m_uint next = VKEY(m, i);
+ const Instr instr = (Instr)vector_at(shred->code->instr, next + 1);
+ if(!instr->m_val2 || (Symbol)instr->m_val2 == effect) {
+ pc = next + 1;
+ break;
+ }
+ }
+ }
+ if(!pc) // outside of a try statement
+ return true;
+ // restore the register stack saved at TryIni and resume in the handler
+ shred->reg = (m_bit*)VVAL(&shred->info->frame, VLEN(&shred->info->frame) - 1);
+ shredule(shred->tick->shreduler, shred, 0);
+ shred->pc = pc;//VKEY(m, i);
+ return false;
+ }
+ // there might be no more stack to unwind
+ map_remove(&shred->info->frame, VLEN(&shred->info->frame)-1);
+ if(shred->mem == (m_bit*)shred + sizeof(struct VM_Shred_) + SIZEOF_REG)
+ return true;
+ // literally unwind: pop the caller's pc/code/mem from the memory stack
+ shred->pc = *(m_uint*) (shred->mem - SZ_INT * 2);
+ shred->code = *(VM_Code*)(shred->mem - SZ_INT * 3);
+ shred->mem -= (*(m_uint*)(shred->mem - SZ_INT * 4) + SZ_INT * 4);
+ return unwind(shred, effect);
+}
+
+// Raise effect `xid` on a shred: take it off the shreduler, then unwind
+// looking for a matching handler; when none exists, the shred exits and
+// the effect name is printed.
+ANN void handle(VM_Shred shred, const Symbol xid) {
+ // remove from the shreduler
+ // TODO: get shred->mem and shred->reg offsets
+ shreduler_remove(shred->tick->shreduler, shred, false);
+ // do the unwinding
+ if(unwind(shred, xid)) {
+ vm_shred_exit(shred);
+ puts(s_name(xid));
+ }
+ // I guess the VM could have *trace mode*
+ // which would happen here from the top
+}
+
+#define TEST0(t, pos) if(!*(t*)(reg-pos)){ shred->pc = PC; handle(shred, insert_symbol(vm->gwion->st, "ZeroDivideException")); break; }
#define ADVANCE() byte += BYTECODE_SZ;
&&upvalueint, &&upvaluefloat, &&upvalueother, &&upvalueaddr,
&&dotfunc,
&&gcini, &&gcadd, &&gcend,
- &&gacktype, &&gackend, &&gack, &&noop, &&eoc, &&unroll2, &&other, &®pushimm
+ &&gacktype, &&gackend, &&gack, &&try_ini, &&try_end, &&handleeffect, &&noop, &&eoc, &&unroll2, &&other, &®pushimm
};
const Shreduler s = vm->shreduler;
register VM_Shred shred;
VM_OUT
gack(shred, VAL);
goto in;
+try_ini:
+ if(!shred->info->frame.ptr) // ???
+ map_init(&shred->info->frame);
+ map_set(&shred->info->frame, PC, (m_uint)shred->reg);
+ DISPATCH();
+try_end:
+ map_remove(&shred->info->frame, VLEN(&shred->info->frame)-1);
+handleeffect:
+// this should check the *xid* of the exception
noop:
DISPATCH();
other:
}
free_code_instr(a->instr, gwion);
}
+ if(a->handlers.ptr)
+ map_release(&a->handlers);
free_vector(gwion->mp, a->instr);
}
free_mstr(gwion->mp, a->name);
static inline uint isgoto(const unsigned opcode) {
+  // HandleEffect stores a jump target (pc) in m_val, so the relocation
+  // pass must rebase it like the other branching opcodes.
  return opcode == eGoto || opcode == eArrayTop ||
    opcode == eBranchEqInt || opcode == eBranchNeqInt ||
-    opcode == eBranchEqFloat || opcode == eBranchNeqFloat;
+    opcode == eBranchEqFloat || opcode == eBranchNeqFloat ||
+    opcode == eHandleEffect;
}
static inline void setpc(const m_bit *data, const m_uint i) {
if(instr->m_val <= vector_at(&nop, pc))
break;
}
- *(m_uint*)(data + SZ_INT) = instr->m_val > pc ? instr->m_val - pc : 0;
+ const m_uint new_pc = instr->m_val > pc ? instr->m_val - pc : 0;
+ if(instr->opcode == eHandleEffect) {
+ if(!code->handlers.ptr)
+ map_init(&code->handlers);
+ map_set(&code->handlers, j, new_pc);
+ }
+ *(m_uint*)(data + SZ_INT) = new_pc;
}
setpc(data, j);
++j;
for(m_uint i = vector_size(&shred->gc) + 1; --i;)
release((M_Object)vector_at(&shred->gc, i - 1), shred);
vector_release(&shred->gc);
+ if(shred->info->frame.ptr)
+ map_release(&shred->info->frame);
vmcode_remref(shred->info->orig, shred->info->vm->gwion);
const MemPool mp = shred->info->mp;
mp_free(mp, ShredTick, shred->tick);