-Subproject commit 8262b471834986ec11eacce6c836cbcfe65649e9
+Subproject commit 1ef0e2082f517b521ef935039c27a42e42ab5c3d
ANEW ANN Emitter new_emitter(MemPool);
ANN void free_emitter(MemPool, Emitter);
ANN m_bool emit_ast(const Env env, Ast ast);
+ANN m_bool emit_func_def(const Emitter emit, const Func_Def fdef);
ANN m_bool emit_exp_call1(const Emitter, const Func);
ANN2(1) Instr emit_add_instr(const Emitter, const f_instr) __attribute__((returns_nonnull));
ANN Code* emit_class_code(const Emitter, const m_str);
ANEW ANN Func new_func(MemPool, const m_str, const Func_Def);
ANN2(1,2) Symbol func_symbol(const Env, const m_str, const m_str, const m_str, const m_uint);
ANN m_bool check_lambda(const Env, const Type, Exp_Lambda*);
+ANN Type check_op_call(const Env env, Exp_Call *const exp);
ANN void builtin_func(const MemPool mp, const Func f, void* func_ptr);
#endif
typedef enum {
et_void, et_int, et_bool, et_char, et_float,
et_error, et_compound, et_object, et_shred, et_fork, et_event, et_ugen, et_string, et_ptr, et_array, et_gack,
- et_function, et_fptr, et_vararg, et_lambda, et_class, et_union, et_auto, et_none,
+ et_function, et_fptr, et_vararg, et_lambda, et_op, et_class, et_union, et_auto, et_none,
MAX_TYPE
} type_enum;
#endif
--- /dev/null
+#include <stdlib.h>
+#include <string.h>
+#include "gwion_util.h"
+
+typedef struct HashMapValue_ HashMapValue;
+
+typedef struct HashMap_ *HashMap;
+
+ANN HashMap new_hashmap(MemPool p, const size_t key_size, const size_t val_size);
+
+ANN bool hashmap_find(const HashMap hmap, m_bit* key, m_bit *ret_val);
+
+ANN bool hashmap_remove(const HashMap hmap, m_bit *key, m_bit *ret_val);
+
+ANN void hashmap_set(const HashMap hmap, m_bit *key, m_bit *val);
+ANN void free_hashmap(MemPool p, HashMap a);
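+/* Hypothetical usage sketch (key/value types chosen for illustration only,
+ * not part of the API); keys and values are copied by the sizes given to
+ * new_hashmap:
+ *   HashMap h = new_hashmap(mp, sizeof(m_uint), sizeof(m_uint));
+ *   m_uint key = 3, val = 12, out = 0;
+ *   hashmap_set(h, (m_bit*)&key, (m_bit*)&val);
+ *   const bool found = hashmap_find(h, (m_bit*)&key, (m_bit*)&out);
+ *   free_hashmap(mp, h);
+ */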
--- /dev/null
+#ifndef HMAP_H__
+#define HMAP_H__
+
+ANN HashMap new_hashmap(MemPool p, const size_t key_size, const size_t val_size);
+ANN bool hashmap_find(const HashMap hmap, m_bit *key, m_bit *ret_val);
+ANN bool hashmap_remove(const HashMap hmap, m_bit *key, m_bit *ret_val);
+ANN void hashmap_set(const HashMap hmap, m_bit *key, m_bit *val);
+ANN void free_hashmap(MemPool p, HashMap a);
+
+#endif
};
struct Op_Import {
- Type lhs, rhs, ret;
+ Type lhs;
+ Type rhs;
+ Type ret;
const struct Op_Func *func;
uintptr_t data;
- loc_t pos;
+ // used to return nspc in op_check.
+ // could be put in a union with `ret`
+ Nspc nspc;
Symbol op;
+ loc_t pos;
};
struct Implicit {
m_uint native_func;
};
size_t stack_depth;
- struct Vector_ tmpl_types;
+ Type ret_type; // could be `struct Vector_ tmpl_types;`
void* memoize;
Closure *closure;
m_str name;
ANN static m_bool emit_stmt(const Emitter emit, const Stmt stmt, const m_bool pop);
ANN static m_bool emit_stmt_list(const Emitter emit, Stmt_List list);
ANN static m_bool emit_exp_dot(const Emitter emit, const Exp_Dot* member);
-ANN static m_bool emit_func_def(const Emitter emit, const Func_Def func_def);
ANEW static Code* new_code(const Emitter emit, const m_str name) {
Code* code = mp_calloc(emit->gwion->mp, Code);
func->code->memoize = memoize_ini(emit, func);
}
-ANN static m_bool emit_func_def(const Emitter emit, const Func_Def f) {
+ANN m_bool emit_func_def(const Emitter emit, const Func_Def f) {
const Func func = f->base->func;
const Func_Def fdef = func->def;
const Func former = emit->env->func;
if(vflag(func->value_ref, vflag_builtin) && safe_tflag(emit->env->class_def, tflag_tmpl)) {
const Func base = nspc_lookup_func1(func->value_ref->from->owner, f->base->xid);
builtin_func(emit->gwion->mp, func, (f_xfun)base->code->native_func);
- vector_init(&func->code->tmpl_types);
- vector_add(&func->code->tmpl_types, (m_uint)fdef->base->ret_type);
- Arg_List args = fdef->base->args;
- while(args) {
- vector_add(&func->code->tmpl_types, (m_uint)args->type);
- args = args->next;
- }
+ func->code->ret_type = fdef->base->ret_type;
return GW_OK;
}
if(fdef_is_file_global(emit, fdef))
CHECK_BB(env_access(env, flag, pos))
return !(env->class_def && GET(flag, ae_flag_global)) ? GW_OK :GW_ERROR;
}
+#undef GET
-ANN Type __find_type(const Type type, const Symbol xid) {
+#define RETURN_TYPE(a) \
+do { \
+ const Type t = (a); \
+ if(t) return t; \
+} while(0)
+
+ANN static Type find_in_parent(const Type type, const Symbol xid) {
Type base = type;
while(base && base->nspc) {
- const Type t = nspc_lookup_type1(base->nspc, xid);
- if(t)
- return t;
+ RETURN_TYPE(nspc_lookup_type1(base->nspc, xid));
base = base->info->parent;
}
return NULL;
}
-ANN Type _find_type(const Env env, const Symbol xid) {
- const Type type = nspc_lookup_type1(env->curr, xid);
- if(type || !env->class_def)
- return type;
- return __find_type(env->class_def, xid);
+ANN Type find_initial(const Env env, const Symbol xid) {
+ if(env->class_def)
+ RETURN_TYPE(find_in_parent(env->class_def, xid));
+ RETURN_TYPE(nspc_lookup_type1(env->curr, xid));
+ const Vector v = &env->scope->nspc_stack;
+ for(m_uint i = vector_size(v) + 1; --i;) {
+ const Nspc nspc = (Nspc)vector_at(v, i-1);
+ RETURN_TYPE(nspc_lookup_type1(nspc, xid));
+ }
+ return NULL;
}
+#undef RETURN_TYPE
ANN Type find_type(const Env env, Type_Decl *path) {
- DECL_OO(Type, type, = _find_type(env, path->xid))
+ DECL_OO(const Type, type, = find_initial(env, path->xid))
while((path = path->next) && type && type->nspc) {
const Nspc nspc = type->nspc;
- type = __find_type(type, path->xid);
- if(!type)
+ const Type child = find_in_parent(type, path->xid);
+ if(!child)
ERR_O(path->pos, _("...(cannot find class '%s' in nspc '%s')"), s_name(path->xid), nspc->name)
}
return type;
env_push(es->env, NULL, t->info->value->from->ctx ? t->info->value->from->ctx->nspc : es->env->curr);
if(es->func && !(t->tflag & es->flag))
CHECK_BB(es->func((void*)es->data, t))
- if(tflag(t, tflag_tmpl))
- CHECK_BB(template_push_types(es->env, t->info->cdef->base.tmpl)) // incorrect templates
env_push_type((void*)es->env, t);
+ if(tflag(t, tflag_tmpl))
+ CHECK_BB(template_push_types(es->env, t->info->cdef->base.tmpl)) // incorrect templates?
return GW_OK;
}
}
ANN2(1) void envset_pop(struct EnvSet *es, const Type t) {
+ if(safe_tflag(t, tflag_tmpl)) // might not be useful
+ nspc_pop_type(es->env->gwion->mp, es->env->curr);
env_pop(es->env, es->scope);
if(!t)
return;
- if(tflag(t, tflag_tmpl))
- nspc_pop_type(es->env->gwion->mp, es->env->curr);
const Type owner_class = t->info->value->from->owner_class;
if(owner_class)
envset_pop(es, owner_class);
return new_fptr_def(gwi->gwion->mp, base);
}
+ANN static m_bool section_fptr(const Gwi gwi, const Fptr_Def fdef) {
+ Section* section = new_section_fptr_def(gwi->gwion->mp, fdef);
+ const Ast body = new_ast(gwi->gwion->mp, section, NULL);
+ gwi_body(gwi, body);
+ return GW_OK;
+}
+
ANN Type gwi_fptr_end(const Gwi gwi, const ae_flag flag) {
CHECK_BO(ck_ok(gwi, ck_fdef))
DECL_OO(const Fptr_Def, fptr, = import_fptr(gwi))
fptr->base->flag |= flag;
- // what happens if it is in a template class ?
+ if(safe_tflag(gwi->gwion->env->class_def, tflag_tmpl)/* && !fptr->base->tmpl*/) {
+ section_fptr(gwi, fptr);
+ ck_end(gwi);
+ return (Type)GW_OK;
+ }
const m_bool ret = traverse_fptr_def(gwi->gwion->env, fptr);
if(fptr->base->func) // is it needed ?
set_vflag(fptr->base->func->value_ref, vflag_builtin);
static m_bit map_byte[BYTECODE_SZ*4];
static const struct VM_Code_ map_run_code = {
.name = "map_run_code",
- .stack_depth = SZ_INT,
.bytecode = map_byte
};
-#define MAP_CODE_OFFSET SZ_INT*10
+static m_bit compactmap_byte[BYTECODE_SZ*4];
+static const struct VM_Code_ compactmap_run_code = {
+ .name = "compactmap_run_code",
+ .bytecode = compactmap_byte
+};
+
+static m_bit filter_byte[BYTECODE_SZ*4];
+static const struct VM_Code_ filter_run_code = {
+ .name = "filter_run_code",
+ .bytecode = filter_byte
+};
+
+static m_bit count_byte[BYTECODE_SZ*4];
+static const struct VM_Code_ count_run_code = {
+ .name = "count_run_code",
+ .bytecode = count_byte
+};
+
+static m_bit foldl_byte[BYTECODE_SZ*4];
+static const struct VM_Code_ foldl_run_code = {
+ .name = "foldl_run_code",
+ .bytecode = foldl_byte
+};
+
+static m_bit foldr_byte[BYTECODE_SZ*4];
+static const struct VM_Code_ foldr_run_code = {
+ .name = "foldr_run_code",
+ .bytecode = foldr_byte
+};
+
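+// Saved caller state for the functional array methods: pc and code are
+// restored once every element has been processed, offset is the memory
+// offset pushed/popped around each callback run, index is the element
+// currently being processed.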
+typedef struct FunctionalFrame {
+ m_uint pc;
+ VM_Code code;
+ m_uint offset;
+ m_uint index;
+} FunctionalFrame;
+
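+// _init saves the caller state in a FunctionalFrame and redirects the shred
+// to one of the helper bytecodes above; _next restarts that bytecode for the
+// next element, _return restores the caller's code and pc, and _finish pops
+// the callee memory and reschedules the shred.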
+ANN static inline void _init(const VM_Shred shred,
+ const struct VM_Code_ *code, const m_uint offset, const m_uint start) {
+ FunctionalFrame *frame = &*(FunctionalFrame*)MEM(SZ_INT*2 + start);
+ frame->pc = shred->pc;
+ frame->code = shred->code;
+ frame->offset = offset;
+ frame->index = 0;
+ *(m_uint*)REG(SZ_INT) = offset;
+ shred->code = (VM_Code)code;
+ shred->pc = 0;
+ shredule(shred->tick->shreduler, shred, 0);
+}
+
+ANN static inline void _next(const VM_Shred shred, const m_uint offset) {
+ shred->pc = 0;
+ *(m_uint*)REG(0) = offset;
+ POP_REG(shred, SZ_INT);
+}
+
+ANN static inline void _return(const VM_Shred shred, const FunctionalFrame* frame) {
+ shred->pc = frame->pc;
+ shred->code = frame->code;
+}
+
+ANN static inline void _finish(const VM_Shred shred, const FunctionalFrame* frame) {
+ POP_MEM(shred, frame->offset);
+ shredule(shred->tick->shreduler, shred, 0);
+}
+
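+// distance to the callback's working memory, past the locals and the FunctionalFrame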
+#define MAP_CODE_OFFSET (sizeof(FunctionalFrame) + SZ_INT*4)
static INSTR(map_run_ini) {
- *(VM_Code*)(shred->reg) = (*(VM_Code*)MEM(SZ_INT));
- *(VM_Code*)(shred->reg + SZ_INT) = 0;
+ const m_uint offset = *(m_uint*)REG(SZ_INT);
+ if(offset)
+ PUSH_MEM(shred, offset);
PUSH_REG(shred, SZ_INT);
const M_Object self = *(M_Object*)MEM(0);
const M_Vector array = ARRAY(self);
- const m_uint index = *(m_uint*)MEM(SZ_INT*5);
- shred->pc++;
- (*(m_uint*)MEM(SZ_INT*5))++; // increment the index
- shred->mem += MAP_CODE_OFFSET; // work in a safe memory space
- *(m_uint*)(shred->reg + SZ_INT) = 0;
+ FunctionalFrame *frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ shred->pc++;
+ shred->mem += MAP_CODE_OFFSET + SZ_INT; // work in a safe memory space
*(m_uint*)(shred->mem-SZ_INT) = 0;
*(m_uint*)(shred->mem-SZ_INT*2) = 0;
- *(VM_Code*)(shred->mem-SZ_INT*3) = (VM_Code)&map_run_code;
- m_vector_get(array, index, &*(m_bit**)(shred->mem + SZ_INT*5));
+ m_vector_get(array, frame->index, &*(m_bit**)(shred->mem + SZ_INT*6));
}
static INSTR(map_run_end) {
- shred->mem -= MAP_CODE_OFFSET;
+ shred->mem -= MAP_CODE_OFFSET + SZ_INT;
const M_Object ret_obj = *(M_Object*)MEM(SZ_INT*2);
- const M_Vector array = ARRAY(ret_obj);
+ const M_Vector array = ARRAY(*(M_Object*)MEM(0));
POP_REG(shred, ARRAY_SIZE(array));
- const m_uint index = *(m_uint*)MEM(SZ_INT*5);
- const m_uint size = m_vector_size(array);
- m_vector_set(array, index - 1, &*(m_bit**)shred->reg);
- if(index == size) {
- shred->pc = *(m_uint*)MEM(SZ_INT*3);
- shred->code = *(VM_Code*)MEM(SZ_INT*4);
- *(M_Object*)(shred->reg-SZ_INT) = ret_obj;
- } else {
- shred->pc = 0;
- POP_REG(shred, SZ_INT);
- }
- shredule(shred->tick->shreduler, shred, 0);
+ FunctionalFrame *const frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ m_vector_set(ARRAY(ret_obj), frame->index, shred->reg);
+ if(++frame->index == ARRAY_LEN(array)) {
+ _return(shred, frame);
+ *(M_Object*)(REG(-SZ_INT)) = ret_obj;
+ } else
+ _next(shred, frame->offset);
+ _finish(shred, frame);
+}
+
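+// Same loop as map_run_end, but the callback returns an Option: the value is
+// only appended to the result array when the option holds one.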
+static INSTR(compactmap_run_end) {
+ shred->mem -= MAP_CODE_OFFSET + SZ_INT;
+ const M_Object self = *(M_Object*)MEM(0);
+ const M_Vector self_array = ARRAY(self);
+ const M_Object ret_obj = *(M_Object*)MEM(SZ_INT*2);
+ const M_Vector ret_array = ARRAY(ret_obj);
+ POP_REG(shred, ARRAY_SIZE(ret_array));
+ FunctionalFrame *const frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ const m_uint size = m_vector_size(self_array);
+ const M_Object obj = *(M_Object*)REG(0);
+ if(*(m_uint*)obj->data)
+ m_vector_add(ret_array, &*(m_bit*)(obj->data + SZ_INT));
+ if(++frame->index == size) {
+ _return(shred, frame);
+ *(M_Object*)(REG(-SZ_INT)) = ret_obj;
+ } else
+ _next(shred, frame->offset);
+ _finish(shred, frame);
+}
+
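+// Append the current element to the result array when the predicate returned true.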
+static INSTR(filter_run_end) {
+ shred->mem -= MAP_CODE_OFFSET + SZ_INT;
+ POP_REG(shred, SZ_INT);
+ const M_Object self = *(M_Object*)MEM(0);
+ const M_Object ret_obj = *(M_Object*)MEM(SZ_INT*2);
+ const M_Vector array = ARRAY(ret_obj);
+ FunctionalFrame *const frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ if(*(m_uint*)(shred->reg))
+ m_vector_add(array, ARRAY_PTR(ARRAY(self)) + frame->index * ARRAY_SIZE(array));
+ if(++frame->index == ARRAY_LEN(ARRAY(self))) {
+ _return(shred, frame);
+ *(M_Object*)(REG(-SZ_INT)) = ret_obj;
+ } else
+ _next(shred, frame->offset);
+ _finish(shred, frame);
+}
+
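+// Count the elements for which the predicate returned true.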
+static INSTR(count_run_end) {
+ shred->mem -= MAP_CODE_OFFSET + SZ_INT;
+  const M_Object self = *(M_Object*)MEM(0);
+ POP_REG(shred, SZ_INT);
+ FunctionalFrame *const frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ if(*(m_uint*)(shred->reg))
+ (*(m_uint*)MEM(SZ_INT*2))++;
+ if(++frame->index == ARRAY_LEN(ARRAY(self))) {
+ _return(shred, frame);
+ *(m_uint*)(REG(-SZ_INT)) = *(m_uint*)MEM(SZ_INT*2);
+ } else
+ _next(shred, frame->offset);
+ _finish(shred, frame);
}
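+// The methods below set up their result, then hand control to the matching
+// helper bytecode via _init() when the array is not empty; an empty array
+// returns its (empty or zero) result immediately.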
static MFUN(vm_vector_map) {
+ const m_uint offset = *(m_uint*)REG(SZ_INT*3);
+ const M_Object ret = new_array(shred->info->vm->gwion->mp, o->type_ref, ARRAY_LEN(ARRAY(o)));
+ vector_add(&shred->gc, (m_uint)ret);
+ if(ARRAY_LEN(ARRAY(o))) {
+ _init(shred, &map_run_code, offset, SZ_INT);
+ *(M_Object*)MEM(SZ_INT*2) = ret;
+ } else
+ *(M_Object*)RETURN = ret;
+}
+
+static MFUN(vm_vector_compactmap) {
const VM_Code code = *(VM_Code*)REG(SZ_INT*2);
- const M_Object ret = *(M_Object*)MEM(SZ_INT*2) = *(M_Object*)RETURN = new_array(shred->info->vm->gwion->mp, (Type)vector_front(&code->tmpl_types), ARRAY_LEN(ARRAY(o)));
+ const m_uint offset = *(m_uint*)REG(SZ_INT*3);
+ const M_Object ret = new_array(shred->info->vm->gwion->mp,
+ code->ret_type, 0);
vector_add(&shred->gc, (m_uint)ret);
- *(m_uint*)MEM(SZ_INT*3) = shred->pc;
- *(VM_Code*)MEM(SZ_INT*4) = shred->code;
- shred->code = (VM_Code)&map_run_code;
- shred->pc = 0;
- shredule(shred->tick->shreduler, shred, 0);
+ if(ARRAY_LEN(ARRAY(o))) {
+ _init(shred, &compactmap_run_code, offset, SZ_INT);
+ *(M_Object*)MEM(SZ_INT*2) = ret;
+ } else
+ *(M_Object*)RETURN = ret;
+}
+
+static MFUN(vm_vector_filter) {
+ const m_uint offset = *(m_uint*)REG(SZ_INT*3);
+ const M_Object ret = new_array(shred->info->vm->gwion->mp, o->type_ref, 0);
+ vector_add(&shred->gc, (m_uint)ret);
+ if(ARRAY_LEN(ARRAY(o))) {
+ _init(shred, &filter_run_code, offset, SZ_INT);
+ *(M_Object*)MEM(SZ_INT*2) = ret;
+ } else
+ *(M_Object*)RETURN = ret;
+}
+
+static MFUN(vm_vector_count) {
+ const m_uint offset = *(m_uint*)REG(SZ_INT*3);
+ if(ARRAY_LEN(ARRAY(o))) {
+ _init(shred, &count_run_code, offset, SZ_INT);
+ *(m_uint*)MEM(SZ_INT*2) = 0;
+ } else
+ *(m_uint*)RETURN = 0;
+}
+
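+// foldl and foldr share fold_run_end below; their ini hooks differ only in
+// whether elements are read from the front or the back of the array.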
+static INSTR(foldl_run_ini) {
+ const m_uint offset = *(m_uint*)REG(SZ_INT);
+ if(offset)
+ PUSH_MEM(shred, offset);
+ const M_Object self = *(M_Object*)MEM(0);
+ *(m_uint*)(shred->reg + SZ_INT) = 0;
+ PUSH_REG(shred, SZ_INT);
+ shred->pc++;
+ const FunctionalFrame *frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ shred->mem += MAP_CODE_OFFSET + SZ_INT; // work in a safe memory space
+ *(m_uint*)(shred->mem-SZ_INT) = 0;
+ *(m_uint*)(shred->mem-SZ_INT*2) = 0;
+ m_vector_get(ARRAY(self), frame->index, &*(m_bit**)(shred->mem + SZ_INT*4));
+}
+
+static INSTR(foldr_run_ini) {
+ const m_uint offset = *(m_uint*)REG(SZ_INT);
+ if(offset)
+ PUSH_MEM(shred, offset);
+ const M_Object self = *(M_Object*)MEM(0);
+ *(m_uint*)(shred->reg + SZ_INT) = 0;
+ PUSH_REG(shred, SZ_INT);
+ shred->pc++;
+ const FunctionalFrame *frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ shred->mem += MAP_CODE_OFFSET + SZ_INT; // work in a safe memory space
+ *(m_uint*)(shred->mem-SZ_INT) = 0;
+ *(m_uint*)(shred->mem-SZ_INT*2) = 0;
+ const M_Vector array = ARRAY(self);
+ m_vector_get(array, ARRAY_LEN(array) - frame->index - 1, &*(m_bit**)(shred->mem + SZ_INT*4));
+}
+
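+// Copy the callback result back as the accumulator for the next element, or
+// onto the caller's stack once the whole array has been folded.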
+static INSTR(fold_run_end) {
+ shred->mem -= MAP_CODE_OFFSET + SZ_INT;
+ FunctionalFrame *const frame = &*(FunctionalFrame*)MEM(SZ_INT*3);
+ const M_Object self = *(M_Object*)MEM(0);
+ const VM_Code code = *(VM_Code*)MEM(SZ_INT);
+ const m_uint sz = code->stack_depth - ARRAY_SIZE(ARRAY(self));
+ const m_uint base_sz = code->stack_depth - sz;
+ POP_REG(shred, base_sz);
+ if(++frame->index == ARRAY_LEN(ARRAY(self))) {
+ POP_REG(shred, SZ_INT - base_sz);
+ shred->pc = frame->pc;
+ shred->code = frame->code;
+ memcpy(REG(-sz), REG(0), base_sz);
+ } else {
+ memcpy(shred->mem + MAP_CODE_OFFSET + SZ_INT*2 + SZ_INT*3 + sz, shred->reg, base_sz);
+ _next(shred, frame->offset);
+ }
+ _finish(shred, frame);
+}
+
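+// The accumulator size (the size of the template type A) is read from the
+// calling instruction, since it is not fixed when this file is compiled.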
+static MFUN(vm_vector_foldl) {
+ const m_bit *byte = shred->code->bytecode + (shred->pc-1) * BYTECODE_SZ;
+ const m_uint acc_sz = *(m_uint*)(byte + SZ_INT);
+ const m_uint offset = *(m_uint*)REG(SZ_INT*3 + acc_sz);
+ if(ARRAY_LEN(ARRAY(o))) {
+ _init(shred, &foldl_run_code, offset, SZ_INT);
+ memcpy(shred->mem + MAP_CODE_OFFSET + SZ_INT*5 + acc_sz, MEM(SZ_INT*2), acc_sz);
+ } else
+ memcpy((m_bit*)RETURN, MEM(SZ_INT*2), acc_sz);
+}
+
+static MFUN(vm_vector_foldr) {
+ const m_bit *byte = shred->code->bytecode + (shred->pc-1) * BYTECODE_SZ;
+ const m_uint acc_sz = *(m_uint*)(byte + SZ_INT);
+ const m_uint offset = *(m_uint*)REG(SZ_INT*3 + acc_sz);
+ if(ARRAY_LEN(ARRAY(o))) {
+ _init(shred, &foldr_run_code, offset, SZ_INT);
+ memcpy(shred->mem + MAP_CODE_OFFSET + SZ_INT*5 + acc_sz, MEM(SZ_INT*2), acc_sz);
+ } else
+ memcpy((m_bit*)RETURN, MEM(SZ_INT*2), acc_sz);
}
ANN /*static */Symbol array_sym(const Env env, const Type src, const m_uint depth);
builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 4), vm_vector_cap);
builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 5), vm_vector_random);
builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 6), vm_vector_map);
+ builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 7), vm_vector_compactmap);
+ builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 8), vm_vector_filter);
+ builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 9), vm_vector_count);
+ builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 10), vm_vector_foldl);
+ builtin_func(env->gwion->mp, (Func)vector_at(&t->nspc->info->vtable, 11), vm_vector_foldr);
if(isa(base, env->gwion->type[et_compound]) > 0) {
t->nspc->dtor = new_vmcode(env->gwion->mp, NULL, SZ_INT, 1, "array component dtor");
set_tflag(t, tflag_dtor);
return imp->t;
}
-static void prepare_map_run(void) {
- *(unsigned*)map_byte = eOP_MAX;
- *(f_instr*)(map_byte + SZ_INT*2) = map_run_ini;
- *(unsigned*)(map_byte+ BYTECODE_SZ) = eSetCode;
- *(m_uint*)(map_byte + BYTECODE_SZ + SZ_INT*2) = 3;
- *(unsigned*)(map_byte+ BYTECODE_SZ*2) = eOverflow;
- *(unsigned*)(map_byte+ BYTECODE_SZ*3) = eOP_MAX;
- *(f_instr*)(map_byte + BYTECODE_SZ*3 + SZ_INT*2) = map_run_end;
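+// Each helper bytecode is four instructions: the ini hook, SetCode (invoking
+// the user-provided callback), an overflow check, and the end hook that
+// either loops on the next element or returns to the caller.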
+ANN static void prepare_run(m_bit *const byte, const f_instr ini, const f_instr end) {
+ *(unsigned*)byte = eOP_MAX;
+ *(f_instr*)(byte + SZ_INT*2) = ini;
+  *(unsigned*)(byte + BYTECODE_SZ) = eSetCode;
+  *(m_uint*)(byte + BYTECODE_SZ + SZ_INT*2) = 3;
+  *(unsigned*)(byte + BYTECODE_SZ*2) = eOverflow;
+  *(unsigned*)(byte + BYTECODE_SZ*3) = eOP_MAX;
+ *(f_instr*)(byte + BYTECODE_SZ*3 + SZ_INT*2) = end;
+}
+
+ANN static void prepare_map_run(m_bit *const byte, const f_instr end) {
+ prepare_run(byte, map_run_ini, end);
+}
+
+ANN static void prepare_fold_run(m_bit *const byte, const f_instr ini) {
+ prepare_run(byte, ini, fold_run_end);
}
GWION_IMPORT(array) {
- prepare_map_run();
+ prepare_map_run(map_byte, map_run_end);
+ prepare_map_run(compactmap_byte, compactmap_run_end);
+ prepare_map_run(filter_byte, filter_run_end);
+ prepare_map_run(count_byte, count_run_end);
+ prepare_fold_run(foldl_byte, foldl_run_ini);
+ prepare_fold_run(foldr_byte, foldr_run_ini);
const Type t_array = gwi_class_ini(gwi, "Array:[T]", "Object");
gwi->gwion->type[et_array] = t_array;
gwi_class_xtor(gwi, NULL, array_dtor);
GWI_BB(gwi_func_arg(gwi, "T", "elem"))
GWI_BB(gwi_fptr_end(gwi, ae_flag_global))
+ GWI_BB(gwi_fptr_ini(gwi, "Option:[A]", "compactmap_t:[A]"))
+ GWI_BB(gwi_func_arg(gwi, "T", "elem"))
+ GWI_BB(gwi_fptr_end(gwi, ae_flag_global))
+
+ GWI_BB(gwi_fptr_ini(gwi, "A", "fold_t:[A]"))
+ GWI_BB(gwi_func_arg(gwi, "T", "elem"))
+ GWI_BB(gwi_func_arg(gwi, "A", "acc"))
+ GWI_BB(gwi_fptr_end(gwi, ae_flag_global))
+
+ GWI_BB(gwi_fptr_ini(gwi, "bool", "filter_t"))
+ GWI_BB(gwi_func_arg(gwi, "T", "elem"))
+ GWI_BB(gwi_fptr_end(gwi, ae_flag_global))
+
// put functions using T first
GWI_BB(gwi_func_ini(gwi, "bool", "remove"))
GWI_BB(gwi_func_arg(gwi, "int", "index"))
GWI_BB(gwi_func_arg(gwi, "map_t:[A]", "data"))
GWI_BB(gwi_func_end(gwi, vm_vector_map, ae_flag_none))
+ GWI_BB(gwi_func_ini(gwi, "A[]", "compactMap:[A]"))
+ GWI_BB(gwi_func_arg(gwi, "compactmap_t:[A]", "data"))
+ GWI_BB(gwi_func_end(gwi, vm_vector_compactmap, ae_flag_none))
+
+ GWI_BB(gwi_func_ini(gwi, "T[]", "filter"))
+ GWI_BB(gwi_func_arg(gwi, "filter_t", "data"))
+ GWI_BB(gwi_func_end(gwi, vm_vector_filter, ae_flag_none))
+
+ GWI_BB(gwi_func_ini(gwi, "int", "count"))
+ GWI_BB(gwi_func_arg(gwi, "filter_t", "data"))
+ GWI_BB(gwi_func_end(gwi, vm_vector_count, ae_flag_none))
+
+ GWI_BB(gwi_func_ini(gwi, "A", "foldl:[A]"))
+ GWI_BB(gwi_func_arg(gwi, "fold_t", "data"))
+ GWI_BB(gwi_func_arg(gwi, "A", "initial"))
+ GWI_BB(gwi_func_end(gwi, vm_vector_foldl, ae_flag_none))
+
+ GWI_BB(gwi_func_ini(gwi, "A", "foldr:[A]"))
+ GWI_BB(gwi_func_arg(gwi, "fold_t", "data"))
+ GWI_BB(gwi_func_arg(gwi, "A", "initial"))
+ GWI_BB(gwi_func_end(gwi, vm_vector_foldr, ae_flag_none))
+
GWI_BB(gwi_class_end(gwi))
GWI_BB(gwi_oper_ini(gwi, "Array", "Array", NULL))
const Type t_fptr = gwi_mk_type(gwi, "@func_ptr", SZ_INT, "@function");
GWI_BB(gwi_gack(gwi, t_fptr, gack_fptr))
GWI_BB(gwi_set_global_type(gwi, t_fptr, et_fptr))
+ const Type t_op = gwi_mk_type(gwi, "@op", SZ_INT, "@function");
+ GWI_BB(gwi_set_global_type(gwi, t_op, et_op))
const Type t_lambda = gwi_mk_type(gwi, "@lambda", SZ_INT, "@function");
set_tflag(t_lambda, tflag_infer);
GWI_BB(gwi_set_global_type(gwi, t_lambda, et_lambda))
GWI_BB(import_object_op(gwi))
GWI_BB(import_values(gwi))
+ GWI_BB(import_union(gwi))
GWI_BB(import_array(gwi))
GWI_BB(import_event(gwi))
GWI_BB(gwi_oper_emi(gwi, opem_object_dot))
GWI_BB(gwi_oper_end(gwi, "@dot", NULL))
- GWI_BB(import_union(gwi))
return GW_OK;
}
};
ANN static void _fptr_tmpl_push(const Env env, const Func f) {
- ID_List il = f->def->base->tmpl ? f->def->base->tmpl->list : NULL;
- if(il) {
- Type_List tl = f->def->base->tmpl->call;
- while(il) {
- const Type t = tl ? known_type(env, tl->td) : env->gwion->type[et_auto];
- nspc_add_type(env->curr, il->xid, t);
- il = il->next;
- if(tl)
- tl = tl->next;
- }
+ const Tmpl *tmpl = f->def->base->tmpl;
+ if(!tmpl)
+ return;
+ Type_List tl = tmpl->call;
+ if(!tl)
+ return;
+ ID_List il = tmpl->list;
+ while(il) {
+ const Type t = known_type(env, tl->td);
+ nspc_add_type(env->curr, il->xid, t);
+ il = il->next;
+ tl = tl->next;
}
}
ANN static m_bool fptr_tmpl_push(const Env env, struct FptrInfo *info) {
+ if(safe_tflag(info->rhs->value_ref->from->owner_class, tflag_tmpl))
if(!info->rhs->def->base->tmpl)
return GW_OK;
nspc_push_type(env->gwion->mp, env->curr);
DECL_OB(const Type, t1, = known_type(env, id[1]))
if(isa(t0, t1) > 0)
return GW_OK;
- return t1 == env->gwion->type[et_auto];
+  return t1 == env->gwion->type[et_auto] ? GW_OK : GW_ERROR;
}
ANN static m_bool fptr_args(const Env env, Func_Base *base[2]) {
Arg_List arg0 = base[0]->args, arg1 = base[1]->args;
while(arg0) {
CHECK_OB(arg1)
- Type_Decl* td[2] = { arg0->td, arg1->td };
- CHECK_BB(td_match(env, td))
+ if(arg0->type && arg1->type)
+ CHECK_BB(isa(arg0->type, arg1->type))
+ else {
+ Type_Decl* td[2] = { arg0->td, arg1->td };
+ CHECK_BB(td_match(env, td))
+ }
arg0 = arg0->next;
arg1 = arg1->next;
}
const Nspc nspc = v->from->owner;
const m_str c = s_name(info->lhs->def->base->xid),
stmpl = !info->rhs->def->base->tmpl ? NULL : "template";
- Type type = NULL;
- for(m_uint i = 0; i <= v->from->offset && !type; ++i) {
+ for(m_uint i = 0; i <= v->from->offset; ++i) {
const Symbol sym = (!info->lhs->def->base->tmpl || i != 0) ?
func_symbol(env, nspc->name, c, stmpl, i) : info->lhs->def->base->xid;
if(!is_class(env->gwion, info->lhs->value_ref->type)) {
DECL_OO(const Type, t, = nspc_lookup_type1(nspc, info->lhs->def->base->xid))
info->lhs = actual_type(env->gwion, t)->info->func;
}
+ Type type = NULL;
Func_Base *base[2] = { info->lhs->def->base, info->rhs->def->base };
CHECK_BO(fptr_tmpl_push(env, info))
if(fptr_rettype(env, info) > 0 &&
type = actual_type(env->gwion, info->lhs->value_ref->type) ?: info->lhs->value_ref->type;
if(info->rhs->def->base->tmpl)
nspc_pop_type(env->gwion->mp, env->curr);
+ if(type)return type;
}
- return type;
+ return NULL;
}
ANN static m_bool _check_lambda(const Env env, Exp_Lambda *l, const Func_Def def) {
+//if(l->def->base->func)return GW_OK;
+ if(safe_tflag(def->base->func->value_ref->from->owner_class, tflag_tmpl))
+ template_push_types(env, def->base->func->value_ref->from->owner_class->info->cdef->base.tmpl);
Arg_List base = def->base->args, arg = l->def->base->args;
while(base && arg) {
-// arg->td = base->td;
arg->td = type2td(env->gwion, known_type(env, base->td), exp_self(l)->pos);
-// arg->type = known_type(env, base->td);
base = base->next;
arg = arg->next;
}
- if(base || arg)
+ if(base || arg) // beware, error between pops
ERR_B(exp_self(l)->pos, _("argument number does not match for lambda"))
l->def->base->flag = def->base->flag;
// if(GET_FLAG(def->base, global) && !l->owner && def->base->func->value_ref->from->owner_class)
UNSET_FLAG(l->def->base, global);
l->def->base->td = type2td(env->gwion, known_type(env, def->base->td), exp_self(l)->pos);
l->def->base->values = env->curr->info->value;
+ if(safe_tflag(def->base->func->value_ref->from->owner_class, tflag_tmpl))
+ nspc_pop_type(env->gwion->mp, env->curr);
+ const m_uint scope = env->scope->depth;
+// if(GET_FLAG(def->base, global) && !l->owner && def->base->func->value_ref->from->owner_class)
+//env_push(env, NULL, env->context->nspc);
+ env->scope->depth = 0;
const m_bool ret = traverse_func_def(env, l->def);
+ env->scope->depth = scope;
+// if(GET_FLAG(def->base, global) && !l->owner && def->base->func->value_ref->from->owner_class)
+//env_pop(env, scope);
if(l->def->base->func) {
if(env->curr->info->value != l->def->base->values) {
free_scope(env->gwion->mp, env->curr->info->value);
const Fptr_Def fptr_def = new_fptr_def(env->gwion->mp, fbase);
char name[13 + strlen(env->curr->name) +
num_digit(bin->rhs->pos.first.line) + num_digit(bin->rhs->pos.first.column)];
sprintf(name, "generated@%s@%u:%u", env->curr->name, bin->rhs->pos.first.line, bin->rhs->pos.first.column);
fptr_def->base->xid = insert_symbol(name);
const m_bool ret = traverse_fptr_def(env, fptr_def);
const Type t = fptr_def->type;
static OP_EMIT(opem_fptr_cast) {
const Exp_Cast* cast = (Exp_Cast*)data;
- if(exp_self(cast)->type->info->func->def->base->tmpl)
- fptr_instr(emit, cast->exp->type->info->func, 1);
if(is_member(cast->exp->type))
member_fptr(emit);
+ if(exp_self(cast)->type->info->func->def->base->tmpl)
+ fptr_instr(emit, cast->exp->type->info->func, 1);
return GW_OK;
}
return impl->t;
}
+// smh the VM should be able to do that
+static INSTR(Func2Code) {
+ *(VM_Code*)REG(-SZ_INT) = (*(Func*)REG(-SZ_INT))->code;
+}
+
static OP_EMIT(opem_fptr_impl) {
struct Implicit *impl = (struct Implicit*)data;
if(is_member(impl->e->type))
member_fptr(emit);
- if(impl->t->info->func->def->base->tmpl)
+ if(impl->t->info->func->def->base->tmpl) {
fptr_instr(emit, ((Exp)impl->e)->type->info->func, 1);
+ if(!is_fptr(emit->gwion, impl->e->type) && safe_tflag(impl->t->info->value->from->owner_class, tflag_tmpl)){
+ emit_add_instr(emit, Func2Code);
+ }
+ }
return GW_OK;
}
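+// Operator "decay" turns a binary operator into a callable/function-pointer
+// value, hence the two-argument checks below.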
+static void op_narg_err(const Env env, const Func_Def fdef, const loc_t loc) {
+ if(!env->context->error) {
+ gwerr_basic(_("invalid operator decay"), _("Decayed operators take two arguments"), NULL,
+ env->name, loc, 0);
+ if(fdef)
+ gwerr_secondary("declared here", env->name, fdef->base->pos);
+ env->context->error = true;
+ }
+}
+
+static m_bool op_call_narg(const Env env, Exp arg,
+ const loc_t loc) {
+ m_uint narg = 0;
+ while(arg) {
+ narg++;
+ arg = arg->next;
+ }
+ if(narg == 2)
+ return GW_OK;
+ op_narg_err(env, NULL, loc);
+ return GW_ERROR;
+}
+
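+// Rewrite a call such as `(+)(a, b)` into the binary expression `a + b`
+// and type-check that instead.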
+ANN Type check_op_call(const Env env, Exp_Call *const exp) {
+ CHECK_BO(op_call_narg(env, exp->args, exp->func->pos))
+ const Exp base = exp_self(exp);
+ const Exp op_exp = exp->func;
+ base->exp_type = ae_exp_binary;
+ Exp_Binary *bin = &base->d.exp_binary;
+  const Exp lhs = exp->args;
+  const Exp rhs = lhs->next;
+  lhs->next = NULL;
+  bin->lhs = lhs;
+  bin->rhs = rhs;
+ bin->op = op_exp->d.prim.d.var;
+ free_exp(env->gwion->mp, op_exp);
+ return check_exp(env, base);
+}
+
+static m_bool op_impl_narg(const Env env, const Func_Def fdef,
+ const loc_t loc) {
+ m_uint narg = 0;
+ Arg_List arg = fdef->base->args;
+ while(arg) {
+ narg++;
+ arg = arg->next;
+ }
+ if(narg == 2)
+ return GW_OK;
+ op_narg_err(env, fdef, loc);
+ return GW_ERROR;
+}
+
+static inline void op_impl_ensure_types(const Env env, const Func func) {
+ Arg_List arg = func->def->base->args;
+ const bool owner_tmpl = safe_tflag(func->value_ref->from->owner_class, tflag_tmpl);
+ const bool func_tmpl = fflag(func, fflag_tmpl);
+ if(owner_tmpl)
+ template_push_types(env, func->value_ref->from->owner_class->info->cdef->base.tmpl);
+ if(func_tmpl)
+ template_push_types(env, func->def->base->tmpl);
+ while(arg) {
+ if(!arg->type)
+ arg->type = known_type(env, arg->td);
+ arg = arg->next;
+ }
+ if(!func->def->base->ret_type)
+ func->def->base->ret_type = known_type(env, func->def->base->td);
+ if(owner_tmpl)
+ nspc_pop_type(env->gwion->mp, env->curr);
+ if(func_tmpl)
+ nspc_pop_type(env->gwion->mp, env->curr);
+}
+
+#include "tmp_resolve.h"
+static OP_CHECK(opck_op_impl){
+ struct Implicit *impl = (struct Implicit*)data;
+ const Func func = impl->t->info->func;
+ CHECK_BN(op_impl_narg(env, func->def, impl->e->pos))
+ op_impl_ensure_types(env, func);
+ const Symbol lhs_sym = insert_symbol("@lhs");
+ const Symbol rhs_sym = insert_symbol("@rhs");
+ struct Exp_ _lhs = { .d={ .prim={ .d={.var=lhs_sym} , .prim_type=ae_prim_id }}, .exp_type=ae_exp_primary, .type=func->def->base->args->type, .pos=func->def->base->args->td->pos };
+ struct Exp_ _rhs = { .d={ .prim={ .d={.var=rhs_sym} , .prim_type=ae_prim_id }}, .exp_type=ae_exp_primary, .type=func->def->base->args->next->type, .pos=func->def->base->args->next->td->pos };
+ struct Exp_ self = { .pos=impl->e->pos };
+// Exp_Binary _bin = { .lhs=&_lhs, .op=impl->e->d.prim.d.var, .rhs=&_rhs };// .lhs=func->def->base->args // TODO
+ self.d.exp_binary.lhs = &_lhs;
+ self.d.exp_binary.rhs = &_rhs;
+ self.d.exp_binary.op = impl->e->d.prim.d.var;
+ struct Op_Import opi = { .op=impl->e->d.prim.d.var, .lhs=func->def->base->args->type,
+ .rhs=func->def->base->args->next->type, .data=(uintptr_t)&self.d.exp_binary, .pos=impl->e->pos };
+ DECL_ON(const Type, t, = op_check(env, &opi))
+ CHECK_BN(isa(t, func->def->base->ret_type))
+ // Find if the function exists
+ Value v = nspc_lookup_value0(opi.nspc, impl->e->d.prim.d.var);
+ if(v) {
+ const m_uint scope = env_push(env, NULL, opi.nspc);
+ _lhs.next = &_rhs;
+ Exp_Call call = { .args=&_lhs };
+ const Func exists = (Func)find_func_match(env, v->d.func_ref, &call);
+ env_pop(env, scope);
+ if(exists)
+ return actual_type(env->gwion, func->value_ref->type);
+ }
+ const Arg_List args = cpy_arg_list(env->gwion->mp, func->def->base->args);
+ // beware shadowing ?
+ args->var_decl->xid = lhs_sym;
+ args->next->var_decl->xid = rhs_sym;
+ Func_Base *base = new_func_base(env->gwion->mp, type2td(env->gwion, t, impl->e->pos),
+ impl->e->d.prim.d.var, args, ae_flag_none, impl->e->pos);
+ const Exp lhs = new_prim_id(env->gwion->mp, args->var_decl->xid, impl->e->pos);
+ const Exp rhs = new_prim_id(env->gwion->mp, args->next->var_decl->xid, impl->e->pos);
+ const Exp bin = new_exp_binary(env->gwion->mp, lhs, impl->e->d.prim.d.var, rhs, impl->e->pos);
+ const Stmt stmt = new_stmt_exp(env->gwion->mp, ae_stmt_return, bin, impl->e->pos);
+ const Stmt_List list = new_stmt_list(env->gwion->mp, stmt, NULL);
+ const Stmt code = new_stmt_code(env->gwion->mp, list, impl->e->pos);
+ const Func_Def def = new_func_def(env->gwion->mp, base, code);
+ def->base->xid = impl->e->d.prim.d.var;
+ const m_uint scope = env_push(env, NULL, opi.nspc);
+  // we assume success here
+ /*const m_bool ret = */traverse_func_def(env, def);
+ env_pop(env, scope);
+ def->base->func->value_ref->type->info->parent = env->gwion->type[et_op];
+ impl->e->type = def->base->func->value_ref->type;
+ impl->e->d.prim.value = def->base->func->value_ref;
+ return actual_type(env->gwion, func->value_ref->type);
+}
+
+static OP_EMIT(opem_op_impl) {
+ struct Implicit *impl = (struct Implicit*)data;
+ const Func_Def fdef = impl->e->type->info->func->def;
+ const m_bool ret = emit_func_def(emit, fdef);
+ const Instr instr = emit_add_instr(emit, RegPushImm);
+ instr->m_val = (m_uint)fdef->base->func->code;
+ return ret;
+}
+
ANN Type check_exp_unary_spork(const Env env, const Stmt code);
ANN static void fork_exp(const Env env, const Exp_Unary* unary) {
GWI_BB(gwi_oper_add(gwi, opck_fptr_impl))
GWI_BB(gwi_oper_emi(gwi, opem_fptr_impl))
GWI_BB(gwi_oper_end(gwi, "@implicit", NULL))
+ GWI_BB(gwi_oper_ini(gwi, "@op", "@func_ptr", NULL))
+ GWI_BB(gwi_oper_add(gwi, opck_op_impl))
+ GWI_BB(gwi_oper_emi(gwi, opem_op_impl))
+ GWI_BB(gwi_oper_end(gwi, "@implicit", NULL))
GWI_BB(gwi_oper_ini(gwi, NULL, (m_str)OP_ANY_TYPE, NULL))
GWI_BB(gwi_oper_add(gwi, opck_spork))
GWI_BB(gwi_oper_emi(gwi, opem_spork))
const Union_Def udef = new_union_def(env->gwion->mp, cpy_union_list(env->gwion->mp, u->l), u->pos);
udef->xid = info->name;
udef->tmpl = mk_tmpl(env, u->tmpl, info->td->types);
+ // resolve the template here
if(GET_FLAG(info->base, global))
SET_FLAG(udef, global);
const m_bool ret = scan0_union_def(env, udef);
if(udef->type) {
udef->type->info->udef = udef;// mark as udef
- info->ret = udef->type;
+ info->ret = udef->type;// is info->ret necessary?
set_tflag(info->ret, tflag_udef);
} else
free_union_def(env->gwion->mp, udef);
add_op(env->gwion, &opi);
}
-OP_CHECK(opck_foreach_scan) {
+static OP_CHECK(opck_ref_scan) {
struct TemplateScan *ts = (struct TemplateScan*)data;
struct tmpl_info info = { .base=ts->t, .td=ts->td, .list=ts->t->info->cdef->base.tmpl->list };
const Type exists = tmpl_exists(env, &info);
GWI_BB(gwi_item_end(gwi, ae_flag_none, num, 0))
GWI_BB(gwi_struct_end(gwi))
GWI_BB(gwi_oper_ini(gwi, "Ref", NULL, NULL))
- GWI_BB(gwi_oper_add(gwi, opck_foreach_scan))
+ GWI_BB(gwi_oper_add(gwi, opck_ref_scan))
GWI_BB(gwi_oper_end(gwi, "@scan", NULL))
return GW_OK;
}
+#include <ctype.h>
#include "gwion_util.h"
#include "gwion_ast.h"
#include "gwion_env.h"
const Symbol sym = *data;
const Value v = check_non_res_value(env, data);
if(!v || !vflag(v, vflag_valid) || (v->from->ctx && v->from->ctx->error)) {
+ const m_str name = s_name(*data);
+ if(!isalpha(*name) && *name != '_')
+ return env->gwion->type[et_op];
gwerr_basic(_("Invalid variable"), _("not legit at this point."), NULL,
env->name, prim_pos(data), 0);
did_you_mean_nspc(v ? value_owner(env, v) : env->curr, s_name(sym));
CHECK_OO(check_prim_interp(env, data))
return env->gwion->type[et_gack];
}
-
+/*
ANN static Type check_prim_map(const Env env, const Exp *data) {
CHECK_OO(check_exp(env, *data))
if(env->func) // really?
env->context->error = true;
return NULL;
}
-
+*/
#define describe_prim_xxx(name, type) \
ANN static Type check_prim_##name(const Env env NUSED, const union prim_data* data NUSED) {\
return type; \
do {
const Var_Decl decl = arg_list->var_decl;
const Value v = decl->value;
-// TODO: use coumpound instead of object?
- if(isa(v->type, env->gwion->type[et_object]) > 0 || isa(v->type, env->gwion->type[et_function]) > 0)
- unset_fflag(env->func, fflag_pure);
CHECK_BB(already_defined(env, decl->xid, decl->pos))
set_vflag(v, vflag_valid);
nspc_add_value(env->curr, decl->xid, v);
const Type t = op_check(env, &opi);
return t;
}
+ if(t == env->gwion->type[et_op])
+ return check_op_call(env, exp);
if(t == env->gwion->type[et_lambda])
return check_lambda_call(env, exp);
if(fflag(t->info->func, fflag_ftmpl)) {
nspc_lookup_value1(v->from->owner, sym);
}
-ANN static inline m_bool tmpl_valid(const Env env, const Func_Def fdef) {
- return safe_fflag(fdef->base->func, fflag_valid) ||
- check_traverse_fdef(env, fdef) > 0;
+ANN static inline m_bool tmpl_traverse(const Env env, const Func_Def fdef) {
+ return check_traverse_fdef(env, fdef);
+}
+
+ANN static inline bool tmpl_valid(const Env env, const Func_Def fdef/*, Exp_Call *const exp*/) {
+ if(safe_fflag(fdef->base->func, fflag_valid))
+ return true;
+// const Tmpl tmpl = { .list=fdef->base->tmpl->list, .call=exp->tmpl->call };
+// CHECK_BO(template_push_types(env, &tmpl));
+ const bool ret = check_traverse_fdef(env, fdef) > 0;
+// nspc_pop_type(env->gwion->mp, env->curr);
+ return ret;
}
ANN static Func ensure_tmpl(const Env env, const Func_Def fdef, Exp_Call *const exp) {
- if(!tmpl_valid(env, fdef))
+ if(!tmpl_valid(env, fdef/*, exp*/))
return NULL;
if(exp->args && !exp->args->type)
return NULL;
struct ResolverArgs ra = {.v = v, .e = exp, .tmpl_name = tmpl_name, .types = types};
CHECK_BO(envset_push(&es, v->from->owner_class, v->from->owner))
(void)env_push(env, v->from->owner_class, v->from->owner);
-if(v->from->owner_class && v->from->owner_class->info->cdef->base.tmpl)
-(void)template_push_types(env, v->from->owner_class->info->cdef->base.tmpl);
-//const Tmpl tmpl = { .list=v->frombase->tmpl->list, .call=ra->types };
-//CHECK_BO(template_push_types(env, &tmpl));
+ if(v->from->owner_class && v->from->owner_class->info->cdef->base.tmpl)
+ (void)template_push_types(env, v->from->owner_class->info->cdef->base.tmpl);
const Func m_func = !is_fptr(env->gwion, v->type) ?
func_match(env, &ra) :fptr_match(env, &ra);
-if(v->from->owner_class && v->from->owner_class->info->cdef->base.tmpl)
-nspc_pop_type(env->gwion->mp, env->curr);
-//nspc_pop_type(env->gwion->mp, env->curr);
+ if(v->from->owner_class && v->from->owner_class->info->cdef->base.tmpl)
+ nspc_pop_type(env->gwion->mp, env->curr);
env_pop(env, scope);
if(es.run)
envset_pop(&es, v->from->owner_class);
if(ret) {
if(ret == env->gwion->type[et_error])
return NULL;
+ opi->nspc = nspc;
return ret;
}
} while(l && (l = op_parent(env, l)));
return already_defined(env, s, pos);
}
-ANN static void fptr_assign(const Env env, const Fptr_Def fptr) {
+ANN static void fptr_assign(const Fptr_Def fptr) {
const Func_Def def = fptr->type->info->func->def;
if(GET_FLAG(fptr->base, global)) {
- context_global(env);
SET_FLAG(fptr->value, global);
SET_FLAG(fptr->base->func, global);
SET_FLAG(def->base, global);
const m_str name = s_name(fptr->base->xid);
const Type t = scan0_type(env, name, env->gwion->type[et_fptr]);
const bool global = !env->class_def && GET_FLAG(fptr->base, global);
- t->nspc = new_nspc(env->gwion->mp, name);
t->flag |= fptr->base->flag;
fptr->type = t;
if(global) {
valuefrom(env, fptr->value->from, fptr->base->pos);
fptr_def(env, fptr);
if(env->class_def)
- fptr_assign(env, fptr);
+ fptr_assign(fptr);
set_vflag(fptr->value, vflag_func);
add_type(env, t->info->value->from->owner, t);
type_addref(t);
free_code_instr(a->instr, gwion);
}
free_vector(gwion->mp, a->instr);
- } else if(a->tmpl_types.ptr)
- vector_release(&a->tmpl_types);
+ }
free_mstr(gwion->mp, a->name);
mp_free(gwion->mp , VM_Code, a);
}
--- /dev/null
+funcdef int test_t(int, int);
+fun void test(test_t t) {
+ <<<t(1,2)>>>;
+}
+(+) => test;
+(+) => test;
+(+) => test;
--- /dev/null
+<<< (+)(1,2) >>>;
+<<< (1,2) => (+) >>>;
#! [contains] must be declared 'abstract'
class abstract C {
fun abstract void test(int i);
+ fun abstract void test(int i, float f);
}
class D extends C {}
+++ /dev/null
-op_already_imported.gw