mp_obj_t mp_make_closure_from_id(uint unique_code_id, mp_obj_t closure_tuple, mp_obj_t def_args) {
    DEBUG_OP_printf("make_closure_from_id %d\n", unique_code_id);
    // make function object
    mp_obj_t ffun = mp_make_function_from_id(unique_code_id, false, def_args);
    // wrap function in closure object
    return mp_obj_new_closure(ffun, closure_tuple);
}
mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args) {
    DEBUG_OP_printf("make_function_from_raw_code %p\n", rc);
    assert(rc != NULL);

    // def_args must be MP_OBJ_NULL or a tuple
    assert(def_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_args, &mp_type_tuple));

    // TODO implement default kw args
    assert(def_kw_args == MP_OBJ_NULL);

    // make the function, depending on the raw code kind
    mp_obj_t fun;
    switch (rc->kind) {
        case MP_CODE_BYTE:
            fun = mp_obj_new_fun_bc(rc->scope_flags, rc->arg_names, rc->n_pos_args, rc->n_kwonly_args, def_args, rc->u_byte.code);
            break;
        case MP_CODE_NATIVE:
            fun = mp_make_function_n(rc->n_pos_args, rc->u_native.fun);
            break;
        case MP_CODE_INLINE_ASM:
            fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->u_inline_asm.fun);
            break;
        default:
            // raw code was never set (this should not happen)
            assert(0);
            return mp_const_none;
    }

    // check for generator functions and if so wrap in generator object
    if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
        fun = mp_obj_new_gen_wrap(fun);
    }

    return fun;
}
mp_obj_t rt_unary_op(int op, mp_obj_t arg) {
    DEBUG_OP_printf("unary %d %p\n", op, arg);
    if (MP_OBJ_IS_SMALL_INT(arg)) {
        mp_small_int_t val = MP_OBJ_SMALL_INT_VALUE(arg);
        switch (op) {
            case RT_UNARY_OP_NOT:
                // logical not: a non-zero value is truthy, so its negation is False
                if (val != 0) { return mp_const_false; } else { return mp_const_true; }
            case RT_UNARY_OP_POSITIVE:
                break;
            case RT_UNARY_OP_NEGATIVE:
                val = -val;
                break;
            case RT_UNARY_OP_INVERT:
                val = ~val;
                break;
            default:
                assert(0);
                val = 0;
        }
        if (fit_small_int(val)) {
            return MP_OBJ_NEW_SMALL_INT(val);
        } else {
            // TODO make a bignum
            assert(0);
            return mp_const_none;
        }
    } else {
        // will be an object (small ints are caught in previous if)
        mp_obj_base_t *o = arg;
        if (o->type->unary_op != NULL) {
            mp_obj_t result = o->type->unary_op(op, arg);
            if (result != NULL) {
                return result;
            }
        }
        // TODO specify in error message what the operator is
        nlr_jump(mp_obj_new_exception_msg_1_arg(MP_QSTR_TypeError, "bad operand type for unary operator: '%s'", o->type->name));
    }
}
mp_obj_t rt_load_build_class(void) {
    DEBUG_OP_printf("load_build_class\n");
    mp_map_elem_t *elem = mp_qstr_map_lookup(&map_builtins, MP_QSTR___build_class__, false);
    if (elem == NULL) {
        nlr_jump(mp_obj_new_exception_msg(MP_QSTR_NameError, "name '__build_class__' is not defined"));
    }
    return elem->value;
}
mp_obj_t rt_load_global(qstr qstr) {
    // logic: search globals, then builtins
    DEBUG_OP_printf("load global %s\n", qstr_str(qstr));
    mp_map_elem_t *elem = mp_qstr_map_lookup(map_globals, qstr, false);
    if (elem == NULL) {
        elem = mp_qstr_map_lookup(&map_builtins, qstr, false);
        if (elem == NULL) {
            nlr_jump(mp_obj_new_exception_msg_1_arg(MP_QSTR_NameError, "name '%s' is not defined", qstr_str(qstr)));
        }
    }
    return elem->value;
}
mp_obj_t mp_make_closure_from_raw_code(mp_raw_code_t *rc, uint n_closed_over, const mp_obj_t *args) {
    DEBUG_OP_printf("make_closure_from_raw_code %p %u %p\n", rc, n_closed_over, args);
    // make function object
    mp_obj_t ffun;
    if (n_closed_over & 0x100) {
        // default positional and keyword args given
        ffun = mp_make_function_from_raw_code(rc, args[0], args[1]);
    } else {
        // default positional and keyword args not given
        ffun = mp_make_function_from_raw_code(rc, MP_OBJ_NULL, MP_OBJ_NULL);
    }
    // wrap function in closure object
    return mp_obj_new_closure(ffun, n_closed_over & 0xff, args + ((n_closed_over >> 7) & 2));
}
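// The sketch below is not part of the runtime; it is a minimal standalone
// illustration (under the encoding implied by the function above) of how the
// packed n_closed_over word is unpacked: the low byte gives the number of
// closed-over variables, and bit 0x100 indicates that args[0]/args[1] hold
// the default positional/keyword args, in which case the closure variables
// start two slots further into the args array.
#include <stdio.h>

int main(void) {
    unsigned n_closed_over = 0x100 | 3;              // hypothetical: 3 closure vars, defaults present
    unsigned n_vars = n_closed_over & 0xff;          // -> 3 closed-over variables
    unsigned var_offset = (n_closed_over >> 7) & 2;  // -> 2 when bit 0x100 is set, else 0
    printf("closure vars: %u, starting at args[%u]\n", n_vars, var_offset);
    return 0;
}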
mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args) {
    DEBUG_OP_printf("make_function_from_raw_code %p\n", rc);
    assert(rc != NULL);

    // def_args must be MP_OBJ_NULL or a tuple
    assert(def_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_args, &mp_type_tuple));

    // def_kw_args must be MP_OBJ_NULL or a dict
    assert(def_kw_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_kw_args, &mp_type_dict));

    // make the function, depending on the raw code kind
    mp_obj_t fun;
    switch (rc->kind) {
        case MP_CODE_BYTECODE:
        no_other_choice:
            fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->data.u_byte.bytecode, rc->data.u_byte.const_table);
            break;
        #if MICROPY_EMIT_NATIVE
        case MP_CODE_NATIVE_PY:
            fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->data.u_native.fun_data, rc->data.u_native.const_table);
            break;
        case MP_CODE_NATIVE_VIPER:
            fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig);
            break;
        #endif
        #if MICROPY_EMIT_INLINE_THUMB
        case MP_CODE_NATIVE_ASM:
            fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->data.u_native.fun_data);
            break;
        #endif
        default:
            // raw code was never set (this should not happen)
            assert(0);
            goto no_other_choice; // to help flow control analysis
    }

    // check for generator functions and if so wrap in generator object
    if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
        fun = mp_obj_new_gen_wrap(fun);
    }

    return fun;
}
int rt_is_true(mp_obj_t arg) {
    DEBUG_OP_printf("is true %p\n", arg);
    if (MP_OBJ_IS_SMALL_INT(arg)) {
        if (MP_OBJ_SMALL_INT_VALUE(arg) == 0) {
            return 0;
        } else {
            return 1;
        }
    } else if (arg == mp_const_none) {
        return 0;
    } else if (arg == mp_const_false) {
        return 0;
    } else if (arg == mp_const_true) {
        return 1;
    } else {
        assert(0);
        return 0;
    }
}
mp_obj_t mp_make_function_from_id(uint unique_code_id, bool free_unique_code, mp_obj_t def_args, mp_obj_t def_kw_args) {
    DEBUG_OP_printf("make_function_from_id %d\n", unique_code_id);
    if (unique_code_id >= unique_codes_total) {
        // illegal code id
        return mp_const_none;
    }

    // TODO implement default kw args
    assert(def_kw_args == MP_OBJ_NULL);

    // make the function, depending on the code kind
    mp_code_t *c = &unique_codes[unique_code_id];
    mp_obj_t fun;
    switch (c->kind) {
        case MP_CODE_BYTE:
            fun = mp_obj_new_fun_bc(c->scope_flags, c->arg_names, c->n_args, def_args, c->u_byte.code);
            break;
        case MP_CODE_NATIVE:
            fun = mp_make_function_n(c->n_args, c->u_native.fun);
            break;
        case MP_CODE_INLINE_ASM:
            fun = mp_obj_new_fun_asm(c->n_args, c->u_inline_asm.fun);
            break;
        default:
            // code id was never assigned (this should not happen)
            assert(0);
            return mp_const_none;
    }

    // check for generator functions and if so wrap in generator object
    if ((c->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
        fun = mp_obj_new_gen_wrap(fun);
    }

    // in some cases we can free the unique_code slot;
    // any dynamically allocated memory is now owned by the fun object
    if (free_unique_code) {
        memset(c, 0, sizeof *c); // make sure all pointers are zeroed
        c->kind = MP_CODE_UNUSED;
    }

    return fun;
}
mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args) {
    DEBUG_OP_printf("make_function_from_raw_code %p\n", rc);
    assert(rc != NULL);

    // def_args must be MP_OBJ_NULL or a tuple
    assert(def_args == MP_OBJ_NULL || mp_obj_is_type(def_args, &mp_type_tuple));

    // def_kw_args must be MP_OBJ_NULL or a dict
    assert(def_kw_args == MP_OBJ_NULL || mp_obj_is_type(def_kw_args, &mp_type_dict));

    // make the function, depending on the raw code kind
    mp_obj_t fun;
    switch (rc->kind) {
        #if MICROPY_EMIT_NATIVE
        case MP_CODE_NATIVE_PY:
        case MP_CODE_NATIVE_VIPER:
            fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->fun_data, rc->const_table);
            // check for a generator function, and if so change the type of the object
            if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
                ((mp_obj_base_t*)MP_OBJ_TO_PTR(fun))->type = &mp_type_native_gen_wrap;
            }
            break;
        #endif
        #if MICROPY_EMIT_INLINE_ASM
        case MP_CODE_NATIVE_ASM:
            fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->fun_data, rc->type_sig);
            break;
        #endif
        default:
            // rc->kind should always be set and BYTECODE is the only remaining case
            assert(rc->kind == MP_CODE_BYTECODE);
            fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->fun_data, rc->const_table);
            // check for a generator function, and if so change the type of the object
            if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
                ((mp_obj_base_t*)MP_OBJ_TO_PTR(fun))->type = &mp_type_gen_wrap;
            }
            break;
    }

    return fun;
}
mp_obj_t rt_binary_op(int op, mp_obj_t lhs, mp_obj_t rhs) {
    DEBUG_OP_printf("binary %d %p %p\n", op, lhs, rhs);

    // TODO correctly distinguish inplace operators for mutable objects
    // lookup logic that CPython uses for +=:
    //   check for implemented +=
    //   then check for implemented +
    //   then check for implemented seq.inplace_concat
    //   then check for implemented seq.concat
    //   then fail
    // note that list does not implement + or +=, so that inplace_concat is reached first for +=

    if (MP_OBJ_IS_SMALL_INT(lhs)) {
        mp_small_int_t lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs);
        if (MP_OBJ_IS_SMALL_INT(rhs)) {
            mp_small_int_t rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs);
            switch (op) {
                case RT_BINARY_OP_OR:
                case RT_BINARY_OP_INPLACE_OR: lhs_val |= rhs_val; break;
                case RT_BINARY_OP_XOR:
                case RT_BINARY_OP_INPLACE_XOR: lhs_val ^= rhs_val; break;
                case RT_BINARY_OP_AND:
                case RT_BINARY_OP_INPLACE_AND: lhs_val &= rhs_val; break;
                case RT_BINARY_OP_LSHIFT:
                case RT_BINARY_OP_INPLACE_LSHIFT: lhs_val <<= rhs_val; break;
                case RT_BINARY_OP_RSHIFT:
                case RT_BINARY_OP_INPLACE_RSHIFT: lhs_val >>= rhs_val; break;
                case RT_BINARY_OP_ADD:
                case RT_BINARY_OP_INPLACE_ADD: lhs_val += rhs_val; break;
                case RT_BINARY_OP_SUBTRACT:
                case RT_BINARY_OP_INPLACE_SUBTRACT: lhs_val -= rhs_val; break;
                case RT_BINARY_OP_MULTIPLY:
                case RT_BINARY_OP_INPLACE_MULTIPLY: lhs_val *= rhs_val; break;
                case RT_BINARY_OP_FLOOR_DIVIDE:
                case RT_BINARY_OP_INPLACE_FLOOR_DIVIDE: lhs_val /= rhs_val; break;
                #if MICROPY_ENABLE_FLOAT
                case RT_BINARY_OP_TRUE_DIVIDE:
                case RT_BINARY_OP_INPLACE_TRUE_DIVIDE: return mp_obj_new_float((mp_float_t)lhs_val / (mp_float_t)rhs_val);
                #endif

                // TODO implement modulo as specified by Python
                case RT_BINARY_OP_MODULO:
                case RT_BINARY_OP_INPLACE_MODULO: lhs_val %= rhs_val; break;

                // TODO check for negative power, and overflow
                case RT_BINARY_OP_POWER:
                case RT_BINARY_OP_INPLACE_POWER: {
                    int ans = 1;
                    while (rhs_val > 0) {
                        if (rhs_val & 1) {
                            ans *= lhs_val;
                        }
                        lhs_val *= lhs_val;
                        rhs_val /= 2;
                    }
                    lhs_val = ans;
                    break;
                }

                default: assert(0);
            }
            if (fit_small_int(lhs_val)) {
                return MP_OBJ_NEW_SMALL_INT(lhs_val);
            }
        } else if (MP_OBJ_IS_TYPE(rhs, &float_type)) {
            return mp_obj_float_binary_op(op, lhs_val, rhs);
        } else if (MP_OBJ_IS_TYPE(rhs, &complex_type)) {
            return mp_obj_complex_binary_op(op, lhs_val, 0, rhs);
        }
    } else if (MP_OBJ_IS_OBJ(lhs)) {
        // ...
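// Standalone sketch (not runtime code) of the exponentiation-by-squaring loop
// used in the small-int POWER case above; as the TODO there notes, the real
// code still lacks negative-exponent and overflow handling.
#include <stdio.h>

int main(void) {
    int lhs_val = 3, rhs_val = 5; // hypothetical operands: compute 3 ** 5
    int ans = 1;
    while (rhs_val > 0) {
        if (rhs_val & 1) {
            ans *= lhs_val;       // fold in the factor for this bit of the exponent
        }
        lhs_val *= lhs_val;       // square the base for the next bit
        rhs_val /= 2;
    }
    printf("%d\n", ans);          // prints 243
    return 0;
}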
void rt_store_global(qstr qstr, mp_obj_t obj) {
    DEBUG_OP_printf("store global %s <- %p\n", qstr_str(qstr), obj);
    mp_qstr_map_lookup(map_globals, qstr, true)->value = obj;
}
mp_obj_t rt_load_const_str(qstr qstr) {
    DEBUG_OP_printf("load '%s'\n", qstr_str(qstr));
    return mp_obj_new_str(qstr);
}
mp_obj_t rt_load_const_dec(qstr qstr) {
#if MICROPY_ENABLE_FLOAT
    DEBUG_OP_printf("load '%s'\n", qstr_str(qstr));
    const char *s = qstr_str(qstr);
    int in = PARSE_DEC_IN_INTG;
    mp_float_t dec_val = 0;
    bool exp_neg = false;
    int exp_val = 0;
    int exp_extra = 0;
    bool imag = false;
    for (; *s; s++) {
        int dig = *s;
        if ('0' <= dig && dig <= '9') {
            dig -= '0';
            if (in == PARSE_DEC_IN_EXP) {
                exp_val = 10 * exp_val + dig;
            } else {
                dec_val = 10 * dec_val + dig;
                if (in == PARSE_DEC_IN_FRAC) {
                    exp_extra -= 1;
                }
            }
        } else if (in == PARSE_DEC_IN_INTG && dig == '.') {
            in = PARSE_DEC_IN_FRAC;
        } else if (in != PARSE_DEC_IN_EXP && (dig == 'E' || dig == 'e')) {
            in = PARSE_DEC_IN_EXP;
            if (s[1] == '+') {
                s++;
            } else if (s[1] == '-') {
                s++;
                exp_neg = true;
            }
        } else if (dig == 'J' || dig == 'j') {
            s++;
            imag = true;
            break;
        } else {
            // unknown character
            break;
        }
    }
    if (*s != 0) {
        nlr_jump(mp_obj_new_exception_msg(MP_QSTR_SyntaxError, "invalid syntax for number"));
    }
    if (exp_neg) {
        exp_val = -exp_val;
    }
    exp_val += exp_extra;
    for (; exp_val > 0; exp_val--) {
        dec_val *= 10;
    }
    for (; exp_val < 0; exp_val++) {
        dec_val *= 0.1;
    }
    if (imag) {
        return mp_obj_new_complex(0, dec_val);
    } else {
        return mp_obj_new_float(dec_val);
    }
#else
    nlr_jump(mp_obj_new_exception_msg(MP_QSTR_SyntaxError, "decimal numbers not supported"));
#endif
}
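// Standalone sketch (not runtime code) tracing the mantissa/exponent
// accumulation above for the literal "1.5e2": digits are folded into dec_val,
// each fractional digit decrements exp_extra, and the final value is dec_val
// scaled by 10^(exp_val + exp_extra).
#include <stdio.h>

int main(void) {
    double dec_val = 15; // digits '1' and '5' accumulated
    int exp_val = 2;     // from the "e2" suffix
    int exp_extra = -1;  // one digit seen after the decimal point
    for (int e = exp_val + exp_extra; e > 0; e--) {
        dec_val *= 10;
    }
    printf("%g\n", dec_val); // prints 150
    return 0;
}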