Example #1
STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args) {
    // This structure holds the Python function and arguments for thread entry.
    // We copy all arguments into this structure to keep ownership of them.
    // We must be very careful about root pointers because this pointer may
    // disappear from our address space before the thread is created.
    thread_entry_args_t *th_args;

    // get positional arguments
    size_t pos_args_len;
    mp_obj_t *pos_args_items;
    mp_obj_get_array(args[1], &pos_args_len, &pos_args_items);

    // check for keyword arguments
    if (n_args == 2) {
        // just positional arguments
        th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len);
        th_args->n_kw = 0;
    } else {
        // positional and keyword arguments
        if (mp_obj_get_type(args[2]) != &mp_type_dict) {
            mp_raise_TypeError("expecting a dict for keyword args");
        }
        mp_map_t *map = &((mp_obj_dict_t*)MP_OBJ_TO_PTR(args[2]))->map;
        th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len + 2 * map->used);
        th_args->n_kw = map->used;
        // copy across the keyword arguments
        for (size_t i = 0, n = pos_args_len; i < map->alloc; ++i) {
            if (mp_map_slot_is_filled(map, i)) {
                th_args->args[n++] = map->table[i].key;
                th_args->args[n++] = map->table[i].value;
            }
        }
    }

    // copy across the positional arguments
    th_args->n_args = pos_args_len;
    memcpy(th_args->args, pos_args_items, pos_args_len * sizeof(mp_obj_t));

    // pass our locals and globals into the new thread
    th_args->dict_locals = mp_locals_get();
    th_args->dict_globals = mp_globals_get();

    // set the stack size to use
    th_args->stack_size = thread_stack_size;

    // set the function for thread entry
    th_args->fun = args[0];

    // spawn the thread!
    mp_thread_create(thread_entry, th_args, &th_args->stack_size);

    return mp_const_none;
}
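
All of these examples rely on the same allocation idiom: m_new_obj_var(type, var_type, n) returns a single heap block big enough for the fixed-size struct plus n trailing var_type elements (a flexible-array-member layout). As a rough illustration of how Example #1 then packs the positional arguments followed by the keyword key/value pairs into that trailing array, here is a minimal stand-alone sketch in plain C; entry_args_t, obj_t and entry_args_new are hypothetical stand-ins, not the MicroPython definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Hypothetical stand-in for mp_obj_t; in MicroPython it is an opaque word-sized handle.
typedef void *obj_t;

// Simplified analogue of thread_entry_args_t: a fixed header followed by a
// flexible array that holds n_args positional values and then n_kw key/value
// pairs, in the same order the example above copies them.
typedef struct {
    size_t n_args;
    size_t n_kw;
    obj_t args[]; // flexible array member
} entry_args_t;

// Rough equivalent of m_new_obj_var(entry_args_t, obj_t, n):
// one allocation covering the header plus n trailing elements.
static entry_args_t *entry_args_new(size_t n) {
    return (entry_args_t *)malloc(sizeof(entry_args_t) + n * sizeof(obj_t));
}

int main(void) {
    obj_t pos[2] = {(void *)0x1, (void *)0x2};  // two positional args
    obj_t kw[2] = {(void *)0x10, (void *)0x20}; // one key/value pair

    entry_args_t *a = entry_args_new(2 + 2);
    a->n_args = 2;
    a->n_kw = 1;
    memcpy(a->args, pos, 2 * sizeof(obj_t));    // positional args first
    memcpy(a->args + 2, kw, 2 * sizeof(obj_t)); // then the key/value pairs
    printf("total slots used: %zu\n", a->n_args + 2 * a->n_kw);
    free(a);
    return 0;
}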
Example #2
mp_parse_node_struct_t *parse_node_new_struct(int rule_id, int num_args) {
    mp_parse_node_struct_t *pn = m_new_obj_var(mp_parse_node_struct_t, mp_parse_node_t, num_args);
    pn->source = 0; // TODO
    pn->kind_num_nodes = (rule_id & 0xff) | (num_args << 8);
    num_parse_nodes_allocated += 1;
    return pn;
}
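
The kind_num_nodes field above packs two values into one word; the expression suggests the rule id occupies the low 8 bits and the child-node count the remaining bits. A small sketch of that packing and the matching unpacking (the helper names are mine, not part of the MicroPython API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Low 8 bits = rule id, upper bits = number of child nodes,
// mirroring the (rule_id & 0xff) | (num_args << 8) expression above.
static uint32_t pack_kind_num_nodes(int rule_id, int num_args) {
    return (uint32_t)(rule_id & 0xff) | ((uint32_t)num_args << 8);
}

static int unpack_rule_id(uint32_t kind_num_nodes) {
    return (int)(kind_num_nodes & 0xff);
}

static int unpack_num_nodes(uint32_t kind_num_nodes) {
    return (int)(kind_num_nodes >> 8);
}

int main(void) {
    uint32_t k = pack_kind_num_nodes(42, 3);
    assert(unpack_rule_id(k) == 42);
    assert(unpack_num_nodes(k) == 3);
    printf("rule=%d nodes=%d\n", unpack_rule_id(k), unpack_num_nodes(k));
    return 0;
}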
Example #3
mp_obj_t mp_obj_new_fun_bc(uint scope_flags, qstr *args, uint n_args, mp_obj_t def_args_in, const byte *code) {
    uint n_def_args = 0;
    uint n_extra_args = 0;
    mp_obj_tuple_t *def_args = def_args_in;
    if (def_args != MP_OBJ_NULL) {
        n_def_args = def_args->len;
        n_extra_args = def_args->len;
    }
    if ((scope_flags & MP_SCOPE_FLAG_VARARGS) != 0) {
        n_extra_args += 1;
    }
    if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
        n_extra_args += 1;
    }
    mp_obj_fun_bc_t *o = m_new_obj_var(mp_obj_fun_bc_t, mp_obj_t, n_extra_args);
    o->base.type = &mp_type_fun_bc;
    o->globals = mp_globals_get();
    o->args = args;
    o->n_args = n_args;
    o->n_def_args = n_def_args;
    o->takes_var_args = (scope_flags & MP_SCOPE_FLAG_VARARGS) != 0;
    o->takes_kw_args = (scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0;
    o->bytecode = code;
    if (def_args != MP_OBJ_NULL) {
        memcpy(o->extra_args, def_args->items, n_def_args * sizeof(mp_obj_t));
    }
    return o;
}
Example #4
mp_obj_t mp_obj_new_gen_instance(const byte *bytecode, uint n_state, int n_args, const mp_obj_t *args) {
    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, mp_obj_t, n_state);
    o->base.type = &gen_instance_type;
    o->code_info = bytecode;
    o->ip = bytecode;
    o->sp = &o->state[0] - 1; // sp points to top of stack, which starts off 1 below the state
    o->n_state = n_state;

    // copy args to end of state array, in reverse (that's how mp_execute_byte_code_2 needs it)
    for (int i = 0; i < n_args; i++) {
        o->state[n_state - 1 - i] = args[i];
    }

    // TODO
    // prelude for making cells (closed over variables)
    // for now we just make sure there are no cell variables
    // need to work out how to implement closed over variables in generators

    // get code info size
    machine_uint_t code_info_size = bytecode[0] | (bytecode[1] << 8) | (bytecode[2] << 16) | (bytecode[3] << 24);
    o->ip += code_info_size;
    assert(o->ip[0] == 0);
    o->ip += 1;

    return o;
}
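
The copy loop above works backwards from the last slot: args[0] lands in state[n_state - 1], args[1] in state[n_state - 2], and so on, because (per the comment) that reversed layout is what the bytecode interpreter expects for its locals. A tiny stand-alone demo of the same index arithmetic, with ints standing in for mp_obj_t:

#include <stdio.h>

int main(void) {
    enum { N_STATE = 6, N_ARGS = 3 };
    int args[N_ARGS] = {10, 20, 30};
    int state[N_STATE] = {0};

    // args[0] goes to the last slot, args[1] just before it, and so on.
    for (int i = 0; i < N_ARGS; i++) {
        state[N_STATE - 1 - i] = args[i];
    }

    for (int i = 0; i < N_STATE; i++) {
        printf("state[%d] = %d\n", i, state[i]);
    }
    return 0;
}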
Example #5
STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args) {
    mp_obj_gen_wrap_t *self = self_in;
    mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t*)self->fun;
    assert(MP_OBJ_IS_TYPE(self_fun, &mp_type_fun_bc));

    // skip code-info block
    const byte *code_info = self_fun->bytecode;
    mp_uint_t code_info_size = mp_decode_uint(&code_info);
    const byte *ip = self_fun->bytecode + code_info_size;

    // bytecode prelude: skip arg names
    ip += (self_fun->n_pos_args + self_fun->n_kwonly_args) * sizeof(mp_obj_t);

    // bytecode prelude: get state size and exception stack size
    mp_uint_t n_state = mp_decode_uint(&ip);
    mp_uint_t n_exc_stack = mp_decode_uint(&ip);

    // allocate the generator object, with room for local stack and exception stack
    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte,
        n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
    o->base.type = &mp_type_gen_instance;

    o->globals = self_fun->globals;
    o->code_state.n_state = n_state;
    o->code_state.ip = ip;
    mp_setup_code_state(&o->code_state, self_fun, n_args, n_kw, args);
    return o;
}
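
mp_decode_uint above consumes a variable-length unsigned integer from the bytecode stream and advances the pointer past it. To the best of my knowledge the format stores 7 value bits per byte, most significant group first, with the top bit of each byte acting as a continuation flag; treat the decoder below as a sketch of that assumed encoding, not as the canonical MicroPython implementation.

#include <stdio.h>

typedef unsigned char byte;

// Assumed format: 7 value bits per byte, high bit set means "more bytes follow".
static unsigned long decode_uint(const byte **ptr) {
    unsigned long unum = 0;
    byte val;
    const byte *p = *ptr;
    do {
        val = *p++;
        unum = (unum << 7) | (val & 0x7f);
    } while ((val & 0x80) != 0);
    *ptr = p; // leave the caller's pointer just past the encoded value
    return unum;
}

int main(void) {
    byte buf[] = {0x82, 0x05}; // encodes (2 << 7) | 5 = 261 under the assumed scheme
    const byte *p = buf;
    printf("value = %lu\n", decode_uint(&p));        // 261
    printf("consumed %ld bytes\n", (long)(p - buf)); // 2
    return 0;
}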
Example #6
mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args_in, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table) {
    mp_uint_t n_def_args = 0;
    mp_uint_t n_extra_args = 0;
    mp_obj_tuple_t *def_args = MP_OBJ_TO_PTR(def_args_in);
    if (def_args_in != MP_OBJ_NULL) {
        assert(MP_OBJ_IS_TYPE(def_args_in, &mp_type_tuple));
        n_def_args = def_args->len;
        n_extra_args = def_args->len;
    }
    if (def_kw_args != MP_OBJ_NULL) {
        n_extra_args += 1;
    }
    mp_obj_fun_bc_t *o = m_new_obj_var(mp_obj_fun_bc_t, mp_obj_t, n_extra_args);
    o->base.type = &mp_type_fun_bc;
    o->globals = mp_globals_get();
    o->bytecode = code;
    o->const_table = const_table;
    if (def_args != NULL) {
        memcpy(o->extra_args, def_args->items, n_def_args * sizeof(mp_obj_t));
    }
    if (def_kw_args != MP_OBJ_NULL) {
        o->extra_args[n_def_args] = def_kw_args;
    }
    return MP_OBJ_FROM_PTR(o);
}
Example #7
STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    mp_obj_gen_wrap_t *self = self_in;
    mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t*)self->fun;
    assert(MP_OBJ_IS_TYPE(self_fun, &mp_type_fun_bc));

    const byte *bytecode = self_fun->bytecode;
    // get code info size, and skip the line number table
    machine_uint_t code_info_size = bytecode[0] | (bytecode[1] << 8) | (bytecode[2] << 16) | (bytecode[3] << 24);
    bytecode += code_info_size;

    // bytecode prelude: get state size and exception stack size
    machine_uint_t n_state = bytecode[0] | (bytecode[1] << 8);
    machine_uint_t n_exc_stack = bytecode[2] | (bytecode[3] << 8);
    bytecode += 4;

    // allocate the generator object, with room for local stack and exception stack
    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte,
        n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
    o->base.type = &mp_type_gen_instance;

    o->globals = self_fun->globals;
    o->code_state.n_state = n_state;
    o->code_state.ip = bytecode;
    mp_setup_code_state(&o->code_state, self_fun, n_args, n_kw, args);
    return o;
}
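
Examples #4, #7, #21 and #26 all parse the bytecode prelude the same way: a 32-bit little-endian code-info size (used to skip the whole code-info block, line-number table included), followed by two 16-bit little-endian values for the state size and the exception-stack depth. A self-contained decoding sketch with hypothetical helper names (the real code does this inline, as above):

#include <stdio.h>

typedef unsigned char byte;

static unsigned decode_u32le(const byte *p) {
    return p[0] | (p[1] << 8) | (p[2] << 16) | ((unsigned)p[3] << 24);
}

static unsigned decode_u16le(const byte *p) {
    return p[0] | (p[1] << 8);
}

int main(void) {
    // Toy prelude: the code-info block is just its own 4-byte size field here,
    // then n_state = 5 and n_exc_stack = 2 as 16-bit little-endian values.
    byte prelude[8] = {4, 0, 0, 0, 5, 0, 2, 0};
    const byte *bc = prelude;

    unsigned code_info_size = decode_u32le(bc);
    bc += code_info_size; // skip the code-info block
    unsigned n_state = decode_u16le(bc);
    unsigned n_exc_stack = decode_u16le(bc + 2);

    printf("code_info_size=%u n_state=%u n_exc_stack=%u\n",
           code_info_size, n_state, n_exc_stack);
    return 0;
}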
Example #8
mp_obj_t mp_obj_str_builder_start(uint len, byte **data) {
    mp_obj_str_t *o = m_new_obj_var(mp_obj_str_t, byte, len + 1);
    o->base.type = &str_type;
    o->len = len;
    *data = o->data;
    return o;
}
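
The string builder above asks m_new_obj_var for len + 1 trailing bytes so there is room for a terminating NUL after the caller has written len bytes, and it hands back a pointer straight into that trailing array. A minimal sketch of the same pattern; str_buf_t and str_buf_start are hypothetical names, not the MicroPython types.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    size_t len;
    unsigned char data[]; // len bytes of payload plus one byte for a NUL
} str_buf_t;

// Allocate the header and len + 1 data bytes in one block and expose the
// writable data area to the caller, like mp_obj_str_builder_start above.
static str_buf_t *str_buf_start(size_t len, unsigned char **data) {
    str_buf_t *o = (str_buf_t *)malloc(sizeof(str_buf_t) + len + 1);
    o->len = len;
    *data = o->data;
    return o;
}

int main(void) {
    unsigned char *buf;
    str_buf_t *s = str_buf_start(5, &buf);
    memcpy(buf, "hello", 5);
    buf[5] = '\0'; // the extra byte allocated above
    printf("%s (%zu bytes)\n", (char *)s->data, s->len);
    free(s);
    return 0;
}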
Example #9
mp_obj_t mp_obj_new_fun_bc(mp_uint_t scope_flags, mp_uint_t n_pos_args, mp_uint_t n_kwonly_args, mp_obj_t def_args_in, mp_obj_t def_kw_args, const byte *code) {
    mp_uint_t n_def_args = 0;
    mp_uint_t n_extra_args = 0;
    mp_obj_tuple_t *def_args = def_args_in;
    if (def_args != MP_OBJ_NULL) {
        assert(MP_OBJ_IS_TYPE(def_args, &mp_type_tuple));
        n_def_args = def_args->len;
        n_extra_args = def_args->len;
    }
    if (def_kw_args != MP_OBJ_NULL) {
        n_extra_args += 1;
    }
    mp_obj_fun_bc_t *o = m_new_obj_var(mp_obj_fun_bc_t, mp_obj_t, n_extra_args);
    o->base.type = &mp_type_fun_bc;
    o->globals = mp_globals_get();
    o->n_pos_args = n_pos_args;
    o->n_kwonly_args = n_kwonly_args;
    o->n_def_args = n_def_args;
    o->has_def_kw_args = def_kw_args != MP_OBJ_NULL;
    o->takes_var_args = (scope_flags & MP_SCOPE_FLAG_VARARGS) != 0;
    o->takes_kw_args = (scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0;
    o->bytecode = code;
    if (def_args != MP_OBJ_NULL) {
        memcpy(o->extra_args, def_args->items, n_def_args * sizeof(mp_obj_t));
    }
    if (def_kw_args != MP_OBJ_NULL) {
        o->extra_args[n_def_args] = def_kw_args;
    }
    return o;
}
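
In this version the trailing extra_args array holds the default positional values first and, when the function has keyword defaults, one more slot at index n_def_args for the keyword-defaults dict; that is why n_extra_args is n_def_args plus at most one. A plain-C sketch of that layout (names and values are made up for the demo):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef const char *val_t; // stand-in for mp_obj_t

int main(void) {
    val_t defaults[] = {"def0", "def1"};
    val_t kw_defaults_dict = "kw-defaults-dict";
    size_t n_def_args = 2;
    int has_def_kw_args = 1;

    // Defaults occupy indices 0 .. n_def_args-1; the kw-defaults dict (if any)
    // sits right after them at index n_def_args.
    size_t n_extra_args = n_def_args + (has_def_kw_args ? 1 : 0);
    val_t *extra_args = malloc(n_extra_args * sizeof(val_t));
    memcpy(extra_args, defaults, n_def_args * sizeof(val_t));
    if (has_def_kw_args) {
        extra_args[n_def_args] = kw_defaults_dict;
    }

    for (size_t i = 0; i < n_extra_args; i++) {
        printf("extra_args[%zu] = %s\n", i, extra_args[i]);
    }
    free(extra_args);
    return 0;
}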
Example #10
mp_obj_t mp_obj_new_closure(mp_obj_t fun, mp_uint_t n_closed_over, const mp_obj_t *closed) {
    mp_obj_closure_t *o = m_new_obj_var(mp_obj_closure_t, mp_obj_t, n_closed_over);
    o->base.type = &closure_type;
    o->fun = fun;
    o->n_closed = n_closed_over;
    memcpy(o->closed, closed, n_closed_over * sizeof(mp_obj_t));
    return MP_OBJ_FROM_PTR(o);
}
Example #11
mp_obj_namedtuple_type_t *mp_obj_new_namedtuple_base(size_t n_fields, mp_obj_t *fields) {
    mp_obj_namedtuple_type_t *o = m_new_obj_var(mp_obj_namedtuple_type_t, qstr, n_fields);
    memset(&o->base, 0, sizeof(o->base));
    o->n_fields = n_fields;
    for (size_t i = 0; i < n_fields; i++) {
        o->fields[i] = mp_obj_str_get_qstr(fields[i]);
    }
    return o;
}
Example #12
STATIC mp_obj_t bufwriter_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    mp_arg_check_num(n_args, n_kw, 2, 2, false);
    size_t alloc = mp_obj_get_int(args[1]);
    mp_obj_bufwriter_t *o = m_new_obj_var(mp_obj_bufwriter_t, byte, alloc);
    o->base.type = type;
    o->stream = args[0];
    o->alloc = alloc;
    o->len = 0;
    return o;
}
Example #13
mp_obj_t mp_obj_new_attrtuple(const qstr *fields, mp_uint_t n, const mp_obj_t *items) {
    mp_obj_tuple_t *o = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n + 1);
    o->base.type = &mp_type_attrtuple;
    o->len = n;
    for (mp_uint_t i = 0; i < n; i++) {
        o->items[i] = items[i];
    }
    o->items[n] = MP_OBJ_FROM_PTR(fields);
    return MP_OBJ_FROM_PTR(o);
}
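
mp_obj_new_attrtuple allocates n + 1 item slots but reports a length of n: the hidden slot at index n stores a pointer to the field-name array, presumably so attribute lookup can later map a name to an index without a separate allocation. A stand-alone sketch of that layout; attrtuple_t and attrtuple_new are hypothetical names, not the MicroPython types.

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    size_t len;          // visible length: n
    const void *items[]; // n items plus one hidden slot for the field names
} attrtuple_t;

static attrtuple_t *attrtuple_new(const char **fields, size_t n, const void **values) {
    attrtuple_t *o = (attrtuple_t *)malloc(sizeof(attrtuple_t) + (n + 1) * sizeof(const void *));
    o->len = n;
    for (size_t i = 0; i < n; i++) {
        o->items[i] = values[i];
    }
    o->items[n] = fields; // hidden slot: the field-name table
    return o;
}

int main(void) {
    const char *fields[] = {"x", "y"};
    const void *values[] = {"1", "2"};
    attrtuple_t *t = attrtuple_new(fields, 2, values);

    const char **f = (const char **)t->items[t->len]; // recover the field names
    printf("len=%zu field0=%s value0=%s\n", t->len, f[0], (const char *)t->items[0]);
    free(t);
    return 0;
}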
Example #14
STATIC mp_obj_t zip_make_new(mp_obj_t type_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    // TODO check n_kw == 0

    mp_obj_zip_t *o = m_new_obj_var(mp_obj_zip_t, mp_obj_t, n_args);
    o->base.type = &mp_type_zip;
    o->n_iters = n_args;
    for (int i = 0; i < n_args; i++) {
        o->iters[i] = mp_getiter(args[i]);
    }
    return o;
}
Example #15
STATIC mp_obj_t zip_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    mp_arg_check_num(n_args, n_kw, 0, MP_OBJ_FUN_ARGS_MAX, false);

    mp_obj_zip_t *o = m_new_obj_var(mp_obj_zip_t, mp_obj_t, n_args);
    o->base.type = type;
    o->n_iters = n_args;
    for (mp_uint_t i = 0; i < n_args; i++) {
        o->iters[i] = mp_getiter(args[i], NULL);
    }
    return MP_OBJ_FROM_PTR(o);
}
Example #16
STATIC mp_obj_t zip_make_new(mp_obj_t type_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    mp_arg_check_num(n_args, n_kw, 0, MP_OBJ_FUN_ARGS_MAX, false);

    mp_obj_zip_t *o = m_new_obj_var(mp_obj_zip_t, mp_obj_t, n_args);
    o->base.type = &mp_type_zip;
    o->n_iters = n_args;
    for (int i = 0; i < n_args; i++) {
        o->iters[i] = mp_getiter(args[i]);
    }
    return o;
}
Example #17
STATIC mp_obj_t map_make_new(mp_obj_t type_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    if (n_args < 2 || n_kw != 0) {
        nlr_jump(mp_obj_new_exception_msg(&mp_type_TypeError, "map must have at least 2 arguments and no keyword arguments"));
    }
    assert(n_args >= 2);
    mp_obj_map_t *o = m_new_obj_var(mp_obj_map_t, mp_obj_t, n_args - 1);
    o->base.type = &map_type;
    o->n_iters = n_args - 1;
    o->fun = args[0];
    for (int i = 0; i < n_args - 1; i++) {
        o->iters[i] = rt_getiter(args[i + 1]);
    }
    return o;
}
Example #18
mp_obj_t mp_obj_new_tuple(size_t n, const mp_obj_t *items) {
    if (n == 0) {
        return mp_const_empty_tuple;
    }
    mp_obj_tuple_t *o = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n);
    o->base.type = &mp_type_tuple;
    o->len = n;
    if (items) {
        for (size_t i = 0; i < n; i++) {
            o->items[i] = items[i];
        }
    }
    return MP_OBJ_FROM_PTR(o);
}
Example #19
mp_obj_t mp_obj_new_tuple(uint n, const mp_obj_t *items) {
    if (n == 0) {
        return mp_const_empty_tuple;
    }
    mp_obj_tuple_t *o = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n);
    o->base.type = &mp_type_tuple;
    o->len = n;
    if (items) {
        for (int i = 0; i < n; i++) {
            o->items[i] = items[i];
        }
    }
    return o;
}
Example #20
static mp_obj_t exception_call(mp_obj_t self_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    mp_obj_exception_t *base = self_in;

    if (n_kw != 0) {
        nlr_jump(mp_obj_new_exception_msg_1_arg(MP_QSTR_TypeError, "%s does not take keyword arguments", qstr_str(base->id)));
    }

    mp_obj_exception_t *o = m_new_obj_var(mp_obj_exception_t, mp_obj_t, n_args);
    o->base.type = &exception_type;
    o->id = base->id;
    o->msg = NULL;
    o->args.len = n_args;
    memcpy(o->args.items, args, n_args * sizeof(mp_obj_t));
    return o;
}
Example #21
mp_obj_t mp_obj_new_gen_instance(mp_obj_dict_t *globals, const byte *bytecode,
                                 uint n_args, const mp_obj_t *args,
                                 uint n_args2, const mp_obj_t *args2) {
    const byte *code_info = bytecode;
    // get code info size, and skip the line number table
    machine_uint_t code_info_size = bytecode[0] | (bytecode[1] << 8) | (bytecode[2] << 16) | (bytecode[3] << 24);
    bytecode += code_info_size;

    // bytecode prelude: get state size and exception stack size
    machine_uint_t n_state = bytecode[0] | (bytecode[1] << 8);
    machine_uint_t n_exc_stack = bytecode[2] | (bytecode[3] << 8);
    bytecode += 4;

    // allocate the generator object, with room for local stack and exception stack
    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte, n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
    o->base.type = &mp_type_gen_instance;
    o->globals = globals;
    o->code_info = code_info;
    o->sp = &o->state[0] - 1; // sp points to top of stack, which starts off 1 below the state
    o->exc_sp = (mp_exc_stack_t*)(o->state + n_state) - 1;
    o->n_state = n_state;

    // copy args to end of state array, in reverse (that's how mp_execute_bytecode2 needs it)
    for (uint i = 0; i < n_args; i++) {
        o->state[n_state - 1 - i] = args[i];
    }
    for (uint i = 0; i < n_args2; i++) {
        o->state[n_state - 1 - n_args - i] = args2[i];
    }

    // set rest of state to MP_OBJ_NULL
    for (uint i = 0; i < n_state - n_args - n_args2; i++) {
        o->state[i] = MP_OBJ_NULL;
    }

    // bytecode prelude: initialise closed over variables
    for (uint n_local = *bytecode++; n_local > 0; n_local--) {
        uint local_num = *bytecode++;
        o->state[n_state - 1 - local_num] = mp_obj_new_cell(o->state[n_state - 1 - local_num]);
    }

    // set ip to start of actual byte code
    o->ip = bytecode;

    return o;
}
Example #22
mp_obj_t mp_obj_new_str(const byte* data, uint len, bool make_qstr_if_not_already) {
    qstr q = qstr_find_strn(data, len);
    if (q != MP_QSTR_NULL) {
        // qstr with this data already exists
        return MP_OBJ_NEW_QSTR(q);
    } else if (make_qstr_if_not_already) {
        // no existing qstr, make a new one
        return MP_OBJ_NEW_QSTR(qstr_from_strn((const char*)data, len));
    } else {
        // no existing qstr, don't make one
        mp_obj_str_t *o = m_new_obj_var(mp_obj_str_t, byte, len + 1);
        o->base.type = &str_type;
        o->hash = qstr_compute_hash(data, len);
        o->len = len;
        memcpy(o->data, data, len * sizeof(byte));
        o->data[len] = '\0'; // for now we add null for compatibility with C ASCIIZ strings
        return o;
    }
}
Example #23
STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    // A generating function is just a bytecode function with type mp_type_gen_wrap
    mp_obj_fun_bc_t *self_fun = MP_OBJ_TO_PTR(self_in);

    // bytecode prelude: get state size and exception stack size
    size_t n_state = mp_decode_uint_value(self_fun->bytecode);
    size_t n_exc_stack = mp_decode_uint_value(mp_decode_uint_skip(self_fun->bytecode));

    // allocate the generator object, with room for local stack and exception stack
    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte,
        n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
    o->base.type = &mp_type_gen_instance;

    o->globals = self_fun->globals;
    o->code_state.fun_bc = self_fun;
    o->code_state.ip = 0;
    mp_setup_code_state(&o->code_state, n_args, n_kw, args);
    return MP_OBJ_FROM_PTR(o);
}
Example #24
STATIC mp_obj_t mp_obj_new_namedtuple_type(qstr name, mp_uint_t n_fields, mp_obj_t *fields) {
    mp_obj_namedtuple_type_t *o = m_new_obj_var(mp_obj_namedtuple_type_t, qstr, n_fields);
    memset(&o->base, 0, sizeof(o->base));
    o->base.base.type = &mp_type_type;
    o->base.name = name;
    o->base.print = namedtuple_print;
    o->base.make_new = namedtuple_make_new;
    o->base.unary_op = mp_obj_tuple_unary_op;
    o->base.binary_op = mp_obj_tuple_binary_op;
    o->base.attr = namedtuple_attr;
    o->base.subscr = mp_obj_tuple_subscr;
    o->base.getiter = mp_obj_tuple_getiter;
    o->base.bases_tuple = (mp_obj_tuple_t*)(mp_rom_obj_tuple_t*)&namedtuple_base_tuple;
    o->n_fields = n_fields;
    for (mp_uint_t i = 0; i < n_fields; i++) {
        o->fields[i] = mp_obj_str_get_qstr(fields[i]);
    }
    return MP_OBJ_FROM_PTR(o);
}
Example #25
// args are in reverse order in the array
mp_obj_t mp_obj_new_gen_instance(const byte *bytecode, uint n_state, int n_args, const mp_obj_t *args) {
    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, mp_obj_t, n_state);
    o->base.type = &gen_instance_type;
    o->ip = bytecode;
    o->sp = o->state + n_state;

    // copy args (which are in reverse order) to start of state array
    for (int i = 0; i < n_args; i++) {
        o->state[i] = args[n_args - 1 - i];
    }

    // TODO
    // prelude for making cells (closed over variables)
    // for now we just make sure there are no cell variables
    // need to work out how to implement closed over variables in generators
    assert(o->ip[0] == 0);
    o->ip += 1;

    return o;
}
Example #26
mp_obj_t mp_obj_new_gen_instance(const byte *bytecode, uint n_args, const mp_obj_t *args, uint n_args2, const mp_obj_t *args2) {
    const byte *code_info = bytecode;
    // get code info size, and skip the line number table
    machine_uint_t code_info_size = bytecode[0] | (bytecode[1] << 8) | (bytecode[2] << 16) | (bytecode[3] << 24);
    bytecode += code_info_size;

    // bytecode prelude: get state size and exception stack size
    machine_uint_t n_state = bytecode[0] | (bytecode[1] << 8);
    machine_uint_t n_exc_stack = bytecode[2] | (bytecode[3] << 8);
    bytecode += 4;

    // bytecode prelude: initialise closed over variables
    // TODO
    // for now we just make sure there are no cell variables
    // need to work out how to implement closed over variables in generators
    assert(bytecode[0] == 0);
    bytecode += 1;

    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte, n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
    o->base.type = &mp_type_gen_instance;
    o->code_info = code_info;
    o->ip = bytecode;
    o->sp = &o->state[0] - 1; // sp points to top of stack, which starts off 1 below the state
    o->exc_sp = (mp_exc_stack_t*)(o->state + n_state) - 1;
    o->n_state = n_state;

    // copy args to end of state array, in reverse (that's how mp_execute_byte_code_2 needs it)
    for (uint i = 0; i < n_args; i++) {
        o->state[n_state - 1 - i] = args[i];
    }
    for (uint i = 0; i < n_args2; i++) {
        o->state[n_state - 1 - n_args - i] = args2[i];
    }

    return o;
}
Example #27
STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args) {
    MP_STACK_CHECK();

    DEBUG_printf("Input n_args: " UINT_FMT ", n_kw: " UINT_FMT "\n", n_args, n_kw);
    DEBUG_printf("Input pos args: ");
    dump_args(args, n_args);
    DEBUG_printf("Input kw args: ");
    dump_args(args + n_args, n_kw * 2);
    mp_obj_fun_bc_t *self = self_in;
    DEBUG_printf("Func n_def_args: %d\n", self->n_def_args);

    // skip code-info block
    const byte *code_info = self->bytecode;
    mp_uint_t code_info_size = mp_decode_uint(&code_info);
    const byte *ip = self->bytecode + code_info_size;

    // bytecode prelude: skip arg names
    ip += (self->n_pos_args + self->n_kwonly_args) * sizeof(mp_obj_t);

    // bytecode prelude: state size and exception stack size
    mp_uint_t n_state = mp_decode_uint(&ip);
    mp_uint_t n_exc_stack = mp_decode_uint(&ip);

#if VM_DETECT_STACK_OVERFLOW
    n_state += 1;
#endif

    // allocate state for locals and stack
    mp_uint_t state_size = n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t);
    mp_code_state *code_state;
    if (state_size > VM_MAX_STATE_ON_STACK) {
        code_state = m_new_obj_var(mp_code_state, byte, state_size);
    } else {
        code_state = alloca(sizeof(mp_code_state) + state_size);
    }

    code_state->n_state = n_state;
    code_state->code_info = 0; // offset to code-info
    code_state->ip = (byte*)(ip - self->bytecode); // offset to prelude
    mp_setup_code_state(code_state, self_in, n_args, n_kw, args);

    // execute the byte code with the correct globals context
    code_state->old_globals = mp_globals_get();
    mp_globals_set(self->globals);
    mp_vm_return_kind_t vm_return_kind = mp_execute_bytecode(code_state, MP_OBJ_NULL);
    mp_globals_set(code_state->old_globals);

#if VM_DETECT_STACK_OVERFLOW
    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
        if (code_state->sp < code_state->state) {
            printf("VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state);
            assert(0);
        }
    }
    // We can't check the case when an exception is returned in state[n_state - 1]
    // and there are no arguments, because in this case our detection slot may have
    // been overwritten by the returned exception (which is allowed).
    if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && self->n_pos_args + self->n_kwonly_args == 0)) {
        // Just check to see that we have at least 1 null object left in the state.
        bool overflow = true;
        for (mp_uint_t i = 0; i < n_state - self->n_pos_args - self->n_kwonly_args; i++) {
            if (code_state->state[i] == MP_OBJ_NULL) {
                overflow = false;
                break;
            }
        }
        if (overflow) {
            printf("VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state);
            assert(0);
        }
    }
#endif

    mp_obj_t result;
    switch (vm_return_kind) {
        case MP_VM_RETURN_NORMAL:
            // return value is in *sp
            result = *code_state->sp;
            break;

        case MP_VM_RETURN_EXCEPTION:
            // return value is in state[n_state - 1]
            result = code_state->state[n_state - 1];
            break;

        case MP_VM_RETURN_YIELD: // byte-code shouldn't yield
        default:
            assert(0);
            result = mp_const_none;
            vm_return_kind = MP_VM_RETURN_NORMAL;
            break;
    }

    // free the state if it was allocated on the heap
    if (state_size > VM_MAX_STATE_ON_STACK) {
        m_del_var(mp_code_state, byte, state_size, code_state);
    }

    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
        return result;
    } else { // MP_VM_RETURN_EXCEPTION
        nlr_raise(result);
    }
}
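
fun_bc_call sizes the VM frame first (n_state object slots plus the exception stack) and only then decides where to put it: frames up to VM_MAX_STATE_ON_STACK live on the C stack via alloca(), larger ones go to the heap and are freed once the bytecode returns. A hedged sketch of that strategy; the threshold, struct and function names below are invented for the demo, and the alloca() memory is only used before the function returns, as in the example.

#include <alloca.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_STATE_ON_STACK 256 // demo threshold, analogous to VM_MAX_STATE_ON_STACK

typedef struct {
    size_t n_state;
    unsigned char state[]; // locals + evaluation stack + exception stack
} code_state_t;

static void run_with_state(size_t state_size) {
    code_state_t *cs;
    int on_heap = state_size > MAX_STATE_ON_STACK;
    if (on_heap) {
        cs = (code_state_t *)malloc(sizeof(code_state_t) + state_size);
    } else {
        cs = (code_state_t *)alloca(sizeof(code_state_t) + state_size);
    }

    cs->n_state = state_size;
    memset(cs->state, 0, state_size);
    printf("state of %zu bytes allocated on the %s\n",
           state_size, on_heap ? "heap" : "stack");

    if (on_heap) {
        free(cs); // stack allocation is released automatically on return
    }
}

int main(void) {
    run_with_state(64);   // small frame: stack
    run_with_state(4096); // large frame: heap
    return 0;
}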
Example #28
#else // don't print debugging info
#define DEBUG_PRINT (0)
#define DEBUG_printf(...) (void)0
#endif

STATIC mp_obj_t static_class_method_make_new(mp_obj_t self_in, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args);

/******************************************************************************/
// instance object

#define is_instance_type(type) ((type)->make_new == instance_make_new)
#define is_native_type(type) ((type)->make_new != instance_make_new)
mp_obj_t instance_make_new(mp_obj_t self_in, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args);

STATIC mp_obj_t mp_obj_new_instance(mp_obj_t class, uint subobjs) {
    mp_obj_instance_t *o = m_new_obj_var(mp_obj_instance_t, mp_obj_t, subobjs);
    o->base.type = class;
    mp_map_init(&o->members, 0);
    mp_seq_clear(o->subobj, 0, subobjs, sizeof(*o->subobj));
    return o;
}

STATIC int instance_count_native_bases(const mp_obj_type_t *type, const mp_obj_type_t **last_native_base) {
    mp_uint_t len;
    mp_obj_t *items;
    mp_obj_tuple_get(type->bases_tuple, &len, &items);

    int count = 0;
    for (uint i = 0; i < len; i++) {
        assert(MP_OBJ_IS_TYPE(items[i], &mp_type_type));
        const mp_obj_type_t *bt = (const mp_obj_type_t *)items[i];
Example #29
STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    // This function is pretty complicated.  Its main aim is to be efficient in speed and RAM
    // usage for the common case of positional only args.

    DEBUG_printf("Input n_args: %d, n_kw: %d\n", n_args, n_kw);
    DEBUG_printf("Input pos args: ");
    dump_args(args, n_args);
    DEBUG_printf("Input kw args: ");
    dump_args(args + n_args, n_kw * 2);
    mp_obj_fun_bc_t *self = self_in;
    DEBUG_printf("Func n_def_args: %d\n", self->n_def_args);

    const byte *ip = self->bytecode;

    // get code info size, and skip line number table
    machine_uint_t code_info_size = ip[0] | (ip[1] << 8) | (ip[2] << 16) | (ip[3] << 24);
    ip += code_info_size;

    // bytecode prelude: state size and exception stack size; 16 bit uints
    machine_uint_t n_state = ip[0] | (ip[1] << 8);
    machine_uint_t n_exc_stack = ip[2] | (ip[3] << 8);
    ip += 4;

#if VM_DETECT_STACK_OVERFLOW
    n_state += 1;
#endif

    // allocate state for locals and stack
    uint state_size = n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t);
    mp_code_state *code_state;
    if (state_size > VM_MAX_STATE_ON_STACK) {
        code_state = m_new_obj_var(mp_code_state, byte, state_size);
    } else {
        code_state = alloca(sizeof(mp_code_state) + state_size);
    }

    code_state->n_state = n_state;
    code_state->ip = ip;
    mp_setup_code_state(code_state, self_in, n_args, n_kw, args);

    // execute the byte code with the correct globals context
    mp_obj_dict_t *old_globals = mp_globals_get();
    mp_globals_set(self->globals);
    mp_vm_return_kind_t vm_return_kind = mp_execute_bytecode(code_state, MP_OBJ_NULL);
    mp_globals_set(old_globals);

#if VM_DETECT_STACK_OVERFLOW
    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
        if (code_state->sp < code_state->state) {
            printf("VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state);
            assert(0);
        }
    }
    // We can't check the case when an exception is returned in state[n_state - 1]
    // and there are no arguments, because in this case our detection slot may have
    // been overwritten by the returned exception (which is allowed).
    if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && self->n_pos_args + self->n_kwonly_args == 0)) {
        // Just check to see that we have at least 1 null object left in the state.
        bool overflow = true;
        for (uint i = 0; i < n_state - self->n_pos_args - self->n_kwonly_args; i++) {
            if (code_state->state[i] == MP_OBJ_NULL) {
                overflow = false;
                break;
            }
        }
        if (overflow) {
            printf("VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state);
            assert(0);
        }
    }
#endif

    mp_obj_t result;
    switch (vm_return_kind) {
        case MP_VM_RETURN_NORMAL:
            // return value is in *sp
            result = *code_state->sp;
            break;

        case MP_VM_RETURN_EXCEPTION:
            // return value is in state[n_state - 1]
            result = code_state->state[n_state - 1];
            break;

        case MP_VM_RETURN_YIELD: // byte-code shouldn't yield
        default:
            assert(0);
            result = mp_const_none;
            vm_return_kind = MP_VM_RETURN_NORMAL;
            break;
    }

    // free the state if it was allocated on the heap
    if (state_size > VM_MAX_STATE_ON_STACK) {
        m_del_var(mp_code_state, byte, state_size, code_state);
    }

    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
        return result;
    } else { // MP_VM_RETURN_EXCEPTION
        nlr_raise(result);
    }
}
Example #30
STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    // This function is pretty complicated.  Its main aim is to be efficient in speed and RAM
    // usage for the common case of positional only args.

    DEBUG_printf("Input n_args: %d, n_kw: %d\n", n_args, n_kw);
    DEBUG_printf("Input pos args: ");
    dump_args(args, n_args);
    DEBUG_printf("Input kw args: ");
    dump_args(args + n_args, n_kw * 2);
    mp_obj_fun_bc_t *self = self_in;
    DEBUG_printf("Func n_def_args: %d\n", self->n_def_args);

    const byte *ip = self->bytecode;

    // get code info size, and skip line number table
    machine_uint_t code_info_size = ip[0] | (ip[1] << 8) | (ip[2] << 16) | (ip[3] << 24);
    ip += code_info_size;

    // bytecode prelude: state size and exception stack size; 16 bit uints
    machine_uint_t n_state = ip[0] | (ip[1] << 8);
    machine_uint_t n_exc_stack = ip[2] | (ip[3] << 8);
    ip += 4;

#if VM_DETECT_STACK_OVERFLOW
    n_state += 1;
#endif

    // allocate state for locals and stack
    uint state_size = n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t);
    mp_code_state *code_state;
    if (state_size > VM_MAX_STATE_ON_STACK) {
        code_state = m_new_obj_var(mp_code_state, byte, state_size);
    } else {
        code_state = alloca(sizeof(mp_code_state) + state_size);
    }

    code_state->code_info = self->bytecode;
    code_state->sp = &code_state->state[0] - 1;
    code_state->exc_sp = (mp_exc_stack_t*)(code_state->state + n_state) - 1;
    code_state->n_state = n_state;

    // zero out the local stack to begin with
    memset(code_state->state, 0, n_state * sizeof(*code_state->state));

    const mp_obj_t *kwargs = args + n_args;

    // var_pos_kw_args points to the stack where the var-args tuple, and var-kw dict, should go (if they are needed)
    mp_obj_t *var_pos_kw_args = &code_state->state[n_state - 1 - self->n_pos_args - self->n_kwonly_args];

    // check positional arguments

    if (n_args > self->n_pos_args) {
        // given more than enough arguments
        if (!self->takes_var_args) {
            fun_pos_args_mismatch(self, self->n_pos_args, n_args);
        }
        // put extra arguments in varargs tuple
        *var_pos_kw_args-- = mp_obj_new_tuple(n_args - self->n_pos_args, args + self->n_pos_args);
        n_args = self->n_pos_args;
    } else {
        if (self->takes_var_args) {
            DEBUG_printf("passing empty tuple as *args\n");
            *var_pos_kw_args-- = mp_const_empty_tuple;
        }
        // Apply processing and check below only if we don't have kwargs,
        // otherwise, kw handling code below has own extensive checks.
        if (n_kw == 0 && !self->has_def_kw_args) {
            if (n_args >= self->n_pos_args - self->n_def_args) {
                // given enough arguments, but may need to use some default arguments
                for (uint i = n_args; i < self->n_pos_args; i++) {
                    code_state->state[n_state - 1 - i] = self->extra_args[i - (self->n_pos_args - self->n_def_args)];
                }
            } else {
                fun_pos_args_mismatch(self, self->n_pos_args - self->n_def_args, n_args);
            }
        }
    }

    // copy positional args into state
    for (uint i = 0; i < n_args; i++) {
        code_state->state[n_state - 1 - i] = args[i];
    }

    // check keyword arguments

    if (n_kw != 0 || self->has_def_kw_args) {
        DEBUG_printf("Initial args: ");
        dump_args(code_state->state + n_state - self->n_pos_args - self->n_kwonly_args, self->n_pos_args + self->n_kwonly_args);

        mp_obj_t dict = MP_OBJ_NULL;
        if (self->takes_kw_args) {
            dict = mp_obj_new_dict(n_kw); // TODO: better go conservative with 0?
            *var_pos_kw_args = dict;
        }

        for (uint i = 0; i < n_kw; i++) {
            qstr arg_name = MP_OBJ_QSTR_VALUE(kwargs[2 * i]);
            for (uint j = 0; j < self->n_pos_args + self->n_kwonly_args; j++) {
                if (arg_name == self->args[j]) {
                    if (code_state->state[n_state - 1 - j] != MP_OBJ_NULL) {
                        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
                            "function got multiple values for argument '%s'", qstr_str(arg_name)));
                    }
                    code_state->state[n_state - 1 - j] = kwargs[2 * i + 1];
                    goto continue2;
                }
            }
            // Didn't find name match with positional args
            if (!self->takes_kw_args) {
                nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "function does not take keyword arguments"));
            }
            mp_obj_dict_store(dict, kwargs[2 * i], kwargs[2 * i + 1]);
continue2:;
        }

        DEBUG_printf("Args with kws flattened: ");
        dump_args(code_state->state + n_state - self->n_pos_args - self->n_kwonly_args, self->n_pos_args + self->n_kwonly_args);

        // fill in defaults for positional args
        mp_obj_t *d = &code_state->state[n_state - self->n_pos_args];
        mp_obj_t *s = &self->extra_args[self->n_def_args - 1];
        for (int i = self->n_def_args; i > 0; i--, d++, s--) {
            if (*d == MP_OBJ_NULL) {
                *d = *s;
            }
        }

        DEBUG_printf("Args after filling default positional: ");
        dump_args(code_state->state + n_state - self->n_pos_args - self->n_kwonly_args, self->n_pos_args + self->n_kwonly_args);

        // Check that all mandatory positional args are specified
        while (d < &code_state->state[n_state]) {
            if (*d++ == MP_OBJ_NULL) {
                nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
                    "function missing required positional argument #%d", &code_state->state[n_state] - d));
            }
        }

        // Check that all mandatory keyword args are specified
        // Fill in default kw args if we have them
        for (uint i = 0; i < self->n_kwonly_args; i++) {
            if (code_state->state[n_state - 1 - self->n_pos_args - i] == MP_OBJ_NULL) {
                mp_map_elem_t *elem = NULL;
                if (self->has_def_kw_args) {
                    elem = mp_map_lookup(&((mp_obj_dict_t*)self->extra_args[self->n_def_args])->map, MP_OBJ_NEW_QSTR(self->args[self->n_pos_args + i]), MP_MAP_LOOKUP);
                }
                if (elem != NULL) {
                    code_state->state[n_state - 1 - self->n_pos_args - i] = elem->value;
                } else {
                    nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
                        "function missing required keyword argument '%s'", qstr_str(self->args[self->n_pos_args + i])));
                }
            }
        }

    } else {
        // no keyword arguments given
        if (self->n_kwonly_args != 0) {
            nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError,
                "function missing keyword-only argument"));
        }
        if (self->takes_kw_args) {
            *var_pos_kw_args = mp_obj_new_dict(0);
        }
    }

    // bytecode prelude: initialise closed over variables
    for (uint n_local = *ip++; n_local > 0; n_local--) {
        uint local_num = *ip++;
        code_state->state[n_state - 1 - local_num] = mp_obj_new_cell(code_state->state[n_state - 1 - local_num]);
    }

    // now that we skipped over the prelude, set the ip for the VM
    code_state->ip = ip;

    DEBUG_printf("Calling: n_pos_args=%d, n_kwonly_args=%d\n", self->n_pos_args, self->n_kwonly_args);
    dump_args(code_state->state + n_state - self->n_pos_args - self->n_kwonly_args, self->n_pos_args + self->n_kwonly_args);
    dump_args(code_state->state, n_state);

    // execute the byte code with the correct globals context
    mp_obj_dict_t *old_globals = mp_globals_get();
    mp_globals_set(self->globals);
    mp_vm_return_kind_t vm_return_kind = mp_execute_bytecode(code_state, MP_OBJ_NULL);
    mp_globals_set(old_globals);

#if VM_DETECT_STACK_OVERFLOW
    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
        if (code_state->sp < code_state->state) {
            printf("VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state);
            assert(0);
        }
    }
    // We can't check the case when an exception is returned in state[n_state - 1]
    // and there are no arguments, because in this case our detection slot may have
    // been overwritten by the returned exception (which is allowed).
    if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && self->n_pos_args + self->n_kwonly_args == 0)) {
        // Just check to see that we have at least 1 null object left in the state.
        bool overflow = true;
        for (uint i = 0; i < n_state - self->n_pos_args - self->n_kwonly_args; i++) {
            if (code_state->state[i] == MP_OBJ_NULL) {
                overflow = false;
                break;
            }
        }
        if (overflow) {
            printf("VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state);
            assert(0);
        }
    }
#endif

    mp_obj_t result;
    switch (vm_return_kind) {
        case MP_VM_RETURN_NORMAL:
            // return value is in *sp
            result = *code_state->sp;
            break;

        case MP_VM_RETURN_EXCEPTION:
            // return value is in state[n_state - 1]
            result = code_state->state[n_state - 1];
            break;

        case MP_VM_RETURN_YIELD: // byte-code shouldn't yield
        default:
            assert(0);
            result = mp_const_none;
            vm_return_kind = MP_VM_RETURN_NORMAL;
            break;
    }

    // free the state if it was allocated on the heap
    if (state_size > VM_MAX_STATE_ON_STACK) {
        m_del_var(mp_code_state, byte, state_size, code_state);
    }

    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
        return result;
    } else { // MP_VM_RETURN_EXCEPTION
        nlr_raise(result);
    }
}