// Compute the intersection of self and other.  If update is true the result
// replaces self's storage in place and None is returned; otherwise a newly
// allocated set is returned.
STATIC mp_obj_t set_intersect_int(mp_obj_t self_in, mp_obj_t other, bool update) {
    if (update) {
        check_set(self_in);
    } else {
        check_set_or_frozenset(self_in);
    }

    // intersecting a set with itself is the identity (or a plain copy)
    if (self_in == other) {
        return update ? mp_const_none : set_copy(self_in);
    }

    mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
    mp_obj_set_t *result = MP_OBJ_TO_PTR(mp_obj_new_set(0, NULL));

    // keep exactly those elements of other that are also present in self
    mp_obj_t it = mp_getiter(other, NULL);
    mp_obj_t item;
    while ((item = mp_iternext(it)) != MP_OBJ_STOP_ITERATION) {
        if (mp_set_lookup(&self->set, item, MP_MAP_LOOKUP)) {
            set_add(MP_OBJ_FROM_PTR(result), item);
        }
    }

    if (update) {
        // steal the freshly built table for self, freeing the old one
        m_del(mp_obj_t, self->set.table, self->set.alloc);
        self->set.alloc = result->set.alloc;
        self->set.used = result->set.used;
        self->set.table = result->set.table;
    }

    return update ? mp_const_none : MP_OBJ_FROM_PTR(result);
}
// Intersection of self with other (older object API: object handles are raw
// pointers).  With update true the intersection replaces self's table and
// None is returned; otherwise a fresh set is returned.
STATIC mp_obj_t set_intersect_int(mp_obj_t self_in, mp_obj_t other, bool update) {
    assert(MP_OBJ_IS_TYPE(self_in, &mp_type_set));

    // self & self is just self (or a copy when not updating)
    if (self_in == other) {
        return update ? mp_const_none : set_copy(self_in);
    }

    mp_obj_set_t *self = self_in;
    mp_obj_set_t *result = mp_obj_new_set(0, NULL);

    // collect the elements of other that are also members of self
    mp_obj_t it = mp_getiter(other);
    for (mp_obj_t item; (item = mp_iternext(it)) != MP_OBJ_STOP_ITERATION;) {
        if (mp_set_lookup(&self->set, item, MP_MAP_LOOKUP)) {
            set_add(result, item);
        }
    }

    if (update) {
        // replace self's storage with the newly built table
        m_del(mp_obj_t, self->set.table, self->set.alloc);
        self->set.alloc = result->set.alloc;
        self->set.used = result->set.used;
        self->set.table = result->set.table;
    }

    return update ? mp_const_none : result;
}
// Read bytes from SPI flash starting at `offset_in`.
// The second argument is either an int length (a new bytes object is
// returned) or a writable buffer (filled in place, None is returned).
// Raises OSError (ETIMEDOUT or EIO) if the low-level read fails.
STATIC mp_obj_t esp_flash_read(mp_obj_t offset_in, mp_obj_t len_or_buf_in) {
    mp_int_t offset = mp_obj_get_int(offset_in);

    mp_int_t len;
    byte *buf;
    // an int argument means "allocate a buffer of that many bytes for me"
    bool alloc_buf = MP_OBJ_IS_INT(len_or_buf_in);

    if (alloc_buf) {
        len = mp_obj_get_int(len_or_buf_in);
        buf = m_new(byte, len);
    } else {
        mp_buffer_info_t bufinfo;
        mp_get_buffer_raise(len_or_buf_in, &bufinfo, MP_BUFFER_WRITE);
        len = bufinfo.len;
        buf = bufinfo.buf;
    }

    // We know that allocation will be 4-byte aligned for sure
    SpiFlashOpResult res = spi_flash_read(offset, (uint32_t*)buf, len);
    if (res == SPI_FLASH_RESULT_OK) {
        if (alloc_buf) {
            return mp_obj_new_bytes(buf, len);
        }
        return mp_const_none;
    }
    if (alloc_buf) {
        // free the temporary buffer before raising
        m_del(byte, buf, len);
    }
    mp_raise_OSError(res == SPI_FLASH_RESULT_TIMEOUT ? MP_ETIMEDOUT : MP_EIO);
}
// Free an x86 assembler instance.  The executable code buffer is released
// only when free_code is true (callers may want to keep the generated code).
void asm_x86_free(asm_x86_t *as, bool free_code) {
    // release the label-offset table, then optionally the machine code itself
    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
    if (free_code) {
        MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
    }
    m_del_obj(asm_x86_t, as);
}
// Free all memory chunks owned by a parse tree.
void mp_parse_tree_clear(mp_parse_tree_t *tree) {
    // walk the singly linked chunk list, capturing each next pointer before
    // the current chunk is freed
    for (mp_parse_chunk_t *cur = tree->chunk; cur != NULL;) {
        mp_parse_chunk_t *nxt = cur->union_.next;
        m_del(byte, cur, sizeof(mp_parse_chunk_t) + cur->alloc);
        cur = nxt;
    }
}
// Free a lexer and everything it owns: the reader, the working vstr and the
// indent-level stack.  A NULL lexer is ignored.
void mp_lexer_free(mp_lexer_t *lex) {
    if (lex == NULL) {
        return;
    }
    lex->reader.close(lex->reader.data);
    vstr_clear(&lex->vstr);
    m_del(uint16_t, lex->indent_level, lex->alloc_indent_level);
    m_del_obj(mp_lexer_t, lex);
}
// Write num_blocks blocks of SDCARD_BLOCK_SIZE bytes from src to the SD card
// starting at block_num.  Returns HAL_OK on success, otherwise a HAL error
// code.  Unaligned source pointers are staged one block at a time through a
// temporary 4-byte-aligned buffer.
mp_uint_t sdcard_write_blocks(const uint8_t *src, uint32_t block_num, uint32_t num_blocks) {
    // check that SD card is initialised
    if (sd_handle.Instance == NULL) {
        return HAL_ERROR;
    }

    HAL_StatusTypeDef err = HAL_OK;

    // check that src pointer is aligned on a 4-byte boundary
    if (((uint32_t)src & 3) != 0) {
        // pointer is not aligned, so allocate a temporary block to do the write
        uint8_t *src_aligned = m_new_maybe(uint8_t, SDCARD_BLOCK_SIZE);
        if (src_aligned == NULL) {
            return HAL_ERROR;
        }
        // copy each block into the aligned buffer and write it individually
        for (size_t i = 0; i < num_blocks; ++i) {
            memcpy(src_aligned, src + i * SDCARD_BLOCK_SIZE, SDCARD_BLOCK_SIZE);
            err = sdcard_write_blocks(src_aligned, block_num + i, 1);
            if (err != HAL_OK) {
                break;
            }
        }
        m_del(uint8_t, src_aligned, SDCARD_BLOCK_SIZE);
        return err;
    }

    if (query_irq() == IRQ_STATE_ENABLED) {
        // we must disable USB irqs to prevent MSC contention with SD card
        uint32_t basepri = raise_irq_pri(IRQ_PRI_OTG_FS);

        #if SDIO_USE_GPDMA
        dma_init(&sd_tx_dma, &SDMMC_TX_DMA, &sd_handle);
        sd_handle.hdmatx = &sd_tx_dma;
        #endif

        // make sure cache is flushed to RAM so the DMA can read the correct data
        MP_HAL_CLEAN_DCACHE(src, num_blocks * SDCARD_BLOCK_SIZE);

        err = HAL_SD_WriteBlocks_DMA(&sd_handle, (uint8_t*)src, block_num, num_blocks);
        if (err == HAL_OK) {
            err = sdcard_wait_finished(&sd_handle, 60000);
        }

        #if SDIO_USE_GPDMA
        dma_deinit(&SDMMC_TX_DMA);
        sd_handle.hdmatx = NULL;
        #endif

        restore_irq_pri(basepri);
    } else {
        err = HAL_SD_WriteBlocks(&sd_handle, (uint8_t*)src, block_num, num_blocks, 60000);
        if (err == HAL_OK) {
            err = sdcard_wait_finished(&sd_handle, 60000);
        }
    }

    return err;
}
void mp_emit_glue_deinit(void) { #ifdef WRITE_CODE if (fp_write_code != NULL) { fclose(fp_write_code); } #endif m_del(mp_code_t, unique_codes, unique_codes_alloc); }
// Reset a map to the empty state, freeing its table unless the table is a
// fixed (statically allocated) array.
void mp_map_clear(mp_map_t *map) {
    if (!map->table_is_fixed_array) {
        m_del(mp_map_elem_t, map->table, map->alloc);
    }
    map->table = NULL;
    map->used = 0;
    map->alloc = 0;
    map->all_keys_are_qstrs = 1;
    map->table_is_fixed_array = 0;
}
// Read one block from the SD card and return it as a bytearray.  On a
// low-level read failure the buffer is freed and an exception is raised.
STATIC mp_obj_t sd_read(mp_obj_t self, mp_obj_t block_num) {
    uint8_t *buf = m_new(uint8_t, SDCARD_BLOCK_SIZE);
    mp_uint_t res = sdcard_read_blocks(buf, mp_obj_get_int(block_num), 1);
    if (res != 0) {
        // free before raising so the buffer doesn't linger
        m_del(uint8_t, buf, SDCARD_BLOCK_SIZE);
        nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_Exception,
            "sdcard_read_blocks failed [%u]", res));
    }
    return mp_obj_new_bytearray_by_ref(SDCARD_BLOCK_SIZE, buf);
}
// args are in reverse order in the array mp_obj_t fun_native_call_n(mp_obj_t self_in, int n_args, const mp_obj_t *args) { mp_obj_fun_native_t *self = self_in; if (self->is_kw) { return fun_native_call_n_kw(self_in, n_args, 0, args); } if (self->n_args_min == self->n_args_max) { // function requires a fixed number of arguments // check number of arguments if (n_args != self->n_args_min) { nlr_jump(mp_obj_new_exception_msg_2_args(MP_QSTR_TypeError, "function takes %d positional arguments but %d were given", (const char*)(machine_int_t)self->n_args_min, (const char*)(machine_int_t)n_args)); } // dispatch function call switch (self->n_args_min) { case 0: return ((mp_fun_0_t)self->fun)(); case 1: return ((mp_fun_1_t)self->fun)(args[0]); case 2: return ((mp_fun_2_t)self->fun)(args[1], args[0]); case 3: return ((mp_fun_3_t)self->fun)(args[2], args[1], args[0]); default: assert(0); return mp_const_none; } } else { // function takes a variable number of arguments if (n_args < self->n_args_min) { nlr_jump(mp_obj_new_exception_msg_1_arg(MP_QSTR_TypeError, "<fun name>() missing %d required positional arguments: <list of names of params>", (const char*)(machine_int_t)(self->n_args_min - n_args))); } else if (n_args > self->n_args_max) { nlr_jump(mp_obj_new_exception_msg_2_args(MP_QSTR_TypeError, "<fun name> expected at most %d arguments, got %d", (void*)(machine_int_t)self->n_args_max, (void*)(machine_int_t)n_args)); } // TODO really the args need to be passed in as a Python tuple, as the form f(*[1,2]) can be used to pass var args mp_obj_t *args_ordered = m_new(mp_obj_t, n_args); for (int i = 0; i < n_args; i++) { args_ordered[i] = args[n_args - i - 1]; } mp_obj_t res = ((mp_fun_var_t)self->fun)(n_args, args_ordered); m_del(mp_obj_t, args_ordered, n_args); return res; } }
// Grow a set's hash table to the next prime size and re-insert every element.
STATIC void mp_set_rehash(mp_set_t *set) {
    int prev_alloc = set->alloc;
    mp_obj_t *prev_table = set->table;

    set->alloc = get_doubling_prime_greater_or_equal_to(set->alloc + 1);
    set->used = 0;
    set->table = m_new0(mp_obj_t, set->alloc);

    // re-hash each occupied slot into the new table
    for (int slot = 0; slot < prev_alloc; slot++) {
        if (prev_table[slot] != NULL) {
            mp_set_lookup(set, prev_table[slot], true);
        }
    }

    m_del(mp_obj_t, prev_table, prev_alloc);
}
// Grow a set's hash table to the next allocation size and re-insert all live
// elements; empty and sentinel (deleted) slots are skipped.
STATIC void mp_set_rehash(mp_set_t *set) {
    size_t prev_alloc = set->alloc;
    mp_obj_t *prev_table = set->table;

    set->alloc = get_hash_alloc_greater_or_equal_to(set->alloc + 1);
    set->used = 0;
    set->table = m_new0(mp_obj_t, set->alloc);

    for (size_t slot = 0; slot < prev_alloc; slot++) {
        if (prev_table[slot] != MP_OBJ_NULL && prev_table[slot] != MP_OBJ_SENTINEL) {
            mp_set_lookup(set, prev_table[slot], MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
        }
    }

    m_del(mp_obj_t, prev_table, prev_alloc);
}
// Grow a map's hash table to the next prime size and re-insert every entry.
STATIC void mp_map_rehash(mp_map_t *map) {
    int prev_alloc = map->alloc;
    mp_map_elem_t *prev_table = map->table;

    map->alloc = get_doubling_prime_greater_or_equal_to(map->alloc + 1);
    map->used = 0;
    map->all_keys_are_qstrs = 1;
    map->table = m_new0(mp_map_elem_t, map->alloc);

    // re-insert all occupied slots into the grown table
    for (int slot = 0; slot < prev_alloc; slot++) {
        if (prev_table[slot].key != NULL) {
            mp_map_lookup(map, prev_table[slot].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = prev_table[slot].value;
        }
    }

    m_del(mp_map_elem_t, prev_table, prev_alloc);
}
// Advance the map iterator: pull the next value from each underlying
// iterator and apply the mapped function to them.  If any source iterator is
// exhausted the temporary array is freed and StopIteration is signalled.
STATIC mp_obj_t map_iternext(mp_obj_t self_in) {
    assert(MP_OBJ_IS_TYPE(self_in, &map_type));
    mp_obj_map_t *self = self_in;

    mp_obj_t *values = m_new(mp_obj_t, self->n_iters);
    for (int i = 0; i < self->n_iters; i++) {
        mp_obj_t item = rt_iternext(self->iters[i]);
        if (item == mp_const_stop_iteration) {
            // one of the sources ran out: clean up and stop
            m_del(mp_obj_t, values, self->n_iters);
            return mp_const_stop_iteration;
        }
        values[i] = item;
    }

    return rt_call_function_n_kw(self->fun, self->n_iters, 0, values);
}
// Write num_blocks blocks of SDCARD_BLOCK_SIZE bytes from src to the SD card
// starting at block_num.  Returns SD_OK on success.  Unaligned source
// pointers are staged one block at a time through a temporary aligned buffer.
mp_uint_t sdcard_write_blocks(const uint8_t *src, uint32_t block_num, uint32_t num_blocks) {
    // check that SD card is initialised
    if (sd_handle.Instance == NULL) {
        return SD_ERROR;
    }

    HAL_SD_ErrorTypedef err = SD_OK;

    // check that src pointer is aligned on a 4-byte boundary
    if (((uint32_t)src & 3) != 0) {
        // pointer is not aligned, so allocate a temporary block to do the write
        uint8_t *src_aligned = m_new_maybe(uint8_t, SDCARD_BLOCK_SIZE);
        if (src_aligned == NULL) {
            return SD_ERROR;
        }
        // copy each block into the aligned buffer and write it individually
        for (size_t i = 0; i < num_blocks; ++i) {
            memcpy(src_aligned, src + i * SDCARD_BLOCK_SIZE, SDCARD_BLOCK_SIZE);
            err = sdcard_write_blocks(src_aligned, block_num + i, 1);
            if (err != SD_OK) {
                break;
            }
        }
        m_del(uint8_t, src_aligned, SDCARD_BLOCK_SIZE);
        return err;
    }

    if (query_irq() == IRQ_STATE_ENABLED) {
        // we must disable USB irqs to prevent MSC contention with SD card
        uint32_t basepri = raise_irq_pri(IRQ_PRI_OTG_FS);

        dma_init(&sd_tx_dma, &dma_SDIO_0_TX, &sd_handle);
        sd_handle.hdmatx = &sd_tx_dma;

        err = HAL_SD_WriteBlocks_BlockNumber_DMA(&sd_handle, (uint32_t*)src, block_num, SDCARD_BLOCK_SIZE, num_blocks);
        if (err == SD_OK) {
            // wait for DMA transfer to finish, with a large timeout
            err = HAL_SD_CheckWriteOperation(&sd_handle, 100000000);
        }

        dma_deinit(&dma_SDIO_0_TX);
        sd_handle.hdmatx = NULL;

        restore_irq_pri(basepri);
    } else {
        err = HAL_SD_WriteBlocks_BlockNumber(&sd_handle, (uint32_t*)src, block_num, SDCARD_BLOCK_SIZE, num_blocks);
    }

    return err;
}
// Grow a map's hash table.  The new table is allocated before the map is
// mutated, so an allocation failure (which raises) leaves the map intact.
STATIC void mp_map_rehash(mp_map_t *map) {
    size_t prev_alloc = map->alloc;
    size_t grown_alloc = get_hash_alloc_greater_or_equal_to(map->alloc + 1);
    mp_map_elem_t *prev_table = map->table;
    mp_map_elem_t *grown_table = m_new0(mp_map_elem_t, grown_alloc);

    // If we reach this point, table resizing succeeded, now we can edit the old map.
    map->alloc = grown_alloc;
    map->used = 0;
    map->all_keys_are_qstrs = 1;
    map->table = grown_table;

    // re-insert all live entries (skip empty and sentinel/deleted slots)
    for (size_t slot = 0; slot < prev_alloc; slot++) {
        if (prev_table[slot].key != MP_OBJ_NULL && prev_table[slot].key != MP_OBJ_SENTINEL) {
            mp_map_lookup(map, prev_table[slot].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = prev_table[slot].value;
        }
    }

    m_del(mp_map_elem_t, prev_table, prev_alloc);
}
// Constructor for tuple: with no arguments return the shared empty tuple;
// with one argument build a tuple from any iterable, returning the argument
// unchanged if it is already a tuple.
STATIC mp_obj_t mp_obj_tuple_make_new(mp_obj_t type_in, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args) {
    (void)type_in;
    mp_arg_check_num(n_args, n_kw, 0, 1, false);

    if (n_args == 0) {
        // return a empty tuple
        return mp_const_empty_tuple;
    }

    // 1 argument, an iterable from which we make a new tuple
    if (MP_OBJ_IS_TYPE(args[0], &mp_type_tuple)) {
        return args[0];
    }

    // TODO optimise for cases where we know the length of the iterator
    mp_uint_t alloc = 4;
    mp_uint_t len = 0;
    mp_obj_t *items = m_new(mp_obj_t, alloc);

    mp_obj_t iterable = mp_getiter(args[0]);
    mp_obj_t item;
    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
        if (len >= alloc) {
            // grow the scratch array geometrically
            items = m_renew(mp_obj_t, items, alloc, alloc * 2);
            alloc *= 2;
        }
        items[len++] = item;
    }

    mp_obj_t tuple = mp_obj_new_tuple(len, items);
    m_del(mp_obj_t, items, alloc);
    return tuple;
}
// method socket.recv(bufsize) STATIC mp_obj_t esp_socket_recv(mp_obj_t self_in, mp_obj_t len_in) { esp_socket_obj_t *s = self_in; if (s->recvbuf == NULL) { nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "no data available")); } mp_uint_t mxl = mp_obj_get_int(len_in); if (mxl >= s->recvbuf_len) { mp_obj_t trt = mp_obj_new_bytes(s->recvbuf, s->recvbuf_len); m_del(uint8_t, s->recvbuf, s->recvbuf_len); s->recvbuf = NULL; return trt; } else { mp_obj_t trt = mp_obj_new_bytes(s->recvbuf, mxl); memmove(s->recvbuf, &s->recvbuf[mxl], s->recvbuf_len - mxl); s->recvbuf = m_renew(uint8_t, s->recvbuf, s->recvbuf_len, s->recvbuf_len - mxl); s->recvbuf_len -= mxl; return trt; } }
// Call a bound method: the stored self object is prepended to the caller's
// argument array and the combined array passed to the underlying callable.
// Small calls use a stack array, larger ones a temporary heap array.
mp_obj_t bound_meth_call(mp_obj_t self_in, uint n_args, uint n_kw, const mp_obj_t *args) {
    mp_obj_bound_meth_t *self = self_in;

    // need to insert self->self before all other args and then call self->meth
    // total slots used by positional args plus key/value pairs
    int n_total = n_args + 2 * n_kw;
    if (n_total <= 4) {
        // small call: build the argument array on the stack
        mp_obj_t call_args[5];
        call_args[0] = self->self;
        memcpy(call_args + 1, args, n_total * sizeof(mp_obj_t));
        return mp_call_function_n_kw(self->meth, n_args + 1, n_kw, &call_args[0]);
    }

    // large call: build the argument array on the heap and free it afterwards
    mp_obj_t *call_args = m_new(mp_obj_t, 1 + n_total);
    call_args[0] = self->self;
    memcpy(call_args + 1, args, n_total * sizeof(mp_obj_t));
    mp_obj_t res = mp_call_function_n_kw(self->meth, n_args + 1, n_kw, &call_args[0]);
    m_del(mp_obj_t, call_args, 1 + n_total);
    return res;
}
// Allocate (or, with size <= 0, remove) the emergency exception buffer.  The
// buffer pointer and its size are swapped in atomically so an interrupt can
// never observe a half-updated pair; the previous buffer is freed afterwards.
mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in) {
    mp_int_t size = mp_obj_get_int(size_in);
    void *buf = (size > 0) ? m_new(byte, size) : NULL;

    int old_size = mp_emergency_exception_buf_size;
    void *old_buf = MP_STATE_VM(mp_emergency_exception_buf);

    // Update the 2 variables atomically so that an interrupt can't occur
    // between the assignments.
    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
    mp_emergency_exception_buf_size = size;
    MP_STATE_VM(mp_emergency_exception_buf) = buf;
    MICROPY_END_ATOMIC_SECTION(atomic_state);

    if (old_buf != NULL) {
        m_del(byte, old_buf, old_size);
    }
    return mp_const_none;
}
// Call a closure: the closed-over variables are prepended to the caller's
// arguments and the combined array is passed to the wrapped function.
STATIC mp_obj_t closure_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    mp_obj_closure_t *self = MP_OBJ_TO_PTR(self_in);

    // need to concatenate closed-over-vars and args
    mp_uint_t n_total = self->n_closed + n_args + 2 * n_kw;
    if (n_total <= 5) {
        // small call: concatenate on the stack
        mp_obj_t call_args[5];
        memcpy(call_args, self->closed, self->n_closed * sizeof(mp_obj_t));
        memcpy(call_args + self->n_closed, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
        return mp_call_function_n_kw(self->fun, self->n_closed + n_args, n_kw, call_args);
    }

    // large call: concatenate on the heap and free afterwards
    mp_obj_t *call_args = m_new(mp_obj_t, n_total);
    memcpy(call_args, self->closed, self->n_closed * sizeof(mp_obj_t));
    memcpy(call_args + self->n_closed, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
    mp_obj_t res = mp_call_function_n_kw(self->fun, self->n_closed + n_args, n_kw, call_args);
    m_del(mp_obj_t, call_args, n_total);
    return res;
}
// Free an inline-thumb assembler emitter.  The generated machine code is
// deliberately kept alive (asm_thumb_free is called with false).
void emit_inline_thumb_free(emit_inline_asm_t *emit) {
    m_del(qstr, emit->label_lookup, emit->max_num_labels);
    asm_thumb_free(emit->as, false);
    m_del_obj(emit_inline_asm_t, emit);
}
// Free a bytecode emitter together with its label-offset table.
void emit_bc_free(emit_t *emit) {
    m_del(mp_uint_t, emit->label_offsets, emit->max_num_labels);
    m_del_obj(emit_t, emit);
}
// Initialise (or re-initialise) a UART object from parsed arguments:
// args[0]=baudrate, [1]=word length (5-8), [2]=parity (None/odd/even),
// [3]=stop bits, [4]=flow control, [5]=timeout, [6]=timeout_char,
// [7]=read buffer length.  With only a baudrate given, re-init with the
// existing configuration.  Raises ValueError/OSError on bad args or failure.
STATIC mp_obj_t pyb_uart_init_helper(pyb_uart_obj_t *self, mp_uint_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    bool success;

    // parse args
    mp_arg_val_t args[MP_ARRAY_SIZE(pyb_uart_init_args)];
    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(pyb_uart_init_args), pyb_uart_init_args, args);

    // set the UART configuration values
    if (n_args > 1) {
        self->baudrate = args[0].u_int;
        // word length
        switch (args[1].u_int) {
            case 5:
                self->config = UART_CONFIG_WLEN_5;
                break;
            case 6:
                self->config = UART_CONFIG_WLEN_6;
                break;
            case 7:
                self->config = UART_CONFIG_WLEN_7;
                break;
            case 8:
                self->config = UART_CONFIG_WLEN_8;
                break;
            default:
                nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError, mpexception_value_invalid_arguments));
                break;
        }
        // Parity
        if (args[2].u_obj == mp_const_none) {
            self->config |= UART_CONFIG_PAR_NONE;
        } else {
            // odd parity if the low bit is set, even otherwise
            self->config |= ((mp_obj_get_int(args[2].u_obj) & 1) ? UART_CONFIG_PAR_ODD : UART_CONFIG_PAR_EVEN);
        }
        // Stop bits
        self->config |= (args[3].u_int == 1 ? UART_CONFIG_STOP_ONE : UART_CONFIG_STOP_TWO);
        // Flow control
        self->flowcontrol = args[4].u_int;
        success = uart_init2(self);
    } else {
        success = uart_init(self, args[0].u_int);
    }

    // init UART (if it fails, something weird happened)
    if (!success) {
        nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, mpexception_os_operation_failed));
    }

    // set timeouts
    self->timeout = args[5].u_int;
    self->timeout_char = args[6].u_int;

    // setup the read buffer
    m_del(byte, self->read_buf, self->read_buf_len);
    self->read_buf_head = 0;
    self->read_buf_tail = 0;
    if (args[7].u_int <= 0) {
        // no read buffer
        self->read_buf_len = 0;
        self->read_buf = NULL;
        MAP_UARTIntDisable(self->reg, UART_INT_RX | UART_INT_RT);
    } else {
        // read buffer using interrupts
        self->read_buf_len = args[7].u_int;
        self->read_buf = m_new(byte, args[7].u_int);
    }

    return mp_const_none;
}
// Parse the token stream produced by lex into a parse tree, driven by the
// grammar tables in `rules`.  The parser is an explicit-stack LL machine:
// pending (rule, position) pairs live on rule_stack and completed parse
// nodes on result_stack.  Returns the root parse node, or MP_PARSE_NODE_NULL
// on a syntax error (errors are currently printed, not raised).
mp_parse_node_t mp_parse(mp_lexer_t *lex, mp_parse_input_kind_t input_kind) {
    // allocate memory for the parser and its stacks
    parser_t *parser = m_new_obj(parser_t);
    parser->rule_stack_alloc = 64;
    parser->rule_stack_top = 0;
    parser->rule_stack = m_new(rule_stack_t, parser->rule_stack_alloc);
    parser->result_stack_alloc = 64;
    parser->result_stack_top = 0;
    parser->result_stack = m_new(mp_parse_node_t, parser->result_stack_alloc);

    // work out the top-level rule to use, and push it on the stack
    int top_level_rule;
    switch (input_kind) {
        case MP_PARSE_SINGLE_INPUT: top_level_rule = RULE_single_input; break;
        //case MP_PARSE_EVAL_INPUT: top_level_rule = RULE_eval_input; break;
        default: top_level_rule = RULE_file_input;
    }
    push_rule(parser, rules[top_level_rule], 0);

    // parse!

    uint n, i;
    bool backtrack = false;
    const rule_t *rule;
    mp_token_kind_t tok_kind;
    bool emit_rule;
    bool had_trailing_sep;

    for (;;) {
        next_rule:
        // done when there are no more pending rules
        if (parser->rule_stack_top == 0) {
            break;
        }

        pop_rule(parser, &rule, &i);
        n = rule->act & RULE_ACT_ARG_MASK;

        /*
        // debugging
        printf("depth=%d ", parser->rule_stack_top);
        for (int j = 0; j < parser->rule_stack_top; ++j) {
            printf(" ");
        }
        printf("%s n=%d i=%d bt=%d\n", rule->rule_name, n, i, backtrack);
        */

        switch (rule->act & RULE_ACT_KIND_MASK) {
            case RULE_ACT_OR:
                // an alternation: try each alternative in turn
                if (i > 0 && !backtrack) {
                    // an earlier alternative already matched
                    goto next_rule;
                } else {
                    backtrack = false;
                }
                for (; i < n - 1; ++i) {
                    switch (rule->arg[i] & RULE_ARG_KIND_MASK) {
                        case RULE_ARG_TOK:
                            if (mp_lexer_is_kind(lex, rule->arg[i] & RULE_ARG_ARG_MASK)) {
                                push_result_token(parser, lex);
                                mp_lexer_to_next(lex);
                                goto next_rule;
                            }
                            break;
                        case RULE_ARG_RULE:
                            // try this sub-rule; come back at position i+1 if it fails
                            push_rule(parser, rule, i + 1);
                            push_rule_from_arg(parser, rule->arg[i]);
                            goto next_rule;
                        default:
                            assert(0);
                    }
                }
                // last alternative: failure here means the whole OR fails
                if ((rule->arg[i] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
                    if (mp_lexer_is_kind(lex, rule->arg[i] & RULE_ARG_ARG_MASK)) {
                        push_result_token(parser, lex);
                        mp_lexer_to_next(lex);
                    } else {
                        backtrack = true;
                        goto next_rule;
                    }
                } else {
                    push_rule_from_arg(parser, rule->arg[i]);
                }
                break;

            case RULE_ACT_AND:
                // a sequence: every element must match in order

                // failed, backtrack if we can, else syntax error
                if (backtrack) {
                    assert(i > 0);
                    if ((rule->arg[i - 1] & RULE_ARG_KIND_MASK) == RULE_ARG_OPT_RULE) {
                        // an optional rule that failed, so continue with next arg
                        push_result_node(parser, MP_PARSE_NODE_NULL);
                        backtrack = false;
                    } else {
                        // a mandatory rule that failed, so propagate backtrack
                        if (i > 1) {
                            // already eaten tokens so can't backtrack
                            goto syntax_error;
                        } else {
                            goto next_rule;
                        }
                    }
                }

                // progress through the rule
                for (; i < n; ++i) {
                    switch (rule->arg[i] & RULE_ARG_KIND_MASK) {
                        case RULE_ARG_TOK:
                            // need to match a token
                            tok_kind = rule->arg[i] & RULE_ARG_ARG_MASK;
                            if (mp_lexer_is_kind(lex, tok_kind)) {
                                // matched token
                                if (tok_kind == MP_TOKEN_NAME) {
                                    push_result_token(parser, lex);
                                }
                                mp_lexer_to_next(lex);
                            } else {
                                // failed to match token
                                if (i > 0) {
                                    // already eaten tokens so can't backtrack
                                    goto syntax_error;
                                } else {
                                    // this rule failed, so backtrack
                                    backtrack = true;
                                    goto next_rule;
                                }
                            }
                            break;
                        case RULE_ARG_RULE:
                            //if (i + 1 < n) {
                                push_rule(parser, rule, i + 1);
                            //}
                            push_rule_from_arg(parser, rule->arg[i]);
                            goto next_rule;
                        case RULE_ARG_OPT_RULE:
                            push_rule(parser, rule, i + 1);
                            push_rule_from_arg(parser, rule->arg[i]);
                            goto next_rule;
                        default:
                            assert(0);
                    }
                }

                assert(i == n);

                // matched the rule, so now build the corresponding parse_node

                // count number of arguments for the parse_node
                i = 0;
                emit_rule = false;
                for (int x = 0; x < n; ++x) {
                    if ((rule->arg[x] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
                        tok_kind = rule->arg[x] & RULE_ARG_ARG_MASK;
                        if (tok_kind >= MP_TOKEN_NAME) {
                            emit_rule = true;
                        }
                        if (tok_kind == MP_TOKEN_NAME) {
                            // only tokens which were names are pushed to stack
                            i += 1;
                        }
                    } else {
                        // rules are always pushed
                        i += 1;
                    }
                }

                // always emit these rules, even if they have only 1 argument
                if (rule->rule_id == RULE_expr_stmt || rule->rule_id == RULE_yield_stmt) {
                    emit_rule = true;
                }

                // never emit these rules if they have only 1 argument
                // NOTE: can't put atom_paren here because we need it to distinguish, for example, [a,b] from [(a,b)]
                // TODO possibly put varargslist_name, varargslist_equal here as well
                if (rule->rule_id == RULE_else_stmt || rule->rule_id == RULE_testlist_comp_3b
                    || rule->rule_id == RULE_import_as_names_paren || rule->rule_id == RULE_typedargslist_name
                    || rule->rule_id == RULE_typedargslist_colon || rule->rule_id == RULE_typedargslist_equal
                    || rule->rule_id == RULE_dictorsetmaker_colon || rule->rule_id == RULE_classdef_2
                    || rule->rule_id == RULE_with_item_as || rule->rule_id == RULE_assert_stmt_extra
                    || rule->rule_id == RULE_as_name || rule->rule_id == RULE_raise_stmt_from
                    || rule->rule_id == RULE_vfpdef) {
                    emit_rule = false;
                }

                // always emit these rules, and add an extra blank node at the end (to be used by the compiler to store data)
                if (rule->rule_id == RULE_funcdef || rule->rule_id == RULE_classdef
                    || rule->rule_id == RULE_comp_for || rule->rule_id == RULE_lambdef
                    || rule->rule_id == RULE_lambdef_nocond) {
                    emit_rule = true;
                    push_result_node(parser, MP_PARSE_NODE_NULL);
                    i += 1;
                }

                int num_not_nil = 0;
                for (int x = 0; x < i; ++x) {
                    if (peek_result(parser, x) != MP_PARSE_NODE_NULL) {
                        num_not_nil += 1;
                    }
                }
                //printf("done and %s n=%d i=%d notnil=%d\n", rule->rule_name, n, i, num_not_nil);
                if (emit_rule) {
                    push_result_rule(parser, rule, i);
                } else if (num_not_nil == 0) {
                    push_result_rule(parser, rule, i); // needed for, eg, atom_paren, testlist_comp_3b
                    //result_stack_show(parser);
                    //assert(0);
                } else if (num_not_nil == 1) {
                    // single result, leave it on stack
                    mp_parse_node_t pn = MP_PARSE_NODE_NULL;
                    for (int x = 0; x < i; ++x) {
                        mp_parse_node_t pn2 = pop_result(parser);
                        if (pn2 != MP_PARSE_NODE_NULL) {
                            pn = pn2;
                        }
                    }
                    push_result_node(parser, pn);
                } else {
                    push_result_rule(parser, rule, i);
                }
                break;

            case RULE_ACT_LIST:
                // a repeated item, possibly with separators:
                // n=2 is: item item*
                // n=1 is: item (sep item)*
                // n=3 is: item (sep item)* [sep]
                if (backtrack) {
                    list_backtrack:
                    had_trailing_sep = false;
                    if (n == 2) {
                        if (i == 1) {
                            // fail on item, first time round; propagate backtrack
                            goto next_rule;
                        } else {
                            // fail on item, in later rounds; finish with this rule
                            backtrack = false;
                        }
                    } else {
                        if (i == 1) {
                            // fail on item, first time round; propagate backtrack
                            goto next_rule;
                        } else if ((i & 1) == 1) {
                            // fail on item, in later rounds; have eaten tokens so can't backtrack
                            if (n == 3) {
                                // list allows trailing separator; finish parsing list
                                had_trailing_sep = true;
                                backtrack = false;
                            } else {
                                // list doesn't allowing trailing separator; fail
                                goto syntax_error;
                            }
                        } else {
                            // fail on separator; finish parsing list
                            backtrack = false;
                        }
                    }
                } else {
                    for (;;) {
                        // alternate between item (even i) and separator (odd i)
                        uint arg = rule->arg[i & 1 & n];
                        switch (arg & RULE_ARG_KIND_MASK) {
                            case RULE_ARG_TOK:
                                if (mp_lexer_is_kind(lex, arg & RULE_ARG_ARG_MASK)) {
                                    if (i & 1 & n) {
                                        // separators which are tokens are not pushed to result stack
                                    } else {
                                        push_result_token(parser, lex);
                                    }
                                    mp_lexer_to_next(lex);
                                    // got element of list, so continue parsing list
                                    i += 1;
                                } else {
                                    // couldn't get element of list
                                    i += 1;
                                    backtrack = true;
                                    goto list_backtrack;
                                }
                                break;
                            case RULE_ARG_RULE:
                                push_rule(parser, rule, i + 1);
                                push_rule_from_arg(parser, arg);
                                goto next_rule;
                            default:
                                assert(0);
                        }
                    }
                }
                assert(i >= 1);

                // compute number of elements in list, result in i
                i -= 1;
                if ((n & 1) && (rule->arg[1] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
                    // don't count separators when they are tokens
                    i = (i + 1) / 2;
                }

                if (i == 1) {
                    // list matched single item
                    if (had_trailing_sep) {
                        // if there was a trailing separator, make a list of a single item
                        push_result_rule(parser, rule, i);
                    } else {
                        // just leave single item on stack (ie don't wrap in a list)
                    }
                } else {
                    //printf("done list %s %d %d\n", rule->rule_name, n, i);
                    push_result_rule(parser, rule, i);
                }
                break;

            default:
                assert(0);
        }
    }

    // check we are at the end of the token stream
    if (!mp_lexer_is_kind(lex, MP_TOKEN_END)) {
        goto syntax_error;
    }

    //printf("--------------\n");
    //result_stack_show(parser);
    //printf("rule stack alloc: %d\n", parser->rule_stack_alloc);
    //printf("result stack alloc: %d\n", parser->result_stack_alloc);
    //printf("number of parse nodes allocated: %d\n", num_parse_nodes_allocated);

    // get the root parse node that we created
    assert(parser->result_stack_top == 1);
    mp_parse_node_t result = parser->result_stack[0];

finished:
    // free the memory that we don't need anymore
    m_del(rule_stack_t, parser->rule_stack, parser->rule_stack_alloc);
    m_del(mp_parse_node_t, parser->result_stack, parser->result_stack_alloc);
    m_del_obj(parser_t, parser);
    // return the result
    return result;

syntax_error:
    // TODO these should raise a proper exception
    if (mp_lexer_is_kind(lex, MP_TOKEN_INDENT)) {
        mp_lexer_show_error_pythonic(lex, "IndentationError: unexpected indent");
    } else if (mp_lexer_is_kind(lex, MP_TOKEN_DEDENT_MISMATCH)) {
        mp_lexer_show_error_pythonic(lex, "IndentationError: unindent does not match any outer indentation level");
    } else {
        mp_lexer_show_error_pythonic(lex, "syntax error:");
#ifdef USE_RULE_NAME
        mp_lexer_show_error(lex, rule->rule_name);
#endif
        mp_token_show(mp_lexer_cur(lex));
    }
    result = MP_PARSE_NODE_NULL;
    goto finished;
}
// Free an inline-thumb assembler emitter (newer assembler API).  The
// assembler base is deinitialised without freeing its code buffer.
void emit_inline_thumb_free(emit_inline_asm_t *emit) {
    m_del(qstr, emit->label_lookup, emit->max_num_labels);
    mp_asm_base_deinit(&emit->as.base, false);
    m_del_obj(emit_inline_asm_t, emit);
}
// Free a scope: first its identifier-info table, then the scope itself.
void scope_free(scope_t *scope) {
    m_del(id_info_t, scope->id_info, scope->id_info_alloc);
    m_del(scope_t, scope, 1);
}
// Release a set's table and reset it to the empty state.
void mp_set_clear(mp_set_t *set) {
    m_del(mp_obj_t, set->table, set->alloc);
    set->used = 0;
    set->alloc = 0;
    set->table = NULL;
}
// Differentiate from mp_map_clear() - semantics is different void mp_map_deinit(mp_map_t *map) { if (!map->table_is_fixed_array) { m_del(mp_map_elem_t, map->table, map->alloc); } map->used = map->alloc = 0; }