char * PyOS_StdioReadline(FILE *sys_stdin, FILE *sys_stdout, char *prompt) { size_t n; char *p; n = 100; if ((p = (char *)PyMem_MALLOC(n)) == NULL) return NULL; fflush(sys_stdout); #ifndef RISCOS if (prompt) fprintf(stderr, "%s", prompt); #else if (prompt) { if(Py_RISCOSWimpFlag) fprintf(stderr, "\x0cr%s\x0c", prompt); else fprintf(stderr, "%s", prompt); } #endif fflush(stderr); switch (my_fgets(p, (int)n, sys_stdin)) { case 0: /* Normal case */ break; case 1: /* Interrupt */ PyMem_FREE(p); return NULL; case -1: /* EOF */ case -2: /* Error */ default: /* Shouldn't happen */ *p = '\0'; break; } n = strlen(p); while (n > 0 && p[n-1] != '\n') { size_t incr = n+2; p = (char *)PyMem_REALLOC(p, n + incr); if (p == NULL) return NULL; if (incr > INT_MAX) { PyErr_SetString(PyExc_OverflowError, "input line too long"); } if (my_fgets(p+n, (int)incr, sys_stdin) != 0) break; n += strlen(p+n); } return (char *)PyMem_REALLOC(p, n+1); }
/* bmesh.utils.vert_separate(vert, edges): split the fans around a vertex
 * apart along the given edges; returns a tuple of the resulting verts. */
static PyObject *bpy_bm_utils_vert_separate(PyObject *UNUSED(self), PyObject *args)
{
    BPy_BMVert *py_vert;
    PyObject *edge_seq;
    PyObject *py_result;
    BMesh *bm;
    BMVert **split_verts;        /* verts produced by the separation */
    int split_verts_len;
    BMEdge **edges;              /* edges to split along */
    Py_ssize_t edges_len;

    if (!PyArg_ParseTuple(args, "O!O:vert_separate", &BPy_BMVert_Type, &py_vert, &edge_seq)) {
        return NULL;
    }

    BPY_BM_CHECK_OBJ(py_vert);
    bm = py_vert->bm;

    edges = BPy_BMElem_PySeq_As_Array(&bm, edge_seq, 0, PY_SSIZE_T_MAX,
                                      &edges_len, BM_EDGE,
                                      TRUE, TRUE, "vert_separate(...)");
    if (edges == NULL) {
        return NULL;
    }

    if (BM_vert_separate(bm, py_vert->v, &split_verts, &split_verts_len, edges, edges_len)) {
        /* hand the collected verts back to Python as a tuple */
        py_result = BPy_BMElem_Array_As_Tuple(bm, (BMHeader **)split_verts, split_verts_len);
        MEM_freeN(split_verts);
    }
    else {
        py_result = PyTuple_New(0);
    }

    PyMem_FREE(edges);
    return py_result;
}
/*
 * Look for a "coding: xxx" declaration in the comment of source line s
 * (length size).  On success *spec receives a fresh normalized encoding
 * name (owned by the caller) or stays NULL when no spec is present.
 * Returns 1 on success (including "no spec found"), 0 on memory failure.
 */
static int
get_coding_spec(const char *s, char **spec, Py_ssize_t size, struct tok_state *tok)
{
    Py_ssize_t i;
    *spec = NULL;
    /* Coding spec must be in a comment, and that comment must be
     * the only statement on the source code line. */
    for (i = 0; i < size - 6; i++) {
        if (s[i] == '#')
            break;
        /* any non-whitespace before '#' means this is real code */
        if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
            return 1;
    }
    for (; i < size - 6; i++) { /* XXX inefficient search */
        const char* t = s + i;
        if (strncmp(t, "coding", 6) == 0) {
            const char* begin = NULL;
            t += 6;
            if (t[0] != ':' && t[0] != '=')
                continue;
            /* skip whitespace between the separator and the name */
            do {
                t++;
            } while (t[0] == '\x20' || t[0] == '\t');
            begin = t;
            /* encoding names are alnum plus '-', '_', '.' */
            while (Py_ISALNUM(t[0]) ||
                   t[0] == '-' || t[0] == '_' || t[0] == '.')
                t++;
            if (begin < t) {
                char* r = new_string(begin, t - begin, tok);
                char* q;
                if (!r)
                    return 0;
                /* map aliases (e.g. "latin-1") to a canonical name */
                q = get_normal_name(r);
                if (r != q) {
                    PyMem_FREE(r);
                    r = new_string(q, strlen(q), tok);
                    if (!r)
                        return 0;
                }
                *spec = r;
                /* NOTE(review): the loop keeps scanning after a match; a
                 * second "coding" token on the same line would overwrite
                 * (and leak) the first *spec -- confirm intended. */
            }
        }
    }
    return 1;
}
/*
 * bpy.utils.units.to_value(unit_system, unit_category, str_input,
 *                          str_ref_unit=None) -> float
 * Replace unit tokens in str_input, evaluate the result as a Python
 * expression and return it as a float.  Returns NULL with ValueError
 * (or MemoryError) set on failure.
 */
static PyObject *bpyunits_to_value(PyObject *UNUSED(self), PyObject *args, PyObject *kw)
{
    static const char *kwlist[] = {"unit_system", "unit_category", "str_input", "str_ref_unit", NULL};
    char *usys_str = NULL, *ucat_str = NULL, *inpt = NULL, *uref = NULL;
    const float scale = 1.0f;
    char *str;
    Py_ssize_t str_len;
    double result;
    int usys, ucat;
    PyObject *ret;

    if (!PyArg_ParseTupleAndKeywords(args, kw,
                                     "sss#|z:bpy.utils.units.to_value",
                                     (char **)kwlist,
                                     &usys_str, &ucat_str, &inpt, &str_len, &uref))
    {
        return NULL;
    }

    if (!bpyunits_validate(usys_str, ucat_str, &usys, &ucat)) {
        return NULL;
    }

    /* leave headroom: unit replacement can expand the string */
    str_len = str_len * 2 + 64;
    str = PyMem_MALLOC(sizeof(*str) * (size_t)str_len);
    /* BUGFIX: the allocation result was used unchecked; BLI_strncpy would
     * have written through a NULL pointer on memory failure. */
    if (str == NULL) {
        return PyErr_NoMemory();
    }
    BLI_strncpy(str, inpt, (size_t)str_len);

    bUnit_ReplaceString(str, (int)str_len, uref, scale, usys, ucat);

    if (!PyC_RunString_AsNumber(str, &result, "<bpy_units_api>")) {
        if (PyErr_Occurred()) {
            PyErr_Print();
            PyErr_Clear();
        }
        PyErr_Format(PyExc_ValueError,
                     "'%.200s' (converted as '%s') could not be evaluated.",
                     inpt, str);
        ret = NULL;
    }
    else {
        ret = PyFloat_FromDouble(result);
    }

    PyMem_FREE(str);
    return ret;
}
char * PyOS_StdioReadline(char *prompt) { size_t n; char *p; n = 100; if ((p = PyMem_MALLOC(n)) == NULL) return NULL; fflush(stdout); if (prompt) fprintf(stderr, "%s", prompt); fflush(stderr); switch (my_fgets(p, (int)n, stdin)) { case 0: /* Normal case */ break; case 1: /* Interrupt */ PyMem_FREE(p); return NULL; case -1: /* EOF */ case -2: /* Error */ default: /* Shouldn't happen */ *p = '\0'; break; } #ifdef MPW /* Hack for MPW C where the prompt comes right back in the input */ /* XXX (Actually this would be rather nice on most systems...) */ n = strlen(prompt); if (strncmp(p, prompt, n) == 0) memmove(p, p + n, strlen(p) - n + 1); #endif n = strlen(p); while (n > 0 && p[n-1] != '\n') { size_t incr = n+2; p = PyMem_REALLOC(p, n + incr); if (p == NULL) return NULL; if (incr > INT_MAX) { PyErr_SetString(PyExc_OverflowError, "input line too long"); } if (my_fgets(p+n, (int)incr, stdin) != 0) break; n += strlen(p+n); } return PyMem_REALLOC(p, n+1); }
/* tp_dealloc for module objects: clear weakrefs, run the extension
 * module's m_free hook, release owned references and per-module state,
 * then free the object itself. */
static void
module_dealloc(PyModuleObject *mod)
{
    PyObject_GC_UnTrack(mod);

    if (Py_VerboseFlag && mod->md_name)
        PySys_FormatStderr("# destroy %S\n", mod->md_name);

    /* weakref callbacks must fire while the module is still alive */
    if (mod->md_weaklist != NULL)
        PyObject_ClearWeakRefs((PyObject *)mod);

    /* give the extension module a chance to release its resources */
    if (mod->md_def && mod->md_def->m_free)
        mod->md_def->m_free(mod);

    Py_XDECREF(mod->md_dict);
    Py_XDECREF(mod->md_name);

    if (mod->md_state != NULL)
        PyMem_FREE(mod->md_state);

    Py_TYPE(mod)->tp_free((PyObject *)mod);
}
/*
 * Build a complex number (of the given subtype) from a string argument.
 * Accepts str directly; with unicode support, a unicode argument is first
 * converted via PyUnicode_EncodeDecimal into a temporary byte buffer.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *
complex_subtype_from_string(PyTypeObject *type, PyObject *v)
{
    const char *s;
    PyObject *result = NULL;
#ifdef Py_USING_UNICODE
    char *s_buffer = NULL;   /* decimal-encoded copy of a unicode argument */
#endif
    Py_ssize_t len;

    if (PyString_Check(v)) {
        s = PyString_AS_STRING(v);
        len = PyString_GET_SIZE(v);
    }
#ifdef Py_USING_UNICODE
    else if (PyUnicode_Check(v)) {
        /* +1 leaves room for the terminating NUL */
        s_buffer = (char *)PyMem_MALLOC(PyUnicode_GET_SIZE(v)+1);
        if (s_buffer == NULL)
            return PyErr_NoMemory();
        if (PyUnicode_EncodeDecimal(PyUnicode_AS_UNICODE(v),
                                    PyUnicode_GET_SIZE(v),
                                    s_buffer,
                                    NULL))
            goto exit;   /* conversion failed; exception already set */
        s = s_buffer;
        len = strlen(s);
    }
#endif
    else {
        PyErr_SetString(PyExc_TypeError, "complex() arg is not a string");
        return NULL;
    }

    result = _Py_string_to_number_with_underscores(s, len, "complex", v, type,
                                                   complex_from_string_inner);
  exit:
#ifdef Py_USING_UNICODE
    /* free the temporary buffer on both success and failure paths */
    if (s_buffer)
        PyMem_FREE(s_buffer);
#endif
    return result;
}
/* tp_dealloc for code objects: drop every owned reference, release the
 * raw buffers, clear weakrefs, then free the object storage. */
static void
code_dealloc(PyCodeObject *co)
{
    /* owned object references */
    Py_XDECREF(co->co_code);
    Py_XDECREF(co->co_consts);
    Py_XDECREF(co->co_names);
    Py_XDECREF(co->co_varnames);
    Py_XDECREF(co->co_freevars);
    Py_XDECREF(co->co_cellvars);
    Py_XDECREF(co->co_filename);
    Py_XDECREF(co->co_name);
    Py_XDECREF(co->co_lnotab);

    /* raw cell->arg mapping buffer */
    if (co->co_cell2arg != NULL) {
        PyMem_FREE(co->co_cell2arg);
    }
    /* cached frame kept for fast re-entry */
    if (co->co_zombieframe != NULL) {
        PyObject_GC_Del(co->co_zombieframe);
    }
    if (co->co_weakreflist != NULL) {
        PyObject_ClearWeakRefs((PyObject *)co);
    }
    PyObject_DEL(co);
}
/*
 * Return the coding spec declared in the comment of source line s
 * (length size) as a freshly allocated, normalized encoding name, or
 * NULL when the line carries no coding spec (or allocation failed).
 * The caller owns the returned string.
 */
static char *
get_coding_spec(const char *s, Py_ssize_t size)
{
    Py_ssize_t i;
    /* Coding spec must be in a comment, and that comment must be
     * the only statement on the source code line. */
    for (i = 0; i < size - 6; i++) {
        if (s[i] == '#')
            break;
        if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
            return NULL;
    }
    for (; i < size - 6; i++) { /* XXX inefficient search */
        const char* t = s + i;
        if (strncmp(t, "coding", 6) == 0) {
            const char* begin = NULL;
            t += 6;
            if (t[0] != ':' && t[0] != '=')
                continue;
            do {
                t++;
            } while (t[0] == '\x20' || t[0] == '\t');
            begin = t;
            while (isalnum(Py_CHARMASK(t[0])) ||
                   t[0] == '-' || t[0] == '_' || t[0] == '.')
                t++;
            if (begin < t) {
                char* r = new_string(begin, t - begin);
                char* q;
                /* BUGFIX: new_string() may return NULL on allocation
                 * failure; passing that to get_normal_name()/strlen()
                 * crashed.  (The tok_state variant of this function
                 * already checks.) */
                if (r == NULL)
                    return NULL;
                q = get_normal_name(r);
                if (r != q) {
                    PyMem_FREE(r);
                    r = new_string(q, strlen(q));
                }
                return r;
            }
        }
    }
    return NULL;
}
/* replace(str, old, new[, maxcount]) -> copy of str with occurrences of
 * old replaced by new. */
static PyObject *
strop_replace(PyObject *self, PyObject *args)
{
    char *input, *pattern, *replacement, *replaced;
    Py_ssize_t input_len, pattern_len, replacement_len, replaced_len;
    Py_ssize_t maxcount = -1;
    PyObject *py_result;

    WARN;
    if (!PyArg_ParseTuple(args, "t#t#t#|n:replace",
                          &input, &input_len,
                          &pattern, &pattern_len,
                          &replacement, &replacement_len,
                          &maxcount))
        return NULL;

    if (pattern_len <= 0) {
        PyErr_SetString(PyExc_ValueError, "empty pattern string");
        return NULL;
    }
    /* CAUTION: strop treats a replace count of 0 as infinity, unlike
     * string.py and the string methods; preserved for compatibility. */
    if (maxcount == 0)
        maxcount = -1;

    replaced = mymemreplace(input, input_len, pattern, pattern_len,
                            replacement, replacement_len, maxcount,
                            &replaced_len);
    if (replaced == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    if (replaced_len == -1) {
        /* nothing changed: hand back another reference to the input */
        py_result = PyTuple_GetItem(args, 0);
        Py_XINCREF(py_result);
    }
    else {
        py_result = PyString_FromStringAndSize(replaced, replaced_len);
        PyMem_FREE(replaced);
    }
    return py_result;
}
/* Synchronize and close an external memory list.
 * Flushes and unmaps both backing files, trims "values.bin" to its real
 * end-of-data, and marks the list closed.  Always returns None. */
static PyObject *em_list_close(em_list_t *self)
{
    mapped_file_t *index = self->index;
    mapped_file_t *values = self->values;

    if (self->is_open) {
        /* Sync and close "index.bin". */
        mapped_file_sync(index, 0, index->size);
        mapped_file_close(index);

        /* Sync, trim the unused tail and close "values.bin". */
        mapped_file_sync(values, 0, values->size);
        mapped_file_truncate(values, mapped_file_get_eof(values));
        mapped_file_close(values);

        PyMem_FREE(self->dirname);
        /* BUGFIX: don't leave a dangling pointer behind after free */
        self->dirname = NULL;
        self->is_open = 0;
    }
    Py_RETURN_NONE;
}
/* BUGFIX: removed the stray ';' that followed the function body --
 * invalid at file scope in strict C89/C90. */
/*
 * bmesh.utils.face_join(faces, remove=True) -> joined BMFace or None.
 * Joins two or more faces into one; the originals are removed when
 * 'remove' is true.
 */
static PyObject *bpy_bm_utils_face_join(PyObject *UNUSED(self), PyObject *args)
{
    BMesh *bm = NULL;
    PyObject *py_face_array;
    BMFace **face_array;
    Py_ssize_t face_seq_len = 0;
    BMFace *f_new;
    bool do_remove = true;

    /* BUGFIX: the optional argument is parsed through the PyC_ParseBool
     * converter, so the format unit must be "O&" (converter + address),
     * not "i" -- with "i", PyArg_ParseTuple read the converter function
     * pointer as an int* destination. */
    if (!PyArg_ParseTuple(
          args, "O|O&:face_join",
          &py_face_array,
          PyC_ParseBool, &do_remove))
    {
        return NULL;
    }

    face_array = BPy_BMElem_PySeq_As_Array(&bm, py_face_array, 2, PY_SSIZE_T_MAX,
                                           &face_seq_len, BM_FACE,
                                           true, true, "face_join(...)");
    if (face_array == NULL) {
        return NULL; /* error will be set */
    }

    /* Go ahead and join the face!
     * --------------------------- */
    f_new = BM_faces_join(bm, face_array, (int)face_seq_len, do_remove);
    PyMem_FREE(face_array);

    if (f_new) {
        return BPy_BMFace_CreatePyObject(bm, f_new);
    }
    else {
        Py_RETURN_NONE;
    }
}
/*
 * Consume a UTF-8 byte-order mark at the start of the stream, if present,
 * and record "utf-8" as the source encoding.  Non-BOM bytes are pushed
 * back.  UTF-16 BOM handling exists but is compiled out (#if 0).
 * Returns 1 on success (BOM or no BOM), 0 on set_readline failure.
 */
static int
check_bom(int get_char(struct tok_state *),
          void unget_char(int, struct tok_state *),
          int set_readline(struct tok_state *, const char *),
          struct tok_state *tok)
{
    int ch = get_char(tok);
    tok->decoding_state = 1;    /* raw byte decoding until told otherwise */
    if (ch == EOF) {
        return 1;
    }
    else if (ch == 0xEF) {
        /* possible UTF-8 BOM: EF BB BF */
        ch = get_char(tok);
        if (ch != 0xBB)
            goto NON_BOM;
        ch = get_char(tok);
        if (ch != 0xBF)
            goto NON_BOM;
#if 0
        /* Disable support for UTF-16 BOMs until a decision is made whether
           this needs to be supported. */
    }
    else if (ch == 0xFE) {
        ch = get_char(tok);
        if (ch != 0xFF)
            goto NON_BOM;
        if (!set_readline(tok, "utf-16-be"))
            return 0;
        tok->decoding_state = -1;
    }
    else if (ch == 0xFF) {
        ch = get_char(tok);
        if (ch != 0xFE)
            goto NON_BOM;
        if (!set_readline(tok, "utf-16-le"))
            return 0;
        tok->decoding_state = -1;
#endif
    }
    else {
        /* ordinary first byte: push it back for the tokenizer */
        unget_char(ch, tok);
        return 1;
    }
    if (tok->encoding != NULL)
        PyMem_FREE(tok->encoding);
    tok->encoding = new_string("utf-8", 5); /* resulting is in utf-8 */
    /* NOTE(review): the new_string() result is not NULL-checked here --
     * confirm how an allocation failure propagates to callers. */
    return 1;

  NON_BOM:
    /* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */
    unget_char(0xFF, tok);      /* XXX this will cause a syntax error */
    return 1;
}
/* Allocate and initialize a fresh parser for grammar g, rooted at the
 * nonterminal 'start'.  Returns NULL on memory failure. */
parser_state *
PyParser_New(grammar *g, int start)
{
    parser_state *new_ps;

    /* build the grammar's accelerator tables lazily, once */
    if (!g->g_accel)
        PyGrammar_AddAccelerators(g);

    new_ps = (parser_state *)PyMem_MALLOC(sizeof(parser_state));
    if (new_ps == NULL)
        return NULL;

    new_ps->p_grammar = g;
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    new_ps->p_flags = 0;
#endif
    new_ps->p_tree = PyNode_New(start);
    if (new_ps->p_tree == NULL) {
        PyMem_FREE(new_ps);
        return NULL;
    }

    s_reset(&new_ps->p_stack);
    (void)s_push(&new_ps->p_stack, PyGrammar_FindDFA(g, start), new_ps->p_tree);
    return new_ps;
}
/*
 * locale.strcoll(s1, s2): compare two strings according to the current
 * LC_COLLATE setting.  Byte strings go through strcoll(); unicode
 * arguments are widened and compared with wcscoll().
 */
static PyObject*
PyLocale_strcoll(PyObject* self, PyObject* args)
{
#if !defined(HAVE_WCSCOLL) || !defined(Py_USING_UNICODE)
    char *s1,*s2;

    if (!PyArg_ParseTuple(args, "ss:strcoll", &s1, &s2))
        return NULL;
    return PyInt_FromLong(strcoll(s1, s2));
#else
    PyObject *os1, *os2, *result = NULL;
    wchar_t *ws1 = NULL, *ws2 = NULL;
    int rel1 = 0, rel2 = 0, len1, len2;

    if (!PyArg_ParseTuple(args, "OO:strcoll", &os1, &os2))
        return NULL;
    /* If both arguments are byte strings, use strcoll. */
    if (PyString_Check(os1) && PyString_Check(os2))
        return PyInt_FromLong(strcoll(PyString_AS_STRING(os1),
                                      PyString_AS_STRING(os2)));
    /* If neither argument is unicode, it's an error. */
    if (!PyUnicode_Check(os1) && !PyUnicode_Check(os2)) {
        PyErr_SetString(PyExc_ValueError, "strcoll arguments must be strings");
        /* BUGFIX: previously fell through with the exception pending and
         * went on to convert/collate, returning a value while an error
         * was set. */
        return NULL;
    }
    /* Convert the non-unicode argument to unicode. */
    if (!PyUnicode_Check(os1)) {
        os1 = PyUnicode_FromObject(os1);
        if (!os1)
            return NULL;
        rel1 = 1;
    }
    if (!PyUnicode_Check(os2)) {
        os2 = PyUnicode_FromObject(os2);
        if (!os2) {
            Py_DECREF(os1);
            return NULL;
        }
        rel2 = 1;
    }
    /* Convert the unicode strings to wchar[]. */
    len1 = PyUnicode_GET_SIZE(os1) + 1;
    ws1 = PyMem_MALLOC(len1 * sizeof(wchar_t));
    if (!ws1) {
        PyErr_NoMemory();
        goto done;
    }
    if (PyUnicode_AsWideChar((PyUnicodeObject*)os1, ws1, len1) == -1)
        goto done;
    ws1[len1 - 1] = 0;
    len2 = PyUnicode_GET_SIZE(os2) + 1;
    ws2 = PyMem_MALLOC(len2 * sizeof(wchar_t));
    if (!ws2) {
        PyErr_NoMemory();
        goto done;
    }
    if (PyUnicode_AsWideChar((PyUnicodeObject*)os2, ws2, len2) == -1)
        goto done;
    ws2[len2 - 1] = 0;
    /* Collate the strings. */
    result = PyInt_FromLong(wcscoll(ws1, ws2));
  done:
    /* Deallocate everything. */
    if (ws1)
        PyMem_FREE(ws1);
    if (ws2)
        PyMem_FREE(ws2);
    if (rel1) {
        Py_DECREF(os1);
    }
    if (rel2) {
        Py_DECREF(os2);
    }
    return result;
#endif
}
/* TODO, multi-dimensional arrays */
/*
 * Membership test for a flat RNA array property.
 * Returns 1 when 'value' occurs in the array, 0 when it does not (or the
 * value has an incompatible type), -1 on error with an exception set.
 */
int pyrna_array_contains_py(PointerRNA *ptr, PropertyRNA *prop, PyObject *value)
{
    int len = RNA_property_array_length(ptr, prop);
    int type;
    int i;

    if (len == 0) /* possible with dynamic arrays */
        return 0;

    if (RNA_property_array_dimension(ptr, prop, NULL) > 1) {
        PyErr_SetString(PyExc_TypeError, "PropertyRNA - multi dimensional arrays not supported yet");
        return -1;
    }

    type = RNA_property_type(prop);
    switch (type) {
        case PROP_FLOAT:
        {
            float value_f = PyFloat_AsDouble(value);
            if (value_f == -1 && PyErr_Occurred()) {
                /* not a number: by convention this is "not contained" */
                PyErr_Clear();
                return 0;
            }
            else {
                float tmp[32];
                float *tmp_arr;

                /* use the stack buffer for small arrays */
                if (len * sizeof(float) > sizeof(tmp)) {
                    tmp_arr = PyMem_MALLOC(len * sizeof(float));
                    /* BUGFIX: the allocation result was used unchecked */
                    if (tmp_arr == NULL) {
                        PyErr_NoMemory();
                        return -1;
                    }
                }
                else {
                    tmp_arr = tmp;
                }

                RNA_property_float_get_array(ptr, prop, tmp_arr);

                for (i = 0; i < len; i++) {
                    if (tmp_arr[i] == value_f) {
                        break;
                    }
                }

                if (tmp_arr != tmp)
                    PyMem_FREE(tmp_arr);

                return i < len ? 1 : 0;
            }
            break;
        }
        case PROP_BOOLEAN:
        case PROP_INT:
        {
            int value_i = PyLong_AsSsize_t(value);
            if (value_i == -1 && PyErr_Occurred()) {
                PyErr_Clear();
                return 0;
            }
            else {
                int tmp[32];
                int *tmp_arr;

                if (len * sizeof(int) > sizeof(tmp)) {
                    tmp_arr = PyMem_MALLOC(len * sizeof(int));
                    /* BUGFIX: the allocation result was used unchecked */
                    if (tmp_arr == NULL) {
                        PyErr_NoMemory();
                        return -1;
                    }
                }
                else {
                    tmp_arr = tmp;
                }

                if (type == PROP_BOOLEAN)
                    RNA_property_boolean_get_array(ptr, prop, tmp_arr);
                else
                    RNA_property_int_get_array(ptr, prop, tmp_arr);

                for (i = 0; i < len; i++) {
                    if (tmp_arr[i] == value_i) {
                        break;
                    }
                }

                if (tmp_arr != tmp)
                    PyMem_FREE(tmp_arr);

                return i < len ? 1 : 0;
            }
            break;
        }
    }

    /* should never reach this */
    PyErr_SetString(PyExc_TypeError, "PropertyRNA - type not in float/bool/int");
    return -1;
}
/* Cleanup callback: release a temporary buffer that was allocated with
 * the PyMem allocator.  NOTE(review): presumably registered as an
 * argument-parsing/teardown hook -- confirm against the caller. */
static void
cleanup_ptr(void *ptr)
{
    PyMem_FREE(ptr);
}
/*
 * Construct a new code object from its fully-specified components.
 * All PyObject* parameters are borrowed; the new code object takes its
 * own references on success.  Returns NULL with an exception set on bad
 * arguments or memory failure.
 */
PyCodeObject *
PyCode_New(int argcount, int kwonlyargcount, int nlocals, int stacksize,
           int flags, PyObject *code, PyObject *consts, PyObject *names,
           PyObject *varnames, PyObject *freevars, PyObject *cellvars,
           PyObject *filename, PyObject *name, int firstlineno,
           PyObject *lnotab)
{
    PyCodeObject *co;
    Py_ssize_t *cell2arg = NULL;
    Py_ssize_t i, n_cellvars, n_varnames, total_args;

    /* Check argument types */
    if (argcount < 0 || kwonlyargcount < 0 || nlocals < 0 ||
        code == NULL || !PyBytes_Check(code) ||
        consts == NULL || !PyTuple_Check(consts) ||
        names == NULL || !PyTuple_Check(names) ||
        varnames == NULL || !PyTuple_Check(varnames) ||
        freevars == NULL || !PyTuple_Check(freevars) ||
        cellvars == NULL || !PyTuple_Check(cellvars) ||
        name == NULL || !PyUnicode_Check(name) ||
        filename == NULL || !PyUnicode_Check(filename) ||
        lnotab == NULL || !PyBytes_Check(lnotab)) {
        PyErr_BadInternalCall();
        return NULL;
    }

    /* Ensure that the filename is a ready Unicode string */
    if (PyUnicode_READY(filename) < 0)
        return NULL;

    /* Intern names/constants for faster lookups later; return values are
     * deliberately not checked here. */
    intern_strings(names);
    intern_strings(varnames);
    intern_strings(freevars);
    intern_strings(cellvars);
    intern_string_constants(consts);

    /* Check for any inner or outer closure references */
    n_cellvars = PyTuple_GET_SIZE(cellvars);
    if (!n_cellvars && !PyTuple_GET_SIZE(freevars)) {
        flags |= CO_NOFREE;
    }
    else {
        flags &= ~CO_NOFREE;
    }

    n_varnames = PyTuple_GET_SIZE(varnames);
    if (argcount <= n_varnames && kwonlyargcount <= n_varnames) {
        /* Never overflows. */
        total_args = (Py_ssize_t)argcount + (Py_ssize_t)kwonlyargcount +
                     ((flags & CO_VARARGS) != 0) +
                     ((flags & CO_VARKEYWORDS) != 0);
    }
    else {
        /* counts are inconsistent: force the "too small" error below */
        total_args = n_varnames + 1;
    }
    if (total_args > n_varnames) {
        PyErr_SetString(PyExc_ValueError, "code: varnames is too small");
        return NULL;
    }

    /* Create mapping between cells and arguments if needed. */
    if (n_cellvars) {
        bool used_cell2arg = false;
        cell2arg = PyMem_NEW(Py_ssize_t, n_cellvars);
        if (cell2arg == NULL) {
            PyErr_NoMemory();
            return NULL;
        }
        /* Find cells which are also arguments. */
        for (i = 0; i < n_cellvars; i++) {
            Py_ssize_t j;
            PyObject *cell = PyTuple_GET_ITEM(cellvars, i);
            cell2arg[i] = CO_CELL_NOT_AN_ARG;
            for (j = 0; j < total_args; j++) {
                PyObject *arg = PyTuple_GET_ITEM(varnames, j);
                int cmp = PyUnicode_Compare(cell, arg);
                if (cmp == -1 && PyErr_Occurred()) {
                    PyMem_FREE(cell2arg);
                    return NULL;
                }
                if (cmp == 0) {
                    cell2arg[i] = j;
                    used_cell2arg = true;
                    break;
                }
            }
        }
        /* no cell is an argument: the map is useless, drop it */
        if (!used_cell2arg) {
            PyMem_FREE(cell2arg);
            cell2arg = NULL;
        }
    }
    co = PyObject_NEW(PyCodeObject, &PyCode_Type);
    if (co == NULL) {
        if (cell2arg)
            PyMem_FREE(cell2arg);
        return NULL;
    }
    /* From here on construction cannot fail: take our own references. */
    co->co_argcount = argcount;
    co->co_kwonlyargcount = kwonlyargcount;
    co->co_nlocals = nlocals;
    co->co_stacksize = stacksize;
    co->co_flags = flags;
    Py_INCREF(code);
    co->co_code = code;
    Py_INCREF(consts);
    co->co_consts = consts;
    Py_INCREF(names);
    co->co_names = names;
    Py_INCREF(varnames);
    co->co_varnames = varnames;
    Py_INCREF(freevars);
    co->co_freevars = freevars;
    Py_INCREF(cellvars);
    co->co_cellvars = cellvars;
    co->co_cell2arg = cell2arg;
    Py_INCREF(filename);
    co->co_filename = filename;
    Py_INCREF(name);
    co->co_name = name;
    co->co_firstlineno = firstlineno;
    Py_INCREF(lnotab);
    co->co_lnotab = lnotab;
    co->co_zombieframe = NULL;
    co->co_weakreflist = NULL;
    co->co_extra = NULL;
    return co;
}
/*
 * Check one source line for a coding spec and update the tokenizer state
 * accordingly.  On a valid spec the encoding is stored in tok->encoding
 * (taking ownership of cs) and, for non-latin encodings, the decoding
 * readline is installed via set_readline.  Returns 1 on success, 0 on a
 * conflicting or unusable encoding (SyntaxError set).
 */
static int
check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
                  int set_readline(struct tok_state *, const char *))
{
    char * cs;
    int r = 1;

    if (tok->cont_line) {
        /* It's a continuation line, so it can't be a coding spec. */
        tok->read_coding_spec = 1;
        return 1;
    }
    cs = get_coding_spec(line, size);
    if (!cs) {
        Py_ssize_t i;
        for (i = 0; i < size; i++) {
            if (line[i] == '#' || line[i] == '\n' || line[i] == '\r')
                break;
            if (line[i] != ' ' && line[i] != '\t' && line[i] != '\014') {
                /* Stop checking coding spec after a line containing
                 * anything except a comment. */
                tok->read_coding_spec = 1;
                break;
            }
        }
    }
    else {
        tok->read_coding_spec = 1;
        if (tok->encoding == NULL) {
            assert(tok->decoding_state == 1); /* raw */
            /* these two encodings need no decoding machinery */
            if (strcmp(cs, "utf-8") == 0 || strcmp(cs, "iso-8859-1") == 0) {
                tok->encoding = cs;
            }
            else {
#ifdef Py_USING_UNICODE
                r = set_readline(tok, cs);
                if (r) {
                    tok->encoding = cs;
                    tok->decoding_state = -1;
                }
                else {
                    PyErr_Format(PyExc_SyntaxError,
                                 "encoding problem: %s", cs);
                    PyMem_FREE(cs);
                }
#else
                /* Without Unicode support, we cannot process the coding
                   spec. Since there won't be any Unicode literals, that
                   won't matter. */
                PyMem_FREE(cs);
#endif
            }
        }
        else {
            /* then, compare cs with BOM */
            r = (strcmp(tok->encoding, cs) == 0);
            if (!r)
                PyErr_Format(PyExc_SyntaxError,
                             "encoding problem: %s with BOM", cs);
            PyMem_FREE(cs);
        }
    }
    return r;
}
/*
 * Return the next character from the tokenizer's input, refilling the
 * buffer as needed.  Three input modes are handled:
 *   - in-memory string (tok->fp == NULL),
 *   - interactive input via PyOS_Readline (tok->prompt != NULL),
 *   - file input via decoding_fgets.
 * Returns EOF on end of input or error, with the reason in tok->done.
 */
static int
tok_nextc(register struct tok_state *tok)
{
    for (;;) {
        if (tok->cur != tok->inp) {
            return Py_CHARMASK(*tok->cur++); /* Fast path */
        }
        if (tok->done != E_OK)
            return EOF;
        if (tok->fp == NULL) {
            /* string input: advance to the end of the next line */
            char *end = strchr(tok->inp, '\n');
            if (end != NULL)
                end++;
            else {
                end = strchr(tok->inp, '\0');
                if (end == tok->inp) {
                    tok->done = E_EOF;
                    return EOF;
                }
            }
            if (tok->start == NULL)
                tok->buf = tok->cur;
            tok->line_start = tok->cur;
            tok->lineno++;
            tok->inp = end;
            return Py_CHARMASK(*tok->cur++);
        }
        if (tok->prompt != NULL) {
            /* interactive input: one line per readline call */
            char *newtok = PyOS_Readline(stdin, stdout, tok->prompt);
            if (tok->nextprompt != NULL)
                tok->prompt = tok->nextprompt;  /* switch to "... " */
            if (newtok == NULL)
                tok->done = E_INTR;
            else if (*newtok == '\0') {
                PyMem_FREE(newtok);
                tok->done = E_EOF;
            }
#if !defined(PGEN) && defined(Py_USING_UNICODE)
            else if (tok_stdin_decode(tok, &newtok) != 0)
                PyMem_FREE(newtok);
#endif
            else if (tok->start != NULL) {
                /* a token is in progress: append the new line to the
                 * existing buffer, preserving the token start offset */
                size_t start = tok->start - tok->buf;
                size_t oldlen = tok->cur - tok->buf;
                size_t newlen = oldlen + strlen(newtok);
                char *buf = tok->buf;
                buf = (char *)PyMem_REALLOC(buf, newlen+1);
                tok->lineno++;
                if (buf == NULL) {
                    PyMem_FREE(tok->buf);
                    tok->buf = NULL;
                    PyMem_FREE(newtok);
                    tok->done = E_NOMEM;
                    return EOF;
                }
                tok->buf = buf;
                tok->cur = tok->buf + oldlen;
                tok->line_start = tok->cur;
                strcpy(tok->buf + oldlen, newtok);
                PyMem_FREE(newtok);
                tok->inp = tok->buf + newlen;
                tok->end = tok->inp + 1;
                tok->start = tok->buf + start;
            }
            else {
                /* no token in progress: the new line replaces the buffer */
                tok->lineno++;
                if (tok->buf != NULL)
                    PyMem_FREE(tok->buf);
                tok->buf = newtok;
                tok->line_start = tok->buf;
                tok->cur = tok->buf;
                tok->line_start = tok->buf;
                tok->inp = strchr(tok->buf, '\0');
                tok->end = tok->inp + 1;
            }
        }
        else {
            /* file input */
            int done = 0;
            Py_ssize_t cur = 0;
            char *pt;
            if (tok->start == NULL) {
                if (tok->buf == NULL) {
                    tok->buf = (char *)
                        PyMem_MALLOC(BUFSIZ);
                    if (tok->buf == NULL) {
                        tok->done = E_NOMEM;
                        return EOF;
                    }
                    tok->end = tok->buf + BUFSIZ;
                }
                if (decoding_fgets(tok->buf, (int)(tok->end - tok->buf),
                                   tok) == NULL) {
                    tok->done = E_EOF;
                    done = 1;
                }
                else {
                    tok->done = E_OK;
                    tok->inp = strchr(tok->buf, '\0');
                    /* done when the read ended on a newline */
                    done = tok->inp[-1] == '\n';
                }
            }
            else {
                /* token in progress: remember cur as an offset so the
                 * pointers can be rebased after a realloc below */
                cur = tok->cur - tok->buf;
                if (decoding_feof(tok)) {
                    tok->done = E_EOF;
                    done = 1;
                }
                else
                    tok->done = E_OK;
            }
            tok->lineno++;
            /* Read until '\n' or EOF */
            while (!done) {
                Py_ssize_t curstart = tok->start == NULL ? -1 :
                                      tok->start - tok->buf;
                Py_ssize_t curvalid = tok->inp - tok->buf;
                Py_ssize_t newsize = curvalid + BUFSIZ;
                char *newbuf = tok->buf;
                newbuf = (char *)PyMem_REALLOC(newbuf, newsize);
                if (newbuf == NULL) {
                    tok->done = E_NOMEM;
                    tok->cur = tok->inp;
                    return EOF;
                }
                tok->buf = newbuf;
                tok->inp = tok->buf + curvalid;
                tok->end = tok->buf + newsize;
                tok->start = curstart < 0 ? NULL :
                             tok->buf + curstart;
                if (decoding_fgets(tok->inp,
                                   (int)(tok->end - tok->inp),
                                   tok) == NULL) {
                    /* Break out early on decoding errors, as tok->buf will be NULL */
                    if (tok->decoding_erred)
                        return EOF;
                    /* Last line does not end in \n, fake one */
                    strcpy(tok->inp, "\n");
                }
                tok->inp = strchr(tok->inp, '\0');
                done = tok->inp[-1] == '\n';
            }
            if (tok->buf != NULL) {
                tok->cur = tok->buf + cur;
                tok->line_start = tok->cur;
                /* replace "\r\n" with "\n" */
                /* For Mac leave the \r, giving a syntax error */
                pt = tok->inp - 2;
                if (pt >= tok->buf && *pt == '\r') {
                    *pt++ = '\n';
                    *pt = '\0';
                    tok->inp = pt;
                }
            }
        }
        if (tok->done != E_OK) {
            if (tok->prompt != NULL)
                PySys_WriteStderr("\n");
            tok->cur = tok->inp;
            return EOF;
        }
    }
    /*NOTREACHED*/
}
static PyObject * marshal_Load_internal(PyObject *py_stream, PyObject *py_callback, int skipcrc) { // Return value: New Reference char *stream; Py_ssize_t size; char *s; char *end; int type = -1; // current object type int shared = -1; // indicates whether current object is shared int i; char *error = "NO ERROR SPECIFIED"; char errortext[256]; Py_ssize_t length = 0; // generic length value. int shared_mapsize; int shared_count; // shared object index counter int *shared_map; // points to shared object mapping at end of stream PyObject **shared_obj = NULL; // holds the shared objects PyObject *obj = NULL; // currently decoded object PyObject *result = NULL; // final result int ct_ix = 0; struct Container ct_stack[MAX_DEPTH]; struct Container *container = &ct_stack[0]; if(PyString_AsStringAndSize(py_stream, &stream, &size) == -1) return NULL; s = stream; container->obj = NULL; container->type = 0; container->free = -1; container->index = 0; if(size < 6 || *s++ != PROTOCOL_ID) { int offset = 0; result = unpickle(py_stream, &offset); if(!result) goto cleanup; return result; } // how many shared objects in this stream? shared_mapsize = *(int32_t *)s; s += 4; // Security Check: assert there is enough data for that many items. if((5 + shared_mapsize*4) > size) { PyErr_Format(UnmarshalError, "Not enough room in stream for map. Wanted %d, but have only %d bytes remaining...", (shared_mapsize*4), ((int)size-5)); goto cleanup; } // ok, we got the map data right here... shared_map = (int32_t *)&stream[size - shared_mapsize * 4]; // Security Check #2: assert all map entries are between 1 and shared_mapsize for(i=0; i<shared_mapsize; i++) { if( (shared_map[i] > shared_mapsize) || (shared_map[i] < 1) ) { PyErr_SetString(UnmarshalError, "Bogus map data in marshal stream"); goto cleanup; } } // the start of which is incidentally also the end of the object data. 
end = (char *)shared_map; // create object table shared_obj = PyMem_MALLOC(shared_mapsize * sizeof(PyObject *)); if(!shared_obj) goto cleanup; // zero out object table for(shared_count = 0; shared_count < shared_mapsize; shared_count++) shared_obj[shared_count] = NULL; shared_count = 0; // start decoding. while(s < end) { // This outer loop is responsible for reading and decoding the next // object from the stream. The object is then handed to the inner loop, // which adds it to the current container, or returns it to the caller. // get type of next object to decode and check shared flag type = *s++; shared = type & SHARED_FLAG; type &= ~SHARED_FLAG; // if token uses a normal length value, read it now. if(needlength[type]) { READ_LENGTH; } else length = 0; #if MARSHAL_DEBUG // if(shared) { char text[220]; DEBUG_INDENT; sprintf(text, "pos:%4d type:%s(0x%02x) shared:%d len:%4d map:[", s-stream, tokenname[type], type, shared?1:0, length); printf(text); for(i=0; i<shared_mapsize; i++) printf("%d(%d),", shared_obj[i], shared_obj[i] ? ((PyObject *)(shared_obj[i]))->ob_refcnt : 0); printf("]\r\n"); } #endif // MARSHAL_DEBUG switch(type) { // // break statement: // attempts to add the newly decoded object (in the obj variable) to // the currently building container object. 
// // continue statement: // indicates the decoded object or type marker was handled/consumed // by the case and should _not_ be added to the currently building // container object or used in any other way; immediately decode a // new object // //--------------------------------------------------------------------- // SCALAR TYPES //--------------------------------------------------------------------- case TYPE_INT8: CHECK_SIZE(1); obj = PyInt_FromLong(*(int8_t *)s); s++; break; case TYPE_INT16: CHECK_SIZE(2); obj = PyInt_FromLong(*(int16_t *)s); s += 2; break; case TYPE_INT32: CHECK_SIZE(4); obj = PyInt_FromLong(*(int32_t *)s); s += 4; break; case TYPE_INT64: CHECK_SIZE(8); obj = PyLong_FromLongLong(*(int64_t *)s); s += 8; break; case TYPE_LONG: CHECK_SIZE(length); if(!length) obj = PyLong_FromLong(0); else { obj = _PyLong_FromByteArray((unsigned char *)s, length, 1, 1); Py_INCREF(obj); } CHECK_SHARED(obj); s += length; break; case TYPE_FLOAT: CHECK_SIZE(8); obj = PyFloat_FromDouble(*(double *)s); s += 8; break; case TYPE_CHECKSUM: CHECK_SIZE(4); if(!skipcrc && (*(uint32_t *)s != (uint32_t)adler32(1, s, (unsigned long)(end-s)))) { error = "checksum error"; goto fail; } s += 4; // because this type does not yield an object, go grab another // object right away! 
continue; //--------------------------------------------------------------------- // STRING TYPES //--------------------------------------------------------------------- case TYPE_STRINGR: if (length < 1 || length >= PyList_GET_SIZE(string_table)) { if(PyList_GET_SIZE(string_table)) PyErr_Format(UnmarshalError, "Invalid string table index %d", (int)length); else PyErr_SetString(PyExc_RuntimeError, "_stringtable not initialized"); goto cleanup; } obj = PyList_GET_ITEM(string_table, length); Py_INCREF(obj); break; case TYPE_STRING: // appears to be deprecated since machoVersion 213 CHECK_SIZE(1); length = *(unsigned char *)s++; CHECK_SIZE(length); obj = PyString_FromStringAndSize(s, length); s += length; break; case TYPE_STRING1: CHECK_SIZE(1); obj = PyString_FromStringAndSize(s, 1); s++; break; case TYPE_STREAM: // fallthrough, can be treated as string. case TYPE_STRINGL: // fallthrough, deprecated since machoVersion 213 case TYPE_BUFFER: // Type identifier re-used by CCP. treat as string. 
CHECK_SIZE(length); obj = PyString_FromStringAndSize(s, length); s += length; CHECK_SHARED(obj); break; case TYPE_UNICODE1: CHECK_SIZE(2); #ifdef Py_UNICODE_WIDE obj = _PyUnicodeUCS4_FromUCS2((void *)s, 1); #else obj = PyUnicode_FromWideChar((wchar_t *)s, 1); #endif s += 2; break; case TYPE_UNICODE: CHECK_SIZE(length*2); #ifdef Py_UNICODE_WIDE obj = _PyUnicodeUCS4_FromUCS2((void *)s, (int)length); #else obj = PyUnicode_FromWideChar((wchar_t *)s, length); #endif s += length*2; break; case TYPE_UTF8: CHECK_SIZE(length); obj = PyUnicode_DecodeUTF8(s, length, NULL); s += length; break; //--------------------------------------------------------------------- // SEQUENCE/MAPPING TYPES //--------------------------------------------------------------------- case TYPE_TUPLE1: NEW_SEQUENCE(TYPE_TUPLE, 1); continue; case TYPE_TUPLE2: NEW_SEQUENCE(TYPE_TUPLE, 2); continue; case TYPE_TUPLE: NEW_SEQUENCE(TYPE_TUPLE, (int)length); continue; case TYPE_LIST0: obj = PyList_New(0); CHECK_SHARED(obj); break; case TYPE_LIST1: NEW_SEQUENCE(TYPE_LIST, 1); continue; case TYPE_LIST: NEW_SEQUENCE(TYPE_LIST, (int)length); continue; case TYPE_DICT: if(length) { CHECK_SIZE(length*2); PUSH_CONTAINER(TYPE_DICT, (int)length*2); container->obj = PyDict_New(); container->obj2 = NULL; container->index = 0; CHECK_SHARED(container->obj); continue; } else { obj = PyDict_New(); CHECK_SHARED(obj); break; } //--------------------------------------------------------------------- // OBJECT TYPES //--------------------------------------------------------------------- case TYPE_REF: // length value is index in sharedobj array! 
if((length < 1 || length > shared_mapsize)) { error = "Shared reference index out of range"; goto fail; } if(!(obj = shared_obj[length-1])) { error = "Shared reference points to invalid object"; goto fail; } Py_INCREF(obj); //printf("Getting object %d from %d (refs:%d)\r\n", (int)obj, length-1, obj->ob_refcnt); break; case TYPE_GLOBAL: { PyObject *name; CHECK_SIZE(length); name = PyString_FromStringAndSize(s, length); if(!name) goto cleanup; s += length; if(!(obj = find_global(name))) { // exception should be set by find_global goto cleanup; } Py_DECREF(name); CHECK_SHARED(obj); break; } case TYPE_DBROW: case TYPE_INSTANCE: case TYPE_NEWOBJ: case TYPE_REDUCE: PUSH_CONTAINER(type, -1); container->obj = NULL; RESERVE_SLOT(container->index); continue; case TYPE_MARK: // this is a marker, not a real object. list/dict iterators check // for this type, but it can't be instantiated. break; default: if((obj = constants[type])) { Py_INCREF(obj); } else { error = "Unsupported type"; goto fail; } } // object decoding and construction done! if(!obj && type != TYPE_MARK) { // if obj is somehow NULL, whatever caused it is expected to have // set an exception at this point. goto cleanup; } #if MARSHAL_DEBUG /* if(obj && obj->ob_refcnt < 0) { char b[200]; sprintf(b, "type: %d, refcount: %d", type, obj->ob_refcnt); DEBUG(b); } */ if(obj) { DEBUG_INDENT; printf("`-- "); PyObject_Print(obj, stdout, 0); printf("\r\n"); fflush(stdout); } else { DEBUG_INDENT; printf("*** MARK\r\n"); } #endif // MARSHAL_DEBUG while(1) { // This inner loop does one of two things: // // - return the finished object to the caller if we're at the root // container. // // - add the object to the current container in a container- // specific manner. note that ownership of the reference is to be // given to the container object. 
#if MARSHAL_DEBUG { //char text[220]; DEBUG_INDENT; printf("container ix:%d (%08lx) type:%s[0x%02x] free:%d index:%d\r\n", ct_ix, container->obj, tokenname[container->type], container->type, container->free, container->index); } #endif // MARSHAL_DEBUG /* if(!container->obj) { error = "Root container popped off stack"; goto fail; } */ switch(container->type) { case TYPE_TUPLE: // tuples steal references. PyTuple_SET_ITEM(container->obj, container->index++, obj); break; case TYPE_LIST: // lists steal references. PyList_SET_ITEM(container->obj, container->index++, obj); break; case TYPE_DBROW: if(container->obj) { // we have an initialized DBRow. current object is a // non-scalar object for the row. append it. if(!dbrow_append_internal((PyDBRowObject *)container->obj, obj)) { // append call will have set an exception here. goto cleanup; } } else { // we now have a DBRowDescriptor, and the header data // should follow. Pull it and create the DBRow. READ_LENGTH; CHECK_SIZE(length); container->obj = PyDBRow_New((PyDBRowDescriptorObject *)obj, s, (int)length); container->free = 1+((PyDBRowDescriptorObject *)obj)->rd_num_objects; if(!container->obj) goto cleanup; Py_DECREF(obj); s += length; // add as shared object, if neccessary... UPDATE_SLOT(container->index, container->obj); } break; case TYPE_INSTANCE: { PyObject *cls; if(container->free == -1) { // create class instance if(!(cls = find_global(obj))) goto cleanup; container->obj = PyInstance_NewRaw(cls, 0); Py_DECREF(cls); if(!container->obj) goto cleanup; UPDATE_SLOT(container->index, container->obj); Py_DECREF(obj); break; } if(container->free == -2) { container->free = 1; // set state. if(!set_state(container->obj, obj)) goto cleanup; Py_DECREF(obj); break; } error = "invalid container state"; goto fail; } case TYPE_NEWOBJ: { PyObject *cls, *args, *__new__, *state; // instantiate the object... 
if(!(args = PyTuple_GetItem(obj, 0))) goto cleanup; if(!(cls = PyTuple_GetItem(args, 0))) goto cleanup; __new__ = PyObject_GetAttr(cls, py__new__); if(!__new__) goto cleanup; container->obj = PyObject_CallObject(__new__, args); Py_DECREF(__new__); if(!container->obj) goto cleanup; // add as shared object, if neccessary... UPDATE_SLOT(container->index, container->obj); // is there state data? if(PyTuple_GET_SIZE(obj) > 1) { state = PyTuple_GET_ITEM(obj, 1); if(!set_state(container->obj, state)) goto cleanup; } Py_DECREF(obj); // switch to list iterator LIST_ITERATOR; break; } case TYPE_REDUCE: { PyObject *callable, *args, *state; if(!(args = PyTuple_GetItem(obj, 1))) goto cleanup; if(!(callable = PyTuple_GET_ITEM(obj, 0))) goto cleanup; if(!(container->obj = PyObject_CallObject(callable, args))) goto cleanup; UPDATE_SLOT(container->index, container->obj); if(PyTuple_GET_SIZE(obj) > 2) { state = PyTuple_GET_ITEM(obj, 2); if(!set_state(container->obj, state)) goto cleanup; } Py_DECREF(obj); // switch to list iterator LIST_ITERATOR; break; } case TYPE_LIST_ITERATOR: if(type == TYPE_MARK) { // clear mark so nested iterator containers do not get terminated prematurely. type = -1; // decref the append method Py_XDECREF(container->obj2); container->obj2 = NULL; container->type = TYPE_DICT_ITERATOR; break; } if(!container->obj2) { // grab the append method from the container and keep // it around for speed. if(!(container->obj2 = PyObject_GetAttr(container->obj, pyappend))) goto cleanup; } if(!PyObject_CallFunctionObjArgs(container->obj2, obj, NULL)) goto cleanup; #if MARSHAL_DEBUG DEBUG_INDENT; printf("Appended %08lx to %08lx\r\n", obj, container->obj); #endif // MARSHAL_DEBUG Py_DECREF(obj); break; case TYPE_DICT_ITERATOR: if(type == TYPE_MARK) { // clear mark so nested iterator containers do not get terminated prematurely. type = -1; // we're done with dict iter. container is finished. 
container->free = 1; break; } POPULATE_DICT(container->obj2, obj); break; case TYPE_DICT: POPULATE_DICT(obj, container->obj2); break; case 0: // we're at the root. return the object to caller. result = obj; // avoid decreffing this object. obj = NULL; goto cleanup; } container->free--; if(container->free) // still room in this container. // break out of container handling to get next object for it. break; // this container is done, it is the next object to put into the // container under it! obj = container->obj; // switch context to said older container POP_CONTAINER; } // we've processed the object. clear it for next one. obj = NULL; } // if we get here, we're out of data, but it's a "clean" eof; we ran out // of data while expecting a new object... error = "Not enough objects in stream"; fail: PyErr_Format(UnmarshalError, "%s - type:0x%02x ctype:0x%02x len:%d share:%d pos:%d size:%d", error, type, container->type, (int)length, shared, (int)(s-stream), (int)(size)); cleanup: // on any error the current object we were working on will be unassociated // with anything, decref it. if decode was succesful or an object failed to // be created, it will be NULL anyway. Py_XDECREF(obj); // same story for containers... while(container->type) { Py_XDECREF(container->obj); // possibly unassociated object for dict entry? if(container->type == TYPE_DICT || container->type == TYPE_DICT_ITERATOR) { Py_XDECREF(container->obj2); } POP_CONTAINER; } if(shared_obj) { /* shared object list held a safety ref to all objects, decref em */ int i; for(i=0; i<shared_mapsize; i++) Py_XDECREF(shared_obj[i]); /* and free the list */ PyMem_FREE(shared_obj); } return result; }
/*
 * Create and register a table of exception types on `module`.
 *
 * For each entry in `exclist` (terminated by exc_name == NULL) this builds
 * a dotted "<module>.<name>" long name, creates the exception class once
 * (cached through *exc->exc_this), and adds it to the module namespace.
 *
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * BUGFIX: the original released `name` immediately after copying it into
 * `longname`, but the `failed:` label also did Py_DECREF(name) — so any
 * failure after that point decref'd the string twice.  `name` is now set
 * to NULL once released and the failure path uses Py_XDECREF.
 */
int
exceptionlist__initmodule(PyObject* module, struct exceptionlist* exclist)
{
    const struct exceptionlist* exc;
    char* longname = NULL;
    PyObject* name;
    Py_ssize_t size;
    Py_ssize_t maxsize;

    if ((name = PyObject_GetAttrString(module, "__name__")) == NULL)
        return -1;
    if (!PyString_Check(name)) {
        PyErr_SetString(PyExc_TypeError, "__name__ required an str()");
        goto failed;
    }
    size = PyString_GET_SIZE(name);

    /* Longest short exception name; sizes the scratch buffer below. */
    for (maxsize = 0, exc = exclist; exc->exc_name != NULL; exc++) {
        Py_ssize_t i = (Py_ssize_t)strlen(exc->exc_name);
        if (i > maxsize)
            maxsize = i;
    }

    /* "<module>" + "." + "<longest name>" + NUL (sizeof literals are
       generous by one byte each; kept as in the original). */
    if ((longname = PyMem_MALLOC(size + sizeof(".") + maxsize + sizeof("\0"))) == NULL) {
        PyErr_NoMemory();
        goto failed;
    }
    memcpy(longname, PyString_AS_STRING(name), size);
    Py_DECREF(name);
    name = NULL;  /* BUGFIX: prevent a second Py_DECREF at `failed` */
    longname[size++] = '.';

    for (exc = exclist; exc->exc_name != NULL; exc++) {
        PyObject* dict;
        PyObject* s;

        if ((dict = PyDict_New()) == NULL)
            goto failed;
        if ((s = PyString_FromString(exc->exc_doc)) == NULL) {
            Py_DECREF(dict);
            goto failed;
        }
        if (PyDict_SetItemString(dict, "__doc__", s) < 0) {
            Py_DECREF(dict);
            Py_DECREF(s);
            goto failed;
        }
        Py_DECREF(s);

        strcpy(&longname[size], exc->exc_name);
        /* Create the exception type only once; it may be shared/cached. */
        if (*exc->exc_this == NULL &&
            (*exc->exc_this = PyErr_NewException(longname, *exc->exc_base,
                                                 dict)) == NULL) {
            Py_DECREF(dict);
            goto failed;
        }
        Py_DECREF(dict);
    }
    PyMem_FREE(longname);

    /* PyModule_AddObject steals a reference, so add one per exposure. */
    for (exc = exclist; exc->exc_name != NULL; exc++) {
        Py_INCREF(*exc->exc_this);
        if (PyModule_AddObject(module, exc->exc_name, *exc->exc_this) < 0)
            return -1;
    }
    return 0;

failed:
    if (longname != NULL)
        PyMem_FREE(longname);
    Py_XDECREF(name);
    return -1;
}
/*
 * Parse the struct format string in self->s_format and fill in
 * self->s_size (total packed size), self->s_len (number of codes) and
 * self->s_codes (NULL-terminated array of formatcode entries).
 *
 * Two passes over the format string: the first validates, counts items
 * and computes the total size with overflow checks; the second actually
 * emits the formatcode table.
 *
 * Returns 0 on success, -1 with StructError / MemoryError set on failure.
 */
static int
prepare_s(PyStructObject *self)
{
    const formatdef *f;
    const formatdef *e;
    formatcode *codes;
    const char *s;
    const char *fmt;
    char c;
    Py_ssize_t size, len, num, itemsize;

    fmt = PyBytes_AS_STRING(self->s_format);
    /* whichtable() also skips a leading byte-order character, advancing fmt. */
    f = whichtable((char **)&fmt);
    s = fmt;
    size = 0;
    len = 0;
    /* Pass 1: validate, count codes (len) and total byte size (size). */
    while ((c = *s++) != '\0') {
        if (isspace(Py_CHARMASK(c)))
            continue;
        if ('0' <= c && c <= '9') {
            num = c - '0';
            while ('0' <= (c = *s++) && c <= '9') {
                /* overflow-safe version of
                   if (num*10 + (c - '0') > PY_SSIZE_T_MAX) { ... } */
                if (num >= PY_SSIZE_T_MAX / 10 && (
                        num > PY_SSIZE_T_MAX / 10 ||
                        (c - '0') > PY_SSIZE_T_MAX % 10))
                    goto overflow;
                num = num*10 + (c - '0');
            }
            if (c == '\0') {
                PyErr_SetString(StructError,
                                "repeat count given without format specifier");
                return -1;
            }
        }
        else
            num = 1;

        e = getentry(c, f);
        if (e == NULL)
            return -1;

        /* 's'/'p' are one code regardless of count; 'x' emits no code. */
        switch (c) {
            case 's': /* fall through */
            case 'p': len++; break;
            case 'x': break;
            default: len += num; break;
        }

        itemsize = e->size;
        size = align(size, c, e);
        if (size == -1)
            goto overflow;

        /* if (size + num * itemsize > PY_SSIZE_T_MAX) { ... } */
        if (num > (PY_SSIZE_T_MAX - size) / itemsize)
            goto overflow;
        size += num * itemsize;
    }

    /* check for overflow (of the (len + 1)-element codes allocation) */
    if ((len + 1) > (PY_SSIZE_T_MAX / sizeof(formatcode))) {
        PyErr_NoMemory();
        return -1;
    }

    self->s_size = size;
    self->s_len = len;
    codes = (formatcode *) PyMem_MALLOC((len + 1) * sizeof(formatcode));
    if (codes == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    /* Free any s_codes value left over from a previous initialization. */
    if (self->s_codes != NULL)
        PyMem_FREE(self->s_codes);
    self->s_codes = codes;

    /* Pass 2: emit the formatcode table (input already validated above). */
    s = fmt;
    size = 0;
    while ((c = *s++) != '\0') {
        if (isspace(Py_CHARMASK(c)))
            continue;
        if ('0' <= c && c <= '9') {
            num = c - '0';
            while ('0' <= (c = *s++) && c <= '9')
                num = num*10 + (c - '0');
            if (c == '\0')
                break;
        }
        else
            num = 1;

        e = getentry(c, f);

        size = align(size, c, e);
        if (c == 's' || c == 'p') {
            /* One code covering `num` bytes. */
            codes->offset = size;
            codes->size = num;
            codes->fmtdef = e;
            codes++;
            size += num;
        } else if (c == 'x') {
            /* Pad bytes: consume space, emit no code. */
            size += num;
        } else {
            /* One code per repetition. */
            while (--num >= 0) {
                codes->offset = size;
                codes->size = e->size;
                codes->fmtdef = e;
                codes++;
                size += e->size;
            }
        }
    }
    /* Sentinel entry terminating the table. */
    codes->fmtdef = NULL;
    codes->offset = size;
    codes->size = 0;

    return 0;

overflow:
    PyErr_SetString(StructError,
                    "total struct size too long");
    return -1;
}
/*
 * Re-decode a line read from interactive stdin using the encoding attached
 * to sys.stdin (a Python 2 file object), then re-encode it as UTF-8 and
 * replace *inp with the converted copy.  Also records the stdin encoding in
 * tok->encoding so the compiler knows the source encoding.
 *
 * Returns 0 on success or when no re-decoding applies (non-stdin input,
 * no file encoding, or a decode error that falls back to iso-8859-1);
 * returns -1 on fatal error with tok->done set to E_NOMEM or E_ERROR.
 */
static int
tok_stdin_decode(struct tok_state *tok, char **inp)
{
    PyObject *enc, *sysstdin, *decoded, *utf8;
    const char *encoding;
    char *converted;

    /* Only applies when sys.stdin is really the C-level stdin stream. */
    if (PySys_GetFile((char *)"stdin", NULL) != stdin)
        return 0;
    sysstdin = PySys_GetObject("stdin");
    if (sysstdin == NULL || !PyFile_Check(sysstdin))
        return 0;

    enc = ((PyFileObject *)sysstdin)->f_encoding;
    if (enc == NULL || !PyString_Check(enc))
        return 0;
    /* Keep the encoding object alive while we use its char buffer. */
    Py_INCREF(enc);

    encoding = PyString_AsString(enc);
    decoded = PyUnicode_Decode(*inp, strlen(*inp), encoding, NULL);
    if (decoded == NULL)
        goto error_clear;

    utf8 = PyUnicode_AsEncodedString(decoded, "utf-8", NULL);
    Py_DECREF(decoded);
    if (utf8 == NULL)
        goto error_clear;

    assert(PyString_Check(utf8));
    converted = new_string(PyString_AS_STRING(utf8),
                           PyString_GET_SIZE(utf8));
    Py_DECREF(utf8);
    if (converted == NULL)
        goto error_nomem;

    /* Hand ownership of the UTF-8 copy back to the caller's buffer. */
    PyMem_FREE(*inp);
    *inp = converted;
    if (tok->encoding != NULL)
        PyMem_FREE(tok->encoding);
    tok->encoding = new_string(encoding, strlen(encoding));
    if (tok->encoding == NULL)
        goto error_nomem;

    Py_DECREF(enc);
    return 0;

error_nomem:
    Py_DECREF(enc);
    tok->done = E_NOMEM;
    return -1;

error_clear:
    Py_DECREF(enc);
    if (!PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
        tok->done = E_ERROR;
        return -1;
    }
    /* Fallback to iso-8859-1: for backward compatibility */
    PyErr_Clear();
    return 0;
}
/*
 * Write line `lineno` of source file `filename` to file object `f`,
 * stripped of its leading whitespace and preceded by `indent` spaces,
 * followed by a newline.  Used when printing tracebacks.
 *
 * The file is opened via the io module (with a sys.path search fallback),
 * decoded with its declared source encoding (PEP 263) or UTF-8.
 *
 * Returns 0 on success or when the source simply cannot be shown
 * (missing file, bad descriptor, decode trouble); -1 on write error or
 * when the io module cannot be imported.
 */
int
_Py_DisplaySourceLine(PyObject *f, PyObject *filename, int lineno, int indent)
{
    int err = 0;
    int fd;
    int i;
    char *found_encoding;
    char *encoding;
    PyObject *io;
    PyObject *binary;
    PyObject *fob = NULL;
    PyObject *lineobj = NULL;
    PyObject *res;
    char buf[MAXPATHLEN+1];
    int kind;
    void *data;

    /* open the file */
    if (filename == NULL)
        return 0;
    io = PyImport_ImportModuleNoBlock("io");
    if (io == NULL)
        return -1;
    binary = _PyObject_CallMethodId(io, &PyId_open, "Os", filename, "rb");

    if (binary == NULL) {
        /* Direct open failed; search sys.path for the module source. */
        PyErr_Clear();
        binary = _Py_FindSourceFile(filename, buf, sizeof(buf), io);
        if (binary == NULL) {
            Py_DECREF(io);
            return -1;
        }
    }

    /* use the right encoding to decode the file as unicode */
    fd = PyObject_AsFileDescriptor(binary);
    if (fd < 0) {
        Py_DECREF(io);
        Py_DECREF(binary);
        return 0;
    }
    found_encoding = PyTokenizer_FindEncodingFilename(fd, filename);
    if (found_encoding == NULL)
        PyErr_Clear();
    encoding = (found_encoding != NULL) ? found_encoding : "utf-8";
    /* Reset position (the encoding probe consumed part of the file). */
    if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
        Py_DECREF(io);
        Py_DECREF(binary);
        PyMem_FREE(found_encoding);
        return 0;
    }
    fob = _PyObject_CallMethodId(io, &PyId_TextIOWrapper, "Os",
                                 binary, encoding);
    Py_DECREF(io);
    Py_DECREF(binary);
    PyMem_FREE(found_encoding);
    if (fob == NULL) {
        PyErr_Clear();
        return 0;
    }

    /* get the line number lineno */
    for (i = 0; i < lineno; i++) {
        Py_XDECREF(lineobj);
        lineobj = PyFile_GetLine(fob, -1);
        if (!lineobj) {
            err = -1;
            break;
        }
    }
    res = _PyObject_CallMethodId(fob, &PyId_close, "");
    if (res)
        Py_DECREF(res);
    else
        PyErr_Clear();
    Py_DECREF(fob);
    if (!lineobj || !PyUnicode_Check(lineobj)) {
        Py_XDECREF(lineobj);
        return err;
    }

    /* remove the indentation of the line */
    kind = PyUnicode_KIND(lineobj);
    data = PyUnicode_DATA(lineobj);
    for (i=0; i < PyUnicode_GET_LENGTH(lineobj); i++) {
        Py_UCS4 ch = PyUnicode_READ(kind, data, i);
        if (ch != ' ' && ch != '\t' && ch != '\014')
            break;
    }
    if (i) {
        PyObject *truncated;
        truncated = PyUnicode_Substring(lineobj, i,
                                        PyUnicode_GET_LENGTH(lineobj));
        if (truncated) {
            Py_DECREF(lineobj);
            lineobj = truncated;
        } else {
            PyErr_Clear();
        }
    }

    /* Write some spaces before the line (10 at a time). */
    strcpy(buf, "          ");
    assert (strlen(buf) == 10);
    while (indent > 0) {
        if (indent < 10)
            buf[indent] = '\0';
        err = PyFile_WriteString(buf, f);
        if (err != 0)
            break;
        indent -= 10;
    }

    /* finally display the line */
    if (err == 0)
        err = PyFile_WriteObject(lineobj, f, Py_PRINT_RAW);
    Py_DECREF(lineobj);
    if (err == 0)
        err = PyFile_WriteString("\n", f);
    return err;
}
/*
 * Write line `lineno` of source file `filename` to file object `f`,
 * stripped of leading whitespace and preceded by `indent` spaces, then a
 * newline.  Used when printing tracebacks.
 *
 * Returns 0 on success or when the source simply cannot be shown;
 * -1 on write error or when the io module cannot be imported.
 *
 * BUGFIXES (made consistent with the newer sibling implementation):
 *  - clear the pending exception from the failed io.open() before trying
 *    the sys.path fallback;
 *  - check PyObject_AsFileDescriptor() for failure before handing the
 *    descriptor to PyTokenizer_FindEncoding()/lseek().
 */
int
_Py_DisplaySourceLine(PyObject *f, PyObject *filename, int lineno, int indent)
{
    int err = 0;
    int fd;
    int i;
    char *found_encoding;
    char *encoding;
    PyObject *io;
    PyObject *binary;
    PyObject *fob = NULL;
    PyObject *lineobj = NULL;
    PyObject *res;
    char buf[MAXPATHLEN+1];
    Py_UNICODE *u, *p;
    Py_ssize_t len;

    /* open the file */
    if (filename == NULL)
        return 0;
    io = PyImport_ImportModuleNoBlock("io");
    if (io == NULL)
        return -1;
    binary = PyObject_CallMethod(io, "open", "Os", filename, "rb");

    if (binary == NULL) {
        /* BUGFIX: don't run the fallback with a live exception set. */
        PyErr_Clear();
        binary = _Py_FindSourceFile(filename, buf, sizeof(buf), io);
        if (binary == NULL) {
            Py_DECREF(io);
            return 0;
        }
    }

    /* use the right encoding to decode the file as unicode */
    fd = PyObject_AsFileDescriptor(binary);
    if (fd < 0) {
        /* BUGFIX: invalid descriptor — bail out instead of passing -1 on. */
        PyErr_Clear();
        Py_DECREF(io);
        Py_DECREF(binary);
        return 0;
    }
    found_encoding = PyTokenizer_FindEncoding(fd);
    encoding = (found_encoding != NULL) ? found_encoding : "utf-8";
    lseek(fd, 0, 0); /* Reset position */
    fob = PyObject_CallMethod(io, "TextIOWrapper", "Os", binary, encoding);
    Py_DECREF(io);
    Py_DECREF(binary);
    PyMem_FREE(found_encoding);

    if (fob == NULL) {
        PyErr_Clear();
        return 0;
    }

    /* get the line number lineno */
    for (i = 0; i < lineno; i++) {
        Py_XDECREF(lineobj);
        lineobj = PyFile_GetLine(fob, -1);
        if (!lineobj) {
            err = -1;
            break;
        }
    }
    res = PyObject_CallMethod(fob, "close", "");
    if (res)
        Py_DECREF(res);
    else
        PyErr_Clear();
    Py_DECREF(fob);
    if (!lineobj || !PyUnicode_Check(lineobj)) {
        Py_XDECREF(lineobj);
        return err;
    }

    /* remove the indentation of the line */
    u = PyUnicode_AS_UNICODE(lineobj);
    len = PyUnicode_GET_SIZE(lineobj);
    for (p=u; *p == ' ' || *p == '\t' || *p == '\014'; p++)
        len--;
    if (u != p) {
        PyObject *truncated;
        truncated = PyUnicode_FromUnicode(p, len);
        if (truncated) {
            Py_DECREF(lineobj);
            lineobj = truncated;
        } else {
            PyErr_Clear();
        }
    }

    /* Write some spaces before the line (10 at a time). */
    strcpy(buf, "          ");
    assert (strlen(buf) == 10);
    while (indent > 0) {
        if (indent < 10)
            buf[indent] = '\0';
        err = PyFile_WriteString(buf, f);
        if (err != 0)
            break;
        indent -= 10;
    }

    /* finally display the line */
    if (err == 0)
        err = PyFile_WriteObject(lineobj, f, Py_PRINT_RAW);
    Py_DECREF(lineobj);
    if (err == 0)
        err = PyFile_WriteString("\n", f);
    return err;
}
/*
 * Build a complex object (of subtype `type`) from the string `v`, which
 * may be a str, unicode, or any object exposing a character buffer.
 * Accepts the same textual forms as repr(complex), including an optional
 * surrounding pair of parentheses.
 *
 * Returns a new reference, or NULL with ValueError/TypeError/MemoryError
 * set on malformed input or conversion failure.
 */
static PyObject *
complex_subtype_from_string(PyTypeObject *type, PyObject *v)
{
    const char *s, *start;
    char *end;
    double x=0.0, y=0.0, z;
    int got_bracket=0;
#ifdef Py_USING_UNICODE
    char *s_buffer = NULL;  /* owned decimal-encoded copy for unicode input */
#endif
    Py_ssize_t len;

    if (PyString_Check(v)) {
        s = PyString_AS_STRING(v);
        len = PyString_GET_SIZE(v);
    }
#ifdef Py_USING_UNICODE
    else if (PyUnicode_Check(v)) {
        /* Normalize unicode digits to ASCII before parsing. */
        s_buffer = (char *)PyMem_MALLOC(PyUnicode_GET_SIZE(v)+1);
        if (s_buffer == NULL)
            return PyErr_NoMemory();
        if (PyUnicode_EncodeDecimal(PyUnicode_AS_UNICODE(v),
                                    PyUnicode_GET_SIZE(v),
                                    s_buffer,
                                    NULL))
            goto error;
        s = s_buffer;
        len = strlen(s);
    }
#endif
    else if (PyObject_AsCharBuffer(v, &s, &len)) {
        PyErr_SetString(PyExc_TypeError,
                        "complex() arg is not a string");
        return NULL;
    }

    /* position on first nonblank */
    start = s;
    while (Py_ISSPACE(*s))
        s++;
    if (*s == '(') {
        /* Skip over possible bracket from repr(). */
        got_bracket = 1;
        s++;
        while (Py_ISSPACE(*s))
            s++;
    }

    /* a valid complex string usually takes one of the three forms:

         <float>                  - real part only
         <float>j                 - imaginary part only
         <float><signed-float>j   - real and imaginary parts

       where <float> represents any numeric string that's accepted by the
       float constructor (including 'nan', 'inf', 'infinity', etc.), and
       <signed-float> is any string of the form <float> whose first
       character is '+' or '-'.

       For backwards compatibility, the extra forms

         <float><sign>j
         <sign>j
         j

       are also accepted, though support for these forms may be removed from
       a future version of Python.
    */

    /* first look for forms starting with <float> */
    z = PyOS_string_to_double(s, &end, NULL);
    if (z == -1.0 && PyErr_Occurred()) {
        if (PyErr_ExceptionMatches(PyExc_ValueError))
            PyErr_Clear();
        else
            goto error;
    }
    if (end != s) {
        /* all 4 forms starting with <float> land here */
        s = end;
        if (*s == '+' || *s == '-') {
            /* <float><signed-float>j | <float><sign>j */
            x = z;
            y = PyOS_string_to_double(s, &end, NULL);
            if (y == -1.0 && PyErr_Occurred()) {
                if (PyErr_ExceptionMatches(PyExc_ValueError))
                    PyErr_Clear();
                else
                    goto error;
            }
            if (end != s)
                /* <float><signed-float>j */
                s = end;
            else {
                /* <float><sign>j */
                y = *s == '+' ? 1.0 : -1.0;
                s++;
            }
            if (!(*s == 'j' || *s == 'J'))
                goto parse_error;
            s++;
        }
        else if (*s == 'j' || *s == 'J') {
            /* <float>j */
            s++;
            y = z;
        }
        else
            /* <float> */
            x = z;
    }
    else {
        /* not starting with <float>; must be <sign>j or j */
        if (*s == '+' || *s == '-') {
            /* <sign>j */
            y = *s == '+' ? 1.0 : -1.0;
            s++;
        }
        else
            /* j */
            y = 1.0;
        if (!(*s == 'j' || *s == 'J'))
            goto parse_error;
        s++;
    }

    /* trailing whitespace and closing bracket */
    while (Py_ISSPACE(*s))
        s++;
    if (got_bracket) {
        /* if there was an opening parenthesis, then the corresponding
           closing parenthesis should be right here */
        if (*s != ')')
            goto parse_error;
        s++;
        while (Py_ISSPACE(*s))
            s++;
    }

    /* we should now be at the end of the string */
    if (s-start != len)
        goto parse_error;

#ifdef Py_USING_UNICODE
    if (s_buffer)
        PyMem_FREE(s_buffer);
#endif
    return complex_subtype_from_doubles(type, x, y);

parse_error:
    PyErr_SetString(PyExc_ValueError,
                    "complex() arg is a malformed string");
error:
#ifdef Py_USING_UNICODE
    if (s_buffer)
        PyMem_FREE(s_buffer);
#endif
    return NULL;
}
/*
 * Parse input coming from the given tokenizer structure, driving the
 * parser for grammar `g` starting at symbol `start`.  TYPE_IGNORE tokens
 * (type comments) are collected and attached to the ENDMARKER of a
 * file_input tree.  Consumes (frees) `tok` in all cases.
 *
 * Returns the parse tree on success, or NULL with err_ret filled in.
 *
 * BUGFIXES:
 *  - the token text `str` was leaked on the TYPE_IGNORE path (it is never
 *    handed to the parser there);
 *  - the `type_ignores` array was only deallocated on the success path,
 *    leaking on every error break and on Ta27Parser_New() failure.
 */
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0;
    growable_int_array type_ignores;  /* line numbers of TYPE_IGNORE tokens */

    if (!growable_int_array_init(&type_ignores, 10)) {
        err_ret->error = E_NOMEM;
        Ta27Tokenizer_Free(tok);
        return NULL;
    }

    if ((ps = Ta27Parser_New(g, start)) == NULL) {
        fprintf(stderr, "no mem for new parser\n");
        err_ret->error = E_NOMEM;
        growable_int_array_deallocate(&type_ignores);  /* BUGFIX: was leaked */
        Ta27Tokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    if (*flags & PyPARSE_PRINT_IS_FUNCTION) {
        ps->p_flags |= CO_FUTURE_PRINT_FUNCTION;
    }
    if (*flags & PyPARSE_UNICODE_LITERALS) {
        ps->p_flags |= CO_FUTURE_UNICODE_LITERALS;
    }
#endif

    for (;;) {
        char *a, *b;
        int type;
        size_t len;
        char *str;
        int col_offset;

        type = Ta27Tokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            fprintf(stderr, "no mem for next token\n");
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#endif
        if (a >= tok->line_start)
            col_offset = a - tok->line_start;
        else
            col_offset = -1;

        if (type == TYPE_IGNORE) {
            /* BUGFIX: the text is not passed to the parser on this path,
               so it must be freed here or it leaks for every type comment. */
            PyObject_FREE(str);
            if (!growable_int_array_add(&type_ignores, tok->lineno)) {
                err_ret->error = E_NOMEM;
                break;
            }
            continue;
        }

        /* Ta27Parser_AddToken takes ownership of str on success/E_DONE. */
        if ((err_ret->error =
             Ta27Parser_AddToken(ps, (int)type, str, tok->lineno, col_offset,
                                 &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }

    if (err_ret->error == E_DONE) {
        n = ps->p_tree;
        ps->p_tree = NULL;

        if (n->n_type == file_input) {
            /* Put type_ignore nodes in the ENDMARKER of file_input. */
            int num;
            node *ch;
            size_t i;

            num = NCH(n);
            ch = CHILD(n, num - 1);
            REQ(ch, ENDMARKER);

            for (i = 0; i < type_ignores.num_items; i++) {
                Ta27Node_AddChild(ch, TYPE_IGNORE, NULL,
                                  type_ignores.items[i], 0);
            }
        }
    }
    else
        n = NULL;
    /* BUGFIX: deallocate unconditionally; previously leaked on errors. */
    growable_int_array_deallocate(&type_ignores);

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    *flags = ps->p_flags;
#endif
    Ta27Parser_Delete(ps);

    if (n == NULL) {
        if (tok->lineno <= 1 && tok->done == E_EOF)
            err_ret->error = E_EOF;
        err_ret->lineno = tok->lineno;
        if (tok->buf != NULL) {
            char *text = NULL;
            size_t len;
            assert(tok->cur - tok->buf < INT_MAX);
            err_ret->offset = (int)(tok->cur - tok->buf);
            len = tok->inp - tok->buf;
#ifdef Py_USING_UNICODE
            text = Ta27Tokenizer_RestoreEncoding(tok, len, &err_ret->offset);
#endif
            if (text == NULL) {
                text = (char *) PyObject_MALLOC(len + 1);
                if (text != NULL) {
                    if (len > 0)
                        strncpy(text, tok->buf, len);
                    text[len] = '\0';
                }
            }
            err_ret->text = text;
        }
    } else if (tok->encoding != NULL) {
        /* 'nodes->n_str' uses PyObject_*, while 'tok->encoding' was
         * allocated using PyMem_
         */
        node* r = Ta27Node_New(encoding_decl);
        if (r)
            r->n_str = PyObject_MALLOC(strlen(tok->encoding)+1);
        if (!r || !r->n_str) {
            err_ret->error = E_NOMEM;
            if (r)
                PyObject_FREE(r);
            n = NULL;
            goto done;
        }
        strcpy(r->n_str, tok->encoding);
        PyMem_FREE(tok->encoding);
        tok->encoding = NULL;
        r->n_nchildren = 1;
        r->n_child = n;
        n = r;
    }

done:
    Ta27Tokenizer_Free(tok);
    return n;
}
/*
 * Construct a new code object from the given components.
 *
 * All tuple/bytes/str arguments are validated, selected string constants
 * and all name tuples are interned, and a compact cell->argument mapping
 * (co_cell2arg) is built when any cell variable shadows an argument.
 *
 * Returns a new reference, or NULL with an exception set on bad arguments
 * or allocation failure.
 */
PyCodeObject *
PyCode_New(int argcount, int kwonlyargcount, int nlocals, int stacksize,
           int flags, PyObject *code, PyObject *consts, PyObject *names,
           PyObject *varnames, PyObject *freevars, PyObject *cellvars,
           PyObject *filename, PyObject *name, int firstlineno,
           PyObject *lnotab)
{
    PyCodeObject *co;
    unsigned char *cell2arg = NULL;
    Py_ssize_t i, n_cellvars;

    /* Check argument types */
    if (argcount < 0 || kwonlyargcount < 0 || nlocals < 0 ||
        code == NULL ||
        consts == NULL || !PyTuple_Check(consts) ||
        names == NULL || !PyTuple_Check(names) ||
        varnames == NULL || !PyTuple_Check(varnames) ||
        freevars == NULL || !PyTuple_Check(freevars) ||
        cellvars == NULL || !PyTuple_Check(cellvars) ||
        name == NULL || !PyUnicode_Check(name) ||
        filename == NULL || !PyUnicode_Check(filename) ||
        lnotab == NULL || !PyBytes_Check(lnotab) ||
        !PyObject_CheckReadBuffer(code)) {
        PyErr_BadInternalCall();
        return NULL;
    }

    /* Ensure that the filename is a ready Unicode string */
    if (PyUnicode_READY(filename) < 0)
        return NULL;

    n_cellvars = PyTuple_GET_SIZE(cellvars);
    /* NOTE(review): intern_strings() results are not checked here --
       confirm it cannot fail for these tuples. */
    intern_strings(names);
    intern_strings(varnames);
    intern_strings(freevars);
    intern_strings(cellvars);
    /* Intern selected string constants (identifier-like ones only). */
    for (i = PyTuple_GET_SIZE(consts); --i >= 0; ) {
        PyObject *v = PyTuple_GetItem(consts, i);
        if (!all_name_chars(v))
            continue;
        PyUnicode_InternInPlace(&PyTuple_GET_ITEM(consts, i));
    }
    /* Create mapping between cells and arguments if needed. */
    if (n_cellvars) {
        Py_ssize_t total_args = argcount + kwonlyargcount +
            ((flags & CO_VARARGS) != 0) + ((flags & CO_VARKEYWORDS) != 0);
        Py_ssize_t alloc_size = sizeof(unsigned char) * n_cellvars;
        int used_cell2arg = 0;
        cell2arg = PyMem_MALLOC(alloc_size);
        if (cell2arg == NULL)
            return NULL;
        memset(cell2arg, CO_CELL_NOT_AN_ARG, alloc_size);
        /* Find cells which are also arguments. */
        for (i = 0; i < n_cellvars; i++) {
            Py_ssize_t j;
            PyObject *cell = PyTuple_GET_ITEM(cellvars, i);
            for (j = 0; j < total_args; j++) {
                PyObject *arg = PyTuple_GET_ITEM(varnames, j);
                /* NOTE(review): a PyUnicode_Compare() error (-1 with an
                   exception set) is indistinguishable from "less than"
                   here -- presumably these are always valid strings. */
                if (!PyUnicode_Compare(cell, arg)) {
                    /* NOTE(review): j is narrowed to unsigned char --
                       assumes fewer than 256 arguments. */
                    cell2arg[i] = j;
                    used_cell2arg = 1;
                    break;
                }
            }
        }
        if (!used_cell2arg) {
            PyMem_FREE(cell2arg);
            cell2arg = NULL;
        }
    }
    co = PyObject_NEW(PyCodeObject, &PyCode_Type);
    if (co == NULL) {
        if (cell2arg)
            PyMem_FREE(cell2arg);
        return NULL;
    }
    co->co_argcount = argcount;
    co->co_kwonlyargcount = kwonlyargcount;
    co->co_nlocals = nlocals;
    co->co_stacksize = stacksize;
    co->co_flags = flags;
    Py_INCREF(code);
    co->co_code = code;
    Py_INCREF(consts);
    co->co_consts = consts;
    Py_INCREF(names);
    co->co_names = names;
    Py_INCREF(varnames);
    co->co_varnames = varnames;
    Py_INCREF(freevars);
    co->co_freevars = freevars;
    Py_INCREF(cellvars);
    co->co_cellvars = cellvars;
    co->co_cell2arg = cell2arg;
    Py_INCREF(filename);
    co->co_filename = filename;
    Py_INCREF(name);
    co->co_name = name;
    co->co_firstlineno = firstlineno;
    Py_INCREF(lnotab);
    co->co_lnotab = lnotab;
    co->co_zombieframe = NULL;
    co->co_weakreflist = NULL;
    return co;
}
/*
 * Parse input coming from the given tokenizer structure, driving the
 * parser for grammar `g` starting at symbol `start`.  Consumes (frees)
 * `tok` in all cases.
 *
 * Returns the parse tree on success, or NULL with err_ret filled in.
 *
 * BUGFIX (cf. CPython bpo-31566): on the "Barry as BDFL" syntax-error
 * path, err_ret->text was assigned a string literal even though callers
 * release err_ret->text with PyObject_FREE(), crashing on free of static
 * memory.  A heap-allocated copy of the message is stored instead.
 */
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0;

    if ((ps = PyParser_New(g, start)) == NULL) {
        err_ret->error = E_NOMEM;
        PyTokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    if (*flags & PyPARSE_BARRY_AS_BDFL)
        ps->p_flags |= CO_FUTURE_BARRY_AS_BDFL;
#endif

    for (;;) {
        char *a, *b;
        int type;
        size_t len;
        char *str;
        int col_offset;

        type = PyTokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
        if (type == NOTEQUAL) {
            if (!(ps->p_flags & CO_FUTURE_BARRY_AS_BDFL) &&
                    strcmp(str, "!=")) {
                PyObject_FREE(str);
                err_ret->error = E_SYNTAX;
                break;
            }
            else if ((ps->p_flags & CO_FUTURE_BARRY_AS_BDFL) &&
                    strcmp(str, "<>")) {
                /* BUGFIX: err_ret->text is freed by the caller with
                   PyObject_FREE(); it must never point at a literal. */
                static const char msg[] =
                    "with Barry as BDFL, use '<>' instead of '!='";
                PyObject_FREE(str);
                err_ret->text = (char *) PyObject_MALLOC(sizeof(msg));
                if (err_ret->text != NULL)
                    memcpy(err_ret->text, msg, sizeof(msg));
                err_ret->error = E_SYNTAX;
                break;
            }
        }
#endif
        if (a >= tok->line_start)
            col_offset = a - tok->line_start;
        else
            col_offset = -1;

        /* PyParser_AddToken takes ownership of str on success/E_DONE. */
        if ((err_ret->error =
             PyParser_AddToken(ps, (int)type, str, tok->lineno, col_offset,
                               &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }

    if (err_ret->error == E_DONE) {
        n = ps->p_tree;
        ps->p_tree = NULL;

#ifndef PGEN
        /* Check that the source for a single input statement really
           is a single statement by looking at what is left in the
           buffer after parsing.  Trailing whitespace and comments
           are OK. */
        if (start == single_input) {
            char *cur = tok->cur;
            char c = *tok->cur;

            for (;;) {
                while (c == ' ' || c == '\t' || c == '\n' || c == '\014')
                    c = *++cur;

                if (!c)
                    break;

                if (c != '#') {
                    err_ret->error = E_BADSINGLE;
                    PyNode_Free(n);
                    n = NULL;
                    break;
                }

                /* Suck up comment. */
                while (c && c != '\n')
                    c = *++cur;
            }
        }
#endif
    }
    else
        n = NULL;

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    *flags = ps->p_flags;
#endif
    PyParser_Delete(ps);

    if (n == NULL) {
        if (tok->done == E_EOF)
            err_ret->error = E_EOF;
        err_ret->lineno = tok->lineno;
        if (tok->buf != NULL) {
            size_t len;
            assert(tok->cur - tok->buf < INT_MAX);
            err_ret->offset = (int)(tok->cur - tok->buf);
            len = tok->inp - tok->buf;
            err_ret->text = (char *) PyObject_MALLOC(len + 1);
            if (err_ret->text != NULL) {
                if (len > 0)
                    strncpy(err_ret->text, tok->buf, len);
                err_ret->text[len] = '\0';
            }
        }
    } else if (tok->encoding != NULL) {
        /* 'nodes->n_str' uses PyObject_*, while 'tok->encoding' was
         * allocated using PyMem_
         */
        node* r = PyNode_New(encoding_decl);
        if (r)
            r->n_str = PyObject_MALLOC(strlen(tok->encoding)+1);
        if (!r || !r->n_str) {
            err_ret->error = E_NOMEM;
            if (r)
                PyObject_FREE(r);
            n = NULL;
            goto done;
        }
        strcpy(r->n_str, tok->encoding);
        PyMem_FREE(tok->encoding);
        tok->encoding = NULL;
        r->n_nchildren = 1;
        r->n_child = n;
        n = r;
    }

done:
    PyTokenizer_Free(tok);
    return n;
}