Code Example #1
File: node.c Project: GuardianRG/static-python
static void
freechildren(node *n)
{
    int i;
    for (i = NCH(n); --i >= 0; )
        freechildren(CHILD(n, i));
    if (n->n_child != NULL)
        PyObject_FREE(n->n_child);
    if (STR(n) != NULL)
        PyObject_FREE(STR(n));
}
Code Example #2
File: sgmlop.c Project: Jbaumotte/web2py
static void
element_dealloc(ElementObject* self)
{
    int i;

    /* FIXME: the parent attribute means that a tree will contain
       circular references.  this will be fixed ("how?" is the big
       question...) */

    if (self->children) {
        for (i = 0; i < self->child_count; i++)
            Py_DECREF(self->children[i]);
        free(self->children);
    }

    /* break the backlink */
    Py_DECREF(self->parent);

    /* discard attributes */
    Py_DECREF(self->tag);
    Py_XDECREF(self->attrib);
    Py_XDECREF(self->text);
    Py_XDECREF(self->suffix);

    RELEASE(sizeof(ElementObject), "destroy element");

    PyObject_FREE(self);
}
Code Example #3
static void
keyobject_dealloc(keyobject *ko)
{
    Py_DECREF(ko->cmp);
    Py_XDECREF(ko->object);
    PyObject_FREE(ko);
}
Code Example #4
File: filewrapper.c Project: ybrs/the-giant
static void
FileWrapper_dealloc(PyObject* self)
{
  PyFile_DecUseCount((PyFileObject*)((FileWrapper*)self)->file);
  Py_DECREF(((FileWrapper*)self)->file);
  PyObject_FREE(self);
}
Code Example #5
File: _subprocess.c Project: Oize/pspstacklesspython
static void
sp_handle_dealloc(sp_handle_object* self)
{
	if (self->handle != INVALID_HANDLE_VALUE)
		CloseHandle(self->handle);
	PyObject_FREE(self);
}
Code Example #6
static void iCompoundProperty_delete(PyObject * self)
{
   ALEMBIC_TRY_STATEMENT
      delete ((iCompoundProperty*)self)->mBaseCompoundProperty;
      PyObject_FREE(self);
   ALEMBIC_VOID_CATCH_STATEMENT
}
Code Example #7
File: vfs-file-info.c Project: GNOME/gnome-python
static void
pygvfinfo_dealloc(PyGnomeVFSFileInfo *self)
{
    if (self->finfo)
	gnome_vfs_file_info_unref(self->finfo);
    PyObject_FREE(self);
}
Code Example #8
File: content_model.c Project: H1d3r/binary_blobs
static void Context_Del(Context *self)
{
  if (self->next) {
    Context_Del(self->next);
  }
  PyObject_FREE(self);
}
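
Context_Del above tears down a singly linked chain of contexts recursively, so a very long chain costs one stack frame per node. Purely as a sketch (a hypothetical struct stands in for Context, and plain free() for PyObject_FREE), the same teardown can be written iteratively:

#include <stdlib.h>

/* Hypothetical singly linked context; stands in for Context above. */
typedef struct demo_ctx {
    struct demo_ctx *next;
} demo_ctx;

/* Iterative equivalent of the recursive Context_Del(): frees every node
   in the chain while using constant stack space. */
static void demo_ctx_del(demo_ctx *self)
{
    while (self != NULL) {
        demo_ctx *next = self->next;   /* read the link before freeing */
        free(self);                    /* PyObject_FREE() in the original */
        self = next;
    }
}

int main(void)
{
    demo_ctx *head = malloc(sizeof(*head));
    head->next = malloc(sizeof(*head->next));
    head->next->next = NULL;
    demo_ctx_del(head);
    return 0;
}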
Code Example #9
File: node.c Project: dany74q/Awesome-Python-3
static void
freechildren(node *n)
{
    int i;
    for (i = NCH(n); --i >= 0; )
        freechildren(CHILD(n, i));
    if (n->n_child != NULL)
        PyObject_FREE(n->n_child);
    /*
        Print statement modification:
        Free all nodes except those marked as modified. Those are
        virtual nodes in the parse tree that do not actually reside
        in the statement's memory, so freeing them is dangerous.
    */
    if (STR(n) != NULL && (!n->n_wasModified))
        PyObject_FREE(STR(n));
}
Code Example #10
File: node.c Project: GuardianRG/static-python
void
PyNode_Free(node *n)
{
    if (n != NULL) {
        freechildren(n);
        PyObject_FREE(n);
    }
}
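
freechildren() (Code Example #1) and PyNode_Free() here split tree teardown into a recursive post-order pass over the inline child array followed by a free of the root. A minimal standalone sketch of the same pattern, using a hypothetical node type and plain malloc/free in place of PyObject_MALLOC/PyObject_FREE:

#include <stdlib.h>
#include <string.h>

/* Hypothetical parse-tree node: children live inline in one array
   (like n_child above) and each node may own a string (like STR(n)). */
typedef struct demo_node {
    struct demo_node *children;
    int               n_children;
    char             *str;
} demo_node;

/* Post-order release of everything a node owns, mirroring freechildren(). */
static void demo_freechildren(demo_node *n)
{
    int i;
    for (i = n->n_children; --i >= 0; )
        demo_freechildren(&n->children[i]);
    free(n->children);   /* free(NULL) is a no-op, like the NULL checks above */
    free(n->str);
}

/* Counterpart of PyNode_Free(): release the subtree, then the root itself. */
static void demo_node_free(demo_node *n)
{
    if (n != NULL) {
        demo_freechildren(n);
        free(n);
    }
}

int main(void)
{
    demo_node *root = calloc(1, sizeof(*root));
    root->children = calloc(1, sizeof(*root->children));
    root->n_children = 1;
    root->children[0].str = strdup("leaf");
    demo_node_free(root);
    return 0;
}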
Code Example #11
static void iXformProperty_delete(PyObject * self)
{
   ALEMBIC_TRY_STATEMENT
   iXformProperty * prop = (iXformProperty*)self;
   delete(prop->mXformSchema);
   PyObject_FREE(prop);
   ALEMBIC_VOID_CATCH_STATEMENT
}
Code Example #12
File: gcmodule.c Project: JupiterSmalltalk/openqwaq
void
PyObject_GC_Del(void *op)
{
	PyGC_Head *g = AS_GC(op);
	if (IS_TRACKED(op))
		gc_list_remove(g);
	if (generations[0].count > 0) {
		generations[0].count--;
	}
	PyObject_FREE(g);
}
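
PyObject_GC_Del receives the object pointer but frees g, the GC header obtained via AS_GC(), because that header is allocated immediately in front of the object. A standalone sketch of the header-before-payload layout (types and macro names here are made up, not CPython's):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical header/payload types standing in for PyGC_Head and the object. */
typedef struct { void *prev, *next; } demo_gc_head;
typedef struct { int value; } demo_obj;

/* Step back over the header to recover the pointer that was allocated,
   or forward past it to reach the payload. */
#define DEMO_AS_GC(op)   ((demo_gc_head *)(op) - 1)
#define DEMO_FROM_GC(g)  ((demo_obj *)((demo_gc_head *)(g) + 1))

int main(void)
{
    demo_gc_head *g = malloc(sizeof(demo_gc_head) + sizeof(demo_obj));
    demo_obj *op = DEMO_FROM_GC(g);
    op->value = 42;
    printf("%d\n", DEMO_FROM_GC(DEMO_AS_GC(op))->value);
    free(DEMO_AS_GC(op));   /* free the header block, as PyObject_GC_Del frees g */
    return 0;
}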
Code Example #13
File: pgen.c Project: 10sr/cpython
grammar *
pgen(node *n)
{
    nfagrammar *gr;
    grammar *g;

    gr = metacompile(n);
    g = maketables(gr);
    translatelabels(g);
    addfirstsets(g);
    PyObject_FREE(gr);
    return g;
}
Code Example #14
File: pyMPI_status.c Project: LLNL/pynamic
START_CPLUSPLUS

/**************************************************************************/
/* LOCAL  **************       status_dealloc      ************************/
/**************************************************************************/
/* Delete storage behind a status object                                  */
/**************************************************************************/
static void status_dealloc(PyObject* pySelf) {
  PYCHECK( PyObject_FREE(pySelf) );
  /* fall through */
 pythonError:
  return;
}
Code Example #15
File: iarchive.cpp Project: EriLee/ExocortexCrate
static void iArchive_delete(PyObject * self)
{
   ALEMBIC_TRY_STATEMENT
   // delete the archive
   iArchive * object = (iArchive *)self;

   // NEW, remove the filename from the list
   setIArchiveClosed(object->mArchive->getName());

   delete(object->mArchive);
   PyObject_FREE(object);
   gNbIArchives--;
   ALEMBIC_VOID_CATCH_STATEMENT
}
Code Example #16
File: sgmlop.c Project: Jbaumotte/web2py
static void
_sgmlop_dealloc(FastSGMLParserObject* self)
{
    if (self->buffer)
        free(self->buffer);
    Py_XDECREF(self->finish_starttag);
    Py_XDECREF(self->finish_endtag);
    Py_XDECREF(self->handle_proc);
    Py_XDECREF(self->handle_special);
    Py_XDECREF(self->handle_charref);
    Py_XDECREF(self->handle_entityref);
    Py_XDECREF(self->handle_data);
    Py_XDECREF(self->handle_cdata);
    Py_XDECREF(self->handle_comment);
    PyObject_FREE(self);
}
Code Example #17
File: pgenmain.c Project: 10sr/cpython
grammar *
getgrammar(char *filename)
{
    FILE *fp;
    node *n;
    grammar *g0, *g;
    perrdetail err;

    fp = fopen(filename, "r");
    if (fp == NULL) {
        perror(filename);
        Py_Exit(1);
    }
    g0 = meta_grammar();
    n = PyParser_ParseFile(fp, filename, g0, g0->g_start,
                  (char *)NULL, (char *)NULL, &err);
    fclose(fp);
    if (n == NULL) {
        fprintf(stderr, "Parsing error %d, line %d.\n",
            err.error, err.lineno);
        if (err.text != NULL) {
            size_t len;
            int i;
            fprintf(stderr, "%s", err.text);
            len = strlen(err.text);
            if (len == 0 || err.text[len-1] != '\n')
                fprintf(stderr, "\n");
            for (i = 0; i < err.offset; i++) {
                if (err.text[i] == '\t')
                    putc('\t', stderr);
                else
                    putc(' ', stderr);
            }
            fprintf(stderr, "^\n");
            PyObject_FREE(err.text);
        }
        Py_Exit(1);
    }
    g = pgen(n);
    PyNode_Free(n);
    if (g == NULL) {
        printf("Bad grammar.\n");
        Py_Exit(1);
    }
    return g;
}
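
The error branch in getgrammar() lines the caret up under the failing column by echoing a tab for every tab in the offending line and a space for anything else, so the '^' stays aligned however wide tabs render. A standalone sketch of just that trick, with a made-up error line and offset:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Hypothetical error line and column offset, standing in for
       err.text and err.offset above. */
    const char *text = "\tif x = 1:";
    int offset = 6, i;
    size_t len = strlen(text);

    fprintf(stderr, "%s", text);
    if (len == 0 || text[len - 1] != '\n')
        fprintf(stderr, "\n");
    /* Copy tabs verbatim so the caret stays aligned regardless of tab width. */
    for (i = 0; i < offset; i++)
        putc(text[i] == '\t' ? '\t' : ' ', stderr);
    fprintf(stderr, "^\n");
    return 0;
}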
Code Example #18
File: pgen.c Project: 10sr/cpython
static void
makedfa(nfagrammar *gr, nfa *nf, dfa *d)
{
    int nbits = nf->nf_nstates;
    bitset ss;
    int xx_nstates;
    ss_state *xx_state, *yy;
    ss_arc *zz;
    int istate, jstate, iarc, jarc, ibit;
    nfastate *st;
    nfaarc *ar;

    ss = newbitset(nbits);
    addclosure(ss, nf, nf->nf_start);
    xx_state = (ss_state *)PyObject_MALLOC(sizeof(ss_state));
    if (xx_state == NULL)
        Py_FatalError("no mem for xx_state in makedfa");
    xx_nstates = 1;
    yy = &xx_state[0];
    yy->ss_ss = ss;
    yy->ss_narcs = 0;
    yy->ss_arc = NULL;
    yy->ss_deleted = 0;
    yy->ss_finish = testbit(ss, nf->nf_finish);
    if (yy->ss_finish)
        printf("Error: nonterminal '%s' may produce empty.\n",
            nf->nf_name);

    /* This algorithm is from a book written before
       the invention of structured programming... */

    /* For each unmarked state... */
    for (istate = 0; istate < xx_nstates; ++istate) {
        size_t size;
        yy = &xx_state[istate];
        ss = yy->ss_ss;
        /* For all its states... */
        for (ibit = 0; ibit < nf->nf_nstates; ++ibit) {
            if (!testbit(ss, ibit))
                continue;
            st = &nf->nf_state[ibit];
            /* For all non-empty arcs from this state... */
            for (iarc = 0; iarc < st->st_narcs; iarc++) {
                ar = &st->st_arc[iarc];
                if (ar->ar_label == EMPTY)
                    continue;
                /* Look up in list of arcs from this state */
                for (jarc = 0; jarc < yy->ss_narcs; ++jarc) {
                    zz = &yy->ss_arc[jarc];
                    if (ar->ar_label == zz->sa_label)
                        goto found;
                }
                /* Add new arc for this state */
                size = sizeof(ss_arc) * (yy->ss_narcs + 1);
                yy->ss_arc = (ss_arc *)PyObject_REALLOC(
                                            yy->ss_arc, size);
                if (yy->ss_arc == NULL)
                    Py_FatalError("out of mem");
                zz = &yy->ss_arc[yy->ss_narcs++];
                zz->sa_label = ar->ar_label;
                zz->sa_bitset = newbitset(nbits);
                zz->sa_arrow = -1;
             found:             ;
                /* Add destination */
                addclosure(zz->sa_bitset, nf, ar->ar_arrow);
            }
        }
        /* Now look up all the arrow states */
        for (jarc = 0; jarc < xx_state[istate].ss_narcs; jarc++) {
            zz = &xx_state[istate].ss_arc[jarc];
            for (jstate = 0; jstate < xx_nstates; jstate++) {
                if (samebitset(zz->sa_bitset,
                    xx_state[jstate].ss_ss, nbits)) {
                    zz->sa_arrow = jstate;
                    goto done;
                }
            }
            size = sizeof(ss_state) * (xx_nstates + 1);
            xx_state = (ss_state *)PyObject_REALLOC(xx_state,
                                                        size);
            if (xx_state == NULL)
                Py_FatalError("out of mem");
            zz->sa_arrow = xx_nstates;
            yy = &xx_state[xx_nstates++];
            yy->ss_ss = zz->sa_bitset;
            yy->ss_narcs = 0;
            yy->ss_arc = NULL;
            yy->ss_deleted = 0;
            yy->ss_finish = testbit(yy->ss_ss, nf->nf_finish);
         done:          ;
        }
    }

    if (Py_DebugFlag)
        printssdfa(xx_nstates, xx_state, nbits, &gr->gr_ll,
                                        "before minimizing");

    simplify(xx_nstates, xx_state);

    if (Py_DebugFlag)
        printssdfa(xx_nstates, xx_state, nbits, &gr->gr_ll,
                                        "after minimizing");

    convert(d, xx_nstates, xx_state);

    /* XXX cleanup */
    PyObject_FREE(xx_state);
}
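
makedfa() grows both its state array and each state's arc array one element at a time with PyObject_REALLOC, treating allocation failure as fatal. A minimal standalone sketch of that append-by-realloc pattern with plain realloc and a hypothetical element type, kept with the temporary-pointer idiom so the old block is not leaked on failure:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical element, standing in for ss_state / ss_arc above. */
typedef struct { int label; } demo_elem;

int main(void)
{
    demo_elem *arr = NULL;
    int count = 0, i;

    for (i = 0; i < 4; i++) {
        /* Grow by exactly one slot, as makedfa() does; a real container
           would usually grow geometrically to avoid repeated copying. */
        demo_elem *tmp = realloc(arr, sizeof(*arr) * (count + 1));
        if (tmp == NULL) {
            free(arr);
            fprintf(stderr, "out of mem\n");   /* Py_FatalError in the original */
            return 1;
        }
        arr = tmp;
        arr[count++].label = i;
    }
    printf("appended %d elements\n", count);
    free(arr);
    return 0;
}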
Code Example #19
File: parsetok.c Project: Oize/pspstacklesspython
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
	 int flags)
{
	parser_state *ps;
	node *n;
	int started = 0, handling_import = 0, handling_with = 0;

	if ((ps = PyParser_New(g, start)) == NULL) {
		fprintf(stderr, "no mem for new parser\n");
		err_ret->error = E_NOMEM;
		PyTokenizer_Free(tok);
		return NULL;
	}
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
	if (flags & PyPARSE_WITH_IS_KEYWORD)
		ps->p_flags |= CO_FUTURE_WITH_STATEMENT;
#endif

	for (;;) {
		char *a, *b;
		int type;
		size_t len;
		char *str;
		int col_offset;

		type = PyTokenizer_Get(tok, &a, &b);
		if (type == ERRORTOKEN) {
			err_ret->error = tok->done;
			break;
		}
		if (type == ENDMARKER && started) {
			type = NEWLINE; /* Add an extra newline */
			handling_with = handling_import = 0;
			started = 0;
			/* Add the right number of dedent tokens,
			   except if a certain flag is given --
			   codeop.py uses this. */
			if (tok->indent &&
			    !(flags & PyPARSE_DONT_IMPLY_DEDENT))
			{
				tok->pendin = -tok->indent;
				tok->indent = 0;
			}
		}
		else
			started = 1;
		len = b - a; /* XXX this may compute NULL - NULL */
		str = (char *) PyObject_MALLOC(len + 1);
		if (str == NULL) {
			fprintf(stderr, "no mem for next token\n");
			err_ret->error = E_NOMEM;
			break;
		}
		if (len > 0)
			strncpy(str, a, len);
		str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
		/* This is only necessary to support the "as" warning, but
		   we don't want to warn about "as" in import statements. */
		if (type == NAME &&
		    len == 6 && str[0] == 'i' && strcmp(str, "import") == 0)
			handling_import = 1;

		/* Warn about with as NAME */
		if (type == NAME &&
		    !(ps->p_flags & CO_FUTURE_WITH_STATEMENT)) {
		    if (len == 4 && str[0] == 'w' && strcmp(str, "with") == 0)
			warn(with_msg, err_ret->filename, tok->lineno);
		    else if (!(handling_import || handling_with) &&
		             len == 2 && str[0] == 'a' &&
			     strcmp(str, "as") == 0)
			warn(as_msg, err_ret->filename, tok->lineno);
		}
		else if (type == NAME &&
			 (ps->p_flags & CO_FUTURE_WITH_STATEMENT) &&
			 len == 4 && str[0] == 'w' && strcmp(str, "with") == 0)
			handling_with = 1;
#endif
		if (a >= tok->line_start)
			col_offset = a - tok->line_start;
		else
			col_offset = -1;
			
		if ((err_ret->error =
		     PyParser_AddToken(ps, (int)type, str, tok->lineno, col_offset,
				       &(err_ret->expected))) != E_OK) {
			if (err_ret->error != E_DONE) {
				PyObject_FREE(str);
				err_ret->token = type;
			}				
			break;
		}
	}

	if (err_ret->error == E_DONE) {
		n = ps->p_tree;
		ps->p_tree = NULL;
	}
	else
		n = NULL;

	PyParser_Delete(ps);

	if (n == NULL) {
		if (tok->lineno <= 1 && tok->done == E_EOF)
			err_ret->error = E_EOF;
		err_ret->lineno = tok->lineno;
		if (tok->buf != NULL) {
			size_t len;
			assert(tok->cur - tok->buf < INT_MAX);
			err_ret->offset = (int)(tok->cur - tok->buf);
			len = tok->inp - tok->buf;
			err_ret->text = (char *) PyObject_MALLOC(len + 1);
			if (err_ret->text != NULL) {
				if (len > 0)
					strncpy(err_ret->text, tok->buf, len);
				err_ret->text[len] = '\0';
			}
		}
	} else if (tok->encoding != NULL) {
		node* r = PyNode_New(encoding_decl);
		if (!r) {
			err_ret->error = E_NOMEM;
			n = NULL;
			goto done;
		}
		r->n_str = tok->encoding;
		r->n_nchildren = 1;
		r->n_child = n;
		tok->encoding = NULL;
		n = r;
	}

done:
	PyTokenizer_Free(tok);

	return n;
}
Code Example #20
File: parsetok.c Project: Oize/pspstacklesspython
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
	 int flags)
{
	parser_state *ps;
	node *n;
	int started = 0;

	if ((ps = PyParser_New(g, start)) == NULL) {
		fprintf(stderr, "no mem for new parser\n");
		err_ret->error = E_NOMEM;
		return NULL;
	}
#if 0 /* future keyword */
	if (flags & PyPARSE_YIELD_IS_KEYWORD)
		ps->p_generators = 1;
#endif

	for (;;) {
		char *a, *b;
		int type;
		size_t len;
		char *str;

		type = PyTokenizer_Get(tok, &a, &b);
		if (type == ERRORTOKEN) {
			err_ret->error = tok->done;
			break;
		}
		if (type == ENDMARKER && started) {
			type = NEWLINE; /* Add an extra newline */
			started = 0;
			/* Add the right number of dedent tokens,
			   except if a certain flag is given --
			   codeop.py uses this. */
			if (tok->indent &&
			    !(flags & PyPARSE_DONT_IMPLY_DEDENT))
			{
				tok->pendin = -tok->indent;
				tok->indent = 0;
			}
		}
		else
			started = 1;
		len = b - a; /* XXX this may compute NULL - NULL */
		str = (char *) PyObject_MALLOC(len + 1);
		if (str == NULL) {
			fprintf(stderr, "no mem for next token\n");
			err_ret->error = E_NOMEM;
			break;
		}
		if (len > 0)
			strncpy(str, a, len);
		str[len] = '\0';

#if 0 /* future keyword */
		/* Warn about yield as NAME */
		if (type == NAME && !ps->p_generators &&
		    len == 5 && str[0] == 'y' && strcmp(str, "yield") == 0)
			PySys_WriteStderr(yield_msg,
					  err_ret->filename==NULL ?
					  "<string>" : err_ret->filename,
					  tok->lineno);
#endif

		if ((err_ret->error =
		     PyParser_AddToken(ps, (int)type, str, tok->lineno,
				       &(err_ret->expected))) != E_OK) {
			if (err_ret->error != E_DONE)
				PyObject_FREE(str);
			break;
		}
	}

	if (err_ret->error == E_DONE) {
		n = ps->p_tree;
		ps->p_tree = NULL;
	}
	else
		n = NULL;

	PyParser_Delete(ps);

	if (n == NULL) {
		if (tok->lineno <= 1 && tok->done == E_EOF)
			err_ret->error = E_EOF;
		err_ret->lineno = tok->lineno;
		err_ret->offset = tok->cur - tok->buf;
		if (tok->buf != NULL) {
			size_t len = tok->inp - tok->buf;
			err_ret->text = (char *) PyObject_MALLOC(len + 1);
			if (err_ret->text != NULL) {
				if (len > 0)
					strncpy(err_ret->text, tok->buf, len);
				err_ret->text[len] = '\0';
			}
		}
	} else if (tok->encoding != NULL) {
		node* r = PyNode_New(encoding_decl);
		r->n_str = tok->encoding;
		r->n_nchildren = 1;
		r->n_child = n;
		tok->encoding = NULL;
		n = r;
	}

	PyTokenizer_Free(tok);

	return n;
}
Code Example #21
File: firstsets.c Project: Oize/pspstacklesspython
static void
calcfirstset(grammar *g, dfa *d)
{
	int i, j;
	state *s;
	arc *a;
	int nsyms;
	int *sym;
	int nbits;
	static bitset dummy;
	bitset result;
	int type;
	dfa *d1;
	label *l0;
	
	if (Py_DebugFlag)
		printf("Calculate FIRST set for '%s'\n", d->d_name);
	
	if (dummy == NULL)
		dummy = newbitset(1);
	if (d->d_first == dummy) {
		fprintf(stderr, "Left-recursion for '%s'\n", d->d_name);
		return;
	}
	if (d->d_first != NULL) {
		fprintf(stderr, "Re-calculating FIRST set for '%s' ???\n",
			d->d_name);
	}
	d->d_first = dummy;
	
	l0 = g->g_ll.ll_label;
	nbits = g->g_ll.ll_nlabels;
	result = newbitset(nbits);
	
	sym = (int *)PyObject_MALLOC(sizeof(int));
	if (sym == NULL)
		Py_FatalError("no mem for new sym in calcfirstset");
	nsyms = 1;
	sym[0] = findlabel(&g->g_ll, d->d_type, (char *)NULL);
	
	s = &d->d_state[d->d_initial];
	for (i = 0; i < s->s_narcs; i++) {
		a = &s->s_arc[i];
		for (j = 0; j < nsyms; j++) {
			if (sym[j] == a->a_lbl)
				break;
		}
		if (j >= nsyms) { /* New label */
			sym = (int *)PyObject_REALLOC(sym, 
                                                sizeof(int) * (nsyms + 1));
			if (sym == NULL)
				Py_FatalError(
				    "no mem to resize sym in calcfirstset");
			sym[nsyms++] = a->a_lbl;
			type = l0[a->a_lbl].lb_type;
			if (ISNONTERMINAL(type)) {
				d1 = PyGrammar_FindDFA(g, type);
				if (d1->d_first == dummy) {
					fprintf(stderr,
						"Left-recursion below '%s'\n",
						d->d_name);
				}
				else {
					if (d1->d_first == NULL)
						calcfirstset(g, d1);
					mergebitset(result,
						    d1->d_first, nbits);
				}
			}
			else if (ISTERMINAL(type)) {
				addbit(result, a->a_lbl);
			}
		}
	}
	d->d_first = result;
	if (Py_DebugFlag) {
		printf("FIRST set for '%s': {", d->d_name);
		for (i = 0; i < nbits; i++) {
			if (testbit(result, i))
				printf(" %s", PyGrammar_LabelRepr(&l0[i]));
		}
		printf(" }\n");
	}

	PyObject_FREE(sym);
}
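
calcfirstset() detects left recursion by pointing d_first at a shared dummy bitset while the set is being computed; re-entering a rule whose d_first still equals dummy means the rule depends on itself. A standalone sketch of that in-progress-sentinel idea on a tiny made-up dependency graph:

#include <stdio.h>

/* Hypothetical graph of 3 nodes; node i depends on dep[i], forming a cycle.
   state[] plays the role of d_first: 0 = unvisited, 1 = in progress
   (the 'dummy' sentinel above), 2 = done. */
#define NNODES 3

static int dep[NNODES] = { 1, 2, 0 };
static int state[NNODES];

static void visit(int i)
{
    if (state[i] == 1) {                /* analogous to d_first == dummy */
        fprintf(stderr, "recursion detected at node %d\n", i);
        return;
    }
    if (state[i] == 2)
        return;
    state[i] = 1;                       /* mark as in progress */
    visit(dep[i]);
    state[i] = 2;                       /* computed */
}

int main(void)
{
    visit(0);
    return 0;
}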
Code Example #22
File: parsetok.c Project: Michael0x2a/typed_ast
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0;

    growable_int_array type_ignores;
    if (!growable_int_array_init(&type_ignores, 10)) {
        err_ret->error = E_NOMEM;
        Ta27Tokenizer_Free(tok);
        return NULL;
    }

    if ((ps = Ta27Parser_New(g, start)) == NULL) {
        fprintf(stderr, "no mem for new parser\n");
        err_ret->error = E_NOMEM;
        Ta27Tokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    if (*flags & PyPARSE_PRINT_IS_FUNCTION) {
        ps->p_flags |= CO_FUTURE_PRINT_FUNCTION;
    }
    if (*flags & PyPARSE_UNICODE_LITERALS) {
        ps->p_flags |= CO_FUTURE_UNICODE_LITERALS;
    }

#endif

    for (;;) {
        char *a, *b;
        int type;
        size_t len;
        char *str;
        int col_offset;

        type = Ta27Tokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            fprintf(stderr, "no mem for next token\n");
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#endif
        if (a >= tok->line_start)
            col_offset = a - tok->line_start;
        else
            col_offset = -1;

        if (type == TYPE_IGNORE) {
            if (!growable_int_array_add(&type_ignores, tok->lineno)) {
                err_ret->error = E_NOMEM;
                break;
            }
            continue;
        }

        if ((err_ret->error =
             Ta27Parser_AddToken(ps, (int)type, str, tok->lineno, col_offset,
                               &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }

    if (err_ret->error == E_DONE) {
        n = ps->p_tree;
        ps->p_tree = NULL;

        if (n->n_type == file_input) {
            /* Put type_ignore nodes in the ENDMARKER of file_input. */
            int num;
            node *ch;
            size_t i;

            num = NCH(n);
            ch = CHILD(n, num - 1);
            REQ(ch, ENDMARKER);

            for (i = 0; i < type_ignores.num_items; i++) {
                Ta27Node_AddChild(ch, TYPE_IGNORE, NULL, type_ignores.items[i], 0);
            }
        }
        growable_int_array_deallocate(&type_ignores);

    }
    else
        n = NULL;

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    *flags = ps->p_flags;
#endif
    Ta27Parser_Delete(ps);

    if (n == NULL) {
        if (tok->lineno <= 1 && tok->done == E_EOF)
            err_ret->error = E_EOF;
        err_ret->lineno = tok->lineno;
        if (tok->buf != NULL) {
            char *text = NULL;
            size_t len;
            assert(tok->cur - tok->buf < INT_MAX);
            err_ret->offset = (int)(tok->cur - tok->buf);
            len = tok->inp - tok->buf;
#ifdef Py_USING_UNICODE
            text = Ta27Tokenizer_RestoreEncoding(tok, len, &err_ret->offset);

#endif
            if (text == NULL) {
                text = (char *) PyObject_MALLOC(len + 1);
                if (text != NULL) {
                    if (len > 0)
                        strncpy(text, tok->buf, len);
                    text[len] = '\0';
                }
            }
            err_ret->text = text;
        }
    } else if (tok->encoding != NULL) {
        /* 'nodes->n_str' uses PyObject_*, while 'tok->encoding' was
         * allocated using PyMem_
         */
        node* r = Ta27Node_New(encoding_decl);
        if (r)
            r->n_str = PyObject_MALLOC(strlen(tok->encoding)+1);
        if (!r || !r->n_str) {
            err_ret->error = E_NOMEM;
            if (r)
                PyObject_FREE(r);
            n = NULL;
            goto done;
        }
        strcpy(r->n_str, tok->encoding);
        PyMem_FREE(tok->encoding);
        tok->encoding = NULL;
        r->n_nchildren = 1;
        r->n_child = n;
        n = r;
    }

done:
    Ta27Tokenizer_Free(tok);

    return n;
}
Code Example #23
static void test_structmembers_free(PyObject *ob){
	PyObject_FREE(ob);
}
Code Example #24
File: parsetok.c Project: GuardianRG/static-python
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0;

    if ((ps = PyParser_New(g, start)) == NULL) {
        err_ret->error = E_NOMEM;
        PyTokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    if (*flags & PyPARSE_BARRY_AS_BDFL)
        ps->p_flags |= CO_FUTURE_BARRY_AS_BDFL;
#endif

    for (;;) {
        char *a, *b;
        int type;
        size_t len;
        char *str;
        int col_offset;

        type = PyTokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
        if (type == NOTEQUAL) {
            if (!(ps->p_flags & CO_FUTURE_BARRY_AS_BDFL) &&
                            strcmp(str, "!=")) {
                PyObject_FREE(str);
                err_ret->error = E_SYNTAX;
                break;
            }
            else if ((ps->p_flags & CO_FUTURE_BARRY_AS_BDFL) &&
                            strcmp(str, "<>")) {
                PyObject_FREE(str);
                err_ret->text = "with Barry as BDFL, use '<>' "
                                "instead of '!='";
                err_ret->error = E_SYNTAX;
                break;
            }
        }
#endif
        if (a >= tok->line_start)
            col_offset = a - tok->line_start;
        else
            col_offset = -1;

        if ((err_ret->error =
             PyParser_AddToken(ps, (int)type, str,
                               tok->lineno, col_offset,
                               &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }

    if (err_ret->error == E_DONE) {
        n = ps->p_tree;
        ps->p_tree = NULL;

#ifndef PGEN
        /* Check that the source for a single input statement really
           is a single statement by looking at what is left in the
           buffer after parsing.  Trailing whitespace and comments
           are OK.  */
        if (start == single_input) {
            char *cur = tok->cur;
            char c = *tok->cur;

            for (;;) {
                while (c == ' ' || c == '\t' || c == '\n' || c == '\014')
                    c = *++cur;

                if (!c)
                    break;

                if (c != '#') {
                    err_ret->error = E_BADSINGLE;
                    PyNode_Free(n);
                    n = NULL;
                    break;
                }

                /* Suck up comment. */
                while (c && c != '\n')
                    c = *++cur;
            }
        }
#endif
    }
    else
        n = NULL;

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    *flags = ps->p_flags;
#endif
    PyParser_Delete(ps);

    if (n == NULL) {
        if (tok->done == E_EOF)
            err_ret->error = E_EOF;
        err_ret->lineno = tok->lineno;
        if (tok->buf != NULL) {
            size_t len;
            assert(tok->cur - tok->buf < INT_MAX);
            err_ret->offset = (int)(tok->cur - tok->buf);
            len = tok->inp - tok->buf;
            err_ret->text = (char *) PyObject_MALLOC(len + 1);
            if (err_ret->text != NULL) {
                if (len > 0)
                    strncpy(err_ret->text, tok->buf, len);
                err_ret->text[len] = '\0';
            }
        }
    } else if (tok->encoding != NULL) {
        /* 'nodes->n_str' uses PyObject_*, while 'tok->encoding' was
         * allocated using PyMem_
         */
        node* r = PyNode_New(encoding_decl);
        if (r)
            r->n_str = PyObject_MALLOC(strlen(tok->encoding)+1);
        if (!r || !r->n_str) {
            err_ret->error = E_NOMEM;
            if (r)
                PyObject_FREE(r);
            n = NULL;
            goto done;
        }
        strcpy(r->n_str, tok->encoding);
        PyMem_FREE(tok->encoding);
        tok->encoding = NULL;
        r->n_nchildren = 1;
        r->n_child = n;
        n = r;
    }

done:
    PyTokenizer_Free(tok);

    return n;
}
Code Example #25
File: pyginterface.c Project: sfeltman/pygobject
static void
pyg_interface_free(PyObject *op)
{
    PyObject_FREE(op);
}
Code Example #26
File: pygtype.c Project: RIFTIO/pygobject
static void
object_doc_dealloc(PyObject *self)
{
    PyObject_FREE(self);
}
Code Example #27
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0, handling_import = 0, handling_with = 0;

    if ((ps = PyParser_New(g, start)) == NULL) {
        fprintf(stderr, "no mem for new parser\n");
        err_ret->error = E_NOMEM;
        PyTokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    if (*flags & PyPARSE_PRINT_IS_FUNCTION) {
        ps->p_flags |= CO_FUTURE_PRINT_FUNCTION;
    }
    if (*flags & PyPARSE_UNICODE_LITERALS) {
        ps->p_flags |= CO_FUTURE_UNICODE_LITERALS;
    }

#endif

    for (;;) {
        char *a, *b;
        int type;
        size_t len;
        char *str;
        int col_offset;

        type = PyTokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            handling_with = handling_import = 0;
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            fprintf(stderr, "no mem for next token\n");
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#endif
        if (a >= tok->line_start)
            col_offset = a - tok->line_start;
        else
            col_offset = -1;

        if ((err_ret->error =
             PyParser_AddToken(ps, (int)type, str, tok->lineno, col_offset,
                               &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }

    if (err_ret->error == E_DONE) {
        n = ps->p_tree;
        ps->p_tree = NULL;
    }
    else
        n = NULL;

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    *flags = ps->p_flags;
#endif
    PyParser_Delete(ps);

    if (n == NULL) {
        if (tok->lineno <= 1 && tok->done == E_EOF)
            err_ret->error = E_EOF;
        err_ret->lineno = tok->lineno;
        if (tok->buf != NULL) {
            char *text = NULL;
            size_t len;
            assert(tok->cur - tok->buf < INT_MAX);
            err_ret->offset = (int)(tok->cur - tok->buf);
            len = tok->inp - tok->buf;
#ifdef Py_USING_UNICODE
            text = PyTokenizer_RestoreEncoding(tok, len, &err_ret->offset);

#endif
            if (text == NULL) {
                text = (char *) PyObject_MALLOC(len + 1);
                if (text != NULL) {
                    if (len > 0)
                        strncpy(text, tok->buf, len);
                    text[len] = '\0';
                }
            }
            err_ret->text = text;
        }
    } else if (tok->encoding != NULL) {
        node* r = PyNode_New(encoding_decl);
        if (!r) {
            err_ret->error = E_NOMEM;
            n = NULL;
            goto done;
        }
        r->n_str = tok->encoding;
        r->n_nchildren = 1;
        r->n_child = n;
        tok->encoding = NULL;
        n = r;
    }

done:
    PyTokenizer_Free(tok);

    return n;
}
Code Example #28
File: pythonrun.c Project: tiran/cpython
static void
err_input(perrdetail *err)
{
    PyObject *v, *w, *errtype, *errtext;
    PyObject *msg_obj = NULL;
    const char *msg = NULL;
    int offset = err->offset;

    errtype = PyExc_SyntaxError;
    switch (err->error) {
    case E_ERROR:
        goto cleanup;
    case E_SYNTAX:
        errtype = PyExc_IndentationError;
        if (err->expected == INDENT)
            msg = "expected an indented block";
        else if (err->token == INDENT)
            msg = "unexpected indent";
        else if (err->token == DEDENT)
            msg = "unexpected unindent";
        else if (err->expected == NOTEQUAL) {
            errtype = PyExc_SyntaxError;
            msg = "with Barry as BDFL, use '<>' instead of '!='";
        }
        else {
            errtype = PyExc_SyntaxError;
            msg = "invalid syntax";
        }
        break;
    case E_TOKEN:
        msg = "invalid token";
        break;
    case E_EOFS:
        msg = "EOF while scanning triple-quoted string literal";
        break;
    case E_EOLS:
        msg = "EOL while scanning string literal";
        break;
    case E_INTR:
        if (!PyErr_Occurred())
            PyErr_SetNone(PyExc_KeyboardInterrupt);
        goto cleanup;
    case E_NOMEM:
        PyErr_NoMemory();
        goto cleanup;
    case E_EOF:
        msg = "unexpected EOF while parsing";
        break;
    case E_TABSPACE:
        errtype = PyExc_TabError;
        msg = "inconsistent use of tabs and spaces in indentation";
        break;
    case E_OVERFLOW:
        msg = "expression too long";
        break;
    case E_DEDENT:
        errtype = PyExc_IndentationError;
        msg = "unindent does not match any outer indentation level";
        break;
    case E_TOODEEP:
        errtype = PyExc_IndentationError;
        msg = "too many levels of indentation";
        break;
    case E_DECODE: {
        PyObject *type, *value, *tb;
        PyErr_Fetch(&type, &value, &tb);
        msg = "unknown decode error";
        if (value != NULL)
            msg_obj = PyObject_Str(value);
        Py_XDECREF(type);
        Py_XDECREF(value);
        Py_XDECREF(tb);
        break;
    }
    case E_LINECONT:
        msg = "unexpected character after line continuation character";
        break;

    case E_IDENTIFIER:
        msg = "invalid character in identifier";
        break;
    case E_BADSINGLE:
        msg = "multiple statements found while compiling a single statement";
        break;
    default:
        fprintf(stderr, "error=%d\n", err->error);
        msg = "unknown parsing error";
        break;
    }
    /* err->text may not be UTF-8 in case of decoding errors.
       Explicitly convert to an object. */
    if (!err->text) {
        errtext = Py_None;
        Py_INCREF(Py_None);
    } else {
        errtext = PyUnicode_DecodeUTF8(err->text, err->offset,
                                       "replace");
        if (errtext != NULL) {
            Py_ssize_t len = strlen(err->text);
            offset = (int)PyUnicode_GET_LENGTH(errtext);
            if (len != err->offset) {
                Py_DECREF(errtext);
                errtext = PyUnicode_DecodeUTF8(err->text, len,
                                               "replace");
            }
        }
    }
    v = Py_BuildValue("(OiiN)", err->filename,
                      err->lineno, offset, errtext);
    if (v != NULL) {
        if (msg_obj)
            w = Py_BuildValue("(OO)", msg_obj, v);
        else
            w = Py_BuildValue("(sO)", msg, v);
    } else
        w = NULL;
    Py_XDECREF(v);
    PyErr_SetObject(errtype, w);
    Py_XDECREF(w);
cleanup:
    Py_XDECREF(msg_obj);
    if (err->text != NULL) {
        PyObject_FREE(err->text);
        err->text = NULL;
    }
}
Code Example #29
File: parsetok.c Project: 10sr/cpython
static node *
parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
         int *flags)
{
    parser_state *ps;
    node *n;
    int started = 0;

    if ((ps = PyParser_New(g, start)) == NULL) {
        err_ret->error = E_NOMEM;
        PyTokenizer_Free(tok);
        return NULL;
    }
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    if (*flags & PyPARSE_BARRY_AS_BDFL)
        ps->p_flags |= CO_FUTURE_BARRY_AS_BDFL;
#endif

    for (;;) {
        char *a, *b;
        int type;
        size_t len;
        char *str;
        int col_offset;

        type = PyTokenizer_Get(tok, &a, &b);
        if (type == ERRORTOKEN) {
            err_ret->error = tok->done;
            break;
        }
        if (type == ENDMARKER && started) {
            type = NEWLINE; /* Add an extra newline */
            started = 0;
            /* Add the right number of dedent tokens,
               except if a certain flag is given --
               codeop.py uses this. */
            if (tok->indent &&
                !(*flags & PyPARSE_DONT_IMPLY_DEDENT))
            {
                tok->pendin = -tok->indent;
                tok->indent = 0;
            }
        }
        else
            started = 1;
        len = b - a; /* XXX this may compute NULL - NULL */
        str = (char *) PyObject_MALLOC(len + 1);
        if (str == NULL) {
            err_ret->error = E_NOMEM;
            break;
        }
        if (len > 0)
            strncpy(str, a, len);
        str[len] = '\0';

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
        if (type == NOTEQUAL) {
            if (!(ps->p_flags & CO_FUTURE_BARRY_AS_BDFL) &&
                            strcmp(str, "!=")) {
                PyObject_FREE(str);
                err_ret->error = E_SYNTAX;
                break;
            }
            else if ((ps->p_flags & CO_FUTURE_BARRY_AS_BDFL) &&
                            strcmp(str, "<>")) {
                PyObject_FREE(str);
                err_ret->text = "with Barry as BDFL, use '<>' "
                                "instead of '!='";
                err_ret->error = E_SYNTAX;
                break;
            }
        }
#endif
        if (a >= tok->line_start)
            col_offset = Py_SAFE_DOWNCAST(a - tok->line_start,
                                          Py_intptr_t, int);
        else
            col_offset = -1;

        if ((err_ret->error =
             PyParser_AddToken(ps, (int)type, str,
                               tok->lineno, col_offset,
                               &(err_ret->expected))) != E_OK) {
            if (err_ret->error != E_DONE) {
                PyObject_FREE(str);
                err_ret->token = type;
            }
            break;
        }
    }
Code Example #30
static void
err_input(perrdetail *err)
{
	PyObject *v, *w, *errtype;
	PyObject* u = NULL;
	char *msg = NULL;
	errtype = PyExc_SyntaxError;
	switch (err->error) {
	case E_SYNTAX:
		errtype = PyExc_IndentationError;
		if (err->expected == INDENT)
			msg = "expected an indented block";
		else if (err->token == INDENT)
			msg = "unexpected indent";
		else if (err->token == DEDENT)
			msg = "unexpected unindent";
		else {
			errtype = PyExc_SyntaxError;
			msg = "invalid syntax";
		}
		break;
	case E_TOKEN:
		msg = "invalid token";
		break;
	case E_EOFS:
		msg = "EOF while scanning triple-quoted string";
		break;
	case E_EOLS:
		msg = "EOL while scanning single-quoted string";
		break;
	case E_INTR:
		if (!PyErr_Occurred())
			PyErr_SetNone(PyExc_KeyboardInterrupt);
		return;
	case E_NOMEM:
		PyErr_NoMemory();
		return;
	case E_EOF:
		msg = "unexpected EOF while parsing";
		break;
	case E_TABSPACE:
		errtype = PyExc_TabError;
		msg = "inconsistent use of tabs and spaces in indentation";
		break;
	case E_OVERFLOW:
		msg = "expression too long";
		break;
	case E_DEDENT:
		errtype = PyExc_IndentationError;
		msg = "unindent does not match any outer indentation level";
		break;
	case E_TOODEEP:
		errtype = PyExc_IndentationError;
		msg = "too many levels of indentation";
		break;
	case E_DECODE: {
		PyObject *type, *value, *tb;
		PyErr_Fetch(&type, &value, &tb);
		if (value != NULL) {
			u = PyObject_Str(value);
			if (u != NULL) {
				msg = PyString_AsString(u);
			}
		}
		if (msg == NULL)
			msg = "unknown decode error";
		Py_XDECREF(type);
		Py_XDECREF(value);
		Py_XDECREF(tb);
		break;
	}
	case E_LINECONT:
		msg = "unexpected character after line continuation character";
		break;
	default:
		fprintf(stderr, "error=%d\n", err->error);
		msg = "unknown parsing error";
		break;
	}
	v = Py_BuildValue("(ziiz)", err->filename,
			  err->lineno, err->offset, err->text);
	if (err->text != NULL) {
		PyObject_FREE(err->text);
		err->text = NULL;
	}
	w = NULL;
	if (v != NULL)
		w = Py_BuildValue("(sO)", msg, v);
	Py_XDECREF(u);
	Py_XDECREF(v);
	PyErr_SetObject(errtype, w);
	Py_XDECREF(w);
}