Example #1
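/*
 * Build the joined schema for the current query: count the fields of
 * every source stream, allocate one schema big enough for all of them,
 * then copy each field's qualified name and type into it.
 */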
int make_joined_schema (void)
{
	int nof, j;
	schema_t *sch;
	struct sql_source *i;
	nof = 0;
	i = (struct sql_source *)llist_next(query.source);
	while (i != NULL)
	{
		nof += i->stream->schema->nof;
		i = (struct sql_source *)llist_next(i->link);		
	}
	sch = (schema_t *)xmalloc(sizeof(schema_t) + nof * sizeof(field_t));
	sch->nof = nof;
	i = (struct sql_source *)llist_next(query.source);
	nof = 0;
	while (i != NULL)
	{
		for (j = 0; j < i->stream->schema->nof; j++)
		{
			joined_field_name(sch->field[nof].name, i, j);
			sch->field[nof].type = i->stream->schema->field[j].type;
			nof++;
		}
		i = (struct sql_source *)llist_next(i->link);
	}
	fix_schema(sch);
	query.joined_schema = sch;
	return 0;
}
Example #2
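/*
 * Resolve every aggregate of the query.  An aggregate with neither a
 * source nor a field name is loaded with a VOID input type; otherwise
 * the (source, field) pair is looked up and the aggregate is loaded
 * for that field's type, clearing sql_ok if that fails.
 */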
int sql_resolve_aggre (void)
{
	struct sql_aggre *aggre;
	aggre = (struct sql_aggre *)llist_next(query.aggre);
	while(aggre != NULL)
	{
		struct sql_source *src;
		int field, ret;
		aggre_t *ag;
		if (aggre->nsource == NULL && aggre->nfield == NULL)
		{
			aggre->source = NULL;
			aggre->field = -1;
			aggre->aggre = aggre_load(aggre->naggre, TYPE_VOID);
		}
		else
		{
			ret = sql_lookup_field(aggre->nsource, aggre->nfield, &src, &field);
			aggre->source = src;
			aggre->field = field;
			ag = NULL;
			if (ret == 0)
			{
				ag = aggre_load(aggre->naggre, src->stream->schema->field[field].type);
			}
			aggre->aggre = ag;
			if (aggre->aggre == NULL) sql_ok = 0;
		}
		aggre = (struct sql_aggre *)llist_next(aggre->link);
	}
	return 0;
}
Example #3
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
Example #4
static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
Example #5
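/* Debug dump: print every block of the widget, rendering the gap
 * region as '*', newlines as '^' and tabs as '_'. */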
void print_data ( text_widget * widget ) {
    u16 count = 0;
    u16 col = 0;
    block * b;
    LList * ll_b;

    for (ll_b = widget->blocks; ll_b; ll_b = llist_next(ll_b)) {
        b = (block *) llist_data(ll_b);

        printf("Block %x len %d size %d gap %d contains: \n[", 
               b, b->len, b->data_size, b->b_gap);
        for (count = 0, col = 1; count <  b->data_size; count++, col++) {
            if (col == 80)
                printf("\n");
            col = col % 80;
            
            if ((count >= b->b_gap) && 
                (count < b->b_gap + b->data_size - b->len)) {
                printf("*");
            } else if (b->data[count] == '\n') {
                printf("^");
            } else if (b->data[count] == '\t') {
                printf("_");
            } else {
                printf("%c", b->data[count]);
            }
        }
        printf("]\n\n");
    }
}
Example #6
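/*
 * Tear down the current query: free the map entries and their
 * expressions, the sources and their filters, the group/aggregate
 * lists, the group filters and the derived (joined/grouped/mapped)
 * schemas.
 */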
int sql_cleanup_query (void)
{
	void *lnode;
	struct sql_map *map;
	struct sql_source *src;
	struct sql_filter *filter;
	struct sql_group_filter *gfp;
	map = (struct sql_map *)llist_next(query.map);
	while (map != NULL)
	{
		sql_free_expr(map->expr);
		if (map->alias != NULL) free(map->alias);
		lnode = map;
		map = (struct sql_map *)llist_next(map->link);
		free(lnode);
	}
	src = (struct sql_source *)llist_next(query.source);
	while (src != NULL)
	{
		if (src->alias != NULL) free(src->alias);
		filter = (struct sql_filter *)llist_next(src->filter);
		while (filter != NULL)
		{
			if (!sql_ok)
			{
				if (filter->type->free != NULL)
					filter->type->free(filter->data);
				free(filter->data);
			}
			lnode = filter;
			filter = (struct sql_filter *)llist_next(filter->link);
			free(lnode);
		}
		lnode = src;
		src = (struct sql_source *)llist_next(src->link);
		free(lnode);
	}
	llist_free_lazy(&query.group);
	llist_free_lazy(&query.aggre);
	gfp = (struct sql_group_filter *)llist_next(query.group_filter);
	while (gfp != NULL)
	{
		if (gfp->type->free != NULL)
			gfp->type->free(gfp->data);
		free(gfp->data);
		lnode = gfp;
		gfp = (struct sql_group_filter *)llist_next(gfp->link);
		free(lnode);
	}
	if (query.joined_schema != NULL) free(query.joined_schema);
	if (query.grouped_schema != NULL) free(query.grouped_schema);
	if (query.mapped_schema != NULL) free(query.mapped_schema);
	return 0;
}
Example #7
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		__fput(llist_entry(node, struct file, f_u.fu_llist));
	}
}
Example #8
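/*
 * Resolve a (source name, field name) pair to a source pointer and a
 * field index.  Without a source name every source of the query is
 * searched; without a source and field name the lookup trivially
 * succeeds with a NULL source.  Returns 0 on success, -1 (clearing
 * sql_ok for an unknown source) when nothing matches.
 */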
int sql_lookup_field (char *nsrc, char *nfld, struct sql_source **src, int *fld)
{
	if (nsrc == NULL)
	{
		struct sql_source *i;
		if (nfld == NULL)
		{
			*src = NULL;
			*fld = 0;
			return 0;
		}
		i = (struct sql_source *)llist_next(query.source);
		while (i != NULL)
		{
			*src = lookup_source_field(i, nfld, fld);
			if (*src != NULL) return 0;
			i = (struct sql_source *)llist_next(i->link);
		}
		*src = NULL;
		return -1;	
	}
	else
	{
		struct sql_source *i = (struct sql_source *)llist_next(query.source);
		while (i != NULL)
		{
			if (strcmp(i->stream->name, nsrc) == 0) break;
			if (strcmp(i->alias, nsrc) == 0) break;
			i = (struct sql_source *)llist_next(i->link);
		}
		if (i == NULL)
		{
			sql_ok = 0;
			*src = NULL;
			return -1;
		}
		*src = lookup_source_field(i, nfld, fld);
		return *src != NULL ? 0 : -1;
	}
}
Example #9
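/* Debug dump of one block: its paragraphs and atoms with their sizes
 * and flags; '*' marks the current/cursor entries, '@' marks
 * widget->fvb. */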
void print_block ( text_widget * widget,
                   LList * ll_b ) {
    block * b;
    paragraph * p;
    atom * a;
    LList * ll_p, * ll_a;
    b = (block *) llist_data(ll_b);
    if (widget->current == ll_b)
        printf ("*");
    if (widget->fvb == ll_b)
        printf ("@");
    
    printf("Block %x len %d\n", b, b->len);
    for (ll_p = b->paragraphs; ll_p; ll_p = llist_next(ll_p)) {
        p = (paragraph *) llist_data(ll_p);
        if (b->cursor_paragraph == ll_p)
            printf ("*");
        printf("\tParagraph %x len %d height %d flags %x\n", 
               p, p->len, p->height, p->flags);
        
        for (ll_a = p->atoms; ll_a; ll_a = llist_next(ll_a)) {
            a = (atom *) llist_data(ll_a);
            if (b->cursor_atom == ll_a)
                printf ("*");
            
            printf("\t\tAtom %x len %d width %d height %d", 
                   a, a->len, a->width, a->height);
            if (a->flags & ATOM_FLAG_LEFT)
                printf(" L");
            if (a->flags & ATOM_FLAG_RIGHT)
                printf(" R");
            if (a->flags & ATOM_FLAG_DRAW)
                printf(" D");
            if (a->flags & ATOM_FLAG_SELECTED)
                printf(" S");
            printf("\n");
        }
    }
}
Example #10
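/*
 * Rewrite each group filter's (aggregate, source, field) reference
 * into an index into the grouped schema.
 */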
int sql_resolve_group_filter (void)
{
	int i;
	struct sql_group_filter *tmp;
	tmp = (struct sql_group_filter *)llist_next(query.group_filter);
	while (tmp != NULL)
	{
		aggre_field_name(sql_buf, tmp->aggre == NULL ? NULL : tmp->aggre->name, tmp->source, tmp->field);
		i = schema_field(query.grouped_schema, sql_buf);
		tmp->field = i;
		tmp = (struct sql_group_filter *)llist_next(tmp->link);
	}
	return 0;
}
Example #11
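/*
 * Once the joined schema exists, rewrite every group entry and every
 * aggregate argument from a (source, field) pair into an index into
 * the joined schema.
 */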
int sql_resolve_group (void)
{
	struct sql_group *grp;
	struct sql_aggre *agg;
	char buf[MAX_ID_LEN];
	grp = (struct sql_group *)llist_next(query.group);
	while (grp != NULL)
	{
		int field;
		joined_field_name(buf, grp->source, grp->field);
		field = schema_field(query.joined_schema, buf);
		assert(field >= 0);
		grp->source = NULL;
		grp->field = field;
		grp = (struct sql_group *)llist_next(grp->link);
	}
	agg = (struct sql_aggre *)llist_next(query.aggre);
	while (agg != NULL)
	{
		int field;
		if (agg->source == NULL)
		{
			field = 0;
		}
		else
		{
			joined_field_name(buf, agg->source, agg->field);
			field = schema_field(query.joined_schema, buf);
			assert(field >= 0);
		}
		agg->source = NULL;
		agg->field = field;
		agg = (struct sql_aggre *)llist_next(agg->link);
	}
	return 0;
}
Example #12
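/*
 * Build the mapped (output) schema.  For a select-all query it is a
 * copy of the grouped schema (or of the joined schema when there is
 * no grouping); otherwise it contains one field per map expression,
 * named after the expression's alias when one was given.
 */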
int make_mapped_schema (void)
{
	int nof;
	schema_t *sch;
	struct sql_map *i;
	if (query.sel_all)
	{
		if (query.grouped_schema != NULL)
			query.mapped_schema = schema_dup(query.grouped_schema);
		else
			query.mapped_schema = schema_dup(query.joined_schema);
		return 0;
	}
	nof = llist_count(query.map);
	sch = schema_alloc(nof);
	sch->nof = nof;
	nof = 0;
	i = (struct sql_map *)llist_next(query.map);
	while (i != NULL)
	{
		if (i->alias == NULL)
		{
			strcpy(sch->field[nof].name, "");
		}
		else
		{
			strcpy(sch->field[nof].name, i->alias);
		}
		sch->field[nof].type = i->expr->type;
		i = (struct sql_map *)llist_next(i->link);
		nof++;
	}
	fix_schema(sch);
	query.mapped_schema = sch;
	return 0;	
}
Example #13
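/*
 * Build the grouped schema: one field per group column, keeping its
 * joined name and type, followed by one field per aggregate, named by
 * aggre_field_name() and typed with the aggregate's output type.
 * Nothing is built when the query has neither groups nor aggregates.
 */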
int make_grouped_schema (void)
{
	int nof;
	schema_t *sch;
	struct sql_group *grp;
	struct sql_aggre *agg;
	stream_t *str;
	int no_group, no_aggre;
	no_group = llist_count(query.group);
	no_aggre = llist_count(query.aggre);
	nof = no_group + no_aggre;
	if (nof == 0) return 0;
	sch = schema_alloc(nof);
	sch->nof = nof;
	grp = (struct sql_group *)llist_next(query.group);
	nof = 0;
	while (grp != NULL)
	{
		str = grp->source->stream;
		joined_field_name(sch->field[nof].name, grp->source, grp->field);
		sch->field[nof].type = str->schema->field[grp->field].type;
		grp = (struct sql_group *)llist_next(grp->link);
		nof++;
	}
	agg = (struct sql_aggre *)llist_next(query.aggre);
	while (agg != NULL)
	{
		aggre_field_name(sch->field[nof].name, agg->aggre->name, agg->source, agg->field);
		sch->field[nof].type = agg->aggre->otype;
		agg = (struct sql_aggre *)llist_next(agg->link);
		nof++;
	}
	fix_schema(sch);
	query.grouped_schema = sch;
	return 0;
}
Example #14
/*---------------------------------------------------------------------------*/
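/*
 * Tasklet body of the event loop: drain the lock-less event list,
 * restore submission order (llist_del_all hands back the entries in
 * LIFO order) and run each handler.
 */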
static void priv_ev_loop_run_tasklet(unsigned long data)
{
	struct xio_ev_loop *loop = (struct xio_ev_loop *) data;
	struct xio_ev_data	*tev;
	struct llist_node	*node;

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}
}
Example #15
static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;


	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty_relaxed(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
Example #16
LumieraConfigitem
lumiera_config_lookup_remove (LumieraConfigLookup self, LumieraConfigitem item)
{
  TRACE (configlookup_dbg, "%s", item->line);
  REQUIRE (!llist_is_empty (&item->lookup), "item is not in a lookup");

  if (llist_is_single (&item->lookup))
    {
      /* last item in lookup, remove it from the splay tree */
      LumieraConfigLookupentry entry = LLIST_TO_STRUCTP (llist_next (&item->lookup), lumiera_config_lookupentry, configitems);
      llist_unlink (&item->lookup);
      psplay_delete_node (&self->tree, (PSplaynode)entry);
    }
  else
    {
      /* other items are still present in the hash, just unlink this one */
      llist_unlink (&item->lookup);
    }

  return item;
}
Example #17
/*---------------------------------------------------------------------------*/
int priv_ev_loop_run(void *loop_hndl)
{
	struct xio_ev_loop	*loop = loop_hndl;
	struct xio_ev_data	*tev;
	struct llist_node	*node;
	int cpu;

	clear_bit(XIO_EV_LOOP_STOP, &loop->states);

	switch (loop->flags) {
	case XIO_LOOP_GIVEN_THREAD:
		if (loop->ctx->worker != (uint64_t) get_current()) {
			ERROR_LOG("worker kthread(%p) is not current(%p).\n",
				  (void *) loop->ctx->worker, get_current());
			goto cleanup0;
		}
		/* no need to disable preemption */
		cpu = raw_smp_processor_id();
		if (loop->ctx->cpuid != cpu) {
			TRACE_LOG("worker on core(%d) scheduled to(%d).\n",
				  cpu, loop->ctx->cpuid);
			set_cpus_allowed_ptr(get_current(),
					     cpumask_of(loop->ctx->cpuid));
		}
		break;
	case XIO_LOOP_TASKLET:
		/* were events added to the list while in STOP state? */
		if (!llist_empty(&loop->ev_llist))
			priv_kick_tasklet(loop_hndl);
		return 0;
	case XIO_LOOP_WORKQUEUE:
		/* were events added to the list while in STOP state? */
		while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
			node = llist_reverse_order(node);
			while (node) {
				tev = llist_entry(node, struct xio_ev_data,
						  ev_llist);
				node = llist_next(node);
				tev->work.func = priv_ev_loop_run_work;
				queue_work_on(loop->ctx->cpuid, loop->workqueue,
					      &tev->work);
			}
		}
		return 0;
	default:
		/* undo */
		set_bit(XIO_EV_LOOP_STOP, &loop->states);
		return -1;
	}

retry_wait:
	wait_event_interruptible(loop->wait,
				 test_bit(XIO_EV_LOOP_WAKE, &loop->states));

retry_dont_wait:

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}

	/* "race point" */
	clear_bit(XIO_EV_LOOP_WAKE, &loop->states);

	if (unlikely(test_bit(XIO_EV_LOOP_STOP, &loop->states)))
		return 0;

	/* if a new entry was added while we were at the "race point"
	 * then wait_event might block forever as the condition is false */
	if (llist_empty(&loop->ev_llist))
		goto retry_wait;

	/* race detected */
	if (!test_and_set_bit(XIO_EV_LOOP_WAKE, &loop->states))
		goto retry_dont_wait;

	/* a wakeup was already called */
	goto retry_wait;

cleanup0:
	set_bit(XIO_EV_LOOP_STOP, &loop->states);
	return -1;
}
Example #18
void print_tree ( text_widget * widget ) {
    LList * ll_b;
    for (ll_b = widget->blocks; ll_b; ll_b = llist_next(ll_b)) {
        print_block(widget, ll_b);
    }
}