Example #1
0
/*
 * Debug helper: dump the manager-side state of a single cbuf.
 *
 * Looks up the cbuf_info for @cbid and prints its size, kernel mapping,
 * and every per-component mapping on the circular owner list (the list
 * always contains at least the owner itself, hence the do/while).
 *
 * Fix: the original format strings used "%lux", which prints the value
 * in DECIMAL and then a literal 'x' — addresses and the meta nfo word
 * were clearly meant to be shown in hex, so use "0x%lx" instead.
 */
void cbuf_debug_cbiddump(unsigned int cbid)
{
	struct cbuf_info *cbi;
	struct cbuf_maps *m;

	printc("mgr dump cbid %u\n", cbid);
	cbi = cmap_lookup(&cbufs, cbid);
	assert(cbi);
	printc("cbid %u cbi: id %d sz %lu mem %p\n", cbid, cbi->cbid, cbi->size, cbi->mem);
	m = &cbi->owner;
	do {
		struct cbuf_meta *meta = m->m;
		/* addresses/flag words in hex, as intended */
		printc("map: spd %d addr 0x%lx meta %p\n", m->spdid, m->addr, m->m);
		printc("meta: nfo 0x%lx addr 0x%lx cbid %u\n", meta->nfo, CBUF_PTR(meta), meta->cbid_tag.cbid);
		m = FIRST_LIST(m, next, prev);
	} while(m != &cbi->owner);
}
Example #2
0
/**
 * Per-column callback invoked by the CSV parser for each field.
 *
 * Appends the field to the output buffer (comma-separated), growing the
 * buffer as needed, and — when a "group column" is configured — detects
 * a change in that column's value so buffered rows can be flushed first.
 *
 * @param s    field bytes (not NUL-terminated)
 * @param len  number of bytes in @s
 * @param data opaque pointer to the struct csv_context
 */
static inline void cb_col(void *s, size_t len, void *data) {
    struct csv_context *ctx = (struct csv_context *)data;
    size_t written;

    // Separate from the previous field on this row; the flag starts
    // cleared at row start and stays set for every subsequent column.
    if (ctx->put_comma) {
        ctx->csv_buf = cbuf_putc(ctx->csv_buf, ',');
    }
    ctx->put_comma = 1;

    // Group-column handling: only when a group column is configured,
    // this is that column, and we are past any header row.
    if (ctx->gcol > -1 && ctx->col == ctx->gcol &&
        (!ctx->use_header || ctx->header_len))
    {
        if (ctx->gcol_buf && ctx->opos && memcmp(ctx->gcol_buf, s, len) != 0) {
            // Value changed while data is pending in overflow — flush it.
            flush_file(ctx, 1);
        } else if (!ctx->gcol_buf) {
            // First time seeing the group column: allocate its buffer.
            ctx->gcol_buf = cbuf_init(len);
        }

        // Remember this row's group-column value for the next comparison.
        ctx->gcol_buf = cbuf_setlen(ctx->gcol_buf, (const char *)s, len);
    }

    // Write the (escaped) field; if it does not fit, double the buffer
    // and try again until it does.
    while ((written = csv_write(CBUF_PTR(ctx->csv_buf), CBUF_REM(ctx->csv_buf),
                                s, len)) > CBUF_REM(ctx->csv_buf))
    {
        ctx->csv_buf = cbuf_double(ctx->csv_buf);
    }

    // Advance the buffer position past what was just written.
    CBUF_POS(ctx->csv_buf) += written;

    // Track which column of the row we are on.
    ctx->col++;
}
Example #3
0
/*
 * Create a new cbuf (cbid == 0) or re-validate an existing one
 * (cbid > 0) on behalf of component @spdid.
 *
 * Returns:
 *   > 0  the cbuf id on success
 *   < 0  -(cbid): the meta page for this id is not mapped into the
 *        client yet — the client must map it in and retry
 *   0    failure (bad args, unknown component/cbid, allocation failure,
 *        or the thread was blocked on memory pressure)
 *
 * Runs with the CBUF lock held (CBUF_TAKE/CBUF_RELEASE); all exits
 * funnel through "done" except the memory-pressure block (see note).
 */
int
cbuf_create(spdid_t spdid, unsigned long size, int cbid)
{
	struct cbuf_comp_info *cci;
	struct cbuf_info *cbi;
	struct cbuf_meta *meta;
	struct cbuf_bin *bin;
	int ret = 0;
	unsigned int id = (unsigned int)cbid;

	printl("cbuf_create\n");
	if (unlikely(cbid < 0)) return 0;
	CBUF_TAKE();
	tracking_start(NULL, CBUF_CRT);

	cci = cbuf_comp_info_get(spdid);
	if (unlikely(!cci)) goto done;

	/* 
	 * Client wants to allocate a new cbuf, but the meta might not
	 * be mapped in.
	 */
	if (!cbid) {
		/* TODO: check if have enough free memory: ask mem manager */
		/*memory usage exceeds the target, block this thread*/
		if (size + cci->allocated_size > cci->target_size) {
			/* Try to reclaim enough before blocking. */
			cbuf_shrink(cci, size);
			if (size + cci->allocated_size > cci->target_size) {
				cbuf_thread_block(cci, size);
				/*
				 * NOTE(review): this returns without running
				 * tracking_end()/CBUF_RELEASE() — presumably
				 * cbuf_thread_block releases the lock before
				 * blocking, otherwise this leaks the CBUF
				 * lock. TODO: confirm against its definition.
				 */
				return 0;
			}
		}

 		cbi = malloc(sizeof(struct cbuf_info));

		if (unlikely(!cbi)) goto done;
		/* Allocate and map in the cbuf. Discard inconsistent cbufs */
		/* TODO: Find a better way to manage those inconsistent cbufs */
		do {
			id   = cmap_add(&cbufs, cbi);
			meta = cbuf_meta_lookup(cci, id);
		} while(meta && CBUF_INCONSISENT(meta));

		/* Initialize the backing structure; owner list is circular
		 * and initially contains only the owner itself. */
		cbi->cbid        = id;
		size             = round_up_to_page(size);
		cbi->size        = size;
		cbi->owner.m     = NULL;
		cbi->owner.spdid = spdid;
		INIT_LIST(&cbi->owner, next, prev);
		INIT_LIST(cbi, next, prev);
		if (cbuf_alloc_map(spdid, &(cbi->owner.addr), 
				   (void**)&(cbi->mem), NULL, size, MAPPING_RW)) {
			goto free;
		}
	} 
	/* If the client has a cbid, then make sure we agree! */
	else {
		cbi = cmap_lookup(&cbufs, id);
		if (unlikely(!cbi)) goto done;
		if (unlikely(cbi->owner.spdid != spdid)) goto done;
	}
	meta = cbuf_meta_lookup(cci, id);

	/* We need to map in the meta for this cbid.  Tell the client. */
	if (!meta) {
		/* Negative id signals "map the meta page, then retry". */
		ret = (int)id * -1;
		goto done;
	}
	
	/* 
	 * Now we know we have a cbid, a backing structure for it, a
	 * component structure, and the meta mapped in for the cbuf.
	 * Update the meta with the correct addresses and flags!
	 */
	memset(meta, 0, sizeof(struct cbuf_meta));
	meta->sz            = cbi->size >> PAGE_ORDER;
	meta->cbid_tag.cbid = id;
	CBUF_FLAG_ADD(meta, CBUF_OWNER);
	CBUF_PTR_SET(meta, cbi->owner.addr);
	CBUF_REFCNT_INC(meta);

	/*
	 * When creates a new cbuf, the manager should be the only
	 * one who can access the meta
	 */
	/* TODO: malicious client may trigger this assertion, just for debug */
	assert(CBUF_REFCNT(meta) == 1);
	assert(CBUF_PTR(meta));
	cbi->owner.m = meta;

	/*
	 * Install cbi last. If not, after return a negative cbid, 
	 * collection may happen and get a dangle cbi
	 */
	bin = cbuf_comp_info_bin_get(cci, size);
	if (!bin) bin = cbuf_comp_info_bin_add(cci, size);
	if (unlikely(!bin)) goto free;
	if (bin->c) ADD_LIST(bin->c, cbi, next, prev);
	else        bin->c   = cbi;
	cci->allocated_size += size;
	ret = (int)id;
done:
	/* Single unlock path for all success/failure exits above. */
	tracking_end(NULL, CBUF_CRT);
	CBUF_RELEASE();

	return ret;
free:
	/* Undo the cmap slot and backing struct, then exit via done. */
	cmap_del(&cbufs, id);
	free(cbi);
	goto done;
}