Code Example #1
File: dbpf-open-cache.c Project: Goon83/SALB
void dbpf_open_cache_initialize(void)
{
    int i = 0, ret = 0;

    gen_mutex_lock(&cache_mutex);

    /* run through preallocated cache elements to initialize
     * and put them on the free list
     */
    if (OPEN_CACHE_SIZE == 0)
    {
	gossip_err("Warning: dbpf_open_cache disabled.\n");
    }

    for (i = 0; i < OPEN_CACHE_SIZE; i++)
    {
        prealloc[i].fd = -1;
	qlist_add(&prealloc[i].queue_link, &free_list);
    }

    gen_mutex_unlock(&cache_mutex);

    /* Initialize and create the worker thread for threaded deletes */
    INIT_QLIST_HEAD(&dbpf_unlink_context.global_list);
    pthread_mutex_init(&dbpf_unlink_context.mutex, NULL);
    pthread_cond_init(&dbpf_unlink_context.data_available, NULL);
    ret = pthread_create(&dbpf_unlink_context.thread_id, NULL, unlink_bstream, (void*)&dbpf_unlink_context);
    if(ret)
    {
        gossip_err("dbpf_open_cache_initialize: failed [%d]\n", ret);
        return;
    }
}
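
For context, the qlist API used in these examples follows the Linux-kernel intrusive-list pattern: a small next/prev link struct is embedded in each element, qlist_add pushes that link onto the head of a circular doubly-linked list, and a container_of-style macro recovers the enclosing struct from the link. A minimal standalone sketch of the pattern, using illustrative names (node_head, node_add, cache_slot are not the actual PVFS quicklist declarations):

#include <stdio.h>
#include <stddef.h>

/* Illustrative intrusive list link, mirroring the quicklist style. */
struct node_head {
    struct node_head *next, *prev;
};

#define NODE_HEAD_INIT(name) { &(name), &(name) }

/* Insert 'entry' right after 'head', i.e. push onto the front. */
static void node_add(struct node_head *entry, struct node_head *head)
{
    entry->prev = head;
    entry->next = head->next;
    head->next->prev = entry;
    head->next = entry;
}

/* Recover the containing struct from the embedded link. */
#define node_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct cache_slot {
    int fd;
    struct node_head link;
};

int main(void)
{
    struct node_head free_list = NODE_HEAD_INIT(free_list);
    struct cache_slot slots[4];
    struct cache_slot *front;
    int i;

    /* Same shape as dbpf_open_cache_initialize: mark each slot
     * unused and push it onto the free list. */
    for (i = 0; i < 4; i++) {
        slots[i].fd = -1;
        node_add(&slots[i].link, &free_list);
    }

    /* The most recently added slot sits at the front. */
    front = node_entry(free_list.next, struct cache_slot, link);
    printf("front slot fd = %d\n", front->fd);
    return 0;
}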
Code Example #2
void fcfs_next_rc(
        void               * sched,
        void               * rc_event_save,
        model_net_sched_rc * rc,
        tw_lp              * lp){
    mn_sched_queue *s = sched;
    if (rc->rtn == -1){
        // no op
    }
    else{
        if (s->is_recv_queue){
            dprintf("%lu (mn): rc receiving message\n", lp->gid);
            s->method->model_net_method_recv_msg_event_rc(lp);
        }
        else {
            dprintf("%lu (mn): rc issuing packet\n", lp->gid);
            s->method->model_net_method_packet_event_rc(lp);
        }
        if (rc->rtn == 0){
            // get the front item and add back the packet's worth of bytes
            mn_sched_qitem *q = qlist_entry(s->reqs.next, mn_sched_qitem, ql);
            q->rem += q->req.packet_size;
        }
        else if (rc->rtn == 1){
            // re-create the q item
            mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
            assert(q);
            q->req = rc->req;
            q->sched_params = rc->sched_params;
            q->rem = (q->req.is_pull ? PULL_MSG_SIZE : q->req.msg_size) % 
                q->req.packet_size;
            if (q->rem == 0){ // processed exactly a packet's worth of data
                q->rem = q->req.packet_size;
            }
            void * e_dat = rc_event_save;
            if (q->req.remote_event_size > 0){
                q->remote_event = malloc(q->req.remote_event_size);
                memcpy(q->remote_event, e_dat, q->req.remote_event_size);
                e_dat = (char*) e_dat + q->req.remote_event_size;
            }
            else { q->remote_event = NULL; }
            if (q->req.self_event_size > 0) {
                q->local_event = malloc(q->req.self_event_size);
                memcpy(q->local_event, e_dat, q->req.self_event_size);
            }
            else { q->local_event = NULL; }
            // add back to front of list
            qlist_add(&q->ql, &s->reqs);
            s->queue_len++;
        }
        else {
            assert(0);
        }
    }
}
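
One detail worth calling out above is the rem recomputation: when the forward event completed a request and freed its queue item, the reverse handler rebuilds the item and must restore how many bytes of the final packet were outstanding, i.e. the message size modulo the packet size, with an exact multiple mapped back to a full packet. A small standalone check of that arithmetic (the last_packet_rem helper is illustrative, not part of the model-net source):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes outstanding in the last packet of a message, as recomputed in
 * fcfs_next_rc: msg_size % packet_size, except that an exact multiple
 * counts as one full packet rather than zero. */
static uint64_t last_packet_rem(uint64_t msg_size, uint64_t packet_size)
{
    uint64_t rem = msg_size % packet_size;
    return rem == 0 ? packet_size : rem;
}

int main(void)
{
    assert(last_packet_rem(1000, 512) == 488); /* partial final packet */
    assert(last_packet_rem(1024, 512) == 512); /* exact packet multiple */
    printf("rem checks passed\n");
    return 0;
}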
Code Example #3
void rr_next_rc (
        void               * sched,
        void               * rc_event_save,
        model_net_sched_rc * rc,
        tw_lp              * lp){
    // only time we need to do something apart from fcfs is on a successful
    // rr_next that didn't remove the item from the queue
    if (rc->rtn == 0){
        mn_sched_queue *s = sched;
        qlist_add(qlist_pop_back(&s->reqs), &s->reqs);
    }
    fcfs_next_rc(sched, rc_event_save, rc, lp);
}
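
Presumably the forward rr_next rotated the just-served item from the front of the queue to the back, so on rtn == 0 the reverse handler pops the back element and pushes it onto the front before delegating to fcfs_next_rc. A self-contained sketch of that rotate/un-rotate pair, using illustrative link_* helpers rather than the actual quicklist header:

#include <stdio.h>

struct link { struct link *next, *prev; };

static void link_init(struct link *h) { h->next = h->prev = h; }

static void link_add(struct link *e, struct link *h) /* push front */
{
    e->prev = h; e->next = h->next;
    h->next->prev = e; h->next = e;
}

static void link_add_tail(struct link *e, struct link *h) /* push back */
{
    e->next = h; e->prev = h->prev;
    h->prev->next = e; h->prev = e;
}

static struct link *link_pop_front(struct link *h)
{
    struct link *e = h->next;
    h->next = e->next; e->next->prev = h;
    return e;
}

static struct link *link_pop_back(struct link *h)
{
    struct link *e = h->prev;
    e->prev->next = h; h->prev = e->prev;
    return e;
}

int main(void)
{
    struct link head, a, b, c;
    link_init(&head);
    link_add_tail(&a, &head);
    link_add_tail(&b, &head);
    link_add_tail(&c, &head);                 /* queue: a b c */

    /* forward path: rotate front to back (a b c -> b c a) */
    link_add_tail(link_pop_front(&head), &head);

    /* reverse path, as in rr_next_rc: back to front (b c a -> a b c) */
    link_add(link_pop_back(&head), &head);

    printf("front restored: %s\n", head.next == &a ? "yes" : "no");
    return 0;
}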
Code Example #4
static void qlist_set(t_qlist *x, t_symbol *s, int ac, t_atom *av)
{
    qlist_clear(x);
    qlist_add(x, s, ac, av);
}
Code Example #5
File: dbpf-open-cache.c Project: Goon83/SALB
/**
 * The dbpf open cache is used primarily to manage open
 * file descriptors to bstream files on IO servers.  PVFS
 * currently uses a lazy style of creating the actual datafiles for
 * bstreams.  Only on the first write to a bstream is the file
 * actually created (opened with O_CREAT).  This means that if a
 * read of a bstream that hasn't been written should somehow occur,
 * an ENOENT error will be returned immediately, instead of allowing
 * a read to EOF (of a zero-byte file).  For us, this is ok, since
 * the client gets the size of the bstream in the getattr before doing
 * any IO.  All that being said, the open_cache_get call needs to
 * behave differently based on the desired operation:  reads on
 * files that don't exist should return ENOENT, but writes on files
 * that don't exist should create and open the file.
 */
int dbpf_open_cache_get(
    TROVE_coll_id coll_id,
    TROVE_handle handle,
    enum open_cache_open_type type,
    struct open_cache_ref* out_ref)
{
    struct qlist_head *tmp_link;
    struct open_cache_entry* tmp_entry = NULL;
    int found = 0;
    int ret = 0;

    gossip_debug(GOSSIP_DBPF_OPEN_CACHE_DEBUG,
                 "dbpf_open_cache_get: called\n");

    gen_mutex_lock(&cache_mutex);

    /* check already opened objects first, reuse ref if possible */

    tmp_entry = dbpf_open_cache_find_entry(
        &used_list, "used list", coll_id, handle);
    if(!tmp_entry)
    {
        tmp_entry = dbpf_open_cache_find_entry(
            &unused_list, "unused list", coll_id, handle);
    }

    out_ref->fd = -1;

    if (tmp_entry)
    {
	if (tmp_entry->fd < 0)
	{
	    ret = open_fd(&(tmp_entry->fd), coll_id, handle, type);
	    if (ret < 0)
	    {
		gen_mutex_unlock(&cache_mutex);
		return ret;
	    }
            tmp_entry->type = type;
	}
        out_ref->fd = tmp_entry->fd;
        out_ref->type = type;

	out_ref->internal = tmp_entry;
	tmp_entry->ref_ct++;

	/* remove the entry and place it at the used head (assuming it
	 * will be referenced again soon)
	 */
	gossip_debug(GOSSIP_DBPF_OPEN_CACHE_DEBUG, "dbpf_open_cache_get: "
                     "moving to (or reordering in) used list.\n");
	qlist_del(&tmp_entry->queue_link);
	qlist_add(&tmp_entry->queue_link, &used_list);

	gen_mutex_unlock(&cache_mutex);

        assert(out_ref->fd > 0);
	return 0;
    }

    /* if we fall through to this point, then the object was not found
     * in the cache. In order of priority we will now try: free list,
     * unused_list, and then bypass cache
     */
    if (!qlist_empty(&free_list))
    {
        tmp_link = free_list.next;
        tmp_entry = qlist_entry(tmp_link, struct open_cache_entry,
                                queue_link);
        qlist_del(&tmp_entry->queue_link);
        found = 1;
        gossip_debug(GOSSIP_DBPF_OPEN_CACHE_DEBUG,
                     "dbpf_open_cache_get: resetting entry from free list.\n");
    }
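
The block comment at the top of this example distinguishes reads from writes on bstream files that have not been created yet: a write may lazily create the file (open with O_CREAT), while a read must fail with ENOENT. A minimal standalone sketch of that open policy (the open_bstream helper and open_type enum below are illustrative, not the open_fd call used above):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

enum open_type { OPEN_FOR_READ, OPEN_FOR_WRITE };

/* Writes may lazily create the bstream file; reads must not, so that a
 * read of an unwritten bstream yields -ENOENT instead of reading a
 * zero-byte file to EOF. */
static int open_bstream(const char *path, enum open_type type)
{
    int flags = (type == OPEN_FOR_WRITE) ? (O_RDWR | O_CREAT) : O_RDONLY;
    int fd = open(path, flags, 0600);
    if (fd < 0)
        return -errno;
    return fd;
}

int main(void)
{
    int fd = open_bstream("/tmp/no-such-bstream", OPEN_FOR_READ);
    if (fd < 0)
        printf("read open failed as expected: %s\n", strerror(-fd));
    return 0;
}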