/* Detach the queue identified by queue_id from this worker instance.
 *
 * If the queue is momentarily absent from the worker's list (another
 * thread has pulled it off to service operations), block on the
 * worker's condition variable until it is returned, then unlink it
 * and drop the worker's producer/consumer registrations.
 *
 * Always returns 0.
 */
static int queues_queue_remove(struct PINT_manager_s *manager,
                               PINT_worker_inst *inst,
                               PINT_queue_id queue_id)
{
    struct PINT_worker_queues_s *wq = &inst->queues;
    struct PINT_queue_s *q;

    gen_mutex_lock(&wq->mutex);

    q = id_gen_fast_lookup(queue_id);
    assert(q);

    /* The queue may be temporarily checked out while its operations
     * are serviced; wait (re-checking under the mutex) until it shows
     * up on the worker's queue list again. */
    while(!qlist_exists(&wq->queues, &q->link))
    {
        gen_cond_wait(&wq->cond, &wq->mutex);
    }

    /* Present for certain now -- unlink and deregister. */
    qlist_del(&q->link);
    PINT_queue_remove_producer(queue_id, wq);
    PINT_queue_remove_consumer(queue_id, wq);

    gen_mutex_unlock(&wq->mutex);
    return 0;
}
cfio_msg_t *cfio_recv_get_first() { cfio_msg_t *_msg = NULL, *msg; qlist_head_t *link; size_t size; debug(DEBUG_RECV, "client_get_index = %d", client_get_index); if(qlist_empty(&(msg_head[client_get_index].link))) { link = NULL; }else { link = msg_head[client_get_index].link.next; } if(NULL == link) { msg = NULL; }else { msg = qlist_entry(link, cfio_msg_t, link); cfio_recv_unpack_msg_size(msg, &size); if(msg->size == size) //only contain one single msg { qlist_del(link); _msg = msg; }else { msg->size -= size; _msg = cfio_msg_create(); _msg->addr = msg->addr; _msg->src = msg->src; _msg->dst = msg->dst; msg->addr += size; } client_get_index = (client_get_index + 1) % client_num; } if(_msg != NULL) { debug(DEBUG_RECV, "get msg size : %lu", _msg->size); } return _msg; }
/* Detach the queue identified by queue_id from a threaded worker
 * instance.
 *
 * Sets remove_requested so worker threads know a removal is pending,
 * then waits (in ~10 ms timed slices) for the queue to reappear on the
 * worker's list before unlinking it, deregistering the worker as
 * producer/consumer, and waking any waiters.
 *
 * Always returns 0.
 */
static int threaded_queues_queue_remove(struct PINT_manager_s *manager,
                                        PINT_worker_inst *inst,
                                        PINT_queue_id queue_id)
{
    struct PINT_worker_threaded_queues_s *w;
    struct PINT_queue_s *queue;
    struct timespec timeout;

    w = &inst->threaded;

    gen_mutex_lock(&w->mutex);
    w->remove_requested = 1;

    queue = id_gen_fast_lookup(queue_id);
    assert(queue);

    /* we wait for 10 millisecs -- long enough for the queue to
     * be added back to the unused list */
    while(!qlist_exists(&w->queues, &queue->link))
    {
        /* assume that operations are being pulled off presently
         * and it just needs to be added back to the
         * list of queues, which we will wait for */
        timeout.tv_sec = time(NULL);
        /* BUG FIX: was 1e6 (a double literal == 1 ms), contradicting
         * the 10 ms intent documented above.  Use an integer 10 ms. */
        timeout.tv_nsec = 10 * 1000 * 1000;
        /* NOTE(review): if gen_cond_timedwait takes an absolute
         * deadline (pthread_cond_timedwait semantics), this deadline
         * may already be in the past when the current second is more
         * than 10 ms old, degrading the wait into a re-check loop --
         * confirm gen_cond_timedwait's timeout semantics. */
        gen_cond_timedwait(&w->cond, &w->mutex, &timeout);
    }

    /* now we're ensured that its there, so pluck it off */
    qlist_del(&queue->link);
    PINT_queue_remove_producer(queue_id, w);
    PINT_queue_remove_consumer(queue_id, w);

    memset(&queue->link, 0, sizeof(queue->link));

    w->remove_requested = 0;
    gen_cond_broadcast(&w->cond);
    gen_mutex_unlock(&w->mutex);
    return 0;
}
/* Unlink a queued dbpf operation from whatever list currently holds
 * its link member.  NOTE(review): no locking is done here -- assumes
 * the caller already serializes access to the containing queue;
 * confirm against call sites. */
void dbpf_op_queue_remove(dbpf_queued_op_t *dbpf_op)
{
    qlist_del(&dbpf_op->link);
}
/** * The dbpf open cache is used primarily to manage open * file descriptors to bstream files on IO servers. PVFS * currently uses a lazy style of creating the actual datafiles for * bstreams. Only on the first write to a bstream is the file * actually created (opened with O_CREAT). This means that if a * read of a bstream that hasn't been written should somehow occur, * an ENOENT error will be returned immediately, instead of allowing * a read to EOF (of a zero-byte file). For us, this is ok, since * the client gets the size of the bstream in the getattr before doing * any IO. All that being said, the open_cache_get call needs to * behave differently based on the desired operation: reads on * files that don't exist should return ENOENT, but writes on files * that don't exist should create and open the file. */ int dbpf_open_cache_get( TROVE_coll_id coll_id, TROVE_handle handle, enum open_cache_open_type type, struct open_cache_ref* out_ref) { struct qlist_head *tmp_link; struct open_cache_entry* tmp_entry = NULL; int found = 0; int ret = 0; gossip_debug(GOSSIP_DBPF_OPEN_CACHE_DEBUG, "dbpf_open_cache_get: called\n"); gen_mutex_lock(&cache_mutex); /* check already opened objects first, reuse ref if possible */ tmp_entry = dbpf_open_cache_find_entry( &used_list, "used list", coll_id, handle); if(!tmp_entry) { tmp_entry = dbpf_open_cache_find_entry( &unused_list, "unused list", coll_id, handle); } out_ref->fd = -1; if (tmp_entry) { if (tmp_entry->fd < 0) { ret = open_fd(&(tmp_entry->fd), coll_id, handle, type); if (ret < 0) { gen_mutex_unlock(&cache_mutex); return ret; } tmp_entry->type = type; } out_ref->fd = tmp_entry->fd; out_ref->type = type; out_ref->internal = tmp_entry; tmp_entry->ref_ct++; /* remove the entry and place it at the used head (assuming it * will be referenced again soon) */ gossip_debug(GOSSIP_DBPF_OPEN_CACHE_DEBUG, "dbpf_open_cache_get: " "moving to (or reordering in) used list.\n"); qlist_del(&tmp_entry->queue_link); 
qlist_add(&tmp_entry->queue_link, &used_list); gen_mutex_unlock(&cache_mutex); assert(out_ref->fd > 0); return 0; } /* if we fall through to this point, then the object was not found * in the cache. In order of priority we will now try: free list, * unused_list, and then bypass cache */ if (!qlist_empty(&free_list)) { tmp_link = free_list.next; tmp_entry = qlist_entry(tmp_link, struct open_cache_entry, queue_link); qlist_del(&tmp_entry->queue_link); found = 1; gossip_debug(GOSSIP_DBPF_OPEN_CACHE_DEBUG, "dbpf_open_cache_get: resetting entry from free list.\n"); }