Example 1
static int __gnix_sfl_refill(struct gnix_s_freelist *fl, int n)
{
	int i, ret = FI_SUCCESS;
	unsigned char *elems;

	assert(fl);
	assert(n > 0);
	/*
	 * We allocate an extra element for use as the pointer to the
	 * memory chunk maintained in the chunks field for later
	 * freeing.  Use an entire element, in case size was padded
	 * for alignment
	 */
	elems = calloc((n+1), fl->elem_size);
	if (elems == NULL) {
		ret = -FI_ENOMEM;
		goto err;
	}

	/* Save away the pointer to the chunk */
	slist_insert_tail((struct slist_entry *) elems, &fl->chunks);

	/* Start with slist_entry of first element */
	elems += fl->elem_size + fl->offset;

	for (i = 0; i < n; i++) {
		slist_insert_tail((struct slist_entry *) elems, &fl->freelist);
		elems += fl->elem_size;
	}
err:
	return ret;
}
Example 2
void psmx2_eq_enqueue_event(struct psmx2_fid_eq *eq,
			    struct psmx2_eq_event *event)
{
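	/* Error events are kept on a dedicated queue so they can be
	 * retrieved via fi_eq_readerr() */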
	fastlock_acquire(&eq->lock);
	if (event->error)
		slist_insert_tail(&event->list_entry, &eq->error_queue);
	else
		slist_insert_tail(&event->list_entry, &eq->event_queue);
	fastlock_release(&eq->lock);

	if (eq->wait)
		psmx2_wait_signal((struct fid_wait *)eq->wait);
}
Example 3
static inline void psmx_am_enqueue_rma(struct psmx_fid_domain *domain,
				       struct psmx_am_request *req)
{
	fastlock_acquire(&domain->rma_queue.lock);
	slist_insert_tail(&req->list_entry, &domain->rma_queue.list);
	fastlock_release(&domain->rma_queue.lock);
}
Example 4
static inline void psmx_am_enqueue_unexp(struct psmx_fid_domain *domain,
					 struct psmx_unexp *unexp)
{
	fastlock_acquire(&domain->unexp_queue.lock);
	slist_insert_tail(&unexp->list_entry, &domain->unexp_queue.list);
	fastlock_release(&domain->unexp_queue.lock);
}
Example 5
int smr_rx_comp(struct smr_ep *ep, void *context, uint64_t flags, size_t len,
		void *buf, void *addr, uint64_t tag, uint64_t data,
		uint64_t err)
{
	struct fi_cq_tagged_entry *comp;
	struct util_cq_err_entry *entry;

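	/* The tail of the circular queue is the next free completion slot;
	 * ofi_cirque_commit() below publishes it to readers */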
	comp = ofi_cirque_tail(ep->util_ep.rx_cq->cirq);
	if (err) {
		if (!(entry = calloc(1, sizeof(*entry))))
			return -FI_ENOMEM;
		entry->err_entry.op_context = context;
		entry->err_entry.flags = flags;
		entry->err_entry.tag = tag;
		entry->err_entry.err = err;
		entry->err_entry.prov_errno = -err;
		slist_insert_tail(&entry->list_entry,
				  &ep->util_ep.rx_cq->err_list);
		comp->flags = UTIL_FLAG_ERROR;
	} else {
		comp->op_context = context;
		comp->flags = flags;
		comp->len = len;
		comp->buf = buf;
		comp->data = data;
		comp->tag = tag;
	}
	ofi_cirque_commit(ep->util_ep.rx_cq->cirq);
	return 0;
}
Example 6
void psmx_cq_enqueue_event(struct psmx_fid_cq *cq, struct psmx_cq_event *event)
{
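	/* Note: no lock is taken here, unlike the locked variants of this
	 * function; serializing access to the event queue is left to the caller */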
	slist_insert_tail(&event->list_entry, &cq->event_queue);
	cq->event_count++;
	if (cq->wait)
		psmx_wait_signal((struct fid_wait *)cq->wait);
}
Example 7
/* Must call with cq->lock held */
static inline int fi_ibv_poll_outstanding_cq(struct fi_ibv_ep *ep,
					     struct fi_ibv_cq *cq)
{
	struct fi_ibv_wce *wce;
	struct ibv_wc wc;
	ssize_t ret;

	ret = ibv_poll_cq(cq->cq, 1, &wc);
	if (ret <= 0)
		return ret;

	/* Skip WR entries for which the user didn't request a completion */
	if (wc.wr_id == VERBS_INJECT_FLAG) {
		/* Report progress so the caller polls again */
		return 1;
	}

	if (wc.status != IBV_WC_WR_FLUSH_ERR) {
		ret = fi_ibv_wc_2_wce(cq, &wc, &wce);
		if (OFI_UNLIKELY(ret)) {
			ret = -FI_EAGAIN;
			goto fn;
		}
		slist_insert_tail(&wce->entry, &cq->wcq);
	}
	ret = 1;
fn:
	return ret;
}
Example 8
ssize_t ofi_eq_write(struct fid_eq *eq_fid, uint32_t event,
		     const void *buf, size_t len, uint64_t flags)
{
	struct util_eq *eq;
	struct util_event *entry;

	eq = container_of(eq_fid, struct util_eq, eq_fid);
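	/* One allocation holds both the entry header and the event payload
	 * that is copied in after it */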
	entry = calloc(1, sizeof(*entry) + len);
	if (!entry)
		return -FI_ENOMEM;

	entry->size = (int) len;
	entry->event = event;
	entry->err = !!(flags & UTIL_FLAG_ERROR);
	memcpy(entry->data, buf, len);

	fastlock_acquire(&eq->lock);
	slist_insert_tail(&entry->entry, &eq->list);
	fastlock_release(&eq->lock);

	if (eq->wait)
		eq->wait->signal(eq->wait);

	return len;
}
Example 9
static void process_tx_entry(struct tcpx_xfer_entry *tx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret;

	ret = tcpx_send_msg(tx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return;

	/* Keep this path below as a single-pass path. */
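	/* Swap the header back to host byte order so the flag check below is valid */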
	tx_entry->ep->hdr_bswap(&tx_entry->hdr.base_hdr);
	slist_remove_head(&tx_entry->ep->tx_queue);

	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg send failed\n");
		tcpx_ep_shutdown_report(tx_entry->ep,
					&tx_entry->ep->util_ep.ep_fid.fid);
		tcpx_cq_report_error(tx_entry->ep->util_ep.tx_cq,
				     tx_entry, ret);
	} else {
		tcpx_cq_report_success(tx_entry->ep->util_ep.tx_cq,
				       tx_entry);

		if (tx_entry->hdr.base_hdr.flags &
		    (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) {
			tx_entry->flags |= FI_COMPLETION;
			slist_insert_tail(&tx_entry->entry,
					  &tx_entry->ep->tx_rsp_pend_queue);
			return;
		}
	}
	tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_release(tcpx_cq, tx_entry);
}
Example 10
static inline void psmx_am_enqueue_unexp(struct psmx_fid_domain *domain,
					 struct psmx_unexp *unexp)
{
	pthread_mutex_lock(&domain->unexp_queue.lock);
	slist_insert_tail(&unexp->list_entry, &domain->unexp_queue.list);
	pthread_mutex_unlock(&domain->unexp_queue.lock);
}
Example 11
struct sock_rx_entry *sock_rx_new_entry(struct sock_rx_ctx *rx_ctx)
{
	struct sock_rx_entry *rx_entry;
	struct slist_entry *entry;
	int i;

	if (rx_ctx->rx_entry_pool == NULL) {
		rx_ctx->rx_entry_pool = calloc(rx_ctx->attr.size,
						sizeof(*rx_entry));
		if (!rx_ctx->rx_entry_pool)
			return NULL;

		slist_init(&rx_ctx->pool_list);

		for (i = 0; i < rx_ctx->attr.size; i++) {
			slist_insert_tail(&rx_ctx->rx_entry_pool[i].pool_entry,
					  &rx_ctx->pool_list);
			rx_ctx->rx_entry_pool[i].is_pool_entry = 1;
		}
	}

	if (!slist_empty(&rx_ctx->pool_list)) {
		entry = slist_remove_head(&rx_ctx->pool_list);
		rx_entry = container_of(entry, struct sock_rx_entry, pool_entry);
		rx_entry->rx_ctx = rx_ctx;
	} else {
		/* Pool exhausted: fall back to a one-off heap allocation */
		rx_entry = calloc(1, sizeof(*rx_entry));
		if (!rx_entry)
			return NULL;
	}

	return rx_entry;
}
Example 12
void psmx_cntr_check_trigger(struct psmx_fid_cntr *cntr)
{
	struct psmx_fid_domain *domain = cntr->domain;
	struct psmx_trigger *trigger;

	if (!cntr->trigger)
		return;

	pthread_mutex_lock(&cntr->trigger_lock);

	trigger = cntr->trigger;
	while (trigger) {
		if (ofi_atomic_get64(&cntr->counter) < trigger->threshold)
			break;

		cntr->trigger = trigger->next;

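		/* With AM initialized, defer the trigger to the domain queue
		 * for asynchronous processing; otherwise run it inline */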
		if (domain->am_initialized) {
			fastlock_acquire(&domain->trigger_queue.lock);
			slist_insert_tail(&trigger->list_entry, &domain->trigger_queue.list);
			fastlock_release(&domain->trigger_queue.lock);
		} else {
			psmx_process_trigger(domain, trigger);
		}

		trigger = cntr->trigger;
	}

	pthread_mutex_unlock(&cntr->trigger_lock);
}
Example 13
static inline void psmx2_am_enqueue_rma(struct psmx2_trx_ctxt *trx_ctxt,
					struct psmx2_am_request *req)
{
	psmx2_lock(&trx_ctxt->rma_queue.lock, 2);
	slist_insert_tail(&req->list_entry, &trx_ctxt->rma_queue.list);
	psmx2_unlock(&trx_ctxt->rma_queue.lock, 2);
}
Example 14
void process_tx_entry(struct tcpx_xfer_entry *tx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret;

	ret = tcpx_send_msg(tx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return;

	if (!ret)
		goto done;

	FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg send failed\n");

	if (ret == -FI_ENOTCONN)
		tcpx_ep_shutdown_report(tx_entry->ep,
					&tx_entry->ep->util_ep.ep_fid.fid);
done:
	tcpx_cq_report_completion(tx_entry->ep->util_ep.tx_cq,
				  tx_entry, -ret);
	slist_remove_head(&tx_entry->ep->tx_queue);

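	/* The wire header flags are still in network byte order here, hence the ntohl() */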
	if (ntohl(tx_entry->msg_hdr.hdr.flags) &
	    (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) {
		tx_entry->flags |= FI_COMPLETION;
		slist_insert_tail(&tx_entry->entry,
				  &tx_entry->ep->tx_rsp_pend_queue);
		return;
	}

	tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_release(tcpx_cq, tx_entry);
}
Example 15
static int rxm_msg_cq_read(struct util_cq *util_cq, struct fid_cq *cq,
		struct fi_cq_tagged_entry *comp)
{
	struct util_cq_err_entry *entry;
	int ret;

	ret = fi_cq_read(cq, comp, 1);
	if (ret == -FI_EAVAIL) {
		entry = calloc(1, sizeof(*entry));
		if (!entry) {
			FI_WARN(&rxm_prov, FI_LOG_CQ,
					"Unable to allocate util_cq_err_entry\n");
			return -FI_ENOMEM;
		}
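		/* Drain the error from the msg CQ and stash it on the util CQ error list */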
		OFI_CQ_READERR(&rxm_prov, FI_LOG_CQ, cq, ret, entry->err_entry);
		if (ret < 0) {
			free(entry);
			return ret;
		}
		slist_insert_tail(&entry->list_entry, &util_cq->err_list);
		comp->flags = UTIL_FLAG_ERROR;
	}

	return ret;
}
Example 16
static inline void psmx_am_enqueue_recv(struct psmx_fid_domain *domain,
					struct psmx_am_request *req)
{
	pthread_mutex_lock(&domain->recv_queue.lock);
	slist_insert_tail(&req->list_entry, &domain->recv_queue.list);
	pthread_mutex_unlock(&domain->recv_queue.lock);
}
Example 17
opcode_chain_t opcode_chain_add_langdef(opcode_chain_t oc, wast_t node,long row,long col) {
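	/* Build a temporary AST, keep only its serialized string form, then free the AST */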
	ast_node_t n = make_ast(node);
	const char* str = tinyap_serialize_to_string(n);
	opcode_chain_node_t ocn = ochain_new_langdef(str,row,col);
	delete_node(n);
	slist_insert_tail(oc, ocn);
	return oc;
}
Example 18
static inline void psmx_am_enqueue_send(struct psmx_fid_domain *domain,
					struct psmx_am_request *req)
{
	pthread_mutex_lock(&domain->send_queue.lock);
	req->state = PSMX_AM_STATE_QUEUED;
	slist_insert_tail(&req->list_entry, &domain->send_queue.list);
	pthread_mutex_unlock(&domain->send_queue.lock);
}
Example 19
static ssize_t fi_ibv_cq_read(struct fid_cq *cq_fid, void *buf, size_t count)
{
	struct fi_ibv_cq *cq;
	struct fi_ibv_wce *wce;
	struct slist_entry *entry;
	struct ibv_wc wc;
	ssize_t ret = 0, i;

	cq = container_of(cq_fid, struct fi_ibv_cq, util_cq.cq_fid);

	cq->util_cq.cq_fastlock_acquire(&cq->util_cq.cq_lock);

	for (i = 0; i < count; i++) {
		if (!slist_empty(&cq->wcq)) {
			wce = container_of(cq->wcq.head, struct fi_ibv_wce, entry);
			if (wce->wc.status) {
				ret = -FI_EAVAIL;
				break;
			}
			entry = slist_remove_head(&cq->wcq);
			wce = container_of(entry, struct fi_ibv_wce, entry);
			cq->read_entry(&wce->wc, (char *)buf + i * cq->entry_size);
			util_buf_release(cq->wce_pool, wce);
			continue;
		}

		ret = fi_ibv_poll_cq(cq, &wc);
		if (ret <= 0)
			break;

		/* Insert error entry into wcq */
		if (OFI_UNLIKELY(wc.status)) {
			if (wc.status == IBV_WC_WR_FLUSH_ERR) {
				/* Handle case when remote side destroys
				 * the connection, but local side isn't aware
				 * about that yet */
				VERBS_DBG(FI_LOG_CQ,
					  "Ignoring WC with status "
					  "IBV_WC_WR_FLUSH_ERR(%d)\n",
					  wc.status);
				i--;
				continue;
			}
			wce = util_buf_alloc(cq->wce_pool);
			if (!wce) {
				cq->util_cq.cq_fastlock_release(&cq->util_cq.cq_lock);
				return -FI_ENOMEM;
			}
			memset(wce, 0, sizeof(*wce));
			memcpy(&wce->wc, &wc, sizeof wc);
			slist_insert_tail(&wce->entry, &cq->wcq);
			ret = -FI_EAVAIL;
			break;
		}

		cq->read_entry(&wc, (char *)buf + i * cq->entry_size);
	}

	cq->util_cq.cq_fastlock_release(&cq->util_cq.cq_lock);

	/* Return entries read; otherwise surface the deferred error or EAGAIN */
	return i ? i : (ret ? ret : -FI_EAGAIN);
}
Example 20
static int map_insert(struct gnix_fid_av *int_av, const void *addr,
		      size_t count, fi_addr_t *fi_addr, uint64_t flags,
		      void *context)
{
	int ret;
	struct gnix_ep_name *temp = NULL;
	struct gnix_av_addr_entry *the_entry;
	gnix_ht_key_t key;
	size_t i;
	struct gnix_av_block *blk = NULL;

	assert(int_av->map_ht != NULL);

	if (count == 0)
		return 0;

	blk = calloc(1, sizeof(struct gnix_av_block));
	if (blk == NULL)
		return -FI_ENOMEM;

	blk->base = calloc(count, sizeof(struct gnix_av_addr_entry));
	if (blk->base == NULL) {
		free(blk);
		return -FI_ENOMEM;
	}

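	/* Keep the block on the AV's block list so its memory can be freed
	 * when the AV is closed */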
	slist_insert_tail(&blk->slist, &int_av->block_list);

	for (i = 0; i < count; i++) {
		temp = &((struct gnix_ep_name *)addr)[i];
		((struct gnix_address *)fi_addr)[i] = temp->gnix_addr;
		the_entry = &blk->base[i];
		memcpy(&the_entry->gnix_addr, &temp->gnix_addr,
		       sizeof(struct gnix_address));
		the_entry->name_type = temp->name_type;
		the_entry->cm_nic_cdm_id = temp->cm_nic_cdm_id;
		the_entry->cookie = temp->cookie;
		memcpy(&key, &temp->gnix_addr, sizeof(gnix_ht_key_t));
		ret = _gnix_ht_insert(int_av->map_ht,
				      key,
				      the_entry);
		/*
		 * we are okay with user trying to add more
		 * entries with same key.
		 */
		if ((ret != FI_SUCCESS) && (ret != -FI_ENOSPC)) {
			GNIX_WARN(FI_LOG_AV,
				  "_gnix_ht_insert failed %d\n",
				  ret);
			return ret;
		}

	}

	return count;
}
Example 21
void psmx_cq_enqueue_event(struct psmx_fid_cq *cq, struct psmx_cq_event *event)
{
	fastlock_acquire(&cq->lock);
	slist_insert_tail(&event->list_entry, &cq->event_queue);
	cq->event_count++;
	fastlock_release(&cq->lock);

	if (cq->wait)
		cq->wait->signal(cq->wait);
}
Example 22
void psmx_cq_enqueue_event(struct psmx_fid_cq *cq, struct psmx_cq_event *event)
{
	pthread_mutex_lock(&cq->mutex);
	slist_insert_tail(&event->list_entry, &cq->event_queue);
	cq->event_count++;
	pthread_mutex_unlock(&cq->mutex);

	if (cq->wait)
		psmx_wait_signal((struct fid_wait *)cq->wait);
}
Example 23
int util_buf_grow(struct util_buf_pool *pool)
{
	int ret;
	size_t i;
	union util_buf *util_buf;
	struct util_buf_region *buf_region;

	if (pool->max_cnt && pool->num_allocated >= pool->max_cnt) {
		return -1;
	}

	buf_region = calloc(1, sizeof(*buf_region));
	if (!buf_region)
		return -1;

	ret = ofi_memalign((void **)&buf_region->mem_region, pool->alignment,
			     pool->chunk_cnt * pool->entry_sz);
	if (ret)
		goto err;

	if (pool->alloc_hndlr) {
		ret = pool->alloc_hndlr(pool->ctx, buf_region->mem_region,
					pool->chunk_cnt * pool->entry_sz,
					&buf_region->context);
		if (ret)
			goto err;
	}

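	/* Slice the new region into fixed-size entries and append each to the free list */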
	for (i = 0; i < pool->chunk_cnt; i++) {
		util_buf = (union util_buf *)
			(buf_region->mem_region + i * pool->entry_sz);
		util_buf_set_region(util_buf, buf_region, pool);
		slist_insert_tail(&util_buf->entry, &pool->buf_list);
	}

	slist_insert_tail(&buf_region->entry, &pool->region_list);
	pool->num_allocated += pool->chunk_cnt;
	return 0;
err:
	if (buf_region->mem_region)
		ofi_freealign(buf_region->mem_region);
	free(buf_region);
	return -1;
}
Example 24
void psmx2_cq_enqueue_event(struct psmx2_fid_cq *cq,
			    struct psmx2_cq_event *event)
{
	psmx2_lock(&cq->lock, 2);
	slist_insert_tail(&event->list_entry, &cq->event_queue);
	cq->event_count++;
	psmx2_unlock(&cq->lock, 2);

	if (cq->wait)
		cq->wait->signal(cq->wait);
}
Example 25
/* Schedule deferred request processing.  Usually used in RX completers. */
int _gnix_vc_queue_work_req(struct gnix_fab_req *req)
{
	struct gnix_vc *vc = req->vc;

	fastlock_acquire(&vc->work_queue_lock);
	slist_insert_tail(&req->slist, &vc->work_queue);
	__gnix_vc_work_schedule(vc);
	fastlock_release(&vc->work_queue_lock);

	return FI_SUCCESS;
}
Example 26
void _gnix_sfe_free(struct slist_entry *e, struct gnix_s_freelist *fl)
{
	assert(e);
	assert(fl);

	e->next = NULL;  /* keep slist implementation happy */

	if (fl->ts)
		fastlock_acquire(&fl->lock);
	slist_insert_tail(e, &fl->freelist);
	if (fl->ts)
		fastlock_release(&fl->lock);
}
Example 27
opcode_chain_t opcode_chain_add_opcode(opcode_chain_t oc, opcode_arg_t argtyp, const char* opcode, const char* arg,long row,long col) {
	const char*argdup;
	opcode_chain_node_t ocn = ochain_new_opcode(argtyp,opcode,arg,row,col);
	/* FIXME : this should go into ochain_new_opcode() */
	ocn->name=strdup(opcode);
	ocn->arg_type=argtyp;
	if(arg) {
		argdup=strdup(arg);
	} else {
		argdup=NULL;
	}
	ocn->arg=argdup;
	slist_insert_tail(oc, ocn);
	return oc;
}
Example 28
void rxd_cq_report_error(struct rxd_cq *cq, struct fi_cq_err_entry *err_entry)
{
	struct fi_cq_tagged_entry cq_entry = {0};
	struct util_cq_err_entry *entry = calloc(1, sizeof(*entry));
	if (!entry) {
		FI_WARN(&rxd_prov, FI_LOG_CQ,
			"out of memory, cannot report CQ error\n");
		return;
	}

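	/* Queue the error detail, then post a completion flagged UTIL_FLAG_ERROR
	 * so readers know to switch to fi_cq_readerr() */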
	entry->err_entry = *err_entry;
	slist_insert_tail(&entry->list_entry, &cq->util_cq.err_list);
	cq_entry.flags = UTIL_FLAG_ERROR;
	cq->write_fn(cq, &cq_entry);
}
Example 29
opcode_chain_t opcode_chain_add_data(opcode_chain_t oc, vm_data_type_t argtyp, const char* data, const char* rep,long row,long col) {
	const char*repdup;
	/* FIXME : this should be ochain_new_data() */
	opcode_chain_node_t ocn = ochain_new_opcode(argtyp,data,rep,row,col);
	/* FIXME : this should go into ochain_new_data() */
	ocn->type = NodeData;
	ocn->name=strdup(data);
	ocn->arg_type=(opcode_arg_t)argtyp;
	if(rep) {
		repdup=strdup(rep);
	} else {
		repdup=strdup("1");
	}
	ocn->arg=repdup;
	slist_insert_tail(oc, ocn);
	return oc;
}
Example 30
static ssize_t fi_ibv_cq_read(struct fid_cq *cq_fid, void *buf, size_t count)
{
	struct fi_ibv_cq *cq;
	struct fi_ibv_wce *wce;
	struct slist_entry *entry;
	struct ibv_wc wc;
	ssize_t ret = 0, i;

	cq = container_of(cq_fid, struct fi_ibv_cq, cq_fid);

	fastlock_acquire(&cq->lock);

	for (i = 0; i < count; i++) {
		if (!slist_empty(&cq->wcq)) {
			wce = container_of(cq->wcq.head, struct fi_ibv_wce, entry);
			if (wce->wc.status) {
				ret = -FI_EAVAIL;
				break;
			}
			entry = slist_remove_head(&cq->wcq);
			wce = container_of(entry, struct fi_ibv_wce, entry);
			cq->read_entry(&wce->wc, i, buf);
			util_buf_release(cq->domain->fab->wce_pool, wce);
			continue;
		}

		ret = fi_ibv_poll_cq(cq, &wc);
		if (ret <= 0)
			break;

		/* Insert error entry into wcq */
		if (wc.status) {
			wce = util_buf_alloc(cq->domain->fab->wce_pool);
			if (!wce) {
				fastlock_release(&cq->lock);
				return -FI_ENOMEM;
			}
			memset(wce, 0, sizeof(*wce));
			memcpy(&wce->wc, &wc, sizeof wc);
			slist_insert_tail(&wce->entry, &cq->wcq);
			ret = -FI_EAVAIL;
			break;
		}

		cq->read_entry(&wc, i, buf);
	}

	fastlock_release(&cq->lock);

	/* Return entries read; otherwise surface the deferred error or EAGAIN */
	return i ? i : (ret ? ret : -FI_EAGAIN);
}