Example #1
struct sock_mr *sock_mr_verify_key(struct sock_domain *domain, uint64_t key,
				   void *buf, size_t len, uint64_t access)
{
	int i;
	struct sock_mr *mr;

	fastlock_acquire(&domain->lock);
	mr = sock_mr_get_entry(domain, key);
	if (!mr) {
		fastlock_release(&domain->lock);
		return NULL;
	}

	if (domain->attr.mr_mode == FI_MR_SCALABLE)
		buf = (char *)buf + mr->offset;

	for (i = 0; i < mr->iov_count; i++) {
		if ((uintptr_t)buf >= (uintptr_t)mr->mr_iov[i].iov_base &&
		    ((uintptr_t)buf + len <= (uintptr_t) mr->mr_iov[i].iov_base +
		     mr->mr_iov[i].iov_len)) {
			if ((access & mr->access) == access)
				goto out;
		}
	}
	SOCK_LOG_ERROR("MR check failed\n");
	mr = NULL;
out:
	fastlock_release(&domain->lock);
	return mr;
}
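The loop above accepts a request only if [buf, buf + len) falls entirely inside one registered iov and the requested access bits are a subset of the registered ones. A minimal sketch of that containment test, with illustrative names (not libfabric API):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* True when [buf, buf + len) lies inside [base, base + base_len);
 * the same arithmetic as the loop body in sock_mr_verify_key. */
static bool range_contains(const void *base, size_t base_len,
			   const void *buf, size_t len)
{
	uintptr_t lo = (uintptr_t) base;
	uintptr_t p = (uintptr_t) buf;

	return p >= lo && p + len <= lo + base_len;
}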
Example #2
static ssize_t util_cq_read(struct fid_cq *cq_fid, void *buf, size_t count)
{
	struct util_cq *cq;
	struct fi_cq_tagged_entry *entry;
	ssize_t i;

	cq = container_of(cq_fid, struct util_cq, cq_fid);
	fastlock_acquire(&cq->cq_lock);
	if (cirque_isempty(cq->cirq)) {
		fastlock_release(&cq->cq_lock);
		cq->progress(cq);
		fastlock_acquire(&cq->cq_lock);
		if (cirque_isempty(cq->cirq)) {
			i = -FI_EAGAIN;
			goto out;
		}
	}

	if (count > cirque_usedcnt(cq->cirq))
		count = cirque_usedcnt(cq->cirq);

	for (i = 0; i < count; i++) {
		entry = cirque_head(cq->cirq);
		if (entry->flags & UTIL_FLAG_ERROR) {
			if (!i)
				i = -FI_EAVAIL;
			break;
		}
		cq->read_entry(&buf, entry);
		cirque_discard(cq->cirq);
	}
out:
	fastlock_release(&cq->cq_lock);
	return i;
}
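util_cq_read shows a recurring shape in these providers: check the queue under the lock, and if it is empty, drop the lock to drive progress (which may itself need the lock), then re-take it and re-check before giving up. A minimal sketch of that drop-retake-recheck pattern, using pthread_mutex_t as a stand-in for fastlock_t (which libfabric maps to a pthread spin- or mutex lock depending on build options); all names are illustrative:

#include <pthread.h>
#include <errno.h>

struct work_queue {
	pthread_mutex_t lock;
	unsigned used;
};

static void drive_progress(struct work_queue *q)
{
	(void) q; /* would poll hardware and may take q->lock itself */
}

/* Returns 0 once at least one entry was present, or -EAGAIN. */
static int wait_nonempty(struct work_queue *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	if (!q->used) {
		pthread_mutex_unlock(&q->lock); /* progress needs the lock */
		drive_progress(q);
		pthread_mutex_lock(&q->lock);
		if (!q->used)
			ret = -EAGAIN;
	}
	pthread_mutex_unlock(&q->lock);
	return ret;
}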
Example #3
int ofi_monitor_subscribe(struct ofi_notification_queue *nq,
			  void *addr, size_t len,
			  struct ofi_subscription *subscription)
{
	int ret;

	FI_DBG(&core_prov, FI_LOG_MR,
	       "subscribing addr=%p len=%zu subscription=%p nq=%p\n",
	       addr, len, subscription, nq);

	/* Ensure the subscription is initialized before we can get events */
	dlist_init(&subscription->entry);

	subscription->nq = nq;
	subscription->addr = addr;
	subscription->len = len;
	fastlock_acquire(&nq->lock);
	nq->refcnt++;
	fastlock_release(&nq->lock);

	ret = nq->monitor->subscribe(nq->monitor, addr, len, subscription);
	if (OFI_UNLIKELY(ret)) {
		FI_WARN(&core_prov, FI_LOG_MR,
			"Failed (ret = %d) to monitor addr=%p len=%zu\n",
			ret, addr, len);
		fastlock_acquire(&nq->lock);
		nq->refcnt--;
		fastlock_release(&nq->lock);
	}
	return ret;
}
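Note the ordering: the nq refcount is bumped before calling into the monitor and rolled back only if the subscribe fails, so the count never under-represents a live subscription. A sketch of that optimistic take-then-undo pattern, hypothetical names throughout:

#include <pthread.h>

struct notif_queue {
	pthread_mutex_t lock;
	int refcnt;
};

static int subscribe_with_ref(struct notif_queue *nq,
			      int (*do_subscribe)(struct notif_queue *))
{
	int ret;

	pthread_mutex_lock(&nq->lock);
	nq->refcnt++;			/* assume success */
	pthread_mutex_unlock(&nq->lock);

	ret = do_subscribe(nq);
	if (ret) {			/* undo the reference on failure */
		pthread_mutex_lock(&nq->lock);
		nq->refcnt--;
		pthread_mutex_unlock(&nq->lock);
	}
	return ret;
}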
Example #4
static int sock_regattr(struct fid_domain *domain, const struct fi_mr_attr *attr,
		uint64_t flags, struct fid_mr **mr)
{
	struct fi_eq_entry eq_entry;
	struct sock_domain *dom;
	struct sock_mr *_mr;
	uint64_t key;

	dom = container_of(domain, struct sock_domain, dom_fid);
	if (!(dom->info.mode & FI_PROV_MR_ATTR) && 
	    ((attr->requested_key > IDX_MAX_INDEX) ||
	    idm_lookup(&dom->mr_idm, (int) attr->requested_key)))
		return -FI_ENOKEY;

	_mr = calloc(1, sizeof(*_mr) + sizeof(_mr->mr_iov) * (attr->iov_count - 1));
	if (!_mr)
		return -FI_ENOMEM;

	_mr->mr_fid.fid.fclass = FI_CLASS_MR;
	_mr->mr_fid.fid.context = attr->context;
	_mr->mr_fid.fid.ops = &sock_mr_fi_ops;

	_mr->domain = dom;
	_mr->access = attr->access;
	_mr->flags = flags;
	_mr->offset = (flags & FI_MR_OFFSET) ?
		(uintptr_t) attr->mr_iov[0].iov_base + attr->offset : 
		(uintptr_t) attr->mr_iov[0].iov_base;

	fastlock_acquire(&dom->lock);
	key = (dom->info.mode & FI_PROV_MR_ATTR) ?
	      sock_get_mr_key(dom) : (uint16_t) attr->requested_key;
	if (idm_set(&dom->mr_idm, key, _mr) < 0)
		goto err;
	_mr->mr_fid.key = key;
	_mr->mr_fid.mem_desc = (void *)key;
	fastlock_release(&dom->lock);

	_mr->iov_count = attr->iov_count;
	memcpy(&_mr->mr_iov, attr->mr_iov, sizeof(_mr->mr_iov) * attr->iov_count);

	*mr = &_mr->mr_fid;
	atomic_inc(&dom->ref);

	if (dom->mr_eq) {
		eq_entry.fid = &domain->fid;
		eq_entry.context = attr->context;
		return sock_eq_report_event(dom->mr_eq, FI_MR_COMPLETE,
					    &eq_entry, sizeof(eq_entry), 0);
	}

	return 0;

err:
	fastlock_release(&dom->lock);
	free(_mr);
	return -errno;
}
Example #5
static int gnix_domain_close(fid_t fid)
{
	int ret = FI_SUCCESS, references_held;
	struct gnix_fid_domain *domain;

	GNIX_TRACE(FI_LOG_DOMAIN, "\n");

	domain = container_of(fid, struct gnix_fid_domain, domain_fid.fid);
	if (domain->domain_fid.fid.fclass != FI_CLASS_DOMAIN) {
		ret = -FI_EINVAL;
		goto err;
	}

	/* before checking the refcnt, flush the memory registration cache */
	if (domain->mr_cache_ro) {
		fastlock_acquire(&domain->mr_cache_lock);
		ret = _gnix_mr_cache_flush(domain->mr_cache_ro);
		if (ret != FI_SUCCESS) {
			GNIX_WARN(FI_LOG_DOMAIN,
				  "failed to flush memory cache on domain close\n");
			fastlock_release(&domain->mr_cache_lock);
			goto err;
		}
		fastlock_release(&domain->mr_cache_lock);
	}

	if (domain->mr_cache_rw) {
		fastlock_acquire(&domain->mr_cache_lock);
		ret = _gnix_mr_cache_flush(domain->mr_cache_rw);
		if (ret != FI_SUCCESS) {
			GNIX_WARN(FI_LOG_DOMAIN,
				  "failed to flush memory cache on domain close\n");
			fastlock_release(&domain->mr_cache_lock);
			goto err;
		}
		fastlock_release(&domain->mr_cache_lock);
	}

	/*
	 * if non-zero refcnt, there are eps, mrs, and/or an eq associated
	 * with this domain which have not been closed.
	 */

	references_held = _gnix_ref_put(domain);

	if (references_held) {
		GNIX_INFO(FI_LOG_DOMAIN, "failed to fully close domain due to "
			  "lingering references. references=%i dom=%p\n",
			  references_held, domain);
	}

	GNIX_INFO(FI_LOG_DOMAIN, "gnix_domain_close invoked returning %d\n",
		  ret);
err:
	return ret;
}
Example #6
static int sock_poll_poll(struct fid_poll *pollset, void **context, int count)
{
	struct sock_poll *poll;
	struct sock_cq *cq;
	struct sock_eq *eq;
	struct sock_cntr *cntr;
	struct sock_fid_list *list_item;
	struct dlist_entry *p, *head;
	int ret_count = 0;

	poll = container_of(pollset, struct sock_poll, poll_fid.fid);
	head = &poll->fid_list;

	for (p = head->next; p != head && ret_count < count; p = p->next) {
		list_item = container_of(p, struct sock_fid_list, entry);
		switch (list_item->fid->fclass) {
		case FI_CLASS_CQ:
			cq = container_of(list_item->fid, struct sock_cq, cq_fid);
			sock_cq_progress(cq);
			fastlock_acquire(&cq->lock);
			if (rbfdused(&cq->cq_rbfd)) {
				*context++ = cq->cq_fid.fid.context;
				ret_count++;
			}
			fastlock_release(&cq->lock);
			break;

		case FI_CLASS_CNTR:
			cntr = container_of(list_item->fid, struct sock_cntr, cntr_fid);
			sock_cntr_progress(cntr);
			fastlock_acquire(&cntr->mut);
			if (atomic_get(&cntr->value) >= atomic_get(&cntr->threshold)) {
				*context++ = cntr->cntr_fid.fid.context;
				ret_count++;
			}
			fastlock_release(&cntr->mut);
			break;

		case FI_CLASS_EQ:
			eq = container_of(list_item->fid, struct sock_eq, eq);
			fastlock_acquire(&eq->lock);
			if (!dlistfd_empty(&eq->list)) {
				*context++ = eq->eq.fid.context;
				ret_count++;
			}
			fastlock_release(&eq->lock);
			break;

		default:
			break;
		}
	}

	return ret_count;
}
Example #7
static ssize_t sock_cq_sreadfrom(struct fid_cq *cq, void *buf, size_t count,
			fi_addr_t *src_addr, const void *cond, int timeout)
{
	int ret = 0;
	size_t threshold;
	struct sock_cq *sock_cq;
	uint64_t start_ms = 0, end_ms = 0;
	ssize_t cq_entry_len, avail;

	sock_cq = container_of(cq, struct sock_cq, cq_fid);
	if (rbused(&sock_cq->cqerr_rb))
		return -FI_EAVAIL;

	cq_entry_len = sock_cq->cq_entry_size;
	if (sock_cq->attr.wait_cond == FI_CQ_COND_THRESHOLD)
		threshold = MIN((uintptr_t) cond, count);
	else
		threshold = count;

	if (sock_cq->domain->progress_mode == FI_PROGRESS_MANUAL) {
		if (timeout >= 0) {
			start_ms = fi_gettime_ms();
			end_ms = start_ms + timeout;
		}

		do {
			sock_cq_progress(sock_cq);
			fastlock_acquire(&sock_cq->lock);
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, avail / cq_entry_len),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
			if (ret == 0 && timeout >= 0) {
				if (fi_gettime_ms() >= end_ms)
					return -FI_EAGAIN;
			}
		} while (ret == 0);
	} else {
		ret = rbfdwait(&sock_cq->cq_rbfd, timeout);
		if (ret > 0) {
			fastlock_acquire(&sock_cq->lock);
			ret = 0;
			avail = rbfdused(&sock_cq->cq_rbfd);
			if (avail)
				ret = sock_cq_rbuf_read(sock_cq, buf,
					MIN(threshold, avail / cq_entry_len),
					src_addr, cq_entry_len);
			fastlock_release(&sock_cq->lock);
		}
	}
	return (ret == 0 || ret == -FI_ETIMEDOUT) ? -FI_EAGAIN : ret;
}
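The manual-progress branch above is a deadline loop: poll, and only after an empty attempt compare the clock against end_ms, so a negative timeout means wait forever. A compact sketch of that loop shape, assuming a monotonic millisecond clock in the spirit of the fi_gettime_ms() used above (now_ms() below is a stand-in, not a libfabric call):

#include <stdint.h>
#include <time.h>
#include <errno.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000 + (uint64_t) ts.tv_nsec / 1000000;
}

/* Calls try_once() until it returns nonzero or the timeout expires;
 * a negative timeout waits forever, as in sock_cq_sreadfrom. */
static int poll_deadline(int (*try_once)(void *), void *arg, int timeout)
{
	uint64_t end_ms = timeout >= 0 ? now_ms() + (uint64_t) timeout : 0;
	int ret;

	do {
		ret = try_once(arg);
		if (!ret && timeout >= 0 && now_ms() >= end_ms)
			return -EAGAIN;
	} while (!ret);
	return ret;
}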
Example #8
static struct psmx_cq_event *psmx_cq_dequeue_event(struct psmx_fid_cq *cq)
{
	struct slist_entry *entry;

	fastlock_acquire(&cq->lock);
	if (slist_empty(&cq->event_queue)) {
		fastlock_release(&cq->lock);
		return NULL;
	}
	entry = slist_remove_head(&cq->event_queue);
	cq->event_count--;
	fastlock_release(&cq->lock);

	return container_of(entry, struct psmx_cq_event, list_entry);
}
Example #9
DIRECT_FN STATIC int gnix_reject(struct fid_pep *pep, fid_t handle,
				 const void *param, size_t paramlen)
{
	struct gnix_fid_pep *pep_priv;
	struct gnix_pep_sock_conn *conn;
	struct gnix_pep_sock_connresp resp;
	struct fi_eq_cm_entry *eqe_ptr;
	int ret;

	if (!pep)
		return -FI_EINVAL;

	pep_priv = container_of(pep, struct gnix_fid_pep, pep_fid.fid);

	fastlock_acquire(&pep_priv->lock);

	conn = (struct gnix_pep_sock_conn *)handle;
	if (!conn || conn->fid.fclass != FI_CLASS_CONNREQ) {
		fastlock_release(&pep_priv->lock);
		return -FI_EINVAL;
	}

	resp.cmd = GNIX_PEP_SOCK_RESP_REJECT;

	resp.cm_data_len = paramlen;
	if (paramlen) {
		eqe_ptr = (struct fi_eq_cm_entry *)resp.eqe_buf;
		memcpy(eqe_ptr->data, param, paramlen);
	}

	ret = write(conn->sock_fd, &resp, sizeof(resp));
	if (ret != sizeof(resp)) {
		fastlock_release(&pep_priv->lock);
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Failed to send resp, errno: %d\n",
			  errno);
		return -FI_EIO;
	}

	close(conn->sock_fd);
	free(conn);

	fastlock_release(&pep_priv->lock);

	GNIX_DEBUG(FI_LOG_EP_CTRL, "Sent conn reject: %p\n", pep_priv);

	return FI_SUCCESS;
}
Example #10
int ofi_wait_fd_del(struct util_wait *wait, int fd)
{
	int ret = 0;
	struct ofi_wait_fd_entry *fd_entry;
	struct dlist_entry *entry;
	struct util_wait_fd *wait_fd = container_of(wait, struct util_wait_fd,
						    util_wait);

	fastlock_acquire(&wait_fd->lock);
	entry = dlist_find_first_match(&wait_fd->fd_list, ofi_wait_fd_match, &fd);
	if (!entry) {
		FI_INFO(wait->prov, FI_LOG_FABRIC,
			"Given fd (%d) not found in wait list - %p\n",
			fd, wait_fd);
		ret = -FI_EINVAL;
		goto out;
	}
	fd_entry = container_of(entry, struct ofi_wait_fd_entry, entry);
	if (ofi_atomic_dec32(&fd_entry->ref))
		goto out;
	dlist_remove(&fd_entry->entry);
	fi_epoll_del(wait_fd->epoll_fd, fd_entry->fd);
	free(fd_entry);
out:
	fastlock_release(&wait_fd->lock);
	return ret;
}
Example #11
void psmx_cntr_check_trigger(struct psmx_fid_cntr *cntr)
{
	struct psmx_fid_domain *domain = cntr->domain;
	struct psmx_trigger *trigger;

	if (!cntr->trigger)
		return;

	pthread_mutex_lock(&cntr->trigger_lock);

	trigger = cntr->trigger;
	while (trigger) {
		if (ofi_atomic_get64(&cntr->counter) < trigger->threshold)
			break;

		cntr->trigger = trigger->next;

		if (domain->am_initialized) {
			fastlock_acquire(&domain->trigger_queue.lock);
			slist_insert_tail(&trigger->list_entry, &domain->trigger_queue.list);
			fastlock_release(&domain->trigger_queue.lock);
		} else {
			psmx_process_trigger(domain, trigger);
		}

		trigger = cntr->trigger;
	}

	pthread_mutex_unlock(&cntr->trigger_lock);
}
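The trigger list is kept ordered by threshold, so the drain loop above can stop at the first trigger the counter has not yet reached. A standalone sketch of that drain, with illustrative types:

#include <stddef.h>
#include <stdint.h>

struct trigger {
	uint64_t threshold;
	struct trigger *next;
};

/* Detaches every fired trigger (threshold <= counter) from *head,
 * which is assumed sorted by ascending threshold, and returns them
 * as a chain in their original order. */
static struct trigger *drain_fired(struct trigger **head, uint64_t counter)
{
	struct trigger *fired = NULL, **tail = &fired, *t;

	while ((t = *head) && counter >= t->threshold) {
		*head = t->next;	/* pop from the pending list */
		t->next = NULL;
		*tail = t;		/* append to the fired chain */
		tail = &t->next;
	}
	return fired;
}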
Example #12
int ofi_domain_init(struct fid_fabric *fabric_fid, const struct fi_info *info,
		   struct util_domain *domain, void *context)
{
	struct util_fabric *fabric;
	int ret;

	fabric = container_of(fabric_fid, struct util_fabric, fabric_fid);
	domain->fabric = fabric;
	domain->prov = fabric->prov;
	ret = util_domain_init(domain, info);
	if (ret) {
		free(domain);
		return ret;
	}

	domain->domain_fid.fid.fclass = FI_CLASS_DOMAIN;
	domain->domain_fid.fid.context = context;
	/*
	 * domain ops set by provider
	 */
	domain->domain_fid.mr = &util_domain_mr_ops;

	fastlock_acquire(&fabric->lock);
	dlist_insert_tail(&domain->list_entry, &fabric->domain_list);
	fastlock_release(&fabric->lock);

	ofi_atomic_inc32(&fabric->ref);
	return 0;
}
Example #13
int
_gnix_notifier_get_event(struct gnix_mr_notifier *mrn, void *buf, size_t len)
{
	int ret, ret_errno;

	if ((mrn == NULL) || (buf == NULL) || (len == 0)) {
		GNIX_WARN(FI_LOG_MR,
			  "Invalid argument to _gnix_notifier_get_event\n");
		return -FI_EINVAL;
	}

	fastlock_acquire(&mrn->lock);

	if (*(mrn->cntr) > 0) {
		GNIX_DEBUG(FI_LOG_MR, "reading kdreg event\n");
		ret = read(mrn->fd, buf, len);
		if (ret < 0) {
			ret_errno = errno;
			if (ret_errno != EAGAIN) {
				GNIX_WARN(FI_LOG_MR,
					  "kdreg event read failed: %s\n",
					  strerror(ret_errno));
			}
			/* Not all of these map to fi_errno values */
			ret = -ret_errno;
		}
	} else {
		GNIX_DEBUG(FI_LOG_MR, "nothing to read from kdreg :(\n");
		ret = -FI_EAGAIN;
	}

	fastlock_release(&mrn->lock);

	return ret;
}
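The -ret_errno conversion above is the codebase's convention of returning negative errno values so callers can simply test for ret < 0. A one-function sketch of that convention around read(2):

#include <unistd.h>
#include <errno.h>

/* Like read(2), but reports failure as -errno instead of -1/errno. */
static ssize_t read_neg_errno(int fd, void *buf, size_t len)
{
	ssize_t ret = read(fd, buf, len);

	return ret < 0 ? -errno : ret;
}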
Example #14
int
_gnix_notifier_unmonitor(struct gnix_mr_notifier *mrn, uint64_t cookie)
{
	int ret;
	struct registration_monitor rm;

	fastlock_acquire(&mrn->lock);

	ret = notifier_verify_stuff(mrn);
	if (ret != FI_SUCCESS) {
		GNIX_WARN(FI_LOG_MR, "Invalid MR notifier\n");
		goto err_exit;
	}

	GNIX_DEBUG(FI_LOG_MR, "unmonitoring cookie=%lu\n", cookie);

	memset(&rm, 0, sizeof(rm));

	rm.type = REGISTRATION_UNMONITOR;
	rm.u.unmon.user_cookie = cookie;

	ret = kdreg_write(mrn, &rm, sizeof(rm));

err_exit:
	fastlock_release(&mrn->lock);

	return ret;
}
Example #15
ssize_t udpx_recv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc,
		fi_addr_t src_addr, void *context)
{
	struct udpx_ep *ep;
	struct udpx_ep_entry *entry;
	ssize_t ret;

	ep = container_of(ep_fid, struct udpx_ep, util_ep.ep_fid.fid);
	fastlock_acquire(&ep->util_ep.rx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->rxq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	entry = ofi_cirque_tail(ep->rxq);
	entry->context = context;
	entry->iov_count = 1;
	entry->iov[0].iov_base = buf;
	entry->iov[0].iov_len = len;
	entry->flags = 0;

	ofi_cirque_commit(ep->rxq);
	ret = 0;
out:
	fastlock_release(&ep->util_ep.rx_cq->cq_lock);
	return ret;
}
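udpx_recv fills the tail slot of a circular queue and only then commits it, so a consumer never observes a half-written entry; the whole sequence happens under the CQ lock. A minimal sketch of that reserve-fill-commit idiom on a hypothetical fixed-size ring (not the ofi_cirque macros):

#include <errno.h>

#define RING_SIZE 8	/* power of two so index masking works */

struct ring {
	unsigned head, tail;	/* free-running counters */
	int slot[RING_SIZE];
};

/* Caller is assumed to hold the protecting lock, as in udpx_recv. */
static int ring_push(struct ring *r, int value)
{
	if (r->tail - r->head >= RING_SIZE)
		return -EAGAIN;			/* full, cf. -FI_EAGAIN */
	r->slot[r->tail & (RING_SIZE - 1)] = value; /* fill the tail slot */
	r->tail++;				/* commit: now visible */
	return 0;
}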
Example #16
void sock_cq_remove_rx_ctx(struct sock_cq *cq, struct sock_rx_ctx *rx_ctx)
{
	fastlock_acquire(&cq->list_lock);
	dlist_remove(&rx_ctx->cq_entry);
	ofi_atomic_dec32(&cq->ref);
	fastlock_release(&cq->list_lock);
}
Example #17
static ssize_t sock_tx_size_left(struct fid_ep *ep)
{
	struct sock_ep *sock_ep;
	struct sock_tx_ctx *tx_ctx;
	ssize_t num_left = 0;

	switch (ep->fid.fclass) {
	case FI_CLASS_EP:
		sock_ep = container_of(ep, struct sock_ep, ep);
		tx_ctx = sock_ep->attr->tx_ctx;
		break;

	case FI_CLASS_TX_CTX:
		tx_ctx = container_of(ep, struct sock_tx_ctx, fid.ctx);
		break;

	default:
		SOCK_LOG_ERROR("Invalid EP type\n");
		return -FI_EINVAL;
	}

	fastlock_acquire(&tx_ctx->wlock);
	num_left = rbavail(&tx_ctx->rb)/SOCK_EP_TX_ENTRY_SZ;
	fastlock_release(&tx_ctx->wlock);
	return num_left;
}
Example #18
int sock_cq_report_error(struct sock_cq *cq, struct sock_pe_entry *entry,
			 size_t olen, int err, int prov_errno, void *err_data)
{
	int ret;
	struct fi_cq_err_entry err_entry;

	fastlock_acquire(&cq->lock);
	if (rbavail(&cq->cqerr_rb) < sizeof(err_entry)) {
		ret = -FI_ENOSPC;
		goto out;
	}

	err_entry.err = err;
	err_entry.olen = olen;
	err_entry.err_data = err_data;
	err_entry.len = entry->data_len;
	err_entry.prov_errno = prov_errno;
	err_entry.flags = entry->flags;
	err_entry.data = entry->data;
	err_entry.tag = entry->tag;
	err_entry.op_context = (void *) (uintptr_t) entry->context;

	if (entry->type == SOCK_PE_RX)
		err_entry.buf = (void *) (uintptr_t) entry->pe.rx.rx_iov[0].iov.addr;
	else
		err_entry.buf = (void *) (uintptr_t) entry->pe.tx.tx_iov[0].src.iov.addr;

	rbwrite(&cq->cqerr_rb, &err_entry, sizeof(err_entry));
	rbcommit(&cq->cqerr_rb);
	ret = 0;

out:
	fastlock_release(&cq->lock);
	return ret;
}
Example #19
static int rxd_ep_enable(struct rxd_ep *ep)
{
	size_t i;
	ssize_t ret;

	ret = fi_ep_bind(ep->dg_ep, &ep->dg_cq->fid, FI_TRANSMIT | FI_RECV);
	if (ret)
		return ret;

	ret = fi_enable(ep->dg_ep);
	if (ret)
		return ret;

	ep->tx_flags = rxd_tx_flags(ep->util_ep.tx_op_flags);
	ep->rx_flags = rxd_rx_flags(ep->util_ep.rx_op_flags);

	fastlock_acquire(&ep->util_ep.lock);
	for (i = 0; i < ep->rx_size; i++) {
		ret = rxd_ep_post_buf(ep);
		if (ret)
			break;
	}

	fastlock_release(&ep->util_ep.lock);
	/* propagate a posting failure instead of discarding it */
	return ret;
}
Example #20
static ssize_t rxd_ep_cancel_recv(struct rxd_ep *ep, struct dlist_entry *list,
				  void *context)
{
	struct dlist_entry *entry;
	struct rxd_x_entry *rx_entry;
	struct fi_cq_err_entry err_entry;
	int ret = 0;

	fastlock_acquire(&ep->util_ep.lock);

	entry = dlist_find_first_match(list, &rxd_match_ctx, context);
	if (!entry)
		goto out;

	rx_entry = container_of(entry, struct rxd_x_entry, entry);
	memset(&err_entry, 0, sizeof(struct fi_cq_err_entry));
	err_entry.op_context = rx_entry->cq_entry.op_context;
	err_entry.flags = rx_entry->cq_entry.flags;
	err_entry.err = FI_ECANCELED;
	err_entry.prov_errno = 0;
	ret = ofi_cq_write_error(&rxd_ep_rx_cq(ep)->util_cq, &err_entry);
	if (ret) {
		FI_WARN(&rxd_prov, FI_LOG_EP_CTRL, "could not write error entry\n");
		goto out;
	}

	rx_entry->flags |= RXD_CANCELLED;

	ret = 1;
out:
	fastlock_release(&ep->util_ep.lock);
	return ret;
}
Example #21
ssize_t udpx_sendmsg(struct fid_ep *ep_fid, const struct fi_msg *msg,
		uint64_t flags)
{
	struct udpx_ep *ep;
	struct msghdr hdr;
	ssize_t ret;

	ep = container_of(ep_fid, struct udpx_ep, util_ep.ep_fid.fid);
	hdr.msg_name = ip_av_get_addr(ep->util_ep.av, msg->addr);
	hdr.msg_namelen = ep->util_ep.av->addrlen;
	hdr.msg_iov = (struct iovec *) msg->msg_iov;
	hdr.msg_iovlen = msg->iov_count;
	hdr.msg_control = NULL;
	hdr.msg_controllen = 0;
	hdr.msg_flags = 0;

	fastlock_acquire(&ep->util_ep.tx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->util_ep.tx_cq->cirq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	ret = sendmsg(ep->sock, &hdr, 0);
	if (ret >= 0) {
		ep->tx_comp(ep, msg->context);
		ret = 0;
	} else {
		ret = -errno;
	}
out:
	fastlock_release(&ep->util_ep.tx_cq->cq_lock);
	return ret;
}
Example #22
ssize_t udpx_send(struct fid_ep *ep_fid, const void *buf, size_t len, void *desc,
		fi_addr_t dest_addr, void *context)
{
	struct udpx_ep *ep;
	ssize_t ret;

	ep = container_of(ep_fid, struct udpx_ep, util_ep.ep_fid.fid);
	fastlock_acquire(&ep->util_ep.tx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->util_ep.tx_cq->cirq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	ret = sendto(ep->sock, buf, len, 0,
		     ip_av_get_addr(ep->util_ep.av, dest_addr),
		     ep->util_ep.av->addrlen);
	if (ret == len) {
		ep->tx_comp(ep, context);
		ret = 0;
	} else {
		ret = -errno;
	}
out:
	fastlock_release(&ep->util_ep.tx_cq->cq_lock);
	return ret;
}
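Both send paths check for completion-queue space before issuing the syscall and hold the CQ lock across it, trading lock hold time for the guarantee that a successful send always gets its completion recorded. A sketch of that reserve-then-act ordering with placeholder types:

#include <pthread.h>
#include <errno.h>

struct comp_queue {
	pthread_mutex_t lock;
	unsigned used, size;
};

/* Refuses the operation when no completion slot is free, so do_op()
 * can never succeed without room to report it. */
static int op_with_completion(struct comp_queue *cq,
			      int (*do_op)(void *), void *arg)
{
	int ret;

	pthread_mutex_lock(&cq->lock);
	if (cq->used == cq->size) {
		ret = -EAGAIN;
	} else {
		ret = do_op(arg);	/* e.g. sendto(), lock held */
		if (ret >= 0) {
			cq->used++;	/* stand-in for the CQ write */
			ret = 0;
		}
	}
	pthread_mutex_unlock(&cq->lock);
	return ret;
}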
Example #23
/* Process incoming connection requests on a listening PEP. */
int _gnix_pep_progress(struct gnix_fid_pep *pep)
{
	int accept_fd, ret;

	fastlock_acquire(&pep->lock);

	accept_fd = accept(pep->listen_fd, NULL, NULL);
	if (accept_fd >= 0) {
		/* New Connection. */
		ret = __gnix_pep_connreq(pep, accept_fd);
		if (ret != FI_SUCCESS) {
			GNIX_WARN(FI_LOG_EP_CTRL,
				  "__gnix_pep_connreq failed, err: %d\n",
				  ret);
		}
	} else if (errno != EAGAIN) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "(accept) Unexpected errno on listen socket: %d\n",
			  errno);
	}

	fastlock_release(&pep->lock);

	return FI_SUCCESS;
}
Example #24
static int util_wait_fd_close(struct fid *fid)
{
	struct util_wait_fd *wait;
	struct ofi_wait_fd_entry *fd_entry;
	int ret;

	wait = container_of(fid, struct util_wait_fd, util_wait.wait_fid.fid);
	ret = fi_wait_cleanup(&wait->util_wait);
	if (ret)
		return ret;

	fastlock_acquire(&wait->lock);
	while (!dlist_empty(&wait->fd_list)) {
		dlist_pop_front(&wait->fd_list, struct ofi_wait_fd_entry,
				fd_entry, entry);
		fi_epoll_del(wait->epoll_fd, fd_entry->fd);
		free(fd_entry);
	}
	fastlock_release(&wait->lock);

	fi_epoll_del(wait->epoll_fd, wait->signal.fd[FI_READ_FD]);
	fd_signal_free(&wait->signal);
	fi_epoll_close(wait->epoll_fd);
	fastlock_destroy(&wait->lock);
	free(wait);
	return 0;
}
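Teardown above drains the fd list under the lock, releasing each entry's resources before freeing it, and only afterwards destroys the lock and the container itself. The drain reduces to a pop-and-free loop; a sketch with a plain singly-linked list in place of dlist:

#include <stdlib.h>

struct fd_node {
	struct fd_node *next;
	int fd;
};

/* Pops every node, letting unregister() release the fd (e.g. remove
 * it from an epoll set) before the node is freed. */
static void drain_and_free(struct fd_node **head, void (*unregister)(int))
{
	struct fd_node *n;

	while ((n = *head)) {
		*head = n->next;
		unregister(n->fd);
		free(n);
	}
}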
Example #25
static ssize_t fi_ibv_eq_read_event(struct fi_ibv_eq *eq, uint32_t *event,
		void *buf, size_t len, uint64_t flags)
{
	struct fi_ibv_eq_entry *entry;
	ssize_t ret = 0;

	fastlock_acquire(&eq->lock);

	if (dlistfd_empty(&eq->list_head))
		goto out;

	entry = container_of(eq->list_head.list.next, struct fi_ibv_eq_entry, item);
	if (entry->len > len) {
		ret = -FI_ETOOSMALL;
		goto out;
	}

	ret = entry->len;
	*event = entry->event;
	memcpy(buf, entry->eq_entry, entry->len);

	if (!(flags & FI_PEEK)) {
		dlistfd_remove(eq->list_head.list.next, &eq->list_head);
		free(entry);
	}

out:
	fastlock_release(&eq->lock);
	return ret;
}
Example #26
void fi_ibv_ep_ini_conn_done(struct fi_ibv_xrc_ep *ep, uint32_t peer_srqn,
			     uint32_t tgt_qpn)
{
	struct fi_ibv_domain *domain = fi_ibv_ep_to_domain(&ep->base_ep);

	assert(ep->base_ep.id && ep->ini_conn);

	fastlock_acquire(&domain->xrc.ini_mgmt_lock);

	assert(ep->ini_conn->state == FI_IBV_INI_QP_CONNECTING ||
	       ep->ini_conn->state == FI_IBV_INI_QP_CONNECTED);

	/* If this was a physical INI/TGT QP connection, remove the QP
	 * from control of the RDMA CM. We don't want the shared INI QP
	 * to be destroyed if this endpoint closes. */
	if (ep->base_ep.id->qp) {
		ep->ini_conn->state = FI_IBV_INI_QP_CONNECTED;
		ep->ini_conn->tgt_qpn = tgt_qpn;
		ep->base_ep.id->qp = NULL;
		VERBS_DBG(FI_LOG_EP_CTRL,
			  "Set INI Conn QP %d remote TGT QP %d\n",
			  ep->ini_conn->ini_qp->qp_num,
			  ep->ini_conn->tgt_qpn);
	}

	fi_ibv_log_ep_conn(ep, "INI Connection Done");
	fi_ibv_sched_ini_conn(ep->ini_conn);
	fastlock_release(&domain->xrc.ini_mgmt_lock);
}
Example #27
int _gnix_buddy_allocator_destroy(gnix_buddy_alloc_handle_t *alloc_handle)
{
	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (unlikely(!alloc_handle)) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Invalid parameter to _gnix_buddy_allocator_destroy."
			  "\n");
		return -FI_EINVAL;
	}

	fastlock_acquire(&alloc_handle->lock);

	free(alloc_handle->lists);

	while (_gnix_free_bitmap(&alloc_handle->bitmap)) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Trying to free buddy allocator handle bitmap.\n");
		sleep(1);
	}

	fastlock_release(&alloc_handle->lock);
	fastlock_destroy(&alloc_handle->lock);

	free(alloc_handle);

	return FI_SUCCESS;
}
Example #28
int
_gnix_notifier_close(struct gnix_mr_notifier *mrn)
{
	int ret = FI_SUCCESS;
	int ret_errno;

	fastlock_acquire(&mrn->lock);

	ret = notifier_verify_stuff(mrn);
	if (ret != FI_SUCCESS) {
		GNIX_WARN(FI_LOG_MR, "Invalid MR notifier\n");
		goto err_exit;
	}

	assert(mrn->ref_cnt > 0);
	if (--mrn->ref_cnt) {
		goto err_exit;
	}

	if (close(mrn->fd) != 0) {
		ret_errno = errno;
		GNIX_INFO(FI_LOG_MR, "error closing kdreg device: %s\n",
			  strerror(ret_errno));
		/* Not all of these map to fi_errno values */
		ret = -ret_errno;
		goto err_exit;
	}

	mrn->fd = -1;
	mrn->cntr = NULL;
err_exit:
	fastlock_release(&mrn->lock);

	return ret;
}
Example #29
File: sock_cq.c Project: Slbomber/ompi
int sock_cq_progress(struct sock_cq *cq)
{
	struct sock_tx_ctx *tx_ctx;
	struct sock_rx_ctx *rx_ctx;
	struct dlist_entry *entry;

	if (cq->domain->progress_mode == FI_PROGRESS_AUTO)
		return 0;

	fastlock_acquire(&cq->list_lock);
	for (entry = cq->tx_list.next; entry != &cq->tx_list;
	     entry = entry->next) {
		tx_ctx = container_of(entry, struct sock_tx_ctx, cq_entry);
		sock_pe_progress_tx_ctx(cq->domain->pe, tx_ctx);
	}

	for (entry = cq->rx_list.next; entry != &cq->rx_list;
	     entry = entry->next) {
		rx_ctx = container_of(entry, struct sock_rx_ctx, cq_entry);
		sock_pe_progress_rx_ctx(cq->domain->pe, rx_ctx);
	}
	fastlock_release(&cq->list_lock);

	return 0;
}
int
_gnix_notifier_monitor(struct gnix_mr_notifier *mrn,
		    void *addr, uint64_t len, uint64_t cookie)
{
	int ret;
	struct registration_monitor rm;

	fastlock_acquire(&mrn->lock);

	ret = notifier_verify_stuff(mrn);
	if (ret != FI_SUCCESS) {
		GNIX_WARN(FI_LOG_MR, "Invalid MR notifier\n");
		goto err_exit;
	}

	GNIX_DEBUG(FI_LOG_MR, "monitoring %p (len=%lu) cookie=%lu\n",
		   addr, len, cookie);

	memset(&rm, 0, sizeof(rm));
	rm.type = REGISTRATION_MONITOR;
	rm.u.mon.addr = (uint64_t) addr;
	rm.u.mon.len = len;
	rm.u.mon.user_cookie = cookie;

	ret = kdreg_write(mrn, &rm, sizeof(rm));

err_exit:
	fastlock_release(&mrn->lock);

	return ret;
}