Example 1
static int fi_ibv_add_rai(struct dlist_entry *verbs_devs, struct rdma_cm_id *id,
		struct rdma_addrinfo *rai)
{
	struct verbs_dev_info *dev;
	struct verbs_addr *addr;
	const char *dev_name;

	if (!(addr = malloc(sizeof(*addr))))
		return -FI_ENOMEM;

	addr->rai = rai;

	dev_name = ibv_get_device_name(id->verbs->device);
	dlist_foreach_container(verbs_devs, struct verbs_dev_info, dev, entry)
		if (!strcmp(dev_name, dev->name))
			goto add_rai;

	if (!(dev = malloc(sizeof(*dev))))
		goto err1;

	if (!(dev->name = strdup(dev_name)))
		goto err2;

	dlist_init(&dev->addrs);
	dlist_insert_tail(&dev->entry, verbs_devs);
add_rai:
	dlist_insert_tail(&addr->entry, &dev->addrs);
	return 0;
err2:
	free(dev);
err1:
	free(addr);
	return -FI_ENOMEM;
}
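
All of the examples in this listing revolve around the same intrusive doubly-linked list idiom: the list hook (a struct dlist_entry) is embedded in the object, dlist_insert_tail() links that hook onto a circular list head, and traversal recovers the containing object with container_of(). The sketch below is a minimal self-contained illustration of that idiom, not libfabric's actual dlist.h; the helper definitions and the my_dev type are assumptions made up for this example.

#include <stddef.h>
#include <stdio.h>

/* Minimal circular, intrusive doubly-linked list (illustrative only;
 * not the real libfabric dlist.h). */
struct dlist_entry {
	struct dlist_entry *next;
	struct dlist_entry *prev;
};

static void dlist_init(struct dlist_entry *head)
{
	head->next = head;
	head->prev = head;
}

static void dlist_insert_tail(struct dlist_entry *item, struct dlist_entry *head)
{
	item->next = head;
	item->prev = head->prev;
	head->prev->next = item;
	head->prev = item;
}

#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

/* Hypothetical payload type: the list hook lives inside the object. */
struct my_dev {
	const char *name;
	struct dlist_entry entry;
};

int main(void)
{
	struct dlist_entry devs;
	struct my_dev a = { .name = "dev0" };
	struct my_dev b = { .name = "dev1" };
	struct dlist_entry *cur;

	dlist_init(&devs);
	dlist_insert_tail(&a.entry, &devs);	/* same argument order as above: item, then head */
	dlist_insert_tail(&b.entry, &devs);

	/* Walk the list and recover each container, which is what
	 * dlist_foreach_container() expands to in the examples above. */
	for (cur = devs.next; cur != &devs; cur = cur->next) {
		struct my_dev *dev = container_of(cur, struct my_dev, entry);
		printf("%s\n", dev->name);
	}
	return 0;
}

The goto-based find-or-create and error unwinding in fi_ibv_add_rai are orthogonal to the list itself; the same few helper operations recur in every example below.
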
Example 2
File: app_pd.c Project: ArkShen/xcb
static inline void load_config(void) {
	/* FIXME */
	if ((cfg = config_load("/etc/xcb/pd.conf"))) {
		char *cat = category_browse(cfg, NULL);

		while (cat) {
			if (!strcasecmp(cat, "pair")) {
				struct variable *var = variable_browse(cfg, cat);
				struct cpl *cpl = NULL;

				while (var) {
					if (!strcasecmp(var->name, "contract1")) {
						if (!strcasecmp(var->value, ""))
							break;
						if (cpl == NULL) {
							if (NEW(cpl) == NULL)
								break;
							cpl->contract2 = NULL;
							cpl->price1 = cpl->price2 = cpl->prevpd = -1.0;
							pthread_spin_init(&cpl->lock, 0);
						}
						cpl->contract1 = var->value;

					} else if (!strcasecmp(var->name, "contract2")) {
						if (!strcasecmp(var->value, ""))
							break;
						if (cpl == NULL) {
							if (NEW(cpl) == NULL)
								break;
							cpl->contract1 = NULL;
							cpl->price1 = cpl->price2 = cpl->prevpd = -1.0;
							pthread_spin_init(&cpl->lock, 0);
						}
						cpl->contract2 = var->value;
					} else
						xcb_log(XCB_LOG_WARNING, "Unknown variable '%s' in "
							"category '%s' of pd.conf", var->name, cat);
					var = var->next;
				}
				if (cpl && cpl->contract1 && cpl->contract2) {
					dlist_t dlist;

					if ((dlist = table_get_value(contracts, cpl->contract1)) == NULL) {
						dlist = dlist_new(NULL, NULL);
						table_insert(contracts, cpl->contract1, dlist);
					}
					dlist_insert_tail(dlist, cpl);
					if ((dlist = table_get_value(contracts, cpl->contract2)) == NULL) {
						dlist = dlist_new(NULL, NULL);
						table_insert(contracts, cpl->contract2, dlist);
					}
					dlist_insert_tail(dlist, cpl);
					dlist_insert_tail(pairs, cpl);
				} else if (cpl)
					FREE(cpl);
			}
			cat = category_browse(cfg, cat);
		}
	}
}
Example 3
/* FIXME */
void sall_command(client c) {
	dstr res = get_indices();
	dstr *fields = NULL;
	int nfield = 0, i;

	RTRIM(res);
	fields = dstr_split_len(res, dstr_length(res), ",", 1, &nfield);
	for (i = 1; i < nfield; ++i) {
		dstr pkey = dstr_new(fields[i]);
		dstr skey = dstr_new(pkey);
		dlist_t dlist;
		struct kvd *kvd;

		table_rwlock_wrlock(subscribers);
		if ((dlist = table_get_value(subscribers, pkey)) == NULL) {
			if (NEW(kvd)) {
				kvd->key = skey;
				kvd->u.dlist = dlist_new(NULL, NULL);
				dlist_insert_tail(kvd->u.dlist, c);
				dlist = dlist_new(cmpkvd, kdfree);
				dlist_insert_sort(dlist, kvd);
				table_insert(subscribers, pkey, dlist);
			} else {
				add_reply_error(c, "error allocating memory for kvd\r\n");
				dstr_free(skey);
				dstr_free(pkey);
			}
		} else {
			if (NEW(kvd)) {
				dlist_node_t node;

				kvd->key     = skey;
				kvd->u.dlist = dlist_new(NULL, NULL);
				if ((node = dlist_find(dlist, kvd)) == NULL) {
					dlist_insert_tail(kvd->u.dlist, c);
					dlist_insert_sort(dlist, kvd);
				} else {
					kdfree(kvd);
					kvd = (struct kvd *)dlist_node_value(node);
					if (dlist_find(kvd->u.dlist, c) == NULL)
						dlist_insert_tail(kvd->u.dlist, c);
				}
			} else {
				add_reply_error(c, "error allocating memory for kvd\r\n");
				dstr_free(skey);
			}
			dstr_free(pkey);
		}
		table_rwlock_unlock(subscribers);
	}
	dstr_free(res);
//--------------------------------------------------------------------------------------------------------------------
//	FIXME
//--------------------------------------------------------------------------------------------------------------------
}
Example 4
static struct sock_conn *sock_conn_map_insert(struct sock_ep *ep,
				struct sockaddr_in *addr, int conn_fd,
				int addr_published)
{
	int index;
	struct sock_conn_map *map = &ep->cmap;

	if (map->size == map->used) {
		if (sock_conn_map_increase(map, map->size * 2))
			return 0;
	}

	index = map->used;
	map->used++;

	map->table[index].addr = *addr;
	map->table[index].sock_fd = conn_fd;
	map->table[index].ep = ep;
	sock_set_sockopts(conn_fd);

	fastlock_acquire(&ep->lock);
	dlist_insert_tail(&map->table[index].ep_entry, &ep->conn_list);
	fastlock_release(&ep->lock);

	if (idm_set(&ep->conn_idm, conn_fd, &map->table[index]) < 0)
		SOCK_LOG_ERROR("idm_set failed\n");

	if (sock_epoll_add(&map->epoll_set, conn_fd))
		SOCK_LOG_ERROR("failed to add to epoll set: %d\n", conn_fd);

	map->table[index].address_published = addr_published;
	sock_pe_poll_add(ep->domain->pe, conn_fd);
	return &map->table[index];
}
Example 5
struct sock_rx_entry *sock_rx_new_buffered_entry(struct sock_rx_ctx *rx_ctx,
						 size_t len)
{
	struct sock_rx_entry *rx_entry;

	if (rx_ctx->buffered_len + len >= rx_ctx->attr.total_buffered_recv) {
		SOCK_LOG_ERROR("Reached max buffered recv limit\n");
		return NULL;
	}

	rx_entry = calloc(1, sizeof(*rx_entry) + len);
	if (!rx_entry)
		return NULL;

	SOCK_LOG_INFO("New buffered entry:%p len: %lu, ctx: %p\n", 
		       rx_entry, len, rx_ctx);

	rx_entry->is_busy = 1;
	rx_entry->is_buffered = 1;
	rx_entry->rx_op.dest_iov_len = 1;
	rx_entry->iov[0].iov.len = len;
	rx_entry->iov[0].iov.addr = (uintptr_t) (rx_entry + 1);
	rx_entry->total_len = len;
	
	rx_ctx->buffered_len += len;
	dlist_insert_tail(&rx_entry->entry, &rx_ctx->rx_buffered_list);
	rx_entry->is_tagged = 0;

	return rx_entry;
}
Example 6
static ssize_t rxd_ep_send_rts(struct rxd_ep *rxd_ep, fi_addr_t rxd_addr)
{
	struct rxd_pkt_entry *pkt_entry;
	struct rxd_rts_pkt *rts_pkt;
	ssize_t ret;
	size_t addrlen;

	pkt_entry = rxd_get_tx_pkt(rxd_ep);
	if (!pkt_entry)
		return -FI_ENOMEM;

	rts_pkt = (struct rxd_rts_pkt *) (pkt_entry->pkt);
	pkt_entry->pkt_size = sizeof(*rts_pkt) + rxd_ep->tx_prefix_size;
	pkt_entry->peer = rxd_addr;

	rts_pkt->base_hdr.version = RXD_PROTOCOL_VERSION;
	rts_pkt->base_hdr.type = RXD_RTS;
	rts_pkt->rts_addr = rxd_addr;

	addrlen = RXD_NAME_LENGTH;
	memset(rts_pkt->source, 0, RXD_NAME_LENGTH);
	ret = fi_getname(&rxd_ep->dg_ep->fid, (void *) rts_pkt->source,
			 &addrlen);
	if (ret) {
		ofi_buf_free(pkt_entry);
		return ret;
	}

	rxd_ep_send_pkt(rxd_ep, pkt_entry);
	rxd_insert_unacked(rxd_ep, rxd_addr, pkt_entry);
	dlist_insert_tail(&rxd_ep->peers[rxd_addr].entry, &rxd_ep->rts_sent_list);

	return 0;
}
Example 7
static int sock_ep_rx_ctx(struct fid_ep *ep, int index, struct fi_rx_attr *attr,
		    struct fid_ep **rx_ep, void *context)
{
	struct sock_ep *sock_ep;
	struct sock_rx_ctx *rx_ctx;

	sock_ep = container_of(ep, struct sock_ep, ep);
	if (index >= sock_ep->ep_attr.rx_ctx_cnt)
		return -FI_EINVAL;

	rx_ctx = sock_rx_ctx_alloc(attr, context);
	if (!rx_ctx)
		return -FI_ENOMEM;

	rx_ctx->attr.total_buffered_recv = rx_ctx->attr.total_buffered_recv ?
		rx_ctx->attr.total_buffered_recv : SOCK_EP_MAX_BUFF_RECV;
	
	rx_ctx->rx_id = index;
	rx_ctx->ep = sock_ep;
	rx_ctx->domain = sock_ep->domain;
	dlist_insert_tail(&sock_ep->rx_ctx_entry, &rx_ctx->ep_list);

	rx_ctx->ctx.fid.ops = &sock_ctx_ops;
	rx_ctx->ctx.ops = &sock_ctx_ep_ops;
	rx_ctx->ctx.msg = &sock_ep_msg_ops;
	rx_ctx->ctx.tagged = &sock_ep_tagged;

	rx_ctx->min_multi_recv = sock_ep->min_multi_recv;
	*rx_ep = &rx_ctx->ctx;
	sock_ep->rx_array[index] = rx_ctx;
	atomic_inc(&sock_ep->num_rx_ctx);
	atomic_inc(&sock_ep->domain->ref);
	return 0;
}
Example 8
void rxd_ep_send_ack(struct rxd_ep *rxd_ep, fi_addr_t peer)
{
	struct rxd_pkt_entry *pkt_entry;
	struct rxd_ack_pkt *ack;

	pkt_entry = rxd_get_tx_pkt(rxd_ep);
	if (!pkt_entry) {
		FI_WARN(&rxd_prov, FI_LOG_EP_CTRL, "Unable to send ack\n");
		return;
	}

	ack = (struct rxd_ack_pkt *) (pkt_entry->pkt);
	pkt_entry->pkt_size = sizeof(*ack) + rxd_ep->tx_prefix_size;
	pkt_entry->peer = peer;

	ack->base_hdr.version = RXD_PROTOCOL_VERSION;
	ack->base_hdr.type = RXD_ACK;
	ack->base_hdr.peer = rxd_ep->peers[peer].peer_addr;
	ack->base_hdr.seq_no = rxd_ep->peers[peer].rx_seq_no;
	ack->ext_hdr.rx_id = rxd_ep->peers[peer].rx_window;
	rxd_ep->peers[peer].last_tx_ack = ack->base_hdr.seq_no;

	dlist_insert_tail(&pkt_entry->d_entry, &rxd_ep->ctrl_pkts);
	if (rxd_ep_send_pkt(rxd_ep, pkt_entry)) {
		dlist_remove(&pkt_entry->d_entry);
		ofi_buf_free(pkt_entry);
	}
}
Example 9
static int sock_ep_tx_ctx(struct fid_ep *ep, int index, struct fi_tx_attr *attr,
			  struct fid_ep **tx_ep, void *context)
{
	struct sock_ep *sock_ep;
	struct sock_tx_ctx *tx_ctx;

	sock_ep = container_of(ep, struct sock_ep, ep);
	if (sock_ep->attr->fclass != FI_CLASS_SEP ||
		index >= sock_ep->attr->ep_attr.tx_ctx_cnt)
		return -FI_EINVAL;

	tx_ctx = sock_tx_ctx_alloc(&sock_ep->tx_attr, context, 0);
	if (!tx_ctx)
		return -FI_ENOMEM;

	tx_ctx->tx_id = index;
	tx_ctx->ep_attr = sock_ep->attr;
	tx_ctx->domain = sock_ep->attr->domain;
	tx_ctx->av = sock_ep->attr->av;
	dlist_insert_tail(&sock_ep->attr->tx_ctx_entry, &tx_ctx->ep_list);

	tx_ctx->fid.ctx.fid.ops = &sock_ctx_ops;
	tx_ctx->fid.ctx.ops = &sock_ctx_ep_ops;
	tx_ctx->fid.ctx.msg = &sock_ep_msg_ops;
	tx_ctx->fid.ctx.tagged = &sock_ep_tagged;
	tx_ctx->fid.ctx.rma = &sock_ep_rma;
	tx_ctx->fid.ctx.atomic = &sock_ep_atomic;

	*tx_ep = &tx_ctx->fid.ctx;
	sock_ep->attr->tx_array[index] = tx_ctx;
	atomic_inc(&sock_ep->attr->num_tx_ctx);
	atomic_inc(&sock_ep->attr->domain->ref);
	return 0;
}
Example 10
int ofi_domain_init(struct fid_fabric *fabric_fid, const struct fi_info *info,
		   struct util_domain *domain, void *context)
{
	struct util_fabric *fabric;
	int ret;

	fabric = container_of(fabric_fid, struct util_fabric, fabric_fid);
	domain->fabric = fabric;
	domain->prov = fabric->prov;
	ret = util_domain_init(domain, info);
	if (ret) {
		free(domain);
		return ret;
	}

	domain->domain_fid.fid.fclass = FI_CLASS_DOMAIN;
	domain->domain_fid.fid.context = context;
	/*
	 * domain ops set by provider
	 */
	domain->domain_fid.mr = &util_domain_mr_ops;

	fastlock_acquire(&fabric->lock);
	dlist_insert_tail(&domain->list_entry, &fabric->domain_list);
	fastlock_release(&fabric->lock);

	ofi_atomic_inc32(&fabric->ref);
	return 0;
}
Example 11
void rxd_insert_unacked(struct rxd_ep *ep, fi_addr_t peer,
			struct rxd_pkt_entry *pkt_entry)
{
	dlist_insert_tail(&pkt_entry->d_entry,
			  &ep->peers[peer].unacked);
	ep->peers[peer].unacked_cnt++;
}
Example 12
w_err_t wind_msgbox_post(w_msgbox_s *msgbox,w_msg_s *pmsg)
{
    w_thread_s *thread;
    WIND_ASSERT_RETURN(msgbox != W_NULL,W_ERR_PTR_NULL);
    WIND_ASSERT_RETURN(pmsg != W_NULL,W_ERR_PTR_NULL);
    WIND_ASSERT_RETURN(msgbox->obj.magic == WIND_MSGBOX_MAGIC,W_ERR_FAIL);
    WIND_ASSERT_RETURN(msgbox->owner,W_ERR_FAIL);
    // queue the message into the mailbox
    wind_disable_switch();
    dlist_insert_tail(&msgbox->msglist,&pmsg->msgnode);
    msgbox->msgnum ++;

    // wake up the blocked owner thread
    thread = msgbox->owner;
    if((thread->runstat != THREAD_STATUS_SLEEP) 
       || (thread->cause != CAUSE_MSG))
    {
        wind_enable_switch();
        return W_ERR_OK;
    }
    msgbox->owner->runstat = THREAD_STATUS_READY;
    wind_enable_switch();
    _wind_thread_dispatch();  // switch to another thread
    return W_ERR_OK;
}
Example 13
static int sock_ep_rx_ctx(struct fid_ep *ep, int index, struct fi_rx_attr *attr,
		    struct fid_ep **rx_ep, void *context)
{
	struct sock_ep *sock_ep;
	struct sock_rx_ctx *rx_ctx;

	sock_ep = container_of(ep, struct sock_ep, ep);
	if (sock_ep->attr->fclass != FI_CLASS_SEP ||
		index >= sock_ep->attr->ep_attr.rx_ctx_cnt)
		return -FI_EINVAL;

	rx_ctx = sock_rx_ctx_alloc(attr ? attr : &sock_ep->rx_attr, context, 0);
	if (!rx_ctx)
		return -FI_ENOMEM;

	rx_ctx->rx_id = index;
	rx_ctx->ep_attr = sock_ep->attr;
	rx_ctx->domain = sock_ep->attr->domain;
	rx_ctx->av = sock_ep->attr->av;
	dlist_insert_tail(&sock_ep->attr->rx_ctx_entry, &rx_ctx->ep_list);

	rx_ctx->ctx.fid.ops = &sock_ctx_ops;
	rx_ctx->ctx.ops = &sock_ctx_ep_ops;
	rx_ctx->ctx.msg = &sock_ep_msg_ops;
	rx_ctx->ctx.tagged = &sock_ep_tagged;

	rx_ctx->min_multi_recv = sock_ep->attr->min_multi_recv;
	*rx_ep = &rx_ctx->ctx;
	sock_ep->attr->rx_array[index] = rx_ctx;
	atomic_inc(&sock_ep->attr->num_rx_ctx);
	atomic_inc(&sock_ep->attr->domain->ref);
	return 0;
}
Example 14
static client client_new(int fd, int sock) {
	client c;

	if (NEW(c) == NULL)
		return NULL;
	/* FIXME */
	net_nonblock(fd, NULL, 0);
	net_tcp_nodelay(fd, NULL, 0);
	if (create_file_event(el, fd, EVENT_READABLE, read_from_client, c) == -1) {
		close(fd);
		client_free(c);
		return NULL;
	}
	c->fd            = fd;
	pthread_spin_init(&c->lock, 0);
	c->sock          = sock;
	c->flags         = 0;
	c->inpos         = 0;
	c->argc          = 0;
	c->argv          = NULL;
	c->outpos        = 0;
	c->reply         = ring_new();
	c->sentlen       = 0;
	c->refcount      = 0;
	c->eagcount      = 0;
	c->authenticated = 0;
	dlist_lock(clients);
	dlist_insert_tail(clients, c);
	dlist_unlock(clients);
	return c;
}
Example 15
static inline int __gnix_buddy_create_lists(gnix_buddy_alloc_handle_t
					    *alloc_handle)
{
	uint32_t i, offset = 0;

	alloc_handle->nlists = (uint32_t) __gnix_buddy_log2(alloc_handle->max /
							    MIN_BLOCK_SIZE) + 1;
	alloc_handle->lists = calloc(1, sizeof(struct dlist_entry) *
				     alloc_handle->nlists);

	if (unlikely(!alloc_handle->lists)) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Could not create buddy allocator lists.\n");
		return -FI_ENOMEM;
	}

	for (i = 0; i < alloc_handle->nlists; i++) {
		dlist_init(alloc_handle->lists + i);
	}

	/* Insert free blocks of size max in sorted order into last list */
	for (i = 0; i < alloc_handle->len / alloc_handle->max; i++) {
		dlist_insert_tail((void *) ((uint8_t *) alloc_handle->base +
					    offset),
				  alloc_handle->lists +
				  alloc_handle->nlists - 1);
		offset += alloc_handle->max;
	}

	return FI_SUCCESS;
}
Example 16
int rxm_ep_recv_common(struct fid_ep *ep_fid, const struct iovec *iov, void **desc,
		size_t count, fi_addr_t src_addr, uint64_t tag, uint64_t ignore,
		void *context, uint64_t flags, int op)
{
	struct rxm_recv_entry *recv_entry;
	struct rxm_ep *rxm_ep;
	struct rxm_recv_queue *recv_queue;
	dlist_func_t *match;
	int ret, i;

	rxm_ep = container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);

	// TODO pass recv_queue as arg
	if (op == ofi_op_msg) {
		recv_queue = &rxm_ep->recv_queue;
		match = ofi_match_unexp_msg;
	} else if (op == ofi_op_tagged) {
		recv_queue = &rxm_ep->trecv_queue;
		match = ofi_match_unexp_msg_tagged;
	} else {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA, "Unknown op!\n");
		return -FI_EINVAL;
	}

	if (freestack_isempty(recv_queue->recv_fs)) {
		FI_DBG(&rxm_prov, FI_LOG_CQ, "Exhausted recv_entry freestack\n");
		return -FI_EAGAIN;
	}

	recv_entry = freestack_pop(recv_queue->recv_fs);

	for (i = 0; i < count; i++) {
		recv_entry->iov[i].iov_base = iov[i].iov_base;
		recv_entry->iov[i].iov_len = iov[i].iov_len;
		recv_entry->desc[i] = desc[i];
		FI_DBG(&rxm_prov, FI_LOG_EP_CTRL, "post recv: %zu\n",
			iov[i].iov_len);
	}
	recv_entry->count = count;
	recv_entry->addr = (rxm_ep->rxm_info->caps & FI_DIRECTED_RECV) ?
		src_addr : FI_ADDR_UNSPEC;
	recv_entry->flags = flags;
	if (op == ofi_op_tagged) {
		recv_entry->tag = tag;
		recv_entry->ignore = ignore;
	}

	if (!dlist_empty(&recv_queue->unexp_msg_list)) {
		ret = rxm_check_unexp_msg_list(rxm_ep->util_ep.rx_cq, recv_queue,
				recv_entry, match);
		if (ret) {
			FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
					"Unable to check unexp msg list\n");
			return ret;
		}
	}

	dlist_insert_tail(&recv_entry->entry, &recv_queue->recv_list);
	return 0;
}
Example 17
w_err_t wind_msgbox_wait(w_msgbox_s *msgbox,w_msg_s **pmsg,w_uint32_t timeout)
{
    w_err_t err;
    w_uint32_t ticks;
    w_dnode_s *dnode;
    w_thread_s *thread;
    w_dlist_s *sleeplist = _wind_thread_sleep_list();
    WIND_ASSERT_RETURN(msgbox != W_NULL,W_ERR_PTR_NULL);
    WIND_ASSERT_RETURN(pmsg != W_NULL,W_ERR_PTR_NULL);
    WIND_ASSERT_RETURN(msgbox->obj.magic == WIND_MSGBOX_MAGIC,W_ERR_FAIL);
    WIND_ASSERT_RETURN(msgbox->owner,W_ERR_FAIL);
    thread = wind_thread_current();
    WIND_ASSERT_RETURN(msgbox->owner == thread,W_ERR_FAIL);
    // if the mailbox already holds a message, return it right away
    wind_disable_switch();
    if(msgbox->msgnum > 0)
    {
        msgbox->msgnum --;
        dnode = dlist_remove_head(&msgbox->msglist);
        *pmsg = NODE_TO_MSG(dnode);
        wind_enable_switch();
        return W_ERR_OK;
    }

    // otherwise put the current thread on the sleep queue
    ticks = timeout * WIND_TICK_PER_SEC / 1000;
    if(ticks == 0)
        ticks = 1;
    thread = msgbox->owner;
    thread->runstat = THREAD_STATUS_SLEEP;
    thread->cause = CAUSE_MSG;
    dlist_insert_tail(sleeplist,&thread->sleepnode.dnode);
    wind_enable_switch();
    
    _wind_thread_dispatch();
    if(thread->cause == CAUSE_MSG)
    {
        if(msgbox->msgnum <= 0)
        {
            thread->runstat = THREAD_STATUS_READY;
            return W_ERR_PTR_NULL;
        }
        dnode = dlist_remove_head(&msgbox->msglist);
        *pmsg = NODE_TO_MSG(dnode);
        wind_enable_switch();
        err = W_ERR_OK;
    }
    else if(thread->cause == CAUSE_SLEEP)
    {
        err = W_ERR_TIMEOUT;
    }
    else
        err = W_ERR_FAIL;
    
    return err;
}
Example 18
ssize_t sock_queue_atomic_op(struct fid_ep *ep, const struct fi_msg_atomic *msg,
			     const struct fi_ioc *comparev, size_t compare_count,
			     struct fi_ioc *resultv, size_t result_count,
			     uint64_t flags, uint8_t op_type)
{
	struct sock_cntr *cntr;
	struct sock_trigger *trigger;
	struct fi_triggered_context *trigger_context;
	struct fi_trigger_threshold *threshold;

	trigger_context = (struct fi_triggered_context *) msg->context;
	if ((flags & FI_INJECT) || !trigger_context ||
	     (trigger_context->event_type != FI_TRIGGER_THRESHOLD))
		return -FI_EINVAL;

	threshold = &trigger_context->trigger.threshold;
	cntr = container_of(threshold->cntr, struct sock_cntr, cntr_fid);
	if (atomic_get(&cntr->value) >= threshold->threshold)
		return 1;

	trigger = calloc(1, sizeof(*trigger));
	if (!trigger)
		return -FI_ENOMEM;

	trigger->threshold = threshold->threshold;
	memcpy(&trigger->op.atomic.msg, msg, sizeof(*msg));
	trigger->op.atomic.msg.msg_iov = &trigger->op.atomic.msg_iov[0];
	trigger->op.atomic.msg.rma_iov = &trigger->op.atomic.rma_iov[0];

	memcpy(&trigger->op.atomic.msg_iov[0], &msg->msg_iov[0],
	       msg->iov_count * sizeof(struct fi_ioc));
	memcpy(&trigger->op.atomic.rma_iov[0], &msg->rma_iov[0],
	       msg->iov_count * sizeof(struct fi_rma_ioc));

	if (comparev) {
		memcpy(&trigger->op.atomic.comparev[0], &comparev[0],
		       compare_count * sizeof(struct fi_ioc));
	}

	if (resultv) {
		memcpy(&trigger->op.atomic.resultv[0], &resultv[0],
		       result_count * sizeof(struct fi_ioc));
	}

	trigger->op_type = op_type;
	trigger->ep = ep;
	trigger->flags = flags;

	fastlock_acquire(&cntr->trigger_lock);
	dlist_insert_tail(&trigger->entry, &cntr->trigger_list);
	fastlock_release(&cntr->trigger_lock);
	sock_cntr_check_trigger_list(cntr);
	return 0;
}
Example 19
ssize_t	rxd_ep_readmsg(struct fid_ep *ep, const struct fi_msg_rma *msg,
		       uint64_t flags)
{
	ssize_t ret;
	uint64_t peer_addr;
	struct rxd_ep *rxd_ep;
	struct rxd_peer *peer;
	struct rxd_tx_entry *tx_entry;
	rxd_ep = container_of(ep, struct rxd_ep, ep);

	peer_addr = rxd_av_get_dg_addr(rxd_ep->av, msg->addr);
	peer = rxd_ep_getpeer_info(rxd_ep, peer_addr);

#if ENABLE_DEBUG
	if (msg->iov_count > RXD_IOV_LIMIT ||
	    msg->rma_iov_count > RXD_IOV_LIMIT)
		return -FI_EINVAL;
#endif

	rxd_ep_lock_if_required(rxd_ep);
	if (!peer->addr_published) {
		ret = rxd_ep_post_conn_msg(rxd_ep, peer, peer_addr);
		ret = (ret) ? ret : -FI_EAGAIN;
		goto out;
	}

	tx_entry = rxd_tx_entry_acquire(rxd_ep, peer);
	if (!tx_entry) {
		ret = -FI_EAGAIN;
		goto out;
	}

	dlist_init(&tx_entry->pkt_list);
	tx_entry->op_type = RXD_TX_READ_REQ;
	tx_entry->read_req.msg = *msg;
	tx_entry->flags = flags;
	tx_entry->peer = peer_addr;
	rxd_ep_copy_msg_iov(msg->msg_iov,
			    &tx_entry->read_req.dst_iov[0], msg->iov_count);
	rxd_ep_copy_rma_iov(msg->rma_iov,
			    &tx_entry->read_req.src_iov[0], msg->rma_iov_count);
	ret = rxd_ep_post_start_msg(rxd_ep, peer, ofi_op_read_req, tx_entry);
	if (ret)
		goto err;

	dlist_insert_tail(&tx_entry->entry, &rxd_ep->tx_entry_list);
out:
	rxd_ep_unlock_if_required(rxd_ep);
	return ret;
err:
	rxd_tx_entry_release(rxd_ep, tx_entry);
	goto out;
}
Example 20
static struct rxd_rx_entry *rxd_rx_entry_alloc(struct rxd_ep *ep)
{
	struct rxd_rx_entry *rx_entry;

	if (freestack_isempty(ep->rx_entry_fs))
		return NULL;

	rx_entry = freestack_pop(ep->rx_entry_fs);
	rx_entry->key = rx_entry - &ep->rx_entry_fs->buf[0];
	dlist_insert_tail(&rx_entry->entry, &ep->rx_entry_list);
	return rx_entry;
}
Example 21
/* FIXME */
static void monitor_command(client c) {
	if (!strcasecmp(c->argv[1], "on")) {
		dlist_rwlock_wrlock(monitors);
		if (dlist_find(monitors, c) == NULL)
			dlist_insert_tail(monitors, c);
		dlist_rwlock_unlock(monitors);
	} else if (!strcasecmp(c->argv[1], "off")) {
		remove_from_monitors(c);
		add_reply_string(c, "\r\n", 2);
	} else
		add_reply_error_format(c, "unknown action '%s'\r\n", c->argv[1]);
}
Example 22
/* Schedule the VC for RX progress. */
int _gnix_vc_rx_schedule(struct gnix_vc *vc)
{
	struct gnix_nic *nic = vc->ep->nic;

	if (!_gnix_test_and_set_bit(&vc->flags, GNIX_VC_FLAG_RX_SCHEDULED)) {
		fastlock_acquire(&nic->rx_vc_lock);
		dlist_insert_tail(&vc->rx_list, &nic->rx_vcs);
		fastlock_release(&nic->rx_vc_lock);
		GNIX_INFO(FI_LOG_EP_CTRL, "Scheduled RX VC (%p)\n", vc);
	}

	return FI_SUCCESS;
}
Example 23
static int gnix_ep_control(fid_t fid, int command, void *arg)
{
	int i, ret = FI_SUCCESS;
	struct gnix_fid_ep *ep;
	struct gnix_fid_domain *dom;
	struct gnix_vc *vc;

	ep = container_of(fid, struct gnix_fid_ep, ep_fid);

	switch (command) {
	/*
	 * for FI_EP_RDM, post wc datagrams now
	 */
	case FI_ENABLE:
		if (ep->type == FI_EP_RDM) {
			dom = ep->domain;
			for (i = 0; i < dom->fabric->n_wc_dgrams; i++) {
				assert(ep->recv_cq != NULL);
				ret = _gnix_vc_alloc(ep, FI_ADDR_UNSPEC, &vc);
				if (ret != FI_SUCCESS) {
					GNIX_WARN(FI_LOG_EP_CTRL,
				     "_gnix_vc_alloc call returned %d\n", ret);
					goto err;
				}
				ret = _gnix_vc_accept(vc);
				if (ret != FI_SUCCESS) {
					GNIX_WARN(FI_LOG_EP_CTRL,
						"_gnix_vc_accept returned %d\n",
						ret);
					_gnix_vc_destroy(vc);
					goto err;
				} else {
					fastlock_acquire(&ep->vc_list_lock);
					dlist_insert_tail(&vc->entry,
						       &ep->wc_vc_list);
					fastlock_release(&ep->vc_list_lock);
				}
			}
		}
		break;

	case FI_GETFIDFLAG:
	case FI_SETFIDFLAG:
	case FI_ALIAS:
	default:
		return -FI_ENOSYS;
	}

err:
	return ret;
}
Example 24
static int rxm_finish_buf_recv(struct rxm_rx_buf *rx_buf)
{
	uint64_t flags;
	char *data;

	if (rx_buf->pkt.ctrl_hdr.type == ofi_ctrl_seg_data &&
	    rxm_sar_get_seg_type(&rx_buf->pkt.ctrl_hdr) != RXM_SAR_SEG_FIRST) {
		dlist_insert_tail(&rx_buf->unexp_msg.entry,
				  &rx_buf->conn->sar_deferred_rx_msg_list);
		rx_buf = rxm_rx_buf_alloc(rx_buf->ep, rx_buf->msg_ep, 1);
		if (OFI_UNLIKELY(!rx_buf)) {
			FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
				"ran out of buffers from RX buffer pool\n");
			return -FI_ENOMEM;
		}
		dlist_insert_tail(&rx_buf->repost_entry,
				  &rx_buf->ep->repost_ready_list);

		return 0;
	}

	flags = rxm_cq_get_rx_comp_and_op_flags(rx_buf);

	if (rx_buf->pkt.ctrl_hdr.type != ofi_ctrl_data)
		flags |= FI_MORE;

	if (rx_buf->pkt.ctrl_hdr.type == ofi_ctrl_large_data)
		data = rxm_pkt_rndv_data(&rx_buf->pkt);
	else
		data = rx_buf->pkt.data;

	FI_DBG(&rxm_prov, FI_LOG_CQ, "writing buffered recv completion: "
	       "length: %" PRIu64 "\n", rx_buf->pkt.hdr.size);
	rx_buf->recv_context.ep = &rx_buf->ep->util_ep.ep_fid;

	return rxm_cq_write_recv_comp(rx_buf, &rx_buf->recv_context, flags,
				      rx_buf->pkt.hdr.size, data);
}
Example 25
// TODO go for separate recv functions (recvmsg, recvv, etc) to be optimal
static ssize_t
mrail_recv_common(struct mrail_ep *mrail_ep, struct mrail_recv_queue *recv_queue,
		  struct iovec *iov, size_t count, void *context,
		  fi_addr_t src_addr, uint64_t tag, uint64_t ignore,
		  uint64_t flags, uint64_t comp_flags)
{
	struct mrail_recv *recv;
	struct mrail_unexp_msg_entry *unexp_msg_entry;

	recv = mrail_pop_recv(mrail_ep);
	if (!recv)
		return -FI_EAGAIN;

	recv->count 		= count + 1;
	recv->context 		= context;
	recv->flags 		= flags;
	recv->comp_flags 	|= comp_flags;
	recv->addr	 	= src_addr;
	recv->tag 		= tag;
	recv->ignore 		= ignore;

	memcpy(&recv->iov[1], iov, sizeof(*iov) * count);

	FI_DBG(&mrail_prov, FI_LOG_EP_DATA, "Posting recv of length: %zu "
	       "src_addr: 0x%" PRIx64 " tag: 0x%" PRIx64 " ignore: 0x%" PRIx64
	       "\n", ofi_total_iov_len(iov, count), recv->addr,
	       recv->tag, recv->ignore);

	ofi_ep_lock_acquire(&mrail_ep->util_ep);
	unexp_msg_entry = container_of(dlist_remove_first_match(
						&recv_queue->unexp_msg_list,
						recv_queue->match_unexp,
						recv),
				       struct mrail_unexp_msg_entry,
				       entry);
	if (!unexp_msg_entry) {
		dlist_insert_tail(&recv->entry, &recv_queue->recv_list);
		ofi_ep_lock_release(&mrail_ep->util_ep);
		return 0;
	}
	ofi_ep_lock_release(&mrail_ep->util_ep);

	FI_DBG(recv_queue->prov, FI_LOG_EP_DATA, "Match for posted recv"
	       " with addr: 0x%" PRIx64 ", tag: 0x%" PRIx64 " ignore: "
	       "0x%" PRIx64 " found in unexpected msg queue\n",
	       recv->addr, recv->tag, recv->ignore);

	return mrail_cq_process_buf_recv((struct fi_cq_tagged_entry *)
					 unexp_msg_entry->data, recv);
}
Example 26
int ofi_ep_bind_av(struct util_ep *util_ep, struct util_av *av)
{
	if (util_ep->av) {
		FI_WARN(util_ep->av->prov, FI_LOG_EP_CTRL,
				"duplicate AV binding\n");
		return -FI_EINVAL;
	}
	util_ep->av = av;
	ofi_atomic_inc32(&av->ref);

	fastlock_acquire(&av->ep_list_lock);
	dlist_insert_tail(&util_ep->av_entry, &av->ep_list);
	fastlock_release(&av->ep_list_lock);

	return 0;
}
Example 27
void ofi_mr_cache_delete(struct ofi_mr_cache *cache, struct ofi_mr_entry *entry)
{
	FI_DBG(cache->domain->prov, FI_LOG_MR, "delete %p (len: %" PRIu64 ")\n",
	       entry->iov.iov_base, entry->iov.iov_len);
	cache->delete_cnt++;

	util_mr_cache_process_events(cache);

	if (--entry->use_cnt == 0) {
		if (entry->cached) {
			dlist_insert_tail(&entry->lru_entry, &cache->lru_list);
		} else {
			util_mr_free_entry(cache, entry);
		}
	}
}
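
In ofi_mr_cache_delete() above, an entry whose use count drops to zero is parked at the tail of the cache's LRU list; eviction elsewhere in the cache then pulls from the head, so the entry released longest ago goes first. The following is a minimal self-contained sketch of that tail-insert/head-evict pattern under the same toy dlist definitions as the earlier sketch (repeated so this block compiles on its own); cache_entry and lru_evict() are illustrative names, not the real ofi_mr_cache API.

#include <stddef.h>
#include <stdio.h>

/* Toy circular, intrusive list; illustrative only. */
struct dlist_entry {
	struct dlist_entry *next;
	struct dlist_entry *prev;
};

static void dlist_init(struct dlist_entry *head)
{
	head->next = head;
	head->prev = head;
}

static void dlist_insert_tail(struct dlist_entry *item, struct dlist_entry *head)
{
	item->next = head;
	item->prev = head->prev;
	head->prev->next = item;
	head->prev = item;
}

static void dlist_remove(struct dlist_entry *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
}

#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

/* Hypothetical cache entry: released entries are parked on the LRU tail. */
struct cache_entry {
	int id;
	struct dlist_entry lru_entry;
};

/* Evict the least recently released entry: it sits at the head. */
static struct cache_entry *lru_evict(struct dlist_entry *lru_list)
{
	struct dlist_entry *oldest;

	if (lru_list->next == lru_list)		/* list is empty */
		return NULL;

	oldest = lru_list->next;
	dlist_remove(oldest);
	return container_of(oldest, struct cache_entry, lru_entry);
}

int main(void)
{
	struct dlist_entry lru_list;
	struct cache_entry e1 = { .id = 1 };
	struct cache_entry e2 = { .id = 2 };

	dlist_init(&lru_list);
	/* Releasing e1 first, then e2, mirrors two use counts dropping to zero. */
	dlist_insert_tail(&e1.lru_entry, &lru_list);
	dlist_insert_tail(&e2.lru_entry, &lru_list);

	printf("evicted entry %d\n", lru_evict(&lru_list)->id);	/* prints 1 */
	return 0;
}

The same head/tail asymmetry is why dlist_insert_tail is the natural call for FIFO-ordered structures in the other examples, such as unacked packet queues and pending receive lists.
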
Example 28
void sock_cq_add_tx_ctx(struct sock_cq *cq, struct sock_tx_ctx *tx_ctx)
{
	struct dlist_entry *entry;
	struct sock_tx_ctx *curr_ctx;
	fastlock_acquire(&cq->list_lock);
	for (entry = cq->tx_list.next; entry != &cq->tx_list;
	     entry = entry->next) {
		curr_ctx = container_of(entry, struct sock_tx_ctx, cq_entry);
		if (tx_ctx == curr_ctx)
			goto out;
	}
	dlist_insert_tail(&tx_ctx->cq_entry, &cq->tx_list);
	ofi_atomic_inc32(&cq->ref);
out:
	fastlock_release(&cq->list_lock);
}
Example 29
/* Schedule the VC for work progress. */
static int __gnix_vc_work_schedule(struct gnix_vc *vc)
{
	struct gnix_nic *nic = vc->ep->nic;

	/* Don't bother scheduling if there's no work to do. */
	if (slist_empty(&vc->work_queue))
		return FI_SUCCESS;

	if (!_gnix_test_and_set_bit(&vc->flags, GNIX_VC_FLAG_WORK_SCHEDULED)) {
		fastlock_acquire(&nic->work_vc_lock);
		dlist_insert_tail(&vc->work_list, &nic->work_vcs);
		fastlock_release(&nic->work_vc_lock);
		GNIX_INFO(FI_LOG_EP_CTRL, "Scheduled work VC (%p)\n", vc);
	}

	return FI_SUCCESS;
}
Example 30
void rxd_ep_handle_read_req(struct rxd_ep *ep, struct rxd_tx_entry *tx_entry,
			    struct rxd_peer *peer)
{
	int ret;

	dlist_init(&tx_entry->pkt_list);
	tx_entry->op_type = RXD_TX_READ_RSP;
	ret = rxd_ep_post_start_msg(ep, peer, ofi_op_read_rsp, tx_entry);
	if (ret)
		goto err;

	dlist_insert_tail(&tx_entry->entry, &ep->tx_entry_list);
	return;
err:
	rxd_tx_entry_release(ep, tx_entry);
	return;
}