Example no. 1
0
static int sock_dom_close(struct fid *fid)
{
	struct sock_domain *dom;
	void *res;
	int c;

	dom = container_of(fid, struct sock_domain, dom_fid.fid);
	if (atomic_get(&dom->ref)) {
		return -FI_EBUSY;
	}

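	/* Stop listening and write to the signal fd so the listener thread wakes up and can be joined. */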
	dom->listening = 0;
	write(dom->signal_fds[0], &c, 1);
	if (pthread_join(dom->listen_thread, &res)) {
		SOCK_LOG_ERROR("could not join listener thread, errno = %d\n", errno);
		return -FI_EBUSY;
	}

	if (dom->r_cmap.size)
		sock_conn_map_destroy(&dom->r_cmap);
	fastlock_destroy(&dom->r_cmap.lock);

	sock_pe_finalize(dom->pe);
	fastlock_destroy(&dom->lock);
	free(dom);
	return 0;
}
Example no. 2
0
static int psmx2_domain_close(fid_t fid)
{
	struct psmx2_fid_domain *domain;

	domain = container_of(fid, struct psmx2_fid_domain,
			      util_domain.domain_fid.fid);

	FI_INFO(&psmx2_prov, FI_LOG_DOMAIN, "refcnt=%d\n",
		ofi_atomic_get32(&domain->util_domain.ref));

	psmx2_domain_release(domain);

	if (ofi_domain_close(&domain->util_domain))
		return 0;

	if (domain->progress_thread_enabled)
		psmx2_domain_stop_progress(domain);

	fastlock_destroy(&domain->sep_lock);

	fastlock_destroy(&domain->vl_lock);
	rbtDelete(domain->mr_map);
	fastlock_destroy(&domain->mr_lock);

	psmx2_trx_ctxt_free(domain->base_trx_ctxt);
	domain->fabric->active_domain = NULL;
	free(domain);

	psmx2_atomic_global_fini();
	psmx2_am_global_fini();
	return 0;
}
Example no. 3
0
static int psmx2_domain_close(fid_t fid)
{
	struct psmx2_fid_domain *domain;

	domain = container_of(fid, struct psmx2_fid_domain,
			      util_domain.domain_fid.fid);

	FI_INFO(&psmx2_prov, FI_LOG_DOMAIN, "refcnt=%d\n",
		ofi_atomic_get32(&domain->util_domain.ref));

	if (ofi_domain_close(&domain->util_domain))
		return 0;

	if (domain->progress_thread_enabled)
		psmx2_domain_stop_progress(domain);

	fastlock_destroy(&domain->sep_lock);
	fastlock_destroy(&domain->mr_lock);
	rbtDelete(domain->mr_map);

	psmx2_lock(&domain->fabric->domain_lock, 1);
	dlist_remove(&domain->entry);
	psmx2_unlock(&domain->fabric->domain_lock, 1);
	psmx2_fabric_release(domain->fabric);

	free(domain);
	return 0;
}
Example no. 4
0
static int psmx_domain_close(fid_t fid)
{
	struct psmx_fid_domain *domain;
	int err;

	domain = container_of(fid, struct psmx_fid_domain,
			      util_domain.domain_fid.fid);

	FI_INFO(&psmx_prov, FI_LOG_DOMAIN, "refcnt=%d\n",
		ofi_atomic_get32(&domain->util_domain.ref));

	psmx_domain_release(domain);

	if (ofi_domain_close(&domain->util_domain))
		return 0;

	if (domain->progress_thread_enabled)
		psmx_domain_stop_progress(domain);

	if (domain->am_initialized)
		psmx_am_fini(domain);

	fastlock_destroy(&domain->poll_lock);
	rbtDelete(domain->mr_map);
	fastlock_destroy(&domain->mr_lock);

#if 0
	/* AM messages could arrive after MQ is finalized, causing segfault
	 * when trying to dereference the MQ pointer. There is no mechanism
	 * to properly shutdown AM. The workaround is to keep MQ valid.
	 */
	psm_mq_finalize(domain->psm_mq);
#endif

	/* workaround for:
	 * Assertion failure at psm_ep.c:1059: ep->mctxt_master == ep
	 */
	sleep(psmx_env.delay);

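	/* Try a graceful endpoint close when a timeout is configured; otherwise, or on failure, force the close. */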
	if (psmx_env.timeout)
		err = psm_ep_close(domain->psm_ep, PSM_EP_CLOSE_GRACEFUL,
				   (int64_t) psmx_env.timeout * 1000000000LL);
	else
		err = PSM_EP_CLOSE_TIMEOUT;

	if (err != PSM_OK)
		psm_ep_close(domain->psm_ep, PSM_EP_CLOSE_FORCE, 0);

	domain->fabric->active_domain = NULL;

	free(domain);
	return 0;
}
Example no. 5
0
static int psmx2_fabric_close(fid_t fid)
{
	struct psmx2_fid_fabric *fabric;

	fabric = container_of(fid, struct psmx2_fid_fabric,
			      util_fabric.fabric_fid.fid);

	psmx2_fabric_release(fabric);

	FI_INFO(&psmx2_prov, FI_LOG_CORE, "refcnt=%d\n",
		ofi_atomic_get32(&fabric->util_fabric.ref));

	if (ofi_fabric_close(&fabric->util_fabric))
		return 0;

	if (psmx2_env.name_server)
		ofi_ns_stop_server(&fabric->name_server);

	fastlock_destroy(&fabric->domain_lock);
	assert(fabric == psmx2_active_fabric);
	psmx2_active_fabric = NULL;
	free(fabric);

	psmx2_atomic_global_fini();
	return 0;
}
Example no. 6
0
static int sock_dom_close(struct fid *fid)
{
	struct sock_domain *dom;
	dom = container_of(fid, struct sock_domain, dom_fid.fid);
	if (atomic_get(&dom->ref))
		return -FI_EBUSY;

	sock_pe_finalize(dom->pe);
	if (dom->r_cmap.size)
		sock_conn_map_destroy(&dom->r_cmap);
	fastlock_destroy(&dom->r_cmap.lock);
	fastlock_destroy(&dom->lock);
	sock_dom_remove_from_list(dom);
	free(dom);
	return 0;
}
Example no. 7
0
static int util_eq_close(struct fid *fid)
{
	struct util_eq *eq;
	struct slist_entry *entry;
	struct util_event *event;

	eq = container_of(fid, struct util_eq, eq_fid.fid);
	if (ofi_atomic_get32(&eq->ref))
		return -FI_EBUSY;

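	/* Free any events still queued on the EQ. */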
	while (!slist_empty(&eq->list)) {
		entry = slist_remove_head(&eq->list);
		event = container_of(entry, struct util_event, entry);
		free(event);
	}

	if (eq->wait) {
		fi_poll_del(&eq->wait->pollset->poll_fid,
			    &eq->eq_fid.fid, 0);
		if (eq->internal_wait)
			fi_close(&eq->wait->wait_fid.fid);
	}

	fastlock_destroy(&eq->lock);
	ofi_atomic_dec32(&eq->fabric->ref);
	free(eq);
	return 0;
}
Example no. 8
0
static int psmx2_ep_close(fid_t fid)
{
	struct psmx2_fid_ep *ep;
	struct slist_entry *entry;
	struct psmx2_context *item;

	ep = container_of(fid, struct psmx2_fid_ep, ep.fid);

	if (ep->base_ep) {
		atomic_dec(&ep->base_ep->ref);
		return 0;
	}

	if (atomic_get(&ep->ref))
		return -FI_EBUSY;

	ep->domain->eps[ep->vlane] = NULL;
	psmx2_free_vlane(ep->domain, ep->vlane);
	psmx2_domain_release(ep->domain);

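	/* Release the contexts cached on the free list. */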
	while (!slist_empty(&ep->free_context_list)) {
		entry = slist_remove_head(&ep->free_context_list);
		item = container_of(entry, struct psmx2_context, list_entry);
		free(item);
	}

	fastlock_destroy(&ep->context_lock);

	free(ep);

	return 0;
}
Example no. 9
0
static int fi_ibv_eq_close(fid_t fid)
{
	struct fi_ibv_eq *eq;
	struct fi_ibv_eq_entry *entry;

	eq = container_of(fid, struct fi_ibv_eq, eq_fid.fid);
	/* TODO: use util code, if possible, and add ref counting */

	if (eq->channel)
		rdma_destroy_event_channel(eq->channel);

	close(eq->epfd);

	while (!dlistfd_empty(&eq->list_head)) {
		entry = container_of(eq->list_head.list.next,
				     struct fi_ibv_eq_entry, item);
		dlistfd_remove(eq->list_head.list.next, &eq->list_head);
		free(entry);
	}

	dlistfd_head_free(&eq->list_head);
	fastlock_destroy(&eq->lock);
	free(eq);

	return 0;
}
Example no. 10
0
int tcpx_conn_mgr_init(struct tcpx_fabric *tcpx_fabric)
{
	int ret;

	dlist_init(&tcpx_fabric->poll_mgr.list);
	fastlock_init(&tcpx_fabric->poll_mgr.lock);
	ret = fd_signal_init(&tcpx_fabric->poll_mgr.signal);
	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_FABRIC,"signal init failed\n");
		goto err;
	}

	tcpx_fabric->poll_mgr.run = 1;
	ret = pthread_create(&tcpx_fabric->conn_mgr_thread, 0,
			     tcpx_conn_mgr_thread, (void *) tcpx_fabric);
	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_FABRIC,
			"Failed creating tcpx connection manager thread");

		goto err1;
	}
	return 0;
err1:
	fd_signal_free(&tcpx_fabric->poll_mgr.signal);
err:
	fastlock_destroy(&tcpx_fabric->poll_mgr.lock);
	return ret;
}
Example no. 11
0
int _gnix_buddy_allocator_destroy(gnix_buddy_alloc_handle_t *alloc_handle)
{
	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (unlikely(!alloc_handle)) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Invalid parameter to _gnix_buddy_allocator_destroy."
			  "\n");
		return -FI_EINVAL;
	}

	fastlock_acquire(&alloc_handle->lock);

	free(alloc_handle->lists);

	while (_gnix_free_bitmap(&alloc_handle->bitmap)) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Trying to free buddy allocator handle bitmap.\n");
		sleep(1);
	}

	fastlock_release(&alloc_handle->lock);
	fastlock_destroy(&alloc_handle->lock);

	free(alloc_handle);

	return FI_SUCCESS;
}
Example no. 12
0
void psmx2_trx_ctxt_free(struct psmx2_trx_ctxt *trx_ctxt)
{
	int err;

	if (!trx_ctxt)
		return;

	if (trx_ctxt->am_initialized)
		psmx2_am_fini(trx_ctxt);

#if 0
	/* AM messages could arrive after MQ is finalized, causing segfault
	 * when trying to dereference the MQ pointer. There is no mechanism
	 * to properly shutdown AM. The workaround is to keep MQ valid.
	 */
	psm2_mq_finalize(trx_ctxt->psm2_mq);
#endif

	/* workaround for:
	 * Assertion failure at psm2_ep.c:1059: ep->mctxt_master == ep
	 */
	sleep(psmx2_env.delay);

	if (psmx2_env.timeout)
		err = psm2_ep_close(trx_ctxt->psm2_ep, PSM2_EP_CLOSE_GRACEFUL,
				    (int64_t) psmx2_env.timeout * 1000000000LL);
	else
		err = PSM2_EP_CLOSE_TIMEOUT;

	if (err != PSM2_OK)
		psm2_ep_close(trx_ctxt->psm2_ep, PSM2_EP_CLOSE_FORCE, 0);

	fastlock_destroy(&trx_ctxt->poll_lock);
	free(trx_ctxt);
}
Example no. 13
0
static int util_wait_fd_close(struct fid *fid)
{
	struct util_wait_fd *wait;
	struct ofi_wait_fd_entry *fd_entry;
	int ret;

	wait = container_of(fid, struct util_wait_fd, util_wait.wait_fid.fid);
	ret = fi_wait_cleanup(&wait->util_wait);
	if (ret)
		return ret;

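	/* Remove each tracked fd from the epoll set and free its entry while holding the lock. */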
	fastlock_acquire(&wait->lock);
	while (!dlist_empty(&wait->fd_list)) {
		dlist_pop_front(&wait->fd_list, struct ofi_wait_fd_entry,
				fd_entry, entry);
		fi_epoll_del(wait->epoll_fd, fd_entry->fd);
		free(fd_entry);
	}
	fastlock_release(&wait->lock);

	fi_epoll_del(wait->epoll_fd, wait->signal.fd[FI_READ_FD]);
	fd_signal_free(&wait->signal);
	fi_epoll_close(wait->epoll_fd);
	fastlock_destroy(&wait->lock);
	free(wait);
	return 0;
}
Example no. 14
0
static int fi_ibv_eq_close(fid_t fid)
{
	struct fi_ibv_eq *eq;
	struct fi_ibv_eq_entry *entry;

	eq = container_of(fid, struct fi_ibv_eq, eq_fid.fid);

	if (eq->channel)
		rdma_destroy_event_channel(eq->channel);

	close(eq->epfd);

	fastlock_acquire(&eq->lock);
	while(!dlistfd_empty(&eq->list_head)) {
		entry = container_of(eq->list_head.list.next, struct fi_ibv_eq_entry, item);
		dlistfd_remove(eq->list_head.list.next, &eq->list_head);
		free(entry);
	}
	/* Release the lock before it is destroyed. */
	fastlock_release(&eq->lock);

	dlistfd_head_free(&eq->list_head);
	fastlock_destroy(&eq->lock);
	free(eq);

	return 0;
}
Example no. 15
0
void psmx2_domain_release(struct psmx2_fid_domain *domain)
{
	int err;

	FI_INFO(&psmx2_prov, FI_LOG_DOMAIN, "refcnt=%d\n", domain->refcnt);

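	/* Defer teardown until the last reference is released. */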
	if (--domain->refcnt > 0)
		return;

	if (domain->progress_thread_enabled)
		psmx2_domain_stop_progress(domain);

	psmx2_am_fini(domain);

	fastlock_destroy(&domain->poll_lock);
	fastlock_destroy(&domain->vl_lock);
	rbtDelete(domain->mr_map);
	fastlock_destroy(&domain->mr_lock);

#if 0
	/* AM messages could arrive after MQ is finalized, causing segfault
	 * when trying to dereference the MQ pointer. There is no mechanism
	 * to properly shutdown AM. The workaround is to keep MQ valid.
	 */
	psm2_mq_finalize(domain->psm2_mq);
#endif

	/* workaround for:
	 * Assertion failure at psm2_ep.c:1059: ep->mctxt_master == ep
	 */
	sleep(psmx2_env.delay);

	if (psmx2_env.timeout)
		err = psm2_ep_close(domain->psm2_ep, PSM2_EP_CLOSE_GRACEFUL,
				    (int64_t) psmx2_env.timeout * 1000000000LL);
	else
		err = PSM2_EP_CLOSE_TIMEOUT;

	if (err != PSM2_OK)
		psm2_ep_close(domain->psm2_ep, PSM2_EP_CLOSE_FORCE, 0);

	domain->fabric->active_domain = NULL;

	psmx2_fabric_release(domain->fabric);

	free(domain);
}
Example no. 16
0
int ofi_fabric_close(struct util_fabric *fabric)
{
	if (atomic_get(&fabric->ref))
		return -FI_EBUSY;

	fi_fabric_remove(fabric);
	fastlock_destroy(&fabric->lock);
	return 0;
}
Example no. 17
0
static int sock_cntr_close(struct fid *fid)
{
    struct sock_cntr *cntr;

    cntr = container_of(fid, struct sock_cntr, cntr_fid.fid);
    if (atomic_get(&cntr->ref))
        return -FI_EBUSY;

    if (cntr->signal && cntr->attr.wait_obj == FI_WAIT_FD)
        sock_wait_close(&cntr->waitset->fid);

    pthread_mutex_destroy(&cntr->mut);
    fastlock_destroy(&cntr->list_lock);
    fastlock_destroy(&cntr->trigger_lock);

    pthread_cond_destroy(&cntr->cond);
    atomic_dec(&cntr->domain->ref);
    free(cntr);
    return 0;
}
Example no. 18
0
int _gnix_mbox_allocator_destroy(struct gnix_mbox_alloc_handle *alloc_handle)
{
	struct slist_entry *entry;
	struct gnix_slab *temp;
	char error_buf[256];
	int position;
	char *error;
	int ret = FI_SUCCESS;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (!alloc_handle) {
		GNIX_WARN(FI_LOG_EP_CTRL, "Invalid alloc handle.\n");
		return -FI_EINVAL;
	}

	position = __find_used(alloc_handle, &temp);
	if (position >= 0) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Can't destroy, not all mailboxes have been returned (pos = %d).\n",
			  position);
		return -FI_EBUSY;
	}

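	/* Destroy every slab remaining on the slab list. */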
	while (!slist_empty(&alloc_handle->slab_list)) {
		entry = slist_remove_head(&alloc_handle->slab_list);

		temp = container_of(entry, struct gnix_slab, list_entry);

		ret = __destroy_slab(alloc_handle, temp);
		if (ret)
			GNIX_WARN(FI_LOG_EP_CTRL,
				  "Error destroying slab.\n");
	}

	if (alloc_handle->filename != NULL)
		free(alloc_handle->filename);

	if (alloc_handle->fd != -1)
		ret = close(alloc_handle->fd);

	if (ret) {
		error = strerror_r(errno, error_buf, sizeof(error_buf));
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Error closing map file: %s\n",
			  error);
	}

	fastlock_destroy(&alloc_handle->lock);

	free(alloc_handle);

	return FI_SUCCESS;
}
Example no. 19
0
static int sock_cq_close(struct fid *fid)
{
	struct sock_cq *cq;

	cq = container_of(fid, struct sock_cq, cq_fid.fid);
	if (atomic_get(&cq->ref))
		return -FI_EBUSY;

	if (cq->signal && cq->attr.wait_obj == FI_WAIT_MUTEX_COND)
		sock_wait_close(&cq->waitset->fid);

	rbfree(&cq->addr_rb);
	rbfree(&cq->cqerr_rb);
	rbfdfree(&cq->cq_rbfd);

	fastlock_destroy(&cq->lock);
	fastlock_destroy(&cq->list_lock);
	atomic_dec(&cq->domain->ref);

	free(cq);
	return 0;
}
Example no. 20
0
void sock_conn_map_destroy(struct sock_conn_map *cmap)
{
	int i;

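	/* Close each connection's socket before freeing the table. */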
	for (i = 0; i < cmap->used; i++) {
		close(cmap->table[i].sock_fd);
	}
	free(cmap->table);
	cmap->table = NULL;
	cmap->used = cmap->size = 0;
	sock_epoll_close(&cmap->epoll_set);
	fastlock_destroy(&cmap->lock);
}
Example no. 21
0
int ofi_domain_close(struct util_domain *domain)
{
	if (ofi_atomic_get32(&domain->ref))
		return -FI_EBUSY;

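	/* Remove the domain from the fabric's domain list while holding the fabric lock. */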
	fastlock_acquire(&domain->fabric->lock);
	dlist_remove(&domain->list_entry);
	fastlock_release(&domain->fabric->lock);

	free(domain->name);
	fastlock_destroy(&domain->lock);
	ofi_atomic_dec32(&domain->fabric->ref);
	return 0;
}
Example no. 22
0
void sock_conn_map_destroy(struct sock_ep_attr *ep_attr)
{
	int i;
	struct sock_conn_map *cmap = &ep_attr->cmap;
	for (i = 0; i < cmap->used; i++) {
		sock_epoll_del(&cmap->epoll_set, cmap->table[i].sock_fd);
		sock_pe_poll_del(ep_attr->domain->pe, cmap->table[i].sock_fd);
		ofi_close_socket(cmap->table[i].sock_fd);
	}
	free(cmap->table);
	cmap->table = NULL;
	cmap->used = cmap->size = 0;
	sock_epoll_close(&cmap->epoll_set);
	fastlock_destroy(&cmap->lock);
}
Example no. 23
0
void _gnix_sfl_destroy(struct gnix_s_freelist *fl)
{
	assert(fl);

	struct slist_entry *chunk;

	for (chunk = slist_remove_head(&fl->chunks);
	     chunk != NULL;
	     chunk = slist_remove_head(&fl->chunks)) {
		free(chunk);
	}

	if (fl->ts)
		fastlock_destroy(&fl->lock);
}
Example no. 24
0
void sock_conn_map_destroy(struct sock_ep_attr *ep_attr)
{
	int i;
	struct sock_conn_map *cmap = &ep_attr->cmap;
	for (i = 0; i < cmap->used; i++) {
		if (cmap->table[i].sock_fd != -1) {
			sock_pe_poll_del(ep_attr->domain->pe, cmap->table[i].sock_fd);
			sock_conn_release_entry(cmap, &cmap->table[i]);
		}
	}
	free(cmap->table);
	cmap->table = NULL;
	cmap->used = cmap->size = 0;
	fi_epoll_close(cmap->epoll_set);
	fastlock_destroy(&cmap->lock);
}
Example no. 25
0
int ofi_av_close(struct util_av *av)
{
	if (atomic_get(&av->ref)) {
		FI_WARN(av->prov, FI_LOG_AV, "AV is busy\n");
		return -FI_EBUSY;
	}

	if (av->eq)
		atomic_dec(&av->eq->ref);

	atomic_dec(&av->domain->ref);
	fastlock_destroy(&av->lock);
	/* TODO: unmap data? */
	free(av->data);
	return 0;
}
Example no. 26
0
int rxd_av_close(struct fid *fid)
{
	int ret;
	struct rxd_av *av;
	av = container_of(fid, struct rxd_av, util_av.av_fid);
	ret = fi_close(&av->dg_av->fid);
	if (ret)
		return ret;

	ret = ofi_av_close(&av->util_av);
	if (ret)
		return ret;

	fastlock_destroy(&av->lock);
	free(av);
	return 0;
}
Example no. 27
0
int sock_cq_close(struct fid *fid)
{
	struct sock_cq *cq;

	cq = container_of(fid, struct sock_cq, cq_fid.fid);
	if (atomic_get(&cq->ref))
		return -FI_EBUSY;

	rbfree(&cq->addr_rb);
	rbfree(&cq->cqerr_rb);
	rbfdfree(&cq->cq_rbfd);

	fastlock_destroy(&cq->lock);
	atomic_dec(&cq->domain->ref);

	free(cq);
	return 0;
}
Example no. 28
0
static void __pep_destruct(void *obj)
{
	struct gnix_fid_pep *pep = (struct gnix_fid_pep *)obj;

	GNIX_DEBUG(FI_LOG_EP_CTRL, "Destroying PEP: %p\n", pep);

	fastlock_destroy(&pep->lock);

	if (pep->listen_fd >= 0)
		close(pep->listen_fd);

	if (pep->eq) {
		_gnix_eq_poll_obj_rem(pep->eq, &pep->pep_fid.fid);
		_gnix_ref_put(pep->eq);
	}

	free(pep);
}
Example no. 29
0
static int tcpx_ep_close(struct fid *fid)
{
	struct tcpx_ep *ep = container_of(fid, struct tcpx_ep,
					  util_ep.ep_fid.fid);

	tcpx_ep_tx_rx_queues_release(ep);
	tcpx_cq_wait_ep_del(ep);
	if (ep->util_ep.eq->wait)
		ofi_wait_fd_del(ep->util_ep.eq->wait, ep->conn_fd);

	ofi_eq_remove_fid_events(ep->util_ep.eq,
				  &ep->util_ep.ep_fid.fid);
	ofi_close_socket(ep->conn_fd);
	ofi_endpoint_close(&ep->util_ep);
	fastlock_destroy(&ep->lock);

	free(ep);
	return 0;
}
Example no. 30
0
static int psmx2_domain_init(struct psmx2_fid_domain *domain,
			     struct psmx2_ep_name *src_addr)
{
	int err;

	err = fastlock_init(&domain->mr_lock);
	if (err) {
		FI_WARN(&psmx2_prov, FI_LOG_CORE,
			"fastlock_init(mr_lock) returns %d\n", err);
		goto err_out;
	}

	domain->mr_map = rbtNew(&psmx2_key_compare);
	if (!domain->mr_map) {
		FI_WARN(&psmx2_prov, FI_LOG_CORE,
			"rbtNew failed\n");
		err = -FI_ENOMEM;
		goto err_out_destroy_mr_lock;
	}

	domain->mr_reserved_key = 1;
	domain->max_atomic_size = INT_MAX;

	ofi_atomic_initialize32(&domain->sep_cnt, 0);
	fastlock_init(&domain->sep_lock);
	dlist_init(&domain->sep_list);
	dlist_init(&domain->trx_ctxt_list);
	fastlock_init(&domain->trx_ctxt_lock);

	if (domain->progress_thread_enabled)
		psmx2_domain_start_progress(domain);

	return 0;

err_out_destroy_mr_lock:
	fastlock_destroy(&domain->mr_lock);

err_out:
	return err;
}