Example 1
sxstate sx_set_autocommit(sxmanager *m, sxindex *index, sx *x, svlog *log, svv *v)
{
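	/*
	 * Fast path: no read-write transactions are active, so this
	 * single write cannot conflict with anyone; log it and commit
	 * immediately instead of opening a full transaction.
	 */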
	if (sslikely(m->count_rw == 0)) {
		sx_init(m, x, log);
		svlogv lv;
		lv.index_id = index->dsn;
		lv.next     = UINT32_MAX;
		lv.v        = v;
		lv.ptr      = NULL;
		sv_logadd(x->log, index->r, &lv);
		sr_seq(index->r->seq, SR_TSNNEXT);
		sx_promote(x, SX_COMMIT);
		return SX_COMMIT;
	}
	sx_begin(m, x, SX_RW, log, 0);
	int rc = sx_set(x, index, v);
	if (ssunlikely(rc == -1)) {
		sx_rollback(x);
		return SX_ROLLBACK;
	}
	sxstate s = sx_prepare(x, NULL, NULL);
	switch (s) {
	case SX_PREPARE:
		s = sx_commit(x);
		break;
	case SX_LOCK:
		s = sx_rollback(x);
		break;
	case SX_ROLLBACK:
		break;
	default:
		assert(0);
	}
	return s;
}
Example 2
/*
 * Initialize global process hashing structures.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}
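The two hashinit() tables above exist to make PID and process-group lookups O(1). As a hedged sketch (not the in-tree pfind(); the PIDHASH macro and the p_hash linkage are assumed names following FreeBSD convention), a PID lookup would mask the PID with pidhash to pick a bucket and walk its chain:

/*
 * Sketch only: PIDHASH and the p_hash list field are assumptions
 * for illustration, not the in-tree definitions.
 */
#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])

static struct proc *
pid_lookup_sketch(pid_t pid)
{
	struct proc *p;

	sx_assert(&allproc_lock, SA_LOCKED);
	LIST_FOREACH(p, PIDHASH(pid), p_hash)
		if (p->p_pid == pid)
			return (p);
	return (NULL);
}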
Example 3
sxstate sx_begin(sxmanager *m, sx *x, sxtype type, uint64_t vlsn)
{
	sx_promote(x, SXREADY);
	x->type = type;
	x->log_read = -1;
	sr_seqlock(m->r->seq);
	x->csn = m->csn;
	x->id = sr_seqdo(m->r->seq, SR_TSNNEXT);
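	/*
	 * vlsn == 0 means the caller requested no explicit read view:
	 * snapshot the current LSN instead.
	 */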
	if (sslikely(vlsn == 0))
		x->vlsn = sr_seqdo(m->r->seq, SR_LSN);
	else
		x->vlsn = vlsn;
	sr_sequnlock(m->r->seq);
	sx_init(m, x);
	ss_spinlock(&m->lock);
	ssrbnode *n = NULL;
	int rc = sx_matchtx(&m->i, NULL, (char*)&x->id, sizeof(x->id), &n);
	if (rc == 0 && n) {
		assert(0);
	} else {
		ss_rbset(&m->i, n, rc, &x->node);
	}
	if (type == SXRO)
		m->count_rd++;
	else
		m->count_rw++;
	ss_spinunlock(&m->lock);
	return SXREADY;
}
Example 4
sxstate sx_set_autocommit(sxmanager *m, sxindex *index, sx *x, svv *v)
{
	if (sslikely(m->count_rw == 0)) {
		sx_init(m, x);
		svlogv lv;
		lv.id   = index->dsn;
		lv.next = UINT32_MAX;
		sv_init(&lv.v, &sv_vif, v, NULL);
		sv_logadd(&x->log, m->r->a, &lv, index->ptr);
		sr_seq(m->r->seq, SR_TSNNEXT);
		return SXCOMMIT;
	}
	sx_begin(m, x, SXRW, 0);
	int rc = sx_set(x, index, v);
	if (ssunlikely(rc == -1)) {
		sx_rollback(x);
		return SXROLLBACK;
	}
	sxstate s = sx_prepare(x, NULL, NULL);
	if (sslikely(s == SXPREPARE))
		sx_commit(x);
	else
	if (s == SXLOCK)
		sx_rollback(x);
	return s;
}
Example 5
void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init(sargs->sa_sx, sargs->sa_desc);
}
Example 6
/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_dict_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
}
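A hedged sketch of the lookup side of the design described in the comment above: hash the path with FNV (fnv_32_str()/FNV1_32_INIT from <sys/fnv_hash.h>) and walk one chain of shm_dictionary under shm_dict_lock. The entry type and its fields are assumptions for illustration, not the in-tree layout:

static struct shmfd *
shm_lookup_sketch(const char *path)
{
	struct shm_dict_entry *entry;	/* hypothetical entry type */
	uint32_t fnv;

	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_assert(&shm_dict_lock, SA_LOCKED);
	LIST_FOREACH(entry, &shm_dictionary[fnv & shm_hash], de_link) {
		if (entry->de_fnv == fnv &&
		    strcmp(entry->de_path, path) == 0)
			return (entry->de_shmfd);
	}
	return (NULL);
}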
Example 7
void
t4_tracer_modload()
{

	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
	    t4_cloner_create, t4_cloner_destroy);
}
Example 8
/*
 * Initialize per-FS structures supporting extended attributes.  Do not
 * start extended attributes yet.
 */
void
ufs_extattr_uepm_init(struct ufs_extattr_per_mount *uepm)
{

	uepm->uepm_flags = 0;
	LIST_INIT(&uepm->uepm_list);
	sx_init(&uepm->uepm_lock, "ufs_extattr_sx");
	uepm->uepm_flags |= UFS_EXTATTR_UEPM_INITIALIZED;
}
Example 9
/*
 * Usage:
 *	xprt = svc_vc_create(pool, sock, send_buf_size, recv_buf_size);
 *
 * Creates, registers, and returns an (RPC) TCP-based transport.
 * Once *xprt is initialized, it is registered as a transport
 * (see svc.h, xprt_register).  This routine returns
 * NULL if a problem occurred.
 *
 * The socket passed in is expected to be bound, but
 * not yet connected.
 *
 * Since streams do buffered I/O similar to stdio, the caller can
 * specify how big the send and receive buffers are via the sendsize
 * and recvsize parameters; 0 => use the system default.
 */
SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt = NULL;
	struct sockaddr* sa;
	int error;

	SOCK_LOCK(so);
	if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
		SOCK_UNLOCK(so);
		CURVNET_SET(so->so_vnet);
		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
		CURVNET_RESTORE();
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}
	SOCK_UNLOCK(so);

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error) {
		goto cleanup_svc_vc_create;
	}

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	solisten(so, -1, curthread);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		sx_destroy(&xprt->xp_lock);
		svc_xprt_free(xprt);
	}
	return (NULL);
}
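Following the usage note at the head of this example, a minimal caller sketch (hypothetical: assumes pool and so are already set up; per the comment, 0 buffer sizes select the system defaults):

	SVCXPRT *xprt;

	/* "so" is bound but not yet connected; 0 => default buffer sizes */
	xprt = svc_vc_create(pool, so, 0, 0);
	if (xprt == NULL)
		printf("svc_vc_create failed\n");	/* illustrative handling only */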
Example 10
void
linux_proc_init(struct thread *td, struct thread *newtd, int flags)
{
	struct linux_emuldata *em;
	struct linux_pemuldata *pem;
	struct epoll_emuldata *emd;
	struct proc *p;

	if (newtd != NULL) {
		p = newtd->td_proc;

		/* non-exec call */
		em = malloc(sizeof(*em), M_TEMP, M_WAITOK | M_ZERO);
		if (flags & LINUX_CLONE_THREAD) {
			LINUX_CTR1(proc_init, "thread newtd(%d)",
			    newtd->td_tid);

			em->em_tid = newtd->td_tid;
		} else {
			LINUX_CTR1(proc_init, "fork newtd(%d)", p->p_pid);

			em->em_tid = p->p_pid;

			pem = malloc(sizeof(*pem), M_LINUX, M_WAITOK | M_ZERO);
			sx_init(&pem->pem_sx, "lpemlk");
			p->p_emuldata = pem;
		}
		newtd->td_emuldata = em;
	} else {
		p = td->td_proc;

		/* exec */
		LINUX_CTR1(proc_init, "exec newtd(%d)", p->p_pid);

		/* lookup the old one */
		em = em_find(td);
		KASSERT(em != NULL, ("proc_init: emuldata not found in exec case.\n"));

		em->em_tid = p->p_pid;
		em->flags = 0;
		em->pdeath_signal = 0;
		em->robust_futexes = NULL;
		em->child_clear_tid = NULL;
		em->child_set_tid = NULL;

		/* epoll state should be destroyed in the case of exec. */
		pem = pem_find(p);
		KASSERT(pem != NULL, ("proc_init: proc emuldata not found.\n"));

		if (pem->epoll != NULL) {
			emd = pem->epoll;
			pem->epoll = NULL;
			free(emd, M_EPOLL);
		}
	}

}
Example 11
void drm_global_init(void)
{
	int i;

	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
		struct drm_global_item *item = &glob[i];
		sx_init(&item->mutex, "drmgi");
		item->object = NULL;
		item->refcount = 0;
	}
}
Example 12
SVCXPRT *
svc_dg_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct __rpc_sockinfo si;
	struct sockaddr* sa;
	int error;

	if (!__rpc_socket2sockinfo(so, &si)) {
		printf(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		printf(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_dg_ops;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error)
		goto freedata;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, svc_dg_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
freedata:
	(void) printf(svc_dg_str, __no_mem_str);
	if (xprt) {
		sx_destroy(&xprt->xp_lock);
		svc_xprt_free(xprt);
	}
	return (NULL);
}
Example 13
struct pefs_dircache *
pefs_dircache_get(void)
{
	struct pefs_dircache *pd;

	pd = uma_zalloc(dircache_zone, M_WAITOK | M_ZERO);
	sx_init(&pd->pd_lock, "pefs_dircache_sx");
	LIST_INIT(&pd->pd_heads[0]);
	LIST_INIT(&pd->pd_heads[1]);

	return (pd);
}
Example 14
struct icl_listen *
icl_listen_new(void (*accept_cb)(struct socket *, struct sockaddr *, int))
{
	struct icl_listen *il;

	il = malloc(sizeof(*il), M_ICL_PROXY, M_ZERO | M_WAITOK);
	TAILQ_INIT(&il->il_sockets);
	sx_init(&il->il_lock, "icl_listen");
	il->il_accept = accept_cb;

	return (il);
}
Example 15
static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
Example 16
/*
 * Mount the filesystem
 */
static int
devfs_mount(struct mount *mp)
{
	int error;
	struct devfs_mount *fmp;
	struct vnode *rvp;

	if (devfs_unr == NULL)
		devfs_unr = new_unrhdr(0, INT_MAX, NULL);

	error = 0;

	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return (EOPNOTSUPP);

	fmp = malloc(sizeof *fmp, M_DEVFS, M_WAITOK | M_ZERO);
	fmp->dm_idx = alloc_unr(devfs_unr);
	sx_init(&fmp->dm_lock, "devfsmount");
	fmp->dm_holdcnt = 1;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED;
#ifdef MAC
	mp->mnt_flag |= MNT_MULTILABEL;
#endif
	MNT_IUNLOCK(mp);
	fmp->dm_mount = mp;
	mp->mnt_data = (void *) fmp;
	vfs_getnewfsid(mp);

	fmp->dm_rootdir = devfs_vmkdir(fmp, NULL, 0, NULL, DEVFS_ROOTINO);

	error = devfs_root(mp, LK_EXCLUSIVE, &rvp);
	if (error) {
		sx_destroy(&fmp->dm_lock);
		free_unr(devfs_unr, fmp->dm_idx);
		free(fmp, M_DEVFS);
		return (error);
	}

	VOP_UNLOCK(rvp, 0);

	vfs_mountedfrom(mp, "devfs");

	return (0);
}
Example 17
RTDECL(int)  RTSemFastMutexCreate(PRTSEMFASTMUTEX phFastMtx)
{
    AssertCompile(sizeof(RTSEMFASTMUTEXINTERNAL) > sizeof(void *));
    AssertPtrReturn(phFastMtx, VERR_INVALID_POINTER);

    PRTSEMFASTMUTEXINTERNAL pThis = (PRTSEMFASTMUTEXINTERNAL)RTMemAllocZ(sizeof(*pThis));
    if (pThis)
    {
        pThis->u32Magic = RTSEMFASTMUTEX_MAGIC;
        sx_init(&pThis->SxLock, "IPRT Fast Mutex Semaphore");

        *phFastMtx = pThis;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
Example 18
static int
iicopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct iic_cdevpriv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_IIC, M_WAITOK | M_ZERO);

	sx_init(&priv->lock, "iic");
	priv->sc = dev->si_drv1;

	error = devfs_set_cdevpriv(priv, iicdtor); 
	if (error != 0)
		free(priv, M_IIC);

	return (error);
}
Example 19
/*
 * Create a new transport for a backchannel on a clnt_vc socket.
 */
SVCXPRT *
svc_vc_create_backchannel(SVCPOOL *pool)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = NULL;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_backchannel_ops;
	return (xprt);
}
Example 20
static int
filemon_open(struct cdev *dev, int oflags __unused, int devtype __unused,
    struct thread *td)
{
	int error;
	struct filemon *filemon;

	filemon = malloc(sizeof(*filemon), M_FILEMON,
	    M_WAITOK | M_ZERO);
	sx_init(&filemon->lock, "filemon");
	refcount_init(&filemon->refcnt, 1);
	filemon->cred = crhold(td->td_ucred);

	error = devfs_set_cdevpriv(filemon, filemon_dtr);
	if (error != 0)
		filemon_release(filemon);

	return (error);
}
Example 21
struct fuse_data *
fdata_alloc(struct cdev *fdev, struct ucred *cred)
{
	struct fuse_data *data;

	debug_printf("fdev=%p\n", fdev);

	data = malloc(sizeof(struct fuse_data), M_FUSEMSG, M_WAITOK | M_ZERO);

	data->fdev = fdev;
	mtx_init(&data->ms_mtx, "fuse message list mutex", NULL, MTX_DEF);
	STAILQ_INIT(&data->ms_head);
	mtx_init(&data->aw_mtx, "fuse answer list mutex", NULL, MTX_DEF);
	TAILQ_INIT(&data->aw_head);
	data->daemoncred = crhold(cred);
	data->daemon_timeout = FUSE_DEFAULT_DAEMON_TIMEOUT;
	sx_init(&data->rename_lock, "fuse rename lock");
	data->ref = 1;

	return data;
}
Example 22
static int
ksem_module_init(void)
{
	int error;

	mtx_init(&sem_lock, "sem", NULL, MTX_DEF);
	mtx_init(&ksem_count_lock, "ksem count", NULL, MTX_DEF);
	sx_init(&ksem_dict_lock, "ksem dictionary");
	ksem_dictionary = hashinit(1024, M_KSEM, &ksem_hash);
	p31b_setcfg(CTL_P1003_1B_SEM_NSEMS_MAX, SEM_MAX);
	p31b_setcfg(CTL_P1003_1B_SEM_VALUE_MAX, SEM_VALUE_MAX);

	error = syscall_helper_register(ksem_syscalls);
	if (error)
		return (error);
#ifdef COMPAT_FREEBSD32
	error = syscall32_helper_register(ksem32_syscalls);
	if (error)
		return (error);
#endif
	return (0);
}
Example 23
int
uinet_init(void)
{
	struct thread *td;

	printf("uinet_init starting\n");

	/* XXX need to get this from OS */
	mp_ncpus = 1;

	/* vm_init bits */
	ncallout = 64;

	pcpup = malloc(sizeof(struct pcpu), M_DEVBUF, M_ZERO);
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	kern_timeout_callwheel_alloc(malloc(512*1024, M_DEVBUF, M_ZERO));
	kern_timeout_callwheel_init();
	uinet_init_thread0();
	uma_startup(malloc(40*4096, M_DEVBUF, M_ZERO), 40);
	uma_startup2();
	/* XXX fix this magic 64 to something a bit more dynamic & sensible */
	uma_page_slab_hash = malloc(sizeof(struct uma_page)*64, M_DEVBUF, M_ZERO);
	uma_page_mask = 64-1;
	pthread_mutex_init(&init_lock, NULL);
	pthread_cond_init(&init_cond, NULL);
	mutex_init();
	mi_startup();
	sx_init(&proctree_lock, "proctree");
	td = curthread;

	/* XXX - would very much like to do better than this */
	/* give all configuration threads time to complete initialization
	 * before continuing
	 */
	sleep(1);
	return (0);
}
Example 24
sxstate sx_begin(sxmanager *m, sx *t, uint64_t vlsn)
{
	t->s = SXREADY; 
	t->complete = 0;
	sr_seqlock(m->seq);
	t->id = sr_seqdo(m->seq, SR_TSNNEXT);
	if (sslikely(vlsn == 0))
		t->vlsn = sr_seqdo(m->seq, SR_LSN);
	else
		t->vlsn = vlsn;
	sr_sequnlock(m->seq);
	sx_init(m, t);
	ss_spinlock(&m->lock);
	ssrbnode *n = NULL;
	int rc = sx_matchtx(&m->i, NULL, (char*)&t->id, sizeof(t->id), &n);
	if (rc == 0 && n) {
		assert(0);
	} else {
		ss_rbset(&m->i, n, rc, &t->node);
	}
	m->count++;
	ss_spinunlock(&m->lock);
	return SXREADY;
}
Example 25
/*
 * Create a new transport for a socket obtained via soaccept().
 */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;
	struct sockaddr* sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	CURVNET_SET(so->so_vnet);
	error = sosetopt(so, &opt);
	if (error) {
		CURVNET_RESTORE();
		return (NULL);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			CURVNET_RESTORE();
			return (NULL);
		}
	}
	CURVNET_RESTORE();

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		sx_destroy(&xprt->xp_lock);
		svc_xprt_free(xprt);
	}
	if (cd)
		mem_free(cd, sizeof(*cd));
	return (NULL);
}
Example 26
void
nwfs_hash_init(void) {
	nwhashtbl = hashinit(desiredvnodes, M_NWFSHASH, &nwnodehash);
	sx_init(&nwhashlock, "nwfshl");
}
Example 27
static int
linux_elf_modevent(module_t mod, int type, void *data)
{
	Elf32_Brandinfo **brandinfo;
	int error;
	struct linux_ioctl_handler **lihp;
	struct linux_device_handler **ldhp;

	error = 0;

	switch(type) {
	case MOD_LOAD:
		for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
		     ++brandinfo)
			if (elf32_insert_brand_entry(*brandinfo) < 0)
				error = EINVAL;
		if (error == 0) {
			SET_FOREACH(lihp, linux_ioctl_handler_set)
				linux_ioctl_register_handler(*lihp);
			SET_FOREACH(ldhp, linux_device_handler_set)
				linux_device_register_handler(*ldhp);
			mtx_init(&emul_lock, "emuldata lock", NULL, MTX_DEF);
			sx_init(&emul_shared_lock, "emuldata->shared lock");
			LIST_INIT(&futex_list);
			mtx_init(&futex_mtx, "ftllk", NULL, MTX_DEF);
			linux_exit_tag = EVENTHANDLER_REGISTER(process_exit,
			    linux_proc_exit, NULL, 1000);
			linux_exec_tag = EVENTHANDLER_REGISTER(process_exec,
			    linux_proc_exec, NULL, 1000);
			linux_szplatform = roundup(strlen(linux_platform) + 1,
			    sizeof(char *));
			linux_osd_jail_register();
			stclohz = (stathz ? stathz : hz);
			if (bootverbose)
				printf("Linux ELF exec handler installed\n");
		} else
			printf("cannot insert Linux ELF brand handler\n");
		break;
	case MOD_UNLOAD:
		for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
		     ++brandinfo)
			if (elf32_brand_inuse(*brandinfo))
				error = EBUSY;
		if (error == 0) {
			for (brandinfo = &linux_brandlist[0];
			     *brandinfo != NULL; ++brandinfo)
				if (elf32_remove_brand_entry(*brandinfo) < 0)
					error = EINVAL;
		}
		if (error == 0) {
			SET_FOREACH(lihp, linux_ioctl_handler_set)
				linux_ioctl_unregister_handler(*lihp);
			SET_FOREACH(ldhp, linux_device_handler_set)
				linux_device_unregister_handler(*ldhp);
			mtx_destroy(&emul_lock);
			sx_destroy(&emul_shared_lock);
			mtx_destroy(&futex_mtx);
			EVENTHANDLER_DEREGISTER(process_exit, linux_exit_tag);
			EVENTHANDLER_DEREGISTER(process_exec, linux_exec_tag);
			linux_osd_jail_deregister();
			if (bootverbose)
				printf("Linux ELF exec handler removed\n");
		} else
			printf("Could not deinstall ELF interpreter entry\n");
		break;
	default:
		return EOPNOTSUPP;
	}
	return error;
}
Example 28
int
uinet_init(unsigned int ncpus, unsigned int nmbclusters, struct uinet_instance_cfg *inst_cfg)
{
	struct thread *td;
	char tmpbuf[32];
	int boot_pages;
	int num_hash_buckets;
	caddr_t v;

	if (ncpus > MAXCPU) {
		printf("Limiting number of CPUs to %u\n", MAXCPU);
		ncpus = MAXCPU;
	} else if (0 == ncpus) {
		printf("Setting number of CPUs to 1\n");
		ncpus = 1;
	}

	printf("uinet starting: cpus=%u, nmbclusters=%u\n", ncpus, nmbclusters);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", nmbclusters);
	setenv("kern.ipc.nmbclusters", tmpbuf);

	/* The env var kern.ncallout will get read in proc0_init(), but
	 * that's after we init the callwheel below.  So we set it here for
	 * consistency, but the operative setting is the direct assignment
	 * below.
	 */
	ncallout = HZ * 3600;
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", ncallout);
	setenv("kern.ncallout", tmpbuf);

	/* Assuming maxsockets will be set to nmbclusters, the following
	 * sets the TCP tcbhash size so that perfectly uniform hashing would
	 * result in a maximum bucket depth of about 16.
	 */
	num_hash_buckets = 1;
	while (num_hash_buckets < nmbclusters / 16)
		num_hash_buckets <<= 1;
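	/*
	 * Worked example of the sizing above: with nmbclusters = 1048576,
	 * the loop stops at the first power of two >= 1048576 / 16, i.e.
	 * 65536 buckets, so a full table of 1048576 sockets would hash to
	 * a depth of 1048576 / 65536 = 16 per bucket.
	 */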
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", num_hash_buckets);	
	setenv("net.inet.tcp.tcbhashsize", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", 2048);
	setenv("net.inet.tcp.syncache.hashsize", tmpbuf);

	boot_pages = 16;  /* number of pages made available for uma to bootstrap itself */

	mp_ncpus = ncpus;
	mp_maxid = mp_ncpus - 1;

	uhi_set_num_cpus(mp_ncpus);

	/* vm_init bits */

	/* first get size required, then alloc memory, then give that memory to the second call */
	v = 0;
	v = kern_timeout_callwheel_alloc(v);
	kern_timeout_callwheel_alloc(malloc(round_page((vm_offset_t)v), M_DEVBUF, M_ZERO));
	kern_timeout_callwheel_init();

	uinet_init_thread0();

	uma_startup(malloc(boot_pages*PAGE_SIZE, M_DEVBUF, M_ZERO), boot_pages);
	uma_startup2();

	/* XXX any need to tune this? */
	num_hash_buckets = 8192;  /* power of 2.  32 bytes per bucket on a 64-bit system, so no need to skimp */
	uma_page_slab_hash = malloc(sizeof(struct uma_page)*num_hash_buckets, M_DEVBUF, M_ZERO);
	uma_page_mask = num_hash_buckets - 1;

#if 0
	pthread_mutex_init(&init_lock, NULL);
	pthread_cond_init(&init_cond, NULL);
#endif
	mutex_init();
	mi_startup();
	sx_init(&proctree_lock, "proctree");
	td = curthread;

	/* XXX - would very much like to do better than this */
	/* give all configuration threads time to complete initialization
	 * before continuing
	 */
	sleep(1);

	uinet_instance_init(&uinst0, vnet0, inst_cfg);

	if (uhi_msg_init(&shutdown_helper_msg, 1, 0) != 0)
		printf("Failed to init shutdown helper message - there will be no shutdown helper thread\n");
	else if (kthread_add(shutdown_helper, &shutdown_helper_msg, NULL, &shutdown_helper_thread, 0, 0, "shutdown_helper"))
		printf("Failed to create shutdown helper thread\n");

	/*
	 * XXX This should be configurable - applications that arrange for a
	 * particular thread to process all signals will not want this.
	 */
	if (kthread_add(one_sighandling_thread, NULL, NULL, &at_least_one_sighandling_thread, 0, 0, "one_sighandler"))
		printf("Failed to create at least one signal handling thread\n");
	uhi_mask_all_signals();

#if 0
	printf("maxusers=%d\n", maxusers);
	printf("maxfiles=%d\n", maxfiles);
	printf("maxsockets=%d\n", maxsockets);
	printf("nmbclusters=%d\n", nmbclusters);
#endif

	return (0);
}
Example 29
static int
mlx_pci_attach(device_t dev)
{
    struct mlx_softc	*sc;
    struct mlx_ident	*m;
    int			error;

    debug_called(1);

    pci_enable_busmaster(dev);

    sc = device_get_softc(dev);
    sc->mlx_dev = dev;

    /*
     * Work out what sort of adapter this is (we need to know this in order
     * to map the appropriate interface resources).
     */
    m = mlx_pci_match(dev);
    if (m == NULL)		/* shouldn't happen */
	return(ENXIO);
    sc->mlx_iftype = m->iftype;

    mtx_init(&sc->mlx_io_lock, "mlx I/O", NULL, MTX_DEF);
    sx_init(&sc->mlx_config_lock, "mlx config");
    callout_init_mtx(&sc->mlx_timeout, &sc->mlx_io_lock, 0);

    /*
     * Allocate the PCI register window.
     */
    
    /* type 2/3 adapters have an I/O region we don't prefer at base 0 */
    switch(sc->mlx_iftype) {
    case MLX_IFTYPE_2:
    case MLX_IFTYPE_3:
	sc->mlx_mem_type = SYS_RES_MEMORY;
	sc->mlx_mem_rid = MLX_CFG_BASE1;
	sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type,
		&sc->mlx_mem_rid, RF_ACTIVE);
	if (sc->mlx_mem == NULL) {
	    sc->mlx_mem_type = SYS_RES_IOPORT;
	    sc->mlx_mem_rid = MLX_CFG_BASE0;
	    sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type,
		&sc->mlx_mem_rid, RF_ACTIVE);
	}
	break;
    case MLX_IFTYPE_4:
    case MLX_IFTYPE_5:
	sc->mlx_mem_type = SYS_RES_MEMORY;
	sc->mlx_mem_rid = MLX_CFG_BASE0;
	sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type,
		&sc->mlx_mem_rid, RF_ACTIVE);
	break;
    }
    if (sc->mlx_mem == NULL) {
	device_printf(sc->mlx_dev, "couldn't allocate mailbox window\n");
	mlx_free(sc);
	return(ENXIO);
    }

    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
			       1, 0, 			/* alignment, boundary */
			       BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       MAXBSIZE, MLX_NSEG,	/* maxsize, nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->mlx_parent_dmat);
    if (error != 0) {
	device_printf(dev, "can't allocate parent DMA tag\n");
	mlx_free(sc);
	return(ENOMEM);
    }

    /*
     * Do bus-independent initialisation.
     */
    error = mlx_attach(sc);
    if (error != 0) {
	mlx_free(sc);
	return(error);
    }
    
    /*
     * Start the controller.
     */
    mlx_startup(sc);
    return(0);
}
Example 30
static int
sfxge_create(struct sfxge_softc *sc)
{
	device_t dev;
	efx_nic_t *enp;
	int error;

	dev = sc->dev;

	sx_init(&sc->softc_lock, "sfxge_softc");

	sc->stats_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics");
	if (!sc->stats_node) {
		error = ENOMEM;
		goto fail;
	}

	TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc);

	(void) pci_enable_busmaster(dev);

	/* Initialize DMA mappings. */
	if ((error = sfxge_dma_init(sc)) != 0)
		goto fail;

	/* Map the device registers. */
	if ((error = sfxge_bar_init(sc)) != 0)
		goto fail;

	error = efx_family(pci_get_vendor(dev), pci_get_device(dev),
	    &sc->family);
	KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));

	/* Create the common code nic object. */
	mtx_init(&sc->enp_lock, "sfxge_nic", NULL, MTX_DEF);
	if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
	    &sc->bar, &sc->enp_lock, &enp)) != 0)
		goto fail3;
	sc->enp = enp;

	/* Initialize MCDI to talk to the microcontroller. */
	if ((error = sfxge_mcdi_init(sc)) != 0)
		goto fail4;

	/* Probe the NIC and build the configuration data area. */
	if ((error = efx_nic_probe(enp)) != 0)
		goto fail5;

	/* Initialize the NVRAM. */
	if ((error = efx_nvram_init(enp)) != 0)
		goto fail6;

	/* Initialize the VPD. */
	if ((error = efx_vpd_init(enp)) != 0)
		goto fail7;

	/* Reset the NIC. */
	if ((error = efx_nic_reset(enp)) != 0)
		goto fail8;

	/* Initialize buffer table allocation. */
	sc->buffer_table_next = 0;

	/* Set up interrupts. */
	if ((error = sfxge_intr_init(sc)) != 0)
		goto fail8;

	/* Initialize event processing state. */
	if ((error = sfxge_ev_init(sc)) != 0)
		goto fail11;

	/* Initialize receive state. */
	if ((error = sfxge_rx_init(sc)) != 0)
		goto fail12;

	/* Initialize transmit state. */
	if ((error = sfxge_tx_init(sc)) != 0)
		goto fail13;

	/* Initialize port state. */
	if ((error = sfxge_port_init(sc)) != 0)
		goto fail14;

	sc->init_state = SFXGE_INITIALIZED;

	return (0);

fail14:
	sfxge_tx_fini(sc);

fail13:
	sfxge_rx_fini(sc);

fail12:
	sfxge_ev_fini(sc);

fail11:
	sfxge_intr_fini(sc);

fail8:
	efx_vpd_fini(enp);

fail7:
	efx_nvram_fini(enp);

fail6:
	efx_nic_unprobe(enp);

fail5:
	sfxge_mcdi_fini(sc);

fail4:
	sc->enp = NULL;
	efx_nic_destroy(enp);
	mtx_destroy(&sc->enp_lock);

fail3:
	sfxge_bar_fini(sc);
	(void) pci_disable_busmaster(sc->dev);

fail:
	sc->dev = NULL;
	sx_destroy(&sc->softc_lock);
	return (error);
}