Example #1
int
kern_thr_exit(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;

	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);

	if (p->p_numthreads != 1) {
		racct_sub(p, RACCT_NTHR, 1);
		LIST_REMOVE(td, td_hash);
		rw_wunlock(&tidhash_lock);
		tdsigcleanup(td);
		umtx_thread_exit(td);
		PROC_SLOCK(p);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * Ignore attempts to shut down last thread in the proc.  This
	 * will actually call _exit(2) in the usermode trampoline when
	 * it returns.
	 */
	PROC_UNLOCK(p);
	rw_wunlock(&tidhash_lock);
	return (0);
}
Example #2
void
kthread_exit(void)
{
	struct proc *p;

	p = curthread->td_proc;

	/* A module may be waiting for us to exit. */
	wakeup(curthread);

	/*
	 * The last exiting thread in a kernel process must tear down
	 * the whole process.
	 */
	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);
	if (p->p_numthreads == 1) {
		PROC_UNLOCK(p);
		rw_wunlock(&tidhash_lock);
		kproc_exit(0);
	}
	LIST_REMOVE(curthread, td_hash);
	rw_wunlock(&tidhash_lock);
	umtx_thread_exit(curthread);
	PROC_SLOCK(p);
	thread_exit();
}
Example #3
static void
ps_ether_detach(struct ifnet *ifp)
{
	struct pspcb *psp, *psp_next;

	LIST_FOREACH_SAFE(psp, pspcbhead, psp_list, psp_next) {
		rw_wlock(&psp->psp_lock);
		if (ifp == psp->psp_ifp) {
			IFP2AC(ifp)->ac_netgraph = NULL;
			psp->psp_ifp = NULL;
			rw_wunlock(&psp->psp_lock);
			return;
		}
		rw_wunlock(&psp->psp_lock);
	}
}
Example #4
/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed, and while busy they are not on any hash chain, so
 * normal address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *eth_addr)
{
	struct l2t_data *d = sc->l2t;
	struct l2t_entry *e;
	int rc;

	rw_wlock(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			mtx_lock(&e->lock);    /* avoid race with t4_l2t_free */
			e->wrq = &sc->sge.ctrlq[0];
			e->iqid = sc->sge.fwq.abs_id;
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			atomic_store_rel_int(&e->refcnt, 1);
			atomic_subtract_int(&d->nfree, 1);
			rc = t4_write_l2e(e, 0);
			mtx_unlock(&e->lock);
			if (rc != 0)
				e = NULL;
		} else {
			MPASS(e->vlan == vlan);
			MPASS(e->lport == port);
			atomic_add_int(&e->refcnt, 1);
		}
	}
	rw_wunlock(&d->lock);
	return (e);
}
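Since switching entries are not on any hash chain, the caller owns the reference returned above and must drop it explicitly. Below is a minimal usage sketch under that assumption; the release routine t4_l2t_release() and the rule_* wrappers are illustrative, not taken from the examples in this list.

/*
 * Hypothetical caller: allocate a switching entry when a rule is
 * installed and drop the reference when the rule is destroyed.
 * t4_l2t_release() is assumed to be the matching release routine.
 */
static struct l2t_entry *rule_l2e;

static int
rule_install(struct adapter *sc, uint16_t vlan, uint8_t port, uint8_t *mac)
{

	rule_l2e = t4_l2t_alloc_switching(sc, vlan, port, mac);
	return (rule_l2e == NULL ? ENOMEM : 0);
}

static void
rule_remove(void)
{

	if (rule_l2e != NULL) {
		t4_l2t_release(rule_l2e);	/* drops the refcnt set to 1 above */
		rule_l2e = NULL;
	}
}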
Example #5
static inline void
remove_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_REMOVE(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}
Example #6
static inline void
add_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_INSERT_TAIL(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}
Example #7
void timeout_test_callout_rwlock(bool delay_with_lock)
{
	enum arg argument = HANDLER_NOT_VISITED;
	struct callout callout;
	struct rwlock rw;
	int retval = 0;
	printf("== Start a callout with a rwlock%s\n", delay_with_lock ? " and delay execution by locking it." : ".");

	rw_init(&rw, "callouttest");
	callout_init_rw(&callout, &rw, 0);

	retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler, &argument);
	assert(retval == 0);

	usleep(TEST_NOT_FIRED_MS * 1000);
	assert(argument == HANDLER_NOT_VISITED);
	
	if (delay_with_lock) {
		retval = rw_try_wlock(&rw);
		assert(retval != 0);

		usleep(TEST_DELAY_MS * 1000);
		assert(argument == HANDLER_NOT_VISITED);

		rw_wunlock(&rw);
	}

	usleep(TEST_FIRED_MS * 1000);
	assert(argument == HANDLER_VISITED);
	
	callout_deactivate(&callout);
}
Example #8
void
loginclass_free(struct loginclass *lc)
{

	if (refcount_release_if_not_last(&lc->lc_refcount))
		return;

	rw_wlock(&loginclasses_lock);
	if (!refcount_release(&lc->lc_refcount)) {
		rw_wunlock(&loginclasses_lock);
		return;
	}

	racct_destroy(&lc->lc_racct);
	LIST_REMOVE(lc, lc_next);
	rw_wunlock(&loginclasses_lock);

	free(lc, M_LOGINCLASS);
}
Example #9
/*
 * Return loginclass structure with a corresponding name.  Not
 * performance critical, as it's used mainly by setloginclass(2),
 * which happens once per login session.  Caller has to use
 * loginclass_free() on the returned value when it's no longer
 * needed.
 */
struct loginclass *
loginclass_find(const char *name)
{
	struct loginclass *lc, *new_lc;

	if (name[0] == '\0' || strlen(name) >= MAXLOGNAME)
		return (NULL);

	lc = curthread->td_ucred->cr_loginclass;
	if (strcmp(name, lc->lc_name) == 0) {
		loginclass_hold(lc);
		return (lc);
	}

	rw_rlock(&loginclasses_lock);
	lc = loginclass_lookup(name);
	rw_runlock(&loginclasses_lock);
	if (lc != NULL)
		return (lc);

	new_lc = malloc(sizeof(*new_lc), M_LOGINCLASS, M_ZERO | M_WAITOK);
	racct_create(&new_lc->lc_racct);
	refcount_init(&new_lc->lc_refcount, 1);
	strcpy(new_lc->lc_name, name);

	rw_wlock(&loginclasses_lock);
	/*
	 * There's a chance someone created our loginclass while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate loginclass.
	 */
	if ((lc = loginclass_lookup(name)) == NULL) {
		LIST_INSERT_HEAD(&loginclasses, new_lc, lc_next);
		rw_wunlock(&loginclasses_lock);
		lc = new_lc;
	} else {
		rw_wunlock(&loginclasses_lock);
		racct_destroy(&new_lc->lc_racct);
		free(new_lc, M_LOGINCLASS);
	}

	return (lc);
}
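The comment above states the contract: a successful loginclass_find() returns a held reference that the caller must release with loginclass_free(). A minimal sketch of that pairing follows; the caller function is hypothetical.

/*
 * Hypothetical caller: look up a login class by name, use it, and drop
 * the reference obtained from loginclass_find().
 */
static int
example_use_loginclass(const char *name)
{
	struct loginclass *lc;

	lc = loginclass_find(name);
	if (lc == NULL)
		return (EINVAL);	/* empty or overlong name */
	/* ... charge lc->lc_racct, attach lc to a credential, ... */
	loginclass_free(lc);
	return (0);
}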
Example #10
/*
 * The TOE wants an L2 table entry that it can use to reach the next hop over
 * the specified port.  Produce such an entry - create one if needed.
 *
 * Note that the ifnet could be a pseudo-device like if_vlan, if_lagg, etc. on
 * top of the real cxgbe interface.
 */
struct l2t_entry *
t4_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa)
{
	struct l2t_entry *e;
	struct adapter *sc = pi->adapter;
	struct l2t_data *d = sc->l2t;
	u_int hash, smt_idx = pi->port_id;

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

#ifndef VLAN_TAG
	if (ifp->if_type == IFT_L2VLAN)
		return (NULL);
#endif

	hash = l2_hash(d, sa, ifp->if_index);
	rw_wlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			goto done;
		}
	}

	/* Need to allocate a new entry */
	e = t4_alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;

		e->state = L2T_STATE_RESOLVING;
		l2_store(sa, e);
		e->ifp = ifp;
		e->smt_idx = smt_idx;
		e->hash = hash;
		e->lport = pi->lport;
		e->wrq = &sc->sge.ctrlq[pi->port_id];
		e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
		atomic_store_rel_int(&e->refcnt, 1);
#ifdef VLAN_TAG
		if (ifp->if_type == IFT_L2VLAN)
			VLAN_TAG(ifp, &e->vlan);
		else
			e->vlan = VLAN_NONE;
#endif
		mtx_unlock(&e->lock);
	}
done:
	rw_wunlock(&d->lock);
	return e;
}
Example #11
int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	int ret;

	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);
	rw_wlock(&mr_lock);
	ret = mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg);
	rw_wunlock(&mr_lock);
	return (ret);
}
Example #12
void
loginclass_free(struct loginclass *lc)
{
	int old;

	old = lc->lc_refcount;
	if (old > 1 && atomic_cmpset_int(&lc->lc_refcount, old, old - 1))
		return;

	rw_wlock(&loginclasses_lock);
	if (!refcount_release(&lc->lc_refcount)) {
		rw_wunlock(&loginclasses_lock);
		return;
	}

	racct_destroy(&lc->lc_racct);
	LIST_REMOVE(lc, lc_next);
	rw_wunlock(&loginclasses_lock);

	free(lc, M_LOGINCLASS);
}
Example #13
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*rcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = rcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	refcount_init(&base->refcount, 1);
	rw_init(&tdev->object_lock, "ttmbao");
	rw_wlock(&tdev->object_lock);
	ret = drm_ht_just_insert_please(&tdev->object_hash,
					    &base->hash,
					    (unsigned long)base, 31, 0, 0);
	rw_wunlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	rw_wlock(&tdev->object_lock);
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	rw_wunlock(&tdev->object_lock);
out_err0:
	return ret;
}
Example #14
int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
	struct proc *p;

	p = td->td_proc;

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	rw_wlock(&tidhash_lock);

	PROC_LOCK(p);

	if (p->p_numthreads != 1) {
		racct_sub(p, RACCT_NTHR, 1);
		LIST_REMOVE(td, td_hash);
		rw_wunlock(&tidhash_lock);
		tdsigcleanup(td);
		PROC_SLOCK(p);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * Ignore attempts to shut down last thread in the proc.  This
	 * will actually call _exit(2) in the usermode trampoline when
	 * it returns.
	 */
	PROC_UNLOCK(p);
	rw_wunlock(&tidhash_lock);
	return (0);
}
Example #15
uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}
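The value returned by unlock_rw() records how the lock was held so that the matching lock-class acquire hook can restore the same mode later. The counterpart below is a sketch of that convention, not copied from the kernel; restore_rw() is a hypothetical name.

/*
 * Hypothetical counterpart: reacquire the rwlock in the mode reported
 * by unlock_rw() (1 = read-locked, 0 = write-locked).
 */
static void
restore_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}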
Example #16
/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed, and while busy they are not on any hash chain, so
 * normal address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	rw_wlock(&d->lock);
	e = t4_alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_store_rel_int(&e->refcnt, 1);
		mtx_unlock(&e->lock);
	}
	rw_wunlock(&d->lock);
	return e;
}
Example #17
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	rw_wlock(&tdev->object_lock);
	if (refcount_release(&base->refcount))
		ttm_release_base(base);
	rw_wunlock(&tdev->object_lock);
}
Example #18
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	rw_wlock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	rw_wunlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
Example #19
static void ttm_release_base(struct ttm_base_object *base)
{
	struct ttm_object_device *tdev = base->tfile->tdev;

	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	rw_wunlock(&tdev->object_lock);
	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	if (base->refcount_release) {
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	rw_wlock(&tdev->object_lock);
}
Example #20
static void
cpu_initialize_context(unsigned int cpu)
{
	/* vcpu_guest_context_t is too large to allocate on the stack.
	 * Hence we allocate statically and protect it with a lock */
	vm_page_t m[NPGPTD + 2];
	static vcpu_guest_context_t ctxt;
	vm_offset_t boot_stack;
	vm_offset_t newPTD;
	vm_paddr_t ma[NPGPTD];
	int i;

	/*
	 * Page 0,[0-3]	PTD
	 * Page 1, [4]	boot stack
	 * Page [5]	PDPT
	 *
	 */
	for (i = 0; i < NPGPTD + 2; i++) {
		m[i] = vm_page_alloc(NULL, 0,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);

		pmap_zero_page(m[i]);

	}
	boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
	ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;

#ifdef PAE	
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
	for (i = 0; i < NPGPTD; i++) {
		((vm_paddr_t *)boot_stack)[i] =
		ma[i] = VM_PAGE_TO_MACH(m[i])|PG_V;
	}
#endif	

	/*
	 * Copy cpu0 IdlePTD to new IdlePTD - copying only
	 * kernel mappings
	 */
	pmap_qenter(newPTD, m, 4);
	
	memcpy((uint8_t *)newPTD + KPTDI*sizeof(vm_paddr_t),
	    (uint8_t *)PTOV(IdlePTD) + KPTDI*sizeof(vm_paddr_t),
	    nkpt*sizeof(vm_paddr_t));

	pmap_qremove(newPTD, 4);
	kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
	/*
	 * map actual idle stack to boot_stack
	 */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));


	xen_pgdpt_pin(VM_PAGE_TO_MACH(m[NPGPTD + 1]));
	rw_wlock(&pvh_global_lock);
	for (i = 0; i < 4; i++) {
		int pdir = (PTDPTDI + i) / NPDEPG;
		int curoffset = (PTDPTDI + i) % NPDEPG;
		
		xen_queue_pt_update((vm_paddr_t)
		    ((ma[pdir] & ~PG_V) + (curoffset*sizeof(vm_paddr_t))), 
		    ma[i]);
	}
	PT_UPDATES_FLUSH();
	rw_wunlock(&pvh_global_lock);
	
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.fs = GSEL(GPRIV_SEL, SEL_KPL);
	ctxt.user_regs.gs = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.eip = (unsigned long)init_secondary;
	ctxt.user_regs.eflags = PSL_KERNEL | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	ctxt.ldt_ents = 0;
	ctxt.gdt_frames[0] = (uint32_t)((uint64_t)vtomach(bootAPgdt) >> PAGE_SHIFT);
	ctxt.gdt_ents      = 512;

#ifdef __i386__
	ctxt.user_regs.esp = boot_stack + PAGE_SIZE;

	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = boot_stack + PAGE_SIZE;

	ctxt.event_callback_cs     = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.event_callback_eip    = (unsigned long)Xhypervisor_callback;
	ctxt.failsafe_callback_cs  = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	ctxt.ctrlreg[3] = VM_PAGE_TO_MACH(m[NPGPTD + 1]);
#else /* __x86_64__ */
	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip  = (unsigned long)system_call;

	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

	printf("gdtpfn=%lx pdptpfn=%lx\n",
	    ctxt.gdt_frames[0],
	    ctxt.ctrlreg[3] >> PAGE_SHIFT);

	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
	DELAY(3000);
	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
}