Example #1
/*
 * Open of the shared CPC device for a CPU-bound counter context.  The
 * calling thread must be bound to a CPU; the first open allocates
 * kcpc_cpumap, and each open claims its CPU's bit in that map.
 */
/*ARGSUSED1*/
static int
kcpc_open(dev_t *dev, int flags, int otyp, cred_t *cr)
{
	processorid_t	cpuid;
	int		error;

	ASSERT(pcbe_ops != NULL);

	if ((error = secpolicy_cpc_cpu(cr)) != 0)
		return (error);
	if (getminor(*dev) != KCPC_MINOR_SHARED)
		return (ENXIO);
	if ((cpuid = curthread->t_bind_cpu) == PBIND_NONE)
		return (EINVAL);
	if (cpuid > max_cpuid)
		return (EINVAL);

	rw_enter(&kcpc_cpuctx_lock, RW_WRITER);
	if (++kcpc_cpuctx == 1) {
		ASSERT(kcpc_cpumap == NULL);

		/*
		 * Bail out if DTrace is already using the counters.
		 */
		if (dtrace_cpc_in_use) {
			kcpc_cpuctx--;
			rw_exit(&kcpc_cpuctx_lock);
			return (EAGAIN);
		}
		kcpc_cpumap = kmem_zalloc(BT_SIZEOFMAP(max_cpuid + 1),
		    KM_SLEEP);
		/*
		 * When this device is open for processor-based contexts,
		 * no further lwp-based contexts can be created.
		 *
		 * Since this is the first open, ensure that all existing
		 * contexts are invalidated.
		 */
		kcpc_invalidate_all();
	} else if (BT_TEST(kcpc_cpumap, cpuid)) {
		kcpc_cpuctx--;
		rw_exit(&kcpc_cpuctx_lock);
		return (EAGAIN);
	} else if (kcpc_hw_cpu_hook(cpuid, kcpc_cpumap) != 0) {
		kcpc_cpuctx--;
		rw_exit(&kcpc_cpuctx_lock);
		return (EACCES);
	}
	BT_SET(kcpc_cpumap, cpuid);
	rw_exit(&kcpc_cpuctx_lock);

	*dev = makedevice(getmajor(*dev), (minor_t)cpuid);

	return (0);
}
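
Example #1 sizes a bitmap with BT_SIZEOFMAP() on the first open and claims one bit per CPU with BT_TEST()/BT_SET(); Example #3 later releases the bit with BT_CLEAR(). The following is a minimal, self-contained userland sketch of that allocate/claim/walk/release cycle. The BT_* definitions here are simplified stand-ins for the real illumos <sys/bitmap.h> macros, and cpumap/max_cpuid are illustrative names echoing the example.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long ulong_t;	/* the type the illumos macros operate on */

/* Simplified stand-ins for the <sys/bitmap.h> macros used in the examples. */
#define	BT_NBIPUL	(sizeof (ulong_t) * 8)		/* bits per ulong_t */
#define	BT_BITOUL(n)	(((n) + BT_NBIPUL - 1) / BT_NBIPUL) /* words for n bits */
#define	BT_SIZEOFMAP(n)	(BT_BITOUL(n) * sizeof (ulong_t)) /* bytes for n bits */
#define	BT_TEST(map, i)	(((map)[(i) / BT_NBIPUL] >> ((i) % BT_NBIPUL)) & 1UL)
#define	BT_SET(map, i)	((map)[(i) / BT_NBIPUL] |= 1UL << ((i) % BT_NBIPUL))
#define	BT_CLEAR(map, i) ((map)[(i) / BT_NBIPUL] &= ~(1UL << ((i) % BT_NBIPUL)))

int
main(void)
{
	int max_cpuid = 127;	/* hypothetical highest CPU id */
	ulong_t *cpumap;
	int i;

	/* One zeroed bit per CPU, as kcpc_open() allocates on first open. */
	cpumap = calloc(1, BT_SIZEOFMAP(max_cpuid + 1));
	if (cpumap == NULL)
		return (1);

	/* Claim a CPU: fail if its bit is already taken, then set it. */
	if (BT_TEST(cpumap, 5)) {
		free(cpumap);
		return (1);	/* the EAGAIN case in Example #1 */
	}
	BT_SET(cpumap, 5);
	BT_SET(cpumap, 64);	/* lands in the second word of the map */

	/* Walk the map and report each set bit, as Example #2 does. */
	for (i = 0; i <= max_cpuid; i++) {
		if (BT_TEST(cpumap, i))
			printf("cpu %d claimed\n", i);
	}

	/* Release a bit and free the map, as kcpc_close() does. */
	BT_CLEAR(cpumap, 5);
	free(cpumap);
	return (0);
}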
Example #2
/*
 * MDB dcmd: print the name of every feature whose bit is set in the
 * kernel's x86_featureset bitmap, reading the names from the
 * x86_feature_names string table.
 */
/*ARGSUSED*/
static int
x86_featureset_cmd(uintptr_t addr, uint_t flags, int argc,
    const mdb_arg_t *argv)
{
	void *fset;
	GElf_Sym sym;
	uintptr_t nptr;
	char name[128];
	int ii;

	size_t sz = sizeof (uchar_t) * BT_SIZEOFMAP(NUM_X86_FEATURES);

	if (argc != 0)
		return (DCMD_USAGE);

	if (mdb_lookup_by_name("x86_feature_names", &sym) == -1) {
		mdb_warn("couldn't find x86_feature_names");
		return (DCMD_ERR);
	}

	fset = mdb_zalloc(sz, UM_NOSLEEP);
	if (fset == NULL) {
		mdb_warn("failed to allocate memory for x86_featureset");
		return (DCMD_ERR);
	}

	if (mdb_readvar(fset, "x86_featureset") != sz) {
		mdb_warn("failed to read x86_featureset");
		mdb_free(fset, sz);
		return (DCMD_ERR);
	}

	for (ii = 0; ii < NUM_X86_FEATURES; ii++) {
		if (!BT_TEST((ulong_t *)fset, ii))
			continue;

		if (mdb_vread(&nptr, sizeof (char *), sym.st_value +
		    sizeof (void *) * ii) != sizeof (char *)) {
			mdb_warn("failed to read feature array %d", ii);
			mdb_free(fset, sz);
			return (DCMD_ERR);
		}

		if (mdb_readstr(name, sizeof (name), nptr) == -1) {
			mdb_warn("failed to read feature %d", ii);
			mdb_free(fset, sz);
			return (DCMD_ERR);
		}
		mdb_printf("%s\n", name);
	}

	mdb_free(fset, sz);
	return (DCMD_OK);
}
Example #3
/*
 * Release this open's CPU bit from kcpc_cpumap; the last close frees
 * the map.
 */
/*ARGSUSED1*/
static int
kcpc_close(dev_t dev, int flags, int otyp, cred_t *cr)
{
	rw_enter(&kcpc_cpuctx_lock, RW_WRITER);
	BT_CLEAR(kcpc_cpumap, getminor(dev));
	if (--kcpc_cpuctx == 0) {
		kmem_free(kcpc_cpumap, BT_SIZEOFMAP(max_cpuid + 1));
		kcpc_cpumap = NULL;
	}
	ASSERT(kcpc_cpuctx >= 0);
	rw_exit(&kcpc_cpuctx_lock);

	return (0);
}
Example #4
/*
 * Mark translator 'id' as referenced by provider 'pvp', growing the
 * provider's pv_xrefs bitmap first if translator ids have been created
 * since it was last sized.
 */
int
dt_provider_xref(dtrace_hdl_t *dtp, dt_provider_t *pvp, id_t id)
{
	size_t oldsize = BT_SIZEOFMAP(pvp->pv_xrmax);
	size_t newsize = BT_SIZEOFMAP(dtp->dt_xlatorid);

	assert(id >= 0 && id < dtp->dt_xlatorid);

	if (newsize > oldsize) {
		ulong_t *xrefs = dt_zalloc(dtp, newsize);

		if (xrefs == NULL)
			return (-1);

		bcopy(pvp->pv_xrefs, xrefs, oldsize);
		dt_free(dtp, pvp->pv_xrefs);

		pvp->pv_xrefs = xrefs;
		pvp->pv_xrmax = dtp->dt_xlatorid;
	}

	BT_SET(pvp->pv_xrefs, id);
	return (0);
}
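
Example #4 grows a bitmap in place: when the translator id space has outgrown the map, it allocates a larger zeroed map, copies the old words across, frees the old map, and only then sets the new bit. A hedged sketch of that resize-then-set step, reusing the ulong_t typedef and simplified BT_* macros from the sketch after Example #1 (grow_bitmap is an illustrative name, not a libdtrace function):

#include <stdlib.h>
#include <string.h>

/*
 * Grow 'map' from 'oldbits' to 'newbits' bits, preserving its contents,
 * then set bit 'id' -- the same steps dt_provider_xref() performs.
 * Returns NULL on allocation failure, leaving the old map intact.
 */
static ulong_t *
grow_bitmap(ulong_t *map, size_t oldbits, size_t newbits, size_t id)
{
	size_t oldsize = BT_SIZEOFMAP(oldbits);
	size_t newsize = BT_SIZEOFMAP(newbits);

	if (newsize > oldsize) {
		ulong_t *nmap = calloc(1, newsize);

		if (nmap == NULL)
			return (NULL);

		if (map != NULL)
			memcpy(nmap, map, oldsize);	/* bcopy() above */
		free(map);
		map = nmap;
	}

	BT_SET(map, id);
	return (map);
}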
Example #5
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}
Example #6
/*
 * Rewrite the xlate/xlarg instruction at dtdo_buf[i] so that the instruction's
 * xltab index reflects the offset 'xi' of the assigned dtdo_xlmtab[] location.
 * We track the cumulative references to translators and members in the pcb's
 * pcb_asxrefs[] array, a two-dimensional array of bitmaps indexed by the
 * global translator id and then by the corresponding translator member id.
 */
static void
dt_as_xlate(dt_pcb_t *pcb, dtrace_difo_t *dp,
    uint_t i, uint_t xi, dt_node_t *dnp)
{
	dtrace_hdl_t *dtp = pcb->pcb_hdl;
	dt_xlator_t *dxp = dnp->dn_membexpr->dn_xlator;

	assert(i < dp->dtdo_len);
	assert(xi < dp->dtdo_xlmlen);

	assert(dnp->dn_kind == DT_NODE_MEMBER);
	assert(dnp->dn_membexpr->dn_kind == DT_NODE_XLATOR);

	assert(dxp->dx_id < dtp->dt_xlatorid);
	assert(dnp->dn_membid < dxp->dx_nmembers);

	if (pcb->pcb_asxrefs == NULL) {
		pcb->pcb_asxreflen = dtp->dt_xlatorid;
		pcb->pcb_asxrefs =
		    dt_zalloc(dtp, sizeof (ulong_t *) * pcb->pcb_asxreflen);
		if (pcb->pcb_asxrefs == NULL)
			longjmp(pcb->pcb_jmpbuf, EDT_NOMEM);
	}

	if (pcb->pcb_asxrefs[dxp->dx_id] == NULL) {
		pcb->pcb_asxrefs[dxp->dx_id] =
		    dt_zalloc(dtp, BT_SIZEOFMAP(dxp->dx_nmembers));
		if (pcb->pcb_asxrefs[dxp->dx_id] == NULL)
			longjmp(pcb->pcb_jmpbuf, EDT_NOMEM);
	}

	dp->dtdo_buf[i] = DIF_INSTR_XLATE(
	    DIF_INSTR_OP(dp->dtdo_buf[i]), xi, DIF_INSTR_RD(dp->dtdo_buf[i]));

	BT_SET(pcb->pcb_asxrefs[dxp->dx_id], dnp->dn_membid);
	dp->dtdo_xlmtab[xi] = dnp;
}
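
pcb_asxrefs above is built lazily as a two-level structure: an outer array holding one bitmap pointer per translator id, and an inner bitmap holding one bit per member id. A minimal sketch of that lazy two-level allocation, again reusing the ulong_t typedef and simplified BT_* macros from the sketch after Example #1 (xrefs_t and xref_mark are illustrative names, not libdtrace API):

#include <assert.h>
#include <stdlib.h>

typedef struct xrefs {
	ulong_t	**xr_maps;	/* one bitmap per translator (pcb_asxrefs) */
	size_t	xr_len;		/* number of translator slots (pcb_asxreflen) */
} xrefs_t;

/*
 * Mark member 'mid' of translator 'xid' as referenced, allocating the
 * outer array and the per-translator bitmap on first use, as
 * dt_as_xlate() does.  Returns -1 on allocation failure.
 */
static int
xref_mark(xrefs_t *xr, size_t nxlators, size_t xid, size_t nmembers,
    size_t mid)
{
	if (xr->xr_maps == NULL) {
		xr->xr_maps = calloc(nxlators, sizeof (ulong_t *));
		if (xr->xr_maps == NULL)
			return (-1);
		xr->xr_len = nxlators;
	}

	assert(xid < xr->xr_len && mid < nmembers);

	if (xr->xr_maps[xid] == NULL) {
		xr->xr_maps[xid] = calloc(1, BT_SIZEOFMAP(nmembers));
		if (xr->xr_maps[xid] == NULL)
			return (-1);
	}

	BT_SET(xr->xr_maps[xid], mid);
	return (0);
}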