Example 1
0
/*
 * Initialize the mask of ACPI object types for which event notification
 * is handled.  The ACPI specification supports notification only on
 * Device, Processor and ThermalZone objects; this driver currently
 * cares about Device and Processor objects, so only those bits are set.
 */
void
acpinex_event_init(void)
{
	BT_SET(acpinex_object_type_mask, ACPI_TYPE_DEVICE);
	BT_SET(acpinex_object_type_mask, ACPI_TYPE_PROCESSOR);
}
/**
 * Executes @a pfnWorker on the pair of CPUs @a idCpu1 and @a idCpu2.
 *
 * Both CPUs must be online when the cross call is issued; the worker's hit
 * counter tells us on how many of them it actually ran.
 *
 * @returns VINF_SUCCESS when the worker ran on both CPUs,
 *          VERR_NOT_ALL_CPUS_SHOWED when it ran on only one of them,
 *          VERR_CPU_OFFLINE when a CPU is present but offline,
 *          VERR_CPU_NOT_FOUND when a CPU is not present,
 *          VERR_CPU_IPE_1 on an impossible hit count (internal error),
 *          VERR_INVALID_PARAMETER / VERR_INVALID_FLAGS on bad input.
 *
 * @param   idCpu1      The id of the first CPU; must differ from idCpu2.
 * @param   idCpu2      The id of the second CPU.
 * @param   fFlags      Combination of RTMPON_F_XXX flags.
 * @param   pfnWorker   The worker function to execute on each CPU.
 * @param   pvUser1     First user argument passed to the worker.
 * @param   pvUser2     Second user argument passed to the worker.
 */
RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTMPARGS Args;
    RTSOLCPUSET CpuSet;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
    /*
     * Fix: reject only bits *outside* the valid mask.  The previous check,
     * !(fFlags & RTMPON_F_VALID_MASK), rejected every defined flag and let
     * undefined bits straight through.
     */
    AssertReturn(!(fFlags & ~RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);

    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = idCpu1;
    Args.idCpu2    = idCpu2;
    Args.cHits     = 0;

    /* Build the target CPU set containing exactly the two CPUs. */
    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
        CpuSet.auCpus[i] = 0;
    BT_SET(CpuSet.auCpus, idCpu1);
    BT_SET(CpuSet.auCpus, idCpu2);

    /*
     * Check that both CPUs are online before doing the broadcast call.
     */
    RTThreadPreemptDisable(&PreemptState);
    if (   RTMpIsCpuOnline(idCpu1)
        && RTMpIsCpuOnline(idCpu2))
    {
        rtMpSolCrossCall(&CpuSet, rtMpSolOnPairCpuWrapper, &Args);

        Assert(Args.cHits <= 2);
        if (Args.cHits == 2)
            rc = VINF_SUCCESS;
        else if (Args.cHits == 1)
            rc = VERR_NOT_ALL_CPUS_SHOWED;
        else if (Args.cHits == 0)
            rc = VERR_CPU_OFFLINE;
        else
            rc = VERR_CPU_IPE_1; /* more hits than CPUs: internal error */
    }
    /*
     * A CPU must be present to be considered just offline.
     */
    else if (   RTMpIsCpuPresent(idCpu1)
             && RTMpIsCpuPresent(idCpu2))
        rc = VERR_CPU_OFFLINE;
    else
        rc = VERR_CPU_NOT_FOUND;

    RTThreadPreemptRestore(&PreemptState);
    return rc;
}
Example 3
0
/*
 * Allocate the lowest-numbered free register from the register set and
 * mark it in use.  Returns the register number, or -1 if every register
 * in the set is currently allocated.
 */
int
dt_regset_alloc(dt_regset_t *drp)
{
	ulong_t lastbit = drp->dr_size - 1;
	ulong_t lastw = lastbit >> BT_ULSHIFT;
	ulong_t w;

	for (w = 0; w <= lastw; w++) {
		ulong_t bits = drp->dr_bitmap[w];
		ulong_t lastb, b, mask;

		if (bits == ~0UL)
			continue;	/* this word is fully allocated */

		/* The final word may only be partially populated. */
		lastb = (w == lastw) ? (lastbit & BT_ULMASK) : BT_NBIPUL - 1;

		for (b = 0, mask = 1; b <= lastb; b++, mask <<= 1) {
			if ((bits & mask) == 0) {
				int reg = (int)((w << BT_ULSHIFT) | b);

				BT_SET(drp->dr_bitmap, reg);
				return (reg);
			}
		}
	}

	return (-1); /* no available registers */
}
Example 4
0
/**
 * Executes @a pfnWorker on the one CPU identified by @a idCpu.
 *
 * @returns VINF_SUCCESS when the worker ran on the CPU,
 *          VERR_CPU_OFFLINE when the CPU is present but offline,
 *          VERR_CPU_NOT_FOUND when the CPU id is invalid, the CPU is not
 *          present, or the worker did not run.
 *
 * @param   idCpu       The id of the target CPU.
 * @param   pfnWorker   The worker function to execute.
 * @param   pvUser1     First user argument passed to the worker.
 * @param   pvUser2     Second user argument passed to the worker.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTSOLCPUSET CpuSet;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTMPARGS Args;
    int iWord;

    RT_ASSERT_INTS_ON();

    /* Validate the CPU id before doing any real work. */
    if (idCpu >= ncpus)
        return VERR_CPU_NOT_FOUND;
    if (RT_UNLIKELY(!RTMpIsCpuOnline(idCpu)))
        return RTMpIsCpuPresent(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = idCpu;
    Args.cHits = 0;

    /* A CPU set containing only the target CPU. */
    for (iWord = 0; iWord < IPRT_SOL_SET_WORDS; iWord++)
        CpuSet.auCpus[iWord] = 0;
    BT_SET(CpuSet.auCpus, idCpu);

    RTThreadPreemptDisable(&PreemptState);
    rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);
    RTThreadPreemptRestore(&PreemptState);

    /* The worker can run on at most the one CPU we targeted. */
    Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);

    return ASMAtomicUoReadU32(&Args.cHits) == 1
         ? VINF_SUCCESS
         : VERR_CPU_NOT_FOUND;
}
Example 5
0
/*
 * open(9E) entry point for the CPC device's shared minor node.
 *
 * Grants the calling thread access to the hardware performance counters of
 * the CPU it is bound to.  The caller must pass the cpc_cpu security policy
 * check, must open the KCPC_MINOR_SHARED minor node, and must be bound to a
 * valid CPU.  On success the minor number of *dev is rewritten to the bound
 * CPU's id so later operations on the descriptor target that CPU.
 *
 * Returns 0 on success or an errno value:
 *   ENXIO  - wrong minor node opened
 *   EINVAL - caller not bound to a CPU, or bound CPU id out of range
 *   EAGAIN - counters busy (DTrace is using them, or CPU already claimed)
 *   EACCES - the per-CPU hardware hook refused the request
 */
/*ARGSUSED1*/
static int
kcpc_open(dev_t *dev, int flags, int otyp, cred_t *cr)
{
	processorid_t	cpuid;
	int		error;

	ASSERT(pcbe_ops != NULL);

	if ((error = secpolicy_cpc_cpu(cr)) != 0)
		return (error);
	if (getminor(*dev) != KCPC_MINOR_SHARED)
		return (ENXIO);
	if ((cpuid = curthread->t_bind_cpu) == PBIND_NONE)
		return (EINVAL);
	if (cpuid > max_cpuid)
		return (EINVAL);

	/* Writer lock: we may create the CPU map and will claim a CPU bit. */
	rw_enter(&kcpc_cpuctx_lock, RW_WRITER);
	if (++kcpc_cpuctx == 1) {
		/* First CPU-bound context: the CPU map must not exist yet. */
		ASSERT(kcpc_cpumap == NULL);

		/*
		 * Bail out if DTrace is already using the counters.
		 */
		if (dtrace_cpc_in_use) {
			kcpc_cpuctx--;
			rw_exit(&kcpc_cpuctx_lock);
			return (EAGAIN);
		}
		/* One bit per possible CPU id, zeroed (no CPUs claimed). */
		kcpc_cpumap = kmem_zalloc(BT_SIZEOFMAP(max_cpuid + 1),
		    KM_SLEEP);
		/*
		 * When this device is open for processor-based contexts,
		 * no further lwp-based contexts can be created.
		 *
		 * Since this is the first open, ensure that all existing
		 * contexts are invalidated.
		 */
		kcpc_invalidate_all();
	} else if (BT_TEST(kcpc_cpumap, cpuid)) {
		/* Another open has already claimed this CPU. */
		kcpc_cpuctx--;
		rw_exit(&kcpc_cpuctx_lock);
		return (EAGAIN);
	} else if (kcpc_hw_cpu_hook(cpuid, kcpc_cpumap) != 0) {
		/*
		 * NOTE(review): the hook is consulted only on non-first
		 * opens here; confirm the first-open path is meant to
		 * bypass it.
		 */
		kcpc_cpuctx--;
		rw_exit(&kcpc_cpuctx_lock);
		return (EACCES);
	}
	/* Claim the CPU for this open. */
	BT_SET(kcpc_cpumap, cpuid);
	rw_exit(&kcpc_cpuctx_lock);

	/* Encode the claimed CPU id into the returned minor number. */
	*dev = makedevice(getmajor(*dev), (minor_t)cpuid);

	return (0);
}
Example 6
0
/*
 * Reserve a hardware watchpoint slot for 'wp'.  Scans the allocation map
 * for the first free index, marks it in use, and records the index in
 * wp->wp_priv.  Returns 0 on success, or an EMDB_WPTOOMANY error via
 * set_errno() when every slot is taken.
 */
static int
kaif_wapt_reserve(kmdb_wapt_t *wp)
{
	int slot;

	for (slot = 0; slot <= KDI_MAXWPIDX; slot++) {
		if (BT_TEST(&kaif_waptmap, slot))
			continue;	/* slot already in use */

		BT_SET(&kaif_waptmap, slot);
		wp->wp_priv = (void *)(uintptr_t)slot;
		return (0);
	}

	return (set_errno(EMDB_WPTOOMANY));
}
Example 7
0
/*
 * Rewrite the xlate/xlarg instruction at dtdo_buf[i] so that the instruction's
 * xltab index reflects the offset 'xi' of the assigned dtdo_xlmtab[] location.
 * We track the cumulative references to translators and members in the pcb's
 * pcb_asxrefs[] array, a two-dimensional array of bitmaps indexed by the
 * global translator id and then by the corresponding translator member id.
 */
static void
dt_as_xlate(dt_pcb_t *pcb, dtrace_difo_t *dp,
    uint_t i, uint_t xi, dt_node_t *dnp)
{
	dtrace_hdl_t *dtp = pcb->pcb_hdl;
	dt_xlator_t *dxp = dnp->dn_membexpr->dn_xlator;

	/* Instruction index and xlmtab slot must both be in range. */
	assert(i < dp->dtdo_len);
	assert(xi < dp->dtdo_xlmlen);

	/* 'dnp' must be a member node whose expression is a translator. */
	assert(dnp->dn_kind == DT_NODE_MEMBER);
	assert(dnp->dn_membexpr->dn_kind == DT_NODE_XLATOR);

	assert(dxp->dx_id < dtp->dt_xlatorid);
	assert(dnp->dn_membid < dxp->dx_nmembers);

	/*
	 * Lazily allocate the outer array of per-translator bitmaps,
	 * sized by the number of translator ids known to the handle.
	 * Allocation failure aborts compilation via longjmp.
	 */
	if (pcb->pcb_asxrefs == NULL) {
		pcb->pcb_asxreflen = dtp->dt_xlatorid;
		pcb->pcb_asxrefs =
		    dt_zalloc(dtp, sizeof (ulong_t *) * pcb->pcb_asxreflen);
		if (pcb->pcb_asxrefs == NULL)
			longjmp(pcb->pcb_jmpbuf, EDT_NOMEM);
	}

	/*
	 * Lazily allocate this translator's member bitmap on the first
	 * reference to any of its members.
	 */
	if (pcb->pcb_asxrefs[dxp->dx_id] == NULL) {
		pcb->pcb_asxrefs[dxp->dx_id] =
		    dt_zalloc(dtp, BT_SIZEOFMAP(dxp->dx_nmembers));
		if (pcb->pcb_asxrefs[dxp->dx_id] == NULL)
			longjmp(pcb->pcb_jmpbuf, EDT_NOMEM);
	}

	/*
	 * Rebuild the instruction keeping its opcode and destination
	 * register but substituting the new xltab index 'xi'.
	 */
	dp->dtdo_buf[i] = DIF_INSTR_XLATE(
	    DIF_INSTR_OP(dp->dtdo_buf[i]), xi, DIF_INSTR_RD(dp->dtdo_buf[i]));

	/* Record the member reference and remember the node for this slot. */
	BT_SET(pcb->pcb_asxrefs[dxp->dx_id], dnp->dn_membid);
	dp->dtdo_xlmtab[xi] = dnp;
}
Example 8
0
/*
 * Record that provider 'pvp' references the translator with the given id.
 * The provider's translator-reference bitmap is grown on demand to cover
 * all translator ids currently known to the handle, then the bit for 'id'
 * is set.  Returns 0 on success or -1 if the bitmap cannot be allocated.
 */
int
dt_provider_xref(dtrace_hdl_t *dtp, dt_provider_t *pvp, id_t id)
{
	size_t oldsize = BT_SIZEOFMAP(pvp->pv_xrmax);
	size_t newsize = BT_SIZEOFMAP(dtp->dt_xlatorid);

	assert(id >= 0 && id < dtp->dt_xlatorid);

	if (newsize > oldsize) {
		ulong_t *xrefs = dt_zalloc(dtp, newsize);

		if (xrefs == NULL)
			return (-1);

		/*
		 * Guard the copy: on the first call pv_xrefs is NULL and
		 * oldsize is zero, and bcopy(NULL, ...) is undefined even
		 * for a zero length.
		 */
		if (pvp->pv_xrefs != NULL) {
			bcopy(pvp->pv_xrefs, xrefs, oldsize);
			dt_free(dtp, pvp->pv_xrefs);
		}

		pvp->pv_xrefs = xrefs;
		pvp->pv_xrmax = dtp->dt_xlatorid;
	}

	BT_SET(pvp->pv_xrefs, id);
	return (0);
}
Example 9
0
/*
 * Accumulate media entries from recycler's dat file.
 */
int
DatAccumulate(
    CsdEntry_t *csd)
{
    size_t size;
    MediaTable_t *dat_table;
    MediaEntry_t *datfile_cache;
    MediaEntry_t *vsn;
    MediaEntry_t *datfile;	/* vsn entry from dat file */
    MediaEntry_t *dat;	/* vsn entry in samfs dump's dat table */
    size_t ngot;
    int i;
    int idx;
    int num_inodes;
    DatTable_t table;
    int rval;
    off_t pos;
    char *path;
    int fd;

    path = csd->ce_datPath;		/* path to dat file */
    fd = readOpen(path);		/* open file descriptor for dat file */
    if (fd < 0) {
        Trace(TR_MISC, "%s '%s', dat file open failed, errno= %d",
              errmsg1, path, errno);
        return (-1);
    }
    dat_table = csd->ce_table;	/* media table generated for dat file */

    /*
     * Need to search from the beginning.  Rewind dat file and
     * the read header again.
     */
    rval = readHeader(fd, path);
    if (rval != 0) {
        Trace(TR_MISC, "%s '%s', dat table header read failed",
              errmsg1, path);
        DatClose(fd);
        return (-1);
    }

    num_inodes = 0;
    datfile_cache = NULL;	/* nothing cached until a chunk matches */

    /*
     * Read datfile table entries until we find the entry we are
     * currently processing.
     */
    while (InfiniteLoop) {
        datfile_cache = NULL;

        ngot = read(fd, &table, sizeof (DatTable_t));
        if (ngot != sizeof (DatTable_t)) {
            Trace(TR_MISC, "%s '%s', dat table read failed",
                  errmsg1, path);
            DatClose(fd);
            return (-1);
        }

        /* Skip chunks for other sequence-number ranges. */
        if (table.dt_mapmin != dat_table->mt_mapmin) {
            continue;
        }
        if (table.dt_mapchunk != dat_table->mt_mapchunk) {
            Trace(TR_MISC, "%s '%s', map chunk does not match",
                  errmsg1, path);
            DatClose(fd);
            return (-1);
        }

        pos = lseek(fd, 0, SEEK_CUR);

        Trace(TR_MISC, "[%s] Read dat entries 0x%lx "
              "count: %d seqnum candidates: %lld-%lld",
              dat_table->mt_name, pos, table.dt_count, table.dt_mapmin,
              table.dt_mapmin + table.dt_mapchunk - 1);

        size = table.dt_count * sizeof (MediaEntry_t);
        SamMalloc(datfile_cache, size);
        (void) memset(datfile_cache, 0, size);

        for (i = 0; i < table.dt_count; i++) {
            datfile = &datfile_cache[i];

            ngot = read(fd, datfile, sizeof (MediaEntry_t));
            if (ngot != sizeof (MediaEntry_t)) {
                /* Cast size_t values for the %d conversions. */
                Trace(TR_MISC, "%s '%s', header read error "
                      "read: %d expected: %d, errno= %d",
                      errmsg1, path,
                      (int)ngot, (int)sizeof (MediaEntry_t), errno);
                num_inodes = -1;
                goto out;
            }

            /* Disk entries carry a seqnum bitmap after the entry. */
            if ((datfile->me_type == DT_DISK) &&
                    (datfile->me_mapsize != 0)) {
                SamMalloc(datfile->me_bitmap,
                          datfile->me_mapsize);
                (void) memset(datfile->me_bitmap, 0,
                              datfile->me_mapsize);

                ngot = read(fd, datfile->me_bitmap,
                            datfile->me_mapsize);
                if (ngot != datfile->me_mapsize) {
                    Trace(TR_MISC,
                          "%s '%s', bitmap read error "
                          "read: %d expected: %d, errno= %d",
                          errmsg1, path, (int)ngot,
                          datfile->me_mapsize, errno);
                    num_inodes = -1;
                    goto out;
                }
            }

            vsn = MediaFind(&ArchMedia,
                            datfile->me_type, datfile->me_name);
            if (vsn == NULL) {
                Trace(TR_MISC,
                      "%s '%s', failed to find vsn %s.%s",
                      errmsg1, path,
                      sam_mediatoa(datfile->me_type),
                      datfile->me_name);
                num_inodes = -1;
                goto out;
            }

            /*
             * For completeness, update in-memory's dat_table for
             * samfs dump file.  This is not really necessary but
             * might be useful for debugging purposes.
             */
            dat = NULL;
            if (dat_table != NULL) {
                dat = MediaFind(dat_table, datfile->me_type,
                                datfile->me_name);
                if (dat == NULL) {
                    Trace(TR_MISC,
                          "Error failed to find vsn %s.%s",
                          sam_mediatoa(datfile->me_type),
                          datfile->me_name);
                    num_inodes = -1;
                    goto out;
                }
            }

            Trace(TR_SAMDEV,
                  "[%s.%s] Accumulate dat active files: %d",
                  sam_mediatoa(vsn->me_type), vsn->me_name,
                  datfile->me_files);

            PthreadMutexLock(&vsn->me_mutex);
            vsn->me_files += datfile->me_files;
            PthreadMutexUnlock(&vsn->me_mutex);

            if (dat != NULL) {
                PthreadMutexLock(&dat->me_mutex);
                dat->me_files += datfile->me_files;
                PthreadMutexUnlock(&dat->me_mutex);
            }

            num_inodes += datfile->me_files;

            if ((datfile->me_type == DT_DISK) &&
                    (datfile->me_mapsize != 0)) {

                /*
                 * The chunk covers mt_mapchunk sequence numbers
                 * (mt_mapmin .. mt_mapmin + mt_mapchunk - 1, per the
                 * trace above), so valid bit indices are
                 * 0 .. mt_mapchunk - 1.  The old '<=' bound read one
                 * bit past the end of the map.
                 */
                for (idx = 0;
                        idx < dat_table->mt_mapchunk; idx++) {

                    if (BT_TEST(datfile->me_bitmap, idx)) {
                        PthreadMutexLock(&vsn->me_mutex);
                        BT_SET(vsn->me_bitmap, idx);
                        PthreadMutexUnlock(&vsn->me_mutex);

                        if (dat != NULL) {
                            PthreadMutexLock(&dat->me_mutex);
                            BT_SET(dat->me_bitmap, idx);
                            PthreadMutexUnlock(&dat->me_mutex);
                        }
                    }
                }
            }

            /*
             * Free memory allocated for bitmap read from disk.
             */
            if (datfile->me_bitmap != NULL) {
                SamFree(datfile->me_bitmap);
                datfile->me_bitmap = NULL;
            }
        }
        break;
    }

out:
    if (datfile_cache != NULL) {
        /*
         * Release any per-entry bitmaps still allocated (error paths
         * jump here mid-loop), then free the cache itself.  The old
         * code freed the array first and then walked it, a
         * use-after-free.
         */
        for (i = 0; i < table.dt_count; i++) {
            datfile = &datfile_cache[i];
            if (datfile->me_bitmap != NULL) {
                SamFree(datfile->me_bitmap);
                datfile->me_bitmap = NULL;
            }
        }
        SamFree(datfile_cache);
    }
    DatClose(fd);
    return (num_inodes);
}
Example 10
0
/*
 * Allocate one sc_shared_t slot for the current process, reusing an
 * existing shared page when one has free space and otherwise creating,
 * mapping and locking a fresh page.  On success, returns 0 and hands back
 * the kernel address of the slot in *kaddrp and the corresponding user
 * address in *uaddrp; on failure, returns the error from the page setup.
 */
static int
schedctl_shared_alloc(sc_shared_t **kaddrp, uintptr_t *uaddrp)
{
	proc_t		*p = curproc;
	sc_page_ctl_t	*pagep;
	sc_shared_t	*ssp;
	caddr_t		base;
	index_t		index;
	int		error;

	/* p_sc_lock serializes all shared-page bookkeeping for p. */
	ASSERT(MUTEX_NOT_HELD(&p->p_lock));
	mutex_enter(&p->p_sc_lock);

	/*
	 * Try to find space for the new data in existing pages
	 * within the process's list of shared pages.
	 */
	for (pagep = p->p_pagep; pagep != NULL; pagep = pagep->spc_next)
		if (pagep->spc_space != 0)
			break;

	if (pagep != NULL)
		base = pagep->spc_uaddr;
	else {
		struct anon_map *amp;
		caddr_t kaddr;

		/*
		 * No room, need to allocate a new page.  Also set up
		 * a mapping to the kernel address space for the new
		 * page and lock it in memory.
		 */
		if ((error = schedctl_getpage(&amp, &kaddr)) != 0) {
			mutex_exit(&p->p_sc_lock);
			return (error);
		}
		if ((error = schedctl_map(amp, &base, kaddr)) != 0) {
			/* Undo the page allocation before bailing out. */
			schedctl_freepage(amp, kaddr);
			mutex_exit(&p->p_sc_lock);
			return (error);
		}

		/*
		 * Allocate and initialize the page control structure.
		 */
		pagep = kmem_alloc(sizeof (sc_page_ctl_t), KM_SLEEP);
		pagep->spc_amp = amp;
		pagep->spc_base = (sc_shared_t *)kaddr;
		pagep->spc_end = (sc_shared_t *)(kaddr + sc_pagesize);
		pagep->spc_uaddr = base;

		/* Zeroed bitmap: all slots in the new page start free. */
		pagep->spc_map = kmem_zalloc(sizeof (ulong_t) * sc_bitmap_words,
		    KM_SLEEP);
		pagep->spc_space = sc_pagesize;

		/* Push the new page onto the head of the process's list. */
		pagep->spc_next = p->p_pagep;
		p->p_pagep = pagep;
	}

	/*
	 * Got a page, now allocate space for the data.  There should
	 * be space unless something's wrong.
	 */
	ASSERT(pagep != NULL && pagep->spc_space >= sizeof (sc_shared_t));
	index = bt_availbit(pagep->spc_map, sc_bitmap_len);
	ASSERT(index != -1);

	/*
	 * Get location with pointer arithmetic.  spc_base is of type
	 * sc_shared_t *.  Mark as allocated.
	 */
	ssp = pagep->spc_base + index;
	BT_SET(pagep->spc_map, index);
	pagep->spc_space -= sizeof (sc_shared_t);

	mutex_exit(&p->p_sc_lock);

	/*
	 * Return kernel and user addresses.
	 */
	*kaddrp = ssp;
	*uaddrp = (uintptr_t)base + ((uintptr_t)ssp & PAGEOFFSET);
	return (0);
}