Example No. 1
File: ctatc.c Project: merxbj/src
static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	unsigned long flags;
	struct snd_pcm_runtime *runtime;
	struct ct_vm *vm;

	CTDPF("%s is called\n", __func__);
	CTASSERT(NULL != atc);
	CTASSERT(NULL != apcm);
	CTASSERT(NULL != atc->vm);

	if (NULL == apcm->substream) {
		return 0;
	}
	runtime = apcm->substream->runtime;
	vm = atc->vm;

	spin_lock_irqsave(&atc->vm_lock, flags);
	apcm->vm_block = vm->map(vm, runtime->dma_area, runtime->dma_bytes);
	spin_unlock_irqrestore(&atc->vm_lock, flags);

	if (NULL == apcm->vm_block) {
		return -ENOENT;
	}

	return 0;
}
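Every example in this listing revolves around CTASSERT. In most of the quoted projects (FreeBSD, NetBSD, illumos, FASTBuild) it is a compile-time assertion, each tree carrying its own definition; the ctatc.c snippets appear to use the name for a run-time debug assert instead, since they test run-time pointer values. As a rough illustration only, a CTASSERT-style macro can be sketched as follows; the names are hypothetical and not taken from any of the quoted sources.

#if __STDC_VERSION__ >= 201112L
/* C11 and later: delegate to the language-level static assertion. */
#define MY_CTASSERT(expr)	_Static_assert((expr), #expr)
#else
/* Pre-C11 trick: a negative array size forces a compile-time error.
 * Real implementations usually paste __LINE__ into the name so the
 * macro can be used more than once per scope. */
#define MY_CTASSERT(expr)	typedef char my_ctassert_failed[(expr) ? 1 : -1]
#endif

/* Example use: fails to compile on any platform where the claim is false. */
MY_CTASSERT(sizeof(void *) >= sizeof(int));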
Example No. 2
/*
 * If we have as much entropy as is requested, fill the buffer with it
 * and return true.  Otherwise, leave the buffer alone and return
 * false.
 */
static bool
rndpool_maybe_extract(void *buffer, size_t bytes)
{
	bool ok;

	KASSERT(bytes <= RNDSINK_MAX_BYTES);
	CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL);
	CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD));
	CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <=
	    (0xffffffffUL / NBBY));

	const uint32_t bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY);

	mutex_spin_enter(&rndpool_mtx);
	if (bits_needed <= rndpool_get_entropy_count(&rnd_pool)) {
		const uint32_t extracted __unused =
		    rndpool_extract_data(&rnd_pool, buffer, bytes,
			RND_EXTRACT_GOOD);

		KASSERT(extracted == bytes);

		ok = true;
	} else {
		ok = false;
		rnd_getmore(howmany(bits_needed -
			rndpool_get_entropy_count(&rnd_pool), NBBY));
	}
	mutex_spin_exit(&rndpool_mtx);

	return ok;
}
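The three CTASSERTs at the top of this function make the 32-bit arithmetic in bits_needed provably free of overflow: together with the KASSERT on bytes, they bound (bytes + RND_ENTROPY_THRESHOLD) * NBBY below UINT32_MAX. A reduced, self-contained sketch of that overflow-guard pattern, with hypothetical constants standing in for RNDSINK_MAX_BYTES, RND_ENTROPY_THRESHOLD and NBBY:

#include <stdint.h>

#define MAX_REQUEST_BYTES	512u	/* hypothetical stand-in for RNDSINK_MAX_BYTES */
#define THRESHOLD_BYTES		32u	/* hypothetical stand-in for RND_ENTROPY_THRESHOLD */
#define BITS_PER_BYTE		8u	/* stand-in for NBBY */

/* Prove at compile time that the worst-case request fits in a uint32_t. */
_Static_assert((MAX_REQUEST_BYTES + THRESHOLD_BYTES) <= (UINT32_MAX / BITS_PER_BYTE),
    "bits_needed() could overflow uint32_t");

static uint32_t
bits_needed(uint32_t bytes)
{
	/* Callers must ensure bytes <= MAX_REQUEST_BYTES (cf. the KASSERT above). */
	return (bytes + THRESHOLD_BYTES) * BITS_PER_BYTE;
}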
Example No. 3
static void
wmi_hp_switch_init(struct wmi_hp_softc *sc)
{
	int i, sensor[3];

	const char desc[][ENVSYS_DESCLEN] = {
		"wireless", "bluetooth", "mobile"
	};

	if (wmi_hp_method_read(sc, WMI_HP_METHOD_CMD_SWITCH) != true)
		return;

	sensor[0] = WMI_HP_SWITCH_WLAN;
	sensor[1] = WMI_HP_SWITCH_BT;
	sensor[2] = WMI_HP_SWITCH_WWAN;

	CTASSERT(WMI_HP_SENSOR_WLAN == 0);
	CTASSERT(WMI_HP_SENSOR_BT   == 1);
	CTASSERT(WMI_HP_SENSOR_WWAN == 2);

	for (i = 0; i < 3; i++) {

		if ((sc->sc_val & sensor[i]) == 0)
			continue;

		(void)strlcpy(sc->sc_sensor[i].desc, desc[i], ENVSYS_DESCLEN);

		sc->sc_sensor[i].state = ENVSYS_SINVALID;
		sc->sc_sensor[i].units = ENVSYS_INDICATOR;

		if (sysmon_envsys_sensor_attach(sc->sc_sme,
			&sc->sc_sensor[i]) != 0)
			break;
	}
}
Example No. 4
/*
 * A simple relocator for IA32/AMD64 EFI binaries.
 */
EFI_STATUS
_reloc(unsigned long ImageBase, ElfW_Dyn *dynamic, EFI_HANDLE image_handle,
    EFI_SYSTEM_TABLE *system_table)
{
	unsigned long relsz, relent;
	unsigned long *newaddr;
	ElfW_Rel *rel;
	ElfW_Dyn *dynp;

	/*
	 * Find the relocation address, its size and the relocation entry.
	 */
	relsz = 0;
	relent = 0;
	for (dynp = dynamic; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
		case DT_RELA:
			rel = (ElfW_Rel *) ((unsigned long) dynp->d_un.d_ptr +
			    ImageBase);
			break;
		case DT_RELSZ:
		case DT_RELASZ:
			relsz = dynp->d_un.d_val;
			break;
		case DT_RELENT:
		case DT_RELAENT:
			relent = dynp->d_un.d_val;
			break;
		default:
			break;
		}
	}

	/*
	 * Perform the actual relocation.
	 * XXX: We are reusing code for the amd64 version of this, but
	 * we must make sure the relocation types are the same.
	 */
	CTASSERT(R_386_NONE == R_X86_64_NONE);
	CTASSERT(R_386_RELATIVE == R_X86_64_RELATIVE);
	for (; relsz > 0; relsz -= relent) {
		switch (ELFW_R_TYPE(rel->r_info)) {
		case R_386_NONE:
			/* No relocation needs to be performed. */
			break;
		case R_386_RELATIVE:
			/* Address relative to the base address. */
			newaddr = (unsigned long *)(ImageBase + rel->r_offset);
			*newaddr += ImageBase;
			break;
		default:
			/* XXX: do we need other relocations ? */
			break;
		}
		rel = (ElfW_Rel *) ((caddr_t) rel + relent);
	}

	return (EFI_SUCCESS);
}
Example No. 5
int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	struct workqueue_queue *q;
	void *ptr;
	int error = 0;

	CTASSERT(sizeof(work_impl_t) <= sizeof(struct work));

	ptr = kmem_zalloc(workqueue_size(flags), KM_SLEEP);
	wq = (void *)roundup2((uintptr_t)ptr, coherency_unit);
	wq->wq_ptr = ptr;
	wq->wq_flags = flags;

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

	if (flags & WQ_PERCPU) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		/* create the work-queue for each CPU */
		for (CPU_INFO_FOREACH(cii, ci)) {
			q = workqueue_queue_lookup(wq, ci);
			error = workqueue_initqueue(wq, q, ipl, ci);
			if (error) {
				break;
			}
		}
	} else {
Example No. 6
int
dtsec_rm_pool_rx_init(struct dtsec_softc *sc)
{

	/* FM_PORT_BUFFER_SIZE must be less than PAGE_SIZE */
	CTASSERT(FM_PORT_BUFFER_SIZE < PAGE_SIZE);

	snprintf(sc->sc_rx_zname, sizeof(sc->sc_rx_zname), "%s: RX Buffers",
	    device_get_nameunit(sc->sc_dev));

	sc->sc_rx_zone = uma_zcreate(sc->sc_rx_zname, FM_PORT_BUFFER_SIZE, NULL,
	    NULL, NULL, NULL, FM_PORT_BUFFER_SIZE, 0);
	if (sc->sc_rx_zone == NULL)
		return (EIO);

	sc->sc_rx_pool = bman_pool_create(&sc->sc_rx_bpid, FM_PORT_BUFFER_SIZE,
	    0, 0, DTSEC_RM_POOL_RX_MAX_SIZE, dtsec_rm_pool_rx_get_buffer,
	    dtsec_rm_pool_rx_put_buffer, DTSEC_RM_POOL_RX_LOW_MARK,
	    DTSEC_RM_POOL_RX_HIGH_MARK, 0, 0, dtsec_rm_pool_rx_depleted, sc, NULL,
	    NULL);
	if (sc->sc_rx_pool == NULL) {
		dtsec_rm_pool_rx_free(sc);
		return (EIO);
	}

	return (0);
}
Example No. 7
static int
icl_pdu_check_header_digest(struct icl_pdu *request, size_t *availablep)
{
	struct mbuf *m;
	uint32_t received_digest, valid_digest;

	if (request->ip_conn->ic_header_crc32c == false)
		return (0);

	m = icl_conn_receive(request->ip_conn, ISCSI_HEADER_DIGEST_SIZE);
	if (m == NULL) {
		ICL_DEBUG("failed to receive header digest");
		return (-1);
	}

	CTASSERT(sizeof(received_digest) == ISCSI_HEADER_DIGEST_SIZE);
	m_copydata(m, 0, ISCSI_HEADER_DIGEST_SIZE, (void *)&received_digest);
	m_freem(m);

	*availablep -= ISCSI_HEADER_DIGEST_SIZE;

	/*
	 * XXX: Handle AHS.
	 */
	valid_digest = icl_mbuf_to_crc32c(request->ip_bhs_mbuf);
	if (received_digest != valid_digest) {
		ICL_WARN("header digest check failed; got 0x%x, "
		    "should be 0x%x", received_digest, valid_digest);
		return (-1);
	}

	return (0);
}
Example No. 8
/*
 * nfsl_principal_name_get - extracts principal from transport struct.
 * Based on "uts/common/rpc/sec/sec_svc.c" function sec_svc_getcred.
 */
static char *
nfsl_principal_name_get(struct svc_req *req)
{
	char				*principal_name = NULL;
	struct authdes_cred		*adc;
	rpc_gss_rawcred_t		*rcred;
	rpc_gss_ucred_t			*ucred;
	void				*cookie;

	switch (req->rq_cred.oa_flavor) {
	case AUTH_UNIX:
	case AUTH_NONE:
		/* no principal name provided */
		break;

	case AUTH_DES:
		adc = (struct authdes_cred *)req->rq_clntcred;
		CTASSERT(sizeof (struct authdes_cred) <= RQCRED_SIZE);
		principal_name = adc->adc_fullname.name;
		break;

	case RPCSEC_GSS:
		(void) rpc_gss_getcred(req, &rcred, &ucred, &cookie);
		principal_name = (caddr_t)rcred->client_principal;
		break;

	default:
		break;
	}
	return (principal_name);
}
Example No. 9
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	ALLOCK(mtx, &mutex_lockops);
}
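This example, like Examples No. 17, 19 and 23 further down, asserts that the kernel's opaque lock type is at least pointer-sized, because the rump implementation stores a pointer to the hypervisor's own lock object inside it. A self-contained sketch of that pattern, with hypothetical types and _Static_assert standing in for CTASSERT:

#include <string.h>

struct opaque_lock { unsigned char opaque[16]; };	/* public, fixed-size object */
struct backend_lock;					/* private implementation type */

/* The opaque object must be able to hold a pointer to the backend object. */
_Static_assert(sizeof(struct opaque_lock) >= sizeof(struct backend_lock *),
    "opaque_lock is too small to hold a backend pointer");

static void
opaque_lock_init(struct opaque_lock *l, struct backend_lock *impl)
{
	/* Stash the backend pointer in the first bytes of the opaque object. */
	memcpy(l, &impl, sizeof(impl));
}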
Example No. 10
void *
abd_checksum_edonr_tmpl_init(const zio_cksum_salt_t *salt)
{
	EdonRState	*ctx;
	uint8_t		salt_block[EDONR_BLOCK_SIZE];

	/*
	 * Edon-R needs all but the last hash invocation to be on full-size
	 * blocks, but the salt is too small. Rather than simply padding it
	 * with zeros, we expand the salt into a new salt block of proper
	 * size by double-hashing it (the new salt block will be composed of
	 * H(salt) || H(H(salt))).
	 */
	CTASSERT(EDONR_BLOCK_SIZE == 2 * (EDONR_MODE / 8));
	EdonRHash(EDONR_MODE, salt->zcs_bytes, sizeof (salt->zcs_bytes) * 8,
	    salt_block);
	EdonRHash(EDONR_MODE, salt_block, EDONR_MODE, salt_block +
	    EDONR_MODE / 8);

	/*
	 * Feed the new salt block into the hash function - this will serve
	 * as our MAC key.
	 */
	ctx = kmem_zalloc(sizeof (*ctx), KM_SLEEP);
	EdonRInit(ctx, EDONR_MODE);
	EdonRUpdate(ctx, salt_block, sizeof (salt_block) * 8);
	return (ctx);
}
Example No. 11
static npf_rproc_t *
npf_mk_rproc(prop_array_t rprocs, const char *rpname)
{
	prop_object_iterator_t it;
	prop_dictionary_t rpdict;
	npf_rproc_t *rp;
	uint64_t rpval;

	it = prop_array_iterator(rprocs);
	while ((rpdict = prop_object_iterator_next(it)) != NULL) {
		const char *iname;
		prop_dictionary_get_cstring_nocopy(rpdict, "name", &iname);
		KASSERT(iname != NULL);
		if (strcmp(rpname, iname) == 0)
			break;
	}
	prop_object_iterator_release(it);
	if (rpdict == NULL) {
		return NULL;
	}
	CTASSERT(sizeof(uintptr_t) <= sizeof(uint64_t));
	if (!prop_dictionary_get_uint64(rpdict, "rproc-ptr", &rpval)) {
		rp = npf_rproc_create(rpdict);
		rpval = (uint64_t)(uintptr_t)rp;
		prop_dictionary_set_uint64(rpdict, "rproc-ptr", rpval);
	} else {
		rp = (npf_rproc_t *)(uintptr_t)rpval;
	}
	return rp;
}
Example No. 12
// CONSTRUCTOR
//------------------------------------------------------------------------------
Report::Report()
	: m_LibraryStats( 512, true )
	, m_NumPieCharts( 0 )
{
	// Compile time check to ensure color vector is in sync
	CTASSERT( sizeof( g_ReportNodeColors ) / sizeof (uint32_t) == Node::NUM_NODE_TYPES );
}
Example No. 13
static int
icl_pdu_check_header_digest(struct icl_pdu *request, size_t *availablep)
{
	uint32_t received_digest, valid_digest;

	if (request->ip_conn->ic_header_crc32c == false)
		return (0);

	CTASSERT(sizeof(received_digest) == ISCSI_HEADER_DIGEST_SIZE);
	if (icl_conn_receive_buf(request->ip_conn,
	    &received_digest, ISCSI_HEADER_DIGEST_SIZE)) {
		ICL_DEBUG("failed to receive header digest");
		return (-1);
	}
	*availablep -= ISCSI_HEADER_DIGEST_SIZE;

	/* Temporarily attach AHS to BHS to calculate header digest. */
	request->ip_bhs_mbuf->m_next = request->ip_ahs_mbuf;
	valid_digest = icl_mbuf_to_crc32c(request->ip_bhs_mbuf);
	request->ip_bhs_mbuf->m_next = NULL;
	if (received_digest != valid_digest) {
		ICL_WARN("header digest check failed; got 0x%x, "
		    "should be 0x%x", received_digest, valid_digest);
		return (-1);
	}

	return (0);
}
Example No. 14
/*
 * Allocate icl_pdu with empty BHS to fill up by the caller.
 */
struct icl_pdu *
icl_soft_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

#ifdef DIAGNOSTIC
	refcount_acquire(&ic->ic_outstanding_pdus);
#endif
	ip = uma_zalloc(icl_pdu_zone, flags | M_ZERO);
	if (ip == NULL) {
		ICL_WARN("failed to allocate %zd bytes", sizeof(*ip));
#ifdef DIAGNOSTIC
		refcount_release(&ic->ic_outstanding_pdus);
#endif
		return (NULL);
	}
	ip->ip_conn = ic;

	CTASSERT(sizeof(struct iscsi_bhs) <= MHLEN);
	ip->ip_bhs_mbuf = m_gethdr(flags, MT_DATA);
	if (ip->ip_bhs_mbuf == NULL) {
		ICL_WARN("failed to allocate BHS mbuf");
		icl_soft_conn_pdu_free(ic, ip);
		return (NULL);
	}
	ip->ip_bhs = mtod(ip->ip_bhs_mbuf, struct iscsi_bhs *);
	memset(ip->ip_bhs, 0, sizeof(struct iscsi_bhs));
	ip->ip_bhs_mbuf->m_len = sizeof(struct iscsi_bhs);

	return (ip);
}
Example No. 15
	// Init
	//------------------------------------------------------------------------------
	/*static*/ void MemTracker::Init()
	{
        CTASSERT( sizeof( MemTracker::s_Mutex ) == sizeof( Mutex ) );

		ASSERT( g_MemTrackerDisabledOnThisThread );

		// first caller does init
		static uint32_t threadSafeGuard( 0 );
		if ( AtomicIncU32( &threadSafeGuard ) != 1 )
		{
			// subsequent callers wait for init
			while ( !s_Initialized ) {}
			return;
		}

		// construct primary mutex in-place
		INPLACE_NEW ( &GetMutex() ) Mutex;

		// init hash table
		s_AllocationHashTable = new Allocation*[ ALLOCATION_HASH_SIZE ];
		memset( s_AllocationHashTable, 0, ALLOCATION_HASH_SIZE * sizeof( Allocation * ) );

		// init pool for allocation structures
		s_Allocations = new MemPoolBlock( sizeof( Allocation ), __alignof( Allocation ) );

		MemoryBarrier();

		s_Initialized = true;
	}
Example No. 16
static rimeaddr_t*
asrimeaddr(MAC_ExtAddr_s *addr, rimeaddr_t *rime)
{
  CTASSERT(sizeof(*rime) == sizeof(*addr));
  ((MAC_ExtAddr_s*) rime)->u32H = addr->u32L;
  ((MAC_ExtAddr_s*) rime)->u32L = addr->u32H;
  return rime;
}
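Here the CTASSERT guards a cast between two unrelated address types: swapping the halves only makes sense if rimeaddr_t and MAC_ExtAddr_s have exactly the same size. A self-contained sketch of that check-the-size-before-type-punning idiom, with hypothetical types:

#include <stdint.h>
#include <string.h>

struct wire_addr  { uint32_t lo, hi; };	/* hypothetical on-wire layout */
struct local_addr { uint8_t  b[8];   };	/* hypothetical local layout */

/* The conversion below only makes sense if both types are the same size. */
_Static_assert(sizeof(struct wire_addr) == sizeof(struct local_addr),
    "wire_addr and local_addr must have identical sizes");

static void
local_from_wire(struct local_addr *dst, const struct wire_addr *src)
{
	/* memcpy avoids the strict-aliasing pitfalls of a direct pointer cast. */
	memcpy(dst, src, sizeof(*dst));
}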
Example No. 17
void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}
Example No. 18
/*
 * pmap_tlb_shootdown: invalidate a page on all CPUs using pmap 'pm'.
 */
void
pmap_tlb_shootdown(struct pmap *pm, vaddr_t va, pt_entry_t pte, tlbwhy_t why)
{
	pmap_tlb_packet_t *tp;
	int s;

#ifndef XEN
	KASSERT((pte & PG_G) == 0 || pm == pmap_kernel());
#endif

	/*
	 * If tearing down the pmap, do nothing.  We will flush later
	 * when we are ready to recycle/destroy it.
	 */
	if (__predict_false(curlwp->l_md.md_gc_pmap == pm)) {
		return;
	}

	if ((pte & PG_PS) != 0) {
		va &= PG_LGFRAME;
	}

	/*
	 * Add the shootdown operation to our pending set.
	 */
	s = splvm();
	tp = (pmap_tlb_packet_t *)curcpu()->ci_pmap_data;

	/* Whole address flush will be needed if PG_G is set. */
	CTASSERT(PG_G == (uint16_t)PG_G);
	tp->tp_pte |= (uint16_t)pte;

	if (tp->tp_count == (uint16_t)-1) {
		/*
		 * Already flushing everything.
		 */
	} else if (tp->tp_count < TP_MAXVA && va != (vaddr_t)-1LL) {
		/* Flush a single page. */
		tp->tp_va[tp->tp_count++] = va;
		KASSERT(tp->tp_count > 0);
	} else {
		/* Flush everything. */
		tp->tp_count = (uint16_t)-1;
	}

	if (pm != pmap_kernel()) {
		kcpuset_merge(tp->tp_cpumask, pm->pm_cpus);
		if (va >= VM_MAXUSER_ADDRESS) {
			kcpuset_merge(tp->tp_cpumask, pm->pm_kernel_cpus);
		}
		tp->tp_userpmap = 1;
	} else {
		kcpuset_copy(tp->tp_cpumask, kcpuset_running);
	}
	pmap_tlbstat_count(pm, va, why);
	splx(s);
}
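The CTASSERT(PG_G == (uint16_t)PG_G) above checks that the global-page flag survives the narrowing cast used when it is folded into the 16-bit tp_pte field, so no flag bit is silently dropped. A minimal sketch of that fits-in-a-narrower-field check, with a hypothetical flag value:

#include <stdint.h>

#define FLAG_GLOBAL	0x0100u			/* hypothetical page flag */

/* The flag must survive truncation to the 16-bit field that stores it. */
_Static_assert(FLAG_GLOBAL == (uint16_t)FLAG_GLOBAL,
    "FLAG_GLOBAL does not fit in a 16-bit field");

struct shootdown_packet {
	uint16_t pte_bits;			/* accumulated PTE flag bits */
};

static void
packet_add_flags(struct shootdown_packet *p, uint32_t pte)
{
	p->pte_bits |= (uint16_t)pte;		/* safe given the assert above */
}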
Example No. 19
void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}
Example No. 20
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	CTASSERT((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) == 0);

	/* sync other rings */
	fence = bo->sync_obj;
	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			&fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
Example No. 21
File: ctatc.c Project: merxbj/src
static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	unsigned long flags;
	struct ct_vm *vm;

	CTDPF("%s is called\n", __func__);
	CTASSERT(NULL != atc);
	CTASSERT(NULL != apcm);
	CTASSERT(NULL != atc->vm);

	if (NULL == apcm->vm_block) {
		return;
	}
	vm = atc->vm;

	spin_lock_irqsave(&atc->vm_lock, flags);
	vm->unmap(vm, apcm->vm_block);
	spin_unlock_irqrestore(&atc->vm_lock, flags);

	apcm->vm_block = NULL;
}
Example No. 22
/*
 * Prepare the mps_command for a FW_DOWNLOAD request.
 */
static int
mpi_pre_fw_download(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_FW_DOWNLOAD_REQUEST *req = (void *)cm->cm_req;
	MPI2_FW_DOWNLOAD_REPLY *rpl;
	MPI2_FW_DOWNLOAD_TCSGE tc;
	int error;

	/*
	 * This code assumes there is room in the request's SGL for
	 * the TransactionContext plus at least a SGL chain element.
	 */
	CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE);

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	if (cmd->len == 0)
		return (EINVAL);

	error = copyin(cmd->buf, cm->cm_data, cmd->len);
	if (error != 0)
		return (error);

	mpi_init_sge(cm, req, &req->SGL);
	bzero(&tc, sizeof tc);

	/*
	 * For now, the F/W image must be provided in a single request.
	 */
	if ((req->MsgFlags & MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT) == 0)
		return (EINVAL);
	if (req->TotalImageSize != cmd->len)
		return (EINVAL);

	/*
	 * The value of the first two elements is specified in the
	 * Fusion-MPT Message Passing Interface document.
	 */
	tc.ContextSize = 0;
	tc.DetailsLength = 12;
	tc.ImageOffset = 0;
	tc.ImageSize = cmd->len;

	cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;

	return (mps_push_sge(cm, &tc, sizeof tc, 0));
}
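The CTASSERT near the top of this function (and of Example No. 29 below) encodes the layout assumption spelled out in the comment: the request's inline SGL area must have room for the TransactionContext plus at least one chain element. A reduced sketch of that embedded-buffer-capacity check, with hypothetical sizes:

#include <stdint.h>

#define CHAIN_ELEMENT_SIZE	8u		/* hypothetical chain element size */
#define FW_REQUEST_SGL_BYTES	64u		/* hypothetical inline SGL size */

struct transaction_context { uint32_t details[4]; };	/* 16 bytes */

struct fw_request {
	uint32_t header[4];
	uint8_t  sgl[FW_REQUEST_SGL_BYTES];	/* inline scatter/gather area */
};

/* The inline SGL must fit the context plus at least one chain element. */
_Static_assert(FW_REQUEST_SGL_BYTES >=
    sizeof(struct transaction_context) + CHAIN_ELEMENT_SIZE,
    "inline SGL area too small");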
Example No. 23
void
rw_init(krwlock_t *rw)
{
	struct uprw *uprw;

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));
	checkncpu();

	uprw = rump_hypermalloc(sizeof(*uprw), 0, true, "rwinit");
	memset(uprw, 0, sizeof(*uprw));
	rumpuser_cv_init(&uprw->uprw_rucv_reader);
	rumpuser_cv_init(&uprw->uprw_rucv_writer);
	memcpy(rw, &uprw, sizeof(void *));
}
Example No. 24
/*
 * uvm_emap_update: update global emap generation number for current CPU.
 *
 * Function is called by MD code (eg. pmap) to take advantage of TLB flushes
 * initiated for other reasons, that sync the emap as a side effect.  Note
 * update should be performed before the actual TLB flush, to avoid race
 * with newly generated number.
 *
 * => can be called from IPI handler, therefore function must be safe.
 * => should be called _after_ TLB flush.
 * => emap generation number should be taken _before_ TLB flush.
 * => must be called with preemption disabled.
 */
void
uvm_emap_update(u_int gen)
{
	struct uvm_cpu *ucpu;

	/*
	 * See comments in uvm_emap_consume() about memory barriers and
	 * race conditions.  Store is atomic if emap_gen size is word.
	 */
	CTASSERT(sizeof(ucpu->emap_gen) == sizeof(int));
	/* XXX: KASSERT(kpreempt_disabled()); */

	ucpu = curcpu()->ci_data.cpu_uvm;
	ucpu->emap_gen = gen;
}
Example No. 25
static void
apple_smc_fan_refresh(struct sysmon_envsys *sme, struct envsys_data *edata)
{
	struct apple_smc_fan_softc *sc = sme->sme_cookie;
	uint8_t fan, sensor;
	struct apple_smc_key *key;
	uint16_t rpm;
	int error;

	/* Sanity-check the sensor number out of paranoia.  */
	CTASSERT(10 <= (SIZE_MAX / __arraycount(fan_sensors)));
	KASSERT(sc->sc_nfans < 10);
	if (edata->sensor >= (sc->sc_nfans * __arraycount(fan_sensors))) {
		aprint_error_dev(sc->sc_dev, "unknown sensor %"PRIu32"\n",
		    edata->sensor);
		return;
	}

	/* Pick apart the fan number and its sensor number.  */
	fan = (edata->sensor / __arraycount(fan_sensors));
	sensor = (edata->sensor % __arraycount(fan_sensors));

	KASSERT(fan < sc->sc_nfans);
	KASSERT(sensor < __arraycount(fan_sensors));
	KASSERT(edata == &sc->sc_fans[fan].sensors[sensor].sensor_data);

	/*
	 * If we're refreshing, this sensor got attached, so we ought
	 * to have a sensor key.  Grab it.
	 */
	key = sc->sc_fans[fan].sensors[sensor].sensor_key;
	KASSERT(key != NULL);

	/* Read the fan sensor value, in rpm.  */
	error = apple_smc_read_key_2(sc->sc_smc, key, &rpm);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "failed to read fan %d %s speed: %d\n",
		    fan, fan_sensors[sensor].fs_name, error);
		edata->state = ENVSYS_SINVALID;
		return;
	}

	/* Success!  */
	edata->value_cur = rpm;
	edata->state = ENVSYS_SVALID;
}
Example No. 26
// CONSTRUCTOR
//------------------------------------------------------------------------------
Node::Node( const AString & name, Type type, uint32_t controlFlags )
	: m_State( NOT_PROCESSED )
	, m_BuildPassTag( 0 )
	, m_ControlFlags( controlFlags )
	, m_StatsFlags( 0 )
	, m_Stamp( 0 )
	, m_RecursiveCost( 0 )
	, m_Type( type )
	, m_Next( nullptr )
	, m_LastBuildTimeMs( 0 )
	, m_ProcessingTime( 0 )
	, m_ProgressAccumulator( 0 )
	, m_Index( INVALID_NODE_INDEX )
{
	SetName( name );

	// Compile time check to ensure name vector is in sync
	CTASSERT( sizeof( s_NodeTypeNames ) / sizeof(const char *) == NUM_NODE_TYPES );
}
Example No. 27
File: ctatc.c Project: merxbj/src
static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
{
	struct ct_vm *vm;
	void *kvirt_addr;
	unsigned long phys_addr;
	unsigned long flags;

	CTASSERT(NULL != atc);
	spin_lock_irqsave(&atc->vm_lock, flags);
	vm = atc->vm;
	if ((kvirt_addr = vm->get_ptp_virt(vm, index)) == NULL) {
		phys_addr = (~0UL);
	} else {
		phys_addr = virt_to_phys(kvirt_addr);
	}
	spin_unlock_irqrestore(&atc->vm_lock, flags);

	return phys_addr;
}
Example No. 28
// CONSTRUCTOR
//------------------------------------------------------------------------------
Process::Process()
: m_Started( false )
#if defined( __WINDOWS__ )
	, m_SharingHandles( false )
	, m_RedirectHandles( true )
    , m_StdOutRead( nullptr )
    , m_StdOutWrite( nullptr )
    , m_StdErrRead( nullptr )
    , m_StdErrWrite( nullptr )
#endif
#if defined( __LINUX__ ) || defined( __APPLE__ )
    , m_ChildPID( -1 )
    , m_HasAlreadyWaitTerminated( false )
#endif
{
    #if defined( __WINDOWS__ )
        CTASSERT( sizeof( m_ProcessInfo ) == sizeof( PROCESS_INFORMATION ) );
    #endif
}
Example No. 29
/*
 * Prepare the mps_command for a FW_UPLOAD request.
 */
static int
mpi_pre_fw_upload(struct mps_command *cm, struct mps_usr_command *cmd)
{
	MPI2_FW_UPLOAD_REQUEST *req = (void *)cm->cm_req;
	MPI2_FW_UPLOAD_REPLY *rpl;
	MPI2_FW_UPLOAD_TCSGE tc;

	/*
	 * This code assumes there is room in the request's SGL for
	 * the TransactionContext plus at least a SGL chain element.
	 */
	CTASSERT(sizeof req->SGL >= sizeof tc + MPS_SGC_SIZE);

	if (cmd->req_len != sizeof *req)
		return (EINVAL);
	if (cmd->rpl_len != sizeof *rpl)
		return (EINVAL);

	mpi_init_sge(cm, req, &req->SGL);
	if (cmd->len == 0) {
		/* Perhaps just asking what the size of the fw is? */
		return (0);
	}

	bzero(&tc, sizeof tc);

	/*
	 * The value of the first two elements is specified in the
	 * Fusion-MPT Message Passing Interface document.
	 */
	tc.ContextSize = 0;
	tc.DetailsLength = 12;
	/*
	 * XXX Is there any reason to fetch a partial image?  I.e. to
	 * set ImageOffset to something other than 0?
	 */
	tc.ImageOffset = 0;
	tc.ImageSize = cmd->len;

	return (mps_push_sge(cm, &tc, sizeof tc, 0));
}
Example No. 30
	const char * GetProtocolMessageDebugName( Protocol::MessageType msgType )
	{
		const char * const msgNames[] =
		{
			"",
			"Connection",
			"Status",
			"RequestJob",
			"NoJobAvailable",
			"Job",
			"JobResult",
			"RequestManifest",
			"Manifest",
			"RequestFile",
			"File",
			"ServerStatus"
		};
		CTASSERT( ( sizeof( msgNames ) / sizeof(const char *) ) == Protocol::NUM_MESSAGES );

		return msgNames[ msgType ];
	}
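Examples No. 12, 26 and 30 all use the same idiom: a CTASSERT ties a lookup table's element count to an enum, so adding an enum value without extending the table breaks the build instead of reading past the end of the array at run time. A standalone C sketch of that idiom, with hypothetical names and _Static_assert in place of CTASSERT:

enum color { COLOR_RED, COLOR_GREEN, COLOR_BLUE, NUM_COLORS };

static const char *const color_names[] = { "red", "green", "blue" };

/* Compile-time check to keep color_names in sync with enum color. */
_Static_assert(sizeof(color_names) / sizeof(color_names[0]) == NUM_COLORS,
    "color_names is out of sync with enum color");

static const char *
color_name(enum color c)
{
	return color_names[c];
}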