Example #1
boolean_t
cbc_encrypt(cbc_handle_t *ch, uint8_t *data, size_t datalen,
	uint8_t *IV)
{
	uint8_t *lastp;
	uint8_t *thisp;
	size_t i;

	if (!IS_P2ALIGNED(datalen, ch->blocklen)) {
		return (B_FALSE);
	}

	thisp = data;
	lastp = IV;

	for (i = 0; i < datalen; i += ch->blocklen) {
		cbc_xorblock(lastp, thisp, ch->blocklen);
		/* Encrypt the current block. */
		ch->encrypt(ch->ks, thisp);
		lastp = thisp;
		thisp += ch->blocklen;
	}

	bcopy(lastp, IV, ch->blocklen);
	return (B_TRUE);
}
Example #2
boolean_t
cbc_decrypt(cbc_handle_t *ch, uint8_t *data, size_t datalen,
	uint8_t *IV)
{
	uint8_t cbcblock[CBC_MAX_BLOCK_SIZE];
	uint8_t *lastp;
	uint8_t *thisp;
	size_t i;

	if (!IS_P2ALIGNED(datalen, ch->blocklen)) {
		return (B_FALSE);
	}

	thisp = data;
	lastp = IV;

	for (i = 0; i < datalen; i += ch->blocklen) {

		/* Copy the current ciphertext block. */
		bcopy(thisp, cbcblock, ch->blocklen);

		/* Decrypt the current block. */
		ch->decrypt(ch->ks, thisp);

		cbc_xorblock(lastp, thisp, ch->blocklen);

		/* Save the last ciphertext block. */
		bcopy(cbcblock, lastp, ch->blocklen);
		thisp += ch->blocklen;
	}

	return (B_TRUE);
}
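The two routines above are symmetric: cbc_encrypt() leaves the last ciphertext block in IV so that consecutive calls chain correctly, while cbc_decrypt() needs the IV that was current when encryption started. A minimal round-trip sketch follows; the cbc_handle_t field assignments and the aes_* callbacks are assumptions for illustration, not taken from the examples above:

	uint8_t iv[16], iv_copy[16];
	uint8_t buf[64];			/* multiple of ch.blocklen */
	cbc_handle_t ch;

	ch.blocklen = 16;
	ch.ks = aes_keysched;			/* hypothetical key schedule */
	ch.encrypt = aes_encrypt_block;		/* hypothetical callbacks */
	ch.decrypt = aes_decrypt_block;

	/* Save the IV: both routines overwrite their IV argument. */
	bcopy(iv, iv_copy, sizeof (iv));

	if (cbc_encrypt(&ch, buf, sizeof (buf), iv) == B_TRUE &&
	    cbc_decrypt(&ch, buf, sizeof (buf), iv_copy) == B_TRUE) {
		/* buf once again holds the original plaintext */
	}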
Example #3
/* ARGSUSED */
static rpc_inline_t *
xdrrdma_inline(XDR *xdrs, int len)
{
	rpc_inline_t	*buf = NULL;
	xrdma_private_t	*xdrp = (xrdma_private_t *)(xdrs->x_private);
	struct clist	*cle = *(xdrp->xp_rcl_next);

	if (xdrs->x_op == XDR_DECODE) {
		/*
		 * Since chunks aren't in-line, check to see whether there is
		 * a chunk in the inline range.
		 */
		if (cle != NULL &&
		    cle->c_xdroff <= (xdrp->xp_offp - xdrs->x_base + len))
			return (NULL);
	}

	/* LINTED pointer alignment */
	buf = (rpc_inline_t *)xdrp->xp_offp;
	if (!IS_P2ALIGNED(buf, sizeof (int32_t)))
		return (NULL);

	if ((xdrs->x_handy < len) || (xdrp->xp_min_chunk != 0 &&
	    len >= xdrp->xp_min_chunk)) {
		return (NULL);
	} else {
		xdrs->x_handy -= len;
		xdrp->xp_offp += len;
		return (buf);
	}
}
Example #4
static bool_t
xdrmblk_control(XDR *xdrs, int request, void *info)
{
	mblk_t *m;
	int32_t *int32p;
	int len;

	switch (request) {
	case XDR_PEEK:
		/*
		 * Return the next 4 byte unit in the XDR stream.
		 */
		if (xdrs->x_handy < sizeof (int32_t))
			return (FALSE);

		/* LINTED pointer alignment */
		m = (mblk_t *)xdrs->x_base;
		if (m == NULL)
			return (FALSE);

		/*
		 * If the pointer is not aligned, fail the peek
		 */
		if (!IS_P2ALIGNED(m->b_rptr, sizeof (int32_t)))
			return (FALSE);

		int32p = (int32_t *)info;
		/* LINTED pointer alignment */
		*int32p = ntohl(*((int32_t *)(m->b_rptr)));
		return (TRUE);

	case XDR_SKIPBYTES:
		/* LINTED pointer alignment */
		m = (mblk_t *)xdrs->x_base;
		if (m == NULL)
			return (FALSE);
		int32p = (int32_t *)info;
		len = RNDUP((int)(*int32p));
		if (len < 0)
			return (FALSE);
		while ((xdrs->x_handy -= len) < 0) {
			if ((xdrs->x_handy += len) > 0) {
				m->b_rptr += xdrs->x_handy;
				len -= xdrs->x_handy;
			}
			m = m->b_cont;
			xdrs->x_base = (caddr_t)m;
			if (m == NULL) {
				xdrs->x_handy = 0;
				return (FALSE);
			}
			xdrs->x_handy = (int)(m->b_wptr - m->b_rptr);
		}
		m->b_rptr += len;
		return (TRUE);

	default:
		return (FALSE);
	}
}
Example #5
void
fletcher_4_incremental_byteswap(const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	fletcher_4_scalar_byteswap(buf, size, zcp);
}
Example #6
static rpc_inline_t *
xdrmblk_inline(XDR *xdrs, int len)
{
	rpc_inline_t *buf;
	mblk_t *m;

	/*
	 * Can't inline XDR_FREE calls; it doesn't make sense.
	 */
	if (xdrs->x_op == XDR_FREE)
		return (NULL);

	/*
	 * Can't inline if there isn't enough room, there is no mblk
	 * pointer, it's not 4 byte aligned, or if there is more than
	 * one reference to the data block associated with this mblk.
	 * The last check matters because the caller may want to modify
	 * the data in the inlined portion, and someone else holding a
	 * reference to the data may not want it to be modified.
	 */
	if (xdrs->x_handy < len ||
	    /* LINTED pointer alignment */
	    (m = (mblk_t *)xdrs->x_base) == NULL ||
	    !IS_P2ALIGNED(m->b_rptr, sizeof (int32_t)) ||
	    m->b_datap->db_ref != 1) {
#ifdef DEBUG
		xdrmblk_inline_misses++;
#endif
		return (NULL);
	}

#ifdef DEBUG
	if (!do_xdrmblk_inline) {
		xdrmblk_inline_misses++;
		return (NULL);
	}
#endif

	xdrs->x_handy -= len;
	if (xdrs->x_op == XDR_DECODE) {
		/* LINTED pointer alignment */
		buf = (rpc_inline_t *)m->b_rptr;
		m->b_rptr += len;
	} else {
		/* LINTED pointer alignment */
		buf = (rpc_inline_t *)m->b_wptr;
		m->b_wptr += len;
	}
#ifdef DEBUG
	xdrmblk_inline_hits++;
#endif
	return (buf);
}
Example #7
/*
 * This routine is used to print a hexascii version of a key.
 * The hexascii version will be twice 'keylen' characters long.
 * 'keylen' is expected to be even, since the key is consumed
 * two bytes (one uint16_t) at a time.
 */
static void
keydump(const char *key, int keylen)
{
	uint16_t *p16;

	assert(IS_P2ALIGNED(key, sizeof (uint16_t)));
/*LINTED aligned*/
	for (p16 = (uint16_t *)key; keylen > 0; keylen -= 2) {
		(void) printf("%04x", htons(*p16++));
	}
	(void) printf("\n");
}
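Because the loop consumes the key two bytes per iteration, keydump() expects an even keylen and storage with at least 2-byte alignment (which the assert enforces). A usage sketch under those assumptions:

	/* A 16-byte key; a uint16_t array guarantees the asserted alignment. */
	uint16_t keybuf[8] = { 0x0102, 0x0304, 0x0506, 0x0708,
	    0x090a, 0x0b0c, 0x0d0e, 0x0f10 };

	keydump((const char *)keybuf, sizeof (keybuf));	/* 32 hex digits */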
Example #8
/*ARGSUSED*/
static int
pxtool_access(px_t *px_p, pcitool_reg_t *prg_p, uint64_t *data_p,
    boolean_t is_write)
{
	dev_info_t *dip = px_p->px_dip;
	uint64_t phys_addr = prg_p->phys_addr;
	boolean_t endian = PCITOOL_ACC_IS_BIG_ENDIAN(prg_p->acc_attr);
	size_t size = PCITOOL_ACC_ATTR_SIZE(prg_p->acc_attr);
	int rval = SUCCESS;

	/* Alignment checking.  Assumes base address is 8-byte aligned. */
	if (!IS_P2ALIGNED(phys_addr, size)) {
		DBG(DBG_TOOLS, dip, "not aligned.\n");
		prg_p->status = PCITOOL_NOT_ALIGNED;

		rval = EINVAL;

	} else if (is_write) {	/* Made it through checks.  Do the access. */

		DBG(DBG_PHYS_ACC, dip,
		    "%d byte %s pxtool_safe_phys_poke at addr 0x%" PRIx64 "\n",
		    size, (endian ? "BE" : "LE"), phys_addr);

		if (pxtool_safe_phys_poke(px_p, endian, size, phys_addr,
		    *data_p) != DDI_SUCCESS) {
			DBG(DBG_PHYS_ACC, dip,
			    "%d byte %s pxtool_safe_phys_poke at addr "
			    "0x%" PRIx64 " failed\n",
			    size, (endian ? "BE" : "LE"), phys_addr);
			prg_p->status = PCITOOL_INVALID_ADDRESS;

			rval = EFAULT;
		}

	} else {	/* Read */

		DBG(DBG_PHYS_ACC, dip,
		    "%d byte %s pxtool_safe_phys_peek at addr 0x%" PRIx64 "\n",
		    size, (endian ? "BE" : "LE"), phys_addr);

		if (pxtool_safe_phys_peek(px_p, endian, size, phys_addr,
		    data_p) != DDI_SUCCESS) {
			DBG(DBG_PHYS_ACC, dip,
			    "%d byte %s pxtool_safe_phys_peek at addr "
			    "0x%" PRIx64 " failed\n",
			    size, (endian ? "BE" : "LE"), phys_addr);
			prg_p->status = PCITOOL_INVALID_ADDRESS;

			rval = EFAULT;
		}
	}
	return (rval);
}
Example #9
static boolean_t
pci_cfgacc_valid(pci_cfgacc_req_t *req)
{
	int sz = req->size;

	if (IS_P2ALIGNED(req->offset, sz)		&&
	    (req->offset + sz - 1 < PCIE_CFG_SPACE_SIZE)	&&
	    ((sz & 0xf) && ISP2(sz)))
		return (B_TRUE);

	cmn_err(CE_WARN, "illegal PCI request: offset = %x, size = %d",
	    req->offset, sz);
	return (B_FALSE);
}
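pci_cfgacc_valid() combines two power-of-two macros. For reference, their conventional illumos definitions (sys/sysmacros.h) are simple bit tests, sketched below. Note that ISP2(0) evaluates true under this definition, which is why the (sz & 0xf) term is also needed, rejecting a zero size and anything above 8:

#define	ISP2(x)		(((x) & ((x) - 1)) == 0)
#define	IS_P2ALIGNED(v, a) \
	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

Together the three tests accept only sizes 1, 2, 4 and 8, at an offset aligned to that size and entirely within PCIe config space.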
Example #10
/* ARGSUSED */
int
interpret_esp(int flags, uint8_t *hdr, int iplen, int fraglen)
{
	/* LINTED: alignment */
	esph_t *esph = (esph_t *)hdr;
	esph_t *aligned_esph;
	esph_t storage;	/* In case hdr isn't aligned. */
	char *line;

	if (fraglen < sizeof (esph_t))
		return (fraglen);	/* incomplete header */

	if (!IS_P2ALIGNED(hdr, 4)) {
		aligned_esph = &storage;
		bcopy(hdr, aligned_esph, sizeof (esph_t));
	} else {
		aligned_esph = esph;
	}

	if (flags & F_SUM) {
		line = (char *)get_sum_line();
		/*
		 * sprintf() is safe because get_sum_line() guarantees us 80
		 * columns, and SPI and replay certainly won't exceed that.
		 */
		(void) sprintf(line, "ESP SPI=0x%x Replay=%u",
		    ntohl(aligned_esph->esph_spi),
		    ntohl(aligned_esph->esph_replay));
		line += strlen(line);
	}

	if (flags & F_DTAIL) {
		show_header("ESP:  ", "Encapsulating Security Payload",
		    sizeof (esph_t));
		show_space();
		/*
		 * sprintf() is safe because get_line guarantees us 80 columns,
		 * and SPI and replay certainly won't exceed that.
		 */
		(void) sprintf(get_line((char *)&esph->esph_spi - dlc_header,
		    4), "SPI = 0x%x", ntohl(aligned_esph->esph_spi));
		(void) sprintf(get_line((char *)&esph->esph_replay -
		    dlc_header, 4), "Replay = %u",
		    ntohl(aligned_esph->esph_replay));
		(void) sprintf(get_line((char *)(esph + 1) - dlc_header,
		    4), "   ....ENCRYPTED DATA....");
	}

	return (sizeof (esph_t));
}
Example #11
static bool_t
xdrmblk_getint32(XDR *xdrs, int32_t *int32p)
{
	mblk_t *m;

	/* LINTED pointer alignment */
	m = (mblk_t *)xdrs->x_base;
	if (m == NULL)
		return (FALSE);
	/*
	 * If the pointer is not aligned or there is not
	 * enough bytes, pullupmsg to get enough bytes and
	 * align the mblk.
	 */
	if (!IS_P2ALIGNED(m->b_rptr, sizeof (int32_t)) ||
	    xdrs->x_handy < sizeof (int32_t)) {
		while (!pullupmsg(m, sizeof (int32_t))) {
			/*
			 * Could have failed due to not
			 * enough data or an allocb failure.
			 */
			if (xmsgsize(m) < sizeof (int32_t))
				return (FALSE);
			delay(hz);
		}
		xdrs->x_handy = (int)(m->b_wptr - m->b_rptr);
	}

	/* LINTED pointer alignment */
	*int32p = ntohl(*((int32_t *)(m->b_rptr)));
	m->b_rptr += sizeof (int32_t);

	/*
	 * Instead of leaving handy as 0 causing more pullupmsg's
	 * simply move to the next mblk.
	 */
	if ((xdrs->x_handy -= sizeof (int32_t)) == 0) {
		m = m->b_cont;
		xdrs->x_base = (caddr_t)m;
		if (m != NULL)
			xdrs->x_handy = (int)(m->b_wptr - m->b_rptr);
	}
	return (TRUE);
}
Example #12
static bool_t
xdrmblk_getint32(XDR *xdrs, int32_t *int32p)
{
	mblk_t *m;
	struct xdrmblk_params *p;

	xdrmblk_skip_fully_read_mblks(xdrs);

	/* LINTED pointer alignment */
	m = (mblk_t *)xdrs->x_base;
	if (m == NULL)
		return (FALSE);

	p = (struct xdrmblk_params *)xdrs->x_private;

	/*
	 * If the pointer is not aligned or there is not
	 * enough bytes, pullupmsg to get enough bytes and
	 * align the mblk.
	 */
	if (!IS_P2ALIGNED(m->b_rptr, sizeof (int32_t)) ||
	    xdrs->x_handy < sizeof (int32_t)) {
		while (!pullupmsg(m, sizeof (int32_t))) {
			/*
			 * Could have failed due to not
			 * enough data or an allocb failure.
			 */
			if (xmsgsize(m) < sizeof (int32_t))
				return (FALSE);
			delay(hz);
		}
		p->apos += p->rpos;
		p->rpos = 0;
		xdrs->x_handy = (int)MBLKL(m);
	}

	/* LINTED pointer alignment */
	*int32p = ntohl(*((int32_t *)(m->b_rptr)));
	m->b_rptr += sizeof (int32_t);
	xdrs->x_handy -= sizeof (int32_t);
	p->rpos += sizeof (int32_t);

	return (TRUE);
}
Example #13
static bool_t
xdrmblk_putint32(XDR *xdrs, int32_t *int32p)
{
	mblk_t *m;
	struct xdrmblk_params *p;

	/* LINTED pointer alignment */
	m = (mblk_t *)xdrs->x_base;
	if (m == NULL)
		return (FALSE);

	p = (struct xdrmblk_params *)xdrs->x_private;

	while (!IS_P2ALIGNED(m->b_wptr, sizeof (int32_t)) ||
	    xdrs->x_handy < sizeof (int32_t)) {
		if (m->b_cont == NULL) {
			ASSERT(p->sz >= sizeof (int32_t));
			m->b_cont = xdrmblk_alloc(p->sz);
		}
		m = m->b_cont;
		xdrs->x_base = (caddr_t)m;
		p->apos += p->rpos;
		p->rpos = 0;
		if (m == NULL) {
			xdrs->x_handy = 0;
			return (FALSE);
		}
		xdrs->x_handy = (int)MBLKTAIL(m);
		ASSERT(m->b_rptr == m->b_wptr);
		ASSERT(m->b_rptr >= m->b_datap->db_base);
		ASSERT(m->b_rptr < m->b_datap->db_lim);
	}
	/* LINTED pointer alignment */
	*(int32_t *)m->b_wptr = htonl(*int32p);
	m->b_wptr += sizeof (int32_t);
	xdrs->x_handy -= sizeof (int32_t);
	p->rpos += sizeof (int32_t);
	ASSERT(m->b_wptr <= m->b_datap->db_lim);
	return (TRUE);
}
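The bookkeeping above leans on two STREAMS macros: MBLKL() is the amount of valid data in an mblk, and MBLKTAIL() the writable room left between b_wptr and the end of the data block. Their conventional definitions, a sketch of what sys/stream.h provides:

#define	MBLKL(mp)	((mp)->b_wptr - (mp)->b_rptr)
#define	MBLKTAIL(mp)	((mp)->b_datap->db_lim - (mp)->b_wptr)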
Example #14
void
fletcher_4_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	const fletcher_4_ops_t *ops;
	uint64_t p2size = P2ALIGN(size, 64);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
	} else if (p2size == 0) {
		ops = &fletcher_4_scalar_ops;
		fletcher_4_byteswap_impl(ops, buf, size, zcp);
	} else {
		ops = fletcher_4_impl_get();
		fletcher_4_byteswap_impl(ops, buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_incremental_byteswap((char *)buf + p2size,
			    size - p2size, zcp);
	}
}
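fletcher_4_byteswap() hands the 64-byte-aligned prefix to an optimized implementation and lets the scalar code finish the tail; both compute the same recurrence of four running sums over 32-bit words. A minimal scalar sketch of that update rule, with the checksum reduced to a plain uint64_t[4] for illustration (the byteswap variant would additionally swap each word before accumulating):

	static void
	fletcher_4_sketch(const void *buf, uint64_t size, uint64_t sums[4])
	{
		const uint32_t *ip = buf;
		const uint32_t *ipend = ip + (size / sizeof (uint32_t));
		uint64_t a = sums[0], b = sums[1], c = sums[2], d = sums[3];

		/* a sums the words, b sums the a's, c the b's, d the c's */
		for (; ip < ipend; ip++) {
			a += *ip;
			b += a;
			c += b;
			d += c;
		}

		sums[0] = a; sums[1] = b; sums[2] = c; sums[3] = d;
	}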
Example #15
/*ARGSUSED*/
void
fletcher_4_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	const uint64_t p2size = P2ALIGN(size, 64);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		if (size > 0)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		fletcher_4_byteswap_impl(buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}
Example #16
int
xen_ldt_setprot(user_desc_t *ldt, size_t lsize, uint_t prot)
{
	int err;
	caddr_t	lva = (caddr_t)ldt;
#if defined(__amd64)
	int pt_bits = PT_VALID;
	pgcnt_t npgs;
	if (prot & PROT_WRITE)
		pt_bits |= PT_WRITABLE;
#endif	/* __amd64 */

	if ((err = as_setprot(&kas, (caddr_t)ldt, lsize, prot)) != 0)
		goto done;

#if defined(__amd64)

	ASSERT(IS_P2ALIGNED(lsize, PAGESIZE));
	npgs = mmu_btop(lsize);
	while (npgs--) {
		if ((err = xen_kpm_page(hat_getpfnum(kas.a_hat, lva),
		    pt_bits)) != 0)
			break;
		lva += PAGESIZE;
	}
#endif	/* __amd64 */

done:
	if (err) {
		cmn_err(CE_WARN, "xen_ldt_setprot(%p, %s) failed: error %d",
		    (void *)lva,
		    (prot & PROT_WRITE) ? "writable" : "read-only", err);
	}

	return (err);
}
Example #17
/*
 * Returns 0 on success.
 */
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int	error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * extend heap to brkszc alignment but use current p->p_brkpageszc
	 * for the newly created segment. This allows the new extension
	 * segment to be concatenated successfully with the existing brk
	 * segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed, p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for heap size). If pgsz is greater than PAGESIZE, calculate the
	 * real new heap size by rounding it up to pgsz. This is useful since
	 * we may want to know where the heap ends without knowing the heap
	 * pagesize (e.g. some old code), and also, if the heap pagesize
	 * changes, we can update p_brkpageszc but delay adding the new
	 * mapping while still knowing from p_brksize where the heap really
	 * ends. The user-requested heap end is stored in a libc variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * Use PAGESIZE to round up ova because we want to know the real
	 * value of the current heap end in case p_brkpageszc has changed
	 * since the last time p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add new zfod mapping to extend UNIX data segment
		 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
		 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
		 * page sizes if ova is not aligned to szc's pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	return (0);
}
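brk_internal() does all of its boundary arithmetic with the power-of-two rounding macros. Their conventional illumos definitions (sys/sysmacros.h), shown here as a reference sketch, are valid only when align is a power of two:

#define	P2ALIGN(x, align)	((x) & -(align))	/* round down */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */

For example, with align = 0x2000: P2ALIGN(0x3456, 0x2000) == 0x2000 and P2ROUNDUP(0x3456, 0x2000) == 0x4000, while both leave an already-aligned 0x4000 unchanged.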
Example #18
/*
 * This function is for PCI IO space and memory space access.
 * It assumes that offset, bdf, acc_attr are current in prg_p.
 * It assumes that prg_p->phys_addr is the final phys addr (including offset).
 * This function modifies prg_p status and data.
 */
int
pxtool_pciiomem_access(px_t *px_p, pcitool_reg_t *prg_p,
    uint64_t *data_p, boolean_t is_write)
{
	on_trap_data_t otd;
	uint32_t io_stat = 0;
	dev_info_t *dip = px_p->px_dip;
	px_pec_t *pec_p = px_p->px_pec_p;
	size_t size = PCITOOL_ACC_ATTR_SIZE(prg_p->acc_attr);
	int rval = 0;

	/* Alignment checking. */
	if (!IS_P2ALIGNED(prg_p->offset, size)) {
		DBG(DBG_TOOLS, dip, "not aligned.\n");
		prg_p->status = PCITOOL_NOT_ALIGNED;
		return (EINVAL);
	}

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_ontrap_data = &otd;

	if (is_write) {
		pci_device_t bdf = PX_GET_BDF(prg_p);

		if (PCITOOL_ACC_IS_BIG_ENDIAN(prg_p->acc_attr))
			*data_p = pxtool_swap_endian(*data_p, size);

		pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			otd.ot_trampoline = (uintptr_t)&poke_fault;
			rval = hvio_poke(px_p->px_dev_hdl, prg_p->phys_addr,
			    size, *data_p, bdf, &io_stat);
		} else
			rval = H_EIO;

		if (otd.ot_trap & OT_DATA_ACCESS)
			rval = H_EIO;

		DBG(DBG_TOOLS, dip, "iomem:phys_addr:0x%" PRIx64 ", bdf:0x%x, "
		    "rval:%d, io_stat:%d\n", prg_p->phys_addr, bdf,
		    rval, io_stat);
	} else {

		*data_p = 0;

		pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			otd.ot_trampoline = (uintptr_t)&peek_fault;
			rval = hvio_peek(px_p->px_dev_hdl, prg_p->phys_addr,
			    size, &io_stat, data_p);
		} else
			rval = H_EIO;

		DBG(DBG_TOOLS, dip, "iomem:phys_addr:0x%" PRIx64 ", "
		    "size:0x%" PRIx64 ", hdl:0x%" PRIx64 ", "
		    "rval:%d, io_stat:%d\n", prg_p->phys_addr,
		    size, px_p->px_dev_hdl, rval, io_stat);
		DBG(DBG_TOOLS, dip, "read data:0x%" PRIx64 "\n", *data_p);

		if (PCITOOL_ACC_IS_BIG_ENDIAN(prg_p->acc_attr))
			*data_p = pxtool_swap_endian(*data_p, size);
	}

	/*
	 * Workaround: delay taking down safe access env.
	 * For more info, see comment where pxtool_iomem_delay_usec is declared.
	 */
	if (pxtool_iomem_delay_usec > 0)
		delay(drv_usectohz(pxtool_iomem_delay_usec));

	no_trap();
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

	if (rval != SUCCESS) {
		prg_p->status = PCITOOL_INVALID_ADDRESS;
		rval = EINVAL;
	} else if (io_stat != SUCCESS) {
		prg_p->status = PCITOOL_IO_ERROR;
		rval = EIO;
	} else
		prg_p->status = PCITOOL_SUCCESS;

	return (rval);
}
Example #19
/*
 * Perform register accesses on the nexus device itself.
 */
int
pxtool_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
{

	pcitool_reg_t		prg;
	size_t			size;
	px_t			*px_p = DIP_TO_STATE(dip);
	boolean_t		is_write = B_FALSE;
	uint32_t		rval = 0;

	if (cmd == PCITOOL_NEXUS_SET_REG)
		is_write = B_TRUE;

	DBG(DBG_TOOLS, dip, "pxtool_bus_reg_ops set/get reg\n");

	/* Read data from userland. */
	if (ddi_copyin(arg, &prg, sizeof (pcitool_reg_t),
	    mode) != DDI_SUCCESS) {
		DBG(DBG_TOOLS, dip, "Error reading arguments\n");
		return (EFAULT);
	}

	size = PCITOOL_ACC_ATTR_SIZE(prg.acc_attr);

	DBG(DBG_TOOLS, dip, "raw bus:0x%x, dev:0x%x, func:0x%x\n",
	    prg.bus_no, prg.dev_no, prg.func_no);
	DBG(DBG_TOOLS, dip, "barnum:0x%x, offset:0x%" PRIx64 ", acc:0x%x\n",
	    prg.barnum, prg.offset, prg.acc_attr);
	DBG(DBG_TOOLS, dip, "data:0x%" PRIx64 ", phys_addr:0x%" PRIx64 "\n",
	    prg.data, prg.phys_addr);

	/*
	 * If bank num == ff, base phys addr passed in from userland.
	 *
	 * Normal bank specification is invalid, as there is no OBP property to
	 * back it up.
	 */
	if (prg.barnum != PCITOOL_BASE) {
		prg.status = PCITOOL_OUT_OF_RANGE;
		rval = EINVAL;
		goto done;
	}

	/* Allow only size of 8-bytes. */
	if (size != sizeof (uint64_t)) {
		prg.status = PCITOOL_INVALID_SIZE;
		rval = EINVAL;
		goto done;
	}

	/* Alignment checking. */
	if (!IS_P2ALIGNED(prg.offset, size)) {
		DBG(DBG_TOOLS, dip, "not aligned.\n");
		prg.status = PCITOOL_NOT_ALIGNED;
		rval = EINVAL;
		goto done;
	}

	prg.phys_addr += prg.offset;

	/*
	 * Only the hypervisor can access nexus registers.  As a result, there
	 * can be no error recovery in the OS.  If there is an error, the
	 * system will go down, but with a trap type 7f.  The OS cannot
	 * intervene with this kind of trap.
	 */

	/* Access device.  prg.status is modified. */
	rval = pxtool_phys_access(px_p, prg.phys_addr, &prg.data,
	    PCITOOL_ACC_IS_BIG_ENDIAN(prg.acc_attr), is_write);
done:
	prg.drvr_version = PCITOOL_VERSION;
	if (ddi_copyout(&prg, arg, sizeof (pcitool_reg_t),
	    mode) != DDI_SUCCESS) {
		DBG(DBG_TOOLS, dip, "Copyout failed.\n");
		return (EFAULT);
	}

	return (rval);
}
Example #20
/*
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	size_t newsize;
	size_t oldsize;
	int    error;
	size_t pgsz;
	uint_t szc;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);
	sp = (caddr_t)P2ALIGN((uintptr_t)sp, PAGESIZE);

	/*
	 * grow to growszc alignment but use current p->p_stkpageszc for
	 * the segvn_crargs szc passed to segvn_create. For memcntl to
	 * increase the szc, this allows the new extension segment to be
	 * concatenated successfully with the existing stack segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = p->p_usrstack - (caddr_t)P2ALIGN((uintptr_t)sp, pgsz);
		if (newsize > (size_t)p->p_stk_ctl) {
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
		newsize = p->p_usrstack - sp;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * Extend the stack with the proposed new growszc, which differs
	 * from p_stkpageszc only on a memcntl to increase the stack
	 * pagesize.  AS_MAP_NO_LPOOB means use 0, and don't reapply OOB
	 * policies via map_pgszcvec().  Use AS_MAP_STACK to get
	 * intermediate page sizes if not aligned to szc's pgsz.
	 */
	if (szc > 0) {
		caddr_t oldsp = p->p_usrstack - oldsize;
		caddr_t austk = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack,
		    pgsz);

		if (IS_P2ALIGNED(p->p_usrstack, pgsz) || oldsp < austk) {
			crargs.szc = p->p_stkpageszc ? p->p_stkpageszc :
			    AS_MAP_NO_LPOOB;
		} else if (oldsp == austk) {
			crargs.szc = szc;
		} else {
			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;
	return (0);
}
Example #21
int
pxtool_pcicfg_access(px_t *px_p, pcitool_reg_t *prg_p,
    uint64_t *data_p, boolean_t is_write)
{
	pci_cfg_data_t data;
	on_trap_data_t otd;
	dev_info_t *dip = px_p->px_dip;
	px_pec_t *pec_p = px_p->px_pec_p;
	size_t size = PCITOOL_ACC_ATTR_SIZE(prg_p->acc_attr);
	int rval = 0;
	pci_cfgacc_req_t req;

	if ((size <= 0) || (size > 8)) {
		DBG(DBG_TOOLS, dip, "not supported size.\n");
		prg_p->status = PCITOOL_INVALID_SIZE;
		return (ENOTSUP);
	}

	/* Alignment checking. */
	if (!IS_P2ALIGNED(prg_p->offset, size)) {
		DBG(DBG_TOOLS, dip, "not aligned.\n");
		prg_p->status = PCITOOL_NOT_ALIGNED;
		return (EINVAL);
	}

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_ontrap_data = &otd;

	req.rcdip = dip;
	req.bdf = PCI_GETBDF(prg_p->bus_no, prg_p->dev_no, prg_p->func_no);
	req.offset = prg_p->offset;
	req.size = size;
	req.write = is_write;
	if (is_write) {

		if (PCITOOL_ACC_IS_BIG_ENDIAN(prg_p->acc_attr))
			data.qw = pxtool_swap_endian(*data_p, size);
		else
			data.qw = *data_p;

		switch (size) {
			case sizeof (uint8_t):
				data.b = (uint8_t)data.qw;
				break;
			case sizeof (uint16_t):
				data.w = (uint16_t)data.qw;
				break;
			case sizeof (uint32_t):
				data.dw = (uint32_t)data.qw;
				break;
			case sizeof (uint64_t):
				break;
		}

		DBG(DBG_TOOLS, dip, "put: bdf:%d,%d,%d, off:0x%"PRIx64", size:"
		    "0x%"PRIx64", data:0x%"PRIx64"\n",
		    prg_p->bus_no, prg_p->dev_no, prg_p->func_no,
		    prg_p->offset, size, data.qw);

		pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			otd.ot_trampoline = (uintptr_t)&poke_fault;
			VAL64(&req) = data.qw;
			pci_cfgacc_acc(&req);
		} else
			rval = H_EIO;

		if (otd.ot_trap & OT_DATA_ACCESS)
			rval = H_EIO;

	} else {

		data.qw = 0;

		pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			otd.ot_trampoline = (uintptr_t)&peek_fault;
			pci_cfgacc_acc(&req);
			data.qw = VAL64(&req);
		} else
			rval = H_EIO;

		switch (size) {
			case sizeof (uint8_t):
				data.qw = (uint64_t)data.b;
				break;
			case sizeof (uint16_t):
				data.qw = (uint64_t)data.w;
				break;
			case sizeof (uint32_t):
				data.qw = (uint64_t)data.dw;
				break;
			case sizeof (uint64_t):
				break;
		}

		DBG(DBG_TOOLS, dip, "get: bdf:%d,%d,%d, off:0x%"PRIx64", size:"
		    "0x%"PRIx64", data:0x%"PRIx64"\n",
		    prg_p->bus_no, prg_p->dev_no, prg_p->func_no,
		    prg_p->offset, size, data.qw);
		*data_p = data.qw;

		if (PCITOOL_ACC_IS_BIG_ENDIAN(prg_p->acc_attr))
			*data_p = pxtool_swap_endian(*data_p, size);
	}

	/*
	 * Workaround: delay taking down safe access env.
	 * For more info, see comments where pxtool_cfg_delay_usec is declared.
	 */
	if (pxtool_cfg_delay_usec > 0)
		drv_usecwait(pxtool_cfg_delay_usec);

	no_trap();
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

	if (rval != SUCCESS) {
		prg_p->status = PCITOOL_INVALID_ADDRESS;
		rval = EINVAL;
	} else
		prg_p->status = PCITOOL_SUCCESS;

	return (rval);
}
Example #22
/*
 * DL_UNITDATA_REQ
 */
void
proto_unitdata_req(dld_str_t *dsp, mblk_t *mp)
{
	queue_t			*q = dsp->ds_wq;
	dl_unitdata_req_t	*dlp = (dl_unitdata_req_t *)mp->b_rptr;
	off_t			off;
	size_t			len, size;
	const uint8_t		*addr;
	uint16_t		sap;
	uint_t			addr_length;
	mblk_t			*bp, *payload;
	uint32_t		start, stuff, end, value, flags;
	t_uscalar_t		dl_err;
	uint_t			max_sdu;

	if (MBLKL(mp) < sizeof (dl_unitdata_req_t) || mp->b_cont == NULL) {
		dlerrorack(q, mp, DL_UNITDATA_REQ, DL_BADPRIM, 0);
		return;
	}

	mutex_enter(&dsp->ds_lock);
	if (dsp->ds_dlstate != DL_IDLE) {
		mutex_exit(&dsp->ds_lock);
		dlerrorack(q, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
		return;
	}
	DLD_DATATHR_INC(dsp);
	mutex_exit(&dsp->ds_lock);

	addr_length = dsp->ds_mip->mi_addr_length;

	off = dlp->dl_dest_addr_offset;
	len = dlp->dl_dest_addr_length;

	if (!MBLKIN(mp, off, len) || !IS_P2ALIGNED(off, sizeof (uint16_t))) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (len != addr_length + sizeof (uint16_t)) {
		dl_err = DL_BADADDR;
		goto failed;
	}

	addr = mp->b_rptr + off;
	sap = *(uint16_t *)(mp->b_rptr + off + addr_length);

	/*
	 * Check the length of the packet and the block types.
	 */
	size = 0;
	payload = mp->b_cont;
	for (bp = payload; bp != NULL; bp = bp->b_cont) {
		if (DB_TYPE(bp) != M_DATA)
			goto baddata;

		size += MBLKL(bp);
	}

	mac_sdu_get(dsp->ds_mh, NULL, &max_sdu);
	if (size > max_sdu)
		goto baddata;

	/*
	 * Build a packet header.
	 */
	if ((bp = dls_header(dsp, addr, sap, dlp->dl_priority.dl_max,
	    &payload)) == NULL) {
		dl_err = DL_BADADDR;
		goto failed;
	}

	/*
	 * We no longer need the M_PROTO header, so free it.
	 */
	freeb(mp);

	/*
	 * Transfer the checksum offload information if it is present.
	 */
	hcksum_retrieve(payload, NULL, NULL, &start, &stuff, &end, &value,
	    &flags);
	(void) hcksum_assoc(bp, NULL, NULL, start, stuff, end, value, flags, 0);

	/*
	 * Link the payload onto the new header.
	 */
	ASSERT(bp->b_cont == NULL);
	bp->b_cont = payload;

	/*
	 * No lock can be held across modules and putnext()'s,
	 * which can happen here with the call from DLD_TX().
	 */
	if (DLD_TX(dsp, bp, 0, 0) != NULL) {
		/* flow-controlled */
		DLD_SETQFULL(dsp);
	}
	DLD_DATATHR_DCR(dsp);
	return;

failed:
	dlerrorack(q, mp, DL_UNITDATA_REQ, dl_err, 0);
	DLD_DATATHR_DCR(dsp);
	return;

baddata:
	dluderrorind(q, mp, (void *)addr, len, DL_BADDATA, 0);
	DLD_DATATHR_DCR(dsp);
}
Example #23
int
interpret_ah(int flags, uint8_t *hdr, int iplen, int fraglen)
{
	/* LINTED: alignment */
	ah_t *ah = (ah_t *)hdr;
	ah_t *aligned_ah;
	ah_t storage;	/* In case hdr isn't aligned. */
	char *line, *buff;
	uint_t ahlen, auth_data_len;
	uint8_t *auth_data, *data;
	int new_iplen;
	uint8_t proto;

	if (fraglen < sizeof (ah_t))
		return (fraglen);		/* incomplete header */

	if (!IS_P2ALIGNED(hdr, 4)) {
		aligned_ah = (ah_t *)&storage;
		bcopy(hdr, &storage, sizeof (ah_t));
	} else {
		aligned_ah = ah;
	}

	/*
	 * "+ 8" is for the "constant" part that's not included in the AH
	 * length.
	 *
	 * The AH RFC specifies the length field in "length in 4-byte units,
	 * not counting the first 8 bytes".  So if an AH is 24 bytes long,
	 * the length field will contain "4".  (4 * 4 + 8 == 24).
	 */
	ahlen = (aligned_ah->ah_length << 2) + 8;
	fraglen -= ahlen;
	if (fraglen < 0)
		return (fraglen + ahlen);	/* incomplete header */

	auth_data_len = ahlen - sizeof (ah_t);
	auth_data = (uint8_t *)(ah + 1);
	data = auth_data + auth_data_len;

	if (flags & F_SUM) {
		line = (char *)get_sum_line();
		(void) sprintf(line, "AH SPI=0x%x Replay=%u",
		    ntohl(aligned_ah->ah_spi), ntohl(aligned_ah->ah_replay));
		line += strlen(line);
	}

	if (flags & F_DTAIL) {
		show_header("AH:  ", "Authentication Header", ahlen);
		show_space();
		(void) sprintf(get_line((char *)&ah->ah_nexthdr - dlc_header,
		    1), "Next header = %d (%s)", aligned_ah->ah_nexthdr,
		    getproto(aligned_ah->ah_nexthdr));
		(void) sprintf(get_line((char *)&ah->ah_length - dlc_header, 1),
		    "AH length = %d (%d bytes)", aligned_ah->ah_length, ahlen);
		(void) sprintf(get_line((char *)&ah->ah_reserved - dlc_header,
		    2), "<Reserved field = 0x%x>",
		    ntohs(aligned_ah->ah_reserved));
		(void) sprintf(get_line((char *)&ah->ah_spi - dlc_header, 4),
		    "SPI = 0x%x", ntohl(aligned_ah->ah_spi));
		(void) sprintf(get_line((char *)&ah->ah_replay - dlc_header, 4),
		    "Replay = %u", ntohl(aligned_ah->ah_replay));

		/* * 2 for two hex digits per auth_data byte, + 1 for NUL. */
		buff = malloc(auth_data_len * 2 + 1);
		if (buff != NULL) {
			uint_t i;

			for (i = 0; i < auth_data_len; i++)
				(void) sprintf(buff + i * 2, "%02x",
				    auth_data[i]);
		}

		(void) sprintf(get_line((char *)auth_data - dlc_header,
		    auth_data_len), "ICV = %s",
		    (buff == NULL) ? "<out of memory>" : buff);

		/* malloc(3c) says I can call free even if buff == NULL */
		free(buff);

		show_space();
	}

	new_iplen = iplen - ahlen;
	proto = aligned_ah->ah_nexthdr;

	/*
	 * Print IPv6 Extension Headers, or skip them in the summary case.
	 */
	if (proto == IPPROTO_HOPOPTS || proto == IPPROTO_DSTOPTS ||
	    proto == IPPROTO_ROUTING || proto == IPPROTO_FRAGMENT) {
		(void) print_ipv6_extensions(flags, &data, &proto, &iplen,
		    &fraglen);
	}

	if (fraglen > 0)
		switch (proto) {
			case IPPROTO_ENCAP:
				/* LINTED: alignment */
				(void) interpret_ip(flags, (struct ip *)data,
				    new_iplen);
				break;
			case IPPROTO_IPV6:
				(void) interpret_ipv6(flags, (ip6_t *)data,
				    new_iplen);
				break;
			case IPPROTO_ICMP:
				(void) interpret_icmp(flags,
				    /* LINTED: alignment */
				    (struct icmp *)data, new_iplen, fraglen);
				break;
			case IPPROTO_ICMPV6:
				/* LINTED: alignment */
				(void) interpret_icmpv6(flags, (icmp6_t *)data,
				    new_iplen, fraglen);
				break;
			case IPPROTO_TCP:
				(void) interpret_tcp(flags,
				    (struct tcphdr *)data, new_iplen, fraglen);
				break;

			case IPPROTO_ESP:
				(void) interpret_esp(flags, data, new_iplen,
				    fraglen);
				break;

			case IPPROTO_AH:
				(void) interpret_ah(flags, data, new_iplen,
				    fraglen);
				break;

			case IPPROTO_UDP:
				(void) interpret_udp(flags,
				    (struct udphdr *)data, new_iplen, fraglen);
				break;
			/* default case is to not print anything else */
		}

	return (ahlen);
}
Example #24
/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call brk_internal().
 * Returns 0 on success.
 */
static int
brk_lpg(caddr_t nva)
{
	struct proc *p = curproc;
	size_t pgsz, len;
	caddr_t addr, brkend;
	caddr_t bssbase = p->p_bssbase;
	caddr_t brkbase = p->p_brkbase;
	int oszc, szc;
	int err;

	oszc = p->p_brkpageszc;

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk_internal() will initialize it.
	 */
	if (brkbase == 0) {
		return (brk_internal(nva, oszc));
	}

	len = nva - bssbase;

	pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, len, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 */
	if (szc <= oszc) {
		err = brk_internal(nva, oszc);
		/* If failed, back off to base page size. */
		if (err != 0 && oszc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	err = brk_internal(nva, szc);
	/* If using szc failed, map with base page size and return. */
	if (err != 0) {
		if (szc != 0) {
			err = brk_internal(nva, 0);
		}
		return (err);
	}

	/*
	 * Round up brk base to a large page boundary and remap
	 * anything in the segment already faulted in beyond that
	 * point.
	 */
	addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
	brkend = brkbase + p->p_brksize;
	len = brkend - addr;
	/* Check that len is not negative. Update page size code for heap. */
	if (addr >= p->p_bssbase && brkend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_brkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}
Example #25
/*
 * DL_CAPABILITY_REQ
 */
static void
proto_capability_req(dld_str_t *dsp, mblk_t *mp)
{
	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
	dl_capability_sub_t *sp;
	size_t		size, len;
	offset_t	off, end;
	t_uscalar_t	dl_err;
	queue_t		*q = dsp->ds_wq;

	if (MBLKL(mp) < sizeof (dl_capability_req_t)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	if (dsp->ds_dlstate == DL_UNATTACHED ||
	    DL_ACK_PENDING(dsp->ds_dlstate)) {
		dl_err = DL_OUTSTATE;
		goto failed;
	}

	/*
	 * This request is overloaded. If there are no requested capabilities
	 * then we just want to acknowledge with all the capabilities we
	 * support. Otherwise we enable the set of capabilities requested.
	 */
	if (dlp->dl_sub_length == 0) {
		proto_capability_advertise(dsp, mp);
		return;
	}

	if (!MBLKIN(mp, dlp->dl_sub_offset, dlp->dl_sub_length)) {
		dl_err = DL_BADPRIM;
		goto failed;
	}

	dlp->dl_primitive = DL_CAPABILITY_ACK;

	off = dlp->dl_sub_offset;
	len = dlp->dl_sub_length;

	/*
	 * Walk the list of capabilities to be enabled.
	 */
	for (end = off + len; off < end; ) {
		sp = (dl_capability_sub_t *)(mp->b_rptr + off);
		size = sizeof (dl_capability_sub_t) + sp->dl_length;

		if (off + size > end ||
		    !IS_P2ALIGNED(off, sizeof (uint32_t))) {
			dl_err = DL_BADPRIM;
			goto failed;
		}

		switch (sp->dl_cap) {
		/*
		 * TCP/IP checksum offload to hardware.
		 */
		case DL_CAPAB_HCKSUM: {
			dl_capab_hcksum_t *hcksump;
			dl_capab_hcksum_t hcksum;

			hcksump = (dl_capab_hcksum_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(hcksump, &hcksum, sizeof (dl_capab_hcksum_t));
			dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
			bcopy(&hcksum, hcksump, sizeof (dl_capab_hcksum_t));
			break;
		}

		case DL_CAPAB_DLD: {
			dl_capab_dld_t	*dldp;
			dl_capab_dld_t	dld;

			dldp = (dl_capab_dld_t *)&sp[1];
			/*
			 * Copy for alignment.
			 */
			bcopy(dldp, &dld, sizeof (dl_capab_dld_t));
			dlcapabsetqid(&(dld.dld_mid), dsp->ds_rq);
			bcopy(&dld, dldp, sizeof (dl_capab_dld_t));
			break;
		}
		default:
			break;
		}
		off += size;
	}
	qreply(q, mp);
	return;
failed:
	dlerrorack(q, mp, DL_CAPABILITY_REQ, dl_err, 0);
}
Example #26
static int
fmd_ckpt_open(fmd_ckpt_t *ckp, fmd_module_t *mp)
{
	struct stat64 st;
	uint64_t seclen;
	uint_t i;
	int err;

	bzero(ckp, sizeof (fmd_ckpt_t));
	ckp->ckp_mp = mp;

	(void) snprintf(ckp->ckp_src, PATH_MAX, "%s/%s",
	    mp->mod_ckpt, mp->mod_name);

	if ((ckp->ckp_fd = open(ckp->ckp_src, O_RDONLY)) == -1)
		return (-1); /* failed to open checkpoint file */

	if (fstat64(ckp->ckp_fd, &st) == -1) {
		err = errno;
		(void) close(ckp->ckp_fd);
		return (fmd_set_errno(err));
	}

	ckp->ckp_buf = fmd_alloc(st.st_size, FMD_SLEEP);
	ckp->ckp_hdr = (void *)ckp->ckp_buf;
	ckp->ckp_size = read(ckp->ckp_fd, ckp->ckp_buf, st.st_size);

	if (ckp->ckp_size != st.st_size || ckp->ckp_size < sizeof (fcf_hdr_t) ||
	    ckp->ckp_size != ckp->ckp_hdr->fcfh_filesz) {
		err = ckp->ckp_size == (size_t)-1L ? errno : EFMD_CKPT_SHORT;
		fmd_free(ckp->ckp_buf, st.st_size);
		(void) close(ckp->ckp_fd);
		return (fmd_set_errno(err));
	}

	(void) close(ckp->ckp_fd);
	ckp->ckp_fd = -1;

	/*
	 * Once we've read in a consistent copy of the FCF file and we're sure
	 * the header can be accessed, go through it and make sure everything
	 * is valid.  We also check that unused bits are zero so we can expand
	 * to use them safely in the future and support old files if needed.
	 */
	if (bcmp(&ckp->ckp_hdr->fcfh_ident[FCF_ID_MAG0],
	    FCF_MAG_STRING, FCF_MAG_STRLEN) != 0)
		return (fmd_ckpt_inval(ckp, "bad checkpoint magic string\n"));

	if (ckp->ckp_hdr->fcfh_ident[FCF_ID_MODEL] != FCF_MODEL_NATIVE)
		return (fmd_ckpt_inval(ckp, "bad checkpoint data model\n"));

	if (ckp->ckp_hdr->fcfh_ident[FCF_ID_ENCODING] != FCF_ENCODE_NATIVE)
		return (fmd_ckpt_inval(ckp, "bad checkpoint data encoding\n"));

	if (ckp->ckp_hdr->fcfh_ident[FCF_ID_VERSION] != FCF_VERSION_1) {
		return (fmd_ckpt_inval(ckp, "bad checkpoint version %u\n",
		    ckp->ckp_hdr->fcfh_ident[FCF_ID_VERSION]));
	}

	for (i = FCF_ID_PAD; i < FCF_ID_SIZE; i++) {
		if (ckp->ckp_hdr->fcfh_ident[i] != 0) {
			return (fmd_ckpt_inval(ckp,
			    "bad checkpoint padding at id[%d]", i));
		}
	}

	if (ckp->ckp_hdr->fcfh_flags & ~FCF_FL_VALID)
		return (fmd_ckpt_inval(ckp, "bad checkpoint flags\n"));

	if (ckp->ckp_hdr->fcfh_pad != 0)
		return (fmd_ckpt_inval(ckp, "reserved field in use\n"));

	if (ckp->ckp_hdr->fcfh_hdrsize < sizeof (fcf_hdr_t) ||
	    ckp->ckp_hdr->fcfh_secsize < sizeof (fcf_sec_t)) {
		return (fmd_ckpt_inval(ckp,
		    "bad header and/or section size\n"));
	}

	seclen = (uint64_t)ckp->ckp_hdr->fcfh_secnum *
	    (uint64_t)ckp->ckp_hdr->fcfh_secsize;

	if (ckp->ckp_hdr->fcfh_secoff > ckp->ckp_size ||
	    seclen > ckp->ckp_size ||
	    ckp->ckp_hdr->fcfh_secoff + seclen > ckp->ckp_size ||
	    ckp->ckp_hdr->fcfh_secoff + seclen < ckp->ckp_hdr->fcfh_secoff)
		return (fmd_ckpt_inval(ckp, "truncated section headers\n"));

	if (!IS_P2ALIGNED(ckp->ckp_hdr->fcfh_secoff, sizeof (uint64_t)) ||
	    !IS_P2ALIGNED(ckp->ckp_hdr->fcfh_secsize, sizeof (uint64_t)))
		return (fmd_ckpt_inval(ckp, "misaligned section headers\n"));

	/*
	 * Once the header is validated, iterate over the section headers
	 * ensuring that each one is valid w.r.t. offset, alignment, and size.
	 * We also pick up the string table pointer during this pass.
	 */
	ckp->ckp_secp = (void *)(ckp->ckp_buf + ckp->ckp_hdr->fcfh_secoff);
	ckp->ckp_secs = ckp->ckp_hdr->fcfh_secnum;

	for (i = 0; i < ckp->ckp_secs; i++) {
		fcf_sec_t *sp = (void *)(ckp->ckp_buf +
		    ckp->ckp_hdr->fcfh_secoff + ckp->ckp_hdr->fcfh_secsize * i);

		const fmd_ckpt_desc_t *dp;

		/*
		 * Validate fcfs_type before using it to index the
		 * section descriptor table.
		 */
		if (sp->fcfs_type >= sizeof (_fmd_ckpt_sections) /
		    sizeof (_fmd_ckpt_sections[0])) {
			return (fmd_ckpt_inval(ckp, "section %u has unknown "
			    "section type %u\n", i, sp->fcfs_type));
		}

		dp = &_fmd_ckpt_sections[sp->fcfs_type];

		if (sp->fcfs_flags != 0) {
			return (fmd_ckpt_inval(ckp, "section %u has invalid "
			    "section flags (0x%x)\n", i, sp->fcfs_flags));
		}

		if (sp->fcfs_align & (sp->fcfs_align - 1)) {
			return (fmd_ckpt_inval(ckp, "section %u has invalid "
			    "alignment (%u)\n", i, sp->fcfs_align));
		}

		if (sp->fcfs_offset & (sp->fcfs_align - 1)) {
			return (fmd_ckpt_inval(ckp, "section %u is not properly"
			    " aligned (offset %llu)\n", i, sp->fcfs_offset));
		}

		if (sp->fcfs_entsize != 0 &&
		    (sp->fcfs_entsize & (sp->fcfs_align - 1)) != 0) {
			return (fmd_ckpt_inval(ckp, "section %u has misaligned "
			    "entsize %u\n", i, sp->fcfs_entsize));
		}

		if (sp->fcfs_offset > ckp->ckp_size ||
		    sp->fcfs_size > ckp->ckp_size ||
		    sp->fcfs_offset + sp->fcfs_size > ckp->ckp_size ||
		    sp->fcfs_offset + sp->fcfs_size < sp->fcfs_offset) {
			return (fmd_ckpt_inval(ckp, "section %u has corrupt "
			    "size or offset\n", i));
		}

		if (sp->fcfs_align != dp->secd_align) {
			return (fmd_ckpt_inval(ckp, "section %u has align %u "
			    "(not %u)\n", i, sp->fcfs_align, dp->secd_align));
		}

		if (sp->fcfs_size < dp->secd_size ||
		    sp->fcfs_entsize < dp->secd_entsize) {
			return (fmd_ckpt_inval(ckp, "section %u has short "
			    "size or entsize\n", i));
		}

		switch (sp->fcfs_type) {
		case FCF_SECT_STRTAB:
			if (ckp->ckp_strs != NULL) {
				return (fmd_ckpt_inval(ckp, "multiple string "
				    "tables are present in checkpoint file\n"));
			}

			ckp->ckp_strs = (char *)ckp->ckp_buf + sp->fcfs_offset;
			ckp->ckp_strn = sp->fcfs_size;

			if (ckp->ckp_strs[ckp->ckp_strn - 1] != '\0') {
				return (fmd_ckpt_inval(ckp, "string table %u "
				    "is missing terminating nul byte\n", i));
			}
			break;

		case FCF_SECT_MODULE:
			if (ckp->ckp_modp != NULL) {
				return (fmd_ckpt_inval(ckp, "multiple module "
				    "sects are present in checkpoint file\n"));
			}
			ckp->ckp_modp = sp;
			break;
		}
	}

	/*
	 * Ensure that the first section is an empty one of type FCF_SECT_NONE.
	 * This is done to ensure that links can use index 0 as a null section.
	 */
	if (ckp->ckp_secs == 0 || ckp->ckp_secp->fcfs_type != FCF_SECT_NONE ||
	    ckp->ckp_secp->fcfs_entsize != 0 || ckp->ckp_secp->fcfs_size != 0) {
		return (fmd_ckpt_inval(ckp, "section 0 is not of the "
		    "appropriate size and/or attributes (SECT_NONE)\n"));
	}

	if (ckp->ckp_modp == NULL) {
		return (fmd_ckpt_inval(ckp,
		    "no module section found in file\n"));
	}

	return (0);
}
Example #27
static rpc_inline_t *
xdrmblk_inline(XDR *xdrs, int len)
{
	rpc_inline_t *buf;
	mblk_t *m;
	unsigned char **mptr;
	struct xdrmblk_params *p;

	/*
	 * Can't inline XDR_FREE calls; it doesn't make sense.
	 */
	if (xdrs->x_op == XDR_FREE)
		return (NULL);

#ifdef DEBUG
	if (!do_xdrmblk_inline) {
		xdrmblk_inline_misses++;
		return (NULL);
	}
#endif

	if (xdrs->x_op == XDR_DECODE)
		xdrmblk_skip_fully_read_mblks(xdrs);

	/*
	 * Can't inline if there isn't enough room.
	 */
	if (len <= 0 || xdrs->x_handy < len) {
#ifdef DEBUG
		xdrmblk_inline_misses++;
#endif
		return (NULL);
	}

	/* LINTED pointer alignment */
	m = (mblk_t *)xdrs->x_base;
	ASSERT(m != NULL);

	if (xdrs->x_op == XDR_DECODE) {
		/* LINTED pointer alignment */
		mptr = &m->b_rptr;
	} else {
		/* LINTED pointer alignment */
		mptr = &m->b_wptr;
	}

	/*
	 * Can't inline if the buffer is not 4 byte aligned, or if there is
	 * more than one reference to the data block associated with this mblk.
	 * This last check is used because the caller may want to modify the
	 * data in the inlined portion and someone else is holding a reference
	 * to the data who may not want it to be modified.
	 */
	if (!IS_P2ALIGNED(*mptr, sizeof (int32_t)) ||
	    m->b_datap->db_ref != 1) {
#ifdef DEBUG
		xdrmblk_inline_misses++;
#endif
		return (NULL);
	}

	buf = (rpc_inline_t *)*mptr;

	p = (struct xdrmblk_params *)xdrs->x_private;

	*mptr += len;
	xdrs->x_handy -= len;
	p->rpos += len;

#ifdef DEBUG
	xdrmblk_inline_hits++;
#endif

	return (buf);
}
Example #28
static bool_t
xdrmblk_control(XDR *xdrs, int request, void *info)
{
	mblk_t *m;
	struct xdrmblk_params *p;
	int32_t *int32p;
	int len;

	switch (request) {
	case XDR_PEEK:
		xdrmblk_skip_fully_read_mblks(xdrs);

		/*
		 * Return the next 4 byte unit in the XDR stream.
		 */
		if (xdrs->x_handy < sizeof (int32_t))
			return (FALSE);

		/* LINTED pointer alignment */
		m = (mblk_t *)xdrs->x_base;
		ASSERT(m != NULL);

		/*
		 * If the pointer is not aligned, fail the peek
		 */
		if (!IS_P2ALIGNED(m->b_rptr, sizeof (int32_t)))
			return (FALSE);

		int32p = (int32_t *)info;
		/* LINTED pointer alignment */
		*int32p = ntohl(*((int32_t *)(m->b_rptr)));
		return (TRUE);

	case XDR_SKIPBYTES:
		int32p = (int32_t *)info;
		len = RNDUP((int)(*int32p));
		if (len < 0)
			return (FALSE);
		if (len == 0)
			return (TRUE);

		/* LINTED pointer alignment */
		m = (mblk_t *)xdrs->x_base;
		if (m == NULL)
			return (FALSE);

		p = (struct xdrmblk_params *)xdrs->x_private;

		while (xdrs->x_handy < len) {
			if (xdrs->x_handy > 0) {
				m->b_rptr += xdrs->x_handy;
				len -= xdrs->x_handy;
				p->rpos += xdrs->x_handy;
			}
			m = m->b_cont;
			xdrs->x_base = (caddr_t)m;
			p->apos += p->rpos;
			p->rpos = 0;
			if (m == NULL) {
				xdrs->x_handy = 0;
				return (FALSE);
			}
			xdrs->x_handy = (int)MBLKL(m);
		}

		xdrs->x_handy -= len;
		p->rpos += len;
		m->b_rptr += len;
		return (TRUE);

	default:
		return (FALSE);
	}
}
Example #29
/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call grow_internal().
 * Returns 0 on success.
 */
static int
grow_lpg(caddr_t sp)
{
	struct proc *p = curproc;
	size_t pgsz;
	size_t len, newsize;
	caddr_t addr, saddr;
	caddr_t growend;
	int oszc, szc;
	int err;

	newsize = p->p_usrstack - sp;

	oszc = p->p_stkpageszc;
	pgsz = map_pgsz(MAPPGSZ_STK, p, sp, newsize, 0);
	szc = page_szc(pgsz);

	/*
	 * Covers two cases:
	 * 1. page_szc() returns -1 for invalid page size, so we want to
	 * ignore it in that case.
	 * 2. By design we never decrease page size, as it is more stable.
	 * This shouldn't happen as the stack never shrinks.
	 */
	if (szc <= oszc) {
		err = grow_internal(sp, oszc);
		/* failed, fall back to base page size */
		if (err != 0 && oszc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * We've grown sufficiently to switch to a new page size.
	 * So we are going to remap the whole segment with the new page size.
	 */
	err = grow_internal(sp, szc);
	/* The grow with szc failed, so fall back to base page size. */
	if (err != 0) {
		if (szc != 0) {
			err = grow_internal(sp, 0);
		}
		return (err);
	}

	/*
	 * Round up stack pointer to a large page boundary and remap
	 * any pgsz pages in the segment already faulted in beyond that
	 * point.
	 */
	saddr = p->p_usrstack - p->p_stksize;
	addr = (caddr_t)P2ROUNDUP((uintptr_t)saddr, pgsz);
	growend = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack, pgsz);
	len = growend - addr;
	/* Check that len is not negative. Update page size code for stack. */
	if (addr >= saddr && growend > addr && IS_P2ALIGNED(len, pgsz)) {
		(void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
		p->p_stkpageszc = szc;
	}

	ASSERT(err == 0);
	return (err);		/* should always be 0 */
}
Example #30
/*
 * This routine remaps the kernel using large ttes
 * All entries except locked ones will be removed from the tlb.
 * It assumes that both the text and data segments reside in a separate
 * 4MB virtual and physical contiguous memory chunk.  This routine
 * is only executed by the first cpu.  The remaining cpus execute
 * sfmmu_mp_startup() instead.
 * XXX It assumes that the start of the text segment is KERNELBASE.  It should
 * actually be based on start.
 */
void
sfmmu_remap_kernel(void)
{
	pfn_t	pfn;
	uint_t	attr;
	int	flags;

	extern char end[];
	extern struct as kas;

	textva = (caddr_t)(KERNELBASE & MMU_PAGEMASK4M);
	pfn = va_to_pfn(textva);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel text pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_TEXT | HAT_NOSYNC;
	flags = HAT_LOAD_LOCK | SFMMU_NO_TSBLOAD;
	sfmmu_memtte(&ktext_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.
	 */
	TTE_SET_LOCKED(&ktext_tte);
	sfmmu_tteload(kas.a_hat, &ktext_tte, textva, NULL, flags);

	datava = (caddr_t)((uintptr_t)end & MMU_PAGEMASK4M);
	pfn = va_to_pfn(datava);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel data pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_DATA | HAT_NOSYNC;
	sfmmu_memtte(&kdata_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.  We also set the mod bit to avoid taking dirty bit
	 * traps on kernel data.
	 */
	TTE_SET_LOCKED(&kdata_tte);
	TTE_SET_LOFLAGS(&kdata_tte, 0, TTE_HWWR_INT);
	sfmmu_tteload(kas.a_hat, &kdata_tte, datava,
	    (struct page *)NULL, flags);

	/*
	 * create bigktsb ttes if necessary.
	 */
	if (enable_bigktsb) {
		int i = 0;
		caddr_t va = ktsb_base;
		size_t tsbsz = ktsb_sz;
		tte_t tte;

		ASSERT(va >= datava + MMU_PAGESIZE4M);
		ASSERT(tsbsz >= MMU_PAGESIZE4M);
		ASSERT(IS_P2ALIGNED(tsbsz, tsbsz));
		ASSERT(IS_P2ALIGNED(va, tsbsz));
		attr = PROC_DATA | HAT_NOSYNC;
		while (tsbsz != 0) {
			ASSERT(i < MAX_BIGKTSB_TTES);
			pfn = va_to_pfn(va);
			ASSERT(pfn != PFN_INVALID);
			ASSERT((pfn & ~TTE_PFNMASK(TTE4M)) == 0);
			sfmmu_memtte(&tte, pfn, attr, TTE4M);
			ASSERT(TTE_IS_MOD(&tte));
			/*
			 * No need to lock if we use physical addresses.
			 * Since we invalidate the kernel TSB using virtual
			 * addresses, it's an optimization to load them now
			 * so that we won't have to load them later.
			 */
			if (!ktsb_phys) {
				TTE_SET_LOCKED(&tte);
			}
			sfmmu_tteload(kas.a_hat, &tte, va, NULL, flags);
			bigktsb_ttes[i] = tte;
			va += MMU_PAGESIZE4M;
			tsbsz -= MMU_PAGESIZE4M;
			i++;
		}
		bigktsb_nttes = i;
	}

	sfmmu_set_tlb();
}