Example #1
/*
 * sppp_dlbindreq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Perform DL_BIND_REQ request, called by sppp_mproto.
 */
static int
sppp_dlbindreq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	sppa_t			*ppa;
	union DL_primitives	*dlp;
	spppreqsap_t		req_sap;
	int			error = 0;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	dlp = (union DL_primitives *)mp->b_rptr;
	req_sap = dlp->bind_req.dl_sap;
	ASSERT(sps != NULL);
	ASSERT(!IS_SPS_PIOATTACH(sps));
	ASSERT(sps->sps_dlstate == DL_UNBOUND);

	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		DBGERROR((CE_CONT, "DLPI bind: no attached ppa\n"));
		error = DL_OUTSTATE;
	} else if ((req_sap != ETHERTYPE_IP) && (req_sap != ETHERTYPE_IPV6) &&
	    (req_sap != ETHERTYPE_ALLSAP)) {
		DBGERROR((CE_CONT, "DLPI bind: unknown SAP %x\n", req_sap));
		error = DL_BADADDR;
	}
	if (error != 0) {
		dlerrorack(q, mp, dlp->dl_primitive, error, 0);
	} else {
		qwriter(q, mp, sppp_dl_bind, PERIM_INNER);
	}
	return (0);
}
Example #2
File: mesh.cpp  Project: kovacsv/RayTracer
bool Mesh::Triangle::Check (UIndex materialCount, UIndex vertexCount, UIndex texCoordCount, UIndex userDefinedVertexNormalCount, UIndex calculatedVertexNormalCount) const
{
	if (DBGERROR (material >= materialCount)) {
		return false;
	}

	if (DBGERROR (vertex0 >= vertexCount || vertex1 >= vertexCount || vertex2 >= vertexCount)) {
		return false;
	}

	if (DBGERROR (texCoord0 >= texCoordCount || texCoord1 >= texCoordCount || texCoord2 >= texCoordCount)) {
		return false;
	}

	if (normalMode == UserDefined) {
		if (DBGERROR (normal0 >= userDefinedVertexNormalCount || normal1 >= userDefinedVertexNormalCount || normal2 >= userDefinedVertexNormalCount)) {
			return false;
		}
	} else if (normalMode == Calculated) {
		if (curveGroup != NonCurved) {
			if (DBGERROR (normal0 >= calculatedVertexNormalCount || normal1 >= calculatedVertexNormalCount || normal2 >= calculatedVertexNormalCount)) {
				return false;
			}
		}
	}

	return true;
}
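
In the RayTracer excerpts, DBGERROR is used as an expression: it takes a condition, reports it when it holds, and yields the condition back so the caller can return early. The project's actual macro is not part of these excerpts; a minimal sketch of an expression-style macro with that behaviour (names and output format assumed) might look like:

#include <cstdio>

// Hypothetical sketch only; not the definition used by kovacsv/RayTracer.
inline bool DbgErrorReport (bool condition, const char* expr, const char* file, int line)
{
	if (condition) {
		std::fprintf (stderr, "DBGERROR (%s) at %s:%d\n", expr, file, line);
	}
	return condition;	// lets callers write: if (DBGERROR (x)) { return false; }
}

#define DBGERROR(condition) DbgErrorReport ((condition), #condition, __FILE__, __LINE__)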
Example #3
/*
 * sppp_mproto()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Handle M_PCPROTO/M_PROTO messages, called by sppp_uwput.
 */
int
sppp_mproto(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	union DL_primitives *dlp;
	struct sppp_dlpi_pinfo_t *dpi;
	t_uscalar_t	prim;
	int		len;
	int		error = 0;

	ASSERT(!IS_SPS_CONTROL(sps));
	if ((len = MBLKL(mp)) < sizeof (t_uscalar_t)) {
		DBGERROR((CE_CONT, "bad mproto: block length %d\n", len));
		merror(q, mp, EPROTO);
		return (0);
	}
	dlp = (union DL_primitives *)mp->b_rptr;
	prim = dlp->dl_primitive;
	if (prim > DL_MAXPRIM) {
		DBGERROR((CE_CONT, "bad mproto: primitive %d > %d\n", prim,
		    DL_MAXPRIM));
		error = DL_BADPRIM;
	} else {
		dpi = &dl_pinfo[prim];
		if (dpi->pi_funcp == NULL) {
			DBGERROR((CE_CONT,
			    "bad mproto: primitive %d not supported\n", prim));
			error = DL_NOTSUPPORTED;
		} else if (len < dpi->pi_minlen) {
			DBGERROR((CE_CONT,
			    "bad mproto: primitive len %d < %d\n", len,
			    dpi->pi_minlen));
			error = DL_BADPRIM;
		} else if (dpi->pi_state != -1 &&
		    sps->sps_dlstate != dpi->pi_state) {
			DBGERROR((CE_CONT,
			    "bad state %d != %d for primitive %d\n",
			    sps->sps_dlstate, dpi->pi_state, prim));
			error = DL_OUTSTATE;
		}
	}
	if (error != 0) {
		dlerrorack(q, mp, dlp->dl_primitive, error, 0);
		return (0);
	}
#ifdef DBG_DLPI
	{
		const char *cp = prim2name(prim);
		if (cp != NULL)
			cmn_err(CE_CONT, "/%d: Dispatching %s\n",
			    sps->sps_mn_id, cp);
		else
			cmn_err(CE_CONT,
			    "/%d: Dispatching unknown primitive %d\n",
			    sps->sps_mn_id, prim);
	}
#endif
	return ((*dpi->pi_funcp)(q, mp, sps));
}
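
sppp_mproto() validates each primitive against a per-primitive descriptor in dl_pinfo[] before dispatching to its handler. The table definition is not part of this excerpt; based only on the three fields referenced above, an entry could be sketched roughly as follows (field types and ordering are assumptions, and the usual STREAMS/DLPI headers are presumed):

/*
 * Rough sketch of the descriptor shape consulted by sppp_mproto(); the real
 * struct sppp_dlpi_pinfo_t in the driver may differ and carry more fields.
 */
struct sppp_dlpi_pinfo_t {
	int		pi_minlen;	/* minimum acceptable M_PROTO length */
	t_scalar_t	pi_state;	/* required DLPI state, or -1 for "any" */
	int		(*pi_funcp)(queue_t *, mblk_t *, spppstr_t *);
					/* handler, e.g. sppp_dlbindreq or sppp_dlinforeq */
};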
Example #4
File: mesh.cpp  Project: kovacsv/RayTracer
void Mesh::SetVertex (UIndex index, const Mesh::Vertex& vertex)
{
	if (DBGERROR (finalized)) {
		return;
	}
	vertices[index] = vertex;
}
Example #5
File: mesh.cpp  Project: kovacsv/RayTracer
void Mesh::SetDoubleSided (bool isDoubleSided)
{
	if (DBGERROR (finalized)) {
		return;
	}
	doubleSided = isDoubleSided;
}
Example #6
File: mesh.cpp  Project: kovacsv/RayTracer
UIndex Mesh::AddNormal (const Vec3& normal)
{
	if (DBGERROR (finalized)) {
		return InvalidIndex;
	}
	userDefinedVertexNormals.push_back (Normalize (normal));
	return userDefinedVertexNormals.size () - 1;
}
Example #7
File: mesh.cpp  Project: kovacsv/RayTracer
UIndex Mesh::AddTexCoord (const Vec2& texCoord)
{
	if (DBGERROR (finalized)) {
		return InvalidIndex;
	}
	texCoords.push_back (texCoord);
	return texCoords.size () - 1;
}
Example #8
File: mesh.cpp  Project: kovacsv/RayTracer
UIndex Mesh::AddTriangle (const Triangle& triangle)
{
	if (DBGERROR (finalized)) {
		return InvalidIndex;
	}
	triangles.push_back (triangle);
	return triangles.size () - 1;
}
Example #9
File: mesh.cpp  Project: kovacsv/RayTracer
UIndex Mesh::AddVertex (const Vertex& vertex)
{
	if (DBGERROR (finalized)) {
		return InvalidIndex;
	}
	vertices.push_back (vertex);
	return vertices.size () - 1;
}
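
The Add* helpers above share one pattern: they refuse to touch a finalized mesh and return the index of the element they appended. A rough build sequence using them could look like this (the Vertex and Triangle construction details are assumed here; the excerpts only show that Vertex carries a pos member and that Triangle stores vertex and material indices):

// Hypothetical usage sketch; member/constructor details are assumptions.
Mesh mesh;
UIndex v0 = mesh.AddVertex (Mesh::Vertex (Vec3 (0.0, 0.0, 0.0)));
UIndex v1 = mesh.AddVertex (Mesh::Vertex (Vec3 (1.0, 0.0, 0.0)));
UIndex v2 = mesh.AddVertex (Mesh::Vertex (Vec3 (0.0, 1.0, 0.0)));

Mesh::Triangle triangle;		// assumed default-constructible
triangle.vertex0 = v0;
triangle.vertex1 = v1;
triangle.vertex2 = v2;
triangle.material = 0;			// must stay below materialCount for Check () to pass
mesh.AddTriangle (triangle);

mesh.Finalize ();			// fills missing texCoords and computes normals (Example #18)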
Example #10
int bufFillByte(Buffer_t* buf, BYTE v)
{
    // sanity checks
    if (!buf)    { DBGERROR("bufFillByte() error: no buffer\n"); return 1; }

    // fill payload
    memset(buf->payload, v, buf->m.size);
    return 0;
}
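
A possible calling sequence for the buffer helpers excerpted here (CreateBuffer is shown in Example #16; FreeBuffer is only referenced there, so its behaviour is assumed):

// Hypothetical usage; error handling kept minimal.
Buffer_t* buf = CreateBuffer(640, 480, 4);	/* 4-byte samples: size = 640*480*4 */
if (buf) {
    bufFillByte(buf, 0x00);			/* zero the whole payload */
    FreeBuffer(buf);				/* defined elsewhere in the project */
}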
Example #11
File: vec3.cpp  Project: kovacsv/RayTracer
Vec3 Normalize (const Vec3& vec)
{
	double length = sqrt (vec.x * vec.x + vec.y * vec.y + vec.z * vec.z);
	if (DBGERROR (IsZero (length))) {
		return vec;
	}

	return vec * (1.0 / length);
}
Example #12
File: mesh.cpp  Project: kovacsv/RayTracer
bool Mesh::Check (UIndex materialCount) const
{
	if (DBGERROR (triangles.size () != calculatedTriangleNormals.size ())) {
		return false;
	}

	UIndex vertexCount = vertices.size ();
	UIndex texCoordCount = texCoords.size ();
	UIndex userDefinedVertexNormalCount = userDefinedVertexNormals.size ();
	UIndex calculatedVertexNormalCount = calculatedVertexNormals.size ();
	for (UIndex i = 0; i < triangles.size (); i++) {
		const Triangle& triangle = triangles[i];
		if (DBGERROR (!triangle.Check (materialCount, vertexCount, texCoordCount, userDefinedVertexNormalCount, calculatedVertexNormalCount))) {
			return false;
		}
	}
	return true;
}
Example #13
File: model.cpp  Project: kovacsv/RayTracer
bool Model::Check () const
{
	UIndex materialCount = materials.size ();
	for (UIndex i = 0; i < geometry.size (); i++) {
		if (DBGERROR (!geometry[i].Check (materialCount))) {
			return false;
		}
	}
	return true;
}
Example #14
File: mesh.cpp  Project: kovacsv/RayTracer
void Mesh::Transform (const Transformation& transformation)
{
	if (DBGERROR (finalized)) {
		return;
	}
	for (UIndex i = 0; i < vertices.size (); i++) {
		vertices[i].pos = transformation.Apply (vertices[i].pos);
	}
	for (UIndex i = 0; i < userDefinedVertexNormals.size (); i++) {
		userDefinedVertexNormals[i] = transformation.ApplyRotation (userDefinedVertexNormals[i]);
	}
}
Example #15
/*
 * sppp_dlpromiscoffreq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Perform DL_PROMISCOFF_REQ request, called by sppp_mproto.
 */
static int
sppp_dlpromiscoffreq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	t_uscalar_t	level;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	level = ((dl_promiscoff_req_t *)mp->b_rptr)->dl_level;
	ASSERT(sps != NULL);

	if (!IS_SPS_PROMISC(sps)) {
		DBGERROR((CE_CONT, "DLPI promiscoff: not promiscuous\n"));
		dlerrorack(q, mp, DL_PROMISCOFF_REQ, DL_NOTENAB, 0);
	} else if ((level != DL_PROMISC_PHYS) && (level != DL_PROMISC_SAP) &&
	    (level != DL_PROMISC_MULTI)) {
		dlerrorack(q, mp, DL_PROMISCOFF_REQ, DL_NOTSUPPORTED, 0);
		DBGERROR((CE_CONT, "DLPI promiscoff: bad level %d\n", level));
	} else {
		qwriter(q, mp, sppp_dl_promiscoff, PERIM_INNER);
	}
	return (0);
}
Example #16
Buffer_t * CreateBuffer(int width, int height, int sample_sz) {
    Buffer_t* out     = (Buffer_t*)malloc(sizeof(Buffer_t));
    if (!out) {
        DBGERROR("out of memory\n");
        return NULL;
    }
    out->m.height     = height;
    out->m.width      = width;
    out->m.sample_sz  = sample_sz;
    out->m.pitch      = out->m.sample_sz * out->m.width;
    out->m.size       = out->m.pitch * out->m.height;
    out->payload      = (BYTE*)malloc(out->m.size);
    if (!out->payload) {
        DBGERROR("out of memory");
        FreeBuffer(out);
        return NULL;
    }
    return out;
}
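
Note that CreateBuffer calls FreeBuffer on a partially constructed buffer when the payload allocation fails, so FreeBuffer must tolerate a NULL payload (and ideally a NULL buffer pointer). The real function is not part of this excerpt; a minimal sketch consistent with that usage, not the project's actual implementation:

void FreeBuffer(Buffer_t* buf)
{
    if (!buf) { return; }	/* mirror the other sanity checks */
    free(buf->payload);		/* free(NULL) is a no-op, so the failed-alloc path is safe */
    free(buf);
}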
Example #17
/*
 * sppp_dlinforeq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Perform DL_INFO_REQ request, called by sppp_mproto.
 */
static int
sppp_dlinforeq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	dl_info_ack_t	*dlip;
	uint32_t	size;
	uint32_t	addr_size;
	sppa_t		*ppa;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ppa = sps->sps_ppa;

	/* Exchange current msg for a DL_INFO_ACK. */
	addr_size = SPPP_ADDRL;
	size = sizeof (dl_info_ack_t) + addr_size;
	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL) {
		DBGERROR((CE_CONT, "DLPI info: mexchange failed\n"));
		/* mexchange already sent up an merror ENOSR */
		return (0);
	}
	/* Fill in DL_INFO_ACK fields and reply */
	dlip = (dl_info_ack_t *)mp->b_rptr;
	*dlip = sppp_infoack;
	dlip->dl_current_state = sps->sps_dlstate;
	dlip->dl_max_sdu = ppa != NULL ? ppa->ppa_mtu : PPP_MAXMTU;
#ifdef DBG_DLPI
	{
		const char *cp = state2name(dlip->dl_current_state);
		if (cp != NULL)
			cmn_err(CE_CONT, "info returns state %s, max sdu %d\n",
			    cp, dlip->dl_max_sdu);
		else
			cmn_err(CE_CONT, "info returns state %d, max sdu %d\n",
			    dlip->dl_current_state, dlip->dl_max_sdu);
	}
#endif
	qreply(q, mp);
	return (0);
}
Example #18
File: mesh.cpp  Project: kovacsv/RayTracer
void Mesh::Finalize ()
{
	if (DBGERROR (finalized)) {
		return;
	}

	calculatedTriangleNormals.clear ();
	calculatedVertexNormals.clear ();

	bool needVertexNormals = false;
	for (UIndex i = 0; i < triangles.size (); i++) {
		Triangle& triangle = triangles[i];
		if (triangle.curveGroup != Mesh::NonCurved) {
			needVertexNormals = true;
		}
		if (triangle.texCoord0 == InvalidIndex) {
			texCoords.push_back (Vec2 (0.0, 0.0));
			triangle.texCoord0 = texCoords.size () - 1;
		}
		if (triangle.texCoord1 == InvalidIndex) {
			texCoords.push_back (Vec2 (0.0, 0.0));
			triangle.texCoord1 = texCoords.size () - 1;
		}
		if (triangle.texCoord2 == InvalidIndex) {
			texCoords.push_back (Vec2 (0.0, 0.0));
			triangle.texCoord2 = texCoords.size () - 1;
		}
		calculatedTriangleNormals.push_back (CalculateTriangleNormal (i));
	}

	if (needVertexNormals) {
		CalculateVertexNormals ();
	}

	CalculateBoundingShapes ();
	CalculateOctree ();

	finalized = true;
}
Example #19
/*
 * sppp_dlpromisconreq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Perform DL_PROMISCON_REQ request, called by sppp_mproto.
 */
static int
sppp_dlpromisconreq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	t_uscalar_t	level;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	level = ((dl_promiscon_req_t *)mp->b_rptr)->dl_level;
	ASSERT(sps != NULL);

	/* snoop issues DL_PROMISCON_REQ more than once. */
	if (IS_SPS_PROMISC(sps)) {
		dlokack(q, mp, DL_PROMISCON_REQ);
	} else if ((level != DL_PROMISC_PHYS) && (level != DL_PROMISC_SAP) &&
	    (level != DL_PROMISC_MULTI)) {
		DBGERROR((CE_CONT, "DLPI promiscon: bad level %d\n", level));
		dlerrorack(q, mp, DL_PROMISCON_REQ, DL_NOTSUPPORTED, 0);
	} else {
		qwriter(q, mp, sppp_dl_promiscon, PERIM_INNER);
	}
	return (0);
}
Example #20
/*
 * sppp_dlattachreq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Perform DL_ATTACH_REQ request, called by sppp_mproto.
 */
static int
sppp_dlattachreq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	int	error = 0;
	union DL_primitives *dlp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	dlp = (union DL_primitives *)mp->b_rptr;
	ASSERT(sps != NULL);
	ASSERT(sps->sps_dlstate == DL_UNATTACHED);

	if (IS_SPS_PIOATTACH(sps)) {
		DBGERROR((CE_CONT, "DLPI attach: already attached\n"));
		error = EINVAL;
	}
	if (error != 0) {
		dlerrorack(q, mp, dlp->dl_primitive, DL_OUTSTATE, error);
	} else {
		qwriter(q, mp, sppp_dl_attach_upper, PERIM_OUTER);
	}
	return (0);
}
Example #21
/*
 * sppp_dl_attach_upper()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Called by qwriter (OUTER) from sppp_dlattachreq as the result of
 *    receiving a DL_ATTACH_REQ message.
 */
static void
sppp_dl_attach_upper(queue_t *q, mblk_t *mp)
{
	sppa_t		*ppa;
	spppstr_t	*sps = q->q_ptr;
	union DL_primitives *dlp;
	int		err = ENOMEM;
	cred_t		*cr;
	zoneid_t	zoneid;

	ASSERT(!IS_SPS_PIOATTACH(sps));
	dlp = (union DL_primitives *)mp->b_rptr;

	/* If there's something here, it's detached. */
	if (sps->sps_ppa != NULL) {
		sppp_remove_ppa(sps);
	}

	if ((cr = msg_getcred(mp, NULL)) == NULL)
		zoneid = sps->sps_zoneid;
	else
		zoneid = crgetzoneid(cr);

	ppa = sppp_find_ppa(dlp->attach_req.dl_ppa);
	if (ppa == NULL) {
		ppa = sppp_create_ppa(dlp->attach_req.dl_ppa, zoneid);
	} else if (ppa->ppa_zoneid != zoneid) {
		ppa = NULL;
		err = EPERM;
	}

	/*
	 * If we can't find or create it, then it's either because we're out of
	 * memory or because the requested PPA is owned by a different zone.
	 */
	if (ppa == NULL) {
		DBGERROR((CE_CONT, "DLPI attach: cannot create ppa %u\n",
		    dlp->attach_req.dl_ppa));
		dlerrorack(q, mp, dlp->dl_primitive, DL_SYSERR, err);
		return;
	}
	/*
	 * Preallocate the hangup message so that we're always able to
	 * send this upstream in the event of a catastrophic failure.
	 */
	if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
		DBGERROR((CE_CONT, "DLPI attach: cannot allocate hangup\n"));
		dlerrorack(q, mp, dlp->dl_primitive, DL_SYSERR, ENOSR);
		return;
	}
	sps->sps_dlstate = DL_UNBOUND;
	sps->sps_ppa = ppa;
	/*
	 * Add this stream to the head of the list of sibling streams
	 * which belong to the specified ppa.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
	ppa->ppa_refcnt++;
	sps->sps_nextsib = ppa->ppa_streams;
	ppa->ppa_streams = sps;
	/*
	 * And if this stream was marked as promiscuous (SPS_PROMISC), then we
	 * need to update the promiscuous streams count. This should only
	 * happen when DL_PROMISCON_REQ was issued prior to attachment.
	 */
	if (IS_SPS_PROMISC(sps)) {
		ppa->ppa_promicnt++;
	}
	rw_exit(&ppa->ppa_sib_lock);
	DBGDLPI((CE_CONT, "/%d: attached to ppa %d\n", sps->sps_mn_id,
	    ppa->ppa_ppa_id));
	dlokack(q, mp, DL_ATTACH_REQ);
}
Example #22
/*
 * sppp_dl_bind()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Called by qwriter (INNER) from sppp_dlbindreq as the result of
 *    receiving a DL_BIND_REQ message.
 */
static void
sppp_dl_bind(queue_t *q, mblk_t *mp)
{
	spppstr_t		*sps;
	sppa_t			*ppa;
	union DL_primitives	*dlp;
	t_scalar_t		sap;
	spppreqsap_t		req_sap;
	mblk_t			*lsmp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	dlp = (union DL_primitives *)mp->b_rptr;
	ppa = sps->sps_ppa;
	ASSERT(ppa != NULL);
	req_sap = dlp->bind_req.dl_sap;
	ASSERT((req_sap == ETHERTYPE_IP) || (req_sap == ETHERTYPE_IPV6) ||
	    (req_sap == ETHERTYPE_ALLSAP));

	if (req_sap == ETHERTYPE_IP) {
		sap = PPP_IP;
	} else if (req_sap == ETHERTYPE_IPV6) {
		sap = PPP_IPV6;
	} else if (req_sap == ETHERTYPE_ALLSAP) {
		sap = PPP_ALLSAP;
	}
	/*
	 * If another stream with the same sap has already been bound to the
	 * same ppa, then return with DL_NOADDR. However, we do make an
	 * exception for snoop (req_sap=0x00, sap=0xff) since multiple
	 * instances of snoop may execute on a given device.
	 */
	lsmp = NULL;
	if (sap != PPP_ALLSAP) {
		if ((sap == PPP_IP) && (ppa->ppa_ip_cache == NULL)) {
			ppa->ppa_ip_cache = sps;
			if (ppa->ppa_ctl != NULL) {
				lsmp = create_lsmsg(PPP_LINKSTAT_IPV4_BOUND);
			}
		} else if ((sap == PPP_IPV6) && (ppa->ppa_ip6_cache == NULL)) {
			ppa->ppa_ip6_cache = sps;
			if (ppa->ppa_ctl != NULL) {
				lsmp = create_lsmsg(PPP_LINKSTAT_IPV6_BOUND);
			}
		} else {
			DBGERROR((CE_CONT, "DLPI bind: bad SAP %x\n", sap));
			dlerrorack(q, mp, dlp->dl_primitive, DL_NOADDR,
			    EEXIST);
			return;
		}
		sps->sps_flags |= SPS_CACHED;
	}
	/*
	 * Tell the daemon that a DLPI bind has happened on this stream,
	 * and we'll only do this for PPP_IP or PPP_IPV6 sap (not snoop).
	 */
	if (lsmp != NULL && ppa->ppa_ctl != NULL) {
#ifdef DBG_DLPI
		cmn_err(CE_CONT, "sending up %s\n",
		    ((sap == PPP_IP) ? "PPP_LINKSTAT_IPV4_BOUND" :
		    "PPP_LINKSTAT_IPV6_BOUND"));
#endif
		putnext(ppa->ppa_ctl->sps_rq, lsmp);
	}
	DBGDLPI((CE_CONT, "/%d: bound to sap %X (req %X)\n", sps->sps_mn_id,
	    sap, req_sap));
	sps->sps_req_sap = req_sap;
	sps->sps_sap = sap;
	sps->sps_dlstate = DL_IDLE;
	dlbindack(q, mp, req_sap, &sap, sizeof (int32_t), 0, 0);
}
Example #23
/*
 * sppp_dlunitdatareq()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Handle DL_UNITDATA_REQ request, called by sppp_mproto. This procedure
 *    gets called for M_PROTO (DLPI) style of transmission. The fact that we
 *    have acknowledged IP's fastpath probing (DL_IOC_HDR_INFO) does not
 *    guarantee that IP will always transmit via M_DATA, and it merely implies
 *    that such situation _may_ happen. In other words, IP may decide to use
 *    M_PROTO (DLPI) for data transmission should it decide to do so.
 *    Therefore, we should never place any restrictions or checks against
 *    streams marked with SPS_FASTPATH, since it is legal for this procedure
 *    to be entered with or without the bit set.
 */
static int
sppp_dlunitdatareq(queue_t *q, mblk_t *mp, spppstr_t *sps)
{
	sppa_t		*ppa;
	mblk_t		*hdrmp;
	mblk_t		*pktmp;
	dl_unitdata_req_t *dludp;
	int		dladdroff;
	int		dladdrlen;
	int		msize;
	int		error = 0;
	boolean_t	is_promisc;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT((MTYPE(mp) == M_PCPROTO) || (MTYPE(mp) == M_PROTO));
	dludp = (dl_unitdata_req_t *)mp->b_rptr;
	dladdroff = dludp->dl_dest_addr_offset;
	dladdrlen = dludp->dl_dest_addr_length;
	ASSERT(sps != NULL);
	ASSERT(!IS_SPS_PIOATTACH(sps));
	ASSERT(sps->sps_dlstate == DL_IDLE);
	ASSERT(q->q_ptr == sps);
	/*
	 * If this stream is not attached to any ppas, then discard data
	 * coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		DBGERROR((CE_CONT, "DLPI unitdata: no attached ppa\n"));
		error = ENOLINK;
	} else if (mp->b_cont == NULL) {
		DBGERROR((CE_CONT, "DLPI unitdata: missing data\n"));
		error = EPROTO;
	}
	if (error != 0) {
		dluderrorind(q, mp, mp->b_rptr + dladdroff, dladdrlen,
		    DL_BADDATA, error);
		return (0);
	}
	ASSERT(mp->b_cont->b_rptr != NULL);
	/*
	 * Check if outgoing packet size is larger than allowed. We use
	 * msgdsize to count all of M_DATA blocks in the message.
	 */
	msize = msgdsize(mp);
	if (msize > ppa->ppa_mtu) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: DL_UNITDATA_REQ (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/* Allocate a message (M_DATA) to contain PPP header bytes. */
	if ((hdrmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_allocbfail++;
		mutex_exit(&ppa->ppa_sta_lock);
		DBGERROR((CE_CONT,
		    "DLPI unitdata: can't allocate header buffer\n"));
		dluderrorind(q, mp, mp->b_rptr + dladdroff, dladdrlen,
		    DL_SYSERR, ENOSR);
		return (0);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp->b_cont, sps->sps_sap,
		    B_FALSE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/* Discard DLPI header and keep only IP payload (mp->b_cont). */
	pktmp = mp->b_cont;
	mp->b_cont = NULL;
	freemsg(mp);
	mp = hdrmp;

	*(uchar_t *)mp->b_wptr++ = PPP_ALLSTATIONS;
	*(uchar_t *)mp->b_wptr++ = PPP_UI;
	*(uchar_t *)mp->b_wptr++ = ((uint16_t)sps->sps_sap >> 8) & 0xff;
	*(uchar_t *)mp->b_wptr++ = ((uint16_t)sps->sps_sap) & 0xff;
	ASSERT(MBLKL(mp) == PPP_HDRLEN);

	linkb(mp, pktmp);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.
	 */
	if (IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * Just put this back on the queue and allow the write service
	 * routine to handle it.  We're nested too deeply here to
	 * rewind the stack sufficiently to prevent overflow.  This is
	 * the slow path anyway.
	 */
	if (putq(q, mp) == 0) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_oqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
	} else {
		qenable(q);
	}
	return (0);
}
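
The four header bytes written by sppp_dlunitdatareq() form the standard PPP encapsulation: the HDLC-style address and control octets followed by the 16-bit protocol field in network byte order (PPP_ALLSTATIONS is 0xff, PPP_UI is 0x03). As the bytes appear on the wire:

/*
 * PPP header built above (PPP_HDRLEN == 4):
 *
 *   byte 0: 0xff  PPP_ALLSTATIONS (all-stations address)
 *   byte 1: 0x03  PPP_UI          (unnumbered information)
 *   byte 2: protocol, high byte   (0x00 for PPP_IP == 0x0021)
 *   byte 3: protocol, low byte    (0x21 for PPP_IP == 0x0021)
 */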
Example #24
int bufRectCpy(Buffer_t* buffDst, Point_t dstPoint, Buffer_t* buffSrc, Rect_t srcRect)
{
    MemoryRect_t srcOrga;
    MemoryRect_t dstOrga;
    unsigned long dgbTodoPixelCount = 0;
    unsigned long dgbDonePixelCount = 0;
    INT X,dstX,srcX;
    INT Y,dstY,srcY;
    CopyMode_t mode;

    // sanity checks
    if (!buffSrc) { DBGERROR("bufRectCpy() error: no source\n"); return 1; }
    if (!buffDst) { DBGERROR("bufRectCpy() error: no destination\n"); return 1; }

    // define copy
    srcOrga = buffSrc->m;
    dstOrga = buffDst->m;

    mode = rectCopyInit(&dstOrga, &dstPoint, &srcOrga, &srcRect, &dgbTodoPixelCount);
    
    DBGMODE(mode); 
    
    // Copying...
    for (Y = 0; Y < srcRect.height; Y++)
    {    
        void *d;
        void *s;
        size_t qty;

        srcY = srcRect.y + Y;
        dstY = dstPoint.y + Y;
        
        switch(mode)
        {
            case NOCPY:
                return 0;
                
            case MEMCPY: {
                srcX = srcRect.x;
                dstX = dstPoint.x;
                d  = buffDst->payload    + dstX*dstOrga.sample_sz + dstY*dstOrga.pitch;
                s  = buffSrc->payload    + srcX*srcOrga.sample_sz + srcY*srcOrga.pitch;
                // whole rect in one shot: full rows for all but the last, plus the last row's width
                qty = (srcRect.height - 1)*dstOrga.pitch + srcRect.width*dstOrga.sample_sz;
                memcpy(d,s,qty);
                dgbDonePixelCount += srcRect.width*srcRect.height;
                return 0;
            }
            
            case ROWCPY: {
                srcX = srcRect.x;
                dstX = dstPoint.x;
                d  = buffDst->payload    + dstX*dstOrga.sample_sz + dstY*dstOrga.pitch;
                s  = buffSrc->payload    + srcX*srcOrga.sample_sz + srcY*srcOrga.pitch;
                qty = srcRect.width*dstOrga.sample_sz;
                memcpy(d,s,qty);
                dgbDonePixelCount+=srcRect.width;    
                break;
            }
            
            case PIXCPY:
            default:
                for (X = 0; X < (srcRect.width); X++) {
                    Color32_t c={{0}};
                    srcX = srcRect.x + X;
                    dstX = dstPoint.x + X;
                    bufReadPixel(buffSrc, (int)srcX, (int)srcY, &c);
                    bufWritePixel(buffDst, (int)dstX, (int)dstY, &c);
                    dgbDonePixelCount++;            
                }
                break;
        }
    }
            
    DBGMSG("bufRectCpy() %lu/%lu pixels done\n",dgbDonePixelCount,dgbTodoPixelCount);
    return 0;
}
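
A possible call to bufRectCpy(), copying a 16x16 region between two buffers created with the helpers above; the initializer order of Rect_t (x, y, width, height) and Point_t (x, y) is assumed from the member names used in the function:

// Hypothetical usage; struct field order is an assumption.
Buffer_t* src = CreateBuffer(64, 64, 4);
Buffer_t* dst = CreateBuffer(64, 64, 4);
if (src && dst) {
    bufFillByte(src, 0xff);
    Rect_t  srcRect  = { 0, 0, 16, 16 };	/* x, y, width, height */
    Point_t dstPoint = { 8, 8 };		/* x, y */
    bufRectCpy(dst, dstPoint, src, srcRect);
    FreeBuffer(src);
    FreeBuffer(dst);
}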