Exemplo n.º 1
0
/* used to indicate application layer has released interest in packets in committed-data state,
 * move to parity-data state until transmission group has completed.
 */
/* Demote every packet at the commit trailing edge from committed-data to
 * parity-data state; the packets remain in the window until their
 * transmission group completes and pgm_rxw_free_committed() reclaims them.
 *
 * Always returns PGM_RXW_OK.
 */
int
pgm_rxw_release_committed (
	pgm_rxw_t*		r
	)
{
	pgm_rxw_packet_t* cp;

	if (0 == r->committed_count)		/* nothing committed yet */
	{
		g_trace ("no commit packets to release");
		return PGM_RXW_OK;
	}

	g_assert( !pgm_rxw_empty(r) );

/* walk forward from the commit trail, releasing contiguous committed packets */
	for (cp = RXW_PACKET(r, r->commit_trail);
	     r->committed_count && cp->state == PGM_PKT_COMMIT_DATA_STATE;
	     cp = RXW_PACKET(r, r->commit_trail))
	{
		g_trace ("releasing commit sqn %u", cp->sequence_number);
		cp->state = PGM_PKT_PARITY_DATA_STATE;
		r->committed_count--;
		r->parity_data_count++;
		r->commit_trail++;
	}

	g_assert( r->committed_count == 0 );

	return PGM_RXW_OK;
}
Exemplo n.º 2
0
/* used to flush completed transmission groups of any parity-data state packets.
 */
int
pgm_rxw_free_committed (
	pgm_rxw_t*		r
	)
{
	/* nothing in parity-data state: nothing to reclaim */
	if ( r->parity_data_count == 0 ) {
		g_trace ("no parity-data packets free'd");
		return PGM_RXW_OK;
	}

	/* parity-data packets exist, so the commit trail must be ahead of the
	 * window trail */
	g_assert( r->commit_trail != r->trail );

/* calculate transmission group at commit trailing edge */
	const guint32 tg_sqn_mask = 0xffffffff << r->tg_sqn_shift;
	const guint32 tg_sqn = r->commit_trail & tg_sqn_mask;	/* group base sequence */
	const guint32 pkt_sqn = r->commit_trail & ~tg_sqn_mask;	/* offset within group */

	/* free everything strictly before the group containing commit_trail */
	guint32 new_rx_trail = tg_sqn;
	/* NOTE(review): when commit_trail sits on the last packet of its group
	 * this advances the target by only one sequence number, not by tg_size;
	 * confirm against the group-completion semantics of the window. */
	if (pkt_sqn == r->tg_size - 1)	/* end of group */
		new_rx_trail++;

	/* pop packets off the window trail until the target is reached; every
	 * packet reclaimed here must already be in parity-data state */
	pgm_rxw_packet_t* pp = RXW_PACKET(r, r->trail);
	while ( new_rx_trail != r->trail )
	{
		g_trace ("free committed sqn %u", pp->sequence_number);
		g_assert( pp->state == PGM_PKT_PARITY_DATA_STATE );
		pgm_rxw_pop_trail (r);
		pp = RXW_PACKET(r, r->trail);
	}

	return PGM_RXW_OK;
}
Exemplo n.º 3
0
/*
 * Geom destroy method for g_uzip: refuse while the provider is still open,
 * otherwise free the softc and wither the geom.
 *
 * Returns 0 on success, ENXIO if the geom is already torn down (no softc),
 * or EBUSY if the provider has open access counts.
 */
static int
g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{
	struct g_provider *pp;

	/* Assert before any dereference of gp; the original asserted only
	 * after gp->name and gp->softc had already been used. */
	KASSERT(gp != NULL, ("NULL geom"));
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name);
	g_topology_assert();

	if (gp->softc == NULL) {
		DPRINTF(GUZ_DBG_ERR, ("%s(%s): gp->softc == NULL\n", __func__,
		    gp->name));
		return (ENXIO);
	}

	pp = LIST_FIRST(&gp->provider);
	KASSERT(pp != NULL, ("NULL provider"));
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
		return (EBUSY);

	g_uzip_softc_free(gp->softc, gp);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}
Exemplo n.º 4
0
/*
 * Geom destroy method for g_bde: orderly detachment of the consumer, then
 * wait for the worker to die, scrub the key material and wither the geom.
 *
 * Returns 0 on success or EBUSY if the provider has open access counts.
 */
static int
g_bde_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_bde_softc *sc;

	/* Assert before dereferencing gp; the original asserted only after
	 * gp->name had already been used in the trace call. */
	KASSERT(gp != NULL, ("NULL geom"));
	g_trace(G_T_TOPOLOGY, "g_bde_destroy_geom(%s, %s)", mp->name, gp->name);
	g_topology_assert();
	/*
	 * Orderly detachment.
	 */
	pp = LIST_FIRST(&gp->provider);
	KASSERT(pp != NULL, ("NULL provider"));
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
		return (EBUSY);
	sc = gp->softc;
	cp = LIST_FIRST(&gp->consumer);
	KASSERT(cp != NULL, ("NULL consumer"));
	sc->dead = 1;			/* signal worker thread to exit */
	wakeup(sc);
	g_access(cp, -1, -1, -1);
	g_detach(cp);
	g_destroy_consumer(cp);
	/* wait until the worker acknowledges death and consumers are gone */
	while (sc->dead != 2 && !LIST_EMPTY(&pp->consumers))
		tsleep(sc, PRIBIO, "g_bdedie", hz);
	mtx_destroy(&sc->worklist_mutex);
	bzero(&sc->key, sizeof sc->key);	/* scrub key material */
	g_free(sc);
	g_wither_geom(gp, ENXIO);
	return (0);
}
Exemplo n.º 5
0
/*
 * Spoil method for g_uzip: the consumer has been spoiled, so release the
 * softc and wither the whole geom with ENXIO.
 */
static void
g_uzip_spoiled(struct g_consumer *cp)
{
	struct g_geom *gp = cp->geom;

	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name);
	g_topology_assert();

	g_uzip_softc_free(gp->softc, gp);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
}
Exemplo n.º 6
0
/*
 * Orphan method for g_uzip: the underlying provider went away (its error
 * must be set), so free the softc and wither the geom with that error.
 */
static void
g_uzip_orphan(struct g_consumer *cp)
{
	struct g_geom *gp = cp->geom;

	g_trace(G_T_TOPOLOGY, "g_uzip_orphan(%p/%s)", cp, cp->provider->name);
	g_topology_assert();
	KASSERT(cp->provider->error != 0,
		("g_uzip_orphan with error == 0"));

	g_uzip_softc_free(gp->softc, gp);
	gp->softc = NULL;
	g_wither_geom(gp, cp->provider->error);
}
Exemplo n.º 7
0
/*
 * Orphan method for g_aes: the underlying provider went away, so wither the
 * geom and release the softc.  The softc is zeroed before freeing because it
 * holds key material.
 */
static void
g_aes_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_aes_softc *sc;

	g_trace(G_T_TOPOLOGY, "g_aes_orphan(%p/%s)", cp, cp->provider->name);
	g_topology_assert();

	gp = cp->geom;
	sc = gp->softc;
	g_wither_geom(gp, ENXIO);
	bzero(sc, sizeof(struct g_aes_softc));	/* destroy evidence */
	g_free(sc);
	/* redundant trailing return removed */
}
Exemplo n.º 8
0
/*
 * Orphan method for g_bde: mark the geom withering, orphan every provider
 * with ENXIO, and scrub the softc since it holds key material.
 */
static void
g_bde_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_bde_softc *sc;

	g_trace(G_T_TOPOLOGY, "g_bde_orphan(%p/%s)", cp, cp->provider->name);
	g_topology_assert();

	gp = cp->geom;
	sc = gp->softc;
	gp->flags |= G_GEOM_WITHER;
	LIST_FOREACH(pp, &gp->provider, provider)
		g_orphan_provider(pp, ENXIO);
	bzero(sc, sizeof(struct g_bde_softc));	/* destroy evidence */
	/* redundant trailing return removed */
}
Exemplo n.º 9
0
/* Tear down a receive window: free the packet pointer array (and, via the
 * iterator, every packet it references), release the three NAK state queues,
 * and finally release the window structure itself.
 *
 * Always returns PGM_RXW_OK.
 */
int
pgm_rxw_shutdown (
	pgm_rxw_t*	r
	)
{
	g_trace ("rxw: shutdown.");

	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);

/* packet pointer array */
	if (r->pdata != NULL) {
		g_ptr_array_foreach (r->pdata, _list_iterator, r);
		g_ptr_array_free (r->pdata, TRUE);
		r->pdata = NULL;
	}

/* nak/ncf state queues: the link nodes are embedded in the packet structs,
 * so only the queue heads themselves are released here.
 */
	if (r->backoff_queue != NULL) {
		g_slice_free (GQueue, r->backoff_queue);
		r->backoff_queue = NULL;
	}
	if (r->wait_ncf_queue != NULL) {
		g_slice_free (GQueue, r->wait_ncf_queue);
		r->wait_ncf_queue = NULL;
	}
	if (r->wait_data_queue != NULL) {
		g_slice_free (GQueue, r->wait_data_queue);
		r->wait_data_queue = NULL;
	}

/* the window structure itself */
	g_slice_free1 (sizeof(pgm_rxw_t), r);

	return PGM_RXW_OK;
}
Exemplo n.º 10
0
/* Build the quoted-character lookup table: each byte c is rendered as a
 * single-quoted literal, e.g. a -> 'a', with backslash and single-quote
 * escaped.  Idempotent: subsequent calls return immediately.
 */
static void
init_chars (void)
{
  unsigned char c;
  static bool initialised = false;
  if (initialised)
    return;

#if 0
  g_trace ("initialising flyweight string representations of characters");
#endif

  /* NOTE(review): the loop stops at UCHAR_MAX - 1, so chars[UCHAR_MAX] is
   * never initialised -- confirm whether that entry is ever used.  (A naive
   * `c <= UCHAR_MAX` fix would wrap the unsigned char and loop forever;
   * a wider loop variable would be needed.) */
  for (c = 0; c < UCHAR_MAX; c++)
    {
      int i = 0;
      chars[c][i++] = '\'';             /* opening quote */
      if (c == '\\' || c == '\'')
        chars[c][i++] = '\\';           /* escape backslash and quote */
      chars[c][i++] = c;
      chars[c][i++] = '\'';             /* closing quote */
    }

  initialised = true;
}
Exemplo n.º 11
0
/* Clip points from a set of LAS files to the polygons of a shapefile layer,
 * writing every matching point (optionally filtered by classification) to a
 * single output LAS file and updating the output header's extents and counts.
 * NOTE(review): the `quiet` parameter is never read in this function.
 */
void lasclip(std::string &outfile, std::string &shapefile,
		std::string &layername, std::vector<std::string> &files,
		std::set<int> &classes, bool quiet) {

	if (outfile.empty())
		g_argerr("An output file is required.");
	if (shapefile.empty())
		g_argerr("A shape file is required.");
	if (files.size() == 0)
		g_argerr("At least one input file is required.");
	if (classes.size() == 0)
		g_warn("No classes specified, matching all classes.");

	/* Attempt to open and load geometries from the shape file. */
	OGRRegisterAll();
	OGRLayer *layer;
	OGRFeature *feat;
	OGRGeometry *og;
	OGRwkbGeometryType type;
	gg::GeometryCollection *geomColl;
	gg::Geometry *geom;

	OGRDataSource *ds = OGRSFDriverRegistrar::Open(shapefile.c_str(), FALSE);
	if (ds == nullptr)
		g_runerr("Couldn't open shapefile.");
	if (layername.empty()) {
		layer = ds->GetLayer(0);	/* default to the first layer */
	} else {
		layer = ds->GetLayerByName(layername.c_str());
	}
	if (layer == nullptr)
		g_runerr("Couldn't get layer.");

	type = layer->GetGeomType();
	if (type != wkbPolygon && type != wkbMultiPolygon)
		g_runerr("Geometry must be polygon or multipolygon.");

	const GEOSContextHandle_t gctx = OGRGeometry::createGEOSContext();
	const gg::GeometryFactory *gf = gg::GeometryFactory::getDefaultInstance();
	const gg::CoordinateSequenceFactory *cf =
			gf->getCoordinateSequenceFactory();
	std::vector<gg::Geometry *> geoms;

	/* Convert every feature's geometry into a GEOS geometry.
	 * NOTE(review): `feat` is never released (OGRFeature::DestroyFeature)
	 * and the GEOS geometries are never freed -- confirm intended. */
	while ((feat = layer->GetNextFeature()) != NULL) {
		og = feat->GetGeometryRef();
		geom = (gg::Geometry *) og->exportToGEOS(gctx);
		geoms.push_back(geom);
	}

	GDALClose(ds);

	if (geoms.size() == 0)
		g_runerr("No geometries were found.");

	/* The geometry collection is used for checking whether a las file intersects
	 the region of interest. */
	geomColl = gf->createGeometryCollection(geoms);
	const gg::Envelope *env = geomColl->getEnvelopeInternal();
	Bounds cbounds(env->getMinX(), env->getMinY(), env->getMaxX(),
			env->getMaxY());

	/* Loop over files and figure out which ones are relevant. */
	liblas::ReaderFactory rf;
	liblas::Header *dsth = nullptr;		/* header template from the first file */
	std::vector<unsigned int> indices;	/* indices of files whose bbox intersects */

	for (unsigned int i = 0; i < files.size(); ++i) {

		std::ifstream in(files[i].c_str(), std::ios::in | std::ios::binary);
		liblas::Reader r = rf.CreateWithStream(in);
		liblas::Header h = r.GetHeader();

		if (i == 0)
			dsth = new liblas::Header(h);

		/* build a closed ring around the file's bounding box */
		std::vector<gg::Coordinate> coords;
		coords.push_back(gg::Coordinate(h.GetMinX(), h.GetMinY()));
		coords.push_back(gg::Coordinate(h.GetMaxX(), h.GetMinY()));
		coords.push_back(gg::Coordinate(h.GetMaxX(), h.GetMaxY()));
		coords.push_back(gg::Coordinate(h.GetMinX(), h.GetMaxY()));
		coords.push_back(gg::Coordinate(h.GetMinX(), h.GetMinY()));

		gg::CoordinateSequence *cs = cf->create(&coords);
		gg::LinearRing *lr = gf->createLinearRing(cs);
		gg::Polygon *bounds = gf->createPolygon(lr, NULL);

		if (bounds->intersects(geomColl))
			indices.push_back(i);

		in.close();
	}

	if (indices.size() == 0)
		g_runerr("No files matched the given bounds.");

	std::ofstream out(outfile, std::ios::out | std::ios::binary);
	liblas::WriterFactory wf;
	liblas::Writer w(out, *dsth);
	liblas::Header::RecordsByReturnArray recs;
	int count = 0;

	/* running extents: { minX, maxX, minY, maxY, minZ, maxZ } */
	double bounds[] = { G_DBL_MAX_POS, G_DBL_MAX_NEG, G_DBL_MAX_POS,
			G_DBL_MAX_NEG, G_DBL_MAX_POS, G_DBL_MAX_NEG };

	g_trace("Using points from " << indices.size() << " files.");

	for (int i = 0; i < 5; ++i)
		recs.push_back(0);

	for (unsigned int i = 0; i < indices.size(); ++i) {

		std::ifstream in(files[indices[i]].c_str(),
				std::ios::in | std::ios::binary);
		liblas::Reader r = rf.CreateWithStream(in);
		liblas::Header h = r.GetHeader();

		g_trace("Processing file " << files[indices[i]]);

		while (r.ReadNextPoint()) {
			liblas::Point pt = r.GetPoint();

			int cls = pt.GetClassification().GetClass();
			if (classes.size() > 0 && !Util::inList(classes, cls))
				continue;

			double x = pt.GetX();
			double y = pt.GetY();
			const gg::Coordinate c(x, y);
			/* NOTE(review): p is allocated per point and never
			 * destroyed -- looks like a leak; verify. */
			gg::Point *p = gf->createPoint(c);

			/* cheap bbox test first, then the exact containment test */
			if (cbounds.contains(x, y) && geomColl->contains(p)) {
				/* NOTE(review): recs holds only 5 entries but is
				 * indexed by classification (cls), which can exceed
				 * 4 -- possible out-of-bounds access; confirm. */
				++recs[cls];
				++count;
				w.WritePoint(pt);
				if (pt.GetX() < bounds[0])
					bounds[0] = pt.GetX();
				if (pt.GetX() > bounds[1])
					bounds[1] = pt.GetX();
				if (pt.GetY() < bounds[2])
					bounds[2] = pt.GetY();
				if (pt.GetY() > bounds[3])
					bounds[3] = pt.GetY();
				if (pt.GetZ() < bounds[4])
					bounds[4] = pt.GetZ();
				if (pt.GetZ() > bounds[5])
					bounds[5] = pt.GetZ();
			}
		}

		in.close();
	}

	// Set the total count and update the point record counts.
	dsth->SetMin(bounds[0], bounds[2], bounds[4]);
	dsth->SetMax(bounds[1], bounds[3], bounds[5]);
	dsth->SetPointRecordsCount(count);
	for (unsigned int i = 0; i < recs.size(); ++i)
		dsth->SetPointRecordsByReturnCount(i, recs[i]);

	/* rewrite the header with the final extents and counts */
	w.WriteHeader();

}
Exemplo n.º 12
0
/* update receiving window with new trailing and leading edge parameters of transmit window
 * can generate data loss by excluding outstanding NAK requests.
 *
 * returns number of place holders (NAKs) generated
 */
int
pgm_rxw_window_update (
	pgm_rxw_t*	r,
	guint32		txw_trail,
	guint32		txw_lead,
	guint32		tg_size,		/* transmission group size, 1 = no groups */
	guint		tg_sqn_shift,		/*			    0 = no groups */
	pgm_time_t	nak_rb_expiry
	)
{
	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);

	guint naks = 0;		/* placeholders (NAKs) generated, returned to caller */
	guint dropped = 0;	/* packets lost to a full window */

/* SPM is first message seen, define new window parameters */
	if (!r->is_window_defined)
	{
		g_trace ("SPM defining receive window");

		/* empty window: all edges collapse to one past the advertised lead */
		r->lead = txw_lead;
		r->commit_trail = r->commit_lead = r->rxw_trail = r->rxw_trail_init = r->trail = r->lead + 1;

		r->tg_size = tg_size;
		r->tg_sqn_shift = tg_sqn_shift;

		r->is_rxw_constrained = TRUE;
		r->is_window_defined = TRUE;

		return 0;
	}

	if ( pgm_uint32_gt (txw_lead, r->lead) )
	{
/* check bounds of commit window */
		guint32 new_commit_sqns = ( 1 + txw_lead ) - r->commit_trail;
		if ( !pgm_rxw_commit_empty (r) &&
		     (new_commit_sqns > pgm_rxw_len (r)) )
		{
			/* cap the lead so the commit window cannot outgrow the
			 * receive window */
			guint32 constrained_lead = r->commit_trail + pgm_rxw_len (r) - 1;
			g_trace ("constraining advertised lead %u to commit window, new lead %u",
				txw_lead, constrained_lead);
			txw_lead = constrained_lead;
		}

		g_trace ("advancing lead to %u", txw_lead);

		if ( r->lead != txw_lead)
		{
/* generate new naks, should rarely if ever occur? */
	
			while ( r->lead != txw_lead )
			{
				if ( pgm_rxw_full(r) )
				{
					dropped++;
//					g_trace ("dropping #%u due to full window.", r->trail);

					pgm_rxw_pop_trail (r);
					r->is_waiting = TRUE;
				}

				r->lead++;

				/* back-off state placeholder for the missing sequence */
				pgm_rxw_packet_t* ph = pgm_rxw_alloc0_packet(r);
				ph->link_.data		= ph;
				ph->sequence_number     = r->lead;
				ph->nak_rb_expiry	= nak_rb_expiry;
				ph->state		= PGM_PKT_BACK_OFF_STATE;
				ph->t0			= pgm_time_now;

				RXW_SET_PACKET(r, ph->sequence_number, ph);
				g_trace ("adding placeholder #%u", ph->sequence_number);

/* send nak by sending to end of expiry list */
				g_queue_push_head_link (r->backoff_queue, &ph->link_);
				naks++;
			}
		}
	}
	else
	{
		g_trace ("lead not advanced.");

		if (txw_lead != r->lead)
		{
			g_trace ("lead stepped backwards, ignoring: %u -> %u.", r->lead, txw_lead);
		}
	}

	/* the initial trail constraint lifts once the sender's trail passes it */
	if ( r->is_rxw_constrained && SLIDINGWINDOW_GT(r, txw_trail, r->rxw_trail_init) )
	{
		g_trace ("constraint removed on trail.");
		r->is_rxw_constrained = FALSE;
	}

	if ( !r->is_rxw_constrained && SLIDINGWINDOW_GT(r, txw_trail, r->rxw_trail) )
	{
		g_trace ("advancing rxw_trail to %u", txw_trail);
		r->rxw_trail = txw_trail;

		if (SLIDINGWINDOW_GT(r, r->rxw_trail, r->trail))
		{
			g_trace ("advancing trail to rxw_trail");

/* jump remaining sequence numbers if window is empty */
			if ( pgm_rxw_empty(r) )
			{
				const guint32 distance = ( (gint32)(r->rxw_trail) - (gint32)(r->trail) );

				dropped  += distance;
				r->commit_trail = r->commit_lead = r->trail += distance;
				r->lead  += distance;
			}
			else
			{
/* mark lost all non-received sequence numbers between commit lead and new rxw_trail */
				for (guint32 sequence_number = r->commit_lead;
				     IN_TXW(r, sequence_number) && SLIDINGWINDOW_GT(r, r->rxw_trail, sequence_number);
				     sequence_number++)
				{
					pgm_rxw_packet_t* rp = RXW_PACKET(r, sequence_number);
					/* only NAK-pending states are declared lost;
					 * received data is left untouched */
					if (rp->state == PGM_PKT_BACK_OFF_STATE ||
					    rp->state == PGM_PKT_WAIT_NCF_STATE ||
					    rp->state == PGM_PKT_WAIT_DATA_STATE)
					{
						dropped++;
						pgm_rxw_mark_lost (r, sequence_number);
					}
				}
			}
		} /* trail > commit_lead */
	}
	else
	{
		g_trace ("rxw_trail not advanced.");

		if (!r->is_rxw_constrained)
		{
			if (txw_trail != r->rxw_trail)
			{
				g_trace ("rxw_trail stepped backwards, ignoring.");
			}
		}
	}

	if (dropped)
	{
		g_trace ("dropped %u messages due to full window.", dropped);
		r->cumulative_losses += dropped;
	}

	/* pick up changes to the transmission group geometry */
	if (r->tg_size != tg_size) {
		g_trace ("window transmission group size updated %i -> %i.", r->tg_size, tg_size);
		r->tg_size = tg_size;
		r->tg_sqn_shift = tg_sqn_shift;
	}

	g_trace ("window ( rxw_trail %u rxw_trail_init %u trail %u commit_trail %u commit_lead %u lead %u rxw_sqns %u )",
		r->rxw_trail, r->rxw_trail_init, r->trail, r->commit_trail, r->commit_lead, r->lead, pgm_rxw_sqns(r));

	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);
	return naks;
}
Exemplo n.º 13
0
static void
g_bde_create_geom(struct gctl_req *req, struct g_class *mp, struct g_provider *pp)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_bde_key *kp;
	int error, i;
	u_int sectorsize;
	off_t mediasize;
	struct g_bde_softc *sc;
	void *pass;
	void *key;

	g_trace(G_T_TOPOLOGY, "g_bde_create_geom(%s, %s)", mp->name, pp->name);
	g_topology_assert();
	gp = NULL;


	gp = g_new_geomf(mp, "%s.bde", pp->name);
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_access(cp, 1, 1, 1);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		gctl_error(req, "could not access consumer");
		return;
	}
	pass = NULL;
	key = NULL;
	do {
		pass = gctl_get_param(req, "pass", &i);
		if (pass == NULL || i != SHA512_DIGEST_LENGTH) {
			gctl_error(req, "No usable key presented");
			break;
		}
		key = gctl_get_param(req, "key", &i);
		if (key != NULL && i != 16) {
			gctl_error(req, "Invalid key presented");
			break;
		}
		sectorsize = cp->provider->sectorsize;
		mediasize = cp->provider->mediasize;
		sc = g_malloc(sizeof(struct g_bde_softc), M_WAITOK | M_ZERO);
		gp->softc = sc;
		sc->geom = gp;
		sc->consumer = cp;

		error = g_bde_decrypt_lock(sc, pass, key,
		    mediasize, sectorsize, NULL);
		bzero(sc->sha2, sizeof sc->sha2);
		if (error)
			break;
		kp = &sc->key;

		/* Initialize helper-fields */
		kp->keys_per_sector = kp->sectorsize / G_BDE_SKEYLEN;
		kp->zone_cont = kp->keys_per_sector * kp->sectorsize;
		kp->zone_width = kp->zone_cont + kp->sectorsize;
		kp->media_width = kp->sectorN - kp->sector0 -
		    G_BDE_MAXKEYS * kp->sectorsize;

		/* Our external parameters */
		sc->zone_cont = kp->zone_cont;
		sc->mediasize = g_bde_max_sector(kp);
		sc->sectorsize = kp->sectorsize;

		TAILQ_INIT(&sc->freelist);
		TAILQ_INIT(&sc->worklist);
		mtx_init(&sc->worklist_mutex, "g_bde_worklist", NULL, MTX_DEF);
		/* XXX: error check */
		kproc_create(g_bde_worker, gp, &sc->thread, 0, 0,
			"g_bde %s", gp->name);
		pp = g_new_providerf(gp, gp->name);
#if 0
		/*
		 * XXX: Disable this for now.  Appearantly UFS no longer
		 * XXX: issues BIO_DELETE requests correctly, with the obvious
		 * XXX: outcome that userdata is trashed.
		 */
		pp->flags |= G_PF_CANDELETE;
#endif
		pp->stripesize = kp->zone_cont;
		pp->stripeoffset = 0;
		pp->mediasize = sc->mediasize;
		pp->sectorsize = sc->sectorsize;
		g_error_provider(pp, 0);
		break;
	} while (0);
	if (pass != NULL)
		bzero(pass, SHA512_DIGEST_LENGTH);
	if (key != NULL)
		bzero(key, 16);
	if (error == 0)
		return;
	g_access(cp, -1, -1, -1);
	g_detach(cp);
	g_destroy_consumer(cp);
	if (gp->softc != NULL)
		g_free(gp->softc);
	g_destroy_geom(gp);
	return;
}
Exemplo n.º 14
0
/*
 * Create a g_bde geom on top of the given provider: attach a consumer,
 * decrypt the lock sector with the supplied pass (and optional key),
 * derive the zone geometry, start the worker thread and publish the
 * provider.  On failure the consumer and geom are torn down and a
 * human-readable error is reported through gctl_error().
 */
static void
g_bde_create_geom(struct gctl_req *req, struct g_class *mp, struct g_provider *pp)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_bde_key *kp;
	int error, i;
	u_int sectorsize;
	off_t mediasize;
	struct g_bde_softc *sc;
	void *pass;
	void *key;

	g_trace(G_T_TOPOLOGY, "g_bde_create_geom(%s, %s)", mp->name, pp->name);
	g_topology_assert();
	gp = NULL;


	gp = g_new_geomf(mp, "%s.bde", pp->name);
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_access(cp, 1, 1, 1);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		gctl_error(req, "could not access consumer");
		return;
	}
	pass = NULL;
	key = NULL;
	/* do/while(0) gives single-exit "break" style error handling */
	do {
		pass = gctl_get_param(req, "pass", &i);
		if (pass == NULL || i != SHA512_DIGEST_LENGTH) {
			gctl_error(req, "No usable key presented");
			break;
		}
		key = gctl_get_param(req, "key", &i);
		if (key != NULL && i != 16) {
			gctl_error(req, "Invalid key presented");
			break;
		}
		sectorsize = cp->provider->sectorsize;
		mediasize = cp->provider->mediasize;
		sc = g_malloc(sizeof(struct g_bde_softc), M_WAITOK | M_ZERO);
		gp->softc = sc;
		sc->geom = gp;
		sc->consumer = cp;

		error = g_bde_decrypt_lock(sc, pass, key,
		    mediasize, sectorsize, NULL);
		bzero(sc->sha2, sizeof sc->sha2);	/* scrub derived digest */
		if (error)
			break;
		kp = &sc->key;

		/* Initialize helper-fields */
		kp->keys_per_sector = kp->sectorsize / G_BDE_SKEYLEN;
		kp->zone_cont = kp->keys_per_sector * kp->sectorsize;
		kp->zone_width = kp->zone_cont + kp->sectorsize;
		kp->media_width = kp->sectorN - kp->sector0 -
		    G_BDE_MAXKEYS * kp->sectorsize;

		/* Our external parameters */
		sc->zone_cont = kp->zone_cont;
		sc->mediasize = g_bde_max_sector(kp);
		sc->sectorsize = kp->sectorsize;

		TAILQ_INIT(&sc->freelist);
		TAILQ_INIT(&sc->worklist);
		mtx_init(&sc->worklist_mutex, "g_bde_worklist", NULL, MTX_DEF);
		/* XXX: error check */
		kproc_create(g_bde_worker, gp, &sc->thread, 0, 0,
			"g_bde %s", gp->name);
		pp = g_new_providerf(gp, "%s", gp->name);
		pp->stripesize = kp->zone_cont;
		pp->stripeoffset = 0;
		pp->mediasize = sc->mediasize;
		pp->sectorsize = sc->sectorsize;
		g_error_provider(pp, 0);
		break;
	} while (0);
	/* always scrub caller-supplied key material */
	if (pass != NULL)
		bzero(pass, SHA512_DIGEST_LENGTH);
	if (key != NULL)
		bzero(key, 16);
	/* NOTE(review): on the gctl_error break paths above, error is still 0
	 * from the earlier g_access(), so this returns early without the
	 * teardown below -- confirm intended. */
	if (error == 0)
		return;
	g_access(cp, -1, -1, -1);
	g_detach(cp);
	g_destroy_consumer(cp);
	if (gp->softc != NULL)
		g_free(gp->softc);
	g_destroy_geom(gp);
	/* map the decrypt failure to a human-readable message */
	switch (error) {
	case ENOENT:
		gctl_error(req, "Lock was destroyed");
		break;
	case ESRCH:
		gctl_error(req, "Lock was nuked");
		break;
	case EINVAL:
		gctl_error(req, "Could not open lock");
		break;
	case ENOTDIR:
		gctl_error(req, "Lock not found");
		break;
	default:
		gctl_error(req, "Could not open lock (%d)", error);
		break;
	}
	return;
}
Exemplo n.º 15
0
/*
 * Taste method for BSD disklabels: probe the provider for a disklabel at
 * the known offsets and, if one is found (and not a recursive view of a
 * label we already handle), configure a slice geom for its partitions.
 * Returns the new geom, or NULL if no usable label was found.
 */
static struct g_geom *
g_bsd_taste(struct g_class *mp, struct g_provider *pp, int flags)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, i;
	struct g_bsd_softc *ms;
	u_int secsize;
	struct g_slicer *gsp;
	u_char hash[16];	/* MD5 digest size */
	MD5_CTX md5sum;
	struct uuid uuid;

	g_trace(G_T_TOPOLOGY, "bsd_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();

	/* We don't implement transparent inserts. */
	if (flags == G_TF_TRANSPARENT)
		return (NULL);

	/*
	 * BSD labels are a subclass of the general "slicing" topology so
	 * a lot of the work can be done by the common "slice" code.
	 * Create a geom with space for MAXPARTITIONS providers, one consumer
	 * and a softc structure for us.  Specify the provider to attach
	 * the consumer to and our "start" routine for special requests.
	 * The provider is opened with mode (1,0,0) so we can do reads
	 * from it.
	 */
	gp = g_slice_new(mp, MAXPARTITIONS, pp, &cp, &ms,
	     sizeof(*ms), g_bsd_start);
	if (gp == NULL)
		return (NULL);

	/* Get the geom_slicer softc from the geom. */
	gsp = gp->softc;

	/*
	 * The do...while loop here allows us to have multiple escapes
	 * using a simple "break".  This improves code clarity without
	 * ending up in deep nesting and without using goto or come from.
	 */
	do {
		/*
		 * If the provider is an MBR we will only auto attach
		 * to type 165 slices in the G_TF_NORMAL case.  We will
		 * attach to any other type.
		 */
		error = g_getattr("MBR::type", cp, &i);
		if (!error) {
			if (i != 165 && flags == G_TF_NORMAL)
				break;
			error = g_getattr("MBR::offset", cp, &ms->mbroffset);
			if (error)
				break;
		}

		/* Same thing if we are inside a PC98 */
		error = g_getattr("PC98::type", cp, &i);
		if (!error) {
			if (i != 0xc494 && flags == G_TF_NORMAL)
				break;
			error = g_getattr("PC98::offset", cp, &ms->mbroffset);
			if (error)
				break;
		}

		/* Same thing if we are inside a GPT */
		error = g_getattr("GPT::type", cp, &uuid);
		if (!error) {
			if (memcmp(&uuid, &freebsd_slice, sizeof(uuid)) != 0 &&
			    flags == G_TF_NORMAL)
				break;
		}

		/* Get sector size, we need it to read data. */
		secsize = cp->provider->sectorsize;
		if (secsize < 512)
			break;

		/* First look for a label at the start of the second sector. */
		error = g_bsd_try(gp, gsp, cp, secsize, ms, secsize);

		/*
		 * If sector size is not 512 the label still can be at
		 * offset 512, not at the start of the second sector. At least
		 * it's true for labels created by the FreeBSD's bsdlabel(8).
		 */
		if (error && secsize != HISTORIC_LABEL_OFFSET)
			error = g_bsd_try(gp, gsp, cp, secsize, ms,
			    HISTORIC_LABEL_OFFSET);

		/* Next, look for alpha labels */
		if (error)
			error = g_bsd_try(gp, gsp, cp, secsize, ms,
			    ALPHA_LABEL_OFFSET);

		/* If we didn't find a label, punt. */
		if (error)
			break;

		/*
		 * In order to avoid recursively attaching to the same
		 * on-disk label (it's usually visible through the 'c'
		 * partition) we calculate an MD5 and ask if other BSD's
		 * below us love that label.  If they do, we don't.
		 */
		MD5Init(&md5sum);
		MD5Update(&md5sum, ms->label, sizeof(ms->label));
		MD5Final(ms->labelsum, &md5sum);

		error = g_getattr("BSD::labelsum", cp, &hash);
		if (!error && !bcmp(ms->labelsum, hash, sizeof(hash)))
			break;

		/*
		 * Process the found disklabel, and modify our "slice"
		 * instance to match it, if possible.
		 */
		error = g_bsd_modify(gp, ms->label);
	} while (0);

	/* Success or failure, we can close our provider now. */
	g_access(cp, -1, 0, 0);

	/* If we have configured any providers, return the new geom. */
	if (gsp->nprovider > 0) {
		/* guard the on-disk label area against stray writes */
		g_slice_conf_hot(gp, 0, ms->labeloffset, LABELSIZE,
		    G_SLICE_HOT_ALLOW, G_SLICE_HOT_DENY, G_SLICE_HOT_CALL);
		gsp->hot = g_bsd_hotwrite;
		return (gp);
	}
	/*
	 * ...else push the "self-destruct" button, by spoiling our own
	 * consumer.  This triggers a call to g_slice_spoiled which will
	 * dismantle what was setup.
	 */
	g_slice_spoiled(cp);
	return (NULL);
}
Exemplo n.º 16
0
/* Handle an NCF (NAK confirmation) for the given sequence number: move an
 * existing placeholder to wait-data state, or extend the receive window with
 * back-off placeholders up to the confirmed sequence.  Returns a PGM_RXW_*
 * status code.
 */
int
pgm_rxw_ncf (
	pgm_rxw_t*	r,
	guint32		sequence_number,
	pgm_time_t	nak_rdata_expiry,
	pgm_time_t	nak_rb_expiry
	)
{
	int retval = PGM_RXW_UNKNOWN;

	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);

	g_trace ("pgm_rxw_ncf(#%u)", sequence_number);

	if (!r->is_window_defined) {
		retval = PGM_RXW_WINDOW_UNDEFINED;
		goto out;
	}

/* already committed */
	if ( pgm_uint32_lt (sequence_number, r->commit_lead) )
	{
		g_trace ("ncf #%u: already committed, discarding.", sequence_number);
		retval = PGM_RXW_DUPLICATE;
		goto out;
	}

	pgm_rxw_packet_t* rp = RXW_PACKET(r, sequence_number);

	/* a placeholder already exists: update its state per the NCF */
	if (rp)
	{
		switch (rp->state) {
/* already received ncf */
		case PGM_PKT_WAIT_DATA_STATE:
		{
			ASSERT_RXW_BASE_INVARIANT(r);
			ASSERT_RXW_POINTER_INVARIANT(r);
			g_trace ("ncf ignored as sequence number already in wait_data_state.");
			retval = PGM_RXW_DUPLICATE;
			goto out;
		}

		case PGM_PKT_BACK_OFF_STATE:
		case PGM_PKT_WAIT_NCF_STATE:
			rp->nak_rdata_expiry = nak_rdata_expiry;
			g_trace ("nak_rdata_expiry in %f seconds.", pgm_to_secsf( rp->nak_rdata_expiry - pgm_time_now ));
			break;

/* ignore what we have or have not */
		case PGM_PKT_HAVE_DATA_STATE:
		case PGM_PKT_HAVE_PARITY_STATE:
		case PGM_PKT_COMMIT_DATA_STATE:
		case PGM_PKT_PARITY_DATA_STATE:
		case PGM_PKT_LOST_DATA_STATE:
			g_trace ("ncf ignored as sequence number already closed.");
			retval = PGM_RXW_DUPLICATE;
			goto out;

		default:
			g_assert_not_reached();
		}

		/* move the packet from its current queue to the wait-data queue */
		pgm_rxw_pkt_state_unlink (r, rp);
		rp->state = PGM_PKT_WAIT_DATA_STATE;
		g_queue_push_head_link (r->wait_data_queue, &rp->link_);

		retval = PGM_RXW_CREATED_PLACEHOLDER;
		goto out;
	}

/* not an expected ncf, extend receive window to pre-empt loss detection */
	if ( !IN_TXW(r, sequence_number) )
	{
		g_trace ("ncf #%u not in tx window, discarding.", sequence_number);
		retval = PGM_RXW_NOT_IN_TXW;
		goto out;
	}

	g_trace ("ncf extends lead #%u to #%u", r->lead, sequence_number);

/* mark all sequence numbers to ncf # in BACK-OFF_STATE */

	guint dropped = 0;
	
/* check bounds of commit window */
	guint32 new_commit_sqns = ( 1 + sequence_number ) - r->commit_trail;
	if ( !pgm_rxw_commit_empty (r) &&
		(new_commit_sqns > pgm_rxw_len (r)) )
	{
		/* would overflow the commit window: delegate to window update */
		pgm_rxw_window_update (r, r->rxw_trail, sequence_number, r->tg_size, r->tg_sqn_shift, nak_rb_expiry);
		retval = PGM_RXW_CREATED_PLACEHOLDER;
		goto out;
	}

	r->lead++;

	/* fill the gap up to (but not including) the NCF sequence with
	 * back-off state placeholders */
	while (r->lead != sequence_number)
	{
		if ( pgm_rxw_full(r) )
		{
			dropped++;
//			g_trace ("dropping #%u due to full window.", r->trail);

			pgm_rxw_pop_trail (r);
			r->is_waiting = TRUE;
		}

		pgm_rxw_packet_t* ph = pgm_rxw_alloc0_packet(r);
		ph->link_.data		= ph;
		ph->sequence_number     = r->lead;
		ph->nak_rb_expiry	= nak_rb_expiry;
		ph->state		= PGM_PKT_BACK_OFF_STATE;
		ph->t0			= pgm_time_now;

		RXW_SET_PACKET(r, ph->sequence_number, ph);
		g_trace ("ncf: adding placeholder #%u", ph->sequence_number);

/* send nak by sending to end of expiry list */
		g_queue_push_head_link (r->backoff_queue, &ph->link_);

		r->lead++;
	}

/* create WAIT_DATA state placeholder for ncf # */

	g_assert ( r->lead == sequence_number );

	if ( pgm_rxw_full(r) )
	{
		dropped++;
//		g_trace ("dropping #%u due to full window.", r->trail);

		pgm_rxw_pop_trail (r);
		r->is_waiting = TRUE;
	}

	pgm_rxw_packet_t* ph = pgm_rxw_alloc0_packet(r);
	ph->link_.data		= ph;
	ph->sequence_number     = r->lead;
	ph->nak_rdata_expiry	= nak_rdata_expiry;
	ph->state		= PGM_PKT_WAIT_DATA_STATE;
	ph->t0			= pgm_time_now;
		
	RXW_SET_PACKET(r, ph->sequence_number, ph);
	g_trace ("ncf: adding placeholder #%u", ph->sequence_number);

/* do not send nak, simply add to ncf list */
	g_queue_push_head_link (r->wait_data_queue, &ph->link_);

	r->is_waiting = TRUE;

	if (dropped) {
		g_trace ("ncf: dropped %u messages due to full window.", dropped);
		r->cumulative_losses += dropped;
	}

	retval = PGM_RXW_CREATED_PLACEHOLDER;

out:
	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);
	return retval;
}
Exemplo n.º 17
0
static struct g_geom *
g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
{
	int error;
	uint32_t i, total_offsets, offsets_read, blk;
	void *buf;
	struct cloop_header *header;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp2;
	struct g_uzip_softc *sc;
	enum {
		GEOM_UZIP = 1,
		GEOM_ULZMA
	} type;

	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	buf = NULL;

	/*
	 * Create geom instance.
	 */
	gp = g_new_geomf(mp, "%s.uzip", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error) {
		goto e1;
	}
	g_topology_unlock();

	/*
	 * Read cloop header, look for CLOOP magic, perform
	 * other validity checks.
	 */
	DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n",
	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
	if (buf == NULL)
		goto e2;
	header = (struct cloop_header *) buf;
	if (strncmp(header->magic, CLOOP_MAGIC_START,
	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name));
		goto e3;
	}

	switch (header->magic[CLOOP_OFS_COMPR]) {
	case CLOOP_COMP_LZMA:
	case CLOOP_COMP_LZMA_DDP:
		type = GEOM_ULZMA;
		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_LZMA) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n",
		    gp->name));
		break;
	case CLOOP_COMP_LIBZ:
	case CLOOP_COMP_LIBZ_DDP:
		type = GEOM_UZIP;
		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_ZLIB) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n",
		    gp->name));
		break;
	default:
		DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n",
		    gp->name));
                goto e3;
        }

	/*
	 * Initialize softc and read offsets.
	 */
	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
	gp->softc = sc;
	sc->blksz = ntohl(header->blksz);
	sc->nblocks = ntohl(header->nblocks);
	if (sc->blksz % 512 != 0) {
		printf("%s: block size (%u) should be multiple of 512.\n",
		    gp->name, sc->blksz);
		goto e4;
	}
	if (sc->blksz > MAX_BLKSZ) {
		printf("%s: block size (%u) should not be larger than %d.\n",
		    gp->name, sc->blksz, MAX_BLKSZ);
	}
	total_offsets = sc->nblocks + 1;
	if (sizeof(struct cloop_header) +
	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
		printf("%s: media too small for %u blocks\n",
		    gp->name, sc->nblocks);
		goto e4;
	}
	sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk),
	    M_GEOM_UZIP, M_WAITOK | M_ZERO);
	offsets_read = MIN(total_offsets,
	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
	for (i = 0; i < offsets_read; i++) {
		sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]);
		sc->toc[i].blen = BLEN_UNDEF;
	}
	DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n",
	       gp->name, offsets_read));
	for (blk = 1; offsets_read < total_offsets; blk++) {
		uint32_t nread;

		free(buf, M_GEOM);
		buf = g_read_data(
		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
		if (buf == NULL)
			goto e5;
		nread = MIN(total_offsets - offsets_read,
		     pp->sectorsize / sizeof(uint64_t));
		DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n",
		    gp->name, nread, blk));
		for (i = 0; i < nread; i++) {
			sc->toc[offsets_read + i].offset =
			    be64toh(((uint64_t *) buf)[i]);
			sc->toc[offsets_read + i].blen = BLEN_UNDEF;
		}
		offsets_read += nread;
	}
	free(buf, M_GEOM);
	buf = NULL;
	offsets_read -= 1;
	DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u "
	    "sectors\n", gp->name, offsets_read, blk));
	if (sc->nblocks != offsets_read) {
		DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected "
		    "blocks\n", gp->name,
		    sc->nblocks < offsets_read ? "more" : "less"));
		goto e5;
	}
	/*
	 * "Fake" last+1 block, to make it easier for the TOC parser to
	 * iterate without making the last element a special case.
	 */
	sc->toc[sc->nblocks].offset = pp->mediasize;
	/* Massage TOC (table of contents), make sure it is sound */
	if (g_uzip_parse_toc(sc, pp, gp) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name));
		goto e5;
	}
	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
	mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF);
	bioq_init(&sc->bio_queue);
	sc->last_blk = -1;
	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
	sc->req_total = 0;
	sc->req_cached = 0;

	if (type == GEOM_UZIP) {
		sc->dcp = g_uzip_zlib_ctor(sc->blksz);
	} else {
		sc->dcp = g_uzip_lzma_ctor(sc->blksz);
	}
	if (sc->dcp == NULL) {
		goto e6;
	}

	sc->uzip_do = &g_uzip_do;

	error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s",
	    gp->name);
	if (error != 0) {
		goto e7;
	}

	g_topology_lock();
	pp2 = g_new_providerf(gp, "%s", gp->name);
	pp2->sectorsize = 512;
	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
	pp2->stripesize = pp->stripesize;
	pp2->stripeoffset = pp->stripeoffset;
	g_error_provider(pp2, 0);
	g_access(cp, -1, 0, 0);

	DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %jd), (%d, %d), %x\n",
	    gp->name, pp2->sectorsize, (intmax_t)pp2->mediasize,
	    pp2->stripeoffset, pp2->stripesize, pp2->flags));
	DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name, sc->nblocks,
	    sc->blksz));
	return (gp);

e7:
	sc->dcp->free(sc->dcp);
e6:
	free(sc->last_buf, M_GEOM);
	mtx_destroy(&sc->queue_mtx);
	mtx_destroy(&sc->last_mtx);
e5:
	free(sc->toc, M_GEOM);
e4:
	free(gp->softc, M_GEOM_UZIP);
e3:
	if (buf != NULL) {
		free(buf, M_GEOM);
	}
e2:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
e1:
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);

	return (NULL);
}
Exemplo n.º 18
0
/* create and initialise a receive window sized to match the sender's
 * transmit window.
 *
 * identifier:		TSI of the sender this window tracks.
 * tpdu_length:		maximum TPDU size, used to size pre-allocated data buffers.
 * preallocate_size:	number of data/packet buffer pairs seeded onto the trash stacks.
 * rxw_sqns:		window size in sequence numbers; when zero it is derived
 *			from rxw_secs * rxw_max_rte / max_tpdu.
 * rxw_secs:		window size in seconds (used only when rxw_sqns is zero).
 * rxw_max_rte:		maximum bandwidth (used only when rxw_sqns is zero).
 * trash_data/trash_packet/trash_mutex:	shared free-lists and their lock.
 *
 * returns a newly allocated window; ownership passes to the caller.
 */
pgm_rxw_t*
pgm_rxw_init (
	const void*	identifier,		/* TSI */
	guint16		tpdu_length,
	guint32		preallocate_size,
	guint32		rxw_sqns,		/* transmit window size in sequence numbers */
	guint		rxw_secs,		/* size in seconds */
	guint		rxw_max_rte,		/* max bandwidth */
	GTrashStack**	trash_data,
	GTrashStack**	trash_packet,
	GStaticMutex*	trash_mutex
	)
{
	g_trace ("init (tpdu %i pre-alloc %i rxw_sqns %i rxw_secs %i rxw_max_rte %i).",
		tpdu_length, preallocate_size, rxw_sqns, rxw_secs, rxw_max_rte);

	pgm_rxw_t* rxw = g_slice_alloc0 (sizeof(pgm_rxw_t));
	rxw->identifier	= identifier;
	rxw->max_tpdu	= tpdu_length;
	rxw->pdata	= g_ptr_array_new ();

	rxw->trash_data		= trash_data;
	rxw->trash_packet	= trash_packet;
	rxw->trash_mutex	= trash_mutex;

/* optionally seed the trash stacks so early packets avoid allocator calls */
	if (preallocate_size)
	{
		g_static_mutex_lock (rxw->trash_mutex);
		for (guint32 n = preallocate_size; n > 0; n--)
		{
			g_trash_stack_push (rxw->trash_data, g_slice_alloc (rxw->max_tpdu));
			g_trash_stack_push (rxw->trash_packet, g_slice_alloc (sizeof(pgm_rxw_packet_t)));
		}
		g_static_mutex_unlock (rxw->trash_mutex);
	}

/* calculate receive window parameters as per transmit window: an explicit
 * sequence count wins; otherwise derive one from seconds x rate / tpdu.
 */
	if (0 == rxw_sqns && rxw_secs && rxw_max_rte)
		rxw_sqns = (rxw_secs * rxw_max_rte) / rxw->max_tpdu;

	g_ptr_array_set_size (rxw->pdata, rxw_sqns);

/* empty state:
 *
 * trail = 0, lead = -1
 * commit_trail = commit_lead = rxw_trail = rxw_trail_init = 0
 * (the remaining fields are zero from g_slice_alloc0)
 */
	rxw->lead = -1;
	rxw->trail = rxw->lead + 1;

/* limit retransmit requests on late session joining */
	rxw->is_rxw_constrained = TRUE;
	rxw->is_window_defined = FALSE;

/* empty queues for nak & ncf state machines */
	rxw->backoff_queue = g_queue_new ();
	rxw->wait_ncf_queue = g_queue_new ();
	rxw->wait_data_queue = g_queue_new ();

/* statistics */
#if 0
	rxw->min_fill_time = G_MAXINT;
	rxw->max_fill_time = G_MININT;
	rxw->min_nak_transmit_count = G_MAXINT;
	rxw->max_nak_transmit_count = G_MININT;
#endif

#ifdef RXW_DEBUG
/* rough accounting of the window's footprint for the trace log */
	guint memory = sizeof(pgm_rxw_t) +
/* pointer array */
			sizeof(GPtrArray) + sizeof(guint) +
			*(guint*)( (char*)rxw->pdata + sizeof(gpointer) + sizeof(guint) ) +
/* pre-allocated data & packets */
			( preallocate_size * (rxw->max_tpdu + sizeof(pgm_rxw_packet_t)) ) +
/* state queues */
			3 * sizeof(GQueue) +
/* guess at timer */
			4 * sizeof(int);

	g_trace ("memory usage: %ub (%uMb)", memory, memory / (1024 * 1024));
#endif

	ASSERT_RXW_BASE_INVARIANT(rxw);
	ASSERT_RXW_POINTER_INVARIANT(rxw);
	return rxw;
}
Exemplo n.º 19
0
/* GEOM taste method for the AES encryption layer: probe provider "pp"
 * for one of the g_aes magic strings in sector 0 and, when matched,
 * stack an encrypted provider (one sector smaller, the header sector
 * being consumed) on top of it.
 *
 * NOTE(review): this function is truncated in this file — the text ends
 * at the "} while(0);" with no visible return or closing brace, so the
 * cleanup/return tail (and whether g_topology is re-locked on the break
 * paths) cannot be confirmed from here.
 */
static struct g_geom *
g_aes_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_aes_softc *sc;
	int error;
	u_int sectorsize;
	off_t mediasize;
	u_char *buf;

	g_trace(G_T_TOPOLOGY, "aes_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s.aes", pp->name);
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	/* open read-only for the probe; unwind everything on failure */
	error = g_access(cp, 1, 0, 0);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}
	buf = NULL;
	g_topology_unlock();
	/* do/while(0) so "break" can bail out of the probe at any point */
	do {
		/* only taste direct children of disk providers (rank 2) */
		if (gp->rank != 2)
			break;
		sectorsize = cp->provider->sectorsize;
		mediasize = cp->provider->mediasize;
		buf = g_read_data(cp, 0, sectorsize, NULL);
		if (buf == NULL) {
			break;
		}
		sc = g_malloc(sizeof(struct g_aes_softc), M_WAITOK | M_ZERO);
		/* select the keying policy from which magic string matched */
		if (!memcmp(buf, aes_magic, strlen(aes_magic))) {
			sc->keying = KEY_ZERO;
		} else if (!memcmp(buf, aes_magic_random, 
		    strlen(aes_magic_random))) {
			sc->keying = KEY_RANDOM;
		} else if (!memcmp(buf, aes_magic_test, 
		    strlen(aes_magic_test))) {
			sc->keying = KEY_TEST;
		} else {
			g_free(sc);
			break;
		}
		g_free(buf);
		gp->softc = sc;
		sc->sectorsize = sectorsize;
		/* first sector holds the magic header, so it is not exported */
		sc->mediasize = mediasize - sectorsize;
		rijndael_cipherInit(&sc->ci, MODE_CBC, NULL);
		if (sc->keying == KEY_TEST) {
			int i;
			u_char *p;

			/* deterministic 0,1,2,... key for test images */
			p = sc->master_key;
			for (i = 0; i < (int)sizeof sc->master_key; i ++) 
				*p++ = i;
		}
		if (sc->keying == KEY_RANDOM) {
			int i;
			u_int32_t u;
			u_char *p;

			/* fill the master key 32 random bits at a time */
			p = sc->master_key;
			for (i = 0; i < (int)sizeof sc->master_key; i += sizeof u) {
				u = arc4random();
				*p++ = u;
				*p++ = u >> 8;
				*p++ = u >> 16;
				*p++ = u >> 24;
			}
		}
		g_topology_lock();
		pp = g_new_providerf(gp, "%s", gp->name);
		pp->mediasize = mediasize - sectorsize;
		pp->sectorsize = sectorsize;
		g_error_provider(pp, 0);
		g_topology_unlock();
	} while(0);
Exemplo n.º 20
0
static struct g_geom *l4ata_taste(struct g_class *mp, struct g_provider *pp, int flags) {
	int err;
	struct g_geom *gp;
	struct g_consumer *cp;

	g_trace(G_T_TOPOLOGY, "l4ata_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s.%s", mp->name, pp->name);
	cp = g_new_consumer(gp);
	if (!gp || !cp) goto err_exit_1;
	g_attach(cp, pp);
	err = g_access(cp, 1, 0, 0);
	if (err) goto err_exit_2;

	if (dbg_this)
		printf("l4ata_taste: provider=\"%s\" medsz=%lld sectsz=%d geom=\"%s\"\n", pp->name, pp->mediasize, pp->sectorsize, pp->geom->name);

	if ( (pp->mediasize > 0) && (pp->sectorsize > 0) ) {
		dad_disk_t *dsk;
		dde_disk_info_t *di;

		err = g_access(cp, 0, 1, 1);
		if (err) goto err_exit_3;

		di = (dde_disk_info_t *) malloc(sizeof(*di), M_DAD, M_WAITOK|M_ZERO);
		if (!di) {
			dde_debug("error allocating dde_disk_info_t");
			goto err_exit_4;
		}
		di->issue_bio = issue_bio;
		di->geom_consumer = cp;

		dsk = (dad_disk_t *) malloc(sizeof(*dsk), M_DAD, M_WAITOK|M_ZERO);
		if (!dsk) {
			dde_debug("error allocating dad_disk_t");
			free(di, M_DAD);
			goto err_exit_4;
		}
		dsk->sectsize      = pp->sectorsize;
		dsk->sects         = pp->mediasize/pp->sectorsize;
		dsk->name          = cp->provider->name;
		dsk->client_priv   = 0; // for client use only
		dsk->dde_priv      = di;

		dad_announce_disk(dsk);

		return gp;
	}
	goto err_exit_3;

err_exit_4:
	g_access(cp, 0, -1, -1);
err_exit_3:
	g_access(cp, -1, 0, 0);
err_exit_2:
	g_detach(cp);
err_exit_1:
	if (cp) g_destroy_consumer(cp);
	if (gp) g_destroy_geom(gp);
	return NULL;
}
Exemplo n.º 21
0
/* add a data packet or apdu fragment to the receive window.
 *
 * r:			receive window.
 * packet:		payload buffer; ownership transfers to the window on
 *			success, and it is freed here on every discard path.
 * length:		payload length in bytes.
 * sequence_number:	ODATA/RDATA sequence number of this packet.
 * trail:		sender's advertised transmit-window trailing edge.
 * opt_fragment:	OPT_FRAGMENT option when the packet is an apdu
 *			fragment, NULL for a plain tpdu.
 * nak_rb_expiry:	back-off expiry applied to any placeholders created
 *			for sequence gaps.
 *
 * returns a PGM_RXW_* status describing how the packet was handled
 * (duplicate, placeholder filled/created, window advanced, apdu lost, ...).
 */
int
pgm_rxw_push_fragment (
	pgm_rxw_t*	r,
	gpointer	packet,
	gsize		length,
	guint32		sequence_number,
	guint32		trail,
	struct pgm_opt_fragment* opt_fragment,
	pgm_time_t	nak_rb_expiry
	)
{
	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);

	guint dropped = 0;
	int retval = PGM_RXW_UNKNOWN;

/* convert to more apparent names */
	const guint32 apdu_first_sqn	= opt_fragment ? g_ntohl (opt_fragment->opt_sqn) : 0;
	const guint32 apdu_len		= opt_fragment ? g_ntohl (opt_fragment->opt_frag_len) : 0;

	g_trace ("#%u: data trail #%u: push: window ( rxw_trail %u rxw_trail_init %u trail %u lead %u )",
		sequence_number, trail, 
		r->rxw_trail, r->rxw_trail_init, r->trail, r->lead);

/* trail is the next packet to commit upstream, lead is the leading edge
 * of the receive window with possible gaps inside, rxw_trail is the transmit
 * window trail for retransmit requests.
 */

	if ( !r->is_window_defined )
	{
/* if this packet is a fragment of an apdu, and not the first, we continue on as per spec but careful to
 * advance the trailing edge to discard the remaining fragments.
 */
		g_trace ("#%u: using odata to temporarily define window", sequence_number);

		r->lead = sequence_number - 1;
		r->commit_trail = r->commit_lead = r->rxw_trail = r->rxw_trail_init = r->trail = r->lead + 1;

		r->is_rxw_constrained = TRUE;
		r->is_window_defined = TRUE;
	}
	else
	{
/* check if packet should be discarded or processed further */

		if ( !IN_TXW(r, sequence_number) )
		{
			g_trace ("#%u: not in transmit window, discarding.", sequence_number);
			pgm_rxw_pkt_data_free1 (r, packet);
			retval = PGM_RXW_NOT_IN_TXW;
			goto out;
		}

		pgm_rxw_window_update (r, trail, r->lead, r->tg_size, r->tg_sqn_shift, nak_rb_expiry);
	}

	g_trace ("#%u: window ( rxw_trail %u rxw_trail_init %u trail %u commit_trail %u commit_lead %u lead %u )",
		sequence_number, r->rxw_trail, r->rxw_trail_init, r->trail, r->commit_trail, r->commit_lead, r->lead);
	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);

/* already committed */
	if ( pgm_uint32_lt (sequence_number, r->commit_lead) )
	{
		g_trace ("#%u: already committed, discarding.", sequence_number);
		pgm_rxw_pkt_data_free1 (r, packet);
		retval = PGM_RXW_DUPLICATE;
		goto out;
	}

/* check for duplicate */
	if ( pgm_uint32_lte (sequence_number, r->lead) )
	{
		g_trace ("#%u: in rx window, checking for duplicate.", sequence_number);

		pgm_rxw_packet_t* rp = RXW_PACKET(r, sequence_number);

		if (rp)
		{
			if (rp->length)
			{
				g_trace ("#%u: already received, discarding.", sequence_number);
				pgm_rxw_pkt_data_free1 (r, packet);
				retval = PGM_RXW_DUPLICATE;
				goto out;
			}

/* for fragments check that apdu is valid: its first fragment must still be
 * recoverable inside the window */
			if (	apdu_len && 
				apdu_first_sqn != sequence_number &&
				(
					pgm_rxw_empty (r) ||
				       !ABS_IN_RXW(r, apdu_first_sqn) ||
					RXW_PACKET(r, apdu_first_sqn)->state == PGM_PKT_LOST_DATA_STATE
				)
			   )
			{
				g_trace ("#%u: first fragment #%u not in receive window, apdu is lost.", sequence_number, apdu_first_sqn);
				pgm_rxw_mark_lost (r, sequence_number);
				pgm_rxw_pkt_data_free1 (r, packet);
				retval = PGM_RXW_APDU_LOST;
				goto out_flush;
			}

			if ( apdu_len && pgm_uint32_gt (apdu_first_sqn, sequence_number) )
			{
				g_trace ("#%u: first apdu fragment sequence number: #%u not lowest, ignoring packet.",
					sequence_number, apdu_first_sqn);
				pgm_rxw_pkt_data_free1 (r, packet);
				retval = PGM_RXW_MALFORMED_APDU;
				goto out;
			}

/* destination should not contain a data packet, although it may contain parity */
			g_assert( rp->state == PGM_PKT_BACK_OFF_STATE ||
				  rp->state == PGM_PKT_WAIT_NCF_STATE ||
				  rp->state == PGM_PKT_WAIT_DATA_STATE ||
				  rp->state == PGM_PKT_HAVE_PARITY_STATE ||
				  rp->state == PGM_PKT_LOST_DATA_STATE );
			g_trace ("#%u: filling in a gap.", sequence_number);

			if ( rp->state == PGM_PKT_HAVE_PARITY_STATE )
			{
				g_trace ("#%u: destination contains parity, shuffling to next available entry.", sequence_number);
/* find if any other packets are lost in this transmission group */

				const guint32 tg_sqn_mask = 0xffffffff << r->tg_sqn_shift;
				const guint32 next_tg_sqn = (sequence_number & tg_sqn_mask) + 1;

				if (sequence_number != next_tg_sqn)
				for (guint32 i = sequence_number + 1; i != next_tg_sqn; i++)
				{
					pgm_rxw_packet_t* pp = RXW_PACKET(r, i);
					if ( pp->state == PGM_PKT_BACK_OFF_STATE ||
					     pp->state == PGM_PKT_WAIT_NCF_STATE ||
					     pp->state == PGM_PKT_WAIT_DATA_STATE ||
					     pp->state == PGM_PKT_LOST_DATA_STATE )
					{
						g_assert (pp->data == NULL);

/* move parity to this new sequence number */
						memcpy (&pp->opt_fragment, &rp->opt_fragment, sizeof(struct pgm_opt_fragment));
						pp->data	= rp->data;
						pp->length	= rp->length;
						pp->state	= rp->state;
						rp->data	= NULL;
						rp->length	= 0;
						rp->state	= PGM_PKT_WAIT_DATA_STATE;
						break;
					}
				}

/* no incomplete packet found, therefore parity is no longer required */
				if (rp->state != PGM_PKT_WAIT_DATA_STATE)
				{
					pgm_rxw_data_free1 (r, rp);
					rp->state = PGM_PKT_WAIT_DATA_STATE;
				}
			}
			else if ( rp->state == PGM_PKT_LOST_DATA_STATE )	/* lucky packet */
			{
				r->lost_count--;
			}

/* a non-committed packet */
			r->fragment_count++;

			if (apdu_len)	/* a fragment */
			{
				memcpy (&rp->opt_fragment, opt_fragment, sizeof(struct pgm_opt_fragment));
			}

			g_assert (rp->data == NULL);
			rp->data	= packet;
			rp->length	= length;

			pgm_rxw_pkt_state_unlink (r, rp);
			rp->state	= PGM_PKT_HAVE_DATA_STATE;
			retval		= PGM_RXW_FILLED_PLACEHOLDER;

/* fill-time / nak-transmit statistics, min/max seeded on first sample */
			const guint32 fill_time = pgm_time_now - rp->t0;
			if (!r->max_fill_time) {
				r->max_fill_time = r->min_fill_time = fill_time;
			}
			else
			{
				if (fill_time > r->max_fill_time)
					r->max_fill_time = fill_time;
				else if (fill_time < r->min_fill_time)
					r->min_fill_time = fill_time;

				if (!r->max_nak_transmit_count) {
					r->max_nak_transmit_count = r->min_nak_transmit_count = rp->nak_transmit_count;
				}
				else
				{
					if (rp->nak_transmit_count > r->max_nak_transmit_count)
						r->max_nak_transmit_count = rp->nak_transmit_count;
					else if (rp->nak_transmit_count < r->min_nak_transmit_count)
						r->min_nak_transmit_count = rp->nak_transmit_count;
				}
			}
		}
		else
		{
			g_debug ("sequence_number %u points to (null) in window (trail %u commit_trail %u commit_lead %u lead %u).",
				sequence_number, r->trail, r->commit_trail, r->commit_lead, r->lead);
			ASSERT_RXW_BASE_INVARIANT(r);
			ASSERT_RXW_POINTER_INVARIANT(r);
			g_assert_not_reached();
		}
	}
	else	/* sequence_number > lead */
	{
/* extends receive window */

/* check bounds of commit window */
		guint32 new_commit_sqns = ( 1 + sequence_number ) - r->commit_trail;
                if ( !pgm_rxw_commit_empty (r) &&
		     (new_commit_sqns >= pgm_rxw_len (r)) )
                {
			pgm_rxw_window_update (r, r->rxw_trail, sequence_number, r->tg_size, r->tg_sqn_shift, nak_rb_expiry);
			pgm_rxw_pkt_data_free1 (r, packet);
			goto out;
                }


		g_trace ("#%u: lead extended.", sequence_number);
		g_assert ( pgm_uint32_gt (sequence_number, r->lead) );

		if ( pgm_rxw_full(r) )
		{
			dropped++;
//			g_trace ("#%u: dropping #%u due to odata filling window.", sequence_number, r->trail);

			pgm_rxw_pop_trail (r);
//			pgm_rxw_flush (r);
		}

		r->lead++;

/* if packet is non-contiguous to current leading edge add place holders */
		if (r->lead != sequence_number)
		{
/* TODO: can be rather inefficient on packet loss looping through dropped sequence numbers
 */
			while (r->lead != sequence_number)
			{
				pgm_rxw_packet_t* ph = pgm_rxw_alloc0_packet(r);
				ph->link_.data		= ph;
				ph->sequence_number     = r->lead;
				ph->nak_rb_expiry	= nak_rb_expiry;
				ph->state		= PGM_PKT_BACK_OFF_STATE;
				ph->t0			= pgm_time_now;

				RXW_SET_PACKET(r, ph->sequence_number, ph);

/* send nak by sending to end of expiry list */
				g_queue_push_head_link (r->backoff_queue, &ph->link_);
				g_trace ("#%" G_GUINT32_FORMAT ": place holder, backoff_queue %" G_GUINT32_FORMAT "/%u lead %" G_GUINT32_FORMAT,
					sequence_number, r->backoff_queue->length, pgm_rxw_sqns(r), r->lead);

				if ( pgm_rxw_full(r) )
				{
					dropped++;
//					g_trace ("dropping #%u due to odata filling window.", r->trail);

					pgm_rxw_pop_trail (r);
//					pgm_rxw_flush (r);
				}

				r->lead++;
			}
			retval = PGM_RXW_CREATED_PLACEHOLDER;
		}
		else
		{
			retval = PGM_RXW_ADVANCED_WINDOW;
		}

		g_assert ( r->lead == sequence_number );

/* sanity check on sequence number distance */
		if ( apdu_len && pgm_uint32_gt (apdu_first_sqn, sequence_number) )
		{
			g_trace ("#%u: first apdu fragment sequence number: #%u not lowest, ignoring packet.",
				sequence_number, apdu_first_sqn);
			pgm_rxw_pkt_data_free1 (r, packet);
			retval = PGM_RXW_MALFORMED_APDU;
			goto out;
		}

		pgm_rxw_packet_t* rp	= pgm_rxw_alloc0_packet(r);
		rp->link_.data		= rp;
		rp->sequence_number     = r->lead;

/* for fragments check that apdu is valid: dupe code to above */
		if (    apdu_len && 
			apdu_first_sqn != sequence_number &&
			(	
				pgm_rxw_empty (r) ||
			       !ABS_IN_RXW(r, apdu_first_sqn) ||
				RXW_PACKET(r, apdu_first_sqn)->state == PGM_PKT_LOST_DATA_STATE
			)
		   )
		{
			g_trace ("#%u: first fragment #%u not in receive window, apdu is lost.", sequence_number, apdu_first_sqn);
			pgm_rxw_pkt_data_free1 (r, packet);
			rp->state = PGM_PKT_LOST_DATA_STATE;
			r->lost_count++;
			RXW_SET_PACKET(r, rp->sequence_number, rp);
			retval = PGM_RXW_APDU_LOST;
			r->is_waiting = TRUE;
			goto out_flush;
		}

/* a non-committed packet */
		r->fragment_count++;

		if (apdu_len)	/* fragment */
		{
			memcpy (&rp->opt_fragment, opt_fragment, sizeof(struct pgm_opt_fragment));
		}
		rp->data		= packet;
		rp->length		= length;
		rp->state		= PGM_PKT_HAVE_DATA_STATE;

		RXW_SET_PACKET(r, rp->sequence_number, rp);
		g_trace ("#%" G_GUINT32_FORMAT ": added packet #%" G_GUINT32_FORMAT ", rxw_sqns %" G_GUINT32_FORMAT,
			sequence_number, rp->sequence_number, pgm_rxw_sqns(r));
	}

	r->is_waiting = TRUE;

out_flush:
	g_trace ("#%u: push complete: window ( rxw_trail %u rxw_trail_init %u trail %u commit_trail %u commit_lead %u lead %u )",
		sequence_number,
		r->rxw_trail, r->rxw_trail_init, r->trail, r->commit_trail, r->commit_lead, r->lead);

out:
	if (dropped) {
		g_trace ("dropped %u messages due to odata filling window.", dropped);
		r->cumulative_losses += dropped;
	}

	ASSERT_RXW_BASE_INVARIANT(r);
	ASSERT_RXW_POINTER_INVARIANT(r);
	return retval;
}
Exemplo n.º 22
0
/* GEOM taste method (earlier revision using a flat uint64_t offsets array
 * instead of a g_uzip_blk TOC): probe "pp" for a cloop image and expose a
 * decompressed 512-byte-sector provider.
 *
 * Returns the new geom on success, NULL on rejection or error; all error
 * paths converge on the single "err" label which releases everything via
 * g_uzip_softc_free() when the softc was attached.
 */
static struct g_geom *
g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
{
	int error;
	uint32_t i, total_offsets, offsets_read, blk;
	void *buf;
	struct cloop_header *header;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp2;
	struct g_uzip_softc *sc;

	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	buf = NULL;

	/*
	 * Create geom instance.
	 */
	gp = g_new_geomf(mp, "%s.uzip", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);	/* read-only for the probe */
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}
	g_topology_unlock();

	/*
	 * Read cloop header, look for CLOOP magic, perform
	 * other validity checks.
	 */
	DPRINTF(("%s: media sectorsize %u, mediasize %jd\n",
	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
	if (buf == NULL)
		goto err;
	header = (struct cloop_header *) buf;
	if (strncmp(header->magic, CLOOP_MAGIC_START,
	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
		DPRINTF(("%s: no CLOOP magic\n", gp->name));
		goto err;
	}
	/* NOTE(review): 0x0b/0x0c look like format/version bytes in the magic
	 * string ('V' marker, version >= '2') — confirm against the cloop
	 * header layout; later revisions name these CLOOP_OFS_* constants. */
	if (header->magic[0x0b] != 'V' || header->magic[0x0c] < '2') {
		DPRINTF(("%s: image version too old\n", gp->name));
		goto err;
	}

	/*
	 * Initialize softc and read offsets.
	 */
	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
	gp->softc = sc;
	sc->blksz = ntohl(header->blksz);
	sc->nblocks = ntohl(header->nblocks);
	if (sc->blksz % 512 != 0) {
		printf("%s: block size (%u) should be multiple of 512.\n",
		    gp->name, sc->blksz);
		goto err;
	}
	/* NOTE(review): over-MAX_BLKSZ is warned about but not rejected —
	 * confirm whether proceeding with a larger blksz is intended. */
	if (sc->blksz > MAX_BLKSZ) {
		printf("%s: block size (%u) should not be larger than %d.\n",
		    gp->name, sc->blksz, MAX_BLKSZ);
	}
	total_offsets = sc->nblocks + 1;	/* +1 end-of-image sentinel */
	if (sizeof(struct cloop_header) +
	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
		printf("%s: media too small for %u blocks\n",
		    gp->name, sc->nblocks);
		goto err;
	}
	sc->offsets = malloc(
	    total_offsets * sizeof(uint64_t), M_GEOM_UZIP, M_WAITOK);
	/* first sector already holds the header plus leading offsets */
	offsets_read = MIN(total_offsets,
	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
	for (i = 0; i < offsets_read; i++)
		sc->offsets[i] = be64toh(((uint64_t *) (header + 1))[i]);
	DPRINTF(("%s: %u offsets in the first sector\n",
	       gp->name, offsets_read));
	/* pull the remaining big-endian offsets one sector at a time;
	 * NOTE(review): blk * pp->sectorsize is a 32-bit multiply — may
	 * overflow on very large images, confirm bounds upstream. */
	for (blk = 1; offsets_read < total_offsets; blk++) {
		uint32_t nread;

		free(buf, M_GEOM);
		buf = g_read_data(
		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
		if (buf == NULL)
			goto err;
		nread = MIN(total_offsets - offsets_read,
		     pp->sectorsize / sizeof(uint64_t));
		DPRINTF(("%s: %u offsets read from sector %d\n",
		    gp->name, nread, blk));
		for (i = 0; i < nread; i++) {
			sc->offsets[offsets_read + i] =
			    be64toh(((uint64_t *) buf)[i]);
		}
		offsets_read += nread;
	}
	free(buf, M_GEOM);
	DPRINTF(("%s: done reading offsets\n", gp->name));
	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
	sc->last_blk = -1;	/* one-block decompression cache, initially empty */
	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
	sc->req_total = 0;
	sc->req_cached = 0;

	g_topology_lock();
	pp2 = g_new_providerf(gp, "%s", gp->name);
	pp2->sectorsize = 512;
	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
	pp2->stripesize = pp->stripesize;
	pp2->stripeoffset = pp->stripeoffset;
	g_error_provider(pp2, 0);
	g_access(cp, -1, 0, 0);	/* drop the probe's read access */

	DPRINTF(("%s: taste ok (%d, %jd), (%d, %d), %x\n",
	    gp->name,
	    pp2->sectorsize, (intmax_t)pp2->mediasize,
	    pp2->stripeoffset, pp2->stripesize, pp2->flags));
	printf("%s: %u x %u blocks\n", gp->name, sc->nblocks, sc->blksz);
	return (gp);

err:
	/* single unwind point: softc (with offsets/last_buf) freed as a unit */
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf != NULL)
		free(buf, M_GEOM);
	if (gp->softc != NULL) {
		g_uzip_softc_free(gp->softc, NULL);
		gp->softc = NULL;
	}
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);

	return (NULL);
}
Exemplo n.º 23
0
/* flush packets but instead of calling on_data append the contiguous data packets
 * to the provided scatter/gather vector.
 *
 * when transmission groups are enabled, packets remain in the windows tagged committed
 * until the transmission group has been completely committed.  this allows the packet
 * data to be used in parity calculations to recover the missing packets.
 *
 * r:		receive window.
 * pmsg:	message array cursor, advanced as messages are appended.
 * msg_len:	number of items available in pmsg.
 * piov:	underlying iovec storage cursor, advanced in step with pmsg.
 * iov_len:	number of items available in piov.
 *
 * returns -1 on nothing read, returns 0 on zero bytes read.
 *
 * Fix relative to the previous revision: removed the unreachable
 * "continue;" that followed "goto out;" in the LOST_DATA case (dead code).
 */
gssize
pgm_rxw_readv (
	pgm_rxw_t*		r,
	pgm_msgv_t**		pmsg,		/* message array, updated as messages appended */
	guint			msg_len,	/* number of items in pmsg */
	struct iovec**		piov,		/* underlying iov storage */
	guint			iov_len		/* number of items in piov */
	)
{
	ASSERT_RXW_BASE_INVARIANT(r);

	g_trace ("pgm_rxw_readv");

	guint dropped = 0;
	gssize bytes_read = 0;
	guint msgs_read = 0;
	const pgm_msgv_t* msg_end = *pmsg + msg_len;
	const struct iovec* iov_end = *piov + iov_len;

/* walk the incoming region from commit_lead until a gap, a lost packet,
 * or output-buffer exhaustion stops us */
	while ( !pgm_rxw_incoming_empty (r) )
	{
		pgm_rxw_packet_t* cp = RXW_PACKET(r, r->commit_lead);
		g_assert ( cp != NULL );

		switch (cp->state) {
		case PGM_PKT_LOST_DATA_STATE:
/* if packets are being held drop them all as group is now unrecoverable */
			while (r->commit_lead != r->trail) {
				dropped++;
				pgm_rxw_pop_trail (r);
			}

/* from now on r->commit_lead == r->trail */
			g_assert (r->commit_lead == r->trail);

/* check for lost apdu */
			if ( g_ntohl (cp->of_apdu_len) )
			{
				const guint32 apdu_first_sqn = g_ntohl (cp->of_apdu_first_sqn);

/* drop the first fragment, then others follow through as its no longer in the window */
				if ( r->trail == apdu_first_sqn )
				{
					dropped++;
					pgm_rxw_pop_trail (r);
				}

/* flush others, make sure to check each packet is an apdu packet and not simply a zero match */
				while (!pgm_rxw_empty(r))
				{
					cp = RXW_PACKET(r, r->trail);
					if (g_ntohl (cp->of_apdu_len) && g_ntohl (cp->of_apdu_first_sqn) == apdu_first_sqn)
					{
						dropped++;
						pgm_rxw_pop_trail (r);
					}
					else
					{	/* another apdu or tpdu exists */
						break;
					}
				}
			}
			else
			{	/* plain tpdu */
				g_trace ("skipping lost packet @ #%" G_GUINT32_FORMAT, cp->sequence_number);

				dropped++;
				pgm_rxw_pop_trail (r);
/* one tpdu lost */
			}

			g_assert (r->commit_lead == r->trail);
			goto out;

		case PGM_PKT_HAVE_DATA_STATE:
			/* not lost */
			g_assert ( cp->data != NULL && cp->length > 0 );

/* check for contiguous apdu */
			if ( g_ntohl (cp->of_apdu_len) )
			{
				if ( g_ntohl (cp->of_apdu_first_sqn) != cp->sequence_number )
				{
					g_trace ("partial apdu at trailing edge, marking lost.");
					pgm_rxw_mark_lost (r, cp->sequence_number);
					break;
				}

/* scan forward counting bytes until the apdu is complete or a gap appears */
				guint32 frag = g_ntohl (cp->of_apdu_first_sqn);
				guint32 apdu_len = 0;
				pgm_rxw_packet_t* ap = NULL;
				while ( ABS_IN_RXW(r, frag) && apdu_len < g_ntohl (cp->of_apdu_len) )
				{
					ap = RXW_PACKET(r, frag);
					g_assert ( ap != NULL );
					if (ap->state != PGM_PKT_HAVE_DATA_STATE)
					{
						break;
					}
					apdu_len += ap->length;
					frag++;
				}

				if (apdu_len == g_ntohl (cp->of_apdu_len))
				{
/* check if sufficient room for apdu */
					const guint32 apdu_len_in_frags = frag - g_ntohl (cp->of_apdu_first_sqn) + 1;
					if (*piov + apdu_len_in_frags > iov_end) {
						break;
					}

					g_trace ("contiguous apdu found @ #%" G_GUINT32_FORMAT " - #%" G_GUINT32_FORMAT 
							", passing upstream.",
						g_ntohl (cp->of_apdu_first_sqn), ap->sequence_number);

/* pass upstream & cleanup: one msgv entry spanning one iovec per fragment */
					(*pmsg)->msgv_identifier = r->identifier;
					(*pmsg)->msgv_iovlen     = 0;
					(*pmsg)->msgv_iov        = *piov;
					for (guint32 i = g_ntohl (cp->of_apdu_first_sqn); i < frag; i++)
					{
						ap = RXW_PACKET(r, i);

						(*piov)->iov_base = ap->data;	/* copy */
						(*piov)->iov_len  = ap->length;
						(*pmsg)->msgv_iovlen++;

						++(*piov);

						bytes_read += ap->length;	/* stats */

						ap->state = PGM_PKT_COMMIT_DATA_STATE;
						r->fragment_count--;		/* accounting */
						r->commit_lead++;
						r->committed_count++;
					}

/* end of commit buffer */
					++(*pmsg);
					msgs_read++;

					if (*pmsg == msg_end) {
						goto out;
					}

					if (*piov == iov_end) {
						goto out;
					}
				}
				else
				{	/* incomplete apdu */
					g_trace ("partial apdu found %u of %u bytes.",
						apdu_len, g_ntohl (cp->of_apdu_len));
					goto out;
				}
			}
			else
			{	/* plain tpdu */
				g_trace ("one packet found @ #%" G_GUINT32_FORMAT ", passing upstream.",
					cp->sequence_number);

/* pass upstream, including data memory ownership */
				(*pmsg)->msgv_identifier = r->identifier;
				(*pmsg)->msgv_iovlen     = 1;
				(*pmsg)->msgv_iov        = *piov;
				(*piov)->iov_base = cp->data;
				(*piov)->iov_len  = cp->length;
				bytes_read += cp->length;
				msgs_read++;

/* move to commit window */
				cp->state = PGM_PKT_COMMIT_DATA_STATE;
				r->fragment_count--;
				r->commit_lead++;
				r->committed_count++;

/* end of commit buffer */
				++(*pmsg);
				++(*piov);

				if (*pmsg == msg_end) {
					goto out;
				}

				if (*piov == iov_end) {
					goto out;
				}
			}

/* one apdu or tpdu processed */
			break;

		default:
			/* gap (back-off/wait-ncf/wait-data/...) at commit lead: stop reading */
			g_trace ("!(have|lost)_data_state, sqn %" G_GUINT32_FORMAT " packet state %s(%i) cp->length %u", r->commit_lead, pgm_rxw_state_string(cp->state), cp->state, cp->length);
			goto out;
		}
	}

out:
/* fold this call's drops and deliveries into the window statistics */
	r->cumulative_losses += dropped;
	r->bytes_delivered   += bytes_read;
	r->msgs_delivered    += msgs_read;

	r->pgm_sock_err.lost_count = dropped;

	ASSERT_RXW_BASE_INVARIANT(r);

	return msgs_read ? bytes_read : -1;
}