/* Dequeue a record and process it.
 *
 * Pops one record from the stream's FIFO and dispatches it to the
 * destination landing's work function.  A work function may request
 * continuation by setting worker->continue_desc/continue_rec, in which
 * case processing carries on with that descriptor and record.
 *
 * The work/unlock/done sequence was previously duplicated before and
 * inside the continuation loop; it is now a single loop body.
 */
void SNetStreamWork(snet_stream_desc_t *desc, worker_t *worker)
{
  snet_record_t *rec = (snet_record_t *) SNetFifoGet(&desc->fifo);

  assert(rec);
  worker->continue_desc = NULL;
  for (;;) {
    landing_t *land = desc->landing;

    /* Delegate processing of the record to the landing's node. */
    (*land->node->work)(desc, rec);

    /* Release the landing, unless it is a box or was claimed by another
     * worker.  NOTE(review): box landings appear to be unlocked elsewhere
     * — confirm against the box implementation. */
    if (land->type != LAND_box && land->id == worker->id) {
      unlock_landing(land);
    }

    /* Drop the read reference taken for this record. */
    SNetDescDone(desc);

    /* Stop unless the work function scheduled a continuation. */
    if (worker->continue_desc == NULL) {
      break;
    }
    rec = worker->continue_rec;
    desc = worker->continue_desc;
    worker->continue_desc = NULL;
  }
}
/* Merge a stream to an identity landing with the subsequent stream.
 *
 * The identity landing reachable via *desc_ptr is dissolved: all queued
 * records are moved onto the identity landing's output stream, reference
 * counts are transferred, and the landing itself becomes garbage.
 *
 * On return, *desc_ptr points at the subsequent stream descriptor, which
 * is also returned.  The ordering of the final steps (become garbage,
 * redirect the caller's pointer, unlock, drop the reference) is
 * deliberate — other workers may still hold todo items referring to the
 * old descriptor; do not reorder.
 */
static snet_stream_desc_t *SNetMergeStreams(snet_stream_desc_t **desc_ptr)
{
  snet_stream_desc_t *desc = *desc_ptr;
  /* The stream leaving the identity landing; records are rerouted here. */
  snet_stream_desc_t *next = DESC_LAND_SPEC(desc, identity)->outdesc;
  fifo_node_t *fifo_tail_start;
  fifo_node_t *fifo_tail_end;
  fifo_node_t *node;
  int count = 0;

  /* Remove all data from the queue towards the garbage landing. */
  fifo_tail_start = SNetFifoGetTail(&desc->fifo, &fifo_tail_end);

  /* Count the number of data items in the captured list. */
  for (node = fifo_tail_start; node; node = node->next) {
    ++count;
  }

  /* Append the captured list onto the subsequent stream. */
  SNetFifoPutTail(&next->fifo, fifo_tail_start, fifo_tail_end);

  /* Reconnect the source landing of the next landing. */
  next->source = desc->source;

  /* Increase the reference count by the number of added records
   * (AAF: atomic add-and-fetch — keeps the count race-free). */
  AAF(&(next->refs), count);

  /* Report statistics. */
  if (SNetDebug()) {
    printf("%s: collecting %d recs, %d drefs, %d nrefs\n",
           __func__, count, desc->refs, next->refs);
  }

  /* Convert the identity landing into garbage. */
  SNetBecomeGarbage(desc->landing);

  /* Make sure no one ever attempts to write to the dissolved stream. */
  *desc_ptr = next;

  /* Unlock the garbage landing: some worker todo items may still need it. */
  unlock_landing(desc->landing);

  /* Decrease reference count to the garbage collected stream. */
  SNetDescDone(desc);

  /* Return the subsequent stream. */
  return next;
}
/* Try to read next input record: true iff successful.
 *
 * If input is still available, attempt to claim the input landing and,
 * when permitted, pull the next record.  A non-reading state clears the
 * worker's input flag; a terminating state additionally closes the input
 * node.  Returns the (possibly updated) has_input flag.
 */
static bool SNetWorkerInput(worker_t *worker)
{
  landing_t *land;

  /* No input source left: nothing to do. */
  if (!worker->has_input) {
    return false;
  }

  land = worker->input_desc->landing;

  /* Only proceed when we can claim the input landing without blocking. */
  if (trylock_landing(land, worker)) {
    if (SNetInputAllowed(worker)) {
      input_state_t state = SNetGetNextInputRecord(land);

      if (state != INPUT_reading) {
        /* Input exhausted or shutting down: stop polling it. */
        worker->has_input = false;
        if (state == INPUT_terminating) {
          SNetCloseInput(land->node);
        }
      }
    }
    unlock_landing(land);
  }

  return worker->has_input;
}
/* Work on an item.
 *
 * In case of a dissolved garbage stream update the source
 * stream of the work item.
 *
 * Return true iff a record was processed (or the item was merged /
 * otherwise advanced — any forward progress).
 *
 * If the contents of the item was merged with another item then
 * reset the item descriptor to NULL.
 *
 * Preconditions: the item is locked by this worker and holds at least
 * one read license (count > 0).
 */
static bool SNetWorkerWorkItem(work_item_t *const item, worker_t *worker)
{
  work_item_t *lookup;

  /* Item must be owned and non-empty. */
  assert(item->lock == worker->id);
  assert(item->count > 0);

  /* Claim destination landing. */
  if (trylock_landing(item->desc->landing, worker) == false) {
    /* Nothing can be done. */
    return false;
  }

  /* Bring item descriptors past any garbage collectable landings.
   * Each iteration retires one garbage descriptor and moves the item's
   * licenses onto the subsequent stream. */
  while (item->desc->landing->type == LAND_garbage) {
    /* Get subsequent descriptor. */
    snet_stream_desc_t *next_desc = DESC_LAND_SPEC(item->desc, siso)->outdesc;

    /* Release landing claim. */
    unlock_landing(item->desc->landing);

    /* Decrease reference counts to descriptor. */
    SNetDescRelease(item->desc, item->count);

    /* Take item out of hash table. */
    SNetHashPtrRemove(worker->hash_ptab, item->desc);

    /* Update item descriptor. */
    item->desc = next_desc;

    /* Also advance past subsequent garbage landings. */
    while (next_desc->landing->type == LAND_garbage) {
      /* Get subsequent descriptor. */
      item->desc = DESC_LAND_SPEC(next_desc, siso)->outdesc;

      /* Test if current descriptor is also in our hash table. */
      lookup = (work_item_t *)SNetHashPtrLookup(worker->hash_ptab, next_desc);
      if (lookup && trylock_work_item(lookup, worker)) {
        /* Merge both descriptor counts into one. */
        item->count += lookup->count;
        lookup->count = 0;
        unlock_work_item(lookup, worker);
      }

      /* Decrease reference counts to garbage descriptor.
       * NOTE(review): this releases with the (possibly just merged)
       * item->count — presumably the merged licenses also referenced
       * next_desc; confirm against SNetDescRelease's contract. */
      SNetDescRelease(next_desc, item->count);

      /* Advance to subsequent descriptor and repeat. */
      next_desc = item->desc;
    }

    /* The new descriptor may already exist in hash table. */
    lookup = (work_item_t *)SNetHashPtrLookup(worker->hash_ptab, next_desc);
    if (lookup) {
      /* Merge the two counts (AAF: atomic add — lookup may be owned by
       * another worker). */
      AAF(&lookup->count, item->count);
      /* Reset item. */
      item->count = 0;
      /* We already have this desciptor in lookup, so reset it. */
      item->desc = NULL;
      /* We made progress. */
      return true;
    }
    else /* (lookup == NULL) */ {
      /* Add new descriptor to hash table. */
      SNetHashPtrStore(worker->hash_ptab, item->desc, item);
      /* Claim destination landing; if that fails we still advanced the
       * item past garbage, so report progress. */
      if (trylock_landing(item->desc->landing, worker) == false) {
        /* We made progress anyway. */
        return true;
      }
    }
  }

  /* Subtract one read license. */
  --item->count;

  /* Unlock item so thieves can steal it while we work. */
  unlock_work_item(item, worker);

  /* Finally, do the work by delegating to the streams layer. */
  SNetStreamWork(item->desc, worker);

  /* We definitely made progress. */
  return true;
}