/** Find the next empty write chunk, set self->writerChunk to it and return it.
 *
 * We only use the next one if it is empty. If not, we essentially just filled
 * up the last chunk and wrapped around to the socket reader. In that case, we
 * either create a new chunk if the overall buffer can still grow, or we drop
 * the data from the current one.
 *
 * \warning A lock on the current writer chunk should be held prior to calling
 * this function. It will be released, and the returned writerChunk will be
 * similarly locked.
 *
 * \param self BufferedWriter pointer
 * \param current locked BufferChunk to use or from which to find the next
 * \return a locked BufferChunk in which data can be stored
 */
BufferChunk* getNextWriteChunk(BufferedWriter* self, BufferChunk* current)
{
  int nlost;
  BufferChunk* nextBuffer;

  assert(current != NULL);
  nextBuffer = current->next;
  oml_unlock(&current->lock, __FUNCTION__);
  assert(nextBuffer != NULL);

  oml_lock(&self->lock, __FUNCTION__);
  if (nextBuffer == self->nextReaderChunk) {
    if (self->unallocatedBuffers > 0) {
      /* The next buffer is the next to be read, but we can allocate more:
       * allocate a new buffer, insert it after the current writer, and use it */
      nextBuffer = createBufferChunk(self);
      assert(nextBuffer); /** \todo Use existing buffer if allocation fails */
      oml_unlock(&self->lock, __FUNCTION__);

      oml_lock(&current->lock, __FUNCTION__);
      nextBuffer->next = current->next;
      current->next = nextBuffer; /* we have a lock on this one */
      oml_unlock(&current->lock, __FUNCTION__);

      oml_lock(&self->lock, __FUNCTION__);
    } else {
      /* The next buffer is the next to be read, and we cannot allocate more:
       * use it, dropping unread data, and advance the read pointer */
      self->nextReaderChunk = nextBuffer->next;
    }
  }
  self->writerChunk = nextBuffer;

  nlost = bw_msgcount_reset(self);
  self->nlost += nlost;
  oml_unlock(&self->lock, __FUNCTION__);

  oml_lock(&nextBuffer->lock, __FUNCTION__);
  if (nlost) {
    logwarn("%s: Dropping %d samples (%dB)\n",
        self->outStream->dest, nlost, mbuf_fill(nextBuffer->mbuf));
  }
  mbuf_clear2(nextBuffer->mbuf, 0);

  // Now we just need to copy the message from current to self->writerChunk
  int msgSize = mbuf_message_length(current->mbuf);
  if (msgSize > 0) {
    mbuf_write(nextBuffer->mbuf, mbuf_message(current->mbuf), msgSize);
    mbuf_reset_write(current->mbuf);
  }

  return nextBuffer;
}
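/* Illustrative sketch only: the lock hand-off contract of getNextWriteChunk()
 * as seen from a hypothetical writer-side helper (the helper name and its
 * arguments are made up). The caller locks the current writer chunk; when it
 * has to move on, getNextWriteChunk() releases that lock and returns the next
 * chunk already locked, so exactly one chunk lock is held at any time. Locking
 * of self->lock around the access to self->writerChunk is omitted for brevity. */
#if 0
static void example_append_to_chunk(BufferedWriter* self, uint8_t* data, size_t size)
{
  BufferChunk* chunk = self->writerChunk;
  oml_lock(&chunk->lock, __FUNCTION__);
  if (chunk->mbuf->wr_remaining < size) {
    /* The lock on the old chunk is released; the returned chunk comes back locked */
    chunk = getNextWriteChunk(self, chunk);
  }
  mbuf_write(chunk->mbuf, data, size);
  oml_unlock(&chunk->lock, __FUNCTION__);
}
#endif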
/**
 * \brief Return and unlock an MBuffer previously obtained through bw_get_write_buf
 * \param instance BufferedWriter handle
 * \see bw_get_write_buf
 */
void bw_unlock_buf(BufferedWriterHdl instance)
{
  BufferedWriter* self = (BufferedWriter*)instance;

  pthread_cond_signal(&self->semaphore);

  /* assume we locked for a reason */
  oml_unlock(&self->lock, "bw_unlock_buf");
}
/** Return and unlock the MBuffer of the current writer chunk.
 *
 * \param instance BufferedWriter handle for which a buffer was previously obtained through bw_get_write_buf
 *
 * \see bw_get_write_buf
 */
void bw_release_write_buf(BufferedWriter* instance)
{
  BufferedWriter* self = (BufferedWriter*)instance;

  pthread_cond_signal(&self->semaphore);

  /* assume we locked for a reason */
  oml_unlock(&self->writerChunk->lock, __FUNCTION__);
}
/** Close an output stream and destroy the objects.
 *
 * \param instance handle (i.e., pointer) to a BufferedWriter
 */
void bw_close(BufferedWriter* instance)
{
  int err, *retval;
  BufferedWriter *self = (BufferedWriter*)instance;

  if (!self) { return; }

  if (oml_lock(&self->lock, __FUNCTION__)) { return; }

  self->active = 0;

  loginfo("%s: Waiting for buffered queue thread to drain...\n", self->outStream->dest);

  pthread_cond_signal(&self->semaphore);
  oml_unlock(&self->lock, __FUNCTION__);

  if ((err = pthread_join(self->readerThread, (void**)&retval))) {
    /* pthread_join() reports errors through its return value, not errno */
    logwarn("%s: Cannot join buffered queue reader thread: %s\n",
        self->outStream->dest, strerror(err));

  } else {
    if (1 == *retval) {
      logdebug("%s: Buffered queue fully drained\n", self->outStream->dest);
    } else {
      logerror("%s: Buffered queue did not fully drain (%d)\n",
          self->outStream->dest, *retval);
    }
  }

  self->outStream->close(self->outStream);
  destroyBufferChain(self);
  oml_free(self);
}
/**
 * \fn static void* threadStart(void* handle)
 * \brief Start the filter thread
 * \param handle the stream to use the filters on
 */
static void* threadStart(void* handle)
{
  BufferedWriter* self = (BufferedWriter*)handle;
  BufferChain* chain = self->firstChain;

  while (self->active) {
    oml_lock(&self->lock, "bufferedWriter");
    pthread_cond_wait(&self->semaphore, &self->lock);

    // Process all chains which have data in them
    while (1) {
      if (mbuf_message(chain->mbuf) > mbuf_rdptr(chain->mbuf)) {
        // got something to read from this chain
        while (!processChain(self, chain));
      }
      // stop if we caught up to the writer
      if (chain == self->writerChain) break;
      chain = chain->next;
    }
    oml_unlock(&self->lock, "bufferedWriter");
  }
  return NULL;
}
/**
 * \brief Add a chunk of data to the end of the queue.
 * \param instance BufferedWriter handle
 * \param chunk Pointer to the data to add
 * \param size size of the data
 * \return 1 if success, 0 otherwise
 */
int bw_push(BufferedWriterHdl instance, uint8_t* chunk, size_t size)
{
  BufferedWriter* self = (BufferedWriter*)instance;
  if (oml_lock(&self->lock, "bw_push")) { return 0; }

  if (!self->active) {
    oml_unlock(&self->lock, "bw_push");
    return 0;
  }

  BufferChain* chain = self->writerChain;
  if (chain == NULL) {
    oml_unlock(&self->lock, "bw_push");
    return 0;
  }

  if (chain->mbuf->wr_remaining < size) {
    chain = self->writerChain = getNextWriteChain(self, chain);
  }

  if (mbuf_write(chain->mbuf, chunk, size) < 0) {
    oml_unlock(&self->lock, "bw_push");
    return 0;
  }

  pthread_cond_signal(&self->semaphore);

  // oml_unlock(&chain->lock, "bw_push");
  oml_unlock(&self->lock, "bw_push");
  return 1;
}
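/* Illustrative sketch only: queueing a marshalled sample with bw_push(). The
 * helper name is made up, and `bufferedWriter', `msg' and `msglen' are assumed
 * to come from the surrounding writer code; on failure the sample is simply
 * dropped here. */
#if 0
static int example_queue_sample(BufferedWriterHdl bufferedWriter, uint8_t* msg, size_t msglen)
{
  if (!bw_push(bufferedWriter, msg, msglen)) {
    logwarn("Could not queue %zuB of data\n", msglen);
    return 0;
  }
  return 1;
}
#endif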
/** Close an output stream and destroy the objects.
 *
 * \param instance handle (i.e., pointer) to a BufferedWriter
 */
void bw_close(BufferedWriterHdl instance)
{
  BufferedWriter *self = (BufferedWriter*)instance;

  if (!self) return;

  if (oml_lock(&self->lock, "bw_close")) return;

  self->active = 0;
  loginfo("Waiting for buffered queue reader thread to drain...\n");
  pthread_cond_signal(&self->semaphore);
  oml_unlock(&self->lock, "bw_close");

  // pthread_cond_destroy(&self->semaphore, NULL);
  switch (pthread_join(self->readerThread, NULL)) {
  case 0:
    loginfo("Buffered queue reader thread finished OK...\n");
    break;
  case EINVAL:
    logerror("Buffered queue reader thread is not joinable\n");
    break;
  case EDEADLK:
    logerror("Buffered queue reader thread shutdown deadlock, or self-join\n");
    break;
  case ESRCH:
    logerror("Buffered queue reader thread shutdown failed: could not find the thread\n");
    break;
  default:
    logerror("Buffered queue reader thread shutdown failed with an unknown error\n");
    break;
  }

  destroyBufferChain(self);
  xfree(self);
}
/** Add some data to the end of the header buffer.
 *
 * \warning This function tries to acquire the lock on the header data, and
 * releases it when done.
 *
 * \param instance BufferedWriter handle
 * \param data Pointer to data to add
 * \param size size of data
 * \return 1 if success, 0 otherwise
 *
 * \see _bw_push_meta
 */
int bw_push_meta(BufferedWriter* instance, uint8_t* data, size_t size)
{
  int result = 0;
  BufferedWriter* self = (BufferedWriter*)instance;

  if (oml_lock(&self->meta_lock, __FUNCTION__) == 0) {
    result = _bw_push_meta(instance, data, size);
    oml_unlock(&self->meta_lock, __FUNCTION__);
  }

  return result;
}
/** Writing thread.
 *
 * \param handle the stream to use the filters on
 * \return a pointer to self->retval, set to 1 if the entire buffer chain has
 *         been processed, <1 otherwise
 */
static void* bufferedWriterThread(void* handle)
{
  int allsent = 1;
  BufferedWriter* self = (BufferedWriter*)handle;
  BufferChunk* chunk = self->firstChunk;

  while (self->active) {
    oml_lock(&self->lock, __FUNCTION__);
    pthread_cond_wait(&self->semaphore, &self->lock);

    // Process all chunks which have data in them
    do {
      oml_unlock(&self->lock, __FUNCTION__);
      allsent = processChunk(self, chunk);
      oml_lock(&self->lock, __FUNCTION__);

      /* Stop if we caught up to the writer... */
      if (chunk == self->writerChunk) { break; }

      /* ...otherwise, move on to the next chunk */
      if (allsent > 0) {
        chunk = getNextReadChunk(self);
      }
    } while (allsent > 0);
    oml_unlock(&self->lock, __FUNCTION__);
  }

  /* Drain this writer before terminating */
  /* XXX: "Backing-off for ..." messages might confuse the user as
   * we don't actually wait after a failure when draining at the end */
  while ((allsent = processChunk(self, chunk)) >= -1) {
    if (allsent > 0) {
      if (chunk == self->writerChunk) { break; }
      oml_lock(&self->lock, __FUNCTION__);
      chunk = getNextReadChunk(self);
      oml_unlock(&self->lock, __FUNCTION__);
    }
  }

  self->retval = allsent;
  pthread_exit(&(self->retval));
}
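/* Illustrative sketch only: how bufferedWriterThread() is presumably started
 * by the BufferedWriter constructor (which is not part of this file; the
 * helper name below is made up). bw_push() and bw_push_meta() wake the thread
 * through self->semaphore; at shutdown, bw_close() clears self->active,
 * signals the condition variable one last time and joins self->readerThread. */
#if 0
static int example_start_reader_thread(BufferedWriter* self)
{
  int err;
  self->active = 1;
  if ((err = pthread_create(&self->readerThread, NULL, bufferedWriterThread, self))) {
    logerror("Could not create buffered queue reader thread: %s\n", strerror(err));
    return -1;
  }
  return 0;
}
#endif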
/**
 * \brief Add a chunk to the end of the meta description.
 *
 * \param instance BufferedWriter handle
 * \param chunk Pointer to chunk to add
 * \param size size of chunk
 * \return 1 if success, 0 otherwise
 */
int bw_push_meta(BufferedWriterHdl instance, uint8_t* chunk, size_t size)
{
  BufferedWriter* self = (BufferedWriter*)instance;
  int result = 0;

  if (oml_lock(&self->lock, "bw_push_meta")) return 0;

  if (!self->active) {
    oml_unlock(&self->lock, "bw_push_meta");
    return 0;
  }

  if (mbuf_write(self->meta_buf, chunk, size) > 0) {
    result = 1;
    pthread_cond_signal(&self->semaphore);
  }

  oml_unlock(&self->lock, "bw_push_meta");
  return result;
}
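/* Illustrative sketch only: queueing headers through bw_push_meta() so that
 * the reader thread can hand them to the output stream along with every data
 * write (see processChain()/processChunk() below). The helper name and header
 * string are made-up placeholders, not real OML headers. */
#if 0
static void example_queue_headers(BufferedWriterHdl bufferedWriter)
{
  const char* header = "example-header: 1\n";
  bw_push_meta(bufferedWriter, (uint8_t*)header, strlen(header));
}
#endif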
/** Process one buffer chain, sending its data through the write function.
 *
 * \param self BufferedWriter to process
 * \param chain BufferChain to send
 * \return 1 if the chain has been fully sent, 0 otherwise
 */
int processChain(BufferedWriter* self, BufferChain* chain)
{
  uint8_t* buf = mbuf_rdptr(chain->mbuf);
  size_t size = mbuf_message_offset(chain->mbuf) - mbuf_read_offset(chain->mbuf);
  size_t sent = 0;

  chain->reading = 1;
  oml_unlock(&self->lock, "processChain"); /* don't keep lock while transmitting */

  MBuffer* meta = self->meta_buf;
  while (size > sent) {
    long cnt = self->writeFunc(self->writeFuncHdl,
        (void*)(buf + sent), size - sent,
        meta->rdptr, meta->fill);
    if (cnt > 0) {
      sent += cnt;
    } else {
      /* ERROR: Sleep a bit and try again.
       * To be on the safe side, we rewind to the beginning of the
       * chain and try to resend everything - this is especially important
       * if the underlying stream needs to reopen and resync. */
      mbuf_reset_read(chain->mbuf);
      size = mbuf_message_offset(chain->mbuf) - mbuf_read_offset(chain->mbuf);
      sent = 0;
      sleep(1);
    }
  }

  // get lock back to see what happened while we were busy
  oml_lock_persistent(self);
  mbuf_read_skip(chain->mbuf, sent);
  if (mbuf_write_offset(chain->mbuf) == mbuf_read_offset(chain->mbuf)) {
    // seem to have sent everything so far, reset chain
    // mbuf_clear2(chain->mbuf, 0);
    mbuf_clear2(chain->mbuf, 1);
    chain->reading = 0;
    return 1;
  }
  return 0;
}
/**
 * \brief Return the MBuffer of the current write chain, optionally with exclusive write access
 * \param instance BufferedWriter handle
 * \param exclusive if non-zero, keep the BufferedWriter locked; release it later with bw_unlock_buf
 * \return an MBuffer instance if success, NULL otherwise
 * \see bw_unlock_buf
 */
MBuffer* bw_get_write_buf(BufferedWriterHdl instance, int exclusive)
{
  BufferedWriter* self = (BufferedWriter*)instance;
  if (oml_lock(&self->lock, "bw_get_write_buf")) return NULL;

  if (!self->active) {
    oml_unlock(&self->lock, "bw_get_write_buf");
    return NULL;
  }

  BufferChain* chain = self->writerChain;
  if (chain == NULL) {
    oml_unlock(&self->lock, "bw_get_write_buf");
    return NULL;
  }

  MBuffer* mbuf = chain->mbuf;
  if (mbuf_write_offset(mbuf) >= chain->targetBufSize) {
    chain = self->writerChain = getNextWriteChain(self, chain);
    mbuf = chain->mbuf;
  }

  if (!exclusive) {
    oml_unlock(&self->lock, "bw_get_write_buf");
  }
  return mbuf;
}
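/* Illustrative sketch only: the exclusive get/write/unlock pattern for callers
 * that marshal directly into the queue's MBuffer. bw_unlock_buf() (above) is
 * the matching release for the exclusive variant used here; the helper name is
 * made up, and `data' and `size' are assumed to be provided by the caller. */
#if 0
static int example_write_direct(BufferedWriterHdl bufferedWriter, uint8_t* data, size_t size)
{
  MBuffer* mbuf = bw_get_write_buf(bufferedWriter, 1);
  if (mbuf == NULL) { return 0; }

  if (mbuf_write(mbuf, data, size) < 0) {
    logwarn("Could not write %zuB into the buffered queue\n", size);
    bw_unlock_buf(bufferedWriter);
    return 0;
  }

  bw_unlock_buf(bufferedWriter);
  return 1;
}
#endif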
/** Send data contained in one chunk.
 *
 * \warning This function acquires the lock on the BufferedWriter for the time
 * it takes to check the double-buffer.
 *
 * \warning This function acquires the lock on the chunk being processed for
 * the time it takes to check it and swap the double buffer.
 *
 * \bug The meta buffer should also be protected.
 *
 * \param self BufferedWriter to process
 * \param chunk BufferChunk to process
 *
 * \return 1 if chunk has been fully sent, 0 if not, -1 on continuing back-off, -2 otherwise
 * \see oml_outs_write_f
 */
static int processChunk(BufferedWriter* self, BufferChunk* chunk)
{
  time_t now;
  int ret = -2;
  ssize_t cnt = 0;
  MBuffer *read_buf = NULL;

  assert(self);
  assert(self->meta_buf);
  assert(self->read_buf);
  assert(chunk);
  assert(chunk->mbuf);

  oml_lock(&self->lock, __FUNCTION__);
  if (mbuf_message(self->read_buf) > mbuf_rdptr(self->read_buf)) {
    /* There is unread data in the double buffer */
    read_buf = self->read_buf;
  }
  oml_unlock(&self->lock, __FUNCTION__);

  oml_lock(&chunk->lock, __FUNCTION__);
  if ((NULL == read_buf) && (mbuf_message(chunk->mbuf) >= mbuf_rdptr(chunk->mbuf))) {
    /* There is unread data in this chunk, swap MBuffers with the double buffer */
    read_buf = chunk->mbuf;
    chunk->mbuf = self->read_buf;
  }
  oml_unlock(&chunk->lock, __FUNCTION__);

  oml_lock(&self->lock, __FUNCTION__);
  self->read_buf = read_buf;
  oml_unlock(&self->lock, __FUNCTION__);

  if (NULL == read_buf) {
    /* The current message is not after the read pointer,
     * we must be on the writer chunk */
    ret = 1;
    goto processChunk_cleanup;
  }

  time(&now);
  if (difftime(now, self->last_failure_time) < self->backoff) {
    logdebug("%s: Still in back-off period (%ds)\n", self->outStream->dest, self->backoff);
    ret = -1;
    goto processChunk_cleanup;
  }

  while (mbuf_write_offset(read_buf) > mbuf_read_offset(read_buf)) {
    oml_lock(&self->meta_lock, __FUNCTION__);
    cnt = self->outStream->write(self->outStream,
        mbuf_rdptr(read_buf),
        mbuf_message_offset(read_buf) - mbuf_read_offset(read_buf),
        mbuf_rdptr(self->meta_buf),
        mbuf_fill(self->meta_buf));
    oml_unlock(&self->meta_lock, __FUNCTION__);

    if (cnt > 0) {
      mbuf_read_skip(read_buf, cnt);
      if (self->backoff) {
        self->backoff = 0;
        loginfo("%s: Connected\n", self->outStream->dest);
      }

    } else {
      self->last_failure_time = now;
      if (!self->backoff) {
        self->backoff = 1;
      } else if (self->backoff < UINT8_MAX) {
        self->backoff *= 2;
      }
      logwarn("%s: Error sending, backing off for %ds\n", self->outStream->dest, self->backoff);
      goto processChunk_cleanup;
    }
  }

  ret = 1;

processChunk_cleanup:
  return ret;
}
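/* Illustrative sketch only: the back-off schedule used above. The delay starts
 * at 1s and doubles after each failed write while it is still below UINT8_MAX;
 * any successful write resets it to 0. This standalone loop just prints the
 * resulting sequence and is not part of the library. */
#if 0
#include <stdint.h>
#include <stdio.h>
int main(void)
{
  unsigned int backoff = 0;
  int failure;
  for (failure = 1; failure <= 10; failure++) {
    if (!backoff) { backoff = 1; }
    else if (backoff < UINT8_MAX) { backoff *= 2; }
    printf("failure %2d: backing off for %us\n", failure, backoff);
  }
  return 0; /* prints 1, 2, 4, ..., 128, 256, 256 */
}
#endif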
/** Unlock a measurement point mutex
 *
 * \param mp OmlMP to unlock
 * \see mp_lock, oml_unlock
 */
void mp_unlock(OmlMP* mp)
{
  oml_unlock(mp->mutexP, mp->name);
}
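/* Illustrative sketch only: the usual pairing with mp_lock(), assuming it
 * follows the same convention as oml_lock() and returns 0 on success. The
 * helper name and the body are placeholders. */
#if 0
static void example_locked_injection(OmlMP* mp)
{
  if (!mp_lock(mp)) {
    /* ... safely touch the MP's filters or sample count here ... */
    mp_unlock(mp);
  }
}
#endif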