/**
 * OpenSSL BIO write callback for DTLS-over-TURN.
 *
 * Copies the outgoing record into a fresh mbuf, leaving SPACE (4) zeroed
 * bytes of headroom for a TURN channel header to be prepended later, then
 * passes the packet to the UDP send helper chain.
 *
 * @param b   BIO whose ->ptr holds the owning dtls_flow
 * @param buf record data to transmit
 * @param len number of bytes in @buf
 *
 * @return len on success, -1 on allocation or send failure
 */
static int bio_write(BIO *b, const char *buf, int len)
{
	struct dtls_flow *flow = b->ptr;
	enum {SPACE = 4}; /* sizeof TURN channel header */
	struct mbuf *pkt;
	int err;

	pkt = mbuf_alloc(SPACE + len);
	if (!pkt)
		return -1;

	/* headroom placeholder, then the payload */
	(void)mbuf_fill(pkt, 0x00, SPACE);
	(void)mbuf_write_mem(pkt, (void *)buf, len);

	/* sender reads from just past the reserved header space */
	pkt->pos = SPACE;

	err = udp_send_helper(flow->us, &flow->peer, pkt, flow->uh);
	if (err)
		warning("dtls: udp_send_helper: %m\n", err);

	mem_deref(pkt);

	return err ? -1 : len;
}
/** Find the next empty write chunk, sets self->writerChunk to it and returns
 * it.
 *
 * We only use the next one if it is empty. If not, we essentially just filled
 * up the last chunk and wrapped around to the socket reader. In that case, we
 * either create a new chunk if the overall buffer can still grow, or we drop
 * the data from the current one.
 *
 * \warning A lock on the current writer chunk should be held prior to calling
 * this function. It will be released, and the returned writerChunk will be
 * similarly locked.
 *
 * \param self BufferedWriter pointer
 * \param current locked BufferChunk to use or from which to find the next
 * \return a locked BufferChunk in which data can be stored
 */
BufferChunk* getNextWriteChunk(BufferedWriter* self, BufferChunk* current) {
  int nlost;
  BufferChunk* nextBuffer;

  assert(current != NULL);
  nextBuffer = current->next;
  /* Fixed: '&current->lock' had been corrupted to '¤t->lock'
   * (HTML-entity mangling of '&curren'); restored throughout. */
  oml_unlock(&current->lock, __FUNCTION__);
  assert(nextBuffer != NULL);

  oml_lock(&self->lock, __FUNCTION__);
  if (nextBuffer == self->nextReaderChunk) {
    if (self->unallocatedBuffers > 0) {
      /* The next buffer is the next to be read, but we can allocate more,
       * allocate a new buffer, insert it after the current writer, and use it */
      nextBuffer = createBufferChunk(self);
      assert(nextBuffer); /** \todo Use existing buffer if allocation fails */
      /* Lock ordering: release self->lock before taking a chunk lock */
      oml_unlock(&self->lock, __FUNCTION__);
      oml_lock(&current->lock, __FUNCTION__);
      nextBuffer->next = current->next;
      current->next = nextBuffer; /* we have a lock on this one */
      oml_unlock(&current->lock, __FUNCTION__);
      oml_lock(&self->lock, __FUNCTION__);
    } else {
      /* The next buffer is the next to be read, and we cannot allocate more,
       * use it, dropping unread data, and advance the read pointer */
      self->nextReaderChunk = nextBuffer->next;
    }
  }
  self->writerChunk = nextBuffer;
  nlost = bw_msgcount_reset(self);
  self->nlost += nlost;
  oml_unlock(&self->lock, __FUNCTION__);

  oml_lock(&nextBuffer->lock, __FUNCTION__);
  if (nlost) {
    logwarn("%s: Dropping %d samples (%dB)\n", self->outStream->dest, nlost,
        mbuf_fill(nextBuffer->mbuf));
  }
  mbuf_clear2(nextBuffer->mbuf, 0);

  /* Now we just need to copy the message from current to self->writerChunk */
  int msgSize = mbuf_message_length(current->mbuf);
  if (msgSize > 0) {
    mbuf_write(nextBuffer->mbuf, mbuf_message(current->mbuf), msgSize);
    mbuf_reset_write(current->mbuf);
  }

  return nextBuffer;
}
/**
 * Encode a PCP MAP payload into an mbuf.
 *
 * Wire layout written, in order: mapping nonce, protocol octet, 3 reserved
 * zero octets, internal port (network byte order), suggested external port,
 * suggested external address.
 *
 * @param mb  destination mbuf
 * @param map MAP parameters to serialise
 *
 * @return 0 on success, EINVAL on NULL argument, otherwise the OR of the
 *         individual write errors
 */
static int pcp_map_encode(struct mbuf *mb, const struct pcp_map *map)
{
	int err;

	if (!mb || !map)
		return EINVAL;

	/* Mapping nonce */
	err  = mbuf_write_mem(mb, map->nonce, sizeof(map->nonce));

	/* Protocol, then 3 reserved (zero) octets */
	err |= mbuf_write_u8(mb, map->proto);
	err |= mbuf_fill(mb, 0x00, 3);

	/* Internal port */
	err |= mbuf_write_u16(mb, htons(map->int_port));

	/* Suggested external port and address */
	err |= pcp_write_port(mb, &map->ext_addr);
	err |= pcp_ipaddr_encode(mb, &map->ext_addr);

	return err;
}
// This function finds the next empty write chain, sets +self->writeChain+ to // it and returns. // // We only use the next one if it is empty. If not, we // essentially just filled up the last chain and wrapped // around to the socket reader. In that case, we either create a new chain // if the overall buffer can still grow, or we drop the data from the current one. // // This assumes that the current thread holds the +self->lock+ and the lock on // the +self->writeChain+. // BufferChain* getNextWriteChain( BufferedWriter* self, BufferChain* current ) { assert(current != NULL); BufferChain* nextBuffer = current->next; assert(nextBuffer != NULL); BufferChain* resChain = NULL; if (mbuf_remaining(nextBuffer->mbuf) == 0) { // It's empty, we can use it mbuf_clear2(nextBuffer->mbuf, 0); resChain = nextBuffer; } else if (self->chainsAvailable > 0) { // insert new chain between current and next one. BufferChain* newBuffer = createBufferChain(self); newBuffer->next = nextBuffer; current->next = newBuffer; resChain = newBuffer; } else { // Filled up buffer, time to drop data and reuse current buffer // Current buffer holds most recent added data (we drop from the queue's tail //assert(current->reading == 0); assert(current->reading == 0); o_log (O_LOG_WARN, "Dropping %d bytes of measurement data\n", mbuf_fill(current->mbuf)); mbuf_repack_message2(current->mbuf); return current; } // Now we just need to copy the +message+ from +current+ to +resChain+ int msgSize = mbuf_message_length(current->mbuf); if (msgSize > 0) { mbuf_write(resChain->mbuf, mbuf_message(current->mbuf), msgSize); mbuf_reset_write(current->mbuf); } return resChain; }
/** Send data contained in one chunk.
 *
 * \warning This function acquires the lock on the BufferedWriter for the time
 * it takes to check the double-buffer.
 *
 * \warning This function acquires the lock on the chunk being processed for
 * the time it takes to check it and swap the double buffer.
 *
 * \bug The meta buffer should also be protected.
 *
 * \param self BufferedWriter to process
 * \param chunk link of the chunk to process
 *
 * \return 1 if chunk has been fully sent, 0 if not, -1 on continuing back-off, -2 otherwise
 * \see oml_outs_write_f
 */
static int processChunk(BufferedWriter* self, BufferChunk* chunk) {
  time_t now;
  int ret = -2;        /* default: generic failure */
  ssize_t cnt = 0;
  MBuffer *read_buf = NULL;

  assert(self);
  assert(self->meta_buf);
  assert(self->read_buf);
  assert(chunk);
  assert(chunk->mbuf);

  /* Step 1: prefer the double buffer if it still has unread data. */
  oml_lock(&self->lock, __FUNCTION__);
  if (mbuf_message(self->read_buf) > mbuf_rdptr(self->read_buf)) {
    /* There is unread data in the double buffer */
    read_buf = self->read_buf;
  }
  oml_unlock(&self->lock, __FUNCTION__);

  /* Step 2: otherwise swap the chunk's MBuffer with the (drained) double
   * buffer, so the writer can keep filling the chunk while we send.
   * NOTE(review): this test uses '>=' where the one above uses '>' —
   * looks intentional (swap even when message == rdptr) but confirm. */
  oml_lock(&chunk->lock, __FUNCTION__);
  if ((NULL == read_buf) && (mbuf_message(chunk->mbuf) >= mbuf_rdptr(chunk->mbuf))) {
    /* There is unread data in the read buffer, swap MBuffers */
    read_buf = chunk->mbuf;
    chunk->mbuf = self->read_buf;
  }
  oml_unlock(&chunk->lock, __FUNCTION__);

  /* Step 3: publish the buffer we will read from as the new double buffer. */
  oml_lock(&self->lock, __FUNCTION__);
  self->read_buf = read_buf;
  oml_unlock(&self->lock, __FUNCTION__);

  if (NULL == read_buf) {
    /* The current message is not after the read pointer,
     * we must be on the writer chunk */
    ret = 1;
    goto processChunk_cleanup;

  }

  /* Honour the exponential back-off window after a send failure. */
  time(&now);
  if (difftime(now, self->last_failure_time) < self->backoff) {
    logdebug("%s: Still in back-off period (%ds)\n", self->outStream->dest, self->backoff);
    ret = -1;
    goto processChunk_cleanup;
  }

  /* Drain read_buf, sending up to the current message offset each pass;
   * headers from meta_buf accompany every write. */
  while (mbuf_write_offset(read_buf) > mbuf_read_offset(read_buf)) {
    oml_lock(&self->meta_lock, __FUNCTION__);
    cnt = self->outStream->write(self->outStream,
        mbuf_rdptr(read_buf),
        mbuf_message_offset(read_buf) - mbuf_read_offset(read_buf),
        mbuf_rdptr(self->meta_buf), mbuf_fill(self->meta_buf));
    oml_unlock(&self->meta_lock, __FUNCTION__);

    if (cnt > 0) {
      /* Partial writes are fine: advance by what was actually sent. */
      mbuf_read_skip(read_buf, cnt);
      if (self->backoff) {
        self->backoff = 0;
        loginfo("%s: Connected\n", self->outStream->dest);
      }

    } else {
      /* Send failed: start or double the back-off window.
       * NOTE(review): doubling is gated on backoff < UINT8_MAX; if backoff
       * is a uint8_t, 128*2 would wrap to 0 — check the field's type. */
      self->last_failure_time = now;
      if (!self->backoff) {
        self->backoff = 1;
      } else if (self->backoff < UINT8_MAX) {
        self->backoff *= 2;
      }
      logwarn("%s: Error sending, backing off for %ds\n", self->outStream->dest, self->backoff);
      goto processChunk_cleanup;
    }
  }

  ret = 1;

processChunk_cleanup:
  return ret;
}