Пример #1
0
/* Blocking stream-vtable read backed by a ring buffer.
 * Loops until `num` bytes have been consumed, sleeping on the writer's
 * semaphore whenever the buffer runs dry.  `dest` may be null to discard.
 * NOTE(review): assumes one reader and one writer thread cooperating via
 * the pending_read/pending_write flags and the two semaphores — confirm
 * against the matching write implementation. */
static uint64_t _ringbuffer_stream_read( stream_t* stream, void* dest, uint64_t num )
{
	stream_ringbuffer_t* rbstream = (stream_ringbuffer_t*)stream;
	ringbuffer_t* buffer = RINGBUFFER_FROM_STREAM( rbstream );

	unsigned int num_read = ringbuffer_read( buffer, dest, (unsigned int)num );

	while( num_read < num )
	{
		/* Advertise that we are blocked waiting for more data */
		rbstream->pending_read = 1;

		/* If the writer is blocked waiting for space, wake it so it can refill */
		if( rbstream->pending_write )
			semaphore_post( &rbstream->signal_read );

		/* Sleep until the writer signals that new data was produced */
		semaphore_wait( &rbstream->signal_write );
		rbstream->pending_read = 0;

		num_read += ringbuffer_read( buffer, dest ? pointer_offset( dest, num_read ) : 0, (unsigned int)( num - num_read ) );
	}

	/* Our reads freed space; release a writer that may be waiting for it */
	if( rbstream->pending_write )
		semaphore_post( &rbstream->signal_read );

	return num_read;
}
Пример #2
0
/* Non-destructive read of `nb` bytes located `offset` bytes past the
 * current read head.  The buffer itself is never modified: all reads go
 * through a shallow copy of its bookkeeping state.
 * Returns the number of bytes copied, or (lcb_size_t)-1 when fewer than
 * `offset` bytes are available. */
lcb_size_t ringbuffer_peek_at(ringbuffer_t *buffer, lcb_size_t offset, void *dest, lcb_size_t nb)
{
    ringbuffer_t shadow = *buffer;
    lcb_size_t skipped = ringbuffer_read(&shadow, NULL, offset);
    if (skipped != offset) {
        return -1;
    }
    return ringbuffer_read(&shadow, dest, nb);
}
Пример #3
0
DECLARE_TEST( ringbuffer, io )
{
	/* Push random payloads of every size 0..255 through a 512-byte ring
	   buffer, 32 times over, verifying both content round-trips and the
	   buffer's cumulative read/write byte counters. */
	ringbuffer_t* ring;
	char source[256];
	char sink[256];
	unsigned int isize, icheck, iter;
	unsigned int total = 0;

	for( isize = 0; isize < 256; ++isize )
		source[isize] = (char)( random32() & 0xFF );

	ring = ringbuffer_allocate( 512 );
	for( iter = 0; iter < 32; ++iter )
	{
		for( isize = 0; isize < 256; ++isize )
		{
			ringbuffer_write( ring, source, isize );
			ringbuffer_read( ring, sink, isize );

			for( icheck = 0; icheck < isize; ++icheck )
				EXPECT_EQ( sink[icheck], source[icheck] );

			total += isize;
		}
	}
	EXPECT_EQ( ringbuffer_total_read( ring ), total );
	EXPECT_EQ( ringbuffer_total_written( ring ), total );

	ringbuffer_deallocate( ring );

	return 0;
}
Пример #4
0
/* Discard `nb` bytes from the read side of the buffer (NULL destination
 * means "consume without copying").  The caller guarantees the bytes are
 * present; anything less is treated as fatal corruption. */
void ringbuffer_consumed(ringbuffer_t *buffer, lcb_size_t nb)
{
    if (ringbuffer_read(buffer, NULL, nb) != nb) {
        abort();
    }
}
Пример #5
0
/* Copy `nbytes` from src into dst without consuming src: reads are done
 * against a stack copy of src's state.  dst is grown if required and its
 * write head advanced by `nbytes`.
 * Returns 0 on success, -1 on invalid size or allocation failure. */
int ringbuffer_memcpy(ringbuffer_t *dst, ringbuffer_t *src,
                      lcb_size_t nbytes)
{
    ringbuffer_t copy = *src;
    struct lcb_iovec_st iov[2];
    int ii = 0;
    lcb_size_t towrite = nbytes;
    lcb_size_t toread, nb;

    if (nbytes > ringbuffer_get_nbytes(src)) {
        /* EINVAL: caller asked for more than src currently holds */
        return -1;
    }

    if (!ringbuffer_ensure_capacity(dst, nbytes)) {
        /* Failed to allocate space */
        return -1;
    }

    /* dst's writable region may wrap, so it is exposed as up to two iovecs */
    ringbuffer_get_iov(dst, RINGBUFFER_WRITE, iov);
    do {
        assert(ii < 2);
        /* BUG FIX: size the chunk for the CURRENT iovec each iteration.
         * The previous code computed `toread` once from iov[0], so a copy
         * spanning both iovecs read 0 bytes on the second pass, never
         * decremented towrite, and tripped the assert above. */
        toread = minimum(iov[ii].iov_len, towrite);
        nb = ringbuffer_read(&copy, iov[ii].iov_base, toread);
        towrite -= nb;
        ++ii;
    } while (towrite > 0);
    ringbuffer_produced(dst, nbytes);
    return 0;
}
Пример #6
0
/* lua_Reader callback: chunks were serialized into the ring buffer as a
 * size_t length prefix followed by that many payload bytes.  A record
 * with length 0 marks end-of-stream.  Returns NULL (with *size = 0) on
 * truncation or at end of stream. */
static const char *rb_lua_reader(lua_State *L, void *param, size_t *size) {
   chunked_t *chunked = (chunked_t *)param;
   (void)L;

   if (ringbuffer_read(chunked->rb, size, sizeof(size_t)) != sizeof(size_t)) {
      /* truncated or missing length prefix */
      *size = 0;
      return NULL;
   }
   if (*size == 0) {
      /* zero-length record: end of the serialized stream */
      chunked->read_last = 1;
      return NULL;
   }
   if (ringbuffer_read(chunked->rb, chunked->buf, *size) != *size) {
      /* payload shorter than its declared length */
      *size = 0;
      return NULL;
   }
   return (const char *)chunked->buf;
}
Пример #7
0
/* Demodulate one captured buffer of IQ samples: filter, demodulate,
 * apply squelch, post-process, and write the audio to fm->file.  May
 * also hop to the next scan-list frequency when squelch stays closed.
 * NOTE(review): touches the global `dev` for retune/flush — confirm
 * this runs on a single demod thread. */
void full_demod(struct fm_state *fm)
{
	int i, sr, freq_next, hop = 0;
	//    pthread_mutex_lock(&data_ready);

	static unsigned char tmpBuf[DEFAULT_BUF_LENGTH];
	/* Busy-poll (100 ms period) until the capture side queues a buffer */
	while(ringbuffer_is_empty((ringbuffer*)fm->buf))
    	{
        	usleep(100000);
	}
	ringbuffer_read((ringbuffer*)fm->buf, tmpBuf);
	//fprintf(stderr, "data!\n");

	/* rotate_90: presumably the fs/4 frequency-shift trick — confirm */
	rotate_90(tmpBuf, sizeof(tmpBuf));
	if (fm->fir_enable) {
        	low_pass_fir(fm, tmpBuf, sizeof(tmpBuf));
	} else {
        	low_pass(fm, tmpBuf, sizeof(tmpBuf));
	}
	//    pthread_mutex_unlock(&data_write);
	fm->mode_demod(fm);
        if (fm->mode_demod == &raw_demod) {
		/* raw mode bypasses squelch and all post-processing */
		fwrite(fm->signal2, 2, fm->signal2_len, fm->file);
		return;
	}
	sr = post_squelch(fm);
	/* Squelch closed long enough: mute (single freq) or hop (scan list) */
	if (!sr && fm->squelch_hits > fm->conseq_squelch) {
		if (fm->terminate_on_squelch) {
			fm->exit_flag = 1;}
		if (fm->freq_len == 1) {  /* mute */
			for (i=0; i<fm->signal_len; i++) {
				fm->signal2[i] = 0;}
		}
		else {
			hop = 1;}
	}
	if (fm->post_downsample > 1) {
		fm->signal2_len = low_pass_simple(fm->signal2, fm->signal2_len, fm->post_downsample);}
	if (fm->output_rate > 0) {
		low_pass_real(fm);
	}
	if (fm->deemph) {
		deemph_filter(fm);}
	if (fm->dc_block) {
		dc_block_filter(fm);}
	/* ignore under runs for now */
	fwrite(fm->signal2, 2, fm->signal2_len, fm->file);
	if (hop && ringbuffer_is_empty((ringbuffer*)fm->buf)) { // Making sure the buffer is empty before tuning
		freq_next = (fm->freq_now + 1) % fm->freq_len;
		optimal_settings(fm, freq_next, 1);
		fm->squelch_hits = fm->conseq_squelch + 1;  /* hair trigger */
		/* wait for settling and flush buffer */
		usleep(5000);
		rtlsdr_read_sync(dev, NULL, 4096, NULL);
	}
}
Пример #8
0
/* Interactive self-test for the global `testring`: checks the readable
 * flag on an empty ring, writes four bytes, drains them back, and then
 * performs one deliberate read-on-empty, logging everything over UART. */
void test_ringbuffer() {
	uint16_t rc = 0;
	char fetched = 0;
	uart_puts("ringbuffer test\r\n");
	uart_puts("\r\nringbuffer is empty. Check is_readable\r\n");
	uart_puts("should be 0000. is : ");
	uart_put_u16(ring_is_readable(&testring));
	uart_puts("\r\n");
	uart_puts("writing 4 bytes into ringbuffer (1234)\r\n");
	ringbuffer_write(&testring, '1');
	ringbuffer_write(&testring, '2');
	ringbuffer_write(&testring, '3');
	ringbuffer_write(&testring, '4');

	print_ringbuffer(&testring);

	uart_puts("check if ring is readable\r\n");
	uart_puts("check readable : ");
	uart_put_u16(ring_is_readable(&testring));
	uart_puts("\r\n");

	/* Drain one byte at a time until the ring reports empty */
	uart_puts("read out all avaiable bytes until ring isnt readable\r\n");
	while (ring_is_readable(&testring)) {
		uart_puts("read return code ");
		rc = ringbuffer_read(&testring, &fetched);
		uart_put_u16(rc);
		uart_puts(" char is : ");
		uart_putc(fetched);
		uart_puts("\r\n");
	}
	uart_puts("end reading variables\r\n");
	/* Underflow case: one extra read against the now-empty ring */
	uart_puts("try to read even when ring isnt readable");
	uart_puts("read return code ");
	rc = ringbuffer_read(&testring, &fetched);
	uart_put_u16(rc);
	uart_puts(" char is : ");
	uart_putc(fetched);
	uart_puts("\r\n");
}
Пример #9
0
/* Drain src completely into dest, moving data through a 1 KB stack
 * staging buffer and growing dest on demand.  Always returns 1; any
 * capacity or short-write condition fails the assertions. */
int ringbuffer_append(ringbuffer_t *src, ringbuffer_t *dest)
{
    char chunk[1024];
    lcb_size_t got;

    for (;;) {
        got = ringbuffer_read(src, chunk, sizeof(chunk));
        if (got == 0) {
            break;
        }
        lcb_assert(ringbuffer_ensure_capacity(dest, got));
        lcb_size_t put = ringbuffer_write(dest, chunk, got);
        lcb_assert(put == got);
    }

    return 1;
}
Пример #10
0
/* Enlarge the ring buffer by `cb` bytes.  The live contents are
 * linearized into the front of a freshly allocated block, so afterwards
 * rp == 0 and wp == rcb == number of live bytes.
 * NOTE(review): the saved_* snapshot is reset here — confirm no caller
 * grows the buffer while a saved position is outstanding. */
void ringbuffer_grow_by(ringbuffer_t *rb, size_t cb) {
   size_t new_cb = rb->cb + cb;
   uint8_t *new_buf = malloc(new_cb);
   if (new_buf == NULL) {
      /* BUG FIX: the previous code passed a NULL block straight into
       * ringbuffer_read on allocation failure. Fail loudly instead of
       * corrupting memory. */
      abort();
   }
   size_t rcb = ringbuffer_read(rb, new_buf, new_cb);
   free(rb->buf);
   rb->buf = new_buf;
   rb->cb = new_cb;
   rb->rp = 0;
   rb->wp = rcb;
   rb->rcb = rcb;
   rb->saved_wp = 0;
   rb->saved_rcb = 0;
}
Пример #11
0
/* Finalize an OBSERVE multi-command context: wrap each per-server body
 * accumulated in ctx->requests into a binary-protocol OBSERVE packet and
 * schedule it on the matching pipeline.  Always returns LCB_SUCCESS.
 * NOTE(review): assumes requests[ii] pairs with cq->pipelines[ii] —
 * confirm the context builder guarantees this ordering. */
static lcb_error_t
obs_ctxdone(lcb_MULTICMD_CTX *mctx, const void *cookie)
{
    unsigned ii;
    OBSERVECTX *ctx = CTX_FROM_MULTI(mctx);
    mc_CMDQUEUE *cq = &ctx->instance->cmdq;

    for (ii = 0; ii < ctx->nrequests; ii++) {
        protocol_binary_request_header hdr;
        mc_PACKET *pkt;
        mc_PIPELINE *pipeline;
        struct observe_st *rr = ctx->requests + ii;
        pipeline = cq->pipelines[ii];

        /* Skip servers for which no observe body was built */
        if (!rr->allocated) {
            continue;
        }

        pkt = mcreq_allocate_packet(pipeline);
        lcb_assert(pkt);

        /* Reserve room for the fixed header plus the serialized body */
        mcreq_reserve_header(pipeline, pkt, MCREQ_PKT_BASESIZE);
        mcreq_reserve_value2(pipeline, pkt, rr->body.nbytes);

        hdr.request.magic = PROTOCOL_BINARY_REQ;
        hdr.request.opcode = PROTOCOL_BINARY_CMD_OBSERVE;
        hdr.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
        hdr.request.keylen = 0;
        hdr.request.cas = 0;
        hdr.request.vbucket = 0;
        hdr.request.extlen = 0;
        hdr.request.opaque = pkt->opaque;
        hdr.request.bodylen = htonl((lcb_uint32_t)rr->body.nbytes);

        /* Copy header, then drain the body ringbuffer into the value span */
        memcpy(SPAN_BUFFER(&pkt->kh_span), hdr.bytes, sizeof(hdr.bytes));
        ringbuffer_read(&rr->body, SPAN_BUFFER(&pkt->u_value.single), rr->body.nbytes);

        pkt->flags |= MCREQ_F_REQEXT;
        pkt->u_rdata.exdata = (mc_REQDATAEX *)ctx;
        mcreq_sched_add(pipeline, pkt);
        TRACE_OBSERVE_BEGIN(&hdr, SPAN_BUFFER(&pkt->u_value.single));
    }
    destroy_requests(ctx);
    ctx->base.start = gethrtime();
    ctx->base.cookie = cookie;
    ctx->base.callback = handle_observe_callback;
    return LCB_SUCCESS;
}
Пример #12
0
/* Drop the request that produced `header` from the command log along
 * with its per-command cookie, without invoking any user callback.
 * STAT responses carrying a key are partial results, so their log and
 * cookie entries are retained until the terminating (keylen == 0)
 * packet arrives. */
static void swallow_command(lcb_server_t *c,
                            const protocol_binary_response_header *header,
                            int was_connected)
{
    lcb_size_t nr;
    protocol_binary_request_header req;
    if (was_connected &&
            (header->response.opcode != PROTOCOL_BINARY_CMD_STAT ||
             header->response.keylen == 0)) {
        /* Read the logged request header, then consume its body and the
         * matching cookie record. */
        nr = ringbuffer_read(&c->cmd_log, req.bytes, sizeof(req));
        lcb_assert(nr == sizeof(req));
        ringbuffer_consumed(&c->cmd_log, ntohl(req.request.bodylen));
        ringbuffer_consumed(&c->output_cookies,
                            sizeof(struct lcb_command_data_st));
    }
}
Пример #13
0
// Destructor: drain any encoded data still sitting in the ring buffer to
// the dump file and/or the icecast stream, then release every resource
// owned by the encoder (buffers, file handle, shout connection, planes).
VideoEncoder::~VideoEncoder() {
    // flush all the ringbuffer to file and stream
    unsigned int encnum = 0;

    if(encbuf) {
        do {
            // take whatever the encoder left behind, up to encbuf's size
            if((encnum = ringbuffer_read_space(ringbuffer)) > 0)
                encnum = ringbuffer_read(ringbuffer, encbuf, encnum);
//                           ((audio_kbps + video_kbps)*1024)/24);

            // encnum is unsigned, so this only triggers at 0 (drained)
            if(encnum <= 0) break;

            if(write_to_disk && filedump_fd) {
                fwrite(encbuf, 1, encnum, filedump_fd);
            }

            if(write_to_stream && ice) {
                shout_sync(ice);
                shout_send(ice, (const unsigned char*)encbuf, encnum);
            }

            func("flushed %u bytes closing video encoder", encnum);

        } while(encnum > 0);
        free(encbuf);
    }
    // close the filedump
    if(filedump_fd) fclose(filedump_fd);

    // now deallocate the ringbuffer
    ringbuffer_free(ringbuffer);

    // NOTE(review): shout_close/shout_shutdown run even when streaming was
    // never enabled — confirm libshout tolerates that here.
    shout_close(ice);
    //  shout_sync(ice);
    //  shout_free(ice);
    shout_shutdown();

    // YUV planes and packed buffer allocated by the encoder setup
    if(enc_y) free(enc_y);
    if(enc_u) free(enc_u);
    if(enc_v) free(enc_v);
    if(enc_yuyv) free(enc_yuyv);

    free(fps);
}
Пример #14
0
/* Read up to `num` bytes from the ring buffer into `dest`.
 * Pass dest == 0 to discard (consume) bytes without copying.
 * Returns the number of bytes actually read, which is less than `num`
 * when the buffer holds fewer bytes.
 * Only offset_read and total_read are written here, matching a
 * single-reader usage of the structure. */
unsigned int ringbuffer_read( ringbuffer_t* buffer, void* dest, unsigned int num )
{
	unsigned int do_read;
	unsigned int max_read;
	unsigned int buffer_size;
	unsigned int offset_read;
	unsigned int offset_write;

	FOUNDATION_ASSERT( buffer );

	buffer_size = buffer->buffer_size;
	offset_read = buffer->offset_read;
	offset_write = buffer->offset_write;

	/* Contiguous readable region: up to the write offset, or (when the
	   writer has wrapped) up to the end of the storage. */
	if( offset_read > offset_write )
		max_read = buffer_size - offset_read;
	else
		max_read = offset_write - offset_read;

	do_read = num;
	if( do_read > max_read )
		do_read = max_read;

	if( !do_read )
		return 0;

	if( dest )
		memcpy( dest, buffer->buffer + offset_read, do_read );

	offset_read += do_read;
	if( offset_read == buffer_size )
		offset_read = 0;

	buffer->offset_read = offset_read;
	buffer->total_read += do_read;

	/* If we stopped at the end of storage and data continues at the
	   front, recurse once for the wrapped tail.
	   BUG FIX: guard dest in the recursive call as well — previously a
	   null dest (discard mode) combined with a wrapped read passed
	   pointer_offset( 0, do_read ) down, turning the discard into a
	   memcpy through a bogus non-null pointer. */
	if( ( do_read < num ) && ( offset_read == 0 ) && ( offset_write > 0 ) )
		do_read += ringbuffer_read( buffer, dest ? pointer_offset( dest, do_read ) : 0, num - do_read );

	return do_read;
}
Пример #15
0
/* Move everything out of src into dest, 1 KB at a time, growing dest as
 * needed.  Returns 1 on success; capacity or short-write failures abort
 * the process (the `return 0` statements are unreachable fallbacks). */
int ringbuffer_append(ringbuffer_t *src, ringbuffer_t *dest)
{
    char staging[1024];
    lcb_size_t got;

    for (;;) {
        got = ringbuffer_read(src, staging, sizeof(staging));
        if (got == 0) {
            break;
        }

        if (!ringbuffer_ensure_capacity(dest, got)) {
            abort();
            return 0;
        }

        if (ringbuffer_write(dest, staging, got) != got) {
            abort();
            return 0;
        }
    }

    return 1;
}
Пример #16
0
/* Ensure `buffer` can accept at least `size` additional bytes, doubling
 * the backing storage as needed.  Growing linearizes the contents at the
 * front of the new allocation (read_head reset to root).
 * Returns 1 on success, 0 on allocation failure.
 * NOTE(review): the capacity test uses strict '<', so a request exactly
 * equal to the free space still reallocates — confirm intended. */
int ringbuffer_ensure_capacity(ringbuffer_t *buffer, lcb_size_t size)
{
    char *new_root;
    lcb_size_t new_size = buffer->size << 1;
    if (new_size == 0) {
        new_size = 128;
    }

    if (size < (buffer->size - buffer->nbytes)) {
        /* we've got capacity! */
        return 1;
    }

    /* determine the new buffer size... */
    while ((new_size - buffer->nbytes) < size) {
        new_size <<= 1;
    }

    /* go ahead and allocate a bigger block */
    if ((new_root = malloc(new_size)) == NULL) {
        /* Allocation failed! */
        return 0;
    } else {
        /* copy the data over :) */
        char *old;
        lcb_size_t nbytes = buffer->nbytes;
        /* ringbuffer_read handles any wrap-around, leaving the data
         * contiguous at the front of new_root */
        lcb_size_t nr = ringbuffer_read(buffer, new_root, nbytes);
        if (nr != nbytes) {
            abort();
        }
        old = buffer->root;
        buffer->size = new_size;
        buffer->root = new_root;
        buffer->nbytes = nbytes;
        buffer->read_head = buffer->root;
        buffer->write_head = buffer->root + nbytes;
        free(old);
        return 1;
    }
}
Пример #17
0
/****************************************************************************
 *
 * NAME: SPM_vProcStream
 *
 * DESCRIPTION:
 * Stream Processing Machine(SPM) process stream
 * If receive a valid frame, unpack and execute callback
 * else discard it.
 *
 * RETURNS:
 * void
 *
 ****************************************************************************/
PRIVATE void SPM_vProcStream(uint32 dataCnt)
{
	uint8 tmp[RXFIFOLEN] = {0};
	/* calc the minimal */
	uint32 readCnt = MIN(dataCnt, RXFIFOLEN);

	/* Snapshot a bounded chunk of the RX ring under the ring mutex.
	   NOTE(review): this read appears non-destructive (the explicit
	   ringbuffer_pop below does the discarding) — confirm this ring's
	   ringbuffer_read does not consume. */
	OS_eEnterCriticalSection(mutexRxRb);
	ringbuffer_read(&rb_rx_spm, tmp, readCnt);
	OS_eExitCriticalSection(mutexRxRb);

	/* Instance an apiSpec */
	tsApiSpec apiSpec;
	bool bValid = FALSE;
	memset(&apiSpec, 0, sizeof(tsApiSpec));

	/* Deassemble apiSpec frame */
	uint16 procSize =  u16DecodeApiSpec(tmp, readCnt, &apiSpec, &bValid);
	if(!bValid)
	{
	/*
	  Invalid frame,discard from ringbuffer
	  Any data received prior to the start delimiter will be discarded.
	  If the frame is not received correctly or if the checksum fails,
	  discard too.And Re-Activate Task 1ms later.
	*/
		vResetATimer(APP_tmrHandleUartRx, APP_TIME_MS(1));
	}
	else
	{
		/* Process API frame using API support layer's api */
		API_i32ApiFrmCmdProc(&apiSpec);
	}
	/* Discard already processed part */
	OS_eEnterCriticalSection(mutexRxRb);
	ringbuffer_pop(&rb_rx_spm, tmp, procSize);
	OS_eExitCriticalSection(mutexRxRb);
}
Пример #18
0
// One iteration of the encoder thread: grab the current screen surface,
// convert it RGB -> YUV422 -> YUV420P, encode a frame, then flush the
// encoded bytes from the ring buffer to disk and/or the icecast stream
// while updating the rolling stream-rate statistics.
void VideoEncoder::thread_loop() {
    int encnum;
    int res;
    auto screen = this->screen.lock();

    /* Convert picture from rgb to yuv420 planar

       two steps here:

       1) rgb24a or bgr24a to yuv422 interlaced (yuyv)
       2) yuv422 to yuv420 planar (yuv420p)

       to fix endiannes issues try adding #define ARCH_PPC
       and using
       mlt_convert_bgr24a_to_yuv422
       or
       mlt_convert_argb_to_yuv422
       (see mlt_frame.h in mltframework.org sourcecode)
       i can't tell as i don't have PPC, waiting for u mr.goil :)
     */

    uint8_t *surface = (uint8_t *)screen->get_surface();
    // NOTE(review): tm is allocated every call and never freed — leak?
    time_t *tm = (time_t *)malloc(sizeof(time_t));
    time(tm);
//   std::cerr << "-- ENC:" << asctime(localtime(tm));
    if(!surface) {
        // no frame available: keep the frame pacing and bail out
        fps->delay();
        /* std::cout << "fps->start_tv.tv_sec :" << fps->start_tv.tv_sec << \
           " tv_usec :" << fps->start_tv.tv_usec << "   \r" << std::endl; */
        return;
    }
    fps->delay();
    //uncomment this to see how long it takes between two frames in us.
    /*    timeval start_t;
        gettimeofday(&start_t,NULL);
        timeval did;
        timersub(&start_t, &m_lastTime, &did);
        m_lastTime.tv_sec = start_t.tv_sec;
        m_lastTime.tv_usec = start_t.tv_usec;
        std::cerr << "diff time :" << did.tv_usec << std::endl;*/
    screen->lock();
    auto & geo = screen->getGeometry();
    // step 1: pack the surface into YUYV according to its pixel layout
    switch(screen->get_pixel_format()) {
    case ViewPort::RGBA32:
        mlt_convert_rgb24a_to_yuv422(surface,
                                     geo.getSize().x(), geo.getSize().y(),
                                     geo.getSize().x() << 2, (uint8_t*)enc_yuyv, NULL);
        break;

    case ViewPort::BGRA32:
        mlt_convert_bgr24a_to_yuv422(surface,
                                     geo.getSize().x(), geo.getSize().y(),
                                     geo.getSize().x() << 2, (uint8_t*)enc_yuyv, NULL);
        break;

    case ViewPort::ARGB32:
        mlt_convert_argb_to_yuv422(surface,
                                   geo.getSize().x(), geo.getSize().y(),
                                   geo.getSize().x() << 2, (uint8_t*)enc_yuyv, NULL);
        break;

    default:
        error("Video Encoder %s doesn't supports Screen %s pixel format",
              name.c_str(), screen->getName().c_str());
    }

    screen->unlock();

    // step 2: YUYV -> planar YUV420
    ccvt_yuyv_420p(geo.getSize().x(), geo.getSize().y(), enc_yuyv, enc_y, enc_u, enc_v);

    ////// got the YUV, do the encoding
    res = encode_frame();
    if(res != 0) error("Can't encode frame");

    /// proceed writing and streaming encoded data in encpipe

    encnum = 0;
    if(write_to_disk || write_to_stream) {
        if((encnum = ringbuffer_read_space(ringbuffer)) > 0) {
            // NOTE(review): realloc result overwrites encbuf directly —
            // on OOM the old block leaks and encbuf becomes NULL.
            encbuf = (char *)realloc(encbuf, encnum);
//      encbuf = (char *)realloc(encbuf, (((audio_kbps + video_kbps)*1024)/24)); //doesn't change anything for shifting problem
            encnum = ringbuffer_read(ringbuffer, encbuf, encnum);
//      encnum = ringbuffer_read(ringbuffer, encbuf,
//                             ((audio_kbps + video_kbps)*1024)/24);
        }
    }

    if(encnum > 0) {
        //      func("%s has encoded %i bytes", name, encnum);
        if(write_to_disk && filedump_fd)
            fwrite(encbuf, 1, encnum, filedump_fd);

        if(write_to_stream && ice) {
            /*	int	wait_ms;
                wait_ms = shout_delay(ice);
                std::cerr << "---- shout delay :" << wait_ms << std::endl;*/
            shout_sync(ice);
            if(shout_send(ice, (const unsigned char*)encbuf, encnum)
               != SHOUTERR_SUCCESS) {
                error("shout_send: %s", shout_get_error(ice));
            } // else
              //printf("%d %d\n", encnum, (int)shout_queuelen(ice));
        }
        // accumulate wall-clock time and bytes for the stream-rate gauge
        gettimeofday(&m_ActualTime, NULL);
        if(m_ActualTime.tv_sec == m_OldTime.tv_sec)
            m_ElapsedTime += ((double)(m_ActualTime.tv_usec - m_OldTime.tv_usec)) / 1000000.0;
        else
            m_ElapsedTime += ((double)(m_ActualTime.tv_sec - m_OldTime.tv_sec)) + \
                             (((double)(m_ActualTime.tv_usec - m_OldTime.tv_usec)) / 1000000.0);
        m_OldTime.tv_sec = m_ActualTime.tv_sec;
        m_OldTime.tv_usec = m_ActualTime.tv_usec;
        m_Streamed += encnum;
        if(m_ElapsedTime >= 3.0) {      //calculate stream rate every minimum 3 seconds
            m_StreamRate = ((double)m_Streamed / m_ElapsedTime) / 1000.0;
            m_ElapsedTime = 0;
            m_Streamed = 0;
        }
    }
}
Пример #19
0
/*
 * Drop all packets with sequence number less than specified.
 *
 * The packets are considered as stale and the caller will receive
 * appropriate error code in the operation callback.
 *
 * Returns 0 on success
 */
/* Fail out every logged command whose opaque (sequence number) is below
 * `seqno`: the commands are first moved into instance-level "purged"
 * buffers (the command log is not re-entrant safe), then each one's user
 * callback is invoked with a not-found/bug error code. */
int lcb_server_purge_implicit_responses(lcb_server_t *c,
                                        lcb_uint32_t seqno,
                                        hrtime_t end,
                                        int all)
{
    protocol_binary_request_header req;

    /** Instance level allocated buffers */
    ringbuffer_t *cmdlog, *cookies;

    lcb_size_t nr = ringbuffer_peek(&c->cmd_log, req.bytes, sizeof(req));

    /* There should at _LEAST_ be _ONE_ message in here if we're not
     * trying to purge _ALL_ of the messages in the queue
     */
    if (all && nr == 0) {
        return 0;
    }


    /**
     * Reading the command log is not re-entrant safe, as an additional
     * command to the same server may result in the command log being modified.
     * To this end, we must first buffer all the commands in a separate
     * ringbuffer (or simple buffer) for that matter, and only *then*
     * invoke the callbacks
     */
    lcb_assert(nr == sizeof(req));

    /* Oldest logged command is not stale: nothing to purge */
    if (req.request.opaque >= seqno) {
        return 0;
    }

    cmdlog = &c->instance->purged_buf;
    cookies = &c->instance->purged_cookies;
    ringbuffer_reset(cmdlog);
    ringbuffer_reset(cookies);

    /**
     * Move all the commands we want to purge into the relevant ("local") buffers.
     * We will later read from these local buffers
     */
    while (req.request.opaque < seqno) {
        lcb_size_t packetsize = ntohl(req.request.bodylen) + (lcb_uint32_t)sizeof(req);

        ringbuffer_memcpy(cmdlog, &c->cmd_log, packetsize);
        ringbuffer_consumed(&c->cmd_log, packetsize);


        ringbuffer_memcpy(cookies, &c->output_cookies, sizeof(struct lcb_command_data_st));
        ringbuffer_consumed(&c->output_cookies, sizeof(struct lcb_command_data_st));

        nr = ringbuffer_peek(&c->cmd_log, req.bytes, sizeof(req.bytes));

        if (!nr) {
            break;
        }

        lcb_assert(nr == sizeof(req));
    }

    nr = ringbuffer_peek(cmdlog, req.bytes, sizeof(req));
    lcb_assert(nr == sizeof(req));

    if (!all) {
        lcb_assert(c->cmd_log.nbytes);
    }

    /* Now fail out each purged command via its user callback */
    do {
        struct lcb_command_data_st ct;
        char *packet = cmdlog->read_head;
        lcb_size_t packetsize = ntohl(req.request.bodylen) + (lcb_uint32_t)sizeof(req);
        char *keyptr;

        union {
            lcb_get_resp_t get;
            lcb_store_resp_t store;
            lcb_remove_resp_t remove;
            lcb_touch_resp_t touch;
            lcb_unlock_resp_t unlock;
            lcb_arithmetic_resp_t arithmetic;
            lcb_observe_resp_t observe;
        } resp;

        nr = ringbuffer_read(cookies, &ct, sizeof(ct));
        lcb_assert(nr == sizeof(ct));

        if (c->instance->histogram) {
            lcb_record_metrics(c->instance, end - ct.start, req.request.opcode);
        }

        if (!ringbuffer_is_continous(cmdlog, RINGBUFFER_READ, packetsize)) {
            /* Packet wraps around the ring edge: work on a linear copy */
            packet = malloc(packetsize);
            if (packet == NULL) {
                lcb_error_handler(c->instance, LCB_CLIENT_ENOMEM, NULL);
                return -1;
            }

            nr = ringbuffer_peek(cmdlog, packet, packetsize);
            if (nr != packetsize) {
                lcb_error_handler(c->instance, LCB_EINTERNAL, NULL);
                free(packet);
                return -1;
            }
        }

        switch (req.request.opcode) {
        case PROTOCOL_BINARY_CMD_GATQ:
        case PROTOCOL_BINARY_CMD_GETQ:
            /* quiet GETs: a missing response simply means "not found" */
            keyptr = packet + sizeof(req) + req.request.extlen;
            setup_lcb_get_resp_t(&resp.get, keyptr, ntohs(req.request.keylen),
                                 NULL, 0, 0, 0, 0);
            TRACE_GET_END(req.request.opaque, ntohs(req.request.vbucket),
                          req.request.opcode, LCB_KEY_ENOENT, &resp.get);
            c->instance->callbacks.get(c->instance, ct.cookie, LCB_KEY_ENOENT, &resp.get);
            break;
        case CMD_OBSERVE:
            lcb_failout_observe_request(c, &ct, packet,
                                        sizeof(req.bytes) + ntohl(req.request.bodylen),
                                        LCB_SERVER_BUG);
            break;
        case PROTOCOL_BINARY_CMD_NOOP:
            break;
        default: {
            char errinfo[128] = { '\0' };
            snprintf(errinfo, 128, "Unknown implicit send message op=%0x", req.request.opcode);
            lcb_error_handler(c->instance, LCB_EINTERNAL, errinfo);
            return -1;
        }
        }

        /* Release the linearized copy, if one was made */
        if (packet != cmdlog->read_head) {
            free(packet);
        }

        ringbuffer_consumed(cmdlog, packetsize);
        nr = ringbuffer_peek(cmdlog, req.bytes, sizeof(req));

        if (nr == 0) {
            return 0;
        }

        lcb_assert(nr == sizeof(req));
    } while (1); /* CONSTCOND */

    return 0;
}
Пример #20
0
/* Consumer-side worker: waits for audio data to appear in the shared
 * ring buffer and feeds it to the OMX audio renderer in buffer-sized
 * packets, honoring flush/stop requests and the latency gate.
 * Runs forever; the pthread_exit after the loop is unreachable. */
static void*_ringbuffer_consumer_thread(void *arg) {
/*
*
* Note: There are a lot of what might seem like pointless casts in this
* function.  In args to pthread and semaphore functions.
*
* These casts are to suppress compiler warnings
* about 'discarding the volatile directive'.
*
*/

	volatile TTSRENDER_STATE_T *st = (TTSRENDER_STATE_T*)arg;
	uint8_t *buf = NULL;
	int bytes_to_send;
	int rc;

	while(1) {
		// wait for ringbuffer data semaphore. this tells us there is data available in the ring buffer
		sem_wait((sem_t*)&st->ringbuffer_data_sema);
		pthread_mutex_lock((pthread_mutex_t*)&st->ringbuffer_mutex);
		while( ! ringbuffer_is_empty(st->ringbuffer)) {
			// set bytes_to_send to either the OMX IL Client buffer size, or the number of bytes waiting in the ring buffer, whichever is the smaller
			bytes_to_send = min(st->buffer_size, ringbuffer_used_space(st->ringbuffer));
			buf = ilctts_get_buffer((TTSRENDER_STATE_T*)st);
			while(buf == NULL) {
				// the free_buffer_cv variable is signalled inside the empty buffer callback
				pthread_mutex_lock((pthread_mutex_t*)&st->free_buffer_mutex);
				pthread_cond_wait((pthread_cond_t*)&st->free_buffer_cv, (pthread_mutex_t*)&st->free_buffer_mutex);
				buf = ilctts_get_buffer((TTSRENDER_STATE_T*)st);
				pthread_mutex_unlock((pthread_mutex_t*)&st->free_buffer_mutex);
			}// end while buf == NULL

			// stop request: throw away queued audio and flush the renderer
			if (st->tts_stop) {
				ringbuffer_flush(st->ringbuffer);
				ilctts_flush((TTSRENDER_STATE_T *)st);
				st->tts_stop = 0;
			}

			rc = ringbuffer_read(st->ringbuffer, (void*)buf, bytes_to_send);
			if (rc == -1) {
				ERROR("ringbuffer_read returned -1 error code in ilctts_consumer_thread\n", "");
			}

			// try and wait for a minimum latency time (in ms) before sending the next packet
			ilctts_latency_wait((TTSRENDER_STATE_T *)st);

			rc = ilctts_send_audio((TTSRENDER_STATE_T*)st, buf, bytes_to_send);
			if (rc == -1) {
				ERROR("ilctts_send_audio returned error code -1 in ilctts_consumer_thread\n", "");
			}

		} // end while buffer is not empty

		pthread_mutex_unlock((pthread_mutex_t*)&st->ringbuffer_mutex);
		// post ringbuffer semaphore to tell producer thread to go ahead
		sem_post((sem_t*)&st->ringbuffer_empty_sema);
		buf = NULL;
		usleep(1000);
	} // end while(1)

	pthread_exit(NULL);
} // end _ringbuffer_consumer_thread
Пример #21
0
/* Advance the read head past `nb` already-processed bytes without
 * copying them anywhere.  The caller must have verified availability;
 * a short read fails the assertion. */
void ringbuffer_consumed(ringbuffer_t *buffer, lcb_size_t nb)
{
    lcb_size_t discarded = ringbuffer_read(buffer, NULL, nb);
    lcb_assert(discarded == nb);
}
Пример #22
0
/* Parse and dispatch one complete packet from the server's input buffer.
 * Returns 1 when a packet was consumed, 0 when more data is needed, and
 * -1 on a fatal protocol/internal error.  Handles NOT_MY_VBUCKET by
 * re-scheduling the original command onto the corrected server. */
static int parse_single(libcouchbase_server_t *c, hrtime_t stop)
{
    protocol_binary_request_header req;
    protocol_binary_response_header header;
    libcouchbase_size_t nr;
    char *packet;
    libcouchbase_size_t packetsize;
    struct libcouchbase_command_data_st ct;

    nr = ringbuffer_peek(&c->input, header.bytes, sizeof(header));
    if (nr < sizeof(header)) {
        return 0;
    }

    packetsize = ntohl(header.response.bodylen) + (libcouchbase_uint32_t)sizeof(header);
    if (c->input.nbytes < packetsize) {
        return 0;
    }

    /* Is it already timed out? */
    nr = ringbuffer_peek(&c->cmd_log, req.bytes, sizeof(req));
    if (nr < sizeof(req) || /* the command log doesn't know about it */
            (header.response.opaque < req.request.opaque &&
             header.response.opaque > 0)) { /* sasl comes with zero opaque */
        /* already processed. */
        ringbuffer_consumed(&c->input, packetsize);
        return 1;
    }

    packet = c->input.read_head;
    /* we have everything! */

    if (!ringbuffer_is_continous(&c->input, RINGBUFFER_READ,
                                 packetsize)) {
        /* The buffer isn't continous.. for now just copy it out and
        ** operate on the copy ;)
        */
        if ((packet = malloc(packetsize)) == NULL) {
            libcouchbase_error_handler(c->instance, LIBCOUCHBASE_CLIENT_ENOMEM, NULL);
            return -1;
        }
        nr = ringbuffer_read(&c->input, packet, packetsize);
        if (nr != packetsize) {
            libcouchbase_error_handler(c->instance, LIBCOUCHBASE_EINTERNAL,
                                       NULL);
            free(packet);
            return -1;
        }
    }

    /* Peek (not read): the cookie stays queued until the response is done */
    nr = ringbuffer_peek(&c->output_cookies, &ct, sizeof(ct));
    if (nr != sizeof(ct)) {
        libcouchbase_error_handler(c->instance, LIBCOUCHBASE_EINTERNAL,
                                   NULL);
        if (packet != c->input.read_head) {
            free(packet);
        }
        return -1;
    }
    ct.vbucket = ntohs(req.request.vbucket);

    switch (header.response.magic) {
    case PROTOCOL_BINARY_REQ:
        c->instance->request_handler[header.response.opcode](c, &ct, (void *)packet);
        break;
    case PROTOCOL_BINARY_RES: {
        int was_connected = c->connected;
        if (libcouchbase_server_purge_implicit_responses(c, header.response.opaque, stop) != 0) {
            if (packet != c->input.read_head) {
                free(packet);
            }
            return -1;
        }

        if (c->instance->histogram) {
            libcouchbase_record_metrics(c->instance, stop - ct.start,
                                        header.response.opcode);
        }

        if (ntohs(header.response.status) != PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET
            || header.response.opcode == CMD_GET_REPLICA) {
            c->instance->response_handler[header.response.opcode](c, &ct, (void *)packet);
            /* keep command and cookie until we get complete STAT response */
            if (was_connected &&
                    (header.response.opcode != PROTOCOL_BINARY_CMD_STAT || header.response.keylen == 0)) {
                nr = ringbuffer_read(&c->cmd_log, req.bytes, sizeof(req));
                assert(nr == sizeof(req));
                ringbuffer_consumed(&c->cmd_log, ntohl(req.request.bodylen));
                ringbuffer_consumed(&c->output_cookies, sizeof(ct));
            }
        } else {
            int idx;
            char *body;
            libcouchbase_size_t nbody;
            libcouchbase_server_t *new_srv;
            /* re-schedule command to new server */
            nr = ringbuffer_read(&c->cmd_log, req.bytes, sizeof(req));
            assert(nr == sizeof(req));
            idx = vbucket_found_incorrect_master(c->instance->vbucket_config,
                                                 ntohs(req.request.vbucket),
                                                 (int)c->index);
            assert((libcouchbase_size_t)idx < c->instance->nservers);
            new_srv = c->instance->servers + idx;
            req.request.opaque = ++c->instance->seqno;
            nbody = ntohl(req.request.bodylen);
            body = malloc(nbody);
            if (body == NULL) {
                libcouchbase_error_handler(c->instance, LIBCOUCHBASE_CLIENT_ENOMEM, NULL);
                return -1;
            }
            nr = ringbuffer_read(&c->cmd_log, body, nbody);
            assert(nr == nbody);
            nr = ringbuffer_read(&c->output_cookies, &ct, sizeof(ct));
            assert(nr == sizeof(ct));
            /* Preserve the cookie and timestamp for the command. This means
             * that the library will retry the command until its time will
             * out and the client will get LIBCOUCHBASE_ETIMEDOUT error in
             * command callback */
            libcouchbase_server_retry_packet(new_srv, &ct, &req, sizeof(req));
            libcouchbase_server_write_packet(new_srv, body, nbody);
            libcouchbase_server_end_packet(new_srv);
            libcouchbase_server_send_packets(new_srv);
            free(body);
        }
        break;
    }

    default:
        libcouchbase_error_handler(c->instance,
                                   LIBCOUCHBASE_PROTOCOL_ERROR,
                                   NULL);
        if (packet != c->input.read_head) {
            free(packet);
        }
        return -1;
    }

    /* Free the linear copy, or consume in place if we parsed the ring directly */
    if (packet != c->input.read_head) {
        free(packet);
    } else {
        ringbuffer_consumed(&c->input, packetsize);
    }
    return 1;
}
Пример #23
0
/**
 * Schedule OBSERVE commands for a set of keys.  For each key the command
 * body is accumulated per destination server: the master plus every
 * configured replica of the key's vbucket.  One packet per server is then
 * flushed onto the wire.
 *
 * @param instance connected library handle (must have a vbucket config)
 * @param command_cookie opaque cookie passed back in the observe callbacks
 * @param num number of entries in items
 * @param items the keys to observe
 * @return LCB_SUCCESS (via sync handler) when all packets were scheduled
 */
LIBCOUCHBASE_API
lcb_error_t lcb_observe(lcb_t instance,
                        const void *command_cookie,
                        lcb_size_t num,
                        const lcb_observe_cmd_t *const *items)
{
    int vbid, idx, jj;
    lcb_size_t ii;
    lcb_uint32_t opaque;
    struct observe_st *requests;

    /* we need a vbucket config before we can start getting data.. */
    if (instance->vbucket_config == NULL) {
        switch (instance->type) {
        case LCB_TYPE_CLUSTER:
            return lcb_synchandler_return(instance, LCB_EBADHANDLE);
        case LCB_TYPE_BUCKET:
        default:
            return lcb_synchandler_return(instance, LCB_CLIENT_ETMPFAIL);
        }
    }

    if (instance->dist_type != VBUCKET_DISTRIBUTION_VBUCKET) {
        return lcb_synchandler_return(instance, LCB_NOT_SUPPORTED);
    }

    /* the list of pointers to body buffers for each server */
    requests = calloc(instance->nservers, sizeof(struct observe_st));
    if (requests == NULL) {
        /* FIX: the original dereferenced a NULL array on allocation failure */
        return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
    }
    opaque = ++instance->seqno;
    for (ii = 0; ii < num; ++ii) {
        const void *key = items[ii]->v.v0.key;
        lcb_size_t nkey = items[ii]->v.v0.nkey;
        const void *hashkey = items[ii]->v.v0.hashkey;
        lcb_size_t nhashkey = items[ii]->v.v0.nhashkey;

        if (nhashkey == 0) {
            hashkey = key;
            nhashkey = nkey;
        }

        vbid = vbucket_get_vbucket_by_key(instance->vbucket_config, hashkey,
                                          nhashkey);
        for (jj = -1; jj < instance->nreplicas; ++jj) {
            struct observe_st *rr;
            /* jj == -1 maps to the master; 0..nreplicas-1 to the replicas */
            idx = vbucket_get_replica(instance->vbucket_config, vbid, jj);
            /* FIX: bound was "idx > nservers", which let idx == nservers
             * through and indexed one past the end of both arrays */
            if ((idx < 0 || idx >= (int)instance->nservers)) {
                /* the config says that there is no server yet at that position (-1) */
                if (jj == -1) {
                    /* master node must be available */
                    destroy_requests(requests, instance->nservers);
                    return lcb_synchandler_return(instance, LCB_NETWORK_ERROR);
                } else {
                    continue;
                }
            }
            rr = requests + idx;
            if (!rr->allocated) {
                if (!init_request(rr)) {
                    destroy_requests(requests, instance->nservers);
                    return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
                }
                rr->req.message.header.request.magic = PROTOCOL_BINARY_REQ;
                rr->req.message.header.request.opcode = CMD_OBSERVE;
                rr->req.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
                rr->req.message.header.request.opaque = opaque;
            }

            {
                /* body entry: vbucket id, key length, then the key bytes */
                lcb_uint16_t vb = htons((lcb_uint16_t)vbid);
                lcb_uint16_t len = htons((lcb_uint16_t)nkey);
                ringbuffer_ensure_capacity(&rr->body, sizeof(vb) + sizeof(len) + nkey);
                rr->nbody += ringbuffer_write(&rr->body, &vb, sizeof(vb));
                rr->nbody += ringbuffer_write(&rr->body, &len, sizeof(len));
                rr->nbody += ringbuffer_write(&rr->body, key, nkey);
            }
        }
    }

    /* flush one OBSERVE packet to every server that accumulated a body */
    for (ii = 0; ii < instance->nservers; ++ii) {
        struct observe_st *rr = requests + ii;
        lcb_server_t *server = instance->servers + ii;

        if (rr->allocated) {
            char *tmp;
            /* FIX: was ntohl(); htonl() states the correct direction
             * (identical value on all supported platforms) */
            rr->req.message.header.request.bodylen = htonl((lcb_uint32_t)rr->nbody);
            lcb_server_start_packet(server, command_cookie, rr->req.bytes, sizeof(rr->req.bytes));
            if (ringbuffer_is_continous(&rr->body, RINGBUFFER_READ, rr->nbody)) {
                /* body is contiguous: stream straight from the ringbuffer */
                tmp = ringbuffer_get_read_head(&rr->body);
                TRACE_OBSERVE_BEGIN(&rr->req, server->authority, tmp, rr->nbody);
                lcb_server_write_packet(server, tmp, rr->nbody);
            } else {
                tmp = malloc(ringbuffer_get_nbytes(&rr->body));
                if (!tmp) {
                    /* FIXME by this time some of requests might be scheduled */
                    destroy_requests(requests, instance->nservers);
                    return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
                } else {
                    ringbuffer_read(&rr->body, tmp, rr->nbody);
                    TRACE_OBSERVE_BEGIN(&rr->req, server->authority, tmp, rr->nbody);
                    lcb_server_write_packet(server, tmp, rr->nbody);
                    /* FIX: tmp leaked; lcb_server_write_packet copies its
                     * input (the NMV retry path frees its body right after
                     * the same call) */
                    free(tmp);
                }
            }
            lcb_server_end_packet(server);
            lcb_server_send_packets(server);
        }
    }

    destroy_requests(requests, instance->nservers);
    /* NOTE(review): assumes destroy_requests() also releases the array
     * itself; otherwise `requests` leaks on every path -- verify */
    return lcb_synchandler_return(instance, LCB_SUCCESS);
}
Пример #24
0
/* Non-destructive read: copy up to nb bytes of pending data into dest
 * without advancing the real read cursor.  Works on a shallow copy of the
 * buffer state, so only the copy's cursor moves.  Returns the number of
 * bytes actually copied. */
lcb_size_t ringbuffer_peek(ringbuffer_t *buffer, void *dest, lcb_size_t nb)
{
    ringbuffer_t shadow;

    shadow = *buffer;
    return ringbuffer_read(&shadow, dest, nb);
}
Пример #25
0
// JACK process callback: runs once per audio cycle in the realtime thread.
// `self` carries the JackClient instance.  NOTE(review): m_InputPortMap,
// m_OutputPortMap and m_BufferSize are accessed without the `self` cast --
// presumably static members; verify against the class declaration.
int JackClient::Process(jack_nframes_t nframes, void *self) {
    int j = 0;
    bool isEncoded = ((JackClient*) self)->m_Encoded;

    // Capture side: forward input audio to the encoder's ring buffer.
    for(std::map<int, JackPort*>::iterator i = m_InputPortMap.begin();
        i != m_InputPortMap.end(); i++) {
        if(jack_port_connected(i->second->Port)) {
            sample_t *in = (sample_t *) jack_port_get_buffer(i->second->Port, nframes);
//                memcpy (i->second->Buf, in, sizeof (sample_t) * m_BufferSize); //m_BufferSize -> 2nd AudioCollector parameter
            // Buffer assigned by SetInputBuf in the AudioCollector constructor
            if(isEncoded) {     //Added this to write in the buffer only if
                //the encoder is in action
                if(!j) {        //only streams the 1st Jack Input port
                    // If the ring buffer is full the block is silently dropped.
                    if(ringbuffer_write_space(((JackClient*) self)->first) >= (sizeof(sample_t) * nframes)) {
                        ringbuffer_write(((JackClient*) self)->first, (char *)in, (sizeof(sample_t) * nframes));
                    }
                    /*		    else
                                    {
                                      std::cerr << "-----------Not enough room in audio_fred !!!" << std::endl;
                                    }*/
                    j++;
                }
            }
        }
    }

    int channels = ((JackClient*) self)->m_ringbufferchannels;


    bool output_available = false;
//m_ringbuffer created by ViewPort::add_audio
//1024*512 rounded up to the next power of two.
    if(((JackClient*) self)->m_ringbuffer) {
//        static int firsttime = 1 + ceil(4096/nframes); // XXX pre-buffer  TODO decrease this and compensate latency

        // Only consume when a full interleaved block (all channels, nframes
        // samples) is available; otherwise leave data queued and play
        // silence below.
        if(ringbuffer_read_space(((JackClient*) self)->m_ringbuffer) >=
           /*firsttime */ channels * nframes * sizeof(float)) {
//                firsttime=1;
            size_t rv = ringbuffer_read(((JackClient*) self)->m_ringbuffer,
                                        ((JackClient*) self)->m_inbuf,
                                        channels * nframes * sizeof(float));
            if(isEncoded) {     //Added this to write in the buffer only if
                //the encoder is in action
                // Mirror the playback block into the encoder's mix ring.
                if(ringbuffer_write_space(((JackClient*) self)->audio_mix_ring) >= rv) {
//                      unsigned char *aPtr = (unsigned char *)((JackClient*) self)->m_inbuf;
                    size_t rf = ringbuffer_write(((JackClient*) self)->audio_mix_ring, ((JackClient*) self)->m_inbuf, rv);
                    if(rf != rv)
                        std::cerr << "---" << rf << " : au lieu de :" << rv << " octets ecrits dans le ringbuffer !!" \
                                  << std::endl;
                } else {
                    std::cerr << "-----------Not enough room in audio_mix_ring !!!" << std::endl;
                }
            }
//reads m_ringbuffer and puts it in m_inbuf
//m_inbuf created in SetRingbufferPtr called by add_audio
//4096 * channels * sizeof(float)
            if(rv >= channels * nframes * sizeof(float)) {
                output_available = true;
            }
        }
#if 0
        else if(firsttime == 1)
            fprintf(stderr, "AUDIO BUFFER UNDERRUN: %i samples < %i\n", ringbuffer_read_space(((JackClient*) self)->m_ringbuffer) / sizeof(float) / channels, nframes);
#endif
    }

    j = 0;
    // Playback side: deinterleave m_inbuf into each output port, or emit
    // silence when no complete block was available this cycle.
    for(std::map<int, JackPort*>::iterator i = m_OutputPortMap.begin();
        i != m_OutputPortMap.end(); i++) {
        if(output_available && j < channels) {
            sample_t *out = (sample_t *) jack_port_get_buffer(i->second->Port, nframes);
            memset(out, 0, sizeof(jack_default_audio_sample_t) * nframes);
            deinterleave(((JackClient*) self)->m_inbuf, out, channels
                         , j, nframes);
//writes nframes of channels m_inbuf to out
//two times if stereo (shifted by the channel number)
#if 0                   // test-noise:
            int i;
            for(i = 0; i < nframes; i++) out[i] = (float) i / (float)nframes;
#endif
        } else { // no output available, clear
            sample_t *out = (sample_t *) jack_port_get_buffer(i->second->Port, nframes);
            memset(out, 0, sizeof(sample_t) * nframes);
        }
        j++;
    }

    m_BufferSize = nframes;

//      if(RunCallback&&RunContext)
//      {
//              // do the work
//              RunCallback(RunContext, nframes);
//      }

    return 0;
}
Пример #26
0
DECLARE_TEST( ringbuffer, allocate )
{
	ringbuffer_t* rb;
	char scratch[256];

	/* Zero-capacity buffer: every call is legal but transfers nothing */
	rb = ringbuffer_allocate( 0 );
	EXPECT_EQ( ringbuffer_size( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_read( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_write( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_size( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_read( rb, scratch, 256 ), 0 );
	EXPECT_EQ( ringbuffer_write( rb, scratch, 256 ), 0 );
	EXPECT_EQ( ringbuffer_size( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	ringbuffer_deallocate( rb );

	/* 128-byte buffer: a 256-byte request is clipped to 127
	 * (usable capacity is size - 1) */
	rb = ringbuffer_allocate( 128 );
	EXPECT_EQ( ringbuffer_size( rb ), 128 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_write( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_read( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_size( rb ), 128 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_write( rb, scratch, 256 ), 127 );
	EXPECT_EQ( ringbuffer_read( rb, scratch, 256 ), 127 );
	EXPECT_EQ( ringbuffer_size( rb ), 128 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 127 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 127 );

	ringbuffer_deallocate( rb );

	/* 256-byte buffer: a 256-byte request is clipped to 255 */
	rb = ringbuffer_allocate( 256 );
	EXPECT_EQ( ringbuffer_size( rb ), 256 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_write( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_read( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_size( rb ), 256 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_write( rb, scratch, 256 ), 255 );
	EXPECT_EQ( ringbuffer_read( rb, scratch, 256 ), 255 );
	EXPECT_EQ( ringbuffer_size( rb ), 256 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 255 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 255 );

	ringbuffer_deallocate( rb );

	/* 512-byte buffer: two full 256-byte transfers fit without clipping */
	rb = ringbuffer_allocate( 512 );
	EXPECT_EQ( ringbuffer_size( rb ), 512 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_write( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_read( rb, scratch, 0 ), 0 );
	EXPECT_EQ( ringbuffer_size( rb ), 512 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 0 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 0 );

	EXPECT_EQ( ringbuffer_write( rb, scratch, 256 ), 256 );
	EXPECT_EQ( ringbuffer_read( rb, scratch, 256 ), 256 );
	EXPECT_EQ( ringbuffer_size( rb ), 512 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 256 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 256 );

	EXPECT_EQ( ringbuffer_write( rb, scratch, 256 ), 256 );
	EXPECT_EQ( ringbuffer_read( rb, scratch, 256 ), 256 );
	EXPECT_EQ( ringbuffer_size( rb ), 512 );
	EXPECT_EQ( ringbuffer_total_read( rb ), 512 );
	EXPECT_EQ( ringbuffer_total_written( rb ), 512 );

	ringbuffer_deallocate( rb );

	return 0;
}
Пример #27
0
/* Pull up to `length` bytes of PCM data from the renderer's ring buffer
 * into `data`; returns whatever ringbuffer_read() reports as transferred. */
int ilctts_pcm_read(TTSRENDER_STATE_T *st, void *data, int length) {
	int transferred = ringbuffer_read(st->ringbuffer, data, length);
	return transferred;
} // end ilctts_pcm_read
Пример #28
0
/* I/O-event callback for an ALSA-backed stream in a PulseAudio emulation
 * layer.  Moves audio between the stream's ring buffer and the ALSA PCM
 * device: on an output event it feeds playback data (or silence while the
 * stream is corked or the app has no data yet); on an input event it drains
 * captured data into the ring buffer and notifies the application. */
static
void
data_available_for_stream(pa_mainloop_api *a, pa_io_event *ioe, int fd, pa_io_event_flags_t events,
                          void *userdata)
{
    pa_stream          *s = userdata;
    snd_pcm_sframes_t   frame_count;
    size_t              frame_size = pa_frame_size(&s->ss);
    char                buf[16 * 1024];
    int                 paused = g_atomic_int_get(&s->paused);

    if (events & (PA_IO_EVENT_INPUT | PA_IO_EVENT_OUTPUT)) {

#if HAVE_SND_PCM_AVAIL
        frame_count = snd_pcm_avail(s->ph);
#else
        snd_pcm_hwsync(s->ph);
        frame_count = snd_pcm_avail_update(s->ph);
#endif

        if (frame_count < 0) {
            if (frame_count == -EBADFD) {
                // stream was closed
                return;
            }

            /* Attempt xrun/suspend recovery a bounded number of times.
             * NOTE(review): snd_pcm_recover() returns a negative errno code
             * rather than -1/errno, so `ret == -1 && errno == EINTR` is
             * suspect -- probably meant `ret == -EINTR`; verify. */
            int cnt = 0, ret;
            do {
                cnt ++;
                ret = snd_pcm_recover(s->ph, frame_count, 1);
            } while (ret == -1 && errno == EINTR && cnt < 5);

            /* re-query the frame count after recovery */
#if HAVE_SND_PCM_AVAIL
            frame_count = snd_pcm_avail(s->ph);
#else
            snd_pcm_hwsync(s->ph);
            frame_count = snd_pcm_avail_update(s->ph);
#endif

            if (frame_count < 0) {
                trace_error("%s, can't recover after failed snd_pcm_avail (%d)\n", __func__,
                            (int)frame_count);
                return;
            }
        }
    } else {
        return;
    }

    if (events & PA_IO_EVENT_OUTPUT) {
        if (paused) {
            // client stream is corked. Pass silence to ALSA
            size_t bytecnt = MIN(sizeof(buf), frame_count * frame_size);
            memset(buf, 0, bytecnt);
            snd_pcm_writei(s->ph, buf, bytecnt / frame_size);
        } else {
            size_t writable_size = pa_stream_writable_size(s);

            // give the application a chance to top up the ring buffer first
            if (s->write_cb && writable_size > 0)
                s->write_cb(s, writable_size, s->write_cb_userdata);

            size_t bytecnt = MIN(sizeof(buf), frame_count * frame_size);
            bytecnt = ringbuffer_read(s->rb, buf, bytecnt);

            if (bytecnt == 0) {
                // application is not ready yet, play silence
                bytecnt = MIN(sizeof(buf), frame_count * frame_size);
                memset(buf, 0, bytecnt);
            }
            snd_pcm_writei(s->ph, buf, bytecnt / frame_size);
        }
    }

    if (events & PA_IO_EVENT_INPUT) {
        if (paused) {
            // client stream is corked. Read data from ALSA and discard them
            size_t bytecnt = MIN(sizeof(buf), frame_count * frame_size);
            snd_pcm_readi(s->ph, buf, bytecnt / frame_size);
        } else {
            size_t bytecnt = ringbuffer_writable_size(s->rb);

            if (bytecnt == 0) {
                // ringbuffer is full because app doesn't read data fast enough.
                // Make some room
                ringbuffer_drop(s->rb, frame_count * frame_size);
                bytecnt = ringbuffer_writable_size(s->rb);
            }

            // read at most: free ring space, available frames, scratch size
            bytecnt = MIN(bytecnt, frame_count * frame_size);
            bytecnt = MIN(bytecnt, sizeof(buf));

            if (bytecnt > 0) {
                snd_pcm_readi(s->ph, buf, bytecnt / frame_size);
                ringbuffer_write(s->rb, buf, bytecnt);
            }

            // notify the application that captured data is waiting
            size_t readable_size = pa_stream_readable_size(s);
            if (s->read_cb && readable_size > 0)
                s->read_cb(s, readable_size, s->read_cb_userdata);
        }
    }
}
Пример #29
0
/**
 * Extended version of observe command. This allows us to service
 * various forms of higher level operations which use observe in one way
 * or another.
 *
 * @param instance bucket-type handle with a current config
 * @param command_cookie opaque cookie passed back in the callbacks
 * @param num number of entries in items
 * @param items lcb_observe_cmd_t* or lcb_durability_entry_t* elements,
 *              selected by type
 * @param type selects key extraction and the broadcast/durability flags
 */
lcb_error_t lcb_observe_ex(lcb_t instance,
                           const void *command_cookie,
                           lcb_size_t num,
                           const void *const *items,
                           lcb_observe_type_t type)
{
    lcb_size_t ii;
    lcb_size_t maxix;
    lcb_uint32_t opaque;
    struct lcb_command_data_st ct;
    struct observe_requests_st reqs;

    memset(&reqs, 0, sizeof(reqs));
    /* FIX: zero the command data as well -- ct.flags was consumed below
     * while left uninitialized on the LCB_OBSERVE_TYPE_CHECK path */
    memset(&ct, 0, sizeof(ct));

    if (instance->type != LCB_TYPE_BUCKET) {
        return lcb_synchandler_return(instance, LCB_EBADHANDLE);
    }

    if (instance->config.handle == NULL) {
        return lcb_synchandler_return(instance, LCB_CLIENT_ETMPFAIL);
    }

    if (instance->config.dist_type != VBUCKET_DISTRIBUTION_VBUCKET) {
        return lcb_synchandler_return(instance, LCB_NOT_SUPPORTED);
    }

    opaque = ++instance->seqno;
    ct.cookie = command_cookie;
    maxix = instance->config.nreplicas;

    if (type == LCB_OBSERVE_TYPE_CHECK) {
        /* only query the master; no broadcast */
        maxix = 0;

    } else {
        if (type == LCB_OBSERVE_TYPE_DURABILITY) {
            ct.flags = LCB_CMD_F_OBS_DURABILITY | LCB_CMD_F_OBS_BCAST;

        } else {
            ct.flags = LCB_CMD_F_OBS_BCAST;
        }
    }

    reqs.nrequests = instance->nservers;
    reqs.requests = calloc(reqs.nrequests, sizeof(*reqs.requests));
    if (reqs.requests == NULL) {
        /* FIX: calloc result was used unchecked */
        return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
    }

    for (ii = 0; ii < num; ii++) {
        const void *key, *hashkey;
        lcb_size_t nkey, nhashkey;
        int vbid, jj;

        if (type == LCB_OBSERVE_TYPE_DURABILITY) {
            const lcb_durability_entry_t *ent = items[ii];
            key = ent->request.v.v0.key;
            nkey = ent->request.v.v0.nkey;
            hashkey = ent->request.v.v0.hashkey;
            nhashkey = ent->request.v.v0.nhashkey;
        } else {
            const lcb_observe_cmd_t *ocmd = items[ii];
            key = ocmd->v.v0.key;
            nkey = ocmd->v.v0.nkey;
            hashkey = ocmd->v.v0.hashkey;
            nhashkey = ocmd->v.v0.nhashkey;
        }
        if (!nhashkey) {
            hashkey = key;
            nhashkey = nkey;
        }

        vbid = vbucket_get_vbucket_by_key(instance->config.handle,
                                          hashkey, nhashkey);

        /* jj == -1 maps to the master; 0..maxix-1 to the replicas */
        for (jj = -1; jj < (int)maxix; jj++) {
            struct observe_st *rr;

            int idx = vbucket_get_replica(instance->config.handle,
                                          vbid, jj);

            /* FIX: bound was "idx > nservers"; idx == nservers slipped
             * through, contradicting the lcb_assert just below */
            if (idx < 0 || idx >= (int)instance->nservers) {
                if (jj == -1) {
                    destroy_requests(&reqs);
                    return lcb_synchandler_return(instance, LCB_NO_MATCHING_SERVER);
                }
                continue;
            }
            lcb_assert(idx < (int)reqs.nrequests);
            rr = reqs.requests + idx;

            if (!rr->allocated) {
                if (!init_request(rr)) {
                    destroy_requests(&reqs);
                    return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
                }
            }

            {
                /* body entry: vbucket id, key length, then the key bytes */
                lcb_uint16_t vb = htons((lcb_uint16_t)vbid);
                lcb_uint16_t len = htons((lcb_uint16_t)nkey);

                rr->packet.message.header.request.magic = PROTOCOL_BINARY_REQ;
                rr->packet.message.header.request.opcode = CMD_OBSERVE;
                rr->packet.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
                rr->packet.message.header.request.opaque = opaque;

                ringbuffer_ensure_capacity(&rr->body,
                                           sizeof(vb) + sizeof(len) + nkey);
                rr->nbody += ringbuffer_write(&rr->body, &vb, sizeof(vb));
                rr->nbody += ringbuffer_write(&rr->body, &len, sizeof(len));
                rr->nbody += ringbuffer_write(&rr->body, key, nkey);
            }
        }
    }

    /* flush one OBSERVE packet to every server that accumulated a body */
    for (ii = 0; ii < reqs.nrequests; ii++) {
        struct observe_st *rr = reqs.requests + ii;
        struct lcb_server_st *server = instance->servers + ii;
        char *tmp;

        if (!rr->allocated) {
            continue;
        }

        /* FIX: was ntohl(); htonl() states the correct direction
         * (identical value on all supported platforms) */
        rr->packet.message.header.request.bodylen = htonl((lcb_uint32_t)rr->nbody);
        ct.start = gethrtime();

        lcb_server_start_packet_ct(server, &ct, rr->packet.bytes,
                                   sizeof(rr->packet.bytes));

        if (ringbuffer_is_continous(&rr->body, RINGBUFFER_READ, rr->nbody)) {
            /* body is contiguous: stream straight from the ringbuffer */
            tmp = ringbuffer_get_read_head(&rr->body);
            TRACE_OBSERVE_BEGIN(&rr->packet, server->authority, tmp, rr->nbody);
            lcb_server_write_packet(server, tmp, rr->nbody);
        } else {
            tmp = malloc(ringbuffer_get_nbytes(&rr->body));
            if (!tmp) {
                /* FIXME by this time some of requests might be scheduled */
                destroy_requests(&reqs);
                return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
            } else {
                ringbuffer_read(&rr->body, tmp, rr->nbody);
                TRACE_OBSERVE_BEGIN(&rr->packet, server->authority, tmp, rr->nbody);
                lcb_server_write_packet(server, tmp, rr->nbody);
                /* FIX: tmp leaked; lcb_server_write_packet copies its input
                 * (the NMV retry path frees its body right after the same
                 * call) */
                free(tmp);
            }
        }
        lcb_server_end_packet(server);
        lcb_server_send_packets(server);
    }

    destroy_requests(&reqs);
    return lcb_synchandler_return(instance, LCB_SUCCESS);
}
Пример #30
0
/**
 * Handle a NOT_MY_VBUCKET response: apply any fast-forward cluster config
 * the server piggy-backed on the response body, then try to re-schedule
 * the original command onto the server now believed to be the master.
 *
 * Returns 1 if retried, 0 if the command should fail, -1 for an internal
 * error
 */
static int handle_not_my_vbucket(lcb_server_t *c,
                                 packet_info *resinfo,
                                 protocol_binary_request_header *oldreq,
                                 struct lcb_command_data_st *oldct)
{
    int idx;
    char *body;
    lcb_size_t nbody, nr;
    lcb_server_t *new_srv;
    struct lcb_command_data_st ct;
    protocol_binary_request_header req;
    hrtime_t now;
    lcb_string config_string;
    lcb_error_t err = LCB_ERROR;

    lcb_log(LOGARGS(c, WARN),
            "NOT_MY_VBUCKET; Server=%p,ix=%d,real_start=%lu,vb=%d",
            (void *)c, c->index,
            (unsigned long)oldct->real_start,
            (int)ntohs(oldreq->request.vbucket));

    /* The response body, when present, carries an updated cluster config;
     * feed it to the CCCP provider so the vbucket map is refreshed. */
    lcb_string_init(&config_string);
    if (PACKET_NBODY(resinfo)) {
        lcb_string_append(&config_string,
                          PACKET_VALUE(resinfo),
                          PACKET_NVALUE(resinfo));

        err = lcb_cccp_update(lcb_confmon_get_provider(c->instance->confmon,
                              LCB_CLCONFIG_CCCP),
                              c->curhost.host,
                              &config_string);
    }

    lcb_string_release(&config_string);

    /* no usable inline config: fall back to a full bootstrap refresh */
    if (err != LCB_SUCCESS) {
        lcb_bootstrap_refresh(c->instance);
    }

    /* re-schedule command to new server */
    if (!c->instance->settings.vb_noguess) {
        idx = vbucket_found_incorrect_master(c->instance->vbucket_config,
                                             ntohs(oldreq->request.vbucket),
                                             (int)c->index);
    } else {
        /* guessing disabled: retry against the same server index */
        idx = c->index;
    }

    if (idx == -1) {
        lcb_log(LOGARGS(c, ERR), "no alternate server");
        return 0;
    }
    lcb_log(LOGARGS(c, INFO), "Mapped key to new server %d -> %d",
            c->index, idx);

    now = gethrtime();

    /* give up if the command already exceeded the operation timeout */
    if (oldct->real_start) {
        hrtime_t min_ok = now - MCSERVER_TIMEOUT(c) * 1000;
        if (oldct->real_start < min_ok) {
            /** Timed out in a 'natural' manner */
            return 0;
        }
    }

    req = *oldreq;

    lcb_assert((lcb_size_t)idx < c->instance->nservers);
    new_srv = c->instance->servers + idx;

    /* Consume the logged copy of the request from this server's command
     * log (header, then body, then cookie) so it can be re-sent below.
     * The reads must stay in this order to keep the log consistent. */
    nr = ringbuffer_read(&c->cmd_log, req.bytes, sizeof(req));
    lcb_assert(nr == sizeof(req));

    /* fresh opaque so the retried command gets its own sequence number */
    req.request.opaque = ++c->instance->seqno;
    nbody = ntohl(req.request.bodylen);
    body = malloc(nbody);
    if (body == NULL) {
        lcb_error_handler(c->instance, LCB_CLIENT_ENOMEM, NULL);
        return -1;
    }
    nr = ringbuffer_read(&c->cmd_log, body, nbody);
    lcb_assert(nr == nbody);
    nr = ringbuffer_read(&c->output_cookies, &ct, sizeof(ct));
    lcb_assert(nr == sizeof(ct));

    /* Preserve the cookie and reset timestamp for the command. This
     * means that the library will retry the command until it will
     * get code different from LCB_NOT_MY_VBUCKET */
    if (!ct.real_start) {
        ct.real_start = ct.start;
    }
    ct.start = now;

    lcb_server_retry_packet(new_srv, &ct, &req, sizeof(req));
    /* FIXME dtrace instrumentation */
    lcb_server_write_packet(new_srv, body, nbody);
    lcb_server_end_packet(new_srv);
    lcb_server_send_packets(new_srv);
    free(body);

    return 1;
}