/* Writes 'todo' mixed SAMPLES to 'buf', driving the module player.
   Whenever the current player tick is exhausted (TICKLEFT==0), the player
   callback is invoked and the per-channel mixing parameters (sample position,
   frequency increment, volume multipliers, panning) are recomputed before
   the next portion is mixed. */
void VC_WriteSamples(char *buf,UWORD todo)
{
    int t;
    UWORD part;

    while(todo>0){
        if(TICKLEFT==0){
            /* Advance the player by one tick, then derive how many output
               samples this tick lasts: (125 * mixfreq) / (50 * bpm). */
            md_player();
            TICKLEFT=(125L*md_mixfreq)/(50L*md_bpm);

            /* compute volume, frequency counter & panning parameters for each channel. */
            for(t=0;t<md_numchn;t++){
                int pan,vol,lvol,rvol;

                /* A 'kick' restarts the voice at its configured start
                   position (fixed-point, FRACBITS fractional bits). */
                if(vinf[t].kick){
                    vinf[t].current=(vinf[t].start << FRACBITS);
                    vinf[t].active=1;
                    vinf[t].kick=0;
                }

                /* Zero frequency silences/deactivates the voice. */
                if(vinf[t].frq==0) vinf[t].active=0;

                if(vinf[t].active){
                    /* Per-output-sample position increment for this voice;
                       negated when the sample plays in reverse. */
                    vinf[t].increment=fraction2long(vinf[t].frq,md_mixfreq);
                    if(vinf[t].flags & SF_REVERSE) vinf[t].increment=-vinf[t].increment;

                    vol=vinf[t].vol;
                    pan=vinf[t].pan;

                    if(md_mode & DMODE_STEREO){
                        /* Constant-sum pan law: pan 0 = full left,
                           255 = full right. */
                        lvol= ( vol * (255-pan) ) / 255;
                        rvol= ( vol * pan ) / 255;
                        vinf[t].lvolmul=(maxvol*lvol)/64;
                        vinf[t].rvolmul=(maxvol*rvol)/64;
                    }
                    else{
                        /* NOTE(review): rvolmul is left stale in mono mode —
                           presumably the mono mixer only reads lvolmul;
                           verify against the mixing core. */
                        vinf[t].lvolmul=(maxvol*vol)/64;
                    }
                }
            }
        }

        /* Mix at most up to the end of the current tick. */
        part=min(TICKLEFT,todo);
        VC_WritePortion(buf,part);
        TICKLEFT-=part;
        todo-=part;
        buf+=samples2bytes(part);
    }
}
/* Writes 'todo' mixed SBYTES (!!) to 'buf'. It returns the number of
   SBYTES actually written to 'buf' (which is rounded to the number of
   samples that fit into 'todo' bytes). */
ULONG VC1_WriteBytes(SBYTE* buf,ULONG todo)
{
    ULONG samples;

    /* No software channels active: just fill the buffer with silence. */
    if(!vc_softchn)
        return VC1_SilenceBytes(buf,todo);

    samples=bytes2samples(todo);
    VC1_WriteSamples(buf,samples);

    return samples2bytes(samples);
}
/* Writes 'todo' mixed SBYTES (!!) to 'buf'. It returns the number of
   SBYTES actually written to 'buf' (which is rounded to the number of
   samples that fit into 'todo' bytes). */
UWORD VC_WriteBytes(char *buf,UWORD todo)
{
    UWORD samples;

    samples=bytes2samples(todo);
    VC_WriteSamples(buf,samples);

    return samples2bytes(samples);
}
/* Fill the buffer with 'todo' bytes of silence (it depends on the mixing
   mode how the buffer is filled).  Returns the number of bytes actually
   cleared, rounded down to a whole number of samples. */
ULONG VC1_SilenceBytes(SBYTE* buf,ULONG todo)
{
    ULONG count=samples2bytes(bytes2samples(todo));

    /* Silence is 0 for 16-bit signed output, 0x80 for 8-bit unsigned. */
    memset(buf,(vc_mode & DMODE_16BITS) ? 0 : 0x80,count);

    return count;
}
/* Writes 'todo' mixed SAMPLES (!!) to 'buf'.  When 'todo' exceeds the
   number of samples that fit into VC_TICKBUF, the mixing operation is
   performed in several smaller chunks. */
void VC_WritePortion(char *buf,UWORD todo)
{
    UWORD chunk;

    for(;todo;todo-=chunk){
        chunk=min(todo,samplesthatfit);
        VC_FillTick(buf,chunk);
        buf+=samples2bytes(chunk);
    }
}
// returns bytes read, else -1 if error (0 is OK) int AmAudio::get(unsigned int user_ts, unsigned char* buffer, unsigned int nb_samples) { int size = samples2bytes(nb_samples); size = read(user_ts,size); //DBG("size = %d\n",size); if(size <= 0){ return size; } size = decode(size); if(size < 0) { DBG("decode returned %i\n",size); return -1; } size = downMix(size); if(size>0) memcpy(buffer,(unsigned char*)samples,size); return size; }
/* Synchronous RX entry point: fills 'samples' with 'num_samples' samples
 * from the RX stream, blocking up to 'timeout_ms' per buffer wait.
 *
 * Implemented as a state machine driven by s->state:
 *   CHECK_WORKER      - inspect/propagate worker stream status
 *   RESET_BUF_MGMT    - reset consumer index before (re)starting the stream
 *   START_WORKER      - kick the worker thread into the RUNNING state
 *   WAIT_FOR_BUFFER   - block until the worker produces a full buffer
 *   BUFFER_READY      - mark the buffer partially consumed, pick sub-state
 *   USING_BUFFER      - plain SC16Q11: bulk-copy samples to the caller
 *   USING_BUFFER_META - SC16Q11 w/ metadata: walk per-message headers,
 *                       honoring the requested timestamp or RX_NOW flag
 *
 * Returns 0 on success or a BLADERF_ERR_* value; on return,
 * user_meta->actual_count (when metadata is in use) holds the number of
 * samples actually delivered. */
int sync_rx(struct bladerf *dev, void *samples, unsigned num_samples,
            struct bladerf_metadata *user_meta, unsigned int timeout_ms)
{
    struct bladerf_sync *s = dev->sync[BLADERF_MODULE_RX];
    struct buffer_mgmt *b;

    int status = 0;
    bool exit_early = false;          /* set on discontinuity (overrun) */
    bool copied_data = false;         /* true once any samples were returned */

    unsigned int samples_returned = 0;
    uint8_t *samples_dest = (uint8_t*)samples;
    uint8_t *buf_src = NULL;
    unsigned int samples_to_copy = 0;
    unsigned int samples_per_buffer = 0;
    uint64_t target_timestamp = UINT64_MAX;

    if (s == NULL || samples == NULL) {
        log_debug("NULL pointer passed to %s\n", __FUNCTION__);
        return BLADERF_ERR_INVAL;
    } else if (s->stream_config.format == BLADERF_FORMAT_SC16_Q11_META) {
        /* Metadata format requires a metadata struct from the caller;
         * its timestamp field selects the scheduling target. */
        if (user_meta == NULL) {
            log_debug("NULL metadata pointer passed to %s\n", __FUNCTION__);
            return BLADERF_ERR_INVAL;
        } else {
            user_meta->status = 0;
            target_timestamp = user_meta->timestamp;
        }
    }

    b = &s->buf_mgmt;
    samples_per_buffer = s->stream_config.samples_per_buffer;

    log_verbose("%s: Requests %u samples.\n", __FUNCTION__, num_samples);

    /* Run the state machine until the request is satisfied, an error
     * occurs, or a discontinuity forces an early return. */
    while (!exit_early && samples_returned < num_samples && status == 0) {
        switch (s->state) {
            case SYNC_STATE_CHECK_WORKER: {
                int stream_error;
                sync_worker_state worker_state =
                    sync_worker_get_state(s->worker, &stream_error);

                /* Propagate stream error back to the caller.
                 * They can call this function again to restart the stream and
                 * try again. */
                if (stream_error != 0) {
                    status = stream_error;
                } else {
                    if (worker_state == SYNC_WORKER_STATE_IDLE) {
                        log_debug("%s: Worker is idle. Going to reset buf "
                                  "mgmt.\n", __FUNCTION__);
                        s->state = SYNC_STATE_RESET_BUF_MGMT;
                    } else if (worker_state == SYNC_WORKER_STATE_RUNNING) {
                        s->state = SYNC_STATE_WAIT_FOR_BUFFER;
                    } else {
                        status = BLADERF_ERR_UNEXPECTED;
                        log_debug("%s: Unexpected worker state=%d\n",
                                  __FUNCTION__, worker_state);
                    }
                }
                break;
            }

            case SYNC_STATE_RESET_BUF_MGMT:
                MUTEX_LOCK(&b->lock);
                /* When the RX stream starts up, it will submit the first T
                 * transfers, so the consumer index must be reset to 0 */
                b->cons_i = 0;
                MUTEX_UNLOCK(&b->lock);
                log_debug("%s: Reset buf_mgmt consumer index\n", __FUNCTION__);
                s->state = SYNC_STATE_START_WORKER;
                break;

            case SYNC_STATE_START_WORKER:
                sync_worker_submit_request(s->worker, SYNC_WORKER_START);

                status = sync_worker_wait_for_state(
                    s->worker, SYNC_WORKER_STATE_RUNNING,
                    SYNC_WORKER_START_TIMEOUT_MS);

                if (status == 0) {
                    s->state = SYNC_STATE_WAIT_FOR_BUFFER;
                    log_debug("%s: Worker is now running.\n", __FUNCTION__);
                } else {
                    log_debug("%s: Failed to start worker, (%d)\n",
                              __FUNCTION__, status);
                }
                break;

            case SYNC_STATE_WAIT_FOR_BUFFER:
                MUTEX_LOCK(&b->lock);

                /* Check the buffer state, as the worker may have produced one
                 * since we last queried the status */
                if (b->status[b->cons_i] == SYNC_BUFFER_FULL) {
                    s->state = SYNC_STATE_BUFFER_READY;
                    log_verbose("%s: buffer %u is ready to consume\n",
                                __FUNCTION__, b->cons_i);
                } else {
                    status = wait_for_buffer(b, timeout_ms,
                                             __FUNCTION__, b->cons_i);
                    if (status == 0) {
                        /* Spurious wakeups route back through CHECK_WORKER
                         * rather than assuming the buffer is full. */
                        if (b->status[b->cons_i] != SYNC_BUFFER_FULL) {
                            s->state = SYNC_STATE_CHECK_WORKER;
                        } else {
                            s->state = SYNC_STATE_BUFFER_READY;
                            log_verbose("%s: buffer %u is ready to consume\n",
                                        __FUNCTION__, b->cons_i);
                        }
                    }
                }

                MUTEX_UNLOCK(&b->lock);
                break;

            case SYNC_STATE_BUFFER_READY:
                MUTEX_LOCK(&b->lock);
                /* Claim the buffer for consumption and start at offset 0. */
                b->status[b->cons_i] = SYNC_BUFFER_PARTIAL;
                b->partial_off = 0;
                MUTEX_UNLOCK(&b->lock);

                switch (s->stream_config.format) {
                    case BLADERF_FORMAT_SC16_Q11:
                        s->state = SYNC_STATE_USING_BUFFER;
                        break;

                    case BLADERF_FORMAT_SC16_Q11_META:
                        s->state = SYNC_STATE_USING_BUFFER_META;
                        s->meta.curr_msg_off = 0;
                        s->meta.msg_num = 0;
                        break;

                    default:
                        assert(!"Invalid stream format");
                        status = BLADERF_ERR_UNEXPECTED;
                }
                break;

            case SYNC_STATE_USING_BUFFER: /* SC16Q11 buffers w/o metadata */
                MUTEX_LOCK(&b->lock);

                buf_src = (uint8_t*)b->buffers[b->cons_i];

                /* Copy the smaller of "what the caller still needs" and
                 * "what remains in this buffer". */
                samples_to_copy =
                    uint_min(num_samples - samples_returned,
                             samples_per_buffer - b->partial_off);

                memcpy(samples_dest + samples2bytes(s, samples_returned),
                       buf_src + samples2bytes(s, b->partial_off),
                       samples2bytes(s, samples_to_copy));

                b->partial_off += samples_to_copy;
                samples_returned += samples_to_copy;

                log_verbose("%s: Provided %u samples to caller\n",
                            __FUNCTION__, samples_to_copy);

                /* We've finished consuming this buffer and can start looking
                 * for available samples in the next buffer */
                if (b->partial_off >= samples_per_buffer) {

                    /* Check for symptom of out-of-bounds accesses */
                    assert(b->partial_off == samples_per_buffer);

                    advance_rx_buffer(b);
                    s->state = SYNC_STATE_WAIT_FOR_BUFFER;
                }

                MUTEX_UNLOCK(&b->lock);
                break;

            case SYNC_STATE_USING_BUFFER_META: /* SC16Q11 buffers w/ metadata */
                MUTEX_LOCK(&b->lock);

                switch (s->meta.state) {
                    case SYNC_META_STATE_HEADER:
                        /* Parse the header of the current message in the
                         * current buffer: timestamp, flags, sample offset. */
                        assert(s->meta.msg_num < s->meta.msg_per_buf);

                        buf_src = (uint8_t*)b->buffers[b->cons_i];

                        s->meta.curr_msg =
                            buf_src + dev->msg_size * s->meta.msg_num;

                        s->meta.msg_timestamp =
                            metadata_get_timestamp(s->meta.curr_msg);

                        s->meta.msg_flags =
                            metadata_get_flags(s->meta.curr_msg);

                        s->meta.curr_msg_off = 0;

                        /* We've encountered a discontinuity and need to return
                         * what we have so far, setting the status flags */
                        if (copied_data &&
                            s->meta.msg_timestamp != s->meta.curr_timestamp) {

                            user_meta->status |= BLADERF_META_STATUS_OVERRUN;
                            exit_early = true;
                            log_debug("Sample discontinuity detected @ "
                                      "buffer %u, message %u: Expected t=%llu, "
                                      "got t=%llu\n",
                                      b->cons_i, s->meta.msg_num,
                                      (unsigned long long)s->meta.curr_timestamp,
                                      (unsigned long long)s->meta.msg_timestamp);
                        } else {
                            /* NOTE(review): %u is used for what appear to be
                             * 64-bit timestamps here — likely truncates in the
                             * log output; verify the format specifiers. */
                            log_verbose("Got header for message %u: "
                                        "t_new=%u, t_old=%u\n",
                                        s->meta.msg_num,
                                        s->meta.msg_timestamp,
                                        s->meta.curr_timestamp);
                        }

                        s->meta.curr_timestamp = s->meta.msg_timestamp;
                        s->meta.state = SYNC_META_STATE_SAMPLES;
                        break;

                    case SYNC_META_STATE_SAMPLES:
                        if (!copied_data &&
                            (user_meta->flags & BLADERF_META_FLAG_RX_NOW) == 0 &&
                            target_timestamp < s->meta.curr_timestamp) {

                            /* The requested timestamp is already in the past
                             * and nothing has been copied yet. */
                            log_debug("Current timestamp is %llu, "
                                      "target=%llu (user=%llu)\n",
                                      (unsigned long long)s->meta.curr_timestamp,
                                      (unsigned long long)target_timestamp,
                                      (unsigned long long)user_meta->timestamp);

                            status = BLADERF_ERR_TIME_PAST;
                        } else if ((user_meta->flags & BLADERF_META_FLAG_RX_NOW) ||
                                   target_timestamp == s->meta.curr_timestamp) {

                            /* Copy the request amount up to the end of a
                             * this message in the current buffer */
                            samples_to_copy =
                                uint_min(num_samples - samples_returned,
                                         left_in_msg(s));

                            memcpy(samples_dest +
                                       samples2bytes(s, samples_returned),
                                   s->meta.curr_msg + METADATA_HEADER_SIZE +
                                       samples2bytes(s, s->meta.curr_msg_off),
                                   samples2bytes(s, samples_to_copy));

                            samples_returned += samples_to_copy;
                            s->meta.curr_msg_off += samples_to_copy;

                            if (!copied_data &&
                                (user_meta->flags & BLADERF_META_FLAG_RX_NOW)) {

                                /* Provide the user with the timestamp at the
                                 * first returned sample when the
                                 * NOW flag has been provided */
                                user_meta->timestamp = s->meta.curr_timestamp;
                                log_verbose("Updated user meta timestamp with: "
                                            "%llu\n", (unsigned long long)
                                            user_meta->timestamp);
                            }

                            copied_data = true;
                            s->meta.curr_timestamp += samples_to_copy;

                            /* We've begun copying samples, so our target will
                             * just keep tracking the current timestamp. */
                            target_timestamp = s->meta.curr_timestamp;

                            log_verbose("After copying samples, t=%llu\n",
                                        (unsigned long long)s->meta.curr_timestamp);

                            /* Message exhausted: move to the next header,
                             * and to the next buffer if this was the last
                             * message in it. */
                            if (left_in_msg(s) == 0) {
                                assert(s->meta.curr_msg_off ==
                                       s->meta.samples_per_msg);
                                s->meta.state = SYNC_META_STATE_HEADER;
                                s->meta.msg_num++;

                                if (s->meta.msg_num >= s->meta.msg_per_buf) {
                                    assert(s->meta.msg_num ==
                                           s->meta.msg_per_buf);
                                    advance_rx_buffer(b);
                                    s->meta.msg_num = 0;
                                    s->state = SYNC_STATE_WAIT_FOR_BUFFER;
                                }
                            }
                        } else {
                            /* Target timestamp lies ahead of the current
                             * position: seek forward by discarding whole
                             * buffers/messages or by skipping within the
                             * current message. */
                            const uint64_t time_delta =
                                target_timestamp - s->meta.curr_timestamp;

                            uint64_t left_in_buffer =
                                (uint64_t) s->meta.samples_per_msg *
                                (s->meta.msg_per_buf - s->meta.msg_num);

                            /* Account for current position in buffer */
                            left_in_buffer -= s->meta.curr_msg_off;

                            if (time_delta >= left_in_buffer) {
                                /* Discard the remainder of this buffer */
                                advance_rx_buffer(b);
                                s->state = SYNC_STATE_WAIT_FOR_BUFFER;
                                s->meta.state = SYNC_META_STATE_HEADER;

                                log_verbose("%s: Discarding rest of buffer.\n",
                                            __FUNCTION__);
                            } else if (time_delta <= left_in_msg(s)) {
                                /* Fast forward within the current message */
                                assert(time_delta <= SIZE_MAX);
                                s->meta.curr_msg_off += (size_t) time_delta;
                                s->meta.curr_timestamp += time_delta;

                                log_verbose("%s: Seeking within message (t=%llu)\n",
                                            __FUNCTION__,
                                            s->meta.curr_timestamp);
                            } else {
                                /* Skip ahead a whole number of messages. */
                                s->meta.state = SYNC_META_STATE_HEADER;

                                s->meta.msg_num +=
                                    timestamp_to_msg(s, time_delta);

                                log_verbose("%s: Seeking to message %u.\n",
                                            __FUNCTION__, s->meta.msg_num);
                            }
                        }
                        break;

                    default:
                        assert(!"Invalid state");
                        status = BLADERF_ERR_UNEXPECTED;
                }

                MUTEX_UNLOCK(&b->lock);
                break;
        }
    }

    /* Report how many samples were actually delivered to the caller. */
    if (user_meta) {
        user_meta->actual_count = samples_returned;
    }

    return status;
}