/* Send a zero-filled ("silence") audio packet back to the sync master.
 * Used when the driver has nothing real to transmit but must keep the
 * peer's packet clock running.  syncstate is forwarded verbatim in the
 * header's sync_state field; the reply port is taken from the most
 * recently received packet header in netj->rx_buf. */
void netjack_send_silence( netjack_driver_state_t *netj, int syncstate )
{
    /* total packet size = payload (samples) + header */
    int tx_size = get_sample_size(netj->bitdepth) * netj->playback_channels * netj->net_period_up + sizeof(jacknet_packet_header);
    unsigned int *packet_buf, *packet_bufX;

    /* stack allocation: packet only lives for the duration of this call */
    packet_buf = alloca( tx_size);

    jacknet_packet_header *tx_pkthdr = (jacknet_packet_header *)packet_buf;
    jacknet_packet_header *rx_pkthdr = (jacknet_packet_header *)netj->rx_buf;

    //framecnt = rx_pkthdr->framecnt;

    netj->reply_port = rx_pkthdr->reply_port;

    // offset packet_bufX by the packetheader.
    packet_bufX = packet_buf + sizeof(jacknet_packet_header) / sizeof(jack_default_audio_sample_t);

    tx_pkthdr->sync_state = syncstate;
    tx_pkthdr->framecnt = netj->expected_framecnt;

    // memset 0 the payload.
    int payload_size = get_sample_size(netj->bitdepth) * netj->playback_channels * netj->net_period_up;
    memset(packet_bufX, 0, payload_size);

    /* header goes out in network byte order */
    packet_header_hton(tx_pkthdr);
    if (netj->srcaddress_valid) {
        int r;
        /* reply to the port the master asked for, if it specified one */
        if (netj->reply_port)
            netj->syncsource_address.sin_port = htons(netj->reply_port);
        /* send the same packet 'redundancy' times to survive UDP loss */
        for( r=0; r<netj->redundancy; r++ )
            netjack_sendto(netj->outsockfd, (char *)packet_buf, tx_size, 0, (struct sockaddr*)&(netj->syncsource_address), sizeof(struct sockaddr_in), netj->mtu);
    }
}
int process_cut_chunk_data( FILE * fin, FILE * fout, int chunk_index, uint32_t chunkfrom, uint32_t sync_chunkTo,uint32_t sampleFrom, uint32_t sync_to_chunk_offset, uint32_t sync_to,int num ,CMp4_root_box root) { int i=0; if (chunk_index+1==chunkfrom) { uint32_t sample_num_in_cur_chunk_ = get_sample_num_in_cur_chunk(root.sc[num], chunk_index+1); //@a mark获取chunk中sample的数目 uint32_t sample_index_ = get_sample_index(root.sc[num], chunk_index+1);//chunk中第一个sample的序号 _fseeki64(fin,root.co[num].chunk_offset_from_file_begin[chunk_index],SEEK_SET); unsigned int sam_off=_ftelli64(fin); for (int a=sample_index_;a<sampleFrom;a++) { uint32_t sample_size_ = get_sample_size(root.sz[num], a);//获取当前sample的大小 //int buf=sample_size_+4; _fseeki64(fin,sam_off,SEEK_SET); char *ptr1=new char [sample_size_]; fread(ptr1, sample_size_, 1, fin); fwrite(ptr1, sample_size_, 1, fout); delete [] ptr1; sam_off+=sample_size_; } } if (chunk_index+1==sync_chunkTo&&sync_chunkTo!=root.co[num].chunk_offset_amount) { uint32_t sample_num_in_cur_chunk_ = get_sample_num_in_cur_chunk(root.sc[num], chunk_index+1); //@a mark获取chunk中sample的数目 uint32_t sample_index_ = get_sample_index(root.sc[num], chunk_index+1);//chunk中第一个sample的序号 _fseeki64(fin,sync_to_chunk_offset,SEEK_SET); unsigned int to_off=_ftelli64(fin); if (sync_to!=root.sz[num].table_size) { for (int a=sync_to;a<sample_index_+sample_num_in_cur_chunk_;a++) { uint32_t sample_size_ = get_sample_size(root.sz[num], a);//获取当前sample的大小 _fseeki64(fin,to_off,SEEK_SET); char *ptr1=new char [sample_size_]; fread(ptr1, sample_size_, 1, fin); fwrite(ptr1, sample_size_, 1, fout); delete [] ptr1; to_off+=sample_size_; } } i=1; } return i; }
void copy_chunk_data(FILE *fin, const uint32_t chunk_index, //[0, end) 标识为第几个Chunk FILE *fout, int num, CMp4_root_box root ) { _fseeki64(fin, root.co[num].chunk_offset_from_file_begin[chunk_index], SEEK_SET); //获取当前chunk中有多少个sample uint32_t sample_num_in_cur_chunk_ = get_sample_num_in_cur_chunk(root.sc[num], chunk_index+1); //@a mark获取chunk中sample的数目 uint32_t sample_index_ = get_sample_index(root.sc[num], chunk_index+1);//chunk中第一个sample的序号 unsigned int cur=_ftelli64(fin); for(int i = 0; i < sample_num_in_cur_chunk_; i++) { uint32_t sample_size_ = get_sample_size(root.sz[num], sample_index_+i);//获取当前sample的大小 _fseeki64(fin,cur,SEEK_SET); char *ptr=new char [sample_size_]; fread(ptr, sample_size_, 1, fin); fwrite(ptr, sample_size_, 1, fout); delete [] ptr; cur+=sample_size_; } }
// Build the producer node for a GameSoundBuffer: seed the preferred raw
// audio format from the caller-supplied format and remember the buffer
// object audio frames will be pulled from.
GameProducer::GameProducer(GameSoundBuffer* object, const gs_audio_format* format)
	:
	BMediaNode("GameProducer.h"),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	fBufferGroup(NULL),
	fLatency(0),
	fInternalLatency(0),
	fOutputEnabled(true)
{
	// initialize our preferred format object
	fPreferredFormat.type = B_MEDIA_RAW_AUDIO;
	fPreferredFormat.u.raw_audio.format = format->format;
	fPreferredFormat.u.raw_audio.channel_count = format->channel_count;
	fPreferredFormat.u.raw_audio.frame_rate = format->frame_rate;	// Hertz
	fPreferredFormat.u.raw_audio.byte_order = format->byte_order;
//	fPreferredFormat.u.raw_audio.channel_mask
//		= B_CHANNEL_LEFT | B_CHANNEL_RIGHT;
//	fPreferredFormat.u.raw_audio.valid_bits = 32;
//	fPreferredFormat.u.raw_audio.matrix_mask = B_MATRIX_AMBISONIC_WXYZ;

	// we'll use the consumer's preferred buffer size, if any
	fPreferredFormat.u.raw_audio.buffer_size
		= media_raw_audio_format::wildcard.buffer_size;

	// we're not connected yet
	fOutput.destination = media_destination::null;
	fOutput.format = fPreferredFormat;

	// bytes per frame = bytes per sample * channel count
	fFrameSize = get_sample_size(format->format) * format->channel_count;
	fObject = object;
}
/* Produce a new BPushGameSound configured exactly like this one:
 * same audio format, same page geometry, same target device. */
BGameSound *
BPushGameSound::Clone() const
{
	gs_audio_format fmt = Format();

	const size_t bytesPerFrame = fmt.channel_count * get_sample_size(fmt.format);
	const size_t framesPerPage = fPageSize / bytesPerFrame;

	return new BPushGameSound(framesPerPage, &fmt, fPageCount, Device());
}
/*
 * Render up to cbBuffer bytes of interleaved PCM into lpBuffer via
 * libopenmpt and return the number of bytes actually produced
 * (0 on any invalid state or argument).
 */
UINT CSoundFile::Read( LPVOID lpBuffer, UINT cbBuffer )
{
	mpcpplog();
	// Guard clauses: bail out (returning 0) on any invalid state/argument.
	if ( !mod ) {
		return 0;
	}
	mpcpplog();
	if ( !lpBuffer ) {
		return 0;
	}
	mpcpplog();
	if ( cbBuffer <= 0 ) {
		return 0;
	}
	mpcpplog();
	if ( get_samplerate() <= 0 ) {
		return 0;
	}
	mpcpplog();
	// only 8-, 16- and 32-bit output is supported
	if ( get_sample_size() != 1 && get_sample_size() != 2 && get_sample_size() != 4 ) {
		return 0;
	}
	mpcpplog();
	// mono, stereo or quad output only
	if ( get_num_channels() != 1 && get_num_channels() != 2 && get_num_channels() != 4 ) {
		return 0;
	}
	mpcpplog();
	// pre-silence the destination so a short render leaves silence behind
	std::memset( lpBuffer, 0, cbBuffer );
	const std::size_t frames_torender = cbBuffer / get_frame_size();
	// libopenmpt renders 16-bit samples; for 8- or 32-bit output we render
	// into a temporary 16-bit buffer and convert afterwards.
	std::int16_t * out = (std::int16_t*)lpBuffer;
	std::vector<std::int16_t> tmpbuf;
	if ( get_sample_size() == 1 || get_sample_size() == 4 ) {
		tmpbuf.resize( frames_torender * get_num_channels() );
		out = &tmpbuf[0];
	}
	mod->set_render_param( openmpt::module::RENDER_INTERPOLATIONFILTER_LENGTH, get_filter_length() );
	std::size_t frames_rendered = 0;
	// pick the libopenmpt read variant matching the channel layout
	if ( get_num_channels() == 1 ) {
		frames_rendered = mod->read( get_samplerate(), frames_torender, out );
	} else if ( get_num_channels() == 4 ) {
		frames_rendered = mod->read_interleaved_quad( get_samplerate(), frames_torender, out );
	} else {
		frames_rendered = mod->read_interleaved_stereo( get_samplerate(), frames_torender, out );
	}
	if ( get_sample_size() == 1 ) {
		// 16-bit signed -> 8-bit unsigned (offset binary)
		std::uint8_t * dst = (std::uint8_t*)lpBuffer;
		for ( std::size_t sample = 0; sample < frames_rendered * get_num_channels(); ++sample ) {
			dst[sample] = ( tmpbuf[sample] / 0x100 ) + 0x80;
		}
	} else if ( get_sample_size() == 4 ) {
		// 16-bit -> 32-bit, shifted up but attenuated to keep mixing headroom
		std::int32_t * dst = (std::int32_t*)lpBuffer;
		for ( std::size_t sample = 0; sample < frames_rendered * get_num_channels(); ++sample ) {
			dst[sample] = tmpbuf[sample] << (32-16-1-MIXING_ATTENUATION);
		}
	}
	return frames_rendered * get_frame_size();
}
// Open the audio file, negotiate a raw decode format, allocate the
// double buffer used while streaming, and register the sound with the
// device.  Returns B_OK on success or the first error encountered.
status_t BFileGameSound::Init(const entry_ref* file)
{
	fAudioStream = new _gs_media_tracker;
	memset(fAudioStream, 0, sizeof(_gs_media_tracker));
	fAudioStream->file = new BMediaFile(file);
	status_t error = fAudioStream->file->InitCheck();
	if (error != B_OK)
		return error;

	fAudioStream->stream = fAudioStream->file->TrackAt(0);

	// is this an audio file?
	media_format playFormat;
	if ((error = fAudioStream->stream->EncodedFormat(&playFormat)) != B_OK)
		return error;

	if (!playFormat.IsAudio())
		return B_MEDIA_BAD_FORMAT;

	gs_audio_format dformat = Device()->Format();

	// request the raw format we want the sound decoded into
	memset(&playFormat, 0, sizeof(media_format));
	playFormat.type = B_MEDIA_RAW_AUDIO;
	if (fAudioStream->stream->DecodedFormat(&playFormat) != B_OK)
		return B_MEDIA_BAD_FORMAT;

	// translate the format into a "GameKit" friendly one
	gs_audio_format gsformat;
	media_to_gs_format(&gsformat, &playFormat.u.raw_audio);

	// Since the buffer size read from the file is most likely different
	// from the buffer used by the audio mixer, we must allocate a buffer
	// large enough to hold the largest request.
	fBufferSize = gsformat.buffer_size;
	if (fBufferSize < dformat.buffer_size)
		fBufferSize = dformat.buffer_size;

	// create the buffer (double buffered, hence * 2)
	fBuffer = new char[fBufferSize * 2];
	memset(fBuffer, 0, fBufferSize * 2);

	// bytes per frame = channel count * bytes per sample
	fFrameSize = gsformat.channel_count * get_sample_size(gsformat.format);
	fAudioStream->frames = fAudioStream->stream->CountFrames();

	// Ask the device to attach our sound to it
	gs_id sound;
	error = Device()->CreateBuffer(&sound, this, &gsformat);
	if (error != B_OK)
		return error;

	return BGameSound::Init(sound);
}
/* Decode AAC data until at least num_samples_to_write frames are buffered,
 * then copy them to dest.  Returns the number of frames delivered.
 *
 * NOTE(review): "multiplier" is bytes per frame (channels * bytes per
 * sample) and out_buffer is sized in bytes, yet the resize/memcpy below
 * use info.samples * num_channels as the byte count.  That is only
 * correct if the per-sample byte size happens to equal the channel count
 * (e.g. 16-bit stereo) or if faacDecFrameInfo::samples is per-channel —
 * verify against the FAAD2 documentation. */
unsigned int faad_decoder::update(void *dest, unsigned int const num_samples_to_write)
{
	if (!is_initialized())
		return 0;

	// bytes per output frame
	unsigned int multiplier = playback_properties_.num_channels * get_sample_size(playback_properties_.sample_type_);
	faacDecFrameInfo info;

	// keep decoding until enough output bytes are buffered (or EOF/error)
	while (out_buffer.size() < (num_samples_to_write * multiplier)) {
		size_t actual_in_size = in_buffer.size();
		// top up the input buffer to at least 32 KiB before decoding
		if (actual_in_size < 32768) {
			size_t old_in_size = actual_in_size;
			in_buffer.resize(old_in_size + 32768);
			long num_read_bytes = source_->read(&in_buffer[old_in_size], 32768);
			if (num_read_bytes <= 0)
				break;
			actual_in_size = old_in_size + num_read_bytes;
		}

		void *output_samples = faacDecDecode(*faad_handle, &info, reinterpret_cast < unsigned char* > (&in_buffer[0]), actual_in_size);

		// drop the consumed bytes from the front of the input buffer
		unsigned int num_remaining_bytes = actual_in_size;
		num_remaining_bytes -= info.bytesconsumed;
		std::memmove(&in_buffer[0], &in_buffer[info.bytesconsumed], num_remaining_bytes);
		in_buffer.resize(num_remaining_bytes);

		// any decode failure permanently de-initializes this decoder
		if ((output_samples == 0) || (info.error != 0) || (info.channels == 0)) {
			initialized = false;
			break;
		}

		if (info.samples > 0) {
			size_t old_out_size = out_buffer.size();
			out_buffer.resize(old_out_size + info.samples * playback_properties_.num_channels);
			std::memcpy(&out_buffer[old_out_size], output_samples, info.samples * playback_properties_.num_channels);
		}
	}

	// deliver whole frames; keep any surplus bytes for the next call
	size_t num_bytes_to_copy = std::min(size_t(num_samples_to_write * multiplier), out_buffer.size());
	std::memcpy(dest, &out_buffer[0], num_bytes_to_copy);
	if (out_buffer.size() > num_bytes_to_copy)
		std::memmove(&out_buffer[0], &out_buffer[num_bytes_to_copy], out_buffer.size() - num_bytes_to_copy);
	out_buffer.resize(out_buffer.size() - num_bytes_to_copy);

	current_position += num_bytes_to_copy / multiplier;
	return num_bytes_to_copy / multiplier;
}
/* Construct a push-mode streaming sound: compute the page geometry from
 * the requested frame count / format and allocate one contiguous buffer
 * holding all pages. */
BPushGameSound::BPushGameSound(size_t inBufferFrameCount,
	const gs_audio_format *format, size_t inBufferCount,
	BGameSoundDevice *device)
	:
	BStreamingGameSound(inBufferFrameCount, format, inBufferCount, device)
{
	fPageLocked = new BList;

	// one frame carries one sample per channel
	const size_t bytesPerFrame
		= format->channel_count * get_sample_size(format->format);

	fPageSize = bytesPerFrame * inBufferFrameCount;
	fPageCount = inBufferCount;
	fBufferSize = fPageSize * fPageCount;
	fBuffer = new char[fBufferSize];
}
void copy_sample_data(FILE *fin, const uint32_t chunk_index, //[0, end) 标识为第几个Chunk std::string name, int num, CMp4_root_box root, int& nSampleId ) { _fseeki64(fin, root.co[num].chunk_offset_from_file_begin[chunk_index], SEEK_SET); //获取当前chunk中有多少个sample uint32_t sample_num_in_cur_chunk_ = get_sample_num_in_cur_chunk(root.sc[num], chunk_index+1); //@a mark获取chunk中sample的数目 uint32_t sample_index_ = get_sample_index(root.sc[num], chunk_index+1);//chunk中第一个sample的序号 unsigned int cur=_ftelli64(fin); for(int i = 0; i < sample_num_in_cur_chunk_; i++) { uint32_t sample_size_ = get_sample_size(root.sz[num], sample_index_+i);//获取当前sample的大小 _fseeki64(fin,cur,SEEK_SET); char *ptr=new char [sample_size_]; fread(ptr, sample_size_, 1, fin); uint32_t sample_time = get_sample_time(root.ts[num], nSampleId ); char char_num[260]; sprintf(char_num, "%u_%u", nSampleId, sample_time); //sprintf(char_num, "E:/%s/%u_%u", name.c_str(),nSampleId,sample_time); FILE *fout = fopen(std::string(name + "//" + name + char_num).c_str(), "w"); if(fout == (FILE*)0){ printf("error\n"); std::exit(-1); } //写一帧数据 --- 可以直接进行网络推送 fwrite(ptr, sample_size_, 1, fout); delete [] ptr; cur+=sample_size_; nSampleId++; fclose(fout); } }
// GameSoundBuffer ------------------------------------------------------- GameSoundBuffer::GameSoundBuffer(const gs_audio_format * format) : fLooping(false), fIsConnected(false), fIsPlaying(false), fGain(1.0), fPan(0.0), fPanLeft(1.0), fPanRight(1.0), fGainRamp(NULL), fPanRamp(NULL) { fConnection = new Connection; fNode = new GameProducer(this, format); fFrameSize = get_sample_size(format->format) * format->channel_count; memcpy(&fFormat, format, sizeof(gs_audio_format)); }
// Initialize the OpenAL playback backend: open the default device,
// create a context and make it current, then pick the buffer format
// matching the configured channel count and sample size.
OpenALBackend::OpenALBackend()
	: m_sampling_rate(get_sampling_rate())
	, m_sample_size(get_sample_size())
{
	// FIX: the previous code declared *local* variables named m_device and
	// m_context here, shadowing the members of the same name.  The opened
	// device/context were lost when the constructor returned and the
	// members were left uninitialized.  Assign the members instead.
	m_device = alcOpenDevice(nullptr);
	checkForAlcError("OpenALBackend->alcOpenDevice");

	m_context = alcCreateContext(m_device, nullptr);
	checkForAlcError("OpenALBackend->alcCreateContext");

	alcMakeContextCurrent(m_context);
	checkForAlcError("OpenALBackend->alcMakeContextCurrent");

	if (get_channels() == 2) {
		m_format = (m_sample_size == 2) ? AL_FORMAT_STEREO16 : AL_FORMAT_STEREO_FLOAT32;
	} else {
		// NOTE(review): every non-stereo channel count maps to 7.1 formats;
		// confirm get_channels() can only ever report 2 or 8 here.
		m_format = (m_sample_size == 2) ? AL_FORMAT_71CHN16 : AL_FORMAT_71CHN32;
	}
}
/* Convert/mix an interleaved buffer from num_input_channels to
 * num_output_channels, converting sample formats on the way.  Besides
 * the equal-channel pass-through, only 1->2 and 2->1 mixdowns are
 * supported; any other combination is silently ignored.
 *
 * NOTE(review): the pass-through memcpy copies
 * num_samples * sample_size bytes (no channel factor) while the
 * conversion loop below writes num_samples * num_output_channels
 * samples.  Whether one of the two misses/duplicates the channel
 * factor depends on whether num_samples counts frames or raw samples —
 * verify against the callers. */
void mix_channels(
	void const *source_data, void *dest_data,
	unsigned int const num_samples,
	sample_type const input_type, sample_type const output_type,
	unsigned int const num_input_channels, unsigned int const num_output_channels
)
{
	if (num_input_channels == num_output_channels) {
		// the N input channels -> N output channels case; just do a copy if the sample types also match
		// if the types don't match, do a conversion step, but do not mix anything (since it doesn't have to be done)
		if (input_type == output_type)
			std::memcpy(dest_data, source_data, num_samples * get_sample_size(input_type));
		else {
			for (unsigned int i = 0; i < num_samples * num_output_channels; ++i) {
				set_sample_value(
					dest_data, i,
					convert_sample_value(
						get_sample_value(source_data, i, input_type),
						input_type, output_type
					),
					output_type
				);
			}
		}
		return;
	} else if ((num_input_channels == 1) && (num_output_channels == 2))
		mix_channels_1_to_2(source_data, dest_data, num_samples, input_type, output_type);
	else if ((num_input_channels == 2) && (num_output_channels == 1))
		mix_channels_2_to_1(source_data, dest_data, num_samples, input_type, output_type);
}
int main (int argc, char *argv[]) { /* Some startup related basics */ char *client_name, *server_name = NULL, *peer_ip; int peer_port = 3000; jack_options_t options = JackNullOption; jack_status_t status; #ifdef WIN32 WSADATA wsa; int rc = WSAStartup(MAKEWORD(2, 0), &wsa); #endif /* Torben's famous state variables, aka "the reporting API" ! */ /* heh ? these are only the copies of them ;) */ int statecopy_connected, statecopy_latency, statecopy_netxruns; jack_nframes_t net_period; /* Argument parsing stuff */ extern char *optarg; extern int optind, optopt; int errflg = 0, c; if (argc < 3) { printUsage (); return 1; } client_name = (char *) malloc (sizeof (char) * 10); peer_ip = (char *) malloc (sizeof (char) * 10); sprintf(client_name, "netjack"); sprintf(peer_ip, "localhost"); while ((c = getopt (argc, argv, ":h:H:o:i:O:I:n:p:r:B:b:c:m:R:e:N:s:P:")) != -1) { switch (c) { case 'h': printUsage(); exit (0); break; case 'H': free(peer_ip); peer_ip = (char *) malloc (sizeof (char) * strlen (optarg) + 1); strcpy (peer_ip, optarg); break; case 'o': playback_channels_audio = atoi (optarg); break; case 'i': capture_channels_audio = atoi (optarg); break; case 'O': playback_channels_midi = atoi (optarg); break; case 'I': capture_channels_midi = atoi (optarg); break; case 'n': latency = atoi (optarg); break; case 'p': peer_port = atoi (optarg); break; case 'r': reply_port = atoi (optarg); break; case 'B': bind_port = atoi (optarg); break; case 'f': factor = atoi (optarg); printf("This feature is deprecated and will be removed in future netjack versions. 
CELT offers a superiour way to conserve bandwidth"); break; case 'b': bitdepth = atoi (optarg); break; case 'c': #if HAVE_CELT bitdepth = 1000; factor = atoi (optarg); #else printf( "not built with celt support\n" ); exit(10); #endif break; case 'P': #if HAVE_OPUS bitdepth = 999; factor = atoi (optarg); #else printf( "not built with opus support\n" ); exit(10); #endif break; case 'm': mtu = atoi (optarg); break; case 'R': redundancy = atoi (optarg); break; case 'e': dont_htonl_floats = 1; break; case 'N': free(client_name); client_name = (char *) malloc (sizeof (char) * strlen (optarg) + 1); strcpy (client_name, optarg); break; case 's': server_name = (char *) malloc (sizeof (char) * strlen (optarg) + 1); strcpy (server_name, optarg); options |= JackServerName; break; case ':': fprintf (stderr, "Option -%c requires an operand\n", optopt); errflg++; break; case '?': fprintf (stderr, "Unrecognized option: -%c\n", optopt); errflg++; } } if (errflg) { printUsage (); exit (2); } capture_channels = capture_channels_audio + capture_channels_midi; playback_channels = playback_channels_audio + playback_channels_midi; outsockfd = socket (AF_INET, SOCK_DGRAM, 0); insockfd = socket (AF_INET, SOCK_DGRAM, 0); if ((outsockfd == -1) || (insockfd == -1)) { fprintf (stderr, "cant open sockets\n" ); return 1; } init_sockaddr_in ((struct sockaddr_in *) &destaddr, peer_ip, peer_port); if (bind_port) { init_sockaddr_in ((struct sockaddr_in *) &bindaddr, NULL, bind_port); if( bind (outsockfd, &bindaddr, sizeof (bindaddr)) ) { fprintf (stderr, "bind failure\n" ); } } if (reply_port) { init_sockaddr_in ((struct sockaddr_in *) &bindaddr, NULL, reply_port); if( bind (insockfd, &bindaddr, sizeof (bindaddr)) ) { fprintf (stderr, "bind failure\n" ); } } /* try to become a client of the JACK server */ client = jack_client_open (client_name, options, &status, server_name); if (client == NULL) { fprintf (stderr, "jack_client_open() failed, status = 0x%2.0x\n" "Is the JACK server running ?\n", 
status); return 1; } /* Set up jack callbacks */ jack_set_process_callback (client, process, 0); jack_set_sync_callback (client, sync_cb, 0); jack_set_freewheel_callback (client, freewheel_cb, 0); jack_on_shutdown (client, jack_shutdown, 0); alloc_ports (capture_channels_audio, playback_channels_audio, capture_channels_midi, playback_channels_midi); if( bitdepth == 1000 || bitdepth == 999) net_period = (factor * jack_get_buffer_size(client) * 1024 / jack_get_sample_rate(client) / 8) & (~1) ; else net_period = ceilf((float) jack_get_buffer_size (client) / (float) factor); int rx_bufsize = get_sample_size (bitdepth) * capture_channels * net_period + sizeof (jacknet_packet_header); packcache = packet_cache_new (latency + 50, rx_bufsize, mtu); /* tell the JACK server that we are ready to roll */ if (jack_activate (client)) { fprintf (stderr, "Cannot activate client"); return 1; } /* Now sleep forever... and evaluate the state_ vars */ signal( SIGTERM, sigterm_handler ); signal( SIGINT, sigterm_handler ); statecopy_connected = 2; // make it report unconnected on start. 
statecopy_latency = state_latency; statecopy_netxruns = state_netxruns; while ( !quit ) { #ifdef WIN32 Sleep (1000); #else sleep(1); #endif if (statecopy_connected != state_connected) { statecopy_connected = state_connected; if (statecopy_connected) { state_netxruns = 1; // We want to reset the netxrun count on each new connection printf ("Connected :-)\n"); } else printf ("Not Connected\n"); fflush(stdout); } if (statecopy_connected) { if (statecopy_netxruns != state_netxruns) { statecopy_netxruns = state_netxruns; printf ("%s: at frame %06d -> total netxruns %d (%d%%) queue time= %d\n", client_name, state_currentframe, statecopy_netxruns, 100 * statecopy_netxruns / state_currentframe, state_recv_packet_queue_time); fflush(stdout); } } else { if (statecopy_latency != state_latency) { statecopy_latency = state_latency; if (statecopy_latency > 1) printf ("current latency %d\n", statecopy_latency); fflush(stdout); } } } jack_client_close (client); packet_cache_free (packcache); exit (0); }
/**
 * The process callback for this JACK application.
 * It is called by JACK at the appropriate times.
 *
 * One cycle does: (for latency==0) send the local playback data first,
 * then receive the peer's packet for the expected frame count and render
 * it to the capture ports (or silence on a miss), then (for latency!=0)
 * send the local playback data.  framecnt advances once per cycle.
 */
int process (jack_nframes_t nframes, void *arg)
{
    jack_nframes_t net_period;
    int rx_bufsize, tx_bufsize;

    jack_default_audio_sample_t *buf;
    jack_port_t *port;
    JSList *node;
    int chn;
    int size, i;
    const char *porttype;
    int input_fd;

    jack_position_t local_trans_pos;

    uint32_t *packet_buf_tx, *packet_bufX;
    uint32_t *rx_packet_ptr;
    jack_time_t packet_recv_timestamp;

    /* CELT/Opus (bitdepth 1000/999) use a kbit/s based payload size,
     * everything else scales the period by the downsampling factor. */
    if( bitdepth == 1000 || bitdepth == 999)
        net_period = (factor * jack_get_buffer_size(client) * 1024 / jack_get_sample_rate(client) / 8) & (~1) ;
    else
        net_period = (float) nframes / (float) factor;

    rx_bufsize = get_sample_size (bitdepth) * capture_channels * net_period + sizeof (jacknet_packet_header);
    tx_bufsize = get_sample_size (bitdepth) * playback_channels * net_period + sizeof (jacknet_packet_header);

    /* Allocate a buffer where both In and Out Buffer will fit */
    packet_buf_tx = alloca (tx_bufsize);

    jacknet_packet_header *pkthdr_tx = (jacknet_packet_header *) packet_buf_tx;

    /*
     * for latency==0 we need to send out the packet before we wait on the reply.
     * but this introduces a cycle of latency, when netsource is connected to itself.
     * so we send out before read only in zero latency mode.
     */
    if( latency == 0 ) {
        /* reset packet_bufX... (skip the header region) */
        packet_bufX = packet_buf_tx + sizeof (jacknet_packet_header) / sizeof (jack_default_audio_sample_t);

        /* ---------- Send ---------- */
        render_jack_ports_to_payload (bitdepth, playback_ports, playback_srcs, nframes, packet_bufX, net_period, dont_htonl_floats);

        /* fill in packet hdr */
        pkthdr_tx->transport_state = jack_transport_query (client, &local_trans_pos);
        pkthdr_tx->transport_frame = local_trans_pos.frame;
        pkthdr_tx->framecnt = framecnt;
        pkthdr_tx->latency = latency;
        pkthdr_tx->reply_port = reply_port;
        pkthdr_tx->sample_rate = jack_get_sample_rate (client);
        pkthdr_tx->period_size = nframes;

        /* playback for us is capture on the other side */
        pkthdr_tx->capture_channels_audio = playback_channels_audio;
        pkthdr_tx->playback_channels_audio = capture_channels_audio;
        pkthdr_tx->capture_channels_midi = playback_channels_midi;
        pkthdr_tx->playback_channels_midi = capture_channels_midi;
        pkthdr_tx->mtu = mtu;
        if( freewheeling != 0 )
            pkthdr_tx->sync_state = (jack_nframes_t)MASTER_FREEWHEELS;
        else
            pkthdr_tx->sync_state = (jack_nframes_t)deadline_goodness;
        //printf("goodness=%d\n", deadline_goodness );

        packet_header_hton (pkthdr_tx);
        /* while the miss counter is low, keep (redundantly) sending;
         * after too many misses, reset and report disconnected */
        if (cont_miss < 3 * latency + 5) {
            int r;
            for( r = 0; r < redundancy; r++ )
                netjack_sendto (outsockfd, (char *) packet_buf_tx, tx_bufsize, 0, &destaddr, sizeof (destaddr), mtu);
        } else if (cont_miss > 50 + 5 * latency) {
            state_connected = 0;
            packet_cache_reset_master_address( packcache );
            //printf ("Frame %d \tRealy too many packets missed (%d). Let's reset the counter\n", framecnt, cont_miss);
            cont_miss = 0;
        }
    }

    /*
     * ok... now the RECEIVE code.
     *
     */

    if( reply_port )
        input_fd = insockfd;
    else
        input_fd = outsockfd;

    // for latency == 0 we can poll.
    if( (latency == 0) || (freewheeling != 0) ) {
        /* wait at most one JACK period for the matching packet */
        jack_time_t deadline = jack_get_time() + 1000000 * jack_get_buffer_size(client) / jack_get_sample_rate(client);
        // Now loop until we get the right packet.
        while(1) {
            jack_nframes_t got_frame;
            if ( ! netjack_poll_deadline( input_fd, deadline ) )
                break;

            packet_cache_drain_socket(packcache, input_fd);

            if (packet_cache_get_next_available_framecnt( packcache, framecnt - latency, &got_frame ))
                if( got_frame == (framecnt - latency) )
                    break;
        }
    } else {
        // normally:
        // only drain socket.
        packet_cache_drain_socket(packcache, input_fd);
    }

    size = packet_cache_retreive_packet_pointer( packcache, framecnt - latency, (char**)&rx_packet_ptr, rx_bufsize, &packet_recv_timestamp );
    /* First alternative : we received what we expected. Render the data
     * to the JACK ports so it can be played. */
    if (size == rx_bufsize) {
        uint32_t *packet_buf_rx = rx_packet_ptr;
        jacknet_packet_header *pkthdr_rx = (jacknet_packet_header *) packet_buf_rx;
        packet_bufX = packet_buf_rx + sizeof (jacknet_packet_header) / sizeof (jack_default_audio_sample_t);
        // calculate how much time there would have been, if this packet was sent at the deadline.

        int recv_time_offset = (int) (jack_get_time() - packet_recv_timestamp);
        packet_header_ntoh (pkthdr_rx);
        deadline_goodness = recv_time_offset - (int)pkthdr_rx->latency;
        //printf( "deadline goodness = %d ---> off: %d\n", deadline_goodness, recv_time_offset );

        if (cont_miss) {
            //printf("Frame %d \tRecovered from dropouts\n", framecnt);
            cont_miss = 0;
        }
        render_payload_to_jack_ports (bitdepth, packet_bufX, net_period, capture_ports, capture_srcs, nframes, dont_htonl_floats);

        /* publish state for the reporting loop in main() */
        state_currentframe = framecnt;
        state_recv_packet_queue_time = recv_time_offset;
        state_connected = 1;
        sync_state = pkthdr_rx->sync_state;
        packet_cache_release_packet( packcache, framecnt - latency );
    }
    /* Second alternative : we've received something that's not
     * as big as expected or we missed a packet. We render silence
     * to the ouput ports */
    else {
        jack_nframes_t latency_estimate;
        if( packet_cache_find_latency( packcache, framecnt, &latency_estimate ) )
            //if( (state_latency == 0) || (latency_estimate < state_latency) )
            state_latency = latency_estimate;

        // Set the counters up.
        state_currentframe = framecnt;
        //state_latency = framecnt - pkthdr->framecnt;
        state_netxruns += 1;

        //printf ("Frame %d \tPacket missed or incomplete (expected: %d bytes, got: %d bytes)\n", framecnt, rx_bufsize, size);
        //printf ("Frame %d \tPacket missed or incomplete\n", framecnt);
        cont_miss += 1;
        chn = 0;
        node = capture_ports;
        /* zero all audio capture ports, clear all MIDI capture ports */
        while (node != NULL) {
            port = (jack_port_t *) node->data;
            buf = jack_port_get_buffer (port, nframes);
            porttype = jack_port_type (port);
            if (strncmp (porttype, JACK_DEFAULT_AUDIO_TYPE, jack_port_type_size ()) == 0)
                for (i = 0; i < nframes; i++)
                    buf[i] = 0.0;
            else if (strncmp (porttype, JACK_DEFAULT_MIDI_TYPE, jack_port_type_size ()) == 0)
                jack_midi_clear_buffer (buf);
            node = jack_slist_next (node);
            chn++;
        }
    }
    /* non-zero latency: send after receiving (see comment above) */
    if (latency != 0) {
        /* reset packet_bufX... */
        packet_bufX = packet_buf_tx + sizeof (jacknet_packet_header) / sizeof (jack_default_audio_sample_t);

        /* ---------- Send ---------- */
        render_jack_ports_to_payload (bitdepth, playback_ports, playback_srcs, nframes, packet_bufX, net_period, dont_htonl_floats);

        /* fill in packet hdr */
        pkthdr_tx->transport_state = jack_transport_query (client, &local_trans_pos);
        pkthdr_tx->transport_frame = local_trans_pos.frame;
        pkthdr_tx->framecnt = framecnt;
        pkthdr_tx->latency = latency;
        pkthdr_tx->reply_port = reply_port;
        pkthdr_tx->sample_rate = jack_get_sample_rate (client);
        pkthdr_tx->period_size = nframes;

        /* playback for us is capture on the other side */
        pkthdr_tx->capture_channels_audio = playback_channels_audio;
        pkthdr_tx->playback_channels_audio = capture_channels_audio;
        pkthdr_tx->capture_channels_midi = playback_channels_midi;
        pkthdr_tx->playback_channels_midi = capture_channels_midi;
        pkthdr_tx->mtu = mtu;
        if( freewheeling != 0 )
            pkthdr_tx->sync_state = (jack_nframes_t)MASTER_FREEWHEELS;
        else
            pkthdr_tx->sync_state = (jack_nframes_t)deadline_goodness;
        //printf("goodness=%d\n", deadline_goodness );

        packet_header_hton (pkthdr_tx);
        if (cont_miss < 3 * latency + 5) {
            int r;
            for( r = 0; r < redundancy; r++ )
                netjack_sendto (outsockfd, (char *) packet_buf_tx, tx_bufsize, 0, &destaddr, sizeof (destaddr), mtu);
        } else if (cont_miss > 50 + 5 * latency) {
            state_connected = 0;
            packet_cache_reset_master_address( packcache );
            //printf ("Frame %d \tRealy too many packets missed (%d). Let's reset the counter\n", framecnt, cont_miss);
            cont_miss = 0;
        }
    }
    framecnt++;
    return 0;
}
/* Create and bind the driver's UDP sockets and, when autoconfig is
 * enabled, block until the first packet from the master arrives so its
 * header can override the local sample rate / period / channel setup.
 * Afterwards derive all per-period sizes and counters.
 * Returns 0 on success, -1 on socket/poll errors; exits the process on
 * insane autoconfig values. */
int netjack_startup( netjack_driver_state_t *netj )
{
    int first_pack_len;
    struct sockaddr_in address;
    // Now open the socket, and wait for the first packet to arrive...
    netj->sockfd = socket (AF_INET, SOCK_DGRAM, 0);
#ifdef WIN32
    if (netj->sockfd == INVALID_SOCKET)
#else
    if (netj->sockfd == -1)
#endif
    {
        jack_info ("socket error");
        return -1;
    }
    address.sin_family = AF_INET;
    address.sin_port = htons(netj->listen_port);
    address.sin_addr.s_addr = htonl(INADDR_ANY);
    if (bind (netj->sockfd, (struct sockaddr *) &address, sizeof (address)) < 0) {
        jack_info("bind error");
        return -1;
    }

    netj->outsockfd = socket (AF_INET, SOCK_DGRAM, 0);
#ifdef WIN32
    if (netj->outsockfd == INVALID_SOCKET)
#else
    if (netj->outsockfd == -1)
#endif
    {
        jack_info ("socket error");
        return -1;
    }
    netj->srcaddress_valid = 0;
    if (netj->use_autoconfig) {
        jacknet_packet_header *first_packet = alloca (sizeof (jacknet_packet_header));
#ifdef WIN32
        int address_size = sizeof( struct sockaddr_in );
#else
        socklen_t address_size = sizeof (struct sockaddr_in);
#endif
        //jack_info ("Waiting for an incoming packet !!!");
        //jack_info ("*** IMPORTANT *** Dont connect a client to jackd until the driver is attached to a clock source !!!");

        /* poll in 1s slices until a full header arrives; the sender's
         * address is captured as the sync source */
        while(1) {
            if( ! netjack_poll( netj->sockfd, 1000 ) ) {
                jack_info ("Waiting aborted");
                return -1;
            }
            first_pack_len = recvfrom (netj->sockfd, (char *)first_packet, sizeof (jacknet_packet_header), 0, (struct sockaddr*) & netj->syncsource_address, &address_size);
#ifdef WIN32
            /* NOTE(review): on WIN32 a recvfrom error (-1) is treated as if
             * a full header had been read and the loop is left; presumably a
             * workaround for spurious WSA failures -- confirm. */
            if( first_pack_len == -1 ) {
                first_pack_len = sizeof(jacknet_packet_header);
                break;
            }
#else
            if (first_pack_len == sizeof (jacknet_packet_header))
                break;
#endif
        }
        netj->srcaddress_valid = 1;

        /* apply the master's settings from the received header */
        if (first_pack_len == sizeof (jacknet_packet_header)) {
            packet_header_ntoh (first_packet);
            jack_info ("AutoConfig Override !!!");
            if (netj->sample_rate != first_packet->sample_rate) {
                jack_info ("AutoConfig Override: Master JACK sample rate = %d", first_packet->sample_rate);
                netj->sample_rate = first_packet->sample_rate;
            }

            if (netj->period_size != first_packet->period_size) {
                jack_info ("AutoConfig Override: Master JACK period size is %d", first_packet->period_size);
                netj->period_size = first_packet->period_size;
            }
            if (netj->capture_channels_audio != first_packet->capture_channels_audio) {
                jack_info ("AutoConfig Override: capture_channels_audio = %d", first_packet->capture_channels_audio);
                netj->capture_channels_audio = first_packet->capture_channels_audio;
            }
            if (netj->capture_channels_midi != first_packet->capture_channels_midi) {
                jack_info ("AutoConfig Override: capture_channels_midi = %d", first_packet->capture_channels_midi);
                netj->capture_channels_midi = first_packet->capture_channels_midi;
            }
            if (netj->playback_channels_audio != first_packet->playback_channels_audio) {
                jack_info ("AutoConfig Override: playback_channels_audio = %d", first_packet->playback_channels_audio);
                netj->playback_channels_audio = first_packet->playback_channels_audio;
            }
            if (netj->playback_channels_midi != first_packet->playback_channels_midi) {
                jack_info ("AutoConfig Override: playback_channels_midi = %d", first_packet->playback_channels_midi);
                netj->playback_channels_midi = first_packet->playback_channels_midi;
            }
            netj->mtu = first_packet->mtu;
            jack_info ("MTU is set to %d bytes", first_packet->mtu);
            netj->latency = first_packet->latency;
        }
    }
    netj->capture_channels = netj->capture_channels_audio + netj->capture_channels_midi;
    netj->playback_channels = netj->playback_channels_audio + netj->playback_channels_midi;

    /* sanity-check the (possibly remote-supplied) autoconfig values */
    if( (netj->capture_channels * netj->period_size * netj->latency * 4) > 100000000 ) {
        jack_error( "autoconfig requests more than 100MB packet cache... bailing out" );
        exit(1);
    }

    if( netj->playback_channels > 1000 ) {
        jack_error( "autoconfig requests more than 1000 playback channels... bailing out" );
        exit(1);
    }

    if( netj->mtu < (2*sizeof( jacknet_packet_header )) ) {
        jack_error( "bullshit mtu requested by autoconfig" );
        exit(1);
    }

    if( netj->sample_rate == 0 ) {
        jack_error( "sample_rate 0 requested by autoconfig" );
        exit(1);
    }

    // After possible Autoconfig: do all calculations...
    netj->period_usecs =
        (jack_time_t) floor ((((float) netj->period_size) / (float)netj->sample_rate)
                             * 1000000.0f);

    /* deadline: very generous for latency 0, otherwise one period plus
     * 10% per latency step */
    if( netj->latency == 0 )
        netj->deadline_offset = 50*netj->period_usecs;
    else
        netj->deadline_offset = netj->period_usecs + 10*netj->latency*netj->period_usecs/100;

    if( netj->bitdepth == CELT_MODE ) {
        // celt mode.
        // TODO: this is a hack. But i dont want to change the packet header.
        netj->resample_factor = (netj->resample_factor * netj->period_size * 1024 / netj->sample_rate / 8)&(~1);
        netj->resample_factor_up = (netj->resample_factor_up * netj->period_size * 1024 / netj->sample_rate / 8)&(~1);

        netj->net_period_down = netj->resample_factor;
        netj->net_period_up = netj->resample_factor_up;
    } else {
        netj->net_period_down = (float) netj->period_size / (float) netj->resample_factor;
        netj->net_period_up = (float) netj->period_size / (float) netj->resample_factor_up;
    }

    netj->rx_bufsize = sizeof (jacknet_packet_header) + netj->net_period_down * netj->capture_channels * get_sample_size (netj->bitdepth);
    netj->packcache = packet_cache_new (netj->latency + 50, netj->rx_bufsize, netj->mtu);

    /* reset all sync bookkeeping */
    netj->expected_framecnt_valid = 0;
    netj->num_lost_packets = 0;
    netj->next_deadline_valid = 0;
    netj->deadline_goodness = 0;
    netj->time_to_deadline = 0;

    // Special handling for latency=0
    if( netj->latency == 0 )
        netj->resync_threshold = 0;
    else
        netj->resync_threshold = MIN( 15, netj->latency-1 );

    netj->running_free = 0;

    return 0;
}
/// @brief Returns the size of one audio frame in bytes.
///
/// A frame holds exactly one sample for every channel, so its byte size
/// is the per-sample size multiplied by the number of channels.
static std::size_t get_frame_size()
{
	const auto bytes_per_sample = get_sample_size();
	const auto channel_count = get_num_channels();
	return bytes_per_sample * channel_count;
}
unsigned long operator()( SampleSource &sample_source, InputProperties const &input_properties, void *output, unsigned long const num_output_samples, OutputProperties const &output_properties, unsigned int const volume, unsigned int const max_volume ) { // prerequisites unsigned int num_input_channels = get_num_channels(input_properties); unsigned int num_output_channels = get_num_channels(output_properties); unsigned int input_frequency = get_frequency(input_properties); unsigned int output_frequency = get_frequency(output_properties); if (input_frequency == 0) // if the input frequency is 0, then the sample source can adapt to the output frequency input_frequency = output_frequency; bool frequencies_match = (input_frequency == output_frequency); bool num_channels_match = (num_input_channels == num_output_channels); bool sample_types_match = get_sample_type(input_properties) == get_sample_type(output_properties); // if the properties fully match, no processing is necessary - just transmit the samples directly to the output and exit // do a volume processing if necessary, but otherwise its just one transmission if (frequencies_match && sample_types_match && num_channels_match) { unsigned long num_retrieved_samples = retrieve_samples(sample_source, output, num_output_samples); if (volume != max_volume) { sample_type output_sample_type = get_sample_type(output_properties); for (unsigned long i = 0; i < num_retrieved_samples * num_output_channels; ++i) { set_sample_value( output, i, adjust_sample_volume(get_sample_value(output, i, output_sample_type), volume, max_volume), output_sample_type ); } } return num_retrieved_samples; } unsigned long num_retrieved_samples = 0; uint8_t *resampler_input = 0; sample_type dest_type = sample_unknown; // note that this returns true if the resampler is in an uninitialized state bool resampler_needed_more_input = is_more_input_needed_for(resampler, num_output_samples); // first processing stage: convert samples & mix channels if 
necessary // also, the resampler input is prepared here // if resampling is necessary, and the resampler has enough input data for now, this stage is bypassed if (frequencies_match || resampler_needed_more_input) { // with the adjusted value from above, retrieve samples from the sample source, and resize the buffer to the number of actually retrieved samples // (since the sample source may have given us less samples than we requested) unsigned long num_samples_to_retrieve = num_output_samples; source_data_buffer.resize(num_samples_to_retrieve * num_input_channels * get_sample_size(get_sample_type(input_properties))); num_retrieved_samples = retrieve_samples(sample_source, &source_data_buffer[0], num_samples_to_retrieve); source_data_buffer.resize(num_retrieved_samples * num_input_channels * get_sample_size(get_sample_type(input_properties))); // here, the dest and dest_type values are set, as well as the resampler input (if necessary) // several optimizations are done here: if the resampler is not necessary, then dest points to the output - any conversion steps will write data directly to the output then // if the resampler is necessary, and the sample types match, then the resampler's input is the source data buffer, otherwise it is the "dest" pointer // the reason for this is: if the sample types match, no conversion step is necessary, and the resampler can pull data directly from the source data buffer; // otherwise, the conversion step needs to convert to an intermediate buffer (the buffer the dest pointer points at), and then the resampler pulls data from this buffer uint8_t *dest; if (frequencies_match) { // frequencies match - dest is set to point at the output, meaning that the next step will directly write to the output dest_type = get_sample_type(output_properties); dest = reinterpret_cast < uint8_t* > (output); } else { // frequencies do not match, resampling is necessary - dest is set to point at an intermediate buffer, which the resampler will use // 
ask the resampler what resampling input type it needs - this may differ from the output properties' sample type, but this is ok, // since with resampling, an intermediate step between sample type conversion & mixing and actual output is present anyway dest_type = find_compatible_type(resampler, get_sample_type(input_properties)); if (sample_types_match && num_channels_match) { // if the sample types and channel count match, then no conversion step is necessary, and the resampler can pull data from the source data buffer directly resampler_input = &source_data_buffer[0]; dest = 0; } else { // if the sample types and channel count do not match, then the resampler needs to pull data from the intermediate buffer dest points to // the conversion step will write to dest resampling_input_buffer.resize(num_output_samples * num_output_channels * get_sample_size(dest_type)); dest = &resampling_input_buffer[0]; resampler_input = dest; } } // actual mixing and conversion is done here if (num_channels_match) { // channel counts match, no mixing necessary if (!sample_types_match) { assert(dest != 0); // sample types do not match // go through all the input samples, convert them, and write them to dest for (unsigned long i = 0; i < num_retrieved_samples * num_input_channels; ++i) { set_sample_value( dest, i, convert_sample_value( get_sample_value(&source_data_buffer[0], i, get_sample_type(input_properties)), get_sample_type(input_properties), dest_type ), dest_type ); } } // if the sample types match, nothing needs to be done here } else { // channels count do not match - call the mixer mix_channels(&source_data_buffer[0], dest, num_retrieved_samples, get_sample_type(input_properties), dest_type, get_num_channels(input_properties), get_num_channels(output_properties)); } } else { // either resampling is not necessary, or the resampler does not need any new input for now // set number of retrieved samples to number of output samples, and if resampling is necessary, set the 
dest_type value num_retrieved_samples = num_output_samples; // the dest_type value is set, since the resampler might reset itself if it sees a change in the input type if (!frequencies_match) dest_type = find_compatible_type(resampler, get_sample_type(input_properties)); } // the final stage adjusts the output volume; if the volume equals the max volume, it is unnecessary // this boolean conditionally enables this stage bool do_volume_stage = (volume != max_volume); // second processing stage: resample if necessary (otherwise this stage is bypassed) if (!frequencies_match) { sample_type resampler_input_type = dest_type; // the resampler may have only support for a fixed number of sample types - let it choose a suitable output one sample_type resampler_output_type = find_compatible_type(resampler, resampler_input_type, get_sample_type(output_properties)); sample_type output_type = get_sample_type(output_properties); bool conversion_needed = (resampler_output_type != output_type); uint8_t *resampler_output = reinterpret_cast < uint8_t* > (output); if (conversion_needed) { resampling_output_buffer.resize(num_output_samples * num_output_channels * get_sample_size(resampler_output_type)); resampler_output = &resampling_output_buffer[0]; } // resampler input -can- be zero - sometimes the resampler does not need any more input assert(!resampler_needed_more_input || (resampler_input != 0)); // call the actual resampler, which returns the number of samples that were actually sent to output // if this is the first call since the resampler was reset(), this call internally initializes the resampler // and sets its internal values to the given ones // note that the is_more_input_needed_for() call returns true if the resampler is in such an uninitialized state // if the resampler was initialized already, it may reinitialize itself internally if certain parameters change // (this is entirely implementation-dependent; from the outside, no such reinitialization is noticeable) 
num_retrieved_samples = resample( resampler, resampler_input, num_retrieved_samples, resampler_output, num_output_samples, input_frequency, output_frequency, resampler_input_type, resampler_output_type, num_output_channels ); // the output type chosen by the resampler may not match the output type given by the output properties, so a final conversion step may be necessary if (conversion_needed) { if (do_volume_stage) { // if the volume stage is required, use the opportunity to do it together with the final conversion step for (unsigned long i = 0; i < num_retrieved_samples * num_output_channels; ++i) { set_sample_value( output, i, adjust_sample_volume( get_sample_value(resampler_output, i, resampler_output_type), volume, max_volume ), output_type ); } // volume adjustment was done in-line in the conversion step above -> no further volume adjustment required do_volume_stage = false; } else { // conversion without volume adjustment for (unsigned long i = 0; i < num_retrieved_samples * num_output_channels; ++i) set_sample_value(output, i, get_sample_value(resampler_output, i, output_type), output_type); } } } // do the volume stage if required if (do_volume_stage) { sample_type output_sample_type = get_sample_type(output_properties); for (unsigned long i = 0; i < num_retrieved_samples * num_output_channels; ++i) { set_sample_value( output, i, adjust_sample_volume(get_sample_value(output, i, output_sample_type), volume, max_volume), output_sample_type ); } } // finally, return the number of retrieved samples, either from the resampler, or from the source directly, // depending on whether or not resampling was necessary return num_retrieved_samples; }