Example #1
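// delta_pos_at_start / delta_pos_at_end are frame offsets within the current block;
// delta_pos_at_end == -1 means the voice does not stop inside this block.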
static VoiceOp RT_play_voice(const Data *data, const Voice *voice, const int num_frames, float *destination){

  int start_writing_pos = 0;
  int end_writing_pos = num_frames;
  
  const int delta_pos_at_start = voice->delta_pos_at_start;
  const int delta_pos_at_end = voice->delta_pos_at_end;

  VoiceOp ret = VOICE_KEEP;

  
  if (delta_pos_at_start==0 && delta_pos_at_end==-1){

    // 1. continue playing;
    
    int num_consumed_frames = RT_get_resampled_data(voice, destination, num_frames);
    
    if (num_consumed_frames < num_frames) {
      end_writing_pos = num_consumed_frames;
      ret = VOICE_REMOVE;
    }
    
  }else if (delta_pos_at_start>0 && delta_pos_at_end==-1){

    // 2. start playing (without end playing)

    start_writing_pos = delta_pos_at_start;
    int new_num_frames = num_frames - delta_pos_at_start;
    
    // Write at the voice's start offset so the data lines up with the silence padding below.
    int num_consumed_frames = RT_get_resampled_data(voice, destination + start_writing_pos, new_num_frames);

    if (num_consumed_frames < new_num_frames){
      end_writing_pos = start_writing_pos + num_consumed_frames;
      ret = VOICE_REMOVE;
    }
    
  }else{

    // 3. end playing
 
    R_ASSERT_RETURN_IF_FALSE2(delta_pos_at_end>=0, VOICE_REMOVE);
    R_ASSERT_RETURN_IF_FALSE2(delta_pos_at_end>=delta_pos_at_start, VOICE_REMOVE);
        
    int new_num_frames = delta_pos_at_end - delta_pos_at_start;
    
    int num_consumed_frames = RT_get_resampled_data(voice, destination+delta_pos_at_start, new_num_frames);

    start_writing_pos = delta_pos_at_start;
    end_writing_pos = delta_pos_at_start+num_consumed_frames;
    
    ret = VOICE_REMOVE;
  }

  
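  // Pad the parts of the block that were not written to with silence.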
  if (start_writing_pos > 0)
    memset(destination, 0, start_writing_pos*sizeof(float));

  if (end_writing_pos < num_frames)
    memset(destination+end_writing_pos, 0, (num_frames-end_writing_pos)*sizeof(float));
  
  return ret;
}
Example #2
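// Resamples the voice, applies its ADSR envelope and mixes the result into the
// two output channels. Returns true when the voice has finished playing.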
static bool RT_play_voice(Data *data, Voice *voice, int num_frames_to_produce, float **outputs){
  int startpos = voice->delta_pos_at_start;
  int endpos = voice->delta_pos_at_end;

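  // Consume the delta positions: they only apply to the current block.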
  if(startpos>=0)
    voice->delta_pos_at_start = 0;
  if(endpos>=0)
    voice->delta_pos_at_end = -1;

  if(endpos>=0 && endpos<startpos){
    // Should not happen. Test for it just in case; it's a bit messy when notes
    // are generated by the keyboard, the player and other places at the same time.
    printf("Oops. Endpos: %d. startpos: %d\n",endpos,startpos);
    return false;
  }

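  // Resample only the part of the block the voice actually covers.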
  float resampled_data[num_frames_to_produce-startpos];
  int frames_created_by_resampler = RT_get_resampled_data(data,voice,resampled_data,num_frames_to_produce-startpos);
  //printf("Frames created by resampler: %d\n",frames_created_by_resampler);
  //printf("peak: %f\n",get_peak(resampled_data,frames_created_by_resampler));

  int frames_created_by_envelope;

  float *adsr_sound_data[1]={&resampled_data[0]};

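  // endpos >= 0 means the note stops inside this block: apply the envelope up to
  // endpos, then trigger the ADSR release for the remaining frames.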
  if(endpos>=0){
    int pre_release_len = endpos-startpos;

    //printf("********** endpos>0: %d. prelen: %d, frames_created_by_resampler: %d\n",endpos,prelen,frames_created_by_resampler);

    if(frames_created_by_resampler <= pre_release_len){ // i.e. we reached the end of sound before beginning to release the ADSR envelope.

      frames_created_by_envelope = ADSR_apply(voice->adsr, adsr_sound_data, 1, frames_created_by_resampler);

    }else{
      frames_created_by_envelope = ADSR_apply(voice->adsr, adsr_sound_data, 1, pre_release_len);

      //printf("************************ Calling adsr release\n");
      ADSR_release(voice->adsr);

      int post_release_len = frames_created_by_resampler - frames_created_by_envelope;
      adsr_sound_data[0] = &resampled_data[frames_created_by_envelope];        
      frames_created_by_envelope += ADSR_apply(voice->adsr, adsr_sound_data, 1, post_release_len);
    }

  }else{

    frames_created_by_envelope = ADSR_apply(voice->adsr, adsr_sound_data, 1, frames_created_by_resampler);
    //printf("Frames created by envelope: %d, peak: %f\n",frames_created_by_envelope,get_peak(resampled_data,frames_created_by_envelope));
    //printf("peak: %f\n",get_peak(resampled_data,frames_created_by_resampler));
  }

  //float peak_in = get_peak(resampled_data,frames_created_by_envelope);

  const Sample *sample = voice->sample;

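  // Mix the resampled mono data into one output channel, applying the pan value
  // and a smooth ramp from start_volume to end_volume.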
#define mix(input_channel, output_channel) do{                          \
    float panval = voice->pan.vals[input_channel][output_channel];      \
    if(panval>0.0f){                                                    \
      float *out          = outputs[output_channel] + startpos;         \
      float  start_volume = voice->start_volume*panval;                 \
      float  end_volume   = voice->end_volume*panval;                   \
      SMOOTH_mix_sounds_raw(out, resampled_data, frames_created_by_envelope, start_volume, end_volume); \
    }                                                                   \
  }while(0)

  if(sample->ch == -1){
    mix(0,0);
    mix(0,1);
  }else{
    mix(sample->ch,0);
    mix(sample->ch,1);
  }

  //printf("peak in/out: %.3f - %.3f\n",peak_in,get_peak(outputs[0], num_frames_to_produce));

  voice->start_volume = voice->end_volume;
  voice->start_pitch = voice->end_pitch;

  // The voice is finished when it could not fill the whole block.
  return startpos+frames_created_by_envelope < num_frames_to_produce;
}