Example No. 1
int d2083_audio_hs_set_gain(enum d2083_audio_output_sel hs_path_sel, enum d2083_hp_vol_val hs_gain_val)
{
	int ret = 0;
	u8 regval;
	
	if(D2083_HPVOL_NEG_57DB > hs_gain_val || D2083_HPVOL_MUTE < hs_gain_val) {
		dlg_info("[%s]- Invalid gain, so set to -1dB \n", __func__);
		hs_gain_val = D2083_HPVOL_NEG_1DB;				
	}
	
	dlg_info("d2083_audio_hs_set_gain path=%d hs_pga_gain=0x%x \n", hs_path_sel, hs_gain_val);
	
	if(dlg_audio->HSenabled==true)
	{
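		/* program the left/right HP amplifier gain fields and clear the amp mute bit for the selected output paths */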
		if (hs_path_sel & D2083_OUT_HPL) 
		{
			regval = audio_read(D2083_HP_L_GAIN_REG) & ~D2042_HP_AMP_GAIN;
			ret |= audio_write(D2083_HP_L_GAIN_REG, regval | (hs_gain_val & D2042_HP_AMP_GAIN));
			ret |= audio_clear_bits(D2083_HP_L_CTRL_REG, D2083_HP_AMP_MUTE_EN);
		}

		if (hs_path_sel & D2083_OUT_HPR) 
		{
			regval = audio_read(D2083_HP_R_GAIN_REG) & ~D2042_HP_AMP_GAIN;
			ret |= audio_write(D2083_HP_R_GAIN_REG, regval | (hs_gain_val & D2042_HP_AMP_GAIN));
			ret |= audio_clear_bits(D2083_HP_R_CTRL_REG, D2083_HP_AMP_MUTE_EN);
		}            

	}
	dlg_audio->hs_pga_gain=hs_gain_val;

	return ret;
}
Example No. 2
int d2083_audio_hs_preamp_gain(enum d2083_preamp_gain_val hsgain_val)
{
	u8 regval = 0;
	int ret = 0;

	dlg_info("d2083_audio_hs_preamp_gain hs_pre_gain=0x%x \n",hsgain_val);
	//if(0 <  hsgain_val && D2083_PREAMP_GAIN_MUTE > hsgain_val ) 
		//dlg_audio->hs_pre_gain = hsgain_val;

	if(0 >  hsgain_val || D2083_PREAMP_GAIN_MUTE < hsgain_val ) {
		dlg_info("[%s]- Invalid preamp gain, so set to 0dB \n", __func__);
		hsgain_val = D2083_PREAMP_GAIN_0DB;				
	}

	if(dlg_audio->HSenabled==true)
	{
#ifdef USE_AUDIO_DIFFERENTIAL
		if(!dlg_audio->IHFenabled) 
		{
			regval = audio_read(D2083_PREAMP_A_CTRL1_REG) & ~D2083_PREAMP_VOL;
			regval |= (hsgain_val << D2083_PREAMP_VOL_SHIFT) & D2083_PREAMP_VOL;
			ret = audio_write(D2083_PREAMP_A_CTRL1_REG,regval);
		}
#endif		
		regval = audio_read(D2083_PREAMP_B_CTRL1_REG) & ~D2083_PREAMP_VOL;
		regval |= (hsgain_val << D2083_PREAMP_VOL_SHIFT) & D2083_PREAMP_VOL;
		ret = audio_write(D2083_PREAMP_B_CTRL1_REG,regval);
	}
	dlg_audio->hs_pre_gain = hsgain_val;

	return ret;
}
Example No. 3
static int jz_spdif_set_rate(struct device *aic ,struct jz_aic* jz_aic, unsigned long sample_rate){
	unsigned long sysclk;
	struct clk* cgu_aic_clk = jz_aic->clk;
	__i2s_stop_bitclk(aic);
#ifndef CONFIG_PRODUCT_X1000_FPGA
	sysclk = calculate_cgu_aic_rate(clk_get_rate(cgu_aic_clk->parent), &sample_rate);
	clk_set_rate(cgu_aic_clk, (sysclk*2));
	jz_aic->sysclk = clk_get_rate(cgu_aic_clk);

	if (jz_aic->sysclk > (sysclk*2)) {
		printk("external codec: failed to set sysclk, got %ld, wanted %ld\n", jz_aic->sysclk, sysclk);
		return -1;
	}

	spdif_set_clk(jz_aic, jz_aic->sysclk/2, sample_rate);
#else
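	/* FPGA build: program the AIC/I2S clock divider registers directly with fixed values instead of using the clock framework */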
	audio_write((253<<13)|(4482),I2SCDR_PRE);
	*(volatile unsigned int*)0xb0000070 = *(volatile unsigned int*)0xb0000070;
	audio_write((253<<13)|(4482)|(1<<29),I2SCDR_PRE);
	audio_write(0,I2SDIV_PRE);
#endif
	__i2s_start_bitclk(aic);
	spdif_select_ori_sample_freq(aic,sample_rate);
	spdif_select_sample_freq(aic,sample_rate);

	return sample_rate;
}
Example No. 4
static inline int d2083_audio_poweron(bool on)
{
	int ret = 0;
	int (*set_bits)(struct d2083 * const d2083, u8 const reg, u8 const mask);

	if(on==true) {
		audio_write(D2083_LDO1_MCTL_REG, 0x54);
		audio_write(D2083_LDO_AUD_MCTL_REG, 0x54);
	}	
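	/* pick set or clear based on the power direction, then apply it to the preamp, HP-mixer and charge-pump enable bits */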
	set_bits = (on) ? d2083_set_bits : d2083_clear_bits;

	ret |= set_bits(d2083, D2083_PREAMP_A_CTRL1_REG, D2083_PREAMP_EN);
	ret |= set_bits(d2083, D2083_PREAMP_B_CTRL1_REG, D2083_PREAMP_EN);
	ret |= set_bits(d2083, D2083_MXHPR_CTRL_REG, D2083_MX_EN);
	ret |= set_bits(d2083, D2083_MXHPL_CTRL_REG, D2083_MX_EN);
	ret |= set_bits(d2083, D2083_CP_CTRL_REG, D2083_CP_EN);

	if(on==false){
		audio_write(D2083_CP_CTRL_REG,0xC9);
		audio_write(D2083_LDO1_MCTL_REG, 0x00);
		audio_write(D2083_LDO_AUD_MCTL_REG, 0x00);
	}

	return ret;
}
Example No. 5
static ssize_t sound_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	int dev = iminor(file->f_path.dentry->d_inode);
	int ret = -EINVAL;
	
	lock_kernel();
	DEB(printk("sound_write(dev=%d, count=%d)\n", dev, count));
	switch (dev & 0x0f) {
	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		ret =  sequencer_write(dev, file, buf, count);
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		ret = audio_write(dev, file, buf, count);
		break;

	case SND_DEV_MIDIN:
		ret =  MIDIbuf_write(dev, file, buf, count);
		break;
	}
	unlock_kernel();
	return ret;
}
Example No. 6
int flowm_audio_stream_chunk(const cst_wave *w, int start, int size, 
                             int last, cst_audio_streaming_info *asi)
{

    if (fl_ad == NULL)
    {
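        /* open the audio device lazily on the first chunk, using the waveform's rate and channel count */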
        fl_ad = audio_open(w->sample_rate,w->num_channels,CST_AUDIO_LINEAR16);
    }

    if (flowm_play_status == FLOWM_PLAY)
    {
        audio_write(fl_ad,&w->samples[start],size*sizeof(short));
        return CST_AUDIO_STREAM_CONT;
    }
    else if (flowm_play_status == FLOWM_BENCH)
    {   /* Do TTS but don't actually play it */
        /* How much have we played */
        flowm_duration += (size*1.0)/w->sample_rate;
        return CST_AUDIO_STREAM_CONT;
    }
    else
    {   /* for STOP, and the SKIPS (if they get here) */
        return CST_AUDIO_STREAM_STOP;
    }
}
Example No. 7
void audio_isr (void *context, unsigned int id) {
  uint32_t numToRead = (samples_for_fft_requested) ? (NUM_SAMPLES - numRead) : AUDIO_BUF_SIZE;
  // Want to read step
  size_t count = audio_read (left_buffer, right_buffer, STEP_SIZE*numToRead);
  audio_write (left_buffer, right_buffer, count);
  if (samples_for_fft_requested) {
      size_t i;
      red_leds_set (0xFF);
      //modify count by STEP_SIZE since only count/STEP_SIZE samples actually copied
      size_t numSamples = count/STEP_SIZE;
      if (numRead + numSamples >= NUM_SAMPLES) {
        numRead = 0;
        samples_for_fft_requested = false;
        signal_audio_ready();
        startVal = 0;
      }
      for (i = startVal; i < count; i+= STEP_SIZE) {
        samples_for_fft[i/STEP_SIZE + numRead].r = left_buffer[i];
      }
      numRead += numSamples;
      //maintain proper spacing on next set of samples when next interrupt received
      startVal = count%STEP_SIZE;
      red_leds_clear (0xFF);
    }
}
Example No. 8
static ssize_t sound_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
	int dev = MINOR(file->f_dentry->d_inode->i_rdev);

	DEB(printk("sound_write(dev=%d, count=%d)\n", dev, count));
	switch (dev & 0x0f) {
#ifdef CONFIG_SEQUENCER
	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		return sequencer_write(dev, file, buf, count);
#endif

#ifdef CONFIG_AUDIO
	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		return audio_write(dev, file, buf, count);
#endif

#ifdef CONFIG_MIDI
	case SND_DEV_MIDIN:
		return MIDIbuf_write(dev, file, buf, count);
#endif
	}
	return -EINVAL;
}
Example No. 9
int main( int argc, char *argv[] )
{
    int decode = FALSE;
    int stream_pos;


    if( argc != 2 ) {
        printf( "Usage: %s <filename>\n", argv[0] );
        return( EXIT_FAILURE );
    }

    if( MPG_Stream_Open( argv[argc - 1] ) ) {
        printf( "Failed to open: %s\n", argv[argc - 1] );
        return( EXIT_FAILURE );
    }
    strcpy( filename, argv[argc - 1] );

    while( (stream_pos = MPG_Get_Filepos()) != C_MPG_EOF ) {

        if( MPG_Read_Frame() == OK ) {
            decode = TRUE;
        } else {
            if( MPG_Get_Filepos() == C_MPG_EOF ) {
                break;
            } else {
                decode = FALSE;
                ERR( "Not enough data to decode frame\n" );
            }
        }

        if( decode ) {
            /* Decode the compressed frame into 1152 mono/stereo PCM audio samples */
            MPG_Decode_L3( mp3_outdata );
            audio_write( (uint32_t *)&mp3_outdata, 2 * 576,
                     g_sampling_frequency[g_frame_header.id][g_frame_header.sampling_frequency] );
        }

        /* Remote control handling would be here */
    }

#ifdef OUTPUT_WAV
    audio_write( 0, 0, 0 );
#endif

    /* Done */
    return( EXIT_SUCCESS );
}
Example No. 10
int d2083_audio_hs_ihf_poweroff(void)
{
	int ret = 0;
	u8 regval;

	if(ihf_power_status ==0){
		return ret;
	}
	ihf_power_status = 0;
	dlg_info("%s, HP =%d, status:%d \n", __func__,dlg_audio->HSenabled,ihf_power_status);

	audio_write(D2083_SP_CTRL_REG,0x32);

#ifdef USE_AUDIO_DIFFERENTIAL
	if (dlg_audio->HSenabled == false) 
#endif
	{
		regval = audio_read(D2083_PREAMP_A_CTRL1_REG) & ~D2083_PREAMP_VOL;
		regval |= (D2083_PREAMP_MUTE);
		regval &= (~D2083_PREAMP_EN);
		ret |= audio_write(D2083_PREAMP_A_CTRL1_REG,regval);
	}

	audio_write(D2083_SP_PWR_REG, 0x00);
	audio_write(D2083_SP_NON_CLIP_ZC_REG, 0x00);
	audio_write(D2083_SP_NON_CLIP_REG, 0x00);
	audio_write(D2083_SP_NG1_REG, 0x00);
	audio_write(D2083_SP_NG2_REG, 0x00);
	
	dlg_audio->IHFenabled = false;
	//if(dlg_audio->HSenabled==false)
	//    audio_write(D2083_LDO_AUD_MCTL_REG, 0x00); //AUD_LDO off

	return ret;
}
Example No. 11
int d2083_set_hs_noise_gate(u16 regval)		
{
	int ret = 0;
#ifdef NOISE_GATE_USE
	dlg_info("d2083_set_hs_noise_gate regval=%d \n",regval);
	if(dlg_audio->HSenabled==true) {

		ret |= audio_write(D2083_HP_NG1_REG, (u8)(regval & 0x00FF));
		ret |= audio_write(D2083_HP_NG2_REG, (u8)((regval >> 8) & 0x00FF));
	}
#endif
	return ret;
}
Example No. 12
File: msi.c Project: NeoXiong/MP3
/*
 * write samples to device
 * the output format must be configured correctly before writing
 */
int32_t msi_snd_write(uint8_t * pcmStream, uint32_t pcmCnt)
{
	int32_t res = 0;
#ifdef BY_PASS_ADAPTER
	  res =  dai_link_write((uint16_t *)pcmStream, pcmCnt);
	  if(res == 0)
		  printf("%s  data NULL\n",__func__);
	return res;
#else
	res = audio_write((uint16_t *)pcmStream, pcmCnt);
	return res;
#endif
}
Example No. 13
// speaker on/off
int d2083_audio_hs_ihf_poweron1(void)
{
	int ret = 0;
	u8 regval;

	if(ihf_power_status == true)
	 return ret;
	ihf_power_status = true;
	 
	dlg_info("%s HP =%d \n",__func__, dlg_audio->HSenabled);

	// if(dlg_audio->HSenabled==false)
	//     audio_write(D2083_LDO_AUD_MCTL_REG, 0x44); //AUD_LDO on

	audio_write(D2083_MXHPL_CTRL_REG,0x19); 

	regval = audio_read(D2083_PREAMP_A_CTRL1_REG);
	regval = (regval | D2083_PREAMP_EN) & ~D2083_PREAMP_MUTE;
	ret |= audio_write(D2083_PREAMP_A_CTRL1_REG,regval);
	ret |= audio_write(D2083_PREAMP_A_CTRL2_REG,0x03); //pre amp A fully differential

	regval = audio_read(D2083_SP_CTRL_REG);
	regval &= ~D2083_SP_MUTE;
	regval |= D2083_SP_EN;
	ret |= audio_write(D2083_SP_CTRL_REG,regval);

	ret |= audio_write(D2083_SP_CFG1_REG,0x00); 
	ret |= audio_write(D2083_SP_CFG2_REG,0x00); 
	//ret |= audio_write(D2083_SP_NG1_REG,0x0F); 
	//ret |= audio_write(D2083_SP_NG2_REG,0x07); 
	ret |= audio_write(D2083_SP_NON_CLIP_ZC_REG,0x00);
	ret |= audio_write(D2083_SP_NON_CLIP_REG,0x00); 
	ret |= audio_write(D2083_SP_PWR_REG,0x00);

	ret |= audio_write(D2083_BIAS_CTRL_REG,0x3F); 
	dlg_audio->IHFenabled = true;

	return ret;
}
Example No. 14
int d2083_audio_multicast(u8 flag)
{
	u8 regval;
	int ret = 0;
	
	dlg_info("%s = %d\n",__func__, flag);
	
	switch(flag)
	{
		case DLG_REMOVE_IHF:
		if(!is_playback_stop) {
			ret = audio_write(D2083_MXHPL_CTRL_REG,0x07);
			regval = audio_read(D2083_PREAMP_B_CTRL1_REG);
			ret = audio_write(D2083_PREAMP_A_CTRL1_REG,regval);
		}
		ret |= d2083_audio_hs_ihf_poweroff();
		break;

		case DLG_REMOVE_HS:
		ret = d2083_audio_hs_poweron1(0);
		break;
		
		case DLG_ADD_IHF:
		ret = audio_write(D2083_MXHPL_CTRL_REG,0x19);
		ret |= d2083_audio_hs_ihf_poweron1();
		break;
		
		case DLG_ADD_HS: 
		ret = d2083_audio_hs_poweron1(1);
		break;

		default:
		break;
	}
	
	return ret;
}
Example No. 15
int d2083_audio_set_input_mode(enum d2083_input_path_sel inpath_sel, enum d2083_input_mode_val mode_val)
{
	int reg;
	u8 regval;

	if (inpath_sel == D2083_INPUTA)
		reg = D2083_PREAMP_A_CTRL2_REG;
	else if (inpath_sel == D2083_INPUTB)
		reg = D2083_PREAMP_B_CTRL2_REG;
	else
		return -EINVAL;

	regval = audio_read(reg) & ~D2083_PREAMP_CFG;
	regval |= mode_val & D2083_PREAMP_CFG;

	return audio_write(reg,regval);
}
Example No. 16
int d2083_audio_ihf_preamp_gain(enum d2083_preamp_gain_val ihfgain_val)
{
	u8 regval;

	dlg_info("d2083_audio_ihf_preamp_gain gain=%d \n", ihfgain_val);

	if(0 >  ihfgain_val || D2083_PREAMP_GAIN_MUTE < ihfgain_val ) {
		dlg_info("[%s]- Invalid preamp gain, so set to 0dB \n", __func__);
		ihfgain_val = D2083_PREAMP_GAIN_0DB;				
	}

	regval = audio_read(D2083_PREAMP_A_CTRL1_REG) & ~D2083_PREAMP_VOL;
	regval |= (ihfgain_val << D2083_PREAMP_VOL_SHIFT) & D2083_PREAMP_VOL;

	return audio_write(D2083_PREAMP_A_CTRL1_REG,regval);

}
Example No. 17
static void process_freedv_rx_buffer() {
  int j;
  int demod_samples;
  for(j=0;j<outputsamples;j++) {
    if(freedv_samples==0) {
      leftaudiosample=(short)(audiooutputbuffer[j*2]*32767.0*volume);
      rightaudiosample=(short)(audiooutputbuffer[(j*2)+1]*32767.0*volume);
      demod_samples=demod_sample_freedv(leftaudiosample);
      if(demod_samples!=0) {
        int s;
        int t;
        for(s=0;s<demod_samples;s++) {
          if(freedv_sync) {
            leftaudiosample=rightaudiosample=(short)((double)speech_out[s]*volume);
          } else {
            leftaudiosample=rightaudiosample=0;
          }
          for(t=0;t<6;t++) { // 8k to 48k
            if(local_audio) {
                audio_write(leftaudiosample,rightaudiosample);
                leftaudiosample=0;
                rightaudiosample=0;
            }

            audiobuffer[audioindex++]=leftaudiosample>>8;
            audiobuffer[audioindex++]=leftaudiosample;
            audiobuffer[audioindex++]=rightaudiosample>>8;
            audiobuffer[audioindex++]=rightaudiosample;
            
            if(audioindex>=sizeof(audiobuffer)) {
                // insert the sequence
              audiobuffer[0]=audiosequence>>24;
              audiobuffer[1]=audiosequence>>16;
              audiobuffer[2]=audiosequence>>8;
              audiobuffer[3]=audiosequence;
              // send the buffer
              if(sendto(data_socket,audiobuffer,sizeof(audiobuffer),0,(struct sockaddr*)&audio_addr,audio_addr_length)<0) {
                fprintf(stderr,"sendto socket failed for audio\n");
                exit(1);
              }
              audioindex=4;
              audiosequence++;
            }
          }
        }
      }
Example No. 18
static int
audio_device_write(session_t *sp, sample *buf, int dur)
{
        const audio_format *ofmt = audio_get_ofmt(sp->audio_device);
        int len;

        assert(dur >= 0);

        if (sp->out_file) {
                snd_write_audio(&sp->out_file, buf, (uint16_t)(dur * ofmt->channels));
        }
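        /* hand the interleaved samples (dur frames * ofmt->channels) to the audio device */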

        len =  audio_write(sp->audio_device, buf, dur * ofmt->channels);

        xmemchk();
        
        return len;
}
Example No. 19
int d2083_audio_set_input_preamp_gain(enum d2083_input_path_sel inpath_sel, enum d2083_preamp_gain_val pagain_val)
{
	int reg;
	u8 regval;

	if (inpath_sel == D2083_INPUTA)
		reg = D2083_PREAMP_A_CTRL1_REG;
	else if (inpath_sel == D2083_INPUTB)
		reg = D2083_PREAMP_B_CTRL1_REG;
	else
		return -EINVAL;

	regval = audio_read(reg) & ~D2083_PREAMP_VOL;
	regval |= (pagain_val << D2083_PREAMP_VOL_SHIFT) & D2083_PREAMP_VOL;

	dlg_info("[%s]-addr[0x%x] ihfgain_val[0x%x]\n", __func__, reg,regval);

	return audio_write(reg,regval);
}
Example No. 20
int d2083_audio_set_mixer_input(enum d2083_audio_output_sel path_sel, enum d2083_audio_input_val input_val)
{
	int reg;
	u8 regval;

	if(path_sel == D2083_OUT_HPL)
		reg = D2083_MXHPL_CTRL_REG;
	else if(path_sel == D2083_OUT_HPR)
		reg = D2083_MXHPR_CTRL_REG;
	else if(path_sel == D2083_OUT_SPKR)
		reg = D2083_MXSP_CTRL_REG;
	else
		return -EINVAL;

	regval = audio_read(reg) & ~D2083_MX_SEL;
	regval |= (input_val << D2083_MX_SEL_SHIFT) & D2083_MX_SEL;

	return audio_write(reg,regval);
}
Example No. 21
void AudioDec::audio_decode(AVPacket *packet)
{
	if (packet->pts != AV_NOPTS_VALUE)
	{
		audio_clock = av_q2d(pStream->time_base)*packet->pts; // initialize the audio clock
	}

	int got_picture;
	out_buffer_sz = av_samples_get_buffer_size(NULL, av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO), nb_out_samples, AV_SAMPLE_FMT_S16, 0);
	uint8_t* buffer = (uint8_t*)av_malloc(out_buffer_sz);

	/* decode the audio frame */
	if (avcodec_decode_audio4(pStream->codec, pFrame, &got_picture, packet) < 0)
	{
		TRACE("audio codec decode error! \n");
		return;
	}
	if (got_picture)
	{
		// once a frame is decoded, convert it to the target format and get the output sample count
		int numSamplesOut = swr_convert(pSwrContext, &buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t**)pFrame->data, pFrame->nb_samples);

		if (numSamplesOut > 0)
		{
			if (numSamplesOut != nb_out_samples)
			{
				nb_out_samples = numSamplesOut;
			}
			// recalculate the output buffer size
			out_buffer_sz = av_samples_get_buffer_size(NULL, av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO), numSamplesOut, AV_SAMPLE_FMT_S16, 0);
			
			/* update the audio clock */
			//int pts = audio_clock;
			int n = 2 * (pStream->codec->channels);
			audio_clock += (double)out_buffer_sz / (double)(n * pStream->codec->sample_rate);
			/* write the audio out */
			audio_write(hWaveOut, (LPSTR)(unsigned char*)buffer, out_buffer_sz);
		}
	}
}
Example No. 22
int d2083_audio_hs_ihf_set_gain(enum d2083_sp_vol_val ihfgain_val)
{
	u8 regval;

	dlg_info("[%s]-ihfgain_val[0x%x]\n", __func__, ihfgain_val);
	
	if(0 > ihfgain_val || D2083_SPVOL_MUTE < ihfgain_val) {
		dlg_info("[%s]- Invalid gain, so set to 0dB \n", __func__);
		ihfgain_val = D2083_SPVOL_0DB;				
	}

	regval = audio_read(D2083_SP_CTRL_REG);
	if (ihfgain_val == D2083_SPVOL_MUTE) 
	{
		regval |= D2083_SP_MUTE;
	} 
	else 
	{
		regval &= ~(D2083_SP_VOL | D2083_SP_MUTE);
		regval |= (ihfgain_val << D2083_SP_VOL_SHIFT) & D2083_SP_VOL;			
	}
	return  audio_write(D2083_SP_CTRL_REG,regval);
}
Example No. 23
int ffplay(const char *filename, const char *force_format) {

	char errbuf[256];
	int r = 0;
	
	int frameFinished;
	AVPacket packet;
	int audio_buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int16_t *audio_buf = (int16_t *) malloc((AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2);
	
	if(!audio_buf) {
		ds_printf("DS_ERROR: No free memory\n");
		return -1;
	}
	
	memset(audio_buf, 0, (AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2);
	
	AVFormatContext *pFormatCtx = NULL;
	AVFrame *pFrame = NULL;
	AVCodecContext *pVideoCodecCtx = NULL, *pAudioCodecCtx = NULL;
	AVInputFormat *file_iformat = NULL;
	
	video_txr_t movie_txr;
	int videoStream = -1, audioStream = -1;
	
	maple_device_t *cont = NULL;
	cont_state_t *state = NULL;
	int pause = 0, done = 0;
	
	char fn[MAX_FN_LEN];
	sprintf(fn, "ds:%s", filename);

	memset(&movie_txr, 0, sizeof(movie_txr));
	
	if(!codecs_inited) {
		avcodec_register_all();
		avcodec_register(&mp1_decoder);
		avcodec_register(&mp2_decoder);
		avcodec_register(&mp3_decoder);
		avcodec_register(&vorbis_decoder);
		//avcodec_register(&mpeg4_decoder);
		codecs_inited = 1;
	}
	
	if(force_format)
      file_iformat = av_find_input_format(force_format);
    else
      file_iformat = NULL;


	// Open video file
	ds_printf("DS_PROCESS_FFMPEG: Opening file: %s\n", filename);
	if((r = av_open_input_file((AVFormatContext**)(&pFormatCtx), fn, file_iformat, /*FFM_PACKET_SIZE*/0, NULL)) != 0) {
		av_strerror(r, errbuf, 256);
		ds_printf("DS_ERROR_FFMPEG: %s\n", errbuf);
		free(audio_buf);
		return -1; // Couldn't open file
	}
	
	// Retrieve stream information
	ds_printf("DS_PROCESS_FFMPEG: Retrieve stream information...\n");
	if((r = av_find_stream_info(pFormatCtx)) < 0) {
		av_strerror(r, errbuf, 256);
		ds_printf("DS_ERROR_FFMPEG: %s\n", errbuf);
		av_close_input_file(pFormatCtx);
		free(audio_buf);
		return -1; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, filename, 0);
	//thd_sleep(5000);
	
	pVideoCodecCtx = findDecoder(pFormatCtx, AVMEDIA_TYPE_VIDEO, &videoStream);
	pAudioCodecCtx = findDecoder(pFormatCtx, AVMEDIA_TYPE_AUDIO, &audioStream);
	
	//LockInput();
	
	if(pVideoCodecCtx) {
		
		//LockVideo();
		ShutdownVideoThread();
		SDL_DS_FreeScreenTexture(0);
		int format = 0;
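		/* map the decoder's pixel format to a PVR texture format */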
		
		switch(pVideoCodecCtx->pix_fmt) {
			case PIX_FMT_YUV420P:
			case PIX_FMT_YUVJ420P:
			
				format = PVR_TXRFMT_YUV422;
#ifdef USE_HW_YUV				
				yuv_conv_init();
#endif
				break;
				
			case PIX_FMT_UYVY422:
			case PIX_FMT_YUVJ422P:
			
				format = PVR_TXRFMT_YUV422;
				break;
				
			default:
				format = PVR_TXRFMT_RGB565;
				break;
		}
		
		MakeVideoTexture(&movie_txr, pVideoCodecCtx->width, pVideoCodecCtx->height, format | PVR_TXRFMT_NONTWIDDLED, PVR_FILTER_BILINEAR);
		
#ifdef USE_HW_YUV				
		yuv_conv_setup(movie_txr.addr, PVR_YUV_MODE_MULTI, PVR_YUV_FORMAT_YUV420, movie_txr.width, movie_txr.height);
		pvr_dma_init();
#endif

	} else {
		ds_printf("DS_ERROR: Didn't find a video stream.\n");
	}
	
	
	if(pAudioCodecCtx) {
		
#ifdef USE_DIRECT_AUDIO
		audioinit(pAudioCodecCtx);
#else

		sprintf(fn, "%s/firmware/aica/ds_stream.drv", getenv("PATH"));
		
		if(snd_init_fw(fn) < 0) {
			goto exit_free;
		}
	
		if(aica_audio_open(pAudioCodecCtx->sample_rate, pAudioCodecCtx->channels, 8192) < 0) {
			goto exit_free;
		}
		//snd_cpu_clock(0x19);
		//snd_init_decoder(8192);
#endif
		
	} else {
		ds_printf("DS_ERROR: Didn't find an audio stream.\n");
	}
	
	//ds_printf("FORMAT: %d\n", pVideoCodecCtx->pix_fmt);

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	if(pFrame == NULL) {
		ds_printf("DS_ERROR: Can't alloc memory\n");
		goto exit_free;
	}
	
	int pressed = 0, framecnt = 0;
	uint32 fa = 0;
	
	fa = GET_EXPORT_ADDR("ffplay_format_handler");
	
	if(fa > 0 && fa != 0xffffffff) {
		EXPT_GUARD_BEGIN;
			void (*ff_format_func)(AVFormatContext *, AVCodecContext *, AVCodecContext *) = 
				(void (*)(AVFormatContext *, AVCodecContext *, AVCodecContext *))fa;
			ff_format_func(pFormatCtx, pVideoCodecCtx, pAudioCodecCtx);
		EXPT_GUARD_CATCH;
		EXPT_GUARD_END;
	}
	
	fa = GET_EXPORT_ADDR("ffplay_frame_handler");
	void (*ff_frame_func)(AVFrame *) = NULL;
	
	if(fa > 0 && fa != 0xffffffff) {
		EXPT_GUARD_BEGIN;
			ff_frame_func = (void (*)(AVFrame *))fa;
			// Test call
			ff_frame_func(NULL);
		EXPT_GUARD_CATCH;
			ff_frame_func = NULL;
		EXPT_GUARD_END;
	}
	
	fa = GET_EXPORT_ADDR("ffplay_render_handler");
	
	if(fa > 0 && fa != 0xffffffff) {
		EXPT_GUARD_BEGIN;
			movie_txr.render_cb = (void (*)(void *))fa;
			// Test call
			movie_txr.render_cb(NULL);
		EXPT_GUARD_CATCH;
			movie_txr.render_cb = NULL;
		EXPT_GUARD_END;
	}
	
	while(av_read_frame(pFormatCtx, &packet) >= 0 && !done) {
		
		do {
			if(ff_frame_func) 
				ff_frame_func(pFrame);
					
			cont = maple_enum_type(0, MAPLE_FUNC_CONTROLLER);
			framecnt++;

			if(cont) {
				state = (cont_state_t *)maple_dev_status(cont);
				
				if (!state) {
					break;
				}
				if (state->buttons & CONT_START || state->buttons & CONT_B) {
					av_free_packet(&packet);
					done = 1;
				}
				if (state->buttons & CONT_A) {
					if((framecnt - pressed) > 10) {
						pause = pause ? 0 : 1;
						if(pause) {
#ifdef USE_DIRECT_AUDIO
							audio_end();
#else
							stop_audio();
#endif
						} else {
#ifndef USE_DIRECT_AUDIO
							start_audio();
#endif
						}
					}
					pressed = framecnt;
				}
				
				if(state->buttons & CONT_DPAD_LEFT) {
					//av_seek_frame(pFormatCtx, -1, timestamp * ( AV_TIME_BASE / 1000 ), AVSEEK_FLAG_BACKWARD);
				}
				
				if(state->buttons & CONT_DPAD_RIGHT) {
					//av_seek_frame(pFormatCtx, -1, timestamp * ( AV_TIME_BASE / 1000 ), AVSEEK_FLAG_BACKWARD);
				}
			}
			
			if(pause) thd_sleep(100);
			
		} while(pause);
		
		//printf("Packet: size: %d data: %02x%02x%02x pst: %d\n", packet.size, packet.data[0], packet.data[1], packet.data[2], pFrame->pts);
		
		// Is this a packet from the video stream?
		if(packet.stream_index == videoStream) {
			//printf("video\n");
			// Decode video frame
			if((r = avcodec_decode_video2(pVideoCodecCtx, pFrame, &frameFinished, &packet)) < 0) {
				//av_strerror(r, errbuf, 256);
				//printf("DS_ERROR_FFMPEG: %s\n", errbuf);
			} else {
				
				// Did we get a video frame?
				if(frameFinished && !pVideoCodecCtx->hurry_up) {
					RenderVideo(&movie_txr, pFrame, pVideoCodecCtx);
				}
			}

		} else if(packet.stream_index == audioStream) {
			//printf("audio\n");
			//snd_decode((uint8*)audio_buf, audio_buf_size, AICA_CODEC_MP3);
			
			audio_buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			if((r = avcodec_decode_audio3(pAudioCodecCtx, audio_buf, &audio_buf_size, &packet)) < 0) {
				//av_strerror(r, errbuf, 256);
				//printf("DS_ERROR_FFMPEG: %s\n", errbuf);
				//continue;
			} else {
				
				if(audio_buf_size > 0 && !pAudioCodecCtx->hurry_up) {

#ifdef USE_DIRECT_AUDIO
					audio_write(pAudioCodecCtx, audio_buf, audio_buf_size);
#else
					aica_audio_write((char*)audio_buf, audio_buf_size);
#endif
				}
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}
	
	goto exit_free;
	
exit_free:

	if(pFrame)
		av_free(pFrame);
	
	if(pFormatCtx)
		av_close_input_file(pFormatCtx);
	
	if(audioStream > -1) {
		if(pAudioCodecCtx)
			avcodec_close(pAudioCodecCtx);
#ifdef USE_DIRECT_AUDIO
		audio_end();
#else
		aica_audio_close();
		sprintf(fn, "%s/firmware/aica/kos_stream.drv", getenv("PATH"));
		snd_init_fw(fn);
#endif
	}
	
	if(audio_buf) {
		free(audio_buf);
	}
	
	if(videoStream > -1) {
		if(pVideoCodecCtx)
			avcodec_close(pVideoCodecCtx);
		FreeVideoTexture(&movie_txr);
		SDL_DS_AllocScreenTexture(GetScreen());
		InitVideoThread();
		//UnlockVideo();
	}
	
	//UnlockInput();
	ProcessVideoEventsUpdate(NULL);
	return 0;
}
Example No. 24
static void *receive_thread(void *arg) {
    float isample;
    float qsample;
    int outsamples;
    int elements;
    int flags=0;
    long long timeNs=0;
    long timeoutUs=10000L;
    int i;
#ifdef TIMING
    struct timeval tv;
    long start_time, end_time;
    rate_samples=0;
    gettimeofday(&tv, NULL);
    start_time=tv.tv_usec + 1000000 * tv.tv_sec;
#endif
    running=1;
    fprintf(stderr,"lime_protocol: receive_thread\n");
    while(running) {
        elements=SoapySDRDevice_readStream(lime_device,stream,(void *)&buffer,max_samples,&flags,&timeNs,timeoutUs);
//fprintf(stderr,"read %d elements\n",elements);
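        /* if the device could not deliver the exact requested rate, resample before feeding the DSP channel */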
        if(actual_rate!=sample_rate) {
            for(i=0; i<elements; i++) {
                resamples[i*2]=(double)buffer[i*2];
                resamples[(i*2)+1]=(double)buffer[(i*2)+1];
            }

            outsamples=xresample(resampler);

            for(i=0; i<outsamples; i++) {
                iqinputbuffer[samples*2]=(double)resampled[i*2];
                iqinputbuffer[(samples*2)+1]=(double)resampled[(i*2)+1];
                samples++;
#ifdef TIMING
                rate_samples++;
                if(rate_samples==sample_rate) {
                    gettimeofday(&tv, NULL);
                    end_time=tv.tv_usec + 1000000 * tv.tv_sec;
                    fprintf(stderr,"%d samples in %ld\n",rate_samples, end_time-start_time);
                    start_time=end_time;
                    rate_samples=0;
                }
#endif
                if(samples==buffer_size) {
                    int error;
                    fexchange0(CHANNEL_RX0, iqinputbuffer, audiooutputbuffer, &error);
                    if(error!=0) {
                        fprintf(stderr,"fexchange0 (CHANNEL_RX0) returned error: %d\n", error);
                    }

                    if(local_audio) {
                        audio_write(audiooutputbuffer,outputsamples);
                    }
                    Spectrum0(1, CHANNEL_RX0, 0, 0, iqinputbuffer);
                    samples=0;
                }
            }
        } else {
            for(i=0; i<elements; i++) {
                iqinputbuffer[samples*2]=(double)buffer[i*2];
                iqinputbuffer[(samples*2)+1]=(double)buffer[(i*2)+1];
                samples++;
                if(samples==buffer_size) {
                    int error;
                    fexchange0(CHANNEL_RX0, iqinputbuffer, audiooutputbuffer, &error);
                    if(error!=0) {
                        fprintf(stderr,"fexchange0 (CHANNEL_RX0) returned error: %d\n", error);
                    }

                    audio_write(audiooutputbuffer,outputsamples);
                    Spectrum0(1, CHANNEL_RX0, 0, 0, iqinputbuffer);
                    samples=0;
                }
            }
        }
    }

    fprintf(stderr,"lime_protocol: receive_thread: SoapySDRDevice_closeStream\n");
    SoapySDRDevice_closeStream(lime_device,stream);
    fprintf(stderr,"lime_protocol: receive_thread: SoapySDRDevice_unmake\n");
    SoapySDRDevice_unmake(lime_device);

    return NULL;
}
Example No. 25
int main(int argc, char **argv)
{
   int c;
   int option_index = 0;
   char *inFile, *outFile;
   FILE *fin, *fout=NULL, *frange=NULL;
   float *output;
   int frame_size=0;
   OpusMSDecoder *st=NULL;
   opus_int64 packet_count=0;
   int total_links=0;
   int stream_init = 0;
   int quiet = 0;
   ogg_int64_t page_granule=0;
   ogg_int64_t link_out=0;
   struct option long_options[] =
   {
      {"help", no_argument, NULL, 0},
      {"quiet", no_argument, NULL, 0},
      {"version", no_argument, NULL, 0},
      {"version-short", no_argument, NULL, 0},
      {"rate", required_argument, NULL, 0},
      {"gain", required_argument, NULL, 0},
      {"no-dither", no_argument, NULL, 0},
      {"packet-loss", required_argument, NULL, 0},
      {"save-range", required_argument, NULL, 0},
      {0, 0, 0, 0}
   };
   ogg_sync_state oy;
   ogg_page       og;
   ogg_packet     op;
   ogg_stream_state os;
   int close_in=0;
   int eos=0;
   ogg_int64_t audio_size=0;
   double last_coded_seconds=0;
   float loss_percent=-1;
   float manual_gain=0;
   int channels=-1;
   int mapping_family;
   int rate=0;
   int wav_format=0;
   int preskip=0;
   int gran_offset=0;
   int has_opus_stream=0;
   ogg_int32_t opus_serialno;
   int dither=1;
   shapestate shapemem;
   SpeexResamplerState *resampler=NULL;
   float gain=1;
   int streams=0;
   size_t last_spin=0;
#ifdef WIN_UNICODE
   int argc_utf8;
   char **argv_utf8;
#endif

   if(query_cpu_support()){
     fprintf(stderr,"\n\n** WARNING: This program was compiled with SSE%s\n",query_cpu_support()>1?"2":"");
     fprintf(stderr,"            but this CPU claims to lack these instructions. **\n\n");
   }

#ifdef WIN_UNICODE
   (void)argc;
   (void)argv;

   init_console_utf8();
   init_commandline_arguments_utf8(&argc_utf8, &argv_utf8);
#endif

   output=0;
   shapemem.a_buf=0;
   shapemem.b_buf=0;
   shapemem.mute=960;
   shapemem.fs=0;

   /*Process options*/
   while(1)
   {
      c = getopt_long (argc_utf8, argv_utf8, "hV",
                       long_options, &option_index);
      if (c==-1)
         break;

      switch(c)
      {
      case 0:
         if (strcmp(long_options[option_index].name,"help")==0)
         {
            usage();
            quit(0);
         } else if (strcmp(long_options[option_index].name,"quiet")==0)
         {
            quiet = 1;
         } else if (strcmp(long_options[option_index].name,"version")==0)
         {
            version();
            quit(0);
         } else if (strcmp(long_options[option_index].name,"version-short")==0)
         {
            version_short();
            quit(0);
         } else if (strcmp(long_options[option_index].name,"no-dither")==0)
         {
            dither=0;
         } else if (strcmp(long_options[option_index].name,"rate")==0)
         {
            rate=atoi (optarg);
         } else if (strcmp(long_options[option_index].name,"gain")==0)
         {
            manual_gain=atof (optarg);
         }else if(strcmp(long_options[option_index].name,"save-range")==0){
          frange=fopen_utf8(optarg,"w");
          if(frange==NULL){
            perror(optarg);
            fprintf(stderr,"Could not open save-range file: %s\n",optarg);
            fprintf(stderr,"Must provide a writable file name.\n");
            quit(1);
          }
         } else if (strcmp(long_options[option_index].name,"packet-loss")==0)
         {
            loss_percent = atof(optarg);
         }
         break;
      case 'h':
         usage();
         quit(0);
         break;
      case 'V':
         version();
         quit(0);
         break;
      case '?':
         usage();
         quit(1);
         break;
      }
   }
   if (argc_utf8-optind!=2 && argc_utf8-optind!=1)
   {
      usage();
      quit(1);
   }
   inFile=argv_utf8[optind];

   /*Output to a file or playback?*/
   if (argc_utf8-optind==2){
     /*If we're outputting to a file, should we apply a wav header?*/
     int i;
     char *ext;
     outFile=argv_utf8[optind+1];
     ext=".wav";
     i=strlen(outFile)-4;
     wav_format=i>=0;
     while(wav_format&&ext&&outFile[i]) {
       wav_format&=tolower(outFile[i++])==*ext++;
     }
   }else {
     outFile="";
     wav_format=0;
     /*If playing to audio out, default the rate to 48000
       instead of the original rate. The original rate is
       only important for minimizing surprise about the rate
       of output files and preserving length, which aren't
       relevant for playback. Many audio devices sound
       better at 48kHz and not resampling also saves CPU.*/
     if(rate==0)rate=48000;
   }

   /*Open input file*/
   if (strcmp(inFile, "-")==0)
   {
#if defined WIN32 || defined _WIN32
      _setmode(_fileno(stdin), _O_BINARY);
#endif
      fin=stdin;
   }
   else
   {
      fin = fopen_utf8(inFile, "rb");
      if (!fin)
      {
         perror(inFile);
         quit(1);
      }
      close_in=1;
   }

   /* .opus files use the Ogg container to provide framing and timekeeping.
    * http://tools.ietf.org/html/draft-terriberry-oggopus
    * The easiest way to decode the Ogg container is to use libogg, so
    *  that's what we do here.
    * Using libogg is fairly straightforward -- you take your stream of bytes
    *  and feed them to ogg_sync_ and it periodically returns Ogg pages, you
    *  check if the pages belong to the stream you're decoding then you give
    *  them to libogg and it gives you packets. You decode the packets. The
    *  pages also provide timing information.*/
   ogg_sync_init(&oy);

   /*Main decoding loop*/
   while (1)
   {
      char *data;
      int i, nb_read;
      /*Get the ogg buffer for writing*/
      data = ogg_sync_buffer(&oy, 200);
      /*Read bitstream from input file*/
      nb_read = fread(data, sizeof(char), 200, fin);
      ogg_sync_wrote(&oy, nb_read);

      /*Loop for all complete pages we got (most likely only one)*/
      while (ogg_sync_pageout(&oy, &og)==1)
      {
         if (stream_init == 0) {
            ogg_stream_init(&os, ogg_page_serialno(&og));
            stream_init = 1;
         }
         if (ogg_page_serialno(&og) != os.serialno) {
            /* so all streams are read. */
            ogg_stream_reset_serialno(&os, ogg_page_serialno(&og));
         }
         /*Add page to the bitstream*/
         ogg_stream_pagein(&os, &og);
         page_granule = ogg_page_granulepos(&og);
         /*Extract all available packets*/
         while (ogg_stream_packetout(&os, &op) == 1)
         {
            /*OggOpus streams are identified by a magic string in the initial
              stream header.*/
            if (op.b_o_s && op.bytes>=8 && !memcmp(op.packet, "OpusHead", 8)) {
               if(!has_opus_stream)
               {
                 opus_serialno = os.serialno;
                 has_opus_stream = 1;
                 link_out = 0;
                 packet_count = 0;
                 eos = 0;
                 total_links++;
               } else {
                 fprintf(stderr,"Warning: ignoring opus stream %" I64FORMAT "\n",(long long)os.serialno);
               }
            }
            if (!has_opus_stream || os.serialno != opus_serialno)
               break;
            /*If first packet in a logical stream, process the Opus header*/
            if (packet_count==0)
            {
               st = process_header(&op, &rate, &mapping_family, &channels, &preskip, &gain, manual_gain, &streams, wav_format, quiet);
               if (!st)
                  quit(1);

               /*Remember how many samples at the front we were told to skip
                 so that we can adjust the timestamp counting.*/
               gran_offset=preskip;

               /*Setup the memory for the dithered output*/
               if(!shapemem.a_buf)
               {
                  shapemem.a_buf=calloc(channels,sizeof(float)*4);
                  shapemem.b_buf=calloc(channels,sizeof(float)*4);
                  shapemem.fs=rate;
               }
               if(!output)output=malloc(sizeof(float)*MAX_FRAME_SIZE*channels);

               /*Normal players should just play at 48000 or their maximum rate,
                 as described in the OggOpus spec.  But for commandline tools
                 like opusdec it can be desirable to exactly preserve the original
                 sampling rate and duration, so we have a resampler here.*/
               if (rate != 48000)
               {
                  int err;
                  resampler = speex_resampler_init(channels, 48000, rate, 5, &err);
                  if (err!=0)
                     fprintf(stderr, "resampler error: %s\n", speex_resampler_strerror(err));
                  speex_resampler_skip_zeros(resampler);
               }
               if(!fout)fout=out_file_open(outFile, &wav_format, rate, mapping_family, &channels);
            } else if (packet_count==1)
            {
               if (!quiet)
                  print_comments((char*)op.packet, op.bytes);
            } else {
               int ret;
               opus_int64 maxout;
               opus_int64 outsamp;
               int lost=0;
               if (loss_percent>0 && 100*((float)rand())/RAND_MAX<loss_percent)
                  lost=1;

               /*End of stream condition*/
               if (op.e_o_s && os.serialno == opus_serialno)eos=1; /* don't care for anything except opus eos */

               /*Are we simulating loss for this packet?*/
               if (!lost){
                  /*Decode Opus packet*/
                  ret = opus_multistream_decode_float(st, (unsigned char*)op.packet, op.bytes, output, MAX_FRAME_SIZE, 0);
               } else {
                  /*Extract the original duration.
                    Normally you wouldn't have it for a lost packet, but normally the
                    transports used on lossy channels will effectively tell you.
                    This avoids opusdec squawking when the decoded samples and
                    granpos mismatches.*/
                  opus_int32 lost_size;
                  lost_size = MAX_FRAME_SIZE;
                  if(op.bytes>0){
                    opus_int32 spp;
                    spp=opus_packet_get_nb_frames(op.packet,op.bytes);
                    if(spp>0){
                      spp*=opus_packet_get_samples_per_frame(op.packet,48000/*decoding_rate*/);
                      if(spp>0)lost_size=spp;
                    }
                  }
                  /*Invoke packet loss concealment.*/
                  ret = opus_multistream_decode_float(st, NULL, 0, output, lost_size, 0);
               }

               if(!quiet){
                  /*Display a progress spinner while decoding.*/
                  static const char spinner[]="|/-\\";
                  double coded_seconds = (double)audio_size/(channels*rate*sizeof(short));
                  if(coded_seconds>=last_coded_seconds+1){
                     fprintf(stderr,"\r[%c] %02d:%02d:%02d", spinner[last_spin&3],
                             (int)(coded_seconds/3600),(int)(coded_seconds/60)%60,
                             (int)(coded_seconds)%60);
                     fflush(stderr);
                     last_spin++;
                     last_coded_seconds=coded_seconds;
                  }
               }

               /*If the decoder returned less than zero, we have an error.*/
               if (ret<0)
               {
                  fprintf (stderr, "Decoding error: %s\n", opus_strerror(ret));
                  break;
               }
               frame_size = ret;

               /*If we're collecting --save-range debugging data, collect it now.*/
               if(frange!=NULL){
                 OpusDecoder *od;
                 opus_uint32 rngs[256];
                 for(i=0;i<streams;i++){
                   ret=opus_multistream_decoder_ctl(st,OPUS_MULTISTREAM_GET_DECODER_STATE(i,&od));
                   ret=opus_decoder_ctl(od,OPUS_GET_FINAL_RANGE(&rngs[i]));
                 }
                 save_range(frange,frame_size*(48000/48000/*decoding_rate*/),op.packet,op.bytes,
                            rngs,streams);
               }

               /*Apply header gain, if we're not using an opus library new
                 enough to do this internally.*/
               if (gain!=0){
                 for (i=0;i<frame_size*channels;i++)
                    output[i] *= gain;
               }

               /*This handles making sure that our output duration respects
                 the final end-trim by not letting the output sample count
                 get ahead of the granpos indicated value.*/
               maxout=((page_granule-gran_offset)*rate/48000)-link_out;
               outsamp=audio_write(output, channels, frame_size, fout, resampler, &preskip, dither?&shapemem:0, strlen(outFile)!=0,0>maxout?0:maxout);
               link_out+=outsamp;
               audio_size+=sizeof(short)*outsamp*channels;
            }
            packet_count++;
         }
         /*We're done, drain the resampler if we were using it.*/
         if(eos && resampler)
         {
            float *zeros;
            int drain;

            zeros=(float *)calloc(100*channels,sizeof(float));
            drain = speex_resampler_get_input_latency(resampler);
            do {
               opus_int64 outsamp;
               int tmp = drain;
               if (tmp > 100)
                  tmp = 100;
               outsamp=audio_write(zeros, channels, tmp, fout, resampler, NULL, &shapemem, strlen(outFile)!=0, ((page_granule-gran_offset)*rate/48000)-link_out);
               link_out+=outsamp;
               audio_size+=sizeof(short)*outsamp*channels;
               drain -= tmp;
            } while (drain>0);
            free(zeros);
            speex_resampler_destroy(resampler);
            resampler=NULL;
         }
         if(eos)
         {
            has_opus_stream=0;
            if(st)opus_multistream_decoder_destroy(st);
            st=NULL;
         }
      }
      if (feof(fin)) {
         if(!quiet) {
           fprintf(stderr, "\rDecoding complete.        \n");
           fflush(stderr);
         }
         break;
      }
   }

   /*If we were writing wav, go set the duration.*/
   if (strlen(outFile)!=0 && fout && wav_format>=0 && audio_size<0x7FFFFFFF)
   {
      if (fseek(fout,4,SEEK_SET)==0)
      {
         int tmp;
         tmp = le_int(audio_size+20+wav_format);
         if(fwrite(&tmp,4,1,fout)!=1)fprintf(stderr,"Error writing end length.\n");
         if (fseek(fout,16+wav_format,SEEK_CUR)==0)
         {
            tmp = le_int(audio_size);
            if(fwrite(&tmp,4,1,fout)!=1)fprintf(stderr,"Error writing header length.\n");
         } else
         {
            fprintf (stderr, "First seek worked, second didn't\n");
         }
      } else {
         fprintf (stderr, "Cannot seek on wave file, size will be incorrect\n");
      }
   }

   /*Did we make it to the end without recovering ANY opus logical streams?*/
   if(!total_links)fprintf (stderr, "This doesn't look like an Opus file\n");

   if (stream_init)
      ogg_stream_clear(&os);
   ogg_sync_clear(&oy);

#if defined WIN32 || defined _WIN32
   if (strlen(outFile)==0)
      WIN_Audio_close ();
#endif

   if(shapemem.a_buf)free(shapemem.a_buf);
   if(shapemem.b_buf)free(shapemem.b_buf);

   if(output)free(output);

   if(frange)fclose(frange);

   if (close_in)
      fclose(fin);
   if (fout != NULL)
      fclose(fout);

#ifdef WIN_UNICODE
   free_commandline_arguments_utf8(&argc_utf8, &argv_utf8);
   uninit_console_utf8();
#endif

   return 0;
}
Example No. 26
int
main (int ac, char **av)
{
    sisy_t sisy;
    bank_t bank;
    audio_t audio;
    int done=0;

    init_tab();

    memset(&sisy, 0, sizeof(sisy));

    bank.pos = 0;
    bank.watches_size = 0;
    bank.name = "sisy IO";
    bank.size = sizeof(sisy_IO_t);
    bank.data = &sisy.IO;
    bank.symtab = IO_symtab;
    bank_push(&bank);

    sisy_getopt(ac, av);

    instru_path[instru_path_size] = Malloc(strlen(getenv("HOME")) + strlen("/.sisy/") + 200);
    strcpy(instru_path[instru_path_size], getenv("HOME")); /* copy (not append) into the freshly allocated buffer */
    strcat(instru_path[instru_path_size++], "/.sisy/");
    instru_path[instru_path_size++] = "/usr/share/sisy/";

    sisy.buffer = buffer_create();

    printf("Init midi: ");

    if(sisy_midi_init(&sisy, midi_device) < 0)
	{
	    fprintf(stderr, "ERROR: got some troubles while opening midi device %s\n", midi_device);
	    goto error;
	}

    printf("Ok\nInit audio: ");

    if(audio_init(&audio, audio_device) < 0)
	{
	    fprintf(stderr, "ERROR: got some troubles while opening audio device %s\n", audio_device);
	    goto error;
	}

    printf("Ok\nEnter in main loop...\n");

    while(!done)
	{
	    int t;
	    done=1;
	    ck_err(buffer_zero(sisy.buffer)<0);
//	    printf("%d\r", midi_timestamp_get());
	    for(t=0; t<sisy.nb_tracks; t++)
		{
		    sisy_track_t *track=&sisy.tracks[t];
		    if(!track->EOT)
			{
			    done=0;//We are not done !
			    ck_err(bank_push(&track->midi.bank));
			    ck_err(buffer_zero(track->buffer) < 0);

			    ck_err(sisy_track_process(track) < 0);

			    ck_err(bank_pop_check(&track->midi.bank));
			    ck_err(buffer_mix(track->buffer, sisy.buffer));
			}
		}
	    
/* 	    if(!is_buffer_flat(sisy.buffer)) */
/* 		printf("sisy: buffer qui bouge\n"); */

	    ck_err(audio_write(&audio, sisy.buffer)<0);
	}

/* 	ck_err(bank_pop_check(&sisy.midi.bank)); */
/* 	ck_err(bank_pop_check(&bank)); */

    audio_quit(&audio);

    return 0;
  error:
    return -1;
}
Example No. 27
static int play_wave_from_socket(snd_header *header,int audiostream)
{
    /* Read audio from stream and play it to audio device, converting */
    /* it to pcm if required                                          */
    int num_samples;
    int sample_width;
    cst_audiodev *audio_device;
    int q,i,n,r;
    unsigned char bytes[CST_AUDIOBUFFSIZE];
    short shorts[CST_AUDIOBUFFSIZE];
    cst_file fff;

    fff = cst_fopen("/tmp/awb.wav",CST_OPEN_WRITE|CST_OPEN_BINARY);

    if ((audio_device = audio_open(header->sample_rate,1,
				   (header->encoding == CST_SND_SHORT) ?
				   CST_AUDIO_LINEAR16 : CST_AUDIO_LINEAR8)) == NULL)
    {
	cst_errmsg("play_wave_from_socket: can't open audio device\n");
	return -1;
    }

    if (header->encoding == CST_SND_SHORT)
	sample_width = 2;
    else
	sample_width = 1;

    num_samples = header->data_size / sample_width;
    /* we naively let the num_channels sort itself out */
    for (i=0; i < num_samples; i += r/2)
    {
	if (num_samples > i+CST_AUDIOBUFFSIZE)
	    n = CST_AUDIOBUFFSIZE;
	else
	    n = num_samples-i;
	if (header->encoding == CST_SND_ULAW)
	{
	    r = read(audiostream,bytes,n);
	    for (q=0; q<r; q++)
		shorts[q] = cst_ulaw_to_short(bytes[q]);
	    r *= 2;
	}
	else /* if (header->encoding == CST_SND_SHORT) */
	{
	    r = read(audiostream,shorts,n*2);
	    if (CST_LITTLE_ENDIAN)
		for (q=0; q<r/2; q++)
		    shorts[q] = SWAPSHORT(shorts[q]);
	}
	
	if (r <= 0)
	{   /* I'm not getting any data from the server */
	    audio_close(audio_device);
	    return CST_ERROR_FORMAT;
	}
	
	for (q=r; q > 0; q-=n)
	{
	    n = audio_write(audio_device,shorts,q);
	    cst_fwrite(fff,shorts,2,q);
	    if (n <= 0)
	    {
		audio_close(audio_device);
		return CST_ERROR_FORMAT;
	    }
	}
    }
    audio_close(audio_device);
    cst_fclose(fff);

    return CST_OK_FORMAT;

}
Example No. 28
int d2083_audio_hs_poweron1(bool on)
{
    int ret = 0;
    u8 regval;

	//dlg_info("%s status:%d, cmd:%d\n",__func__,hs_pw_status, on);     
	
	if ( hs_pw_status == on){
		return ret;
	}
	hs_pw_status = on;
	
	dlg_info("%s HP=%d speaker_power=%d \n",__func__,on,dlg_audio->IHFenabled);     

    if(on) 
	{
		//if(dlg_audio->IHFenabled==false)
		//{
		    //audio_write(D2083_LDO_AUD_MCTL_REG, 0x44); //AUD_LDO on
		    //audio_write(D2083_LDO1_MCTL_REG, 0x54);
		    //mdelay(20);
		//}

		if (!dlg_audio_sleep) {
			ret |= audio_write(D2083_HP_L_CTRL_REG,0xF0);
			ret |= audio_write(D2083_HP_R_CTRL_REG,0xF0);

			ret = audio_write(D2083_HP_L_GAIN_REG,0x00);
			ret = audio_write(D2083_HP_R_GAIN_REG,0x00); 

			ret = audio_write(D2083_HP_NG1_REG,0x10);
			msleep(30); // will test for pop noise newly
		}

		//dlg_audio->hs_pga_gain -= (LIMITED_SWAP_PRE - dlg_audio->hs_pre_gain-1);
		//dlg_audio->hs_pre_gain += (LIMITED_SWAP_PRE - dlg_audio->hs_pre_gain);
#ifndef USE_AUDIO_DIFFERENTIAL
		regval = audio_read(D2083_PREAMP_B_CTRL1_REG) & ~D2083_PREAMP_VOL;
		regval |= ((dlg_audio->hs_pre_gain << D2083_PREAMP_VOL_SHIFT) & D2083_PREAMP_VOL);
		regval |= (D2083_PREAMP_EN | D2083_PREAMP_ZC_EN);
		regval &= ~D2083_PREAMP_MUTE;

		ret |= audio_write(D2083_PREAMP_B_CTRL1_REG,regval);
		ret |= audio_write(D2083_PREAMP_B_CTRL2_REG,0x00); 

		ret |= audio_write(D2083_MXHPR_CTRL_REG,0x11);
		ret |= audio_write(D2083_MXHPL_CTRL_REG,0x09);   

#else
		regval = audio_read(D2083_PREAMP_B_CTRL1_REG) & ~D2083_PREAMP_VOL;
		regval |= ((dlg_audio->hs_pre_gain << D2083_PREAMP_VOL_SHIFT) & D2083_PREAMP_VOL);
		regval |= (D2083_PREAMP_EN | D2083_PREAMP_ZC_EN);
		regval &= ~D2083_PREAMP_MUTE;

		ret |= audio_write(D2083_PREAMP_B_CTRL1_REG,regval);
		ret |= audio_write(D2083_PREAMP_B_CTRL2_REG,0x03); 

		if(dlg_audio->IHFenabled == false) {
			regval = audio_read(D2083_PREAMP_A_CTRL1_REG) & ~D2083_PREAMP_VOL;
			regval |= ((dlg_audio->hs_pre_gain << D2083_PREAMP_VOL_SHIFT) & D2083_PREAMP_VOL);
			regval |= (D2083_PREAMP_EN | D2083_PREAMP_ZC_EN);
			regval &= ~D2083_PREAMP_MUTE;

			ret |= audio_write(D2083_PREAMP_A_CTRL1_REG,regval);
			ret |= audio_write(D2083_PREAMP_A_CTRL2_REG,0x03);
		}

		// HP mixer control
		if(dlg_audio->IHFenabled == false) {
			audio_write(D2083_MXHPL_CTRL_REG,0x07);
		} else {
			audio_write(D2083_MXHPL_CTRL_REG,0x19);
		}
		audio_write(D2083_MXHPR_CTRL_REG,0x19);
#endif

		regval = audio_read(D2083_HP_L_GAIN_REG) & ~D2042_HP_AMP_GAIN & ~D2083_HP_AMP_MUTE_EN; 
		ret |= audio_write(D2083_HP_L_GAIN_REG, regval | (dlg_audio->hs_pga_gain & D2042_HP_AMP_GAIN));

		regval = audio_read(D2083_HP_R_GAIN_REG) & ~D2042_HP_AMP_GAIN & ~D2083_HP_AMP_MUTE_EN;
		ret |= audio_write(D2083_HP_R_GAIN_REG, regval | (dlg_audio->hs_pga_gain & D2042_HP_AMP_GAIN));

		ret |= audio_write(D2083_CP_CTRL_REG,0xC9);
		ret |= audio_write(D2083_CP_DELAY_REG,0x85);

		ret |= audio_write(D2083_CP_DETECTOR_REG,0x00);
		ret |= audio_write(D2083_CP_VOL_THRESHOLD_REG,0x32);
		ret |= audio_write(D2083_BIAS_CTRL_REG,0x3F);

		msleep(65);
		
#ifndef USE_AUDIO_DIFFERENTIAL
		regval = audio_read(D2083_PREAMP_B_CTRL1_REG);
		regval &= ~D2083_PREAMP_ZC_EN;       
		ret |= audio_write(D2083_PREAMP_B_CTRL1_REG,regval);
#else
		regval = audio_read(D2083_PREAMP_B_CTRL1_REG);
		regval &= ~D2083_PREAMP_ZC_EN;       
		ret |= audio_write(D2083_PREAMP_B_CTRL1_REG,regval);
		
		if(dlg_audio->IHFenabled == false) {
			regval = audio_read(D2083_PREAMP_A_CTRL1_REG);
			regval &= ~D2083_PREAMP_ZC_EN;
			ret |= audio_write(D2083_PREAMP_A_CTRL1_REG,regval);
		}
#endif
		ret = audio_write(D2083_HP_NG1_REG, dlg_set_ng1);
		ret = audio_write(D2083_HP_NG2_REG, dlg_set_ng2);

		ret |= audio_write(D2083_HP_L_CTRL_REG,0xa0);
		ret |= audio_write(D2083_HP_R_CTRL_REG,0xa0);

		msleep(25);
		ret |= audio_write(D2083_CP_CTRL_REG,0xCD);

		msleep(40);
		dlg_audio->HSenabled = true;

	}
	else
	{
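		/* power-down: mute the preamps, then step the headphone gain down with short delays to reduce pop noise */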

#ifndef USE_AUDIO_DIFFERENTIAL
		regval = audio_read(D2083_PREAMP_B_CTRL1_REG) & ~D2083_PREAMP_VOL;
		regval |= (D2083_PREAMP_MUTE);
		ret |= audio_write(D2083_PREAMP_B_CTRL1_REG,regval);
#else		
		regval = audio_read(D2083_PREAMP_B_CTRL1_REG) & ~D2083_PREAMP_VOL;
		regval |= (D2083_PREAMP_MUTE);
		ret |= audio_write(D2083_PREAMP_B_CTRL1_REG,regval);
		
		if(dlg_audio->IHFenabled != true) {
			regval = audio_read(D2083_PREAMP_A_CTRL1_REG) & ~D2083_PREAMP_VOL;
			regval |= (D2083_PREAMP_MUTE);
			ret |= audio_write(D2083_PREAMP_A_CTRL1_REG,regval);
		}
#endif
		ret = audio_write(D2083_HP_L_GAIN_REG, dlg_audio->hs_pga_gain -2);
		ret = audio_write(D2083_HP_R_GAIN_REG, dlg_audio->hs_pga_gain -2); 
		msleep(10);

		ret = audio_write(D2083_HP_L_GAIN_REG, dlg_audio->hs_pga_gain -5);
		ret = audio_write(D2083_HP_R_GAIN_REG, dlg_audio->hs_pga_gain -5); 
		msleep(10);

		ret = audio_write(D2083_HP_L_GAIN_REG, dlg_audio->hs_pga_gain -10);
		ret = audio_write(D2083_HP_R_GAIN_REG, dlg_audio->hs_pga_gain -10); 
		msleep(10);

		ret = audio_write(D2083_HP_NG1_REG,0x11); 
		ret = audio_write(D2083_HP_NG2_REG,0x07);    

		if(dlg_audio->IHFenabled != true)
			msleep(50);

		ret = audio_write(D2083_HP_L_GAIN_REG,0x00);
		ret = audio_write(D2083_HP_R_GAIN_REG,0x00); 
		msleep(10);

		audio_write(D2083_CP_CTRL_REG,0xC9);
			
		dlg_audio_sleep = false;
		dlg_audio->HSenabled = false;

		// if(dlg_audio->IHFenabled==false) {
		//     audio_write(D2083_LDO_AUD_MCTL_REG, 0x00); //AUD_LDO off
		//	 audio_write(D2083_LDO1_MCTL_REG, 0x54);
		// }

	}

	return ret;
}
Example No. 29
JNIEXPORT jint JNICALL Java_net_avs234_AndLessSrv_wavPlay(JNIEnv *env, jobject obj, msm_ctx* ctx, jstring jfile, jint start) {

    const char *file = (*env)->GetStringUTFChars(env, jfile, NULL);
    int i, n;
    unsigned rate, channels, bps;
    unsigned char *buff;
//    fd_set fds;

    struct timeval tstart, tstop, ttmp; 	
    useconds_t  tminwrite;		

    int writes = 0;
    off_t fsize;
	
#ifdef DBG_TIME
    uint64_t total_tminwrite = 0, total_ttmp = 0, total_sleep = 0;
    int  fails = 0;
#endif

	if(!ctx) {
	    return LIBLOSSLESS_ERR_NOCTX;
	}
		
	if(!file) {
		(*env)->ReleaseStringUTFChars(env, jfile, file);
		return LIBLOSSLESS_ERR_INV_PARM;
	}

	audio_stop(ctx);

	ctx->fd = open(file, O_RDONLY);
	(*env)->ReleaseStringUTFChars(env, jfile, file);

	if(ctx->fd < 0) {
	    return LIBLOSSLESS_ERR_NOFILE;
	}

	fsize = lseek(ctx->fd, 0, SEEK_END) - sizeof(wav_hdr);
	lseek(ctx->fd, 0, SEEK_SET);

	if(wav_hdr(ctx->fd, &rate, &channels, &bps) != 0) {
	    return LIBLOSSLESS_ERR_FORMAT;
	}

    __android_log_print(ANDROID_LOG_INFO, "liblossless", "wav ctx mode: %d", ctx->mode);

	if(start) {
		int start_offs = start * (bps/8) * channels * rate;

		if(lseek(ctx->fd,start_offs,SEEK_CUR) < 0) {
		    return LIBLOSSLESS_ERR_OFFSET;
		}
	}

	i = audio_start(ctx, channels, rate);

	if(i != 0) {
        close(ctx->fd);
	    return i;
	}

	buff = (unsigned char *) malloc(ctx->conf_size);

	if(!buff) {
		close(ctx->fd);
		return LIBLOSSLESS_ERR_NOMEM;
	}

	tminwrite = ((long long)((long long)ctx->conf_size)*1000000)/((long long)rate*channels*(bps/8));
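	/* tminwrite: microseconds of audio represented by one conf_size buffer; used below to pace writes when not in callback mode */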
	

#if 0
	sprintf(buff,"*******TMINWRITEEEEE = %d %d %d %d %d",tminwrite,ctx->conf_size,rate,channels,bps);
	__android_log_print(ANDROID_LOG_INFO, "liblossless, buffer: ", buff);
#endif

    ctx->channels = channels;
    ctx->samplerate = rate;
    ctx->bps = bps;
	ctx->written = 0;

	pthread_mutex_lock(&ctx->mutex);
        ctx->state = MSM_PLAYING;
        ctx->track_time = fsize / (rate * channels * (bps / 8));
	pthread_mutex_unlock(&ctx->mutex);

    update_track_time(env, obj, ctx->track_time);

	while(ctx->state != MSM_STOPPED) {

		n = read(ctx->fd, buff, ctx->conf_size);

		if(n != ctx->conf_size) {

			if(ctx->state != MSM_STOPPED) {
			   if(ctx->state != MSM_PAUSED) {
			        pthread_mutex_lock(&ctx->mutex);
			   }
               ctx->state = MSM_STOPPED;
		       pthread_mutex_unlock(&ctx->mutex);
			}

			free(buff);

	        if(ctx->fd == -1) {
	            return 0; // we were stopped from the main thread
	        }

            close(ctx->fd);
            ctx->fd = -1;

	        return 	LIBLOSSLESS_ERR_IO_READ;
		}

	    if(ctx->mode != MODE_CALLBACK) {
            gettimeofday(&tstop,0);
            timersub(&tstop,&tstart,&ttmp);

		    if(tminwrite > ttmp.tv_usec) {
			    usleep((tminwrite-ttmp.tv_usec) / 4);
#ifdef DBG_TIME
                total_sleep += (tminwrite - ttmp.tv_usec) / 4;
#endif
            }
#ifdef DBG_TIME

            else {
                fails++;
            }

            total_tminwrite += tminwrite;
            total_ttmp += ttmp.tv_usec;
#endif
		    gettimeofday(&tstart,0);
	    }

		pthread_mutex_lock(&ctx->mutex);

		i = audio_write(ctx,buff,ctx->conf_size);

		if(i < ctx->conf_size) {
            ctx->state = MSM_STOPPED;
            pthread_mutex_unlock(&ctx->mutex);
		    free(buff); 	

            if(ctx->fd == -1) {

#ifdef DBG_TIME
                if(writes && (writes > fails)) {
                    int x = (int) (total_tminwrite/writes);
                    int y = (int) (total_ttmp/writes);
                    int z = (int) (total_sleep/(writes-fails));
                    __android_log_print(
                        ANDROID_LOG_INFO,
                        "liblossless",
                        "tminwrite %d ttmp %d sleep %d fails %d writes %d",
                        x, y, z, fails, writes);

                } else {
                    __android_log_print(
                        ANDROID_LOG_INFO,
                        "liblossless", "fails %d writes %d",
                        fails, writes);
                }
#endif

			    return 0; // we were stopped from the main thread
		    }	

            close(ctx->fd); ctx->fd = -1;

            return LIBLOSSLESS_ERR_IO_WRITE;
		}

		pthread_mutex_unlock(&ctx->mutex);

		ctx->written += i;
	    writes++;
	}

    if(ctx->state != MSM_STOPPED) {
        if(ctx->state != MSM_PAUSED) {
            pthread_mutex_lock(&ctx->mutex);
        }

        if(ctx->fd != -1) {
            close(ctx->fd); ctx->fd = -1;
        }

        ctx->state = MSM_STOPPED;
        pthread_mutex_unlock(&ctx->mutex);
    }

#ifdef DBG_TIME
        if(writes && (writes > fails)) {
           int x = (int) (total_tminwrite/writes);
           int y = (int) (total_ttmp/writes);
           int z = (int) (total_sleep/(writes-fails));
            __android_log_print(ANDROID_LOG_INFO,"liblossless","tminwrite %d ttmp %d sleep %d fails %d writes %d", x,y,z,fails,writes);
        } else __android_log_print(ANDROID_LOG_INFO,"liblossless","fails %d writes %d", fails,writes);
#endif
   free(buff);
   audio_wait_done(ctx);

   return 0;	
}