/**
  * @brief  Play audio stream
  * @param  frequency: Audio frequency used to play the audio stream.
  * @retval Audio state.
  */
AUDIO_RECORDER_ErrorTypdef  AUDIO_RECORDER_Play(uint32_t frequency)
{
  uint32_t numOfReadBytes;

  
  BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_AUTO, DEFAULT_REC_AUDIO_VOLUME, DEFAULT_AUDIO_IN_FREQ);
  BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
  
  /* Fill whole buffer @ first time */
  if(f_read(&wav_file, 
            &haudio.buff[0], 
            AUDIO_OUT_BUFFER_SIZE, 
            (void *)&numOfReadBytes) == FR_OK)
  { 
    if(numOfReadBytes != 0)
    {
      if(haudio.in.state == AUDIO_RECORDER_SUSPENDED)
      {
        osThreadResume(AudioThreadId);
      }
      haudio.in.state = AUDIO_RECORDER_PLAYING;
      BSP_AUDIO_OUT_Play((uint16_t*)&haudio.buff[0], AUDIO_OUT_BUFFER_SIZE);   
      return AUDIO_RECORDER_ERROR_NONE;
    }
  }
  return AUDIO_RECORDER_ERROR_IO;
  
}
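Note: AUDIO_RECORDER_Play() fills haudio.buff only once before starting the DMA transfer; playback is kept running by the BSP transfer callbacks. The sketch below is a minimal, assumed version of those callbacks, modeled on the usual STM32Cube pattern: it presumes a message queue (AudioEvent) and BUFFER_OFFSET_HALF / BUFFER_OFFSET_FULL message values like the ones used in the player examples further down, none of which are taken from this listing.

void BSP_AUDIO_OUT_HalfTransfer_CallBack(void)
{
  if(haudio.in.state == AUDIO_RECORDER_PLAYING)
  {
    /* First half of haudio.buff has been consumed: ask the audio task to refill it */
    osMessagePut(AudioEvent, BUFFER_OFFSET_HALF, 0);
  }
}

void BSP_AUDIO_OUT_TransferComplete_CallBack(void)
{
  if(haudio.in.state == AUDIO_RECORDER_PLAYING)
  {
    /* Second half of haudio.buff has been consumed */
    osMessagePut(AudioEvent, BUFFER_OFFSET_FULL, 0);
  }
}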
/**
  * @brief  Initializes the AUDIO media low layer.
  * @param  AudioFreq: Audio frequency used to play the audio stream.
  * @param  Volume: Initial volume level (from 0 (Mute) to 100 (Max))
  * @param  options: Reserved for future use 
  * @retval Result of the operation: USBD_OK if all operations are OK else USBD_FAIL
  */
static int8_t Audio_Init(uint32_t  AudioFreq, uint32_t Volume, uint32_t options)
{
  BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_AUTO, Volume, AudioFreq);
  
  /* Update the Audio frame slot configuration to match the PCM standard instead of TDM */
  BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
  return 0;
}
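Audio_Init() above is the Init hook of the USB audio class interface. Below is a minimal sketch of how such a hook is typically registered, assuming the standard USBD_AUDIO_ItfTypeDef layout from the STM32Cube USB device library; the companion function names are placeholders, not from this listing.

USBD_AUDIO_ItfTypeDef USBD_AUDIO_fops =
{
  Audio_Init,       /* called when the host starts streaming */
  Audio_DeInit,     /* placeholder: release the codec */
  Audio_AudioCmd,   /* placeholder: handle AUDIO_CMD_START / AUDIO_CMD_PLAY */
  Audio_VolumeCtl,  /* placeholder: host volume request */
  Audio_MuteCtl,    /* placeholder: host mute request */
  Audio_PeriodicTC, /* placeholder: periodic transfer complete */
  Audio_GetState,   /* placeholder: report interface state */
};

/* Typically hooked to the device stack with:
   USBD_AUDIO_RegisterInterface(&USBD_Device, &USBD_AUDIO_fops); */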
Example #3
/**
  * @brief  Initializes Audio Interface.
  * @param  None
  * @retval Audio error
  */
AUDIO_ErrorTypeDef AUDIO_Init(void)
{
  if(BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_AUTO, uwVolume, I2S_AUDIOFREQ_44K) == 0)
  {
    BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
    return AUDIO_ERROR_NONE;
  }
  return AUDIO_ERROR_IO;
}
/**
  * @brief  Initializes the Wave player.
  * @param  AudioFreq: Audio sampling frequency
  * @retval 0 on success, 1 otherwise
  */
uint8_t PlayerInit(uint32_t AudioFreq)
{ 
  /* Initialize the Audio codec and all related peripherals (I2S, I2C, IOExpander, IOs...) */  
  if(BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_BOTH, uwVolume, AudioFreq) != 0)
  {
    return 1;
  }
  else
  {
    BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
    return 0;
  } 
}
Example #5
/**
  * @brief  Initializes audio
  * @param  None.
  * @retval Audio state.
  */
AUDIOPLAYER_ErrorTypdef  AUDIOPLAYER_Init(void)
{
  /* Initialize internal audio structure */
  haudio.state  = AUDIOPLAYER_STOP;
  haudio.mute   = MUTE_OFF;
  haudio.volume = AUDIO_DEFAULT_VOLUME;  

  /* Create Audio Queue */
  osMessageQDef(AUDIO_Queue, 1, uint16_t);
  AudioEvent = osMessageCreate (osMessageQ(AUDIO_Queue), NULL); 
  
  /* Create Audio task */
  osThreadDef(osAudio_Thread, Audio_Thread, osPriorityHigh, 0, 4 * configMINIMAL_STACK_SIZE);
  AudioThreadId = osThreadCreate (osThread(osAudio_Thread), NULL);  

  BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
  return AUDIOPLAYER_ERROR_NONE;
}
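AUDIOPLAYER_Init() above only creates the queue and the task; the task itself waits on AudioEvent and refills the DMA buffer halves. A minimal sketch of such an Audio_Thread follows, assuming BUFFER_OFFSET_HALF / BUFFER_OFFSET_FULL messages posted from the BSP transfer callbacks (these names are assumptions, not taken from this listing):

static void Audio_Thread(void const *argument)
{
  osEvent event;

  for(;;)
  {
    event = osMessageGet(AudioEvent, 100);

    if((event.status == osEventMessage) && (haudio.state == AUDIOPLAYER_PLAY))
    {
      switch(event.value.v)
      {
      case BUFFER_OFFSET_HALF:
        /* first half of the DMA buffer has been played: refill it from the file */
        break;

      case BUFFER_OFFSET_FULL:
        /* second half has been played: refill it */
        break;

      default:
        break;
      }
    }
  }
}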
/**
  * @brief  Main program
  * @param  None
  * @retval None
  */
int main(void)
{
  /* Configure the MPU attributes as Write Through */
  MPU_Config();

  /* Enable the CPU Cache */
  CPU_CACHE_Enable();

  /* STM32F7xx HAL library initialization:
       - Configure the Flash ART accelerator on the ITCM interface
       - The SysTick timer is configured by default as the time base source, but the
         user can implement their own time base source (e.g. a general-purpose
         timer), keeping in mind that the time base duration should remain 1 ms,
         since the PPP_TIMEOUT_VALUEs are defined and handled in milliseconds.
       - Set the NVIC group priority to 4
       - Low-level initialization
     */
  HAL_Init();

  /* Configure the system clock to 200 MHz */
  SystemClock_Config();

  /* Configure LED1 */
  BSP_LED_Init(LED1);

  HAL_Delay(100);

  setbuf(stdout, NULL);

  BSP_LED_On(LED1);

  SD_init();
  HAL_Delay(100);

  fluid_settings_t* settings;
  int sfont_id;

  /* Create the settings. */
  settings = new_fluid_settings();
  fluid_settings_setnum(settings, "synth.sample-rate", SAMPLE_RATE); 

  fluid_settings_setstr(settings, "synth.reverb.active", "no");
  fluid_settings_setstr(settings, "synth.chorus.active", "no");
  fluid_settings_setint(settings, "synth.polyphony", POLYPHONY);

  /* Create the synthesizer. */
  synth = new_fluid_synth(settings);

  sfont_id = fluid_synth_sfload(synth, SOUNDFONT_FILE, 1);
  fluid_synth_set_interp_method(synth, -1, FLUID_INTERP_NONE);
//  fluid_synth_set_interp_method(synth, -1, FLUID_INTERP_LINEAR);

  /* Initialize the USB device library and register the MIDI class */
  USBD_Init(&USBD_Device, &AUDIO_Desc, 0);
  USBD_RegisterClass(&USBD_Device, &USBD_Midi_ClassDriver);
  USBD_Midi_RegisterInterface(&USBD_Device, &USBD_Midi_fops);
  USBD_Start(&USBD_Device);

  HAL_Delay(5);

  BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_AUTO, MASTER_VOLUME, SAMPLE_RATE);
  BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);  // PCM 2-channel

#ifdef AUDIO_FORMAT_32BITS
  BSP_AUDIO_OUT_Play((uint32_t *)&buf[0], AUDIO_BUF_SIZE);
#else
  BSP_AUDIO_OUT_Play((uint16_t *)&buf[0], AUDIO_BUF_SIZE);
#endif

  BSP_LED_Off(LED1);

  while (1)
  {
    BSP_LED_Toggle(LED1);
    HAL_Delay(1000);
  }

}
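In the example above, buf is handed to BSP_AUDIO_OUT_Play() once and then recycled by the circular DMA; the synthesizer output has to be rendered into each half as it drains. A minimal sketch of the refill callbacks, assuming 16-bit interleaved stereo and that AUDIO_BUF_SIZE counts int16_t samples (both are assumptions, not taken from this listing):

/* buf assumed to be int16_t buf[AUDIO_BUF_SIZE], interleaved L/R */
void BSP_AUDIO_OUT_HalfTransfer_CallBack(void)
{
  /* first half finished playing: render AUDIO_BUF_SIZE/4 stereo frames into it */
  fluid_synth_write_s16(synth, AUDIO_BUF_SIZE / 4,
                        &buf[0], 0, 2,   /* left channel: offset 0, stride 2 */
                        &buf[0], 1, 2);  /* right channel: offset 1, stride 2 */
}

void BSP_AUDIO_OUT_TransferComplete_CallBack(void)
{
  /* second half finished playing: render into the upper half */
  fluid_synth_write_s16(synth, AUDIO_BUF_SIZE / 4,
                        &buf[AUDIO_BUF_SIZE / 2], 0, 2,
                        &buf[AUDIO_BUF_SIZE / 2], 1, 2);
}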
Example #7
/**
  * @brief  Initializes audio
  * @param  volume: Initial output volume (from 0 (Mute) to 100 (Max))
  * @retval Audio state.
  */
AUDIOPLAYER_ErrorTypdef  AUDIOPLAYER_Init(uint8_t volume)
{
#if (!defined ( __GNUC__ ))  
  uint32_t index = 0;
  __IO uint32_t ldness_value;
#endif
  
   /* Try to initialize the Audio interface; a different config can be used in case of failure */
  BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_AUTO, volume, I2S_AUDIOFREQ_48K);
  BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
  
  /* Initialize internal audio structure */
  haudio.out.state  = AUDIOPLAYER_STOP;
  haudio.out.mute   = MUTE_OFF;
  haudio.out.volume = volume;  
  
  
#if (!defined ( __GNUC__ ))  
  /* Enable the Eq */
  SpiritEQ_Init((TSpiritEq *)AUDIO_EqInstance, I2S_AUDIOFREQ_48K);
  
  
  /* Retrieve stored settings and set band params */
  SpiritEQ_FltGet((TSpiritEq *)AUDIO_EqInstance, &AUDIO_EQ_Bands[0], 0 );
  AUDIO_EQ_Bands[0].gainDb = k_BkupRestoreParameter(CALIBRATION_AUDIOPLAYER_EQU1_BKP);
  SET_BAND_PRMS(&AUDIO_EQ_Bands[0], SPIRIT_EQ_FLT_TYPE_SHELVING_LOWPASS , 0, 1000, AUDIO_EQ_Bands[0].gainDb);
  
  SpiritEQ_FltGet((TSpiritEq *)AUDIO_EqInstance, &AUDIO_EQ_Bands[1], 1 );
  AUDIO_EQ_Bands[1].gainDb = k_BkupRestoreParameter(CALIBRATION_AUDIOPLAYER_EQU2_BKP);    
  SET_BAND_PRMS(&AUDIO_EQ_Bands[1], SPIRIT_EQ_FLT_TYPE_PEAKING , 2000, 1000, AUDIO_EQ_Bands[1].gainDb);
  
  SpiritEQ_FltGet((TSpiritEq *)AUDIO_EqInstance, &AUDIO_EQ_Bands[2], 2 );
  AUDIO_EQ_Bands[2].gainDb = k_BkupRestoreParameter(CALIBRATION_AUDIOPLAYER_EQU3_BKP);   
  SET_BAND_PRMS(&AUDIO_EQ_Bands[2], SPIRIT_EQ_FLT_TYPE_PEAKING , 5000, 3000, AUDIO_EQ_Bands[2].gainDb);
  
  SpiritEQ_FltGet((TSpiritEq *)AUDIO_EqInstance, &AUDIO_EQ_Bands[3], 3 );
  AUDIO_EQ_Bands[3].gainDb = k_BkupRestoreParameter(CALIBRATION_AUDIOPLAYER_EQU4_BKP);
  SET_BAND_PRMS(&AUDIO_EQ_Bands[3], SPIRIT_EQ_FLT_TYPE_PEAKING , 10000, 6000, AUDIO_EQ_Bands[3].gainDb);
  
  SpiritEQ_FltGet((TSpiritEq *)AUDIO_EqInstance, &AUDIO_EQ_Bands[4], 4 );
  AUDIO_EQ_Bands[4].gainDb = k_BkupRestoreParameter(CALIBRATION_AUDIOPLAYER_EQU5_BKP);  
  SET_BAND_PRMS(&AUDIO_EQ_Bands[4], SPIRIT_EQ_FLT_TYPE_SHELVING_HIPASS , 15000, 2000, AUDIO_EQ_Bands[4].gainDb);

  for (index = 0; index < SPIRIT_EQ_MAX_BANDS ; index++)
  {
    tmpEqBand = &AUDIO_EQ_Bands[index];
    SpiritEQ_FltSet((TSpiritEq *)AUDIO_EqInstance, tmpEqBand, index);
  }
  
  /* Enable the Loudness Control */
  SpiritLdCtrl_Init((TSpiritLdCtrl*)AUDIO_LdCtrlPersistance, I2S_AUDIOFREQ_48K);
  SpiritLdCtrl_GetPrms((TSpiritLdCtrl*)AUDIO_LdCtrlPersistance, &AUDIO_LdCtrlInstanceParams);
  ldness_value = k_BkupRestoreParameter(CALIBRATION_AUDIOPLAYER_LOUD_BKP);
  AUDIO_LdCtrlInstanceParams.gainQ8 = PERC_TO_LDNS_DB(ldness_value);
  SpiritLdCtrl_SetPrms((TSpiritLdCtrl*)AUDIO_LdCtrlPersistance, &AUDIO_LdCtrlInstanceParams);
#endif  
  
  /* Register audio BSP drivers callbacks */
  AUDIO_IF_RegisterCallbacks(AUDIO_TransferComplete_CallBack, 
                             AUDIO_HalfTransfer_CallBack, 
                             AUDIO_Error_CallBack);
    
    
  /* Create Audio Queue */
  osMessageQDef(AUDIO_Queue, 1, uint16_t);
  AudioEvent = osMessageCreate (osMessageQ(AUDIO_Queue), NULL); 
  
  /* Create Audio task */
  osThreadDef(osAudio_Thread, Audio_Thread, osPriorityRealtime, 0, 256);
  AudioThreadId = osThreadCreate (osThread(osAudio_Thread), NULL);  

  return AUDIOPLAYER_ERROR_NONE;
}
Example #8
/**
  * @brief  Initializes BSP Audio
  * @param  None
  * @retval None
  */
void k_BspAudioInit(void)
{
  BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_AUTO, AUDIO_DEFAULT_VOLUME, I2S_AUDIOFREQ_44K);
  BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);
}
Example #9
uint32_t Play_MOV(FIL *mfile)
{
	uint8_t _aucLine[2048];

	register uint32_t i, j;
	int ret = 0;

	uint32_t fps, frames, prevFrames, sample_time_limit;
	uint32_t samples, frameDuration, numEntry;
	uint32_t prevChunkSound, prevSamplesSound, firstChunkSound, samplesSound;
	uint32_t firstChunk = 0, totalSamples = 0, prevChunk = 0, prevSamples = 0, totalBytes = 0;
	uint32_t videoStcoCount, soundStcoCount, stco_reads;
	uint32_t prevSamplesBuff[60];

	FIL fp_sound, fp_frame, fp_frame_cp,
	    fp_stsc, fp_stsz, fp_stco,
	    fp_sound_stsc, fp_sound_stsz, fp_sound_stco;
        
        
	uint8_t fpsCnt = 0;
	//danko const char fps1Hz[] = "|/-\\";
	char timeStr[20];
 
	raw_video_typedef raw;
        int soundEndFlag = 0;
	media.sound.flag.process = 0;
	media.sound.flag.complete = 0;
	media.video.flag.process = 0;
	media.video.flag.complete = 0;

	memcpy((void*)&fp_global, (void*)mfile, sizeof(FIL));
     
	int hasChild = atomHasChild[UDTA];
	atomHasChild[UDTA] = 0;            // no child
	printf("\r\n[Atoms]");
	if(collectAtoms(mfile, mfile->fsize, 0) != 0)
	{
		printf("\r\nerror reading file contents.");
		/// dani f_close(fp);

		//DANI/// LCDStatusStruct.waitExitKey = 0;
		atomHasChild[UDTA] = hasChild; // may have a child again
		return -99;
	}

	atomHasChild[UDTA] = hasChild; // may have a child again
     
	printf("\r\n\n[Video Sample Tables]");
	printf("\r\nstts:%d", video_stts.numEntry);
	printf("\r\nstsc:%d", video_stsc.numEntry);
	printf("\r\nstsz:%d %d", video_stsz.sampleSize, video_stsz.numEntry);
	printf("\r\nstco:%d", video_stco.numEntry);

	printf("\r\n\n[Sound Sample Tables]");
	printf("\r\nstts:%d", sound_stts.numEntry);
	printf("\r\nstsc:%d", sound_stsc.numEntry);
	printf("\r\nstsz:%d %d", sound_stsz.sampleSize, sound_stsz.numEntry);
	printf("\r\nstco:%d", sound_stco.numEntry);

	printf("\r\n\n[Video Track]");
	printf("\r\nformat:%s", media.video.videoFmtString);
	printf("\r\ncompression:%s", media.video.videoCmpString);
	printf("\r\nwidth:%d", media.video.width);
	printf("\r\nheight:%d", media.video.height);
	printf("\r\ntimeScale:%d", media.video.timeScale);
	printf("\r\nduration:%d", media.video.duration);
      
	setStrSec(timeStr, (int)((float)media.video.duration / (float)media.video.timeScale + 0.5f)); // duration in seconds, rounded
	media.video.frameRate = (int16_t)((float)(media.video.timeScale * video_stsz.numEntry) / media.video.duration + 0.5f);
	printf("\r\nframe rate:%d", media.video.frameRate);
	printf("\r\ntime:%s", timeStr);

	printf("\r\n\n[Sound Track]");
	char s[5];
	s[4] = '\0';
	memcpy(s, (void*)media.sound.format.audioFmtString, 4);
        
	printf("\r\ntype:%s", s);
	printf("\r\nnumChannel:%d", media.sound.format.numChannel);
	printf("\r\nsampleSize:%d", media.sound.format.sampleSize);
	printf("\r\nsampleRate:%d", media.sound.format.sampleRate);
	printf("\r\ntimeScale:%d", media.sound.timeScale);
	printf("\r\nduration:%d", media.sound.duration);
        
        setStrSec(timeStr, (int)((float)media.sound.duration / (float)media.sound.timeScale + 0.5f));
	printf("\r\ntime:%s", timeStr);

	if(media.video.width > LCD_WIDTH || media.video.height > LCD_HEIGHT){
		printf("\r\nvideo dimensions too large.");
		f_close(mfile);
		//DANI//////////////////////////// LCDStatusStruct.waitExitKey = 0;
		atomHasChild[UDTA] = hasChild;
		return 0;
		//DANI///////////////// RET_PLAY_STOP;
	}
        

	//DANI///////////////// FUNC_VIDEO_BGIMG;
	media.video.startPosX = (LCD_WIDTH - media.video.width) / 2 - 1;
	media.video.startPosY = (LCD_HEIGHT - media.video.height) / 2 - 1;
	media.video.startPosX = media.video.startPosX > 0 ? media.video.startPosX : 0;
	media.video.startPosY = media.video.startPosY > 0 ? media.video.startPosY : 0;
//	media.video.height += (media.video.height % 2); // if value is odd number, convert to even


	printf("\r\nmedia.video.startPosX:%d", media.video.startPosX);
	printf("\r\nmedia.video.startPosY:%d", media.video.startPosY);
	printf("\r\nmedia.video.width:%d", media.video.width);
	printf("\r\nmedia.video.height:%d", media.video.height);
        
        ////////////////////////////////////////////////////////////////////////
	printf("\r\n\n[Play]\n");/*** MotionJPEG Play Process ***/
 
	// copy the file handles onto the stack
	f_lseek(mfile, 0);
	memcpy((void*)&fp_frame, (void*)mfile, sizeof(FIL));
	memcpy((void*)&fp_stsz, (void*)&video_stsz.fp, sizeof(FIL));
	memcpy((void*)&fp_stco, (void*)&video_stco.fp, sizeof(FIL));
	memcpy((void*)&fp_stsc, (void*)&video_stsc.fp, sizeof(FIL));
	numEntry = video_stsc.numEntry;

	fps = frames = prevFrames = 0;
	totalSamples = firstChunk = prevChunk = prevSamples = 0;
	if(abs(video_stco.numEntry - sound_stco.numEntry) > 50)
	{ // not interleaved correctly
		printf("\r\nError!! this is not an interleaved media.");
		goto EXIT_PROCESS;
	} else {
		prevChunk = getSampleSize(atombuf, 12, &fp_stsc);  // stsc entry: firstChunk samplesPerChunk sampleDescriptionID
		                                                   // first entry's firstChunk into prevChunk
		prevSamples = getAtomSize(&atombuf[4]);            // first entry's samplesPerChunk into prevSamples
		firstChunk = getSampleSize(atombuf, 4, &fp_stsc);  // second entry's firstChunk
		samples = firstChunk - prevChunk;
	}

	// frameDuration: a 32-bit integer that indicates how long each frame lasts in real time
	frameDuration = getVideoSampleTime(atombuf, totalSamples); // from the time-to-sample table

	// SOUND
	memcpy((void*)&fp_sound_stsz, (void*)&sound_stsz.fp, sizeof(FIL));
	memcpy((void*)&fp_sound_stco, (void*)&sound_stco.fp, sizeof(FIL));
	memcpy((void*)&fp_sound_stsc, (void*)&sound_stsc.fp, sizeof(FIL));
	memcpy((void*)&fp_sound, (void*)mfile, sizeof(FIL));

	prevChunkSound   = getSampleSize(atombuf, 12, &fp_sound_stsc); // first entry's firstChunk into prevChunkSound
	prevSamplesSound = (getAtomSize(&atombuf[4]) / 100) * 100;     // first entry's samplesPerChunk, rounded down to a multiple
	                                                               // of 100 so the sound buffer is not left half-filled
	firstChunkSound  = getSampleSize(atombuf, 4, &fp_sound_stsc);  // second entry's firstChunk

	samplesSound = (firstChunkSound - prevChunkSound) * prevSamplesSound;

//	uint8_t SOUND_BUFFER[38400];
//	uint8_t SOUND_BUFFER[12800];
	uint16_t soundSampleByte = media.sound.format.sampleSize / 8;
	uint32_t soundSampleBlocks = soundSampleByte * media.sound.format.numChannel;

	float timeScaleCoeff = (1.0f / media.video.timeScale) * 100000;

	dac_intr.fp = &fp_sound;
	dac_intr.buff = (uint8_t*)frame_buffer; // the sound data is placed in the frame buffer here
	//dac_intr.buff = SOUND_BUFFER;
	dac_intr.bufferSize = ((media.sound.format.sampleRate / 10) * 2) * soundSampleByte * media.sound.format.numChannel; // how much of the frame buffer holds sound
//	if(media.sound.format.sampleSize == 16){
//		dac_intr.func = DAC_Buffer_Process_Stereo_S16bit;
//	} else {
//		dac_intr.func = DAC_Buffer_Process_Mono_U8bit;
//	}

	memset(dac_intr.buff, 0, dac_intr.bufferSize);
	f_lseek(&fp_sound, getSampleSize(atombuf, 4, &fp_sound_stco));
        
	dac_intr.sound_reads = 0;

	stco_reads = 1;
	printf("\r\nframeDuration:%d", frameDuration);
	TIM_HandleTypeDef Tim1SecHandle, TimDurationHandle;

	// TIM_DURATION (TIM3) runs with a 0.01 ms tick for the per-frame sample time
	TimDurationHandle.Instance = TIM_DURATION;
	HAL_TIM_Base_DeInit(&TimDurationHandle);
	TimDurationHandle.Init.Period = (100000 * frameDuration) / media.video.timeScale - 1;
	TimDurationHandle.Init.Prescaler = ((SystemCoreClock / 2) / 100000) * 2 - 1; // 0.01 ms
	TimDurationHandle.Init.CounterMode = TIM_COUNTERMODE_UP;
	TimDurationHandle.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1;
	if(HAL_TIM_Base_Init(&TimDurationHandle) != HAL_OK)
	{
		while(1);
	}
	HAL_TIM_Base_Start(&TimDurationHandle);

	/*
	while(1)
	{
		if(TIM3_SR_UIF_BB){
			TIM3_SR_UIF_BB = 0;
			HAL_GPIO_TogglePin(GPIOA, GPIO_PIN_5);
			TIM3->CNT = 0;
		}
	}
	*/

	Tim1SecHandle.Instance = TIM_1SEC;
	HAL_TIM_Base_DeInit(&Tim1SecHandle);
	Tim1SecHandle.Init.Prescaler =  100 - 1;
	Tim1SecHandle.Init.Period =   10000 - 1;
	Tim1SecHandle.Init.RepetitionCounter = (SystemCoreClock / 1000000UL) - 1;
	Tim1SecHandle.Init.CounterMode = TIM_COUNTERMODE_UP;
	Tim1SecHandle.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1;
	if(HAL_TIM_Base_Init(&Tim1SecHandle) != HAL_OK)
	{
		while(1);
	}
	HAL_TIM_Base_Start_IT(&Tim1SecHandle);



	// Video
	pv_src.firstChunk = &firstChunk;
	pv_src.prevChunk = &prevChunk;
	pv_src.prevSamples = &prevSamples;
	pv_src.samples = &samples;
	pv_src.totalSamples = &totalSamples;
	pv_src.videoStcoCount = &videoStcoCount;

	pv_src.fp_video_stsc = &fp_stsc;
	pv_src.fp_video_stsz = &fp_stsz;
	pv_src.fp_video_stco = &fp_stco;
	pv_src.fp_frame =      &fp_frame;

	// Sound
	ps_src.firstChunk = &firstChunkSound;
	ps_src.prevChunk = &prevChunkSound;
	ps_src.prevSamples = &prevSamplesSound;
	ps_src.samples = &samplesSound;
	ps_src.soundStcoCount = &soundStcoCount;

	ps_src.fp_sound_stsc = &fp_sound_stsc;
	ps_src.fp_sound_stsz = &fp_sound_stsz;
	ps_src.fp_sound_stco = &fp_sound_stco;

//DANI      	mjpeg_touch.resynch = 0;
//DANI		LCD_SetRegion(media.video.startPosX, media.video.startPosY, media.video.startPosX + media.video.width - 1, media.video.startPosY + media.video.height - 1);

	float limitter;

	switch(SystemCoreClock){
	case 168000000:
		limitter = 0.91f;
		break;
	case 200000000:
		limitter = 0.93f;
		break;
	case 240000000:
		limitter = 0.96f;
		break;
	case 250000000:
		limitter = 0.98f;
		break;
	default:
		limitter = 0.8f;
		break;
	}

	videoStcoCount = 0, soundStcoCount = 0;


//DANI	
//DANI		pcf_font_typedef pcf_font_bak;
//DANI		if(pcf_font.ext_loaded)
//DANI		{
//DANI			memcpy((void*)&pcf_font_bak, (void*)&pcf_font, sizeof(pcf_font_typedef));
//DANI			/* internal flash pcf font */
//DANI			C_PCFFontInit((uint32_t)internal_flash_pcf_font, (size_t)_sizeof_internal_flash_pcf_font);
//DANI			PCF_RENDER_FUNC_C_PCF();
//DANI		}
 


	
 	    
 //DANI	   BSP_AUDIO_OUT_Init(0, 0, media.sound.format.sampleSize, media.sound.format.numChannel >= 2 ? media.sound.format.sampleRate : media.sound.format.sampleRate / 2);
 
     
        
	if(BSP_AUDIO_OUT_Init(OUTPUT_DEVICE_SPEAKER, 50,
	                      media.sound.format.numChannel >= 2 ? media.sound.format.sampleRate : media.sound.format.sampleRate / 2) != 0)
	{
		printf("\r\nAudio Init Error..");
	}
	BSP_AUDIO_OUT_SetAudioFrameSlot(CODEC_AUDIOFRAME_SLOT_02);

	//	wm8731_left_headphone_volume_set(121 + vol);

//DANI		printf("\r\nhaudio_i2s.State:%d", haudio_i2s.State);

	//HAL_StatusTypeDef errorState;
	//HAL_I2S_Transmit_DMA(&haudio_i2s, (uint16_t*)dac_intr.buff, DMA_MAX(dac_intr.bufferSize / ( AUDIODATA_SIZE )));

	BSP_AUDIO_OUT_Play((uint16_t*)dac_intr.buff, DMA_MAX(dac_intr.bufferSize / AUDIODATA_SIZE));

	Play_WAV(dac_intr.fp, DMA_MAX(dac_intr.bufferSize / AUDIODATA_SIZE), 70);

//DANI		DMA_SOUND_IT_ENABLE;
//DANI		LCDStatusStruct.waitExitKey = 1;

	int outflag = 0, count = 0, pause = 0;

	while(1){
	CHUNK_OFFSET_HEAD:

		for(j = 0; j < samples; j++){

			f_lseek(&fp_frame, getSampleSize(atombuf, 4, &fp_stco)); // chunk offset atom
			if(media.video.playJpeg)
			{
				UINT br;
				//my_fread(prevSamplesBuff, 1, prevSamples * 4, &fp_stsz);
				f_read(&fp_stsz, prevSamplesBuff, prevSamples * 4, &br); // sample size atoms
			}

			for(i = 0; i < prevSamples; i++)
			{
				sample_time_limit = TIM_DURATION->ARR * limitter;

				frameDuration = getVideoSampleTime(atombuf, ++totalSamples); // get next frame duration

				//DANKO LCD_SetGramAddr(media.video.startPosX, media.video.startPosY);
				//DANKO LCD_CMD(0x002C);

				raw.output_scanline = 0;
				raw.frame_size = media.video.width * media.video.height * sizeof(uint16_t);

				raw.rasters = RASTER;
				raw.buf_size = raw.rasters * media.video.width * sizeof(uint16_t);
				memcpy((void*)&fp_frame_cp, (void*)&fp_frame, sizeof(FIL));

				f_lseek(&fp_frame, raw.frame_size);
				totalBytes += raw.frame_size;

				//danko DMA_SOUND_IT_ENABLE; // Enable DAC interrupt

				uint32_t tim = 1;
				while(tim--) //!TIM3_SR_UIF_BB)
				{ // while TIM3->SR update flag is unset
					//danko if ((raw.output_scanline < media.video.height) && (TIM_DURATION->CNT < sample_time_limit))
					{ // uncompress and draw rasters
						if(raw.frame_size < raw.buf_size){
							raw.buf_size = raw.frame_size;
						}

				//danko 		while(SpiLcdHandle.State != HAL_SPI_STATE_READY)
						{
				//danko 			if((TIM_DURATION->CNT >= sample_time_limit)){
				//danko 				HAL_DMA_Abort(SpiLcdHandle.hdmatx);
				//danko 				SPI_LCD_NSS_PIN_DEASSERT;
				//danko 				goto EXIT_LOOP;
	                                        }
			                  }

				//danko    DMA_SOUND_IT_DISABLE;
				//danko    my_fread((void*)LINE_BUFFER, 1, raw.buf_size, &fp_frame_cp);
         
                                    
                  ////////////jpeg_decode(&fp_frame_cp, IMAGE_WIDTH, _aucLine, Jpeg_CallbackFunction);

					//danko 	DMA_SOUND_IT_ENABLE;
					//danko 	SPI_LCD_NSS_PIN_ASSERT;
					//danko 	SPI_LCD_RS_PIN_DEASSERT;
	//!!! HAL_SPI_Transmit_DMA(&SpiLcdHandle, (uint8_t*)LINE_BUFFER, raw.buf_size / sizeof(uint16_t));
            
						raw.frame_size -= raw.buf_size;
						raw.output_scanline += raw.rasters;
			
                                
					if((abs(soundStcoCount - videoStcoCount) > 1) && !soundEndFlag)
                                        { // correct synch unmatch
						if(soundStcoCount >= (sound_stco.numEntry - 2) || videoStcoCount >= (video_stco.numEntry - 2))
                                                {goto EXIT_PROCESS;}
                                                
					//danko	mjpeg_touch.resynch = 1;
					//danko	mjpeg_touch.resynch_entry = soundStcoCount > videoStcoCount ? videoStcoCount : soundStcoCount;
						printf("\r\n*synch unmatch at video_stco:%d sound_stco:%d\n", videoStcoCount, soundStcoCount);
					//danko	DMA_SOUND_IT_DISABLE; // Disable DAC interrupt
					//danko	mjpegTouch(id, mjpeg_touch.resynch_entry); //Touch pen interrupt processing
						samples /= prevSamples;
					//danko	mjpeg_touch.resynch = 0;
						getVideoSampleTime(atombuf, 0); // reset sample time
						getVideoSampleTime(atombuf, totalSamples); // get next sample time
                                                dac_intr.sound_reads = prevSamplesSound * soundSampleBlocks; // fill DAC buffer
						videoStcoCount -= 2, soundStcoCount -= 2;
					goto CHUNK_OFFSET_HEAD;
					}
                 	
				 	if(dac_intr.sound_reads >= (prevSamplesSound * soundSampleBlocks))
                                        {
						if(++soundStcoCount < sound_stco.numEntry)
                                                {
							soundEndFlag = 0;

					 		totalBytes += dac_intr.sound_reads;

							
                                                            //my_fseek(dac_intr.fp, getSampleSize(atombuf, 4, &fp_sound_stco), SEEK_SET);
                                               		      f_lseek(dac_intr.fp,  getSampleSize(atombuf, 4, &fp_sound_stco));
         

					   		dac_intr.sound_reads = 0;
							if(++stco_reads > samplesSound){
								stco_reads = 0;
								prevChunkSound = firstChunkSound; // previous firstChunk becomes prevChunk
								prevSamplesSound = getSampleSize(atombuf, 12, &fp_sound_stsc); // samplesPerChunk sampleDescriptionID
								firstChunkSound = getAtomSize(&atombuf[8]); // next firstChunk
								samplesSound = firstChunkSound - prevChunkSound; // number of samples in the next sound chunk to play
							}
						} else {
							soundEndFlag = 1;
					 		dac_intr.sound_reads = 0;
						//danko DMA_SOUND_IT_DISABLE;
						        }
					}
/*//danko 
					if(!outflag && (++count >= 100000))
                                        {
						outflag = 1;
						if(!music_control.b.mute){
							HAL_I2S_DMAPause(&haudio_i2s);
							Delay_us(3);
							wm8731_left_headphone_volume_set(121 + vol);
							HAL_I2S_DMAResume(&haudio_i2s);
						                          }
					}
*///danko 

                                        
             //case PLAY_LOOP_MODE:
                                        {
		 			//danko	HAL_I2S_DMAPause(&haudio_i2s);
		 			//danko	Delay_us(3);
	 	 			//danko	wm8731_left_headphone_volume_set(0);
		 			//danko	HAL_I2S_DMAResume(&haudio_i2s);
		 			//danko	DMA_SOUND_IT_DISABLE;

						raw.frame_size = media.video.width * media.video.height * sizeof(uint16_t);
						memcpy((void*)&fp_frame_cp, (void*)&fp_frame, sizeof(FIL));
						//my_fseek(&fp_frame_cp, -raw.frame_size, SEEK_CUR);
						f_lseek(&fp_frame_cp, f_tell(&fp_frame_cp) - raw.frame_size); // seek back one frame (f_lseek takes an absolute offset)
						memset((void*)frame_buffer, 0, FRAME_BUFFER_SIZE);

						int v;
						UINT br;
						for(v = 0; v < media.video.height; v++)
						{
							//my_fread(&frame_buffer[media.video.startPosX + v * LCD_WIDTH + media.video.startPosY * LCD_WIDTH], 2, media.video.width, &fp_frame_cp);
							f_read(&fp_frame_cp, &frame_buffer[media.video.startPosX + v * LCD_WIDTH + media.video.startPosY * LCD_WIDTH], 2 * media.video.width, &br);
						}

					//danko		ret = mjpegPause(id);
						outflag = 0, count = 0;
                                                /*
						if(ret == RET_PLAY_STOP || ret == RET_PLAY_NEXT || ret == RET_PLAY_PREV){
							goto END_PROCESS;
						}
						if(ret == 1){ // ????????? ???:0 ?????? :1 ??????????????
							samples /= prevSamples;
							getVideoSampleTime(atombuf, 0); // ??????????
							getVideoSampleTime(atombuf, totalSamples); // ?????????????
							dac_intr.sound_reads = prevSamplesSound * soundSampleBlocks; // DAC???????????????
//							videoStcoCount -= 2, soundStcoCount -= 2;
							ret = 0;
							goto CHUNK_OFFSET_HEAD;
						}
		 				LCDStatusStruct.waitExitKey = 1;
						break;                           
                                                 */
                                        
                                        }          
         
                                        
                        }
	EXIT_LOOP:
 
				// Per frame time duration timer (specified in the 1 / 100ms units)
 			//danko	TIM_DURATION->ARR = frameDuration * timeScaleCoeff - 1;
 			//danko	TIM_DURATION->CR1 = 0;
  			//danko	TIM_DURATION->CNT = 0; // clear counter
 			//danko	TIM3_SR_UIF_BB = 0;    // clear update flag
 				//DANKO/////////////////////////////////////TIM3_DIER_UIE_BB = 1;  // set update interrupt
 				//DANKO/////////////////////////////////////TIM3_CR1_CEN_BB = 1;   // enable tim3

				frames++;
/*
				if(TIM1_SR_UIF_BB){ // ??????????
					TIM1_SR_UIF_BB = 0;
					fps = frames - prevFrames;
					debug.printf("\r%c%dfps %dkbps v:%d s:%d  ", fps1Hz[fpsCnt++ & 3], fps, (int)((float)(totalBytes * 8) * 0.001f), videoStcoCount, soundStcoCount);
					prevFrames = frames;
					totalBytes = 0;
				                  }
*/
 
			}
//			AUDIO_OUT_ENABLE;
			if(++videoStcoCount >= video_stco.numEntry)
			{ // || soundStcoCount >= (sound_stco.numEntry)){
				goto END_PROCESS; // all video chunks up to the last one have been played
			}
		} // for samples

		prevChunk = firstChunk; // previous firstChunk becomes prevChunk
		prevSamples = getSampleSize(atombuf, 12, &fp_stsc); // samplesPerChunk sampleDescriptionID
		firstChunk = getAtomSize(&atombuf[8]); // next firstChunk
		samples = firstChunk - prevChunk; // number of samples for the next chunk to play
	} // while

	END_PROCESS:
//	AUDIO_OUT_SHUTDOWN;
	printf("\r\ntotal_samples:%d video_stco_count:%d sound_stco_count:%d", totalSamples, videoStcoCount, soundStcoCount);
//	debug.printf("\r\ntotalRasters:%d", totalRasters);

//danko 	HAL_I2S_DMAStop(&haudio_i2s);
//danko 	DMA_SOUND_IT_DISABLE;
	HAL_Delay(10); // 10 ms
//	wm8731_set_active(0);

//danko 	wm8731_left_headphone_volume_set(121 -121);

//	if(media.video.playJpeg){
//		(void) jpeg_finish_decompress(&jdinfo);
//		jpeg_destroy_decompress(&jdinfo);
//	}

	EXIT_PROCESS:

	memset(dac_intr.buff, 0, dac_intr.bufferSize);
	//HAL_I2S_Transmit(&haudio_i2s, (uint16_t*)dac_intr.buff, dac_intr.bufferSize / sizeof(uint16_t), 100);
	BSP_AUDIO_OUT_Play((uint16_t*)dac_intr.buff, dac_intr.bufferSize / sizeof(uint16_t));

	dac_intr.func = NULL;
	//danko f_close(fp);

//danko 	LCD_SetRegion(0, 0, LCD_WIDTH - 1, LCD_HEIGHT - 1);

//danko 	LCD_DrawSquare(0, 0, LCD_WIDTH, LCD_HEIGHT, BLACK);

//danko 	if(pcf_font.ext_loaded)
	{
//danko 		memcpy((void*)&pcf_font, (void*)&pcf_font_bak, sizeof(pcf_font_typedef));
//danko 		PCF_RENDER_FUNC_PCF();
	}

//danko 	LCDStatusStruct.waitExitKey = 0;

	return ret;
}