Beispiel #1
0
/* Kick off audio output and drive the emulated machine until it stops. */
void machine_run()
{
	/* Begin audio playback before entering the main loop */
	audio_start();

	/* Mark the machine as active */
	machine->running = true;

#ifndef EMSCRIPTEN
	/* Native build: spin the emulation loop ourselves */
	for (;;) {
		if (!machine->running)
			break;

		/* Advance every registered clock (optionally synced) */
		clock_tick_all(!no_sync);

		/* Honour a finite cycle budget when one was requested */
		if (cycles > 0) {
			if (--cycles == 0)
				machine->running = false;
		}
	}

	/* Tear everything down once the loop exits */
	quit();
#else
	/* Browser build: hand the loop over to emscripten */
	emscripten_set_main_loop(emscripten_run, 0, 0);
#endif
}
Beispiel #2
0
/*
 * Entry point: records audio to out/record.dog, optionally showing a live
 * ncurses waveform view, while a crude detection thread runs alongside.
 *
 * Usage:
 *   prog      record with no UI
 *   prog n    record with the ncurses waveform viewer
 *   prog r    render out/record.dog to waveform.png and exit
 *
 * Fixes vs. original: removed the dead (void)argc/(void)argv casts (both
 * are actually used below), and nc_stop() is no longer called on the
 * detection-thread error path when ncurses was never initialized.
 */
int main(int argc, char **argv) {
    PaStream *stream;
    sound data;
    writer wargs;
    pthread_t file_write_thread, wave_viewer, crude_detector;
    int nc_active = 0;  /* nonzero once nc_setup() has run */

    /* "r" mode: just render a previous recording to a PNG and exit */
    if (argc >= 2 && *argv[1] == 'r'){
        png_view_create("out/record.dog","waveform.png");
        return 0;
    }


    signal(SIGINT, shutdown);

    wargs.df = create_dogfile("out/record.dog", DF_COMPRESSED, DF_LOSSLESS);
    wargs.data = &data;

    audio_init(&stream, &data);
    audio_start(stream);
    if (pthread_create(&file_write_thread, NULL, file_writer, &wargs)){
        fprintf(stderr, "Error creating file writer\n");
        close();
        close_file(wargs.df);
        exit(1);
    }

    /* "n" mode: show the live waveform in an ncurses window */
    if (argc >= 2 && *argv[1] == 'n'){
        nc_setup();
        nc_active = 1;
        if (pthread_create(&wave_viewer, NULL, nc_view, &data)){
            fprintf(stderr, "Error creating waveform viewer\n");
            nc_stop();
            close();
            close_file(wargs.df);
            exit(1);
        }
    }

    detection_start();
    if (pthread_create(&crude_detector, NULL, detect, &data)){
        fprintf(stderr, "Error creating detection thread\n");
        /* Only tear ncurses down if it was actually started */
        if (nc_active)
            nc_stop();
        close();
        close_file(wargs.df);
        exit(1);
    }

    /* Block until the audio stream finishes (e.g. SIGINT handler stops it) */
    audio_wait(stream);


    //nc_stop();
    close();
    close_file(wargs.df);

    return 0;
}
Beispiel #3
0
/*
 * Firmware entry point for the synth.
 *
 * With AUDIO_HW_TEST_THROUGHPUT defined, only the audio path is brought up
 * so raw throughput can be measured.  Otherwise the full set of subsystems
 * (MIDI, sample table, signal chain, scheduler, LEDs, timers, switches,
 * ADC, presets, controls) is initialized before audio is started.  main()
 * never returns in practice: all real work happens in interrupt/callback
 * context while the foreground loop idles.
 */
int main (void)
{
#ifdef AUDIO_HW_TEST_THROUGHPUT 
    if (audio_setup(NULL)) {
        THROW_ERR("Error setting up audio.");
    }
    audio_start();
    /* Idle forever; audio runs from interrupt/callback context */
    while(1) {
    }
#else
    if (audio_setup(NULL)) {
        THROW_ERR("Error setting up audio.");
    }
    if (midi_setup(NULL)) {
        THROW_ERR("Error setting up MIDI.");
    }
    SampleTable_init();
    signal_chain_setup();
    synth_control_setup();
    scheduler_setup();
    leds_setup();
    timers_setup();
    switches_setup();
    adc_setup_dma_scan(adc_mode_1SHOT);
    adc_channel_setup();
    synth_adc_control_setup();
    adc_start_conversion();
    /* Fetch persisted settings before wiring up the controls */
    int midi_channel = -1;
    int reset_request = 0;
    supo_get_midi_channel(&midi_channel);
    supo_get_preset_reset_rqst(&reset_request);
    sc_presets_init(reset_request,&midi_channel);
    synth_switch_control_setup();
    synth_midi_control_setup(midi_channel);
    /* Start audio last, once every subsystem is ready */
    audio_start();
#if defined(TIMER_EVENT_TEST) || defined(TIMER_TEST)
    timers_enable();
#endif
    /* Idle forever; audio runs from interrupt/callback context */
    while(1) {
    }
#endif /* AUDIO_HW_TEST_THROUGHPUT */
    return(0);
}
Beispiel #4
0
/* Firmware entry point: bring up the audio path, LEDs, switches and the
 * SDT module, start audio processing, then idle forever. */
int main (void)
{
    if (audio_setup(NULL)) {
        THROW_ERR("Error setting up audio.");
    }

    /* Remaining peripherals */
    leds_setup();
    switches_setup();
    sdt_setup();

    /* Kick off audio; all further work happens outside this loop */
    audio_start();

    /* Never returns */
    for (;;) {
    }
}
Beispiel #5
0
/*
 * Switch the transmit path of an audio object to a new encoder codec.
 *
 * @param a       audio object to update (must be non-NULL)
 * @param ac      audio codec to encode with (must be non-NULL)
 * @param pt_tx   RTP payload type to use for transmit
 * @param params  optional codec parameters forwarded to the encoder
 *                update handler
 *
 * @return 0 on success, EINVAL on bad arguments, otherwise an error code
 *         from the encoder update handler or from restarting audio.
 */
int audio_encoder_set(struct audio *a, const struct aucodec *ac,
		      int pt_tx, const char *params)
{
	struct autx *tx;
	int err = 0;
	bool reset;

	if (!a || !ac)
		return EINVAL;

	tx = &a->tx;

	/* A "reset" means the new codec is not equivalent to the current one,
	 * so the audio source must be torn down and started again */
	reset = !aucodec_equal(ac, tx->ac);

	if (ac != tx->ac) {
		(void)re_fprintf(stderr, "Set audio encoder: %s %uHz %dch\n",
				 ac->name, get_srate(ac), ac->ch);

		/* Audio source must be stopped first */
		if (reset) {
			tx->ausrc = mem_deref(tx->ausrc);
		}

		tx->is_g722 = (0 == str_casecmp(ac->name, "G722"));
		tx->enc = mem_deref(tx->enc);
		tx->ac = ac;
	}

	/* (Re)allocate the encoder state via the codec's update handler */
	if (ac->encupdh) {
		struct auenc_param prm;

		prm.ptime = tx->ptime;

		err = ac->encupdh(&tx->enc, ac, &prm, params);
		if (err) {
			DEBUG_WARNING("alloc encoder: %m\n", err);
			return err;
		}
	}

	/* Propagate the new sample rate and payload type to the stream */
	stream_set_srate(a->strm, get_srate(ac), get_srate(ac));
	stream_update_encoder(a->strm, pt_tx);

	/* Restart audio if the source was dropped above (or never existed) */
	if (!tx->ausrc) {
		err |= audio_start(a);
	}

	return err;
}
Beispiel #6
0
int main (int argc, char **argv)
{
    progname = *argv;
    for (;;) {
        switch (getopt (argc, argv, "dw:t:")) {
        case EOF:
            break;
        case 'w':
            wpm = strtol (optarg, 0, 0);
            continue;
        case 't':
            tone = strtol (optarg, 0, 0);
            continue;
        case 'd':
            ++paddle_debug;
            continue;
        default:
            usage ();
        }
        break;
    }
    argc -= optind;
    argv += optind;

    if (argc != 0 || wpm == 0)
        usage ();

    keyer_init (tone, wpm);
    paddle_open();
    atexit (quit);
    signal (SIGINT, sigint);

    printf ("Speed: %d wpm\n", wpm);
    printf ("Tone: %d Hz\n", tone);
    printf ("Keyer ready.\n");
    audio_start();

    for (;;) {
        int daah = 0, dit = 0;
        const char *c;

        paddle_poll (&daah, &dit);
        c = keyer_decode (daah, dit);
        if (c) {
            printf ("%s", c);
            fflush (stdout);
        }
    }
}
Beispiel #7
0
/*
 * Switch the receive path of an audio object to a new decoder codec.
 *
 * @param a       audio object to update (must be non-NULL)
 * @param ac      audio codec to decode with (must be non-NULL)
 * @param pt_rx   RTP payload type expected on receive
 * @param params  optional codec parameters forwarded to the decoder
 *                update handler
 *
 * @return 0 on success, EINVAL on bad arguments, otherwise an error code
 *         from the decoder update handler or from restarting audio.
 */
int audio_decoder_set(struct audio *a, const struct aucodec *ac,
		      int pt_rx, const char *params)
{
	struct aurx *rx;
	bool reset = false;
	int err = 0;

	if (!a || !ac)
		return EINVAL;

	rx = &a->rx;

	/* A "reset" means the new codec is not equivalent to the current one,
	 * so the player and filter chain must be rebuilt below */
	reset = !aucodec_equal(ac, rx->ac);

	if (ac != rx->ac) {

		(void)re_fprintf(stderr, "Set audio decoder: %s %uHz %dch\n",
				 ac->name, get_srate(ac), ac->ch);

		rx->pt = pt_rx;
		rx->ac = ac;
		rx->dec = mem_deref(rx->dec);
	}

	/* (Re)allocate the decoder state via the codec's update handler */
	if (ac->decupdh) {
		err = ac->decupdh(&rx->dec, ac, params);
		if (err) {
			DEBUG_WARNING("alloc decoder: %m\n", err);
			return err;
		}
	}

	stream_set_srate(a->strm, get_srate(ac), get_srate(ac));

	if (reset) {

		/* Drop the current audio player; audio_start() re-creates it */
		rx->auplay = mem_deref(rx->auplay);

		/* Reset audio filter chain */
		list_flush(&a->filtl);

		err |= audio_start(a);
	}

	return err;
}
Beispiel #8
0
/*
 * Play a WAV file through a PortAudio output device.
 *
 * Usage: prog <device_id> <wav_file>
 *
 * With the wrong number of arguments, lists available devices and exits.
 * Returns 0 on success, nonzero on any setup failure.
 *
 * Fixes vs. original: the wav file is closed when audio_start() fails
 * (it was leaked on that path), and the "devide_id" typo in the usage
 * message is corrected.
 */
int main(int argc, char* argv[])
{
    PaError err = paNoError;
    int device_id;
    BOOL rv;

    err = Pa_Initialize();
    if (err != paNoError) {
        fprintf(stderr, "Pa_Initialize() failed...\n");
        exit(1);
    }

    if (argc != 3) {
        printf("usage: %s device_id wav_file\n\n", argv[0]);
        print_deviceinfo();
        Pa_Terminate();
        return 1;
    }

    rv = wavfile_open(argv[2]);
    if (rv == FALSE) {
        fprintf(stderr, "wav file open failed...\n");
        Pa_Terminate();
        exit(1);
    }

    device_id = atoi(argv[1]);
    rv = audio_start(device_id);
    if (rv == FALSE) {
        fprintf(stderr, "AudioDevice initialize failed...device_id=%d\n", device_id);
        wavfile_close();   /* don't leak the open wav file on this path */
        Pa_Terminate();
        return 1;
    }

    /* Poll until the playback callback signals completion */
    while (1) {
        Pa_Sleep(100);
        if (finish_flag == TRUE) break;
    }

    audio_stop();
    Pa_Terminate();

    wavfile_close();

    return 0;
}
Beispiel #9
0
int main(int argc, char *argv[]) {
	GError *error = NULL;
	GOptionContext *context;


	printf("lysdr starting\n");

	// gtk3 remove threads, see how badly things go wrong
	//gdk_threads_init();
	//gdk_threads_enter();

	gtk_init(&argc, &argv);

	context = g_option_context_new ("-");
	g_option_context_add_main_entries (context, opts, NULL);
	g_option_context_add_group (context, gtk_get_option_group (TRUE));
	if (!g_option_context_parse (context, &argc, &argv, &error)) {
		g_print ("option parsing failed: %s\n", error->message);
		exit (1);
	}

	// create a new SDR, and set up the jack client
	sdr = sdr_new(fft_size);
	audio_start(sdr);

	// define a filter and configure a default shape
	sdr->filter = filter_fir_new(128, sdr->size);

	// hook up the jack ports and start the client
	fft_setup(sdr);
	audio_connect(sdr, connect_input, connect_output);

	sdr->centre_freq = centre_freq;

	gui_display(sdr);
	gtk_adjustment_set_value(GTK_ADJUSTMENT(sdr->tuning), 0);

	gtk_main();
	audio_stop(sdr);
	filter_fir_destroy(sdr->filter);
	fft_teardown(sdr);

	sdr_destroy(sdr);
	//gdk_threads_leave();
}
Beispiel #10
0
/* libretro: load a game image, bring the emulated machine up, and start
 * audio processing.  Returns true on success, false if machine
 * initialization fails. */
bool retro_load_game(const struct retro_game_info *info)
{
	/* Hand the content path over to the command-line layer */
	cmdline_set_param(NULL, NULL, (char *)info->path);

	/* Bring the machine up; bail out early on failure */
	if (!machine_init()) {
		LOG_E("Failed to initialize machine!\n");
		return false;
	}

	/* Remember that init succeeded so teardown knows what to undo */
	machine_initialized = true;

	/* Begin audio output */
	audio_start();

	return true;
}
Beispiel #11
0
// Initialize the Aeolus organ synthesizer.
//
// samplerate: audio sample rate, forwarded to the base Synthesizer and the
// audio backend.  Loads the stop definitions from the installed share
// directory, ensures a per-samplerate wave cache directory exists, then
// brings up audio and the organ model.
void Aeolus::init(float samplerate)
      {
      Synthesizer::init(samplerate);

      setlocale(LC_ALL, "C"); // scanf of floats does not work otherwise

      // Path to the organ stop definitions shipped with the application
      QString stops = mscoreGlobalShare + "/sound/aeolus/stops";
      int n = strlen(qPrintable(stops));
      char* stopsPath = new char[n+1];
      strcpy(stopsPath, qPrintable(stops));

      // Create (if needed) the cache directory for rendered wavetables,
      // one directory per sample rate
      QDir dir;
      QString waves = dataPath + QString("/aeolus/waves%1").arg(samplerate);
      dir.mkpath(waves);
      n = strlen(qPrintable(waves));
      char* wavesPath = new char[n+1];
      strcpy(wavesPath, qPrintable(waves));

      audio_init(samplerate);
      // NOTE(review): stopsPath/wavesPath are heap-allocated and handed to
      // Model with no visible delete[] here -- presumably Model takes
      // ownership; confirm before changing.
      model = new Model (this, _midimap, stopsPath, "Aeolus", wavesPath);

      audio_start();
      model->init();
      }
Beispiel #12
0
// Initialize the Aeolus organ synthesizer (integer-samplerate variant).
//
// samplerate: audio sample rate forwarded to the audio backend.  Loads the
// stop definitions, ensures a per-samplerate wave cache directory exists,
// then brings up audio and the organ model.
void Aeolus::init(int samplerate)
      {
      setlocale(LC_ALL, "C"); // scanf of floats does not work otherwise

      // NOTE(review): the stops path below is hard-coded to one developer's
      // machine (self-described HACK) and shadows the original
      // mscoreGlobalShare-based path -- this will break on any other host;
      // restore the commented-out line before shipping.
      // QString stops = mscoreGlobalShare + "/sound/aeolus/stops";
      QString stops = "/Users/jenniferguo/cos597b/FinalProject/resources/MuseScoreXcode/aeolus/stops/";     // naturegirl. HACK!!
      int n = strlen(qPrintable(stops));
      char* stopsPath = new char[n+1];
      strcpy(stopsPath, qPrintable(stops));

      // Create (if needed) the cache directory for rendered wavetables
      QDir dir;
      QString waves = dataPath + QString("/aeolus/waves%1").arg(samplerate);
      dir.mkpath(waves);
      n = strlen(qPrintable(waves));
      char* wavesPath = new char[n+1];
      strcpy(wavesPath, qPrintable(waves));

      audio_init(samplerate);
      // NOTE(review): stopsPath/wavesPath ownership presumably passes to
      // Model (no delete[] here); confirm.
      model = new Model (this, _midimap, stopsPath, "Aeolus", wavesPath);

      audio_start();
      model->init();
//      printGui();
      }
Beispiel #13
0
/* Start "video" playback for the given handle; the implementation simply
 * delegates to audio_start(), which handles this case as well. */
static javacall_result video_start(javacall_handle handle)
{
    return audio_start(handle);
}
Beispiel #14
0
/*
 * Xep128 (Enterprise-128 emulator) entry point.
 *
 * Initializes SDL, configuration, file I/O, screen, GUI, audio, the Z80
 * core and the Nick chip, optionally loads a snapshot or the ROM set,
 * wires up peripherals (mouse, keyboard, joystick, SD/EXDOS/W5300 where
 * configured), then enters the emulation loop -- via emscripten's main
 * loop in browser builds, or an infinite loop natively.  Returns 1 on any
 * initialization failure; the final return 0 is normally unreachable.
 */
int main (int argc, char *argv[])
{
	const char *snapshot;
	atexit(shutdown_sdl);
	if (SDL_Init(
#ifdef __EMSCRIPTEN__
		// It seems there is an issue with emscripten SDL2: SDL_Init does not work if TIMER and/or HAPTIC is tried to be intialized or just "EVERYTHING" is used!!
		SDL_INIT_EVERYTHING & ~(SDL_INIT_TIMER | SDL_INIT_HAPTIC)
#else
		SDL_INIT_EVERYTHING
#endif
	) != 0) {
		ERROR_WINDOW("Fatal SDL initialization problem: %s", SDL_GetError());
		return 1;
	}
	if (config_init(argc, argv)) {
#ifdef __EMSCRIPTEN__
		ERROR_WINDOW("Error with config parsing. Please check the (javascript) console of your browser to learn about the error.");
#endif
		return 1;
	}
	guarded_exit = 1;	// turn on guarded exit, with custom de-init stuffs
	DEBUGPRINT("EMU: sleeping = \"%s\", timing = \"%s\"" NL,
		__SLEEP_METHOD_DESC, __TIMING_METHOD_DESC
	);
	fileio_init(
#ifdef __EMSCRIPTEN__
		"/",
#else
		app_pref_path,
#endif
	"files");
	if (screen_init())
		return 1;
	if (xepgui_init())
		return 1;
	audio_init(config_getopt_int("audio"));
	z80ex_init();
	set_ep_cpu(CPU_Z80);
	ep_pixels = nick_init();
	if (ep_pixels == NULL)
		return 1;
	// Load a snapshot if one was requested; on failure fall back to ROM boot
	snapshot = config_getopt_str("snapshot");
	if (strcmp(snapshot, "none")) {
		if (ep128snap_load(snapshot))
			snapshot = NULL;
	} else
		snapshot = NULL;
	// Without a snapshot, a full ROM/RAM configuration is needed
	if (!snapshot) {
		if (roms_load())
			return 1;
		primo_rom_seg = primo_search_rom();
		ep_set_ram_config(config_getopt_str("ram"));
	}
	mouse_setup(config_getopt_int("mousemode"));
	ep_reset();
	kbd_matrix_reset();
	joy_sdl_event(NULL); // this simply inits joy layer ...
#ifdef CONFIG_SDEXT_SUPPORT
	if (!snapshot)
		sdext_init();
#endif
#ifdef CONFIG_EXDOS_SUPPORT
	wd_exdos_reset();
	wd_attach_disk_image(config_getopt_str("wdimg"));
#endif
#ifdef CONFIG_W5300_SUPPORT
	w5300_init(NULL);
#endif
	// Timekeeping and audio start just before entering the emulation loop
	ticks = SDL_GetTicks();
	balancer = 0;
	set_cpu_clock(DEFAULT_CPU_CLOCK);
	emu_timekeeping_start();
	audio_start();
	if (config_getopt_int("fullscreen"))
		screen_set_fullscreen(1);
	DEBUGPRINT(NL "EMU: entering into main emulation loop" NL);
	sram_ready = 1;
	if (strcmp(config_getopt_str("primo"), "none") && !snapshot) {
		// TODO: da stuff ...
		primo_emulator_execute();
		OSD("Primo Emulator Mode");
	}
	if (snapshot)
		ep128snap_set_cpu_and_io();
	console_monitor_ready();	// OK to run monitor on console now!
#ifdef __EMSCRIPTEN__
	emscripten_set_main_loop(xep128_emulation, 50, 1);
#else
	for (;;)
		xep128_emulation();
#endif
	printf("EXITING FROM main()?!" NL);
	return 0;
}
Beispiel #15
0
/*
 * Upper-half audio driver ioctl handler.
 *
 * Serializes access to the device structures with the upper half's
 * exclusion semaphore, dispatches the built-in AUDIOIOC_* commands to the
 * lower-half driver operations, and forwards any unrecognized command to
 * the lower half's ioctl handler.  Returns the lower-half result (or OK),
 * or a negative value if taking the exclusion semaphore fails.
 */
static int audio_ioctl(FAR struct file *filep, int cmd, unsigned long arg)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct audio_upperhalf_s *upper = inode->i_private;
  FAR struct audio_lowerhalf_s *lower = upper->dev;
  FAR struct audio_buf_desc_s  *bufdesc;
#ifdef CONFIG_AUDIO_MULTI_SESSION
  FAR void *session;
#endif
  int ret;

  audvdbg("cmd: %d arg: %ld\n", cmd, arg);

  /* Get exclusive access to the device structures */

  ret = sem_wait(&upper->exclsem);
  if (ret < 0)
    {
      return ret;
    }

  /* Handle built-in ioctl commands */

  switch (cmd)
    {
      /* AUDIOIOC_GETCAPS - Get the audio device capabilities.
       *
       *   ioctl argument:  A pointer to the audio_caps_s structure.
       */

      case AUDIOIOC_GETCAPS:
        {
          FAR struct audio_caps_s *caps = (FAR struct audio_caps_s*)((uintptr_t)arg);
          DEBUGASSERT(lower->ops->getcaps != NULL);

          audvdbg("AUDIOIOC_GETCAPS: Device=%d\n", caps->ac_type);

          /* Call the lower-half driver capabilities handler */

          ret = lower->ops->getcaps(lower, caps->ac_type, caps);
        }
        break;

      case AUDIOIOC_CONFIGURE:
        {
          FAR const struct audio_caps_desc_s *caps =
            (FAR const struct audio_caps_desc_s*)((uintptr_t)arg);
          DEBUGASSERT(lower->ops->configure != NULL);

          /* NOTE(review): the debug label below says AUDIOIOC_INITIALIZE
           * but this is the CONFIGURE command -- message is misleading.
           */

          audvdbg("AUDIOIOC_INITIALIZE: Device=%d\n", caps->caps.ac_type);

          /* Call the lower-half driver configure handler */

#ifdef CONFIG_AUDIO_MULTI_SESSION
          ret = lower->ops->configure(lower, caps->session, &caps->caps);
#else
          ret = lower->ops->configure(lower, &caps->caps);
#endif
        }
        break;

      case AUDIOIOC_SHUTDOWN:
        {
          DEBUGASSERT(lower->ops->shutdown != NULL);

          audvdbg("AUDIOIOC_SHUTDOWN\n");

          /* Call the lower-half driver initialize handler */
          ret = lower->ops->shutdown(lower);
        }
        break;

      /* AUDIOIOC_START - Start the audio stream.  The AUDIOIOC_SETCHARACTERISTICS
       *   command must have previously been sent.
       *
       *   ioctl argument:  Audio session
       */

      case AUDIOIOC_START:
        {
          audvdbg("AUDIOIOC_START\n");
          DEBUGASSERT(lower->ops->start != NULL);

          /* Start the audio stream */

          /* NOTE(review): upper->started is not set here -- presumably
           * audio_start() updates it internally; confirm.
           */

#ifdef CONFIG_AUDIO_MULTI_SESSION
          session = (FAR void *) arg;
          ret = audio_start(upper, session);
#else
          ret = audio_start(upper);
#endif
        }
        break;

      /* AUDIOIOC_STOP - Stop the audio stream.
       *
       *   ioctl argument:  Audio session
       */

#ifndef CONFIG_AUDIO_EXCLUDE_STOP
      case AUDIOIOC_STOP:
        {
          audvdbg("AUDIOIOC_STOP\n");
          DEBUGASSERT(lower->ops->stop != NULL);

          if (upper->started)
            {
#ifdef CONFIG_AUDIO_MULTI_SESSION
              session = (FAR void *) arg;
              ret = lower->ops->stop(lower, session);
#else
              ret = lower->ops->stop(lower);
#endif
              upper->started = false;
            }
        }
        break;
#endif  /* CONFIG_AUDIO_EXCLUDE_STOP */

      /* AUDIOIOC_PAUSE - Pause the audio stream.
       *
       *   ioctl argument:  Audio session
       */

#ifndef CONFIG_AUDIO_EXCLUDE_PAUSE_RESUME

      case AUDIOIOC_PAUSE:
        {
          audvdbg("AUDIOIOC_PAUSE\n");
          DEBUGASSERT(lower->ops->pause != NULL);

          if (upper->started)
            {
#ifdef CONFIG_AUDIO_MULTI_SESSION
              session = (FAR void *) arg;
              ret = lower->ops->pause(lower, session);
#else
              ret = lower->ops->pause(lower);
#endif
            }
        }
        break;

      /* AUDIOIOC_RESUME - Resume the audio stream.
       *
       *   ioctl argument:  Audio session
       */

      case AUDIOIOC_RESUME:
        {
          audvdbg("AUDIOIOC_RESUME\n");
          DEBUGASSERT(lower->ops->resume != NULL);

          if (upper->started)
            {
#ifdef CONFIG_AUDIO_MULTI_SESSION
              session = (FAR void *) arg;
              ret = lower->ops->resume(lower, session);
#else
              ret = lower->ops->resume(lower);
#endif
            }
        }
        break;

#endif  /* CONFIG_AUDIO_EXCLUDE_PAUSE_RESUME */

      /* AUDIOIOC_ALLOCBUFFER - Allocate an audio buffer
       *
       *   ioctl argument:  pointer to an audio_buf_desc_s structure
       */

      case AUDIOIOC_ALLOCBUFFER:
        {
          audvdbg("AUDIOIOC_ALLOCBUFFER\n");

          bufdesc = (FAR struct audio_buf_desc_s *) arg;
          if (lower->ops->allocbuffer)
            {
              ret = lower->ops->allocbuffer(lower, bufdesc);
            }
          else
            {
              /* Perform a simple kumalloc operation assuming 1 session */

              ret = apb_alloc(bufdesc);
            }
        }
        break;

      /* AUDIOIOC_FREEBUFFER - Free an audio buffer
       *
       *   ioctl argument:  pointer to an audio_buf_desc_s structure
       */

      case AUDIOIOC_FREEBUFFER:
        {
          audvdbg("AUDIOIOC_FREEBUFFER\n");

          bufdesc = (FAR struct audio_buf_desc_s *) arg;
          if (lower->ops->freebuffer)
            {
              ret = lower->ops->freebuffer(lower, bufdesc);
            }
          else
            {
              /* Perform a simple kufree operation */

              DEBUGASSERT(bufdesc->u.pBuffer != NULL);
              apb_free(bufdesc->u.pBuffer);
              ret = sizeof(struct audio_buf_desc_s);
            }
        }
        break;

      /* AUDIOIOC_ENQUEUEBUFFER - Enqueue an audio buffer
       *
       *   ioctl argument:  pointer to an audio_buf_desc_s structure
       */

      case AUDIOIOC_ENQUEUEBUFFER:
        {
          audvdbg("AUDIOIOC_ENQUEUEBUFFER\n");

          DEBUGASSERT(lower->ops->enqueuebuffer != NULL);

          bufdesc = (FAR struct audio_buf_desc_s *) arg;
          ret = lower->ops->enqueuebuffer(lower, bufdesc->u.pBuffer);
        }
        break;

      /* AUDIOIOC_REGISTERMQ - Register a client Message Queue
       *
       * TODO:  This needs to have multi session support.
       */

      case AUDIOIOC_REGISTERMQ:
        {
          audvdbg("AUDIOIOC_REGISTERMQ\n");

          upper->usermq = (mqd_t) arg;
          ret = OK;
        }
        break;

      /* AUDIOIOC_UNREGISTERMQ - Register a client Message Queue
       *
       * TODO:  This needs to have multi session support.
       */

      case AUDIOIOC_UNREGISTERMQ:
        {
          audvdbg("AUDIOIOC_UNREGISTERMQ\n");

          upper->usermq = NULL;
          ret = OK;
        }
        break;

      /* AUDIOIOC_RESERVE - Reserve a session with the driver
       *
       *   ioctl argument - pointer to receive the session context
       */

      case AUDIOIOC_RESERVE:
        {
          audvdbg("AUDIOIOC_RESERVE\n");
          DEBUGASSERT(lower->ops->reserve != NULL);

          /* Call lower-half to perform the reservation */

#ifdef CONFIG_AUDIO_MULTI_SESSION
          ret = lower->ops->reserve(lower, (FAR void **) arg);
#else
          ret = lower->ops->reserve(lower);
#endif
        }
        break;

      /* AUDIOIOC_RESERVE - Reserve a session with the driver
       *
       *   ioctl argument - pointer to receive the session context
       */

      case AUDIOIOC_RELEASE:
        {
          audvdbg("AUDIOIOC_RELEASE\n");
          DEBUGASSERT(lower->ops->release != NULL);

          /* Call lower-half to perform the release */

#ifdef CONFIG_AUDIO_MULTI_SESSION
          ret = lower->ops->release(lower, (FAR void *) arg);
#else
          ret = lower->ops->release(lower);
#endif
        }
        break;

      /* Any unrecognized IOCTL commands might be platform-specific ioctl commands */

      default:
        {
          audvdbg("Forwarding unrecognized cmd: %d arg: %ld\n", cmd, arg);
          DEBUGASSERT(lower->ops->ioctl != NULL);
          ret = lower->ops->ioctl(lower, cmd, arg);
        }
        break;
    }

  sem_post(&upper->exclsem);
  return ret;
}
Beispiel #16
0
// Play a WAV file.
// fname: path of the wav file.
// Return value:
// KEY0_PRES: next track
// KEY1_PRES: previous track
// other: error
u8 wav_play_song(u8* fname)
{
	u8 key;
	u8 t=0; 
	u8 res;  
	u32 fillnum; 
	audiodev.file=(FIL*)mymalloc(SRAMIN,sizeof(FIL));
	audiodev.i2sbuf1=mymalloc(SRAMIN,WAV_I2S_TX_DMA_BUFSIZE);
	audiodev.i2sbuf2=mymalloc(SRAMIN,WAV_I2S_TX_DMA_BUFSIZE);
	audiodev.tbuf=mymalloc(SRAMIN,WAV_I2S_TX_DMA_BUFSIZE);
	if(audiodev.file&&audiodev.i2sbuf1&&audiodev.i2sbuf2&&audiodev.tbuf)
	{ 
		res=wav_decode_init(fname,&wavctrl);//get the file's info
		if(res==0)//file parsed successfully
		{
			if(wavctrl.bps==16)
			{
				WM8978_I2S_Cfg(2,0);	//Philips standard, 16-bit data length
				I2S2_Init(0,2,0,0);		//Philips standard, master transmit, clock idles low, 16-bit frame length
			}else if(wavctrl.bps==24)
			{
				WM8978_I2S_Cfg(2,2);	//Philips standard, 24-bit data length
				I2S2_Init(0,2,0,2);		//Philips standard, master transmit, clock idles low, 24-bit extended frame length
			}
			I2S2_SampleRate_Set(wavctrl.samplerate);//set the sample rate
			I2S2_TX_DMA_Init(audiodev.i2sbuf1,audiodev.i2sbuf2,WAV_I2S_TX_DMA_BUFSIZE/2); //configure TX DMA
			i2s_tx_callback=wav_i2s_dma_tx_callback;			//callback function is wav_i2s_dma_tx_callback
			audio_stop();
			res=f_open(audiodev.file,(TCHAR*)fname,FA_READ);	//open the file
			if(res==0)
			{
				f_lseek(audiodev.file, wavctrl.datastart);		//skip past the file header
				fillnum=wav_buffill(audiodev.i2sbuf1,WAV_I2S_TX_DMA_BUFSIZE,wavctrl.bps);
				fillnum=wav_buffill(audiodev.i2sbuf2,WAV_I2S_TX_DMA_BUFSIZE,wavctrl.bps);
				audio_start();  
				while(res==0)
				{ 
					while(wavtransferend==0);//wait for the wav DMA transfer to complete
					wavtransferend=0;
					if(fillnum!=WAV_I2S_TX_DMA_BUFSIZE)//end of playback?
					{
						res=KEY0_PRES;
						break;
					} 
 					if(wavwitchbuf)fillnum=wav_buffill(audiodev.i2sbuf2,WAV_I2S_TX_DMA_BUFSIZE,wavctrl.bps);//refill buf2
					else fillnum=wav_buffill(audiodev.i2sbuf1,WAV_I2S_TX_DMA_BUFSIZE,wavctrl.bps);//refill buf1
					while(1)
					{
						key=KEY_Scan(0); 
						if(key==WKUP_PRES)//pause/resume toggle
						{
							if(audiodev.status&0X01)audiodev.status&=~(1<<0);
							else audiodev.status|=0X01;  
						}
						if(key==KEY2_PRES||key==KEY0_PRES)//next track / previous track
						{
							res=key;
							break; 
						}
						wav_get_curtime(audiodev.file,&wavctrl);//get total time and current playback time 
						audio_msg_show(wavctrl.totsec,wavctrl.cursec,wavctrl.bitrate);
						t++;
						if(t==20)
						{
							t=0;
 							LED0=!LED0;
						}
						if((audiodev.status&0X01)==0)delay_ms(10);
						else break;
					}
				}
				audio_stop(); 
			}else res=0XFF; 
		}else res=0XFF;
	}else res=0XFF; 
	myfree(SRAMIN,audiodev.tbuf);	//free memory
	myfree(SRAMIN,audiodev.i2sbuf1);//free memory
	myfree(SRAMIN,audiodev.i2sbuf2);//free memory 
	myfree(SRAMIN,audiodev.file);	//free memory 
	return res;
} 
Beispiel #17
0
/*
 * Control-panel firmware entry point.
 *
 * Initializes peripherals (interface, UART, animation manager, system
 * timer, audio), optionally enters white-balance or orientation setup when
 * the corresponding switch is held at boot, then runs the main loop:
 * sleep handling, animation selection via switches, frame generation,
 * audio capture/processing, and asynchronous frame transmission.
 * Never returns.
 */
int main(){
	// Initialize Peripherals
	interface_init();
	red_led_on();
	uart_init(BAUDRATE);
	animation_manager_init();
	sys_timer_start();
	audio_init();
	sei();	// enable global interrupts

	// Load Default Animation
	animation_manager_load_animation(START_ANIMATION);

	// Enter Setup if Requested
	_delay_ms(100);
	if(deb_switch_1()){
		setup_wb_run();
	}
	else if(deb_switch_2()){
		setup_orientation_run();
	}

	// Load Default Animation
	animation_manager_load_animation(START_ANIMATION);

	// Set White Balance
	_delay_ms(300);
	display_wb_update();
	while(uart_async_run());	// setup white balance

	// Control Panel is Ready => Signal this by Turning the LED Green
	red_led_off();
	green_led_on();

	while(1){
		// Sleep Mode
		if(!switch_on_off()){	// if switched off
			go_to_sleep();
		}

		// Change animations
		sw_check();
		if(sw_check_pressed(SW_LEFT, 200, true)){
			animation_manager_dec_animation();
		}
		else if(sw_check_pressed(SW_RIGHT, 200, true)){
			animation_manager_inc_animation();
		}
		else if(sw_check_pressed(SW_RAND, 300, true)){
			animation_manager_random_animation();
		}

		// Generate Image
		animation_manager_run(0);

		// Check Audio
		audio_start();
		while(audio_run());
		audio_process();

		// Display Image
		while(uart_async_run()){
			interface_async_run();
		}

	}
}
Beispiel #18
0
/*
 * Upper-half audio driver ioctl handler (single-session variant).
 *
 * Serializes access to the device structures with the upper half's
 * exclusion semaphore, dispatches the built-in AUDIOIOC_* commands
 * (GETCAPS, CONFIGURE, SHUTDOWN, START, STOP) to the lower-half driver
 * operations, and forwards any unrecognized command to the lower half's
 * ioctl handler.  Returns the lower-half result, or a negative value if
 * taking the exclusion semaphore fails.
 *
 * Fixes vs. original: the CONFIGURE case logged "AUDIOIOC_INITIALIZE"
 * (wrong command name), and two debug messages were missing the trailing
 * newline used by every other message in this function.
 */
static int audio_ioctl(FAR struct file *filep, int cmd, unsigned long arg)
{
  FAR struct inode           *inode = filep->f_inode;
  FAR struct audio_upperhalf_s *upper = inode->i_private;
  FAR struct audio_lowerhalf_s *lower = upper->dev;
  int                         ret;

  audvdbg("cmd: %d arg: %ld\n", cmd, arg);

  /* Get exclusive access to the device structures */

  ret = sem_wait(&upper->exclsem);
  if (ret < 0)
    {
      return ret;
    }

  /* Handle built-in ioctl commands */

  switch (cmd)
    {
      /* AUDIOIOC_GETCAPS - Get the audio device capabilities.
       *
       *   ioctl argument:  A pointer to the audio_caps_s structure.
       */

      case AUDIOIOC_GETCAPS:
        {
          FAR struct audio_caps_s *caps = (FAR struct audio_caps_s*)((uintptr_t)arg);
          DEBUGASSERT(lower->ops->getcaps != NULL);

          audvdbg("AUDIOIOC_GETCAPS: Device=%d\n", caps->ac_type);

          /* Call the lower-half driver capabilities handler */
          ret = lower->ops->getcaps(lower, caps->ac_type, caps);
        }
        break;

      case AUDIOIOC_CONFIGURE:
        {
          FAR const struct audio_caps_s *caps = (FAR const struct audio_caps_s*)((uintptr_t)arg);
          DEBUGASSERT(lower->ops->configure != NULL);

          audvdbg("AUDIOIOC_CONFIGURE: Device=%d\n", caps->ac_type);

          /* Call the lower-half driver configure handler */

          ret = lower->ops->configure(lower, caps, &audio_callback, upper);
        }
        break;

      case AUDIOIOC_SHUTDOWN:
        {
          DEBUGASSERT(lower->ops->shutdown != NULL);

          audvdbg("AUDIOIOC_SHUTDOWN\n");

          /* Call the lower-half driver shutdown handler */
          ret = lower->ops->shutdown(lower);
        }
        break;

      /* AUDIOIOC_START - Start the pulsed output.  The AUDIOIOC_SETCHARACTERISTICS
       *   command must have previously been sent.
       *
       *   ioctl argument:  None
       */

      case AUDIOIOC_START:
        {
          audvdbg("AUDIOIOC_START\n");
          DEBUGASSERT(lower->ops->start != NULL);

          /* Start the pulse train */

          ret = audio_start(upper, filep->f_oflags);
        }
        break;

      /* AUDIOIOC_STOP - Stop the pulsed output.
       *
       *   ioctl argument:  None
       */

      case AUDIOIOC_STOP:
        {
          audvdbg("AUDIOIOC_STOP\n");
          DEBUGASSERT(lower->ops->stop != NULL);

          if (upper->started)
            {
              ret = lower->ops->stop(lower);
              upper->started = false;
            }
        }
        break;

      /* Any unrecognized IOCTL commands might be platform-specific ioctl commands */

      default:
        {
          audvdbg("Forwarding unrecognized cmd: %d arg: %ld\n", cmd, arg);
          DEBUGASSERT(lower->ops->ioctl != NULL);
          ret = lower->ops->ioctl(lower, cmd, arg);
        }
        break;
    }

  sem_post(&upper->exclsem);
  return ret;
}
Beispiel #19
0
/*
 * JNI entry point: play a WAV file on the given playback context.
 *
 * Stops any current playback, opens and parses the file, seeks to the
 * requested start position (in seconds), starts the audio device, then
 * streams the file in conf_size chunks via audio_write() until EOF or
 * until the context is stopped from another thread.  Returns 0 on
 * success or a LIBLOSSLESS_ERR_* code on failure.
 */
JNIEXPORT jint JNICALL Java_net_avs234_AndLessSrv_wavPlay(JNIEnv *env, jobject obj, msm_ctx* ctx, jstring jfile, jint start) {

    const char *file = (*env)->GetStringUTFChars(env, jfile, NULL);
    int i, n;
    unsigned rate, channels, bps;
    unsigned char *buff;
//    fd_set fds;

    struct timeval tstart, tstop, ttmp; 	
    useconds_t  tminwrite;		

    int writes = 0;
    off_t fsize;
	
#ifdef DBG_TIME
    uint64_t total_tminwrite = 0, total_ttmp = 0, total_sleep = 0;
    int  fails = 0;
#endif

	if(!ctx) {
	    return LIBLOSSLESS_ERR_NOCTX;
	}
		
	/* NOTE(review): 'file' is NULL in this branch yet it is passed to
	 * ReleaseStringUTFChars -- presumably a harmless no-op on this JNI
	 * implementation; confirm. */
	if(!file) {
		(*env)->ReleaseStringUTFChars(env, jfile, file);
		return LIBLOSSLESS_ERR_INV_PARM;
	}

	audio_stop(ctx);

	ctx->fd = open(file, O_RDONLY);
	(*env)->ReleaseStringUTFChars(env, jfile, file);

	if(ctx->fd < 0) {
	    return LIBLOSSLESS_ERR_NOFILE;
	}

	/* Payload size = file size minus the RIFF/WAV header */
	fsize = lseek(ctx->fd, 0, SEEK_END) - sizeof(wav_hdr);
	lseek(ctx->fd, 0, SEEK_SET);

	/* NOTE(review): the two error returns below leave ctx->fd open --
	 * looks like an fd leak unless a later audio_stop() cleans it up;
	 * confirm. */
	if(wav_hdr(ctx->fd, &rate, &channels, &bps) != 0) {
	    return LIBLOSSLESS_ERR_FORMAT;
	}

    __android_log_print(ANDROID_LOG_INFO, "liblossless", "wav ctx mode: %d", ctx->mode);

	if(start) {
		/* Seconds -> bytes: bytes per sample * channels * rate */
		int start_offs = start * (bps/8) * channels * rate;

		if(lseek(ctx->fd,start_offs,SEEK_CUR) < 0) {
		    return LIBLOSSLESS_ERR_OFFSET;
		}
	}

	i = audio_start(ctx, channels, rate);

	if(i != 0) {
        close(ctx->fd);
	    return i;
	}

	buff = (unsigned char *) malloc(ctx->conf_size);

	if(!buff) {
		close(ctx->fd);
		return LIBLOSSLESS_ERR_NOMEM;
	}

	/* Minimum microseconds one buffer of audio represents at this rate */
	tminwrite = ((long long)((long long)ctx->conf_size)*1000000)/((long long)rate*channels*(bps/8));
	

#if 0
	sprintf(buff,"*******TMINWRITEEEEE = %d %d %d %d %d",tminwrite,ctx->conf_size,rate,channels,bps);
	__android_log_print(ANDROID_LOG_INFO, "liblossless, buffer: ", buff);
#endif

    ctx->channels = channels;
    ctx->samplerate = rate;
    ctx->bps = bps;
	ctx->written = 0;

	pthread_mutex_lock(&ctx->mutex);
        ctx->state = MSM_PLAYING;
        ctx->track_time = fsize / (rate * channels * (bps / 8));
	pthread_mutex_unlock(&ctx->mutex);

    update_track_time(env, obj, ctx->track_time);

	while(ctx->state != MSM_STOPPED) {

		n = read(ctx->fd, buff, ctx->conf_size);

		/* Short read = EOF (or I/O error): wind down playback */
		if(n != ctx->conf_size) {

			/* NOTE(review): when state == MSM_PAUSED the mutex is
			 * unlocked below without being locked here -- presumably
			 * the pause path left it held; confirm before touching. */
			if(ctx->state != MSM_STOPPED) {
			   if(ctx->state != MSM_PAUSED) {
			        pthread_mutex_lock(&ctx->mutex);
			   }
               ctx->state = MSM_STOPPED;
		       pthread_mutex_unlock(&ctx->mutex);
			}

			free(buff);

	        if(ctx->fd == -1) {
	            return 0; // we were stopped from the main thread
	        }

            close(ctx->fd);
            ctx->fd = -1;

	        return 	LIBLOSSLESS_ERR_IO_READ;
		}

	    /* Non-callback mode: pace writes manually so we don't outrun
	     * the device, sleeping a fraction of the buffer duration */
	    if(ctx->mode != MODE_CALLBACK) {
            gettimeofday(&tstop,0);
            timersub(&tstop,&tstart,&ttmp);

		    if(tminwrite > ttmp.tv_usec) {
			    usleep((tminwrite-ttmp.tv_usec) / 4);
#ifdef DBG_TIME
                total_sleep += (tminwrite - ttmp.tv_usec) / 4;
#endif
            }
#ifdef DBG_TIME

            else {
                fails++;
            }

            total_tminwrite += tminwrite;
            total_ttmp += ttmp.tv_usec;
#endif
		    gettimeofday(&tstart,0);
	    }

		pthread_mutex_lock(&ctx->mutex);

		i = audio_write(ctx,buff,ctx->conf_size);

		/* Short write: treat as stop/error and clean up */
		if(i < ctx->conf_size) {
            ctx->state = MSM_STOPPED;
            pthread_mutex_unlock(&ctx->mutex);
		    free(buff); 	

            if(ctx->fd == -1) {

#ifdef DBG_TIME
                if(writes && (writes > fails)) {
                    int x = (int) (total_tminwrite/writes);
                    int y = (int) (total_ttmp/writes);
                    int z = (int) (total_sleep/(writes-fails));
                    __android_log_print(
                        ANDROID_LOG_INFO,
                        "liblossless",
                        "tminwrite %d ttmp %d sleep %d fails %d writes %d",
                        x, y, z, fails, writes);

                } else {
                    __android_log_print(
                        ANDROID_LOG_INFO,
                        "liblossless", "fails %d writes %d",
                        fails, writes);
                }
#endif

			    return 0; // we were stopped from the main thread
		    }	

            close(ctx->fd); ctx->fd = -1;

            return LIBLOSSLESS_ERR_IO_WRITE;
		}

		pthread_mutex_unlock(&ctx->mutex);

		ctx->written += i;
	    writes++;
	}

    /* Normal exit: make sure the context ends up stopped with fd closed */
    if(ctx->state != MSM_STOPPED) {
        if(ctx->state != MSM_PAUSED) {
            pthread_mutex_lock(&ctx->mutex);
        }

        if(ctx->fd != -1) {
            close(ctx->fd); ctx->fd = -1;
        }

        ctx->state = MSM_STOPPED;
        pthread_mutex_unlock(&ctx->mutex);
    }

#ifdef DBG_TIME
        if(writes && (writes > fails)) {
           int x = (int) (total_tminwrite/writes);
           int y = (int) (total_ttmp/writes);
           int z = (int) (total_sleep/(writes-fails));
            __android_log_print(ANDROID_LOG_INFO,"liblossless","tminwrite %d ttmp %d sleep %d fails %d writes %d", x,y,z,fails,writes);
        } else __android_log_print(ANDROID_LOG_INFO,"liblossless","fails %d writes %d", fails,writes);
#endif
   free(buff);
   audio_wait_done(ctx);

   return 0;	
}
Beispiel #20
0
int main(int argc, char **argv)
{
    struct sockaddr_in serv_addr;
    struct pollfd   poll_fds[1];
    int             exit_code = EXIT_FAILURE;
    int             net_fd = -1;
    int             connected = 0;
    int             res;

    audio_t        *audio;
    OpusDecoder    *decoder;
    uint64_t        encoded_bytes = 0;
    uint64_t        decoder_errors = 0;
    int             error;

    struct app_data app = {
        .sample_rate = 48000,
        .device_index = -1,
        .server_port = DEFAULT_AUDIO_PORT,
    };

    parse_options(argc, argv, &app);
    if (app.server_ip == NULL)
        app.server_ip = strdup("127.0.0.1");

    fprintf(stderr, "Using server IP %s\n", app.server_ip);
    fprintf(stderr, "using server port %d\n", app.server_port);

    /* initialize audio subsystem */
    audio = audio_init(app.device_index, app.sample_rate, AUDIO_CONF_OUTPUT);
    if (audio == NULL)
        exit(EXIT_FAILURE);

    decoder = opus_decoder_create(app.sample_rate, 1, &error);
    if (error != OPUS_OK)
    {
        fprintf(stderr, "Error creating opus decoder: %d (%s)\n",
                error, opus_strerror(error));
        audio_close(audio);
        exit(EXIT_FAILURE);
    }

    /* setup signal handler */
    if (signal(SIGINT, signal_handler) == SIG_ERR)
        printf("Warning: Can't catch SIGINT\n");
    if (signal(SIGTERM, signal_handler) == SIG_ERR)
        printf("Warning: Can't catch SIGTERM\n");

    memset(&serv_addr, 0, sizeof(serv_addr));
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_port = htons(app.server_port);
    if (inet_pton(AF_INET, app.server_ip, &serv_addr.sin_addr) == -1)
    {
        fprintf(stderr, "Error calling inet_pton(): %d: %s\n", errno,
                strerror(errno));
        goto cleanup;
    }

    while (keep_running)
    {
        if (net_fd == -1)
        {
            net_fd = socket(AF_INET, SOCK_STREAM, 0);
            if (net_fd == -1)
            {
                fprintf(stderr, "Error creating socket: %d: %s\n", errno,
                        strerror(errno));
                goto cleanup;
            }
        }

        /* Try to connect to server */
        if (connect(net_fd, (struct sockaddr *)&serv_addr, sizeof(serv_addr))
            == -1)
        {
            fprintf(stderr, "Connect error %d: %s\n", errno, strerror(errno));

            /* These errors may be temporary; try again */
            if (errno == ECONNREFUSED || errno == ENETUNREACH ||
                errno == ETIMEDOUT)
            {
                sleep(1);
                continue;
            }
            else
            {
                goto cleanup;
            }
        }

        poll_fds[0].fd = net_fd;
        poll_fds[0].events = POLLIN;
        connected = 1;
        fprintf(stderr, "Connected...\n");

        /* start audio system */
        audio_start(audio);

        while (keep_running && connected)
        {
            res = poll(poll_fds, 1, 500);

            if (res <= 0)
                continue;

            /* service network socket */
            if (poll_fds[0].revents & POLLIN)
            {

#define AUDIO_FRAMES 5760       // allows receiving up to 120 msec frames
#define AUDIO_BUFLEN 2 * AUDIO_FRAMES   // 120 msec: 48000 * 0.12
                uint8_t         buffer1[AUDIO_BUFLEN];
                uint8_t         buffer2[AUDIO_BUFLEN * 2];
                uint16_t        length;

                int             num;

                /* read 2 byte header */
                num = read(net_fd, buffer1, 2);
                if (num != 2)
                {
                    /* unrecovarable error; disconnect */
                    fprintf(stderr, "Error reading packet header: %d\n", num);
                    close(net_fd);
                    net_fd = -1;
                    connected = 0;
                    poll_fds[0].fd = -1;
                    audio_stop(audio);

                    num = opus_decode(decoder, NULL, 0, (opus_int16 *) buffer2,
                                      AUDIO_FRAMES, 0);

                    continue;
                }

                length = buffer1[0] + ((buffer1[1] & 0x1F) << 8);
                length -= 2;
                num = read(net_fd, buffer1, length);

                if (num == length)
                {
                    encoded_bytes += num;
                    num = opus_decode(decoder, buffer1, num,
                                      (opus_int16 *) buffer2, AUDIO_FRAMES, 0);

                    if (num > 0)
                    {
                        audio_write_frames(audio, buffer2, num);
                    }
                    else
                    {
                        decoder_errors++;
                        fprintf(stderr, "Decoder error: %d (%s)\n", num,
                                opus_strerror(num));
                    }
                }
                else if (num == 0)
                {
                    fprintf(stderr, "Connection closed (FD=%d)\n", net_fd);
                    close(net_fd);
                    net_fd = -1;
                    connected = 0;
                    poll_fds[0].fd = -1;
                    audio_stop(audio);
                }
                else
                {
                    fprintf(stderr, "Error reading from net: %d / \n", num);
                }

            }
        }
    }

    fprintf(stderr, "Shutting down...\n");
    exit_code = EXIT_SUCCESS;

  cleanup:
    close(net_fd);
    if (app.server_ip != NULL)
        free(app.server_ip);

    audio_stop(audio);
    audio_close(audio);
    opus_decoder_destroy(decoder);

    fprintf(stderr, "  Encoded bytes in: %" PRIu64 "\n", encoded_bytes);
    fprintf(stderr, "  Decoder errors  : %" PRIu64 "\n", decoder_errors);

    exit(exit_code);
}
Beispiel #21
0
/*
 * Program entry point for the IP-camera streaming application.
 *
 * Initialization order matters here: PTZ control, settings parsing,
 * DMA memory, encoder, camera, audio, then the live555 RTSP server.
 * Two streams ("vs1" from the main settings, "vs2" from `parse`) are
 * published as H.264 or MJPEG with optional AAC audio, a network
 * control server is started, and finally the live555 event loop is
 * entered (it never returns).
 */
int main( int argc, char **argv )
{
	//int ret = 0;
	PTZControlInit();
	demo_setting * ext_gSettings = NULL;
	
	// Allocate the "global" settings
	// NOTE(review): never freed, acceptable since doEventLoop() below
	// never returns and the process owns it for its whole lifetime.
	ext_gSettings = (demo_setting*)malloc( sizeof( demo_setting ) );
	if ( NULL == ext_gSettings ) {
		printf( "main::out of memory!\n" );
		return -1;
	}
	
	sig_init();
    atexit(appExit);
	//init the setting struct
	Settings_Initialize( ext_gSettings );

	read_Parse(ext_gSettings);
	//printf("video type = %d \n", ext_gSettings->video_types);
	//...do your job

	//close the led
	setled_off();
	//init dma memory
	akuio_pmem_init();
	encode_init();
	printf("encode_init ok\n");
	//open camera (must happen before encode_open below)
	camera_open(ext_gSettings->width, ext_gSettings->height);
	printf("camera_open ok\n");

	//encode_open: fill the encoder input descriptor from the settings
	T_ENC_INPUT encInput;
	encInput.width = ext_gSettings->width;			// actual encoded image width, must be divisible by 4
	encInput.height = ext_gSettings->height;			// actual encoded image height, must be divisible by 2
	encInput.kbpsmode = ext_gSettings->kbpsmode; 
	encInput.qpHdr = ext_gSettings->qpHdr;			// initial QP value
	encInput.iqpHdr = ext_gSettings->iqpHdr;			// initial QP value (I-frames)
	encInput.bitPerSecond = ext_gSettings->bitPerSecond;	// target bitrate (bps)
	encInput.minQp = ext_gSettings->minQp;
	encInput.maxQp = ext_gSettings->maxQp;
	encInput.framePerSecond = ext_gSettings->framePerSecond;
	encInput.video_tytes = ext_gSettings->video_types;
	encode_open(&encInput);
	printf("encode_open ok\n");

	//set mux (AVI recording parameters)
	mux_input.rec_path = ext_gSettings->rec_path;
	mux_input.m_MediaRecType = MEDIALIB_REC_AVI_NORMAL;

	if (ext_gSettings->bhasAudio)
	{
		bHasAudio = 1;
		//mux_input.m_bCaptureAudio = 1;
	}
	else
	{
		bHasAudio = 0;
		//mux_input.m_bCaptureAudio = 0;
	}
	// NOTE(review): audio capture is forced on here regardless of
	// bhasAudio (the conditional assignments above are commented out)
	// — confirm this is intended.
	mux_input.m_bCaptureAudio = 1;
	//mux video: second-stream format selects the recorded video type
	if(parse.format2 == 0)
	{
		mux_input.m_eVideoType = MEDIALIB_VIDEO_H264;
	}
	else if(parse.format2 == 1)
	{
		mux_input.m_eVideoType = MEDIALIB_VIDEO_MJPEG;
	}
	mux_input.m_nWidth = parse.width2;
	mux_input.m_nHeight = parse.height2;
	
	//mux audio: AAC at a fixed 8 kHz sample rate
	mux_input.m_eAudioType = MEDIALIB_AUDIO_AAC;
	mux_input.m_nSampleRate = 8000;
	//mux_input.abitsrate = ext_gSettings->abitsrate;

	printf("mux_open ok\n");

	//if (ext_gSettings->bhasAudio)
	{
		// Audio is opened unconditionally (the guard above is disabled)
		T_AUDIO_INPUT audioInput;
		audioInput.enc_type = (AUDIO_ENCODE_TYPE_CC)ext_gSettings->audioType;
		audioInput.nBitsRate = ext_gSettings->abitsrate;
		audioInput.nBitsPerSample = 16;
		audioInput.nChannels = 1;
		audioInput.nSampleRate = ext_gSettings->aSamplerate;
		audio_open(&audioInput);
		printf("audio_open ok\n");
		audio_start();
	}

	//start ftp server
	//startFTPSrv();

	Init_photograph();
	//PTZControlInit();
	//start video process
	video_process_start();
	InitMotionDetect();
	DemuxForLiveSetCallBack();
	// live555 environment setup
	TaskScheduler* scheduler = BasicTaskScheduler::createNew();
	env = BasicUsageEnvironment::createNew(*scheduler);
	UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
	// To implement client access control to the RTSP server, do the following:
	authDB = new UserAuthenticationDatabase;
	authDB->addUserRecord("username1", "password1"); // replace these with real strings
	// Repeat the above with each <username>, <password> that you wish to allow
	// access to the server.
#endif

	// Create the RTSP server:
	RTSPServer* rtspServer = AKRTSPServer::createNew(*env, RTSPPORT, authDB);
	if (rtspServer == NULL) 
	{
		*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
		appExit();
		exit(1);
	}

	char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\"";

	// Set up each of the possible streams that can be served by the
	// RTSP server.  Each such stream is implemented using a
	// "ServerMediaSession" object, plus one or more
	// "ServerMediaSubsession" objects for each audio/video substream.

	// vm[] records each stream's video mode for the net-control server below
	int vsIndex = 0;
	VIDEO_MODE vm[2] = {VIDEO_MODE_VGA,VIDEO_MODE_VGA};
	const char* streamName1 = "vs1";
	const char* streamName2 = "vs2";
	((AKRTSPServer*)rtspServer)->SetStreamName(streamName1, streamName2);	
	
	// Stream 1: MJPEG (video_types == 1); map configured width to a mode
	if(ext_gSettings->video_types == 1)
	{
		if(ext_gSettings->width == 640)
		{
			vm[0] = VIDEO_MODE_VGA;
		}
		else if(ext_gSettings->width == 320)
		{
			vm[0] = VIDEO_MODE_QVGA;
		}
		else if(ext_gSettings->width == 720)
		{
			vm[0] = VIDEO_MODE_D1;
		}
		
		AKIPCMJPEGFramedSource* ipcMJPEGSourcecam = NULL;
		ServerMediaSession* smsMJPEGcam = ServerMediaSession::createNew(*env, streamName1, 0, descriptionString);
		AKIPCMJPEGOnDemandMediaSubsession* subsMJPEGcam = AKIPCMJPEGOnDemandMediaSubsession::createNew(*env,ipcMJPEGSourcecam, ext_gSettings->width, ext_gSettings->height, vsIndex);
		smsMJPEGcam->addSubsession(subsMJPEGcam); 
		subsMJPEGcam->getframefunc = video_process_get_buf;
		subsMJPEGcam->setledstart = setled_view_start;
		subsMJPEGcam->setledexit = setled_view_stop;
		
		if(bHasAudio)
			smsMJPEGcam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));

		rtspServer->addServerMediaSession(smsMJPEGcam);
		char* url1 = rtspServer->rtspURL(smsMJPEGcam);
		*env << "using url \"" << url1 <<"\"\n";
		delete[] url1;
	}
	// Stream 1: H.264 (video_types == 0); 720p supported here, unlike MJPEG
	else if(ext_gSettings->video_types == 0)
	{
		if(ext_gSettings->width == 1280)
		{
			vm[0] = VIDEO_MODE_720P;
		}
		else if(ext_gSettings->width == 640)
		{
			vm[0] = VIDEO_MODE_VGA;
		}
		else if(ext_gSettings->width == 320)
		{
			vm[0] = VIDEO_MODE_QVGA;
		}
		else if(ext_gSettings->width == 720)
		{
			vm[0] = VIDEO_MODE_D1;
		}
		
		AKIPCH264FramedSource* ipcSourcecam = NULL;
		ServerMediaSession* smscam = ServerMediaSession::createNew(*env, streamName1, 0, descriptionString);
		AKIPCH264OnDemandMediaSubsession* subscam = AKIPCH264OnDemandMediaSubsession::createNew(*env,ipcSourcecam, 0, vsIndex);
		smscam->addSubsession(subscam);
		if(bHasAudio)
			smscam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));
	
		subscam->getframefunc = video_process_get_buf;
		subscam->setledstart = setled_view_start;
		subscam->setledexit = setled_view_stop;

		rtspServer->addServerMediaSession(smscam);
		char* url1 = rtspServer->rtspURL(smscam);
		*env << "using url \"" << url1 <<"\"\n";
		delete[] url1;
	}

	// Stream 2: configured independently via the `parse` globals
	vsIndex = 1;
	
	if(parse.format2 == 0)//264
	{
		if(parse.width2 == 1280)
		{
			vm[1] = VIDEO_MODE_720P;
		}
		else if(parse.width2 == 640)
		{
			vm[1] = VIDEO_MODE_VGA;
		}
		else if(parse.width2 == 320)
		{
			vm[1] = VIDEO_MODE_QVGA;
		}
		else if(parse.width2 == 720)
		{
			vm[1] = VIDEO_MODE_D1;
		}
		
		AKIPCH264FramedSource* ipcSourcecam = NULL;
		ServerMediaSession* smscam = ServerMediaSession::createNew(*env, streamName2, 0, descriptionString);
		AKIPCH264OnDemandMediaSubsession* subscam = AKIPCH264OnDemandMediaSubsession::createNew(*env,ipcSourcecam, 0, vsIndex);
		smscam->addSubsession(subscam);
		if(bHasAudio)
			smscam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));
	
		subscam->getframefunc = video_process_get_buf;
		subscam->setledstart = setled_view_start;
		subscam->setledexit = setled_view_stop;

		rtspServer->addServerMediaSession(smscam);
		char* url2 = rtspServer->rtspURL(smscam);
		*env << "using url \"" << url2 <<"\"\n";
		delete[] url2;
	}
	else if(parse.format2 == 1)//mjpeg
	{
		if(parse.width2 == 640)
		{
			vm[1] = VIDEO_MODE_VGA;
		}
		else if(parse.width2 == 320)
		{
			vm[1] = VIDEO_MODE_QVGA;
		}
		else if(parse.width2 == 720)
		{
			vm[1] = VIDEO_MODE_D1;
		}
		
		AKIPCMJPEGFramedSource* ipcMJPEGSourcecam = NULL;
		ServerMediaSession* smsMJPEGcam = ServerMediaSession::createNew(*env, streamName2, 0, descriptionString);
		AKIPCMJPEGOnDemandMediaSubsession* subsMJPEGcam = AKIPCMJPEGOnDemandMediaSubsession::createNew(*env,ipcMJPEGSourcecam, parse.width2, parse.height2, vsIndex);
		smsMJPEGcam->addSubsession(subsMJPEGcam); 
		subsMJPEGcam->getframefunc = video_process_get_buf;
		subsMJPEGcam->setledstart = setled_view_start;
		subsMJPEGcam->setledexit = setled_view_stop;
		
		if(bHasAudio)
			smsMJPEGcam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));

		rtspServer->addServerMediaSession(smsMJPEGcam);
		char* url2 = rtspServer->rtspURL(smsMJPEGcam);
		*env << "using url \"" << url2 <<"\"\n";
		delete[] url2;
	}
#if 0
	if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) 
	{
		*env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
	}
	else 
	{
		*env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
	}
#endif

	//printf("streamName:%s,Port:%d\n", streamName1, RTSPPORT);
	
	
	// Publish stream names, modes and frame rates to the net-control server
	NetCtlSrvPar ncsp;
	memset(&ncsp, 0, sizeof(ncsp));
	getDeviceID(ncsp.strDeviceID);
	printf("device id:**%s**\n", ncsp.strDeviceID);
	strcpy(ncsp.strStreamName1, streamName1);
	strcpy(ncsp.strStreamName2, streamName2);
	ncsp.vm1 = vm[0];
	ncsp.vm2 = vm[1];
	ncsp.nRtspPort = RTSPPORT;
	ncsp.nMainFps = parse.fps1;
	ncsp.nSubFps = parse.fps2;
	//start net command server
	startNetCtlServer(&ncsp);

    printf("[##]start record...\n");
    auto_record_file();
    printf("[##]auto_record_file() called..\n");

	//at last,start rtsp loop
	env->taskScheduler().doEventLoop(); // does not return

	return 0;
}
Beispiel #22
0
/*
 * Configure and start the media streams of a call, based on the
 * remote's preferred SDP formats.  If 'active' is true, the call has
 * just gone active: the invite timer is stopped, the start time is
 * recorded and all streams are reset.
 */
static void call_stream_start(struct call *call, bool active)
{
	const struct sdp_format *fmt;
	int err;

	/* Audio stream: pick the remote's preferred audio format */
	fmt = sdp_media_rformat(stream_sdpmedia(audio_strm(call->audio)), NULL);
	if (!fmt) {
		info("call: audio stream is disabled..\n");
	}
	else if (!fmt->data) {
		info("call: no common audio-codecs..\n");
	}
	else {
		err  = audio_encoder_set(call->audio, fmt->data,
					 fmt->pt, fmt->params);
		err |= audio_decoder_set(call->audio, fmt->data,
					 fmt->pt, fmt->params);

		if (!err)
			err = audio_start(call->audio);

		if (err)
			warning("call: audio stream error: %m\n", err);
	}

#ifdef USE_VIDEO
	/* Video stream: same procedure for the video format */
	fmt = sdp_media_rformat(stream_sdpmedia(video_strm(call->video)), NULL);
	if (fmt) {
		err  = video_encoder_set(call->video, fmt->data, fmt->pt,
					 fmt->params);
		err |= video_decoder_set(call->video, fmt->data, fmt->pt,
					 fmt->rparams);

		if (!err)
			err = video_start(call->video, call->peer_uri);

		if (err)
			warning("call: video stream error: %m\n", err);
	}
	else if (call->video) {
		info("call: video stream is disabled..\n");
	}

	/* Floor control, if negotiated */
	if (call->bfcp) {
		err = bfcp_start(call->bfcp);
		if (err)
			warning("call: could not start BFCP: %m\n", err);
	}
#endif

	if (active) {
		struct le *le;

		/* Call is now active: stop the invite timer, stamp the
		 * start time and reset every media stream */
		tmr_cancel(&call->tmr_inv);
		call->time_start = time(NULL);

		FOREACH_STREAM {
			stream_reset(le->data);
		}
	}
}
Beispiel #23
0
/*
 * audio processing loop (should run in a separate thread)
 * args:
 *    data - pointer to user data (encoder_context_t *)
 *
 * asserts:
 *   none
 *
 * returns: pointer to return code
 *          ((void *) 0 on success, (void *) -1 on setup failure)
 */
static void *audio_processing_loop(void *data)
{
	encoder_context_t *encoder_ctx = (encoder_context_t *) data;

	if(debug_level > 1)
		printf("GUVCVIEW: audio thread (tid: %u)\n",
			(unsigned int) syscall (SYS_gettid));

	audio_context_t *audio_ctx = get_audio_context();
	if(!audio_ctx)
	{
		fprintf(stderr, "GUVCVIEW: no audio context: skiping audio processing\n");
		return ((void *) -1);
	}

	/* capture buffer must hold one encoder frame per channel */
	int frame_size = encoder_get_audio_frame_size(encoder_ctx);
	audio_ctx->capture_buff_size = frame_size * audio_ctx->channels;

	/*start audio capture*/
	audio_start(audio_ctx);

	/*
	 * alloc the buffer after audio_start
	 * otherwise capture_buff_size may not
	 * be correct
	 * allocated data is big enough for float samples (32 bit)
	 * although it may contain int16 samples (16 bit)
	 */
	audio_buff_t *audio_buff = audio_get_buffer(audio_ctx);
	if(!audio_buff)
	{
		/* BUG FIX: the buffer was previously dereferenced without a
		 * NULL check; stop capture and bail out on allocation failure */
		fprintf(stderr, "GUVCVIEW: couldn't allocate audio buffer: skipping audio processing\n");
		audio_stop(audio_ctx);
		return ((void *) -1);
	}

	int sample_type = encoder_get_audio_sample_fmt(encoder_ctx);

	/*enable vu meter OSD display (stereo or mono variant)*/
	if(audio_ctx->channels > 1)
		render_set_osd_mask(REND_OSD_VUMETER_STEREO);
	else
		render_set_osd_mask(REND_OSD_VUMETER_MONO);

	/* capture/encode until video recording is stopped */
	while(video_capture_get_save_video())
	{
		int ret = audio_get_next_buffer(audio_ctx, audio_buff,
				sample_type, my_audio_mask);

		if(ret == 0)
		{
			encoder_ctx->enc_audio_ctx->pts = audio_buff->timestamp;

			/*OSD vu meter level*/
			render_set_vu_level(audio_buff->level_meter);

			encoder_process_audio_buffer(encoder_ctx, audio_buff->data);
		}
	}

	/*flush any delayed audio frames*/
	encoder_flush_audio_buffer(encoder_ctx);

	/*reset vu meter*/
	audio_buff->level_meter[0] = 0;
	audio_buff->level_meter[1] = 0;
	render_set_vu_level(audio_buff->level_meter);

	/*disable OSD*/
	render_set_osd_mask(REND_OSD_NONE);

	audio_stop(audio_ctx);
	audio_delete_buffer(audio_buff);

	return ((void *) 0);
}