예제 #1
0
/**
 * @brief Read one byte of the configured feature unit descriptor.
 *
 * No NULL check on p_feature_dsc here: callers iterate only up to
 * audio_get_feature_descriptor_size(), which returns 0 in that case.
 *
 * @param[in] p_inst   Generic class instance.
 * @param[in] cur_byte Byte offset inside the feature descriptor data.
 *
 * @return Descriptor byte at the requested offset.
 */
static size_t audio_get_feature_descriptor_data(app_usbd_class_inst_t const * p_inst,
                                                uint32_t                      cur_byte)
{
    return audio_get(p_inst)->specific.inst.p_feature_dsc->p_data[cur_byte];
}
예제 #2
0
/*
 * Playback-thread entry point: endlessly drains the audio FIFO and releases
 * every packet pulled off it.  The loop never terminates, so the trailing
 * return only silences the compiler.
 */
static void* alsa_audio_start(void *aux)
{
	audio_fifo_t *fifo = aux;

	for (;;) {
		free(audio_get(fifo));
	}

	return NULL;
}
예제 #3
0
/**
 * @brief Register a SOF interrupt handler for the audio class instance.
 *
 * The handler is only stored in the class context here; it is attached to
 * the USBD SOF interrupt when the instance is appended (see the
 * APP_USBD_EVT_INST_APPEND handling in the class event handler).
 *
 * @param[in] p_inst  Generic class instance.
 * @param[in] handler Handler to be called on start-of-frame interrupts.
 *
 * @return NRF_SUCCESS always.
 */
ret_code_t app_usbd_audio_sof_interrupt_register(app_usbd_class_inst_t const * p_inst,
                                                 app_usbd_sof_interrupt_handler_t handler)
{
    app_usbd_audio_ctx_t * p_audio_ctx = audio_ctx_get(audio_get(p_inst));

    p_audio_ctx->sof_handler = handler;

    return NRF_SUCCESS;
}
예제 #4
0
/**
 * @brief Get the total size of the feature unit descriptor.
 *
 * @param[in] p_inst Generic class instance.
 *
 * @return Size in bytes, or 0 when no feature descriptor is configured.
 */
static size_t audio_get_feature_descriptor_size(app_usbd_class_inst_t const * p_inst)
{
    app_usbd_audio_t const * p_audio = audio_get(p_inst);

    return (p_audio->specific.inst.p_feature_dsc == NULL)
           ? 0
           : p_audio->specific.inst.p_feature_dsc->size;
}
예제 #5
0
/**
 * @brief Forward a class event to the application's user event handler.
 *
 * Does nothing when the instance was created without a user handler.
 *
 * @param[in] p_inst Class instance.
 * @param[in] event  Event type @ref app_usbd_audio_user_event_t
 */
static inline void user_event_handler(
    app_usbd_class_inst_t const * p_inst,
    app_usbd_audio_user_event_t   event)
{
    app_usbd_audio_t const * p_audio = audio_get(p_inst);

    /* Guard clause: no handler registered, nothing to notify. */
    if (p_audio->specific.inst.user_ev_handler == NULL)
    {
        return;
    }

    p_audio->specific.inst.user_ev_handler(p_inst, event);
}
예제 #6
0
/*
 * Pull one packet from the FIFO, upload it into the given OpenAL buffer and
 * append that buffer to the source's queue.  The packet is released after
 * alBufferData() has copied its samples.  Always returns 1.
 */
static int queue_buffer(ALuint source, audio_fifo_t *af, ALuint buffer)
{
    audio_fifo_data_t *packet = audio_get(af);
    ALenum  format = (packet->channels == 1) ? AL_FORMAT_MONO16
                                             : AL_FORMAT_STEREO16;
    ALsizei nbytes = packet->nsamples * packet->channels * sizeof(short);

    alBufferData(buffer, format, packet->samples, nbytes, packet->rate);
    alSourceQueueBuffers(source, 1, &buffer);
    free(packet);
    return 1;
}
예제 #7
0
/*
 * AudioQueue render callback: refill 'bufout' with the next FIFO packet and
 * hand it back to the queue.  The packet is freed after memcpy() has taken
 * its samples.
 */
static void audio_callback (void *aux, AudioQueueRef aq, AudioQueueBufferRef bufout)
{
    audio_fifo_t *fifo = aux;
    audio_fifo_data_t *packet = audio_get(fifo);

    bufout->mAudioDataByteSize = packet->nsamples * sizeof(short) * packet->channels;
    assert(bufout->mAudioDataByteSize <= state.buffer_size);

    memcpy(bufout->mAudioData, packet->samples, bufout->mAudioDataByteSize);
    AudioQueueEnqueueBuffer(state.queue, bufout, 0, NULL);

    free(packet);
}
예제 #8
0
/*
 * Playback-thread entry point: endlessly drains the audio FIFO, freeing
 * every packet it pulls off.  Never returns in practice.
 *
 * Cleanup: the previous `if (o != NULL) free(o);` guard was redundant —
 * free(NULL) is defined as a no-op by the C standard.
 */
static void* alsa_audio_start(void *aux)
{
	audio_fifo_t *af = aux;

	while (1)
	{
		free(audio_get(af));
	}

	return NULL;
}
예제 #9
0
/**
 * @brief Deselect an interface (revert to alternate setting 0).
 *
 * Clears the streaming flag for the data (streaming) interface; interfaces
 * without endpoints need no action.
 *
 * @param[in] p_inst    Instance of the class.
 * @param[in] iface_idx Index of the interface inside the class structure.
 */
static void iface_deselect(
    app_usbd_class_inst_t const * const p_inst,
    uint8_t                             iface_idx)
{
    app_usbd_class_iface_conf_t const * p_iface = app_usbd_class_iface_get(p_inst, iface_idx);

    /* Simple check if this is data interface.  Use the accessor, matching
     * iface_select() and iface_selection_get(), instead of reading the
     * ep_cnt field directly. */
    if (app_usbd_class_iface_ep_count_get(p_iface) > 0)
    {
        app_usbd_audio_t const * p_audio     = audio_get(p_inst);
        app_usbd_audio_ctx_t   * p_audio_ctx = audio_ctx_get(p_audio);
        p_audio_ctx->streaming = false;
    }
    /* Note that all the interface endpoints would be disabled automatically after this function */
}
예제 #10
0
파일: demod.c 프로젝트: glneo/direwolf
/*
 * Fetch the next audio sample for device channel 'a' and normalize it to a
 * signed 16-bit value.
 *
 * Returns the sample, or FSK_READ_ERR if audio_get() reports an error
 * (negative return).
 */
int demod_get_sample (int a)
{
	int lo, hi;
	signed short sam;	/* short to force sign extention. */

	assert (save_audio_config_p->adev[a].bits_per_sample == 8 || save_audio_config_p->adev[a].bits_per_sample == 16);

	if (save_audio_config_p->adev[a].bits_per_sample == 8) {

	  lo = audio_get(a);
	  if (lo < 0) return (FSK_READ_ERR);

	  assert (lo >= 0 && lo <= 255);

	  /* Scale 0..255 into -32k..+32k */
	  sam = (lo - 128) * 256;
	}
	else {
	  /* 16 bit samples arrive little-endian: lower byte first. */
	  lo = audio_get(a);
	  if (lo < 0) return (FSK_READ_ERR);

	  hi = audio_get(a);
	  if (hi < 0) return (FSK_READ_ERR);

	  assert (lo >= 0 && lo <= 255);
	  assert (hi >= 0 && hi <= 255);

	  sam = (hi << 8) | lo;
	}

	return (sam);
}
예제 #11
0
/*
 * Gapless output pump thread body: runs forever, keeping g_gaplessfifo
 * supplied.  Queues silence while nothing is playing; otherwise, once
 * enough music input has accumulated, copies one packet at a time from
 * g_musicfifo into fresh buffers on g_gaplessfifo.
 */
void gaplessloop(void *arg) {
	printf("Starting gaplessloop...\n");
	sleep(1);

	for (;;) {
		usleep(1000);

		/* Output already has plenty queued — wait a bit. */
		if (audio_fifo_available(&g_gaplessfifo) > 5000) {
			continue;
		}

		if (!g_is_playing) {
			/* Keep the output alive with a silence packet while idle. */
			audio_fifo_data_t *silence = audio_data_create(2048, 2);
			audio_fifo_queue(&g_gaplessfifo, silence);
			continue;
		}

		/* Require a minimum amount of buffered music input first. */
		if (audio_fifo_available(&g_musicfifo) < 4000) {
			continue;
		}

		audio_fifo_data_t *inp = audio_get(&g_musicfifo);
		if (inp == NULL) {
			printf("gapless: nothing read.\n");
			continue;
		}

		/* Duplicate the packet into a fresh buffer for the output FIFO. */
		audio_fifo_data_t *out = audio_data_create(inp->nsamples, inp->channels);
		int total = inp->nsamples * inp->channels;
		int n;
		for (n = 0; n < total; n++) {
			out->samples[n] = inp->samples[n];
		}
		audio_fifo_queue(&g_gaplessfifo, out);

		free(inp);
	}
}
예제 #12
0
/**
 * @brief Report the active alternate setting of an interface.
 *
 * The streaming (data) interface returns 1 while streaming, 0 otherwise;
 * interfaces without endpoints always return 0.
 *
 * @param[in] p_inst    Instance of the class.
 * @param[in] iface_idx Index of the interface inside the class structure.
 *
 * @return Currently selected alternate setting.
 */
static uint8_t iface_selection_get(
    app_usbd_class_inst_t const * const p_inst,
    uint8_t                             iface_idx)
{
    app_usbd_class_iface_conf_t const * p_iface = app_usbd_class_iface_get(p_inst, iface_idx);

    /* Simple check if this is data interface: only interfaces with
     * endpoints have a non-zero alternate setting. */
    if (app_usbd_class_iface_ep_count_get(p_iface) == 0)
    {
        return 0;
    }

    app_usbd_audio_ctx_t * p_audio_ctx = audio_ctx_get(audio_get(p_inst));
    return p_audio_ctx->streaming ? 1 : 0;
}
예제 #13
0
/**
 * @brief Internal SETUP class IN request handler.
 *
 * Decodes the SETUP packet into the class context's request record,
 * notifies the application via the user event handler, and then responds
 * on EP0 with request.payload / request.length from the context.
 *
 * @param[in] p_inst        Generic class instance.
 * @param[in] p_setup_ev    Setup event.
 *
 * @return Standard error code.
 * @retval NRF_SUCCESS              Request handled correctly.
 * @retval NRF_ERROR_NOT_SUPPORTED  Request is not supported.
 */
static ret_code_t setup_req_class_in(
    app_usbd_class_inst_t const * p_inst,
    app_usbd_setup_evt_t const  * p_setup_ev)
{
    switch (p_setup_ev->setup.bRequest)
    {
        case APP_USBD_AUDIO_REQ_GET_CUR:
        case APP_USBD_AUDIO_REQ_GET_MIN:
        case APP_USBD_AUDIO_REQ_GET_MAX:
        /* NOTE(review): SET_RES in the IN-request handler looks out of
         * place among the GET_* requests — confirm this should not be
         * APP_USBD_AUDIO_REQ_GET_RES. */
        case APP_USBD_AUDIO_REQ_SET_RES:
        case APP_USBD_AUDIO_REQ_GET_MEM:
        {
            app_usbd_audio_t const * p_audio     = audio_get(p_inst);
            app_usbd_audio_ctx_t   * p_audio_ctx = audio_ctx_get(p_audio);

            /* Unpack the SETUP fields: wValue carries control/channel,
             * wIndex carries interface/entity (high/low bytes). */
            p_audio_ctx->request.req_type  = (app_usbd_audio_req_type_t)p_setup_ev->setup.bRequest;
            p_audio_ctx->request.control   = p_setup_ev->setup.wValue.hb;
            p_audio_ctx->request.channel   = p_setup_ev->setup.wValue.lb;
            p_audio_ctx->request.interface = p_setup_ev->setup.wIndex.hb;
            p_audio_ctx->request.entity    = p_setup_ev->setup.wIndex.lb;

            p_audio_ctx->request.length = p_setup_ev->setup.wLength.w;

            p_audio_ctx->request.req_target = APP_USBD_AUDIO_CLASS_REQ_IN;

            /* Endpoint-directed requests are tagged separately from
             * interface/entity requests. */
            app_usbd_setup_reqrec_t rec = app_usbd_setup_req_rec(p_setup_ev->setup.bmRequestType);
            if (rec == APP_USBD_SETUP_REQREC_ENDPOINT)
            {
                p_audio_ctx->request.req_target = APP_USBD_AUDIO_EP_REQ_IN;
            }

            /* Notify the application before sending the response; the
             * response uses request.payload from the same context. */
            user_event_handler((app_usbd_class_inst_t const *)p_audio,
                               APP_USBD_AUDIO_USER_EVT_CLASS_REQ);

            return app_usbd_core_setup_rsp(&p_setup_ev->setup,
                                           p_audio_ctx->request.payload,
                                           p_audio_ctx->request.length);
        }

        default:
            break;
    }

    return NRF_ERROR_NOT_SUPPORTED;
}
예제 #14
0
파일: alsa-audio.c 프로젝트: raphui/wMusic
/*
 * ALSA playback thread: pulls packets off the FIFO and writes them to the
 * "default" PCM device, reopening the device whenever the stream's rate or
 * channel count changes.
 *
 * NOTE(review): afd is dereferenced without a NULL check — presumably
 * audio_get() blocks until a packet is available; confirm.
 */
static void* alsa_audio_start(void *aux)
{
	audio_fifo_t *af = aux;
	snd_pcm_t *h = NULL;
	int c;
	int cur_channels = 0;
	int cur_rate = 0;

	audio_fifo_data_t *afd;

	for (;;) {
		afd = audio_get(af);

		/* (Re)open the device on first use or on a format change. */
		if (!h || cur_rate != afd->rate || cur_channels != afd->channels) {
			if (h) snd_pcm_close(h);

			cur_rate = afd->rate;
			cur_channels = afd->channels;

            h = alsa_open("default", cur_rate, cur_channels);

            if (!h) {
                fprintf(stderr, "Unable to open ALSA device (%d channels, %d Hz), dying\n",
                        cur_channels, cur_rate);
                exit(1);
            }
		}

		/* Wait up to 1 s for the device to accept more frames. */
		c = snd_pcm_wait(h, 1000);

		if (c >= 0)
			c = snd_pcm_avail_update(h);

		/* -EPIPE signals an underrun; recover the stream. */
		if (c == -EPIPE)
			snd_pcm_prepare(h);

        snd_pcm_writei(h, afd->samples, afd->nsamples);
        /* NOTE(review): this passes the address of the 'samples' member —
         * verify writeFile() really expects &afd->samples rather than
         * afd->samples. */
        writeFile( &afd->samples );
        /* zfree: presumably the project's free() wrapper — confirm. */
        zfree(afd);
	}
}
예제 #15
0
/**
 * @brief Handle a class/endpoint OUT request (host-to-device data stage).
 *
 * Decodes the SETUP packet into the class context's request record, arms an
 * EP0 OUT transfer into request.payload, and installs
 * audio_req_out_data_cb to run when the data stage completes.
 *
 * @param[in] p_inst     Generic class instance.
 * @param[in] p_setup_ev Setup event.
 *
 * @return Result of arming the EP0 transfer / installing the data handler.
 */
static ret_code_t audio_req_out(
    app_usbd_class_inst_t const * p_inst,
    app_usbd_setup_evt_t const  * p_setup_ev)
{
    app_usbd_audio_t const * p_audio     = audio_get(p_inst);
    app_usbd_audio_ctx_t   * p_audio_ctx = audio_ctx_get(p_audio);

    /* Unpack the SETUP fields: wValue carries control/channel, wIndex
     * carries interface/entity (high/low bytes). */
    p_audio_ctx->request.req_type  = (app_usbd_audio_req_type_t)p_setup_ev->setup.bRequest;
    p_audio_ctx->request.control   = p_setup_ev->setup.wValue.hb;
    p_audio_ctx->request.channel   = p_setup_ev->setup.wValue.lb;
    p_audio_ctx->request.interface = p_setup_ev->setup.wIndex.hb;
    p_audio_ctx->request.entity    = p_setup_ev->setup.wIndex.lb;

    p_audio_ctx->request.length = p_setup_ev->setup.wLength.w;

    p_audio_ctx->request.req_target = APP_USBD_AUDIO_CLASS_REQ_OUT;
    if (app_usbd_setup_req_rec(p_setup_ev->setup.bmRequestType) == APP_USBD_SETUP_REQREC_ENDPOINT)
    {
        p_audio_ctx->request.req_target = APP_USBD_AUDIO_EP_REQ_OUT;
    }

    /*Request setup data*/
    /* The macro declares a local transfer descriptor named 'transfer'
     * targeting request.payload. */
    NRF_DRV_USBD_TRANSFER_OUT(transfer, p_audio_ctx->request.payload, p_audio_ctx->request.length);
    ret_code_t ret;
    /* Arm the transfer and install the data-stage callback atomically so a
     * completing transfer cannot race the handler registration. */
    CRITICAL_REGION_ENTER();
    ret = app_usbd_ep_transfer(NRF_DRV_USBD_EPOUT0, &transfer);
    if (ret == NRF_SUCCESS)
    {
        app_usbd_core_setup_data_handler_desc_t desc = {
            .handler   = audio_req_out_data_cb,
            .p_context = (void *)p_audio
        };

        ret = app_usbd_core_setup_data_handler_set(NRF_DRV_USBD_EPOUT0, &desc);
    }
    CRITICAL_REGION_EXIT();

    return ret;
}
예제 #16
0
/*
 * Mix static noise from 'statics' into one packet of audio pulled from
 * 'inputfifo' and queue the mixed result on 'outputfifo'.
 *
 * When 'comped' is set, the program audio is ducked slightly before the
 * (more attenuated) noise is added; otherwise the noise is mixed in at
 * full weight.  The noise volume ramps one step per sample toward
 * state->targetvol.  Returns without queueing anything when no input
 * packet is available.
 *
 * Cleanup: removed the unused local 'af' (assigned outputfifo, never read).
 */
void static_generate(STATICSTATE statics, audio_fifo_t *inputfifo, audio_fifo_t *outputfifo, bool comped) {
	staticstate_private *state = (staticstate_private *)statics;
	int i, o, x;

	audio_fifo_data_t *infd = audio_get(inputfifo);
	if (infd == NULL)
	{
		/* NOTE(review): message likely meant "input NOT ready" — confirm
		 * before changing the user-visible text. */
		printf("static input ready.\n");
		return;
	}

	audio_fifo_data_t *afd = audio_data_create(infd->nsamples, infd->channels);

	o = state->offset;
	/* NOTE(review): the i += 2 step and samples[i]/samples[i+1] writes
	 * assume 2-channel (stereo) packets — confirm callers guarantee it. */
	for (i = 0; i < afd->nsamples * afd->channels; i += 2) {
		/* Base the mix on the first (left) channel of the input frame. */
		x = infd->samples[i];
		if (comped) {
			x -= (x * state->vol) / 100000;
			x += (state->buf[o] * state->vol) / 60000;
		}
		else {
			x += (state->buf[o] * state->vol) / 32768;
		}
		/* Clamp to the 16-bit sample range. */
		if (x < -32767) x = -32767;
		if (x > 32767) x = 32767;
		/* Write the same mixed value to both channels. */
		afd->samples[i] = x;
		afd->samples[i+1] = x;
		o++;
		/* NOTE(review): '>' allows o to reach state->length once — if buf
		 * holds exactly state->length entries this reads one past the end;
		 * confirm whether '>=' was intended. */
		if (o > state->length) o = 0;
		/* Ramp the noise volume one step toward its target per sample. */
		if (state->vol < state->targetvol) state->vol++;
		if (state->vol > state->targetvol) state->vol--;
	}
	state->offset = o;

	free(infd);

	audio_fifo_queue(outputfifo, afd);
}
예제 #17
0
/**
 * @brief Select interface.
 *
 * @param[in,out] p_inst    Instance of the class.
 * @param[in]     iface_idx Index of the interface inside class structure.
 * @param[in]     alternate Alternate setting that should be selected.
 *
 * @return NRF_SUCCESS on success, NRF_ERROR_INVALID_PARAM for an unknown
 *         alternate setting, NRF_ERROR_NOT_SUPPORTED for interfaces
 *         without endpoints.
 */
static ret_code_t iface_select(
    app_usbd_class_inst_t const * const p_inst,
    uint8_t                             iface_idx,
    uint8_t                             alternate)
{
    app_usbd_class_iface_conf_t const * p_iface  = app_usbd_class_iface_get(p_inst, iface_idx);
    /* Simple check if this is data interface */
    uint8_t const                       ep_count = app_usbd_class_iface_ep_count_get(p_iface);

    if (ep_count == 0)
    {
        return NRF_ERROR_NOT_SUPPORTED;
    }

    /* Only alternate settings 0 and 1 exist for the streaming interface. */
    if (alternate > 1)
    {
        return NRF_ERROR_INVALID_PARAM;
    }

    app_usbd_audio_t const * p_audio     = audio_get(p_inst);
    app_usbd_audio_ctx_t   * p_audio_ctx = audio_ctx_get(p_audio);

    /* Alternate setting 1 starts streaming; 0 stops it. */
    p_audio_ctx->streaming = (alternate != 0);

    uint8_t ep_idx;
    for (ep_idx = 0; ep_idx < ep_count; ++ep_idx)
    {
        nrf_drv_usbd_ep_t ep_addr =
            app_usbd_class_ep_address_get(app_usbd_class_iface_ep_get(p_iface, ep_idx));
        if (alternate)
        {
            app_usbd_ep_enable(ep_addr);
        }
        else
        {
            app_usbd_ep_disable(ep_addr);
        }
    }

    return NRF_SUCCESS;
}
예제 #18
0
/*
 * OpenAL playback thread: pre-buffers three packets, then continually
 * unqueues processed buffers, refills them from the FIFO and requeues them.
 * When the stream's rate or channel count changes, all buffers are reset
 * and reloaded with the packet that triggered the change.
 *
 * Fixes over the previous revision:
 *  - the queueing error check called alGetError() a second time inside the
 *    'if'; the first call had already cleared the error flag, so failures
 *    were never detected (and the logged value was stale);
 *  - the inner 'ALenum error' declaration shadowed the outer one.
 *
 * NOTE(review): afd is dereferenced without a NULL check — presumably
 * audio_get() blocks until a packet is available; confirm.
 */
static void* audio_start(void *aux)
{
    audio_fifo_t *af = aux;
    audio_fifo_data_t *afd;
    ALCdevice *device = NULL;
    ALCcontext *context = NULL;
    ALuint buffers[NUM_BUFFERS];
    ALint processed;
    ALenum error;
    ALint rate;
    ALint channels;

    device = alcOpenDevice(NULL); /* Use the default device */
    if (!device) error_exit("failed to open device");
    context = alcCreateContext(device, NULL);
    alcMakeContextCurrent(context);
    alListenerf(AL_GAIN, 1.0f);
    alDistanceModel(AL_NONE);
    alGenBuffers((ALsizei)NUM_BUFFERS, buffers);
    alGenSources(1, &source);

    /* First prebuffer some audio */
    queue_buffer(source, af, buffers[0]);
    queue_buffer(source, af, buffers[1]);
    queue_buffer(source, af, buffers[2]);
    for (;;) {

        alSourcePlay(source);
        for (;;) {
            /* Wait for some audio to play */
            alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
            if (processed <= 0)
            {
                usleep(200);
                continue;
            }

            /* Remove old audio from the queue.. */
            ALuint buffer;

            alSourceUnqueueBuffers(source, 1, &buffer);

            /* and queue some more audio */
            afd = audio_get(af);

            alGetBufferi(buffer, AL_FREQUENCY, &rate);
            alGetBufferi(buffer, AL_CHANNELS, &channels);
            if (afd->rate != rate || afd->channels != channels)
            {
                log_debug("openal","audio_start","rate or channel count changed, resetting");
                break;
            }

            alBufferData(buffer,
                afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
                afd->samples,
                afd->nsamples * afd->channels * sizeof(int16_t),
                afd->rate);

            free(afd);

            /* Read the error flag exactly once per check; a second call
             * would observe the already-cleared flag. */
            error = alGetError();
            if (error != AL_NO_ERROR)
            {
                log_error("openal","audio_start","Error buffering: %s", alGetString(error));
                return NULL;
            }

            alSourceQueueBuffers(source, 1, &buffer);

            error = alGetError();
            if (error != AL_NO_ERROR)
            {
                log_error("openal","audio_start","Error queing buffering: %s", alGetString(error));
                return NULL;
            }

            alGetSourcei(source, AL_SOURCE_STATE, &processed);
            if (processed != AL_PLAYING)
            {
                // Resume playing
                alSourcePlay(source);
            }

            if ((error = alcGetError(device)) != AL_NO_ERROR) {
                log_error("openal","audio_start","Error queing buffering: %s", alGetString(error));
                exit(1);
            }

        }

        /* Format or rate changed, so we need to reset all buffers.
         * 'afd' still holds the packet that triggered the change. */
        alSourcei(source, AL_BUFFER, 0);
        alSourceStop(source);

        /* Make sure we don't lose the audio packet that caused the change */
        alBufferData(buffers[0],
                     afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
                     afd->samples,
                     afd->nsamples * afd->channels * sizeof(short),
                     afd->rate);

        free(afd);

        alSourceQueueBuffers(source, 1, &buffers[0]);
        queue_buffer(source, af, buffers[1]);
        queue_buffer(source, af, buffers[2]);

    }
}
예제 #19
0
/**
 * @brief Feed the USB audio class configuration descriptors byte by byte.
 *
 * Writes the control interface, header, input/feature/output unit
 * descriptors, both streaming interface alternates, the AS general and
 * format descriptors, and the isochronous endpoint descriptors through the
 * APP_USBD_CLASS_DESCRIPTOR_WRITE machinery.
 *
 * NOTE(review): locals here are deliberately 'static' — the
 * APP_USBD_CLASS_DESCRIPTOR_BEGIN/WRITE/END macros appear to implement a
 * writer that can suspend mid-way (when p_buff fills) and re-enter, so
 * automatic variables would not survive between calls.  Confirm against
 * the macro documentation before changing this.
 */
static bool audio_feed_descriptors(app_usbd_class_descriptor_ctx_t * p_ctx,
                                   app_usbd_class_inst_t const     * p_inst,
                                   uint8_t                         * p_buff,
                                   size_t                            max_size)
{
    static uint8_t ifaces   = 0;
    ifaces = app_usbd_class_iface_count_get(p_inst);
    /* This class always exposes exactly one control + one streaming iface. */
    ASSERT(ifaces == 2);
    app_usbd_audio_t const * p_audio = audio_get(p_inst);

    APP_USBD_CLASS_DESCRIPTOR_BEGIN(p_ctx, p_buff, max_size);

    /* CONTROL INTERFACE DESCRIPTOR */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x09); // bLength
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_DESCRIPTOR_INTERFACE); // bDescriptorType = Interface

    static app_usbd_class_iface_conf_t const * p_cur_iface = NULL;
    p_cur_iface = app_usbd_class_iface_get(p_inst, 0);

    APP_USBD_CLASS_DESCRIPTOR_WRITE(app_usbd_class_iface_number_get(p_cur_iface)); // bInterfaceNumber
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // bAlternateSetting
    APP_USBD_CLASS_DESCRIPTOR_WRITE(app_usbd_class_iface_ep_count_get(p_cur_iface)); // bNumEndpoints
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_CLASS); // bInterfaceClass = Audio
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_SUBCLASS_AUDIOCONTROL); // bInterfaceSubclass (Audio Control)
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_CLASS_PROTOCOL_UNDEFINED); // bInterfaceProtocol
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // iInterface

    /* HEADER INTERFACE */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x09); // bLength
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_DESCRIPTOR_INTERFACE); // bDescriptorType = Audio Interfaces
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_AC_IFACE_SUBTYPE_HEADER); // bDescriptorSubtype = Header
    APP_USBD_CLASS_DESCRIPTOR_WRITE(LSB_16(0x0100)); // bcdADC LSB
    APP_USBD_CLASS_DESCRIPTOR_WRITE(MSB_16(0x0100)); // bcdADC MSB

    /* wTotalLength covers the header plus the input/feature/output units. */
    static uint16_t header_desc_len = 0;
    header_desc_len = 9 + audio_get_feature_descriptor_size(p_inst) +
                      audio_get_input_descriptor_size(p_inst) + audio_get_output_descriptor_size(
        p_inst);
    APP_USBD_CLASS_DESCRIPTOR_WRITE(LSB_16(header_desc_len)); // wTotalLength LSB
    APP_USBD_CLASS_DESCRIPTOR_WRITE(MSB_16(header_desc_len)); // wTotalLength MSB
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x01);                    // bInCollection
    APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_control_interface_number(p_inst) + 1); // baInterfaceNr(1)

    /* INPUT TERMINAL DESCRIPTOR */
    static uint32_t cur_byte        = 0;
    static size_t   input_desc_size = 0;
    input_desc_size = audio_get_input_descriptor_size(p_inst);

    for (cur_byte = 0; cur_byte < input_desc_size; cur_byte++)
    {
        APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_input_descriptor_data(p_inst, cur_byte));
    }

    /* FEATURE UNIT DESCRIPTOR */
    static size_t feature_desc_size = 0;
    feature_desc_size = audio_get_feature_descriptor_size(p_inst);

    for (cur_byte = 0; cur_byte < feature_desc_size; cur_byte++)
    {
        APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_feature_descriptor_data(p_inst, cur_byte));
    }

    /* OUTPUT TERMINAL DESCRIPTOR */
    static size_t output_desc_size = 0;
    output_desc_size = audio_get_output_descriptor_size(p_inst);

    for (cur_byte = 0; cur_byte < output_desc_size; cur_byte++)
    {
        APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_output_descriptor_data(p_inst, cur_byte));
    }

    p_cur_iface++;

    /* STREAM INTERFACE DESCRIPTOR ALT 0 */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x09); // bLength
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_DESCRIPTOR_INTERFACE); // bDescriptorType = Interface
    APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_control_interface_number(p_inst) + 1); // bInterfaceNumber
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // bAlternateSetting
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // bNumEndpoints
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_CLASS); // bInterfaceClass = Audio
    APP_USBD_CLASS_DESCRIPTOR_WRITE(p_audio->specific.inst.type_streaming); // bInterfaceSubclass (Audio Control)
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_CLASS_PROTOCOL_UNDEFINED); // bInterfaceProtocol
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // iInterface

    /* STREAM INTERFACE DESCRIPTOR ALT 1 */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x09); // bLength
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_DESCRIPTOR_INTERFACE); // bDescriptorType = Interface
    APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_control_interface_number(p_inst) + 1); // bInterfaceNumber
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x01); // bAlternateSetting
    APP_USBD_CLASS_DESCRIPTOR_WRITE(app_usbd_class_iface_ep_count_get(p_cur_iface)); // bNumEndpoints
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_CLASS); // bInterfaceClass = Audio
    APP_USBD_CLASS_DESCRIPTOR_WRITE(p_audio->specific.inst.type_streaming); // bInterfaceSubclass (Audio Control)
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_CLASS_PROTOCOL_UNDEFINED); // bInterfaceProtocol
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // iInterface

    /* AudioStreaming GENERAL DESCRIPTOR */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x07); // bLength
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_DESCRIPTOR_INTERFACE); // bDescriptorType = Audio Interface
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_AS_IFACE_SUBTYPE_GENERAL); // bDescriptorSubtype = General
    /* NOTE(review): bTerminalLink is filled with an interface number here,
     * but per the audio class spec it should reference a terminal ID —
     * confirm this is intentional. */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_control_interface_number(p_inst) + 1); // bTerminalLink
    APP_USBD_CLASS_DESCRIPTOR_WRITE(p_audio->specific.inst.delay); // bDelay
    APP_USBD_CLASS_DESCRIPTOR_WRITE(LSB_16(p_audio->specific.inst.format)); // wFormatTag LSB
    APP_USBD_CLASS_DESCRIPTOR_WRITE(MSB_16(p_audio->specific.inst.format)); // wFormatTag MSB

    /* FORMAT DESCRIPTOR */
    static size_t format_desc_size = 0;
    format_desc_size = audio_get_format_descriptor_size(p_inst);

    for (cur_byte = 0; cur_byte < format_desc_size; cur_byte++)
    {
        APP_USBD_CLASS_DESCRIPTOR_WRITE(audio_get_format_descriptor_data(p_inst, cur_byte));
    }

    /* ENDPOINT GENERAL DESCRIPTOR */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x07); // bLength
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_DESCRIPTOR_ENDPOINT); // bDescriptorType = Audio Descriptor
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_AUDIO_EP_SUBTYPE_GENERAL);  // bDescriptorSubtype = EP General
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // bmAttributes
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // bLockDelayUnits
    APP_USBD_CLASS_DESCRIPTOR_WRITE(LSB_16(0x0000)); // wLockDelay LSB
    APP_USBD_CLASS_DESCRIPTOR_WRITE(MSB_16(0x0000)); // wLockDelay MSB

    /* ENDPOINT ISO DESCRIPTOR */
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x09); // bLength
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_DESCRIPTOR_ENDPOINT); // bDescriptorType = Endpoint

    static app_usbd_class_ep_conf_t const * p_cur_ep = NULL;
    p_cur_ep = app_usbd_class_iface_ep_get(p_cur_iface, 0);
    APP_USBD_CLASS_DESCRIPTOR_WRITE(app_usbd_class_ep_address_get(p_cur_ep)); // bEndpointAddress
    APP_USBD_CLASS_DESCRIPTOR_WRITE(APP_USBD_DESCRIPTOR_EP_ATTR_TYPE_ISOCHRONOUS); // bmAttributes
    APP_USBD_CLASS_DESCRIPTOR_WRITE(LSB_16(p_audio->specific.inst.ep_size)); // wMaxPacketSize LSB
    APP_USBD_CLASS_DESCRIPTOR_WRITE(MSB_16(p_audio->specific.inst.ep_size)); // wMaxPacketSize MSB
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x01); // bInterval
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // bRefresh
    APP_USBD_CLASS_DESCRIPTOR_WRITE(0x00); // bSynchAddress

    APP_USBD_CLASS_DESCRIPTOR_END();
}
예제 #20
0
파일: get.c 프로젝트: JammyWei/ipc_dm36x
/*
 * Send the next chunk of the current (mmapped) file/AV buffer to the
 * client, and on end-of-buffer advance MJPEG/MPEG4/AVC streams to the next
 * frame (interleaving audio when available).
 *
 * Returns 1 when there is more work to do, 0 when the request is finished
 * or dead, -1 when the socket would block.
 *
 * SIGBUS while touching the mmapped region (e.g. file truncated under us)
 * is caught via sigsetjmp/handle_sigbus and kills the request.
 */
int process_get(request * req)
{
    int bytes_written;
    /* volatile: must keep its value across the siglongjmp back into
     * sigsetjmp below. */
    volatile int bytes_to_write;

    bytes_to_write = req->filesize - req->filepos;
    if (bytes_to_write > SOCKETBUF_SIZE)
        bytes_to_write = SOCKETBUF_SIZE;


    if (sigsetjmp(env, 1) == 0) {
        handle_sigbus = 1;
#ifdef SERVER_SSL
	if(req->ssl == NULL){
#endif /*SERVER_SSL*/
        bytes_written = write(req->fd, req->data_mem + req->filepos,
                              bytes_to_write);
#ifdef SERVER_SSL
	}else{
		bytes_written = SSL_write(req->ssl, req->data_mem + req->filepos, bytes_to_write);
#if 0
		printf("SSL_write\n");
#endif /*0*/
	}
#endif /*SERVER_SSL*/
        handle_sigbus = 0;
        /* OK, SIGBUS **after** this point is very bad! */
    } else {
        /* sigbus! */
        log_error_doc(req);
        /* sending an error here is inappropriate
         * if we are here, the file is mmapped, and thus,
         * a content-length has been sent. If we send fewer bytes
         * the client knows there has been a problem.
         * We run the risk of accidentally sending the right number
         * of bytes (or a few too many) and the client
         * won't be the wiser.
         */
        req->status = DEAD;
        fprintf(stderr, "%sGot SIGBUS in write(2)!\n", get_commonlog_time());
        return 0;
    }

    if (bytes_written < 0) {
        if (errno == EWOULDBLOCK || errno == EAGAIN)
            return -1;
        /* request blocked at the pipe level, but keep going */
        else {
            if (errno != EPIPE) {
                log_error_doc(req);
                /* Can generate lots of log entries, */
                perror("write");
                /* OK to disable if your logs get too big */
            }
            req->status = DEAD;
            return 0;
        }
    }
    req->filepos += bytes_written;
	req->busy_flag = BUSY_FLAG_AUDIO|BUSY_FLAG_VIDEO;
    if (req->filepos == req->filesize) { /* EOF */

#ifdef DAVINCI_IPCAM
        /* Current buffer fully sent: interleave audio if pending, then
         * book/lock the next video frame and emit fresh part headers. */
        if (req->http_stream == URI_STREAM_MJPEG) {
#if 1
#if 0
            while (audio_get(req) > 0);
            if (req->audio_length >= AUDIO_SEND_SIZE) {
                audio_send(req);
                return 1;
            }
#else
            if (audio_get(req, FMT_MJPEG) > 0)
			return 1;
#endif
#else
		req->busy_flag &= ~BUSY_FLAG_AUDIO;
#endif
            if (req->serial_lock) {
                GetAVData(AV_OP_UNLOCK_MJPEG, req->serial_lock, NULL);
                req->serial_lock = 0;
            }
            GetAVData(AV_OP_GET_MJPEG_SERIAL, -1, &req->av_data);
            if (req->av_data.serial < req->serial_book) {
                /* Booked frame not produced yet — wait for video. */
                req->busy_flag &= ~BUSY_FLAG_VIDEO;
                return 1;
            }
            GetAVData(AV_OP_LOCK_MJPEG, req->av_data.serial, &req->av_data );
            req->data_mem = req->av_data.ptr;
            req->filesize = req->av_data.size+16;
            req->filepos = 0;
            req->serial_lock = req->av_data.serial;
            req->serial_book = req->av_data.serial+1;
            reset_output_buffer(req);
            req_write(req, "\r\n");
            print_mjpeg_headers(req);
            return 1;
        }
        if (req->http_stream == URI_STREAM_MPEG4 || req->http_stream == URI_STREAM_AVC) {
            int ret;
#if 1
#if 0
		while (audio_get(req) > 0);
		if (req->audio_length > AUDIO_SEND_SIZE) {
			req->busy_flag |= BUSY_FLAG_AUDIO;
			audio_send(req);
      		      return 1;
		}
#else
            if (audio_get(req, FMT_MPEG4) > 0)
			return 1;
#endif
#else
		req->busy_flag &= ~BUSY_FLAG_AUDIO;
#endif
            ret = GetAVData(AV_OP_LOCK_MP4, req->serial_book, &req->av_data);
            if (ret == RET_SUCCESS) {
                GetAVData(AV_OP_UNLOCK_MP4, req->serial_lock, NULL);
                req->data_mem = req->av_data.ptr;
                req->filesize = req->av_data.size+16;
                req->filepos = 0;

                req->serial_lock = req->av_data.serial;
                req->serial_book = req->av_data.serial+1;

                reset_output_buffer(req);
                req_write(req, "\r\n");
				if (req->http_stream == URI_STREAM_AVC)
                { print_avc_headers(req); }
				else
				{ print_mpeg4_headers(req); }
                return 1;
            }
            else if (ret == RET_NO_VALID_DATA) {
                req->busy_flag &= ~BUSY_FLAG_VIDEO;
                return 1;
            }
            else {
                /* Lock failed — resynchronize the booking serial. */
                GetAVData(AV_OP_GET_MPEG4_SERIAL, -1, &req->av_data );
                req->serial_book = req->av_data.serial;
                dbg("lock error ret=%d\n", ret);
                return 1;
            }
        }
        if (req->http_stream == URI_STREAM_MPEG4CIF || req->http_stream == URI_STREAM_AVCCIF) {
            int ret;
#if 1
#if 0
		while (audio_get(req) > 0);
		if (req->audio_length > AUDIO_SEND_SIZE) {
			audio_send(req);
      		      return 1;
		}
#else
            if (audio_get(req, FMT_MPEG4_EXT) > 0)
			return 1;
#endif
#else
		req->busy_flag &= ~BUSY_FLAG_AUDIO;
#endif
            ret = GetAVData(AV_OP_LOCK_MP4_CIF, req->serial_book, &req->av_data);
            if (ret == RET_SUCCESS) {
                GetAVData(AV_OP_UNLOCK_MP4_CIF, req->serial_lock, NULL);
                req->data_mem = req->av_data.ptr;
                req->filesize = req->av_data.size+16;
                req->filepos = 0;

                req->serial_lock = req->av_data.serial;
                req->serial_book = req->av_data.serial+1;

                reset_output_buffer(req);
                req_write(req, "\r\n");
				if (req->http_stream == URI_STREAM_AVCCIF)
                { print_avc_headers(req); }
				else
				{ print_mpeg4_headers(req); }
                return 1;
            }
            else if (ret == RET_NO_VALID_DATA) {
                req->busy_flag &= ~BUSY_FLAG_VIDEO;
                return 1;
            }
            else {
                GetAVData(AV_OP_GET_MPEG4_CIF_SERIAL, -1, &req->av_data );
                req->serial_book = req->av_data.serial;
                dbg("lock error ret=%d\n", ret);
                return 1;
            }
        }
#endif  // DAVINCI_IPCAM
        return 0;
    } else
        return 1;               /* more to do */
}
예제 #21
0
/*
 * OpenAL playback thread (triple-buffered variant): pre-buffers three
 * packets, then continually unqueues processed buffers, refills them from
 * the FIFO and requeues them.  On a rate/channel change all buffers are
 * reset and reloaded with the packet that triggered the change.
 *
 * Fix over the previous revision: the packet was freed *before* breaking
 * out of the inner loop on a format change, yet it was still dereferenced
 * (and then leaked) when reloading buffers[0] afterwards — a use-after-free.
 * The packet is now freed only after the reset alBufferData() has copied it.
 *
 * NOTE(review): afd is dereferenced without a NULL check — presumably
 * audio_get() blocks until a packet is available; confirm.
 */
static void* audio_start(void *aux)
{
	audio_fifo_t *af = aux;
	audio_fifo_data_t *afd;
	unsigned int frame = 0;
	ALCdevice *device = NULL;
	ALCcontext *context = NULL;
	ALuint buffers[NUM_BUFFERS];
	ALuint source;
	ALint processed;
	ALenum error;
	ALint rate;
	ALint channels;
	device = alcOpenDevice(NULL); /* Use the default device */
	if (!device) error_exit("failed to open device");
	context = alcCreateContext(device, NULL);
	alcMakeContextCurrent(context);
	alListenerf(AL_GAIN, 1.0f);
	alDistanceModel(AL_NONE);
	alGenBuffers((ALsizei)NUM_BUFFERS, buffers);
	alGenSources(1, &source);

	/* First prebuffer some audio */
	queue_buffer(source, af, buffers[0]);
	queue_buffer(source, af, buffers[1]);
	queue_buffer(source, af, buffers[2]);
	for (;;) {

		alSourcePlay(source);
		for (;;) {
			/* Wait for some audio to play */
			do {
				alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
				usleep(100);
			} while (!processed);

			/* Remove old audio from the queue.. */
			alSourceUnqueueBuffers(source, 1, &buffers[frame % 3]);

			/* and queue some more audio */
			afd = audio_get(af);
			alGetBufferi(buffers[frame % 3], AL_FREQUENCY, &rate);
			alGetBufferi(buffers[frame % 3], AL_CHANNELS, &channels);
			if (afd->rate != rate || afd->channels != channels) {
				printf("rate or channel count changed, resetting\n");
				/* Do NOT free afd here: the reset path below still
				 * needs this packet. */
				break;
			}
			alBufferData(buffers[frame % 3],
						 afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
						 afd->samples,
						 afd->nsamples * afd->channels * sizeof(short),
						 afd->rate);
			free(afd);
			alSourceQueueBuffers(source, 1, &buffers[frame % 3]);

			if ((error = alcGetError(device)) != AL_NO_ERROR) {
				printf("openal al error: %d\n", error);
				exit(1);
			}
			frame++;
		}
		/* Format or rate changed, so we need to reset all buffers */
		alSourcei(source, AL_BUFFER, 0);
		alSourceStop(source);

		/* Make sure we don't lose the audio packet that caused the change */
		alBufferData(buffers[0],
					 afd->channels == 1 ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16,
					 afd->samples,
					 afd->nsamples * afd->channels * sizeof(short),
					 afd->rate);
		/* Safe to release now that alBufferData() has copied the samples. */
		free(afd);

		alSourceQueueBuffers(source, 1, &buffers[0]);
		queue_buffer(source, af, buffers[1]);
		queue_buffer(source, af, buffers[2]);
		frame = 0;
	}
}
예제 #22
0
/**
 * @brief @ref app_usbd_class_methods_t::event_handler
 *
 * Dispatches USBD events for the audio class: SETUP packets go to the
 * setup handler, endpoint transfers are routed by direction, and instance
 * append/remove (un)register the optional SOF interrupt handler stored in
 * the class context.
 */
static ret_code_t audio_event_handler(
    app_usbd_class_inst_t const  * p_inst,
    app_usbd_complex_evt_t const * p_event)
{
    ASSERT(p_inst != NULL);
    ASSERT(p_event != NULL);

    ret_code_t ret = NRF_SUCCESS;

    switch (p_event->app_evt.type)
    {
        case APP_USBD_EVT_DRV_SETUP:
            ret = setup_event_handler(p_inst, (app_usbd_setup_evt_t const *)p_event);
            break;

        case APP_USBD_EVT_DRV_EPTRANSFER:
            /* Route by endpoint direction: IN vs OUT. */
            ret = NRF_USBD_EPIN_CHECK(p_event->drv_evt.data.eptransfer.ep)
                  ? endpoint_in_event_handler(p_inst)
                  : endpoint_out_event_handler(p_inst);
            break;

        case APP_USBD_EVT_INST_APPEND:
        {
            /* Attach the SOF interrupt handler, if one was registered. */
            app_usbd_audio_ctx_t * p_audio_ctx = audio_ctx_get(audio_get(p_inst));
            if (p_audio_ctx->sof_handler != NULL)
            {
                ret = app_usbd_class_sof_interrupt_register(p_inst, p_audio_ctx->sof_handler);
                APP_ERROR_CHECK(ret);
            }
            break;
        }

        case APP_USBD_EVT_INST_REMOVE:
        {
            /* Detach the SOF interrupt handler, if one was registered. */
            app_usbd_audio_ctx_t * p_audio_ctx = audio_ctx_get(audio_get(p_inst));
            if (p_audio_ctx->sof_handler != NULL)
            {
                ret = app_usbd_class_sof_interrupt_unregister(p_inst);
                APP_ERROR_CHECK(ret);
            }
            break;
        }

        case APP_USBD_EVT_DRV_RESET:
        case APP_USBD_EVT_DRV_SUSPEND:
        case APP_USBD_EVT_DRV_RESUME:
        case APP_USBD_EVT_STARTED:
        case APP_USBD_EVT_STOPPED:
            /* No class-specific handling required for these events. */
            break;

        default:
            ret = NRF_ERROR_NOT_SUPPORTED;
            break;
    }

    return ret;
}