Code example #1
File: prsound.c  Project: padsof-uam/redes2
#include <stdio.h>
#include <pulse/sample.h>   /* PA_SAMPLE_S16BE */

/* The project's sound API header is assumed here; the header name is a guess
 * based on the file name. It is expected to declare sampleFormat(), openRecord(),
 * openPlay(), recordSound(), playSound(), closeRecord() and closePlay(). */
#include "prsound.h"

int main()
{
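	/* Record 10000 chunks of 160 bytes to a file, then read the file back and play it. */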
	char buf[256];
	register int i;
	FILE *f;

	sampleFormat(PA_SAMPLE_S16BE,2); /* Default value; calling this function is not strictly necessary */

	if(openRecord("prueba")) puts("error");
	if(openPlay("prueba")) puts("error");

	f=fopen("pruebasonido","w+b");
	puts("Grabando");
	for(i=0; i < 10000; ++i)
	{
		recordSound(buf,160);
		fwrite(buf,1,160,f);
	}
	fclose(f);
	closeRecord();

	f=fopen("pruebasonido","rb");
	puts("Reproduciendo");
	for(i=0; i < 10000; ++i)
	{
		fread(buf,1,160,f);
		playSound(buf,160);
	}
	fclose(f);
	closePlay();

	return 0;
}
Code example #2
status_t AudioRecord::start()
{
    status_t ret = NO_ERROR;
    sp<ClientRecordThread> t = mClientRecordThread;

    LOGV("start");

    if (t != 0) {
        if (t->exitPending()) {
            if (t->requestExitAndWait() == WOULD_BLOCK) {
                LOGE("AudioRecord::start called from thread");
                return WOULD_BLOCK;
            }
        }
        t->mLock.lock();
    }

    if (android_atomic_or(1, &mActive) == 0) {
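        // The previous value of mActive was 0: we just transitioned from inactive to
        // active, so start the server-side IAudioRecord.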
        ret = mAudioRecord->start();
        if (ret == DEAD_OBJECT) {
            LOGV("start() dead IAudioRecord: creating a new one");
            ret = openRecord(mCblk->sampleRate, mFormat, mChannelCount,
                             mFrameCount, mFlags, getInput());
            if (ret == NO_ERROR) {
                ret = mAudioRecord->start();
            }
        }
        if (ret == NO_ERROR) {
            mNewPosition = mCblk->user + mUpdatePeriod;
            mCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
            mCblk->waitTimeMs = 0;
            if (t != 0) {
                t->run("ClientRecordThread", THREAD_PRIORITY_AUDIO_CLIENT);
            } else {
                setpriority(PRIO_PROCESS, 0, THREAD_PRIORITY_AUDIO_CLIENT);
            }
        } else {
            LOGV("start() failed");
            android_atomic_and(~1, &mActive);
        }
    }

    if (t != 0) {
        t->mLock.unlock();
    }

    return ret;
}
Code example #3
File: FMRadioSource.cpp  Project: Alberto97/STE_av
FMRadioSource::FMRadioSource()
    : mInitCheck(NO_INIT),
      mStarted(false),
      mSessionId(AudioSystem::newAudioSessionId()) {

    // get FM Radio RX input
    audio_in_acoustics_t flags = (audio_in_acoustics_t)
                    (AUDIO_IN_ACOUSTICS_AGC_DISABLE |
                     AUDIO_IN_ACOUSTICS_NS_DISABLE  |
                     AUDIO_IN_ACOUSTICS_TX_DISABLE );

    audio_io_handle_t input = AudioSystem::getInput(AUDIO_SOURCE_FM_RADIO_RX,
                                                    kSampleRate,
                                                    kAudioFormat,
                                                    kChannelMask,
                                                    flags,
                                                    mSessionId);
    if (input == 0) {
        ALOGE("Could not get audio input for FM Radio source");
        mInitCheck = UNKNOWN_ERROR;
        return;
    }

    // get frame count
    int frameCount = 0;
    status_t status = AudioRecord::getMinFrameCount(&frameCount, kSampleRate,
                                                    kAudioFormat, popcount(kChannelMask));
    if (status != NO_ERROR) {
        mInitCheck = status;
        return;
    }

    // create the IAudioRecord
    status = openRecord(frameCount, input);
    if (status != NO_ERROR) {
        mInitCheck = status;
        return;
    }

    AudioSystem::acquireAudioSessionId(mSessionId);

    mInitCheck = OK;
    return;
}
Code example #4
status_t BnCameraRecordService::onTransact(uint32_t code, const Parcel& data,
            Parcel* reply, uint32_t flags)
{
    REPORT_FUNCTION();

    switch (code) {
        case INIT_RECORD: {
            CHECK_INTERFACE(ICameraRecordService, data, reply);
            uint32_t sampleRate = data.readInt32();
            audio_format_t format = (audio_format_t) data.readInt32();
            audio_channel_mask_t channelMask = data.readInt32();
            reply->writeInt32(initRecord(sampleRate, format, channelMask));
            return NO_ERROR;
        } break;
        case OPEN_RECORD: {
            CHECK_INTERFACE(ICameraRecordService, data, reply);
            uint32_t sampleRate = data.readInt32();
            audio_format_t format = (audio_format_t) data.readInt32();
            audio_channel_mask_t channelMask = data.readInt32();
            size_t frameCount = data.readInt32();
            pid_t tid = (pid_t) data.readInt32();
            int sessionId = data.readInt32();
            status_t status;
            sp<IAudioRecord> record = openRecord(sampleRate, format, channelMask,
                frameCount, tid, &sessionId, &status);
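            // Invariant: openRecord() returns a non-null record exactly when status == NO_ERROR.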
            LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));

            reply->writeInt32(sessionId);
            reply->writeInt32(status);
            // Guard against a null record (openRecord() failed): write a null binder
            // instead of calling through a null sp<>.
            reply->writeStrongBinder(record != 0 ? record->asBinder() : sp<IBinder>());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }

    return NO_ERROR;
}
Code example #5
status_t AudioRecord::set(
    int inputSource,
    uint32_t sampleRate,
    int format,
    uint32_t channels,
    int frameCount,
    uint32_t flags,
    callback_t cbf,
    void* user,
    int notificationFrames,
    bool threadCanCallJava)
{

    LOGV("set(): sampleRate %d, channels %d, frameCount %d",sampleRate, channels, frameCount);
    if (mAudioRecord != 0) {
        return INVALID_OPERATION;
    }

    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }

    if (sampleRate == 0) {
        sampleRate = DEFAULT_SAMPLE_RATE;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AudioSystem::PCM_16_BIT;
    }
    // validate parameters
    if (!AudioSystem::isValidFormat(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }
    // hengai fix: map a plain channel count (1 or 2) to an input channel mask value
    // accepted by AudioSystem::isInputChannel().
    if (channels == 1)       channels = 0x04;
    else if (channels == 2)  channels = 0x0c;

    if (!AudioSystem::isInputChannel(channels)) {
        return BAD_VALUE;
    }
    int channelCount = AudioSystem::popCount(channels);

    audio_io_handle_t input = AudioSystem::getInput(inputSource,
                              sampleRate, format, channels, (AudioSystem::audio_in_acoustics)flags);
    if (input == 0) {
        LOGE("Could not get audio input for record source %d", inputSource);
        return BAD_VALUE;
    }

    // validate framecount
    size_t inputBuffSizeInBytes = -1;
    if (AudioSystem::getInputBufferSize(sampleRate, format, channelCount, &inputBuffSizeInBytes)
            != NO_ERROR) {
        LOGE("AudioSystem could not query the input buffer size.");
        return NO_INIT;
    }

    if (inputBuffSizeInBytes == 0) {
        LOGE("Recording parameters are not supported: sampleRate %d, channelCount %d, format %d",
             sampleRate, channelCount, format);
        return BAD_VALUE;
    }

    // Size of one frame in bytes: 16-bit or 8-bit samples per channel for linear PCM,
    // a single byte per frame for other (compressed) formats.
    int frameSizeInBytes;
    if (AudioSystem::isLinearPCM(format)) {
        frameSizeInBytes = channelCount * (format == AudioSystem::PCM_16_BIT ? sizeof(int16_t) : sizeof(int8_t));
    } else {
        frameSizeInBytes = sizeof(int8_t);
    }

    // We use 2* size of input buffer for ping pong use of record buffer.
    int minFrameCount = 2 * inputBuffSizeInBytes / frameSizeInBytes;
    LOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);

    if (frameCount == 0) {
        frameCount = minFrameCount;
    } else if (frameCount < minFrameCount) {
        return BAD_VALUE;
    }

    if (notificationFrames == 0) {
        notificationFrames = frameCount/2;
    }

    // create the IAudioRecord
    status_t status = openRecord(sampleRate, format, channelCount,
                                 frameCount, flags, input);

    if (status != NO_ERROR) {
        return status;
    }

    if (cbf != 0) {
        mClientRecordThread = new ClientRecordThread(*this, threadCanCallJava);
        if (mClientRecordThread == 0) {
            return NO_INIT;
        }
    }

    mStatus = NO_ERROR;

    mFormat = format;
    // Update buffer size in case it has been limited by AudioFlinger during track creation
    mFrameCount = mCblk->frameCount;
    mChannelCount = (uint8_t)channelCount;
    mChannels = channels;
    mActive = 0;
    mCbf = cbf;
    mNotificationFrames = notificationFrames;
    mRemainingFrames = notificationFrames;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mInputSource = (uint8_t)inputSource;
    mFlags = flags;
    mInput = input;

    return NO_ERROR;
}
Code example #6
status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    int active;
    status_t result;
    audio_track_cblk_t* cblk = mCblk;
    uint32_t framesReq = audioBuffer->frameCount;
    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;

    audioBuffer->frameCount  = 0;
    audioBuffer->size        = 0;

    uint32_t framesReady = cblk->framesReady();

    if (framesReady == 0) {
        cblk->lock.lock();
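        // Jump into the loop body so framesReady is re-read under the lock before
        // deciding whether to wait.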
        goto start_loop_here;
        while (framesReady == 0) {
            active = mActive;
            if (UNLIKELY(!active)) {
                cblk->lock.unlock();
                return NO_MORE_BUFFERS;
            }
            if (UNLIKELY(!waitCount)) {
                cblk->lock.unlock();
                return WOULD_BLOCK;
            }
            result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
            if (__builtin_expect(result!=NO_ERROR, false)) {
                cblk->waitTimeMs += waitTimeMs;
                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
                    LOGW(   "obtainBuffer timed out (is the CPU pegged?) "
                            "user=%08x, server=%08x", cblk->user, cblk->server);
                    cblk->lock.unlock();
                    result = mAudioRecord->start();
                    if (result == DEAD_OBJECT) {
                        LOGW("obtainBuffer() dead IAudioRecord: creating a new one");
                        result = openRecord(cblk->sampleRate, mFormat, mChannelCount,
                                            mFrameCount, mFlags, getInput());
                        if (result == NO_ERROR) {
                            cblk = mCblk;
                            mAudioRecord->start();
                        }
                    }
                    cblk->lock.lock();
                    cblk->waitTimeMs = 0;
                }
                if (--waitCount == 0) {
                    cblk->lock.unlock();
                    return TIMED_OUT;
                }
            }
            // read the server count again
start_loop_here:
            framesReady = cblk->framesReady();
        }
        cblk->lock.unlock();
    }

    cblk->waitTimeMs = 0;

    if (framesReq > framesReady) {
        framesReq = framesReady;
    }

    uint32_t u = cblk->user;
    uint32_t bufferEnd = cblk->userBase + cblk->frameCount;

    if (u + framesReq > bufferEnd) {
        framesReq = bufferEnd - u;
    }

    audioBuffer->flags       = 0;
    audioBuffer->channelCount= mChannelCount;
    audioBuffer->format      = mFormat;
    audioBuffer->frameCount  = framesReq;
    audioBuffer->size        = framesReq*cblk->frameSize;
    audioBuffer->raw         = (int8_t*)cblk->buffer(u);
    active = mActive;
    return active ? status_t(NO_ERROR) : status_t(STOPPED);
}