/* Put bytes into the input buffer of the sound alteration object.
   lenBytes bytes will be read from buffer into the sound alteration object.
   The buffer is not guaranteed to remain unchanged after this call returns,
   so its data must be copied out before returning. */
jboolean Java_org_vinuxproject_sonic_Sonic_putBytesNative(
    JNIEnv *env,
    jobject thiz,
    jlong sonicID,
    jbyteArray buffer,
    jint lenBytes)
{
    sonicInst inst = getInst(sonicID);
    sonicStream stream = inst->stream;
    /* Whole multi-channel samples contained in lenBytes, plus any leftover bytes. */
    int samples = lenBytes/(sizeof(short)*sonicGetNumChannels(stream));
    int remainingBytes = lenBytes - samples*sizeof(short)*sonicGetNumChannels(stream);

    if(remainingBytes != 0) {
        LOGV("Remaining bytes == %d!!!", remainingBytes);
    }
    if(lenBytes > inst->byteBufSize*sizeof(short)) {
        /* Grow the scratch buffer to twice the size needed, measured in shorts.
           Use a temporary so the old buffer is not leaked if realloc fails. */
        int newBufSize = lenBytes*2/sizeof(short);
        short *newBuf = (short *)realloc(inst->byteBuf, newBufSize*sizeof(short));
        if(newBuf == NULL) {
            return 0;
        }
        inst->byteBuf = newBuf;
        inst->byteBufSize = newBufSize;
    }
    LOGV("Writing %d bytes to stream", lenBytes);
    (*env)->GetByteArrayRegion(env, buffer, 0, lenBytes, (jbyte *)inst->byteBuf);
    return sonicWriteShortToStream(stream, inst->byteBuf, samples);
}
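
The wrapper above relies on a sonicInst bookkeeping struct and a getInst() helper that are not part of this excerpt. The sketch below is only a guess at their shape, inferred from the fields the wrapper touches (stream, byteBuf, byteBufSize); the real definitions in the project may differ.

/* Sketch of the bookkeeping assumed by putBytesNative(): a per-instance
   struct holding the sonic stream plus a reusable scratch buffer, and a
   helper that recovers the pointer packed into the jlong handle. */
#include <stdint.h>
#include <jni.h>
#include "sonic.h"

typedef struct sonicInstStruct {
    sonicStream stream;  /* underlying sound-alteration stream */
    short *byteBuf;      /* scratch buffer filled by GetByteArrayRegion */
    int byteBufSize;     /* capacity of byteBuf, in shorts */
} *sonicInst;

static sonicInst getInst(jlong sonicID)
{
    /* The jlong handle is assumed to be the instance pointer cast across JNI. */
    return (sonicInst)(intptr_t)sonicID;
}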
Example #2
void speech_stream::flush_input_buffer()
{
  if(!sonic_stream)
    return;
  if(samples.empty())
    return;
  // Hand the buffered samples to sonic; a zero return signals allocation failure.
  if(!sonicWriteShortToStream(sonic_stream, &samples[0], samples.size()))
    throw std::bad_alloc();
  samples.clear();
}
Example #3
File: main.c  Project: jaredbracken/sonic
/* Run sonic. */
static void runSonic(
    waveFile inFile,
    waveFile outFile,
    float speed,
    float pitch,
    float rate,
    float volume,
    int emulateChordPitch,
    int quality,
    int sampleRate,
    int numChannels)
{
    sonicStream stream = sonicCreateStream(sampleRate, numChannels);
    short inBuffer[BUFFER_SIZE], outBuffer[BUFFER_SIZE];
    int samplesRead, samplesWritten;

    sonicSetSpeed(stream, speed);
    sonicSetPitch(stream, pitch);
    sonicSetRate(stream, rate);
    sonicSetVolume(stream, volume);
    sonicSetChordPitch(stream, emulateChordPitch);
    sonicSetQuality(stream, quality);
    do {
        samplesRead = readFromWaveFile(inFile, inBuffer, BUFFER_SIZE/numChannels);
        if(samplesRead == 0) {
            sonicFlushStream(stream);
        } else {
            sonicWriteShortToStream(stream, inBuffer, samplesRead);
        }
        do {
            samplesWritten = sonicReadShortFromStream(stream, outBuffer,
                BUFFER_SIZE/numChannels);
            if(samplesWritten > 0) {
                writeToWaveFile(outFile, outBuffer, samplesWritten);
            }
        } while(samplesWritten > 0);
    } while(samplesRead > 0);
    sonicDestroyStream(stream);
}
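
For context, a minimal driver for runSonic() might look like the sketch below. It assumes the wave.h helpers from the sonic project (openInputWaveFile, openOutputWaveFile, closeWaveFile); their exact signatures are an assumption and may differ from the real header.

/* Hypothetical driver: open the input and output WAV files, run sonic at
   1.5x speed with all other parameters left neutral, then close the files.
   The wave.h helper signatures shown here are assumed, not verified. */
#include <stdio.h>
#include "sonic.h"
#include "wave.h"

int main(int argc, char **argv)
{
    int sampleRate, numChannels;
    waveFile inFile, outFile;

    if(argc != 3) {
        fprintf(stderr, "Usage: %s infile.wav outfile.wav\n", argv[0]);
        return 1;
    }
    inFile = openInputWaveFile(argv[1], &sampleRate, &numChannels);
    outFile = openOutputWaveFile(argv[2], sampleRate, numChannels);
    runSonic(inFile, outFile, 1.5f, 1.0f, 1.0f, 1.0f, 0, 0,
             sampleRate, numChannels);
    closeWaveFile(inFile);
    closeWaveFile(outFile);
    return 0;
}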
void TimestretchBufferProvider::processFrames(void *dstBuffer, size_t *dstFrames,
        const void *srcBuffer, size_t *srcFrames)
{
    ALOGV("processFrames(%zu %zu)  remaining(%zu)", *dstFrames, *srcFrames, mRemaining);
    // Note dstFrames is the required number of frames.

    // Ensure consumption from src is as expected.
    // TODO: add logic to track "very accurate" consumption related to speed,
    // original sampling rate, and actual frames processed.
    const size_t targetSrc = *dstFrames * mPlaybackRate.mSpeed;
    if (*srcFrames < targetSrc) { // limit dst frames to that possible
        *dstFrames = *srcFrames / mPlaybackRate.mSpeed;
    } else if (*srcFrames > targetSrc + 1) {
        *srcFrames = targetSrc + 1;
    }

    if (!mAudioPlaybackRateValid) {
        //fallback mode
        if (*dstFrames > 0) {
            switch(mPlaybackRate.mFallbackMode) {
            case AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT:
                if (*dstFrames <= *srcFrames) {
                    size_t copySize = mFrameSize * *dstFrames;
                    memcpy(dstBuffer, srcBuffer, copySize);
                } else {
                    // cyclically repeat the source.
                    for (size_t count = 0; count < *dstFrames; count += *srcFrames) {
                        size_t remaining = min(*srcFrames, *dstFrames - count);
                        memcpy((uint8_t*)dstBuffer + mFrameSize * count,
                                srcBuffer, mFrameSize * remaining);
                    }
                }
                break;
            case AUDIO_TIMESTRETCH_FALLBACK_DEFAULT:
            case AUDIO_TIMESTRETCH_FALLBACK_MUTE:
                memset(dstBuffer, 0, mFrameSize * *dstFrames);
                break;
            case AUDIO_TIMESTRETCH_FALLBACK_FAIL:
            default:
                if (!mFallbackFailErrorShown) {
                    ALOGE("invalid parameters in TimestretchBufferProvider fallbackMode:%d",
                            mPlaybackRate.mFallbackMode);
                    mFallbackFailErrorShown = true;
                }
                break;
            }
        }
    } else {
        switch (mFormat) {
        case AUDIO_FORMAT_PCM_FLOAT:
            if (sonicWriteFloatToStream(mSonicStream, (float*)srcBuffer, *srcFrames) != 1) {
                ALOGE("sonicWriteFloatToStream cannot realloc");
                *srcFrames = 0; // cannot consume all of srcBuffer
            }
            *dstFrames = sonicReadFloatFromStream(mSonicStream, (float*)dstBuffer, *dstFrames);
            break;
        case AUDIO_FORMAT_PCM_16_BIT:
            if (sonicWriteShortToStream(mSonicStream, (short*)srcBuffer, *srcFrames) != 1) {
                ALOGE("sonicWriteShortToStream cannot realloc");
                *srcFrames = 0; // cannot consume all of srcBuffer
            }
            *dstFrames = sonicReadShortFromStream(mSonicStream, (short*)dstBuffer, *dstFrames);
            break;
        default:
            // could also be caught on construction
            LOG_ALWAYS_FATAL("invalid format %#x for TimestretchBufferProvider", mFormat);
        }
    }
}
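
Both runSonic() above and processFrames() follow the same core pattern: write input with sonicWriteShortToStream (checking its return value for allocation failure), then drain whatever output sonic has buffered with sonicReadShortFromStream. Below is a minimal standalone sketch of that pattern on an in-memory 16-bit buffer; the 44.1 kHz stereo format, 1.5x speed, and chunk size are arbitrary example values, not taken from any of the projects above.

/* Minimal sketch of sonic's write/flush/drain pattern on a raw 16-bit buffer. */
#include <stdio.h>
#include "sonic.h"

#define FRAMES_PER_CHUNK 1024
#define NUM_CHANNELS 2

int main(void)
{
    sonicStream stream = sonicCreateStream(44100, NUM_CHANNELS);
    short in[FRAMES_PER_CHUNK * NUM_CHANNELS] = {0};  /* silence stands in for real audio */
    short out[FRAMES_PER_CHUNK * NUM_CHANNELS];
    int got;

    sonicSetSpeed(stream, 1.5f);
    /* A zero return from the write call means sonic could not grow its buffers. */
    if(!sonicWriteShortToStream(stream, in, FRAMES_PER_CHUNK)) {
        sonicDestroyStream(stream);
        return 1;
    }
    sonicFlushStream(stream);  /* force out any samples sonic is still holding */
    do {
        /* Each "sample" here is one multi-channel frame, as in the examples above. */
        got = sonicReadShortFromStream(stream, out, FRAMES_PER_CHUNK);
        if(got > 0) {
            printf("drained %d samples\n", got);
        }
    } while(got > 0);
    sonicDestroyStream(stream);
    return 0;
}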