void WAVAudioFileServerMediaSubsession
::setStreamSourceScale(FramedSource* inputSource, float scale) {
  int iScale = (int)scale;
  WAVAudioFileSource* wavSource;
  if (fBitsPerSample > 8) {
    // "inputSource" is a filter; its input source is the original WAV file source:
    wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource());
  } else {
    // "inputSource" is the original WAV file source:
    wavSource = (WAVAudioFileSource*)inputSource;
  }

  wavSource->setScaleFactor(iScale);
}
void WAVAudioFileServerMediaSubsession
::seekStreamSource(FramedSource* inputSource, float seekNPT) {
  WAVAudioFileSource* wavSource;
  if (fBitsPerSample == 16) {
    // "inputSource" is a filter; its input source is the original WAV file source:
    wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource());
  } else {
    // "inputSource" is the original WAV file source:
    wavSource = (WAVAudioFileSource*)inputSource;
  }

  unsigned seekSampleNumber = (unsigned)(seekNPT*fSamplingFrequency);
  unsigned seekByteNumber = (seekSampleNumber*fNumChannels*fBitsPerSample)/8;
  
  wavSource->seekToPCMByte(seekByteNumber);
}
void WAVAudioFileServerMediaSubsession
::setStreamSourceDuration(FramedSource* inputSource, double streamDuration, u_int64_t& numBytes) {
  WAVAudioFileSource* wavSource;
  if (fBitsPerSample > 8) {
    // "inputSource" is a filter; its input source is the original WAV file source:
    wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource());
  } else {
    // "inputSource" is the original WAV file source:
    wavSource = (WAVAudioFileSource*)inputSource;
  }

  unsigned numDurationSamples = (unsigned)(streamDuration*fSamplingFrequency);
  unsigned numDurationBytes = numDurationSamples*((fNumChannels*fBitsPerSample)/8);
  numBytes = (u_int64_t)numDurationBytes;

  wavSource->limitNumBytesToStream(numDurationBytes);
}
void WAVAudioFileServerMediaSubsession
::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes) {
  WAVAudioFileSource* wavSource;
  if (fBitsPerSample > 8) {
    // "inputSource" is a filter; its input source is the original WAV file source:
    wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource());
  } else {
    // "inputSource" is the original WAV file source:
    wavSource = (WAVAudioFileSource*)inputSource;
  }

  unsigned seekSampleNumber = (unsigned)(seekNPT*fSamplingFrequency);
  unsigned seekByteNumber = seekSampleNumber*((fNumChannels*fBitsPerSample)/8);

  wavSource->seekToPCMByte(seekByteNumber);

  setStreamSourceDuration(inputSource, streamDuration, numBytes);
}
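
As a quick check of the seek arithmetic above, here is a worked sketch. The concrete numbers (44100 Hz, 2 channels, 16 bits per sample, a 2.5-second seek target) are hypothetical values chosen for illustration; they are not taken from the code on this page.

// Sketch only: local stand-ins for fSamplingFrequency, fNumChannels, fBitsPerSample.
unsigned samplingFrequency = 44100; // Hz
unsigned numChannels = 2;
unsigned bitsPerSample = 16;
double seekNPT = 2.5;               // seconds into the stream

unsigned seekSampleNumber = (unsigned)(seekNPT*samplingFrequency);
    // = 110250 sample frames
unsigned seekByteNumber = seekSampleNumber*((numChannels*bitsPerSample)/8);
    // = 110250 * 4 = 441000 bytes into the PCM data
// seekToPCMByte(441000) would therefore reposition the source 2.5 seconds in.
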
Example #5
WAVAudioFileSource*
WAVAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) {
  do {
    FILE* fid = OpenInputFile(env, fileName);
    if (fid == NULL) break;

    WAVAudioFileSource* newSource = new WAVAudioFileSource(env, fid);
    if (newSource != NULL && newSource->bitsPerSample() == 0) {
      // The WAV file header was apparently invalid.
      Medium::close(newSource);
      break;
    }

    newSource->fFileSize = (unsigned)GetFileSize(fileName, fid);

    return newSource;
  } while (0);

  return NULL;
}
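
A minimal driver for createNew() might look like the sketch below. The scheduler/environment setup is standard live555 boilerplate; the file name "test.wav" is a placeholder, not something used by the excerpt above.

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

int main() {
  // Standard live555 environment setup:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  // "test.wav" is a placeholder file name:
  WAVAudioFileSource* wavSource = WAVAudioFileSource::createNew(*env, "test.wav");
  if (wavSource == NULL) {
    *env << "Unable to open WAV file: " << env->getResultMsg() << "\n";
    return 1;
  }

  // Report the format that the WAV header described:
  *env << wavSource->samplingFrequency() << " Hz, "
       << wavSource->bitsPerSample() << " bits-per-sample, "
       << wavSource->numChannels() << " channel(s)\n";

  Medium::close(wavSource);
  return 0;
}
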
FramedSource* WAVAudioFileServerMediaSubsession
::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
  FramedSource* resultSource = NULL;
  do {
    WAVAudioFileSource* wavSource
      = WAVAudioFileSource::createNew(envir(), fFileName);
    if (wavSource == NULL) break;

    // Get attributes of the audio source:

    fAudioFormat = wavSource->getAudioFormat();
    fBitsPerSample = wavSource->bitsPerSample();
    if (fBitsPerSample != 8 && fBitsPerSample !=  16) {
      envir() << "The input file contains " << fBitsPerSample
	      << " bit-per-sample audio, which we don't handle\n";
      break;
    }
    fSamplingFrequency = wavSource->samplingFrequency();
    fNumChannels = wavSource->numChannels();
    unsigned bitsPerSecond
      = fSamplingFrequency*fBitsPerSample*fNumChannels;

    fFileDuration = (float)((8.0*wavSource->numPCMBytes())
      /(fSamplingFrequency*fNumChannels*fBitsPerSample));

    // Add in any filter necessary to transform the data prior to streaming:
    if (fBitsPerSample == 16) {
      // Note that samples in the WAV audio file are in little-endian order.
      if (fConvertToULaw) {
	// Add a filter that converts from raw 16-bit PCM audio
	// to 8-bit u-law audio:
	resultSource
	  = uLawFromPCMAudioSource::createNew(envir(), wavSource, 1/*little-endian*/);
	bitsPerSecond /= 2;
      } else {
	// Add a filter that converts from little-endian to network (big-endian) order: 
	resultSource = EndianSwap16::createNew(envir(), wavSource);
      }
    } else { // fBitsPerSample == 8
      // Don't do any transformation; send the 8-bit PCM data 'as is':
      resultSource = wavSource;
    }

    estBitrate = (bitsPerSecond+500)/1000; // kbps
    return resultSource;
  } while (0);

  // An error occurred:
  Medium::close(resultSource);
  return NULL;
}
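
createNewStreamSource() above is normally called indirectly, when an RTSP client requests the stream. For context, a minimal on-demand server that would trigger it might look like the following sketch, modeled on live555's testOnDemandRTSPServer; the stream name "wavAudioTest", the file name "test.wav", and port 8554 are placeholders, not values taken from the excerpt.

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

int main() {
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    return 1;
  }

  // "wavAudioTest" and "test.wav" are placeholder names:
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "wavAudioTest", "test.wav",
                                    "Session streamed by a WAV demo");
  sms->addSubsession(WAVAudioFileServerMediaSubsession
                     ::createNew(*env, "test.wav",
                                 False/*reuseFirstSource*/, False/*convertToULaw*/));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  env->taskScheduler().doEventLoop(); // does not return
  return 0;
}
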
Example #7
void play() {
  // Open the file as a 'WAV' file:
  WAVAudioFileSource* pcmSource
    = WAVAudioFileSource::createNew(*env, inputFileName);
  if (pcmSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
	 << "\" as a WAV audio file source: "
	 << env->getResultMsg() << "\n";
    exit(1);
  }

  // Get attributes of the audio source:
  unsigned char const bitsPerSample = pcmSource->bitsPerSample();
  if (bitsPerSample != 8 && bitsPerSample !=  16) {
    *env << "The input file contains " << bitsPerSample
	 << " bit-per-sample audio, which we don't handle\n";
    exit(1);
  }
  sessionState.source = pcmSource;
  unsigned const samplingFrequency = pcmSource->samplingFrequency();
  unsigned char const numChannels = pcmSource->numChannels();
  unsigned bitsPerSecond
    = samplingFrequency*bitsPerSample*numChannels;
  *env << "Audio source parameters:\n\t" << samplingFrequency << " Hz, ";
  *env << bitsPerSample << " bits-per-sample, ";
  *env << numChannels << " channels => ";
  *env << bitsPerSecond << " bits-per-second\n";

  // Add in any filter necessary to transform the data prior to streaming.
  // (This is where any audio compression would get added.)
  char const* mimeType;
  unsigned char payloadFormatCode;
  if (bitsPerSample == 16) {
#ifdef CONVERT_TO_ULAW
    // Add a filter that converts from raw 16-bit PCM audio (in little-endian order)
    // to 8-bit u-law audio:
    sessionState.source
      = uLawFromPCMAudioSource::createNew(*env, pcmSource, 1/*little-endian*/);
    if (sessionState.source == NULL) {
      *env << "Unable to create a u-law filter from the PCM audio source: "
	   << env->getResultMsg() << "\n";
      exit(1);
    }
    bitsPerSecond /= 2;
    mimeType = "PCMU";
    if (samplingFrequency == 8000 && numChannels == 1) {
      payloadFormatCode = 0; // a static RTP payload type
    } else {
      payloadFormatCode = 96; // a dynamic RTP payload type
    }
    *env << "Converting to 8-bit u-law audio for streaming => "
	 << bitsPerSecond << " bits-per-second\n";
#else
    // The 16-bit samples in WAV files are in little-endian order.
    // Add a filter that converts them to network (i.e., big-endian) order:
    sessionState.source = EndianSwap16::createNew(*env, pcmSource);
    if (sessionState.source == NULL) {
      *env << "Unable to create a little->bit-endian order filter from the PCM audio source: "
	   << env->getResultMsg() << "\n";
      exit(1);
    }
    mimeType = "L16";
    if (samplingFrequency == 44100 && numChannels == 2) {
      payloadFormatCode = 10; // a static RTP payload type
    } else if (samplingFrequency == 44100 && numChannels == 1) {
      payloadFormatCode = 11; // a static RTP payload type
    } else {
      payloadFormatCode = 96; // a dynamic RTP payload type
    }
    *env << "Converting to network byte order for streaming\n";
#endif
  } else { // bitsPerSample == 8
    // Don't do any transformation; send the 8-bit PCM data 'as is':
    mimeType = "L8";
    payloadFormatCode = 96; // a dynamic RTP payload type
  }

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 2222;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  sessionState.rtpGroupsock
    = new Groupsock(*env, destinationAddress, rtpPort, ttl);
  sessionState.rtpGroupsock->multicastSendOnly(); // we're a SSM source
  sessionState.rtcpGroupsock
    = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
  sessionState.rtcpGroupsock->multicastSendOnly(); // we're a SSM source

  // Create an appropriate audio RTP sink (using "SimpleRTPSink")
  // from the RTP 'groupsock':
  sessionState.sink
    = SimpleRTPSink::createNew(*env, sessionState.rtpGroupsock,
			       payloadFormatCode, samplingFrequency,
			       "audio", mimeType, numChannels);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = bitsPerSecond/1000;
      // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  sessionState.rtcpInstance
    = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
			      estimatedSessionBandwidth, CNAME,
			      sessionState.sink, NULL /* we're a server */,
			      True /* we're a SSM source*/);
  // Note: This starts RTCP running automatically

  // Create and start a RTSP server to serve this stream:
  sessionState.rtspServer = RTSPServer::createNew(*env, 8554);
  if (sessionState.rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
	   "Session streamed by \"testWAVAudiotreamer\"", True/*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
  sessionState.rtspServer->addServerMediaSession(sms);

  char* url = sessionState.rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
}
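
In testWAVAudioStreamer, play() is invoked from a main() roughly like the sketch below; the globals it relies on (env, inputFileName, sessionState, afterPlaying) are assumed to be declared elsewhere in the program, as in the original test program.

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Build the streaming pipeline and the RTSP server, then start streaming:
  play();

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warnings
}
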