Example #1
//-------------------------------------------------------------------------
// 									MAIN
//-------------------------------------------------------------------------
int main(int argc, char *argv[] )
{
	char appname[256];
	char rcfilename[256];
	char* home = getenv("HOME"); // assumes HOME is set; a NULL here would be passed to snprintf below

	snprintf(appname, 255, "%s", basename(argv[0]));
	snprintf(rcfilename, 255, "%s/.%src", home, appname);

	CMDUI* interface = new CMDUI(argc, argv);
	FUI* finterface = new FUI();
	DSP.buildUserInterface(interface);
	DSP.buildUserInterface(finterface);

#ifdef OSCCTRL
	GUI* oscinterface = new OSCUI(appname, argc, argv);
	DSP.buildUserInterface(oscinterface);
#endif

#ifdef HTTPCTRL
	httpdUI* httpdinterface = new httpdUI(appname, DSP.getNumInputs(), DSP.getNumOutputs(), argc, argv);
	DSP.buildUserInterface(httpdinterface);
#endif

	jackaudio audio;
	audio.init(appname, &DSP);
	finterface->recallState(rcfilename); // restore saved parameters (as in Example #7)
	interface->process_command();
	audio.start();

#ifdef HTTPCTRL
	httpdinterface->run();
#endif	
	
#ifdef OSCCTRL
	oscinterface->run();
#endif
	interface->run();
	
	audio.stop();
	finterface->saveState(rcfilename);

	// deallocation
	delete interface;
	delete finterface;
#ifdef HTTPCTRL
	delete httpdinterface;
#endif
#ifdef OSCCTRL
	delete oscinterface;
#endif

	return 0;
} 
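Both this example and Example #7 reference a file-scope DSP object that the snippet itself never declares. By Faust convention the compiler pastes the generated mydsp class into the architecture file, which then instantiates it once, as Example #8 below shows for the Android case. A minimal sketch of the assumed context:

// Assumed file-scope context for Examples #1 and #7 (not part of the
// snippet itself): the Faust compiler inlines the generated mydsp class
// above this point, and the architecture file declares one instance.
mydsp DSP; // global DSP object referenced throughout main()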
Example #2
File: plot.cpp, Project: antoniotuzzi/faust
int main(int argc, char *argv[] )
{
	float fnbsamples; // number of samples to render, set by the -n option below

	CMDUI* interface = new CMDUI(argc, argv);
	DSP.buildUserInterface(interface);
	interface->addOption("-n", &fnbsamples, 16, 0.0, 100000000.0);
	
	if (DSP.getNumInputs() > 0) {
		fprintf(stderr, "no inputs allowed\n");
		exit(1);
	}
	
	// init signal processor and the user interface values
	DSP.init(44100);
	
	// modify the UI values according to the command line options
	interface->process_command();
	
	int nouts = DSP.getNumOutputs();
	channels chan (kFrames, nouts);

	int nbsamples = int(fnbsamples);
	// process and print full kFrames-sized buffers first
	while (nbsamples > kFrames) {
		DSP.compute(kFrames, 0, chan.buffers());
		for (int i = 0; i < kFrames; i++) {
			for (int c = 0; c < nouts; c++) {
				printf("%8f\t", chan.buffers()[c][i]);
			}
			cout << endl;
		}
		nbsamples -= kFrames;
	}
	
	// compute and print the remaining partial buffer
	DSP.compute(nbsamples, 0, chan.buffers());
	for (int i = 0; i < nbsamples; i++) {
		for (int c = 0; c < nouts; c++) {
			printf("%8f\t", chan.buffers()[c][i]);
		}
		cout << endl;
	}
	return 0;
} 
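The channels helper and the kFrames constant come from the surrounding plot.cpp architecture file and are not shown in this listing. A minimal sketch of what the call sites imply (an allocator for nouts zero-initialized buffers of kFrames samples, exposed through buffers()):

// Minimal sketch of the channels helper implied by Example #2's call
// sites; the real architecture file ships its own implementation.
class channels {
    int fNumFrames;
    int fNumChannels;
    float** fBuffers;
public:
    channels(int nframes, int nchannels) : fNumFrames(nframes), fNumChannels(nchannels)
    {
        fBuffers = new float*[fNumChannels];
        for (int c = 0; c < fNumChannels; c++) {
            fBuffers[c] = new float[fNumFrames](); // zero-initialized
        }
    }
    ~channels()
    {
        for (int c = 0; c < fNumChannels; c++) delete[] fBuffers[c];
        delete[] fBuffers;
    }
    float** buffers() { return fBuffers; }
};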
Example #3
/*
 * init(samplingRate, bufferSize)
 * Initializes the audio engine and the DSP code with the given
 * sampling rate and buffer size. It also looks for the [style:poly]
 * metadata in the Faust code and, based on that, decides whether to
 * create a polyphonic DSP object. init() must be called before start().
 */
bool init(int samplingRate, int bufferSize) {
    DSP.init(samplingRate);
    inChanNumb = DSP.getNumInputs();
    outChanNumb = DSP.getNumOutputs();

    // configuring the UI
    DSP.buildUserInterface(&mapUI);
    DSP.buildUserInterface(&json);

    jsonString = json.JSON();

    if (jsonString.find("keyboard") != std::string::npos ||
        jsonString.find("poly") != std::string::npos) {
        polyMax = 4;
        DSPpoly = new mydsp_poly(polyMax, true);
        DSPpoly->init(samplingRate);
    } else {
        polyMax = 0;
    }

    return (fAudioDevice.Open(((polyMax > 0) ? DSPpoly : &DSP), inChanNumb, outChanNumb, bufferSize, samplingRate) == 0);
}
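A hypothetical call sequence for this wrapper, per the doc comment (start() is referenced there but not shown in this listing; the dsp_faust constructor appears in Example #6):

// Hypothetical usage of the wrapper above, under those assumptions.
dsp_faust engine;
if (engine.init(44100, 512)) { // 44.1 kHz, 512-frame buffers
    // engine.start(); // would begin audio processing here
}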
Example #4
/*
 * init(samplingRate, bufferFrames)
 * Initializes the audio engine and the DSP code with the given
 * sampling rate and buffer size. It also looks for the [style:poly]
 * metadata in the Faust code and, based on that, decides whether to
 * create a polyphonic DSP object. init() must be called before start().
 */
void init(int samplingRate, int bufferFrames) {
	// configuring global variables
	SR = samplingRate;
	bufferSize = bufferFrames;
	vecSamps = bufferSize;
	DSP.init(SR);
	inChanNumb = DSP.getNumInputs();
	outChanNumb = DSP.getNumOutputs();

	// configuring the UI
	DSP.buildUserInterface(&mapUI);
	DSP.buildUserInterface(&json);

	jsonString = json.JSON();

	if (jsonString.find("keyboard") != std::string::npos ||
	    jsonString.find("poly") != std::string::npos) {
		polyMax = 4;
		polyCoef = 1.0f / polyMax;
		DSPpoly = new mydsp_poly(SR, bufferSize, polyMax);
	} else {
		polyMax = 0;
	}

	// allocating memory for output channel
	bufferout = new float *[outChanNumb];
	for (int i = 0; i < outChanNumb; i++) {
		bufferout[i] = new float[vecSamps];
	}

	// allocating memory for input channel
	if (inChanNumb >= 1) {
		bufferin = new float *[inChanNumb];
		for (int i = 0; i < inChanNumb; i++) {
			bufferin[i] = new float[vecSamps];
		}
	}
}
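init() allocates bufferout and bufferin with new[], but the matching release is outside this listing. A sketch of the cleanup a shutdown path would need; the function name destroy is illustrative, not taken from the source:

// Illustrative cleanup for the buffers allocated in init() above.
void destroy() {
	for (int i = 0; i < outChanNumb; i++) {
		delete[] bufferout[i];
	}
	delete[] bufferout;
	if (inChanNumb >= 1) {
		for (int i = 0; i < inChanNumb; i++) {
			delete[] bufferin[i];
		}
		delete[] bufferin;
	}
	delete DSPpoly; // safe in the monophonic case: deleting a null pointer is a no-op
}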
Example #5
int main(int argc, char *argv[])
{
  SNDFILE*	in_sf;
  SNDFILE*	out_sf;
  SF_INFO	in_info;
  SF_INFO	out_info;
  unsigned int nAppend = 0; // number of frames to append beyond input file

  if (argc < 3) {
    fprintf(stderr,"*** USAGE: %s input_soundfile output_soundfile\n",argv[0]);
    exit(1);
  }

  nAppend = loptrm(&argc, argv, "--continue", "-c", 0);
    
  CMDUI* interface = new CMDUI(argc, argv);
  DSP.buildUserInterface(interface);
  interface->process_command();

  // open input file
  in_info.format = 0;
  in_sf = sf_open(interface->input_file(), SFM_READ, &in_info);
  if (in_sf == NULL) {
    fprintf(stderr,"*** Input file not found.\n");
    sf_perror(in_sf); 
    exit(1); 
  }

  // open output file
  out_info = in_info;
  out_info.format = in_info.format; // redundant (already copied by the struct assignment) but harmless
  out_info.channels = DSP.getNumOutputs();
  out_sf = sf_open(interface->output_file(), SFM_WRITE, &out_info);
  if (out_sf == NULL) { 
    fprintf(stderr,"*** Cannot write output file.\n");
    sf_perror(out_sf); 
    exit(1); 
  }

  // create separator and interleaver
  Separator   sep(kFrames, in_info.channels, DSP.getNumInputs());
  Interleaver ilv(kFrames, DSP.getNumOutputs());

  // init signal processor
  DSP.init(in_info.samplerate);
  //DSP.buildUserInterface(interface);
  interface->process_init();

  // process all samples
  int nbf;
  do {
    nbf = READ_SAMPLE(in_sf, sep.input(), kFrames);
    sep.separate();
    DSP.compute(nbf, sep.outputs(), ilv.inputs());
    ilv.interleave();
    sf_writef_float(out_sf, ilv.output(), nbf);
    //sf_write_raw(out_sf, ilv.output(), nbf);
  } while (nbf == kFrames);

  sf_close(in_sf);

  // compute tail, if any (note: this path assumes at most one input channel)
  if (nAppend > 0) {
    FAUSTFLOAT *input = (FAUSTFLOAT*) calloc(nAppend * DSP.getNumInputs(), sizeof(FAUSTFLOAT));
    FAUSTFLOAT *inputs[1] = { input };
    Interleaver ailv(nAppend, DSP.getNumOutputs());
    DSP.compute(nAppend, inputs, ailv.inputs());
    ailv.interleave();
    sf_writef_float(out_sf, ailv.output(), nAppend);
  }

  sf_close(out_sf);

  return 0;
}
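The loptrm helper consumes an option and its value from the argument list before CMDUI parses it; it is defined in the surrounding architecture file, not in this listing. A sketch of the behavior its call site implies:

#include <cstring>
#include <cstdlib>

// Sketch of loptrm as implied by its call site above: find either
// spelling of the flag, remove "<flag> <value>" from argv so CMDUI
// never sees it, and return the value (or def when the flag is absent).
long loptrm(int* argc, char* argv[], const char* longname, const char* shortname, long def)
{
    for (int i = 1; i < *argc - 1; i++) {
        if (strcmp(argv[i], longname) == 0 || strcmp(argv[i], shortname) == 0) {
            long value = strtol(argv[i + 1], NULL, 10);
            for (int j = i; j + 2 < *argc; j++) {
                argv[j] = argv[j + 2];
            }
            *argc -= 2;
            return value;
        }
    }
    return def;
}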
Example #6
// Constructor: size the JSONUI from the DSP's channel counts; the
// polyphonic object (DSPpoly) is created later, in init().
dsp_faust() : json(DSP.getNumInputs(), DSP.getNumOutputs()), DSPpoly(0), on(false) {}
Example #7
//-------------------------------------------------------------------------
// 									MAIN
//-------------------------------------------------------------------------
int main(int argc, char *argv[])
{
    char appname[256];
    char rcfilename[256];
    char* home = getenv("HOME");

    int celt = lopt(argv, "--celt", -1);
    const char* master_ip = lopts(argv, "--a", DEFAULT_MULTICAST_IP);
    int master_port = lopt(argv, "--p", DEFAULT_PORT);
    int mtu = lopt(argv, "--m", DEFAULT_MTU);
    int latency = lopt(argv, "--l", 2);

    snprintf(appname, 256, "%s", basename(argv[0]));
    snprintf(rcfilename, 256, "%s/.%src", home, appname);

    CMDUI* interface = new CMDUI(argc, argv);
    FUI* finterface = new FUI();
    DSP.buildUserInterface(interface);
    DSP.buildUserInterface(finterface);

#ifdef OSCCTRL
    GUI* oscinterface = new OSCUI(appname, argc, argv);
    DSP.buildUserInterface(oscinterface);
#endif

#ifdef HTTPCTRL
    httpdUI* httpdinterface = new httpdUI(appname, DSP.getNumInputs(), DSP.getNumOutputs(), argc, argv);
    DSP.buildUserInterface(httpdinterface);
#endif

    netjackaudio audio(celt, master_ip, master_port, mtu, latency);
    if (!audio.init(appname, &DSP)) {
        return 0;
    }
    finterface->recallState(rcfilename);
    if (!audio.start()) {
        return 0;
    }

#ifdef HTTPCTRL
    httpdinterface->run();
#endif

#ifdef OSCCTRL
    oscinterface->run();
#endif
    interface->run();

    audio.stop();
    finterface->saveState(rcfilename);
    
    // deallocation
    delete interface;
    delete finterface;
#ifdef HTTPCTRL
    delete httpdinterface;
#endif
#ifdef OSCCTRL
    delete oscinterface;
#endif

    return 0;
}
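lopt, lopts, and the DEFAULT_* constants come from the surrounding netjack architecture file. A sketch of the two helpers as their call sites imply, returning the value that follows a flag in argv, or a default when the flag is absent:

#include <cstring>
#include <cstdlib>

// Sketch of lopt/lopts as implied by the call sites above.
long lopt(char* argv[], const char* name, long def)
{
    for (int i = 1; argv[i] && argv[i + 1]; i++) {
        if (strcmp(argv[i], name) == 0) {
            return strtol(argv[i + 1], NULL, 10);
        }
    }
    return def;
}

const char* lopts(char* argv[], const char* name, const char* def)
{
    for (int i = 1; argv[i] && argv[i + 1]; i++) {
        if (strcmp(argv[i], name) == 0) {
            return argv[i + 1];
        }
    }
    return def;
}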
Example #8
#include <android/log.h>
#include "dsp_faust.h"
#include <stdio.h>
#include <string.h>

#define FAUSTFLOAT float

using namespace std;

OPENSL_STREAM *p; // the audio engine
mydsp DSP; // the monophonic Faust object
mydsp_poly *DSPpoly; // the polyphonic Faust object
MapUI mapUI; // the UI description
pthread_t audioThread; // native thread for audio
JSONUI json(DSP.getNumInputs(), DSP.getNumOutputs());
string jsonString;

// Global variables
int SR, bufferSize, vecSamps, polyMax, inChanNumb, outChanNumb, on;
float **bufferout, **bufferin, polyCoef;

/*
 * init(samplingRate, bufferFrames)
 * Initializes the audio engine and the DSP code with the given
 * sampling rate and buffer size. It also looks for the [style:poly]
 * metadata in the Faust code and, based on that, decides whether to
 * create a polyphonic DSP object. init() must be called before start().
 */