/**
 * JSON event handler: attaches a set of filters to a worker.
 *
 * Expected params shape: { "worker": <int>, "filters": [<int>, ...] }.
 * On any failure an "error" string is added to outputNode and the handler
 * returns early; on success "error" is set to Jzon::null.
 */
void PipelineManager::addFiltersToWorkerEvent(Jzon::Node* params, Jzon::Object &outputNode)
{
    int workerId;

    if(!params) {
        // FIX: message previously said "slaves"; unified with the other
        // error strings in this handler, which all refer to filters.
        outputNode.Add("error", "Error adding filters to worker. Invalid JSON format...");
        return;
    }

    if (!params->Has("worker")) {
        outputNode.Add("error", "Error adding filters to worker. Invalid JSON format...");
        return;
    }

    if (!params->Has("filters") || !params->Get("filters").IsArray()) {
        outputNode.Add("error", "Error adding filters to worker. Invalid JSON format...");
        return;
    }

    workerId = params->Get("worker").ToInt();
    Jzon::Array& jsonFiltersIds = params->Get("filters").AsArray();

    // Attach every requested filter; abort on the first failure.
    for (Jzon::Array::iterator it = jsonFiltersIds.begin(); it != jsonFiltersIds.end(); ++it) {
        if (!addFilterToWorker(workerId, (*it).ToInt())) {
            outputNode.Add("error", "Error adding filters to worker. Invalid internal error...");
            return;
        }
    }

    // All filters attached — (re)start workers and report success.
    startWorkers();

    outputNode.Add("error", Jzon::null);
}
// ---------------------------- Example 2 ----------------------------
/*
 * Benchmark/server entry point: precompute the expected request and
 * response lengths from the global template strings, then run the
 * accept loop.
 *
 * NOTE(review): acceptLoop() is invoked BEFORE startWorkers() here,
 * while the sibling example below starts workers first. If acceptLoop()
 * blocks until shutdown, startWorkers() and socketCheck() would never
 * run — confirm the intended ordering.
 */
int main(void) {
  EXPECTED_RECV_LEN = strlen(EXPECTED_HTTP_REQUEST);
  RESPONSE_LEN = strlen(RESPONSE);
  acceptLoop();
  startWorkers();
  socketCheck();
  return 0;
}
// ---------------------------- Example 3 ----------------------------
/// Run the controller: record the thread ceiling, spin up the initial
/// worker pool, then hand control to the monitor loop.
///
/// @param numThreads      number of worker threads started immediately
/// @param maxThreads      upper bound stored in maxThreads_
/// @param monitorInterval polling interval forwarded to runMonitor()
void Controller::run(uint32_t numThreads, uint32_t maxThreads,
                     double monitorInterval) {
  maxThreads_ = maxThreads;
  startWorkers(numThreads);     // launch the initial worker pool
  runMonitor(monitorInterval);  // drive the monitor
}
// ---------------------------- Example 4 ----------------------------
/*
 * Benchmark entry point: start N worker threads (N from argv[1]), the
 * optional wakeup/socket-check helper threads, then run the accept loop.
 *
 * Returns 0 on success, -1 on bad usage.
 */
int main(int argc, char *argv[]) {
  EXPECTED_RECV_LEN = strlen(EXPECTED_HTTP_REQUEST);
  RESPONSE_LEN = strlen(RESPONSE);

  /* FIX: typo in status line ("requst" -> "request"). */
  printf("Length of request: %d;  response: %d\n", EXPECTED_RECV_LEN, RESPONSE_LEN);

  if (argc != 2) {
    printf( "usage: %s #workers\n", argv[0] );
    return -1;
  }
  int numWorkers = atoi(argv[1]);
  /* FIX: atoi returns 0 on garbage and accepts negatives — reject both
   * before the upper-bound check. */
  if (numWorkers <= 0) {
    printf("error: number of workers must be a positive integer\n");
    return -1;
  }
  if (numWorkers >= MAX_NUM_WORKERS) {
    printf("error: number of workers must be less than %d\n", MAX_NUM_WORKERS);
    return -1;
  }

  startWorkers(numWorkers);
#if !(defined SHOW_PEAK_PERFORMANCE)
  /* Helper threads are skipped in peak-performance builds. */
  startWakeupThread();
  startSocketCheckThread();
#endif
  acceptLoop(numWorkers);
  return 0;
}
// ---------------------------- Example 5 ----------------------------
// For our use case (RNA sequencing) we can reverse search without backtracking
// on some fixed size seed (e.g. 14); STAR uses maximum mappable seed,
// which is sort of interesting as well
//
// Performance-test driver: builds an FM-index over `len` random
// 2-bit-packed base pairs, reports the build time, then times 10^7
// random 12bp pattern searches dispatched through startWorkers().
int main(int argc, char **argv) {

  // Timing notes (kept from the original):
  // - Single-threaded index construction: ~4.6 s for 10^7 elements,
  //   suggesting ~90 s for 10^8 and ~1800 s (30 min) for 10^9.
  //   Index-building is a one-time cost, so this is acceptable.
  // - Multithreaded index building is slower (overhead and cache
  //   conflicts) below ~10 million base pairs; OTOH 4-threaded
  //   histogram sort beats SACA-K below a billion nucleotides, so
  //   memory usage aside this should be fine.
  long long len, i;
  unsigned char *str, *pats;
  fm_index *fmi;
  unsigned long long a, b;
  if (argc != 2) {
    fprintf(stderr, "Usage: fmitest len\n");
    exit(-1);
  }

  // FIX: atoll, not atoi — len is long long and the notes above expect
  // lengths around 10^9 and beyond, which overflow a 32-bit int.
  len = atoll(argv[1]);
  if (len <= 0) {
    fprintf(stderr, "Usage: fmitest len\n");
    exit(-1);
  }

  // 2-bit packing: 4 base pairs per byte, plus one byte of slack.
  str = malloc(len/4 + 1);
  if (!str) {
    fprintf(stderr, "out of memory\n");
    exit(-1);
  }
  for (i = 0; i < len/4 + 1; ++i)
    str[i] = rand();
  // Zero the packed bits past the final base pair.
  switch (len % 4) {
  case 1:
    str[len/4] &= 0xC0;
    break;
  case 2:
    str[len/4] &= 0xF0;
    break;
  case 3:
    str[len/4] &= 0xFC;
    break;
  case 0:
    str[len/4] = 0;
  }
  rdtscll(a);
  fmi = make_fmi(str, len);
  rdtscll(b);
  printf("Built index with %lld base pairs in %lld cycles (%f s)\n",
	 len, b-a, ((double)(b-a)) / 2500000000.);
  printf("(%f cycles per base pair (%e seconds))\n", ((double)(b-a))	\
	 / len, ((double)(b-a)) / (len * 2500000000.));
  // 10^7 patterns of ~12bp: one byte per base, values in 0..3.
  pats = malloc(10000011);
  if (!pats) {
    free(str);
    fprintf(stderr, "out of memory\n");
    exit(-1);
  }
  for (i = 0; i < 10000011; ++i) {
    pats[i] = rand() & 3;
  }
  // Comment: This is huge in 64-bit mode and thus makes valgrind runs
  // somewhat untenable at this size; scaling down reveals more of the
  // threading overhead and less asymptotic performance (which is what
  // we actually care about).
  rdtscll(a);
  startWorkers(fmi, 10000000, pats);
  rdtscll(b);
  // NOTE(review): the search report divides by 2.4e9 cycles/s while the
  // build report divides by 2.5e9 — one of the assumed TSC frequencies
  // is presumably wrong; confirm the actual clock rate.
  printf("Searched 10000000 12bp sequences in %lld cycles (%f s)\n",
	 b-a, ((double)(b-a)) / 2400000000.);
  printf("(%f cycles per base pair (%e seconds))\n", ((double)(b-a))	\
	 / 120000000., ((double)(b-a)) / 288000000000000000.);

  // With current settings the FMI with 1000000 base pairs takes
  // ~1.5 million bytes to store (50% auxiliary data).
  destroy_fmi(fmi);
  free(str);
  free(pats);
  return 0;  // FIX: explicit return (previously fell off the end of main)
}
// Thin wrapper: starting the request server just spins up the worker
// pool over this server's output member. Assumes startWorkers() takes
// care of any thread lifecycle — presumably joined/owned elsewhere;
// TODO confirm against the worker implementation.
void RequestServer<SpecificCommandRunner>::startServer() {
	startWorkers(this->output);

}
// ---------------------------- Example 7 ----------------------------
void Server::startServer(int port) {
    int sock;
    event *ev_accept;
    struct sockaddr_in server_addr;
    
    
    event_enable_debug_mode();
    event_set_log_callback(write_to_file_cb);
    
    memset(&server_addr, 0, sizeof(server_addr));
    
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = inet_addr("127.0.0.1"); // INADDR_ANY;
    server_addr.sin_port = htons(2000);

    sock = socket(AF_INET, SOCK_STREAM, 0);
    
    if(sock < 0) {
        printf("ERROR opening socket\n");
        exit(1);
    }

    if(bind(sock, (struct sockaddr *) &server_addr, sizeof(server_addr)) < 0) {
        printf("ERROR binding socket\n");
        exit(1);
    }
    
    if(listen(sock, 5) < 0) {
        perror("ERROR listening");
        exit(1);
    }
    
    /*
    int reuse = 1;
    setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
    */
    
    printf("DEBUG: listening\n");
    
    
    if(evthread_use_pthreads() < 0) {
        perror("failed start event thread");
        exit(1);
    }
    
    
    base = event_base_new();
    evutil_make_socket_nonblocking(sock);
    
    startWorkers();
    
    ev_accept = event_new(base, sock, EV_READ|EV_PERSIST, Server::acceptCallback, (void *)this);
    
    event_add(ev_accept, NULL);
    
    event_base_dispatch(base);
    event_base_free(base);
    base = NULL;
    
    close(sock);
}