/* V2 - uses update_tables() from the second file */
void scan_periodically(void *nfd)
{
	int r;

	sync_socket = *((int *) nfd);
	/* Adjust the loop bound and the sleep() argument to change the scan count and interval. */
	for (r = 0; r < 1000; r++)
	{
		update_tables();
		printf("\n---------------------------------------------------------------------------------------------------");
		fflush(stdout);
		sleep(10);
	}

}
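
The loop above is clearly intended to run on its own thread, with the socket descriptor handed over through the void pointer. Below is a minimal launch sketch assuming POSIX threads; the trampoline, start_scanner(), and the use of pthreads at all are assumptions for illustration, not taken from the original project.

/* Hypothetical launcher for scan_periodically(), assuming POSIX threads.
 * The original code may use a different thread API; this only shows how
 * the socket descriptor could be passed to the scanning loop.
 */
#include <pthread.h>

static void *scan_trampoline(void *nfd)
{
	scan_periodically(nfd);		/* runs the update_tables() loop */
	return NULL;
}

static int start_scanner(int *nfd)
{
	pthread_t tid;

	/* Pass a pointer to the socket fd; scan_periodically() dereferences it once. */
	if (pthread_create(&tid, NULL, scan_trampoline, nfd) != 0)
		return -1;

	return pthread_detach(tid);
}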
Example #2
/*===========================================================================*
 *				getdents_hook				     *
 *===========================================================================*/
int getdents_hook(struct inode *node, cbdata_t UNUSED(cbdata))
{
	/* Directory entry retrieval hook, for potentially all files in a
	 * directory. Make sure that all files that are supposed to be
	 * returned, are actually part of the virtual tree.
	 */

	if (node == get_root_inode()) {
		update_tables();

		construct_pid_dirs();
	} else if (dir_is_pid(node)) {
		construct_pid_entries(node, NULL /*name*/);
	}

	return OK;
}
Example #3
/*
 * Initialize this module, before VTreeFS is started.  As part of the process,
 * check if we're not compiled against a kernel different from the one that is
 * running at the moment.
 */
int
init_tree(void)
{
	int i, r;

	if ((r = update_tables()) != OK)
		return r;

	/*
	 * Get the maximum number of entries that we may add to each PID's
	 * directory.  We could just default to a large value, but why not get
	 * it right?
	 */
	for (i = 0; pid_files[i].name != NULL; i++);

	nr_pid_entries = i;

	return OK;
}
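
The counting loop works because pid_files is a sentinel-terminated table whose last entry has a NULL name. A sketch of that shape follows; the field list and the entry names are illustrative, not the actual procfs definitions.

/* Illustrative shape of a sentinel-terminated pid_files table; the real
 * struct in the procfs sources has different fields.
 */
struct pid_entry {
	const char *name;	/* file name inside each PID directory */
	int mode;		/* hypothetical extra field */
};

static const struct pid_entry pid_files[] = {
	{ "psinfo",  0444 },
	{ "cmdline", 0444 },
	{ NULL,      0 }	/* sentinel: ends the counting loop in init_tree() */
};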
Example #4
/*===========================================================================*
 *				lookup_hook				     *
 *===========================================================================*/
int lookup_hook(struct inode *parent, char *name,
	cbdata_t UNUSED(cbdata))
{
	/* Path name resolution hook, for a specific parent and name pair.
	 * If needed, update our own view of the system first; after that,
	 * determine whether we need to (re)generate certain files.
	 */
	static clock_t last_update = 0;
	clock_t now;
	int r;

	/* Update lazily for lookups, as this gets too expensive otherwise.
	 * Alternative: pull in only PM's table?
	 */
	if ((r = getticks(&now)) != OK)
		panic(__FILE__, "unable to get uptime", r);

	if (last_update != now) {
		update_tables();

		last_update = now;
	}

	/* If the parent is the root directory, we must now reconstruct all
	 * entries, because some of them might have been garbage collected.
	 * We must update the entire tree at once; if we update individual
	 * entries, we risk name collisions.
	 */
	if (parent == get_root_inode()) {
		construct_pid_dirs();
	}
	/* If the parent is a process directory, we may need to (re)construct
	 * the entry being looked up.
	 */
	else if (dir_is_pid(parent)) {
		/* We might now have deleted our current containing directory;
		 * construct_pid_entries() will take care of this case.
		 */
		construct_pid_entries(parent, name);
	}

	return OK;
}
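
The tick-based throttle at the top of lookup_hook is self-contained and could be factored into a helper; a sketch is below, reusing getticks(), panic() and update_tables() from the surrounding code. The helper name is hypothetical.

/* Refresh the process tables at most once per clock tick, so that bursts of
 * lookups do not each trigger an expensive update_tables() call. */
static void update_tables_lazily(void)
{
	static clock_t last_update = 0;
	clock_t now;
	int r;

	if ((r = getticks(&now)) != OK)
		panic(__FILE__, "unable to get uptime", r);

	if (now != last_update) {
		update_tables();
		last_update = now;
	}
}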
Example #5
File: lda.cpp Project: dcmoyer/lda
void LDA::initialize(){

  Document target;

  initialize_tables();

//  std::cout << "init matrices\n";
  for(int i=0; i < filenames.size(); ++i){
    
    target = Document(filenames[i]);
    target.load_document();
    target.init_random_topics(K);
    target.save_topics();

    update_tables(target);

    target.clear();
  }

}
Example #6
/*===========================================================================*
 *				init_tree				     *
 *===========================================================================*/
int init_tree(void)
{
	/* Initialize this module, before VTreeFS is started. As part of the
	 * process, check if we're not compiled against a kernel different from
	 * the one that is running at the moment.
	 */
	int i, r;

	if ((r = update_tables()) != OK)
		return r;

	/* Get the maximum number of entries that we may add to each PID's
	 * directory. We could just default to a large value, but why not get
	 * it right?
	 */
	for (i = 0; pid_files[i].name != NULL; i++);

	nr_pid_entries = i;

	return OK;
}
Example #7
static void proxy_thread(void *arg)
{
    int ss, cs; /* server and client sockets */
    int fl; /* socket flags */
    Octstr *addr = NULL;
    int forward;
    Octstr *tmp;

    run_thread = 1;
    ss = cs = -1;

    /* create the client binding only if we have a remote server,
     * and make the client socket non-blocking */
    if (remote_host != NULL) {
        cs = udp_client_socket();
        fl = fcntl(cs, F_GETFL);
        fcntl(cs, F_SETFL, fl | O_NONBLOCK);
        addr = udp_create_address(remote_host, remote_port);
    }

    /* create server binding */
    ss = udp_bind(our_port, octstr_get_cstr(our_host));
    if (ss == -1)
        panic(0, "RADIUS: Couldn't set up server socket for port %ld.", our_port);

    /* make the server socket non-blocking */
    fl = fcntl(ss, F_GETFL);
    fcntl(ss, F_SETFL, fl | O_NONBLOCK);

    while (run_thread) {
        RADIUS_PDU *pdu, *r;
        Octstr *data, *rdata;
        Octstr *from_nas, *from_radius;

        pdu = r = NULL;
        data = rdata = from_nas = from_radius = NULL;
        
        if (read_available(ss, 100000) < 1)
            continue;

        /* get request from NAS */
        if (udp_recvfrom(ss, &data, &from_nas) == -1) {
            if (errno == EAGAIN)
                /* No datagram available, don't block. */
                continue;

            error(0, "RADIUS: Couldn't receive request data from NAS");
            continue;
        }

        tmp = udp_get_ip(from_nas);
        info(0, "RADIUS: Got data from NAS <%s:%d>",
             octstr_get_cstr(tmp), udp_get_port(from_nas));
        octstr_destroy(tmp);
        octstr_dump(data, 0);

        /* unpacking the RADIUS PDU */
        if ((pdu = radius_pdu_unpack(data)) == NULL) {
            warning(0, "RADIUS: Couldn't unpack PDU from NAS, ignoring.");
            goto error;
        }
        info(0, "RADIUS: from NAS: PDU type: %s", pdu->type_name);

        /* authenticate the Accounting-Request packet */
        if (radius_authenticate_pdu(pdu, &data, secret_nas) == 0) {
            warning(0, "RADIUS: Authentication failed for PDU from NAS, ignoring.");
            goto error;
        }

        /* store to hash table if not present yet */
        mutex_lock(radius_mutex);
        forward = update_tables(pdu);
        mutex_unlock(radius_mutex);

        /* create response PDU for NAS */
        r = radius_pdu_create(0x05, pdu);

        /*
         * create response authenticator 
         * code+identifier(req)+length+authenticator(req)+(attributes)+secret 
         */
        r->u.Accounting_Response.identifier = pdu->u.Accounting_Request.identifier;
        r->u.Accounting_Response.authenticator =
            octstr_duplicate(pdu->u.Accounting_Request.authenticator);

        /* pack response for NAS */
        rdata = radius_pdu_pack(r);

        /* create the response authenticator in the encoded PDU */
        radius_authenticate_pdu(r, &rdata, secret_nas);

        /* 
         * forward request to remote RADIUS server only if updated
         * and if we have a configured remote RADIUS server 
         */
        if ((remote_host != NULL) && forward) {
            if (udp_sendto(cs, data, addr) == -1) {
                error(0, "RADIUS: Couldn't send to remote RADIUS <%s:%ld>.",
                      octstr_get_cstr(remote_host), remote_port);
            } else 
            if (read_available(cs, remote_timeout) < 1) {
                error(0, "RADIUS: Timeout for response from remote RADIUS <%s:%ld>.",
                      octstr_get_cstr(remote_host), remote_port);
            } else 
            if (udp_recvfrom(cs, &data, &from_radius) == -1) {
                error(0, "RADIUS: Couldn't receive from remote RADIUS <%s:%ld>.",
                      octstr_get_cstr(remote_host), remote_port);
            } else {
                info(0, "RADIUS: Got data from remote RADIUS <%s:%d>.",
                     octstr_get_cstr(udp_get_ip(from_radius)), udp_get_port(from_radius));
                octstr_dump(data, 0);

                /* XXX unpack the response PDU and check if the response
                 * authenticator is valid */
            }
        }

        /* send response to NAS */
        if (udp_sendto(ss, rdata, from_nas) == -1)
            error(0, "RADIUS: Couldn't send response data to NAS <%s:%d>.",
                  octstr_get_cstr(udp_get_ip(from_nas)), udp_get_port(from_nas));

error:
        radius_pdu_destroy(pdu);
        radius_pdu_destroy(r);

        octstr_destroy(rdata);
        octstr_destroy(data);
        octstr_destroy(from_nas);

        debug("radius.proxy", 0, "RADIUS: Mapping table contains %ld elements",
              dict_key_count(radius_table));
        debug("radius.proxy", 0, "RADIUS: Session table contains %ld elements",
              dict_key_count(session_table));
        debug("radius.proxy", 0, "RADIUS: Client table contains %ld elements",
              dict_key_count(client_table));

    }

    octstr_destroy(addr);
}
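
proxy_thread() only forwards to the remote RADIUS server when update_tables(pdu) reports something new, so update_tables() is effectively an insert-if-absent check on the mapping tables. A sketch of that decision is below, assuming Kannel's gwlib Dict API (dict_get/dict_put, octstr_duplicate); the helper name and the key/value construction are hypothetical, and the real code builds the key from PDU attributes. The caller already holds radius_mutex, so no locking is done here.

/* Hypothetical sketch of the decision behind forward = update_tables(pdu):
 * return non-zero only when the session key was not seen before, so that
 * only fresh Accounting-Requests are forwarded upstream. */
static int table_insert_if_absent(Dict *table, Octstr *key, Octstr *value)
{
    if (dict_get(table, key) != NULL)
        return 0;               /* already mapped, nothing to forward */

    dict_put(table, key, octstr_duplicate(value));
    return 1;                   /* new entry, forward to remote RADIUS */
}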
Example #8
File: lda.cpp Project: dcmoyer/lda
void LDA::run_iterations_mpi(int num_iterations){
  
  Document target;
  int first_file_idx, last_file_idx;

#if MPI_ENABLED
    int rank, namelen, num_procs;
    char processor_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);

    //sync nodes before broadcast
    //MPI_Barrier(MPI_COMM_WORLD);

    //distribute the initial tables to all nodes
//    broadcast_data(topic_x_words, K*V);
//    broadcast_data(total_words_in_topics, K);

    if (rank == 0)    {
      std::cout <<"[proc "<<rank<<"]" << "Tables sent to children" << std::endl;
    }

    int num_files_per_proc = ceil((float)filenames.size() / num_procs);
    first_file_idx = rank * num_files_per_proc;
    // last_file_idx is exclusive, matching the non-MPI branch and the file loop below
    last_file_idx = first_file_idx + num_files_per_proc;

    if (rank == num_procs - 1)
        last_file_idx = filenames.size();

    std::cout << "[proc " << rank << "] " << "My first index is " << first_file_idx << std::endl;
    std::cout << "[proc " << rank << "] " << "My last (exclusive) index is " << last_file_idx << std::endl;
#else
    first_file_idx = 0; last_file_idx = filenames.size();
#endif

  for(int iter_idx=0; iter_idx < num_iterations; ++iter_idx){
#if MPI_ENABLED
    if (rank == 0)    {
      std::cout <<"[proc "<<rank<<"]" << "Iteration " << iter_idx << std::endl;
    }

    if (iter_idx % sync_frequency == 0)  {
      if (rank == 0)  {
        // recount the tables from current topic assignments
        initialize_tables();

        for (int i = 0; i<filenames.size(); i++)    {
            Document tmp_doc = Document(filenames[i]);
//            tmp_doc.load_document();
//            tmp_doc.load_topics();
            update_tables(tmp_doc);
        }
      }
    }

    // send tables to all processes in the pool
    broadcast_data(topic_x_words, K*V);
    broadcast_data(total_words_in_topics, K);
#else
    std::cout << "Iteration " << iter_idx << std::endl;
    first_file_idx = 0; last_file_idx = filenames.size();
#endif

    //Big loop of iteration over files
    //TODO: MPI Goes Here

    for(int file_idx=first_file_idx; file_idx < last_file_idx; ++file_idx){
            
      //std::cout << rank << "am I here " << first_file_idx << std::endl;   
      target = Document(filenames[file_idx]);
      target.load_document();
      target.load_topics();

      //loop of iteration over words
      int size_of_doc = target.num_words();
      
      //create_document_topic_distribution
      boost::numeric::ublas::matrix<double> document_x_topic(1,K);

      //initialize dist
      for(int i=0; i<K; ++i){
        document_x_topic(0,i) = 0;
      }
      
      //fill distribution
      for(int word_idx=0; word_idx < size_of_doc; ++word_idx){
        int topic = target.get_word_topic(word_idx);
        assert(topic >= 0 && topic < K);
        document_x_topic(0,topic) += 1;
      }

      //actual gibbs sampling
      //this is where OpenMP would be nice.
      //TODO: OpenMP atomic or barrier/syncs
    int word_idx = 0;
#if OMP_ENABLED 
    int threadCount = 4;
    omp_set_num_threads(threadCount);
    #pragma omp parallel shared(target, document_x_topic, size_of_doc) private(word_idx)
#endif 
    {
#if OMP_ENABLED
      #pragma omp for
#endif 
      for(word_idx=0; word_idx < size_of_doc; ++word_idx){
        //getword
        int word = target.get_word(word_idx);
        //gettopic
        int topic = target.get_word_topic(word_idx);

        //update dists.
        //commented out: with OpenMP, unsynchronized updates can briefly drive topic_x_words below 1
        //assert((*topic_x_words)(topic,word) > 0);
#if MPI_ENABLED
        //_topic_x_words(topic,word) -= 1;
        //_total_words_in_topics(topic,0) -= 1;

        topic_x_words[V*topic + word] -= 1;
        total_words_in_topics[topic] -= 1;
#else
        (*topic_x_words)(topic,word) -= 1;
        (*total_words_in_topics)(topic,0) -= 1;
#endif
        assert(document_x_topic(0,topic) > 0);
        document_x_topic(0,topic) -= 1;
        boost::numeric::ublas::matrix<double> topic_dist(K,1);
        for(int topic_idx=0;topic_idx < K; ++topic_idx){
#if MPI_ENABLED
        // double topic_word_prob = ((double) _topic_x_words(topic_idx,word) + beta)/
        //   ((double)_total_words_in_topics(topic_idx,0) + V*beta);
        double topic_word_prob = ((double) topic_x_words[V*topic_idx + word] + beta)/
           ((double )total_words_in_topics[topic_idx] + V*beta);
#else
          double topic_word_prob = ((double) (*topic_x_words)(topic_idx,word) + beta)/
            ((double)(*total_words_in_topics)(topic_idx,0) + V*beta);
#endif
          double topic_doc_prob = ((double)(document_x_topic(0,topic_idx) + alpha)/
            ((double) size_of_doc + K*alpha/*CHECKTHIS*/));

          assert(topic_idx >= 0 && topic_idx < K);
          topic_dist(topic_idx,0) = topic_word_prob * topic_doc_prob;
        }

        //sum of the topic dist vector
        double normalizing_constant = sum(prod(
          boost::numeric::ublas::scalar_vector<double>(topic_dist.size1()),
          topic_dist));
        for(int topic_idx=0; topic_idx < K; ++topic_idx){
          topic_dist(topic_idx,0) = topic_dist(topic_idx,0)/normalizing_constant;
        }
        double prob = unif();
        int new_topic = -1;
        
        while(prob > 0){
          new_topic += 1;
          prob -= topic_dist(new_topic,0);
        }
        assert(new_topic < K);

        //assign_topic
        target.set_word_topic(word_idx,new_topic);

        //update dists
#if MPI_ENABLED
        //_topic_x_words(new_topic,word) += 1;
        //_total_words_in_topics(new_topic,0) += 1;
        topic_x_words[V*new_topic + word] += 1;
        total_words_in_topics[new_topic] += 1;
#else
        (*topic_x_words)(new_topic,word) += 1;
        (*total_words_in_topics)(new_topic,0) += 1;
#endif
        document_x_topic(0,new_topic) += 1;
      }
      target.save_topics();
    } //omp parallel

    }

#if MPI_ENABLED
    if (rank == 0) {
      if(iter_idx % thinning == 0){
        if(iter_idx < burnin){
          continue;
        }
        print_neg_log_likelihood(vocab_path.substr(0, vocab_path.length()-9) + "neg_log_likeMPI.csv");
        //TODO: PRINT
      }
    }
#elif OMP_ENABLED
    if(iter_idx % thinning == 0){
      if(iter_idx < burnin){
        continue;
      }
      print_neg_log_likelihood(vocab_path.substr(0, vocab_path.length()-9) + "neg_log_likeOMP.csv");
      //TODO: PRINT
    }
#else
    if(iter_idx % thinning == 0){
      if(iter_idx < burnin){
        continue;
      }
      print_neg_log_likelihood(vocab_path.substr(0, vocab_path.length()-9) + "neg_log_likeSER.csv");
      //TODO: PRINT
    }
#endif
  } // outer for loop
}
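
The draw of new_topic in the Gibbs step above is an inverse-CDF sample over the normalized topic_dist. A standalone C sketch of that draw follows; sample_topic() is a hypothetical helper, u plays the role of unif(), and the do-while plus the upper bound guard against u == 0 and floating-point round-off, cases the in-place loop handles with an assert instead.

/* Hypothetical helper: inverse-CDF draw over K normalized topic
 * probabilities; subtract probabilities from a uniform draw u until it
 * becomes non-positive. */
static int sample_topic(const double *topic_dist, int K, double u)
{
	int new_topic = -1;

	do {
		new_topic += 1;
		u -= topic_dist[new_topic];
	} while (u > 0 && new_topic < K - 1);

	return new_topic;	/* always in [0, K-1] for K >= 1 */
}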