Example #1
template <typename WriterType>
void *
PayloadSearchManager<WriterType>::hbfQueryThread(void *caller) {
	PayloadSearchManager<WriterType> *_this
		(reinterpret_cast<PayloadSearchManager<WriterType> *>(caller));

	// initialize read enumerator
	StrftimeReadEnumerator readEnum(_this->_inputDir,
									"%Y/%m/%d/hbf_%H",
									_this->_startTime,
									_this->_endTime);
	if (!readEnum) {
		_this->_error = true;
		_this->_errorMsg.assign("StrftimeReadEnumerator: ");
		_this->_errorMsg.append(readEnum.error());
		// lock results lock
		if (pthread_mutex_lock(&(_this->_resultsLock)) != 0) {
			// error
			_this->_error = true;
			_this->_errorMsg.assign("Unable to lock _resultsLock");
			return NULL;
		}
		_this->_hbfRunning = false;

		// signal condition for correlator thread
		if (pthread_cond_signal(&(_this->_resultsCondition)) != 0) {
			_this->_error = true;
			_this->_errorMsg.assign("Unable to signal _resultsCondition");
			return NULL;
		}

		// unlock results lock
		if (pthread_mutex_unlock(&(_this->_resultsLock)) != 0) {
			// error
			_this->_error = true;
			_this->_errorMsg.assign("Unable to unlock _resultsLock");
			return NULL;
		}
		return NULL;
	}

	HBFQueryProcessor
		<FlatFileReader
			<ZlibCompressedHBF>,
		 SetWriter
		 	<HBFResult>
		> processor;

	FlatFileReader <ZlibCompressedHBF> reader;

	std::set <HBFResult> *curResults;

	// for each file
	for (StrftimeReadEnumerator::const_iterator it(readEnum.begin());
		 it != readEnum.end();
		 ++it)
	{
		// initialize FlatFileReader
		if (reader.open(*it) != E_SUCCESS) {
			_this->_error = true;
			_this->_errorMsg.assign("FlatFileReader error");
			// lock results lock
			if (pthread_mutex_lock(&(_this->_resultsLock)) != 0) {
				// error
				_this->_error = true;
				_this->_errorMsg.assign("Unable to lock _resultsLock");
				return NULL;
			}
			_this->_hbfRunning = false;

			// signal condition for correlator thread
			if (pthread_cond_signal(&(_this->_resultsCondition)) != 0) {
				_this->_error = true;
				_this->_errorMsg.assign("Unable to signal _resultsCondition");
				return NULL;
			}

			// unlock results lock
			if (pthread_mutex_unlock(&(_this->_resultsLock)) != 0) {
				// error
				_this->_error = true;
				_this->_errorMsg.assign("Unable to unlock _resultsLock");
				return NULL;
			}
			return NULL;
		}

		// initialize SetWriter
		curResults = new std::set <HBFResult>;
		SetWriter <HBFResult> writer(*curResults);

		// initialize HBFQuery <FlatFileReader, SetWriter>
		processor.init(&reader,
					   &writer,
					   _this->_queryString,
					   _this->_queryLength,
					   _this->_matchLength,
					   _this->_flowMatcher,
					   _this->_maxMTU,
					   _this->_maxFlows,
					   _this->_hbfThreadCount);

		// hbfQuery.run()
		if (processor.run() != 0) {
			_this->_error = true;
			_this->_errorMsg.assign("HBFQueryProcessor: ");
			_this->_errorMsg.append(processor.error());
			delete curResults;	// avoid leaking the unpublished result set
			// lock results lock
			if (pthread_mutex_lock(&(_this->_resultsLock)) != 0) {
				// error
				_this->_error = true;
				_this->_errorMsg.assign("Unable to lock _resultsLock");
				return NULL;
			}
			_this->_hbfRunning = false;

			// signal condition for correlator thread
			if (pthread_cond_signal(&(_this->_resultsCondition)) != 0) {
				_this->_error = true;
				_this->_errorMsg.assign("Unable to signal _resultsCondition");
				return NULL;
			}

			// unlock results lock
			if (pthread_mutex_unlock(&(_this->_resultsLock)) != 0) {
				// error
				_this->_error = true;
				_this->_errorMsg.assign("Unable to unlock _resultsLock");
				return NULL;
			}
			return NULL;
		}

		// lock results lock
		if (pthread_mutex_lock(&(_this->_resultsLock)) != 0) {
			// error
			_this->_error = true;
			_this->_errorMsg.assign("Unable to lock _resultsLock");
			return NULL;
		}
		// push_back set into _results
		_this->_results.push(curResults);

		// signal condition for correlator thread
		if (pthread_cond_signal(&(_this->_resultsCondition)) != 0) {
			_this->_error = true;
			_this->_errorMsg.assign("Unable to signal _resultsCondition");
			return NULL;
		}

		// unlock results lock
		if (pthread_mutex_unlock(&(_this->_resultsLock)) != 0) {
			// error
			_this->_error = true;
			_this->_errorMsg.assign("Unable to unlock _resultsLock");
			return NULL;
		}

		reader.close();
	}

	// lock results lock
	if (pthread_mutex_lock(&(_this->_resultsLock)) != 0) {
		// error
		_this->_error = true;
		_this->_errorMsg.assign("Unable to lock _resultsLock");
		return NULL;
	}
	_this->_hbfRunning = false;

	// signal condition for correlator thread
	if (pthread_cond_signal(&(_this->_resultsCondition)) != 0) {
		_this->_error = true;
		_this->_errorMsg.assign("Unable to signal _resultsCondition");
		return NULL;
	}

	// unlock results lock
	if (pthread_mutex_unlock(&(_this->_resultsLock)) != 0) {
		// error
		_this->_error = true;
		_this->_errorMsg.assign("Unable to unlock _resultsLock");
		return NULL;
	}

	return NULL;
}
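The lock / signal / unlock failure sequence above is repeated four times. A minimal sketch of a member helper that could factor it out; the name signalHbfDone is hypothetical and not part of the original class:

template <typename WriterType>
bool PayloadSearchManager<WriterType>::signalHbfDone() {
	// Hypothetical helper: mark the HBF thread finished and wake the
	// correlator thread; returns false if any pthread call fails.
	if (pthread_mutex_lock(&_resultsLock) != 0) {
		_error = true;
		_errorMsg.assign("Unable to lock _resultsLock");
		return false;
	}
	_hbfRunning = false;
	if (pthread_cond_signal(&_resultsCondition) != 0) {
		_error = true;
		_errorMsg.assign("Unable to signal _resultsCondition");
		pthread_mutex_unlock(&_resultsLock);
		return false;
	}
	if (pthread_mutex_unlock(&_resultsLock) != 0) {
		_error = true;
		_errorMsg.assign("Unable to unlock _resultsLock");
		return false;
	}
	return true;
}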
Example #2
File: bwtaln.c  Project: agongdai/peta
void bwa_cal_sa_reg_gap(int tid, bwt_t * const bwt[2], int n_seqs,
		bwa_seq_t *seqs, const gap_opt_t *opt) {
	int i, max_l = 0, max_len;
	gap_stack_t *stack;
	bwt_width_t *w[2], *seed_w[2];
	const ubyte_t *seq[2];
	gap_opt_t local_opt = *opt;

	// initiate priority stack
	for (i = max_len = 0; i != n_seqs; ++i)
		if (seqs[i].len > max_len)
			max_len = seqs[i].len;
	if (opt->fnr > 0.0)
		local_opt.max_diff = bwa_cal_maxdiff(max_len, BWA_AVG_ERR, opt->fnr);
	if (local_opt.max_diff < local_opt.max_gapo)
		local_opt.max_gapo = local_opt.max_diff;
	stack = gap_init_stack(local_opt.max_diff, local_opt.max_gapo,
			local_opt.max_gape, &local_opt);

	seed_w[0] = (bwt_width_t*) calloc(opt->seed_len + 1, sizeof(bwt_width_t));
	seed_w[1] = (bwt_width_t*) calloc(opt->seed_len + 1, sizeof(bwt_width_t));
	w[0] = w[1] = 0;
	for (i = 0; i != n_seqs; ++i) {
		bwa_seq_t *p = seqs + i;
#ifdef HAVE_PTHREAD
		if (opt->n_threads > 1) {
			pthread_mutex_lock(&g_seq_lock);
			if (p->tid < 0) { // unassigned
				int j;
				for (j = i; j < n_seqs && j < i + THREAD_BLOCK_SIZE; ++j)
					seqs[j].tid = tid;
			} else if (p->tid != tid) {
				pthread_mutex_unlock(&g_seq_lock);
				continue;
			}
			pthread_mutex_unlock(&g_seq_lock);
		}
#endif
		p->sa = 0;
		p->type = BWA_TYPE_NO_MATCH;
		p->c1 = p->c2 = 0;
		p->n_aln = 0;
		p->aln = 0;
		seq[0] = p->seq;
		seq[1] = p->rseq;
		if (max_l < p->len) {
			max_l = p->len;
			w[0] = (bwt_width_t*) realloc(w[0], (max_l + 1)
					* sizeof(bwt_width_t));
			w[1] = (bwt_width_t*) realloc(w[1], (max_l + 1)
					* sizeof(bwt_width_t));
			memset(w[0], 0, (max_l + 1) * sizeof(bwt_width_t));
			memset(w[1], 0, (max_l + 1) * sizeof(bwt_width_t));
		}
		bwt_cal_width(bwt[0], p->len, seq[0], w[0]);
		bwt_cal_width(bwt[1], p->len, seq[1], w[1]);
		if (opt->fnr > 0.0)
			local_opt.max_diff = bwa_cal_maxdiff(p->len, BWA_AVG_ERR, opt->fnr);
		local_opt.seed_len = opt->seed_len < p->len ? opt->seed_len
				: 0x7fffffff;
		if (p->len > opt->seed_len) {
			bwt_cal_width(bwt[0], opt->seed_len, seq[0] + (p->len
					- opt->seed_len), seed_w[0]);
			bwt_cal_width(bwt[1], opt->seed_len, seq[1] + (p->len
					- opt->seed_len), seed_w[1]);
		}
		// core function
		p->aln = bwt_match_gap(bwt, p->len, seq, w, p->len <= opt->seed_len ? 0
				: seed_w, &local_opt, &p->n_aln, stack);
		// clean up the read data; only the alignment in p->aln is kept
		free(p->name);
		free(p->seq);
		free(p->rseq);
		free(p->qual);
		p->name = 0;
		p->seq = p->rseq = p->qual = 0;
	}
	free(seed_w[0]);
	free(seed_w[1]);
	free(w[0]);
	free(w[1]);
	gap_destroy_stack(stack);
}
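In upstream BWA this function is driven per thread by a small pthread wrapper; a sketch of that driver, following BWA's thread_aux_t layout (assumed here to match this fork):

typedef struct {
	int tid;                 /* thread id passed through to bwa_cal_sa_reg_gap */
	bwt_t *bwt[2];
	int n_seqs;
	bwa_seq_t *seqs;
	const gap_opt_t *opt;
} thread_aux_t;

static void *worker(void *data)
{
	thread_aux_t *d = (thread_aux_t *)data;
	bwa_cal_sa_reg_gap(d->tid, d->bwt, d->n_seqs, d->seqs, d->opt);
	return 0;
}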
Example #3
void console(int sockfd)
{
	char buffer[BUFF_SIZE];
	char *recipient, *msg, *tmp;

	memset(buffer, 0, sizeof buffer);
	printf("%s\n%s\n", "Welcome to chat client console. Please enter commands",
		"syntax: [command] [optional recipient] [optional msg]");

	/*
	* Issue the prompt and wait for command,
	* process the command and
	* repeat forever
	*/
	while(1) {
		/* console prompt */
		printf("[%s]$ ", username);
		if (fgets(buffer, sizeof buffer, stdin) == NULL)
			break;	/* EOF or read error: leave the console loop */
		/* fgets also reads the \n from stdin, strip it */
		buffer[strcspn(buffer, "\n")] = '\0';

		if(strcmp(buffer, "") == 0)
			continue;

		if(strncmp(buffer, "exit", 4) == 0) {
			/* tell server to clean up structures for the client */
			write(sockfd, "exit", 6);
			/* clean up self and exit */
			pthread_mutex_destroy(&console_cv_lock);
			pthread_cond_destroy(&console_cv);
			_exit(EXIT_SUCCESS);
		}

		/*
		* `ls` is sent to server to get list of connected users.
		* It is written to server's socket, then using conditional wait,
		* we `wait` until the reply arrives in the receiver thread, where
		* `signal` is done immediately when the reply is read
		*/
		if(strncmp(buffer, "ls", 2) == 0) {
			/*
			* The mutex that protects the condition variable has to
			* be locked before a conditional wait.
			*/
			pthread_mutex_lock(&console_cv_lock);
			write(sockfd, "ls", 2);
			/* not protected from spurious wakeups */
			/*
			* This operation unlocks the given mutex and waits until a
			* pthread_cond_signal() happens on the same condition variable.
			* The mutex is then re-acquired before the call returns.
			*/
			pthread_cond_wait(&console_cv, &console_cv_lock);
			/* release the mutex */
			pthread_mutex_unlock(&console_cv_lock);
			continue;
		}

		/* `send <recipient> <msg>` sends <msg> to the given <username> */
		if(strncmp(buffer, "send ", 5) == 0) {
			/* the following is to validate the syntax */
			tmp = strchr(buffer, ' ');
			if(tmp == NULL) {
				error();
				continue;
			}
			recipient = tmp + 1;

			tmp = strchr(recipient, ' ');
			if(tmp == NULL) {
				error();
				continue;
			}
			msg = tmp + 1;

			/* issue the `send` command to server */
			write(sockfd, buffer, strlen(buffer) + 1);
			continue;
		}

		error();
	}
}
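As the comment above notes, the `ls` wait is not protected from spurious wakeups. A minimal sketch of the standard predicate-loop fix, assuming a hypothetical `ls_reply_ready` flag that the receiver thread sets to 1 (under the same lock) before signaling:

/* Hypothetical flag, shared with the receiver thread. */
static int ls_reply_ready;

	pthread_mutex_lock(&console_cv_lock);
	ls_reply_ready = 0;
	write(sockfd, "ls", 2);
	while (!ls_reply_ready)		/* re-check the predicate on every wakeup */
		pthread_cond_wait(&console_cv, &console_cv_lock);
	pthread_mutex_unlock(&console_cv_lock);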
Example #4
template <typename WriterType>
int PayloadSearchManager<WriterType>::run() {
	// create HBFQueryProcessor thread
	_hbfRunning = true;
	if (pthread_create(&_hbfQueryThread,
					   NULL,
					   hbfQueryThread,
					   this) != 0)
	{
		_error = true;
		_errorMsg.assign("Unable ot create HBF query thread");
		return 1;
	}

	// prepare the correlator
	HBFHTTPCorrelator
		<EnumeratedFileReader
			<FlatFileReader
				<HTTPRequest>,
			 StrftimeReadEnumerator
			>,
		 EnumeratedFileReader
			<FlatFileReader
				<HTTPResponse>,
			 StrftimeReadEnumerator
			>,
		 WriterType
		> correlator;

	// prepare a StrftimeReadEnumerator
	boost::shared_ptr<StrftimeReadEnumerator> request_enum
												(new StrftimeReadEnumerator());
	boost::shared_ptr<StrftimeReadEnumerator> response_enum
												(new StrftimeReadEnumerator());

	std::set <HBFResult> *curResults;

	while (true) {
		// lock results lock
		if (pthread_mutex_lock(&_resultsLock) != 0) {
			// error
			_error = true;
			_errorMsg.assign("Unable to lock _resultsLock [1st in loop]");
			if (pthread_join(_hbfQueryThread, NULL) != 0) {
				// something is terribly wrong
				_errorMsg.append(", Unable to join _hbfQueryThread");
			}
			return 1;
		}
		while (_results.empty()) {
			if (!_hbfRunning) {
				break;
			}
			// wait for condition signal
			if (pthread_cond_wait(&_resultsCondition, &_resultsLock) != 0) {
				_error = true;
				_errorMsg.assign("Error waiting for _resultsCondition");
				if (pthread_join(_hbfQueryThread, NULL) != 0) {
					// something is terribly wrong
					_errorMsg.append(", Unable to join _hbfQueryThread");
				}
				return 1;
			}
		}
		// exit only when the producer is done AND the queue is drained;
		// breaking on !_hbfRunning alone would lose results still queued
		if (_results.empty()) {
			// unlock results lock
			if (pthread_mutex_unlock(&_resultsLock) != 0) {
				// error
				_error = true;
				_errorMsg.assign("Unable to unlock _resultsLock");
				if (pthread_join(_hbfQueryThread, NULL) != 0) {
					// something is terribly wrong
					_errorMsg.append(", Unable to join _hbfQueryThread");
				}
				return 1;
			}

			break;
		}
		// pop result
		curResults = _results.front();
		_results.pop();

		// unlock results lock
		if (pthread_mutex_unlock(&_resultsLock) != 0) {
			// error
			_error = true;
			_errorMsg.assign("Unable to unlock _resultsLock");
			if (pthread_join(_hbfQueryThread, NULL) != 0) {
				// something is terribly wrong
				_errorMsg.append(", Unable to join _hbfQueryThread");
			}
			return 1;
		}

		// only run the correlator if there are results in the set
		if (curResults->empty()) {
			// free the empty set
			delete curResults;
			continue;
		}

		// initialize read enumerator using a result from the set
		request_enum->init(_inputDir,
					   "%Y/%m/%d/http_request_%H",
					   curResults->begin()->startTime(),
					   curResults->begin()->startTime());
		if (!(*request_enum)) {
			// error
			_error = true;
			_errorMsg.assign("Unable to initialize read enumerator");
			if (pthread_join(_hbfQueryThread, NULL) != 0) {
				// something is terribly wrong
				_errorMsg.append(", Unable to join _hbfQueryThread");
			}
			return 1;
		}

		response_enum->init(_inputDir,
					   "%Y/%m/%d/http_response_%H",
					   curResults->begin()->startTime(),
					   curResults->begin()->startTime());
		if (!(*response_enum)) {
			// error
			_error = true;
			_errorMsg.assign("Unable to initialize read enumerator");
			if (pthread_join(_hbfQueryThread, NULL) != 0) {
				// something is terribly wrong
				_errorMsg.append(", Unable to join _hbfQueryThread");
			}
			return 1;
		}

		// initialize EnumeratedFileReader of FlatFileReaders
		EnumeratedFileReader
			<FlatFileReader
				<HTTPRequest>,
			 StrftimeReadEnumerator
			> request_reader;
		if (request_reader.init(request_enum) != E_SUCCESS) {
			_error = true;
			_errorMsg.assign("Error initializing EnumeratedFileReader.");
			if (pthread_join(_hbfQueryThread, NULL) != 0) {
				// something is terribly wrong
				_errorMsg.append(", Unable to join _hbfQueryThread");
			}
			return 1;
		}

		EnumeratedFileReader
			<FlatFileReader
				<HTTPResponse>,
			 StrftimeReadEnumerator
			> response_reader;
		if (response_reader.init(response_enum) != E_SUCCESS) {
			_error = true;
			_errorMsg.assign("Error initializing EnumeratedFileReader.");
			if (pthread_join(_hbfQueryThread, NULL) != 0) {
				// something is terribly wrong
				_errorMsg.append(", Unable to join _hbfQueryThread");
			}
			return 1;
		}

		// reinit correlator with EnumeratedFileReader and Writer
		correlator.init(&request_reader, &response_reader, _writer, curResults);

		// correlator.run()
		if (correlator.run() != 0) {
			_error = true;
			_errorMsg.assign(correlator.error());
			delete curResults;	// avoid leaking the set on the error path
			if (pthread_join(_hbfQueryThread, NULL) != 0) {
				// something is terribly wrong
				_errorMsg.append(", Unable to join _hbfQueryThread");
			}
			return 1;
		}

		// free the processed set
		delete curResults;
	}

	// join query processor thread
	if (pthread_join(_hbfQueryThread, NULL) != 0) {
		_error = true;
		_errorMsg.assign("Unable to join _hbfQueryThread");
		return 1;
	}

	return _error ? 1 : 0;
}
Example #5
void thread_end()
{
	pthread_mutex_lock(&mutex);
	thread_running = 0;
	pthread_mutex_unlock(&mutex);
}
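A sketch of the matching reader side, assuming the same `mutex` and `thread_running` globals; the name thread_is_running is hypothetical. Reading the flag under the lock pairs correctly with thread_end():

int thread_is_running(void)
{
	int running;

	pthread_mutex_lock(&mutex);
	running = thread_running;	/* read under the same lock that guards writes */
	pthread_mutex_unlock(&mutex);
	return running;
}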
Example #6
int
pthread_setcanceltype (int type, int *oldtype)
/*
 * ------------------------------------------------------
 * DOCPUBLIC
 *      This function atomically sets the calling thread's
 *      cancelability type to 'type' and returns the previous
 *      cancelability type at the location referenced by
 *      'oldtype'
 *
 * PARAMETERS
 *      type,
 *      oldtype
 *              PTHREAD_CANCEL_DEFERRED
 *                      only deferred cancellation is allowed,
 *
 *              PTHREAD_CANCEL_ASYNCHRONOUS
 *                      Asynchronous cancellation is allowed
 *
 *
 * DESCRIPTION
 *      This function atomically sets the calling thread's
 *      cancelability type to 'type' and returns the previous
 *      cancelability type at the location referenced by
 *      'oldtype'
 *
 *      NOTES:
 *      1)      Use with caution; most code is not safe for use
 *              with asynchronous cancelability.
 *
 * COMPATIBILITY ADDITIONS
 *      If 'oldtype' is NULL then the previous type is not returned
 *      but the function still succeeds. (Solaris)
 *
 * RESULTS
 *              0               successfully set cancelability type,
 *              EINVAL          'type' is invalid
 *
 * ------------------------------------------------------
 */
{
    int result = 0;
    pthread_t self = pthread_self ();

    if (self == NULL
            || (type != PTHREAD_CANCEL_DEFERRED
                && type != PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        return EINVAL;
    }

    /*
     * Lock for async-cancel safety.
     */
    (void) pthread_mutex_lock (&self->cancelLock);

    if (oldtype != NULL)
    {
        *oldtype = self->cancelType;
    }

    self->cancelType = type;

    /*
     * Check if there is a pending asynchronous cancel
     */
    if (self->cancelState == PTHREAD_CANCEL_ENABLE
            && type == PTHREAD_CANCEL_ASYNCHRONOUS
            && WaitForSingleObject (self->cancelEvent, 0) == WAIT_OBJECT_0)
    {
        self->state = PThreadStateCanceling;
        self->cancelState = PTHREAD_CANCEL_DISABLE;
        ResetEvent (self->cancelEvent);
        (void) pthread_mutex_unlock (&self->cancelLock);
        ptw32_throw (PTW32_EPS_CANCEL);

        /* Never reached */
    }

    (void) pthread_mutex_unlock (&self->cancelLock);

    return (result);

}				/* pthread_setcanceltype */
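A minimal usage sketch for the routine above (standard POSIX pattern; the worker body is hypothetical):

void *cancellable_worker(void *arg)
{
    int oldtype;

    /* Allow cancellation at any instruction rather than only at
     * cancellation points; most code is NOT safe for this (see NOTES). */
    pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

    /* ... long-running, async-cancel-safe work ... */

    /* Restore the previous type before touching non-async-safe code. */
    pthread_setcanceltype (oldtype, NULL);
    return arg;
}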
Example #7
WORD32 ithread_mutex_lock(void *mutex)
{
    return pthread_mutex_lock((pthread_mutex_t *)mutex);
}
Example #8
void EventHandler::signal(Event *event) {
	pthread_mutex_lock(&d->mutex);
	// event->setSignaled();
	pthread_cond_signal(&d->event);
	pthread_mutex_unlock(&d->mutex);
}
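With `event->setSignaled()` commented out, a signal delivered before the waiter blocks is lost. A sketch of the flag-based fix, assuming a hypothetical `signaled` member alongside `d->mutex` and `d->event`:

void EventHandler::signal(Event *event) {
	pthread_mutex_lock(&d->mutex);
	d->signaled = true;               // hypothetical flag: remembers the signal
	pthread_cond_signal(&d->event);
	pthread_mutex_unlock(&d->mutex);
}

void EventHandler::wait(Event *event) {
	pthread_mutex_lock(&d->mutex);
	while (!d->signaled)              // survives spurious wakeups and early signals
		pthread_cond_wait(&d->event, &d->mutex);
	d->signaled = false;
	pthread_mutex_unlock(&d->mutex);
}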
Example #9
static inline void *__gnix_generic_register(
		struct gnix_fid_domain *domain,
		struct gnix_fid_mem_desc *md,
		void *address,
		size_t length,
		gni_cq_handle_t dst_cq_hndl,
		int flags,
		int vmdh_index)
{
	struct gnix_nic *nic;
	gni_return_t grc = GNI_RC_SUCCESS;
	int rc;

	pthread_mutex_lock(&gnix_nic_list_lock);

	/* If the nic list is empty, create a nic */
	if (unlikely((dlist_empty(&gnix_nic_list_ptag[domain->ptag])))) {
		/* release the lock because we are not checking the list after
			this point. Additionally, gnix_nic_alloc takes the 
			lock to add the nic. */
		pthread_mutex_unlock(&gnix_nic_list_lock);

		rc = gnix_nic_alloc(domain, NULL, &nic);
		if (rc) {
			GNIX_INFO(FI_LOG_MR,
				  "could not allocate nic to do mr_reg,"
				  " ret=%i\n", rc);
			return NULL;
		}
	} else {
		nic = dlist_first_entry(&gnix_nic_list_ptag[domain->ptag], 
			struct gnix_nic, ptag_nic_list);
		if (unlikely(nic == NULL)) {
			GNIX_ERR(FI_LOG_MR, "Failed to find nic on "
				"ptag list\n");
			pthread_mutex_unlock(&gnix_nic_list_lock);
			return NULL;
		}
		_gnix_ref_get(nic);
		pthread_mutex_unlock(&gnix_nic_list_lock);
	}

	COND_ACQUIRE(nic->requires_lock, &nic->lock);
	grc = GNI_MemRegister(nic->gni_nic_hndl, (uint64_t) address,
				  length,	dst_cq_hndl, flags,
				  vmdh_index, &md->mem_hndl);
	COND_RELEASE(nic->requires_lock, &nic->lock);

	if (unlikely(grc != GNI_RC_SUCCESS)) {
		GNIX_INFO(FI_LOG_MR, "failed to register memory with uGNI, "
			  "ret=%s\n", gni_err_str[grc]);
		_gnix_ref_put(nic);

		return NULL;
	}

	/* set up the mem desc */
	md->nic = nic;
	md->domain = domain;

	/* take references on domain */
	_gnix_ref_get(md->domain);

	return md;
}
Example #10
void* viagem(void *arg){
	int tipoViag = (int)(intptr_t) arg;	/* cast via intptr_t (<stdint.h>): a pointer may be wider than int */
	time_t mytime;
	time_t mytime2;
	int nrComboio;
	FILE *ficheiro;

	ficheiro = fopen("output.txt", "a");
	if(ficheiro == NULL)
		pthread_exit(NULL);

	if(tipoViag==1){
		pthread_mutex_lock(&muxNrComboio);
		strt->numeroComboio++;
		nrComboio=strt->numeroComboio;
		pthread_mutex_unlock(&muxNrComboio);
		pthread_mutex_lock(&muxB_C);
		mytime=horaAtual();
		strt->c++;
		fprintf(ficheiro,"\n\nComboio nº %d.   Origem: Cidade C.   Destino: Cidade A.   Linha: cidadeC-cidadeB\n", nrComboio); 
		sleep(3);
		pthread_mutex_unlock(&muxB_C);
		pthread_mutex_lock(&muxA_B);
		fprintf(ficheiro,"\nComboio nº %d.   Origem: Cidade C.   Destino: Cidade A.   Linha: cidadeB-cidadeA\n", nrComboio); 
		strt->b++;
		sleep(10);
		strt->a++;
		pthread_mutex_unlock(&muxA_B);
		fprintf(ficheiro,"\nComboio nº %d chegou à cidade A.\nData de saída: ", nrComboio);
		fprintf(ficheiro, "%s", ctime(&mytime));	/* never pass ctime() output as a format string */
		fprintf(ficheiro,"Data de chegada: ");
		mytime2=horaAtual();
		fprintf(ficheiro, "%s", ctime(&mytime2));
	}
	
	if(tipoViag==2){
		pthread_mutex_lock(&muxNrComboio);
		strt->numeroComboio++;
		nrComboio=strt->numeroComboio;
		pthread_mutex_unlock(&muxNrComboio); 
		pthread_mutex_lock(&muxA_B);
		mytime=horaAtual();
		strt->a++;
		fprintf(ficheiro,"\n\nComboio nº %d.   Origem: Cidade A.   Destino: Cidade D.   Linha: cidadeA-cidadeB\n", nrComboio); 
		sleep(10);
		pthread_mutex_unlock(&muxA_B);
		pthread_mutex_lock(&muxB_D);
		strt->b++;
		fprintf(ficheiro,"\nComboio nº %d.   Origem: Cidade C.   Destino: Cidade A.   Linha: cidadeB-cidadeA\n", nrComboio); 
		sleep(3);
		strt->d++;
		pthread_mutex_unlock(&muxB_D);
		fprintf(ficheiro,"\nComboio nº %d chegou à cidade D.\nData de saída: ", nrComboio);
		fprintf(ficheiro,ctime(&mytime));
		fprintf(ficheiro,"Data de chegada: ");
		mytime2=horaAtual(); 
		fprintf(ficheiro,ctime(&mytime2));
	}
	fclose(ficheiro);
	
	pthread_exit(NULL);
}
Example #11
void *halide_hexagon_host_malloc(size_t size) {
    const int heap_id = system_heap_id;
    const int ion_flags = ion_flag_cached;

    // Hexagon can only access a small number of mappings of these
    // sizes. We reduce the number of mappings required by aligning
    // large allocations to these sizes.
    static const size_t alignments[] = { 0x1000, 0x4000, 0x10000, 0x40000, 0x100000 };
    size_t alignment = alignments[0];

    // Align the size up to the minimum alignment.
    size = (size + alignment - 1) & ~(alignment - 1);

    if (heap_id != system_heap_id) {
        for (size_t i = 0; i < sizeof(alignments) / sizeof(alignments[0]); i++) {
            if (size >= alignments[i]) {
                alignment = alignments[i];
            }
        }
    }

    ion_user_handle_t handle = ion_alloc(ion_fd, size, alignment, 1 << heap_id, ion_flags);
    if (handle < 0) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "ion_alloc(%d, %d, %d, %d, %d) failed",
                            ion_fd, size, alignment, 1 << heap_id, ion_flags);
        return NULL;
    }

    // Map the ion handle to a file buffer.
    int buf_fd = ion_map(ion_fd, handle);
    if (buf_fd < 0) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "ion_map(%d, %d) failed", ion_fd, handle);
        ion_free(ion_fd, handle);
        return NULL;
    }

    // Map the file buffer to a pointer.
    void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
    if (buf == MAP_FAILED) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "mmap(NULL, %d, PROT_READ | PROT_WRITE, MAP_SHARED, %d, 0) failed",
                            size, buf_fd);
        close(buf_fd);
        ion_free(ion_fd, handle);
        return NULL;
    }

    // Register the buffer, so we get zero copy.
    if (remote_register_buf) {
        remote_register_buf(buf, size, buf_fd);
    }

    // Build a record for this allocation.
    allocation_record *rec = (allocation_record *)malloc(sizeof(allocation_record));
    if (!rec) {
        __android_log_print(ANDROID_LOG_ERROR, "halide", "malloc failed");
        munmap(buf, size);
        close(buf_fd);
        ion_free(ion_fd, handle);
        return NULL;
    }

    rec->next = NULL;
    rec->handle = handle;
    rec->buf_fd = buf_fd;
    rec->buf = buf;
    rec->size = size;

    // Insert this record into the list of allocations. Insert it at
    // the front, since it's simpler, and most likely to be freed
    // next.
    pthread_mutex_lock(&allocations_mutex);
    rec->next = allocations.next;
    allocations.next = rec;
    pthread_mutex_unlock(&allocations_mutex);

    return buf;
}
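A hedged usage sketch; halide_hexagon_host_free() is assumed here to be the matching deallocator from the same Halide runtime file:

	uint8_t *buf = (uint8_t *)halide_hexagon_host_malloc(1 << 20);
	if (buf) {
		buf[0] = 42;	/* host writes; the DSP sees the same pages zero-copy */
		halide_hexagon_host_free(buf);	/* unregisters, unmaps and releases the ion handle */
	}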
Example #12
int
sem_destroy (sem_t * sem)
/*
 * ------------------------------------------------------
 * DOCPUBLIC
 *      This function destroys an unnamed semaphore.
 *
 * PARAMETERS
 *      sem
 *              pointer to an instance of sem_t
 *
 * DESCRIPTION
 *      This function destroys an unnamed semaphore.
 *
 * RESULTS
 *              0               successfully destroyed semaphore,
 *              -1              failed, error in errno
 * ERRNO
 *              EINVAL          'sem' is not a valid semaphore,
 *              ENOSYS          semaphores are not supported,
 *              EBUSY           threads (or processes) are currently
 *                                      blocked on 'sem'
 *
 * ------------------------------------------------------
 */
{
    int result = 0;
    sem_t s = NULL;

    if (sem == NULL || *sem == NULL)
    {
        result = EINVAL;
    }
    else
    {
        s = *sem;

        if ((result = pthread_mutex_lock (&s->lock)) == 0)
        {
            if (s->value < 0)
            {
                (void) pthread_mutex_unlock (&s->lock);
                result = EBUSY;
            }
            else
            {
                /* There are no threads currently blocked on this semaphore. */

                if (!CloseHandle (s->sem))
                {
                    (void) pthread_mutex_unlock (&s->lock);
                    result = EINVAL;
                }
                else
                {
                    /*
                     * Invalidate the semaphore handle when we have the lock.
                     * Other sema operations should test this after acquiring the lock
                     * to check that the sema is still valid, i.e. before performing any
                     * operations. This may only be necessary before the sema op routine
                     * returns so that the routine can return EINVAL - e.g. if setting
                     * s->value to SEM_VALUE_MAX below does force a fall-through.
                     */
                    *sem = NULL;

                    /* Prevent anyone else actually waiting on or posting this sema.
                     */
                    s->value = SEM_VALUE_MAX;

                    (void) pthread_mutex_unlock (&s->lock);

                    do
                    {
                        /* Give other threads a chance to run and exit any sema op
                         * routines. Due to the SEM_VALUE_MAX value, if sem_post or
                         * sem_wait were blocked by us they should fall through.
                         */
                        Sleep(0);
                    }
                    while (pthread_mutex_destroy (&s->lock) == EBUSY);
                }
            }
        }
    }

    if (result != 0)
    {
        errno = result;
        return -1;
    }

    free (s);

    return 0;

}				/* sem_destroy */
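A minimal usage sketch for the routine above (standard POSIX semantics; needs <semaphore.h> and <stdio.h>):

    sem_t sem;

    if (sem_init (&sem, 0, 0) != 0)      /* unnamed, not process-shared, initial value 0 */
        perror ("sem_init");

    /* ... sem_post / sem_wait traffic ... */

    if (sem_destroy (&sem) != 0)
        perror ("sem_destroy");          /* fails with EBUSY while threads are blocked */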
Example #13
void wi_lock_lock(wi_lock_t *lock) {
    int     err;
    
    if((err = pthread_mutex_lock(&lock->mutex)) != 0)
        WI_ASSERT(false, "pthread_mutex_lock: %s", strerror(err));
}
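A sketch of the matching unlock in the same style; whether the library defines it exactly this way is an assumption based on the function above:

void wi_lock_unlock(wi_lock_t *lock) {
    int     err;

    if((err = pthread_mutex_unlock(&lock->mutex)) != 0)
        WI_ASSERT(false, "pthread_mutex_unlock: %s", strerror(err));
}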
Example #14
static void monitor_update(iterator_t *it, pthread_mutex_t *recv_ready_mutex)
{
	uint32_t total_sent = iterator_get_sent(it);
	if (last_now > 0.0) {
		double age = now() - zsend.start;
		double delta = now() - last_now;
		double remaining_secs = compute_remaining_time(age, total_sent);
		double percent_complete = 100. * age / (age + remaining_secs);

		// ask pcap for fresh values
		pthread_mutex_lock(recv_ready_mutex);
		recv_update_pcap_stats();
		pthread_mutex_unlock(recv_ready_mutex);

		// format times for display
		char time_left[20];
		if (age < 5) {
			time_left[0] = '\0';
		} else {
			char buf[20];
			time_string((int)remaining_secs, 1, buf, sizeof(buf));
			snprintf(time_left, sizeof(time_left), " (%s left)", buf);
		}
		char time_past[20];
		time_string((int)age, 0, time_past, sizeof(time_past));

		char send_rate[20], send_avg[20],
			 recv_rate[20], recv_avg[20],
			 pcap_drop[20], pcap_drop_avg[20];
		// recv stats
		number_string((zrecv.success_unique - last_rcvd)/delta,
						recv_rate, sizeof(recv_rate));
		number_string((zrecv.success_unique/age), recv_avg, sizeof(recv_avg));
		// dropped stats
		number_string((zrecv.pcap_drop + zrecv.pcap_ifdrop - last_drop)/delta,
						pcap_drop, sizeof(pcap_drop));
		number_string(((zrecv.pcap_drop + zrecv.pcap_ifdrop)/age),
						pcap_drop_avg, sizeof(pcap_drop_avg));

		// Warn if we drop > 5% of our average receive rate
		uint32_t drop_rate = (uint32_t)((zrecv.pcap_drop + zrecv.pcap_ifdrop - last_drop) / delta);
		if (drop_rate > (uint32_t)((zrecv.success_unique - last_rcvd) / delta) / 20) {
			log_warn("monitor", "Dropped %d packets in the last second, (%d total dropped (pcap: %d + iface: %d))",
					 drop_rate, zrecv.pcap_drop + zrecv.pcap_ifdrop, zrecv.pcap_drop, zrecv.pcap_ifdrop);
		}

		// Warn if we fail to send > 1% of our average send rate
		uint32_t fail_rate = (uint32_t)((zsend.sendto_failures - last_failures) / delta); // failures/sec
		if (fail_rate > ((total_sent / age) / 100)) {
			log_warn("monitor", "Failed to send %d packets/sec (%d total failures)",
					 fail_rate, zsend.sendto_failures);
		}
		float hits;
		if (!total_sent) {
			hits = 0;
		} else {
			hits = zrecv.success_unique*100./total_sent;
		}
		if (!zsend.complete) {
			// main display (during sending)
			number_string((total_sent - last_sent)/delta,
							send_rate, sizeof(send_rate));
			number_string((total_sent/age), send_avg, sizeof(send_avg));
			fprintf(stderr,
					"%5s %0.0f%%%s; send: %u %sp/s (%sp/s avg); "
					"recv: %u %sp/s (%sp/s avg); "
					"drops: %sp/s (%sp/s avg); "
					"hits: %0.2f%%\n",
					time_past,
					percent_complete,
					time_left,
					total_sent,
					send_rate,
					send_avg,
					zrecv.success_unique,
					recv_rate,
					recv_avg,
					pcap_drop,
					pcap_drop_avg,
					hits);
		} else {
		  	// alternate display (during cooldown)
			number_string((total_sent/(zsend.finish - zsend.start)), send_avg, sizeof(send_avg));
			fprintf(stderr,
					"%5s %0.0f%%%s; send: %u done (%sp/s avg); "
					"recv: %u %sp/s (%sp/s avg); "
					"drops: %sp/s (%sp/s avg); "
					"hits: %0.2f%%\n",
					time_past,
					percent_complete,
					time_left,
					total_sent,
					send_avg,
					zrecv.success_unique,
					recv_rate,
					recv_avg,
					pcap_drop,
					pcap_drop_avg,
					hits);
		}
	}
	last_now  = now();
	last_sent = total_sent;
	last_rcvd = zrecv.success_unique;
	last_drop = zrecv.pcap_drop + zrecv.pcap_ifdrop;
	last_failures = zsend.sendto_failures;
}
Example #15
void set_progress(gfloat progress)
{
	pthread_mutex_lock(&mutex);
	search_progress = progress;
	pthread_mutex_unlock(&mutex);
}
Example #16
int main(int argc, char **argv) {
    char *hexaeskey = 0, *hexaesiv = 0;
    char *fmtpstr = 0;
    char *arg;
    int i;
    assert(RAND_MAX >= 0x10000);    // XXX move this to compile time
    while ( (arg = *++argv) ) {
        if (!strcasecmp(arg, "iv")) {
            hexaesiv = *++argv;
            argc--;
        } else
        if (!strcasecmp(arg, "key")) {
            hexaeskey = *++argv;
            argc--;
        } else
        if (!strcasecmp(arg, "fmtp")) {
            fmtpstr = *++argv;
        } else
        if (!strcasecmp(arg, "cport")) {
            controlport = atoi(*++argv);
        } else
        if (!strcasecmp(arg, "tport")) {
            timingport = atoi(*++argv);
        } else
        if (!strcasecmp(arg, "dport")) {
            dataport = atoi(*++argv);
        } else
        if (!strcasecmp(arg, "host")) {
            rtphost = *++argv;
        }
#ifdef FANCY_RESAMPLING
        else
        if (!strcasecmp(arg, "resamp")) {
            fancy_resampling = atoi(*++argv);
        }
#endif
    }

    if (!hexaeskey || !hexaesiv)
        die("Must supply AES key and IV!");

    if (hex2bin(aesiv, hexaesiv))
        die("can't understand IV");
    if (hex2bin(aeskey, hexaeskey))
        die("can't understand key");
    AES_set_decrypt_key(aeskey, 128, &aes);

    memset(fmtp, 0, sizeof(fmtp));
    i = 0;
    while ( (arg = strsep(&fmtpstr, " \t")) )
        fmtp[i++] = atoi(arg);

    init_decoder();
    init_buffer();
    init_rtp();      // open a UDP listen port and start a listener; decode into ring buffer
    fflush(stdout);
    init_output();              // resample and output from ring buffer

    char line[128];
    int in_line = 0;
    int n;
    double f;
    while (fgets(line + in_line, sizeof(line) - in_line, stdin)) {
        n = strlen(line);
        if (line[n-1] != '\n') {
            in_line = strlen(line) - 1;
            if (n == sizeof(line)-1)
                in_line = 0;
            continue;
        }
        if (sscanf(line, "vol: %lf\n", &f)) {
            assert(f<=0);
            if (debug)
                fprintf(stderr, "VOL: %lf\n", f);
            volume = pow(10.0,0.1*f);
            fix_volume = 65536.0 * volume;
            continue;
        }
        if (!strcmp(line, "exit\n")) {
            exit(0);
        }
        if (!strcmp(line, "flush\n")) {
            pthread_mutex_lock(&ab_mutex);
            ab_resync();
            pthread_mutex_unlock(&ab_mutex);
            if (debug)
                fprintf(stderr, "FLUSH\n");
        }
    }
    fprintf(stderr, "bye!\n");
    fflush(stderr);

    uninit_output();

    return EXIT_SUCCESS;
}
Example #17
int main(int ac, char *av[])
{
	//initialize a lot of struct and data points needed
    struct addrinfo *in, *pin;
    struct addrinfo start;
    int sockets[10], socket_num = 0;	
	allocatedMemory[0] = 0;
	myStruct *arg;
	pthread_t dthread;
	pthread_mutex_t *mutex;
	//clear the struct data values
	memset(&start, 0, sizeof start);//clear the object and then check for the right number of arguments passed
	
    if (ac < 3) 
	{
		printf("Usage: %s -p <port>\n", av[0]), exit(0);
	}
	if (ac == 4)
	{
		printf("Usage: %s -p <port> -R <path>\n", av[0]), exit(0);
	}
	else if (ac == 5) 
	{
		path = av[4];
	}
	//set the flags for the correct type of server
    start.ai_flags = AI_PASSIVE | AI_NUMERICSERV | AI_ADDRCONFIG;
    start.ai_protocol = IPPROTO_TCP; // only interested in TCP
    start.ai_family = AF_INET6;
	
	if (strcmp(av[1],"-p"))
		return -1;
	//gets the port number
    char *nport = av[2];

    int gai = getaddrinfo(NULL, nport, &start, &in);
	
    if (gai != 0)
	{
        fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(gai));
        exit(-1);
	}

    char printed_addr[1024];
    for (pin = in; pin; pin = pin->ai_next) {
        assert (pin->ai_protocol == IPPROTO_TCP);
        int gai = getnameinfo(pin->ai_addr, pin->ai_addrlen,
                             printed_addr, sizeof printed_addr, NULL, 0,
                             NI_NUMERICHOST);
		
        if (gai != 0)
            fprintf(stderr, "getnameinfo: %s\n", gai_strerror(gai)), exit(-1);

        printf("%s: %s\n", pin->ai_family == AF_INET ? "AF_INET" :
                           pin->ai_family == AF_INET6 ? "AF_INET6" : "?", 
                           printed_addr);

        int s = socket(pin->ai_family, pin->ai_socktype, pin->ai_protocol);
        if (s == -1)
            perror("socket"), exit(-1);

        int opt = 1;
        setsockopt (s, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof (opt));

        gai = bind(s, pin->ai_addr, pin->ai_addrlen);
        if (gai == -1 && errno == EADDRINUSE) {
            // ignore Linux limitation
            close(s);
            continue;
        }

        if (gai == -1)
            perror("bind"), exit(-1);
		//begin listening on the socket
        gai = listen(s, 10);
        if (gai == -1)
            perror("listen"), exit(-1);

        assert(socket_num < sizeof(sockets)/sizeof(sockets[0]));
        sockets[socket_num++] = s;
    }
	freeaddrinfo(in);
    assert(socket_num == 1);
	//initialize mutex before accepting
    struct sockaddr_storage rem;
    socklen_t remlen = sizeof (rem);
	
	mutex = malloc(sizeof(pthread_mutex_t));
	pthread_mutex_init(mutex, NULL);
	while(1){
		//accept a connection
		arg = malloc(sizeof(myStruct));
		arg->mutex = mutex;
		//accept the connection
        arg->s = accept (sockets[0], (struct sockaddr *) &rem, &remlen);
		
		//if accept failed, report the error and shut down
        if (arg->s == -1)
		{
			pthread_mutex_lock(mutex);
            perror ("accept");
			pthread_mutex_unlock(mutex);
			free(arg);
			pthread_mutex_destroy(mutex);
			exit(-1);			
		}
		pthread_mutex_lock(mutex);

        char buffer[200];

		//get information about the connection
        if (getnameinfo ((struct sockaddr *) &rem, remlen, buffer, sizeof (buffer), NULL, 0, 0))
            strcpy (buffer, "???");   // hostname unknown

        char buf2[100];
        (void) getnameinfo ((struct sockaddr *) &rem, remlen, 
                buf2, sizeof (buf2), NULL, 0, NI_NUMERICHOST);
        printf ("connection from %s (%s)\n", buffer, buf2);
		
		
		//create the thread and call the echo functions
		//makes sure to lock and unlock and set the stack size
		pthread_mutex_unlock(mutex);
		arg->attribute = malloc(sizeof(pthread_attr_t));
		pthread_attr_init(arg->attribute);
		pthread_attr_setstacksize(arg->attribute, PTHREAD_STACK_MIN+8192);
		pthread_attr_setdetachstate(arg->attribute, PTHREAD_CREATE_DETACHED);
		pthread_create(&dthread, arg->attribute, echo, arg);
		
    }

    return 0;
}