Example #1
File: arreglo.c Project: Elitos/ls2
void * hebra(void * input) {
	/* The thread id arrives through the void* argument. */
	int tid = (int)(intptr_t) input;
	int myVal;
	int ptid = (tid == 0) ? SIZE-1 : tid-1;

	/* Lock the lower-numbered mutex first so every thread uses the same
	 * lock order and neighbouring threads cannot deadlock. */
	int minTid = (tid == 0) ? tid : ptid;
	int maxTid = (tid == 0) ? ptid : tid;

	myVal = a[tid];
	printf("Thread %d launched, a[%d] = %d\n", tid, tid, a[tid]);

	pthread_barrier_wait(barrera);
	while(1) {
		pthread_mutex_lock(&mutexs[minTid]);
		pthread_mutex_lock(&mutexs[maxTid]);
		if(a[ptid] > myVal) {
			a[tid] = a[ptid];
		} else if(a[ptid] == myVal) {
			printf("The largest is %d\n", myVal);
			exit(0);
		}
		pthread_mutex_unlock(&mutexs[maxTid]);
		pthread_mutex_unlock(&mutexs[minTid]);
	}
	return NULL; /* unreachable: every thread terminates via exit(0) */
}
Example #2
// Thread entry point for the blink animation. To match pthread's
// void *(*)(void *) signature this is presumably declared static in Girl,
// so the drawable members are accessed through the instance passed via arg.
void* Girl::drawAnimation(void * arg) {
	Girl * caller = (Girl*) arg;
	while (true) {
		// Frame 1: eyes open. Draw under the lock, print outside it.
		pthread_mutex_lock(&(caller->lock));
		caller->canvas.draw();
		caller->face.draw();
		caller->eyesOpen.draw();
		caller->mouth.draw();
		pthread_mutex_unlock(&(caller->lock));
		Printer::printToScreen();
		usleep(25000);
		// Frame 2: overlay the attribute.
		pthread_mutex_lock(&(caller->lock));
		caller->attribute.draw();
		pthread_mutex_unlock(&(caller->lock));
		Printer::printToScreen();
		usleep(25000);
		// Frame 3: eyes closed.
		pthread_mutex_lock(&(caller->lock));
		caller->canvas.draw();
		caller->face.draw();
		caller->eyesClosed.draw();
		caller->mouth.draw();
		pthread_mutex_unlock(&(caller->lock));
		Printer::printToScreen();
		usleep(25000);
		// Frame 4: overlay the attribute again.
		pthread_mutex_lock(&(caller->lock));
		caller->attribute.draw();
		pthread_mutex_unlock(&(caller->lock));
		Printer::printToScreen();
		usleep(25000);
	}
	return NULL; // unreachable; keeps the void* signature satisfied
}
Example #3
void merge_queues(Queue * q1, Queue * q2) 
{
    Queue_element temp;

    // To avoid deadlock when two threads merge the same pair of queues in
    // opposite order, this function first acquires a global package lock.
    pthread_mutex_lock(&global_lock);

    // lock entire queues q1, q2
    pthread_mutex_lock(&(q1->lock));
    pthread_mutex_lock(&(q2->lock));

    temp = q2->queue;

    while (temp != 0) {
        nolock_add_to_queue(q1, temp->info, temp->priority);
        temp = temp->next;
    }

    nolock_rewind_queue(q1);

    // release locks on q1, q2
    pthread_mutex_unlock(&(q2->lock));
    pthread_mutex_unlock(&(q1->lock));

    // release global package lock
    pthread_mutex_unlock(&global_lock);

}
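The global lock serializes every merge, which is simple but coarse. A common alternative is to acquire the two per-queue locks in a fixed order, here by address, so that concurrent merge_queues(a, b) and merge_queues(b, a) calls cannot deadlock. A minimal sketch, assuming q1 != q2 and the same Queue layout as above:
/* Hypothetical variant without the global lock: order the two queue locks
 * by address so every caller acquires them in the same order. */
void merge_queues_ordered(Queue * q1, Queue * q2)
{
    Queue_element temp;
    pthread_mutex_t *first  = ((uintptr_t)q1 < (uintptr_t)q2) ? &q1->lock : &q2->lock;
    pthread_mutex_t *second = ((uintptr_t)q1 < (uintptr_t)q2) ? &q2->lock : &q1->lock;

    pthread_mutex_lock(first);
    pthread_mutex_lock(second);

    for (temp = q2->queue; temp != NULL; temp = temp->next) {
        nolock_add_to_queue(q1, temp->info, temp->priority);
    }
    nolock_rewind_queue(q1);

    pthread_mutex_unlock(second);
    pthread_mutex_unlock(first);
}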
Example #4
int msi_change_capabilities(MSICall *call, uint8_t capabilities)
{
    if (!call || !call->session) {
        return -1;
    }

    MSISession *session = call->session;

    LOGGER_DEBUG(session->messenger->log, "Session: %p Trying to change capabilities to friend %u", call->session,
                 call->friend_number);

    if (pthread_mutex_trylock(session->mutex) != 0) {
        LOGGER_ERROR(session->messenger->log, "Failed to acquire lock on msi mutex");
        return -1;
    }

    if (call->state != msi_CallActive) {
        LOGGER_ERROR(session->messenger->log, "Call is in invalid state!");
        pthread_mutex_unlock(session->mutex);
        return -1;
    }

    call->self_capabilities = capabilities;

    MSIMessage msg;
    msg_init(&msg, requ_push);

    msg.capabilities.exists = true;
    msg.capabilities.value = capabilities;

    send_message(call->session->messenger, call->friend_number, &msg);

    pthread_mutex_unlock(session->mutex);
    return 0;
}
Example #5
void balance_queues(void)
{
	int core_index;
	int count;
	int min_count = RUN_QUEUE_SIZE;
	int max_count = 0;
	int min_index = 0;
	int max_index = 0;

	//Lock the consumer threads; the four locks are hard-coded rather than looped to avoid branches.
	pthread_mutex_lock(&consumer_mutexes[0]);
	pthread_mutex_lock(&consumer_mutexes[1]);
	pthread_mutex_lock(&consumer_mutexes[2]);
	pthread_mutex_lock(&consumer_mutexes[3]);

	//Go through four cores
	for (core_index = 0; core_index < CORE_NUMBER; ++core_index)
	{
		count = (cpu_queues[core_index].rq0.tail - cpu_queues[core_index].rq0.head)
		      + (cpu_queues[core_index].rq1.tail - cpu_queues[core_index].rq1.head)
		      + (cpu_queues[core_index].rq2.tail - cpu_queues[core_index].rq2.head);
		printf("[Balancer] Core%d has %d processes in its queues\n", core_index, count);	
		if (min_count >= count)
		{
			min_count = count;
			min_index = core_index;
		}
		/* Independent check (not else-if): a core can be both the
		   current minimum and the current maximum. */
		if (max_count <= count)
		{
			max_count = count;
			max_index = core_index;
		}
	}

	if (max_count == 0)
	{
		printf("[Balancer] No need to balance this time.\n");
		done_flag = 1;
	}
	else {
		//Check if balancing is necessary
		if ((max_count - min_count) >= 2)
		{
			/* Move the last process of CPU[max_index] to CPU[min_index].
			   tail points one past the last element (count = tail - head),
			   so step the source tail back before copying from it. */
			cpu_queues[max_index].rq1.tail--;
			cpu_queues[min_index].rq1.processes[cpu_queues[min_index].rq1.tail] =
				cpu_queues[max_index].rq1.processes[cpu_queues[max_index].rq1.tail];
			/* Update the destination queue */
			cpu_queues[min_index].rq1.tail++;
		} else {
			printf("[Balancer] No need to balance this time.\n");
		}
	}

	/* Release the mutexes */
	pthread_mutex_unlock(&consumer_mutexes[3]);
	pthread_mutex_unlock(&consumer_mutexes[2]);
	pthread_mutex_unlock(&consumer_mutexes[1]);
	pthread_mutex_unlock(&consumer_mutexes[0]);
}
Example #6
void
dcache_get_at(const char path[], uint64_t *size, uint64_t *nitems)
{
	/* Fallback values are installed below if either cache lookup fails. */
	dcache_data_t size_data, nitems_data;

	pthread_mutex_lock(&dcache_size_mutex);
	if(fsdata_get(dcache_size, path, &size_data, sizeof(size_data)) != 0)
	{
		size_data.value = DCACHE_UNKNOWN;
	}
	pthread_mutex_unlock(&dcache_size_mutex);

	pthread_mutex_lock(&dcache_nitems_mutex);
	if(fsdata_get(dcache_nitems, path, &nitems_data, sizeof(nitems_data)) != 0)
	{
		nitems_data.value = DCACHE_UNKNOWN;
	}
	pthread_mutex_unlock(&dcache_nitems_mutex);

	if(size != NULL)
	{
		*size = size_data.value;
	}
	if(nitems != NULL)
	{
		*nitems = nitems_data.value;
	}
}
Example #7
char *strmalloc(char *s) {
   unsigned int hval = hash(s);
   Node *p;
   char *ans;

   pthread_mutex_lock(&lock);
   for (p = hashTable[hval]; p != NULL; p = p->next)
      if (strcmp(s, p->str) == 0) {
         ans = p->str;
         p->refcount++;
         pthread_mutex_unlock(&lock);
         return ans;
      }
   /* string not found, must malloc, copy, and insert into bucket */
   p = (Node *)malloc(sizeof(Node));
   if (! p) {
      pthread_mutex_unlock(&lock);
      return NULL;
   }
   p->str = strdup(s);
   if (! p->str) {
      free(p);
      pthread_mutex_unlock(&lock);
      return NULL;
   }
   ans = p->str;
   p->refcount = 1;
   p->next = hashTable[hval];
   hashTable[hval] = p;
   pthread_mutex_unlock(&lock);
   return ans;
}
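A table like this normally pairs strmalloc with a release function. A minimal sketch of that counterpart, under the assumption that the same hash(), lock, hashTable, and Node definitions apply; the name strfree is hypothetical:
/* Hypothetical counterpart to strmalloc: drop one reference and free the
 * node when the count reaches zero. */
void strfree(char *s) {
   unsigned int hval = hash(s);
   Node **prev, *p;

   pthread_mutex_lock(&lock);
   for (prev = &hashTable[hval]; (p = *prev) != NULL; prev = &p->next)
      if (p->str == s) {            /* interned strings compare by pointer */
         if (--p->refcount == 0) {
            *prev = p->next;        /* unlink the node from its bucket */
            free(p->str);
            free(p);
         }
         break;
      }
   pthread_mutex_unlock(&lock);
}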
Example #8
//*****************************************************************************************
//Adds a task to the thread pool that thiz points to; arg is a user-defined argument.
//Returns 0 on success, -1 on failure.
//*****************************************************************************************
int threadpool_add_task(ThreadPool *thiz, threadpool_task_fun_ task_fun, void *arg) {
    int task_num;
    return_val_if_fail(thiz, -1);
 
    pthread_mutex_lock(&thiz->mutex_);
    task_num = thiz->curr_task_num;
 
    //Too many tasks: drop the task once the queue's maximum buffered count would be exceeded.
    if (task_num > thiz->max_task_num - 2) {
        pthread_mutex_unlock(&thiz->mutex_);
        return -1;
    }
 
    ++task_num;
    thiz->curr_task_num = task_num;
    thiz->task_queue_[thiz->end] = arg;
    thiz->task_fun_[thiz->end] = task_fun;
    if (++thiz->end > thiz->max_task_num) {
        thiz->end = 1; /* the queue is 1-based: wrap back to the first slot */
    }
 
    pthread_cond_signal(&thiz->cond_);
    pthread_mutex_unlock(&thiz->mutex_);
 
    return 0;
}
Example #9
int mmap_string_ref(MMAPString * string)
{
  chash * ht;
  int r;
  chashdatum key;
  chashdatum data;
  
  pthread_mutex_lock(&mmapstring_lock);
  if (mmapstring_hashtable == NULL) {
    mmapstring_hashtable_init();
  }
  ht = mmapstring_hashtable;
  
  if (ht == NULL) {
    pthread_mutex_unlock(&mmapstring_lock);
    return -1;
  }
  
  key.data = &string->str;
  key.len = sizeof(string->str);
  data.data = string;
  data.len = 0;
  
  r = chash_set(mmapstring_hashtable, &key, &data, NULL);
  pthread_mutex_unlock(&mmapstring_lock);
  
  if (r < 0)
    return r;

  return 0;
}
Example #10
int evict_cache(void)
{
    struct cache *temp;
    int flag = 0;

    pthread_mutex_lock(&head.c_lock);
    temp = head.next; /* read the successor only while holding the head's lock */
    pthread_mutex_lock(&temp->c_lock);

    if (validate(&head, temp)) {
        head.next = temp->next;
        temp->valid = 0;
        pthread_mutex_lock(&s_lock);
        total_cache_size -= strlen(temp->data);
        pthread_mutex_unlock(&s_lock);
        flag = 1;
    }

    pthread_mutex_unlock(&temp->c_lock);
    pthread_mutex_unlock(&head.c_lock);

    if (flag == 1) {
        Free(temp->data);
        Free(temp->uri);
        Free(temp);
    }
    return flag;
}
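validate itself is not shown. A plausible minimal version, assuming it only has to confirm, with both locks held, that temp is still the head's successor and has not already been evicted:
/* Hypothetical helper matching the call above. */
static int validate(struct cache *prev, struct cache *curr)
{
    return prev->next == curr && curr->valid;
}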
Example #11
//*****************************************************************************************
//Worker loop: wait for queued tasks and execute them.
//*****************************************************************************************
static void* threadpool_excute_task(void *arg) {
    ThreadPool *thiz = (ThreadPool *) arg;
    return_null_if_fail(thiz);
 
    while (1) {
        int thread_num = 0;
        void *tmp_arg = NULL;
        threadpool_task_fun_ task_fun;
        pthread_mutex_lock(&thiz->mutex_);
        if ((thread_num = thiz->curr_task_num) > 0) {
            tmp_arg = thiz->task_queue_[thiz->start];
            task_fun = thiz->task_fun_[thiz->start];
            if (++thiz->start > thiz->max_task_num) {
                thiz->start = 1;
            }
            --thread_num;
            thiz->curr_task_num = thread_num;
            thread_num = 1; //flag: run the dequeued task below
        } else {
            pthread_cond_wait(&thiz->cond_, &thiz->mutex_); //wait for a task to be scheduled
            pthread_mutex_unlock(&thiz->mutex_);
            continue;
        }
        }
        pthread_mutex_unlock(&thiz->mutex_);
        if (thread_num > 0)
            task_fun(tmp_arg);
    }
 
    return (void *) 0;
}
Example #12
void* workerThread(void *resources){
  //The requested resource count is passed through the void* argument.
  int needed = (int)(intptr_t) resources;

  //Take the resources: retry until enough are available
  do{
    //wait for access
    pthread_mutex_lock(&leMutex);
    //check status
    if(available_resources < needed){
      //release the mutex if there are not enough resources
      pthread_mutex_unlock(&leMutex);
    }
    else{
      //if there are enough resources, use them
      decrease_count(needed);
      printf("Taking %d resources, current count = %d\n", needed, available_resources);
      //release access to the resources
      pthread_mutex_unlock(&leMutex);
      break;
    }
  }while(1);

  //sleep for a random interval
  sleep(rand()%5);

  //Give the resources back; less strict because we are just returning them.
  //wait for access
  pthread_mutex_lock(&leMutex);
  increase_count(needed);
  printf("Returning %d resources, current count = %d\n", needed, available_resources);
  //release access to the resources
  pthread_mutex_unlock(&leMutex);

  pthread_exit(NULL);
}
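As written, the acquire loop spins, repeatedly taking and dropping leMutex until enough resources appear. A minimal blocking sketch of the same step, assuming a hypothetical condition variable leCond that callers broadcast whenever resources are returned:
//Hypothetical blocking acquire: sleeps instead of spinning. Assumes
//increase_count's callers broadcast leCond after returning resources.
void acquire_resources(int needed) {
  pthread_mutex_lock(&leMutex);
  while (available_resources < needed)
    pthread_cond_wait(&leCond, &leMutex);
  decrease_count(needed);
  pthread_mutex_unlock(&leMutex);
}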
Example #13
void *blue(void *num_b){
	int number;
	int i;
	number = *(int *)(num_b);
	
	/*all the blue cars wait to enter the bridge queue*/
	pthread_mutex_lock(&m_ex_blue);
	
	check_blue++;
	/*check whether all the blue cars have entered the bridge queue, and wake the first one that entered*/
	if(check_blue == i_blue) {
		pthread_cond_signal(&cond_blue);
	}
	/*if there is more than one blue car, put the blue cars in the bridge queue*/
	if(i_blue!=1){
		pthread_cond_wait(&cond_blue,&m_ex_blue);
	}
	/*every N-th car sends a signal for N-1 cars*/
	if(count_b%N==0){
		count_b++;
		/*signal for the N-1 cars*/
		for(i=1; i<N && i<=(i_blue-count_b); i++) {
			pthread_cond_signal(&cond_blue);
		}
	}
	/*if this is not the N-th car, just increase the counter*/
	else {
		count_b++;
	}
	pthread_mutex_unlock(&m_ex_blue);
	
	printf("blue %d\n",number);
	
	/*lock mtx so that there is mutual exclusion among the blue cars*/
	pthread_mutex_lock(&mtx);
	/*increase the counter of cars that get on the bridge*/
	un_count_b++;
	if(un_count_b%N == 0 || i_blue == un_count_b) {
		printf("count_blue %d\n",un_count_b);
		/*wake the red cars if they have not finished*/
		if(i_red != un_count_r) {
			pthread_cond_signal(&cond_red);
		}
		/*if the red cars have finished*/
		else {
			/*and the blue ones have also finished, wake main*/
			if(i_blue == un_count_b) {
				pthread_cond_signal(&cond_main);
			}
			/*and the blue ones have not finished, wake the blue cars*/
			else {
				pthread_cond_signal(&cond_blue);
			}
		}
	}
	/*release the mutual exclusion now that the critical section is done*/
	pthread_mutex_unlock(&mtx);
	
	return(NULL);
}
Example #14
/*
 *	Thread function
 *		Each thread on the pool will be running this function since creation
 *		The idea is that each one of them will be waiting for some job to be
 *		added to the queue. When that happens, one of them will acquire the job
 *		and execute it
 */
static void* thread_func(void *args) {
	thread_pool_t* pool =(thread_pool_t*) args;

	while(1) {
		pthread_mutex_lock(&pool->mutex);
		while(pool->queue.length == 0) {
			DEBUG("Waiting for jobs...");
			pthread_cond_wait(&pool->has_jobs, &pool->mutex);
		}

		job_t* job = next_job(&pool->queue);
		if(job == NULL) {
			/* queue raced empty: release the lock before retrying */
			pthread_mutex_unlock(&pool->mutex);
			continue;
		}
		pool->n_threads_working++;
		DEBUG("Got a Job!");

		pthread_mutex_unlock(&pool->mutex);
		job->func(job->arg);

		pthread_mutex_lock(&pool->mutex);
		pool->n_threads_working--;
		pthread_mutex_unlock(&pool->mutex);
	}

	return NULL;
}
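The comment above mentions the producer side only in passing. A minimal sketch of it, assuming the same pool layout and a hypothetical queue_append helper that also bumps pool->queue.length:
/* Hypothetical producer matching thread_func: enqueue a job and wake one
 * sleeping worker. */
static void add_job(thread_pool_t* pool, void (*func)(void*), void* arg) {
	job_t* job = malloc(sizeof(job_t));
	job->func = func;
	job->arg = arg;

	pthread_mutex_lock(&pool->mutex);
	queue_append(&pool->queue, job);      /* assumed helper */
	pthread_cond_signal(&pool->has_jobs); /* wake one waiting worker */
	pthread_mutex_unlock(&pool->mutex);
}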
Example #15
//------------------------------------------------
// Save a linear histogram "snapshot".
//
void
linear_hist_save_info(linear_hist *h)
{
	pthread_mutex_lock(&h->info_lock);

	if (h->num_buckets > 100) {
		// For now, just don't bother if there's too much to save.
		sprintf(h->info_snapshot, "%u,%u ...", h->num_buckets, h->bucket_width);

		pthread_mutex_unlock(&h->info_lock);
		return;
	}

	// Write num_buckets, the bucket width, and the first bucket's count.
	int i = 0;
	int pos = snprintf(h->info_snapshot, INFO_SNAPSHOT_SIZE, "%u,%u,%u",
			h->num_buckets, h->bucket_width, h->counts[i++]);

	while (pos < INFO_SNAPSHOT_SIZE && i < h->num_buckets) {
		pos += snprintf(h->info_snapshot + pos, INFO_SNAPSHOT_SIZE - pos,
				",%u", h->counts[i++]);
	}

	pthread_mutex_unlock(&h->info_lock);
}
Example #16
int
lvm_pool_remove_wait(struct lvm_pool* pool, struct lvm* vm, int seconds) {
    int result = lvm_pool_remove(pool, vm);
    if (result == QS_OK)
        return result;

    if (pthread_mutex_lock(&pool->mutex) == 0) {
        struct timeval now;
        struct timespec timeout;

        result = lvm_pool_remove(pool, vm);
        if(result == QS_OK) {
            pthread_mutex_unlock(&pool->mutex);
            return result;
        }
        /* pthread_cond_timedwait() expects an absolute deadline */
        gettimeofday(&now, NULL);
        timeout.tv_sec = now.tv_sec + seconds;
        timeout.tv_nsec = now.tv_usec * 1000;
        pool->sleep = 1;
        pthread_cond_timedwait(&pool->cond, &pool->mutex, &timeout);
        pool->sleep = 0;
        pthread_mutex_unlock(&pool->mutex);
    }

    return lvm_pool_remove(pool, vm);
}
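gettimeofday works here because pthread_cond_timedwait measures its deadline against CLOCK_REALTIME by default. The same absolute deadline can be built directly with clock_gettime, avoiding the microsecond-to-nanosecond conversion:
/* Equivalent deadline construction; CLOCK_REALTIME matches the default
 * clock used by pthread_cond_timedwait. */
struct timespec timeout;
clock_gettime(CLOCK_REALTIME, &timeout);
timeout.tv_sec += seconds;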
Example #17
int pkb_get_job(struct pk_job_pile* pkj, struct pk_job* dest)
{
  int i;
  PK_TRACE_FUNCTION;

  pthread_mutex_lock(&(pkj->mutex));
  while (pkj->count == 0)
    pthread_cond_wait(&(pkj->cond), &(pkj->mutex));

  for (i = 0; i < pkj->max; i++) {
    if ((pkj->pile+i)->job != PK_NO_JOB) {
      dest->job = (pkj->pile+i)->job;
      dest->int_data = (pkj->pile+i)->int_data;
      dest->ptr_data = (pkj->pile+i)->ptr_data;
      (pkj->pile+i)->job = PK_NO_JOB;
      (pkj->pile+i)->int_data = 0;
      (pkj->pile+i)->ptr_data = NULL;
      pkj->count -= 1;
      pthread_mutex_unlock(&(pkj->mutex));
      return 1;
    }
  }

  dest->job = PK_NO_JOB;
  dest->int_data = 0;
  dest->ptr_data = NULL;
  pthread_mutex_unlock(&(pkj->mutex));
  PK_CHECK_MEMORY_CANARIES;
  return -1;
}
Example #18
/*sends update messages to the other servers, excluding the one that sent the update message*/
void updateSrvs(char *msg,char *srvId){
	int i;
	int excluded=0;
	if(msg){
		pthread_mutex_lock (&mtx);
		printf("sending updates for serv : %s \n",msg);
		for(i=0;i<savedIpv4;i++){
			if(excluded == 1 || strcmp(servIds_ipv4[i],srvId)!=0){
				printf("sending updates for IPV4 SRV : %s \n",servIds_ipv4[i]);
				sendMsg(msg,socket_ipv4,&ipv4[i],sizeof(skaddr_in));
			}	
			else
				excluded=1;
		}
		pthread_mutex_unlock (&mtx);
	
		//for ipv6 addressed servers
		pthread_mutex_lock(&mtx);
		for(i=0;i<savedIpv6;i++){
			if(excluded == 1 || strcmp(servIds_ipv6[i],srvId)!=0){
				printf("sending updates for IPV6 SRV : %s \n",servIds_ipv6[i]);
				sendMsg(msg,socket_ipv6,&ipv6[i],sizeof(skaddr_in6));
			}
			else
				excluded=1;
		}			
		pthread_mutex_unlock (&mtx);
	}	
}	
Example #19
int
dcache_set_at(const char path[], uint64_t size, uint64_t nitems)
{
	int ret = 0;
	const time_t ts = time(NULL);

	if(size != DCACHE_UNKNOWN)
	{
		const dcache_data_t data = { .value = size, .timestamp = ts };

		pthread_mutex_lock(&dcache_size_mutex);
		ret |= fsdata_set(dcache_size, path, &data, sizeof(data));
		pthread_mutex_unlock(&dcache_size_mutex);
	}

	if(nitems != DCACHE_UNKNOWN)
	{
		const dcache_data_t data = { .value = nitems, .timestamp = ts };

		pthread_mutex_lock(&dcache_nitems_mutex);
		ret |= fsdata_set(dcache_nitems, path, &data, sizeof(data));
		pthread_mutex_unlock(&dcache_nitems_mutex);
	}

	return ret;
}
Example #20
//Deletes the server id and server address, thereby removing the server from the communication.
int removeServ(char *srvId){
	int indx;
	if(srvId){
		pthread_mutex_lock(&mtx);
		for(indx=0;indx<savedIpv4;indx++){
			if(strcmp(servIds_ipv4[indx],srvId)==0){
				delSrvId((char **)servIds_ipv4,indx,savedIpv4);
				delSrvAddr(ipv4,indx,savedIpv4);
				savedIpv4--; /* update the count before releasing the lock */
				pthread_mutex_unlock(&mtx);
				return 0;
			}
		}
	
		for(indx=0;indx<savedIpv6;indx++){
			if(strcmp(servIds_ipv6[indx],srvId)==0){
				delSrvId((char **)servIds_ipv6,indx,savedIpv6);
				delSrvAddr(ipv6,indx,savedIpv6);
				savedIpv6--;
				pthread_mutex_unlock(&mtx);
				return 0;
			}		
		}
		pthread_mutex_unlock(&mtx);
	}
	return -1; /* not found, or srvId was NULL */
}
Example #21
int msi_hangup(MSICall *call)
{
    if (!call || !call->session) {
        return -1;
    }

    MSISession *session = call->session;

    LOGGER_DEBUG(session->messenger->log, "Session: %p Hanging up call with friend: %u", call->session,
                 call->friend_number);

    if (pthread_mutex_trylock(session->mutex) != 0) {
        LOGGER_ERROR(session->messenger->log, "Failed to acquire lock on msi mutex");
        return -1;
    }

    if (call->state == msi_CallInactive) {
        LOGGER_ERROR(session->messenger->log, "Call is in invalid state!");
        pthread_mutex_unlock(session->mutex);
        return -1;
    }

    MSIMessage msg;
    msg_init(&msg, requ_pop);

    send_message(session->messenger, call->friend_number, &msg);

    kill_call(call);
    pthread_mutex_unlock(session->mutex);
    return 0;
}
Example #22
long job_log_size(void)

  {
#if defined(HAVE_STRUCT_STAT64) && defined(HAVE_STAT64) && defined(LARGEFILE_WORKS)

  struct stat64 file_stat;
#else

  struct stat file_stat;
#endif

  memset(&file_stat, 0, sizeof(file_stat));
  pthread_mutex_lock(&job_log_mutex);

#if defined(HAVE_STRUCT_STAT64) && defined(HAVE_STAT64) && defined(LARGEFILE_WORKS)

  if (job_log_opened && (fstat64(fileno(joblogfile), &file_stat) != 0))
#else
  if (job_log_opened && (fstat(fileno(joblogfile), &file_stat) != 0))
#endif
    {
    /* FAILURE */

    log_err(errno, __func__, "PBS cannot fstat joblogfile");
    pthread_mutex_unlock(&job_log_mutex);

    return(0);
    }

  pthread_mutex_unlock(&job_log_mutex);

  return(file_stat.st_size / 1024);
  }
Example #23
void on_peer_status(Messenger *m, uint32_t friend_number, uint8_t status, void *data)
{
    MSISession *session = (MSISession *)data;

    switch (status) {
        case 0: { /* Friend is now offline */
            LOGGER_DEBUG(m->log, "Friend %u is now offline", friend_number);

            pthread_mutex_lock(session->mutex);
            MSICall *call = get_call(session, friend_number);

            if (call == NULL) {
                pthread_mutex_unlock(session->mutex);
                return;
            }

            invoke_callback(call, msi_OnPeerTimeout); /* Failure is ignored */
            kill_call(call);
            pthread_mutex_unlock(session->mutex);
        }
        break;

        default:
            break;
    }
}
Example #24
/* record job information of completed job to job log */
int log_job_record(const char *buf)
  {
  struct tm *ptm;
  struct tm tmpPtm;
  time_t now;

  now = time((time_t *)0);
  ptm = localtime_r(&now,&tmpPtm);

  pthread_mutex_lock(&job_log_mutex);

  /* do we need to switch the log to the new day? */
  if (job_log_auto_switch && (ptm->tm_yday != joblog_open_day))
    {
    job_log_close(1);

    job_log_open(NULL, job_log_directory);

    if (job_log_opened < 1)
      {
      log_err(-1, __func__, "job_log_opened < 1");
      pthread_mutex_unlock(&job_log_mutex);
      return(-1);
      }
    }

  fprintf(joblogfile, "%s\n", buf);
  fflush(joblogfile);
  pthread_mutex_unlock(&job_log_mutex);

  return(0);
  }
Example #25
static int send_async(struct ploop_copy_handle *h, void *data,
		__u64 size, __u64 pos)
{
	struct sender_data *sd = &h->sd;

	pthread_mutex_lock(&sd->mutex);

	if (sd->ret) {
		ploop_err(sd->err_no, "write error");
		pthread_mutex_unlock(&sd->mutex);
		return sd->ret;
	}

	sd->buf = data;
	sd->len = size;
	sd->pos = pos;

	pthread_cond_signal(&sd->cond);
	pthread_mutex_unlock(&sd->mutex);

	/* wait till the sender starts processing; the mutex must be
	 * held around pthread_cond_wait() */
	pthread_mutex_lock(&sd->wait_mutex);
	pthread_cond_wait(&sd->wait_cond, &sd->wait_mutex);
	pthread_mutex_unlock(&sd->wait_mutex);

	return 0;
}
Example #26
// using mutex in this function
int withdraw (struct account *account, int amount)
{
	// debug trace (reads balance without the lock, so the value is only indicative)
	printf("%lu\t%s\t%d\t%d\n", (unsigned long) pthread_self(), account->name, account->balance, amount);
	/*
	 * Most uses of locks avoid global locks and instead associate locks with
	 * specific instances of data structures. This is called fine-grained locking.
	 * It can make your locking semantics more complicated, particularly around
	 * deadlock avoidance, but is key to scaling to the number of cores on modern machines.
	 *
	 * In this example, instead of defining a global the_mutex lock, we define a mutex inside
	 * of the account structure, giving each account its own lock. This works well as the data
	 * within the critical region is only the account structure. By locking only the account
	 * being debited, we allow the bank to process other customers' withdrawals in parallel.
	 */
	pthread_mutex_lock (&account->mutex);
	const int balance = account->balance;
	if(balance < amount ) {
		pthread_mutex_unlock(&account->mutex);
		return -1;
	}
	account->balance = balance - amount;
	pthread_mutex_unlock (&account->mutex);
	//
	printf("cash out $%d\n", amount);
	//
	return 0;
}
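Fine-grained per-account locks raise the classic follow-up: an operation touching two accounts must take both locks without deadlocking. A sketch under the same struct account assumptions, ordering the two locks by address so concurrent opposite transfers cannot deadlock (transfer is a hypothetical name, and from != to is assumed):
/* Hypothetical two-account transfer built on the same per-account mutex. */
int transfer(struct account *from, struct account *to, int amount)
{
	struct account *first  = (from < to) ? from : to;
	struct account *second = (from < to) ? to : from;

	pthread_mutex_lock(&first->mutex);
	pthread_mutex_lock(&second->mutex);

	int ret = -1;
	if (from->balance >= amount) {
		from->balance -= amount;
		to->balance += amount;
		ret = 0;
	}

	pthread_mutex_unlock(&second->mutex);
	pthread_mutex_unlock(&first->mutex);
	return ret;
}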
Example #27
bool pe_set_event(pe_event_t event) {
    int result;
    pe_event_impl_t *e;

    e = (pe_event_impl_t *)event;

    result = pthread_mutex_lock(&e->m_lock);
    assert(result == 0);

    e->m_signaled = true;

    if (e->m_auto_reset) {
        /* Auto-reset event: wake exactly one waiter; the woken waiter
         * consumes the signal. */
        result = pthread_mutex_unlock(&e->m_lock);
        assert(result == 0);

        result = pthread_cond_signal(&e->m_condition);
        assert(result == 0);
    } else {
        /* Manual-reset event: wake every waiter. */
        result = pthread_mutex_unlock(&e->m_lock);
        assert(result == 0);

        result = pthread_cond_broadcast(&e->m_condition);
        assert(result == 0);
    }

    (void)result; /* silence unused-variable warnings when NDEBUG is defined */

    return true;
} /* pe_set_event */
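The waiting side of such an event would consume the flag under the same lock. A minimal sketch assuming the pe_event_impl_t layout used above; the name pe_wait_event is hypothetical:
/* Hypothetical counterpart: block until the event is signaled; auto-reset
 * events clear the flag as the waiter wakes. */
bool pe_wait_event(pe_event_t event) {
    pe_event_impl_t *e = (pe_event_impl_t *)event;

    pthread_mutex_lock(&e->m_lock);
    while (!e->m_signaled) {
        pthread_cond_wait(&e->m_condition, &e->m_lock);
    }
    if (e->m_auto_reset) {
        e->m_signaled = false; /* consume the signal */
    }
    pthread_mutex_unlock(&e->m_lock);
    return true;
}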
Example #28
/*Updates map portion of screen*/
void update_map(screen_t* my_screen){
	car_t* player = &my_screen->vw.player;
	car_t* cpu = my_screen->vw.cpu;
	int i;
	int xblit = cord2px(player->bound_box.x);
	int yblit = cord2px(player->bound_box.y);

	pthread_mutex_lock(&my_screen->sems.pcar_mux);
	blit(my_screen->map.background, screen, xblit -10, yblit-10, xblit-10, yblit-10, cord2px(player->bound_box.w)+20, cord2px(player->bound_box.h)+20);
	/* First pass: lock each live cpu car and erase its old position. The
	   per-car locks stay held until the matching redraw below, so
	   cpu[i].alive must only be changed while holding cpucar_mux[i]. */
	for(i = 0; i < CPUCARS; i++){
		if(cpu[i].alive){
			pthread_mutex_lock(&my_screen->sems.cpucar_mux[i]);
			xblit = cord2px(cpu[i].bound_box.x);
			yblit = cord2px(cpu[i].bound_box.y);
			blit(my_screen->map.background, screen, xblit-10, yblit-10, xblit-10, yblit-10, cord2px(cpu[i].bound_box.w)+20, cord2px(cpu[i].bound_box.h)+20);
		}
	}
	/* Second pass: redraw each car and release its lock. */
	for(i = 0; i < CPUCARS; i++){
		if(cpu[i].alive){
			rotate_sprite(screen, cpu[i].sprite, carx2px(cpu[i].pos.x), cary2px(cpu[i].pos.y), ftofix(cpu[i].angle*256/360));
			pthread_mutex_unlock(&my_screen->sems.cpucar_mux[i]);
		}
	}
	rotate_sprite(screen, player->sprite, carx2px(player->pos.x), cary2px(player->pos.y), ftofix(player->angle*256/360));
	pthread_mutex_unlock(&my_screen->sems.pcar_mux);
}
Example #29
File: chan.c Project: irr/chan
static int buffered_chan_recv(chan_t* chan, void** data)
{
    pthread_mutex_lock(chan->m_mu);
    while (chan->queue->size == 0)
    {
        if (chan->closed)
        {
            pthread_mutex_unlock(chan->m_mu);
            errno = EPIPE;
            return -1;
        }

        // Block until something is added.
        pthread_cond_wait(chan->m_cond, chan->m_mu);
    }

    *data = queue_remove(&chan->queue);

    if (chan->queue->size == chan->queue->capacity - 1)
    {
        // If the buffer was previously full, notify.
        pthread_cond_signal(chan->m_cond);
    }

    pthread_mutex_unlock(chan->m_mu);
    return 0;
}
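The send side of such a buffered channel is symmetric: block while the buffer is full, enqueue, and notify a receiver when the queue transitions from empty. A sketch assuming the same chan_t layout and a queue_add counterpart to queue_remove; the real chan library's implementation may differ:
/* Hypothetical mirror of the receive path above. */
static int buffered_chan_send(chan_t* chan, void* data)
{
    pthread_mutex_lock(chan->m_mu);
    while (chan->queue->size == chan->queue->capacity)
    {
        if (chan->closed)
        {
            pthread_mutex_unlock(chan->m_mu);
            errno = EPIPE;
            return -1;
        }

        // Block until something is removed.
        pthread_cond_wait(chan->m_cond, chan->m_mu);
    }

    queue_add(&chan->queue, data); /* assumed counterpart to queue_remove */

    if (chan->queue->size == 1)
    {
        // If the buffer was previously empty, notify.
        pthread_cond_signal(chan->m_cond);
    }

    pthread_mutex_unlock(chan->m_mu);
    return 0;
}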
Example #30
  //-----------------------------------------------------------------------
  void ProcessZero::bspLocalSynch(){

    synchedProcecess.inc();
    pthread_mutex_lock(&bspSynchLock);

    if((synchedProcecess.value() == totalNumProcs)){
      synchedProcecess.reset();
      for(int i = 1; i < totalNumProcs; i++)
        stubPool_->bspSynchDone(i);
      pthread_mutex_unlock(&bspSynchLock);
    }
    else{
      pthread_mutex_unlock(&bspSynchLock);
      //cerr << "Waitin' in ProcessZero::bspLocalSynch()" << endl << endl;
      synchDone.wait();
    }
    drmaManager_->processPendingOperations();
    bsmpManager_->processPendingOperations();
    
    {
        ostringstream logStr;
        logStr << "########### END OF SUPERSTEP  " << superstep_.value() << " ##########"; 
        bspLogger.debug( logStr.str() );        
    }
    
    superstep_.inc();
  }