Example No. 1
void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
{
	fibril_t *f = (fibril_t *) fibril_get_id();
	
	/* A fibril must not block while it is serialized. */
	if (fibril_get_sercount() != 0)
		abort();

	futex_down(&async_futex);
	if (frw->writers || frw->readers) {
		/* The lock is held: enqueue this fibril as a waiter and
		 * record the wait-for edge so check_for_deadlock() can
		 * detect cycles before we go to sleep. */
		awaiter_t wdata;

		awaiter_initialize(&wdata);
		wdata.fid = (fid_t) f;
		wdata.wu_event.inlist = true;
		f->flags |= FIBRIL_WRITER;
		list_append(&wdata.wu_event.link, &frw->waiters);
		check_for_deadlock(&frw->oi);
		f->waits_for = &frw->oi;
		fibril_switch(FIBRIL_TO_MANAGER);
	} else {
		/* Uncontended: take write ownership immediately. */
		frw->oi.owned_by = f;
		frw->writers++;
		futex_up(&async_futex);
	}
}
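
check_for_deadlock() itself is not shown in this excerpt. A minimal sketch of the kind of walk the call implies, assuming (as the fields above suggest) that fibril_owner_info_t records its holder in owned_by and each fibril_t records the lock it is sleeping on in waits_for; this is an illustration, not the HelenOS implementation:

static void check_for_deadlock(fibril_owner_info_t *oi)
{
	fibril_t *self = (fibril_t *) fibril_get_id();

	/* Follow owner -> waits_for -> owner ...; reaching ourselves means
	 * the wait we are about to enter would close a cycle. */
	while (oi != NULL && oi->owned_by != NULL) {
		if (oi->owned_by == self)
			abort();	/* deadlock would be inevitable */
		oi = oi->owned_by->waits_for;
	}
}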
Example No. 2
// return 0 on success, 1 on failure (extra credit)
static int lock_acquire_helper(lock_t *l)
{
    ASSERT(disable_count);
    if (LOCKED == l->status)
    {
        current_running->waiting_for_lock = l;

        if (check_for_deadlock(l->owner, current_running))
        {
            current_running->waiting_for_lock = NULL;
            return 1;
        }

        block(&l->wait_queue);
    }
    else
    {
        current_running->waiting_for_lock = NULL;
        l->owner = current_running;
    }

    l->status = LOCKED;

    return 0;
}
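
Here the detector receives both endpoints of the prospective wait and returns nonzero on deadlock. A minimal sketch of a consistent chain walk, assuming a hypothetical pcb_t task type whose waiting_for_lock field is maintained exactly as in the example above:

/* Return 1 if making `current` wait on a lock held by `owner` would
 * close a cycle in the wait-for graph, 0 otherwise. */
static int check_for_deadlock(pcb_t *owner, pcb_t *current)
{
    while (owner != NULL) {
        if (owner == current)
            return 1;                   /* cycle found: deadlock */
        lock_t *next = owner->waiting_for_lock;
        if (next == NULL)
            return 0;                   /* chain ends at a runnable task */
        owner = next->owner;
    }
    return 0;
}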
Example No. 3
static void sync_wait_and_lock(chpl_sync_aux_t *s,
                               chpl_bool want_full,
                               int32_t lineno, int32_t filename) {
  chpl_bool suspend_using_cond;

  chpl_thread_mutexLock(&s->lock);

  // If we're oversubscribing the hardware, we wait using condition
  // variables in order to ensure fairness and thus progress.  If
  // we're not, we can spin-wait.
  suspend_using_cond = (chpl_thread_getNumThreads() >=
                        chpl_getNumLogicalCpus(true));

  while (s->is_full != want_full) {
    if (!suspend_using_cond) {
      chpl_thread_mutexUnlock(&s->lock);
    }
    if (set_block_loc(lineno, filename)) {
      // all other tasks appear to be blocked
      struct timeval deadline, now;
      chpl_bool timed_out = false;
      // timed_out keeps this default in the spin-wait case (nothing ever
      // sets it), so the !timed_out test below never cuts the wait short
      // there

      gettimeofday(&deadline, NULL);
      deadline.tv_sec += 1;
      do {
        if (suspend_using_cond)
          timed_out = chpl_thread_sync_suspend(s, &deadline);
        else
          chpl_thread_yield();
        
        if (s->is_full != want_full && !timed_out)
          gettimeofday(&now, NULL);
      } while (s->is_full != want_full
               && !timed_out
               && (now.tv_sec < deadline.tv_sec
                   || (now.tv_sec == deadline.tv_sec
                       && now.tv_usec < deadline.tv_usec)));
      if (s->is_full != want_full)
        check_for_deadlock();
    }
    else {
      do {
        if (suspend_using_cond)
          (void) chpl_thread_sync_suspend(s, NULL);
        else
          chpl_thread_yield();
      } while (s->is_full != want_full);
    }
    unset_block_loc();
    if (!suspend_using_cond)
      chpl_thread_mutexLock(&s->lock);
  }

  if (blockreport)
    progress_cnt++;
}
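
This excerpt (and Example No. 6 below) uses the same idiom: wait for roughly one second, and only if nothing has changed by the deadline call check_for_deadlock(). A self-contained sketch of the idiom, with hypothetical stand-ins cond_is_met(), yield_thread(), and report_possible_deadlock() in place of the runtime's own pieces:

#include <stdbool.h>
#include <sys/time.h>

bool cond_is_met(void);              /* hypothetical: the awaited condition */
void yield_thread(void);             /* hypothetical: give up the CPU */
void report_possible_deadlock(void); /* hypothetical: the deadlock check */

/* Return true iff `now` is strictly earlier than `deadline`. */
static bool before(const struct timeval *now, const struct timeval *deadline) {
    return now->tv_sec < deadline->tv_sec
           || (now->tv_sec == deadline->tv_sec
               && now->tv_usec < deadline->tv_usec);
}

/* Poll for the condition for about one second; if it still has not
 * become true by the deadline, suspect a deadlock and report it. */
static void wait_with_deadlock_check(void) {
    struct timeval deadline, now;

    gettimeofday(&deadline, NULL);
    deadline.tv_sec += 1;
    do {
        yield_thread();
        gettimeofday(&now, NULL);
    } while (!cond_is_met() && before(&now, &deadline));
    if (!cond_is_met())
        report_possible_deadlock();
}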
Example No. 4
int main(int argc, char **argv)
{
  int i;
  int deadlock;
  deadlock = 0;

  srand(time(NULL));

  set_table();

  do {
    /*
     * Let the philosophers do some thinking and eating
     */
    sleep(5);

    /*
     * Check for deadlock (i.e. none of the philosophers are
     * making progress)
     */
    deadlock = 0;

    if (check_for_deadlock()) {
      deadlock = 1;
      break;
    }

    /*
     * Print out the philosophers progress
     */
    print_progress();
  } while (!deadlock);

  stop = 1;
  printf ("Reached deadlock\n");

  /*
   * Release all locks so philosophers can exit
   */
  for (i = 0; i < NUM_CHOPS; i++)
    pthread_mutex_unlock(&chopstick[i]);

  /*
   * Wait for philosophers to finish
   */
  for (i = 0; i < NUM_PHILS; i++)
    pthread_join(diners[i].thread, NULL);

  return 0;
}
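
Here check_for_deadlock() is a heuristic: deadlock is inferred from lack of progress rather than from a wait-for cycle, per the comment in the loop. A minimal sketch of such a check, assuming each diner carries a hypothetical meals counter (the philosopher struct is not shown in the excerpt):

/* Return 1 if no philosopher has eaten since the previous call. */
static int check_for_deadlock(void)
{
    static int last_meals[NUM_PHILS];   /* snapshot from the previous check */
    int i, progress = 0;

    for (i = 0; i < NUM_PHILS; i++) {
        if (diners[i].meals != last_meals[i])
            progress = 1;               /* at least one philosopher ate */
        last_meals[i] = diners[i].meals;
    }
    return !progress;
}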
Example No. 5
void fibril_mutex_lock(fibril_mutex_t *fm)
{
	fibril_t *f = (fibril_t *) fibril_get_id();

	/* A fibril must not block while it is serialized. */
	if (fibril_get_sercount() != 0)
		abort();

	futex_down(&async_futex);
	if (fm->counter-- <= 0) {
		/* The mutex is held: enqueue this fibril as a waiter and
		 * record the wait-for edge so check_for_deadlock() can
		 * detect cycles before we go to sleep. */
		awaiter_t wdata;

		awaiter_initialize(&wdata);
		wdata.fid = fibril_get_id();
		wdata.wu_event.inlist = true;
		list_append(&wdata.wu_event.link, &fm->waiters);
		check_for_deadlock(&fm->oi);
		f->waits_for = &fm->oi;
		fibril_switch(FIBRIL_TO_MANAGER);
	} else {
		/* Uncontended: take ownership immediately. */
		fm->oi.owned_by = f;
		futex_up(&async_futex);
	}
}
Example No. 6
//
// When we create a thread it runs this wrapper function, which just
// executes tasks out of the pool as they become available.
//
static void
thread_begin(void* ptask_void) {
  task_pool_p ptask;
  thread_private_data_t *tp;

  tp = (thread_private_data_t*) chpl_mem_alloc(sizeof(thread_private_data_t),
                                               CHPL_RT_MD_THREAD_PRV_DATA,
                                               0, 0);
  chpl_thread_setPrivateData(tp);

  tp->lockRprt = NULL;
  if (blockreport)
    initializeLockReportForThread();

  while (true) {
    //
    // wait for a task to be present in the task pool
    //

    // In revision 22137, we investigated whether it was beneficial to
    // implement this while loop in a hybrid style, where depending on
    // the number of tasks available, idle threads would either yield or
    // wait on a condition variable to waken them.  Through analysis, we
    // realized this could potentially create a case where a thread would
    // become stranded, waiting for a condition signal that would never
    // come.  A potential solution to this was to keep a count of threads
    // that were waiting on the signal, but since there was a performance
    // impact from keeping it as a hybrid as opposed to merely yielding,
    // it was decided that we would return to the simple yield case.
    while (!task_pool_head) {
      if (set_block_loc(0, CHPL_FILE_IDX_IDLE_TASK)) {
        // all other tasks appear to be blocked
        struct timeval deadline, now;
        gettimeofday(&deadline, NULL);
        deadline.tv_sec += 1;
        do {
          chpl_thread_yield();
          if (!task_pool_head)
            gettimeofday(&now, NULL);
        } while (!task_pool_head
                 && (now.tv_sec < deadline.tv_sec
                     || (now.tv_sec == deadline.tv_sec
                         && now.tv_usec < deadline.tv_usec)));
        if (!task_pool_head) {
          check_for_deadlock();
        }
      }
      else {
        do {
          chpl_thread_yield();
        } while (!task_pool_head);
      }

      unset_block_loc();
    }
 
    //
    // Just now the pool had at least one task in it.  Lock and see if
    // there's something still there.
    //
    chpl_thread_mutexLock(&threading_lock);
    if (!task_pool_head) {
      chpl_thread_mutexUnlock(&threading_lock);
      continue;
    }

    //
    // We've found a task to run.
    //

    if (blockreport)
      progress_cnt++;

    //
    // start new task; increment the running count and remove the task
    // from the pool.  Also add the task to the task table (a structure
    // in the Chapel runtime that keeps track of currently running tasks
    // for task reports on deadlock or Ctrl+C).
    //
    ptask = task_pool_head;
    idle_thread_cnt--;
    running_task_cnt++;

    dequeue_task(ptask);

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);

    tp->ptask = ptask;

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_set_active(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    chpl_task_do_callbacks(chpl_task_cb_event_kind_begin,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    (*ptask->fun)(ptask->arg);

    chpl_task_do_callbacks(chpl_task_cb_event_kind_end,
                           ptask->filename,
                           ptask->lineno,
                           ptask->id,
                           ptask->is_executeOn);

    if (do_taskReport) {
      chpl_thread_mutexLock(&taskTable_lock);
      chpldev_taskTable_remove(ptask->id);
      chpl_thread_mutexUnlock(&taskTable_lock);
    }

    tp->ptask = NULL;
    chpl_mem_free(ptask, 0, 0);

    // begin critical section
    chpl_thread_mutexLock(&threading_lock);

    //
    // finished task; decrement running count and increment idle count
    //
    assert(running_task_cnt > 0);
    running_task_cnt--;
    idle_thread_cnt++;

    // end critical section
    chpl_thread_mutexUnlock(&threading_lock);
  }
}
Example No. 7
static PyObject *
schedule_task_block(PyTaskletObject *prev, int stackless)
{
	PyThreadState *ts = PyThreadState_GET();
	PyObject *retval;
	PyTaskletObject *next = NULL;
	PyObject *unlocker_lock;

	if (check_for_deadlock()) {
		/* revive real main if floating */
		if (ts == slp_initial_tstate && ts->st.main->next == NULL) {
			/* emulate old revive_main behavior:
			 * passing a value only if it is an exception
			 */
			if (PyBomb_Check(prev->tempval))
				TASKLET_SETVAL(ts->st.main, prev->tempval);
			return slp_schedule_task(prev, ts->st.main, stackless);
		}
		if (!(retval = make_deadlock_bomb()))
			return NULL;
		TASKLET_SETVAL_OWN(prev, retval);
		return slp_schedule_task(prev, prev, stackless);
	}
#ifdef WITH_THREAD
	if (ts->st.thread.self_lock == NULL) {
		if (!(ts->st.thread.self_lock = new_lock()))
			return NULL;
		acquire_lock(ts->st.thread.self_lock, 1);
		if (!(ts->st.thread.unlock_lock = new_lock()))
			return NULL;
	}

	/* let somebody reactivate us */

	ts->st.thread.is_locked = 1; /* flag as blocked and wait */

	PyEval_SaveThread();
	PR("locker waiting for my lock");
	acquire_lock(ts->st.thread.self_lock, 1);
	PR("HAVE my lock");
	PyEval_RestoreThread(ts);

	if (temp.unlock_target != NULL) {
		next = temp.unlock_target;
		temp.unlock_target = NULL;
	}
	else
		next = prev;

	/*
	 * get in shape. can't do this with schedule here because
	 * hard switching might not get us back soon enough.
	 */
	if (next->flags.blocked) {
		/* unblock from channel */
		slp_channel_remove_slow(next);
		slp_current_insert(next);
	}
	else if (next->next == NULL) {
		/* reactivate floating task */
		Py_INCREF(next);
		slp_current_insert(next);
	}

	if (temp.other_lock != NULL) {
		PR("releasing unlocker");
		unlocker_lock = temp.other_lock;
		temp.other_lock = NULL;
		release_lock(unlocker_lock);
		Py_DECREF(unlocker_lock);
	}

	ts->st.thread.is_locked = 0;
#else
	(void)unlocker_lock;
	next = prev;
#endif
	/* this must be after releasing the locks because of hard switching */
	retval = slp_schedule_task(prev, next, stackless);
	PR("schedule() is done");
	return retval;
}
Example No. 8
int main(int argc, char **argv)
{
  if(argc < 3)
  {
    printf("Usage : Requires an arguments --Input file having sequence of trains --Probablity with which the system shoud check deadlock condition(percent)\n");
    exit(0);
  }
  int prob_check = atoi(argv[2]);
  FILE *fp;
  if((fp=fopen(argv[1], "r")) == NULL)
  {
    printf("Cannot open file.\n");
    exit(1);//Don't fall through to fclose(NULL) at the end of main
  }
  else
  {
    printf("-------START-------\n");
    int N = 0, r, i, j, numtrains_seen = 0, trains_exhausted = 0;
    int semid, semid_jn, semid_file;
    key_t key, key_jn, key_file;
    key = 123;
    key_jn = 1234;
    key_file = 12;
    //Creating semaphores: 4 semaphores, one per track
    if((semid = semget(key, NUMSMPHRS, IPC_CREAT | 0660)) < 0)
    {
      printf("Error Creating Semaphore\n");
      exit(-1);
    }
    //Semaphore ensuring mutual exclusion at the junction
    if((semid_jn = semget(key_jn, 1, IPC_CREAT | 0660)) < 0)
    {
      printf("Error Creating Junction Semaphore\n");
      exit(-1);
    }
    //Semaphore ensuring mutual exclusion of file access
    if((semid_file = semget(key_file, 1, IPC_CREAT | 0660)) < 0)
    {
      printf("Error Creating File Semaphore\n");
      exit(-1);
    }
    //Initializing all (sub)semaphores to 0
    for(i = 0; i < NUMSMPHRS; i++)
    {
      semctl(semid, i, SETVAL, 0);
      int retval = semctl(semid, i, GETVAL, 0);
      printf("(Sub)Semaphore #%d: Value = %d\n",i, retval);
    }
    semctl(semid_jn, 0, SETVAL, 0);
    int retval = semctl(semid_jn, 0, GETVAL, 0);
    printf("Junction Semaphore Value = %d\n", retval);
    semctl(semid_file, 0, SETVAL, 0);
    retval = semctl(semid_file, 0, GETVAL, 0);
    printf("File Semaphore Value = %d\n", retval);

    char *line = (char*) malloc(MAX_NUM_TRAINS*sizeof(char));//Buffer for the train sequence read from the file
    size_t bufsize = MAX_NUM_TRAINS;//getline() takes a size_t buffer size, not ssize_t
    r = getline(&line, &bufsize, fp);
    printf("Input Sequence: '%s'\n", line);
    int num_trains = strlen(line) - 1;
    //Initializing matrix array
    matrix = (int **)malloc(num_trains*sizeof(int *));
    for(i=0;i<num_trains;i++)
    {
      matrix[i] = (int *)malloc(NUMSMPHRS*sizeof(int));
      for(j=0;j<NUMSMPHRS;j++)
      {
        matrix[i][j] = 0;
      }
    }
    write_matrix_file(num_trains, NUMSMPHRS);//Update file
    i = 0;
    srand ( time(NULL) );
    while(i < NUM_ITERATIONS)
    {
      sleep(1);
      int random_num = rand()%100;
      printf("---------Iteration #%d: Random number generated: %d---------\n", i, random_num);
      for(j = 0; j<NUMSMPHRS; j++)
      {
        int retval=semctl(semid, j, GETVAL, 0);
        printf("semaphore[%d] = %d\n", j, retval);
      }
      if(random_num < prob_check || trains_exhausted == 1)
      {
        //Code to check deadlock
        printf("\tChecking for deadlock\n");
        check_for_deadlock();
      }
      else
      {
        //Next train
        if(line[numtrains_seen] != '\n')
        {
          printf("\tCreating next train process\n");
          char parameter1[15], parameter2[15];
          sprintf(parameter1, "%c", line[numtrains_seen]);//Set parameter1 to the direction of the train
          sprintf(parameter2, "%d", numtrains_seen);//Set parameter2 to the train's index
          if(fork() == 0)
          {
            //Child process runs the train in a new xterm window
            int execret = execlp("xterm","xterm","-hold","-e","./train", parameter1,parameter2,(const char*) NULL);
            if(execret < 0) perror("Error in exec");
            exit(0);//execlp() only returns on failure
          }
          printf("\t\t--- %c train started\n", line[numtrains_seen]);
          numtrains_seen++;
        }
        else
        {
          printf("\tTrains Exhausted -- All subsequent iterations will only check for deadlock condition\n");
          trains_exhausted = 1;
        }
      }//end else
      i++;
    }//End while
  }//End Else
  fclose(fp);
  return 0;
}
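
check_for_deadlock() in this program presumably works from the allocation matrix maintained via write_matrix_file(). A minimal sketch of such a check, assuming the hypothetical encoding matrix[i][j] == 1 when train i holds track j and matrix[i][j] == 2 when it is waiting for track j (the excerpt does not show the real encoding):

/* Return 1 on deadlock: in the circular junction, a deadlock shows up
 * as every track being held while every track is also waited on. */
static int deadlocked(int num_trains)
{
    int held[NUMSMPHRS] = {0}, waiting[NUMSMPHRS] = {0};
    int i, j;

    for(i = 0; i < num_trains; i++)
        for(j = 0; j < NUMSMPHRS; j++)
        {
            if(matrix[i][j] == 1) held[j] = 1;    /* train i holds track j */
            if(matrix[i][j] == 2) waiting[j] = 1; /* train i waits for track j */
        }
    for(j = 0; j < NUMSMPHRS; j++)
        if(!held[j] || !waiting[j])
            return 0;
    return 1;
}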