int
main (int argc, char * argv[])
{
    int     rtnval;

    #ifdef VERBOSE
        printf ("Verbose output\n");
    #endif

    // Create the shared memory object.
    shm_addr = my_shm_create (shm_name, SHM_SIZE);
    printf ("\nShared memory '%s' created.\n\n", shm_name);
    set_turn(0);
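    /* A second process (not shown in this listing) is presumably expected to
       print the even numbers: it waits for turn 1, prints, then sets the
       turn back to 0 so this process can continue. */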

    wait_for_turn(0);
    printf ("1\n");
    set_turn(1);

    wait_for_turn(0);
    printf ("3\n");
    set_turn(1);

    wait_for_turn(0);
    printf ("5\n");
    set_turn(1);

    wait_for_turn(0);

	// Close the shared memory file descriptor
    printf ("\nCalling close(%d)", shm_fd);
    rtnval = close (shm_fd);
    if (rtnval != 0)
    {
        perror ("ERROR: close() failed");
    }
    printf (" -> returned %d\n", rtnval);
    shm_fd = -1;

    // Unlink the shared memory so it can be removed (reference counting)
    printf ("Calling shm_unlink('%s')", shm_name);
    rtnval = shm_unlink (shm_name);
    if (rtnval != 0)
    {
        perror ("ERROR: shm_unlink() failed");
    }
    printf (" -> returned %d\n", rtnval);

    // Close the program
    printf ("\nBye bye. Hope to see you soon.\n");
    return 0;	
}
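The helpers used above (my_shm_create, set_turn, wait_for_turn) are not part of this listing. Below is a minimal sketch of what they might look like over POSIX shared memory; the single-int "turn" layout, the "/turn_demo" name, the 0666 permissions, and the polling loop are assumptions for illustration, not the example's actual code (link with -lrt on older systems).

/* Hypothetical sketch of the helpers used above; they are not shown in the
   original listing. Assumed layout: the shared segment holds a single int
   "turn" flag that the two processes ping-pong between 0 and 1. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define SHM_SIZE 4096

static const char *shm_name = "/turn_demo";   /* name is an assumption */
static int   shm_fd   = -1;
static void *shm_addr = NULL;

/* Create (or open) a POSIX shared-memory object and map it. */
void *my_shm_create (const char *name, size_t size)
{
    shm_fd = shm_open (name, O_CREAT | O_RDWR, 0666);
    if (shm_fd < 0) { perror ("shm_open"); return NULL; }
    if (ftruncate (shm_fd, size) != 0) { perror ("ftruncate"); return NULL; }
    shm_addr = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
    if (shm_addr == MAP_FAILED) { perror ("mmap"); return NULL; }
    return shm_addr;
}

/* Publish whose turn it is. */
void set_turn (int turn)
{
    *(volatile int *) shm_addr = turn;
}

/* Poll the shared flag until it equals our turn value. */
void wait_for_turn (int turn)
{
    while (*(volatile int *) shm_addr != turn)
        usleep (1000);   /* back off briefly between polls */
}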
Example #2
void resource_pool_t::reserve(int n)
{
  
    /* Error checking. If 'n' is larger than the pool capacity, we will
       never be able to satisfy the request. */
    TASSERT(n <= _capacity);
  
    TRACE(TRACE_RESOURCE_POOL & TRACE_ALWAYS, "%s was %d:%d:%d\n",
          _name.data(),
          _capacity,
          _reserved,
          _non_idle);
  
    /* Checks:
     
       - If there are other threads waiting, add ourselves to the queue
       of waiters so we can maintain FIFO ordering.

       - If there are no waiting threads, but the number of unreserved
       threads is too small, add ourselves to the queue of waiters. */
    int num_unreserved = _capacity - _reserved;
    if (!static_list_is_empty(&_waiters) || (num_unreserved < n)) {

        wait_for_turn(n);
    
        /* If we are here, we have been granted the resources. The thread
           which gave them to us has already updated the pool's state. */
        TRACE(TRACE_RESOURCE_POOL & TRACE_ALWAYS, "%s after_woken %d:%d:%d\n",
              _name.data(),
              _capacity,
              _reserved,
              _non_idle);

        return;
    }


    /* If we are here, we did not wait. We are responsible for updating
       the state of the resource pool before we exit. */
    _reserved += n;
  
    TRACE(TRACE_RESOURCE_POOL & TRACE_ALWAYS, "%s didnt_sleep %d:%d:%d\n",
          _name.data(),
          _capacity,
          _reserved,
          _non_idle);
}
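For context, here is a minimal, hypothetical sketch (plain C with pthreads, not the actual resource_pool_t implementation) of the pattern the comments above describe: requests are granted in FIFO order, and a caller waits whenever earlier waiters exist or the pool cannot currently cover the request.

#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  turn;           /* broadcast whenever the state changes */
    int capacity;                   /* total units in the pool              */
    int reserved;                   /* units currently handed out           */
    unsigned long next_ticket;      /* ticket handed to the next requester  */
    unsigned long serving_ticket;   /* ticket currently allowed to proceed  */
} pool_t;

#define POOL_INITIALIZER(cap) \
    { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, (cap), 0, 0, 0 }

void pool_reserve(pool_t *p, int n)
{
    pthread_mutex_lock(&p->lock);
    unsigned long my_ticket = p->next_ticket++;
    /* FIFO: wait until every earlier request has been served AND the pool
       has enough unreserved units left to cover this one. */
    while (my_ticket != p->serving_ticket || p->capacity - p->reserved < n)
        pthread_cond_wait(&p->turn, &p->lock);
    p->reserved += n;
    p->serving_ticket++;            /* let the next waiter take its turn */
    pthread_cond_broadcast(&p->turn);
    pthread_mutex_unlock(&p->lock);
}

void pool_unreserve(pool_t *p, int n)
{
    pthread_mutex_lock(&p->lock);
    p->reserved -= n;
    pthread_cond_broadcast(&p->turn);
    pthread_mutex_unlock(&p->lock);
}

The real class also tracks _non_idle and hands resources directly to a specific waiter; this sketch only illustrates the FIFO-plus-capacity check with a condition variable.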
Example #3
File: router.c  Project: amohtasham/rstm
/* =============================================================================
 * router_solve
 * =============================================================================
 */
void
router_solve (void* argPtr)
{
  TM_THREAD_ENTER();

  long threadId = thread_getId();

  router_solve_arg_t* routerArgPtr = (router_solve_arg_t*)argPtr;
  router_t* routerPtr = routerArgPtr->routerPtr;
  maze_t* mazePtr = routerArgPtr->mazePtr;  
  long* numPathArray = routerArgPtr->numPathArray;
  vector_t* myPathVectorPtr = PVECTOR_ALLOC(1);
  assert(myPathVectorPtr);

  queue_t* workQueuePtr = mazePtr->workQueuePtr;
  grid_t* gridPtr = mazePtr->gridPtr;
  grid_t* myGridPtr =
    PGRID_ALLOC(gridPtr->width, gridPtr->height, gridPtr->depth);
  assert(myGridPtr);
  long bendCost = routerPtr->bendCost;
  queue_t* myExpansionQueuePtr = PQUEUE_ALLOC(-1);

  long numPath = 0;
  /*
   * Iterate over work list to route each path. This involves an
   * 'expansion' and 'traceback' phase for each source/destination pair.
   */
  while ((global_timedExecution && !global_isTerminated) || (!global_timedExecution)) {
  //while (1) {
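    /* Presumably a scheduling gate for the timed-execution harness: block
       until it is this thread's turn to take work from the shared queue
       (the implementation of wait_for_turn() is not shown here). */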
    wait_for_turn(threadId);
    if (global_timedExecution && global_isTerminated)
        break;

    ulong_t beginTime;
    pair_t* coordinatePairPtr;
    TM_BEGIN();
    beginTime = get_thread_time();
    if (TMQUEUE_ISEMPTY(workQueuePtr)) {
        coordinatePairPtr = NULL;
    } else {
        coordinatePairPtr = (pair_t*)TMQUEUE_POP(workQueuePtr);
    }
    TM_END();
    //add_throughput(threadId , get_thread_time() - beginTime);
    if (coordinatePairPtr == NULL) {
      break;
    }

    coordinate_t* srcPtr = (coordinate_t*)coordinatePairPtr->firstPtr;
    coordinate_t* dstPtr = (coordinate_t*)coordinatePairPtr->secondPtr;

    bool_t success = FALSE;
    vector_t* pointVectorPtr = NULL;

    TM_BEGIN();
    beginTime = get_thread_time();
    grid_copy(myGridPtr, gridPtr); /* ok if not most up-to-date */
    if (PdoExpansion(routerPtr, myGridPtr, myExpansionQueuePtr,
                     srcPtr, dstPtr)) {
      pointVectorPtr = PdoTraceback(gridPtr, myGridPtr, dstPtr, bendCost);
      /*
       * TODO: fix memory leak
       *
       * pointVectorPtr will be a memory leak if we abort this transaction
       */
      if (pointVectorPtr) {
        TMGRID_ADDPATH(gridPtr, pointVectorPtr);
        TM_LOCAL_WRITE_L(success, TRUE);
      }
    }
    TM_END();
    add_throughput(threadId , get_thread_time() - beginTime);

    numPath++;
    if (success) {
      bool_t status = PVECTOR_PUSHBACK(myPathVectorPtr,
                                       (void*)pointVectorPtr);
      assert(status);
    }

  }
  numPathArray[threadId] = numPath;
  /*
   * Add my paths to global list
   */
  list_t* pathVectorListPtr = routerArgPtr->pathVectorListPtr;
  TM_BEGIN();
  TMLIST_INSERT(pathVectorListPtr, (void*)myPathVectorPtr);
  TM_END();

  PGRID_FREE(myGridPtr);
  PQUEUE_FREE(myExpansionQueuePtr);

#if DEBUG
  puts("\nFinal Grid:");
  grid_print(gridPtr);
#endif /* DEBUG */

  TM_THREAD_EXIT();
}