Example #1
/*
 * A signal handler for SIGVTALRM.
 * Control arrives here when a thread uses up its time slice. This handler
 * implements a preemptive thread scheduler: it pops the thread at the front
 * of the global ready queue, saves the current thread's context, and
 * switches to the new thread.
 */
void sigvtalrm_handler(int sig)
{
    /* block the signal */
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);

    /* if no thread in the ready queue, resume execution */
    if (steque_isempty(&ready_queue))
        return;

    /* get the next runnable thread and use preemptive scheduling */
    thread_t* next = (thread_t*) steque_pop(&ready_queue);
    while (next->state == GTTHREAD_CANCEL)
    {
        steque_enqueue(&zombie_queue, next);
        next = (thread_t*) steque_pop(&ready_queue); 
    }
    thread_t* prev = current;
    steque_enqueue(&ready_queue, current);
    next->state = GTTHREAD_RUNNING; 
    current = next;

    /* unblock the signal */
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL); 
    swapcontext(prev->ucp, current->ucp);
}
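The handler above presumes that SIGVTALRM is delivered by a recurring virtual timer and that the vtalrm signal set contains only that signal. A minimal setup sketch under those assumptions (the function name install_preemption and the 10 ms period are illustrative, not part of the original):

#include <signal.h>
#include <string.h>
#include <sys/time.h>

static sigset_t vtalrm;

static void install_preemption(void)
{
    struct sigaction sa;
    struct itimerval timer;

    /* signal set used by the handler to block/unblock preemption */
    sigemptyset(&vtalrm);
    sigaddset(&vtalrm, SIGVTALRM);

    /* install the handler from the example */
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = sigvtalrm_handler;
    sigaction(SIGVTALRM, &sa, NULL);

    /* recurring virtual timer: fires after 10 ms of CPU time, then every 10 ms */
    timer.it_value.tv_sec = 0;
    timer.it_value.tv_usec = 10000;
    timer.it_interval = timer.it_value;
    setitimer(ITIMER_VIRTUAL, &timer, NULL);
}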
Example #2
/* Schedules the next thread in the currently_running queue */
void scheduleNextAndSwap(gtthread* current)
{
	gtthread *nextThread, *runnable;
	
	nextThread = (gtthread *) steque_front(&currently_running);
    		
	if(nextThread -> cancel)
	{
		nextThread -> finished = 1;
		steque_pop(&currently_running);
			
		while( !steque_isempty(&nextThread -> joining))
		{
			runnable = (gtthread *) steque_pop(&nextThread -> joining);
			steque_enqueue(&currently_running, runnable);
		}
		nextThread = (gtthread *) steque_front(&currently_running);
	}
	
	/* swapcontext(old, new) */

	if(swapcontext(&current -> uctx, &nextThread -> uctx) == -1)
	{		
		/*printf("swapcontext");*/
		exit(EXIT_FAILURE);
	}
}
Example #3
void alarm_handler(int sig){

  gtthread_blk_t *cur, *next;

  DEBUG_MSG("alarm_handler\n");

  cur = (gtthread_blk_t *)steque_front(&thread_queue);
  if(cur->state == RUNNING){                                       // avoid updating the state when the thread is already done
    cur->state = READY;
    steque_cycle(&thread_queue);
  }

  /*if((cur->state == TERMINATED) && (cur->tID == 1)){

    while(!steque_isempty(&thread_queue)){
      steque_pop(&thread_queue);
    }

    return;

  } */

  while((next = (gtthread_blk_t *)steque_front(&thread_queue))){ 
  
    if(next->state == READY){
      next->state = RUNNING;
      DEBUG_MSG("Schedule #%ld thread\n", next->tID);
      break;
    }
    else if(next->state == JOINED){
      steque_pop(&thread_queue);
      DEBUG_MSG("POP #%ld thread\n", next->tID);
    }
    else if(next->state == TERMINATED)
      steque_cycle(&thread_queue);
    else{
      steque_pop(&thread_queue);
      DEBUG_MSG("ERROR! RUNNING thread?\n");
    }
  } 

  DEBUG_MSG("swap: cur thread: # %ld, next thread: # %ld\n", cur->tID, next->tID);
  reset_timer();

  if (swapcontext(&cur->uctx, &next->uctx) != 0)
    DEBUG_MSG("ERROR! Swap failed?\n");
  DEBUG_MSG("GOOD! Swap SUCCESS!\n");

  list_thread();

}
Example #4
File: lru.c Project: ql2723/cs6210
int gtcache_set(char *key, void *value, size_t val_size){
  struct timeval *time;
  cache_entry_t *victim;
  int id, *idp;

  /* Determine if we can add this to the cache without evicting */
  while (need_eviction(val_size)) {
    /* Evict based on LRU policy: evict the entry with the oldest timestamp */
    if (!indexminpq_isempty(&id_by_time_pq)) {
      id = indexminpq_minindex(&id_by_time_pq);
      time = indexminpq_keyof(&id_by_time_pq, id);

      indexminpq_delmin(&id_by_time_pq);
      victim = &cache.entries[id];

      idp = (int *) hshtbl_get(&url_to_id_tbl, victim->url);
      hshtbl_delete(&url_to_id_tbl, victim->url);
      steque_push(&free_ids, id);

      free(idp);
      free(time);

      cache.mem_used -= victim->size;
      cache.num_entries--;
      free(victim->data);
      free(victim->url);
      victim->size = 0;
    } else {
      /* the val size is bigger than the total cache available memory */
      return 0;
    }
  }

  /* Get next free ID */
  id = (int) steque_pop(&free_ids);
  idp = malloc(sizeof(int));
  *idp = id;

  /* Allocate memory for new entry */
  cache.entries[id].size = val_size;
  cache.entries[id].data = (char *) malloc(val_size);
  cache.entries[id].url = (char *) malloc(strlen(key) + 1);
  cache.mem_used += val_size;
  cache.num_entries++;

  /* Add to cache */
  memcpy(cache.entries[id].data, value, val_size);
  strcpy(cache.entries[id].url, key); // safe: the allocation above includes room for the NUL

  /* Create hash table mapping */
  hshtbl_put(&url_to_id_tbl, cache.entries[id].url, (hshtbl_item) idp);

  time = (struct timeval *) malloc(sizeof(struct timeval));
  gettimeofday(time, NULL);

  /* Add ID to minpq */
  indexminpq_insert(&id_by_time_pq, id, (void *) time);

  return 1;
}
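For context, a rough sketch of the bookkeeping structures gtcache_set appears to rely on; the field names are inferred from the code above and are assumptions, not the project's actual declarations:

/* Hypothetical declarations inferred from gtcache_set's usage; the real
 * project's definitions may differ. */
typedef struct {
    char   *url;   /* key the entry was stored under */
    char   *data;  /* cached bytes */
    size_t  size;  /* length of data in bytes */
} cache_entry_t;

static struct {
    cache_entry_t *entries;     /* slot table indexed by id */
    size_t         mem_used;    /* bytes currently cached */
    int            num_entries; /* live entries */
} cache;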
Example #5
gtthread_blk_t *get_thread(gtthread_t tID){

  int queue_len, i;
  gtthread_blk_t *tmp, *ret = NULL;  /* NULL when no matching tID is found */
  int match = 0;

  DEBUG_MSG("get_thread, tID: %ld\n", tID);

  queue_len = steque_size(&thread_queue);
  
  for(i = 0; i < queue_len; i++){

    tmp = (gtthread_blk_t *)steque_pop(&thread_queue);
    steque_enqueue(&thread_queue, tmp);

    if(tmp->tID == tID){
      ret = tmp;
      match = 1;
    } 

  }

  if(!match)
    DEBUG_MSG("No Match!\n");

  return ret;

}
Example #6
/*
  The gtthread_exit() function is analogous to pthread_exit.
 */
void gtthread_exit(void* retval){
  gtthread_int_t *self;
  sigset_t oldset;

  /* Block alarms */
  sigprocmask(SIG_BLOCK, &vtalrm, &oldset);

  /* Remove the thread from run_queue */
  self = (gtthread_int_t *) steque_pop(&run_queue);

  /* Set the return value */
  self->retval = retval;

  /* Mark thread as completed */
  self->completed = 1;

  /* Reschedule joined threads */
  reschedule_joined(self);

  /* Need to reschedule so we don't just drop back
     into parent context */
  schedule_next(self);

  /* Unblock alarms */
  sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
}
Example #7
static void *worker_main(void *arg){
  request_t *request;
  void *src_val, *dst_val;
  size_t src_len, dst_len;
  CLIENT *cl = (CLIENT*) arg;

  pthread_mutex_lock(&mutex);
  while(!steque_isempty(&queue)){
    request = steque_pop(&queue);
    pthread_mutex_unlock(&mutex); 

    src_val = read_from_file(request->inputfilename, &src_len);

    dst_val = minify_via_rpc(cl, src_val, src_len, &dst_len);

    write_to_file(request->outputfilename, dst_val, dst_len);

    free(src_val);
    free(dst_val);
    free(request);

    pthread_mutex_lock(&mutex);
  }
  pthread_mutex_unlock(&mutex); 

  return NULL;
}
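A hypothetical submission side for the worker above: only the two filename fields of request_t are implied by worker_main, and the queue/mutex globals are assumed to be the ones it locks. Everything else here is an assumption, not the project's actual code.

#include <stdlib.h>

/* Hypothetical request type and enqueue helper */
typedef struct {
    char *inputfilename;
    char *outputfilename;
} request_t;

static void enqueue_request(char *infile, char *outfile)
{
    request_t *req = malloc(sizeof(*req));
    req->inputfilename = infile;
    req->outputfilename = outfile;

    pthread_mutex_lock(&mutex);      /* same lock worker_main takes */
    steque_enqueue(&queue, req);
    pthread_mutex_unlock(&mutex);
}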
Example #8
static void _sig_handler(int signo){
  struct shm_info *shm_blk;

  if (signo == SIGINT || signo == SIGTERM){
    gfserver_stop(&gfs);
    pthread_mutex_lock(&segfds_q_mutex);
    while (!steque_isempty(&segfds_q)) {
        shm_blk = (struct shm_info *)steque_pop(&segfds_q);
        if (shm_unlink(shm_blk->mem_name) == 0) {
          fprintf(stdout, "Shared mem %s removed from system.\n",
            shm_blk->mem_name);
        }
        if (sem_unlink(shm_blk->sem1_name) == 0) {
          fprintf(stdout, "Semaphore %s removed from system.\n",
            shm_blk->sem1_name);
        }
        if (sem_unlink(shm_blk->sem2_name) == 0) {
          fprintf(stdout, "Semaphore %s removed from system.\n",
            shm_blk->sem2_name);
        }
    }
    pthread_mutex_unlock(&segfds_q_mutex);
    pthread_cond_destroy(&segfds_q_cond);
    pthread_mutex_destroy(&segfds_q_mutex);
    exit(signo);
  }
}
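The handler dereferences several fields of struct shm_info; a hypothetical definition consistent with that usage (and with the mmap call in the later handle_with_cache example), with illustrative buffer sizes:

/* Hypothetical struct shm_info; the field names come from the code in this
 * listing, the array sizes are made up. */
struct shm_info {
    int  memfd;          /* fd returned by shm_open() for this segment */
    char mem_name[64];   /* name passed to shm_open()/shm_unlink() */
    char sem1_name[64];  /* first POSIX semaphore name */
    char sem2_name[64];  /* second POSIX semaphore name */
};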
Example #9
/* consumer_routine - thread that prints characters off the queue */
void *consumer_routine(void *arg) {
  steque_t *queue_p = arg;
  intptr_t c;
  long count = 0; /* number of nodes this thread printed */

  printf("Consumer thread started with thread id %"PRIdPTR"\n", (intptr_t) pthread_self());

  /* terminate the loop only when there are no more items in the queue
   * AND the producer threads are all done */

  pthread_mutex_lock(&g_queue_lock);
  pthread_mutex_lock(&g_num_prod_lock);

  while(!steque_isempty(queue_p) || g_num_prod > 0) {
    pthread_mutex_unlock(&g_num_prod_lock);

    if (!steque_isempty(queue_p)) {
      c = (intptr_t) steque_pop(queue_p);
      printf("%c", (char) c);
      ++count;
      pthread_mutex_unlock(&g_queue_lock);
    }
    else { /* Queue is empty, so let some other thread run */
      pthread_mutex_unlock(&g_queue_lock);
      sched_yield();
    }
  }
  pthread_mutex_unlock(&g_num_prod_lock);
  pthread_mutex_unlock(&g_queue_lock);

  return (void*) count;
}
Example #10
/* Reschedule all threads that are in finished thread's join queue */
static void reschedule_joined(gtthread_int_t *finished) {
  gtthread_int_t *wait;

  while (!steque_isempty(&finished->join_queue)) {
    wait = (gtthread_int_t *) steque_pop(&finished->join_queue);
    steque_enqueue(&run_queue, wait);
  }
}
Example #11
cache_status_request *DequeueRequest(int *threadID)
{
    pthread_mutex_lock(&_queueLock);
    cache_status_request* request = steque_pop(_queue);
    pthread_mutex_unlock(&_queueLock);
    printf("Popped request from queue on thread: %d\n", *threadID);
    return request;
}
Example #12
void gtthread_exit(void *retval)
{
	gtthread *exiting, *runnable;
	sigset_t oldset;

	sigprocmask(SIG_BLOCK, &alrm, &oldset);
	exiting = (gtthread *) steque_pop(&currently_running);
	exiting -> retval = retval;
	exiting -> finished = 1;
	/* reschedule other threads waiting in the exiting thread's join queue */
		
	while( !steque_isempty(&exiting -> joining))
	{
		runnable = (gtthread *) steque_pop(&exiting -> joining);
		steque_enqueue(&currently_running, runnable);
	}
	scheduleNextAndSwap(exiting);

	sigprocmask(SIG_UNBLOCK, &alrm, NULL);
	
}
Example #13
int gtthread_yield(void)
{
	gtthread *lastThread;
	sigset_t oldset;
	
	sigprocmask(SIG_BLOCK, &alrm, &oldset);
	lastThread = (gtthread *) steque_pop(&currently_running);
	steque_enqueue(&currently_running, lastThread);
	/* steque_cycle(&currently_running); */

	scheduleNextAndSwap(lastThread); /* swapcontext  */
	sigprocmask(SIG_UNBLOCK, &alrm, NULL);
	return 0;
}
Example #14
int gtthread_join(gtthread_t thread, void **status)
{
  gtthread *target_thread, *callingThread;
  sigset_t oldset;

  /* Block alarms */
  sigprocmask(SIG_BLOCK, &alrm, &oldset);

  /* find target thread in globalQ */
  target_thread = getThread(thread);

  if(target_thread != NULL) /*target thread found */
  {
  	/* Check if it has finished */
	if(target_thread -> finished) 
	{
		/*If it has finished, unblock alarm and then set status and return*/
		sigprocmask(SIG_UNBLOCK,&alrm, NULL);
	}
	else
	{
		/* If not finished, get the currently running thread (the calling
		   thread) and queue it on the target thread's join queue */
		callingThread = (gtthread *) steque_pop(&currently_running);
		steque_enqueue(&target_thread -> joining, callingThread);
		/* schedule next thread */
		scheduleNextAndSwap(callingThread);
		/* Now unblock alarms */
		sigprocmask(SIG_UNBLOCK, &alrm, NULL);
	}


	/* Set status */
	if(status != NULL)
	{
		*status = target_thread -> retval;
	}

	/* successful so return 0 */
	return 0;
  
  }
  else /* if target thread not found  */
  {
	/* unblock alarms and return */
	sigprocmask(SIG_UNBLOCK, &alrm, NULL);
	return 1;
  }
}
Example #15
/* Function call by individual threads.  Threads atomically 
 * dequeue requests or wait till queue is not empty
 * thread_info can be used for debugging */
void thread_main(void* _thread_info){
    pthread_mutex_lock(&req_mutex);
    while(1){
        while(steque_isempty(&request_queue))
            pthread_cond_wait(&req_cond, &req_mutex);
        
        /* steque_pop returns the stored item itself, not a node */
        request_t* req = (request_t*) steque_pop(&request_queue);
        pthread_mutex_unlock(&req_mutex);

        handler_perform(req->ctx, req->path, req->arg);
    }
    
    free(_thread_info);
}
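A hypothetical submission helper for the pool above. The request_t fields (ctx, path, arg) and the req_mutex/req_cond/request_queue globals mirror what thread_main uses, but the helper itself and the struct layout are assumptions.

/* Hypothetical request type matching thread_main's field accesses */
typedef struct {
    gfcontext_t *ctx;
    char        *path;
    void        *arg;
} request_t;

static void submit_request(gfcontext_t *ctx, char *path, void *arg)
{
    request_t *req = malloc(sizeof(*req));
    req->ctx = ctx;
    req->path = path;
    req->arg = arg;

    pthread_mutex_lock(&req_mutex);
    steque_enqueue(&request_queue, req);
    pthread_cond_signal(&req_cond);   /* wake one waiting worker */
    pthread_mutex_unlock(&req_mutex);
}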
Example #16
void print_run_queue(void) {
  int queue_len;
  int i;
  gtthread_int_t *item;

  queue_len = steque_size(&run_queue);

  for (i = 0; i < queue_len; i++) {
    item = (gtthread_int_t *) steque_pop(&run_queue);
    steque_enqueue(&run_queue, item);
    printf("%d->", (int) item->id);
    fflush(stdout);
  }

  printf("\n");
  fflush(stdout);
}
Example #17
/* Find a thread by its id */
static gtthread_int_t * find_thread(gtthread_t thread) {
  int queue_len;
  int i;
  gtthread_int_t *item;

  queue_len = steque_size(&threads);

  for (i = 0; i < queue_len; i++) {
    item = (gtthread_int_t *) steque_pop(&threads);
    steque_enqueue(&threads, item);
    if (item->id == thread) {
      return item;
    }
  }

  return NULL;
}
Example #18
/*
  The gtthread_yield() function is analogous to pthread_yield, causing
  the calling thread to relinquish the cpu and place itself at the
  back of the schedule queue.
 */
void gtthread_yield(void){
  gtthread_int_t *old;
  sigset_t oldset;

  /* Block alarms */
  sigprocmask(SIG_BLOCK, &vtalrm, &oldset);

  /* Put current thread at end of run queue */
  old = (gtthread_int_t *) steque_pop(&run_queue);
  steque_enqueue(&run_queue, old);
  
  /* Start running the new thread */
  schedule_next(old);

  /* Unblock alarms */
  sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
}
Example #19
/*
  The gtthread_join() function is analogous to pthread_join.
  All gtthreads are joinable.
 */
int gtthread_join(gtthread_t thread, void **status){
  gtthread_int_t *target, *self;
  sigset_t oldset;
  
  /* Block alarms */
  sigprocmask(SIG_BLOCK, &vtalrm, &oldset);

  /* Find the thread id */
  target = find_thread(thread);

  if (target != NULL) {

    /* If the target thread isn't complete, need to schedule another thread */
    if (!target->completed) {
      // 2. Pop this thread off of the main run queue
      self = (gtthread_int_t *) steque_pop(&run_queue);

      // 3. Enqueue this thread on the target thread run queue
      steque_enqueue(&target->join_queue, self);

      // 4. Schedule the next thread, since this is no longer in the run queue
      schedule_next(self);

      /* Unblock alarms */
      sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
    } else {
      /* Unblock alarms */
      sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
    }

    // 5. Set status
    if (status != NULL) {
      *status = target->retval;
    }

    return 0;
  } else {
    /* Unblock alarms */
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);

    // target thread not found
    return 1;
  }
}
Example #20
/*
  The gtthread_yield() function is analogous to pthread_yield, causing
  the calling thread to relinquish the cpu and place itself at the
  back of the schedule queue.
 */
int gtthread_yield(void)
{
    /* block SIGVTALRM signal */
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);
    
    /* if there is no thread to yield to, unblock the signal and return */
    if (steque_isempty(&ready_queue))
    {
        sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
        return 0;
    }

    thread_t* next = (thread_t*) steque_pop(&ready_queue);
    thread_t* prev = current;
    steque_enqueue(&ready_queue, current);
    current = next;

    /* unblock the signal */
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL); 
    swapcontext(prev->ucp, current->ucp); 
    return 0; 
}
Example #21
/*
  The gtthread_exit() function is analogous to pthread_exit.
 */
void gtthread_exit(void* retval)
{
    /* block alarm signal */
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);

    if (steque_isempty(&ready_queue))
    { 
        sigprocmask(SIG_UNBLOCK, &vtalrm, NULL); 
        exit((long) retval);
    }

    /* if the main thread calls gtthread_exit */
    if (current->tid == 1)
    {
        while (!steque_isempty(&ready_queue))
        {
            sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);  
            sigvtalrm_handler(SIGVTALRM);
            sigprocmask(SIG_BLOCK, &vtalrm, NULL);
        }
        sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);   
        exit((long) retval);
    }

    thread_t* prev = current; 
    current = (thread_t*) steque_pop(&ready_queue);
    current->state = GTTHREAD_RUNNING; 

    /* free up memory allocated for the exiting thread */
    free(prev->ucp->uc_stack.ss_sp); 
    free(prev->ucp);                
    prev->ucp = NULL;

    /* mark the exiting thread as DONE and add it to zombie_queue */
    prev->state = GTTHREAD_DONE; 
    prev->retval = retval;
    prev->joining = 0;
    steque_enqueue(&zombie_queue, prev);

    /* unblock alarm signal and setcontext for next thread */
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL); 
    setcontext(current->ucp);
}
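The three scheduler examples above (the SIGVTALRM handler, gtthread_yield, and gtthread_exit) share a thread_t type. A hypothetical definition inferred from the fields they touch; the real header's declaration may differ.

#include <ucontext.h>

/* Hypothetical thread_t, reconstructed from usage only */
typedef struct {
    long        tid;      /* tid 1 is treated as the main thread */
    int         state;    /* GTTHREAD_RUNNING, GTTHREAD_CANCEL, GTTHREAD_DONE, ... */
    int         joining;  /* set while another thread is joining on this one */
    void       *retval;   /* value handed to gtthread_exit() */
    ucontext_t *ucp;      /* heap-allocated context with its own stack */
} thread_t;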
Example #22
void list_thread(){

  int queue_len, i;
  gtthread_blk_t *tmp; 

  DEBUG_MSG("----------------------------------------------\n");

  queue_len = steque_size(&thread_queue);
  
  for(i = 0; i < queue_len; i++){

    tmp = (gtthread_blk_t *)steque_pop(&thread_queue);

    DEBUG_MSG("tmp->tID: %ld, state: %d, context: %x\n", tmp->tID, tmp->state, &tmp->uctx);

    steque_enqueue(&thread_queue, tmp);

  }
  DEBUG_MSG("----------------------------------------------\n");

}
Example #23
/* Schedule the next runnable thread */
static void schedule_next(gtthread_int_t *cur) {
  gtthread_int_t *target;

  do {

    target = (gtthread_int_t *) steque_front(&run_queue);

    /* If we got a cancel request, just comply! */
    if (target->cancelreq) {
      target->completed = 1;

      /* Remove completed thread from run queue */
      steque_pop(&run_queue);

      reschedule_joined(target);
    }

    /* Don't exit loop until we have a non-cancelled thread */
  } while (target->completed);

  swapcontext(&cur->context, &target->context);
}
Example #24
/*
 The gtthread_join() function is analogous to pthread_join.
 All gtthreads are joinable.
*/
int gtthread_join(gtthread_t thread, void **status) {
  sigprocmask(SIG_BLOCK, &vtalrm, NULL);
  gtthread_t *self = steque_front(&g_threads_steque);
  sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
  
  /* The range [0, g_thread_id) indicates the range of thread ids that have ever belonged to valid threads.
  Also can't join with self. */
  if(thread.id >= g_thread_id || thread.id == self->id)
    return 1;
  
  sigprocmask(SIG_BLOCK, &vtalrm, NULL);
  self->is_joined = 0;
  self->wait_tid = thread.id;
  sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
  int found_among_dead = 0;
  
  /* First look for joinee among threads that have already terminated. */
  int i;
  for(i=0; i<steque_size(&g_dead_threads_steque); i++) {
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);
    gtthread_t *curr = steque_front(&g_dead_threads_steque);
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
    
    if(curr->id == self->wait_tid) {
      found_among_dead = 1;
      sigprocmask(SIG_BLOCK, &vtalrm, NULL);
      self->joinee = curr;
      self->is_joined = 1;
      sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
      break;
    }
    
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);
    steque_cycle(&g_dead_threads_steque);
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
  }
  
  /* If we haven't found it, wait until it terminates. */
  if(!found_among_dead) {
    // First check to see that the thread I am waiting on is not already waiting on me.
    for(i=0; i<steque_size(&g_join_steque); i++) {
      sigprocmask(SIG_BLOCK, &vtalrm, NULL);
      gtthread_t *curr = steque_front(&g_join_steque);
      sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
      
      if(curr->wait_tid == self->id && curr->id == self->wait_tid) {
        sigprocmask(SIG_BLOCK, &vtalrm, NULL);
        self->is_joined = -1;
        self->wait_tid = -1L;
        sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
        return 1;
      }
      sigprocmask(SIG_BLOCK, &vtalrm, NULL);
      steque_cycle(&g_join_steque);
      sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
    }
    
    // If joinee isn't already waiting on me, enqueue myself in the join queue...
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);
    steque_enqueue(&g_join_steque, self);
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
    
    // ... and wait until joinee terminates.
    while(!self->is_joined)
      alarm_safe_yield();
  }
  
  if(status)
    *status = self->joinee->retval;
  
  sigprocmask(SIG_BLOCK, &vtalrm, NULL);
  self->joinee = NULL;
  self->wait_tid = -1L;
  self->is_joined = -1;
  sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
  
  /* Remove yourself from the join steque, if you put yourself there. */
  if(!found_among_dead) {
    for(i=0; i<steque_size(&g_join_steque); i++) {
      sigprocmask(SIG_BLOCK, &vtalrm, NULL);
      gtthread_t *curr = steque_front(&g_join_steque);
      sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
      
      if(gtthread_equal(*self, *curr)) {
        steque_pop(&g_join_steque);
        break;
      }
      if(steque_size(&g_join_steque) > 0) {
        sigprocmask(SIG_BLOCK, &vtalrm, NULL);
        steque_cycle(&g_join_steque);
        sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
      }
    }
  }
  
  return 0;
}
Example #25
/* NOTE: Assumes signal has been blocked before entering this call */
gtthread_t unschedule_cur(void) {
  gtthread_int_t *cur;
  cur = (gtthread_int_t *) steque_pop(&run_queue);
  return cur->id;
}
Example #26
/**
 * Helper function for yield.
 */
static void yield_helper(int is_alarm_safe) {
  if(is_alarm_safe)
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);
  
  // Don't need to do anything if there's just one thread in the queue.
  if(steque_size(&g_threads_steque) == 1)
    return;
  
  gtthread_t *old_thread = steque_pop(&g_threads_steque);
  gtthread_t *new_thread = NULL;
  
  /* Find an eligible new thread - i.e., a thread that isn't queued for cancelation. */
  if(!is_alarm_safe)
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);
  
  while(steque_size(&g_threads_steque) > 0) {
    new_thread = steque_front(&g_threads_steque);
    
    /* Cancels threads when it's their turn to run */
    int i;
    int canceled=0;
    for(i=0; i < steque_size(&g_cancelatorium); i++) {
      if((long) steque_front(&g_cancelatorium) == new_thread->id) {
        new_thread->is_finished = 1;
        new_thread->retval = (void *) -1;
        steque_pop(&g_cancelatorium);
        steque_pop(&g_threads_steque);
        steque_enqueue(&g_dead_threads_steque, new_thread);
        
        canceled=1;

        joininator(new_thread); // Attempt to join the thread you just canceled.
        break;
      }
      if(steque_size(&g_cancelatorium) > 0)
        steque_cycle(&g_cancelatorium);
    }
    
    if(!canceled)
      break;
  }
  
  /* If the thread that yielded finished executing, put it in the finished steque. */
  if(old_thread->is_finished) {
    steque_enqueue(&g_dead_threads_steque, old_thread);
    joininator(old_thread);
  } else {
    steque_enqueue(&g_threads_steque, old_thread);
  }
  
  if(!is_alarm_safe)
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
  
  // All threads have finished running. Is this necessary?
  if(steque_size(&g_threads_steque) == 0)
    exit(0);
  
  // Don't context switch if the original thread is the only one left in the queue.
  if(gtthread_equal(*((gtthread_t *) steque_front(&g_threads_steque)), *old_thread))
    return;
  
  if(is_alarm_safe) {
    T.it_value.tv_usec = global_period; // Reset timer so that the next period can start immediately.
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
  }
  
  swapcontext(old_thread->context, new_thread->context);
}
Example #27
ssize_t handle_with_cache(gfcontext_t *ctx, char *path, void *arg) {

    /* Get thread id */
    Workload *wld = (Workload *)arg;

    pthread_t tid = pthread_self();

    printf("entered %ld\n", tid);
    /* Sequence of steps to pop a segment number from the queue  */
    /* Read phase */
    /* Lock the mutex */
    pthread_mutex_lock(wld->sq_mtx_p);

    /* Wait until the queue is non-empty */
    while(steque_isempty(wld->segqueue)) {
        fprintf(stderr, "[handle_with_cache, thread_id = %ld] Waiting for shm segment\n", tid);
        pthread_cond_wait(wld->sq_notempty_p, wld->sq_mtx_p);
    }

    /* Pop a segment number */
    long shm_segnum = (long) steque_pop(wld->segqueue);
    fprintf(stderr, "[handle_with_cache, thread_id = %ld] Popped shm segment %ld\n", tid, shm_segnum);

    /* unlock the mutex */
    fprintf(stderr, "[handle_with_cache, thread_id = %ld] Unlocking the mutex\n", tid);
    pthread_mutex_unlock(wld->sq_mtx_p);

    /* convert the shm_segnum to shm_segid string */
    fprintf(stderr, "[handle_with_cache, thread_id = %ld] converting segnum to segid\n", tid);
    char shm_segid[SEGID_LEN];
    memset(shm_segid, 0, SEGID_LEN);
    if (sprintf(shm_segid, "%ld", shm_segnum) < 0) {
        err_exit("handle_with_cache", "sprintf", 1);
    }

    /* Open descriptor for shared memory region */
    int shm_fd = shm_open(shm_segid, O_RDWR, 0);
    if (shm_fd == -1) {
        strerr_exit("handle_with_cache, shm_open", errno);
    }

    /* obtain the pointer to shared memory */
    Shm_Block *shmb_p;
    shmb_p = mmap(NULL, wld->shm_blocksize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
    if (shmb_p == MAP_FAILED) {
        err_exit("handle_with_cache", "mmap failed", errno);
    }

    /* close shm file descriptor; it's no longer needed */
    close(shm_fd);

    /* SHM data buffer starts after the shm metadata */
    char *shm_buf = (char *) shmb_p + sizeof(Shm_Block);

    /* Initialize writer (simplecached) semaphore to 1; initially available */
    if (sem_init(&shmb_p->sem_writer, 1, 1) == -1) {
        err_exit("handle_with_cache", "Unable to initialize writer semaphore.", errno);
    }

    /* Initialize reader (handle_with_cache) semaphore to 0; initially unavailable */
    if (sem_init(&shmb_p->sem_reader, 1, 0) == -1) {
        err_exit("handle_with_cache", "Unable to initialize reader semaphore.", errno);
    }

    /* Compose message to be sent to simplecached with URL, shm segment id and shm_blocksize */
    char msg[MQ_MSGSIZE];
    if(sprintf(msg, "%s %ld %zu", path, shm_segnum, wld->shm_blocksize) < 0 ) {
        err_exit("handle_with_cache", "sprintf", 1);
    }

    /* Send message */
    fprintf(stderr, "[handle_with_cache, thread_id = %ld] Sending message on mqueue\n", tid);
    if (mq_send(wld->mqd, msg, strlen(msg), 0) == -1)
        strerr_exit("handle_with_cache, mq_send", errno);

    /* Lock the reader */
    if (sem_wait(&shmb_p->sem_reader) == -1) {
        strerr_exit("handle_with_cache, sem_wait (hdr)", errno);
    }

    /* Read file size from simplecached */
    fprintf(stderr, "[handle_with_cache, thread_id = %ld] Reading file-size from simplecached\n", tid);
    int file_size = shmb_p->file_size;
    fprintf(stderr, "[handle_with_cache] Req path = %s, file_size = %d\n", path, file_size);
    /* send GFS header */

    if (file_size == -1) {
        fprintf(stderr, "[handle_with_cache, thread_id = %ld] FNF, about to lock mutex\n", tid);
        pthread_mutex_lock(wld->sq_mtx_p);
        fprintf(stderr, "[handle_with_cache, thread_id = %ld] Enqueuing the segnum %ld\n", tid, shm_segnum);
        steque_enqueue(wld->segqueue, (steque_item) shm_segnum);
        pthread_mutex_unlock(wld->sq_mtx_p);
        pthread_cond_broadcast(wld->sq_notempty_p);
        return gfs_sendheader(ctx, GF_FILE_NOT_FOUND, 0);
    }

    /* unlock the writer */
    if (sem_post(&shmb_p->sem_writer) == -1) {
        strerr_exit("handle_with_cache, sem_post (hdr)", errno);
    }

    gfs_sendheader(ctx, GF_OK, file_size);

    /* STEPS FOR FILE TRANSFER */
    /* We use a pair of semaphores to alternate control between reader (i.e. handle_with_cache)
    and the writer (i.e. simplecached). We also keep track of the number of transfers and bytes
    transferred */
    int nxfrs = 0;
    ssize_t bytes_xfrd = 0;

    for (nxfrs = 0, bytes_xfrd = 0 ;; nxfrs++, bytes_xfrd += shmb_p->cnt) {

        /* Lock the reader semaphore. If it is already locked, this call will block. Thus, the
        control will never go past this point unless the writer has unlocked the semaphore. */
        if (sem_wait(&shmb_p->sem_reader) == -1) {
            strerr_exit("handle_with_cache, sem_wait (data)", errno);
        }

        /* File is done on the writer side */
        if (shmb_p->cnt == 0) {
            break;
        }

        /* Read Bytes from the shared memory */
        // printf("\n[handle_with_cache] writing data below \n\n");
        // write(STDOUT_FILENO, shm_buf, shmb_p->cnt);

        if (gfs_send(ctx, shm_buf, shmb_p->cnt) != shmb_p->cnt) {
            err_exit("handle_with_cache", "gfs_send", EXIT_FAILURE);
        }

        /* Unlock the writer */
        if (sem_post(&shmb_p->sem_writer) == -1) {
            strerr_exit("handle_with_cache, sem_post (data)", errno);
        }
    }

    /* Work is done; steps to put the shared memory segment back into queue */
    fprintf(stderr, "[handle_with_cache, thread_id = %ld] FF, about to lock mutex\n", tid);
    pthread_mutex_lock(wld->sq_mtx_p);
    fprintf(stderr, "[handle_with_cache, thread_id = %ld] Enqueuing the segnum %ld\n", tid, shm_segnum);
    steque_enqueue(wld->segqueue, (steque_item) shm_segnum);
    pthread_mutex_unlock(wld->sq_mtx_p);
    pthread_cond_broadcast(wld->sq_notempty_p);

    return bytes_xfrd;
}
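handle_with_cache reads four things out of the segment header; a hypothetical Shm_Block layout consistent with that usage, where the data area follows the header as the shm_buf pointer arithmetic above implies. The real struct may differ.

#include <semaphore.h>

/* Hypothetical per-segment header, reconstructed from usage only */
typedef struct {
    sem_t  sem_writer;  /* simplecached waits on this before writing */
    sem_t  sem_reader;  /* handle_with_cache waits on this before reading */
    int    file_size;   /* total file size, or -1 for file-not-found */
    size_t cnt;         /* bytes placed in the data area this round */
} Shm_Block;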
Example #28
ssize_t handle_with_cache(gfcontext_t *ctx, char *path, void* arg)
{
	mqd_t msg_q;
	void *mem;
	struct request_info *req;
	struct shm_info *shm_blk;
	sem_t *sem1;
	sem_t *sem2;
	int file_in_cache;
	size_t file_size = 0;
	size_t bytes_transferred = 0;
	ssize_t write_len;
	size_t cache_file_size;

	pthread_mutex_lock(seg_q_mutex);
	while (steque_isempty(seg_q)) {
		pthread_cond_wait(seg_q_cond, seg_q_mutex);
	}
	shm_blk = (struct shm_info *)steque_pop(seg_q);
	pthread_mutex_unlock(seg_q_mutex);

	if ((sem1 = sem_open(shm_blk->sem1_name, O_CREAT, 0644, 0)) ==
	    SEM_FAILED) {
		perror("sem_open");
		return -1;
    	}
    	if ((sem2 = sem_open(shm_blk->sem2_name, O_CREAT, 0644, 0)) ==
	    SEM_FAILED) {
		perror("sem_open");
		return -1;
    	}
retry:
	errno = 0;
	msg_q = mq_open(QUEUE_NAME, O_WRONLY);
	if (msg_q == -1) {
		if (errno == ENOENT || errno == EACCES) {
			/* simplecached isn't ready yet, sleep and then retry */
			fprintf(stdout, "waiting for simplecached\n");
			sleep(2);
			goto retry;
		}
		perror("mq_open");
		return -1;
	}

	mem = mmap(NULL, seg_size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    shm_blk->memfd, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		return -1;
	}
	req = malloc(sizeof(*req) + strlen(path) + 1);
	memcpy(&req->mem_i, (char *)shm_blk + sizeof(int), sizeof(req->mem_i));
	req->mem_size = seg_size;
	req->file_len = strlen(path) + 1;
	strncpy(req->file_path, path, strlen(path));
	req->file_path[strlen(path)] = '\0';
	mq_send(msg_q, (char *)req, sizeof(*req) + strlen(path) + 1, 0);
	free(req);

	sem_wait(sem1);
	file_in_cache = *(int *)mem;
	sem_post(sem1);

	if (file_in_cache == -1) {
		 gfs_sendheader(ctx, GF_FILE_NOT_FOUND, 0);
		 goto finish;
	}
	sem_wait(sem1);
	file_size = *(size_t *)mem;
	cache_file_size = file_size;
	gfs_sendheader(ctx, GF_OK, file_size);
	sem_post(sem1);
	if (!file_size) {
		goto finish;
	}
	while (file_size) {
		sem_wait(sem1);
		bytes_transferred =  seg_size < file_size ?
		    seg_size : file_size;
		write_len = gfs_send(ctx, (char *)mem, bytes_transferred);
		if (write_len != bytes_transferred) {
			fprintf(stderr, "write error");
		}
		file_size -= bytes_transferred;
		sem_post(sem1);
	}
	sem_wait(sem1);
	file_size = *(size_t *)mem;
	if (file_size) {
		fprintf(stderr, "transfer error");
	}
	sem_post(sem1);

finish:
	mq_close(msg_q);
	sem_close(sem1);
	sem_close(sem2);
	sem_unlink(shm_blk->sem1_name);
	sem_unlink(shm_blk->sem2_name);
	munmap(mem, seg_size);
	pthread_mutex_lock(seg_q_mutex);
	steque_push(seg_q, shm_blk);
	pthread_mutex_unlock(seg_q_mutex);
	pthread_cond_signal(seg_q_cond);

	return cache_file_size;
}
ssize_t handle_with_cache(gfcontext_t* ctx, char *path, void* arg) {
        char* fldes;

        pthread_mutex_lock(&fldes_mutex);
        while (fldes_queue_size == 0) {
                pthread_cond_wait(&read_cond, &fldes_mutex);
        }
        fldes = (char*)steque_pop(&fldes_queue);
        fldes_queue_size--;
//		fprintf(stdout, "fldes is:%s.", fldes);
        pthread_mutex_unlock(&fldes_mutex);
        pthread_cond_broadcast(&write_cond);
		//pthread_cond_signal(&write_cond);



        shm_data* mem = create_shm_channel(fldes);

        mqd_t mq;

        do {
                mq = mq_open (MSGQID, O_RDWR);
        } while(mq == (mqd_t) -1 || mq == 0);

        msgq_data data;
        strcpy(data.path, path);
        strcpy(data.fldes, fldes);
        data.segment_size = segment_size;

 //       pthread_mutex_lock(&msgq_mutex);

        int msg_rsp = mq_send(mq, (const char *) &data, 1024, 1);
        if (msg_rsp < 0) {
                fprintf(stderr, "Error %d (%s) on server proxy mq_send.\n", errno, strerror(errno));
				fflush(stdout);
  //              exit(1);
        }
        mq_close(mq);
 //       pthread_mutex_unlock(&msgq_mutex);

        pthread_mutex_lock(&mem->file_len_mutex);
        while(mem->file_length == 0) {
			pthread_cond_wait(&mem->proxy_cond, &mem->file_len_mutex);
        }

        int file_len = mem->file_length;

        pthread_mutex_unlock(&mem->file_len_mutex);
        pthread_cond_broadcast(&mem->cache_cond);

 //       fprintf(stdout, "Received file length is: %d\n", file_len);
//        fflush(stdout);
        
        if(file_len < 0) {
 //               fprintf(stdout, "File not found in cache\n");
//				fflush(stdout);
                destroy_shm_seg(fldes, mem);
                return gfs_sendheader(ctx, GF_FILE_NOT_FOUND, 0);
        }else {

                gfs_sendheader(ctx, GF_OK, file_len);

             
                int bytes_transferred = 0;
                int write_len = 0;
                char *data_start = (void *)(mem + 1);
                while (bytes_transferred < file_len) {

                        pthread_mutex_lock(&mem->data_mutex);
                        while(mem->bytes_written == 0) {
                                pthread_cond_wait(&mem->proxy_cond, &mem->data_mutex);
                        }
                        int read_len = mem->bytes_written;
                        write_len = gfs_send(ctx, data_start, read_len);
                        if (write_len != read_len) {
                                fprintf(stderr, "handle_with_cache write error");
                                return EXIT_FAILURE;
                        }
                        mem->bytes_written = 0;

                        pthread_mutex_unlock(&mem->data_mutex);
                        pthread_cond_broadcast(&mem->cache_cond);
                        bytes_transferred += write_len;
                }


                destroy_shm_seg(fldes, mem);
                fflush(stdout);
                return bytes_transferred;
        }
}
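The second handler above sends a fixed-size msgq_data message of 1024 bytes; a hypothetical layout consistent with the fields it fills in. Only path, fldes, and segment_size are implied by the code; the array sizes are made up.

/* Hypothetical message format for the cache request queue */
typedef struct {
    char   path[512];    /* requested resource path */
    char   fldes[256];   /* shared-memory segment name */
    size_t segment_size; /* size of that segment in bytes */
} msgq_data;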