Example #1
/*
 * Create or destroy enough threads to bring the total number of
 * threads to the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
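
For context, a caller along the lines of the old nfsd_svc() path would use it as sketched below. The helper name is invented, and the svc_get()/svc_destroy() pairing follows the `-1' comment inside the function; the exact reference-counting API varies between kernel versions.

/*
 * Hypothetical caller sketch, not actual kernel code: hold a reference,
 * request an absolute thread count across all pools, drop the reference.
 */
static int
set_thread_count_sketch(struct svc_serv *serv, int nrservs)
{
	int error;

	svc_get(serv);	/* the -1 in svc_set_num_threads() accounts for this */
	error = svc_set_num_threads(serv, NULL /* all pools */, nrservs);
	svc_destroy(serv);	/* drops the reference taken above */
	return error;
}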
Example #2
/**
 * Use mpmcqs to allow stealing directly from a victim, without waiting for a
 * response.
 */
static pony_actor_t* steal(scheduler_t* sched, pony_actor_t* prev)
{
  send_msg(0, SCHED_BLOCK, 0);
  uint64_t tsc = ponyint_cpu_tick();
  pony_actor_t* actor;

  while(true)
  {
    scheduler_t* victim = choose_victim(sched);

    if(victim == NULL) // no victim available: fall back to the global inject queue
      actor = (pony_actor_t*)ponyint_mpmcq_pop(&inject);
    else
      actor = pop_global(victim);

    if(actor != NULL)
    {
      DTRACE3(WORK_STEAL_SUCCESSFUL, (uintptr_t)sched, (uintptr_t)victim, (uintptr_t)actor);
      break;
    }

    uint64_t tsc2 = ponyint_cpu_tick();

    if(quiescent(sched, tsc, tsc2))
    {
      DTRACE2(WORK_STEAL_FAILURE, (uintptr_t)sched, (uintptr_t)victim);
      return NULL;
    }

    // If we have been passed an actor (implicitly, the cycle detector), and
    // enough time has elapsed without stealing or quiescing, return the actor
    // we were passed (allowing the cycle detector to run).  The threshold is
    // 10 billion ticks, i.e. roughly 10 seconds if ponyint_cpu_tick() reports
    // nanoseconds (the unit is platform-dependent).
    if((prev != NULL) && ((tsc2 - tsc) > 10000000000))
    {
      actor = prev;
      break;
    }
  }

  send_msg(0, SCHED_UNBLOCK, 0);
  return actor;
}
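
choose_victim() itself is orthogonal to the queue mechanics above. The sketch below shows one plausible round-robin policy; the last_victim field and the scheduler/scheduler_count globals are assumptions about the surrounding runtime, not its confirmed layout.

// Hypothetical round-robin victim selection.  Returns NULL once the scan
// wraps back to where it started, which steal() above treats as "try the
// global inject queue instead".
static scheduler_t* choose_victim_sketch(scheduler_t* sched)
{
  scheduler_t* victim = sched->last_victim;

  while(true)
  {
    // Walk backwards through the global scheduler array, wrapping.
    victim--;

    if(victim < scheduler)
      victim = &scheduler[scheduler_count - 1];

    if(victim == sched->last_victim)
    {
      // A full scan found nothing new: reset and report no victim.
      sched->last_victim = sched;
      return NULL;
    }

    if(victim == sched)
      continue; // never steal from ourselves

    sched->last_victim = victim;
    return victim;
  }
}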
Example #3
/**
 * Use mpmcqs to allow stealing directly from a victim, without waiting for a
 * response.
 */
static pony_actor_t* steal(scheduler_t* sched, pony_actor_t* prev)
{
  send_msg(0, SCHED_BLOCK, 0);
  uint64_t tsc = ponyint_cpu_tick();
  pony_actor_t* actor;

  while(true)
  {
    scheduler_t* victim = choose_victim(sched);

    if(victim == NULL)
      victim = sched;  // no victim available: fall back to our own queue

    actor = pop_global(victim);

    if(actor != NULL)
      break;

    uint64_t tsc2 = ponyint_cpu_tick();

    if(quiescent(sched, tsc, tsc2))
      return NULL;

    // If we have been passed an actor (implicitly, the cycle detector), and
    // enough time has elapsed without stealing or quiescing, return the actor
    // we were passed (allowing the cycle detector to run).  The threshold is
    // 10 billion ticks, i.e. roughly 10 seconds if ponyint_cpu_tick() reports
    // nanoseconds (the unit is platform-dependent).
    if((prev != NULL) && ((tsc2 - tsc) > 10000000000))
    {
      actor = prev;
      break;
    }
  }

  send_msg(0, SCHED_UNBLOCK, 0);
  return actor;
}
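
Both variants are driven from the scheduler's run loop when the local queue runs dry. A stripped-down sketch: run_sketch() and run_actor_batch() are illustrative stand-ins, and push() is assumed to be the local-queue counterpart of pop_global().

// Hypothetical: runs a batch of the actor's messages and returns true if
// the actor still has pending work.
static bool run_actor_batch(scheduler_t* sched, pony_actor_t* actor);

static void run_sketch(scheduler_t* sched)
{
  pony_actor_t* actor = pop_global(sched);

  while(true)
  {
    if(actor == NULL)
    {
      // Local queue empty: block, steal, unblock.  NULL means the
      // runtime reached quiescence and this scheduler should exit.
      actor = steal(sched, NULL);

      if(actor == NULL)
        return;
    }

    // Requeue the actor if it still has messages after this batch.
    if(run_actor_batch(sched, actor))
      push(sched, actor);

    actor = pop_global(sched);
  }
}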
Example #4
File: vm.c Project: abi93k/OS161-Kernel
vaddr_t page_alloc(struct addrspace *as, vaddr_t vaddr) {
	spinlock_acquire(&coremap_lock);

	for(int i = no_of_coremap_entries-1; i >= 0; i--) {

		if(coremap[i].state == FREE) {
			coremap[i].state = DIRTY;
			coremap[i].last_page = 1;
			KASSERT(as != NULL); // a user process must always have an address space
			coremap[i].as = as;
			coremap[i].vaddr = vaddr;
			coremap[i].cpu = -1;
			coremap[i].pinned = 0;
			coremap[i].page = NULL;

			coremap_used += PAGE_SIZE;

			spinlock_release(&coremap_lock);
			bzero((void *)PADDR_TO_KVADDR(CM_TO_PADDR(i)), PAGE_SIZE);

			return PADDR_TO_KVADDR(CM_TO_PADDR(i));
		}
	}
	// We get here only when no coremap entry is free, so we must evict.
	if (swap_enabled == false) {
		spinlock_release(&coremap_lock);
		return 0; // no swap available: the allocation simply fails
	}
	int victim_index = choose_victim();
	KASSERT(victim_index>=0);
	int previous_state = coremap[victim_index].state;

	struct addrspace *lock_as = coremap[victim_index].as;
	vaddr_t lock_vaddr = coremap[victim_index].vaddr;
	KASSERT(coremap[victim_index].state != FREE);

	coremap[victim_index].state = VICTIM; // mark it so other paths skip this frame
	coremap[victim_index].pinned = 1;     // pin it so it cannot be evicted concurrently
	KASSERT(lock_as != NULL); // an evictable frame always belongs to a process
	KASSERT(lock_as->pagetable != NULL);

	//struct pte* target = pte_get(lock_as, lock_vaddr);
	struct pte *target = coremap[victim_index].page; // back-pointer: O(1) instead of a pte_get() walk
	if (target == NULL) {
		kprintf("\nvaddr: 0x%x\n", lock_vaddr);
		kprintf("\nas: %p\n", lock_as);
	}
	KASSERT(target != NULL);
	KASSERT(target->pte_lock != NULL);
	spinlock_release(&coremap_lock);

	lock_acquire(target->pte_lock);

	KASSERT(coremap[victim_index].pinned == 1);

	if (target->in_memory == 1) {
		// The frame's state may have changed (e.g. during a pagetable
		// destroy) between releasing the spinlock and taking the PTE
		// lock, so re-read it before making the page available.
		if (coremap[victim_index].state != VICTIM)
			previous_state = coremap[victim_index].state;

		MAKE_PAGE_AVAIL(victim_index, previous_state);
	}

	// Hand the frame over to its new owner.
	coremap[victim_index].state = VICTIM;
	coremap[victim_index].as = as;       // new address space
	coremap[victim_index].vaddr = vaddr; // new vaddr
	coremap[victim_index].last_page = 1;
	coremap[victim_index].pinned = 0;
	coremap[victim_index].page = NULL;

	bzero((void *)PADDR_TO_KVADDR(CM_TO_PADDR(victim_index)), PAGE_SIZE);

	lock_release(target->pte_lock);

	return PADDR_TO_KVADDR(CM_TO_PADDR(victim_index));
}
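
choose_victim() is where the eviction policy lives. Below is a minimal random-probe sketch that honours the FREE/VICTIM/pinned checks page_alloc() depends on; treating as == NULL as "kernel page, not evictable" is an assumption about this coremap's conventions.

/*
 * Hypothetical victim chooser: random probe over the coremap, skipping
 * frames that are free, pinned, or already being evicted.  Assumes the
 * caller holds coremap_lock.  A real policy (clock, LRU) would replace
 * the random probe.
 */
static int choose_victim_sketch(void) {
	while (1) {
		int i = random() % no_of_coremap_entries;

		if (coremap[i].state == FREE || coremap[i].state == VICTIM)
			continue;
		if (coremap[i].pinned)
			continue;
		if (coremap[i].as == NULL) // assumed kernel page: not evictable
			continue;

		return i;
	}
}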