Example #1
/*
This function returns:
0 - failed (the fmap is flushing or not active)
1 - the fmap region was removed without IO
2 - fmap removal has begun and involves IO (i.e. it completes asynchronously)
*/
int vmm_fmap_release_mreg(struct pm_task *task, struct vmm_memory_region *mreg)
{
	struct vmm_fmap_descriptor *fmd = (struct vmm_fmap_descriptor*)mreg->descriptor;

    if(fmd->status == VMM_FMAP_FLUSHING) // since we only allow one command on the task command
                                         // queue, there cannot be a flush command executing;
                                         // if we got here while flushing, the task is dying.
    {
        // the fmap is flushing: we must wait until it completes,
        // but tell the current operation we intend to close the FMAP
        fmd->status = VMM_FMAP_CLOSING_RELEASE;
        return 0;
    }
	else if(fmd->status != VMM_FMAP_ACTIVE)
    {
        return 0;
    }

	/* Drop our reference; removal completes below only when the count reaches 0 */
	fmd->references--;
        
	/* Unmap Pages from task address space */
	UINT32 addr = (UINT32)mreg->tsk_node.low;

	for(; addr < (UINT32)mreg->tsk_node.high; addr += 0x1000)
	{
		page_out(task->id, (ADDR)addr, 2);
	}

	/* Remove memory region from trees */
	ma_remove(&task->vmm_info.regions, &mreg->tsk_node);
    rb_remove(&task->vmm_info.regions_id, &mreg->tsk_id_node);

    fmd->regions = fmd->regions->next;
	if(fmd->regions)
		fmd->regions->prev = NULL;

	/* remember the region base before freeing; mreg must not be used after kfree */
	UINT32 release_low = (UINT32)mreg->tsk_node.low;
	kfree(mreg);

	if(fmd->references == 0)
	{
	    /* Set FMAP as shutting down */
	    fmd->status = VMM_FMAP_CLOSING;

	    /* Write all dirty pages */
	    fmd->io_finished.params[0] = 0;	// 0 will indicate we are closing and not flushing
	    fmd->io_finished.callback = vmm_fmap_flush_callback;

	    fmd->release_addr = release_low - 0x1000;
	    fmd->creating_task = task->id;

	    vmm_fmap_flush_callback(&fmd->iosrc, IO_RET_OK);

        return 2;
    }
	return 1;
}
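
The 0/1/2 contract above is only useful if the caller distinguishes the synchronous and asynchronous paths. A minimal caller sketch, assuming the surrounding PMAN declarations; the helper name is hypothetical and not part of the example's codebase:

/* Hypothetical sketch: branch on vmm_fmap_release_mreg's return value when
   tearing a single region down. */
static void release_one_region(struct pm_task *task, struct vmm_memory_region *mreg)
{
    switch(vmm_fmap_release_mreg(task, mreg))
    {
        case 1:
            /* Removed synchronously: mreg has already been freed and there
               is no IO to wait for. */
            break;
        case 2:
            /* Removal started with IO in flight: completion arrives through
               vmm_fmap_flush_callback once the dirty pages are written. */
            break;
        case 0:
        default:
            /* Nothing removed: either the fmap was flushing (its status is now
               VMM_FMAP_CLOSING_RELEASE, so the flush path finishes the close)
               or it was not active. */
            break;
    }
}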
Example #2
/* Page entries out from the LRU list until at least s bytes have been
   reclaimed or the list runs out.  Returns 1 if nothing can be paged out
   (DMhandle is closed or the LRU list is empty), 0 otherwise. */
static	int	makemore(long s)
{
	VMHEAD	HUGE	*h;

	if (DMhandle == -1  ||  !VMlru.i)
		return(1);
	while (s > 0L  &&  VMlru.i)  {
		h = &VMbase[VMlru.p.b][VMlru.p.l];	/* current LRU victim */
		page_out(h);				/* page it out */
		vm_unlink(h);				/* remove it from the in-memory chain */
		s -= h->size;				/* count the bytes reclaimed */
	}
	return(0);
}
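
makemore() is the reclaim step an allocator falls back on when the in-core pool runs short. A rough sketch of that call pattern; pool_alloc() is an assumed placeholder, not a function from the example's source:

/* Hypothetical allocation path: try the pool, reclaim with makemore() on
   failure, then retry once. */
static void *vm_alloc_sketch(long size)
{
	void *p = pool_alloc(size);

	if (p == NULL  &&  makemore(size) == 0)
		p = pool_alloc(size);	/* retry after LRU entries were paged out */
	return p;
}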
Example #3
UINT32 vmm_fmap(UINT16 task_id, UINT32 fileid, UINT16 fs_task, ADDR start, UINT32 size, UINT32 perms, UINT32 offset)
{
	struct pm_task *task = tsk_get(task_id);
	struct vmm_fmap_descriptor *new_fm = NULL;
	struct vmm_memory_region *new_mreg = NULL, *mreg = NULL;
	struct vmm_page_directory *pdir = NULL;
	struct vmm_page_table *ptbl = NULL;
	UINT32 dindex, tindex, tindex_max, pg_addr;
	struct pm_thread *thr = NULL;

    if(task == NULL) return PM_TASK_ID_INVALID;

	/* In this implementation we require both start and start + size to be page aligned */
	if(((UINT32)start & 0x00000FFF) || (((UINT32)start + size) & 0x00000FFF))
		return PM_INVALID_PARAMS;

	/* Check address is not above max_addr */
	if(task->vmm_info.max_addr <= (UINT32)start + size)
	{
		return PM_ERROR;
	}

    /* Translate address adding base. */
	start = TRANSLATE_ADDR(start, ADDR);

    // check [start, start + size) is "safe" to map (does not collide with the loader)
    if(loader_collides(task, start, start+size))
        return FALSE;

    /* Check the region does not intersect with any other region in this task's address space. */
	ma_node *n = ma_collition(&task->vmm_info.regions, (UINT32)start, (UINT32)start + size);

    if(n)
    {
		return PM_MEM_COLITION;
    }

	/* Allocate structures */
	new_fm = kmalloc(sizeof(struct vmm_fmap_descriptor));   // descriptor

	if(new_fm == NULL) return PM_NOT_ENOUGH_MEM;

	new_mreg = kmalloc(sizeof(struct vmm_memory_region));   // memory region on the task

	if(new_mreg== NULL)
	{
		kfree(new_fm);
		return PM_NOT_ENOUGH_MEM;
	}

    // get an ID for the memory descriptor on the task 
	if(!rb_free_value(&task->vmm_info.regions_id, &new_mreg->tsk_id_node.value))
	{
		kfree(new_fm);
		kfree(new_mreg);
		return PM_TOO_MANY_FILES_OPENED;
	}

	io_init_source(&new_fm->iosrc, FILE_IO_FMAP, new_mreg->tsk_id_node.value);
	
	/* Initialize structures */
	new_fm->io_finished.callback = NULL;
	new_fm->iosrc.fs_service = fs_task;
	new_fm->iosrc.smo = -1;
	new_fm->status = VMM_FMAP_ACTIVE;
	new_fm->offset = offset;
	new_fm->creating_task = task->id;
	new_fm->references = 1;	// This should change when we can share files mapped between tasks.
    new_fm->regions = new_mreg;
	
	new_mreg->owner_task = task_id;
    new_mreg->next = new_mreg->prev = NULL;
	new_mreg->tsk_node.high = ((UINT32)start + size);
	new_mreg->tsk_node.low = (UINT32)start;
	new_mreg->flags = perms;
	new_mreg->type = VMM_MEMREGION_FMAP;
    new_mreg->descriptor = new_fm;
    	
	/*
	Check none of the region's pages are swapped out.
	Pages that are currently present are paged out, so an exception is raised
	when the task reads or writes them.
	*/
	pdir = task->vmm_info.page_directory;
	ptbl = NULL;

	dindex = PM_LINEAR_TO_DIR(new_mreg->tsk_node.low);
	
	for(; dindex < PM_LINEAR_TO_DIR(new_mreg->tsk_node.high); dindex++)
	{
		if(pdir->tables[dindex].ia32entry.present == 0 && pdir->tables[dindex].record.swapped == 1)
		{
			kfree(new_fm);
			kfree(new_mreg);
			return PM_NOT_ENOUGH_MEM;
		}

		if(pdir->tables[dindex].ia32entry.present == 1)
		{
			/* Table is present: resolve it (raw-entry field name assumed) and check its pages */
			ptbl = (struct vmm_page_table*)PHYSICAL2LINEAR(PG_ADDRESS(pdir->tables[dindex].b));
			if(dindex == PM_LINEAR_TO_DIR(new_mreg->tsk_node.low))
				tindex = PM_LINEAR_TO_TAB(new_mreg->tsk_node.low);
			else
				tindex = 0;

			if(dindex == PM_LINEAR_TO_DIR(new_mreg->tsk_node.high))
				tindex_max = PM_LINEAR_TO_TAB(new_mreg->tsk_node.high);
			else
				tindex_max = 1024;

			for(;tindex < tindex_max; tindex++)
			{
				if(ptbl->pages[tindex].entry.ia32entry.present == 0 && ptbl->pages[tindex].entry.record.swapped == 1)
				{
					kfree(new_fm);
					kfree(new_mreg);
					return PM_NOT_ENOUGH_MEM;
				}
				else if(ptbl->pages[tindex].entry.ia32entry.present == 1)
				{
					pg_addr = PHYSICAL2LINEAR(PG_ADDRESS(ptbl->pages[tindex].entry.phy_page_addr));

					/* Page will be paged out */
					page_out(task->id, (ADDR)(dindex * 0x400000 + (tindex << 12)), 2);
					
					/* Put page onto PMAN */
					vmm_put_page((ADDR)pg_addr);

                    task->vmm_info.page_count--;
				}
			}
		}
	}

    /* Add new_mreg on the task lists */
    ma_insert(&task->vmm_info.regions, &new_mreg->tsk_node);
    rb_insert(&task->vmm_info.regions_id, &new_mreg->tsk_id_node, FALSE);

	/* Once takeover is OK, new_fm will be added to the global list. */

	/* Tell stdfss we are taking control of the file. */
    new_fm->io_finished.params[0] = task->id;
	new_fm->io_finished.callback  = fmap_takeover_callback;
	io_begin_takeover(&new_fm->iosrc, fileid, new_fm, task->id, 0xFFFFFFFF);

	/* Block all threads for this task, until the file is opened so we don't have page faults */
	task->state = TSK_MMAPPING;
	thr = task->first_thread;

	while(thr != NULL)
	{
		sch_deactivate(thr);
		thr = thr->next_thread;
	}

	return PM_OK;
}
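
The swap-check loop above walks the region's 4 KB pages by page-directory and page-table index; the address reconstruction dindex * 0x400000 + (tindex << 12) shows the split being used. A minimal sketch of what those index macros boil down to on ia32 with 4 KB pages (the example's own PM_LINEAR_TO_DIR/PM_LINEAR_TO_TAB definitions live in headers not shown here):

/* ia32 4 KB paging: a linear address splits into a 10-bit page-directory
   index, a 10-bit page-table index and a 12-bit offset within the page. */
#define LINEAR_TO_DIR(addr)  (((UINT32)(addr) >> 22) & 0x3FF)   /* which page table         */
#define LINEAR_TO_TAB(addr)  (((UINT32)(addr) >> 12) & 0x3FF)   /* which entry in the table */
#define LINEAR_TO_OFF(addr)  ((UINT32)(addr) & 0xFFF)           /* offset inside the page   */

/* e.g. 0x00403000 -> dir index 1, table index 3, offset 0: the third 4 KB
   page covered by the second page table. */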
Example #4
/* Tries to allocate and lock a frame for PAGE.
   Returns the frame if successful, or a null pointer on failure. */
static struct frame *
try_frame_alloc_and_lock (struct page *page) 
{
  size_t i;

  lock_acquire (&scan_lock);

  /* Find a free frame. */
  for (i = 0; i < frame_cnt; i++)
    {
      struct frame *f = &frames[i];
      if (!lock_try_acquire (&f->lock))
        continue;
      if (f->page == NULL) 
        {
          f->page = page;
          lock_release (&scan_lock);
          return f;
        } 
      lock_release (&f->lock);
    }

  /* No free frame.  Find a frame to evict. */
  for (i = 0; i < frame_cnt * 2; i++) 
    {
      /* Get a frame. */
      struct frame *f = &frames[hand];
      if (++hand >= frame_cnt)
        hand = 0;

      if (!lock_try_acquire (&f->lock))
        continue;

      if (f->page == NULL) 
        {
          f->page = page;
          lock_release (&scan_lock);
          return f;
        } 

      if (page_accessed_recently (f->page)) 
        {
          lock_release (&f->lock);
          continue;
        }
          
      lock_release (&scan_lock);
      
      /* Evict this frame. */
      if (!page_out (f->page))
        {
          lock_release (&f->lock);
          return NULL;
        }

      f->page = page;
      return f;
    }

  lock_release (&scan_lock);
  return NULL;
}
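
try_frame_alloc_and_lock() can fail transiently: every frame lock may be contended, or page_out() on the chosen victim can fail. Callers therefore usually retry. A hedged sketch of such a wrapper, assuming Pintos' timer_msleep() for the back-off; the retry count and sleep interval are arbitrary choices, not taken from the example:

#include "devices/timer.h"

/* Retry the allocation a few times, sleeping between attempts so other
   threads get a chance to release frame locks or finish their evictions. */
static struct frame *
frame_alloc_and_lock (struct page *page)
{
  size_t attempt;

  for (attempt = 0; attempt < 3; attempt++)
    {
      struct frame *f = try_frame_alloc_and_lock (page);
      if (f != NULL)
        return f;
      timer_msleep (1000);      /* back off for a second before retrying */
    }

  return NULL;
}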