Example #1
/* takes a global virtual address; returns the corresponding address within its frame, paging it in first if needed */
int touch_page(unsigned int vaddr)
{
	int page, offset;
	int frame_count;
	int i;

	page = vaddr / sys_pager.pg_size;
	offset = vaddr % sys_pager.pg_size;
	if (page > sys_pager.pg_count-1) {
		fprintf(stderr, "page_in: address points to nonexistent page\n");
		return -1;
	}
	if (!pg_table[page].valid) {
		fprintf(stderr, "ACCT: %d page faults\n", ++page_faults);	/* include this fault in the count */
		frame_count = MEMSIZE/sys_pager.pg_size;
		for (i=0; i<frame_count; i++) {
			if (free_frames[i] == 1)
				break;
		}
		if (i == frame_count)
			toss_page();
		if (page_in(page) == -1) {
			fprintf(stderr, "touch_page: error bringing page into mem\n");
			return -1;
		}
	}
	update_page_info(page);
	
	return pg_table[page].frame_addr+offset;
}
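
The globals this example leans on are only implied. Below is a minimal sketch of plausible declarations, with every name and field inferred from the usage above rather than taken from the original program:

/* hypothetical support declarations for touch_page(), inferred from usage */
struct pager_state {
	int pg_size;     /* page size in bytes */
	int pg_count;    /* number of virtual pages */
};

struct page_entry {
	int valid;       /* non-zero when the page is resident */
	int frame_addr;  /* base address of the frame holding the page */
};

extern struct pager_state sys_pager;
extern struct page_entry pg_table[];   /* one entry per virtual page */
extern int free_frames[];              /* 1 = frame free, 0 = in use */
extern int page_faults;                /* running fault counter */

int page_in(int page);                 /* load a page, -1 on failure */
void toss_page(void);                  /* evict a page to free a frame */
void update_page_info(int page);       /* refresh replacement metadata */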
Example #2
char HUGE *VM_addr(VMPTR_TYPE i, int dirty, int frez)
{
	VMPTR	p;
	VMHEAD	HUGE *h, HUGE *t;
	FNAME(VM_addr);

	TEST(fun, 0);
	p.i = i;
	if (!i || p.p.b >= VMBASESIZ || p.p.l >= VMLEGSIZ || !VMbase[p.p.b])
		return NULL;
	h = (VMHEAD HUGE *) &VMbase[p.p.b][p.p.l];
	if (h->type == MT_NOTUSED)
		return NULL;
	if (h->type & MT_IMEDIATE) {
		/* immediate entry: the value lives in the header itself, in
		   the space the mru_link would otherwise occupy */
		if (!frez != !(h->type & MT_FREEZE)) {
			if (frez) {
				VMnfreez++;
				VMnifrez++;
				h->type |= MT_FREEZE;
			} else {
				VMnfreez--;
				VMnifrez--;
				h->type &= ~MT_FREEZE;
			}
		}
		TEST(fun, 1);
		return (char HUGE *) &h->mru_link;
	}
	if (h->type & MT_MEMORY) {
		if (!(h->type & MT_FREEZE) && (h->mru_link.i || frez))
			vm_unlink(h);
		if (!frez && (h->mru_link.i || h->type & MT_FREEZE))
			vm_link(h, i);
	} else {	/* MT_DISK */
		d_compact();

		/* d_compact may call d_compact1, dfree_new, rmalloc and
		   compact, any of which can change the address, so the header
		   pointer must be recomputed before paging in */
		h = (VMHEAD HUGE *) &VMbase[p.p.b][p.p.l];

		h = page_in(h, p.i);
		if (!h)
			return NULL;
		if (!frez)
			vm_link(h, i);
	}
	if (dirty)
		h->type |= MT_DIRTY;

	/* !a != !b is a logical XOR: true only when the freeze state changes */
	if (!frez != !(h->type & MT_FREEZE)) {
		if (frez) {
			VMnfreez++;
			h->type |= MT_FREEZE;
		} else {
			VMnfreez--;
			h->type &= ~MT_FREEZE;
		}
	}

	TEST(fun, 2);
	return (char HUGE *) h->mem;
}
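
The split handle p is the key to this example: p.i carries the whole value, while p.p.b and p.p.l select a base table and an entry within it. A plausible sketch of the union follows, with illustrative field widths; the real widths are whatever VMBASESIZ and VMLEGSIZ require:

/* hypothetical VMPTR layout, inferred from the accesses above */
typedef union {
	unsigned long i;        /* the whole handle; 0 acts as NULL */
	struct {
		unsigned short l;   /* "leg" index within VMbase[b] */
		unsigned short b;   /* "base" index into the VMbase[] table */
	} p;
} VMPTR;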
Example #3
INT32 vmm_fmap_flush_seek_callback(struct fsio_event_source *iosrc, INT32 ioret)
{
	struct vmm_fmap_descriptor *fm = MEMREGNODE2FMAPDESC(rb_search(&vmm.fmap_descriptors, iosrc->id));
	struct pm_task *task = tsk_get(fm->creating_task);
	struct vmm_memory_region *mreg;
	struct vmm_page_directory *pdir;
	struct vmm_page_table *tbl = NULL;
	struct vmm_pman_assigned_record *assigned = NULL;

	if(task == NULL) return 0;

	/* dereference the task only after the NULL check above */
	pdir = task->vmm_info.page_directory;

	mreg = vmm_region_get_bydesc(task, (struct vmm_descriptor*)fm);

	if(mreg == NULL) return 0;

	/* index the page directory with the DIR macro; the TAB macro selects
	   an entry within a single page table (see its uses on tbl below) */
	tbl = (struct vmm_page_table *)PHYSICAL2LINEAR(PG_ADDRESS(pdir->tables[PM_LINEAR_TO_DIR(fm->release_addr)].b));

	if(ioret == IO_RET_OK)
	{
		/* If flushing, save the assigned record */
		if(fm->io_finished.params[0] == 1)
		{
			assigned = vmm_get_assigned(task->id, fm->release_addr);
			fm->io_finished.params[1] = *((UINT32*)assigned);
		}

		/*
		Map the page into PMAN space (the assigned record is preserved
		in fm->io_finished.params[1]).
		*/
		page_in(PMAN_TASK, (ADDR)PHYSICAL2LINEAR(PG_ADDRESS(tbl->pages[PM_LINEAR_TO_TAB(fm->release_addr)].entry.phy_page_addr)), 
                (ADDR)tbl->pages[PM_LINEAR_TO_TAB(fm->release_addr)].entry.phy_page_addr, 2, PGATT_WRITE_ENA);

		/* Write and set callback to continue. */
		fm->io_finished.callback = vmm_fmap_flush_callback;
		io_begin_write(&fm->iosrc, 0x1000, (ADDR)PHYSICAL2LINEAR(PG_ADDRESS(tbl->pages[PM_LINEAR_TO_TAB(fm->release_addr)].entry.phy_page_addr)));
		
		return 1;
	}
	else
	{
		/* Remove IOLOCK */
		vmm_set_flags(fm->creating_task, (ADDR)PHYSICAL2LINEAR(PG_ADDRESS(tbl->pages[PM_LINEAR_TO_TAB(fm->release_addr)].entry.phy_page_addr)), TRUE, TAKEN_EFLAG_IOLOCK, FALSE);

		/* Failed */
		if(task->command_inf.callback != NULL) 
			task->command_inf.callback(task, ioret, 0);
	}
	return 0;
}
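
MEMREGNODE2FMAPDESC is not shown in this example; from its use it recovers the descriptor that embeds the red-black tree node returned by rb_search, which is conventionally a container_of-style computation. A sketch under that assumption, where the embedded field name tree_node is hypothetical:

#include <stddef.h>

/* hypothetical definition: step back from the embedded tree node to the
   enclosing vmm_fmap_descriptor (field name assumed, not from the source) */
#define MEMREGNODE2FMAPDESC(n) \
	((struct vmm_fmap_descriptor *) \
	 ((char *)(n) - offsetof(struct vmm_fmap_descriptor, tree_node)))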
Example #4
/* a basic put() operation. Note the avoidance of odd word boundaries
   and of transfer sizes beyond what the hardware can deal with */
static inline int aplib_put(struct aplib_putget *put)
{
	int error;
	struct aplib_struct *aplib = current->aplib;

	error = verify_area(VERIFY_WRITE,put,sizeof(*put));
	if (error) return error;

	if (put->cid >= aplib->numcells) 
		return -EINVAL;

	do {
		int n;

		if (put->size && (((unsigned)put->src_addr) & 4)) {
			n = 1;
		} else if (put->size > MAX_PUT_SIZE) {
			n = MAX_PUT_SIZE;
		} else {
			n = put->size;
		}

		put->size -= n;

		page_in((char *)put->src_addr,n<<2);

		_putget(MSC_PUT_QUEUE,
			aplib->rel_cid[put->cid],
			put->src_addr,
			n,
			put->dest_addr,
			put->size?0:put->dest_flag,
			put->size?0:put->src_flag);

		put->dest_addr += n;
		put->src_addr += n;
	} while (put->size);

	if (put->ack) {
		aplib->ack_request++;
		_putget(MSC_GET_QUEUE,
			aplib->rel_cid[put->cid], 
			0, 0, 0,
			&aplib->ack_flag, 0);
	}

	return 0;
}
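
The chunk-size selection at the top of the loop is easy to misread: a single word is sent first whenever the source sits on an odd word boundary, so that later transfers start 8-byte aligned and never exceed the hardware limit. The same rule isolated as a helper; a sketch, not part of the original driver:

/* returns how many words to transfer next: 1 to step off an odd word
   boundary, otherwise up to the hardware limit MAX_PUT_SIZE */
static inline int aplib_chunk_words(unsigned src_addr, int remaining)
{
	if (remaining && (src_addr & 4))
		return 1;               /* realign to an 8-byte boundary */
	if (remaining > MAX_PUT_SIZE)
		return MAX_PUT_SIZE;    /* hardware transfer-size limit */
	return remaining;
}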
Example #5
void swap_ws(kern_obj * ws, int wsi)
{
	kern_obj * old;
	uint i;
	
	old = cur_ws[wsi];
	cur_ws[wsi] = ws;
	
	/* unmap the old working set page by page, invalidating each TLB entry */
	while (old != NULL) {
		for (i = 0; i < old->u.ws.size; i += 0x1000) {
			set_pt_entry(old->u.ws.vbase + i, 0, old->u.ws.flags);
			INVLPG(old->u.ws.vbase + i);
		}
		old = old->u.ws.next;
	}

	/* map in every region of the new working set */
	while (ws != NULL) {
		page_in(ws->u.ws.pbase, ws->u.ws.vbase, ws->u.ws.size, ws->u.ws.flags);		
		ws = ws->u.ws.next;
	}
}
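
The u.ws accesses imply a working-set payload roughly like the following; the names are inferred from the code above and the types are assumptions:

/* hypothetical working-set payload behind kern_obj's u.ws member */
struct ws_region {
	uint pbase;             /* physical base of the region */
	uint vbase;             /* virtual base it is mapped at */
	uint size;              /* region size in bytes, page aligned */
	uint flags;             /* page-table attribute bits */
	kern_obj *next;         /* next region in this working set */
};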
Example #6
/* a basic get() operation */
static inline int aplib_get(struct aplib_putget *get)
{
	struct aplib_struct *aplib = current->aplib;
	int error = verify_area(VERIFY_WRITE,get,sizeof(*get));
	if (error) return error;

	if (get->cid >= aplib->numcells) 
		return -EINVAL;

	do {
		int n;

		if (get->size && (((unsigned)get->src_addr) & 4)) {
			n = 1;
		} else if (get->size > MAX_PUT_SIZE) {
			n = MAX_PUT_SIZE;
		} else {
			n = get->size;
		}

		get->size -= n;

		page_in((char *)get->dest_addr,n<<2);

		_putget(MSC_GET_QUEUE,
			aplib->rel_cid[get->cid],
			get->src_addr,
			n,
			get->dest_addr,
			get->size?0:get->dest_flag,
			get->size?0:get->src_flag);
		
		get->dest_addr += n;
		get->src_addr += n;
	} while (get->size);

	return 0;
}
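
put and get drive the same descriptor. From the two routines, struct aplib_putget plausibly carries at least the fields below; this is a sketch, and the real header may differ in order and width:

/* hypothetical layout of struct aplib_putget, inferred from usage */
struct aplib_putget {
	unsigned cid;        /* logical destination cell id */
	int      size;       /* remaining transfer length in words */
	unsigned src_addr;   /* source address, advanced word by word */
	unsigned dest_addr;  /* destination address */
	unsigned src_flag;   /* completion flag for the source side */
	unsigned dest_flag;  /* completion flag for the destination side */
	int      ack;        /* put() only: request an acknowledgement */
};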
Example #7
int init_reloc()
{
	UINT32 i, physical, physicaldest;
	int left, size;
	char *dest, *src;
	struct ifs2_header *h = NULL;
	
	/* 
		Before we initialize the VMM module we have to copy the init-files
		image to PMAN_INIT_RELOC_PHYS in physical memory, so the pool page
		tables won't overlap it.
	*/

    // check for an IFS2 header
	page_in(PMAN_TASK, (ADDR) (PMAN_STAGE2_MAPZONE_SOURCE + SARTORIS_PROCBASE_LINEAR), (ADDR)(PMAN_SARTORIS_INIT_PHYS + PMAN_SIZE), 2, PGATT_WRITE_ENA);
	h = (struct ifs2_header*)PMAN_STAGE2_MAPZONE_SOURCE;
	
	if(h->ifs2magic != IFS2MAGIC)
		pman_print_and_stop("Init Fs 1 format is no longer supported by PMAN. PMAN_SIZE %x ", PMAN_SIZE);

    if( h->size % PAGE_SIZE == 0)
    {
        size = h->size;
    }
    else
    {
        size = (h->size - (h->size % PAGE_SIZE)) + PAGE_SIZE;
    }

    pman_print("Relocating Image size: %i, phystart: %x, phydest: %x.", size, PMAN_SARTORIS_INIT_PHYS + PMAN_SIZE + size - 0x1000, PMAN_INIT_RELOC_PHYS + size - 0x1000);
	
	// ifs2 header found; check for LZW and decompress if necessary
	if(h->flags & IFS2_FLAG_LZW)
	{
		/* NOT IMPLEMENTED: I won't implement it until I test the rest of PMAN */
	}
	else
	{        
        // uncompressed IFS
		left = size;
        /* Sartoris left the image at position PMAN_SARTORIS_INIT_PHYS
		and we want it at PMAN_INIT_RELOC_PHYS, which is greater. We will
		copy 4 KB at a time starting at the last page and moving down, so
		PMAN_INIT_RELOC_PHYS - PMAN_SARTORIS_INIT_PHYS has to be greater
		than or equal to 0x1000. */
		physical = PMAN_SARTORIS_INIT_PHYS + PMAN_SIZE + size - 0x1000;	// start copy at the last page
		physicaldest = PMAN_INIT_RELOC_PHYS + size - 0x1000;				// destination
		
		dest = (char*)PMAN_STAGE2_MAPZONE_DEST;
		src = (char*)PMAN_STAGE2_MAPZONE_SOURCE;
			
		do
		{
			// map source
			page_in(PMAN_TASK, (ADDR) (PMAN_STAGE2_MAPZONE_SOURCE + SARTORIS_PROCBASE_LINEAR), (ADDR) physical, 2, PGATT_WRITE_ENA);
			// map dest
			page_in(PMAN_TASK, (ADDR) (PMAN_STAGE2_MAPZONE_DEST + SARTORIS_PROCBASE_LINEAR), (ADDR) physicaldest, 2, PGATT_WRITE_ENA);

			// copy bytes
			i = 0x1000;
			while(i > 0){dest[i-1] = src[i-1]; i--; }

			left -= 0x1000;
			physicaldest -= PAGE_SIZE;
			physical -= PAGE_SIZE;
			
		}while(left > 0);
        	
	}
    
    return size;
}
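
The backward, page-at-a-time copy is what makes the overlapping relocation safe, and it depends on the gap stated in the comment inside the loop. That invariant can be checked at compile time; the negative-array trick below is illustrative, not part of PMAN:

/* hypothetical compile-time check: the backward page copy is only safe
   if the destination lies at least one page above the source image */
#define RELOC_GAP (PMAN_INIT_RELOC_PHYS - PMAN_SARTORIS_INIT_PHYS)
typedef char reloc_gap_check[(RELOC_GAP >= 0x1000) ? 1 : -1];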
Example #8
/* note the 8 byte alignment fix for the MSC bug */
static inline int aplib_send(struct aplib_send *send)
{
	struct aplib_struct *aplib = current->aplib;
	int wordSize;
	int byteAlign, byteFix;
	u_long src;
	u_long info1, info2;
	volatile unsigned *q = (volatile unsigned *)MSC_SEND_QUEUE_S;
	extern long system_recv_flag;
	int error;
	unsigned flags, rcell;
	unsigned flag_ptr;
	unsigned alignFix = 0;	/* MSC alignment-fix bit, OR-ed into info1 below */

	error = verify_area(VERIFY_WRITE,send,sizeof(*send));
	if (error) return error;

	if (send->cid >= aplib->numcells) 
		return -EINVAL;

	if (send->tag == RBUF_SYSTEM || send->tag == RBUF_BIGSEND)
		return -EINVAL;

	error = verify_area(VERIFY_READ,(char *)send->src_addr,send->size);
	if (error) return error;

	page_in((char *)send->src_addr,send->size);

	rcell = aplib->rel_cid[send->cid];

	byteAlign = send->src_addr & 0x3;
	byteFix = send->size & 0x3;

	wordSize = (send->size + byteAlign + 3) >> 2;

	src = send->src_addr & ~3;

	/* this handles the MSC alignment bug; the fix bit can only be OR-ed
	   into info1 once info1 has been assembled below, so it is kept in
	   alignFix until then */
	if (wordSize > 1 && (src & 4)) {
		alignFix = 0x80000000;
		src -= 4;
		wordSize++;
	}

	info1 = alignFix | (aplib->cid<<22) | (byteFix<<20) | wordSize;
	info2 = (send->tag<<28) | (byteAlign<<26) | 
		(send->type<<10) | aplib->tid;
	flag_ptr = (unsigned)&send->flag;

	if (send->size > SMALL_SEND_THRESHOLD) {
		send->info1 = info1;
		send->info2 = info2;
		send->size = wordSize;
		send->src_addr = src;
		send->flag_addr = (unsigned)&send->flag;
		flag_ptr = 0;

		wordSize = sizeof(*send)>>2;
		src = (unsigned)send;
		
		info1 = (aplib->cid<<22) | wordSize;
		info2 = (RBUF_BIGSEND<<28) | aplib->tid;
	}
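
The two descriptor words are packed from shifted fields, and isolating the packing makes the layout easier to read. A sketch with the field positions taken from the shifts above; the field widths are assumptions, not taken from MSC documentation:

/* info1: alignment-fix bit | cid<<22 | byteFix<<20 | word count */
static inline unsigned aplib_info1(unsigned cid, unsigned byteFix,
				   unsigned wordSize, unsigned alignFix)
{
	return alignFix | (cid << 22) | (byteFix << 20) | wordSize;
}

/* info2: tag<<28 | byteAlign<<26 | type<<10 | sender tid */
static inline unsigned aplib_info2(unsigned tag, unsigned byteAlign,
				   unsigned type, unsigned tid)
{
	return (tag << 28) | (byteAlign << 26) | (type << 10) | tid;
}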
Example #9
/* This function will pm_page_in loadable segments. If use_fsize is non-zero, it will only
load as much as there is in the file; otherwise it will load the whole segment.
The function returns the amount of bytes allocated for pages (not tables); the return value
will be a multiple of the page size and will contain the maximum address for the task. */
UINT32 put_pages(struct pm_task *task, BOOL use_fsize, BOOL low_mem, BOOL lib)
{
	struct vmm_page_directory *pdir = task->vmm_info.page_directory;
	UINT32 i = 0, j;
	UINT32 allocated = 0;
	struct Elf32_Phdr *prog_header = NULL;
	UINT32 page_addr, pagecount, foffset;
	ADDR pg_tbl, pg, pgp;
    UINT32 max_addr = 0;
	
	/* Put Pages in for loadable segments */
	while( i < task->loader_inf.elf_header.e_phnum)
	{
		prog_header = (struct Elf32_Phdr*)&task->loader_inf.elf_pheaders[i * task->loader_inf.elf_header.e_phentsize];

		if(prog_header->p_type == PT_LOAD || prog_header->p_type == PT_DYNAMIC)
		{
            if((UINT32)prog_header->p_vaddr + (UINT32)prog_header->p_memsz > max_addr)
                   max_addr = (UINT32)prog_header->p_vaddr + (UINT32)prog_header->p_memsz;

			page_addr = (unsigned int)prog_header->p_vaddr - ((unsigned int)prog_header->p_vaddr & 0x00000FFF);
			pagecount = 0;
			foffset = prog_header->p_offset;

			page_addr += SARTORIS_PROCBASE_LINEAR;	// our elf files will begin at 0, so add the base

			if(use_fsize)
				pagecount = (UINT32)(prog_header->p_filesz >> 12) + (((prog_header->p_filesz & 0x00000FFF) == 0)? 0 : 1);
			else
				pagecount = (UINT32)(prog_header->p_memsz >> 12) + (((prog_header->p_memsz & 0x00000FFF) == 0)? 0 : 1);
			
			/* Load as many pages as needed. */
			for(j = 0; j < pagecount; j++)
			{
				// if page table is not present, add it
				if(pdir->tables[PM_LINEAR_TO_DIR(page_addr)].ia32entry.present == 0)
				{
					/* Insert a page for the page table */
					// low mem is only considered for pages, not here
					// because it does not make any difference 
					pg_tbl =  (ADDR)vmm_get_tblpage(task->id, page_addr);

					if(page_in(task->id, (ADDR)page_addr, (ADDR)LINEAR2PHYSICAL(pg_tbl), 1, PGATT_WRITE_ENA) != SUCCESS)
						pman_print_and_stop("Failed to page_in for table laddress: %x, physical: %x ", page_addr, pg);

					task->vmm_info.page_count++;
				}
                
                // is the page present?
                struct vmm_page_table *tbl = (struct vmm_page_table*)PHYSICAL2LINEAR(PG_ADDRESS(pdir->tables[PM_LINEAR_TO_DIR(page_addr)].b));
                
                UINT32 ass_bck = 0;
                if(tbl->pages[PM_LINEAR_TO_TAB(page_addr)].entry.ia32entry.present == 1)
                {
                    ass_bck = vmm_temp_pgmap(task, (ADDR)page_addr);
                    pg = (ADDR)PG_ADDRESS(tbl->pages[PM_LINEAR_TO_TAB(page_addr)].entry.phy_page_addr);
                }
                else
                {
                    pg = (ADDR)LINEAR2PHYSICAL(vmm_get_page_ex(task->id, page_addr, low_mem));
                }
                pgp = (ADDR)PHYSICAL2LINEAR(pg);

				/* Set page as service (won't be paged out) */
				vmm_set_flags(task->id, pgp, TRUE, TAKEN_EFLAG_SERVICE, TRUE);

                if(lib && (prog_header->p_flags & PF_EXEC))
                    vmm_set_flags(task->id, pgp, FALSE, TAKEN_PG_FLAG_LIBEXE, TRUE);
                
				if(foffset < curr_header->image_size)
				{
					pminit_elf_seek(&task->io_event_src, foffset);
                    if(j == 0) 
                    {
					    pminit_elf_read(&task->io_event_src, PAGE_SIZE - ((UINT32)prog_header->p_vaddr & 0x00000FFF), (ADDR)((UINT32)pgp + ((UINT32)prog_header->p_vaddr & 0x00000FFF)));  // add page displacement
                    }
                    else
                    {
                        pminit_elf_read(&task->io_event_src, PAGE_SIZE, pgp);
                    }                    
				}

                if(tbl->pages[PM_LINEAR_TO_TAB(page_addr)].entry.ia32entry.present == 0)
                {
				    if(page_in(task->id, (ADDR)page_addr, (ADDR)pg, 2, PGATT_WRITE_ENA) != SUCCESS)
					    pman_print_and_stop("Failed to page_in for laddress: %x, physical: %x ", page_addr, pg);
				
                    /* unmap the page from pmanager, so its assigned record is used. */
                    vmm_unmap_page(task->id, page_addr);
                }
                else
                {
                    vmm_restore_temp_pgmap(task, (ADDR)page_addr, ass_bck);
                }

				task->vmm_info.page_count++;
			
				allocated += PAGE_SIZE;
				page_addr += PAGE_SIZE;
				foffset += PAGE_SIZE;
			}
		}
		i++;
	}
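
The pagecount expressions round a byte count up to whole pages. The same computation in its usual closed form, assuming PAGE_SIZE == 0x1000 as the >> 12 shifts imply:

/* ceil(bytes / PAGE_SIZE); equivalent to the shift-and-remainder form above */
#define PAGES_FOR(bytes) (((UINT32)(bytes) + PAGE_SIZE - 1) / PAGE_SIZE)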