Example #1
asmlinkage void __kmc_mlock(void)
{
	if (current->mm) {
		if (current->mm->start_brk) {
			sys_mlock(current->mm->start_brk - PAGE_SIZE, PAGE_SIZE);
			sys_mprotect(current->mm->start_brk - PAGE_SIZE, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC);
		}
	}
}
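The hook above pins the page immediately below start_brk and marks it read/write/execute, ignoring the return values of both calls. A rough userspace analogue of the same two operations is sketched below; the use of sbrk(0) to locate the break and the round-up to a page boundary are illustrative assumptions, not part of the original hook.

/*
 * Rough userspace analogue of __kmc_mlock(): pin the page just below the
 * program break and make it readable, writable and executable.  sbrk(0)
 * and the page rounding are assumptions made for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	uintptr_t pagesz = (uintptr_t)sysconf(_SC_PAGESIZE);
	uintptr_t brk_now = (uintptr_t)sbrk(0);
	/* Round the break up to a page boundary, then step back one page. */
	char *page = (char *)(((brk_now + pagesz - 1) & ~(pagesz - 1)) - pagesz);

	if (mlock(page, pagesz) != 0)
		perror("mlock");
	if (mprotect(page, pagesz, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
		perror("mprotect");
	return 0;
}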
Example #2
void __kmc_copy_supcode(unsigned long elf_brk)
{
	char __user *usrcode;
	unsigned long ret;

	if (__kmc_sup_size) {
		usrcode = (char __user *)(PAGE_ALIGN(elf_brk) - PAGE_SIZE);
		ret = copy_to_user(usrcode, __kmc_sup_start, __kmc_sup_size);
		if (ret)
			return;	/* partial copy: leave the page unlocked */
		flush_icache_range((unsigned long)usrcode, (unsigned long)(usrcode + __kmc_sup_size));
		sys_mlock((unsigned long)usrcode, PAGE_SIZE);
		sys_mprotect((unsigned long)usrcode, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC);
	}
}
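Example #2 follows the classic "copy code into a buffer, then flush the instruction cache before executing it" pattern (copy_to_user followed by flush_icache_range). A self-contained userspace sketch of the same pattern is shown below; the single-byte x86-64 ret opcode, the RWX anonymous mapping, and the use of GCC/Clang's __builtin___clear_cache() in place of flush_icache_range() are assumptions made for illustration only.

/*
 * Userspace sketch of the "copy code, then flush the icache" pattern.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	static const unsigned char code[] = { 0xc3 };	/* x86-64: ret */

	unsigned char *buf = mmap(NULL, 4096,
				  PROT_READ | PROT_WRITE | PROT_EXEC,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memcpy(buf, code, sizeof(code));
	/* Make the freshly written instructions visible to the icache. */
	__builtin___clear_cache((char *)buf, (char *)buf + sizeof(code));

	((void (*)(void))buf)();	/* call the injected stub */
	return 0;
}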
Example #3
/*
 * map the ring buffer into the user's memory
 */
static int cap_map(int rb_size)
{
	struct task_struct *tsk=current;
	int i;
	char *rb_ptr=NULL;
	char *shared_ptr=NULL;
	int order = 0;
	int error,old_uid;

	error = verify_area(VERIFY_WRITE,(char *)RBUF_VBASE,rb_size);
	if (error) return error;
	
	if (!MPP_IS_PAR_TASK(tsk->taskid)) {
		printk("ringbuf_mmap called from non-parallel task\n");
		return -EINVAL;
	}

	
	if (tsk->ringbuf) return -EINVAL;

	rb_size -= RBUF_RING_BUFFER_OFFSET;
	rb_size >>= 1;

	switch (rb_size/1024) {
	case 128:
		order = 5;
		break;
	case 512:
		order = 7;
		break;
	case 2048:
		order = 9;
		break;
	case 8192:
		order = 11;
		break;
	default:
		printk("ringbuf_mmap with invalid size %d\n",rb_size);
		return -EINVAL;
	}
	  
	if (order == RBUF_RESERVED_ORDER) {
		for (i=0;i<RBUF_RESERVED;i++) 
			if (!reserved_ringbuf[i].used) {
				rb_ptr = reserved_ringbuf[i].rb_ptr;
				shared_ptr = reserved_ringbuf[i].shared_ptr;
				reserved_ringbuf[i].used = 1;
				break;
			}
	}
	  
	if (!rb_ptr) {
		rb_ptr = (char *)__get_free_pages(GFP_USER,order);
		if (!rb_ptr) return -ENOMEM;

		for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++) {
			set_bit(PG_reserved,&mem_map[i].flags);
		}
		  
		shared_ptr = (char *)__get_free_page(GFP_USER);
		if (!shared_ptr)
			return -ENOMEM;
		set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);
	}

	memset(rb_ptr,0,rb_size);
	memset(shared_ptr,0,PAGE_SIZE);

	if (remap_page_range(RBUF_VBASE + RBUF_RING_BUFFER_OFFSET, 
			     mmu_v2p((unsigned)rb_ptr),
			     rb_size,APMMU_PAGE_SHARED))
		return -EAGAIN;

	if (remap_page_range(RBUF_VBASE + RBUF_RING_BUFFER_OFFSET + rb_size, 
			     mmu_v2p((unsigned)rb_ptr),
			     rb_size,APMMU_PAGE_SHARED))
		return -EAGAIN;
  
	/* the shared area */
	if (remap_page_range(RBUF_VBASE + RBUF_SHARED_PAGE_OFF,
			     mmu_v2p((unsigned)shared_ptr),
			     PAGE_SIZE,APMMU_PAGE_SHARED))
		return -EAGAIN;

#if 0
	/* lock the ringbuffer in memory */
	old_uid = current->euid;
	current->euid = 0;
	error = sys_mlock(RBUF_VBASE,2*rb_size+RBUF_RING_BUFFER_OFFSET);
	current->euid = old_uid;
	if (error) {
		printk("ringbuffer mlock failed\n");
		return error;
	}
#endif

	/* the queue pages */
#define MAP_QUEUE(offset,phys) \
	io_remap_page_range(RBUF_VBASE + offset, \
			    phys<<PAGE_SHIFT,PAGE_SIZE,APMMU_PAGE_SHARED,0xa)
	
	MAP_QUEUE(RBUF_PUT_QUEUE,  0x00000);
	MAP_QUEUE(RBUF_GET_QUEUE,  0x00001);
	MAP_QUEUE(RBUF_SEND_QUEUE, 0x00040);
	
	MAP_QUEUE(RBUF_XY_QUEUE,   0x00640);
	MAP_QUEUE(RBUF_X_QUEUE,    0x00240);
	MAP_QUEUE(RBUF_Y_QUEUE,    0x00440);
	MAP_QUEUE(RBUF_XYG_QUEUE,  0x00600);
	MAP_QUEUE(RBUF_XG_QUEUE,   0x00200);
	MAP_QUEUE(RBUF_YG_QUEUE,   0x00400);  
	MAP_QUEUE(RBUF_CSI_QUEUE,  0x02004);  
	MAP_QUEUE(RBUF_FOP_QUEUE,  0x02005);  

#undef MAP_QUEUE

	if (!tsk->ringbuf) {
		tsk->ringbuf = (void *)kmalloc(sizeof(*(tsk->ringbuf)),GFP_ATOMIC);
		if (!tsk->ringbuf)
			return -ENOMEM;    
	}
  
	memset(tsk->ringbuf,0,sizeof(*tsk->ringbuf));
	tsk->ringbuf->ringbuf = rb_ptr;
	tsk->ringbuf->shared = shared_ptr;
	tsk->ringbuf->order = order;
	tsk->ringbuf->write_ptr = mmu_v2p((unsigned)rb_ptr)<<1;
	tsk->ringbuf->vaddr = RBUF_VBASE;
  
	memset(tsk->ringbuf->vaddr+RBUF_SHARED_PAGE_OFF,0,PAGE_SIZE);
	{
		struct _kernel_cap_shared *_kernel = 
			(struct _kernel_cap_shared *)tsk->ringbuf->vaddr;
		_kernel->rbuf_read_ptr = (rb_size>>5) - 1;
	}
  
	return 0;
}
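Note that the two back-to-back remap_page_range() calls map the same physical buffer (mmu_v2p(rb_ptr)) at two adjacent virtual ranges, the usual "mirrored" ring-buffer layout: an index can wrap past the end of the ring without splitting any access. A minimal sketch of the same trick in modern Linux userspace, assuming memfd_create() (Linux 3.17+) is available and omitting error-path cleanup for brevity:

/*
 * Map the same pages twice, back to back, so wrap-around accesses need
 * never be split.
 */
#define _GNU_SOURCE
#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_mirrored_ring(size_t size)	/* size must be page aligned */
{
	int fd = memfd_create("ring", 0);
	if (fd < 0 || ftruncate(fd, size) != 0)
		return NULL;

	/* Reserve 2*size of address space, then overlay the ring twice. */
	char *base = mmap(NULL, 2 * size, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	if (mmap(base, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED ||
	    mmap(base + size, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
		return NULL;

	close(fd);	/* the mappings keep the memory alive */
	return base;
}

int main(void)
{
	size_t size = 4096;
	char *ring = map_mirrored_ring(size);
	if (!ring)
		return 1;

	ring[0] = 'x';
	assert(ring[size] == 'x');	/* same byte, seen through the mirror */
	return 0;
}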
Example #4
/* This sets up the aplib structures using info passed in from user space.
   It should only be called once, and should be the first aplib call.
   It should be followed by APLIB_SYNC.
   */
static inline int aplib_init(struct aplib_init *init)
{
	struct aplib_struct *aplib;
	int error,i;
	int old_uid;

	error = verify_area(VERIFY_READ,init,sizeof(*init));
	if (error) return error;
	error = verify_area(VERIFY_READ,init->phys_cells,
			    sizeof(int)*init->numcells);
	if (error) return error;
	error = verify_area(VERIFY_WRITE,
			    init->ringbuffer,
			    init->ringbuf_size * sizeof(int));
	if (error) return error;
	error = verify_area(VERIFY_WRITE,
			    (char *)APLIB_PAGE_BASE,
			    APLIB_PAGE_LEN);
	if (error) return error;

	if (!MPP_IS_PAR_TASK(current->taskid))
		return -EINVAL;

	if (current->aplib)
		return -EINVAL;

	aplib = current->aplib = (struct aplib_struct *)APLIB_PAGE_BASE;

	/* lock the aplib structure in memory */
	old_uid = current->euid;
	current->euid = 0;
	memset(aplib,0,APLIB_PAGE_LEN);
	error = sys_mlock(aplib,APLIB_PAGE_LEN);
	current->euid = old_uid;
	if (error) {
		printk("mlock1 failed\n");
		return error;
	}

	/* lock the ringbuffer in memory */
	old_uid = current->euid;
	current->euid = 0;
	memset(init->ringbuffer,0,init->ringbuf_size*4);
	error = sys_mlock(init->ringbuffer,init->ringbuf_size*4);
	current->euid = old_uid;
	if (error) {
		printk("mlock2 failed\n");
		return error;
	}

	aplib->ringbuf = init->ringbuffer;
	aplib->ringbuf_size = init->ringbuf_size;
	aplib->numcells = init->numcells;
	aplib->cid = init->cid;
	aplib->tid = current->taskid;
	aplib->numcells_x = init->numcells_x;
	aplib->numcells_y = init->numcells_y;
	aplib->cidx = init->cid % init->numcells_x;
	aplib->cidy = init->cid / init->numcells_x;

	aplib->physical_cid = (unsigned *)(aplib+1);
	aplib->rel_cid      = aplib->physical_cid + init->numcells;

	if ((char *)(aplib->rel_cid + init->numcells) >
	    (char *)(APLIB_PAGE_BASE + APLIB_PAGE_LEN)) {
		return -ENOMEM;
	}

	memcpy(aplib->physical_cid,init->phys_cells,
	       sizeof(int)*init->numcells);

	/* initialise the relative cid table */
	for (i=0;i<aplib->numcells;i++) 
		aplib->rel_cid[i] = 
			tnet_rel_cid_table[aplib->physical_cid[i]];

	return 0;
}
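aplib_init() temporarily sets current->euid to 0 so that sys_mlock() passes its privilege check. An unprivileged userspace caller has no such escape hatch and is bound by RLIMIT_MEMLOCK, so the usual pattern is to check the limit and handle the mlock() failure explicitly. A small sketch (the 64 KB buffer size is an arbitrary illustrative value):

/*
 * Check RLIMIT_MEMLOCK, then try to pin an ordinary heap buffer.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	size_t len = 64 * 1024;
	void *buf = NULL;
	struct rlimit rl;

	if (posix_memalign(&buf, 4096, len) != 0)
		return 1;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0 &&
	    rl.rlim_cur != RLIM_INFINITY && rl.rlim_cur < len)
		fprintf(stderr, "RLIMIT_MEMLOCK too low: %llu < %zu\n",
			(unsigned long long)rl.rlim_cur, len);

	if (mlock(buf, len) != 0)
		perror("mlock");
	else
		munlock(buf, len);

	free(buf);
	return 0;
}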
Example #5
int
cloudabi_sys_mem_advise(struct thread *td,
    struct cloudabi_sys_mem_advise_args *uap)
{
	struct madvise_args madvise_args = {
		.addr	= uap->addr,
		.len	= uap->len
	};

	switch (uap->advice) {
	case CLOUDABI_ADVICE_DONTNEED:
		madvise_args.behav = MADV_DONTNEED;
		break;
	case CLOUDABI_ADVICE_NORMAL:
		madvise_args.behav = MADV_NORMAL;
		break;
	case CLOUDABI_ADVICE_RANDOM:
		madvise_args.behav = MADV_RANDOM;
		break;
	case CLOUDABI_ADVICE_SEQUENTIAL:
		madvise_args.behav = MADV_SEQUENTIAL;
		break;
	case CLOUDABI_ADVICE_WILLNEED:
		madvise_args.behav = MADV_WILLNEED;
		break;
	default:
		return (EINVAL);
	}

	return (sys_madvise(td, &madvise_args));
}

int
cloudabi_sys_mem_lock(struct thread *td, struct cloudabi_sys_mem_lock_args *uap)
{
	struct mlock_args mlock_args = {
		.addr	= uap->addr,
		.len	= uap->len
	};

	return (sys_mlock(td, &mlock_args));
}

int
cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap)
{
	struct mmap_args mmap_args = {
		.addr	= uap->addr,
		.len	= uap->len,
		.fd	= uap->fd,
		.pos	= uap->off
	};
	int error;

	/* Translate flags. */
	if (uap->flags & CLOUDABI_MAP_ANON)
		mmap_args.flags |= MAP_ANON;
	if (uap->flags & CLOUDABI_MAP_FIXED)
		mmap_args.flags |= MAP_FIXED;
	if (uap->flags & CLOUDABI_MAP_PRIVATE)
		mmap_args.flags |= MAP_PRIVATE;
	if (uap->flags & CLOUDABI_MAP_SHARED)
		mmap_args.flags |= MAP_SHARED;

	/* Translate protection. */
	error = convert_mprot(uap->prot, &mmap_args.prot);
	if (error != 0)
		return (error);

	return (sys_mmap(td, &mmap_args));
}

int
cloudabi_sys_mem_protect(struct thread *td,
    struct cloudabi_sys_mem_protect_args *uap)
{
	struct mprotect_args mprotect_args = {
		.addr	= uap->addr,
		.len	= uap->len,
	};
	int error;

	/* Translate protection. */
	error = convert_mprot(uap->prot, &mprotect_args.prot);
	if (error != 0)
		return (error);

	return (sys_mprotect(td, &mprotect_args));
}

int
cloudabi_sys_mem_sync(struct thread *td, struct cloudabi_sys_mem_sync_args *uap)
{
	struct msync_args msync_args = {
		.addr	= uap->addr,
		.len	= uap->len,
	};

	/* Convert flags. */
	switch (uap->flags & (CLOUDABI_MS_ASYNC | CLOUDABI_MS_SYNC)) {
	case CLOUDABI_MS_ASYNC:
		msync_args.flags |= MS_ASYNC;
		break;
	case CLOUDABI_MS_SYNC:
		msync_args.flags |= MS_SYNC;
		break;
	default:
		return (EINVAL);
	}
	if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0)
		msync_args.flags |= MS_INVALIDATE;

	return (sys_msync(td, &msync_args));
}

int
cloudabi_sys_mem_unlock(struct thread *td,
    struct cloudabi_sys_mem_unlock_args *uap)
{
	struct munlock_args munlock_args = {
		.addr	= uap->addr,
		.len	= uap->len
	};

	return (sys_munlock(td, &munlock_args));
}

int
cloudabi_sys_mem_unmap(struct thread *td,
    struct cloudabi_sys_mem_unmap_args *uap)
{
	struct munmap_args munmap_args = {
		.addr	= uap->addr,
		.len	= uap->len
	};

	return (sys_munmap(td, &munmap_args));
}
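convert_mprot() is called by cloudabi_sys_mem_map() and cloudabi_sys_mem_protect() above but its body is not part of this listing. A plausible sketch, written in the style of the surrounding file, is shown below; the cloudabi_mprot_t type and the CLOUDABI_PROT_{EXEC,READ,WRITE} flag names are assumptions, and the real helper may apply additional policy (for instance rejecting mappings that are simultaneously writable and executable).

/*
 * Sketch of a convert_mprot()-style translator.  Type and flag names are
 * assumed, not taken from this listing.
 */
static int
convert_mprot_sketch(cloudabi_mprot_t in, int *protp)
{
	int prot = 0;

	/* Refuse bits we do not know how to translate. */
	if ((in & ~(CLOUDABI_PROT_EXEC | CLOUDABI_PROT_READ |
	    CLOUDABI_PROT_WRITE)) != 0)
		return (EINVAL);

	if ((in & CLOUDABI_PROT_READ) != 0)
		prot |= PROT_READ;
	if ((in & CLOUDABI_PROT_WRITE) != 0)
		prot |= PROT_WRITE;
	if ((in & CLOUDABI_PROT_EXEC) != 0)
		prot |= PROT_EXEC;
	*protp = prot;
	return (0);
}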