Example #1
File: cdep.c Project: lu-zero/libfirm
void compute_cdep(ir_graph *irg)
{
	free_cdep(irg);
	cdep_data = XMALLOC(cdep_info);
	obstack_init(&cdep_data->obst);

	cdep_data->cdep_map = pmap_create();

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_POSTDOMINANCE);

	/* We must temporarily change the post-dominator relation:
	   the ipdom of the start block is the end block.
	   Firm does NOT add the phantom edge from Start to End.
	 */
	ir_node *const start_block = get_irg_start_block(irg);
	ir_node *const end_block   = get_irg_end_block(irg);
	ir_node *const rem         = get_Block_ipostdom(start_block);
	set_Block_ipostdom(start_block, end_block);

	irg_block_walk_graph(irg, cdep_pre, NULL, NULL);

	(void) cdep_edge_hook;

	/* restore the post dominator relation */
	set_Block_ipostdom(start_block, rem);
}
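After compute_cdep() runs, the dependence lists keyed by cdep_data->cdep_map can be walked per block. A minimal sketch, assuming libfirm's find_cdep(), get_cdep_node() and get_cdep_next() accessors from cdep.h (verify them against your libfirm version):

#include "cdep.h"
#include "irprintf.h"

/* Print every block that `block` is control dependent on.
 * Assumes compute_cdep() has already run on the enclosing graph. */
static void dump_control_deps(ir_node *block)
{
	for (ir_cdep *dep = find_cdep(block); dep != NULL;
	     dep = get_cdep_next(dep)) {
		ir_printf("%+F is control dependent on %+F\n",
		          block, get_cdep_node(dep));
	}
}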
Example #2
/**
 *  Create a new incomplete ir_prog.
 */
static ir_prog *new_incomplete_ir_prog(void)
{
	ir_prog *res = XMALLOCZ(ir_prog);

	res->graphs         = NEW_ARR_F(ir_graph *, 0);
	res->types          = NEW_ARR_F(ir_type *, 0);
	res->global_asms    = NEW_ARR_F(ident *, 0);
	res->last_label_nr  = 1;  /* 0 is reserved as non-label */
	res->max_irg_idx    = 0;
	res->max_node_nr    = 0;
#ifndef NDEBUG
	res->reserved_resources = IRP_RESOURCE_NONE;
#endif
	res->globals        = pmap_create();

	return res;
}
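The NEW_ARR_F(type, 0) calls allocate empty flexible arrays that are grown as graphs, types and asm strings are registered later. A small sketch of the append pattern, assuming libfirm's ARR_APP1 and ARR_LEN macros from adt/array.h and in-library access to the ir_prog fields:

#include <assert.h>
#include "adt/array.h"

/* Hypothetical registration helper: append one graph to the
 * flexible array created by new_incomplete_ir_prog() above.
 * ARR_APP1 reallocates the array as needed. */
static void register_graph(ir_prog *irp, ir_graph *irg)
{
	ARR_APP1(ir_graph *, irp->graphs, irg);
	assert(ARR_LEN(irp->graphs) > 0);
}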
Example #3
void acpi_init(void)
{
	pmap_create(&acpi_pmap, 0);
	struct acpi_rsdp *rsdp = apci_get_RSDP();
	if(!rsdp) return;
	printk(0, "[acpi]: found valid RSDP structure at %x\n", rsdp);
	struct acpi_dt_header *rsdt = (struct acpi_dt_header *)(rsdp->revision ? (addr_t)rsdp->xsdt_addr : (addr_t)rsdp->rsdt_addr);
	int pointer_size = (rsdp->revision ? 8 : 4);
	const char *sig = (rsdp->revision ? "XSDT" : "RSDT");
	addr_t rsdt_v = pmap_get_mapping(&acpi_pmap, (addr_t)rsdt);
	int valid = acpi_validate_dt((void *)(rsdt_v), sig);
	
	acpi_rsdt = (void *)rsdt_v;
	acpi_rsdt_pt_sz = pointer_size;
	if(valid) __acpi_enable=1;
#if CONFIG_MODULES
	loader_add_kernel_symbol(acpi_get_table_data);
	loader_add_kernel_symbol(find_RSDT_entry);
#endif
}
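acpi_validate_dt() is not shown here, but per the ACPI specification a descriptor table is valid when its 4-byte signature matches and all `length` bytes of the table sum to zero modulo 256. A sketch of what such a check typically looks like; the layout follows the standard ACPI SDT header, and the struct and field names are assumptions rather than this project's definitions:

#include <stdint.h>
#include <string.h>

/* Standard ACPI system description table header; named
 * acpi_sdt_header here to avoid clashing with the project's
 * own struct acpi_dt_header. */
struct acpi_sdt_header {
	char     signature[4];
	uint32_t length;       /* total table size, header included */
	uint8_t  revision;
	uint8_t  checksum;     /* makes all bytes sum to 0 mod 256 */
	char     oem_id[6];
	char     oem_table_id[8];
	uint32_t oem_revision;
	uint32_t creator_id;
	uint32_t creator_revision;
} __attribute__((packed));

/* Returns 1 when the signature matches and the byte sum of the
 * whole table is zero mod 256, as the ACPI spec requires. */
static int validate_sdt(const struct acpi_sdt_header *hdr, const char *sig)
{
	if (memcmp(hdr->signature, sig, 4) != 0)
		return 0;
	uint8_t sum = 0;
	const uint8_t *p = (const uint8_t *)hdr;
	for (uint32_t i = 0; i < hdr->length; i++)
		sum += p[i];
	return sum == 0;
}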
Example #4
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t 		thread,
	vm_map_t 		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;
	off_t			file_size = imgp->ip_vattr->va_data_size;
	
	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	task_t			old_task = TASK_NULL; /* protected by create_map */
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t create_map = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	task_t task = current_task();
	proc_t p = current_proc();
	mach_vm_offset_t	aslr_offset = 0;
	kern_return_t 		kret;

	if (macho_size > file_size) {
		return(LOAD_BADMACHO);
	}

	if (new_map == VM_MAP_NULL) {
		create_map = TRUE;
		old_task = current_task();
	}

	/*
	 * If we are spawning, we have already created backing objects for
	 * the process, including non-lazily creating the task map.  So we
	 * are going to switch the task map out for one appropriate for the
	 * bitness of the image being loaded.
	 */
	if (spawn) {
		create_map = TRUE;
		old_task = get_threadtask(thread);
	}

	if (create_map) {
		pmap = pmap_create(get_task_ledger(task), (vm_map_size_t) 0,
				(imgp->ip_flags & IMGPF_IS_64BIT));
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);

	} else
		map = new_map;

#ifndef	CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which makes it
	 * possible to circumvent Code Signing Enforcement */
	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
	        vm_map_disable_NX(map);
#endif

	/* Forcibly disallow execution from data pages even if the arch
	 * normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
		vm_map_disallow_data_exec(map);
	
	/*
	 * Compute a random offset for ASLR.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		aslr_offset = random();
		aslr_offset %= 1 << ((imgp->ip_flags & IMGPF_IS_64BIT) ? 16 : 8);
		aslr_offset <<= PAGE_SHIFT;
	}
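	/*
	 * Worked numbers, assuming PAGE_SHIFT == 12 (4 KB pages): a
	 * 64-bit image keeps 16 random bits, so the slide is a page
	 * multiple in [0, 2^16 * 4 KB) = [0, 256 MB); a 32-bit image
	 * keeps 8 bits, giving a slide in [0, 256 * 4 KB) = [0, 1 MB).
	 */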
	
	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
			      0, (int64_t)aslr_offset, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

#if CONFIG_EMBEDDED
	/*
	 * Check to see if the page zero is enforced by the map->min_offset.
	 */ 
	if (vm_map_has_hard_pagezero(map, 0x1000) == FALSE) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		printf("Cannot enforce a hard page-zero for %s\n", imgp->ip_strings);
		psignal(vfs_context_proc(imgp->ip_vfs_context), SIGKILL);
		return (LOAD_BADMACHO);
	}
#else
	/*
	 * For 64-bit users, check for the presence of a 4GB page zero,
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit.
	 */

	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map)) {
		vm_map_set_4GB_pagezero(map);
	}
#endif
	/*
	 *	Commit to new map.
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference and leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */

	 if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and it makes no sense.
		 */
	 	if (!spawn) {
			/*
			 * Mark the task as halting and start the other
			 * threads towards terminating themselves.  Then
			 * make sure any threads waiting for a process
			 * transition get informed that we are committed to
			 * this transition, and then finally complete the
			 * task halting (wait for threads and then cleanup
			 * task resources).
			 *
			 * NOTE: task_start_halt() makes sure that no new
			 * threads are created in the task during the transition.
			 * We need to mark the workqueue as exiting before we
			 * wait for threads to terminate (at the end of which
			 * we no longer have a prohibition on thread creation).
			 * 
			 * Finally, clean up any lingering workqueue data structures
			 * that may have been left behind by the workqueue threads
			 * as they exited (and then clean up the work queue itself).
			 */
			kret = task_start_halt(task);
			if (kret != KERN_SUCCESS) {
				return(kret);		
			}
			proc_transcommit(p, 0);
			workqueue_mark_exiting(p);
			task_complete_halt(task);
			workqueue_exit(p);
		}
		old_map = swap_task_map(old_task, thread, map, !spawn);
		vm_map_clear_4GB_pagezero(old_map);
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
Example #5
/*
 * Obtain the port for a given RPC service on a given host. This one can
 * be called for an ongoing RPC request.
 */
void
rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	struct rpc_portmap *map = clnt->cl_pmap;
	struct sockaddr_in *sap = &clnt->cl_xprt->addr;
	struct rpc_message msg = {
		.rpc_proc	= &pmap_procedures[PMAP_GETPORT],
		.rpc_argp	= map,
		.rpc_resp	= &clnt->cl_port,
		.rpc_cred	= NULL
	};
	struct rpc_clnt	*pmap_clnt;
	struct rpc_task	*child;

	dprintk("RPC: %4d rpc_getport(%s, %d, %d, %d)\n",
			task->tk_pid, clnt->cl_server,
			map->pm_prog, map->pm_vers, map->pm_prot);

	/* Autobind on cloned rpc clients is discouraged */
	BUG_ON(clnt->cl_parent != clnt);

	spin_lock(&pmap_lock);
	if (map->pm_binding) {
		rpc_sleep_on(&map->pm_bindwait, task, NULL, NULL);
		spin_unlock(&pmap_lock);
		return;
	}
	map->pm_binding = 1;
	spin_unlock(&pmap_lock);

	pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0);
	if (IS_ERR(pmap_clnt)) {
		task->tk_status = PTR_ERR(pmap_clnt);
		goto bailout;
	}
	task->tk_status = 0;

	/*
	 * Note: rpc_new_child will release the client after a failure.
	 */
	if (!(child = rpc_new_child(pmap_clnt, task)))
		goto bailout;

	/* Setup the call info struct */
	rpc_call_setup(child, &msg, 0);

	/* ... and run the child task */
	rpc_run_child(task, child, pmap_getport_done);
	return;

bailout:
	spin_lock(&pmap_lock);
	map->pm_binding = 0;
	rpc_wake_up(&map->pm_bindwait);
	spin_unlock(&pmap_lock);
	rpc_exit(task, -EIO);
}

#ifdef CONFIG_ROOT_NFS
int
rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
{
	struct rpc_portmap map = {
		.pm_prog	= prog,
		.pm_vers	= vers,
		.pm_prot	= prot,
		.pm_port	= 0
	};
	struct rpc_clnt	*pmap_clnt;
	char		hostname[32];
	int		status;

	dprintk("RPC:      rpc_getport_external(%u.%u.%u.%u, %d, %d, %d)\n",
			NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);

	sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
	pmap_clnt = pmap_create(hostname, sin, prot, 0);
	if (IS_ERR(pmap_clnt))
		return PTR_ERR(pmap_clnt);

	/* Setup the call info struct */
	status = rpc_call(pmap_clnt, PMAP_GETPORT, &map, &map.pm_port, 0);

	if (status >= 0) {
		if (map.pm_port != 0)
			return map.pm_port;
		status = -EACCES;
	}
	return status;
}
#endif

static void
pmap_getport_done(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_portmap *map = clnt->cl_pmap;

	dprintk("RPC: %4d pmap_getport_done(status %d, port %d)\n",
			task->tk_pid, task->tk_status, clnt->cl_port);

	xprt->ops->set_port(xprt, 0);
	if (task->tk_status < 0) {
		/* Make the calling task exit with an error */
		task->tk_action = rpc_exit_task;
	} else if (clnt->cl_port == 0) {
		/* Program not registered */
		rpc_exit(task, -EACCES);
	} else {
		xprt->ops->set_port(xprt, clnt->cl_port);
		clnt->cl_port = htons(clnt->cl_port);
	}
	spin_lock(&pmap_lock);
	map->pm_binding = 0;
	rpc_wake_up(&map->pm_bindwait);
	spin_unlock(&pmap_lock);
}

/*
 * Set or unset a port registration with the local portmapper.
 * port == 0 means unregister, port != 0 means register.
 */
int
rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
{
	struct sockaddr_in	sin;
	struct rpc_portmap	map;
	struct rpc_clnt		*pmap_clnt;
	int error = 0;

	dprintk("RPC: registering (%d, %d, %d, %d) with portmapper.\n",
			prog, vers, prot, port);

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
	if (IS_ERR(pmap_clnt)) {
		error = PTR_ERR(pmap_clnt);
		dprintk("RPC: couldn't create pmap client. Error = %d\n", error);
		return error;
	}

	map.pm_prog = prog;
	map.pm_vers = vers;
	map.pm_prot = prot;
	map.pm_port = port;

	error = rpc_call(pmap_clnt, port? PMAP_SET : PMAP_UNSET,
					&map, okay, 0);

	if (error < 0) {
		printk(KERN_WARNING
			"RPC: failed to contact portmap (errno %d).\n",
			error);
	}
	dprintk("RPC: registration status %d/%d\n", error, *okay);

	/* Client deleted automatically because cl_oneshot == 1 */
	return error;
}

static struct rpc_clnt *
pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
{
	struct rpc_xprt	*xprt;
	struct rpc_clnt	*clnt;

	/* printk("pmap: create xprt\n"); */
	xprt = xprt_create_proto(proto, srvaddr, NULL);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;
	xprt->ops->set_port(xprt, RPC_PMAP_PORT);
	if (!privileged)
		xprt->resvport = 0;

	/* printk("pmap: create clnt\n"); */
	clnt = rpc_new_client(xprt, hostname,
				&pmap_program, RPC_PMAP_VERSION,
				RPC_AUTH_UNIX);
	if (!IS_ERR(clnt)) {
		clnt->cl_softrtry = 1;
		clnt->cl_oneshot  = 1;
	}
	return clnt;
}
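pmap_create() reports failure through the kernel's ERR_PTR convention: the pointer it returns either is a usable client or encodes a negative errno. A minimal caller sketch, assuming the usual IS_ERR()/PTR_ERR() helpers from <linux/err.h>; pmap_ping() is a hypothetical helper assumed to live in this same file, so the sunrpc headers are already in scope:

#include <linux/err.h>

static int pmap_ping(struct sockaddr_in *sin)
{
	struct rpc_clnt *clnt = pmap_create("localhost", sin, IPPROTO_UDP, 1);

	if (IS_ERR(clnt))
		return PTR_ERR(clnt);	/* negative errno, e.g. -EIO */
	/* ... issue a call; the client then frees itself because
	 * pmap_create() set cl_oneshot above. */
	return 0;
}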

/*
 * XDR encode/decode functions for PMAP
 */
static int
xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct rpc_portmap *map)
{
	dprintk("RPC: xdr_encode_mapping(%d, %d, %d, %d)\n",
		map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port);
	*p++ = htonl(map->pm_prog);
	*p++ = htonl(map->pm_vers);
	*p++ = htonl(map->pm_prot);
	*p++ = htonl(map->pm_port);

	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}

static int
xdr_decode_port(struct rpc_rqst *req, u32 *p, unsigned short *portp)
{
	*portp = (unsigned short) ntohl(*p++);
	return 0;
}
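xdr_encode_mapping() above emits the complete portmap v2 argument body: four 32-bit words in network byte order (RFC 1833). A standalone, userspace restatement of that layout, with a hypothetical caller-supplied buffer:

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

/* Encode the PMAP argument quadruple into buf (at least 4 words).
 * Returns the number of bytes written: always 16. */
static size_t encode_pmap_args(uint32_t *buf, uint32_t prog, uint32_t vers,
			       uint32_t prot, uint32_t port)
{
	buf[0] = htonl(prog);	/* remote program number */
	buf[1] = htonl(vers);	/* program version */
	buf[2] = htonl(prot);	/* IPPROTO_TCP or IPPROTO_UDP */
	buf[3] = htonl(port);	/* 0 in GETPORT queries */
	return 4 * sizeof(uint32_t);
}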
Example #6
load_return_t
load_machfile(
    struct image_params	*imgp,
    struct mach_header	*header,
    thread_t 		thread,
    vm_map_t 		new_map,
    load_result_t		*result
)
{
    struct vnode		*vp = imgp->ip_vp;
    off_t			file_offset = imgp->ip_arch_offset;
    off_t			macho_size = imgp->ip_arch_size;

    pmap_t			pmap = 0;	/* protected by create_map */
    vm_map_t		map;
    vm_map_t		old_map;
    load_result_t		myresult;
    load_return_t		lret;
    boolean_t create_map = TRUE;

    if (new_map != VM_MAP_NULL) {
        create_map = FALSE;
    }

    if (create_map) {
        old_map = current_map();
        pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
        map = vm_map_create(pmap,
                            0,
                            vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
                            TRUE);
    } else
        map = new_map;

    if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
        vm_map_disable_NX(map);

    if (!result)
        result = &myresult;

    *result = load_result_null;

    lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
                          0, result);

    if (lret != LOAD_SUCCESS) {
        if (create_map) {
            vm_map_deallocate(map);	/* will lose pmap reference too */
        }
        return(lret);
    }

    /*
     * For 64-bit users, check for the presence of a 4GB page zero,
     * which will enable the kernel to share the user's address space
     * and hence avoid TLB flushes on kernel entry/exit.
     */
    if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
            vm_map_has_4GB_pagezero(map))
        vm_map_set_4GB_pagezero(map);

    /*
     *	Commit to new map.  First make sure that the current
     *	users of the task get done with it, and that we clean
     *	up the old contents of IPC and memory.  The task is
     *	guaranteed to be single threaded upon return (us).
     *
     *	Swap the new map for the old, which consumes our new map
     *	reference and leaves us responsible for the old_map reference.
     *	That lets us get off the pmap associated with it, and
     *	then we can release it.
     */

    if (create_map) {
        task_halt(current_task());

        old_map = swap_task_map(current_task(), map);
        vm_map_clear_4GB_pagezero(old_map);
        pmap_switch(pmap);	/* Make sure we are using the new pmap */
        vm_map_deallocate(old_map);
    }
    return(LOAD_SUCCESS);
}
Example #7
static
load_return_t
load_dylinker(
    struct dylinker_command	*lcp,
    integer_t		archbits,
    vm_map_t		map,
    thread_t	thread,
    int			depth,
    load_result_t		*result,
    boolean_t		is_64bit
)
{
    char			*name;
    char			*p;
    struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
    struct mach_header	header;
    off_t			file_offset = 0; /* set by get_macho_vnode() */
    off_t			macho_size = 0;	/* set by get_macho_vnode() */
    vm_map_t		copy_map;
    load_result_t		myresult;
    kern_return_t		ret;
    vm_map_copy_t	tmp;
    mach_vm_offset_t	dyl_start, map_addr;
    mach_vm_size_t		dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *	Check for a properly null-terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = load_result_null;

    /*
     *	First try to map dyld in directly.  This should work most of
     *	the time since there shouldn't normally be anything already
     *	mapped at its address.
     */

    ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
                         depth, &myresult);

    /*
     *	If it turned out something was in the way, then we'll take
     *	this longer path to map dyld into a temporary map and
     *	copy it into the destination map at a different address.
     */

    if (ret == LOAD_NOSPACE) {

        /*
         *	Load the Mach-O.
         *	Use a temporary map to do the work.
         */
        copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
                                             is_64bit),
                                 get_map_min(map), get_map_max(map), TRUE);
        if (VM_MAP_NULL == copy_map) {
            ret = LOAD_RESOURCE;
            goto out;
        }

        myresult = load_result_null;

        ret = parse_machfile(vp, copy_map, thread, &header,
                             file_offset, macho_size,
                             depth, &myresult);

        if (ret) {
            vm_map_deallocate(copy_map);
            goto out;
        }

        if (get_map_nentries(copy_map) > 0) {

            dyl_start = mach_get_vm_start(copy_map);
            dyl_length = mach_get_vm_end(copy_map) - dyl_start;

            map_addr = dyl_start;
            ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

            if (ret != KERN_SUCCESS) {
                vm_map_deallocate(copy_map);
                ret = LOAD_NOSPACE;
                goto out;

            }

            ret = vm_map_copyin(copy_map,
                                (vm_map_address_t)dyl_start,
                                (vm_map_size_t)dyl_length,
                                TRUE, &tmp);
            if (ret != KERN_SUCCESS) {
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            ret = vm_map_copy_overwrite(map,
                                        (vm_map_address_t)map_addr,
                                        tmp, FALSE);
            if (ret != KERN_SUCCESS) {
                vm_map_copy_discard(tmp);
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            if (map_addr != dyl_start)
                myresult.entry_point += (map_addr - dyl_start);
        } else {
            ret = LOAD_FAILURE;
        }

        vm_map_deallocate(copy_map);
    }

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);
    }
out:
    vnode_put(vp);
    return (ret);

}
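When dyld's preferred address (dyl_start) is occupied, the pages are copied to whatever address mach_vm_allocate() granted, and the recorded entry point must slide by the same delta, which is what the `myresult.entry_point += (map_addr - dyl_start)` line above does. A tiny self-contained sketch of that adjustment, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical numbers: dyld prefers 0x1fe00000 but the hole
	 * was granted at 0x2fe00000, so everything slides by 0x10000000. */
	uint64_t dyl_start   = 0x1fe00000;
	uint64_t map_addr    = 0x2fe00000;
	uint64_t entry_point = dyl_start + 0x1000;	/* from the Mach-O */

	if (map_addr != dyl_start)
		entry_point += (map_addr - dyl_start);

	printf("entry point after slide: 0x%llx\n",
	       (unsigned long long)entry_point);
	return 0;
}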