Example #1
void rate_submit_exit(struct thread_data *td)
{
    if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
        return;

    workqueue_exit(&td->io_wq);
}
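
rate_submit_exit() is the teardown half of fio's offloaded-submit path: it acts only when I/O submission has been offloaded to a worker pool (IO_MODE_OFFLOAD), and then shuts that pool down. As a minimal sketch, the matching setup half might look like the following; the io_workqueue_fn worker callback and the iodepth-based pool sizing are assumptions, not taken from this listing.

int rate_submit_init(struct thread_data *td)
{
	if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
		return 0;

	/* Assumed sizing: one worker per in-flight I/O slot. */
	return workqueue_init(td, &td->io_wq, io_workqueue_fn, td->o.iodepth);
}

The workqueue_init() a setup routine like this would call is shown in Example #3 below.
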
Example #2
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t 		thread,
	vm_map_t 		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;
	off_t			file_size = imgp->ip_vattr->va_data_size;
	
	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	task_t			old_task = TASK_NULL; /* protected by create_map */
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t create_map = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	task_t task = current_task();
	proc_t p = current_proc();
	mach_vm_offset_t	aslr_offset = 0;
	kern_return_t 		kret;

	if (macho_size > file_size) {
		return(LOAD_BADMACHO);
	}

	if (new_map == VM_MAP_NULL) {
		create_map = TRUE;
		old_task = current_task();
	}

	/*
	 * If we are spawning, we have created backing objects for the process
	 * already, which include non-lazily creating the task map.  So we
	 * are going to switch out the task map with one appropriate for the
	 * bitness of the image being loaded.
	 */
	if (spawn) {
		create_map = TRUE;
		old_task = get_threadtask(thread);
	}

	if (create_map) {
		pmap = pmap_create(get_task_ledger(task), (vm_map_size_t) 0,
				(imgp->ip_flags & IMGPF_IS_64BIT));
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);

	} else
		map = new_map;

#ifndef	CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which makes it
	 * possible to circumvent Code Signing Enforcement. */
	if (header->flags & MH_ALLOW_STACK_EXECUTION)
		vm_map_disable_NX(map);
#endif

	/* Forcibly disallow execution from data pages even if the arch
	 * normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
		vm_map_disallow_data_exec(map);
	
	/*
	 * Compute a random offset for ASLR.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		aslr_offset = random();
		aslr_offset %= 1 << ((imgp->ip_flags & IMGPF_IS_64BIT) ? 16 : 8);
		aslr_offset <<= PAGE_SHIFT;
	}
	
	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
			      0, (int64_t)aslr_offset, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

#if CONFIG_EMBEDDED
	/*
	 * Check to see if the page zero is enforced by the map->min_offset.
	 */ 
	if (vm_map_has_hard_pagezero(map, 0x1000) == FALSE) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		printf("Cannot enforce a hard page-zero for %s\n", imgp->ip_strings);
		psignal(vfs_context_proc(imgp->ip_vfs_context), SIGKILL);
		return (LOAD_BADMACHO);
	}
#else
	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */ 

	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map)) {
		vm_map_set_4GB_pagezero(map);
	}
#endif
	/*
	 *	Commit to new map.
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */

	if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and it makes no sense.
		 */
		if (!spawn) {
			/*
			 * Mark the task as halting and start the other
			 * threads towards terminating themselves.  Then
			 * make sure any threads waiting for a process
			 * transition get informed that we are committed to
			 * this transition, and then finally complete the
			 * task halting (wait for threads and then cleanup
			 * task resources).
			 *
			 * NOTE: task_start_halt() makes sure that no new
			 * threads are created in the task during the transition.
			 * We need to mark the workqueue as exiting before we
			 * wait for threads to terminate (at the end of which
			 * we no longer have a prohibition on thread creation).
			 * 
			 * Finally, clean up any lingering workqueue data structures
			 * that may have been left behind by the workqueue threads
			 * as they exited (and then clean up the work queue itself).
			 */
			kret = task_start_halt(task);
			if (kret != KERN_SUCCESS) {
				vm_map_deallocate(map);	/* will lose pmap reference too */
				return (LOAD_FAILURE);
			}
			proc_transcommit(p, 0);
			workqueue_mark_exiting(p);
			task_complete_halt(task);
			workqueue_exit(p);
		}
		old_map = swap_task_map(old_task, thread, map, !spawn);
		vm_map_clear_4GB_pagezero(old_map);
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
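
The ASLR block in load_machfile() above picks a page-aligned random slide: random() is reduced modulo 2^16 (64-bit images) or 2^8 (32-bit images) to choose a page index, which is then shifted by PAGE_SHIFT into a byte offset. Below is a self-contained sketch of that arithmetic, assuming the common PAGE_SHIFT of 12 (4 KiB pages), which caps the slide at 256 MiB for 64-bit images and 1 MiB for 32-bit ones.

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

static uint64_t aslr_slide(int is_64bit)
{
	uint64_t slide = (uint64_t)random();

	slide %= 1ULL << (is_64bit ? 16 : 8);	/* random page index */
	return slide << PAGE_SHIFT;		/* pages -> bytes */
}
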
Example #3
int workqueue_init(struct thread_data *td, struct workqueue *wq,
		   workqueue_fn *fn, unsigned max_pending)
{
	unsigned int running;
	int i, error;

	wq->max_workers = max_pending;
	wq->td = td;
	wq->fn = fn;
	wq->work_seq = 0;
	wq->next_free_worker = 0;
	pthread_cond_init(&wq->flush_cond, NULL);
	pthread_mutex_init(&wq->flush_lock, NULL);
	pthread_mutex_init(&wq->stat_lock, NULL);

	wq->workers = calloc(wq->max_workers, sizeof(struct submit_worker));
	if (!wq->workers) {
		wq->max_workers = 0;	/* nothing for workqueue_exit() to reap */
		goto err;
	}

	for (i = 0; i < wq->max_workers; i++)
		if (start_worker(wq, i))
			break;

	wq->max_workers = i;
	if (!wq->max_workers)
		goto err;

	/*
	 * Wait for them all to be started and initialized
	 */
	error = 0;
	do {
		struct submit_worker *sw;

		running = 0;
		pthread_mutex_lock(&wq->flush_lock);
		for (i = 0; i < wq->max_workers; i++) {
			sw = &wq->workers[i];
			pthread_mutex_lock(&sw->lock);
			if (sw->flags & SW_F_RUNNING)
				running++;
			if (sw->flags & SW_F_ERROR)
				error++;
			pthread_mutex_unlock(&sw->lock);
		}

		if (error || running == wq->max_workers) {
			pthread_mutex_unlock(&wq->flush_lock);
			break;
		}

		pthread_cond_wait(&wq->flush_cond, &wq->flush_lock);
		pthread_mutex_unlock(&wq->flush_lock);
	} while (1);

	if (!error)
		return 0;

err:
	log_err("Can't create rate workqueue\n");
	td_verror(td, ESRCH, "workqueue_init");
	workqueue_exit(wq);
	return 1;
}
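
The wait loop at the end of workqueue_init() is a classic startup handshake: each worker publishes SW_F_RUNNING (or SW_F_ERROR) under its own lock and signals the shared flush_cond, and the parent sleeps on that condition variable until every worker has reported in. Here is a self-contained sketch of the same pattern, with illustrative names rather than fio's:

#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int running;

static void *worker(void *arg)
{
	(void)arg;
	/* ... per-thread setup would go here ... */
	pthread_mutex_lock(&lock);
	running++;			/* publish "I am up" */
	pthread_cond_signal(&cond);	/* wake the waiting parent */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t threads[NWORKERS];
	int i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&threads[i], NULL, worker, NULL);

	/* Sleep until every worker has checked in. */
	pthread_mutex_lock(&lock);
	while (running < NWORKERS)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("all %d workers running\n", NWORKERS);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}
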