Example #1
void ipc_hdb_destructor (void *context) {
	struct ipc_instance *ipc_instance = (struct ipc_instance *)context;

	/*
	 * << 1 (i.e. multiplied by 2) because the dispatch buffer is a
	 * wrapped (circularly mapped) memory buffer, so its mapping spans
	 * twice its nominal size
	 */
	memory_unmap (ipc_instance->control_buffer, ipc_instance->control_size);
	memory_unmap (ipc_instance->request_buffer, ipc_instance->request_size);
	memory_unmap (ipc_instance->response_buffer, ipc_instance->response_size);
	memory_unmap (ipc_instance->dispatch_buffer, (ipc_instance->dispatch_size) << 1);
}
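
The `<< 1` only makes sense next to the mapping side: a wrapped buffer is built by mapping the same backing file twice at adjacent virtual addresses, so the mapping really spans twice its nominal size. A minimal POSIX sketch of the technique, assuming page-aligned sizes; the helper name `circular_map` and the `/dev/shm` path are illustrative, not corosync's actual `circular_memory_map`:

#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

/*
 * Map `bytes` of a temporary file twice, back to back, so that offset
 * i and offset i + bytes alias the same memory.  `bytes` must be a
 * multiple of the page size.  Returns NULL on failure.
 */
static void *circular_map (size_t bytes)
{
	char path[] = "/dev/shm/ring-XXXXXX";
	int fd = mkstemp (path);
	void *base;

	if (fd == -1) {
		return (NULL);
	}
	unlink (path);	/* anonymous once both mappings exist */
	if (ftruncate (fd, bytes) == -1) {
		goto error_close;
	}

	/*
	 * Reserve twice the span, then overlay two shared mappings of
	 * the same file with MAP_FIXED.
	 */
	base = mmap (NULL, bytes << 1, PROT_NONE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		goto error_close;
	}
	if (mmap (base, bytes, PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED ||
	    mmap ((char *)base + bytes, bytes, PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) {
		munmap (base, bytes << 1);
		goto error_close;
	}
	close (fd);
	return (base);

error_close:
	close (fd);
	return (NULL);
}

A write that runs off the end of the first copy lands in the second and aliases the start of the buffer, which is why the destructor above unmaps dispatch_size << 1 bytes.
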
Example #2
static void _process_dispose_thread_map(uint32_t pid) {
    // Unmap thread map
    uintptr_t thread_map = MEMORY_THREAD_MAP_VADDR + pid * THREAD_MAP_SIZE;
    size_t offset;

    // Walk the map one 4 KiB page (0x1000 bytes) at a time: resolve the
    // backing frame, drop the mapping, then return the frame to the allocator.
    for (offset = 0; offset < THREAD_MAP_SIZE; offset += 0x1000) {
        uintptr_t phys = memory_physical(thread_map + offset);
        memory_unmap(thread_map + offset);
        frame_free(phys);
    }
}
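
For contrast, the inverse operation would allocate one physical frame per page and map it at the same fixed virtual range. A hedged sketch, assuming the `frame_alloc`/`memory_map` API visible in example #4; the function name and the flag choice are illustrative:

static void _process_create_thread_map(uint32_t pid) {
    // Map thread map: one freshly allocated frame per 4 KiB page
    uintptr_t thread_map = MEMORY_THREAD_MAP_VADDR + pid * THREAD_MAP_SIZE;
    size_t offset;

    for (offset = 0; offset < THREAD_MAP_SIZE; offset += 0x1000) {
        uintptr_t phys = frame_alloc();
        memory_map(thread_map + offset, phys, PAGE_FLAG_WRITEABLE);
    }
}
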
Example #3
/*
 * cds_lfht_free_bucket_table() should be called in decreasing order of
 * `order`.  Calling cds_lfht_free_bucket_table(0) last destroys the
 * whole lfht.
 */
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			poison_free(ht->tbl_mmap);
			return;
		}
		/* large table */
		memory_unmap(ht->tbl_mmap,
			ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
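
This teardown mirrors an allocation path in the same mmap-backed scheme: order 0 reserves the maximum span and commits only the minimum, and each order above min_alloc_buckets_order commits the upper half, i.e. entries [2^(order-1), 2^order). A sketch of that counterpart, assuming `memory_map` (reserve) and `memory_populate` (commit) helpers in the style of the `memory_unmap`/`memory_discard` calls above, not necessarily the library's exact code:

static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table: a single plain allocation */
			ht->tbl_mmap = calloc(ht->max_nr_buckets,
				sizeof(*ht->tbl_mmap));
			assert(ht->tbl_mmap);
			return;
		}
		/* large table: reserve the maximum span, commit the minimum */
		ht->tbl_mmap = memory_map(ht->max_nr_buckets
			* sizeof(*ht->tbl_mmap));
		memory_populate(ht->tbl_mmap,
			ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* commit the upper half: entries [2^(order-1), 2^order) */
		unsigned long len = 1UL << (order - 1);

		memory_populate(ht->tbl_mmap + len,
			len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
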
Example #4
File: stack.c Project: zrho/Carbon
void stack_resize(stack_t *stack, uintptr_t new_len, process_t *process) {
    // Check address space
    if (UNLIKELY(process->addr_space != memory_space_get()))
        PANIC("Failed trying to resize a stack for a process while not being in "
              "its address space.");

    // Greater than maximum length?
    if (UNLIKELY(new_len > STACK_LENGTH_MAX))
        PANIC("Failed trying to increase a stack's size over the maximum stack "
              "size.");

    // Size increased or decreased?
    if (new_len > stack->length) { // Increased
        // Map region
        uintptr_t reg_end = stack->address - stack->length;
        uintptr_t reg_addr;
        uint16_t flags = PAGE_FLAG_WRITEABLE | PAGE_FLAG_USER;

        for (reg_addr = stack->address - new_len; reg_addr < reg_end; reg_addr += PAGE_SIZE) {
            uintptr_t phys = frame_alloc();
            memory_map(reg_addr, phys, flags);
        }

    } else if (new_len < stack->length) {
        // Unmap region
        uintptr_t reg_end = stack->address - new_len;
        uintptr_t reg_addr;

        for (reg_addr = stack->address - stack->length; reg_addr < reg_end; reg_addr += PAGE_SIZE) {
            uintptr_t phys = memory_physical(reg_addr);
            memory_unmap(reg_addr);
            frame_free(phys);
        }
    }

    // Set new size
    stack->length = new_len;
}
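
A usage sketch: a page-fault handler could grow the faulting process's stack on demand by resizing it to cover the faulting page. The caller below is hypothetical; PAGE_SIZE and STACK_LENGTH_MAX are the constants already used above:

// Hypothetical caller: grow the stack on a fault just below its
// current lower bound (the stack grows downward from stack->address).
void stack_fault(stack_t *stack, process_t *process, uintptr_t fault_addr) {
    uintptr_t lower_bound = stack->address - stack->length;

    if (fault_addr >= lower_bound)
        return; // fault inside the mapped stack; nothing to grow

    // Round the faulting address down to a page boundary and resize the
    // stack to cover it; stack_resize PANICs past STACK_LENGTH_MAX, so
    // check first.
    uintptr_t new_len = stack->address - (fault_addr & ~(uintptr_t)(PAGE_SIZE - 1));

    if (new_len <= STACK_LENGTH_MAX)
        stack_resize(stack, new_len, process);
}
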
Example #5
/* safely copy memory from any type of memory to any type of memory (optionally between different processes) */
int safe_memory_copy(void *dst, const void *src, word len, int dst_hint, int src_hint, word dst_pid, word src_pid)
{
	word new_len = len;
	int dst_type, src_type;
	void *dst_map = NULL;
	void *src_map = NULL;
	struct task_struct *dst_task = current;
	struct task_struct *src_task = current;
	struct pid *pid_struct;
	int err = 0;

	if (!len) {
		return 0;
	}

	/* load the correct task structs: */

	if (dst_pid || src_pid) {
		rcu_read_lock();
	}

	if (dst_pid) {
		pid_struct = find_vpid(dst_pid);
		if (NULL == pid_struct) {
			ERROR_CLEAN(-ERROR_PARAM);
		}
		dst_task = pid_task(pid_struct, PIDTYPE_PID);
	}

	if (NULL == dst_task) {
		ERROR_CLEAN(-ERROR_PARAM);
	}

	if (src_pid) {
		pid_struct = find_vpid(src_pid);
		if (NULL == pid_struct) {
			ERROR_CLEAN(-ERROR_PARAM);
		}
		src_task = pid_task(pid_struct, PIDTYPE_PID);
	}

	if (NULL == src_task) {
		ERROR_CLEAN(-ERROR_PARAM);
	}

	/* if we don't know where these addresses came from, find out: */

	if (dst_hint != ADDR_UNDEF) {
		dst_type = dst_hint;
	} else {
		dst_type = memory_check_addr_perm_task(dst, &new_len, 1, NULL, NULL, dst_task);
		if (dst_type == ADDR_UNDEF || new_len != len) {
			ERROR_CLEAN(-ERROR_POINT);
		}
	}

	if (src_hint != ADDR_UNDEF) {
		src_type = src_hint;
	} else {
		src_type = memory_check_addr_perm_task(src, &new_len, 0, NULL, NULL, src_task);
		if (src_type == ADDR_UNDEF || new_len != len) {
			ERROR_CLEAN(-ERROR_POINT);
		}
	}

	/* map user pages if we need to: */

	/* IMPORTANT:
	 * mapping user pages cannot be done atomically, so if either
	 * address comes from user space, do not call this function
	 * from an atomic-only context.
	 */
	if (dst_type == ADDR_OUTSIDE) {
		err = memory_map_task(dst, &new_len, &dst_map, (byte **)&dst, 1, dst_task);
		if (err < 0 || new_len != len) {
			goto clean;
		}
	}

	if (src_type == ADDR_OUTSIDE) {
		err = memory_map_task(src, &new_len, &src_map, (byte **)&src, 0, src_task);
		if (err < 0 || new_len != len) {
			goto clean;
		}
	}

	memory_copy(dst, src, len);
	err = 0;
clean:
	if (dst_pid || src_pid) {
		rcu_read_unlock();
	}

	if (dst_map) {
		memory_unmap(dst_map);
	}
	if (src_map) {
		memory_unmap(src_map);
	}

	return err;
}
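
A usage sketch: reading a buffer out of another process by PID, letting safe_memory_copy probe both address types itself. Per the IMPORTANT comment above, such a caller must not run in an atomic-only context; the wrapper name is made up:

/* Hypothetical wrapper: pull len bytes from address remote_src inside
 * process pid into a buffer owned by the current task. */
static int read_remote(void *local_dst, const void *remote_src,
		word len, word pid)
{
	return safe_memory_copy(local_dst, remote_src, len,
		ADDR_UNDEF, ADDR_UNDEF, 0 /* dst: current */, pid);
}
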
Example #6
static void *huge_move_expand(struct thread_cache *cache, void *old_addr, size_t old_size, size_t new_size) {
    struct arena *arena;
    void *new_addr = huge_chunk_alloc(cache, new_size, CHUNK_SIZE, &arena);
    if (unlikely(!new_addr)) {
        return NULL;
    }

    bool gap = true;
    if (unlikely(memory_remap_fixed(old_addr, old_size, new_addr, new_size))) {
        memcpy(new_addr, old_addr, old_size);
        if (purge_ratio >= 0) {
            memory_decommit(old_addr, old_size);
        }
        gap = false;
    } else {
        // Attempt to fill the virtual memory hole. The kernel should provide a flag for preserving
        // the old mapping to avoid the possibility of this failing and creating fragmentation.
        //
        // https://lkml.org/lkml/2014/10/2/624
        void *extra = memory_map(old_addr, old_size, false);
        if (likely(extra)) {
            if (unlikely(extra != old_addr)) {
                memory_unmap(extra, old_size);
            } else {
                gap = false;
            }
        }
    }

    struct extent_node key;
    key.addr = old_addr;

    struct arena *old_arena = get_huge_arena(old_addr);

    extent_tree *huge = acquire_huge(old_arena);
    struct extent_node *node = extent_tree_ad_search(huge, &key);
    assert(node);
    extent_tree_ad_remove(huge, node);
    node->addr = new_addr;
    node->size = new_size;

    if (arena != old_arena) {
        release_huge(old_arena);
        huge = acquire_huge(arena);
    }

    extent_tree_ad_insert(huge, node);
    release_huge(arena);

    if (!gap) {
        if (arena != old_arena && old_arena) {
            mutex_lock(&old_arena->mutex);
        }
        chunk_free(get_recycler(old_arena), old_addr, old_size);
        if (arena != old_arena && old_arena) {
            mutex_unlock(&old_arena->mutex);
        }
    }

    maybe_unlock_arena(arena);
    return new_addr;
}
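
On Linux, a memory_remap_fixed with these semantics can be built on mremap with MREMAP_MAYMOVE | MREMAP_FIXED, which atomically moves the pages to new_addr and unmaps the source, leaving exactly the hole the code above tries to refill. A plausible sketch, not necessarily this allocator's actual wrapper:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

// Move [old_addr, old_addr + old_size) to the fixed destination
// new_addr, growing the mapping to new_size.  Returns 0 on success;
// non-zero makes the caller above fall back to memcpy, with the old
// mapping still intact.
static int memory_remap_fixed(void *old_addr, size_t old_size,
                              void *new_addr, size_t new_size) {
    void *res = mremap(old_addr, old_size, new_size,
                       MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
    return res == MAP_FAILED ? -1 : 0;
}
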
Example #7
/*
 * External API
 */
cs_error_t
coroipcc_service_connect (
	const char *socket_name,
	unsigned int service,
	size_t request_size,
	size_t response_size,
	size_t dispatch_size,
	hdb_handle_t *handle)
{
	int request_fd;
	struct sockaddr_un address;
	cs_error_t res;
	struct ipc_instance *ipc_instance;
#if _POSIX_THREAD_PROCESS_SHARED < 1
	key_t semkey = 0;
	union semun semun;
#endif
	int sys_res;
	mar_req_setup_t req_setup;
	mar_res_setup_t res_setup;
	char control_map_path[PATH_MAX];
	char request_map_path[PATH_MAX];
	char response_map_path[PATH_MAX];
	char dispatch_map_path[PATH_MAX];

	res = hdb_error_to_cs (hdb_handle_create (&ipc_hdb,
		sizeof (struct ipc_instance), handle));
	if (res != CS_OK) {
		return (res);
	}

	res = hdb_error_to_cs (hdb_handle_get (&ipc_hdb, *handle, (void **)&ipc_instance));
	if (res != CS_OK) {
		return (res);
	}

	res_setup.error = CS_ERR_LIBRARY;

#if defined(COROSYNC_SOLARIS)
	request_fd = socket (PF_UNIX, SOCK_STREAM, 0);
#else
	request_fd = socket (PF_LOCAL, SOCK_STREAM, 0);
#endif
	if (request_fd == -1) {
		return (CS_ERR_LIBRARY);
	}
#ifdef SO_NOSIGPIPE
	socket_nosigpipe (request_fd);
#endif

	memset (&address, 0, sizeof (struct sockaddr_un));
	address.sun_family = AF_UNIX;
#if defined(COROSYNC_BSD) || defined(COROSYNC_DARWIN)
	address.sun_len = SUN_LEN(&address);
#endif

#if defined(COROSYNC_LINUX)
	sprintf (address.sun_path + 1, "%s", socket_name);
#else
	sprintf (address.sun_path, "%s/%s", SOCKETDIR, socket_name);
#endif
	sys_res = connect (request_fd, (struct sockaddr *)&address,
		COROSYNC_SUN_LEN(&address));
	if (sys_res == -1) {
		res = CS_ERR_TRY_AGAIN;
		goto error_connect;
	}

	sys_res = memory_map (
		control_map_path,
		"control_buffer-XXXXXX",
		(void *)&ipc_instance->control_buffer,
		8192);
	if (sys_res == -1) {
		res = CS_ERR_LIBRARY;
		goto error_connect;
	}

	sys_res = memory_map (
		request_map_path,
		"request_buffer-XXXXXX",
		(void *)&ipc_instance->request_buffer,
		request_size);
	if (sys_res == -1) {
		res = CS_ERR_LIBRARY;
		goto error_request_buffer;
	}

	sys_res = memory_map (
		response_map_path,
		"response_buffer-XXXXXX",
		(void *)&ipc_instance->response_buffer,
		response_size);
	if (sys_res == -1) {
		res = CS_ERR_LIBRARY;
		goto error_response_buffer;
	}

	sys_res = circular_memory_map (
		dispatch_map_path,
		"dispatch_buffer-XXXXXX",
		(void *)&ipc_instance->dispatch_buffer,
		dispatch_size);
	if (sys_res == -1) {
		res = CS_ERR_LIBRARY;
		goto error_dispatch_buffer;
	}

#if _POSIX_THREAD_PROCESS_SHARED > 0
	sem_init (&ipc_instance->control_buffer->sem_request_or_flush_or_exit, 1, 0);
	sem_init (&ipc_instance->control_buffer->sem_request, 1, 0);
	sem_init (&ipc_instance->control_buffer->sem_response, 1, 0);
	sem_init (&ipc_instance->control_buffer->sem_dispatch, 1, 0);
#else
{
	int i;

	/*
	 * Allocate a semaphore set
	 */
	while (1) {
		semkey = random();
		ipc_instance->euid = geteuid ();
		if ((ipc_instance->control_buffer->semid
		     = semget (semkey, 4, IPC_CREAT|IPC_EXCL|0600)) != -1) {
			break;
		}
		/*
		 * EACCES can be returned to a non-root user when opening
		 * another user's semaphore.
		 *
		 * EEXIST can happen when a root or non-root user opens an
		 * existing semaphore set to which it has access.
		 */
		if (errno != EEXIST && errno != EACCES) {
			res = CS_ERR_LIBRARY;
			goto error_exit;
		}
	}

	for (i = 0; i < 4; i++) {
		semun.val = 0;
		sys_res = semctl (ipc_instance->control_buffer->semid, i, SETVAL, semun);
		if (sys_res != 0) {
			res = CS_ERR_LIBRARY;
			goto error_exit;
		}
	}
}
#endif

	/*
	 * Initialize IPC setup message
	 */
	req_setup.service = service;
	strcpy (req_setup.control_file, control_map_path);
	strcpy (req_setup.request_file, request_map_path);
	strcpy (req_setup.response_file, response_map_path);
	strcpy (req_setup.dispatch_file, dispatch_map_path);
	req_setup.control_size = 8192;
	req_setup.request_size = request_size;
	req_setup.response_size = response_size;
	req_setup.dispatch_size = dispatch_size;

#if _POSIX_THREAD_PROCESS_SHARED < 1
	req_setup.semkey = semkey;
#endif

	res = socket_send (request_fd, &req_setup, sizeof (mar_req_setup_t));
	if (res != CS_OK) {
		goto error_exit;
	}
	res = socket_recv (request_fd, &res_setup, sizeof (mar_res_setup_t));
	if (res != CS_OK) {
		goto error_exit;
	}

	ipc_instance->fd = request_fd;

	if (res_setup.error == CS_ERR_TRY_AGAIN) {
		res = res_setup.error;
		goto error_exit;
	}

	ipc_instance->control_size = 8192;
	ipc_instance->request_size = request_size;
	ipc_instance->response_size = response_size;
	ipc_instance->dispatch_size = dispatch_size;

	pthread_mutex_init (&ipc_instance->mutex, NULL);

	hdb_handle_put (&ipc_hdb, *handle);

	return (res_setup.error);

error_exit:
#if _POSIX_THREAD_PROCESS_SHARED < 1
	if (ipc_instance->control_buffer->semid > 0)
		semctl (ipc_instance->control_buffer->semid, 0, IPC_RMID);
#endif
	memory_unmap (ipc_instance->dispatch_buffer, dispatch_size);
error_dispatch_buffer:
	memory_unmap (ipc_instance->response_buffer, response_size);
error_response_buffer:
	memory_unmap (ipc_instance->request_buffer, request_size);
error_request_buffer:
	memory_unmap (ipc_instance->control_buffer, 8192);
error_connect:
	close (request_fd);

	hdb_handle_destroy (&ipc_hdb, *handle);
	hdb_handle_put (&ipc_hdb, *handle);

	return (res);
}
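
An illustrative caller: both connect() and the server's setup reply can report transient unavailability as CS_ERR_TRY_AGAIN, so callers typically retry. The socket name, service id and buffer sizes below are made up:

hdb_handle_t handle;
cs_error_t res;

do {
	res = coroipcc_service_connect ("corosync.ipc",	/* socket_name */
		8,		/* service id - example value */
		1024 * 1024,	/* request_size */
		1024 * 1024,	/* response_size */
		1024 * 1024,	/* dispatch_size */
		&handle);
	/* a real caller would sleep or back off between attempts */
} while (res == CS_ERR_TRY_AGAIN);
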