Example No. 1
/** Notify Kerrighed of the creation of a new shm segment.
 *
 *  @author Renaud Lottiaux
 */
int krg_ipc_shm_newseg (struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	shmid_object_t *shp_object;
	struct kddm_set *kddm;
	long *key_index;
	int index, err;

	BUG_ON(!shm_ids(ns).krgops);

	index = ipcid_to_idx(shp->shm_perm.id);

	shp_object = _kddm_grab_object_manual_ft(
		shm_ids(ns).krgops->data_kddm_set, index);

	BUG_ON(shp_object);

	shp_object = kmem_cache_alloc(shmid_object_cachep, GFP_KERNEL);
	if (!shp_object) {
		err = -ENOMEM;
		goto err_put;
	}

	/* Create a KDDM set to host segment pages */
	kddm = _create_new_kddm_set (kddm_def_ns, 0, SHM_MEMORY_LINKER,
				     kerrighed_node_id, PAGE_SIZE,
				     &shp->shm_perm.id, sizeof(int), 0);

	if (IS_ERR(kddm)) {
		err = PTR_ERR(kddm);
		/* Do not leak the shmid object allocated above */
		kmem_cache_free(shmid_object_cachep, shp_object);
		goto err_put;
	}

	shp->shm_file->f_dentry->d_inode->i_mapping->kddm_set = kddm;
	shp->shm_file->f_op = &krg_shm_file_operations;

	shp_object->set_id = kddm->id;

	shp_object->local_shp = shp;

	_kddm_set_object(shm_ids(ns).krgops->data_kddm_set, index, shp_object);

	if (shp->shm_perm.key != IPC_PRIVATE)
	{
		key_index = _kddm_grab_object(shm_ids(ns).krgops->key_kddm_set,
					      shp->shm_perm.key);
		*key_index = index;
		_kddm_put_object (shm_ids(ns).krgops->key_kddm_set,
				  shp->shm_perm.key);
	}

	shp->shm_perm.krgops = shm_ids(ns).krgops;

	err = 0;

err_put:
	_kddm_put_object(shm_ids(ns).krgops->data_kddm_set, index);

	return err;

}
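
Note the bracketing discipline used throughout these examples: every _kddm_grab_object*() call must be balanced by a _kddm_put_object() on the same set and index, on success and failure paths alike. A minimal sketch of the idiom, assuming a hypothetical kddm set my_set and object id id:

	void *obj = _kddm_grab_object(my_set, id);	/* take exclusive access */
	if (!IS_ERR(obj)) {
		/* ... read or update the shared object ... */
	}
	_kddm_put_object(my_set, id);	/* always release, even on error */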
Example No. 2
/**
 * @author Pascal Gallard
 */
static struct task_kddm_object *task_writelock(pid_t pid, int nested)
{
	struct task_kddm_object *obj;

	/* Filter well known cases of no task kddm object. */
	if (!(pid & GLOBAL_PID_MASK))
		return NULL;

	obj = _kddm_grab_object_no_ft(task_kddm_set, pid);
	if (likely(obj)) {
		if (!nested)
			down_write(&obj->sem);
		else
			down_write_nested(&obj->sem, SINGLE_DEPTH_NESTING);
		if (obj->write_locked == 2) {
			/* Dying object */
			up_write(&obj->sem);
			_kddm_put_object(task_kddm_set, pid);
			return NULL;
		}
		/* Marker for unlock. Dirty but temporary. */
		obj->write_locked = 1;
	}

	return obj;
}
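
A write lock taken with task_writelock() is released by krg_task_unlock() (Example No. 9), which reads the write_locked marker set above to choose up_write() over up_read(). A usage sketch, assuming pid carries the GLOBAL_PID_MASK bit:

	struct task_kddm_object *obj = task_writelock(pid, 0);
	if (obj) {
		/* ... update the shared task object ... */
		krg_task_unlock(pid);	/* drops the rwsem and the kddm reference */
	}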
Example No. 3
static void faf_poll_notify_nodes(unsigned long dvfs_id)
{
	struct dvfs_file_struct *dvfs_file;
	struct faf_polled_fd *polled_fd;
	struct faf_polled_fd_node *polled_fd_node;
	struct hlist_node *pos;

	dvfs_file = _kddm_get_object_no_ft(dvfs_file_struct_ctnr, dvfs_id);
	if (dvfs_file && dvfs_file->file) {
		/* TODO: still required? */
		if (atomic_read (&dvfs_file->file->f_count) == 0)
			dvfs_file->file = NULL;
	}
	if (!dvfs_file || !dvfs_file->file)
		goto out_put_dvfs_file;

	mutex_lock(&faf_polled_fd_mutex);

	polled_fd = __faf_polled_fd_find(dvfs_id);
	if (!polled_fd)
		goto out_unlock;

	hlist_for_each_entry(polled_fd_node, pos, &polled_fd->nodes, list)
		faf_poll_notify_node(polled_fd_node->node_id, dvfs_id);

out_unlock:
	mutex_unlock(&faf_polled_fd_mutex);

out_put_dvfs_file:
	_kddm_put_object(dvfs_file_struct_ctnr, dvfs_id);
}
Example No. 4
int static_node_info_init(void)
{
	krg_static_node_info_t *static_node_info;

	register_io_linker(STATIC_NODE_INFO_LINKER,
			   &static_node_info_io_linker);

	/* Create the static node info kddm set */

	static_node_info_kddm_set =
		create_new_kddm_set(kddm_def_ns,
				    STATIC_NODE_INFO_KDDM_ID,
				    STATIC_NODE_INFO_LINKER,
				    KDDM_CUSTOM_DEF_OWNER,
				    sizeof(krg_static_node_info_t),
				    0);
	if (IS_ERR(static_node_info_kddm_set))
		OOM;

	static_node_info = _kddm_grab_object(static_node_info_kddm_set,
					     kerrighed_node_id);

	static_node_info->nr_cpu = num_online_cpus();
	static_node_info->totalram = totalram_pages;
	static_node_info->totalhigh = totalhigh_pages;

	_kddm_put_object(static_node_info_kddm_set, kerrighed_node_id);

	return 0;
}
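
Once this runs on every node, any node can consult a peer's record through the same kddm set. A hedged read-side sketch, assuming node holds a valid kerrighed_node_t (the actual read-side helper is not shown in these examples):

	krg_static_node_info_t *info;

	info = _kddm_get_object_no_ft(static_node_info_kddm_set, node);
	if (info) {
		/* ... consume info->nr_cpu, info->totalram, info->totalhigh ... */
	}
	_kddm_put_object(static_node_info_kddm_set, node);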
Example No. 5
static struct kern_ipc_perm *kcb_ipc_shm_lock(struct ipc_ids *ids, int id)
{
	shmid_object_t *shp_object;
	struct shmid_kernel *shp;
	int index;

	rcu_read_lock();

	index = ipcid_to_idx(id);

	shp_object = _kddm_grab_object_no_ft(ids->krgops->data_kddm_set, index);

	if (!shp_object)
		goto error;

	shp = shp_object->local_shp;

	BUG_ON(!shp);

	mutex_lock(&shp->shm_perm.mutex);

	if (shp->shm_perm.deleted) {
		mutex_unlock(&shp->shm_perm.mutex);
		goto error;
	}

	return &(shp->shm_perm);

error:
	_kddm_put_object(ids->krgops->data_kddm_set, index);
	rcu_read_unlock();

	return ERR_PTR(-EINVAL);
}
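
kcb_ipc_shm_lock() returns with three things held: the kddm object (grabbed), the per-segment mutex, and rcu_read_lock(); kcb_ipc_shm_unlock() (Example No. 10) releases all three. A usage sketch:

	struct kern_ipc_perm *perm = kcb_ipc_shm_lock(ids, id);
	if (!IS_ERR(perm)) {
		/* ... operate on the locked segment ... */
		kcb_ipc_shm_unlock(perm);
	}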
Example No. 6
static int create_pid_kddm_object(struct pid *pid, int early)
{
	int nr = pid_knr(pid);
	struct pid_kddm_object *obj;
	struct task_kddm_object *task_obj;

	obj = _kddm_grab_object(pid_kddm_set, nr);
	if (IS_ERR(obj)) {
		_kddm_put_object(pid_kddm_set, nr);
		return PTR_ERR(obj);
	}
	BUG_ON(!obj);
	task_obj = krg_task_readlock(nr);

	spin_lock(&pid_kddm_lock);
	BUG_ON(early && pid->kddm_obj);
	if (!pid->kddm_obj) {
		obj->pid = pid;
		obj->active = 1;
		if (early)
			obj->attach_pending = 1;
		BUG_ON(obj->task_obj);
		if (task_obj) {
			BUG_ON(task_obj->pid_obj);
			/*
			 * These rcu_assign_pointer are not really needed,
			 * but are cleaner :)
			 */
			rcu_assign_pointer(obj->task_obj, task_obj);
			rcu_assign_pointer(obj->task_obj->pid_obj, obj);
		}
		pid->kddm_obj = obj;
	}
	BUG_ON(pid->kddm_obj != obj);
	spin_unlock(&pid_kddm_lock);

	krg_task_unlock(nr);
	_kddm_put_object(pid_kddm_set, nr);

	return 0;
}
Example No. 7
static int faf_polled_fd_remove(kerrighed_node_t client,
				int server_fd,
				unsigned long dvfs_id)
{
	struct dvfs_file_struct *dvfs_file;
	struct faf_polled_fd *polled_fd;
	struct faf_polled_fd_node *polled_fd_node;
	int err;

	dvfs_file = _kddm_get_object_no_ft(dvfs_file_struct_ctnr, dvfs_id);
	if (dvfs_file && dvfs_file->file) {
		/* TODO: still required? */
		if (atomic_read (&dvfs_file->file->f_count) == 0)
			dvfs_file->file = NULL;
	}

	mutex_lock(&faf_polled_fd_mutex);

	polled_fd = __faf_polled_fd_find(dvfs_id);
	BUG_ON(!polled_fd);
	BUG_ON(!polled_fd->count);
	polled_fd_node = __faf_polled_fd_find_node(polled_fd, client);
	BUG_ON(!polled_fd_node);
	BUG_ON(!polled_fd_node->count);

	polled_fd_node->count--;
	if (!polled_fd_node->count)
		faf_polled_fd_node_free(polled_fd, polled_fd_node);
	if (polled_fd->count)
		goto out_unlock;

	if (!dvfs_file || !dvfs_file->file)
		/*
		 * The file is already closed or about to be closed. The last
		 * __fput() automatically removes it from the interest set of
		 * faf_poll_epfd.
		 */
		goto free_polled_fd;

	BUG_ON(faf_poll_epfd < 0);
	err = sys_epoll_ctl(faf_poll_epfd, EPOLL_CTL_DEL, server_fd, NULL);
	BUG_ON(err);

free_polled_fd:
	faf_polled_fd_free(polled_fd);

out_unlock:
	mutex_unlock(&faf_polled_fd_mutex);

	_kddm_put_object(dvfs_file_struct_ctnr, dvfs_id);

	return 0;
}
Example No. 8
static void __put_pid(struct pid_kddm_object *obj)
{
	struct pid *pid = obj->pid;
	int nr = pid_knr(pid);
	int may_put;
	int grabbed = 0;

	/* Try to avoid grabbing the kddm object */
	read_lock(&tasklist_lock);
	spin_lock(&pid_kddm_lock);
	may_put = may_put_pid(obj);
	spin_unlock(&pid_kddm_lock);
	if (!may_put)
		goto release_work;
	read_unlock(&tasklist_lock);

	/* The pid seems to be unused locally. Have to check globally. */
	/* Prevent pidmaps from changing host nodes. */
	pidmap_map_read_lock();
	fkddm_grab_object(kddm_def_ns, PID_KDDM_ID, nr,
			  KDDM_NO_FT_REQ | KDDM_DONT_KILL);
	grabbed = 1;

	read_lock(&tasklist_lock);

	spin_lock(&pid_kddm_lock);
	may_put = may_put_pid(obj);
	if (may_put) {
		obj->active = 0;
		obj->node_count--;
		if (obj->node_count)
			/* Still used elsewhere */
			may_put = 0;
	}
	spin_unlock(&pid_kddm_lock);

release_work:
	spin_lock(&put_pid_wq_lock);
	list_del_init(&obj->wq);
	spin_unlock(&put_pid_wq_lock);

	read_unlock(&tasklist_lock);

	if (may_put) {
		_kddm_remove_frozen_object(pid_kddm_set, nr);
		pidmap_map_read_unlock();
	} else if (grabbed) {
		_kddm_put_object(pid_kddm_set, nr);
		pidmap_map_read_unlock();
	}
}
Example No. 9
/**
 * @author Pascal Gallard
 */
void krg_task_unlock(pid_t pid)
{
	/* Filter well known cases of no task kddm object. */
	if (!(pid & GLOBAL_PID_MASK))
		return;

	{
		/*
		 * Dirty tricks here. Hopefully it should be temporary waiting
		 * for kddm to implement locking on a task basis.
		 */
		struct task_kddm_object *obj;

		obj = _kddm_find_object(task_kddm_set, pid);
		if (likely(obj)) {
			/* Release the reference taken by _kddm_find_object() */
			_kddm_put_object(task_kddm_set, pid);
			if (obj->write_locked)
				up_write(&obj->sem);
			else
				up_read(&obj->sem);
		}
	}
	/* Release the reference taken by krg_task_readlock()/task_writelock() */
	_kddm_put_object(task_kddm_set, pid);
}
Example No. 10
static void kcb_ipc_shm_unlock(struct kern_ipc_perm *ipcp)
{
	int index, deleted = 0;

	index = ipcid_to_idx(ipcp->id);

	if (ipcp->deleted)
		deleted = 1;

	_kddm_put_object(ipcp->krgops->data_kddm_set, index);

	if (!deleted)
		mutex_unlock(&ipcp->mutex);

	rcu_read_unlock();
}
Example No. 11
static struct kern_ipc_perm *kcb_ipc_shm_findkey(struct ipc_ids *ids, key_t key)
{
	long *key_index;
	int id = -1;

	key_index = _kddm_get_object_no_ft(ids->krgops->key_kddm_set, key);

	if (key_index)
		id = *key_index;

	_kddm_put_object(ids->krgops->key_kddm_set, key);

	if (id != -1)
		return kcb_ipc_shm_lock(ids, id);

	return NULL;
}
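
Key lookup goes through a second kddm set mapping keys to segment indices; on a hit the segment itself is locked via kcb_ipc_shm_lock() (Example No. 5). A usage sketch, assuming valid ids and key:

	struct kern_ipc_perm *perm = kcb_ipc_shm_findkey(ids, key);
	if (perm && !IS_ERR(perm)) {
		/* segment found; returned locked as in Example No. 5 */
		kcb_ipc_shm_unlock(perm);
	}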
Example No. 12
struct pid *krg_get_pid(int nr)
{
	struct pid_kddm_object *obj;
	struct pid *pid;

	rcu_read_lock();
	pid = find_kpid(nr);
	rcu_read_unlock();
	/*
	 * No need to get a reference on pid since we know that it is used on
	 * another node: nobody will free it for the moment.
	 */

	if (!pid)
		return no_pid(nr);

	spin_lock(&pid_kddm_lock);
	obj = pid->kddm_obj;
	BUG_ON(!obj);
	BUG_ON(obj->pid != pid);

	if (likely(obj->active)) {
		obj->attach_pending++;
		spin_unlock(&pid_kddm_lock);
		return pid;
	}
	/* Slow path: we must grab the kddm object. */
	spin_unlock(&pid_kddm_lock);

	obj = _kddm_grab_object_no_ft(pid_kddm_set, nr);
	if (IS_ERR(obj))
		return NULL;
	BUG_ON(obj != pid->kddm_obj);
	BUG_ON(obj->pid != pid);

	spin_lock(&pid_kddm_lock);
	__get_pid(obj);
	spin_unlock(&pid_kddm_lock);

	_kddm_put_object(pid_kddm_set, nr);

	return pid;
}
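
Every successful krg_get_pid() either bumps attach_pending (fast path) or goes through __get_pid() (slow path), so it must be balanced by a release. The release helper is not among these examples; krg_put_pid below is an assumed name for it:

	struct pid *pid = krg_get_pid(nr);
	if (pid) {
		/* ... use the pid known to be alive on another node ... */
		krg_put_pid(pid);	/* hypothetical release counterpart */
	}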
Example No. 13
/**
 * @author Louis Rilling
 */
struct task_kddm_object *krg_task_create_writelock(pid_t pid)
{
	struct task_kddm_object *obj;

	/* Filter well known cases of no task kddm object. */
	/* The exact filter is expected to be implemented by the caller. */
	BUG_ON(!(pid & GLOBAL_PID_MASK));

	obj = _kddm_grab_object(task_kddm_set, pid);
	if (likely(obj && !IS_ERR(obj))) {
		down_write(&obj->sem);
		/* No dying object race or this is really smelly */
		BUG_ON(obj->write_locked == 2);
		/* Marker for unlock. Dirty but temporary. */
		obj->write_locked = 1;
	} else {
		_kddm_put_object(task_kddm_set, pid);
	}

	return obj;
}
Example No. 14
static struct pid *no_pid(int nr)
{
	struct pid_namespace *ns;
	struct pid_kddm_object *obj;
	struct pid *pid;

	obj = _kddm_grab_object_no_ft(pid_kddm_set, nr);
	if (IS_ERR(obj))
		return NULL;
	BUG_ON(!obj);

	spin_lock(&pid_kddm_lock);
	rcu_read_lock();
	pid = find_kpid(nr); /* Double check once locked */
	rcu_read_unlock();
	/*
	 * No need to get a reference on pid since we know that it is used on
	 * another node: nobody will free it for the moment.
	 */

	if (!pid) {
		ns = find_get_krg_pid_ns();
		pid = __alloc_pid(ns, &nr);
		put_pid_ns(ns);
		if (!pid)
			goto out_unlock;
		obj->pid = pid;
		pid->kddm_obj = obj;
	}
	BUG_ON(pid->kddm_obj != obj);

	__get_pid(obj);

out_unlock:
	spin_unlock(&pid_kddm_lock);
	_kddm_put_object(pid_kddm_set, nr);

	return pid;
}
Example No. 15
/**
 * @author Pascal Gallard
 */
struct task_kddm_object *krg_task_readlock(pid_t pid)
{
	struct task_kddm_object *obj;

	/* Filter well known cases of no task kddm object. */
	if (!(pid & GLOBAL_PID_MASK))
		return NULL;

	obj = _kddm_get_object_no_ft(task_kddm_set, pid);
	if (likely(obj)) {
		down_read(&obj->sem);
		if (obj->write_locked == 2) {
			/* Dying object */
			up_read(&obj->sem);
			_kddm_put_object(task_kddm_set, pid);
			return NULL;
		}
		/* Marker for unlock. Dirty but temporary. */
		obj->write_locked = 0;
	}

	return obj;
}
Example No. 16
int static_cpu_info_init(void)
{
	krg_static_cpu_info_t *static_cpu_info;
	int cpu_id, i;

	register_io_linker(STATIC_CPU_INFO_LINKER, &static_cpu_info_io_linker);

	/* Create the CPU info kddm set */

	static_cpu_info_kddm_set =
		create_new_kddm_set(kddm_def_ns,
				    STATIC_CPU_INFO_KDDM_ID,
				    STATIC_CPU_INFO_LINKER,
				    KDDM_CUSTOM_DEF_OWNER,
				    sizeof(krg_static_cpu_info_t),
				    0);
	if (IS_ERR(static_cpu_info_kddm_set))
		OOM;

	for_each_online_cpu (i) {
		cpu_id = krg_cpu_id(i);
		cpu_data(i).krg_cpu_id = cpu_id;

		static_cpu_info =
			_kddm_grab_object(static_cpu_info_kddm_set, cpu_id);

		static_cpu_info->info = cpu_data(i);
#ifndef CONFIG_USERMODE
		static_cpu_info->info.cpu_khz = cpu_khz;
#endif

		_kddm_put_object(static_cpu_info_kddm_set, cpu_id);
	}

	return 0;
}