Example #1
int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;
	unsigned long flags;
	int ret;

	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&lock->wait_lock, flags);
	if (rt_mutex_real_owner(lock) == current) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		rwlock->read_depth++;
		/*
		 * NOTE: we handle it as a write-lock:
		 */
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	spin_unlock_irqrestore(&lock->wait_lock, flags);

	ret = rt_mutex_trylock(lock);
	if (ret)
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
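A minimal caller sketch for the trylock path above — it assumes the RT patch set's rt_read_unlock() balances a successful trylock; the helper and datum names are hypothetical:

static int shared_value;	/* hypothetical datum guarded by the rwlock */

static int peek_shared(rwlock_t *rwlock, int *out)
{
	if (!rt_read_trylock(rwlock))
		return -EBUSY;		/* contended: caller can retry later */
	*out = shared_value;
	rt_read_unlock(rwlock);		/* assumed counterpart to the trylock */
	return 0;
}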
Example #2
int destroy_cache(cache_t *c)
{
	printk(1, "[cache]: Destroying cache '%s'...\n", c->name);
	rwlock_acquire(c->rwl, RWL_WRITER);
	chash_t *h = c->hash;
	c->hash = 0;
	sync_cache(c);
	/* Destroy the tree */
	chash_destroy(h);
	
	struct llistnode *curnode, *next;
	struct ce_t *obj;
	ll_for_each_entry_safe(&c->primary_ll, curnode, next, struct ce_t *, obj)
	{
		ll_maybe_reset_loop(&c->primary_ll, curnode, next);
		remove_element(c, obj, 1);
	}
	ll_destroy(&c->dirty_ll);
	ll_destroy(&c->primary_ll);
	ll_remove_entry(cache_list, c);
	rwlock_release(c->rwl, RWL_WRITER);
	rwlock_destroy(c->rwl);
	printk(1, "[cache]: Cache '%s' destroyed\n", c->name);
	return 1;
}
Example #3
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
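For orientation, here is a hedged reconstruction (not verbatim kernel source) of the matching unlock path under the same conventions: lockdep is notified first, then the raw unlock, then preemption and bottom halves come back in the reverse order of the acquire above:

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);	/* release the lockdep claim first */
	_raw_write_unlock(lock);
	preempt_enable_no_resched();	/* undo preempt_disable() */
	local_bh_enable();		/* undo local_bh_disable(); may run pending softirqs */
}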
Example #4
int do_cache_object(cache_t *c, u64 id, u64 key, int sz, char *buf, int dirty)
{
	accessed_cache(c);
	rwlock_acquire(c->rwl, RWL_WRITER);
	struct ce_t *obj = chash_search(c->hash, id, key);
	if(obj)
	{
		memcpy(obj->data, buf, obj->length);
		set_dirty(c, obj, dirty);
		rwlock_release(c->rwl, RWL_WRITER);
		return 0;
	}
	if(!should_element_be_added(c))
	{
		u64 a, b;
		struct ce_t *q;
		if((q = chash_get_any_object(c->hash, &a, &b)))
		{
			if(q->dirty)
				do_sync_element(c, q, 1);
			remove_element(c, q, 1);
		}
	}
	obj = (struct ce_t *)kmalloc(sizeof(struct ce_t));
	obj->data = (char *)kmalloc(sz);
	obj->length = sz;
	obj->rwl = rwlock_create(0);
	memcpy(obj->data, buf, sz);
	obj->key = key;
	obj->id = id;
	set_dirty(c, obj, dirty);
	cache_add_element(c, obj, 1);
	rwlock_release(c->rwl, RWL_WRITER);
	return 0;
}
Example #5
struct ce_t *find_cache_element(cache_t *c, u64 id, u64 key)
{
	accessed_cache(c);
	rwlock_acquire(c->rwl, RWL_READER);
	struct ce_t *ret = c->hash ? chash_search(c->hash, id, key) : 0;
	rwlock_release(c->rwl, RWL_READER);
	return ret;
}
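Note that c->rwl is dropped before the pointer is returned, so the element is only as safe as whatever higher-level invariant keeps it alive. A hedged caller sketch using the per-element lock that do_cache_object() in Example #4 creates:

static int read_cached(cache_t *c, u64 id, u64 key, char *buf)	/* hypothetical helper */
{
	struct ce_t *e = find_cache_element(c, id, key);
	if(!e)
		return -1;
	/* assumes e cannot be reclaimed concurrently and buf holds e->length bytes */
	rwlock_acquire(e->rwl, RWL_READER);
	memcpy(buf, e->data, e->length);
	rwlock_release(e->rwl, RWL_READER);
	return 0;
}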
Example #6
int kernel_cache_sync()
{
	struct llistnode *cur;
	cache_t *ent;
	rwlock_acquire(&cache_list->rwl, RWL_READER);
	ll_for_each_entry(cache_list, cur, cache_t *, ent)
	{
		sync_cache(ent);
	}
	/* assumed completion of the truncated snippet: drop the read lock */
	rwlock_release(&cache_list->rwl, RWL_READER);
	return 0;
}
Example #7
/*
 * rwlock_t functions
 */
int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
	int ret = rt_mutex_trylock(&rwlock->lock);

	if (ret)
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
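A hedged usage sketch: try the fast path first and fall back to the blocking rt_write_lock() from Example #29, with rt_write_unlock() assumed to be the counterpart:

static void update_shared(rwlock_t *rwlock)	/* hypothetical helper */
{
	if (!rt_write_trylock(rwlock))
		rt_write_lock(rwlock);	/* slow path: block until the lock is free */
	/* ... modify state guarded by rwlock ... */
	rt_write_unlock(rwlock);	/* assumed counterpart */
}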
Example #8
int fs_link(struct inode *dir, struct inode *target, const char *name, size_t namelen, bool allow_incomplete_directories)
{
	if(!vfs_inode_check_permissions(dir, MAY_WRITE, 0))
		return -EACCES;
	if(!S_ISDIR(dir->mode))
		return -ENOTDIR;
	rwlock_acquire(&dir->lock, RWL_WRITER);
	if(S_ISDIR(dir->mode) && (dir->nlink == 1) && !allow_incomplete_directories) {
		rwlock_release(&dir->lock, RWL_WRITER);
		return -ENOSPC;
	}
	rwlock_acquire(&target->metalock, RWL_WRITER);
	int r = fs_callback_inode_link(dir, target, name, namelen);
	if(!r)
		atomic_fetch_add(&target->nlink, 1);
	rwlock_release(&target->metalock, RWL_WRITER);
	rwlock_release(&dir->lock, RWL_WRITER);
	return r;
}
Example #9
int do_sync_element(cache_t *c, struct ce_t *e, int locked)
{
	int ret=0;
	if(!locked) rwlock_acquire(c->rwl, RWL_WRITER);
	if(c->sync)
		ret = c->sync(e);
	set_dirty(c, e, 0);
	if(!locked) rwlock_release(c->rwl, RWL_WRITER);
	return ret;
}
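The locked flag encodes a common convention: a caller that already holds c->rwl as a writer passes locked=1 so the function skips its own locking, while a standalone caller passes locked=0. A hedged illustration of both call sites:

/* Standalone caller: do_sync_element() takes and drops c->rwl itself. */
do_sync_element(c, e, 0);

/* Caller already inside a write-locked section, as sync_cache() in
 * Example #24 does; passing 1 avoids re-acquiring the held lock. */
rwlock_acquire(c->rwl, RWL_WRITER);
do_sync_element(c, e, 1);
rwlock_release(c->rwl, RWL_WRITER);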
Example #10
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
	return flags;
}
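The returned flags must travel back to the matching restore. A hedged pairing sketch; the unlock name follows the kernel's usual convention and is an assumption here:

static void guarded_increment(rwlock_t *lock, unsigned long *counter)	/* hypothetical */
{
	unsigned long flags = _write_lock_irqsave(lock);
	(*counter)++;	/* critical section runs with local IRQs off */
	_write_unlock_irqrestore(lock, flags);	/* assumed counterpart */
}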
Example #11
int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
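The asymmetry above — preempt_enable() only on the failure path — exists because a successful trylock must keep preemption disabled until the matching unlock. A hedged guard sketch:

static void try_fast_update(rwlock_t *lock)	/* hypothetical helper */
{
	if (_write_trylock(lock)) {
		/* preemption stays disabled here, matching the success path above */
		/* ... short critical section ... */
		_write_unlock(lock);	/* assumed to unlock and re-enable preemption */
	}
}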
Example #12
int cache_add_element(cache_t *c, struct ce_t *obj, int locked)
{
	accessed_cache(c);
	if(!locked) rwlock_acquire(c->rwl, RWL_WRITER);
	
	chash_add(c->hash, obj->id, obj->key, obj);
	obj->list_node = ll_insert(&c->primary_ll, obj);
	c->count++;
	obj->acount=1;
	if(!locked) rwlock_release(c->rwl, RWL_WRITER);
	return 0;
}
Example #13
void task_unblock_all(struct llist *list)
{
	int old = set_int(0);
	rwlock_acquire(&list->rwl, RWL_WRITER);
	struct llistnode *cur, *next;
	task_t *entry;
	ll_for_each_entry_safe(list, cur, next, task_t *, entry)
	{
		entry->blocklist = 0;
		assert(entry->blocknode == cur);
		ll_do_remove(list, cur, 1);
		tqueue_insert(((cpu_t *)entry->cpu)->active_queue, (void *)entry, entry->activenode);
		task_resume(entry);
	}
	/* assumed completion of the truncated snippet: unlock, restore interrupts */
	rwlock_release(&list->rwl, RWL_WRITER);
	set_int(old);
}
Example #14
ext2_fs_t *get_fs(int v)
{
	rwlock_acquire(&fslist->rwl, RWL_READER);
	struct llistnode *cur;
	ext2_fs_t *f;
	ll_for_each_entry(fslist, cur, ext2_fs_t *, f)
	{
		if(f->flag == v)
		{
			rwlock_release(&fslist->rwl, RWL_READER);
			return f;
		}
	}
	rwlock_release(&fslist->rwl, RWL_READER);
	return 0;
}
Example #15
int sys_dirstat(char *dir, unsigned num, char *namebuf, struct stat *statbuf)
{
	if(!namebuf || !statbuf || !dir)
		return -EINVAL;
	struct inode *i = read_dir(dir, num);
	if(!i)
		return -ESRCH;
	do_stat(i, statbuf);
	strncpy(namebuf, i->name, 128);
	if(i->dynamic) 
	{
		rwlock_acquire(&i->rwl, RWL_WRITER);
		free_inode(i, 0);
	}
	return 0;
}
Example #16
int sys_getdents(int fd, struct dirent_posix *dirs, unsigned int count)
{
	struct file *f = file_get(fd);
	if(!f) return -EBADF;

	unsigned nex;
	if(!vfs_inode_check_permissions(f->inode, MAY_READ, 0)) {
		file_put(f);
		return -EACCES;
	}
	rwlock_acquire(&f->inode->lock, RWL_READER);
	int r = fs_callback_inode_getdents(f->inode, f->pos, dirs, count, &nex);
	rwlock_release(&f->inode->lock, RWL_READER);
	if(r >= 0)
		f->pos = nex; /* only advance on success; nex may be unset on error */

	file_put(f);
	return r;
}
Example #17
struct inode *do_readdir(struct inode *i, int num)
{
	assert(i);
	int n = num;
	if(!is_directory(i))
		return 0;
	if(!permissions(i, MAY_READ))
		return 0;
	struct inode *c=0;
	if(!i->dynamic) {
		rwlock_acquire(&i->rwl, RWL_READER);
		struct llistnode *cur;
		ll_for_each_entry((&i->children), cur, struct inode *, c)
		{
			if(!n--) break;
		}
		rwlock_release(&i->rwl, RWL_READER);
	}
	/* assumed completion of the truncated snippet */
	return c;
}
Example #18
/* WARNING: This does not sync!!! */
void remove_element(cache_t *c, struct ce_t *o, int locked)
{
	if(!o) return;
	if(o->dirty)
		panic(PANIC_NOSYNC, "tried to remove non-sync'd element");
	
	if(!locked) rwlock_acquire(c->rwl, RWL_WRITER);
	if(o->dirty)
		set_dirty(c, o, 0);
	assert(c->count);
	sub_atomic(&c->count, 1);
	ll_remove(&c->primary_ll, o->list_node);
	if(c->hash) chash_delete(c->hash, o->id, o->key);
	if(o->data)
		kfree(o->data);
	rwlock_destroy(o->rwl);
	kfree(o);
	if(!locked) rwlock_release(c->rwl, RWL_WRITER);
}
Example #19
int destroy_all_id(cache_t *c, u64 id)
{
	rwlock_acquire(c->rwl, RWL_WRITER);
	struct llistnode *curnode, *next;
	struct ce_t *obj;
	ll_for_each_entry_safe(&c->primary_ll, curnode, next, struct ce_t *, obj)
	{
		if(obj->id == id)
		{
			if(obj->dirty)
				do_sync_element(c, obj, 1);
			ll_maybe_reset_loop(&c->primary_ll, curnode, next);
			remove_element(c, obj, 1);
		}
		
	}
	rwlock_release(c->rwl, RWL_WRITER);
	return 0;
}
Example #20
int sys_dirstat_fd(int fd, unsigned num, char *namebuf, struct stat *statbuf)
{
	if(!namebuf || !statbuf)
		return -EINVAL;
	struct file *f = get_file_pointer((task_t *)current_task, fd);
	if(!f) return -EBADF;
	struct inode *i = read_idir(f->inode, num);
	fput((task_t *)current_task, fd, 0);
	if(!i)
		return -ESRCH;
	do_stat(i, statbuf);
	strncpy(namebuf, i->name, 128);
	if(i->dynamic) 
	{
		rwlock_acquire(&i->rwl, RWL_WRITER);
		free_inode(i, 0);
	}
	return 0;
}
Example #21
void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
	unsigned long flags;
	struct rt_mutex *lock = &rwlock->lock;

	/*
	 * NOTE: we handle it as a write-lock:
	 */
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	/*
	 * Read locks within the write lock succeed.
	 */
	spin_lock_irqsave(&lock->wait_lock, flags);
	if (rt_mutex_real_owner(lock) == current) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		rwlock->read_depth++;
		return;
	}
	spin_unlock_irqrestore(&lock->wait_lock, flags);
	__rt_spin_lock(lock);
}
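The owner check above makes the read side recursive under a self-held write lock, with read_depth counting the nesting. A hedged sketch of the pattern the comments describe:

static void writer_takes_read_side(rwlock_t *rwlock)	/* hypothetical helper */
{
	rt_write_lock(rwlock);
	rt_read_lock(rwlock);	/* owner == current: only read_depth is bumped */
	/* ... read while still holding the write lock ... */
	rt_read_unlock(rwlock);	/* assumed to decrement read_depth */
	rt_write_unlock(rwlock);
}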
Example #22
/* this function is called when a reference to a dirent is released. There is one
 * special thing to note: according to POSIX, an unlinked dirent is only actually
 * deleted when the last reference to it is dropped. Because of that, unlink just
 * sets a flag which tells this function to do the actual deletion. In either case,
 * this function doesn't deallocate anything; it just moves the dirent to an LRU queue. */
int vfs_dirent_release(struct dirent *dir)
{
	int r = 0;
	struct inode *parent = dir->parent;
	rwlock_acquire(&parent->lock, RWL_WRITER);
	if(atomic_fetch_sub(&dir->count, 1) == 1) {
		if(dir->flags & DIRENT_UNLINK) {
			struct inode *target = fs_dirent_readinode(dir, false);
			/* Well, sadly, target being null is a pretty bad thing,
			 * but we can't panic, because the filesystem could be
			 * set up to actually act like this... :( */
			vfs_inode_del_dirent(parent, dir);
			if(!target) {
				printk(KERN_ERROR, "belated unlink failed to read target inode: %s - %d\n",
						dir->name, dir->ino);
			} else {
				r = fs_callback_inode_unlink(parent, dir->name, dir->namelen, target);
				if(!r) {
					assert(target->nlink > 0);
					atomic_fetch_sub(&target->nlink, 1);
					if(!target->nlink && (target->flags & INODE_DIRTY))
						vfs_inode_unset_dirty(target);
				}
				vfs_icache_put(target);
			}
			vfs_dirent_destroy(dir);
		} else {
			/* add to LRU */
			queue_enqueue_item(dirent_lru, &dir->lru_item, dir);
		}
		rwlock_release(&parent->lock, RWL_WRITER);
		/* So, here's the thing. Technically, we still have a pointer that points
		 * to parent: in dir->parent. We just have to make sure that each time
		 * we use this pointer, we don't screw up */
		vfs_icache_put(parent); /* for the dir->parent pointer */
	} else
		rwlock_release(&parent->lock, RWL_WRITER);

	return r;
}
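The unlink side the comment refers to is not shown here; a hedged sketch of the minimal flag-setting it describes, with the helper name hypothetical:

static void dirent_mark_unlinked(struct dirent *dir)
{
	/* actual deletion is deferred to the last vfs_dirent_release() */
	dir->flags |= DIRENT_UNLINK;
}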
Example #23
size_t fs_dirent_reclaim_lru(void)
{
	mutex_acquire(dirent_cache_lock);
	struct queue_item *qi = queue_dequeue_item(dirent_lru);
	if(!qi) {
		mutex_release(dirent_cache_lock);
		return 0;
	}
	struct dirent *dir = qi->ent;
	struct inode *parent = dir->parent;
	rwlock_acquire(&parent->lock, RWL_WRITER);
	atomic_fetch_add(&parent->count, 1);
	if(dir && dir->count == 0) {
		/* reclaim this node */
		vfs_inode_del_dirent(parent, dir);
		vfs_dirent_destroy(dir);
	}
	atomic_fetch_sub(&parent->count, 1);
	rwlock_release(&parent->lock, RWL_WRITER);
	mutex_release(dirent_cache_lock);
	return sizeof(struct dirent);
}
Example #24
void sync_cache(cache_t *c)
{
	if(!c->dirty || !c->sync) return;
	accessed_cache(c);
	printk(0, "[cache]: Cache '%s' is syncing\n", c->name);
	volatile unsigned int num = c->dirty;
	volatile unsigned int i=1;
	struct ce_t *obj;
	c->syncing=1;
	while(c->dirty > 0)
	{
		rwlock_acquire(c->rwl, RWL_WRITER);
		if(c->dirty == 0) {
			c->syncing = 0;
			rwlock_release(c->rwl, RWL_WRITER);
			break;
		}
		assert(c->dirty_ll.head);
		obj = c->dirty_ll.head->entry;
		if(num < (c->dirty+i))
			num=(c->dirty+i);
		
		printk((kernel_state_flags & KSF_SHUTDOWN) ? 4 : 0, "\r[cache]: Syncing '%s': %d/%d (%d.%d%%)...   "
				,c->name, i, num, (i*100)/num, ((i*1000)/num) % 10);
		
		do_sync_element(c, obj, 1);
		rwlock_release(c->rwl, RWL_WRITER);
		
		if(got_signal(current_task))
			return;
		i++;
	}
	
	c->syncing=0;
	printk((kernel_state_flags & KSF_SHUTDOWN) ? 4 : 0, "\r[cache]: Syncing '%s': %d/%d (%d.%d%%)\n"
			, c->name, num, num, 100, 0);
	printk(0, "[cache]: Cache '%s' has sunk\n", c->name);
}
Example #25
size_t fs_inode_reclaim_lru(void)
{
	int released = 0;
	mutex_acquire(ic_lock);
	struct queue_item *qi = queue_dequeue_item(ic_lru);
	if(!qi) {
		mutex_release(ic_lock);
		return 0;
	}
	struct inode *remove = qi->ent;
	assert(remove);
	/* there's a subtlety here: If this reclaim and the dirent reclaim
	 * run at the same time, there could be an issue. Since the inode
	 * in the dirent reclaim may have a zero-count, we have to make sure
	 * that it doesn't free the inode in the middle of the dirent being freed.
	 * that's why the rwlock is acquired in both. */
	rwlock_acquire(&remove->lock, RWL_WRITER);
	if(!remove->dirents.count) {
		assert(!remove->count);
		assert(!(remove->flags & INODE_INUSE));
		assert(!remove->dirents.count);
		//printk(0, "reclaim node %d\n", remove->id);
		if(remove->filesystem) {
			uint32_t key[2] = {remove->filesystem->id, remove->id};
			hash_delete(icache, key, sizeof(key));
		}
		fs_inode_push(remove);
		rwlock_release(&remove->lock, RWL_WRITER);
		vfs_inode_destroy(remove);
		released = 1;
	} else {
		queue_enqueue_item(ic_lru, qi, remove);
		rwlock_release(&remove->lock, RWL_WRITER);
	}
	mutex_release(ic_lock);
	return released ? sizeof(struct inode) : 0;
}
Example #26
int __KT_try_releasing_tasks()
{
	struct llistnode *cur;
	rwlock_acquire(&kill_queue->rwl, RWL_WRITER);
	if(ll_is_empty(kill_queue))
	{
		rwlock_release(&kill_queue->rwl, RWL_WRITER);
		return 0;
	}
	task_t *t=0;
	ll_for_each_entry(kill_queue, cur, task_t *, t)
	{
		/* need to check for orphaned zombie tasks */
		if(t->flags & TF_BURIED && (t != ((cpu_t *)t->cpu)->cur)) {
			if(t->parent == 0 || t->parent->state == TASK_DEAD || (t->parent->flags & TF_KTASK) || t->parent == kernel_task)
				move_task_to_kill_queue(t, 0);
			if(t->flags & TF_KILLREADY)
				break;
		}
	}
	if(!t || !((t->flags & TF_BURIED) && (t->flags & TF_KILLREADY)))
	{
		rwlock_release(&kill_queue->rwl, RWL_WRITER);
		return 0;
	}
	assert(cur->entry == t);
	void *node = ll_do_remove(kill_queue, cur, 1);
	assert(node == cur);
	int ret = 0;
	if(!ll_is_empty(kill_queue))
		ret = 1;
	rwlock_release(&kill_queue->rwl, RWL_WRITER);
	release_task(t);
	kfree(cur);
	return ret;
}
Example #27
/* This function returns the directory entry associated with the name 'name' under
 * the inode 'node'. It must be careful to look up the entry in the cache first. */
struct dirent *fs_dirent_lookup(struct inode *node, const char *name, size_t namelen)
{
	if(!vfs_inode_check_permissions(node, MAY_READ, 0))
		return 0;
	if(!S_ISDIR(node->mode))
		return 0;
	if(node == current_process->root && !strncmp(name, "..", 2) && namelen == 2)
		return fs_dirent_lookup(node, ".", 1);
	mutex_acquire(dirent_cache_lock);
	rwlock_acquire(&node->lock, RWL_WRITER);
	struct dirent *dir = vfs_inode_get_dirent(node, name, namelen);
	if(!dir) {
		dir = vfs_dirent_create(node);
		dir->count = 1;
		strncpy(dir->name, name, namelen);
		dir->namelen = namelen;
		int r = fs_callback_inode_lookup(node, name, namelen, dir);
		if(r) {
			dir->count = 0;
			vfs_dirent_destroy(dir);
			rwlock_release(&node->lock, RWL_WRITER);
			mutex_release(dirent_cache_lock);
			return 0;
		}
		vfs_inode_get(node);
		vfs_inode_add_dirent(node, dir);
	} else {
		if(atomic_fetch_add(&dir->count, 1) == 0) {
			fs_dirent_remove_lru(dir);
			vfs_inode_get(node);
		}
	}
	rwlock_release(&node->lock, RWL_WRITER);
	mutex_release(dirent_cache_lock);
	return dir;
}
Example #28
void enter_mono_region(void)
{
    rwlock_acquire(&rwlock, true);
}
Example #29
void __lockfunc rt_write_lock(rwlock_t *rwlock)
{
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(&rwlock->lock);
}
Example #30
void enter_multi_region(void)
{
    rwlock_acquire(&rwlock, false);
}
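Examples #28 and #30 use a different rwlock flavor whose second argument selects writer (true) or reader (false) mode. Hedged sketches of the matching exits, assuming this variant's release routine has the same shape — its real signature is not shown above:

void leave_mono_region(void)    /* hypothetical counterpart */
{
    rwlock_release(&rwlock, true);
}

void leave_multi_region(void)   /* hypothetical counterpart */
{
    rwlock_release(&rwlock, false);
}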