Example #1
int do_cache_object(cache_t *c, u64 id, u64 key, int sz, char *buf, int dirty)
{
	accessed_cache(c);
	rwlock_acquire(c->rwl, RWL_WRITER);
	struct ce_t *obj = chash_search(c->hash, id, key);
	if(obj)
	{
		memcpy(obj->data, buf, obj->length);
		set_dirty(c, obj, dirty);
		rwlock_release(c->rwl, RWL_WRITER);
		return 0;
	}
	if(!should_element_be_added(c))
	{
		u64 a, b;
		struct ce_t *q;
		if((q = chash_get_any_object(c->hash, &a, &b)))
		{
			if(q->dirty)
				do_sync_element(c, q, 1);
			remove_element(c, q, 1);
		}
	}
	obj = (struct ce_t *)kmalloc(sizeof(struct ce_t));
	obj->data = (char *)kmalloc(sz);
	obj->length = sz;
	obj->rwl = rwlock_create(0);
	memcpy(obj->data, buf, sz);
	obj->key = key;
	obj->id = id;
	set_dirty(c, obj, dirty);
	cache_add_element(c, obj, 1);
	rwlock_release(c->rwl, RWL_WRITER);
	return 0;
}
Example #2
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
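These lockdep-annotated helpers sit behind the public kernel rwlock API: a writer takes the lock with write_lock_irqsave() and drops it with write_unlock_irqrestore(), which ends up in a helper like the one above (exact internal naming varies by kernel version). A minimal caller-side sketch, with a hypothetical lock and update function that are not taken from the snippets:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);  /* hypothetical rwlock protecting shared_value */
static int shared_value;

static void example_update(int v)
{
	unsigned long flags;

	/* take the writer side with local interrupts disabled */
	write_lock_irqsave(&example_lock, flags);
	shared_value = v;
	/* release the writer side and restore the saved IRQ state */
	write_unlock_irqrestore(&example_lock, flags);
}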
Example #3
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
Example #4
ext2_fs_t *get_fs(int v)
{
	rwlock_acquire(&fslist->rwl, RWL_READER);
	struct llistnode *cur;
	ext2_fs_t *f;
	ll_for_each_entry(fslist, cur, ext2_fs_t *, f)
	{
		if(f->flag == v)
		{
			rwlock_release(&fslist->rwl, RWL_READER);
			return f;
		}
	}
	rwlock_release(&fslist->rwl, RWL_READER);
	return 0;
}
Example #5
int destroy_cache(cache_t *c)
{
	printk(1, "[cache]: Destroying cache '%s'...\n", c->name);
	rwlock_acquire(c->rwl, RWL_WRITER);
	chash_t *h = c->hash;
	c->hash = 0;
	sync_cache(c);
	/* Destroy the tree */
	chash_destroy(h);
	
	struct llistnode *curnode, *next;
	struct ce_t *obj;
	ll_for_each_entry_safe(&c->primary_ll, curnode, next, struct ce_t *, obj)
	{
		ll_maybe_reset_loop(&c->primary_ll, curnode, next);
		remove_element(c, obj, 1);
	}
	ll_destroy(&c->dirty_ll);
	ll_destroy(&c->primary_ll);
	ll_remove_entry(cache_list, c);
	rwlock_release(c->rwl, RWL_WRITER);
	rwlock_destroy(c->rwl);
	printk(1, "[cache]: Cache '%s' destroyed\n", c->name);
	return 1;
}
Example #6
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
Example #7
struct ce_t *find_cache_element(cache_t *c, u64 id, u64 key)
{
	accessed_cache(c);
	rwlock_acquire(c->rwl, RWL_READER);
	struct ce_t *ret = c->hash ? chash_search(c->hash, id, key) : 0;
	rwlock_release(c->rwl, RWL_READER);
	return ret;
}
Example #8
void __lockfunc rt_read_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);

	/* Release the lock only when read_depth is down to 0 */
	if (--rwlock->read_depth == 0)
		__rt_spin_unlock(&rwlock->lock);
}
Example #9
int bacstack_session_send(bacstack_session_t *session, bacstack_message_t *message)
{
  assert(session != NULL);
  assert(message != NULL);
  int ret = 0;

  bacstack_route_t *route = NULL;
  int has_route = 0;
  int route_local = 0;
  uint8_t port_id = 0;
  bacstack_mac_t mac;

  if(message->endpoint.is_device_instance) {
    // the message has been addressed using a device instance,
    // which we must resolve to a port/mac here.

    // since we don't have a device table yet, we are unable
    // to send these sorts of message
  }
  
  if(!message->endpoint.is_device_instance) {
    // the message has been addressed using a network/mac
    // pair, and we must resolve our route to the network here

    if(!rwlock_acquire_read(&session->routetable_lock)) {
      warn("failed to acquire routetable read lock");
      goto done;
    }

    if(bacstack_routetable_get_route(
          &session->routetable,
          message->endpoint.network_number,
          &route)) {
      has_route = 1;
      route_local = route->is_local;
      port_id = route->port_id;
      mac = route->next_hop_mac;
    }

    if(!rwlock_release(&session->routetable_lock)) {
      error("failed to release routetable read lock");
      goto done;
    }
  }

  if(!has_route) {
    // we failed to find a route to the network,
    // so we should search for the network, and queue
    // this message to be sent when and if we find
    // that new network
    

  }

done:
  return ret;
}
Example #10
int fs_link(struct inode *dir, struct inode *target, const char *name, size_t namelen, bool allow_incomplete_directories)
{
	if(!vfs_inode_check_permissions(dir, MAY_WRITE, 0))
		return -EACCES;
	if(!S_ISDIR(dir->mode))
		return -ENOTDIR;
	rwlock_acquire(&dir->lock, RWL_WRITER);
	if(S_ISDIR(dir->mode) && (dir->nlink == 1) && !allow_incomplete_directories) {
		rwlock_release(&dir->lock, RWL_WRITER);
		return -ENOSPC;
	}
	rwlock_acquire(&target->metalock, RWL_WRITER);
	int r = fs_callback_inode_link(dir, target, name, namelen);
	if(!r)
		atomic_fetch_add(&target->nlink, 1);
	rwlock_release(&target->metalock, RWL_WRITER);
	rwlock_release(&dir->lock, RWL_WRITER);
	return r;
}
Example #11
int do_sync_element(cache_t *c, struct ce_t *e, int locked)
{
	int ret=0;
	if(!locked) rwlock_acquire(c->rwl, RWL_WRITER);
	if(c->sync)
		ret = c->sync(e);
	set_dirty(c, e, 0);
	if(!locked) rwlock_release(c->rwl, RWL_WRITER);
	return ret;
}
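The third argument follows a convention visible throughout this cache API (Examples #1, #14 and #22): pass 0 to let the helper take c->rwl itself, or 1 when the caller already holds it as a writer. A short sketch under that assumption; flush_and_remove is a hypothetical caller, not part of the original code:

static int flush_and_remove(cache_t *c, struct ce_t *e)
{
	int ret;

	/* the caller takes the writer lock once... */
	rwlock_acquire(c->rwl, RWL_WRITER);
	/* ...and passes locked = 1 so the helpers do not re-acquire it */
	ret = do_sync_element(c, e, 1);
	remove_element(c, e, 1);
	rwlock_release(c->rwl, RWL_WRITER);
	return ret;
}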
Example #12
int cache_add_element(cache_t *c, struct ce_t *obj, int locked)
{
	accessed_cache(c);
	if(!locked) rwlock_acquire(c->rwl, RWL_WRITER);
	
	chash_add(c->hash, obj->id, obj->key, obj);
	obj->list_node = ll_insert(&c->primary_ll, obj);
	c->count++;
	obj->acount=1;
	if(!locked) rwlock_release(c->rwl, RWL_WRITER);
	return 0;
}
Example #13
/* this function is called when a reference to a dirent is released. There is one
 * special thing to note: according to POSIX, an unlinked dirent is only actually
 * deleted when the last reference to it is dropped. Because of that, unlink just
 * sets a flag which tells this function to do the actual deletion. In either case,
 * this function doesn't deallocate anything, it just moves it to an LRU queue. */
int vfs_dirent_release(struct dirent *dir)
{
	int r = 0;
	struct inode *parent = dir->parent;
	rwlock_acquire(&parent->lock, RWL_WRITER);
	if(atomic_fetch_sub(&dir->count, 1) == 1) {
		if(dir->flags & DIRENT_UNLINK) {
			struct inode *target = fs_dirent_readinode(dir, false);
			/* Well, sadly, target being null is a pretty bad thing,
			 * but we can't panic, because the filesystem could be
			 * set up to actually act like this... :( */
			vfs_inode_del_dirent(parent, dir);
			if(!target) {
				printk(KERN_ERROR, "belated unlink failed to read target inode: %s - %d\n",
						dir->name, dir->ino);
			} else {
				r = fs_callback_inode_unlink(parent, dir->name, dir->namelen, target);
				if(!r) {
					assert(target->nlink > 0);
					atomic_fetch_sub(&target->nlink, 1);
					if(!target->nlink && (target->flags & INODE_DIRTY))
						vfs_inode_unset_dirty(target);
				}
				vfs_icache_put(target);
			}
			vfs_dirent_destroy(dir);
		} else {
			/* add to LRU */
			queue_enqueue_item(dirent_lru, &dir->lru_item, dir);
		}
		rwlock_release(&parent->lock, RWL_WRITER);
		/* So, here's the thing. Technically, we still have a pointer that points
		 * to parent: in dir->parent. We just have to make sure that each time
		 * we use this pointer, we don't screw up */
		vfs_icache_put(parent); /* for the dir->parent pointer */
	} else
		rwlock_release(&parent->lock, RWL_WRITER);

	return r;
}
Example #14
void sync_cache(cache_t *c)
{
	if(!c->dirty || !c->sync) return;
	accessed_cache(c);
	printk(0, "[cache]: Cache '%s' is syncing\n", c->name);
	volatile unsigned int num = c->dirty;
	volatile unsigned int i=1;
	struct ce_t *obj;
	c->syncing=1;
	while(c->dirty > 0)
	{
		rwlock_acquire(c->rwl, RWL_WRITER);
		if(c->dirty == 0) {
			c->syncing = 0;
			rwlock_release(c->rwl, RWL_WRITER);
			break;
		}
		assert(c->dirty_ll.head);
		obj = c->dirty_ll.head->entry;
		if(num < (c->dirty+i))
			num=(c->dirty+i);
		
		printk((kernel_state_flags & KSF_SHUTDOWN) ? 4 : 0, "\r[cache]: Syncing '%s': %d/%d (%d.%d%%)...   "
				,c->name, i, num, (i*100)/num, ((i*1000)/num) % 10);
		
		do_sync_element(c, obj, 1);
		rwlock_release(c->rwl, RWL_WRITER);
		
		if(got_signal(current_task))
			return;
		i++;
	}
	
	c->syncing=0;
	printk((kernel_state_flags & KSF_SHUTDOWN) ? 4 : 0, "\r[cache]: Syncing '%s': %d/%d (%d.%d%%)\n"
			, c->name, num, num, 100, 0);
	printk(0, "[cache]: Cache '%s' has sunk\n", c->name);
}
Example #15
size_t fs_inode_reclaim_lru(void)
{
	int released = 0;
	mutex_acquire(ic_lock);
	struct queue_item *qi = queue_dequeue_item(ic_lru);
	if(!qi) {
		mutex_release(ic_lock);
		return 0;
	}
	struct inode *remove = qi->ent;
	assert(remove);
	/* there's a subtlety here: If this reclaim and the dirent reclaim
	 * run at the same time, there could be an issue. Since the inode
	 * in the dirent reclaim may have a zero-count, we have to make sure
	 * that it doesn't free the inode in the middle of the dirent being freed.
	 * that's why the rwlock is acquired in both. */
	rwlock_acquire(&remove->lock, RWL_WRITER);
	if(!remove->dirents.count) {
		assert(!remove->count);
		assert(!(remove->flags & INODE_INUSE));
		assert(!remove->dirents.count);
		//printk(0, "reclaim node %d\n", remove->id);
		if(remove->filesystem) {
			uint32_t key[2] = {remove->filesystem->id, remove->id};
			hash_delete(icache, key, sizeof(key));
		}
		fs_inode_push(remove);
		rwlock_release(&remove->lock, RWL_WRITER);
		vfs_inode_destroy(remove);
		released = 1;
	} else {
		queue_enqueue_item(ic_lru, qi, remove);
		rwlock_release(&remove->lock, RWL_WRITER);
	}
	mutex_release(ic_lock);
	return released ? sizeof(struct inode) : 0;
}
Example #16
/* This function returns the directory entry associated with the name 'name' under
 * the inode 'node'. It must be careful to lookup the entry in the cache first. */
struct dirent *fs_dirent_lookup(struct inode *node, const char *name, size_t namelen)
{
	if(!vfs_inode_check_permissions(node, MAY_READ, 0))
		return 0;
	if(!S_ISDIR(node->mode))
		return 0;
	if(node == current_process->root && !strncmp(name, "..", 2) && namelen == 2)
		return fs_dirent_lookup(node, ".", 1);
	mutex_acquire(dirent_cache_lock);
	rwlock_acquire(&node->lock, RWL_WRITER);
	struct dirent *dir = vfs_inode_get_dirent(node, name, namelen);
	if(!dir) {
		dir = vfs_dirent_create(node);
		dir->count = 1;
		strncpy(dir->name, name, namelen);
		dir->namelen = namelen;
		int r = fs_callback_inode_lookup(node, name, namelen, dir);
		if(r) {
			dir->count = 0;
			vfs_dirent_destroy(dir);
			rwlock_release(&node->lock, RWL_WRITER);
			mutex_release(dirent_cache_lock);
			return 0;
		}
		vfs_inode_get(node);
		vfs_inode_add_dirent(node, dir);
	} else {
		if(atomic_fetch_add(&dir->count, 1) == 0) {
			fs_dirent_remove_lru(dir);
			vfs_inode_get(node);
		}
	}
	rwlock_release(&node->lock, RWL_WRITER);
	mutex_release(dirent_cache_lock);
	return dir;
}
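A successful fs_dirent_lookup() hands back a dirent whose reference count has been raised, so the caller is expected to drop it again with vfs_dirent_release() (Example #13) once it is done. A minimal sketch under that assumption; example_name_exists is hypothetical:

static int example_name_exists(struct inode *dir, const char *name)
{
	struct dirent *d = fs_dirent_lookup(dir, name, strlen(name));
	if(!d)
		return 0;               /* not found, not a directory, or no read permission */
	/* ... inspect d->name, d->ino, etc. ... */
	vfs_dirent_release(d);          /* drop the reference taken by the lookup */
	return 1;
}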
Example #17
int __KT_try_releasing_tasks()
{
	struct llistnode *cur;
	rwlock_acquire(&kill_queue->rwl, RWL_WRITER);
	if(ll_is_empty(kill_queue))
	{
		rwlock_release(&kill_queue->rwl, RWL_WRITER);
		return 0;
	}
	task_t *t=0;
	ll_for_each_entry(kill_queue, cur, task_t *, t)
	{
		/* need to check for orphaned zombie tasks */
		if(t->flags & TF_BURIED && (t != ((cpu_t *)t->cpu)->cur)) {
			if(t->parent == 0 || t->parent->state == TASK_DEAD || (t->parent->flags & TF_KTASK) || t->parent == kernel_task)
				move_task_to_kill_queue(t, 0);
			if(t->flags & TF_KILLREADY)
				break;
		}
	}
	if(!t || !((t->flags & TF_BURIED) && (t->flags & TF_KILLREADY)))
	{
		rwlock_release(&kill_queue->rwl, RWL_WRITER);
		return 0;
	}
	assert(cur->entry == t);
	void *node = ll_do_remove(kill_queue, cur, 1);
	assert(node == cur);
	int ret = 0;
	if(!ll_is_empty(kill_queue))
		ret = 1;
	rwlock_release(&kill_queue->rwl, RWL_WRITER);
	release_task(t);
	kfree(cur);
	return ret;
}
Example #18
struct inode *do_readdir(struct inode *i, int num)
{
	assert(i);
	int n = num;
	if(!is_directory(i))
		return 0;
	if(!permissions(i, MAY_READ))
		return 0;
	struct inode *c=0;
	if(!i->dynamic) {
		rwlock_acquire(&i->rwl, RWL_READER);
		struct llistnode *cur;
		ll_for_each_entry((&i->children), cur, struct inode *, c)
		{
			if(!n--) break;
		}
		rwlock_release(&i->rwl, RWL_READER);
	}
	/* assumed completion of the truncated excerpt: return the child found above, or 0 */
	return c;
}
Example #19
int sys_getdents(int fd, struct dirent_posix *dirs, unsigned int count)
{
	struct file *f = file_get(fd);
	if(!f) return -EBADF;

	unsigned nex;
	if(!vfs_inode_check_permissions(f->inode, MAY_READ, 0)) {
		file_put(f);
		return -EACCES;
	}
	rwlock_acquire(&f->inode->lock, RWL_READER);
	int r = fs_callback_inode_getdents(f->inode, f->pos, dirs, count, &nex);
	rwlock_release(&f->inode->lock, RWL_READER);
	f->pos = nex;

	file_put(f);
	return r;
}
Example #20
/* WARNING: This does not sync!!! */
void remove_element(cache_t *c, struct ce_t *o, int locked)
{
	if(!o) return;
	if(o->dirty)
		panic(PANIC_NOSYNC, "tried to remove non-sync'd element");
	
	if(!locked) rwlock_acquire(c->rwl, RWL_WRITER);
	if(o->dirty)
		set_dirty(c, o, 0);
	assert(c->count);
	sub_atomic(&c->count, 1);
	ll_remove(&c->primary_ll, o->list_node);
	if(c->hash) chash_delete(c->hash, o->id, o->key);
	if(o->data)
		kfree(o->data);
	rwlock_destroy(o->rwl);
	kfree(o);
	if(!locked) rwlock_release(c->rwl, RWL_WRITER);
}
Example #21
void __lockfunc rt_read_unlock(rwlock_t *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;
	unsigned long flags;

	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
	// TRACE_WARN_ON(lock->save_state != 1);
	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&lock->wait_lock, flags);
	if (rt_mutex_real_owner(lock) == current && rwlock->read_depth) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		rwlock->read_depth--;
		return;
	}
	spin_unlock_irqrestore(&lock->wait_lock, flags);
	__rt_spin_unlock(&rwlock->lock);
}
Example #22
int destroy_all_id(cache_t *c, u64 id)
{
	rwlock_acquire(c->rwl, RWL_WRITER);
	struct llistnode *curnode, *next;
	struct ce_t *obj;
	ll_for_each_entry_safe(&c->primary_ll, curnode, next, struct ce_t *, obj)
	{
		if(obj->id == id)
		{
			if(obj->dirty)
				do_sync_element(c, obj, 1);
			ll_maybe_reset_loop(&c->primary_ll, curnode, next);
			remove_element(c, obj, 1);
		}
		
	}
	rwlock_release(c->rwl, RWL_WRITER);
	return 0;
}
Example #23
size_t fs_dirent_reclaim_lru(void)
{
	mutex_acquire(dirent_cache_lock);
	struct queue_item *qi = queue_dequeue_item(dirent_lru);
	if(!qi) {
		mutex_release(dirent_cache_lock);
		return 0;
	}
	struct dirent *dir = qi->ent;
	struct inode *parent = dir->parent;
	rwlock_acquire(&parent->lock, RWL_WRITER);
	atomic_fetch_add(&parent->count, 1);
	if(dir && dir->count == 0) {
		/* reclaim this node */
		vfs_inode_del_dirent(parent, dir);
		vfs_dirent_destroy(dir);
	}
	atomic_fetch_sub(&parent->count, 1);
	rwlock_release(&parent->lock, RWL_WRITER);
	mutex_release(dirent_cache_lock);
	return sizeof(struct dirent);
}
Example #24
void __lockfunc rt_write_unlock(rwlock_t *rwlock)
{
	/* NOTE: we always pass in '1' for nested, for simplicity */
	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
	__rt_spin_unlock(&rwlock->lock);
}
Example #25
void leave_protected_region(void)
{
    rwlock_release(&rwlock);
}
Example #26
void __lockfunc _read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}