Example no. 1
0
int sys_chdir (char *pathname)
{
	register error_t err = 0;
	register struct thread_s *this;
	register struct task_s *task;

	this = current_thread;
	task = current_task;

	if(!pathname)
	{
		this->info.errno = EINVAL;
		return -1;
	}
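	/* take the cwd lock for writing: vfs_chdir() replaces task->vfs_cwd in place */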
      
	rwlock_wrlock(&task->cwd_lock);

	if((err = vfs_chdir(pathname, task->vfs_cwd, &task->vfs_cwd)))
	{
		rwlock_unlock(&task->cwd_lock);
		this->info.errno = (err < 0) ? -err : err;
		return -1;
	}
   
	rwlock_unlock(&task->cwd_lock);
	return 0;
}
Example no. 2
0
int mm_info(mm_context *ctx, void *addr, anvil_vmm_info_t& info)
{
    struct mm_mapping   *pmap;

    kdebug("mm_info\n");
    mm_context_dump(ctx);

    // Iterate through until we find the first mapping whose start is >= addr
    rwlock_wrlock(&ctx->lock);
    pmap = first_mapping(ctx);

    while (pmap)
    {
        uintptr_t pmap_end = pmap->start_addr + pmap->length;

        if ((uintptr_t)addr <= pmap->start_addr)
        {
            info.addr = (void *)pmap->start_addr;
            info.object = pmap->object;
            info.len = pmap->length;
            info.fd = pmap->fd;
            info.prot = pmap->prot;
            kdebug("mm_info found\n");
            rwlock_unlock(&ctx->lock);
            return 0;
        }
        pmap = next_mapping(ctx, pmap);
    }
    rwlock_unlock(&ctx->lock);
    kdebug("mm_info done\n");
    return -1;
}
Example no. 3
0
struct phys_page *search_obj_chain(mm_context *ctx, struct mm_mapping *mapping, uintptr_t fault_addr)
{
    struct mm_object *front_pobj = mapping->object;
    struct mm_object *pobj = front_pobj;
    struct phys_page *page;
    struct phys_page *new_page;
    uintptr_t fault_page = (uintptr_t)fault_addr & ~0xfff;

    int prot = PM_USER | (mapping->prot & PM_WRITABLE ? PM_WRITABLE : 0);
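    /* walk the chain of backing objects looking for the page that covers the faulting address */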

    while (pobj)
    {
        rwlock_wrlock(&pobj->lock);
        page = first_page(pobj);
        while (page)
        {
            if (mapping->start_addr + page->offset == fault_page)
            {
                switch (pobj->share_type)
                {
                    case share_private:
//                        kdebug("Object is private %p\n", fault_page);
                        kpage_map_user((void *)fault_page, page, mm_pgtable_alloc_callback, ctx, ctx->cr3, prot);
                        //mm_ctx_reload(ctx);
                        break;

                    case share_shared:
//                        kdebug("Object is shared %p\n", fault_page);
                        kpage_map_user((void *)fault_page, page, mm_pgtable_alloc_callback, ctx, ctx->cr3, prot);
//                        mm_ctx_reload(ctx);
                        break;

                    case share_cow:
//                        kdebug("%d: Object is cow %p %p\n", get_core_id(),  fault_addr, ctx);
                        /* Make a copy of the page and put it in front_pobj */
                        new_page = kphys_alloc(physmem_state_user);
                        new_page->offset = fault_page - mapping->start_addr;
                        front_pobj->page_list.add_head(new_page);
                        ++front_pobj->npages;
                        memcpy((void *)(pagestruct_to_phys(new_page) + VADDR_PHYSMEM), (void *)(pagestruct_to_phys(page) + VADDR_PHYSMEM), __PAGESIZE);
                        kpage_map_user((void *)fault_page, new_page, mm_pgtable_alloc_callback, ctx, ctx->cr3, PM_WRITABLE | PM_USER);
                        break;

                    default:
                        ;

                }
                rwlock_unlock(&pobj->lock);
                return page;
            }
            /* Go to the next one */
            page = next_page(pobj, page);
        }
        rwlock_unlock(&pobj->lock);
        pobj = pobj->chain;
    }

    return NULL;
}
Example no. 4
0
struct rpc_dplx_rec *
rpc_dplx_lookup_rec(int fd, uint32_t iflags, uint32_t *oflags)
{
    struct rbtree_x_part *t;
    struct rpc_dplx_rec rk, *rec = NULL;
    struct opr_rbtree_node *nv;

    cond_init_rpc_dplx();

    rk.fd_k = fd;
    t = rbtx_partition_of_scalar(&(rpc_dplx_rec_set.xt), fd);

    rwlock_rdlock(&t->lock);
    nv = opr_rbtree_lookup(&t->t, &rk.node_k);

    /* XXX rework lock+insert case, so that new entries are inserted
     * locked, and t->lock critical section is reduced */
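    /* not found under the read lock: retake as a writer and re-check,
     * since another thread may have inserted the entry in the window
     * between unlock and wrlock */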

    if (! nv) {
        rwlock_unlock(&t->lock);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rk.node_k);
        if (! nv) {
            rec = alloc_dplx_rec();
            if (! rec) {
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: failed allocating rpc_dplx_rec", __func__);
                goto unlock;
            }

            /* tell the caller */
            *oflags = RPC_DPLX_LKP_OFLAG_ALLOC;

            rec->fd_k = fd;

            if (opr_rbtree_insert(&t->t, &rec->node_k)) {
                /* can't happen */
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: collision inserting in locked rbtree partition",
                        __func__);
                free_dplx_rec(rec);
            }
        }
        else {
            /* lost the race: another thread inserted the record while
             * t->lock was dropped above, so use that one */
            rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
            *oflags = RPC_DPLX_LKP_FLAG_NONE;
        }
    }
    else {
        rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
        *oflags = RPC_DPLX_LKP_FLAG_NONE;
    }

    rpc_dplx_ref(rec, (iflags & RPC_DPLX_LKP_IFLAG_LOCKREC) ?
                 RPC_DPLX_FLAG_LOCK :
                 RPC_DPLX_FLAG_NONE);

unlock:
    rwlock_unlock(&t->lock);

    return (rec);
}
Example no. 5
0
int Thread::try_join(Thread* joiner, void **value_ptr)
{
    /*
     * We lock here because the joinable member might be cleared by the
     * detach code
     */
    rwlock_wrlock(&m_join_lock);

    if (!m_joinable || m_joiner != NULL)
    {
        // Someone is already joining us so fail
        rwlock_unlock(&m_join_lock);
        kdebug("kcall_threadjoin not joinable or already joined\n");
        return EINVAL;
    }

    if (m_state == THR_ST_ZOMBIE)
    {
        // We are a zombie, ready to be reaped
        void *value = zombie_state_info.exit_val;
        int err;
        if (value_ptr && (err = joiner->kcopy_touser(value_ptr, &value, sizeof(value))) < 0)
        {
            // Don't leak m_join_lock on the copy-out error path
            rwlock_unlock(&m_join_lock);
            return err;
        }
        rwlock_unlock(&m_join_lock);
        die();
        kdebug("kcall_threadjoin returning value\n");
        return 0;
    }

    // We are not ready to be joined yet so block the joiner and queue him
    // on our m_joiner member
    //
    // NOTE: Normally we don't allow threads to block other threads but this
    // is a helper method executing because of a call from joiner
    m_joiner = joiner;
    joiner->joining_state_info.joined_to = this;
    joiner->joining_state_info.value_ptr = value_ptr;
    joiner->m_continuation_func = &Thread::join_cont;

    // This blocks the thread that called this method, i.e. the joiner
    sys.m_sched.block(THR_ST_JOINING);

    //rwlock_unlock(&exiter->m_join_lock);


    rwlock_unlock(&m_join_lock);

    return 0;
}
Example no. 6
0
/* TODO: don't use locks to lookup region as if address has been accessed it must be in kyesdb */
static error_t check_args(struct vmm_s *vmm, uint_t start, uint_t len, struct vm_region_s **reg)
{
	error_t err;
	struct vm_region_s *region;
  
	err = 0;
	rwlock_rdlock(&vmm->rwlock);
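	/* try the cached last_region before falling back to vm_region_find() */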

	region = vmm->last_region;
  
	if((start >= region->vm_limit) || (start < region->vm_start))
	{
		region = vm_region_find(vmm, start);
  
		if((region == NULL) || (start < region->vm_start))
			err = EINVAL;
	}

	if((err == 0) && ((start + len) >= region->vm_limit))
		err = EINVAL;
  
	rwlock_unlock(&vmm->rwlock);

	*reg = region;
	return err;
}
Example no. 7
0
int get_user_by_sockaddr(struct sockaddr_storage *sa, size_t sa_len,
			 struct curve25519_proto **proto)
{
	int ret = -1;
	struct sockaddr_map_entry *entry;
	unsigned int hash = hash_name((char *) sa, sa_len);

	errno = 0;

	rwlock_rd_lock(&sockaddr_map_lock);

	entry = lookup_hash(hash, &sockaddr_mapper);
	while (entry && entry->sa_len == sa_len &&
	       memcmp(sa, entry->sa, entry->sa_len))
		entry = entry->next;
	if (entry && entry->sa_len == sa_len &&
	    !memcmp(sa, entry->sa, entry->sa_len)) {
		(*proto) = entry->proto;
		ret = 0;
	} else {
		(*proto) = NULL;
		errno = ENOENT;
	}

	rwlock_unlock(&sockaddr_map_lock);

	return ret;
}
Example no. 8
0
void remove_user_by_sockaddr(struct sockaddr_storage *sa, size_t sa_len)
{
	struct sockaddr_map_entry *pos;
	struct sockaddr_map_entry *entry;
	unsigned int hash = hash_name((char *) sa, sa_len);

	entry = sockaddr_to_sockaddr_map_entry(sa, sa_len);
	if (!entry)
		return;

	rwlock_wr_lock(&sockaddr_map_lock);

	pos = remove_hash(hash, entry, entry->next, &sockaddr_mapper);
	while (pos && pos->next && pos->next != entry)
		pos = pos->next;
	if (pos && pos->next && pos->next == entry)
		pos->next = entry->next;

	memset(entry->proto->enonce, 0, sizeof(entry->proto->enonce));
	memset(entry->proto->dnonce, 0, sizeof(entry->proto->dnonce));

	entry->proto = NULL;
	entry->next = NULL;

	xfree(entry->sa);
	xfree(entry);

	rwlock_unlock(&sockaddr_map_lock);
}
Example no. 9
0
static int register_user_by_sockaddr(struct sockaddr_storage *sa,
				     size_t sa_len,
				     struct curve25519_proto *proto)
{
	void **pos;
	struct sockaddr_map_entry *entry;
	unsigned int hash = hash_name((char *) sa, sa_len);

	rwlock_wr_lock(&sockaddr_map_lock);

	entry = xzmalloc(sizeof(*entry));
	entry->sa = xmemdupz(sa, sa_len);
	entry->sa_len = sa_len;
	entry->proto = proto;

	pos = insert_hash(hash, entry, &sockaddr_mapper);
	if (pos) {
		entry->next = (*pos);
		(*pos) = entry;
	}

	rwlock_unlock(&sockaddr_map_lock);

	return 0;
}
Example no. 10
0
void
svc_run()
{
	fd_set readfds, cleanfds;
	struct timeval timeout;

	timeout.tv_sec = 30;
	timeout.tv_usec = 0;

	for (;;) {
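		/* snapshot svc_fdset under the read lock; select() modifies readfds in place */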
		rwlock_rdlock(&svc_fd_lock);
		readfds = svc_fdset;
		cleanfds = svc_fdset;
		rwlock_unlock(&svc_fd_lock);
		switch (_select(svc_maxfd+1, &readfds, NULL, NULL, &timeout)) {
		case -1:
			FD_ZERO(&readfds);
			if (errno == EINTR) {
				continue;
			}
			_warn("svc_run: - select failed");
			return;
		case 0:
			__svc_clean_idle(&cleanfds, 30, FALSE);
			continue;
		default:
			svc_getreqset(&readfds);
		}
	}
}
Example no. 11
0
int sys_getcwd (char *buff, size_t size)
{
	register struct thread_s *this;
	register struct task_s *task;
	register error_t err;
	struct ku_obj ku_buff;
  
	this      = current_thread;
	task      = current_task;

	if((size < VFS_MAX_NAME_LENGTH) || (!buff)) 
	{
		err = ERANGE;
		goto SYS_GETCWD_ERROR;
	}

	if(vmm_check_address("usr cwd buffer", task, buff, size))
	{
		err = EFAULT;
		goto SYS_GETCWD_ERROR;
	}

	KU_SZ_BUFF(ku_buff, buff, size);
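	/* read-lock the cwd while resolving it into the user-supplied buffer */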

	rwlock_rdlock(&task->cwd_lock);

	err = vfs_get_path(&task->vfs_cwd, &ku_buff);

	rwlock_unlock(&task->cwd_lock);


SYS_GETCWD_ERROR:
	this->info.errno = err;
	return (int)buff;
}
Example no. 12
0
/*
 *      This function causes svc_run() to exit by telling it that it has no
 *      more work to do.
 */
void
svc_exit()
{
	rwlock_wrlock(&svc_fd_lock);
	FD_ZERO(&svc_fdset);
	rwlock_unlock(&svc_fd_lock);
}
Example no. 13
0
void trie_cleanup(void)
{
	rwlock_wr_lock(&tree_lock);
	ptree_free(tree);
	rwlock_unlock(&tree_lock);
	rwlock_destroy(&tree_lock);
}
Example no. 14
0
char *
elf_rawfile (Elf *elf, size_t *ptr)
{
  char *result;

  if (elf == NULL)
    {
      /* No valid descriptor.  */
      __libelf_seterrno (ELF_E_INVALID_HANDLE);
    error_out:
      if (ptr != NULL)
	*ptr = 0;
      return NULL;
    }

  /* If the file is not mmap'ed and not previously loaded, do it now.  */
  if (elf->map_address == NULL && __libelf_readall (elf) == NULL)
    goto error_out;

  rwlock_rdlock (elf->lock);
  if (ptr != NULL)
    *ptr = elf->maximum_size;

  result = (char *) elf->map_address + elf->start_offset;
  rwlock_unlock (elf->lock);

  return result;
}
Example no. 15
0
void remove_user_by_socket(int fd)
{
	struct sock_map_entry *pos;
	struct sock_map_entry *entry = socket_to_sock_map_entry(fd);

	if (!entry)
		return;

	rwlock_wr_lock(&sock_map_lock);

	pos = remove_hash(entry->fd, entry, entry->next, &sock_mapper);
	while (pos && pos->next && pos->next != entry)
		pos = pos->next;
	if (pos && pos->next && pos->next == entry)
		pos->next = entry->next;

	memset(entry->proto->enonce, 0, sizeof(entry->proto->enonce));
	memset(entry->proto->dnonce, 0, sizeof(entry->proto->dnonce));

	entry->proto = NULL;
	entry->next = NULL;

	xfree(entry);

	rwlock_unlock(&sock_map_lock);
}
Example no. 16
0
static void* thread_func(void* arg)
{
  int i;
  int sum = 0;

  for (i = 0; i < 1000; i++)
  {
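    /* read the shared counter under the read lock, then increment it under the write lock */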
    rwlock_rdlock(&s_rwlock);
    sum += s_counter;
    rwlock_unlock(&s_rwlock);
    rwlock_wrlock(&s_rwlock);
    s_counter++;
    rwlock_unlock(&s_rwlock);
  }

  return 0;
}
Example no. 17
0
/*
 * pclose --
 *	Pclose returns -1 if stream is not associated with a `popened' command,
 *	if already `pclosed', or waitpid returns an error.
 */
int
pclose(FILE *iop)
{
	struct pid *cur, *last;
	int pstat;
	pid_t pid;

	_DIAGASSERT(iop != NULL);

	rwlock_wrlock(&pidlist_lock);

	/* Find the appropriate file pointer. */
	for (last = NULL, cur = pidlist; cur; last = cur, cur = cur->next)
		if (cur->fp == iop)
			break;
	if (cur == NULL) {
#if defined(__minix)
		rwlock_unlock(&pidlist_lock);
#else
		(void)rwlock_unlock(&pidlist_lock);
#endif /* defined(__minix) */
		return (-1);
	}

	(void)fclose(iop);

	/* Remove the entry from the linked list. */
	if (last == NULL)
		pidlist = cur->next;
	else
		last->next = cur->next;

#if defined(__minix)
	rwlock_unlock(&pidlist_lock);
#else
	(void)rwlock_unlock(&pidlist_lock);
#endif /* defined(__minix) */

	do {
		pid = waitpid(cur->pid, &pstat, 0);
	} while (pid == -1 && errno == EINTR);

	free(cur);

	return (pid == -1 ? -1 : pstat);
}
Example no. 18
0
void parse_userfile_and_generate_user_store_or_die(char *homedir)
{
	FILE *fp;
	char path[PATH_MAX], buff[512];
	int line = 1, ret, fd;

	memset(path, 0, sizeof(path));
	slprintf(path, sizeof(path), "%s/%s", homedir, FILE_CLIENTS);

	rwlock_init(&store_lock);
	rwlock_wr_lock(&store_lock);

	fp = fopen(path, "r");
	if (!fp)
		panic("Cannot open client file!\n");

	memset(buff, 0, sizeof(buff));
	while (fgets(buff, sizeof(buff), fp) != NULL) {
		buff[sizeof(buff) - 1] = 0;
		/* A comment. Skip this line */
		if (buff[0] == '#' || buff[0] == '\n') {
			memset(buff, 0, sizeof(buff));
			line++;
			continue;
		}

		ret = parse_line(buff, homedir);
		if (ret < 0)
			panic("Cannot parse line %d from clients!\n", line);
		line++;
		memset(buff, 0, sizeof(buff));
	}

	fclose(fp);

	if (store == NULL)
		panic("No registered clients found!\n");

	rwlock_unlock(&store_lock);

	init_sock_mapper();
	init_sockaddr_mapper();

	/*
	 * Pubkey is also used as a hmac of the initial packet to check
	 * the integrity of the packet, so that we know if it's just random
	 * garbage or a 'valid' packet. Again, just for the integrity!
	 */

	memset(path, 0, sizeof(path));
	slprintf(path, sizeof(path), "%s/%s", homedir, FILE_PUBKEY);

	fd = open_or_die(path, O_RDONLY);
	ret = read(fd, token, sizeof(token));
	if (ret != crypto_auth_hmacsha512256_KEYBYTES)
		panic("Cannot read public key!\n");
	close(fd);
}
Example no. 19
0
/** Create a new semaphore.
 * @param name		Optional name for the semaphore, for debugging purposes.
 * @param count		Initial count of the semaphore.
 * @param security	Security attributes for the ACL. If NULL, default
 *			attributes will be constructed which grant full access
 *			to the semaphore to the calling process' user.
 * @param rights	Access rights for the handle.
 * @param handlep	Where to store handle to the semaphore.
 * @return		Status code describing result of the operation. */
status_t kern_semaphore_create(const char *name, size_t count,
	const object_security_t *security, object_rights_t rights,
	handle_t *handlep)
{
	object_security_t ksecurity = { -1, -1, NULL };
	user_semaphore_t *sem;
	status_t ret;

	if(!handlep)
		return STATUS_INVALID_ARG;

	if(security) {
		ret = object_security_from_user(&ksecurity, security, true);
		if(ret != STATUS_SUCCESS)
			return ret;
	}

	/* Construct a default ACL if required. */
	if(!ksecurity.acl) {
		ksecurity.acl = kmalloc(sizeof(*ksecurity.acl), MM_WAIT);
		object_acl_init(ksecurity.acl);
		object_acl_add_entry(ksecurity.acl, ACL_ENTRY_USER, -1, SEMAPHORE_RIGHT_USAGE);
	}

	sem = kmalloc(sizeof(user_semaphore_t), MM_WAIT);
	sem->id = id_allocator_alloc(&semaphore_id_allocator);
	if(sem->id < 0) {
		kfree(sem);
		object_security_destroy(&ksecurity);
		return STATUS_NO_SEMAPHORES;
	}
	if(name) {
		ret = strndup_from_user(name, SEMAPHORE_NAME_MAX, &sem->name);
		if(ret != STATUS_SUCCESS) {
			id_allocator_free(&semaphore_id_allocator, sem->id);
			kfree(sem);
			object_security_destroy(&ksecurity);
			return ret;
		}
	} else {
		sem->name = NULL;
	}

	object_init(&sem->obj, &semaphore_object_type, &ksecurity, NULL);
	object_security_destroy(&ksecurity);
	semaphore_init(&sem->sem, (sem->name) ? sem->name : "user_semaphore", count);
	refcount_set(&sem->count, 1);

	rwlock_write_lock(&semaphore_tree_lock);
	avl_tree_insert(&semaphore_tree, &sem->tree_link, sem->id, sem);
	rwlock_unlock(&semaphore_tree_lock);

	ret = object_handle_create(&sem->obj, NULL, rights, NULL, 0, NULL, NULL, handlep);
	if(ret != STATUS_SUCCESS)
		user_semaphore_release(sem);

	return ret;
}
Example no. 20
0
Pe *
pe_begin(int fildes, Pe_Cmd cmd, Pe *ref)
{
	Pe *retval = NULL;
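	/* if a reference descriptor was given, hold its lock for reading while it is examined and duplicated */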

	if (ref != NULL) {
		rwlock_rdlock(ref->lock);
	} else if (fcntl(fildes, F_GETFL) == -1 && errno == EBADF) {
		__libpe_seterrno(PE_E_INVALID_FILE);
		return NULL;
	}

	switch (cmd) {
		case PE_C_NULL:
			break;

		case PE_C_READ_MMAP_PRIVATE:
			if (ref != NULL && ref->cmd != PE_C_READ_MMAP_PRIVATE) {
				__libpe_seterrno(PE_E_INVALID_CMD);
				break;
			}
			/* fall through */
		case PE_C_READ:
		case PE_C_READ_MMAP:
			if (ref != NULL)
				retval = dup_pe(fildes, cmd, ref);
			else
				retval = read_file(fildes, ~((size_t)0), cmd,
						NULL);
			break;
		case PE_C_RDWR:
		case PE_C_RDWR_MMAP:
			if (ref != NULL) {
				if (ref->cmd != PE_C_RDWR &&
						ref->cmd != PE_C_RDWR_MMAP &&
						ref->cmd != PE_C_WRITE &&
						ref->cmd != PE_C_WRITE_MMAP) {
					__libpe_seterrno(PE_E_INVALID_CMD);
					retval = NULL;
				}
			} else {
				retval = read_file(fildes, ~((size_t) 0), cmd,
						NULL);
			}
			break;
		case PE_C_WRITE:
		case PE_C_WRITE_MMAP:
			retval = write_file(fildes, cmd);
			break;
		default:
			__libpe_seterrno(PE_E_INVALID_CMD);
			break;
	}

	if (ref != NULL)
		rwlock_unlock(ref->lock);
	return retval;
}
Example no. 21
0
int Thread::do_exit_thread(void *value)
{
    /*
     * Now we lock so others can't change our joiner member. Note that not
     * only do joiners join us but the timer subsystem may timeout a joiner
     * and remove him.
     */

    deactivate();

    rwlock_wrlock(&m_join_lock);

    if (!m_joinable)
    {
        /* We cannot be joined so we die immediately */
        kdebug("Thread::do_exit_thread() not joinable\n");
        sys.m_sched.block(THR_ST_DEAD);
        rwlock_unlock(&m_join_lock);
        die();
        return 0;
    }

    if (m_joiner != NULL)
    {
        /*
         * Someone is waiting for us so make him ready. We don't need to
         * lock him. The lock on us prevents anyone from touching the joiner
         */
        kdebug("Thread::do_exit_thread() have joiner\n");
        m_joiner->joining_state_info.value = value;
        m_joiner->m_continuation_func = &Thread::join_cont;
        sys.m_sched.add(m_joiner, 0);
        sys.m_sched.block(THR_ST_DEAD);
        rwlock_unlock(&m_join_lock);
        die();
        return 0;
    }

    kdebug("Thread::do_exit_thread() becoming zombie\n");
    zombie_state_info.exit_val = value;
    sys.m_sched.block(THR_ST_ZOMBIE);

    rwlock_unlock(&m_join_lock);
    return 0;
}
Example no. 22
0
/**
 * @brief	This function is not called from anywhere and may be removed.
 */
void neue_unlock(neue_t *neue) {

	if (neue) {
		rwlock_unlock(&(neue->lock));
		//log_pedantic("%20.li unlocking", thread_get_thread_id());
	}

	return;
}
Example no. 23
0
static void destroy_sockaddr_mapper(void)
{
	rwlock_wr_lock(&sockaddr_map_lock);
	for_each_hash(&sockaddr_mapper, cleanup_batch_sockaddr_mapper);
	free_hash(&sockaddr_mapper);
	rwlock_unlock(&sockaddr_map_lock);

	rwlock_destroy(&sockaddr_map_lock);
}
Example no. 24
0
void get_serv_store_entry_by_alias(char *alias, size_t len,
				   char **host, char **port, int *udp)
{
	struct server_store *elem;

	rwlock_rd_lock(&store_lock);
	elem = store;
	if (!alias) {
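		/* no alias given: pick the last entry in the store list */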
		while (elem && elem->next)
			elem = elem->next;
		if (elem) {
			(*host) = elem->host;
			(*port) = elem->port;
			(*udp) = elem->udp;
			selected = elem;
		} else {
			rwlock_unlock(&store_lock);
			goto nothing;
		}
	} else {
		while (elem) {
			if (!strncmp(elem->alias, alias,
				     min(len, strlen(elem->alias) + 1)))
				break;
			elem = elem->next;
		}
		if (elem) {
			(*host) = elem->host;
			(*port) = elem->port;
			(*udp) = elem->udp;
			selected = elem;
		} else {
			rwlock_unlock(&store_lock);
			goto nothing;
		}
	}
	rwlock_unlock(&store_lock);

	return;
nothing:
	(*host) = NULL;
	(*port) = NULL;
	(*udp) = -1;
}
Example no. 25
FILE *
popen(const char *cmd, const char *type)
{
	struct pid *cur;
	int pdes[2], serrno;
	pid_t pid;

	_DIAGASSERT(cmd != NULL);
	_DIAGASSERT(type != NULL);

	if ((cur = pdes_get(pdes, &type)) == NULL)
		return NULL;

#ifdef _REENTRANT
	(void)rwlock_rdlock(&pidlist_lock);
#endif
	(void)__readlockenv();
	switch (pid = vfork()) {
	case -1:			/* Error. */
		serrno = errno;
		(void)__unlockenv();
#ifdef _REENTRANT
		(void)rwlock_unlock(&pidlist_lock);
#endif
		pdes_error(pdes, cur);
		errno = serrno;
		return NULL;
		/* NOTREACHED */
	case 0:				/* Child. */
		pdes_child(pdes, type);
		execl(_PATH_BSHELL, "sh", "-c", cmd, NULL);
		_exit(127);
		/* NOTREACHED */
	}
	(void)__unlockenv();

	pdes_parent(pdes, cur, pid, type);

#ifdef _REENTRANT
	(void)rwlock_unlock(&pidlist_lock);
#endif

	return cur->fp;
}
Example no. 26
0
// thread for read operations
void *threadreadlock(void *vargp)
{
	printf("%s\n", "Wait for reading!");
	rwlock_lockRead(&rwl);	// read lock
	printf("%s%d\n", "Reading... Value: ", i);	// read and print value
	//rwlock_getInfo(&rwl);
	rwlock_unlock(&rwl);	// read unlock

	return NULL;
}
Example no. 27
0
int sys_mkdir (char *pathname, uint_t mode)
{
	register error_t err = 0;
	struct task_s *task = current_task;
	struct ku_obj ku_path;

	KU_BUFF(ku_path, pathname);
	rwlock_rdlock(&task->cwd_lock);

	if((err = vfs_mkdir(&task->vfs_cwd, &ku_path, mode)))
	{
		current_thread->info.errno = (err < 0) ? -err : err;
		rwlock_unlock(&task->cwd_lock);
		return -1;
	}
   
	rwlock_unlock(&task->cwd_lock);
	return 0;
}
Example no. 28
0
static void init_sockaddr_mapper(void)
{
	rwlock_init(&sockaddr_map_lock);
	rwlock_wr_lock(&sockaddr_map_lock);

	memset(&sockaddr_mapper, 0, sizeof(sockaddr_mapper));
	init_hash(&sockaddr_mapper);

	rwlock_unlock(&sockaddr_map_lock);
}
Example no. 29
0
int sys_stat(char *pathname, struct vfs_stat_s *buff, int fd)
{
	struct thread_s *this;
	register error_t err = 0;
	struct vfs_file_s *file;
	struct vfs_node_s *node;
	struct task_s *task;

	this = current_thread;
	task = current_task;

	if((buff == NULL) || ((pathname == NULL) && (fd == -1)))
	{
		this->info.errno = EINVAL;
		return -1;
	}

	if((uint_t)buff >= CONFIG_KERNEL_OFFSET)
	{
		this->info.errno = EPERM;
		return -1;
	}

	if(pathname == NULL)
	{
		if((fd >= CONFIG_TASK_FILE_MAX_NR) || (task_fd_lookup(task,fd) == NULL))
			return EBADFD;
 
		file = task_fd_lookup(task,fd);
		node = file->f_node;
		err = vfs_stat(task->vfs_cwd, NULL, &node);
	}
	else
	{
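		/* stat by pathname: resolve it relative to the task cwd under the read lock */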
		node = NULL;
		rwlock_rdlock(&task->cwd_lock);
		err = vfs_stat(task->vfs_cwd, pathname, &node);
		rwlock_unlock(&task->cwd_lock);
	}
 
	if(err) goto SYS_STAT_ERR;
  
	err = cpu_uspace_copy(buff, &node->n_stat, sizeof(node->n_stat));
  
	if(err == 0)
		return 0;

SYS_STAT_ERR:
  
	if(pathname == NULL)
		vfs_node_down_atomic(node);

	this->info.errno = err;
	return -1;
}
Example no. 30
0
unsigned char *get_serv_store_entry_auth_token(void)
{
	unsigned char *ret = NULL;

	rwlock_rd_lock(&store_lock);
	if (selected)
		ret = selected->auth_token;
	rwlock_unlock(&store_lock);

	return ret;
}