Example 1
void * /* returns value associated with key */
hashtable_search(struct hashtable *h, void *k)
{
    struct entry *e;
    unsigned int hashvalue, index;

    // Use global read lock for hashing/indexing
    rwlock_rdlock(&h->globallock);
    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);
    rwlock_rdunlock(&h->globallock);

    // Use local read lock for searching
    rwlock_rdlock(&h->locks[index]);
    e = h->table[index];
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k))) {
            // Release local read lock for early return (key found)
            rwlock_rdunlock(&h->locks[index]);
            return e->v;
        }
        e = e->next;
    }
    // Release local read lock for late return (key not found)
    rwlock_rdunlock(&h->locks[index]);

    return NULL;
}
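A minimal usage sketch for the read path above, assuming the companion create/insert API from the same hashtable implementation (create_hashtable's exact signature and the callback names are assumptions):

/* Sketch: my_hash_fn and my_eq_fn are placeholder callbacks. */
struct hashtable *h = create_hashtable(16, my_hash_fn, my_eq_fn);
hashtable_insert(h, key, value);
void *v = hashtable_search(h, key);   /* NULL when the key is absent */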
Example 2
/* TODO: don't use locks to look up the region: if the address has been accessed, it must already be in keysdb */
static error_t check_args(struct vmm_s *vmm, uint_t start, uint_t len, struct vm_region_s **reg)
{
	error_t err;
	struct vm_region_s *region;
  
	err = 0;
	rwlock_rdlock(&vmm->rwlock);

	region = vmm->last_region;
  
	if((start >= region->vm_limit) || (start < region->vm_start))
	{
		region = vm_region_find(vmm, start);
  
		if((region == NULL) || (start < region->vm_start))
			err = EINVAL;
	}

	if((err == 0) && ((start + len) >= region->vm_limit))
		err = EINVAL;
  
	rwlock_unlock(&vmm->rwlock);

	*reg = region;
	return err;
}
Example 3
char *
elf_rawfile (Elf *elf, size_t *ptr)
{
  char *result;

  if (elf == NULL)
    {
      /* No valid descriptor.  */
      __libelf_seterrno (ELF_E_INVALID_HANDLE);
    error_out:
      if (ptr != NULL)
	*ptr = 0;
      return NULL;
    }

  /* If the file is not mmap'ed and not previously loaded, do it now.  */
  if (elf->map_address == NULL && __libelf_readall (elf) == NULL)
    goto error_out;

  rwlock_rdlock (elf->lock);
  if (ptr != NULL)
    *ptr = elf->maximum_size;

  result = (char *) elf->map_address + elf->start_offset;
  rwlock_unlock (elf->lock);

  return result;
}
Example 4
void
svc_run()
{
	fd_set readfds, cleanfds;
	struct timeval timeout;

	timeout.tv_sec = 30;
	timeout.tv_usec = 0;

	for (;;) {
		rwlock_rdlock(&svc_fd_lock);
		readfds = svc_fdset;
		cleanfds = svc_fdset;
		rwlock_unlock(&svc_fd_lock);
		switch (_select(svc_maxfd+1, &readfds, NULL, NULL, &timeout)) {
		case -1:
			FD_ZERO(&readfds);
			if (errno == EINTR) {
				continue;
			}
			_warn("svc_run: - select failed");
			return;
		case 0:
			__svc_clean_idle(&cleanfds, 30, FALSE);
			continue;
		default:
			svc_getreqset(&readfds);
		}
	}
}
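Note the snapshot pattern: svc_fdset is copied into locals while the read lock is held because select() overwrites the fd_set handed to it, so each iteration mutates a private copy and only code that registers or unregisters descriptors needs the write lock.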
Example 5
int sys_getcwd (char *buff, size_t size)
{
	register struct thread_s *this;
	register struct task_s *task;
	register error_t err;
	struct ku_obj ku_buff;
  
	this      = current_thread;
	task      = current_task;

	if((size < VFS_MAX_NAME_LENGTH) || (!buff)) 
	{
		err = ERANGE;
		goto SYS_GETCWD_ERROR;
	}

	if(vmm_check_address("usr cwd buffer", task, buff, size))
	{
		err = EFAULT;
		goto SYS_GETCWD_ERROR;
	}

	KU_SZ_BUFF(ku_buff, buff, size);

	rwlock_rdlock(&task->cwd_lock);

	err = vfs_get_path(&task->vfs_cwd, &ku_buff);

	rwlock_unlock(&task->cwd_lock);


SYS_GETCWD_ERROR:
	this->info.errno = err;
	return (int)buff;
}
Example 6
struct rpc_dplx_rec *
rpc_dplx_lookup_rec(int fd, uint32_t iflags, uint32_t *oflags)
{
    struct rbtree_x_part *t;
    struct rpc_dplx_rec rk, *rec = NULL;
    struct opr_rbtree_node *nv;

    cond_init_rpc_dplx();

    rk.fd_k = fd;
    t = rbtx_partition_of_scalar(&(rpc_dplx_rec_set.xt), fd);

    rwlock_rdlock(&t->lock);
    nv = opr_rbtree_lookup(&t->t, &rk.node_k);

    /* XXX rework lock+insert case, so that new entries are inserted
     * locked, and t->lock critical section is reduced */

    if (! nv) {
        rwlock_unlock(&t->lock);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rk.node_k);
        if (! nv) {
            rec = alloc_dplx_rec();
            if (! rec) {
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: failed allocating rpc_dplx_rec", __func__);
                goto unlock;
            }

            /* tell the caller */
            *oflags = RPC_DPLX_LKP_OFLAG_ALLOC;

            rec->fd_k = fd;

            if (opr_rbtree_insert(&t->t, &rec->node_k)) {
                /* cant happen */
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: collision inserting in locked rbtree partition",
                        __func__);
                free_dplx_rec(rec);
            }
        }
    }
    else {
        rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
        *oflags = RPC_DPLX_LKP_FLAG_NONE;
    }

    rpc_dplx_ref(rec, (iflags & RPC_DPLX_LKP_IFLAG_LOCKREC) ?
                 RPC_DPLX_FLAG_LOCK :
                 RPC_DPLX_FLAG_NONE);

unlock:
    rwlock_unlock(&t->lock);

    return (rec);
}
Example 7
Pe *
pe_begin(int fildes, Pe_Cmd cmd, Pe *ref)
{
	Pe *retval = NULL;

	if (ref != NULL) {
		rwlock_rdlock(ref->lock);
	} else if (fcntl(fildes, F_GETFL) == -1 && errno == EBADF) {
		__libpe_seterrno(PE_E_INVALID_FILE);
		return NULL;
	}

	switch (cmd) {
		case PE_C_NULL:
			break;

		case PE_C_READ_MMAP_PRIVATE:
			if (ref != NULL && ref->cmd != PE_C_READ_MMAP_PRIVATE) {
				__libpe_seterrno(PE_E_INVALID_CMD);
				break;
			}
			/* fall through */
		case PE_C_READ:
		case PE_C_READ_MMAP:
			if (ref != NULL)
				retval = dup_pe(fildes, cmd, ref);
			else
				retval = read_file(fildes, ~((size_t)0), cmd,
						NULL);
			break;
		case PE_C_RDWR:
		case PE_C_RDWR_MMAP:
			if (ref != NULL) {
				if (ref->cmd != PE_C_RDWR &&
						ref->cmd != PE_C_RDWR_MMAP &&
						ref->cmd != PE_C_WRITE &&
						ref->cmd != PE_C_WRITE_MMAP) {
					__libpe_seterrno(PE_E_INVALID_CMD);
					retval = NULL;
				}
			} else {
				retval = read_file(fildes, ~((size_t) 0), cmd,
						NULL);
			}
			break;
		case PE_C_WRITE:
		case PE_C_WRITE_MMAP:
			retval = write_file(fildes, cmd);
			break;
		default:
			__libpe_seterrno(PE_E_INVALID_CMD);
			break;
	}

	if (ref != NULL)
		rwlock_unlock(ref->lock);
	return retval;
}
Example 8
unsigned int
hashtable_count(struct hashtable *h)
{
    // Use read lock for entry count
    rwlock_rdlock(&h->entrycountlock);
    unsigned int cnt = h->entrycount;
    rwlock_rdunlock(&h->entrycountlock);
    return cnt;
}
Example 9
int sys_stat(char *pathname, struct vfs_stat_s *buff, int fd)
{
	struct thread_s *this;
	register error_t err = 0;
	struct vfs_file_s *file;
	struct vfs_node_s *node;
	struct task_s *task;

	this = current_thread;
	task = current_task;

	if((buff == NULL) || ((pathname == NULL) && (fd == -1)))
	{
		this->info.errno = EINVAL;
		return -1;
	}

	if((uint_t)buff >= CONFIG_KERNEL_OFFSET)
	{
		this->info.errno = EPERM;
		return -1;
	}

	if(pathname == NULL)
	{
		if((fd >= CONFIG_TASK_FILE_MAX_NR) || ((file = task_fd_lookup(task,fd)) == NULL))
			return EBADFD;
 
		node = file->f_node;
		err = vfs_stat(task->vfs_cwd, NULL, &node);
	}
	else
	{
		node = NULL;
		rwlock_rdlock(&task->cwd_lock);
		err = vfs_stat(task->vfs_cwd, pathname, &node);
		rwlock_unlock(&task->cwd_lock);
	}
 
	if(err) goto SYS_STAT_ERR;
  
	err = cpu_uspace_copy(buff, &node->n_stat, sizeof(node->n_stat));
  
	if(err == 0)
		return 0;

SYS_STAT_ERR:
  
	if(pathname == NULL)
		vfs_node_down_atomic(node);

	this->info.errno = err;
	return -1;
}
Example 10
int
hashtable_insert(struct hashtable *h, void *k, void *v)
{
    /* This method allows duplicate keys - but they shouldn't be used */
    unsigned int index;
    struct entry *e;

    // Use write lock for entry count increment 
    rwlock_wrlock(&h->entrycountlock);
    if (++(h->entrycount) > h->loadlimit)
    {
        rwlock_wrunlock(&h->entrycountlock);

        /* Ignore the return value. If expand fails, we should
         * still try cramming just this value into the existing table
         * -- we may not have memory for a larger table, but one more
         * element may be ok. Next time we insert, we'll try expanding again.*/
        hashtable_expand(h);
    } else {
        rwlock_wrunlock(&h->entrycountlock);
    }

    e = (struct entry *)malloc(sizeof(struct entry));
    if (NULL == e) {
        // Use write lock for entry count decrement 
        rwlock_wrlock(&h->entrycountlock);
        --(h->entrycount);
        rwlock_wrunlock(&h->entrycountlock);
        return 0;
    } /*oom*/

    // Use global read lock for hashing/index calculations
    rwlock_rdlock(&h->globallock);
    e->h = hash(h,k);
    index = indexFor(h->tablelength,e->h);
    e->k = k;
    e->v = v;
    rwlock_rdunlock(&h->globallock);

    // Use global write lock for list insertion
    // TODO: internal lock causes problems, figure out why, using global instead
    //rwlock_wrlock(&h->locks[index]);
    rwlock_wrlock(&h->globallock);
#ifdef DEBUG 
    printf("[%.8x indexer] inserting '%s' into index[%d]...\n", pthread_self(), k, index);
#endif 
    e->next = h->table[index];
    h->table[index] = e;
    rwlock_wrunlock(&h->globallock);
    // TODO: internal lock causes problems, figure out why, using global instead
    //rwlock_wrunlock(&h->locks[index]);

    return -1;
}
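A plausible reading of the TODO above: hashtable_expand() reallocates h->table and changes tablelength, so an index (and the &h->locks[index] it selects) computed before an expansion can refer to the old bucket array; the global write lock serializes insertion against expansion, which a per-bucket lock cannot.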
Example 11
static void *readthread(void *p) {
    threadata_t *d = (threadata_t *)p;
    int i=0;
    rwlock_rdlock(&d->lock);
    log("read\n");
    for (i = 0; i < 10; i++)
        printf("%d\n", d->data[i]);
    rwlock_rdunlock(&d->lock);
    log("Done read\n");
    return NULL;
}
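The declarations Example 11 leaves implicit might look like the sketch below, with POSIX primitives standing in (the real threadata_t layout and the log() helper are assumptions):

#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_rwlock_t lock;   /* what the rwlock_rdlock/rwlock_rdunlock wrappers would guard */
    int data[10];
} threadata_t;

#define log(msg) fprintf(stderr, "%s", msg)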
Example 12
/*
 * getenv --
 *	Returns ptr to value associated with name, if any, else NULL.
 *	XXX: we cannot use getenv_r to implement this, because getenv()
 *	cannot use a shared buffer, because if it did, subsequent calls
 *	to getenv would trash previous results.
 */
char *
getenv(const char *name)
{
    int offset;
    char *result;

    _DIAGASSERT(name != NULL);

    rwlock_rdlock(&__environ_lock);
    result = __findenv(name, &offset);
    rwlock_unlock(&__environ_lock);
    return result;
}
Example 13
struct pe_hdr *
pe_getpehdr(Pe *pe, struct pe_hdr *dest)
{
	struct pe_hdr *result;

	if (pe == NULL)
		return NULL;

	rwlock_rdlock(pe->lock);
	result = __pe_getpehdr_rdlock(pe, dest);
	rwlock_unlock(pe->lock);

	return result;
}
Example 14
void * /* returns value associated with key */
hashtable_remove(struct hashtable *h, void *k)
{
    /* TODO: consider compacting the table when the load factor drops enough,
     *       or provide a 'compact' method. */

    struct entry *e;
    struct entry **pE;
    void *v;
    unsigned int hashvalue, index;

    // Use global read lock for hashing/indexing
    rwlock_rdlock(&h->globallock);
    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);
    rwlock_rdunlock(&h->globallock);

    // Use local write lock for removal
    rwlock_wrlock(&h->locks[index]);
    pE = &(h->table[index]);
    e = *pE;
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            *pE = e->next;

            // Use write lock for entry count decrement
            rwlock_wrlock(&h->entrycountlock);
            h->entrycount--;
            rwlock_wrunlock(&h->entrycountlock);

            v = e->v;
            freekey(e->k);
            free(e);

            rwlock_wrunlock(&h->locks[index]);
            return v;
        }
        pE = &(e->next);
        e = e->next;
    }
    rwlock_wrunlock(&h->locks[index]);

    return NULL;
}
Example 15
void
svc_run(void)
{
	fd_set readfds, cleanfds;
	struct timeval timeout;
	int maxfd;
#ifndef RUMP_RPC		
	int probs = 0;
#endif
#ifdef _REENTRANT
	extern rwlock_t svc_fd_lock;
#endif

	timeout.tv_sec = 30;
	timeout.tv_usec = 0;

	for (;;) {
		rwlock_rdlock(&svc_fd_lock);
		readfds = *get_fdset();
		cleanfds = *get_fdset();
		maxfd = *get_fdsetmax();
		rwlock_unlock(&svc_fd_lock);
		switch (select(maxfd + 1, &readfds, NULL, NULL, &timeout)) {
		case -1:
#ifndef RUMP_RPC		
			if ((errno == EINTR || errno == EBADF) && probs < 100) {
				probs++;
				continue;
			}
#endif
			if (errno == EINTR) {
				continue;
			}
			warn("%s: select failed", __func__);
			return;
		case 0:
			__svc_clean_idle(&cleanfds, 30, FALSE);
			continue;
		default:
			svc_getreqset(&readfds);
#ifndef RUMP_RPC
			probs = 0;
#endif
		}
	}
}
Example 16
static void* thread_func(void* arg)
{
  int i;
  int sum = 0;

  for (i = 0; i < 1000; i++)
  {
    rwlock_rdlock(&s_rwlock);
    sum += s_counter;
    rwlock_unlock(&s_rwlock);
    rwlock_wrlock(&s_rwlock);
    s_counter++;
    rwlock_unlock(&s_rwlock);
  }

  return 0;
}
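Example 16 omits its globals and driver; a self-contained POSIX version is sketched below, with pthread_rwlock_* standing in for the rwlock_* wrappers (an assumption, since the original s_rwlock type is not shown). Compile with -pthread.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t s_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static int s_counter;

static void *thread_func(void *arg)
{
    int sum = 0;

    for (int i = 0; i < 1000; i++) {
        pthread_rwlock_rdlock(&s_rwlock);   /* shared: readers may overlap */
        sum += s_counter;
        pthread_rwlock_unlock(&s_rwlock);
        pthread_rwlock_wrlock(&s_rwlock);   /* exclusive: one writer at a time */
        s_counter++;
        pthread_rwlock_unlock(&s_rwlock);
    }
    (void)arg;
    (void)sum;
    return NULL;
}

int main(void)
{
    pthread_t t[4];

    for (int i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, thread_func, NULL);
    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    printf("s_counter = %d\n", s_counter);   /* 4 threads * 1000 = 4000 */
    return 0;
}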
Example 17
GElf_Versym *
gelf_getversym (Elf_Data *data, int ndx, GElf_Versym *dst)
{
  Elf_Data_Scn *data_scn = (Elf_Data_Scn *) data;
  Elf_Scn *scn;
  GElf_Versym *result;

  if (data == NULL)
    return NULL;

  if (unlikely (data->d_type != ELF_T_HALF))
    {
      __libelf_seterrno (ELF_E_INVALID_HANDLE);
      return NULL;
    }

  /* This is the one place where we have to take advantage of the fact
     that an `Elf_Data' pointer is also a pointer to `Elf_Data_Scn'.
     The interface is broken so that it requires this hack.  */
  scn = data_scn->s;

  /* It's easy to handle this type.  It has the same size for 32 and
     64 bit objects.  */
  assert (sizeof (GElf_Versym) == sizeof (Elf32_Versym));
  assert (sizeof (GElf_Versym) == sizeof (Elf64_Versym));

  rwlock_rdlock (scn->elf->lock);

  /* The data is already in the correct form.  Just make sure the
     index is OK.  */
  if (INVALID_NDX (ndx, GElf_Versym, data))
    {
      __libelf_seterrno (ELF_E_INVALID_INDEX);
      result = NULL;
    }
  else
    {
      *dst = ((GElf_Versym *) data->d_buf)[ndx];

      result = dst;
    }

  rwlock_unlock (scn->elf->lock);

  return result;
}
Example 18
FILE *
popen(const char *cmd, const char *type)
{
	struct pid *cur;
	int pdes[2], serrno;
	pid_t pid;

	_DIAGASSERT(cmd != NULL);
	_DIAGASSERT(type != NULL);

	if ((cur = pdes_get(pdes, &type)) == NULL)
		return NULL;

#ifdef _REENTRANT
	(void)rwlock_rdlock(&pidlist_lock);
#endif
	(void)__readlockenv();
	switch (pid = vfork()) {
	case -1:			/* Error. */
		serrno = errno;
		(void)__unlockenv();
#ifdef _REENTRANT
		(void)rwlock_unlock(&pidlist_lock);
#endif
		pdes_error(pdes, cur);
		errno = serrno;
		return NULL;
		/* NOTREACHED */
	case 0:				/* Child. */
		pdes_child(pdes, type);
		execl(_PATH_BSHELL, "sh", "-c", cmd, NULL);
		_exit(127);
		/* NOTREACHED */
	}
	(void)__unlockenv();

	pdes_parent(pdes, cur, pid, type);

#ifdef _REENTRANT
	(void)rwlock_unlock(&pidlist_lock);
#endif

	return cur->fp;
}
Example 19
int sys_mkdir (char *pathname, uint_t mode)
{
	register error_t err = 0;
	struct task_s *task = current_task;
	struct ku_obj ku_path;

	KU_BUFF(ku_path, pathname);
	rwlock_rdlock(&task->cwd_lock);

	if((err = vfs_mkdir(&task->vfs_cwd, &ku_path, mode)))
	{
		current_thread->info.errno = (err < 0) ? -err : err;
		rwlock_unlock(&task->cwd_lock);
		return -1;
	}
   
	rwlock_unlock(&task->cwd_lock);
	return 0;
}
Example 20
int sys_stat(char *pathname, struct vfs_stat_s *buff, int fd)
{
	struct thread_s *this;
	register error_t err = 0;
	struct vfs_file_s *file;
	struct ku_obj ku_path;
	struct task_s *task;

	file = NULL;
	this = current_thread;
	task = current_task;

	if((buff == NULL) || ((pathname == NULL) && (fd == -1)))
	{
		this->info.errno = EINVAL;
		return -1;
	}

	if(NOT_IN_USPACE((uint_t)buff))
	{
		this->info.errno = EPERM;
		return -1;
	}

	if(pathname == NULL)
	{
		if((fd >= CONFIG_TASK_FILE_MAX_NR) || (task_fd_lookup(task, fd, &file)))
			return EBADFD;
 
		err = vfs_stat(&task->vfs_cwd, NULL, buff, file);
	}
	else
	{
		KU_BUFF(ku_path, pathname);
		rwlock_rdlock(&task->cwd_lock);
		err = vfs_stat(&task->vfs_cwd, &ku_path, buff, NULL);
		rwlock_unlock(&task->cwd_lock);
	}
 
	this->info.errno = err;
	return (err) ? -1 : 0;
}
Example 21
struct section_header *
pe_getshdr(Pe_Scn *scn, struct section_header *dst)
{
	struct section_header *result = NULL;

	if (scn == NULL)
		return NULL;

	if (dst == NULL) {
		__libpe_seterrno(PE_E_INVALID_OPERAND);
		return NULL;
	}

	rwlock_rdlock(scn->pe->lock);

	result = memcpy(dst, scn->shdr, sizeof(*dst));
	rwlock_unlock(scn->pe->lock);

	return result;
}
Example 22
FILE *
popenve(const char *cmd, char *const *argv, char *const *envp, const char *type)
{
	struct pid *cur;
	int pdes[2], serrno;
	pid_t pid;

	_DIAGASSERT(cmd != NULL);
	_DIAGASSERT(type != NULL);

	if ((cur = pdes_get(pdes, &type)) == NULL)
		return NULL;

#ifdef _REENTRANT
	(void)rwlock_rdlock(&pidlist_lock);
#endif
	switch (pid = vfork()) {
	case -1:			/* Error. */
		serrno = errno;
#ifdef _REENTRANT
		(void)rwlock_unlock(&pidlist_lock);
#endif
		pdes_error(pdes, cur);
		errno = serrno;
		return NULL;
		/* NOTREACHED */
	case 0:				/* Child. */
		pdes_child(pdes, type);
		execve(cmd, argv, envp);
		_exit(127);
		/* NOTREACHED */
	}

	pdes_parent(pdes, cur, pid, type);

#ifdef _REENTRANT
	(void)rwlock_unlock(&pidlist_lock);
#endif

	return cur->fp;
}
Example 23
GElf_Lib *
gelf_getlib (Elf_Data *data, int ndx, GElf_Lib *dst)
{
  if (data == NULL)
    return NULL;

  if (unlikely (data->d_type != ELF_T_LIB))
    {
      __libelf_seterrno (ELF_E_INVALID_HANDLE);
      return NULL;
    }

  Elf_Data_Scn *data_scn = (Elf_Data_Scn *) data;

  rwlock_rdlock (data_scn->s->elf->lock);

  /* The on disk format of Elf32_Lib and Elf64_Lib is identical.  So
     we can simplify things significantly.  */
  assert (sizeof (GElf_Lib) == sizeof (Elf32_Lib));
  assert (sizeof (GElf_Lib) == sizeof (Elf64_Lib));

  /* The data is already in the correct form.  Just make sure the
     index is OK.  */
  GElf_Lib *result = NULL;
  if (INVALID_NDX (ndx, GElf_Lib, data))
    __libelf_seterrno (ELF_E_INVALID_INDEX);
  else
    {
      *dst = ((GElf_Lib *) data->d_buf)[ndx];

      result = dst;
    }

  rwlock_unlock (data_scn->s->elf->lock);

  return result;
}
Example 24
int
getenv_r(const char *name, char *buf, size_t len)
{
    int offset;
    char *result;
    int rv = -1;

    _DIAGASSERT(name != NULL);

    rwlock_rdlock(&__environ_lock);
    result = __findenv(name, &offset);
    if (result == NULL) {
        errno = ENOENT;
        goto out;
    }
    if (strlcpy(buf, result, len) >= len) {
        errno = ERANGE;
        goto out;
    }
    rv = 0;
out:
    rwlock_unlock(&__environ_lock);
    return rv;
}
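A usage sketch for NetBSD's getenv_r, which returns 0 on success and -1 with errno set to ENOENT (name not found) or ERANGE (buffer too small), as the code above shows:

char buf[256];
if (getenv_r("PATH", buf, sizeof(buf)) == 0)
    printf("PATH=%s\n", buf);
else if (errno == ERANGE)
    fprintf(stderr, "PATH value does not fit in %zu bytes\n", sizeof(buf));
else
    fprintf(stderr, "PATH is not set\n");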
Example 25
int mzrt_rwlock_tryrdlock(mzrt_rwlock *lock) {
    return rwlock_rdlock(lock, 1);
}
Example 26
int mzrt_rwlock_rdlock(mzrt_rwlock *lock) {
    return rwlock_rdlock(lock, 0);
}
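Read together, Examples 25 and 26 imply that the underlying static rwlock_rdlock(mzrt_rwlock *, int) takes a try flag as its second argument: 0 blocks until the read lock is granted, 1 attempts the acquisition and fails immediately under contention. This is inferred from the two wrappers alone; the helper's body is not shown.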
Example 27
File: refill.c  Project: B-Rich/edk2
/*
 * Refill a stdio buffer.
 * Return EOF on eof or error, 0 otherwise.
 */
int
__srefill(FILE *fp)
{

  _DIAGASSERT(fp != NULL);
  if(fp == NULL) {
    errno = EINVAL;
    return (EOF);
  }

  /* make sure stdio is set up */
  if (!__sdidinit)
    __sinit();

  fp->_r = 0;   /* largely a convenience for callers */

  /* SysV does not make this test; take it out for compatibility */
  if (fp->_flags & __SEOF) {
    return (EOF);
  }

  /* if not already reading, have to be reading and writing */
  if ((fp->_flags & __SRD) == 0) {
    if ((fp->_flags & __SRW) == 0) {
      errno = EBADF;
      fp->_flags |= __SERR;   //<dvm> Allows differentiation between errors and EOF
      return (EOF);
    }
    /* switch to reading */
    if (fp->_flags & __SWR) {
      if (__sflush(fp)) {
        return (EOF);
      }
      fp->_flags &= ~__SWR;
      fp->_w = 0;
      fp->_lbfsize = 0;
    }
    fp->_flags |= __SRD;
  } else {
    /*
     * We were reading.  If there is an ungetc buffer,
     * we must have been reading from that.  Drop it,
     * restoring the previous buffer (if any).  If there
     * is anything in that buffer, return.
     */
    if (HASUB(fp)) {
      FREEUB(fp);
      if ((fp->_r = fp->_ur) != 0) {
        fp->_p = fp->_up;
        return (0);
      }
    }
  }

  if (fp->_bf._base == NULL)
    __smakebuf(fp);

  /*
   * Before reading from a line buffered or unbuffered file,
   * flush all line buffered output files, per the ANSI C
   * standard.
   */
  if (fp->_flags & (__SLBF|__SNBF)) {
    rwlock_rdlock(&__sfp_lock);
    (void) _fwalk(lflush);
    rwlock_unlock(&__sfp_lock);
  }
  fp->_p = fp->_bf._base;
  fp->_r = (*fp->_read)(fp->_cookie, (char *)fp->_p, fp->_bf._size);
  fp->_flags &= ~__SMOD;  /* buffer contents are again pristine */
  if (fp->_r <= 0) {
    if (fp->_r == 0)
      fp->_flags |= __SEOF;
    else {
      fp->_r = 0;
      fp->_flags |= __SERR;
    }
    return (EOF);
  }
  return (0);
}
Example 28
FILE *
popen(const char *command, const char *type)
{
	struct pid *cur, *old;
	FILE *iop;
	const char * volatile xtype = type;
	int pdes[2], pid, serrno;
	volatile int twoway;
	int flags;

	_DIAGASSERT(command != NULL);
	_DIAGASSERT(xtype != NULL);

	flags = strchr(xtype, 'e') ? O_CLOEXEC : 0;
	if (strchr(xtype, '+')) {
		int stype = flags ? (SOCK_STREAM | SOCK_CLOEXEC) : SOCK_STREAM;
		twoway = 1;
		xtype = "r+";
		if (socketpair(AF_LOCAL, stype, 0, pdes) < 0)
			return NULL;
	} else  {
		twoway = 0;
		xtype = strrchr(xtype, 'r') ? "r" : "w";
		if (pipe2(pdes, flags) == -1)
			return NULL;
	}

	if ((cur = malloc(sizeof(struct pid))) == NULL) {
		(void)close(pdes[0]);
		(void)close(pdes[1]);
		errno = ENOMEM;
		return (NULL);
	}

#if defined(__minix)
	rwlock_rdlock(&pidlist_lock);
#else
	(void)rwlock_rdlock(&pidlist_lock);
#endif /* defined(__minix) */
	(void)__readlockenv();
	switch (pid = vfork()) {
	case -1:			/* Error. */
		serrno = errno;
		(void)__unlockenv();
#if defined(__minix)
		rwlock_unlock(&pidlist_lock);
#else
		(void)rwlock_unlock(&pidlist_lock);
#endif /* defined(__minix) */
		free(cur);
		(void)close(pdes[0]);
		(void)close(pdes[1]);
		errno = serrno;
		return (NULL);
		/* NOTREACHED */
	case 0:				/* Child. */
		/* POSIX.2 B.3.2.2 "popen() shall ensure that any streams
		   from previous popen() calls that remain open in the 
		   parent process are closed in the new child process. */
		for (old = pidlist; old; old = old->next)
#ifdef _REENTRANT
			close(old->fd); /* don't allow a flush */
#else
			close(fileno(old->fp)); /* don't allow a flush */
#endif

		if (*xtype == 'r') {
			(void)close(pdes[0]);
			if (pdes[1] != STDOUT_FILENO) {
				(void)dup2(pdes[1], STDOUT_FILENO);
				(void)close(pdes[1]);
			}
			if (twoway)
				(void)dup2(STDOUT_FILENO, STDIN_FILENO);
		} else {
			(void)close(pdes[1]);
			if (pdes[0] != STDIN_FILENO) {
				(void)dup2(pdes[0], STDIN_FILENO);
				(void)close(pdes[0]);
			}
		}

		execl(_PATH_BSHELL, "sh", "-c", command, NULL);
		_exit(127);
		/* NOTREACHED */
	}
	(void)__unlockenv();

	/* Parent; assume fdopen can't fail. */
	if (*xtype == 'r') {
		iop = fdopen(pdes[0], xtype);
#ifdef _REENTRANT
		cur->fd = pdes[0];
#endif
		(void)close(pdes[1]);
	} else {
		iop = fdopen(pdes[1], xtype);
#ifdef _REENTRANT
		cur->fd = pdes[1];
#endif
		(void)close(pdes[0]);
	}

	/* Link into list of file descriptors. */
	cur->fp = iop;
	cur->pid =  pid;
	cur->next = pidlist;
	pidlist = cur;
#if defined(__minix)
	rwlock_unlock(&pidlist_lock);
#else
	(void)rwlock_unlock(&pidlist_lock);
#endif /* defined(__minix) */

	return (iop);
}
Example 29
size_t
gelf_getnote (Elf_Data *data, size_t offset, GElf_Nhdr *result,
	      size_t *name_offset, size_t *desc_offset)
{
  if (data == NULL)
    return 0;

  if (unlikely (data->d_type != ELF_T_NHDR))
    {
      __libelf_seterrno (ELF_E_INVALID_HANDLE);
      return 0;
    }

  /* It's easy to handle this type.  It has the same size for 32 and
     64 bit objects.  */
  assert (sizeof (GElf_Nhdr) == sizeof (Elf32_Nhdr));
  assert (sizeof (GElf_Nhdr) == sizeof (Elf64_Nhdr));

  rwlock_rdlock (((Elf_Data_Scn *) data)->s->elf->lock);

  /* The data is already in the correct form.  Just make sure the
     offset is OK.  */
  if (unlikely (offset > data->d_size
		|| data->d_size - offset < sizeof (GElf_Nhdr)))
    {
      __libelf_seterrno (ELF_E_OFFSET_RANGE);
      offset = 0;
    }
  else
    {
      const GElf_Nhdr *n = data->d_buf + offset;
      offset += sizeof *n;

      /* Include padding.  Check below for overflow.  */
      GElf_Word namesz = NOTE_ALIGN (n->n_namesz);
      GElf_Word descsz = NOTE_ALIGN (n->n_descsz);

      if (unlikely (offset > data->d_size
		    || data->d_size - offset < namesz
		    || (namesz == 0 && n->n_namesz != 0)))
	offset = 0;
      else
	{
	  *name_offset = offset;
	  offset += namesz;
	  if (unlikely (offset > data->d_size
			|| data->d_size - offset < descsz
			|| (descsz == 0 && n->n_descsz != 0)))
	    offset = 0;
	  else
	    {
	      *desc_offset = offset;
	      offset += descsz;
	      *result = *n;
	    }
	}
    }

  rwlock_unlock (((Elf_Data_Scn *) data)->s->elf->lock);

  return offset;
}
Example 30
void ListRDLock(list_p list)
{
  rwlock_rdlock(list->rwlock);
}
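The matching unlock wrapper is presumably symmetric (a sketch; ListRDUnlock and the rwlock field layout are assumptions, as only the locking half appears above):

void ListRDUnlock(list_p list)
{
  rwlock_unlock(list->rwlock);
}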