Example #1
int
hashtable_insert(struct hashtable *h, void *k, void *v)
{
    /* This method allows duplicate keys - but they shouldn't be used */
    unsigned int index;
    struct entry *e;

    // Use write lock for entry count increment 
    rwlock_wrlock(&h->entrycountlock);
    if (++(h->entrycount) > h->loadlimit)
    {
        rwlock_wrunlock(&h->entrycountlock);

        /* Ignore the return value. If expand fails, we should
         * still try cramming just this value into the existing table
         * -- we may not have memory for a larger table, but one more
         * element may be ok. Next time we insert, we'll try expanding again.*/
        hashtable_expand(h);
    } else {
        rwlock_wrunlock(&h->entrycountlock);
    }

    e = (struct entry *)malloc(sizeof(struct entry));
    if (NULL == e) {
        // Use write lock for entry count decrement 
        rwlock_wrlock(&h->entrycountlock);
        --(h->entrycount);
        rwlock_wrunlock(&h->entrycountlock);
        return 0;
    } /*oom*/

    // Use global read lock for hashing/index calculations
    rwlock_rdlock(&h->globallock);
    e->h = hash(h,k);
    index = indexFor(h->tablelength,e->h);
    e->k = k;
    e->v = v;
    rwlock_rdunlock(&h->globallock);

    // Use global write lock for list insertion
    // TODO: internal lock causes problems, figure out why, using global instead
    //rwlock_wrlock(&h->locks[index]);
    rwlock_wrlock(&h->globallock);
#ifdef DEBUG 
    printf("[%.8x indexer] inserting '%s' into index[%d]...\n", pthread_self(), k, index);
#endif 
    e->next = h->table[index];
    h->table[index] = e;
    rwlock_wrunlock(&h->globallock);
    // TODO: internal lock causes problems, figure out why, using global instead
    //rwlock_wrunlock(&h->locks[index]);

    return -1;
}
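Note that rwlock_wrlock(), rwlock_rdlock() and the matching unlock calls in the hashtable examples on this page are project-specific wrappers, not a standard API. A minimal sketch of such wrappers, assuming they sit directly on top of POSIX pthread_rwlock_t (the real projects may define them differently and check return codes), could look like this:

#include <pthread.h>

/* Assumed thin wrappers over POSIX read-write locks; the names mirror the
 * snippets on this page, the real projects may implement them differently. */
static inline void rwlock_rdlock(pthread_rwlock_t *l)   { pthread_rwlock_rdlock(l); }
static inline void rwlock_wrlock(pthread_rwlock_t *l)   { pthread_rwlock_wrlock(l); }
static inline void rwlock_rdunlock(pthread_rwlock_t *l) { pthread_rwlock_unlock(l); }
static inline void rwlock_wrunlock(pthread_rwlock_t *l) { pthread_rwlock_unlock(l); }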
Example #2
/*
 *      This function causes svc_run() to exit by telling it that it has no
 *      more work to do.
 */
void
svc_exit()
{
	rwlock_wrlock(&svc_fd_lock);
	FD_ZERO(&svc_fdset);
	rwlock_unlock(&svc_fd_lock);
}
Example #3
int mm_info(mm_context *ctx, void *addr, anvil_vmm_info_t& info)
{
    struct mm_mapping   *pmap;

    kdebug("mm_info\n");
    mm_context_dump(ctx);

    // Iterate through the mappings until we find the first one whose start address is >= addr
    rwlock_wrlock(&ctx->lock);
    pmap = first_mapping(ctx);

    while (pmap)
    {
        uintptr_t pmap_end = pmap->start_addr + pmap->length;

        if ((uintptr_t)addr <= pmap->start_addr)
        {
            info.addr = (void *)pmap->start_addr;
            info.object = pmap->object;
            info.len = pmap->length;
            info.fd = pmap->fd;
            info.prot = pmap->prot;
            kdebug("mm_info found\n");
            rwlock_unlock(&ctx->lock);
            return 0;
        }
        pmap = next_mapping(ctx, pmap);
    }
    rwlock_unlock(&ctx->lock);
    kdebug("mm_info done\n");
    return -1;
}
Example #4
int sys_chdir (char *pathname)
{
	register error_t err = 0;
	register struct thread_s *this;
	register struct task_s *task;

	this = current_thread;
	task = current_task;

	if(!pathname)
	{
		this->info.errno = EINVAL;
		return -1;
	}
      
	rwlock_wrlock(&task->cwd_lock);

	if((err = vfs_chdir(pathname, task->vfs_cwd, &task->vfs_cwd)))
	{
		rwlock_unlock(&task->cwd_lock);
		this->info.errno = (err < 0) ? -err : err;
		return -1;
	}
   
	rwlock_unlock(&task->cwd_lock);
	return 0;
}
Example #5
void * /* returns value associated with key */
hashtable_remove(struct hashtable *h, void *k)
{
    /* TODO: consider compacting the table when the load factor drops enough,
     *       or provide a 'compact' method. */

    struct entry *e;
    struct entry **pE;
    void *v;
    unsigned int hashvalue, index;

    // Use global read lock for hashing/indexing
    rwlock_rdlock(&h->globallock);
    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);
    rwlock_rdunlock(&h->globallock);

    // Use local write lock for removal
    rwlock_wrlock(&h->locks[index]);
    pE = &(h->table[index]);
    e = *pE;
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            *pE = e->next;

            // Use write lock for entry count decrement
            rwlock_wrlock(&h->entrycountlock);
            h->entrycount--;
            rwlock_wrunlock(&h->entrycountlock);

            v = e->v;
            freekey(e->k);
            free(e);

            rwlock_wrunlock(&h->locks[index]);
            return v;
        }
        pE = &(e->next);
        e = e->next;
    }
    rwlock_wrunlock(&h->locks[index]);

    return NULL;
}
Example #6
struct phys_page *search_obj_chain(mm_context *ctx, struct mm_mapping *mapping, uintptr_t fault_addr)
{
    struct mm_object *front_pobj = mapping->object;
    struct mm_object *pobj = front_pobj;
    struct phys_page *page;
    struct phys_page *new_page;
    uintptr_t fault_page = (uintptr_t)fault_addr & ~0xfff;

    int prot = PM_USER | (mapping->prot & PM_WRITABLE ? PM_WRITABLE : 0);

    while (pobj)
    {
        rwlock_wrlock(&pobj->lock);
        page = first_page(pobj);
        while (page)
        {
            if (mapping->start_addr + page->offset == fault_page)
            {
                switch (pobj->share_type)
                {
                    case share_private:
//                        kdebug("Object is private %p\n", fault_page);
                        kpage_map_user((void *)fault_page, page, mm_pgtable_alloc_callback, ctx, ctx->cr3, prot);
                        //mm_ctx_reload(ctx);
                        break;

                    case share_shared:
//                        kdebug("Object is shared %p\n", fault_page);
                        kpage_map_user((void *)fault_page, page, mm_pgtable_alloc_callback, ctx, ctx->cr3, prot);
//                        mm_ctx_reload(ctx);
                        break;

                    case share_cow:
//                        kdebug("%d: Object is cow %p %p\n", get_core_id(),  fault_addr, ctx);
                        /* Make a copy of the page and put it in front_pobj */
                        new_page = kphys_alloc(physmem_state_user);
                        new_page->offset = fault_page - mapping->start_addr;
                        front_pobj->page_list.add_head(new_page);
                        ++front_pobj->npages;
                        memcpy((void *)(pagestruct_to_phys(new_page) + VADDR_PHYSMEM), (void *)(pagestruct_to_phys(page) + VADDR_PHYSMEM), __PAGESIZE);
                        kpage_map_user((void *)fault_page, new_page, mm_pgtable_alloc_callback, ctx, ctx->cr3, PM_WRITABLE | PM_USER);
                        break;

                    default:
                        ;

                }
                rwlock_unlock(&pobj->lock);
                return page;
            }
            /* Go to the next one */
            page = next_page(pobj, page);
        }
        rwlock_unlock(&pobj->lock);
        pobj = pobj->chain;
    }

    return NULL;
}
Example #7
struct rpc_dplx_rec *
rpc_dplx_lookup_rec(int fd, uint32_t iflags, uint32_t *oflags)
{
    struct rbtree_x_part *t;
    struct rpc_dplx_rec rk, *rec = NULL;
    struct opr_rbtree_node *nv;

    cond_init_rpc_dplx();

    rk.fd_k = fd;
    t = rbtx_partition_of_scalar(&(rpc_dplx_rec_set.xt), fd);

    rwlock_rdlock(&t->lock);
    nv = opr_rbtree_lookup(&t->t, &rk.node_k);

    /* XXX rework lock+insert case, so that new entries are inserted
     * locked, and t->lock critical section is reduced */

    if (! nv) {
        rwlock_unlock(&t->lock);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rk.node_k);
        if (! nv) {
            rec = alloc_dplx_rec();
            if (! rec) {
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: failed allocating rpc_dplx_rec", __func__);
                goto unlock;
            }

            /* tell the caller */
            *oflags = RPC_DPLX_LKP_OFLAG_ALLOC;

            rec->fd_k = fd;

            if (opr_rbtree_insert(&t->t, &rec->node_k)) {
                /* cant happen */
                __warnx(TIRPC_DEBUG_FLAG_LOCK,
                        "%s: collision inserting in locked rbtree partition",
                        __func__);
                free_dplx_rec(rec);
            }
        }
        else {
            /* lost the race: another thread inserted the record while
             * t->lock was dropped above, so use the existing entry */
            rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
            *oflags = RPC_DPLX_LKP_FLAG_NONE;
        }
    }
    else {
        rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
        *oflags = RPC_DPLX_LKP_FLAG_NONE;
    }

    rpc_dplx_ref(rec, (iflags & RPC_DPLX_LKP_IFLAG_LOCKREC) ?
                 RPC_DPLX_FLAG_LOCK :
                 RPC_DPLX_FLAG_NONE);

unlock:
    rwlock_unlock(&t->lock);

    return (rec);
}
Example #8
/*
 *      This function causes svc_run() to exit by telling it that it has no
 *      more work to do.
 */
void
svc_exit(void)
{
#ifdef _REENTRANT
	extern rwlock_t svc_fd_lock;
#endif

	rwlock_wrlock(&svc_fd_lock);
	FD_ZERO(get_fdset());
	rwlock_unlock(&svc_fd_lock);
}
Example #9
void mm_ctx_clean(mm_context *ctx)
{
    /*
     * Delete all the mappings in the context
     */
    struct mm_mapping *pmap;

    kdebug("MM_CTX_CLEAN\n");

    rwlock_wrlock(&ctx->lock);

//    while (1) {
//        /* Delete the mapping */
//        pmap = (struct mm_mapping *)klist_rem_head(&ctx->mapping_list);
//        if (!pmap)
//            break;
//
//        kdebug("%016lx %016lx\n", pmap->start_addr, pmap->start_addr+pmap->length-1);
//        struct mm_object *pobj = pmap->object;
//
//        if (--pobj->refcnt <= 0)
//        {
//            struct phys_page *page = first_page(pobj);
//            while (page)
//            {
//                struct phys_page *p = page;
//                page = next_page(pobj, page);
//                kphys_free(p);
//            }
//            kfree(pobj);
//        }
//        kfree(pmap);
//    }

    // Todo: this should actually free the mappings and their objects, not just empty the list and leak the memory
    ctx->mapping_list.set_empty();

    /* Initialise the top page table: copy it from the current one, then clear
     * the lower (user) half so only the kernel mappings remain. */
    //memcpy((void *)(ctx->cr3 + VADDR_PHYSMEM), (void *)(kernel_cr3 + VADDR_PHYSMEM), __PAGESIZE);

    uint64_t curr_cr3 = rd_cr3();
    memcpy((void *)((uintptr_t)ctx->cr3 + VADDR_PHYSMEM), (void *)((uintptr_t)curr_cr3 + VADDR_PHYSMEM), __PAGESIZE);
    memset((void *)((uintptr_t)ctx->cr3 + VADDR_PHYSMEM), 0, __PAGESIZE / 2);

    cpu_tlb_flush_global();
    //mm_ctx_reload(ctx);
    //wr_cr3(rd_cr3());
    //ksmp_send_ipi();

    rwlock_unlock(&ctx->lock);
}
Example #10
int Thread::try_join(Thread* joiner, void **value_ptr)
{
    /*
     * We lock here because the joinable member might be cleared by the
     * detach code
     */
    rwlock_wrlock(&m_join_lock);

    if (!m_joinable || m_joiner != NULL)
    {
        // Someone is already joining us so fail
        rwlock_unlock(&m_join_lock);
        kdebug("kcall_threadjoin not joinable or already joined\n");
        return EINVAL;
    }

    if (m_state == THR_ST_ZOMBIE)
    {
        // We are a zombie, ready to be reaped
        void *value = zombie_state_info.exit_val;
        int err;
        if (value_ptr && (err = joiner->kcopy_touser(value_ptr, &value, sizeof(value))) < 0)
        {
            return err;
        }
        rwlock_unlock(&m_join_lock);
        die();
        kdebug("kcall_threadjoin returning value\n");
        return 0;
    }

    // We are not ready to be joined yet so block the joiner and queue him
    // on our m_joiner member
    //
    // NOTE: Normally we don't allow threads to block other threads but this
    // is a helper method executing because of a call from joiner
    m_joiner = joiner;
    joiner->joining_state_info.joined_to = this;
    joiner->joining_state_info.value_ptr = value_ptr;
    joiner->m_continuation_func = &Thread::join_cont;

    // This blocks the thread that called this method, i.e. the joiner
    sys.m_sched.block(THR_ST_JOINING);

    //rwlock_unlock(&exiter->m_join_lock);


    rwlock_unlock(&m_join_lock);

    return 0;
}
Example #11
static __inline void
init_cache(void)
{
	rwlock_wrlock(&lock);
	if (!isinit) {
		_CITRUS_HASH_INIT(&shared_pool, CI_HASH_SIZE);
		TAILQ_INIT(&shared_unused);
		shared_max_reuse = -1;
		if (!issetugid() && getenv(CI_ENV_MAX_REUSE))
			shared_max_reuse = atoi(getenv(CI_ENV_MAX_REUSE));
		if (shared_max_reuse < 0)
			shared_max_reuse = CI_INITIAL_MAX_REUSE;
		isinit = true;
	}
	rwlock_unlock(&lock);
}
Example #12
static void* thread_func(void* arg)
{
  int i;
  int sum = 0;

  for (i = 0; i < 1000; i++)
  {
    rwlock_rdlock(&s_rwlock);
    sum += s_counter;
    rwlock_unlock(&s_rwlock);
    rwlock_wrlock(&s_rwlock);
    s_counter++;
    rwlock_unlock(&s_rwlock);
  }

  return 0;
}
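The snippet relies on a shared counter and lock defined elsewhere. A minimal, self-contained harness for it might look like the following (s_rwlock, s_counter and the thread count are assumptions, and the rwlock_* calls are taken to be the pthread wrappers sketched after Example #1):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t s_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static int s_counter = 0;

/* thread_func() as in Example #12 goes here */

int main(void)
{
    pthread_t t[4];
    for (int i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, thread_func, NULL);
    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    /* every increment happens under the write lock, so 4 * 1000 is expected */
    printf("s_counter = %d\n", s_counter);
    return 0;
}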
Example #13
int main(int argc, char **argv) {
    pthread_t threads[2];
    threadata_t *td;
    xmalloc(td, sizeof(*td), return 0);

    srand(time(NULL));
    log_init();

    prog = argv[0];

    rwlock_wrlock(&td->lock);
    checked(pthread_create(&threads[0], NULL, writethread, td));
    checked(pthread_create(&threads[1], NULL, readthread, td));

    pthread_exit(NULL);
    fatal("rwlock tests done!\n");
}
Example #14
/*
 * pclose --
 *	Pclose returns -1 if stream is not associated with a `popened' command,
 *	if already `pclosed', or waitpid returns an error.
 */
int
pclose(FILE *iop)
{
	struct pid *cur, *last;
	int pstat;
	pid_t pid;

	_DIAGASSERT(iop != NULL);

	rwlock_wrlock(&pidlist_lock);

	/* Find the appropriate file pointer. */
	for (last = NULL, cur = pidlist; cur; last = cur, cur = cur->next)
		if (cur->fp == iop)
			break;
	if (cur == NULL) {
#if defined(__minix)
		rwlock_unlock(&pidlist_lock);
#else
		(void)rwlock_unlock(&pidlist_lock);
#endif /* defined(__minix) */
		return (-1);
	}

	(void)fclose(iop);

	/* Remove the entry from the linked list. */
	if (last == NULL)
		pidlist = cur->next;
	else
		last->next = cur->next;

#if defined(__minix)
	rwlock_unlock(&pidlist_lock);
#else
	(void)rwlock_unlock(&pidlist_lock);
#endif /* defined(__minix) */

	do {
		pid = waitpid(cur->pid, &pstat, 0);
	} while (pid == -1 && errno == EINTR);

	free(cur);

	return (pid == -1 ? -1 : pstat);
}
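For context (this is ordinary POSIX usage, not part of the library source above), pclose() is the counterpart of popen() and returns the wait status of the command, or -1 in the error cases listed in the comment:

#include <stdio.h>

int main(void)
{
    /* "ls -l" is just a placeholder command for this sketch */
    FILE *fp = popen("ls -l", "r");
    if (fp == NULL)
        return 1;

    char line[256];
    while (fgets(line, sizeof(line), fp) != NULL)
        fputs(line, stdout);

    int status = pclose(fp);   /* -1 if fp was not popen()ed or waitpid failed */
    return (status == -1) ? 1 : 0;
}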
Example #15
int Thread::do_exit_thread(void *value)
{
    /*
     * Now we lock so others can't change our joiner member. Note that not
     * only do joiners join us but the timer subsystem may timeout a joiner
     * and remove him.
     */

    deactivate();

    rwlock_wrlock(&m_join_lock);

    if (!m_joinable)
    {
        /* We cannot be joined so we die immediately */
        kdebug("Thread::do_exit_thread() not joinable\n");
        sys.m_sched.block(THR_ST_DEAD);
        rwlock_unlock(&m_join_lock);
        die();
        return 0;
    }

    if (m_joiner != NULL)
    {
        /*
         * Someone is waiting for us so make him ready. We don't need to
         * lock him. The lock on us prevents anyone from touching the joiner
         */
        kdebug("Thread::do_exit_thread() have joiner\n");
        m_joiner->joining_state_info.value = value;
        m_joiner->m_continuation_func = &Thread::join_cont;
        sys.m_sched.add(m_joiner, 0);
        sys.m_sched.block(THR_ST_DEAD);
        rwlock_unlock(&m_join_lock);
        die();
        return 0;
    }

    kdebug("Thread::do_exit_thread() becoming zombie\n");
    zombie_state_info.exit_val = value;
    sys.m_sched.block(THR_ST_ZOMBIE);

    rwlock_unlock(&m_join_lock);
    return 0;
}
Example #16
void read_file_bufferizer::set_buf_size(unsigned long value) {
	if (buf_size == value)
		return;
	if (!not_caching) {
		rwlock_wrlock(hidden_locker);
		cur_hidden_size -= buf_size;
		cur_hidden_size += value;
#ifdef LOG_HIDDEN
		log_cur_hidden_size("set_buf", (int)value - (int)buf_size);
#endif
		rwlock_unlock(hidden_locker);
	}
	buf_size = value; 
	buffer = (char*) realloc(buffer, buf_size);
	if (bytes_in_buffer > buf_size) {
		cur_file_pos_read -= (bytes_in_buffer - buf_size);
		bytes_in_buffer = buf_size;
		buf_pos = 0;
	}
}
Example #17
int repo_commit(repo* rep, const char* branchpath) {
	int err = 0;
	char* srcpath = gen_malloc(MAX_PATH_LEN);
	if (!srcpath) {
		err = -ENOMEM;
		goto exit;
	}
	char* dstpath = gen_malloc(MAX_PATH_LEN);
	if (!dstpath) {
		err = -ENOMEM;
		goto exit;
	}

	// Quiesce all FS activity and wait for outstanding meta-data updates
	// to the underlying FS to flush.
	rwlock_wrlock(&rep->fslock);
	sync();

	// All objects in stage are now frozen, they can be moved into
	// the globally shared object stores.
	gen_sprintf(srcpath, "%s/stage/objs", branchpath);
	gen_sprintf(dstpath, "%s/objs", rep->repo);
	err = moveobjects(dstpath, srcpath);
	if (err)
		goto exit_unlock;

	// Now move the staged root into the set of old roots
	uint64_t id = repo_newid(rep);
	gen_sprintf(srcpath, "%s/stage/root", branchpath);
	gen_sprintf(dstpath, "%s/oldroots/i%lu", branchpath, id);
	err = gen_rename(srcpath, dstpath);

exit_unlock:
	rwlock_wrunlock(&rep->fslock);
exit:
	if (srcpath)
		gen_free(srcpath);
	if (dstpath)
		gen_free(dstpath);
	return err;
}
Example #18
int Thread::try_detach()
{
    /*
     * We lock here because the joinable member might be cleared by the
     * detach code
     */
    rwlock_wrlock(&m_join_lock);

    if (!m_joinable)
    {
        // Already detached so fail
        rwlock_unlock(&m_join_lock);
        kdebug("try_detach() not joinable\n");
        return EINVAL;
    }

    m_joinable = 0;

    if (m_joiner)
    {
        // Someone is already joining us we need to ready him
        sys.m_sched.add(m_joiner, 0);
        m_joiner = nullptr;
        //abort();

//        rwlock_unlock(&m_join_lock);
//        kdebug("try_detach() already joined\n");
//        return 0;
    }

    if (m_state == THR_ST_ZOMBIE)
    {
        // We are a zombie, ready to be reaped
        kdebug("try_detach() THR_ST_ZOMBIE\n");
        rwlock_unlock(&m_join_lock);
        die();
        return 0;
    }
    rwlock_unlock(&m_join_lock);
    return 0;
}
Example #19
/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *prev;
	struct svc_callout *s;

	/* unregister the information anyway */
	(void)rpcb_unset(prog, vers, NULL);
	rwlock_wrlock(&svc_lock);
	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL) {
		if (prev == NULL)
			svc_head = s->sc_next;
		else
			prev->sc_next = s->sc_next;
		s->sc_next = NULL;
		if (s->rec.sc_netid)
			mem_free(s->rec.sc_netid, sizeof(s->rec.sc_netid) + 1);
		mem_free(s, sizeof(struct svc_callout));
	}
	rwlock_unlock(&svc_lock);
}
Example #20
int32_t
rpc_dplx_unref(struct rpc_dplx_rec *rec, u_int flags)
{
    struct rbtree_x_part *t;
    struct opr_rbtree_node *nv;
    int32_t refcnt;

    if (! (flags & RPC_DPLX_FLAG_LOCKED))
        REC_LOCK(rec);

    refcnt = --(rec->refcnt);

    __warnx(TIRPC_DEBUG_FLAG_REFCNT,
            "%s: postunref %p rec->refcnt %u",
            __func__, rec, refcnt);

    if (rec->refcnt == 0) {
        t = rbtx_partition_of_scalar(&rpc_dplx_rec_set.xt, rec->fd_k);
        REC_UNLOCK(rec);
        rwlock_wrlock(&t->lock);
        nv = opr_rbtree_lookup(&t->t, &rec->node_k);
        rec = NULL;
        if (nv) {
            rec = opr_containerof(nv, struct rpc_dplx_rec, node_k);
            REC_LOCK(rec);
            if (rec->refcnt == 0) {
                (void) opr_rbtree_remove(&t->t, &rec->node_k);
                REC_UNLOCK(rec);
                __warnx(TIRPC_DEBUG_FLAG_REFCNT,
                        "%s: free rec %p rec->refcnt %u",
                        __func__, rec, refcnt);

                free_dplx_rec(rec);
                rec = NULL;
            } else {
                refcnt = rec->refcnt;
            }
        }
        rwlock_unlock(&t->lock);
    }

    /* release the record lock if we still hold one we took above */
    if (rec && (! (flags & RPC_DPLX_FLAG_LOCKED)))
        REC_UNLOCK(rec);

    return (refcnt);
}
Example #21
/*
 * De-activate a transport handle.
 */
static void
__xprt_do_unregister (SVCXPRT *xprt, bool_t dolock)
{
    int code, sock;

    assert (xprt != NULL);

    sock = xprt->xp_fd;

    if (dolock)
        rwlock_wrlock (&svc_fd_lock);

    if ((sock < __svc_params->max_connections) && 
        (__svc_xports[sock] == xprt)) {
        __svc_xports[sock] = NULL;
        switch (__svc_params->ev_type) {
#if defined(TIRPC_EPOLL)
        case SVC_EVENT_EPOLL:
            code = epoll_ctl(__svc_params->ev_u.epoll.epoll_fd,
                             EPOLL_CTL_DEL,
                             sock,
                             &xprt->xp_epoll_ev);
          break;
#endif
        default:
            FD_CLR (sock, &svc_fdset);
            break;
        } /* switch */

        if (sock >= svc_maxfd) {
            for (svc_maxfd--; svc_maxfd >= 0; svc_maxfd--)
                if (__svc_xports[svc_maxfd])
                    break;
        }
    } /* sock */

    if (dolock)
        rwlock_unlock (&svc_fd_lock);
}
Example #22
/*
 * Find a free FILE for fopen et al.
 */
FILE *
__sfp()
{
  FILE *fp;
  int n;
  struct glue *g;

  if (!__sdidinit)
    __sinit();

  rwlock_wrlock(&__sfp_lock);
  for (g = &__sglue;; g = g->next) {
    for (fp = g->iobs, n = g->niobs; --n >= 0; fp++)
      if (fp->_flags == 0)
        goto found;
    if (g->next == NULL && (g->next = moreglue(NDYNAMIC)) == NULL)
      break;
  }
  rwlock_unlock(&__sfp_lock);
  return (NULL);
found:
  fp->_flags = 1;   /* reserve this slot; caller sets real flags */
  fp->_p = NULL;    /* no current pointer */
  fp->_w = 0;   /* nothing to read or write */
  fp->_r = 0;
  fp->_bf._base = NULL; /* no buffer */
  fp->_bf._size = 0;
  fp->_lbfsize = 0; /* not line buffered */
  fp->_file = -1;   /* no file */
/*  fp->_cookie = <any>; */ /* caller sets cookie, _read/_write etc */
  _UB(fp)._base = NULL; /* no ungetc buffer */
  _UB(fp)._size = 0;
  fp->_lb._base = NULL; /* no line buffer */
  fp->_lb._size = 0;
  memset(WCIO_GET(fp), 0, sizeof(struct wchar_io_data));
  rwlock_unlock(&__sfp_lock);
  return (fp);
}
Example #23
int
gelf_update_versym (Elf_Data *data, int ndx, GElf_Versym *src)
{
  Elf_Data_Scn *data_scn = (Elf_Data_Scn *) data;

  if (data == NULL)
    return 0;

  /* The types for 32 and 64 bit are the same.  Lucky us.  */
  assert (sizeof (GElf_Versym) == sizeof (Elf32_Versym));
  assert (sizeof (GElf_Versym) == sizeof (Elf64_Versym));

  /* Check whether we have to resize the data buffer.  */
  if (INVALID_NDX (ndx, GElf_Versym, &data_scn->d))
    {
      __libelf_seterrno (ELF_E_INVALID_INDEX);
      return 0;
    }

  if (unlikely (data_scn->d.d_type != ELF_T_HALF))
    {
      /* The type of the data better should match.  */
      __libelf_seterrno (ELF_E_DATA_MISMATCH);
      return 0;
    }

  rwlock_wrlock (data_scn->s->elf->lock);

  ((GElf_Versym *) data_scn->d.d_buf)[ndx] = *src;

  /* Mark the section as modified.  */
  data_scn->s->flags |= ELF_F_DIRTY;

  rwlock_unlock (data_scn->s->elf->lock);

  return 1;
}
Example #24
static void mm_change_commit(mm_context *ctx, struct mm_mapping *mapping, int flags)
{
    struct mm_object *pobj = mapping->object->chain;
    struct phys_page *page;

    while (pobj && pobj->share_type == share_private)
    {
        rwlock_wrlock(&pobj->lock);
        page = first_page(pobj);
        while (page)
        {
            kpage_map_prot(ctx->cr3, (void *)(mapping->start_addr + page->offset), flags);

            /* Go to the next one */
            page = next_page(pobj, page);
        }
        pobj->share_type = share_cow;
        rwlock_unlock(&pobj->lock);
        pobj = pobj->chain;
    }

//    uint64_t p;
//    uintptr_t start, end;
//
//    start = mapping->start_addr;
//    end = start + mapping->length - 1;
//
//    start &= ~0xfff;
//    end = __PAGEROUND(end);
//    //kdebug("s=%016lx e=%016lx\n", start, end);
//
//    for (p=start; p<end; p+=__PAGESIZE)
//    {
//        //kdebug("Unmapping %016lx\n", p);
//        kpage_map_prot(ctx->cr3, (void *)p, flags);
//    }
}
Example #25
static int
hashtable_expand(struct hashtable *h)
{
    // Acquire global write lock for entire function
    rwlock_wrlock(&h->globallock);

    /* Double the size of the table to accommodate more entries */
    struct entry **newtable;
    struct entry *e;
    struct entry **pE;
    unsigned int newsize, i, index;
    /* Check we're not hitting max capacity */
    if (h->primeindex == (prime_table_length - 1)) {
        // Release global write lock for early return
        rwlock_wrunlock(&h->globallock);
        return 0;
    }
    newsize = primes[++(h->primeindex)];

    newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
    if (NULL != newtable)
    {
        memset(newtable, 0, newsize * sizeof(struct entry *));
        /* This algorithm is not 'stable'. ie. it reverses the list
         * when it transfers entries between the tables */
        for (i = 0; i < h->tablelength; i++) {
            while (NULL != (e = h->table[i])) {
                h->table[i] = e->next;
                index = indexFor(newsize,e->h);
                e->next = newtable[index];
                newtable[index] = e;
            }
        }
        free(h->table);
        h->table = newtable;
    }
    /* Plan B: realloc instead */
    else 
    {
        newtable = (struct entry **)
                   realloc(h->table, newsize * sizeof(struct entry *));
        if (NULL == newtable) {
            (h->primeindex)--;
            // Release global write lock for early return
            rwlock_wrunlock(&h->globallock);
            return 0;
        }
        h->table = newtable;
        memset(newtable + h->tablelength, 0, (newsize - h->tablelength) * sizeof(struct entry *));
        for (i = 0; i < h->tablelength; i++) {
            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
                index = indexFor(newsize,e->h);
                if (index == i)
                {
                    pE = &(e->next);
                }
                else
                {
                    *pE = e->next;
                    e->next = newtable[index];
                    newtable[index] = e;
                }
            }
        }
    }

#ifdef DEBUG
    printf("resizing fine-grained rwlock array to %d locks.\n", newsize);
#endif
    // Realloc more rwlocks for newly resized table
    h->locks = (pthread_rwlock_t *) realloc(h->locks, sizeof(pthread_rwlock_t) * newsize);
    for(unsigned int i = h->num_locks; i < newsize; ++i) {
        if (pthread_rwlock_init(&h->locks[i], NULL)) {
            perror("pthread_rwlock_init");
            exit(1);
        }
    }
    h->num_locks = newsize;

    h->tablelength = newsize;
    h->loadlimit   = (unsigned int) ceil(newsize * max_load_factor);

    // Release global write lock
    rwlock_wrunlock(&h->globallock);

    return -1;
}
Example #26
void ListWRLock(list_p list)
{
  rwlock_wrlock(list->rwlock);
}
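The write-lock wrapper above only makes sense alongside matching read-lock and unlock wrappers, which are not shown in this example; they would presumably mirror it (the names ListRDLock and ListUnlock below are hypothetical):

/* Hypothetical counterparts mirroring ListWRLock(); the real list API
 * may use different names. */
void ListRDLock(list_p list)
{
  rwlock_rdlock(list->rwlock);
}

void ListUnlock(list_p list)
{
  rwlock_unlock(list->rwlock);
}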
Example #27
/* Package init function.
 * It is intended that applications which must make use of global state
 * will call svc_init() before accessing such state and before executing
 * any svc exported functions.   Traditional TI-RPC programs need not
 * call the function as presently integrated. */
void
svc_init (svc_init_params * params)
{
    __svc_params->max_connections = FD_SETSIZE;

    if (params->flags & SVC_INIT_WARNX)
        __pkg_params.warnx = params->warnx;
    else
        __pkg_params.warnx = warnx;

#if defined(TIRPC_EPOLL)
    if (params->flags & SVC_INIT_EPOLL) {
        __svc_params->ev_type = SVC_EVENT_EPOLL;
        __svc_params->max_connections = params->max_connections;
        __svc_params->ev_u.epoll.max_events = params->max_events;
        __svc_params->ev_u.epoll.epoll_fd = epoll_create1(EPOLL_CLOEXEC);
        if (__svc_params->ev_u.epoll.epoll_fd == -1) {
            warnx("svc_init:  epoll_create failed");
            return;
        }
    } else {
#else
    if (TRUE) {
#endif
        __svc_params->ev_type = SVC_EVENT_FDSET;
        FD_ZERO(&svc_fdset);
    }

    if (params->flags & SVC_INIT_XPORTS) {
	if (__svc_xports == NULL) {
	    __svc_xports = (SVCXPRT **) mem_alloc (
                __svc_params->max_connections * sizeof (SVCXPRT *));
	    if (__svc_xports == NULL) {
		warnx(
                    "svc_init: __svc_xports allocation failure");
		return;
	    }
	    memset (__svc_xports, 0,
                    __svc_params->max_connections * sizeof (SVCXPRT *));
	} /* !__svc_xports */   
    } /* SVC_INIT_XPORTS */

    return;
}

/* ***************  SVCXPRT related stuff **************** */

/*
 * This is used to set xprt->xp_raddr in a way legacy
 * apps can deal with
 */
void
__xprt_set_raddr(SVCXPRT *xprt, const struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET6:
		memcpy(&xprt->xp_raddr, ss, sizeof(struct sockaddr_in6));
		xprt->xp_addrlen = sizeof (struct sockaddr_in6);
		break;
	case AF_INET:
		memcpy(&xprt->xp_raddr, ss, sizeof(struct sockaddr_in));
		xprt->xp_addrlen = sizeof (struct sockaddr_in);
		break;
	default:
		xprt->xp_raddr.sin6_family = AF_UNSPEC;
		xprt->xp_addrlen = sizeof (struct sockaddr);
		break;
	}
}

/*
 * Activate a transport handle.
 */
void
xprt_register (SVCXPRT * xprt)
{
    int code, sock;

    assert (xprt != NULL);

    sock = xprt->xp_fd;

    rwlock_wrlock (&svc_fd_lock);
    if (__svc_xports == NULL) {
        __svc_params->max_connections = FD_SETSIZE;
        __svc_xports = (SVCXPRT **) mem_alloc (FD_SETSIZE * sizeof (SVCXPRT *));
        if (__svc_xports == NULL) {
            __warnx("xprt_register: __svc_xports allocation failure");
            rwlock_unlock (&svc_fd_lock);
            return;
        }
        memset (__svc_xports, 0, FD_SETSIZE * sizeof (SVCXPRT *));
    }
    if (sock < __svc_params->max_connections) {
        __svc_xports[sock] = xprt;
        switch (__svc_params->ev_type) {
#if defined(TIRPC_EPOLL)
        case SVC_EVENT_EPOLL:
            /* set up epoll user data */
            xprt->xp_epoll_ev.data.fd = sock;
            /* wait for read events, level triggered */
            xprt->xp_epoll_ev.events = EPOLLIN;
            /* add to epoll vector */
            code = epoll_ctl(__svc_params->ev_u.epoll.epoll_fd,
                             EPOLL_CTL_ADD,
                             sock,
                             &xprt->xp_epoll_ev);
            break;
#endif
        default:
            FD_SET (sock, &svc_fdset);
            break;
        } /* switch */
      svc_maxfd = max (svc_maxfd, sock);
    }
    rwlock_unlock (&svc_fd_lock);
} /* xprt_register */
Example #28
int mzrt_rwlock_wrlock(mzrt_rwlock *lock) {
    return rwlock_wrlock(lock, 0);
}
Example #29
int mzrt_rwlock_trywrlock(mzrt_rwlock *lock) {
    return rwlock_wrlock(lock, 1);
}
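Unlike the other snippets on this page, the rwlock_wrlock() used by these two wrappers takes a second argument selecting try semantics. The real mzrt implementation has its own lock type; purely as an illustration, a pthread-based version of such a function could be shaped like this (the struct layout and return convention are assumptions):

#include <pthread.h>

typedef struct mzrt_rwlock {
    pthread_rwlock_t lock;          /* assumed internal representation */
} mzrt_rwlock;

/* Sketch only: returns 0 on success, an error code (e.g. EBUSY) otherwise. */
static int rwlock_wrlock(mzrt_rwlock *rwlock, int just_try)
{
    return just_try ? pthread_rwlock_trywrlock(&rwlock->lock)
                    : pthread_rwlock_wrlock(&rwlock->lock);
}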
Example #30
/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when a rpc request for this
 * program number comes in.
 */
bool
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
	void (*dispatch) (struct svc_req *req, SVCXPRT *xprt),
	const struct netconfig *nconf)
{
	bool dummy;
	struct svc_callout *prev;
	struct svc_callout *s;
	struct netconfig *tnconf;
	char *netid = NULL;
	int flag = 0;

	/* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
	if (xprt->xp_netid) {
		netid = mem_strdup(xprt->xp_netid);
		flag = 1;
	} else if (nconf) {
		netid = mem_strdup(nconf->nc_netid);
		flag = 1;
	} else {
		tnconf = __rpcgettp(xprt->xp_fd);
		if (tnconf) {
			netid = mem_strdup(tnconf->nc_netid);
			flag = 1;
			freenetconfigent(tnconf);
		}
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1))
		return (false);

	rwlock_wrlock(&svc_lock);
	s = svc_find(prog, vers, &prev, netid);
	if (s) {
		if (netid)
			mem_free(netid, 0);
		if (s->rec.sc_dispatch == dispatch)
			goto rpcb_it;	/* he is registering another xptr */
		rwlock_unlock(&svc_lock);
		return (false);
	}
	s = mem_alloc(sizeof(struct svc_callout));
	s->rec.sc_prog = prog;
	s->rec.sc_vers = vers;
	s->rec.sc_dispatch = dispatch;
	s->rec.sc_netid = netid;
	s->sc_next = svc_head;
	svc_head = s;

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = mem_strdup(netid);

 rpcb_it:
	rwlock_unlock(&svc_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		/*LINTED const castaway */
		dummy =
		    rpcb_set(prog, vers, (struct netconfig *)nconf,
			     &((SVCXPRT *) xprt)->xp_local.nb);
		return (dummy);
	}
	return (true);
}