Example #1
// spinlock worker thread function
void *do_something(long int who_i_am) {
    int i;
    int dummy;

    // enable asynchronous cancellation
    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE,&dummy);
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,&dummy);

    // wait until the other thread has done the same
    pthread_barrier_wait(&pbarrier);
    // unblock the SIGUSR1 signal
    pthread_sigmask(SIG_UNBLOCK,&block_sig,NULL);

    while (1) {
        // pick a random time interval
        i=rand()%9+1;
        // announce it
        printf("Thread %ld: going to sleep for %d sec...\n",who_i_am,i);
        // and sleep
        sleep(i);
        // try to acquire the spinlock
        if (pthread_spin_trylock(&spinlock)) {
            // on failure, announce it
            printf("Thread %ld: wait for the spinlock...\n",who_i_am);
            // and wait for it
            pthread_spin_lock(&spinlock);
        }
        // from here on the spinlock is held
        // pick a random time interval
        i=rand()%9+1;
        // announce it
        printf("Thread %ld: spinlock obtained and now wait for %d sec...\n",who_i_am,i);
        // and sleep
        sleep(i);
        printf("Thread %ld: unlock spinlock...\n",who_i_am);
        // release the spinlock
        pthread_spin_unlock(&spinlock);
    }
    return NULL;
}
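
The excerpt above depends on globals that are not shown. Below is a minimal sketch of the assumed setup: the names spinlock, pbarrier, and block_sig come from the excerpt, while the start() adapter and the main() driver are a hypothetical reconstruction that matches the asynchronous-cancellation setup.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

pthread_spinlock_t spinlock;   // lock the two threads contend for
pthread_barrier_t pbarrier;    // start-up barrier for the two threads
sigset_t block_sig;            // signal set containing SIGUSR1

// adapter so do_something() can be passed to pthread_create()
static void *start(void *arg) {
    return do_something((long int) arg);
}

int main(void) {
    pthread_t t[2];
    long i;

    // the threads expect SIGUSR1 to be blocked until they unblock it
    sigemptyset(&block_sig);
    sigaddset(&block_sig, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &block_sig, NULL);

    pthread_spin_init(&spinlock, PTHREAD_PROCESS_PRIVATE);
    pthread_barrier_init(&pbarrier, NULL, 2);
    srand(time(NULL));

    for (i = 0; i < 2; i++)
        pthread_create(&t[i], NULL, start, (void *) i);

    // let the threads run for a while, then cancel them asynchronously
    sleep(60);
    for (i = 0; i < 2; i++) {
        pthread_cancel(t[i]);
        pthread_join(t[i], NULL);
    }

    pthread_barrier_destroy(&pbarrier);
    pthread_spin_destroy(&spinlock);
    return 0;
}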
Example #2
ssize_t
usdf_msg_recv(struct fid_ep *fep, void *buf, size_t len,
		void *desc, fi_addr_t src_addr, void *context)
{
	struct usdf_ep *ep;
	struct usdf_rx *rx;
	struct usdf_msg_qe *rqe;
	struct usdf_domain *udp;

	ep = ep_ftou(fep);
	rx = ep->ep_rx;
	udp = ep->ep_domain;

	if (TAILQ_EMPTY(&rx->r.msg.rx_free_rqe)) {
		return -FI_EAGAIN;
	}

	pthread_spin_lock(&udp->dom_progress_lock);

	rqe = TAILQ_FIRST(&rx->r.msg.rx_free_rqe);
	TAILQ_REMOVE(&rx->r.msg.rx_free_rqe, rqe, ms_link);
	--rx->r.msg.rx_num_free_rqe;

	rqe->ms_context = context;
	rqe->ms_iov[0].iov_base = buf;
	rqe->ms_iov[0].iov_len = len;
	rqe->ms_last_iov = 0;

	rqe->ms_cur_iov = 0;
	rqe->ms_cur_ptr = buf;
	rqe->ms_iov_resid = len;
	rqe->ms_length = 0;
	rqe->ms_resid = len;

	TAILQ_INSERT_TAIL(&rx->r.msg.rx_posted_rqe, rqe, ms_link);

	pthread_spin_unlock(&udp->dom_progress_lock);

	return 0;
}
Example #3
inline int
sstack_sfsd_add(uint32_t weight, sstack_sfsd_pool_t *pools,
				sfsd_t *sfsd)
{
	sstack_sfsd_pool_t *temp = NULL;
	int i = 0;
	int index = -1;


	// Parameter validation
	if (weight < MAXIMUM_WEIGHT || weight > MINIMUM_WEIGHT ||
				NULL == pools || NULL == sfsd) {
		sfs_log(sfs_ctx, SFS_ERR, "%s: Invalid parameters specified \n",
						__FUNCTION__);

		errno = EINVAL;
		return -1;
	}

	// Get the index of the sfsd pool that covers the specified storage weight
	for (i = 0; i < MAX_SFSD_POOLS; i++) {
		uint32_t low = *(uint32_t *) &weights[i][0];
		uint32_t high = *(uint32_t *) &weights[i][1];
		if (weight >= low && weight <= high)
			break;
	}
	index = i;
	// advance to the matching pool: temp = &pools[index]
	temp = pools + index;

	// Now temp points to the right pool
	pthread_spin_lock(&temp->lock);
	bds_list_add_tail((bds_list_head_t) &sfsd->list,
								(bds_list_head_t) &temp->list);
	pthread_spin_unlock(&temp->lock);

	return 0;
}
Example #4
/**
	@brief Set a value in the thread-specific data identified by the key.
*/
int pthread_setspecific (pthread_key_t key, const void *value)
{
    hurd_ihash_t speci_tb = (hurd_ihash_t)get_current_pt_specific();

    /* is the key valid? */
    if (key > __pthread_key_nums ||
            __destructort_arry[key] == DESTRUCTORT_INVALID)
        return -EINVAL;

    /* has the per-thread table been created yet? */
    if (!speci_tb)
    {
        int ret;

        /* if this fails, it must be out of memory */
        if (hurd_ihash_create(&speci_tb, HURD_IHASH_NO_LOCP) != 0)
            return -ENOMEM;

        /* add speci_tb to speci_tables */
        pthread_spin_lock(&__pthread_specific_lock);
        ret = find_elem_index_and_add_to_arry(speci_tb, &__pthread_specific_arry, (int *)&__pthread_specific_nums, NULL);
        pthread_spin_unlock(&__pthread_specific_lock);

        /* the only possible error here is out of memory */
        if (ret < 0)
        {
            hurd_ihash_destroy(speci_tb);
            return -ENOMEM;
        }

        /* set to pthread */
        set_current_pt_specific((void *)speci_tb);
    }

    /* add to ihash tables */
    if (hurd_ihash_add(speci_tb, (hurd_ihash_key_t)key, (hurd_ihash_value_t)value) != 0)
        return -ENOMEM;

    return 0;
}
Example #5
void calqueue_put(double timestamp, void *payload) {

	calqueue_node *new_node;

	//printf("calqueue: inserendo %f, cwidth %f, bukettop %f, nbuckets %d, lastprio %f\n", timestamp, cwidth, buckettop, nbuckets, lastprio);

	// Allocate the node and check for failure before touching it
	new_node = malloc(sizeof(calqueue_node));
	if(new_node == NULL){
		printf("Out of memory in %s:%d\n", __FILE__, __LINE__);
		abort();
	}

	// Fill the node entry
	new_node->timestamp = timestamp;
	new_node->payload = payload;
	new_node->next = NULL;

	pthread_spin_lock(&cal_spinlock);
	calqueue_enq(new_node);
	pthread_spin_unlock(&cal_spinlock);

}
Example #6
static void
my_free_hook (void *ptr, const void *caller)
{
    pthread_spin_lock(&lock);
    /* Restore all old hooks */
    __malloc_hook = old_malloc_hook;
    __realloc_hook = old_realloc_hook;
    __free_hook = old_free_hook;
    /* Call recursively */
    free (ptr);
    /* Save underlying hooks */
    old_malloc_hook = __malloc_hook;
    old_realloc_hook = __realloc_hook;
    old_free_hook = __free_hook;
    /* printf might call free, so protect it too. */
    printf ("freed pointer %p\n", ptr);
    /* Restore our own hooks */
    __malloc_hook = my_malloc_hook;
    __realloc_hook = my_realloc_hook;
    __free_hook = my_free_hook;
    pthread_spin_unlock(&lock);
}
int pxp_init(void)
{
	pthread_spin_lock(&lock);
	if (fd > 0) {
		active_open_nr++;
		pthread_spin_unlock(&lock);
		return 0;
	}

	if (fd < 0) {
		fd = open(PXP_DEVICE_NAME, O_RDWR, 0);
		if (fd < 0) {
			pthread_spin_unlock(&lock);
			dbg(DBG_ERR, "open file error.\n");
			return -1;
		}
	}

	active_open_nr++;
	pthread_spin_unlock(&lock);
	return 0;
}
Example #8
int nvmed_queue_complete(NVMED_QUEUE* nvmed_queue) {
	NVMED* nvmed;
	NVMED_IOD* iod;
	volatile struct nvme_completion *cqe;
	u16 head, phase;
	int num_proc = 0;
	nvmed = nvmed_queue->nvmed;
	
	pthread_spin_lock(&nvmed_queue->cq_lock);
	head = nvmed_queue->cq_head;
	phase = nvmed_queue->cq_phase;
	for(;;) {
		cqe = (volatile struct nvme_completion *)&nvmed_queue->cqes[head];
		if((cqe->status & 1) != nvmed_queue->cq_phase)
			break;

		if(++head == nvmed->dev_info->q_depth) {
			head = 0;
			phase = !phase;
		}
		
		iod = nvmed_queue->iod_arr + cqe->command_id;
		nvmed_complete_iod(iod);
		num_proc++;
		if(head == 0 || num_proc == COMPLETE_QUEUE_MAX_PROC) break;
	}
	if(head == nvmed_queue->cq_head && phase == nvmed_queue->cq_phase) {
		pthread_spin_unlock(&nvmed_queue->cq_lock);
		return num_proc;
	}

	COMPILER_BARRIER();
	*(volatile u32 *)nvmed_queue->cq_db = head;
	nvmed_queue->cq_head = head;
	nvmed_queue->cq_phase = phase;
	pthread_spin_unlock(&nvmed_queue->cq_lock);

	return num_proc;
}
Example #9
static ssize_t
usdf_eq_write(struct fid_eq *feq, uint32_t event, const void *buf,
		size_t len, uint64_t flags)
{
	struct usdf_eq *eq;
	int ret;

	eq = eq_ftou(feq);

	pthread_spin_lock(&eq->eq_lock);

	/* EQ full? */
	if (atomic_get(&eq->eq_num_events) == eq->eq_ev_ring_size) {
		ret = -FI_EAGAIN;
		goto done;
	}

	ret = usdf_eq_write_event(eq, event, buf, len, flags);
done:
	pthread_spin_unlock(&eq->eq_lock);
	return ret;
}
Example #10
File: sc_sv.c Project: djs55/xha
MTC_STATIC  MTC_STATUS
script_terminate(void)
{
    MTC_U32 i;

    pthread_spin_lock(&lock);
    terminate = TRUE;
    for (i = 0 ; i < SCRIPT_SOCKET_NUM; i++) 
    {
        if (sc_listening_socket[i] > 0) 
        {
#if 0
            //  let the exit system call handle this to give
            //  an accurate indication of daemon termination
            //  to calldaemon (command)

            close(sc_listening_socket[i]);
#endif
            sc_listening_socket[i] = -1;
        }
    }
    pthread_spin_unlock(&lock);

#if 0
    {
        int pthread_ret;
        // wait for thread termination;

        for (i = 0 ; i < SCRIPT_SOCKET_NUM; i++) 
        {
            if ((pthread_ret = pthread_join(sc_thread[i], NULL)) != 0) 
            {
                pthread_ret = pthread_kill(sc_thread[i], SIGKILL);
            }
        }
    }
#endif
    return MTC_SUCCESS;
}
Example #11
// for TCP
// add to the list; the sentinel will delete this in the main event loop
int
delete_close_event(int fd, struct fetcher *f)
{
    struct list *el = NULL;
    struct list_node *nd = NULL;
    el = f->el;
    if (el == NULL)
        return -1;
    if ((nd = malloc(sizeof(struct list_node))) == NULL)
        return -1;
    nd->data = malloc(sizeof(int));
    if (nd->data == NULL) {
        free(nd);
        return -1;
    }
    memcpy(nd->data, &fd, sizeof(int));
    pthread_spin_lock(&el->lock);
    nd->next = el->head;
    el->head = nd;
    pthread_spin_unlock(&el->lock);
    return 0;
}
Example #12
/*
 * Set this timer to fire "ms" milliseconds from now.  If the timer is
 * already queued, the previous timeout is discarded.
 *
 * When the timer expires, the registered timer callback is called and
 * the timer entry is removed from the queued list.  The timer routine will
 * not be called again until usdf_timer_set() is called again to re-arm it.
 * usdf_timer_set() is safe to call from the timer service routine.
 */
int
usdf_timer_set(struct usdf_fabric *fp, struct usdf_timer_entry *entry,
		uint32_t ms)
{
	int ret;
	unsigned bucket;

	pthread_spin_lock(&fp->fab_timer_lock);

	/* If no timers active, cur_bucket_ms may need catchup */
	if (fp->fab_active_timer_count == 0) {
		fp->fab_cur_bucket_ms = usdf_get_ms();
		ret = usdf_fabric_wake_thread(fp);
		if (ret != 0) {
			goto out;
		}
	}

	if (entry->te_flags & USDF_TF_QUEUED) {
		LIST_REMOVE(entry, te_link);
		--fp->fab_active_timer_count;
	}

	// we could make "overflow" bucket...
	if (ms >= USDF_NUM_TIMER_BUCKETS) {
		ret = -FI_EINVAL;
		goto out;
	}
	bucket = (fp->fab_cur_bucket + ms) & (USDF_NUM_TIMER_BUCKETS - 1);

	LIST_INSERT_HEAD(&fp->fab_timer_buckets[bucket], entry, te_link);
	entry->te_flags |= USDF_TF_QUEUED;
	++fp->fab_active_timer_count;
	ret = 0;

out:
	pthread_spin_unlock(&fp->fab_timer_lock);
	return ret;
}
Example #13
int mlx4_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
{
	struct mlx4_cq *cq = to_mcq(ibcq);
	struct mlx4_qp *qp = NULL;
	int npolled;
	int err = CQ_OK;

	pthread_spin_lock(&cq->lock);

	for (npolled = 0; npolled < ne; ++npolled) {
		err = mlx4_poll_one(cq, &qp, wc + npolled);
		if (err != CQ_OK)
			break;
	}

	if (npolled)
		update_cons_index(cq);

	pthread_spin_unlock(&cq->lock);

	return err == CQ_POLL_ERR ? err : npolled;
}
static void *                   /* Loop 'arg' times incrementing 'glob' */
threadFunc(void *arg)
{
    int loops = *((int *) arg);
    int loc, j, s;

    for (j = 0; j < loops; j++) {
        s = pthread_spin_lock(&splock);
        if (s != 0)
            errExitEN(s, "pthread_spin_lock");

        loc = glob;
        loc++;
        glob = loc;

        s = pthread_spin_unlock(&splock);
        if (s != 0)
            errExitEN(s, "pthread_spin_unlock");
    }

    return NULL;
}
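
threadFunc above is the worker from a two-thread counter-increment demo; the globals it uses are not shown on this page. Below is a minimal sketch of the assumed context: glob, splock, and errExitEN are names taken from the excerpt, while the helper body and the main() driver are a hypothetical reconstruction.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int glob = 0;               // shared counter incremented by both threads
static pthread_spinlock_t splock;  // protects glob

// minimal stand-in for the errExitEN() helper used in the excerpt
static void errExitEN(int en, const char *msg) {
    fprintf(stderr, "%s: error %d\n", msg, en);
    exit(EXIT_FAILURE);
}

int main(int argc, char *argv[]) {
    pthread_t t1, t2;
    int loops, s;

    loops = (argc > 1) ? atoi(argv[1]) : 10000000;

    s = pthread_spin_init(&splock, PTHREAD_PROCESS_PRIVATE);
    if (s != 0)
        errExitEN(s, "pthread_spin_init");

    s = pthread_create(&t1, NULL, threadFunc, &loops);
    if (s != 0)
        errExitEN(s, "pthread_create");
    s = pthread_create(&t2, NULL, threadFunc, &loops);
    if (s != 0)
        errExitEN(s, "pthread_create");

    s = pthread_join(t1, NULL);
    if (s != 0)
        errExitEN(s, "pthread_join");
    s = pthread_join(t2, NULL);
    if (s != 0)
        errExitEN(s, "pthread_join");

    // with the spinlock in place, the total is exactly 2 * loops
    printf("glob = %d\n", glob);
    exit(EXIT_SUCCESS);
}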
Example #15
void tprintf(char *msg, ...)
{
	int ret;
	va_list vl, vl2;

	va_start(vl, msg);
	/* a va_list cannot be reused after vsnprintf consumes it, so
	 * keep a copy around for the possible second attempt below */
	va_copy(vl2, vl);
	pthread_spin_lock(&buffer_lock);

	ret = vsnprintf(buffer + buffer_use,
			sizeof(buffer) - buffer_use,
			msg, vl);
	if (ret < 0)
		/* Something screwed up! Unexpected. */
		goto out;

	if (ret >= sizeof(buffer) - buffer_use) {
		tprintf_flush();

		/* Rewrite the buffer, using the saved copy of the args */
		ret = vsnprintf(buffer + buffer_use,
				sizeof(buffer) - buffer_use,
				msg, vl2);
		/* Again, we've failed! This shouldn't happen! So 
		 * switch to vfprintf temporarily :-( */
		if (ret >= sizeof(buffer) - buffer_use) {
			fprintf(stderr, "BUG (buffer too large) -->\n");
			vfprintf(stdout, msg, vl);
			fprintf(stderr, " <--\n");
			goto out;
		}
	}

	if (ret < sizeof(buffer) - buffer_use)
		buffer_use += ret;

out:
	pthread_spin_unlock(&buffer_lock);
	va_end(vl2);
	va_end(vl);
}
Example #16
void flush_buf(struct pipe* p, int id)
{
	struct pipe_elem* elem;

	// lock
	pthread_spin_lock(&p->lock);

	if (id < 0 || id >= p->n_dst)
		goto unlock;

	while (!dequeue(&p->dst[id], (void**)&elem)) {
		elem->ref_cnt--;
		if (elem->ref_cnt <= 0) {
			assert(elem->ref_cnt >= 0);
			// call enqueue() outside assert() so the element is
			// still recycled to the source queue in NDEBUG builds
			int err = enqueue(&p->src, elem);
			assert(!err);
			(void)err;
		}
	}

unlock:
	// unlock
	pthread_spin_unlock(&p->lock);
}
Example #17
/*
 * Poll for the completion of a specific I/O
 * target_id : submission id
 */
void nvmed_io_polling(NVMED_HANDLE* nvmed_handle, u16 target_id) {
	NVMED* nvmed;
	NVMED_QUEUE* nvmed_queue;
	NVMED_IOD* iod;
	volatile struct nvme_completion *cqe;
	u16 head, phase;
	nvmed_queue = HtoQ(nvmed_handle);
	nvmed = HtoD(nvmed_handle);

	pthread_spin_lock(&nvmed_queue->cq_lock);
	while(1) {
		head = nvmed_queue->cq_head;
		phase = nvmed_queue->cq_phase;
		iod = nvmed_queue->iod_arr + target_id;
		if(iod->status == IO_COMPLETE) {
			break;
		}
		cqe = (volatile struct nvme_completion *)&nvmed_queue->cqes[head];
		for (;;) {
			if((cqe->status & 1) == nvmed_queue->cq_phase)
				break;
		}

		if(++head == nvmed->dev_info->q_depth) {
			head = 0;
			phase = !phase;
		}

		iod = nvmed_queue->iod_arr + cqe->command_id;
		nvmed_complete_iod(iod);

		COMPILER_BARRIER();
		*(volatile u32 *)nvmed_queue->cq_db = head;
		nvmed_queue->cq_head = head;
		nvmed_queue->cq_phase = phase;
	}
	pthread_spin_unlock(&nvmed_queue->cq_lock);
}
Example #18
File: 3-1.c Project: 8l/rose
static void* fn_chld(void *arg)
{ 
	int rc = 0;

	/* Initialize spin lock */
	if(pthread_spin_init(&spinlock, PTHREAD_PROCESS_PRIVATE) != 0)
	{
		printf("main: Error at pthread_spin_init()\n");
		exit(PTS_UNRESOLVED);
	}

	/* Lock the spinlock */
	printf("thread: attempt spin lock\n");
	rc = pthread_spin_lock(&spinlock);
	if(rc != 0)
	{
		printf("Error: thread failed to get spin lock error code:%d\n" , rc);
		exit(PTS_UNRESOLVED);
	}
	printf("thread: acquired spin lock\n");
	
	/* Wait for main to try and unlock this spinlock */
	sem = INMAIN;
	while(sem == INMAIN)
		sleep(1);

	/* Cleanup just in case */
	pthread_spin_unlock(&spinlock);
	
	if(pthread_spin_destroy(&spinlock) != 0)
	{
		printf("Error at pthread_spin_destroy()");
		exit(PTS_UNRESOLVED);
	}	

	pthread_exit(0);
	return NULL;
}
Example #19
File: 1-1.c Project: 1587/ltp
int main(void)
{
	int rc = 0;

	printf("main: initialize spin lock\n");
	if (pthread_spin_init(&spinlock, PTHREAD_PROCESS_PRIVATE) != 0) {
		printf("main: Error at pthread_spin_init()\n");
		return PTS_UNRESOLVED;
	}

	printf("main: attempt spin lock\n");

	/* We should get the lock */
	if (pthread_spin_lock(&spinlock) != 0) {
		printf
		    ("Unresolved: main cannot get spin lock when no one owns the lock\n");
		return PTS_UNRESOLVED;
	}

	printf("main: acquired spin lock\n");

	printf("main: unlock spin lock\n");
	if (pthread_spin_unlock(&spinlock) != 0) {
		printf("main: Error at pthread_spin_unlock()\n");
		return PTS_UNRESOLVED;
	}

	printf("main: destroy spin lock\n");
	rc = pthread_spin_destroy(&spinlock);
	if (rc != 0) {
		printf("Test FAILED: Error at pthread_spin_destroy()"
		       "Return code : %d\n", rc);
		return PTS_FAIL;
	}

	printf("Test PASSED\n");
	return PTS_PASS;
}
Example #20
void deallocate_vbufs(int hca_num)
{
    vbuf_region *r = vbuf_region_head;

#if !defined(CKPT)
    if (MPIDI_CH3I_RDMA_Process.has_srq
#if defined(RDMA_CM)
        || MPIDI_CH3I_RDMA_Process.use_rdma_cm_on_demand
#endif /* defined(RDMA_CM) */
        || MPIDI_CH3I_Process.cm_type == MPIDI_CH3I_CM_ON_DEMAND)
#endif /* !defined(CKPT) */
    {
        pthread_spin_lock(&vbuf_lock);
    }

    while (r)
    {
        if (r->mem_handle[hca_num] != NULL
            && ibv_dereg_mr(r->mem_handle[hca_num]))
        {
            ibv_error_abort(IBV_RETURN_ERR, "could not deregister MR");
        }

        DEBUG_PRINT("deregister vbufs\n");
        r = r->next;
    }

#if !defined(CKPT)
    if (MPIDI_CH3I_RDMA_Process.has_srq
#if defined(RDMA_CM)
        || MPIDI_CH3I_RDMA_Process.use_rdma_cm_on_demand
#endif /* defined(RDMA_CM) */
        || MPIDI_CH3I_Process.cm_type == MPIDI_CH3I_CM_ON_DEMAND)
#endif /* !defined(CKPT) */
    {
         pthread_spin_unlock(&vbuf_lock);
    }
}
Example #21
codec_buffer_t *codec_async_mem_fetch_memory(void)
{
	codec_buffer_t *ret;

	sem_wait(&_sem_empty);

	pthread_spin_lock(&_mem_spin);

	ret = _mem_head;

	if (_mem_head == &(mem[_codec_mem_pool_size-1])) {
		_mem_head = &(mem[0]);
	}
	else {
		_mem_head++;
	}

	_mem_count--;

	pthread_spin_unlock(&_mem_spin);

	return ret;
}
Example #22
/* Makes sure there are some request threads for sock operations, and starts
   a server if necessary.  This routine should be called *after* creating the
   port(s) which need a server, as the server routine only operates while
   there are any ports.  */
void
ensure_sock_server ()
{
  pthread_t thread;
  error_t err;

  pthread_spin_lock (&sock_server_active_lock);
  if (sock_server_active)
    pthread_spin_unlock (&sock_server_active_lock);
  else
    {
      sock_server_active = 1;
      pthread_spin_unlock (&sock_server_active_lock);
      err = pthread_create (&thread, NULL, handle_sock_requests, NULL);
      if (!err)
	pthread_detach (thread);
      else
	{
	  errno = err;
	  perror ("pthread_create");
	}
    }
}
Example #23
int32_t msgqueue_push( struct msgqueue * self, struct task * task, uint8_t isnotify  )
{
    int32_t rc = -1;
    uint32_t isbc = 0;

    pthread_spin_lock( &self->lock );

    rc = queue_push( self->queue, task );
    if ( isnotify != 0 )
    {
		isbc = queue_count( self->queue );
    }
    
    pthread_spin_unlock( &self->lock );

    if ( rc == 0 && isbc == 1 )
    {
        char buf[1] = {0};
        write( self->pushfd, buf, 1 );
    }

    return rc;
}
Example #24
psm2_error_t
ips_tid_release(struct ips_tid *tidc,
		uint32_t *tid_array, uint32_t tidcnt)
{
	struct ips_tid_ctrl *ctrl = tidc->tid_ctrl;
	psm2_error_t err = PSM2_OK;

	psmi_assert(tidcnt > 0);
	if (tidc->context->tid_ctrl)
		pthread_spin_lock(&ctrl->tid_ctrl_lock);

	if (hfi_free_tid(tidc->context->ctrl,
		    (uint64_t) (uintptr_t) tid_array, tidcnt) < 0) {
		if (tidc->context->tid_ctrl)
			pthread_spin_unlock(&ctrl->tid_ctrl_lock);

		/* Failing to unpin the pages is a fatal error */
		err = psmi_handle_error(tidc->context->ep,
			PSM2_EP_DEVICE_FAILURE,
			"Failed to tid free %d tids",
			tidcnt);
		goto fail;
	}

	ctrl->tid_num_avail += tidcnt;
	if (tidc->context->tid_ctrl)
		pthread_spin_unlock(&ctrl->tid_ctrl_lock);

	tidc->tid_num_inuse -= tidcnt;
	/* If an available callback is registered invoke it */
	if (((tidc->tid_num_inuse + tidcnt) == ctrl->tid_num_max)
	    && tidc->tid_avail_cb)
		tidc->tid_avail_cb(tidc, tidc->tid_avail_context);

fail:
	return err;
}
Example #25
static void
usdf_dom_rdc_free_data(struct usdf_domain *udp)
{
	struct usdf_rdm_connection *rdc;
	int i;

	if (udp->dom_rdc_hashtab != NULL) {

		pthread_spin_lock(&udp->dom_progress_lock);
		for (i = 0; i < USDF_RDM_HASH_SIZE; ++i) {
			rdc = udp->dom_rdc_hashtab[i];
			while (rdc != NULL) {
				usdf_timer_reset(udp->dom_fabric,
						rdc->dc_timer, 0);
				rdc = rdc->dc_hash_next;
			}
		}
		pthread_spin_unlock(&udp->dom_progress_lock);

		/* XXX probably want a timeout here... */
		while (ofi_atomic_get32(&udp->dom_rdc_free_cnt) <
		       (int)udp->dom_rdc_total) {
			pthread_yield();
		}

		free(udp->dom_rdc_hashtab);
		udp->dom_rdc_hashtab = NULL;
	}

	while (!SLIST_EMPTY(&udp->dom_rdc_free)) {
		rdc = SLIST_FIRST(&udp->dom_rdc_free);
		SLIST_REMOVE_HEAD(&udp->dom_rdc_free, dc_addr_link);
		usdf_timer_free(udp->dom_fabric, rdc->dc_timer);
		free(rdc);
	}
}
Example #26
/* Get the inode V_INODE belonging to inode number INODE.
   Returns 0 if this inode number is free.  */
inode_t
vi_lookup(ino_t inode)
{
  struct table_page *table = inode_table;
  /* See above for rationale of decrement. */
  int page = (inode - 1) >> LOG2_TABLE_PAGE_SIZE;
  int offset = (inode - 1) & (TABLE_PAGE_SIZE - 1);
  inode_t v_inode = 0;

  pthread_spin_lock (&inode_table_lock);

  while (table && page > 0)
    {
      page--;
      table = table->next;
    }

  if (table)
    v_inode = &table->vi[offset];

  pthread_spin_unlock (&inode_table_lock);

  return v_inode;
}
Example #27
static void overlord_apply_deferred_rules(struct zsession *sess)
{
    if (utarray_len(&sess->client->deferred_rules)) {
        struct zcrules parsed_rules;
        uint64_t curr_clock = zclock(false);

        pthread_spin_lock(&sess->client->lock);

        crules_init(&parsed_rules);

        while (utarray_back(&sess->client->deferred_rules)) {
            struct zrule_deferred *rule =
                    *(struct zrule_deferred **) utarray_back(&sess->client->deferred_rules);

            if (rule->when > curr_clock) {
                break;
            }

            if (0 != crules_parse(&parsed_rules, rule->rule)) {
                zero_syslog(LOG_INFO, "Failed to parse deferred rule '%s' for client %s",
                            rule->rule, ipv4_to_str(htonl(sess->ip)));
            } else {
                zero_syslog(LOG_INFO, "Applying deferred rule '%s' for client %s",
                            rule->rule, ipv4_to_str(htonl(sess->ip)));
            }

            free(rule->rule);
            free(rule);
            utarray_pop_back(&sess->client->deferred_rules);
        }

        pthread_spin_unlock(&sess->client->lock);
        client_apply_rules(sess->client, &parsed_rules);
        crules_free(&parsed_rules);
    }
}
Example #28
/*
 * Create MQ Handle
 * (*func) must be defined - callback for picking an I/O queue from the MQ
 *							Arguments - Handle, ops, offset, len
 */
NVMED_HANDLE* nvmed_handle_create_mq(NVMED_QUEUE** nvmed_queue, int num_mq, int flags,
		NVMED_QUEUE* (*func)(NVMED_HANDLE*, u8, unsigned long, unsigned int)) {
	NVMED_HANDLE* nvmed_handle;
	int i;

	if(func == NULL)
		return NULL;

	nvmed_handle = nvmed_handle_create(nvmed_queue[0], flags);
	if(nvmed_handle != NULL) {
		for(i=1; i<num_mq; i++) {
			pthread_spin_lock(&nvmed_queue[i]->mngt_lock);
			nvmed_queue[i]->numHandle++;
			pthread_spin_unlock(&nvmed_queue[i]->mngt_lock);
		}
		nvmed_handle->queue_mq = nvmed_queue;
		nvmed_handle->num_mq = num_mq;
		nvmed_handle->flags |= HANDLE_MQ;
		nvmed_handle->mq_get_queue = func;
	}
	else return NULL;

	return nvmed_handle;
}
Example #29
static void *
my_realloc_hook (void *ptr, size_t size, const void *caller)
{
    pthread_spin_lock(&lock);
    void *result;
    /* Restore all old hooks */
    __malloc_hook = old_malloc_hook;
    __realloc_hook = old_realloc_hook;
    __free_hook = old_free_hook;
    /* Call recursively */
    result = realloc (ptr, size);
    /* Save underlying hooks */
    old_malloc_hook = __malloc_hook;
    old_realloc_hook = __realloc_hook;
    old_free_hook = __free_hook;
    /* printf might call malloc, so protect it too. */
    printf ("realloc (%p, %u) returns %p\n", ptr, (unsigned int) size, result);
    /* Restore our own hooks */
    __malloc_hook = my_malloc_hook;
    __realloc_hook = my_realloc_hook;
    __free_hook = my_free_hook;
    pthread_spin_unlock(&lock);
    return result;
}
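
Both hook examples (this one and my_free_hook in Example #6) assume that lock has been initialized and that the hooks were installed at startup. Below is a minimal sketch of that setup using the glibc hook API; note that __malloc_hook and friends were long deprecated and removed in glibc 2.34, so this only builds against older glibc. The my_malloc_hook declaration stands in for the malloc counterpart not shown on this page.

#include <malloc.h>
#include <pthread.h>

static pthread_spinlock_t lock;

/* saved copies of the underlying hooks, as used in the examples */
static void *(*old_malloc_hook)(size_t, const void *);
static void *(*old_realloc_hook)(void *, size_t, const void *);
static void (*old_free_hook)(void *, const void *);

/* the malloc counterpart referenced above but not shown on this page */
static void *my_malloc_hook(size_t size, const void *caller);

static void my_init_hook(void)
{
    pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
    /* save the original hooks, then install ours */
    old_malloc_hook = __malloc_hook;
    old_realloc_hook = __realloc_hook;
    old_free_hook = __free_hook;
    __malloc_hook = my_malloc_hook;
    __realloc_hook = my_realloc_hook;
    __free_hook = my_free_hook;
}

/* glibc calls this hook once malloc is initialized */
void (*__malloc_initialize_hook)(void) = my_init_hook;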
Example #30
void* conn_pool_get(conn_pool_t* pool)
{
	void* conn = NULL;
	conn_cb_t* cbs = pool->cbs;
	int islocked = (pthread_spin_lock(&pool->spin)==0);

	int null_count = 0;
	while(!cpool_is_empty(pool)){			
		if(pool->conns[pool->start] == NULL){
			pool->start = (pool->start+1)%pool->size;
			if(++null_count >= pool->size){ // this means every connection in the pool is empty
				printf("############## conn_pool_get pool is empty ##############\n");
				pool->curconns = 0;
				break;
			}
			continue;
		}else{
			conn = pool->conns[pool->start];
			pool->conns[pool->start] = NULL;
			sync_dec(&pool->curconns);
			pool->start = (pool->start+1)%pool->size;
			if(conn != NULL)uint64_inc(&pool->statis.get);
			break;
		}
	}
	
	if(islocked)pthread_spin_unlock(&pool->spin);
	
	if(conn==NULL){
		conn = new_and_connect(cbs, pool->args, &pool->statis);
		if(conn != NULL)uint64_inc(&pool->statis.get_real);
	}
	
	return conn;
	
}