Example #1
int pagefault_handler(int pid, int pageNo, char type) {
	struct Node *head = getHead();
	if (allocated < NUM_FRAME) {
		/* A free frame is available: map the faulting page into it. */
		frame_table[allocated].pid = pid;
		frame_table[allocated].page = pageNo;
		pageTable.entry[pid][pageNo].valid = true;
		list_insert_tail(head, allocated);
		allocated++;
		return allocated;
	} else {
		/* All frames in use: evict the victim chosen by the policy. */
		int victim = page_replacement(allocated);
		int victimPid = frame_table[victim].pid;
		int victimPageNo = frame_table[victim].page;
		pageTable.entry[victimPid][victimPageNo].valid = false;
		if (pageTable.entry[victimPid][victimPageNo].dirty) {
			/* Write the victim back before its frame is reused. */
			disk_write(victim, victimPid, victimPageNo);
			pageTable.entry[victimPid][victimPageNo].dirty = false;
		}
		frame_table[victim].pid = pid;
		frame_table[victim].page = pageNo;
		pageTable.entry[pid][pageNo].valid = true;
		list_insert_tail(head, victim);
		clockHelper(victim);
		return allocated;
	}
}
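The handler above only calls into the replacement policy; here is a minimal sketch (not from the original project) of a FIFO page_replacement, assuming hypothetical helpers getHead and list_remove_head that manage the queue of in-use frame numbers:

/* Hypothetical FIFO policy: the victim is the frame resident the
 * longest, i.e. the head of the queue that pagefault_handler fills
 * with list_insert_tail. list_remove_head is an assumed helper that
 * pops and returns the head's frame number. */
int page_replacement(int nframes) {
	struct Node *head = getHead();
	(void)nframes;              /* unused under a pure FIFO policy */
	return list_remove_head(head);
}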
Example #2
File: split.c Project: ysei/blib
int split(char *buf, char *sep,list_t **flds)
{
    int		rval, eol;
    char        *sp, qchar, *fldptr, *fld;
    entry_t	*ent;

    qchar = '\0';
    sp = buf;
    eol = FALSE;
    rval = SPLIT_OK;
    fldptr = sp;
    while ( !eol ) {
        switch(*sp) {
        case '\"':
            if ( qchar == '\"' ) { // this must be a closing quote
                qchar = '\0'; // stop quote
            } else { // not already in a quote
                qchar = '\"';	 // set quote char
            }
            break;

        case '\'':
            if ( qchar == '\"' ) { // this must be a closing quote
                qchar = '\0'; /* no longer quote */
            } else { // not already in a quote
                qchar = '\"';	 // set quote char
            }
            break;

        case '\n':
        case '\r':
            *sp='\0'; /* null it and fall through */
        case '\0':
            if ( qchar != '\0' ) { // have a start of quoted text but found eol before closing
                rval=SPLIT_MISSQUOT;
            }
            fld = newfld(fldptr);
            ent = new_entry(fld);
            list_insert_tail(flds, ent);
            eol = TRUE; // and we done
            break;

        default: /* anything else must be start of non quoted field */
            if ( qchar == '\0' ) { // if not quoting then we can look for sep
                if (strchr(sep, *sp) != NULL) {
                    *sp='\0';	// null terminate it as a C String
                    fld = newfld(fldptr);
                    ent = new_entry(fld);
                    list_insert_tail(flds, ent);

                    fldptr = (sp+1); // set a point to the start
                }
            }
            break;
        }
        sp++; /* next char */
    }
    return(rval);
}
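A hedged driver for split(), assuming SPLIT_OK and the project's opaque list_t; how the fields list is initialized and walked is project-specific, so the sketch only checks the return code. The buffer must be writable, since split() null-terminates fields in place:

#include <stdio.h>

int demo_split(void)
{
    char line[] = "alpha,\"beta,gamma\",delta\n";  /* modified in place */
    list_t *fields = NULL;  /* list setup may be required by the project */

    if (split(line, ",", &fields) != SPLIT_OK) {
        fprintf(stderr, "split failed (unterminated quote?)\n");
        return -1;
    }
    /* fields now holds one entry_t per comma-separated field */
    return 0;
}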
Example #3
/*
 * Add a FUID node to the list of fuid's being created for this
 * ACL
 *
 * If ACL has multiple domains, then keep only one copy of each unique
 * domain.
 */
void
zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
    uint64_t idx, uint64_t id, zfs_fuid_type_t type)
{
	zfs_fuid_t *fuid;
	zfs_fuid_domain_t *fuid_domain;
	zfs_fuid_info_t *fuidp;
	uint64_t fuididx;
	boolean_t found = B_FALSE;

	if (*fuidpp == NULL)
		*fuidpp = zfs_fuid_info_alloc();

	fuidp = *fuidpp;
	/*
	 * First find fuid domain index in linked list
	 *
	 * If one isn't found then create an entry.
	 */

	for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
	    fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
	    fuid_domain), fuididx++) {
		if (idx == fuid_domain->z_domidx) {
			found = B_TRUE;
			break;
		}
	}

	if (!found) {
		fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
		fuid_domain->z_domain = domain;
		fuid_domain->z_domidx = idx;
		list_insert_tail(&fuidp->z_domains, fuid_domain);
		fuidp->z_domain_str_sz += strlen(domain) + 1;
		fuidp->z_domain_cnt++;
	}

	if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {

		/*
		 * Now allocate fuid entry and add it on the end of the list
		 */

		fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
		fuid->z_id = id;
		fuid->z_domidx = idx;
		fuid->z_logfuid = FUID_ENCODE(fuididx, rid);

		list_insert_tail(&fuidp->z_fuids, fuid);
		fuidp->z_fuid_cnt++;
	} else {
		if (type == ZFS_OWNER)
			fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
		else
			fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
	}
}
Example #4
void
rdsv3_queue_work(rdsv3_workqueue_struct_t *wq, rdsv3_work_t *wp)
{
	RDSV3_DPRINTF4("rdsv3_queue_work", "Enter(wq: %p, wp: %p)", wq, wp);

	mutex_enter(&wq->wq_lock);

	if (list_link_active(&wp->work_item)) {
		/* This is already in the queue, ignore this call */
		mutex_exit(&wq->wq_lock);
		RDSV3_DPRINTF3("rdsv3_queue_work", "already queued: %p", wp);
		return;
	}

	switch (wq->wq_state) {
	case RDSV3_WQ_THREAD_RUNNING:
		list_insert_tail(&wq->wq_queue, wp);
		mutex_exit(&wq->wq_lock);
		break;

	case RDSV3_WQ_THREAD_FLUSHING:
		do {
			mutex_exit(&wq->wq_lock);
			delay(drv_usectohz(1000000));
			mutex_enter(&wq->wq_lock);
		} while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);

		if (wq->wq_state == RDSV3_WQ_THREAD_RUNNING) {
			list_insert_tail(&wq->wq_queue, wp);
			mutex_exit(&wq->wq_lock);
			break;
		}
		/* FALLTHRU */

	case RDSV3_WQ_THREAD_IDLE:
		list_insert_tail(&wq->wq_queue, wp);
		wq->wq_state = RDSV3_WQ_THREAD_RUNNING;
		mutex_exit(&wq->wq_lock);

		(void) ddi_taskq_dispatch(rdsv3_taskq, rdsv3_worker_thread, wq,
		    DDI_SLEEP);
		break;

	case RDSV3_WQ_THREAD_EXITING:
		mutex_exit(&wq->wq_lock);
		break;
	}

	RDSV3_DPRINTF4("rdsv3_queue_work", "Return(wq: %p, wp: %p)", wq, wp);
}
Example #5
int main()
{
	/* Driver code to test the implementation */
	struct Node *head = NULL; // empty list: head starts as NULL

	// Insert and remove nodes, printing the list after each call.
	head = list_insert_head(head, 2); list_print(head);
	head = list_insert_head(head, 4); list_print(head);
	head = list_insert_tail(head, 6); list_print(head);
	head = list_insert_tail(head, 8); list_print(head);
	head = list_remove(head, 6); list_print(head);
	head = list_remove(head, 8); list_print(head);

	return 0;
}
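The driver assumes list_insert_head, list_insert_tail, list_remove, and list_print; below is a minimal singly linked sketch of those helpers (hypothetical, and without the malloc error handling a real implementation would need):

#include <stdio.h>
#include <stdlib.h>

struct Node { int data; struct Node *next; };

struct Node *list_insert_head(struct Node *head, int v) {
	struct Node *n = malloc(sizeof(*n));
	n->data = v;
	n->next = head;          // new node becomes the head
	return n;
}

struct Node *list_insert_tail(struct Node *head, int v) {
	struct Node *n = malloc(sizeof(*n));
	n->data = v;
	n->next = NULL;
	if (head == NULL)
		return n;        // empty list: new node is the head
	struct Node *p = head;
	while (p->next != NULL)  // walk to the last node
		p = p->next;
	p->next = n;
	return head;
}

struct Node *list_remove(struct Node *head, int v) {
	struct Node **pp = &head;
	while (*pp != NULL && (*pp)->data != v)
		pp = &(*pp)->next;
	if (*pp != NULL) {       // unlink and free the first match
		struct Node *dead = *pp;
		*pp = dead->next;
		free(dead);
	}
	return head;
}

void list_print(struct Node *head) {
	for (; head != NULL; head = head->next)
		printf("%d ", head->data);
	printf("\n");
}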
Example #6
void task_queue_insert(int type, struct task_st *task)
{
    switch (type)
    {
        case WAITING_QUEUE:
            list_insert_tail(g_global.waiting_list, task);
            break;
        case RUNNING_QUEUE:
            list_insert_tail(g_global.running_list, task);
            break;
        default:
            LogAbort("no such queue type.");
            break;
    }
}
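A hypothetical call site, assuming g_global's lists are initialized elsewhere and task_new() exists as a constructor:

/* Hypothetical: park a freshly built task until the scheduler
 * promotes it to the running queue. */
static void submit_task(void)
{
    struct task_st *task = task_new();
    task_queue_insert(WAITING_QUEUE, task);
}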
Example #7
void HCI_recv_packet(unsigned char* packet_buffer, unsigned int packet_length) {
  tHciDataPacket * hciReadPacket = NULL;

  if (!list_is_empty ((tListNode*)&hciReadPktPool)){
      
    if(packet_length > 0) {
      /* enqueueing a packet for read */
      list_remove_head ((tListNode*)&hciReadPktPool, (tListNode **)&hciReadPacket);
      
      Osal_MemCpy(hciReadPacket->dataBuff, packet_buffer, MIN(HCI_READ_PACKET_SIZE, packet_length));
    
      hciReadPacket->data_len = packet_length;
      switch(HCI_verify(hciReadPacket)) {
        case 0:
          list_insert_tail((tListNode*)&hciReadPktRxQueue, (tListNode *)hciReadPacket);
          break;

        default:
        case 1:
        case 2:
          list_insert_head((tListNode*)&hciReadPktPool, (tListNode *)hciReadPacket);
          break;
      }
    }
  }
  else{
    // HCI Read Packet Pool is empty, wait for a free packet.
    readPacketListFull = TRUE;
    return;
  }

  // process incoming packet

  // don't process when hci_send_req is undergoing
  if (hciAwaitReply) {
    return;
  }

  /* process any pending events read */
  while(!list_is_empty((tListNode*)&hciReadPktRxQueue))
  {
    list_remove_head ((tListNode*)&hciReadPktRxQueue, (tListNode **)&hciReadPacket);
    //Enable_SPI_IRQ();
    HCI_Event_CB(hciReadPacket->dataBuff);
    //Disable_SPI_IRQ();
    list_insert_tail((tListNode*)&hciReadPktPool, (tListNode *)hciReadPacket);
  }
}
Example #8
u32 enqueue_evt(struct evt_priv *pevtpriv, struct evt_obj *obj)
{
	_irqL irqL;
	int	res;
	_queue *queue = &(pevtpriv->evt_queue);
	
_func_enter_;	

	res = _SUCCESS; 		

	if(obj == NULL)
	{
		res = _FAIL;
		goto exit;
	}	

	_enter_critical_ex(&(queue->lock), &irqL);

	list_insert_tail(&(obj->list),&(queue->queue));

	_exit_critical_ex(&(queue->lock), &irqL);
	

	//evt_notify_isr(pevtpriv);

exit:
	
_func_exit_;		

	return res;
	
}
Example #9
/*
  Initialize a malloc_state struct.

  This is called only from within __malloc_consolidate, which needs
  be called in the same contexts anyway.  It is never called directly
  outside of __malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all. (Inlining it in __malloc_consolidate is fine though.)
*/
static void malloc_init_state(mstate av)
{
    int     i;
    mbinptr bin;

    /* Establish circular links for normal bins */
    for (i = 1; i < NBINS; ++i) {
	bin = bin_at(av,i);
	bin->fd = bin->bk = bin;
    }

    av->top_pad        = DEFAULT_TOP_PAD;
    av->n_mmaps_max    = DEFAULT_MMAP_MAX;
    av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if MORECORE_CONTIGUOUS
    set_contiguous(av);
#else
    set_noncontiguous(av);
#endif


    set_max_fast(av, DEFAULT_MXFAST);

    //av->top            = initial_top(av);
    init_linked_list(&(av->ustate_list));     //init ustate list
    av->pagesize       = malloc_getpagesize;
    
    // add new mstate to the mstate list
    list_insert_tail(&(get_abstate()->mstate_list), (void *)av);
}
Example #10
/*
 * Open-context function to add one entry to the new mapping.  The new
 * entry will be remembered and written from syncing context.
 */
static void
spa_condense_indirect_commit_entry(spa_t *spa,
    vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;

	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * If we are the first entry committed this txg, kick off the sync
	 * task to write to the MOS on our behalf.
	 */
	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    spa_condense_indirect_commit_sync, sci,
		    0, ZFS_SPACE_CHECK_NONE, tx);
	}

	vdev_indirect_mapping_entry_t *vime =
	    kmem_alloc(sizeof (*vime), KM_SLEEP);
	vime->vime_mapping = *vimep;
	vime->vime_obsolete_count = count;
	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);

	dmu_tx_commit(tx);
}
Example #11
sint	enqueue_recvframe(union recv_frame *precvframe, _queue *queue)
{	
       _irqL irqL;
	_adapter *padapter=precvframe->u.hdr.adapter;
	struct recv_priv *precvpriv = &padapter->recvpriv;
	
_func_enter_;


	//_spinlock(&pfree_recv_queue->lock);
	 _enter_critical(&queue->lock, &irqL);

	//_init_listhead(&(precvframe->u.hdr.list));
	list_delete(&(precvframe->u.hdr.list));
	
	
	list_insert_tail(&(precvframe->u.hdr.list), get_list_head(queue));

	if(padapter !=NULL){			
			if(queue == &precvpriv->free_recv_queue)
				precvpriv->free_recvframe_cnt++;
	}

	//_spinunlock(&pfree_recv_queue->lock);
	 _exit_critical(&queue->lock, &irqL);
		

_func_exit_;	

	return _SUCCESS;
}
Example #12
void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;
	int i;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
	    sizeof (zio_t), offsetof(struct zio, io_deadline_node));

	avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	/*
	 * A list of buffers which can be used for aggregate I/O, this
	 * avoids the need to allocate them on demand when memory is low.
	 */
	list_create(&vq->vq_io_list, sizeof (vdev_io_t),
	    offsetof(vdev_io_t, vi_node));

	for (i = 0; i < zfs_vdev_max_pending; i++)
		list_insert_tail(&vq->vq_io_list, zio_vdev_alloc());
}
Example #13
void list_move_tail (list_node *list, list_node *node)
{
	MESSAGE_DEBUG("list:%p node:%p\n", list, node);
	list_delete (node);
	list_insert_tail (list, node);
//	list_dump(list);
}
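A typical use of list_move_tail is LRU bookkeeping: touching an entry rotates it to the tail, so the head is always the eviction candidate. A minimal sketch with hypothetical names:

/* Hypothetical LRU touch: after this call `node` is the most recently
 * used entry and the head of `lru_list` is the least recently used. */
void cache_touch(list_node *lru_list, list_node *node)
{
	list_move_tail(lru_list, node);
}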
Example #14
/**
 * Creates a copy of a segment.
 * @param dst descriptor of the destination segment.
 * @param space address space of the destination segment.
 * @param src source segment.
 */
int
vm_seg_clone(vm_seg_t *dst, vm_space_t *space, vm_seg_t *src)
{
    vm_seg_create(dst, space, src->base, src->size, src->limit,
        src->prot, src->flags);
    vm_region_t *reg = NULL;
    vm_region_t *clonereg;
//     TRACE_IN("dst=%p space=%p src=%p", dst, space, src);
    while ( (reg = list_next(&src->regions, reg)) ) {
        clonereg = vm_lpool_alloc(&vm_unused_regions);
        clonereg->begin = reg->begin;
        clonereg->size = reg->size;
        clonereg->end = reg->end;
        clonereg->segment = dst;
        list_insert_tail(&dst->regions, clonereg);
//         TRACE_IN("%p-%p", clonereg->begin, clonereg->end);
        vm_pmap_fill(&space->pmap, clonereg->begin, clonereg->size,
            dst->prot);

        vm_addr_t SRC,DST;
        vm_segmap(dst, clonereg->begin, clonereg->size, &DST);
        vm_segmap(src, reg->begin, reg->size, &SRC);
        mem_cpy((void*)DST, (void*)SRC, reg->size);
        vm_unmap(DST, reg->size);
        vm_unmap(SRC, reg->size);
    }
#if 0
    TRACE_IN("present %u %u",
        vm_pmap_is_avail(&dst->space->pmap, 0xbfffff00),
        vm_pmap_is_avail(&src->space->pmap, 0xbfffff00)
    );
#endif
    return 0;
}
Example #15
u32	enqueue_cmd_ex(struct cmd_priv *pcmdpriv, struct cmd_obj *obj)
{
	_irqL irqL;
	_queue *queue;

_func_enter_;

	if (obj == NULL)
		goto exit;


	if(pcmdpriv->padapter->eeprompriv.bautoload_fail_flag==_TRUE)
	{		
		return _FAIL;
	}

	queue = &(pcmdpriv->cmd_queue);
	
	_enter_critical(&(queue->lock), &irqL);

	list_insert_tail(&(obj->list), &(queue->queue));

	_exit_critical(&(queue->lock), &irqL);

	_up_sema(&pcmdpriv->cmd_queue_sema);
	
exit:	
	
_func_exit_;

	return _SUCCESS;

}
Example #16
void error_add_message(char *msg)
{
	if (!error_wnd) init_error();
	if (error_wnd)
	{
		struct error_node *enode = (struct error_node*)malloc(sizeof(struct error_node));
		if (enode)
		{
			if ((enode->text = mystrdup(msg)))
			{
				static char error_label[32];

				set(text_list, MUIA_NList_Quiet, TRUE);
				DoMethod(text_list, MUIM_NList_Clear);
				DoMethod(text_list, MUIM_NList_InsertSingleWrap, (ULONG)enode->text, MUIV_NList_Insert_Bottom, WRAPCOL0, ALIGN_LEFT);
				set(text_list, MUIA_NList_Quiet, FALSE);

				list_insert_tail(&error_list, &enode->node);

				sprintf(error_label, "Error %%ld/%d",list_length(&error_list));

				SetAttrs(error_numeric,
						MUIA_Numeric_Min, 1,
						MUIA_Numeric_Max, list_length(&error_list),
						MUIA_Numeric_Value, list_length(&error_list),
						MUIA_Numeric_Format, error_label,
						TAG_DONE);
			} else free(enode);
		}

		set(error_wnd, MUIA_Window_Open, TRUE);
	}
}
Example #17
void uthread_init(){
  // Initialize the thread queue
  list_init(&thread_queue, thread_compare_pri, thread_data_delete);

  // Initialize the handler thread
  getcontext(&handler);
  handler.uc_stack.ss_sp = &handler_stack;
  handler.uc_stack.ss_size = sizeof(handler_stack);
  handler.uc_link = NULL;
  makecontext(&handler, uthread_handler, 0);

  // Create a new thread to represent the calling process
  thread_t *new_thread = malloc( sizeof(thread_t) );

  // We initialize the calling thread as a zero priority
  thread_id = 0;
  thread_init(new_thread, 0, thread_id);
  thread_id++;
  
  // Insert the new thread into the thread queue
  list_insert_tail(&thread_queue, new_thread);

  // Now that we're done with setup we should gather the current context
  // into our thread's ucp.
  ucontext_t *new_context = &new_thread->ucp;
  getcontext(new_context);
}
Example #18
File: vg.c Project: nurh/copterfs
struct objstore *objstore_vg_create(const char *name,
                                    enum objstore_vg_type type)
{
    struct objstore *vg;

    if (type != OS_VG_SIMPLE)
        return ERR_PTR(EINVAL);

    vg = umem_cache_alloc(vg_cache, 0);
    if (!vg)
        return ERR_PTR(ENOMEM);

    vg->name = strdup(name);
    if (!vg->name) {
        umem_cache_free(vg_cache, vg);
        return ERR_PTR(ENOMEM);
    }

    list_create(&vg->vols, sizeof(struct objstore_vol),
                offsetof(struct objstore_vol, vg_list));

    mxinit(&vg->lock);

    mxlock(&vgs_lock);
    list_insert_tail(&vgs, vg);
    mxunlock(&vgs_lock);

    return vg;
}
Example #19
void rr_wait(sched_queue_t *queue) {
  list_elem_t *head;
  thread_info_t *worker;

  /* Get head of queue */
  if(!pthread_mutex_lock(queue->access_mutex)) {
    head = list_get_head(queue->list);
    if(head) {
      /* Rotate the current worker from head to tail (round robin). */
      worker = (thread_info_t*) head->datum;
      list_remove_elem(queue->list, head);
      list_insert_tail(queue->list, head);
    } else {
      /* Queue is empty. Just leave then. */
      pthread_mutex_unlock(queue->access_mutex);
      return;
    }
    pthread_mutex_unlock(queue->access_mutex);
  } else {
    /* Could not lock the queue; there is nothing safe to wait on. */
    return;
  }

  /* Block until the worker has finished: taking yield_cpu succeeds
     only once the worker releases it. */
  if(pthread_mutex_lock(worker->yield_cpu)) {
    /* Lock failed; report the error here. */
  }
}
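The yield_cpu handshake assumed above: a worker holds its own yield_cpu mutex while it runs and releases it when finished, so rr_wait's final lock doubles as a completion wait. A hypothetical worker-side sketch (do_work is assumed):

void *worker_main(void *arg)
{
  thread_info_t *self = (thread_info_t *) arg;

  pthread_mutex_lock(self->yield_cpu);    /* held for the whole run */
  do_work(self);                          /* assumed workload */
  pthread_mutex_unlock(self->yield_cpu);  /* releases rr_wait */
  return NULL;
}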
Example #20
/*
 * Print these messages by running:
 * echo ::zfs_dbgmsg | mdb -k
 *
 * Monitor these messages by running:
 * 	dtrace -q -n 'zfs-dbgmsg{printf("%s\n", stringof(arg0))}'
 */
void
zfs_dbgmsg(const char *fmt, ...)
{
	int size;
	va_list adx;
	zfs_dbgmsg_t *zdm;

	va_start(adx, fmt);
	size = vsnprintf(NULL, 0, fmt, adx);
	va_end(adx);

	/*
	 * There is one byte of string in sizeof (zfs_dbgmsg_t), used
	 * for the terminating null.
	 */
	zdm = kmem_alloc(sizeof (zfs_dbgmsg_t) + size, KM_SLEEP);
	zdm->zdm_timestamp = gethrestime_sec();

	va_start(adx, fmt);
	(void) vsnprintf(zdm->zdm_msg, size + 1, fmt, adx);
	va_end(adx);

	DTRACE_PROBE1(zfs__dbgmsg, char *, zdm->zdm_msg);

	mutex_enter(&zfs_dbgmsgs_lock);
	list_insert_tail(&zfs_dbgmsgs, zdm);
	zfs_dbgmsg_size += sizeof (zfs_dbgmsg_t) + size;
	while (zfs_dbgmsg_size > zfs_dbgmsg_maxsize) {
		zdm = list_remove_head(&zfs_dbgmsgs);
		size = sizeof (zfs_dbgmsg_t) + strlen(zdm->zdm_msg);
		kmem_free(zdm, size);
		zfs_dbgmsg_size -= size;
	}
	mutex_exit(&zfs_dbgmsgs_lock);
}
Example #21
/*
 * load zfs_fuid_t's and fuid_domains into fuid_info_t
 */
static zfs_fuid_info_t *
zfs_replay_fuids(void *start, void **end, int idcnt, int domcnt, uint64_t uid,
    uint64_t gid)
{
	uint64_t *log_fuid = (uint64_t *)start;
	zfs_fuid_info_t *fuid_infop;
	int i;

	fuid_infop = zfs_fuid_info_alloc();
	fuid_infop->z_domain_cnt = domcnt;

	fuid_infop->z_domain_table =
	    kmem_zalloc(domcnt * sizeof (char **), KM_SLEEP);

	for (i = 0; i != idcnt; i++) {
		zfs_fuid_t *zfuid;

		zfuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
		zfuid->z_logfuid = *log_fuid;
		zfuid->z_id = -1;
		zfuid->z_domidx = 0;
		list_insert_tail(&fuid_infop->z_fuids, zfuid);
		log_fuid++;
	}

	zfs_replay_fuid_ugid(fuid_infop, uid, gid);

	*end = zfs_replay_fuid_domain_common(fuid_infop, log_fuid, domcnt);
	return (fuid_infop);
}
Example #22
/* list_insert_tail: create a new node at the end of the linked list
   Parameters
   - node*: a pointer to the head of a linked list
   - char*: data to be stored in the new node
   Return: a pointer to the head of the linked list */
node* list_insert_tail(node* list, char* input){

    // Base case 1:
    // List is empty, so the new node becomes the whole list
    if(list == NULL){
        // Allocate memory based on the size of the node structure
        node *newNode = malloc(sizeof(struct s_node));

        newNode->data = strdup(input);
        newNode->next = NULL;

        return newNode;
    }

    // Base case 2: we are at the last node
    if(list->next == NULL){

        // Allocate memory for the new node to be attached to the list
        node *newNode = malloc(sizeof(struct s_node));

        newNode->data = strdup(input);
        newNode->next = NULL;   // terminate the list at the new tail

        // Link the new node to the tail of the list
        list->next = newNode;

        return list;
    }

    // If we are at a random node advance until the end
    list->next = list_insert_tail(list->next, input);
    return list;
}
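A minimal driver for the recursive insert above, assuming the node typedef for struct s_node with data and next fields from the same file:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    node *lst = NULL;

    lst = list_insert_tail(lst, "first");
    lst = list_insert_tail(lst, "second");
    lst = list_insert_tail(lst, "third");

    // Walk the list: prints first, second, third
    for (node *p = lst; p != NULL; p = p->next)
        printf("%s\n", p->data);

    return 0;
}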
Example #23
static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	refcount_create(&txh->txh_space_towrite);
	refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}
Example #24
u32 _r8712_init_sta_priv(struct	sta_priv *pstapriv)
{
	struct sta_info *psta;
	s32 i;

	pstapriv->pallocated_stainfo_buf = _malloc(sizeof(struct sta_info) *
						   NUM_STA + 4);
	if (pstapriv->pallocated_stainfo_buf == NULL)
		return _FAIL;
	pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
		((addr_t)(pstapriv->pallocated_stainfo_buf) & 3);
	_init_queue(&pstapriv->free_sta_queue);
	spin_lock_init(&pstapriv->sta_hash_lock);
	pstapriv->asoc_sta_count = 0;
	_init_queue(&pstapriv->sleep_q);
	_init_queue(&pstapriv->wakeup_q);
	psta = (struct sta_info *)(pstapriv->pstainfo_buf);
	for (i = 0; i < NUM_STA; i++) {
		_init_stainfo(psta);
		_init_listhead(&(pstapriv->sta_hash[i]));
		list_insert_tail(&psta->list,
				 get_list_head(&pstapriv->free_sta_queue));
		psta++;
	}
	_init_listhead(&pstapriv->asoc_list);
	_init_listhead(&pstapriv->auth_list);
	return _SUCCESS;
}
Example #25
int http_client_pool_init(struct http_client_pool *http_client_pool, size_t initial, size_t grow) {
    LOGGER_INFO("http client pool: initial=%zu, grow=%zu", initial, grow);
    if (0 > ctx_pool_init(&http_client_pool->ctx_pool, initial, grow, CLIENT_STACK_SIZE, sizeof(struct http_client_context)))
        return -1;

    /* Global to all clients */
    if (!client_chains) {
        struct rlimit rlim;
        if (0 > getrlimit(RLIMIT_NOFILE, &rlim))
            return LOGGER_PERROR("getrlimit(RLIMIT_NOFILE)"), -1;

        client_chains = calloc(rlim.rlim_cur, sizeof(struct list));
        if (!client_chains)
            return LOGGER_PERROR("calloc client_chains"), -1;

        /* storage for multiple client chains */
        client_heads = calloc(rlim.rlim_cur, sizeof(struct list));
        struct list *tmp = client_heads, *tmp_end = tmp + rlim.rlim_cur;
        if (!client_heads)
            return LOGGER_PERROR("calloc client_heads"), -1;
        for (; tmp != tmp_end; ++tmp)
            list_insert_tail(&free_list, tmp);

        idle_ctx = ribs_context_create(SMALL_STACK_SIZE, http_client_idle_handler);

        hashtable_init(&ht_persistent_clients, rlim.rlim_cur);
    }
    return timeout_handler_init(&http_client_pool->timeout_handler);
}
Example #26
static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = list_head(list)) != NULL) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}
Example #27
void HCI_Process(void)
{
  uint8_t data_len;
  uint8_t buffer[HCI_READ_PACKET_SIZE];
  tHciDataPacket * hciReadPacket = NULL;
  
  Disable_SPI_IRQ();
  uint8_t list_empty = list_is_empty(&hciReadPktRxQueue);        
  /* process any pending events read */
  while(list_empty == FALSE)
  {
    list_remove_head (&hciReadPktRxQueue, (tListNode **)&hciReadPacket);
    Enable_SPI_IRQ();
    HCI_Event_CB(hciReadPacket->dataBuff);
    Disable_SPI_IRQ();
    list_insert_tail(&hciReadPktPool, (tListNode *)hciReadPacket);
    list_empty = list_is_empty(&hciReadPktRxQueue);
  }
  if (readPacketListFull) {
    while(BlueNRG_DataPresent()) {
      data_len = BlueNRG_SPI_Read_All(&SpiHandle, buffer, HCI_READ_PACKET_SIZE);
      if(data_len > 0)
        HCI_Event_CB(buffer);
    }
    readPacketListFull = FALSE;
  }
  
  Enable_SPI_IRQ();    
}
Example #28
/*
 * Add a callback to be invoked when the calling process exits.
 */
int
zfs_onexit_add_cb(minor_t minor, void (*func)(void *), void *data,
    uint64_t *action_handle)
{
	zfs_onexit_t *zo;
	zfs_onexit_action_node_t *ap;
	int error;

	error = zfs_onexit_minor_to_state(minor, &zo);
	if (error)
		return (error);

	ap = kmem_alloc(sizeof (zfs_onexit_action_node_t), KM_SLEEP);
	list_link_init(&ap->za_link);
	ap->za_func = func;
	ap->za_data = data;

	mutex_enter(&zo->zo_lock);
	list_insert_tail(&zo->zo_actions, ap);
	mutex_exit(&zo->zo_lock);
	if (action_handle)
		*action_handle = (uint64_t)(uintptr_t)ap;

	return (0);
}
Example #29
void HCI_Isr(void)
{
  tHciDataPacket * hciReadPacket = NULL;
  uint8_t data_len,i=0;
  uint8_t retries = 0;
  
  while(BlueNRG_DataPresent())
  {        
    if (list_is_empty (&hciReadPktPool) == FALSE){//check if we have free hci read packets
      
      /* enqueueing a packet for read */
      list_remove_head (&hciReadPktPool, (tListNode **)&hciReadPacket);
      
      data_len = BlueNRG_SPI_Read_All(hciReadPacket->dataBuff,HCI_READ_PACKET_SIZE);
      
      if(data_len > 0){   
        
        retries = 0;
        
        hciReadPacket->data_len = data_len;
        
        if(HCI_verify(hciReadPacket) == 0)
          list_insert_tail(&hciReadPktRxQueue, (tListNode *)hciReadPacket);
        else
          list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket);

        i++;
        if( i > HCI_READ_PACKET_NUM_MAX)
        {
            goto error;
        }
      }
      else 
      {
        // Insert the packet back into the pool.
        list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket);
        
        retries++; //Device was busy or did not respond correctly
        
        if(retries > 10)
        {    
            goto error;
        }
      }
    }
    else{
      // HCI Read Packet Pool is empty, wait for a free packet.
      readPacketListFull = TRUE;
      return;
    }

  }
  return;

error:
  ISRDevice_busy = TRUE;
  return;
}
Example #30
/**
 * Pushes a pointer onto the queue.
 * @param q the queue
 * @param d the pointer to enqueue.
 *
 * After enqueueing the pointer, the procedure wakes one
 * of the threads waiting for data.
 */
void
cqueue_insert(cqueue_t *q, void *d)
{
    MUTEX_LOCK(&q->q_mtx, "cqueue");
    list_insert_tail(&q->q_data, d);
    mutex_wakeup(&q->q_mtx);
    mutex_unlock(&q->q_mtx);
}
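A plausible consumer counterpart under the same locking primitives; mutex_sleep (an assumed blocking wait paired with mutex_wakeup) and list_extract_first are hypothetical, not confirmed by the source:

/* Hypothetical consumer: blocks until cqueue_insert() wakes it.
 * mutex_sleep() is assumed to atomically release q_mtx and wait;
 * list_extract_first() is an assumed pop-from-head helper. */
void *
cqueue_extract(cqueue_t *q)
{
    void *d;

    MUTEX_LOCK(&q->q_mtx, "cqueue");
    while (list_is_empty(&q->q_data))
        mutex_sleep(&q->q_mtx, "cqueue");
    d = list_extract_first(&q->q_data);
    mutex_unlock(&q->q_mtx);
    return d;
}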