/*
 * Register interest in the named property.  The callback is invoked
 * immediately with the current value, and again on every subsequent
 * change until it is unregistered.
 *
 * Returns 0 on success, or an errno if the property is not an
 * integer value.
 */
int
dsl_prop_register(dsl_dataset_t *ds, const char *propname,
    dsl_prop_changed_cb_t *callback, void *cbarg)
{
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_record_t *pr;
	dsl_prop_cb_record_t *cbr;
	uint64_t curval;
	int error;
	ASSERTV(dsl_pool_t *dp = dd->dd_pool);

	ASSERT(dsl_pool_config_held(dp));

	error = dsl_prop_get_int_ds(ds, propname, &curval);
	if (error != 0)
		return (error);

	/* Build the callback record before taking the dir lock. */
	cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP);
	cbr->cbr_ds = ds;
	cbr->cbr_func = callback;
	cbr->cbr_arg = cbarg;

	mutex_enter(&dd->dd_lock);
	pr = dsl_prop_record_find(dd, propname);
	if (pr == NULL)
		pr = dsl_prop_record_create(dd, propname);
	cbr->cbr_pr = pr;
	/* Link the record onto both the property and the dataset lists. */
	list_insert_head(&pr->pr_cbs, cbr);
	list_insert_head(&ds->ds_prop_cbs, cbr);
	mutex_exit(&dd->dd_lock);

	/* Deliver the initial notification outside of dd_lock. */
	cbr->cbr_func(cbr->cbr_arg, curval);

	return (0);
}
void HCI_Isr(void) { tHciDataPacket * hciReadPacket = NULL; uint8_t data_len,i=0; uint8_t retries = 0; while(BlueNRG_DataPresent()) { if (list_is_empty (&hciReadPktPool) == FALSE){//check if we have free hci read packets /* enqueueing a packet for read */ list_remove_head (&hciReadPktPool, (tListNode **)&hciReadPacket); data_len = BlueNRG_SPI_Read_All(hciReadPacket->dataBuff,HCI_READ_PACKET_SIZE); if(data_len > 0){ retries = 0; hciReadPacket->data_len = data_len; if(HCI_verify(hciReadPacket) == 0) list_insert_tail(&hciReadPktRxQueue, (tListNode *)hciReadPacket); else list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket); i++; if( i > HCI_READ_PACKET_NUM_MAX) { goto error; } } else { // Insert the packet back into the pool. list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket); retries++; //Device was busy or did not respond correctly if(retries > 10) { goto error; } } } else{ // HCI Read Packet Pool is empty, wait for a free packet. readPacketListFull = TRUE; return; } } return; error: ISRDevice_busy = TRUE; return; }
slice_allocator_alloc(slice_allocator_t *sa, sa_size_t size)
#endif /* !DEBUG */
{
	slice_t *slice = 0;

	lck_spin_lock(sa->spinlock);

	/*
	 * Locate a slice with residual capacity. First, check for a partially
	 * full slice, and use some more of its capacity. Next, look to see if
	 * we have a ready to go empty slice. If not, finally go to underlying
	 * allocator for a new slice.
	 */
	if (!list_is_empty(&sa->partial)) {
		slice = list_head(&sa->partial);
	} else if (!list_is_empty(&sa->free)) {
		/* Promote an empty slice from the free list to partial. */
		slice = list_tail(&sa->free);
		list_remove_tail(&sa->free);
		list_insert_head(&sa->partial, slice);
	} else {
		/*
		 * Drop the spinlock while calling into the underlying
		 * allocator, then reacquire it to publish the new slice.
		 * NOTE(review): osif_malloc() result is not NULL-checked
		 * before slice_init() — confirm it cannot fail here.
		 */
		lck_spin_unlock(sa->spinlock);
		slice = (slice_t *)osif_malloc(sa->slice_size);;
		slice_init(slice, sa);
		lck_spin_lock(sa->spinlock);
		list_insert_head(&sa->partial, slice);
	}
#ifdef SA_CHECK_SLICE_SIZE
	/* Diagnostic build: detect a slice owned by a mismatched allocator. */
	if (sa->max_alloc_size != slice->sa->max_alloc_size) {
		REPORT("slice_allocator_alloc - alloc size (%llu) sa %llu slice"
		    " %llu\n", size, sa->max_alloc_size,
		    slice->sa->max_alloc_size);
	}
#endif /* SA_CHECK_SLICE_SIZE */
	/* Grab memory from the slice */
#ifndef DEBUG
	void *p = slice_alloc(slice);
#else
	void *p = slice_alloc(slice, size);
#endif /* !DEBUG */
	/*
	 * Check to see if the slice buffer has become full. If it has, then
	 * move it into the full list so that we no longer keep trying to
	 * allocate from it.
	 */
	if (slice_is_full(slice)) {
		list_remove(&sa->partial, slice);
#ifdef SLICE_ALLOCATOR_TRACK_FULL_SLABS
		list_insert_head(&sa->full, slice);
#endif /* SLICE_ALLOCATOR_TRACK_FULL_SLABS */
	}

	lck_spin_unlock(sa->spinlock);

	return (p);
}
/*
 * Driver code exercising the linked-list implementation: insert at the
 * head and tail, remove values, and print the list after each mutation.
 * Fixed: declare main as (void) and return an explicit status.
 */
int main(void)
{
	struct Node *head = NULL;	/* empty list: head is NULL */

	/* Insert at the head: list becomes 4 -> 2. */
	head = list_insert_head(head, 2);
	list_print(head);
	head = list_insert_head(head, 4);
	list_print(head);

	/* Append at the tail: list becomes 4 -> 2 -> 6 -> 8. */
	head = list_insert_tail(head, 6);
	list_print(head);
	head = list_insert_tail(head, 8);
	list_print(head);

	/* Remove values: list shrinks back to 4 -> 2. */
	head = list_remove(head, 6);
	list_print(head);
	head = list_remove(head, 8);
	list_print(head);

	return 0;
}
/*
 * Allocate a history record for 'txg', insert it at the head of the
 * pool's txg history list, and prune the oldest entries so at most
 * zfs_txg_history records are retained.  Returns the new record.
 */
txg_history_t *
dsl_pool_txg_history_add(dsl_pool_t *dp, uint64_t txg)
{
	txg_history_t *th;
	txg_history_t *old;

	th = kmem_zalloc(sizeof(txg_history_t), KM_PUSHPAGE);
	mutex_init(&th->th_lock, NULL, MUTEX_DEFAULT, NULL);
	th->th_kstat.txg = txg;
	th->th_kstat.state = TXG_STATE_OPEN;
	th->th_kstat.birth = gethrtime();

	mutex_enter(&dp->dp_lock);

	/* Newest entry lives at the head; oldest entries fall off the tail. */
	list_insert_head(&dp->dp_txg_history, th);
	dp->dp_txg_history_size++;
	while (dp->dp_txg_history_size > zfs_txg_history) {
		dp->dp_txg_history_size--;
		old = list_remove_tail(&dp->dp_txg_history);
		mutex_destroy(&old->th_lock);
		kmem_free(old, sizeof(txg_history_t));
	}

	mutex_exit(&dp->dp_lock);

	return (th);
}
void HCI_Isr(void) { tHciDataPacket * hciReadPacket = NULL; uint8_t data_len; Clear_SPI_EXTI_Flag(); while(BlueNRG_DataPresent()){ if (list_is_empty (&hciReadPktPool) == FALSE){ /* enqueueing a packet for read */ list_remove_head (&hciReadPktPool, (tListNode **)&hciReadPacket); data_len = BlueNRG_SPI_Read_All(&SpiHandle, hciReadPacket->dataBuff, HCI_PACKET_SIZE); if(data_len > 0){ HCI_Input(hciReadPacket); // Packet will be inserted to te correct queue by } else { // Insert the packet back into the pool. list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket); } } else{ // HCI Read Packet Pool is empty, wait for a free packet. readPacketListFull = TRUE; Clear_SPI_EXTI_Flag(); return; } Clear_SPI_EXTI_Flag(); } }
/**
 * list_insert()
 * Insert @data into @list: at the head when @prev is NULL, otherwise
 * immediately after @prev.
 * @list -- Pointer to list to operate on
 * @prev -- Pointer to node to insert after (NULL means head insert)
 * @data -- Data to insert in the new node
 * @len  -- Length of the data to insert
 * @return -- pointer to the newly created node
 */
list_node *list_insert(linked_list *list, list_node *prev, void *data, size_t len)
{
	return (prev == NULL) ? list_insert_head(list, data, len)
	                      : list_insert_after(prev, data, len);
}
/*
 * Obtain a live-stream record for @livestream within @app: reuse an
 * entry from the free list when available, otherwise allocate a new
 * one from the cycle's memory pool.  The record is initialized and
 * hashed into the app's stream buckets.  Returns NULL when the pool
 * allocation fails.
 */
rtmp_live_stream_t* rtmp_app_live_alloc(rtmp_app_t *app, const char *livestream)
{
    mem_pool_t         *pool;
    rtmp_live_stream_t *live;
    uint32_t            k;
    list_t             *h;

    /* Prefer recycling a previously freed stream record. */
    if (list_empty(&app->free_lives) == 0) {
        live = struct_entry(app->free_lives.next, rtmp_live_stream_t, link);
        list_remove(&live->link);
    } else {
        pool = app->host->cycle->pool;
        live = mem_pcalloc(pool, sizeof(rtmp_live_stream_t));
    }

    if (live) {
        live->epoch = rtmp_current_sec;
        live->timestamp = 0;
        /*
         * strncpy() does not NUL-terminate when the source has 63 or
         * more characters; terminate explicitly so live->name is always
         * a valid C string.  (The 63-byte limit implies name[64] —
         * TODO confirm against the struct definition.)
         */
        strncpy(live->name, livestream, 63);
        live->name[63] = '\0';
        live->publisher = NULL;
        live->players = NULL;

        /* Hash the stream name into its bucket and link it in. */
        k = rtmp_hash_string(livestream);
        h = app->lives + (k % app->conf->stream_buckets);
        list_insert_head(h, &live->link);
    }

    return live;
}
/* Move @node to the front of @list by unlinking and re-inserting it. */
void list_move_head (list_node *list, list_node *node)
{
	MESSAGE_DEBUG("list:%p node:%p\n", list, node);

	list_delete (node);
	list_insert_head (list, node);
}
/*
 * Register interest in the named property.  The callback fires once
 * immediately with the current value, and again on each subsequent
 * change until it is unregistered.
 *
 * Returns 0 on success, or an errno if the prop is not an integer
 * value.
 */
int
dsl_prop_register(dsl_dataset_t *ds, const char *propname,
    dsl_prop_changed_cb_t *callback, void *cbarg)
{
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_cb_record_t *cbr;
	uint64_t curval;
	int error;
	ASSERTV(dsl_pool_t *dp = dd->dd_pool);

	ASSERT(dsl_pool_config_held(dp));

	error = dsl_prop_get_int_ds(ds, propname, &curval);
	if (error != 0)
		return (error);

	/* Record the callback, keeping a private copy of the name. */
	cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_PUSHPAGE);
	cbr->cbr_ds = ds;
	cbr->cbr_propname = kmem_alloc(strlen(propname)+1, KM_PUSHPAGE);
	(void) strcpy((char *)cbr->cbr_propname, propname);
	cbr->cbr_func = callback;
	cbr->cbr_arg = cbarg;

	mutex_enter(&dd->dd_lock);
	list_insert_head(&dd->dd_prop_cbs, cbr);
	mutex_exit(&dd->dd_lock);

	/* Initial notification with the current value, outside dd_lock. */
	cbr->cbr_func(cbr->cbr_arg, curval);

	return (0);
}
/*
 * Open entry point for the IPMI driver: allocate per-descriptor state,
 * assign a minor number, and link the device onto the global list.
 */
/*ARGSUSED*/
static int
ipmi_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
	ipmi_device_t *dev;
	minor_t minor;

	/* The driver must be attached and a BMC must have been found. */
	if (ipmi_attached == B_FALSE)
		return (ENXIO);
	if (ipmi_found == B_FALSE)
		return (ENODEV);

	/* exclusive opens are not supported */
	if (flag & FEXCL)
		return (ENOTSUP);

	if ((minor = (minor_t)id_alloc_nosleep(minor_ids)) == 0)
		return (ENODEV);

	/* Initialize the per file descriptor data. */
	dev = kmem_zalloc(sizeof (ipmi_device_t), KM_SLEEP);
	dev->ipmi_pollhead = kmem_zalloc(sizeof (pollhead_t), KM_SLEEP);

	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;

	*devp = makedevice(getmajor(*devp), minor);
	dev->ipmi_dev = *devp;

	list_insert_head(&dev_list, dev);

	return (0);
}
/*
 * list_insert_before
 *
 * Insert @elem immediately before @next_elem in @list.  Returns EOK on
 * success, EINVAL on bad arguments, ENOTFOUND when @next_elem is not in
 * the list, or the result of list_insert_head() for a head insert.
 */
int list_insert_before (list_t *list, list_elem_t *next_elem, list_elem_t *elem)
{
	list_elem_t *cur;

	/* Sanity check */
	if (!list || !next_elem || !elem) {
		return EINVAL;
	}

	cur = list->list_head;

	/* Inserting before the head is just a head insert. */
	if (cur == next_elem) {
		return (list_insert_head(list, elem));
	}

	/* Walk the list looking for the predecessor of next_elem. */
	for (; cur != NULL; cur = cur->next) {
		if (cur->next == next_elem) {
			elem->next = next_elem;
			cur->next = elem;

			/* Bump up the count */
			list->list_count++;
			return EOK;
		}
	}

	return ENOTFOUND;
}
/*
 * SPLAT list test 7: verify that list_link_active() reports a node as
 * inactive after list_link_init(), active while linked into a list,
 * and inactive again after list_remove().  Returns 0 on success or a
 * negative errno on failure.
 */
static int
splat_list_test7(struct file *file, void *arg)
{
	list_t list;
	list_item_t *li;
	int rc = 0;

	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Creating list\n%s", "");
	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));

	li = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
	if (li == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/* Validate newly initialized node is inactive */
	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Init list node\n%s", "");
	list_link_init(&li->li_node);
	if (list_link_active(&li->li_node)) {
		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Newly initialized "
		    "list node should inactive %p/%p\n",
		    li->li_node.prev, li->li_node.next);
		rc = -EINVAL;
		/* Node is not linked yet, so free it directly. */
		goto out_li;
	}

	/* Validate node is active when linked in to a list */
	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Insert list node\n%s", "");
	list_insert_head(&list, li);
	if (!list_link_active(&li->li_node)) {
		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "List node "
		    "inserted in list should be active %p/%p\n",
		    li->li_node.prev, li->li_node.next);
		rc = -EINVAL;
		/* Node is still linked; the drain loop at 'out' frees it. */
		goto out;
	}

	/* Validate node is inactive when removed from list */
	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Remove list node\n%s", "");
	list_remove(&list, li);
	if (list_link_active(&li->li_node)) {
		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "List node "
		    "removed from list should be inactive %p/%p\n",
		    li->li_node.prev, li->li_node.next);
		rc = -EINVAL;
	}
	/* Fall through: li was removed above, so free it here. */
out_li:
	kmem_free(li, sizeof(list_item_t));
out:
	/* Remove all items */
	while ((li = list_remove_head(&list)))
		kmem_free(li, sizeof(list_item_t));

	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Destroying list\n%s", "");
	list_destroy(&list);

	return rc;
}
/**
 * Enqueues a thread onto a queue.
 *
 * @param q the queue to enqueue the thread onto
 * @param thr the thread to enqueue onto the queue
 */
static void
ktqueue_enqueue(ktqueue_t *q, kthread_t *thr)
{
        /* A thread may only wait on one queue at a time. */
        KASSERT(!thr->kt_wchan);

        thr->kt_wchan = q;
        list_insert_head(&q->tq_list, &thr->kt_qlink);
        q->tq_size++;
}
/*
 * utqueue_enqueue
 * Push @thr onto the front of wait queue @q.
 */
void utqueue_enqueue(utqueue_t *q, uthread_t *thr)
{
	/* The thread must not currently be linked into any queue. */
	assert(thr->ut_link.l_next == NULL && thr->ut_link.l_prev == NULL);

	list_insert_head(&q->tq_waiters, &thr->ut_link);
	q->tq_size += 1;
}
/*
 * Allocate a MySQL pool entry, initialize its MySQL handle, and push
 * it onto @l.  Returns 0 on success, -1 on allocation or init failure.
 */
static int create_entry(struct list *l)
{
	struct mysql_pool_entry *entry = calloc(1, sizeof(struct mysql_pool_entry));

	/* calloc() can fail; the original dereferenced a NULL entry here. */
	if (!entry)
		return -1;

	if (!mysql_init(&(entry->helper.mysql))) {
		free(entry);
		return -1;
	}

	list_insert_head(l, &entry->l);
	return 0;
}
/*
 * Insert @nobject immediately after @object in @list.  A NULL @object
 * means insert at the head of the list.
 */
void
list_insert_after(list_t *list, void *object, void *nobject)
{
	if (object != NULL) {
		list_node_t *node = list_d2l(list, object);
		list_insert_after_node(list, node, nobject);
	} else {
		list_insert_head(list, nobject);
	}
}
/* Return queue entry @qe to @vq's free list under the freelist lock. */
void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);

	list_insert_head(&vq->vq_freelist, qe);
	vq->vq_used_entries--;
	ASSERT(vq->vq_used_entries >= 0);

	mutex_exit(&vq->vq_freelist_lock);
}
/*
 * Recycle @live: unlink it from its hash bucket and push it onto the
 * application's free list for later reuse by rtmp_app_live_alloc().
 */
void rtmp_app_live_free(rtmp_app_t *app, rtmp_live_stream_t *live)
{
	list_remove(&live->link);

#ifdef HAVE_DEBUG
	/* Poison the recycled entry to catch stale use in debug builds. */
	memset(live, 0, sizeof(rtmp_live_stream_t));
#endif

	list_insert_head(&app->free_lives, &live->link);
}
slice_allocator_free(slice_allocator_t *sa, void *buf, sa_size_t size)
#endif /* !DEBUG */
{
	lck_spin_lock(sa->spinlock);

	/* Locate the slice buffer that the allocation lives within. */
	slice_t *slice;
	allocatable_row_t *row = 0;
	small_allocatable_row_t *small_row = 0;

	if (sa->flags & SMALL_ALLOC) {
		slice = slice_small_get_slice_from_row(buf, &small_row);
	} else {
		slice = slice_get_slice_from_row(buf, &row);
	}
#ifdef SA_CHECK_SLICE_SIZE
	/* Diagnostic build: the slice should belong to this allocator. */
	if (sa != slice->sa) {
		REPORT0("slice_allocator_free - slice not owned by sa detected.\n")
	}
#endif /* SA_CHECK_SLICE_SIZE */
	/*
	 * If the slice was previously full, remove it from the free list and
	 * place it in the available list.
	 */
	if (slice_is_full(slice)) {
#ifdef SLICE_ALLOCATOR_TRACK_FULL_SLABS
		list_remove(&sa->full, slice);
#endif /* SLICE_ALLOCATOR_TRACK_FULL_SLABS */
		list_insert_tail(&sa->partial, slice);
	}

	/* Release the row back into the slice. */
#ifndef DEBUG
	if (sa->flags & SMALL_ALLOC) {
		slice_small_free_row(slice, small_row);
	} else {
		slice_free_row(slice, row);
	}
#else
	slice_free_row(slice, row, size);
#endif /* !DEBUG */

	/* Finally migrate to the free list if needed. */
	if (slice_is_empty(slice)) {
		list_remove(&sa->partial, slice);
		/* Timestamp the release so idle slices can be reclaimed later. */
		slice->time_freed = osif_gethrtime();
		list_insert_head(&sa->free, slice);
	}

	lck_spin_unlock(sa->spinlock);
}
/*
 * Put the current thread to sleep on @obj's wait queue.  Caller must
 * hold the object's spinlock.
 */
static void waitobject_generic_sleep(WaitObject* obj, uint flags)
{
	Thread* self = g_current_thread;

	assert_spinlock(&obj->lock);

	/* Link ourselves onto the wait queue and record what blocks us. */
	list_insert_head(&obj->wait_queue, self);
	//if (flags & WAITOBJECT_APPLY_PI)
	//	pi_apply(obj, self);
	self->wait_object = obj;
}
/*
 * Return @mysql to the idle-connection pool keyed by @info.  Returns 0
 * on success, -1 when the key cannot be built or no pool exists for it.
 */
int mysql_pool_free(struct mysql_login_info *info, struct mysql_pool_entry *mysql)
{
	char *ht_key;
	size_t key_len;
	uint32_t ofs;
	struct list *pool_list;

	if (0 > make_ht_key(&misc, info))
		return -1;

	ht_key = vmbuf_data(&misc);
	key_len = vmbuf_wlocpos(&misc);

	/* Find the idle list for this login; 0 means no such pool. */
	ofs = hashtable_lookup(&ht_idle_connections, ht_key, key_len);
	if (0 == ofs)
		return -1;

	pool_list = *(struct list **)hashtable_get_val(&ht_idle_connections, ofs);
	list_insert_head(pool_list, &(mysql->l));
	return 0;
}
/*
 * Create the initial region covering all of segment @vseg and link it
 * into the segment's region list.  On success *reg points at the new
 * region and 0 is returned; -ENOMEM if the region pool is exhausted.
 */
int do_first_region(vm_seg_t *vseg, vm_region_t **reg)
{
	vm_region_t *r = vm_lpool_alloc(&vm_unused_regions);

	if (!r)
		return -ENOMEM;

	/* The first region spans the entire segment. */
	r->segment = vseg;
	r->begin = vseg->base;
	r->size = vseg->size;
	r->end = vseg->end;

	list_insert_head(&vseg->regions, r);
	*reg = r;
	return 0;
}
/*
 * Block the currently running thread on semaphore @sp and context-switch
 * to the next runnable thread (high-priority queue first, then low).
 * If both ready queues are empty, prints a message and returns without
 * blocking.  NOTE(review): calloc() results are used unchecked — confirm
 * allocation failure is acceptable to ignore here.
 */
void sem_yield(sem_t *sp)
{
	sem *sema = (sem*)sp;
	ucontext_t *curr = NULL, *next = NULL;
	tcb *running = NULL, *tcbnext = NULL;
	struct list_elem *e = NULL;
	int queueflag = -1;	/* 0 = picked from q_ready_H, 1 = from q_ready_L */

	/* The running thread is at the head of the running queue. */
	running = list_entry(list_begin(&q_running), thread_p, elem)->p;

	if (is_list_empty(&q_ready_H) && is_list_empty(&q_ready_L)) {
		printf("Q_ready_H and L are both empty!\n");
		return;
	}

	/* Enqueue the running thread on the semaphore's waiter list. */
	thread_p *tmp_p = (thread_p *)calloc(1, sizeof(thread_p));
	tmp_p->p = running;
	list_insert_tail(&sema->waiters, &tmp_p->elem);

	/* Save a handle to the outgoing context before unlinking. */
	curr = &running->context;

	/* Remove (and free the wrapper of) the running-queue head. */
	e = list_begin(&q_running);
	tmp_p = list_entry(e, thread_p, elem);
	list_remove(e);
	free(tmp_p);

	/* Pick the next thread: high-priority queue takes precedence. */
	if (!is_list_empty(&q_ready_H)) {
		tcbnext = list_entry(list_begin(&q_ready_H), thread_p, elem)->p;
		queueflag = 0;
	} else if (!is_list_empty(&q_ready_L)) {
		tcbnext = list_entry(list_begin(&q_ready_L), thread_p, elem)->p;
		queueflag = 1;
	}

	/* Install the chosen thread as the new running-queue head. */
	tmp_p = (thread_p *)calloc(1, sizeof(thread_p));
	tmp_p->p = tcbnext;
	list_insert_head(&q_running, &tmp_p->elem);

	next = &tcbnext->context;

	/* Unlink the chosen thread from whichever ready queue held it. */
	if (queueflag == 0)
		e = list_begin(&q_ready_H);
	else if (queueflag == 1)
		e = list_begin(&q_ready_L);
	tmp_p = list_entry(e, thread_p, elem);
	list_remove(e);
	free(tmp_p);

	assert(curr);
	assert(next);

	/* Switch to the next thread; control resumes here when rescheduled. */
	if (swapcontext(curr, next) == -1) {
		printf("Swapcontext error: %s\n", strerror(errno));
	}
}
/*
 * Allocate a property record for @propname and link it onto @dd's
 * property list.  Caller must hold dd_lock.
 */
static dsl_prop_record_t *
dsl_prop_record_create(dsl_dir_t *dd, const char *propname)
{
	dsl_prop_record_t *pr;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	pr = kmem_alloc(sizeof (dsl_prop_record_t), KM_SLEEP);
	pr->pr_propname = spa_strdup(propname);
	list_create(&pr->pr_cbs, sizeof (dsl_prop_cb_record_t),
	    offsetof(dsl_prop_cb_record_t, cbr_pr_node));
	list_insert_head(&dd->dd_props, pr);

	return (pr);
}
/* * Initialize processor set plugin. Called once at boot time. */ void pool_pset_init(void) { ASSERT(pool_pset_default == NULL); pool_pset_default = kmem_zalloc(sizeof (pool_pset_t), KM_SLEEP); pool_pset_default->pset_id = PS_NONE; pool_pset_default->pset_npools = 1; /* for pool_default */ pool_default->pool_pset = pool_pset_default; list_create(&pool_pset_list, sizeof (pool_pset_t), offsetof(pool_pset_t, pset_link)); list_insert_head(&pool_pset_list, pool_pset_default); mutex_enter(&cpu_lock); register_cpu_setup_func(pool_pset_cpu_setup, NULL); mutex_exit(&cpu_lock); }
void HCI_recv_packet(unsigned char* packet_buffer, unsigned int packet_length) { tHciDataPacket * hciReadPacket = NULL; if (!list_is_empty ((tListNode*)&hciReadPktPool)){ if(packet_length > 0) { /* enqueueing a packet for read */ list_remove_head ((tListNode*)&hciReadPktPool, (tListNode **)&hciReadPacket); Osal_MemCpy(hciReadPacket->dataBuff, packet_buffer, MIN(HCI_READ_PACKET_SIZE, packet_length)); hciReadPacket->data_len = packet_length; switch(HCI_verify(hciReadPacket)) { case 0: list_insert_tail((tListNode*)&hciReadPktRxQueue, (tListNode *)hciReadPacket); break; default: case 1: case 2: list_insert_head((tListNode*)&hciReadPktPool, (tListNode *)hciReadPacket); break; } } } else{ // HCI Read Packet Pool is empty, wait for a free packet. readPacketListFull = TRUE; return; } // process incoming packet // don't process when hci_send_req is undergoing if (hciAwaitReply) { return; } /* process any pending events read */ while(!list_is_empty((tListNode*)&hciReadPktRxQueue)) { list_remove_head ((tListNode*)&hciReadPktRxQueue, (tListNode **)&hciReadPacket); //Enable_SPI_IRQ(); HCI_Event_CB(hciReadPacket->dataBuff); //Disable_SPI_IRQ(); list_insert_tail((tListNode*)&hciReadPktPool, (tListNode *)hciReadPacket); } }
/*
 * If there aren't too many streams already, create a new stream.
 * The "blkid" argument is the next block that we expect this stream to
 * access.  While we're here, reap streams which have been idle for more
 * than zfetch_min_sec_reap seconds.
 */
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
	zstream_t *zs, *next;
	uint32_t cap;
	int nstreams = 0;

	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	/* Reap expired streams and count the survivors. */
	for (zs = list_head(&zf->zf_stream); zs != NULL; zs = next) {
		next = list_next(&zf->zf_stream, zs);
		if (((gethrtime() - zs->zs_atime) / NANOSEC) >
		    zfetch_min_sec_reap)
			dmu_zfetch_stream_remove(zf, zs);
		else
			nstreams++;
	}

	/*
	 * Cap the stream count at zfetch_max_streams, lowered for small
	 * files so that it is at least possible for all the streams to be
	 * non-overlapping.  Give up if we are already at the cap.
	 */
	cap = MAX(1, MIN(zfetch_max_streams,
	    zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
	    zfetch_max_distance));
	if (nstreams >= cap) {
		ZFETCHSTAT_BUMP(zfetchstat_max_streams);
		return;
	}

	/* Room for one more: set up a stream starting at blkid. */
	zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
	zs->zs_blkid = blkid;
	zs->zs_pf_blkid = blkid;
	zs->zs_ipf_blkid = blkid;
	zs->zs_atime = gethrtime();
	mutex_init(&zs->zs_lock, NULL, MUTEX_DEFAULT, NULL);

	list_insert_head(&zf->zf_stream, zs);
}
/*
 * Remove a reference of 'number' from 'rc' held by 'holder'.  Returns
 * the new reference count.  When tracking is enabled, the matching
 * reference_t is moved onto the removed-history list (bounded by
 * reference_history); panics if no matching hold exists.
 */
int64_t
refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
{
	reference_t *ref;
	int64_t count;

	mutex_enter(&rc->rc_mtx);
	ASSERT(rc->rc_count >= number);

	/* Untracked counts are a simple decrement. */
	if (!rc->rc_tracked) {
		rc->rc_count -= number;
		count = rc->rc_count;
		mutex_exit(&rc->rc_mtx);
		return (count);
	}

	/* Find the reference matching this holder and amount. */
	for (ref = list_head(&rc->rc_list); ref;
	    ref = list_next(&rc->rc_list, ref)) {
		if (ref->ref_holder == holder && ref->ref_number == number) {
			list_remove(&rc->rc_list, ref);
			if (reference_history > 0) {
				/* Keep the removal in the bounded history. */
				ref->ref_removed =
				    kmem_cache_alloc(reference_history_cache,
				    KM_SLEEP);
				list_insert_head(&rc->rc_removed, ref);
				rc->rc_removed_count++;
				if (rc->rc_removed_count > reference_history) {
					/* Evict the oldest history entry. */
					ref = list_tail(&rc->rc_removed);
					list_remove(&rc->rc_removed, ref);
					kmem_cache_free(reference_history_cache,
					    ref->ref_removed);
					kmem_cache_free(reference_cache, ref);
					rc->rc_removed_count--;
				}
			} else {
				kmem_cache_free(reference_cache, ref);
			}
			rc->rc_count -= number;
			count = rc->rc_count;
			mutex_exit(&rc->rc_mtx);
			return (count);
		}
	}
	/* No matching hold: this is a caller bug. */
	panic("No such hold %p on refcount %llx", holder,
	    (u_longlong_t)(uintptr_t)rc);
	return (-1);
}
static int splat_list_test2(struct file *file, void *arg) { list_t list; list_item_t *li; int i, list_size = 8, rc = 0; splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Creating list\n%s", ""); list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node)); /* Insert all items at the list head to form a stack */ splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Adding %d items to list head\n", list_size); for (i = 0; i < list_size; i++) { li = kmem_alloc(sizeof(list_item_t), KM_SLEEP); if (li == NULL) { rc = -ENOMEM; goto out; } list_link_init(&li->li_node); li->li_data = i; list_insert_head(&list, li); } splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Validating %d item list is a stack\n", list_size); rc = splat_list_validate(&list, list_size, LIST_ORDER_STACK, 1); if (rc) splat_vprint(file, SPLAT_LIST_TEST2_NAME, "List validation failed, %d\n", rc); out: /* Remove all items */ splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Removing %d items from list head\n", list_size); while ((li = list_remove_head(&list))) kmem_free(li, sizeof(list_item_t)); splat_vprint(file, SPLAT_LIST_TEST2_NAME, "Destroying list\n%s", ""); list_destroy(&list); return rc; }