void handler_crear_segmento(uint32_t *respuesta, char *orden, t_param_memoria *parametros_memoria, char **savePtr1, t_log *logger) { char *id_prog = strtok_r(NULL, ",", savePtr1); char *tamanio = strtok_r(NULL, ",", savePtr1); pthread_mutex_lock(&op_atomica); void *mem_ppal = parametros_memoria->mem_ppal; uint32_t tamanio_mem_ppal = parametros_memoria->tamanio_mem_ppal; t_list *lista_segmentos = parametros_memoria->listaSegmentos; char *algoritmo_compactacion = parametros_memoria->algoritmo_comp; t_list *espacios_libres = buscarEspaciosLibres(lista_segmentos, mem_ppal, tamanio_mem_ppal); *respuesta = crearSegmento(atoi(id_prog), atoi(tamanio), espacios_libres, lista_segmentos, algoritmo_compactacion); if (*respuesta == 0) { log_info(logger, "[ATENCION_CONN] No room for this segment; attempting to compact."); compactar(lista_segmentos, mem_ppal, tamanio_mem_ppal); if (!list_is_empty(espacios_libres)) { list_clean_and_destroy_elements(espacios_libres, eliminarEspacioLibre); } list_destroy(espacios_libres); espacios_libres = buscarEspaciosLibres(lista_segmentos, mem_ppal, tamanio_mem_ppal); *respuesta = crearSegmento(atoi(id_prog), atoi(tamanio), espacios_libres, lista_segmentos, algoritmo_compactacion); if (*respuesta == 0) log_info(logger, "[ATENCION_CONN] No room to create the segment, even after compacting memory."); } list_destroy_and_destroy_elements(espacios_libres, eliminarEspacioLibre); pthread_mutex_unlock(&op_atomica); log_info(logger, "[ATENCION_CONN] Response to the crear_segmento command: %d.", *respuesta); }
void alarm_cancel(alarm_t *alarm) { assert(alarms != NULL); assert(alarm != NULL); pthread_mutex_lock(&monitor); bool needs_reschedule = (!list_is_empty(alarms) && list_front(alarms) == alarm); list_remove(alarms, alarm); alarm->deadline = 0; alarm->callback = NULL; alarm->data = NULL; if (needs_reschedule) reschedule(); pthread_mutex_unlock(&monitor); // If the callback for |alarm| is in progress, wait here until it completes. pthread_mutex_lock(&alarm->callback_lock); pthread_mutex_unlock(&alarm->callback_lock); }
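/*
 * Illustrative sketch (not part of the alarm module above): the lock/unlock
 * pair on |alarm->callback_lock| in alarm_cancel() is a common "handshake"
 * for draining an in-flight callback. A minimal standalone pthread version
 * of the same idea, with hypothetical names:
 */
#include <pthread.h>

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;

/* The dispatcher runs every callback while holding cb_lock. */
static void run_callback(void (*cb)(void *), void *arg)
{
    pthread_mutex_lock(&cb_lock);
    if (cb)
        cb(arg);                    /* callback body executes under cb_lock */
    pthread_mutex_unlock(&cb_lock);
}

/* A canceller that acquires and immediately releases cb_lock cannot return
 * until any callback that was already running has finished. */
static void wait_for_callback_to_finish(void)
{
    pthread_mutex_lock(&cb_lock);
    pthread_mutex_unlock(&cb_lock); /* nothing to do; we only waited */
}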
/* * Report whether all transactions are committed */ int zil_is_committed(zilog_t *zilog) { lwb_t *lwb; int ret; mutex_enter(&zilog->zl_lock); while (zilog->zl_writer) cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock); /* recent unpushed intent log transactions? */ if (!list_is_empty(&zilog->zl_itx_list)) { ret = B_FALSE; goto out; } /* intent log never used? */ lwb = list_head(&zilog->zl_lwb_list); if (lwb == NULL) { ret = B_TRUE; goto out; } /* * more than 1 log buffer means zil_sync() hasn't yet freed * entries after a txg has committed */ if (list_next(&zilog->zl_lwb_list, lwb)) { ret = B_FALSE; goto out; } ASSERT(zil_empty(zilog)); ret = B_TRUE; out: cv_broadcast(&zilog->zl_cv_writer); mutex_exit(&zilog->zl_lock); return (ret); }
static struct unipro_xfer_descriptor *pick_tx_descriptor(unsigned int cportid) { struct unipro_xfer_descriptor *desc; unsigned int cport_count = unipro_cport_count(); int i; for (i = 0; i < cport_count; i++, cportid++) { struct cport *cport; cportid = cportid % cport_count; cport = cport_handle(cportid); if (!cport) continue; if (list_is_empty(&cport->tx_fifo)) { if (cport->pending_reset) { unipro_flush_cport(cport); } continue; } if (cport->pending_reset) { unipro_flush_cport(cport); } desc = containerof(cport->tx_fifo.next, struct unipro_xfer_descriptor, list); if (desc->channel) continue; if (!unipro_get_tx_free_buffer_space(desc->cport)) continue; return desc; } return NULL; }
static void sess_sm_event_locked(iscsit_sess_t *ist, iscsit_session_event_t event, iscsit_conn_t *ict) { sess_event_ctx_t *ctx; iscsit_sess_hold(ist); ctx = kmem_zalloc(sizeof (*ctx), KM_SLEEP); ctx->se_ctx_event = event; ctx->se_event_data = ict; list_insert_tail(&ist->ist_events, ctx); /* * Use the ist_sm_busy to keep the state machine single threaded. * This also serves as recursion avoidance since this flag will * always be set if we call login_sm_event from within the * state machine code. */ if (!ist->ist_sm_busy) { ist->ist_sm_busy = B_TRUE; while (!list_is_empty(&ist->ist_events)) { ctx = list_head(&ist->ist_events); list_remove(&ist->ist_events, ctx); idm_sm_audit_event(&ist->ist_state_audit, SAS_ISCSIT_SESS, (int)ist->ist_state, (int)ctx->se_ctx_event, (uintptr_t)ict); mutex_exit(&ist->ist_mutex); sess_sm_event_dispatch(ist, ctx); mutex_enter(&ist->ist_mutex); } ist->ist_sm_busy = B_FALSE; } iscsit_sess_rele(ist); }
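/*
 * Illustrative sketch (separate from the iscsit code above): the ist_sm_busy
 * flag keeps the session state machine single threaded while still letting
 * any thread queue events. A generic version of that pattern, with
 * hypothetical types and a stub dispatch():
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct event { struct event *next; int code; };

struct machine {
    pthread_mutex_t lock;
    struct event *head, *tail;  /* FIFO of pending events */
    bool busy;                  /* true while one thread is dispatching */
};

static void dispatch(struct machine *m, struct event *e)
{
    (void)m;
    free(e);                    /* stand-in for the real state transition */
}

/* Called with m->lock NOT held. The first thread to find busy == false
 * drains the whole queue; every other caller just enqueues and returns,
 * which also avoids recursion when dispatch() posts further events. */
void post_event(struct machine *m, struct event *ev)
{
    pthread_mutex_lock(&m->lock);
    ev->next = NULL;
    if (m->tail) m->tail->next = ev; else m->head = ev;
    m->tail = ev;
    if (!m->busy) {
        m->busy = true;
        while (m->head) {
            struct event *e = m->head;
            m->head = e->next;
            if (!m->head) m->tail = NULL;
            pthread_mutex_unlock(&m->lock);
            dispatch(m, e);     /* run the transition without the lock held */
            pthread_mutex_lock(&m->lock);
        }
        m->busy = false;
    }
    pthread_mutex_unlock(&m->lock);
}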
static int query_consoles_via_globbing( server_conf_t *conf, req_t *req, List matches) { /* Match request patterns against console names using shell-style globbing. * This is less efficient than matching via regular expressions * since the console list must be traversed for each pattern, and the * matches list must be traversed for each match to prevent duplicates. */ char *p; ListIterator i, j; char *pat; obj_t *obj; /* An empty list for the QUERY command matches all consoles. */ if (list_is_empty(req->consoles)) { p = create_string("*"); list_append(req->consoles, p); } /* Search objs for console names matching console patterns in the request. */ i = list_iterator_create(req->consoles); j = list_iterator_create(conf->objs); while ((pat = list_next(i))) { list_iterator_reset(j); while ((obj = list_next(j))) { if (!is_console_obj(obj)) continue; if (!fnmatch(pat, obj->name, 0) && !list_find_first(matches, (ListFindF) find_obj, obj)) list_append(matches, obj); } } list_iterator_destroy(i); list_iterator_destroy(j); return(0); }
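/*
 * Standalone illustration (not part of conman above): fnmatch(3) is the only
 * machinery the glob match relies on; it returns 0 on a match, which is why
 * a request pattern of "*" matches every console name.
 */
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
    const char *names[] = { "web1", "web2", "db1" };
    const char *pat = "web*";

    for (int i = 0; i < 3; i++) {
        if (fnmatch(pat, names[i], 0) == 0)
            printf("%s matches %s\n", names[i], pat);   /* web1, web2 */
    }
    return 0;
}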
Programa *crearPrograma(uint32_t pid, void *script, void *etiquetas, void *instrucciones_serializado, socket_pedirMemoria *pedidoMemoria) { Programa *programa = malloc(sizeof(Programa)); programa->pid = pid; pthread_rwlock_wrlock(&lockEscrituraLectura); programa->stack = crearSegmento(pedidoMemoria->stackSegmentSize); programa->script = crearYllenarSegmento(pedidoMemoria->codeSegmentSize, script); programa->instrucciones = crearYllenarSegmento(pedidoMemoria->instruccionesSegmentSize, instrucciones_serializado); if(list_is_empty(programas)) { crearDireccionesVirtuales(programa->stack, pedidoMemoria->stackSegmentSize, 0); }else{ Programa *ultimoPrograma = list_get(programas, list_size(programas) - 1); crearDireccionesVirtuales(programa->stack, pedidoMemoria->stackSegmentSize, ultimoPrograma->instrucciones->finVirtual); } crearDireccionesVirtuales(programa->script, pedidoMemoria->codeSegmentSize, programa->stack->finVirtual); if(pedidoMemoria->etiquetasSegmentSize == 0) { programa->etiquetas = NULL; crearDireccionesVirtuales(programa->instrucciones, pedidoMemoria->instruccionesSegmentSize, programa->script->finVirtual); }else{ programa->etiquetas = crearYllenarSegmento(pedidoMemoria->etiquetasSegmentSize, etiquetas); crearDireccionesVirtuales(programa->etiquetas, pedidoMemoria->etiquetasSegmentSize, programa->script->finVirtual); crearDireccionesVirtuales(programa->instrucciones, pedidoMemoria->instruccionesSegmentSize, programa->etiquetas->finVirtual); } list_add(programas, programa); pthread_rwlock_unlock(&lockEscrituraLectura); return programa; }
void *worker_thread(void *local) { struct pool_worker *worker = (struct pool_worker *) local; PRINT_DEBUG("Entered: id=%u", worker->id); while (1) { secure_sem_wait(worker->inactive_sem); PRINT_DEBUG("queue=%p", worker->queue); if (list_is_empty(worker->queue)) { *worker->inactive_num += 1; worker->inactive = 1; PRINT_DEBUG("inactive: worker=%p, inactive_num=%u", worker, *worker->inactive_num); sem_post(worker->inactive_sem); secure_sem_wait(&worker->activate_sem); if (!worker->running) { break; } } else { if (worker->running) { struct pool_request *request = (struct pool_request *) list_remove_front(worker->queue); worker->work = request->work; worker->local = request->local; PRINT_DEBUG("Freeing: request=%p", request); free(request); sem_post(worker->inactive_sem); } else { sem_post(worker->inactive_sem); break; } } worker->work(worker->local); } PRINT_DEBUG("Exited: id=%u", worker->id); return NULL; }
/* * Dispatch the commit callbacks registered on this txg to worker threads. * * If no callbacks are registered for a given TXG, nothing happens. * This function creates a taskq for the associated pool, if needed. */ static void txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) { int c; tx_state_t *tx = &dp->dp_tx; list_t *cb_list; for (c = 0; c < max_ncpus; c++) { tx_cpu_t *tc = &tx->tx_cpu[c]; /* * No need to lock tx_cpu_t at this point, since this can * only be called once a txg has been synced. */ int g = txg & TXG_MASK; if (list_is_empty(&tc->tc_callbacks[g])) continue; if (tx->tx_commit_cb_taskq == NULL) { /* * Commit callback taskq hasn't been created yet. */ tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb", max_ncpus, minclsyspri, max_ncpus, max_ncpus * 2, TASKQ_PREPOPULATE); } cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP); list_create(cb_list, sizeof (dmu_tx_callback_t), offsetof(dmu_tx_callback_t, dcb_node)); list_move_tail(cb_list, &tc->tc_callbacks[g]); (void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *) txg_do_callbacks, cb_list, TQ_SLEEP); } }
struct volume *iscsi_target_lookup_lu(struct iscsi_target *target, uint64 lun) { struct volume *vol = NULL; log_dbg1("target=%p\n", target); if (list_is_empty(&(target->list_vol))) { log_dbg1("There are no LUs in target \"%s\"\n", target->name); return NULL; } do_each_list_elem(struct volume *, &(target->list_vol), vol, listelem) { log_dbg1("vol->lun=%llu\n", vol->lun); if (vol->lun == lun) { log_dbg1("Found LU (LUN=%llu) in target \"%s\".\n", lun, target->name); return vol; } } while_each_list_elem(struct volume *, &(target->list_vol), vol, listelem); log_dbg1("Not found LU (LUN=%llu) in target \"%s\".\n", lun, target->name); return NULL; } // iscsi_target_lookup_lu
/* * Free descriptor management. */ struct vq_entry * vq_alloc_entry(struct virtqueue *vq) { struct vq_entry *qe; mutex_enter(&vq->vq_freelist_lock); if (list_is_empty(&vq->vq_freelist)) { mutex_exit(&vq->vq_freelist_lock); return (NULL); } qe = list_remove_head(&vq->vq_freelist); ASSERT(vq->vq_used_entries >= 0); vq->vq_used_entries++; mutex_exit(&vq->vq_freelist_lock); qe->qe_next = NULL; qe->qe_indirect_next = 0; (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc)); return (qe); }
} END_TEST // Ensure two values can be appended then both popped off of a list // ✔ Data should be as expected // ✔ Size should be 0 START_TEST (test_append_2_pop_2_list) { kld_list_t * list = (kld_list_t *) new_list(); char * buf = "test data"; char * buf2 = "test data2"; list_append(list, buf); list_append(list, buf2); kld_list_node_t * tmp = (kld_list_node_t *) list_pop(list); kld_list_node_t * tmp2 = (kld_list_node_t *) list_pop(list); fail_if(tmp == NULL, "Returned value is null"); fail_if(tmp2 == NULL, "Returned value is null"); fail_if(tmp->data != buf2, "Unexpected data value for returned list node"); fail_if(tmp2->data != buf, "Unexpected data value for returned list node"); fail_if(list->size != 0, "Unexpected list size"); fail_if(!list_is_empty(list), "List does not report itself as empty"); } END_TEST
static void cache_timer(void * x) { LINK * l; time_t time_now = time(NULL); assert(cache_timer_posted); cache_timer_posted = 0; for (l = cache_list.next; l != &cache_list; l = l->next) { unsigned i; AbstractCache * cache = link_all2cache(l); assert(cache->wait_list_cnt > 0); for (i = 0; i < cache->wait_list_cnt; i++) { WaitingCacheClient * client = cache->wait_list_buf + i; if (time_now - client->time_stamp >= 30) { /* Client is waiting longer than 30 sec - it might be a bug */ trace(LOG_ALWAYS, "Stalled cache at %s:%d", client->file, client->line); } } } if (!list_is_empty(&cache_list)) { post_event_with_delay(cache_timer, NULL, 5000000); cache_timer_posted = 1; } }
/* * Search for an iSCSI task in the task list of the iSCSI connection. */ struct iscsi_task *iscsi_search_task(struct iscsi_conn *conn, uint32 itt) { struct iscsi_task *task, *task_found; task_found = NULL; if (list_is_empty(&(conn->list_task))) { log_dbg3("Not found the task (itt=0x%08lX)\n", itt); } else { ASSERT((conn->list_task.head != NULL), "conn->list_task.head == NULL\n"); do_each_list_elem(struct iscsi_task *, &(conn->list_task), task, listelem) { log_dbg3("task->itt="U32_FMT"(0x%08lX)\n", task->itt, task->itt); if (task->itt == itt) { log_dbg3("Found the task (itt="U32_FMT"(0x%08lX))\n", itt, itt); task_found = task; break; } } while_each_list_elem(struct iscsi_task *, &(conn->list_task), task, listelem); if (task_found == NULL) { log_dbg3("Not found the task (itt=0x%08lX)\n", itt); } } return task_found; } // iscsi_search_task
bool build_rec(int d, struct list *s, struct tree **t) //@ requires list(s, ?vs) &*& pointer(t, _); /*@ ensures list(s, ?rvs) &*& pointer(t, ?rt) &*& switch (build_rec1(d, vs)) { case fail: return result == false; case success(rvt, rvs0): return result == true &*& rvs == rvs0 &*& tree(rt, rvt); }; @*/ // decreases max_func(0, fold_left(0, max_func, vs) - d); // Not yet checked by VeriFast. { //@ build_rec1_eq(d, vs); struct tree *l; struct tree *r; if (list_is_empty(s)) return false; int h = list_head(s); if (h < d) return false; if (h == d) { list_pop(s); struct tree *leaf = create_leaf(); *t = leaf; return true; } if (!build_rec(d+1, s, &l)) return false; if (!build_rec(d+1, s, &r)) { tree_dispose(l); return false; } struct tree *node = create_node(l, r); *t = node; return true; }
int install_irq_handler(unsigned char irqno, struct irq_handler* irqh) { bool ints_on = cli_if_on(); struct list* head = g_irq_handling.irqh + irqno; if (list_is_empty(head)) { /* We're handling a new interrupt, install an * interrupt handler. */ int res = g_irq_handling.enable_irq(irqno); if (res < 0) { sti_if_on(ints_on); return res; } } list_enqueue_back(head, &irqh->irqh); sti_if_on(ints_on); return 0; }
static void delete_client(StreamClient * client) { VirtualStream * stream = client->stream; Trap trap; LINK * n; assert(stream->ref_cnt > 0); if (set_trap(&trap)) { send_event_stream_disposed(&client->channel->out, stream); clear_trap(&trap); } else { trace(LOG_ALWAYS, "Exception sending stream deleted event: %d %s", trap.error, errno_to_str(trap.error)); } list_remove(&client->link_hash); list_remove(&client->link_stream); list_remove(&client->link_all); for (n = client->read_requests.next; n != &client->read_requests;) { ReadRequest * r = client2read_request(n); n = n->next; delete_read_request(r); } for (n = client->write_requests.next; n != &client->write_requests;) { WriteRequest * r = client2write_request(n); n = n->next; delete_write_request(r, ERR_COMMAND_CANCELLED); } loc_free(client); if (--stream->ref_cnt == 0) { assert(list_is_empty(&stream->clients)); assert(stream->deleted); post_event(delete_stream, stream); } else if (stream->access & VS_ENABLE_REMOTE_READ) { advance_stream_buffer(stream); } }
List *merge(List *first, List *second, List *result) { if (list_is_empty(first) && list_is_empty(second)) return result; if (list_is_empty(first)) { if (list_is_empty(result)) return second; result->tail->next = second->head; second->head->prev = result->tail; list_delete(first); list_delete(second); return result; } if (list_is_empty(second)) { if (list_is_empty(result)) return first; result->tail->next = first->head; first->head->prev = result->tail; list_delete(first); list_delete(second); return result; } if (first->head->data->key <= second->head->data->key) { list_insert_last(result, first->head->data); list_delete_elem(first, first->head); } else { list_insert_last(result, second->head->data); list_delete_elem(second, second->head); } // list_print(result); return merge(first, second, result); }
/* TODO implement a search based on more sophisticated patterns */ int find_item(char *str, int start, int search_fields[]) { int i, id; char *findstr = NULL; char *tmp = NULL; int ret = -1; /* not found */ struct db_enumerator e = init_db_enumerator(ENUM_ALL); if(list_is_empty() || !is_valid_item(start)) return -2; /* error */ findstr = xstrdup(str); findstr = strlower(findstr); e.item = start - 1; /* must be "real start" - 1 */ db_enumerate_items(e) { for(i = 0; search_fields[i] >= 0; i++) { if((id = field_id(search_fields[i])) == -1) continue; if(database[e.item][id] == NULL) continue; tmp = xstrdup(database[e.item][id]); if( tmp && strstr(strlower(tmp), findstr) ) { ret = e.item; goto out; } xfree(tmp); tmp = NULL; /* avoid double free at out: */ } } out: free(findstr); free(tmp); return ret; }
} END_TEST // Ensure two values can be appended then one popped off of a list // ✔ Data should be as expected // ✔ Size should be 1 // ✔ Head should be the same as tail START_TEST (test_append_2_pop_1_list) { kld_list_t * list = (kld_list_t *) new_list(); char * buf = "test data"; char * buf2 = "test data2"; list_append(list, buf); list_append(list, buf2); kld_list_node_t * tmp = (kld_list_node_t *) list_pop(list); fail_if(tmp == NULL, "Returned value is null"); fail_if(tmp->data != buf2, "Unexpected data value for returned list node"); fail_if(list->head->data != list->tail->data, "Head and Tail have diverged"); fail_if(list->size != 1, "Unexpected list size"); fail_if(list_is_empty(list), "List reports itself as empty"); } END_TEST
static void restart_comamnd_waiting_response_timer( command_waiting_response_t *cmd_wait_q, bool tigger_by_sending_command) { uint32_t timeout; waiting_command_t *wait_entry; if (!cmd_wait_q) { return; } if (cmd_wait_q->timer_is_set) { if (tigger_by_sending_command) { return; } //Cancel the previous command timeout timer that was set when the command was sent osi_alarm_cancel(cmd_wait_q->command_response_timer); cmd_wait_q->timer_is_set = false; } pthread_mutex_lock(&cmd_wait_q->commands_pending_response_lock); wait_entry = (list_is_empty(cmd_wait_q->commands_pending_response) ? NULL : list_front(cmd_wait_q->commands_pending_response)); pthread_mutex_unlock(&cmd_wait_q->commands_pending_response_lock); if (wait_entry == NULL) { return; } timeout = osi_alarm_time_diff(osi_alarm_now(), wait_entry->sent_time); timeout = osi_alarm_time_diff(COMMAND_PENDING_TIMEOUT, timeout); timeout = (timeout <= COMMAND_PENDING_TIMEOUT) ? timeout : COMMAND_PENDING_TIMEOUT; cmd_wait_q->timer_is_set = true; osi_alarm_set(cmd_wait_q->command_response_timer, timeout); }
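/*
 * Illustration of the timeout arithmetic above, assuming osi_alarm_time_diff(a, b)
 * behaves like a wrap-safe (a - b): the timer is re-armed for "full timeout minus
 * time already waited", clamped so an underflow can never produce a huge value.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t time_diff(uint32_t a, uint32_t b) { return a - b; } /* assumed semantics */

int main(void)
{
    const uint32_t limit = 8000;               /* stand-in for COMMAND_PENDING_TIMEOUT (ms) */
    uint32_t now = 100000, sent_time = 97000;  /* oldest pending command has waited 3000 ms */

    uint32_t elapsed   = time_diff(now, sent_time);  /* 3000 */
    uint32_t remaining = time_diff(limit, elapsed);  /* 5000 */
    if (remaining > limit)                           /* clamp if elapsed already exceeded limit */
        remaining = limit;

    printf("re-arm response timer for %u ms\n", remaining);
    return 0;
}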
/* Handle STREAM_EV_COMPLETE */ static void stream_on_ev_complete(struct stream *str) { stream_mgr_lock(); /* Was the stream still on the active list? */ if (actl_stream_remove(str)) { /* Yes - it has now been removed */ DEBUGF(" finished: 0x%02x\n", str->id); if (list_is_empty(stream_mgr.actl)) { /* All streams have acked - stop playback */ stream_on_stop(false); stream_mgr.resume_time = 0; /* Played to end - no resume */ } else { /* Stream is done - stop it and place back in pool */ str_send_msg(str, STREAM_STOP, 1); } } stream_mgr_unlock(); }
void HCI_Isr(void) { tHciDataPacket * hciReadPacket = NULL; uint8_t data_len; Clear_SPI_EXTI_Flag(); while(BlueNRG_DataPresent()){ if (list_is_empty (&hciReadPktPool) == FALSE){ /* enqueueing a packet for read */ list_remove_head (&hciReadPktPool, (tListNode **)&hciReadPacket); data_len = BlueNRG_SPI_Read_All(&SpiHandle, hciReadPacket->dataBuff, HCI_READ_PACKET_SIZE); if(data_len > 0){ hciReadPacket->data_len = data_len; if(HCI_verify(hciReadPacket) == 0) list_insert_tail(&hciReadPktRxQueue, (tListNode *)hciReadPacket); else list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket); } else { // Insert the packet back into the pool. list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket); } } else{ // HCI Read Packet Pool is empty, wait for a free packet. readPacketListFull = TRUE; Clear_SPI_EXTI_Flag(); return; } Clear_SPI_EXTI_Flag(); } }
/* * Finish final uninitialization */ static int fcoe_detach_uninit(fcoe_soft_state_t *ss) { int ret; if (!list_is_empty(&ss->ss_mac_list)) { FCOE_LOG("fcoe", "ss_mac_list is not empty when detach"); return (FCOE_FAILURE); } if ((ret = fcoe_worker_fini()) != FCOE_SUCCESS) { return (ret); } /* * Stop watchdog */ if (ss->ss_flags & SS_FLAG_WATCHDOG_RUNNING) { mutex_enter(&ss->ss_watch_mutex); ss->ss_flags |= SS_FLAG_TERMINATE_WATCHDOG; cv_broadcast(&ss->ss_watch_cv); mutex_exit(&ss->ss_watch_mutex); while (ss->ss_flags & SS_FLAG_WATCHDOG_RUNNING) { delay(10); } } ddi_taskq_destroy(ss->ss_watchdog_taskq); mutex_destroy(&ss->ss_watch_mutex); cv_destroy(&ss->ss_watch_cv); ddi_remove_minor_node(ss->ss_dip, NULL); mutex_destroy(&ss->ss_ioctl_mutex); list_destroy(&ss->ss_mac_list); return (FCOE_SUCCESS); }
/* * Free a cookie that is no longer being used. */ void olsr_free_cookie(struct olsr_cookie_info *ci) { struct list_node *memory_list; /* Mark the cookie as unused */ cookies[ci->ci_id] = NULL; /* Free name if set */ if (ci->ci_name) { free(ci->ci_name); } /* Flush all the memory on the free list */ if (ci->ci_type == OLSR_COOKIE_TYPE_MEMORY) { while (!list_is_empty(&ci->ci_free_list)) { memory_list = ci->ci_free_list.next; list_remove(memory_list); free(memory_list); } } free(ci); }
void tree_breadth_first_search (struct TreeNode* tree, void(*callback) (struct TreeNode*)) { struct ListNode queue, *current, *next; list_init (&queue); current = list_push_back (&queue, tree); for (; current != &queue; current = next) { struct TreeNode* item = current->data; if (item->left) { list_push_back (&queue, item->left); } if (item->right) { list_push_back (&queue, item->right); } callback (item); next = current->next; list_delete (current); } assert (list_is_empty (&queue)); list_clear (&queue); }
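/*
 * Possible usage of tree_breadth_first_search(); the "value" field and this
 * node layout are assumptions made for the example - only left/right are
 * implied by the traversal above.
 */
#include <stdio.h>

struct TreeNode { int value; struct TreeNode *left, *right; };

static void print_node(struct TreeNode *n)
{
    printf("%d ", n->value);
}

void bfs_example(void)
{
    struct TreeNode l = { 2, NULL, NULL };
    struct TreeNode r = { 3, NULL, NULL };
    struct TreeNode root = { 1, &l, &r };

    tree_breadth_first_search(&root, print_node);   /* visits 1 2 3, level by level */
}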
/** Get a runnable thread from the ready queue @retval NULL The ready queue was empty @retval Address of a thread control structure A runnable thread found in the ready queue */ thread_t * rdq_find_runnable_thread(void){ psw_t psw; thread_t *thr; int idx, rc; thread_ready_queue_t *rdq = &rd_queue; psw_disable_and_save_interrupt(&psw); rc = find_msr_bit(rdq->bitmap, &idx); if ( rc != 0 ) { thr = NULL; goto out; } kassert( !list_is_empty( &rdq->head[idx] ) ); thr = CONTAINER_OF( list_ref_top(&rdq->head[idx]), thread_t, link); out: psw_restore_interrupt(&psw); return thr; }
void sbd_ats_remove_by_task(scsi_task_t *task) { ats_state_t *ats_state; sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private; sbd_cmd_t *scmd = task->task_lu_private; /* * Scan the list and take the task off of the list. It is possible * that the call is made in a situation where the task is not * listed. That is a valid but unlikely case. If it happens * just fall through and return. The list removal is done by * task not LBA range and a task cannot be active for more than * one command so there is never an issue about removing the * wrong element. */ mutex_enter(&sl->sl_lock); if (list_is_empty(&sl->sl_ats_io_list)) { mutex_exit(&sl->sl_lock); return; } for (ats_state = list_head(&sl->sl_ats_io_list); ats_state != NULL; ats_state = list_next(&sl->sl_ats_io_list, ats_state)) { if (ats_state->as_cur_ats_task == task) { list_remove(&sl->sl_ats_io_list, ats_state); kmem_free(ats_state, sizeof (ats_state_t)); scmd->flags &= ~SBD_SCSI_CMD_ATS_RELATED; scmd->ats_state = NULL; sbd_list_length--; break; } } mutex_exit(&sl->sl_lock); }
boolean_t dsl_prop_hascb(dsl_dataset_t *ds) { return (!list_is_empty(&ds->ds_prop_cbs)); }
void *heap_alloc(size_t size, unsigned int alignment) { void *ptr; #if DEBUG_HEAP size_t original_size = size; #endif LTRACEF("size %zd, align %d\n", size, alignment); // deal with the pending free list if (unlikely(!list_is_empty(&theheap.delayed_free_list))) { heap_free_delayed_list(); } // alignment must be power of 2 if (alignment & (alignment - 1)) return NULL; // we always put a size field + base pointer + magic in front of the allocation size += sizeof(struct alloc_struct_begin); #if DEBUG_HEAP size += PADDING_SIZE; #endif // make sure we allocate at least the size of a struct free_heap_chunk so that // when we free it, we can create a struct free_heap_chunk struct and stick it // in the spot if (size < sizeof(struct free_heap_chunk)) size = sizeof(struct free_heap_chunk); // round up size to a multiple of native pointer size size = ROUNDUP(size, sizeof(void *)); // deal with nonzero alignments if (alignment > 0) { if (alignment < 16) alignment = 16; // add alignment for worst case fit size += alignment; } #if WITH_KERNEL_VM int retry_count = 0; retry: #endif mutex_acquire(&theheap.lock); // walk through the list ptr = NULL; struct free_heap_chunk *chunk; list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) { DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size // is it big enough to service our allocation? if (chunk->len >= size) { ptr = chunk; // remove it from the list struct list_node *next_node = list_next(&theheap.free_list, &chunk->node); list_delete(&chunk->node); if (chunk->len > size + sizeof(struct free_heap_chunk)) { // there's enough space in this chunk to create a new one after the allocation struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size, true); // truncate this chunk chunk->len -= chunk->len - size; // add the new one where chunk used to be if (next_node) list_add_before(next_node, &newchunk->node); else list_add_tail(&theheap.free_list, &newchunk->node); } // the allocated size is actually the length of this chunk, not the size requested DEBUG_ASSERT(chunk->len >= size); size = chunk->len; #if DEBUG_HEAP memset(ptr, ALLOC_FILL, size); #endif ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin)); // align the output if requested if (alignment > 0) { ptr = (void *)ROUNDUP((addr_t)ptr, (addr_t)alignment); } struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr; as--; #if LK_DEBUGLEVEL > 1 as->magic = HEAP_MAGIC; #endif as->ptr = (void *)chunk; as->size = size; theheap.remaining -= size; if (theheap.remaining < theheap.low_watermark) { theheap.low_watermark = theheap.remaining; } #if DEBUG_HEAP as->padding_start = ((uint8_t *)ptr + original_size); as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size)); // printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size); memset(as->padding_start, PADDING_FILL, as->padding_size); #endif break; } } mutex_release(&theheap.lock); #if WITH_KERNEL_VM /* try to grow the heap if we can */ if (ptr == NULL && retry_count == 0) { size_t growby = MAX(HEAP_GROW_SIZE, ROUNDUP(size, PAGE_SIZE)); ssize_t err = heap_grow(growby); if (err >= 0) { retry_count++; goto retry; } } #endif LTRACEF("returning ptr %p\n", ptr); return ptr; }
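/*
 * Standalone sketch of the alignment trick used above (over-allocate, round
 * the user pointer up, and stash bookkeeping just below it), kept independent
 * of theheap; names here are illustrative only.
 */
#include <stdint.h>
#include <stdlib.h>

void *aligned_alloc_sketch(size_t size, size_t alignment)
{
    if (alignment == 0 || (alignment & (alignment - 1)))
        return NULL;                        /* alignment must be a nonzero power of two */

    void *raw = malloc(size + alignment + sizeof(void *));
    if (!raw)
        return NULL;

    /* Leave room for one pointer below the aligned address, then round up. */
    uintptr_t user = ((uintptr_t)raw + sizeof(void *) + alignment - 1) & ~(uintptr_t)(alignment - 1);
    ((void **)user)[-1] = raw;              /* the matching free recovers this */
    return (void *)user;
}

void aligned_free_sketch(void *ptr)
{
    if (ptr)
        free(((void **)ptr)[-1]);
}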