/**
 * Purge the schedule. Removes all task nodes from both trees; the task
 * payloads themselves are freed only once, via the name tree.
 */
void
schedule_purge(schedule_type* schedule)
{
    ldns_rbnode_t* node;

    if (!schedule || !schedule->tasks) return;
    pthread_mutex_lock(&schedule->schedule_lock);
    /* don't attempt to free payload, still referenced by other tree */
    while ((node = ldns_rbtree_first(schedule->tasks)) != LDNS_RBTREE_NULL) {
        node = ldns_rbtree_delete(schedule->tasks, node->data);
        if (!node) break;
        free(node);
    }
    /* also clean up name tree */
    while ((node = ldns_rbtree_first(schedule->tasks_by_name)) != LDNS_RBTREE_NULL) {
        node = ldns_rbtree_delete(schedule->tasks_by_name, node->data);
        if (!node) break;
        task_cleanup((task_type*) node->data);
        free(node);
    }
    pthread_mutex_unlock(&schedule->schedule_lock);
}
/**
 * Pop the first scheduled task. Caller must hold
 * schedule->schedule_lock. Result is safe to use outside lock.
 *
 * \param[in] schedule schedule
 * \return task_type* first scheduled task, NULL on no task or error.
 */
static task_type*
pop_first_task(schedule_type* schedule)
{
    ldns_rbnode_t *node, *delnode;
    task_type *task;

    if (!schedule || !schedule->tasks) return NULL;
    node = ldns_rbtree_first(schedule->tasks);
    if (!node) return NULL;
    delnode = ldns_rbtree_delete(schedule->tasks, node->data);
    /* delnode == node, but we don't free it just yet, data is shared
     * with tasks_by_name tree */
    if (!delnode) return NULL;
    delnode = ldns_rbtree_delete(schedule->tasks_by_name, node->data);
    free(node);
    if (!delnode) return NULL;
    task = (task_type*) delnode->data;
    free(delnode); /* this delnode != node */
    set_alarm(schedule);
    return task;
}
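/*
 * Hedged usage sketch, not part of the original source: a wrapper that
 * honors the locking contract documented above. The name example_pop_task()
 * is hypothetical. The lock is held only around the pop; the returned task
 * is detached from both trees and can therefore be used without the lock.
 */
task_type*
example_pop_task(schedule_type* schedule)
{
    task_type* task;

    if (!schedule) return NULL;
    pthread_mutex_lock(&schedule->schedule_lock);
    task = pop_first_task(schedule);
    pthread_mutex_unlock(&schedule->schedule_lock);
    return task;
}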
/**
 * Flush all scheduled tasks of the given type: mark them for immediate
 * execution and move them to the front of the queue. Returns the number
 * of tasks flushed.
 */
int
schedule_flush_type(schedule_type* schedule, task_id id)
{
    ldns_rbnode_t *node, *nextnode;
    int nflushed = 0;

    ods_log_debug("[%s] flush task", schedule_str);
    if (!schedule || !schedule->tasks) return 0;

    pthread_mutex_lock(&schedule->schedule_lock);
    node = ldns_rbtree_first(schedule->tasks);
    while (node && node != LDNS_RBTREE_NULL) {
        nextnode = ldns_rbtree_next(node);
        if (node->data && ((task_type*)node->data)->what == id) {
            /* Merely setting flush is not enough. We must set it
             * to the front of the queue as well. */
            node = ldns_rbtree_delete(schedule->tasks, node->data);
            if (!node) break; /* strange, bail out */
            if (node->data) { /* task */
                ((task_type*)node->data)->flush = 1;
                /* This is important for our tests only. If a task is
                 * set to flush it should not affect the current time.
                 * Otherwise timeleap will advance time. */
                ((task_type*)node->data)->when = time_now();
                if (!ldns_rbtree_insert(schedule->tasks, node)) {
                    ods_log_crit("[%s] Could not reschedule task "
                        "after flush. A task has been lost!", schedule_str);
                    free(node);
                    /* Do not free node->data; it is still in use
                     * by the other rbtree. */
                    break;
                }
                nflushed++;
            }
        }
        node = nextnode;
    }
    /* wakeup! work to do! */
    pthread_cond_signal(&schedule->schedule_cond);
    pthread_mutex_unlock(&schedule->schedule_lock);
    return nflushed;
}
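/*
 * Hedged usage sketch, not part of the original source: flush every
 * scheduled task of one type and report how many were moved to the front
 * of the queue. TASK_SIGNCONF is used purely as an illustrative task_id
 * value; substitute whichever id applies in your build.
 */
static void
example_flush_signconf(schedule_type* schedule)
{
    int nflushed = schedule_flush_type(schedule, TASK_SIGNCONF);
    ods_log_debug("[example] flushed %d tasks", nflushed);
}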
/**
 * Delete zone.
 *
 */
zone_type*
zonelist_del_zone(zonelist_type* zlist, zone_type* zone)
{
    ldns_rbnode_t* old_node = LDNS_RBTREE_NULL;

    if (!zone) {
        return NULL;
    }
    if (!zlist || !zlist->zones) {
        goto zone_not_present;
    }
    old_node = ldns_rbtree_delete(zlist->zones, zone);
    if (!old_node) {
        goto zone_not_present;
    }
    free((void*) old_node);
    return zone;

zone_not_present:
    ods_log_warning("[%s] unable to delete zone %s: not present",
        zl_str, zone->name);
    return zone;
}
/**
 * Schedule a task. If a task with the same name is already scheduled,
 * the existing task is updated instead and 'task' is cleaned up.
 */
ods_status
schedule_task(schedule_type* schedule, task_type* task)
{
    ldns_rbnode_t *node1, *node2;
    ods_status status;
    task_type* task2;

    if (!task) {
        ods_log_error("[%s] unable to schedule task: no task",
            schedule_str);
        return ODS_STATUS_ERR;
    }
    task->flush = 0;
    if (!schedule || !schedule->tasks) {
        ods_log_error("[%s] unable to schedule task: no schedule",
            schedule_str);
        return ODS_STATUS_ERR;
    }
    ods_log_debug("[%s] schedule task [%s] for %s", schedule_str,
        task_what2str(task->what), task_who2str(task->who));

    pthread_mutex_lock(&schedule->schedule_lock);
    status = ODS_STATUS_ERR;
    if ((node1 = task2node(task))) {
        if (ldns_rbtree_insert(schedule->tasks_by_name, node1)) {
            if ((node2 = task2node(task))) {
                if (ldns_rbtree_insert(schedule->tasks, node2)) {
                    /* success inserting in two trees */
                    set_alarm(schedule);
                    status = ODS_STATUS_OK;
                } else {
                    /* insert in tasks tree failed */
                    ods_log_error("[%s] unable to schedule task [%s] for %s: "
                        "already present", schedule_str,
                        task_what2str(task->what), task_who2str(task->who));
                    /* delete by key; this will free node1 */
                    free(ldns_rbtree_delete(schedule->tasks_by_name, node1->key));
                    free(node2);
                }
            } else {
                /* could not alloc node2 */
                /* delete by key; this will free node1 */
                free(ldns_rbtree_delete(schedule->tasks_by_name, node1->key));
            }
        } else {
            /* insert in name tree failed */
            free(node1);
            /*
             * Task is already in the tasks_by_name queue, so we must
             * update it in the tasks queue.
             */
            /* still in lock, guaranteed to succeed */
            node1 = ldns_rbtree_search(schedule->tasks_by_name, task);
            /* This copy of 'task' is referenced by both trees */
            task2 = (task_type*) node1->key;
            node1 = ldns_rbtree_delete(schedule->tasks, task2);
            if (task->when < task2->when)
                task2->when = task->when;
            if (task2->context && task2->clean_context) {
                task2->clean_context(task2);
            }
            task2->context = task->context;
            task2->clean_context = task->clean_context;
            task->context = NULL;
            task_cleanup(task);
            (void) ldns_rbtree_insert(schedule->tasks, node1);
            /* node1 now owned by tree */
            node1 = NULL;
            set_alarm(schedule);
            status = ODS_STATUS_OK;
        }
    } /* else: task2node() failed, nothing was scheduled */
    pthread_mutex_unlock(&schedule->schedule_lock);
    return status;
}
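/*
 * Hedged usage sketch, not part of the original source: schedule a task
 * and check the result. How 'task' is created is project specific and
 * omitted here; on the duplicate path above the scheduler merges the new
 * task into the existing one and cleans up the argument itself.
 */
static void
example_schedule(schedule_type* schedule, task_type* task)
{
    if (schedule_task(schedule, task) != ODS_STATUS_OK) {
        ods_log_error("[example] unable to schedule task");
    }
}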
static ldns_rbnode_t *
MV_SHM_delete_virtaddress_node(ldns_rbtree_t *shm_root, shm_address_t *shm_node)
{
    /* flag the descriptor as removed from the virtual-address tree,
     * then detach it by its virtual-address key */
    shm_node->m_flag = SHM_VIRT_DELETE;
    return ldns_rbtree_delete(shm_root, shm_node->virt_node.key);
}
static ldns_rbnode_t *
MV_SHM_delete_phyaddress_node(ldns_rbtree_t *shm_root, shm_address_t *shm_node)
{
    /* flag the descriptor as removed from the physical-address tree,
     * then detach it by its physical-address key */
    shm_node->m_flag = SHM_PHYS_DELETE;
    return ldns_rbtree_delete(shm_root, shm_node->phys_node.key);
}
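/*
 * Hedged sketch, not part of the original source: remove a shared-memory
 * descriptor from both address-indexed trees. The function name and the
 * two tree parameters are hypothetical. The returned node pointers appear
 * to be the rbnodes embedded in shm_address_t (virt_node/phys_node), so
 * they are intentionally not freed here.
 */
static void
MV_SHM_detach_address_node(ldns_rbtree_t *virt_root, ldns_rbtree_t *phys_root,
    shm_address_t *shm_node)
{
    (void) MV_SHM_delete_virtaddress_node(virt_root, shm_node);
    (void) MV_SHM_delete_phyaddress_node(phys_root, shm_node);
}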