static int memc_min_pwr_update(struct kona_memc *kmemc, struct kona_memc_node *memc_node, int action) { u32 new_val; int ret = 0; spin_lock(&kmemc->memc_lock); switch (action) { case MEMC_NODE_ADD: plist_node_init(&memc_node->node, memc_node->min_pwr); plist_add(&memc_node->node, &kmemc->min_pwr_list); break; case MEMC_NODE_DEL: plist_del(&memc_node->node, &kmemc->min_pwr_list); break; case MEMC_NODE_UPDATE: plist_del(&memc_node->node, &kmemc->min_pwr_list); plist_node_init(&memc_node->node, memc_node->min_pwr); plist_add(&memc_node->node, &kmemc->min_pwr_list); break; default: BUG(); return -EINVAL; } new_val = plist_last(&kmemc->min_pwr_list)->prio; if (new_val != kmemc->active_min_pwr) { ret = memc_set_min_pwr(kmemc, new_val, MEMC_AP_MIN_PWR); if (!ret) kmemc->active_min_pwr = new_val; } spin_unlock(&kmemc->memc_lock); return ret; }
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   int detect_deadlock)
{
    struct task_struct *owner = rt_mutex_owner(lock);
    struct rt_mutex_waiter *top_waiter = waiter;
    unsigned long flags;
    int boost = 0, res;

    spin_lock_irqsave(&current->pi_lock, flags);
    __rt_mutex_adjust_prio(current);
    waiter->task = current;
    waiter->lock = lock;
    plist_node_init(&waiter->list_entry, current->prio);
    plist_node_init(&waiter->pi_list_entry, current->prio);

    /* Get the top priority waiter on the lock */
    if (rt_mutex_has_waiters(lock))
        top_waiter = rt_mutex_top_waiter(lock);
    plist_add(&waiter->list_entry, &lock->wait_list);

    current->pi_blocked_on = waiter;

    spin_unlock_irqrestore(&current->pi_lock, flags);

    if (waiter == rt_mutex_top_waiter(lock)) {
        spin_lock_irqsave(&owner->pi_lock, flags);
        plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
        plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

        __rt_mutex_adjust_prio(owner);
        if (owner->pi_blocked_on) {
            boost = 1;
            /* gets dropped in rt_mutex_adjust_prio_chain()! */
            get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
    }
    else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
        spin_lock_irqsave(&owner->pi_lock, flags);
        if (owner->pi_blocked_on) {
            boost = 1;
            /* gets dropped in rt_mutex_adjust_prio_chain()! */
            get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
    }
    if (!boost)
        return 0;

    spin_unlock(&lock->wait_lock);

    res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                     current);

    spin_lock(&lock->wait_lock);

    return res;
}
/* * Task blocks on lock. * * Prepare waiter and propagate pi chain * * This must be called with lock->wait_lock held. */ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, int detect_deadlock) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; unsigned long flags; int chain_walk = 0, res; raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; plist_node_init(&waiter->list_entry, task->prio); plist_node_init(&waiter->pi_list_entry, task->prio); /* Get the top priority waiter on the lock */ if (rt_mutex_has_waiters(lock)) top_waiter = rt_mutex_top_waiter(lock); plist_add(&waiter->list_entry, &lock->wait_list); task->pi_blocked_on = waiter; raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; if (!chain_walk) return 0; /* * The owner can't disappear while holding a lock, * so the owner struct is protected by wait_lock. * Gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); raw_spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, task); raw_spin_lock(&lock->wait_lock); return res; }
int pdict_add_persistent_change_listener(pdict_t *pd, const char *kpat,
    pdl_notify_func_t notify, void *arg)
{
    pdict_persistent_listener_t *pl;
    static int lid = 1;

    if (!(pl = malloc(sizeof (*pl))))
        return 0;
    memset(pl, 0, sizeof (*pl));
    pl->pdpl_l.pdl_notify = notify;
    pl->pdpl_l.pdl_arg = arg;
    if (regcomp(&pl->pdpl_regex, kpat, REG_EXTENDED | REG_NOSUB) != 0) {
        // XXX todo: communicate error context is not libc
        free(pl);
        pl = NULL;
        pu_log(PUL_WARN, 0,
            "Failed regcomp in pdict_add_persistent_change_listener.");
        return 0;
    }
    plist_add((void *)(size_t)lid, pl, &pd->pd_persistent_listeners);
    pl->pdpl_new = 1;
    if (!_pdict_walk_int(pd, pdict_ent_add_persistent_change_listener_dcb, pl)) {
        _pdict_walk_int(pd, pdict_ent_remove_persistent_change_listener_dcb, pl);
        plist_remove((void *)(size_t)lid, &pd->pd_persistent_listeners, NULL);
        regfree(&pl->pdpl_regex);
        free(pl);
        pl = NULL;
        pu_log(PUL_WARN, 0,
            "Failed _pdict_walk_int in pdict_add_persistent_change_listener.");
        return 0;
    }
    pl->pdpl_new = 0;
    return lid++;
}
void traillist_update(traillist tlist, vector2 p, float dt){
    tlist->col_timer += dt;
    if(tlist->trailtoggle == 1 && tlist->firsttrail == NULL){
        tlist->firsttrail = add_trail();
        plist_add(&tlist->firsttrail->t.draw_trail, p);
        plist_add(&tlist->firsttrail->t.col_trail, p);
    }
    trailnode* trail = tlist->firsttrail;
    trailnode* prev = NULL;
    if(tlist->trailtoggle){
        plist_add(&trail->t.draw_trail, p);
        if(tlist->col_timer > 0.1){
            plist_add(&trail->t.col_trail, p);
            tlist->col_timer = 0;
        }
        plist_head(&trail->t.col_trail, p);
    }
    /* TODO: once you find out that a trail is done,
     * free the trailnode and reorganize the list.
     */
    while(trail != NULL){
        if(plist_update(trail->t.col_trail, dt)){
            trail->t.col_trail = NULL;
        }
        if(plist_update(trail->t.draw_trail, dt)){
            if(prev == NULL){
                tlist->firsttrail = NULL;
                return;
            }
            trail->t.draw_trail = NULL;
            trailnode* tmp = trail;
            prev->next = trail->next;
            trail = trail->next;
            free(tmp);
        }
        else{
            prev = trail;
            trail = trail->next;
        }
    }
}
/* * Optimization: check if we can steal the lock from the * assigned pending owner [which might not have taken the * lock yet]: */ static inline int try_to_steal_lock(struct rt_mutex *lock, struct task_struct *task) { struct task_struct *pendowner = rt_mutex_owner(lock); struct rt_mutex_waiter *next; unsigned long flags; if (!rt_mutex_owner_pending(lock)) return 0; if (pendowner == task) return 1; raw_spin_lock_irqsave(&pendowner->pi_lock, flags); if (task->prio >= pendowner->prio) { raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); return 0; } /* * Check if a waiter is enqueued on the pending owners * pi_waiters list. Remove it and readjust pending owners * priority. */ if (likely(!rt_mutex_has_waiters(lock))) { raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); return 1; } /* No chain handling, pending owner is not blocked on anything: */ next = rt_mutex_top_waiter(lock); plist_del(&next->pi_list_entry, &pendowner->pi_waiters); __rt_mutex_adjust_prio(pendowner); raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); /* * We are going to steal the lock and a waiter was * enqueued on the pending owners pi_waiters queue. So * we have to enqueue this waiter into * task->pi_waiters list. This covers the case, * where task is boosted because it holds another * lock and gets unboosted because the booster is * interrupted, so we would delay a waiter with higher * priority as task->normal_prio. * * Note: in the rare case of a SCHED_OTHER task changing * its priority and thus stealing the lock, next->task * might be task: */ if (likely(next->task != task)) { raw_spin_lock_irqsave(&task->pi_lock, flags); plist_add(&next->pi_list_entry, &task->pi_waiters); __rt_mutex_adjust_prio(task); raw_spin_unlock_irqrestore(&task->pi_lock, flags); } return 1; }
static int add_to_wa_list(const char *k, const char *v, void *arg) { wa_t *wa = arg; if (regexec(&wa->wa_regex, k, 0, NULL, 0) == 0) return plist_add((void *)k, (void *)v, &wa->wa_l); return 1; }
/* Read the content of the directory, make an array of absolute paths for * all recognized files. Put directories, playlists and sound files * in proper structures. Return 0 on error.*/ int read_directory (const char *directory, struct file_list *dirs, struct file_list *playlists, struct plist *plist) { DIR *dir; struct dirent *entry; int show_hidden = options_get_int ("ShowHiddenFiles"); int dir_is_root; assert (directory != NULL); assert (*directory == '/'); assert (dirs != NULL); assert (playlists != NULL); assert (plist != NULL); if (!(dir = opendir(directory))) { error ("Can't read directory: %s", strerror(errno)); return 0; } if (!strcmp(directory, "/")) dir_is_root = 1; else dir_is_root = 0; while ((entry = readdir(dir))) { char file[PATH_MAX]; enum file_type type; if (user_wants_interrupt()) { error ("Interrupted! Not all files read!"); break; } if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) continue; if (!show_hidden && entry->d_name[0] == '.') continue; if (snprintf(file, sizeof(file), "%s/%s", dir_is_root ? "" : directory, entry->d_name) >= (int)sizeof(file)) { error ("Path too long!"); return 0; } type = file_type (file); if (type == F_SOUND) plist_add (plist, file); else if (type == F_DIR) file_list_add (dirs, file); else if (type == F_PLAYLIST) file_list_add (playlists, file); } closedir (dir); return 1; }
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) { plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); plist_node_init(&p->pushable_tasks, p->prio); plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); /* Update the highest prio pushable task */ if (p->prio < rq->rt.highest_prio.next) rq->rt.highest_prio.next = p->prio; }
/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter,
                          unsigned long flags)
{
    int first = (waiter == rt_mutex_top_waiter(lock));
    struct task_struct *owner = rt_mutex_owner(lock);
    int chain_walk = 0;

    raw_spin_lock(&current->pi_lock);
    plist_del(&waiter->list_entry, &lock->wait_list);
    current->pi_blocked_on = NULL;
    raw_spin_unlock(&current->pi_lock);

    if (!owner) {
        BUG_ON(first);
        return;
    }

    if (first) {
        raw_spin_lock(&owner->pi_lock);

        plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

        if (rt_mutex_has_waiters(lock)) {
            struct rt_mutex_waiter *next;

            next = rt_mutex_top_waiter(lock);
            plist_add(&next->pi_list_entry, &owner->pi_waiters);
        }
        __rt_mutex_adjust_prio(owner);

        if (rt_mutex_real_waiter(owner->pi_blocked_on))
            chain_walk = 1;

        raw_spin_unlock(&owner->pi_lock);
    }

    WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

    if (!chain_walk)
        return;

    /* gets dropped in rt_mutex_adjust_prio_chain()! */
    get_task_struct(owner);

    raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

    rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

    raw_spin_lock_irq(&lock->wait_lock);
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock, int savestate)
{
    struct rt_mutex_waiter *waiter;
    struct task_struct *pendowner;

    spin_lock(&current->pi_lock);

    waiter = rt_mutex_top_waiter(lock);
    plist_del(&waiter->list_entry, &lock->wait_list);

    /*
     * Remove it from current->pi_waiters. We do not adjust a
     * possible priority boost right now. We execute wakeup in the
     * boosted mode and go back to normal after releasing
     * lock->wait_lock.
     */
    plist_del(&waiter->pi_list_entry, &current->pi_waiters);
    pendowner = waiter->task;
    waiter->task = NULL;

    rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

    spin_unlock(&current->pi_lock);

    /*
     * Clear the pi_blocked_on variable and enqueue a possible
     * waiter into the pi_waiters list of the pending owner. This
     * prevents the case where the pending owner gets unboosted
     * while a waiter with a higher priority than
     * pending-owner->normal_prio is still blocked on it.
     */
    spin_lock(&pendowner->pi_lock);

    WARN_ON(!pendowner->pi_blocked_on);
    WARN_ON(pendowner->pi_blocked_on != waiter);
    WARN_ON(pendowner->pi_blocked_on->lock != lock);

    pendowner->pi_blocked_on = NULL;

    if (rt_mutex_has_waiters(lock)) {
        struct rt_mutex_waiter *next;

        next = rt_mutex_top_waiter(lock);
        plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
    }
    spin_unlock(&pendowner->pi_lock);

    if (savestate)
        wake_up_process_mutex(pendowner);
    else
        wake_up_process(pendowner);
}
/* Copy the item to the playlist. Return the index of the added item. */ int plist_add_from_item (struct plist *plist, const struct plist_item *item) { int pos = plist_add (plist, item->file); plist_item_copy (&plist->items[pos], item); if (item->tags && item->tags->time != -1) { plist->total_time += item->tags->time; plist->items_with_time++; } return pos; }
void emit_frame(emit_desc* frame) { for (int i = 0; i < frame->n; ++i) { particle* p = NULL; float r = randfloat(frame->r - frame->ur, frame->r + frame->ur); float g = randfloat(frame->g - frame->ug, frame->g + frame->ug); float b = randfloat(frame->b - frame->ub, frame->b + frame->ub); pextra* pe = new_pextra(r, g, b, frame->blender); p = particle_new_full(frame->x, frame->y, frame->ux, frame->uy, frame->rad, frame->urad, frame->ds, frame->uds, frame->theta, frame->utheta, frame->life, frame->ulife, frame->force, frame->limit, pe); plist_add(emitter.particles, p); } }
/** * pm_qos_update_target - manages the constraints list and calls the notifiers * if needed * @c: constraints data struct * @node: request to add to the list, to update or to remove * @action: action to take on the constraints list * @value: value of the request to add or update * * This function returns 1 if the aggregated constraint value has changed, 0 * otherwise. */ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value) { unsigned long flags; int prev_value, curr_value, new_value; spin_lock_irqsave(&pm_qos_lock, flags); prev_value = pm_qos_get_value(c); if (value == PM_QOS_DEFAULT_VALUE) new_value = c->default_value; else new_value = value; switch (action) { case PM_QOS_REMOVE_REQ: plist_del(node, &c->list); break; case PM_QOS_UPDATE_REQ: /* * to change the list, we atomically remove, reinit * with new value and add, then see if the extremal * changed */ plist_del(node, &c->list); case PM_QOS_ADD_REQ: plist_node_init(node, new_value); plist_add(node, &c->list); break; default: /* no action */ ; } curr_value = pm_qos_get_value(c); pm_qos_set_value(c, curr_value); spin_unlock_irqrestore(&pm_qos_lock, flags); if (prev_value != curr_value) { blocking_notifier_call_chain(c->notifiers, (unsigned long)curr_value, NULL); return 1; } else { return 0; } }
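The PM QoS function above and memc_min_pwr_update() earlier follow the same pattern: every outstanding request sits in a priority-sorted list, a request's value is changed by taking it off the list and re-adding it, and the notifier (or hardware register) is only touched when the aggregated extreme actually changes. The user-space sketch below restates that pattern with a plain sorted list; all names in it (qos_list, qos_update, and so on) are made up for illustration and none of it is kernel API.

/*
 * Illustrative sketch only (not kernel code): a minimal user-space analogue
 * of the "aggregate the extreme of outstanding requests" pattern.
 */
#include <stdio.h>
#include <stddef.h>

struct qos_node {
    int value;
    struct qos_node *next;
};

struct qos_list {
    struct qos_node *head;   /* kept sorted, largest value first */
    int active;              /* last value that was "applied" */
};

/* Insert so that the list stays sorted in descending order. */
static void qos_add(struct qos_list *l, struct qos_node *n)
{
    struct qos_node **p = &l->head;

    while (*p && (*p)->value > n->value)
        p = &(*p)->next;
    n->next = *p;
    *p = n;
}

static void qos_del(struct qos_list *l, struct qos_node *n)
{
    struct qos_node **p = &l->head;

    while (*p && *p != n)
        p = &(*p)->next;
    if (*p)
        *p = n->next;
}

/* Add, remove or re-queue a request, then apply the new maximum if it changed. */
static void qos_update(struct qos_list *l, struct qos_node *n, int remove, int value)
{
    int curr;

    qos_del(l, n);             /* like plist_del(): a node's value may only */
    if (!remove) {             /* change while it is off the list           */
        n->value = value;
        qos_add(l, n);         /* like plist_node_init() + plist_add()      */
    }

    curr = l->head ? l->head->value : 0;
    if (curr != l->active) {
        l->active = curr;
        printf("aggregate changed -> %d\n", curr);  /* stands in for the notifier */
    }
}

int main(void)
{
    struct qos_list l = { NULL, 0 };
    struct qos_node a = { 0, NULL }, b = { 0, NULL };

    qos_update(&l, &a, 0, 10);   /* add:    aggregate becomes 10 */
    qos_update(&l, &b, 0, 30);   /* add:    aggregate becomes 30 */
    qos_update(&l, &b, 0, 5);    /* update: aggregate drops to 10 */
    qos_update(&l, &a, 1, 0);    /* remove: aggregate drops to 5 */
    return 0;
}

The remove-then-reinsert step is the reason PM_QOS_UPDATE_REQ (and MEMC_NODE_UPDATE above) cannot simply overwrite the priority in place: a sorted list only stays sorted if a node's key changes while it is off the list.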
/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
    int first = (waiter == rt_mutex_top_waiter(lock));
    struct task_struct *owner = rt_mutex_owner(lock);
    unsigned long flags;
    int boost = 0;

    spin_lock_irqsave(&current->pi_lock, flags);
    plist_del(&waiter->list_entry, &lock->wait_list);
    waiter->task = NULL;
    current->pi_blocked_on = NULL;
    spin_unlock_irqrestore(&current->pi_lock, flags);

    if (first && owner != current) {
        spin_lock_irqsave(&owner->pi_lock, flags);

        plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

        if (rt_mutex_has_waiters(lock)) {
            struct rt_mutex_waiter *next;

            next = rt_mutex_top_waiter(lock);
            plist_add(&next->pi_list_entry, &owner->pi_waiters);
        }
        __rt_mutex_adjust_prio(owner);

        if (owner->pi_blocked_on) {
            boost = 1;
            /* gets dropped in rt_mutex_adjust_prio_chain()! */
            get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
    }

    WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

    if (!boost)
        return;

    spin_unlock(&lock->wait_lock);

    rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

    spin_lock(&lock->wait_lock);
}
static int _pdict_ent_add_change_listener(pdict_ent_t *pde, pdl_notify_func_t notify, void *arg) { pdict_listener_t *l; if (!(l = malloc(sizeof (*l)))) return 0; memset(l, 0, sizeof (*l)); l->pdl_notify = notify; l->pdl_arg = arg; if (!plist_add(l, 0, &pde->pde_listeners)) { free(l); l = NULL; pu_log(PUL_WARN, 0, "Failed plist_add in _pdict_ent_add_change_listener."); return 0; } return 1; }
HNode* hlist_add(HList* hlist, const char* name, const void* obj) { if (!hlist) { return 0; } HNode* n = hlist_lookup(hlist, name, HEADER_TYPE_NONE, 0, 0); if (!n) { Header* h = header_lookup_standard(HEADER_TYPE_NONE, name); if (!h) { h = header_create(name); } hlist_grow(hlist); n = &hlist->data[hlist->ulen++]; n->header = h; n->values = plist_create(); HLIST_FLAG_CLR(hlist, HLIST_FLAGS_SORTED); } plist_add(n->values, obj); GLOG(("=C= Added [%s] => %p (%d)", name, obj, n->header->order)); return n; }
/* * Task blocks on lock. * * Prepare waiter and propagate pi chain * * This must be called with lock->wait_lock held. */ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, int detect_deadlock) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; unsigned long flags; int chain_walk = 0, res; /* * Early deadlock detection. We really don't want the task to * enqueue on itself just to untangle the mess later. It's not * only an optimization. We drop the locks, so another waiter * can come in before the chain walk detects the deadlock. So * the other will detect the deadlock and return -EDEADLOCK, * which is wrong, as the other waiter is not in a deadlock * situation. */ if (detect_deadlock && owner == task) return -EDEADLK; raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; plist_node_init(&waiter->list_entry, task->prio); plist_node_init(&waiter->pi_list_entry, task->prio); /* Get the top priority waiter on the lock */ if (rt_mutex_has_waiters(lock)) top_waiter = rt_mutex_top_waiter(lock); plist_add(&waiter->list_entry, &lock->wait_list); task->pi_blocked_on = waiter; raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!owner) return 0; if (waiter == rt_mutex_top_waiter(lock)) { raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; if (!chain_walk) return 0; /* * The owner can't disappear while holding a lock, * so the owner struct is protected by wait_lock. * Gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); raw_spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, task); raw_spin_lock(&lock->wait_lock); return res; }
/* * Try to take an rt-mutex * * Must be called with lock->wait_lock held. * * @lock: the lock to be acquired. * @task: the task which wants to acquire the lock * @waiter: the waiter that is queued to the lock's wait list. (could be NULL) */ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, struct rt_mutex_waiter *waiter) { /* * We have to be careful here if the atomic speedups are * enabled, such that, when * - no other waiter is on the lock * - the lock has been released since we did the cmpxchg * the lock can be released or taken while we are doing the * checks and marking the lock with RT_MUTEX_HAS_WAITERS. * * The atomic acquire/release aware variant of * mark_rt_mutex_waiters uses a cmpxchg loop. After setting * the WAITERS bit, the atomic release / acquire can not * happen anymore and lock->wait_lock protects us from the * non-atomic case. * * Note, that this might set lock->owner = * RT_MUTEX_HAS_WAITERS in the case the lock is not contended * any more. This is fixed up when we take the ownership. * This is the transitional state explained at the top of this file. */ mark_rt_mutex_waiters(lock); if (rt_mutex_owner(lock)) return 0; /* * It will get the lock because of one of these conditions: * 1) there is no waiter * 2) higher priority than waiters * 3) it is top waiter */ if (rt_mutex_has_waiters(lock)) { if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { if (!waiter || waiter != rt_mutex_top_waiter(lock)) return 0; } } if (waiter || rt_mutex_has_waiters(lock)) { unsigned long flags; struct rt_mutex_waiter *top; raw_spin_lock_irqsave(&task->pi_lock, flags); /* remove the queued waiter. */ if (waiter) { plist_del(&waiter->list_entry, &lock->wait_list); task->pi_blocked_on = NULL; } /* * We have to enqueue the top waiter(if it exists) into * task->pi_waiters list. */ if (rt_mutex_has_waiters(lock)) { top = rt_mutex_top_waiter(lock); top->pi_list_entry.prio = top->list_entry.prio; plist_add(&top->pi_list_entry, &task->pi_waiters); } raw_spin_unlock_irqrestore(&task->pi_lock, flags); } /* We got the lock. */ debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, task); rt_mutex_deadlock_account_lock(lock, task); return 1; }
/* * Adjust the priority chain. Also used for deadlock detection. * Decreases task's usage by one - may thus free the task. * Returns 0 or -EDEADLK. */ static int rt_mutex_adjust_prio_chain(struct task_struct *task, int deadlock_detect, struct rt_mutex *orig_lock, struct rt_mutex_waiter *orig_waiter, struct task_struct *top_task) { struct rt_mutex *lock; struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; int detect_deadlock, ret = 0, depth = 0; unsigned long flags; detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter, deadlock_detect); /* * The (de)boosting is a step by step approach with a lot of * pitfalls. We want this to be preemptible and we want hold a * maximum of two locks per step. So we have to check * carefully whether things change under us. */ again: if (++depth > max_lock_depth) { static int prev_max; /* * Print this only once. If the admin changes the limit, * print a new message when reaching the limit again. */ if (prev_max != max_lock_depth) { prev_max = max_lock_depth; printk(KERN_WARNING "Maximum lock depth %d reached " "task: %s (%d)\n", max_lock_depth, top_task->comm, task_pid_nr(top_task)); } put_task_struct(task); return deadlock_detect ? -EDEADLK : 0; } retry: /* * Task can not go away as we did a get_task() before ! */ raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; /* * Check whether the end of the boosting chain has been * reached or the state of the chain has changed while we * dropped the locks. */ if (!waiter) goto out_unlock_pi; /* * Check the orig_waiter state. After we dropped the locks, * the previous owner of the lock might have released the lock. */ if (orig_waiter && !rt_mutex_owner(orig_lock)) goto out_unlock_pi; /* * Drop out, when the task has no waiters. Note, * top_waiter can be NULL, when we are in the deboosting * mode! */ if (top_waiter && (!task_has_pi_waiters(task) || top_waiter != task_top_pi_waiter(task))) goto out_unlock_pi; /* * When deadlock detection is off then we check, if further * priority adjustment is necessary. */ if (!detect_deadlock && waiter->list_entry.prio == task->prio) goto out_unlock_pi; lock = waiter->lock; if (!raw_spin_trylock(&lock->wait_lock)) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); cpu_relax(); goto retry; } /* Deadlock detection */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); raw_spin_unlock(&lock->wait_lock); ret = deadlock_detect ? -EDEADLK : 0; goto out_unlock_pi; } top_waiter = rt_mutex_top_waiter(lock); /* Requeue the waiter */ plist_del(&waiter->list_entry, &lock->wait_list); waiter->list_entry.prio = task->prio; plist_add(&waiter->list_entry, &lock->wait_list); /* Release the task */ raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!rt_mutex_owner(lock)) { /* * If the requeue above changed the top waiter, then we need * to wake the new top waiter up to try to get the lock. 
*/ if (top_waiter != rt_mutex_top_waiter(lock)) wake_up_process(rt_mutex_top_waiter(lock)->task); raw_spin_unlock(&lock->wait_lock); goto out_put_task; } put_task_struct(task); /* Grab the next task */ task = rt_mutex_owner(lock); get_task_struct(task); raw_spin_lock_irqsave(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { /* Boost the owner */ plist_del(&top_waiter->pi_list_entry, &task->pi_waiters); waiter->pi_list_entry.prio = waiter->list_entry.prio; plist_add(&waiter->pi_list_entry, &task->pi_waiters); __rt_mutex_adjust_prio(task); } else if (top_waiter == waiter) { /* Deboost the owner */ plist_del(&waiter->pi_list_entry, &task->pi_waiters); waiter = rt_mutex_top_waiter(lock); waiter->pi_list_entry.prio = waiter->list_entry.prio; plist_add(&waiter->pi_list_entry, &task->pi_waiters); __rt_mutex_adjust_prio(task); } raw_spin_unlock_irqrestore(&task->pi_lock, flags); top_waiter = rt_mutex_top_waiter(lock); raw_spin_unlock(&lock->wait_lock); if (!detect_deadlock && waiter != top_waiter) goto out_put_task; goto again; out_unlock_pi: raw_spin_unlock_irqrestore(&task->pi_lock, flags); out_put_task: put_task_struct(task); return ret; }
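The requeue and boost/deboost steps in the chain walk above all reduce to one rule: the owner of a contended rt-mutex runs at the priority of its most urgent waiter whenever that waiter is more urgent than the owner itself. The sketch below restates that rule in isolation; the function name and parameters are illustrative only, not the kernel's API (in the kernel this is what rt_mutex_getprio() computes for __rt_mutex_adjust_prio()).

/*
 * Illustrative only: lower numeric value means higher priority, as in the
 * kernel.
 */
#include <stdio.h>

static int effective_prio(int normal_prio, int has_pi_waiters, int top_pi_waiter_prio)
{
    if (has_pi_waiters && top_pi_waiter_prio < normal_prio)
        return top_pi_waiter_prio;   /* boosted to the top waiter's priority */
    return normal_prio;              /* unboosted */
}

int main(void)
{
    /* owner at prio 120 with a prio-10 waiter queued -> boosted to 10 */
    printf("%d\n", effective_prio(120, 1, 10));
    /* the waiter goes away again -> back to 120 (deboost) */
    printf("%d\n", effective_prio(120, 0, 0));
    return 0;
}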
void init(char *str)
{
    transceiver_command_t tcmd;
    msg_t m;
    uint8_t chan = RADIO_CHANNEL;
    char command;

    int res = sscanf(str, "init %c", &command);

    if (res < 1) {
        printf("Usage: init (r|n)\n");
        printf("\tr\tinitialize as root\n");
        printf("\tn\tinitialize as node router\n");
    }

    uint8_t state;

    if ((command == 'n') || (command == 'r')) {
        printf("INFO: Initialize as %s on address %d\n",
               ((command == 'n') ? "node" : "root"), id);
        if (!id || (id > 255)) {
            printf("ERROR: address not a valid 8 bit integer\n");
            return;
        }
        state = rpl_init(TRANSCEIVER, id);
        if (state != SIXLOWERROR_SUCCESS) {
            printf("Error initializing RPL\n");
        } else {
            puts("6LoWPAN and RPL initialized.");
        }
        if (command == 'r') {
            rpl_init_root();
            is_root = 1;
        } else {
            ipv6_iface_set_routing_provider(rpl_get_next_hop);
        }
        int monitor_pid = thread_create(monitor_stack_buffer, MONITOR_STACK_SIZE,
                                        PRIORITY_MAIN-2, CREATE_STACKTEST,
                                        monitor, "monitor");
        transceiver_register(TRANSCEIVER, monitor_pid);
        ipv6_register_packet_handler(monitor_pid);
        //sixlowpan_lowpan_register(monitor_pid);
    } else {
        printf("ERROR: Unknown command '%c'\n", command);
        return;
    }

    /* TODO: check if this works as intended */
    ipv6_addr_t prefix, tmp;
    ipv6_addr_init(&std_addr, 0xABCD, 0xEF12, 0, 0, 0x1034, 0x00FF, 0xFE00, id);
    ipv6_addr_init_prefix(&prefix, &std_addr, 64);
    plist_add(&prefix, 64, NDP_OPT_PI_VLIFETIME_INFINITE, 0, 1,
              ICMPV6_NDP_OPT_PI_FLAG_AUTONOM);
    ipv6_init_iface_as_router();
    /* add global address */
    ipv6_addr_set_by_eui64(&tmp, &std_addr);
    ipv6_iface_add_addr(&tmp, IPV6_ADDR_TYPE_GLOBAL, NDP_ADDR_STATE_PREFERRED, 0, 0);

    /* set channel to 10 */
    tcmd.transceivers = TRANSCEIVER;
    tcmd.data = &chan;
    m.type = SET_CHANNEL;
    m.content.ptr = (void *) &tcmd;
    msg_send_receive(&m, &m, transceiver_pid);
    printf("Channel set to %u\n", RADIO_CHANNEL);

    destiny_init_transport_layer();
    puts("Destiny initialized");
    /* start transceiver watchdog */
}
/* Recursively add files from the directory to the playlist. * Return 1 if OK (and even some errors), 0 if the user interrupted. */ static int read_directory_recurr_internal (const char *directory, struct plist *plist, ino_t **dir_stack, int *depth) { DIR *dir; struct dirent *entry; struct stat st; if (stat(directory, &st)) { error ("Can't stat %s: %s", directory, strerror(errno)); return 0; } assert (plist != NULL); assert (directory != NULL); if (*dir_stack && dir_symlink_loop(st.st_ino, *dir_stack, *depth)) { logit ("Detected symlink loop on %s", directory); return 1; } if (!(dir = opendir(directory))) { error ("Can't read directory: %s", strerror(errno)); return 1; } (*depth)++; *dir_stack = (ino_t *)xrealloc (*dir_stack, sizeof(ino_t) * (*depth)); (*dir_stack)[*depth - 1] = st.st_ino; while ((entry = readdir(dir))) { char file[PATH_MAX]; enum file_type type; if (user_wants_interrupt()) { error ("Interrupted! Not all files read!"); break; } if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) continue; if (snprintf(file, sizeof(file), "%s/%s", directory, entry->d_name) >= (int)sizeof(file)) { error ("Path too long!"); continue; } type = file_type (file); if (type == F_DIR) read_directory_recurr_internal(file, plist, dir_stack, depth); else if (type == F_SOUND && plist_find_fname(plist, file) == -1) plist_add (plist, file); } (*depth)--; *dir_stack = (ino_t *)xrealloc (*dir_stack, sizeof(ino_t) * (*depth)); closedir (dir); return 1; }
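read_directory_recurr_internal() relies on dir_symlink_loop() to cut off recursion through symlinked directories; that helper is not included in this collection. Below is a hypothetical sketch of the kind of check it is assumed to perform, based only on how it is called above: the inode of every directory on the current descent path is kept in dir_stack, so revisiting an inode means a symlink cycle.

/* Hypothetical sketch, not MOC's actual implementation. */
#include <sys/types.h>

static int dir_symlink_loop_sketch (ino_t inode, const ino_t *dir_stack,
        int depth)
{
    int i;

    for (i = 0; i < depth; i++)
        if (dir_stack[i] == inode)
            return 1;   /* already on the path: following it would loop */

    return 0;
}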
/* Load M3U file into plist. Return the number of items read. */ static int plist_load_m3u (struct plist *plist, const char *fname, const char *cwd, const int load_serial) { FILE *file; char *line; int last_added = -1; int after_extinf = 0; int added = 0; if (!(file = fopen(fname, "r"))) { error ("Can't open playlist file: %s", strerror(errno)); return 0; } if (flock(fileno(file), LOCK_SH) == -1) logit ("Can't flock() the playlist file: %s", strerror(errno)); while ((line = read_line(file))) { if (!strncmp(line, "#EXTINF:", sizeof("#EXTINF:")-1)) { char *comma; char *num_err; char time_text[10] = ""; int time_sec; if (after_extinf) { error ("Broken M3U file: double " "#EXTINF."); free (line); plist_delete (plist, last_added); return added; } /* Find the comma */ comma = strchr (line + (sizeof("#EXTINF:") - 1), ','); if (!comma) { error ("Broken M3U file: no comma " "in #EXTINF."); free (line); return added; } /* Get the time string */ time_text[sizeof(time_text)-1] = 0; strncpy (time_text, line + sizeof("#EXTINF:") - 1, MIN(comma - line - (sizeof("#EXTINF:") - 1), sizeof(time_text))); if (time_text[sizeof(time_text)-1]) { error ("Broken M3U file: " "wrong time."); free (line); return added; } /* Extract the time */ time_sec = strtol (time_text, &num_err, 10); if (*num_err) { error ("Broken M3U file: " "time is not a number."); free (line); return added; } after_extinf = 1; last_added = plist_add (plist, NULL); plist_set_title_tags (plist, last_added, comma + 1); if (*time_text) plist_set_item_time (plist, last_added, time_sec); } else if (line[0] != '#') { char path[2*PATH_MAX]; strip_string (line); if (strlen(line) <= PATH_MAX) { make_path (path, sizeof(path), cwd, line); if (plist_find_fname(plist, path) == -1) { if (after_extinf) plist_set_file (plist, last_added, path); else plist_add (plist, path); added++; } else if (after_extinf) plist_delete (plist, last_added); } else if (after_extinf) plist_delete (plist, last_added); after_extinf = 0; } else if (load_serial && !strncmp(line, "#MOCSERIAL: ", sizeof("#MOCSERIAL: ") - 1)) { char *serial_str = line + sizeof("#MOCSERIAL: ") - 1; if (serial_str[0]) { char *err; long serial; serial = strtol (serial_str, &err, 0); if (!*err) { plist_set_serial (plist, serial); logit ("Got MOCSERIAL tag with serial %d", (int)serial); } } } free (line); } if (flock(fileno(file), LOCK_UN) == -1) logit ("Can't flock() (unlock) the playlist file: %s", strerror(errno)); fclose (file); return added; }
/* Load PLS file into plist. Return the number of items read. */ static int plist_load_pls (struct plist *plist, const char *fname, const char *cwd) { FILE *file; char *line; long i, nitems, added = 0; char *e; if (!(file = fopen(fname, "r"))) { error ("Can't open playlist file: %s", strerror(errno)); return 0; } line = read_ini_value (file, "playlist", "NumberOfEntries"); if (!line) { /* Assume that it is a pls file version 1 - plist_load_m3u() * should handle it like an m3u file without the m3u extensions. */ fclose (file); return plist_load_m3u (plist, fname, cwd, 0); } nitems = strtol (line, &e, 10); if (*e) { error ("Broken PLS file"); free (line); return 0; } free (line); for (i = 1; i <= nitems; i++) { char *pls_file, *pls_title, *pls_length; char key[16]; int time; int last_added; char path[2*PATH_MAX]; sprintf (key, "File%ld", i); if (!(pls_file = read_ini_value(file, "playlist", key))) { error ("Broken PLS file"); break; } sprintf (key, "Title%ld", i); pls_title = read_ini_value(file, "playlist", key); sprintf (key, "Length%ld", i); pls_length = read_ini_value(file, "playlist", key); if (pls_length) { time = strtol (pls_length, &e, 10); if (*e) time = -1; } else time = -1; if (strlen(pls_file) <= PATH_MAX) { make_path (path, sizeof(path), cwd, pls_file); if (plist_find_fname(plist, path) == -1) { last_added = plist_add (plist, path); if (pls_title && pls_title[0]) plist_set_title_tags (plist, last_added, pls_title); if (time > 0) { plist->items[last_added].tags = tags_new (); plist->items[last_added].tags->time = time; plist->items[last_added].tags->filled |= TAGS_TIME; } } } free (pls_file); if (pls_title) free (pls_title); if (pls_length) free (pls_length); added++; } fclose (file); return added; }
/* * Task blocks on lock. * * Prepare waiter and propagate pi chain * * This must be called with lock->wait_lock held. */ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, int detect_deadlock, unsigned long flags, int savestate) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; int chain_walk = 0, res; raw_spin_lock(&task->pi_lock); /* * In the case of futex requeue PI, this will be a proxy * lock. The task will wake unaware that it is enqueueed on * this lock. Avoid blocking on two locks and corrupting * pi_blocked_on via the PI_WAKEUP_INPROGRESS * flag. futex_wait_requeue_pi() sets this when it wakes up * before requeue (due to a signal or timeout). Do not enqueue * the task if PI_WAKEUP_INPROGRESS is set. */ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { raw_spin_unlock(&task->pi_lock); return -EAGAIN; } BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; waiter->savestate = savestate; plist_node_init(&waiter->list_entry, task->prio); plist_node_init(&waiter->pi_list_entry, task->prio); /* Get the top priority waiter on the lock */ if (rt_mutex_has_waiters(lock)) top_waiter = rt_mutex_top_waiter(lock); plist_add(&waiter->list_entry, &lock->wait_list); task->pi_blocked_on = waiter; raw_spin_unlock(&task->pi_lock); if (!owner) return 0; if (waiter == rt_mutex_top_waiter(lock)) { raw_spin_lock(&owner->pi_lock); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (rt_mutex_real_waiter(owner->pi_blocked_on)) chain_walk = 1; raw_spin_unlock(&owner->pi_lock); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; if (!chain_walk) return 0; /* * The owner can't disappear while holding a lock, * so the owner struct is protected by wait_lock. * Gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, task); raw_spin_lock_irq(&lock->wait_lock); return res; }
void pds_process_line(pds_session_t *pdss, char *line)
{
    regmatch_t pmatch[9];
    char *cmdtag = NULL;
    char *cmdstr = NULL;
    char *key = NULL;
    char *val = NULL;
    char *buf = NULL;
    char *listenid = NULL;
    int reportperiod;
    int forsession;
    int res;

    //DPRINT(line);

    if ((res = regexec(&setex, line, 7, pmatch, 0)) == 0) {
        getmatchsub(line, &cmdtag, pmatch, 1);
        if (cmdtag && strcmp(cmdtag, "unknown") == 0) {
            result(pdss, cmdtag, "304 unknown is a reserved command tag");
            free(cmdtag);
            cmdtag = NULL;
            return;
        }
        if (!getmatchsub(line, &key, pmatch, 2) || !key)
            goto fail;
        if (!getmatchsub(line, &val, pmatch, 5) && !getmatchsub(line, &val, pmatch, 4))
            goto fail;
        if (!val)
            goto fail;
        if ((forsession = getmatchsub(line, NULL, pmatch, 6)) > 0) {
            /* mark for expiration, but make sure it's only added once! */
            if(plist_walk(pdss->pdss_expire, is_key_in_list, key)) {
                char *e;
                if (!(e = strdup(key)))
                    goto fail;
                plist_add(e, NULL, &pdss->pdss_expire);
            }
        }
        pdss->pdss_pd_lock(pdss->pdss_pd_lock_arg);
        res = pdict_add(pdss->pdss_pd, key, val, NULL);
        pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg);
        if (!res)
            result(pdss, cmdtag, "304 set failed");
        else
            result(pdss, cmdtag, "200 set successful");
        if (cmdtag) {
            free(cmdtag);
            cmdtag = NULL;
        }
        free(key);
        key = NULL;
        free(val);
        val = NULL;
        return;
    }
    if ((res = regexec(&listenex, line, 6, pmatch, 0)) == 0) {
        notify_arg_t *nap;
        notify_arg_t na;

        getmatchsub(line, &cmdtag, pmatch, 1);
        if (cmdtag && strcmp(cmdtag, "unknown") == 0) {
            result(pdss, cmdtag, "304 unknown is a reserved command tag");
            free(cmdtag);
            cmdtag = NULL;
            return;
        }
        if ((!getmatchsub(line, &buf, pmatch, 4) && !getmatchsub(line, &buf, pmatch, 3)) || !buf)
            goto fail;
        getmatchsub(line, &listenid, pmatch, 5);
        na.na_pdss = pdss;
        na.na_listenid = listenid;
        pthread_mutex_lock(&pdss->pdss_lock);
        pdss->pdss_pd_lock(pdss->pdss_pd_lock_arg);
        if (!ptree_contains(&na, pdss->pdss_notify_args, nacmp, (void **)&nap)) {
            if (!(nap = malloc(sizeof (*nap)))) {
                pu_log(PUL_WARN, pdss->pdss_id, "insufficient memory");
                pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg);
                pthread_mutex_unlock(&pdss->pdss_lock);
                goto fail;
            }
            nap->na_pdss = pdss;
            nap->na_listenid = listenid;
            nap->na_pd_ids = NULL;
            if (!ptree_replace(nap, &pdss->pdss_notify_args, nacmp, NULL)) {
                free(nap);
                nap = NULL;
                pu_log(PUL_WARN, pdss->pdss_id, "insufficient memory");
                pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg);
                pthread_mutex_unlock(&pdss->pdss_lock);
                free(listenid);
                listenid = NULL;
                free(buf);
                buf = NULL;
                goto fail;
            }
        }
        if (!(res = pdict_add_persistent_change_listener(pdss->pdss_pd, buf, notify, (void *)nap))) {
            if (!nap->na_pd_ids) {
                ptree_remove(nap, &pdss->pdss_notify_args, nacmp, NULL);
                free((void *)nap->na_listenid);
                nap->na_listenid = NULL;
                free(nap);
                nap = NULL;
            }
            result(pdss, cmdtag, "303 listen not established--bad pattern?");
        } else {
            if (!plist_add((void *)res, NULL, &nap->na_pd_ids)) {
                pdict_remove_persistent_change_listener(pdss->pdss_pd, res);
                if (!nap->na_pd_ids) {
                    ptree_remove(nap, &pdss->pdss_notify_args, nacmp, NULL);
                    free((void *)nap->na_listenid);
                    nap->na_listenid = NULL;
                    free(nap);
                    nap = NULL;
                }
                free(buf);
                buf = NULL;
                if (cmdtag) {
                    free(cmdtag);
                    cmdtag = NULL;
                }
                pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg);
                pthread_mutex_unlock(&pdss->pdss_lock);
                goto fail;
            } else {
                result(pdss, cmdtag, "200 listening, id %s", listenid);
            }
        }
        pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg);
        pthread_mutex_unlock(&pdss->pdss_lock);
        free(buf);
        buf = NULL;
        if (cmdtag) {
            free(cmdtag);
            cmdtag = NULL;
        }
        return;
    }
    if ((res = regexec(&reportex, line, 5, pmatch, 0)) == 0) {
getmatchsub(line, &cmdtag, pmatch, 1); if (cmdtag && strcmp(cmdtag, "unknown") == 0) { result(pdss, cmdtag, "304 unknown is a reserved command tag"); free(cmdtag); cmdtag = NULL; return; } if (!getmatchsub(line, &buf, pmatch, 2) || !buf) goto fail; if ((reportperiod = atoi(buf)) < 0 || reportperiod > 10000) { result(pdss, cmdtag, "301 invalid report/wait period" "--specify milliseconds or 0 for off (max 10s)"); if (cmdtag) { free(cmdtag); cmdtag = NULL; } free(buf); buf = NULL; return; } free(buf); buf = NULL; getmatchsub(line, &buf, pmatch, 3); set_report_period(pdss, cmdtag, buf, reportperiod); if (cmdtag) { free(cmdtag); cmdtag = NULL; } if (buf) { free(buf); buf = NULL; } return; } if ((res = regexec(&waitex, line, 5, pmatch, 0)) == 0) { getmatchsub(line, &cmdtag, pmatch, 1); if (cmdtag && strcmp(cmdtag, "unknown") == 0) { result(pdss, cmdtag, "304 unknown is a reserved command tag"); free(cmdtag); cmdtag = NULL; return; } if (!getmatchsub(line, &buf, pmatch, 2) || !buf) goto fail; if (atoi(buf) < 0 || atoi(buf) > 10000) { result(pdss, cmdtag, "301 invalid wait period" "--specify milliseconds (max 10s)"); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } usleep(atoi(buf) * 1000); result(pdss, cmdtag, "200 nothin' doin'"); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } if ((res = regexec(&flushex, line, 5, pmatch, 0)) == 0) { getmatchsub(line, &cmdtag, pmatch, 1); if (cmdtag && strcmp(cmdtag, "unknown") == 0) { result(pdss, cmdtag, "304 unknown is a reserved command tag"); free(cmdtag); cmdtag = NULL; return; } pthread_mutex_lock(&pdss->pdss_lock); pdss->pdss_pd_lock(pdss->pdss_pd_lock_arg); _flush(pdss); pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg); pthread_mutex_unlock(&pdss->pdss_lock); result(pdss, cmdtag, "200 glug glug"); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } if ((res = regexec(&walkex, line, 5, pmatch, 0)) == 0) { wa_t wa; getmatchsub(line, &cmdtag, pmatch, 1); if (cmdtag && strcmp(cmdtag, "unknown") == 0) { result(pdss, cmdtag, "304 unknown is a reserved command tag"); free(cmdtag); cmdtag = NULL; return; } if (!getmatchsub(line, &cmdstr, pmatch, 2) || !cmdstr) goto fail; if (!getmatchsub(line, &buf, pmatch, 3) || !buf) { free(cmdstr); cmdstr = NULL; goto fail; } if ((regcomp(&wa.wa_regex, buf, REG_EXTENDED)) != 0) { free(cmdstr); cmdstr = NULL; free(buf); buf = NULL; result(pdss, cmdtag, "305 expression error"); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } wa.wa_pdss = pdss; wa.wa_cmdtag = cmdtag; wa.wa_l = NULL; pdss->pdss_pd_lock(pdss->pdss_pd_lock_arg); if (!pdict_walk(pdss->pdss_pd, add_to_wa_list, &wa)) { int e = errno; pu_log(PUL_WARN, pdss->pdss_id, "temporary failure: %s", strerror(e)); result(pdss, cmdtag, "300 temporary failure: %s", strerror(e)); } else { if (strcmp(cmdstr, "remove") == 0) plist_walk(wa.wa_l, remove_wa_list, &wa); else plist_walk(wa.wa_l, print_wa_list, &wa); result(pdss, cmdtag, "200 done"); } plist_clear(&wa.wa_l); pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg); regfree(&wa.wa_regex); if (cmdtag) { free(cmdtag); cmdtag = NULL; } free(cmdstr); cmdstr = NULL; free(buf); buf = NULL; return; } if ((res = regexec(&ignoreex, line, 5, pmatch, 0)) == 0) { notify_arg_t *nap; notify_arg_t na; void *arg[2]; int n; getmatchsub(line, &cmdtag, pmatch, 1); if (cmdtag && strcmp(cmdtag, "unknown") == 0) { result(pdss, cmdtag, "304 unknown is a reserved command tag"); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } if (!getmatchsub(line, &listenid, pmatch, 2) || !listenid) goto fail; pthread_mutex_lock(&pdss->pdss_lock); 
pdss->pdss_pd_lock(pdss->pdss_pd_lock_arg); na.na_pdss = pdss; na.na_listenid = listenid; if (!ptree_remove(&na, &pdss->pdss_notify_args, nacmp, (void **)&nap)) { pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg); pthread_mutex_unlock(&pdss->pdss_lock); result(pdss, cmdtag, "306 nonexistent key/id"); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } n = 0; plist_walk(nap->na_pd_ids, _count, &n); plist_walk(nap->na_pd_ids, remove_persistent_change_listener_cb, pdss->pdss_pd); arg[0] = (void *)nap->na_listenid; arg[1] = &pdss->pdss_pending; ptree_walk(pdss->pdss_pending, PTREE_POSTORDER, remove_pending_id, ipmcmp, arg); free((void *)nap->na_listenid); nap->na_listenid = NULL; plist_clear(&nap->na_pd_ids); free(nap); nap = NULL; assert(!ptree_contains(&na, pdss->pdss_notify_args, nacmp, NULL)); pdss->pdss_pd_unlock(pdss->pdss_pd_lock_arg); pthread_mutex_unlock(&pdss->pdss_lock); result(pdss, cmdtag, "200 %d listener%s ignored", n, n > 1 ? "s" : ""); if (cmdtag) { free(cmdtag); cmdtag = NULL; } free(listenid); return; } if ((res = regexec(&quitex, line, 5, pmatch, 0)) == 0) { getmatchsub(line, &cmdtag, pmatch, 1); result(pdss, cmdtag, "200 goodbye"); pdss->pdss_should_close = 1; pdss->pdss_close(pdss->pdss_wfd, NULL, 0); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } if ((res = regexec(&getidex, line, 5, pmatch, 0)) == 0) { getmatchsub(line, &cmdtag, pmatch, 1); result(pdss, cmdtag, "200 %d", pdss->pdss_id); if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } if ((res = regexec(&okex, line, 5, pmatch, 0)) == 0) { if (cmdtag) { free(cmdtag); cmdtag = NULL; } return; } result(pdss, NULL, "400 input unrecognized: %s", line); return; fail: result(pdss, cmdtag, "300 command failed: %s", strerror(errno)); if (cmdtag) free (cmdtag); }
/* Load M3U file into plist. Return the number of items read. */ static int plist_load_m3u (struct plist *plist, const char *fname, const char *cwd, const int load_serial) { FILE *file; char *line = NULL; int last_added = -1; int after_extinf = 0; int added = 0; struct flock read_lock = {.l_type = F_RDLCK, .l_whence = SEEK_SET}; file = fopen (fname, "r"); if (!file) { error_errno ("Can't open playlist file", errno); return 0; } /* Lock gets released by fclose(). */ if (fcntl (fileno (file), F_SETLKW, &read_lock) == -1) log_errno ("Can't lock the playlist file", errno); while ((line = read_line (file))) { if (!strncmp (line, "#EXTINF:", sizeof("#EXTINF:") - 1)) { char *comma, *num_err; char time_text[10] = ""; int time_sec; if (after_extinf) { error ("Broken M3U file: double #EXTINF!"); plist_delete (plist, last_added); goto err; } /* Find the comma */ comma = strchr (line + (sizeof("#EXTINF:") - 1), ','); if (!comma) { error ("Broken M3U file: no comma in #EXTINF!"); goto err; } /* Get the time string */ time_text[sizeof(time_text) - 1] = 0; strncpy (time_text, line + sizeof("#EXTINF:") - 1, MIN(comma - line - (sizeof("#EXTINF:") - 1), sizeof(time_text))); if (time_text[sizeof(time_text) - 1]) { error ("Broken M3U file: wrong time!"); goto err; } /* Extract the time. */ time_sec = strtol (time_text, &num_err, 10); if (*num_err) { error ("Broken M3U file: time is not a number!"); goto err; } after_extinf = 1; last_added = plist_add (plist, NULL); plist_set_title_tags (plist, last_added, comma + 1); if (*time_text) plist_set_item_time (plist, last_added, time_sec); } else if (line[0] != '#') { char path[2 * PATH_MAX]; strip_string (line); if (strlen (line) <= PATH_MAX) { make_path (path, sizeof(path), cwd, line); if (plist_find_fname (plist, path) == -1) { if (after_extinf) plist_set_file (plist, last_added, path); else plist_add (plist, path); added += 1; } else if (after_extinf) plist_delete (plist, last_added); } else if (after_extinf) plist_delete (plist, last_added); after_extinf = 0; } else if (load_serial && !strncmp (line, "#MOCSERIAL: ", sizeof("#MOCSERIAL: ") - 1)) { char *serial_str = line + sizeof("#MOCSERIAL: ") - 1; if (serial_str[0]) { char *err; long serial; serial = strtol (serial_str, &err, 0); if (!*err) { plist_set_serial (plist, serial); logit ("Got MOCSERIAL tag with serial %ld", serial); } } } free (line); } err: free (line); fclose (file); return added; }
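Both plist_load_m3u() variants above spend most of their error handling on the "#EXTINF:<seconds>,<title>" header. The stand-alone sketch below isolates just that parse; parse_extinf() is a hypothetical helper written for illustration, not part of MOC, and it simplifies the empty-time case that the real code handles via *time_text.

/* Hypothetical, self-contained sketch of the #EXTINF parsing shown above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_extinf (const char *line, int *time_sec, const char **title)
{
    const char *prefix = "#EXTINF:";
    size_t plen = strlen (prefix);
    char time_text[10];
    const char *comma;
    char *num_err;
    size_t len;

    if (strncmp (line, prefix, plen))
        return 0;                       /* not an EXTINF line */

    comma = strchr (line + plen, ',');
    if (!comma)
        return 0;                       /* broken: no comma */

    len = comma - (line + plen);
    if (len >= sizeof (time_text))
        return 0;                       /* broken: time field too long */
    memcpy (time_text, line + plen, len);
    time_text[len] = 0;

    *time_sec = strtol (time_text, &num_err, 10);
    if (*num_err)
        return 0;                       /* broken: time is not a number */

    *title = comma + 1;
    return 1;
}

int main (void)
{
    int t;
    const char *title;

    if (parse_extinf ("#EXTINF:123,Some Artist - Some Song", &t, &title))
        printf ("%d seconds, title: %s\n", t, title);
    return 0;
}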