/* Tear down a proxy connection poll: walk its connection lists, then
 * destroy the poll's mutex.
 *
 * NOTE(review): both list_for_each_safe loops below have EMPTY bodies —
 * the entries on ->active and ->reuse are iterated but never freed or
 * unlinked, which looks like a memory leak. Presumably each iteration
 * was meant to release the per-connection object; confirm whether that
 * cleanup was removed intentionally.
 *
 * Returns ret_ok unconditionally.
 */
ret_t cherokee_handler_proxy_poll_free (cherokee_handler_proxy_poll_t *poll) {
	cherokee_list_t *i, *j;

	/* Safe iteration (j holds the next node) would permit unlinking
	 * the current entry, but nothing is done here. */
	list_for_each_safe (i, j, &poll->active) {
	}
	list_for_each_safe (i, j, &poll->reuse) {
	}

	CHEROKEE_MUTEX_DESTROY (&poll->mutex);
	return ret_ok;
}
/* ************************************************************************** * FunctionName: get_queue_size * Description : get nodes count of queue * Input : l: buffer pointer; * Output : NA; * ReturnValue : NA; * Other : NA; ************************************************************************** */ inline int get_queue_size(struct list_head *l) { struct list_head *pos = NULL, *tmp = NULL; int size = 0; list_for_each_safe(pos, tmp, l) size++; return size; }
/* Final teardown of a ratbag device: detach the driver, destroy all
 * profiles, drop the udev and context references, unlink the device
 * from its owner's list, and free it.
 *
 * Safe to call with device == NULL (no-op). Statement order matters:
 * the driver's remove() runs before profiles are destroyed, and the
 * ratbag/data references are released only after the device is
 * unlinked.
 */
void ratbag_device_destroy(struct ratbag_device *device) {
	struct ratbag_profile *profile, *next;

	if (!device)
		return;

	/* if we get to the point where the device is destroyed, profiles,
	 * buttons, etc. are at a refcount of 0, so we can destroy
	 * everything */
	if (device->driver && device->driver->remove)
		device->driver->remove(device);

	/* safe walk: ratbag_profile_destroy presumably unlinks/frees the
	 * node being visited — hence the _safe variant */
	list_for_each_safe(profile, next, &device->profiles, link)
		ratbag_profile_destroy(profile);

	if (device->udev_device)
		udev_device_unref(device->udev_device);

	list_remove(&device->link);

	ratbag_unref(device->ratbag);
	ratbag_device_data_unref(device->data);

	free(device->name);
	free(device);
}
/** * blk_queue_invalidate_tags - invalidate all pending tags * @q: the request queue for the device * * Description: * Hardware conditions may dictate a need to stop all pending requests. * In this case, we will safely clear the block side of the tag queue and * readd all requests to the request queue in the right order. * * Notes: * queue lock must be held. **/ void blk_queue_invalidate_tags(struct request_queue *q) { struct list_head *tmp, *n; list_for_each_safe(tmp, n, &q->tag_busy_list) blk_requeue_request(q, list_entry_rq(tmp)); }
static void test_empty(void) { struct list_node *spos, *ssafe; struct node *pos, *safe; check(!slist_is_empty(&slist), "slist is empty but slist_is_empty returned false"); check(!list_is_empty(&list), "list is empty but list_is_empty returned false"); check(slist_head(&slist) != &slist, "slist is empty but slist_head returned non-self"); check(list_head(&list) != NULL, "list is empty but list_head returned non-NULL"); check(slist_tail(&slist) != &slist, "slist is empty but slist_tail returned non-self"); check(list_tail(&list) != NULL, "list is empty but list_tail returned non-NULL"); check_loop_never(slist_for_each(spos, &slist), "slist is empty, but slist_for_each looped"); check_loop_never(list_for_each(pos, &list), "list is empty, but list_for_each looped"); check_loop_never(slist_for_each_safe(spos, ssafe, &slist), "slist is empty, but slist_for_each_safe looped"); check_loop_never(list_for_each_safe(pos, safe, &list), "list is empty, but list_for_each_safe looped"); check_loop_never(slist_for_each_entry(pos, &slist, node), "slist is empty, but slist_for_each_entry looped"); check_loop_never(slist_for_each_entry_safe(pos, safe, &slist, node), "slist is empty, but slist_for_each-entry_safe looped"); }
/* Take an ATA-over-Ethernet device down: clear DEVFL_UP, fail all
 * queued frames and requests, and zero the advertised capacity.
 *
 * Ordering: frames on the per-bucket active lists and the retransmit
 * queue are failed first, then each target's window is reset, then the
 * partially-processed request (if any) and finally everything still
 * pending on the block queue.
 */
void aoedev_downdev(struct aoedev *d) {
	struct aoetgt *t, **tt, **te;
	struct list_head *head, *pos, *nx;
	struct request *rq;
	int i;

	d->flags &= ~DEVFL_UP;

	/* clean out active and to-be-retransmitted buffers */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		/* safe walk: downdev_frame presumably unlinks pos — confirm */
		list_for_each_safe(pos, nx, head)
			downdev_frame(pos);
	}
	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head)
		downdev_frame(pos);

	/* reset window dressings */
	tt = d->targets;
	te = tt + d->ntargets;
	/* stops early at the first NULL slot in the targets array */
	for (; tt < te && (t = *tt); tt++) {
		aoecmd_wreset(t);
		t->nout = 0;
	}

	/* clean out the in-process request (if any) */
	aoe_failip(d);

	/* fast fail all pending I/O */
	if (d->blkq) {
		while ((rq = blk_peek_request(d->blkq))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
	}

	if (d->gd)
		set_capacity(d->gd, 0);
}
/* Stop streaming on a VIN vb2 queue: under the device spinlock, unlink
 * every buffer still sitting on the capture list.
 */
static void rcar_vin_stop_streaming(struct vb2_queue *vq)
{
	struct soc_camera_device *camdev = soc_camera_from_vb2q(vq);
	struct soc_camera_host *host = to_soc_camera_host(camdev->parent);
	struct rcar_vin_priv *vin = host->priv;
	struct list_head *entry;
	struct list_head *next;

	spin_lock_irq(&vin->lock);
	/* _safe walk: list_del_init() removes the node being visited */
	list_for_each_safe(entry, next, &vin->capture)
		list_del_init(entry);
	spin_unlock_irq(&vin->lock);
}
static int write_elem_block(snd_tplg_t *tplg, struct list_head *base, int size, int tplg_type, const char *obj_name) { struct list_head *pos, *npos; struct tplg_elem *elem; int ret, wsize = 0, count = 0; /* count number of elements */ list_for_each_safe(pos, npos, base) count++; /* write the header for this block */ ret = write_block_header(tplg, tplg_type, 0, SND_SOC_TPLG_ABI_VERSION, 0, size, count); if (ret < 0) { fprintf(stderr, "error: failed to write %s block %d\n", obj_name, ret); return ret; } /* write each elem to block */ list_for_each_safe(pos, npos, base) { elem = list_entry(pos, struct tplg_elem, list); /* compound elems have already been copied to other elems */ if (elem->compound_elem) continue; if (elem->type != PARSER_TYPE_DAPM_GRAPH) verbose(tplg, " %s '%s': write %d bytes\n", obj_name, elem->id, elem->size); else verbose(tplg, " %s '%s': write %d bytes\n", obj_name, elem->route->source, elem->size); count = write(tplg->out_fd, elem->obj, elem->size); if (count < 0) { fprintf(stderr, "error: failed to write %s %d\n", obj_name, ret); return ret; } wsize += count; }