/*
 * qdisc_destroy - drop a reference on a qdisc and free it once unused.
 *
 * Built-in qdiscs are never freed; for others the refcount is dropped
 * and, on the final put, the qdisc plus any inner class qdiscs reachable
 * through it are unlinked from dev->qdisc_list before the actual free is
 * deferred to an RCU callback.
 */
void qdisc_destroy(struct Qdisc *qdisc)
{
	struct list_head cql = LIST_HEAD_INIT(cql);
	struct Qdisc *cq, *q, *n;

	/* Built-in qdiscs are not refcounted; others die on the last put. */
	if (qdisc->flags & TCQ_F_BUILTIN ||
		!atomic_dec_and_test(&qdisc->refcnt))
		return;

	if (!list_empty(&qdisc->list)) {
		/* Classful qdiscs go onto cql so their children are collected below. */
		if (qdisc->ops->cl_ops == NULL)
			list_del(&qdisc->list);
		else
			list_move(&qdisc->list, &cql);
	}

	/* unlink inner qdiscs from dev->qdisc_list immediately */
	/*
	 * Walk cql while it grows: classful children are appended with
	 * list_move_tail() and therefore visited by a later iteration of
	 * the (deliberately non-safe) outer loop.
	 */
	list_for_each_entry(cq, &cql, list)
		list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
			if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
				if (q->ops->cl_ops == NULL)
					list_del_init(&q->list);
				else
					list_move_tail(&q->list, &cql);
			}
	/* Detach everything collected on cql from any list. */
	list_for_each_entry_safe(cq, n, &cql, list)
		list_del_init(&cq->list);

	/* Defer the real teardown until concurrent readers are done. */
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
Exemple #2
0
/* Release every uid and gid mapping entry attached to @ct. */
static void local_ct_uid_gid_free(struct container *ct)
{
	struct _uid_gid_map *m, *tmp;

	list_for_each_entry_safe(m, tmp, &ct->uid_map, node)
		xfree(m);

	list_for_each_entry_safe(m, tmp, &ct->gid_map, node)
		xfree(m);
}
Exemple #3
0
/* Detach every node from @head, leaving the list empty. */
static void drop_all_nodes(struct list_head *head)
{
	struct node *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, head, list)
		list_del(&cur->list);
}

/* Empty both the failed-node and delayed-node lists. */
void clear_exceptional_node_lists(void)
{
	drop_all_nodes(&sys->failed_nodes);
	drop_all_nodes(&sys->delayed_nodes);
}
Exemple #4
0
/* Unhook every pending CPU timer from @head; entries are not freed here. */
static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, head, entry)
		list_del_init(&cur->entry);
}
Exemple #5
0
/* Undo TCP repair mode on every socket queued for checkpointing. */
void cpt_unlock_tcp_connections(void)
{
	struct inet_sk_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &cpt_tcp_repair_sockets, rlist)
		tcp_unlock_one(desc);
}
Exemple #6
0
/*
 * add_div - (re)load the div/txt fraction table for one unit type.
 * @unit:    unit whose u_div[@type] list is replaced
 * @type:    index selecting which division list to populate
 * @variant: config section providing matching "div" and "txt" arrays
 *
 * Quits with an error if the two arrays differ in length; does nothing
 * if no "div" entries are configured.
 *
 * Fixes: the original inner declaration `struct fraction *f, *n;`
 * shadowed the outer loop counter `n` (-Wshadow hazard); the counter is
 * now `i` and the fraction iterators live at function scope.
 */
static void add_div(struct unit *unit, int type, cfg_t *variant)
{
	struct fraction *f, *tmp;
	int ndiv, ntxt, i;

	if (!(ndiv = cfg_size(variant, "div")))
		return;

	ntxt = cfg_size(variant, "txt");
	if (ntxt != ndiv)
		quit("Number of elements for div and txt not equal\n");

	/*
	 * Drop any previously loaded fractions before re-populating.
	 * Iterating an empty list is a no-op, so no emptiness check needed.
	 */
	list_for_each_entry_safe(f, tmp, &unit->u_div[type], f_list)
		fraction_free(f);

	for (i = 0; i < ndiv; i++) {
		float div = cfg_getnfloat(variant, "div", i);
		char *txt = cfg_getnstr(variant, "txt", i);

		unit_add_div(unit, type, txt, div);
	}
}
Exemple #7
0
/*
 * hyper_parse_containers - parse a JSMN array of container objects.
 * @pod:  pod the parsed containers are attached to
 * @json: raw JSON buffer the tokens index into
 * @toks: token stream positioned at the containers array
 *
 * Returns the number of tokens consumed on success, -1 on failure.
 *
 * NOTE(review): on failure this frees *every* container on
 * pod->containers, including any that were on the list before this call
 * — presumably callers only invoke this on a freshly created pod; confirm.
 */
static int hyper_parse_containers(struct hyper_pod *pod, char *json, jsmntok_t *toks)
{
	int i = 0, j = 0, next, c_num;
	struct hyper_container *c, *n;

	if (toks[i].type != JSMN_ARRAY) {
		fprintf(stdout, "format incorrect\n");
		return -1;
	}

	/* The array token's size is the number of child container objects. */
	c_num = toks[i].size;
	fprintf(stdout, "container count %d\n", c_num);

	i++;
	for (j = 0; j < c_num; j++) {
		/* hyper_parse_container() returns tokens consumed, < 0 on error. */
		next = hyper_parse_container(pod, &c, json, toks + i);
		if (next < 0)
			goto fail;

		/* Pod created containers, Add to list immediately */
		list_add_tail(&c->list, &pod->containers);
		i += next;
	}

	return i;
fail:
	list_for_each_entry_safe(c, n, &pod->containers, list)
		hyper_free_container(c);

	return -1;
}
Exemple #8
0
/*
 * x_destroy - unlink an x_node from the graph and wipe its contents.
 *
 * Tears down the k-edges touching @x, detaches it from its parent,
 * reparents its children to the per-side root, cancels outstanding
 * subscriptions and finally clears the node's memory.  The struct
 * itself is not freed here; storage ownership stays with the caller.
 */
void x_destroy(EV_P_ struct x_node *x)
{
  struct x_node *c, *t;
  struct sub_node *s, *u;

  /* Drop k-edges between @x and every node on the opposite side. */
  if (x_which(x) == 0)
    k_destroy(EV_A_ x, x_all[1], 0);
  else
    k_destroy(EV_A_ x_all[0], x, 1);

  if (x->x_parent != NULL)
    x->x_parent->x_nr_child--;
  x->x_parent = NULL;

  list_del_init(&x->x_parent_link);

  /* Orphaned children fall back to the root node of @x's side. */
  x_for_each_child_safe(c, t, x)
    x_set_parent(c, x_all[x_which(x)]);

  ASSERT(x->x_nr_child == 0);

  /* Do we need this with k_destroy() above? */
  list_for_each_entry_safe(s, u, &x->x_sub_list, s_x_link[x_which(x)])
    sub_cancel(EV_A_ s);

  hlist_del(&x->x_hash_node);

  /* Poison the node so stale users fault loudly. */
  x->x_type->x_nr--;
  memset(x, 0, sizeof(*x));
}
Exemple #9
0
/* Free every key still attached to this sub-interface. */
void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_key *k, *next;

	list_for_each_entry_safe(k, next, &sdata->key_list, list)
		ieee80211_key_free(k);
}
Exemple #10
0
void io_reset()
{
  /* Abort and discard every request still on the active list. */
  io_request req, tmp;

  list_for_each_entry_safe(req, tmp, &active_requests, lh)
    io_req_stop(req);
}
Exemple #11
0
static void ip6_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list)
		dst_input(skb);
}
/* Tear down every virtqueue registered on this lguest device. */
static void lg_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &vdev->vqs, list)
		lg_del_vq(cur);
}
Exemple #13
0
/* Release every fsync inode entry on @head; @drop is forwarded as-is. */
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, head, list)
		del_fsync_inode(cur, drop);
}
/* Drop the list's reference on every cached BSS entry. */
void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
{
	struct ieee80211_bss *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &local->bss_list, list)
		ieee80211_rx_bss_put(local, cur);
}
Exemple #15
0
/*
 * fw_device_op_release - file release handler for a firewire client fd.
 *
 * Detaches the client from its device's client list, destroys iso
 * context/buffer if present, flips in_shutdown under the lock so no new
 * resources or events can be queued, then reclaims the idr table and
 * any undelivered events.  Always returns 0.
 */
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	/* Events never delivered to userspace are simply dropped. */
	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	/* Release our reference; client_put() may free @client here. */
	client_put(client);

	return 0;
}
Exemple #16
0
/* Discard every queued netlink error record on @err_list. */
static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, err_list, head)
		nfnl_err_del(cur);
}
Exemple #17
0
/* Free every IRQ routing entry linked on @irq_routing. */
static void __kvm_free_irq_routing(struct list_head *irq_routing)
{
	struct kvm_kernel_irq_routing_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, irq_routing, link)
		kfree(entry);
}
Exemple #18
0
/**
 * __ns_list_release - remove all profile namespaces on the list put refs
 * @head: list of profile namespaces  (NOT NULL)
 *
 * Requires: namespace lock be held
 */
static void __ns_list_release(struct list_head *head)
{
	struct aa_ns *cur, *next;

	list_for_each_entry_safe(cur, next, head, base.list)
		__aa_remove_ns(cur);
}
Exemple #19
0
/* Release arch-specific state, then every region on the global mmcfg list. */
static __init void free_all_mmcfg(void)
{
	struct pci_mmcfg_region *region, *tmp;

	pci_mmcfg_arch_free();

	list_for_each_entry_safe(region, tmp, &pci_mmcfg_list, list)
		pci_mmconfig_remove(region);
}
Exemple #20
0
/* Broadcast primitive @pr to every active layer-3 process. */
static void
l3ml3p(layer3_t *l3, int pr)
{
	l3_process_t *proc, *tmp;

	list_for_each_entry_safe(proc, tmp, &l3->plist, list)
		l3->p_mgr(proc, pr, NULL);
}
Exemple #21
0
/* Kill every process netifd is currently tracking. */
static void
netifd_kill_processes(void)
{
	struct netifd_process *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &process_list, list)
		netifd_kill_process(cur);
}
Exemple #22
0
/* Detach every endpoint currently bound to @udev. */
static void usbhsh_endpoint_detach_all(struct usbhsh_hpriv *hpriv,
				       struct usbhsh_device *udev)
{
	struct usbhsh_ep *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &udev->ep_list_head, ep_list)
		usbhsh_endpoint_detach(hpriv, usbhsh_uep_to_ep(cur));
}
Exemple #23
0
/* Free every descriptor parsed out of the interface manifest. */
static void release_manifest_descriptors(struct gb_interface *intf)
{
	struct manifest_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &intf->manifest_descs, links)
		release_manifest_descriptor(desc);
}
Exemple #24
0
/*
 * udev_device_event - handle a uevent for a device; this visible part
 * covers the "add"/"change" node-creation branch only.
 *
 * NOTE(review): this chunk is truncated — the closing brace below ends
 * the "add device node" branch, not the function; the remainder is not
 * visible here.
 */
int udev_device_event(struct udev_rules *rules, struct udevice *udev)
{
	int retval = 0;

	/* add device node */
	if (major(udev->devt) != 0 &&
	    (strcmp(udev->action, "add") == 0 || strcmp(udev->action, "change") == 0)) {
		struct udevice *udev_old;

		dbg("device node add '%s'", udev->dev->devpath);

		udev_rules_get_name(rules, udev);
		if (udev->ignore_device) {
			info("device event will be ignored");
			goto exit;
		}
		if (udev->name[0] == '\0') {
			info("device node creation supressed");
			goto exit;
		}

		/* read current database entry, we may want to cleanup symlinks */
		udev_old = udev_device_init();
		if (udev_old != NULL) {
			if (udev_db_get_device(udev_old, udev->dev->devpath) != 0) {
				/* no prior record: nothing to validate against */
				udev_device_cleanup(udev_old);
				udev_old = NULL;
			} else
				info("device '%s' already in database, validate currently present symlinks",
				     udev->dev->devpath);
		}

		/* create node and symlinks */
		retval = udev_node_add(udev, udev_old);
		if (retval == 0) {
			/* store record in database */
			udev_db_add_device(udev);

			/* remove possibly left-over symlinks */
			if (udev_old != NULL) {
				struct name_entry *link_loop;
				struct name_entry *link_old_loop;
				struct name_entry *link_old_tmp_loop;

				/* remove still valid symlinks from old list */
				/*
				 * NOTE(review): after free(link_old_loop) the inner
				 * loop keeps iterating and its next strcmp() reads
				 * link_old_loop->name from freed memory — a break
				 * after free() appears to be missing; confirm.
				 */
				list_for_each_entry_safe(link_old_loop, link_old_tmp_loop, &udev_old->symlink_list, node)
					list_for_each_entry(link_loop, &udev->symlink_list, node)
						if (strcmp(link_old_loop->name, link_loop->name) == 0) {
							dbg("symlink '%s' still valid, keep it", link_old_loop->name);
							list_del(&link_old_loop->node);
							free(link_old_loop);
						}
				udev_node_remove_symlinks(udev_old);
				udev_device_cleanup(udev_old);
			}
		}
		goto exit;
	}
Exemple #25
0
/* Tear down a connection pool: sync primitives first, then worker slots. */
void conn_destroy(struct conn_data *data)
{
	struct conn_worker_list *cur, *tmp;

	pthread_cond_destroy(&data->workers_cond);
	pthread_mutex_destroy(&data->workers_lock);

	list_for_each_entry_safe(cur, tmp, &data->workers, list)
		free(cur);
}
/* Remove and free every scan-element sysfs attribute of the device buffer. */
static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_buffer *buf = indio_dev->buffer;
	struct iio_dev_attr *attr, *tmp;

	list_for_each_entry_safe(attr, tmp, &buf->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, attr);
}
/* Unregister every DRM device created for @driver, then log the unload. */
void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
{
	struct drm_device *cur, *tmp;

	DRM_DEBUG("\n");

	list_for_each_entry_safe(cur, tmp, &driver->device_list, driver_item)
		drm_put_dev(cur);

	DRM_INFO("Module unloaded\n");
}
Exemple #28
0
/* Give every page on @pages back to the allocator and reset the list head. */
static void free_page_list(struct list_head *pages)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, pages, lru)
		__free_page(page);

	/* Entries were freed without list_del; reinitialize the head. */
	INIT_LIST_HEAD(pages);
}
Exemple #29
0
/* Delete all virtqueues of @vdev, then release the shared interrupt. */
static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &vdev->vqs, list)
		vm_del_vq(cur);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
Exemple #30
0
/* Evict every in-memory dedup hash entry, serialized by the dedup lock. */
static void inmem_destroy(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dedup_info *info = fs_info->dedup_info;
	struct inmem_hash *hash, *tmp;

	mutex_lock(&info->lock);
	list_for_each_entry_safe(hash, tmp, &info->lru_list, lru_list)
		__inmem_del(info, hash);
	mutex_unlock(&info->lock);
}