Example #1
/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_node *node;
	struct hlist_head *head;
	struct batadv_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	/* the work_struct handed to us is embedded in a delayed_work,
	 * which in turn sits in the bla member of batadv_priv: walk
	 * the chain back with container_of()
	 */
	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	batadv_bla_start_timer(bat_priv);
}
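
The triple container_of() chain at the top of this handler is what turns the bare work_struct pointer back into the private data it is embedded in. Below is a minimal userspace sketch of the same pattern, using a simplified form of the kernel macro and illustrative struct names that are not part of batman-adv:

/* simplified container_of: walk back from a pointer to an embedded
 * member to the structure that contains it (the kernel version adds
 * extra type checking)
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {			/* stands in for work_struct */
	int pending;
};

struct outer {			/* stands in for batadv_priv */
	int id;
	struct inner work;	/* embedded, like bla.work */
};

int main(void)
{
	struct outer o = { .id = 42 };
	struct inner *w = &o.work;	/* all a work handler receives */

	/* recover the container, as batadv_bla_periodic_work() does in
	 * three steps for its nested structures
	 */
	struct outer *recovered = container_of(w, struct outer, work);

	printf("id = %d\n", recovered->id);	/* prints: id = 42 */
	return 0;
}

Because the member offset is a compile-time constant, recovering the container costs a single pointer subtraction at runtime.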
Example #2
/**
 * batadv_bla_update_orig_address
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when the own orig address changes.
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
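
The group id that identifies a BLA domain is simply a CRC16 of the primary interface's MAC address, stored in network byte order. The standalone sketch below shows that derivation, assuming the kernel's crc16() computes the reflected 0xA001 (CRC-16/ARC) polynomial, which the bitwise loop mirrors; the MAC value is made up:

#include <stdio.h>
#include <arpa/inet.h>	/* htons, ntohs */

#define ETH_ALEN 6

/* bitwise equivalent of the kernel's table-driven lib/crc16.c */
static unsigned short crc16(unsigned short crc,
			    const unsigned char *buf, int len)
{
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	/* illustrative locally administered MAC address */
	const unsigned char dev_addr[ETH_ALEN] = {
		0x02, 0x11, 0x22, 0x33, 0x44, 0x55
	};
	unsigned short group;

	/* same derivation as in batadv_bla_update_orig_address() */
	group = htons(crc16(0, dev_addr, ETH_ALEN));

	printf("bla group id: 0x%04x\n", ntohs(group));
	return 0;
}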
Example #3
/* free all bla structures (for softinterface free or module unload) */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	/* stop the periodic worker first so it cannot run while the
	 * hashes below are being destroyed
	 */
	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
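
Note the ordering in batadv_bla_free(): the delayed work is cancelled synchronously before the hashes it iterates are destroyed, and each hash pointer is cleared after destruction so a repeated teardown is a no-op. A minimal userspace sketch of that destroy-and-NULL idiom, with made-up names and not batman-adv code:

/* illustrative teardown helper: free a table only if it exists and
 * clear the caller's pointer so a second teardown is harmless
 */
#include <stdio.h>
#include <stdlib.h>

struct table {
	int *slots;
};

static void table_destroy(struct table **tbl)
{
	if (!*tbl)
		return;		/* already freed or never allocated */
	free((*tbl)->slots);
	free(*tbl);
	*tbl = NULL;
}

int main(void)
{
	/* allocation error handling omitted for brevity */
	struct table *t = calloc(1, sizeof(*t));

	t->slots = calloc(16, sizeof(*t->slots));

	table_destroy(&t);	/* frees and clears */
	table_destroy(&t);	/* safe: t is already NULL */

	printf("t = %p\n", (void *)t);	/* t is now a null pointer */
	return 0;
}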
Example #4
/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
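
The request_sent/wait_periods pair in this newer version implements a one-shot countdown: the gateway keeps announcing every period, and only the period in which wait_periods drops to zero clears the pending request and decrements the global request counter, exactly once. A minimal C11 sketch of that idiom with illustrative names; atomic_fetch_sub() returning the old value plays the role of the kernel's atomic_dec_and_test():

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int request_sent = 1;	/* set when a request went out */
	atomic_int wait_periods = 3;	/* grace periods to wait */
	int period;

	for (period = 1; period <= 5; period++) {
		if (atomic_load(&request_sent) == 0)
			continue;	/* nothing pending */

		/* dec-and-test: only the decrement that moves the
		 * counter to zero proceeds past this point
		 */
		if (atomic_fetch_sub(&wait_periods, 1) != 1)
			continue;

		atomic_store(&request_sent, 0);
		printf("request cleared in period %d\n", period);
	}
	return 0;	/* prints: request cleared in period 3 */
}

Because the decrement and the zero test are one atomic operation, the reset cannot fire twice even if several contexts raced on the counter.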