Example No. 1
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;
	int addr_type;

	addr_type = ipv6_addr_type(&ma->mca_addr);

	if ((addr_type & (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK)))
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = net_random() % IGMP6_UNSOLICITED_IVAL;

	spin_lock_bh(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	spin_unlock_bh(&ma->mca_lock);
}
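A note on the pattern: net_random() returns a pseudo-random 32-bit value, and the modulo clamps it into [0, IGMP6_UNSOLICITED_IVAL) jiffies so hosts joining a group do not all send their report at the same instant. A minimal user-space sketch of the same bounded-delay draw; rand32() is a hypothetical stand-in for net_random(), not a kernel API:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's net_random(): any 32-bit PRNG
 * will do for illustration (rand() alone may yield only 15 bits). */
static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* Delay in [0, max_ivl) jiffies, as in net_random() % IGMP6_UNSOLICITED_IVAL.
 * The slight modulo bias is harmless for timer jitter. */
static unsigned long random_delay(unsigned long max_ivl)
{
	return rand32() % max_ivl;
}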
Example No. 2
static int sample(struct datapath *dp, struct sk_buff *skb,
		  const struct nlattr *attr,
		  struct ovs_key_ipv4_tunnel *tun_key)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (net_random() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	return do_execute_actions(dp, skb, nla_data(acts_list),
				  nla_len(acts_list), tun_key, true);
}
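The OVS_SAMPLE_ATTR_PROBABILITY test works because userspace encodes the sample probability as a fraction of UINT32_MAX; a fresh random word is then below that threshold with roughly the requested probability. A self-contained sketch of the same decision (the helper name and the double-based conversion are illustrative, not the OVS API):

#include <stdbool.h>
#include <stdint.h>

/* Keep a packet with probability p: scale p to a u32 threshold the way
 * sample() expects, then compare one random draw against it. */
static bool sample_keep(uint32_t rnd, double p)
{
	uint32_t threshold = (uint32_t)(p * (double)UINT32_MAX);

	return rnd < threshold;	/* sample() returns early when rnd >= threshold */
}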
Example No. 3
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_DEPTH;

	for (i = 0; i < SFQ_DEPTH; i++) {
		skb_queue_head_init(&q->qs[i]);
		q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
		q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
	}

	q->limit = SFQ_DEPTH - 1;
	q->max_depth = 0;
	q->tail = SFQ_DEPTH;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_DEPTH; i++)
		sfq_link(q, i);
	return 0;
}
Example No. 4
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for addresses with link/host scope */
	if (ipv6_addr_type(&ma->mca_addr)&(IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK))
		return;

	spin_lock(&ma->mca_lock);
	if (del_timer(&ma->mca_timer)) {
		atomic_dec(&ma->mca_refcnt);
		delay = ma->mca_timer.expires - jiffies;
	}

	if (delay >= resptime) {
		if (resptime)
			delay = net_random() % resptime;
		else
			delay = 1;
	}

	ma->mca_timer.expires = jiffies + delay;
	if (!mod_timer(&ma->mca_timer, jiffies + delay))
		atomic_inc(&ma->mca_refcnt);
	spin_unlock(&ma->mca_lock);
}
Example No. 5
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
	if (ctl->divisor)
		q->divisor = ctl->divisor;
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
Example No. 6
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	struct hlist_node *node;
	struct sock *sk2;
	struct inet_sock *inet = inet_sk(sk);

	write_lock_bh(&udp_hash_lock);
	if (!snum) {
		int i, low, high, remaining;
		unsigned rover, best, best_size_so_far;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		best_size_so_far = UINT_MAX;
		best = rover = net_random() % remaining + low;

		if (!udp_lport_inuse(rover) &&
		    !inet_is_reserved_local_port(rover))
			goto gotit;

		/* 1st pass: look for empty (or shortest) hash chain */
		for (i = 0; i < UDP_HTABLE_SIZE; i++) {
			struct hlist_head *list;
			int size = 0;

			list = &udp_hash[rover & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(list) &&
			    !inet_is_reserved_local_port(rover))
				goto gotit;

			sk_for_each(sk2, node, list)
				if (++size >= best_size_so_far)
					goto next;
			best_size_so_far = size;
			best = rover;
		next:
			/* fold back if end of range */
			if (++rover > high)
				rover = low + ((rover - low)
				            & (UDP_HTABLE_SIZE - 1));
		}
		/* 2nd pass: find hole in shortest hash chain */
		rover = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) {
			if (!udp_lport_inuse(rover) &&
			    !inet_is_reserved_local_port(rover))
				goto gotit;
			rover += UDP_HTABLE_SIZE;
			if (rover > high)
				rover = low + ((rover - low)
				            & (UDP_HTABLE_SIZE - 1));
		}
		/* All ports in use! */
		goto fail;

gotit:
		snum = rover;
	} else {
Example No. 7
/* Must be called with im->lock held. */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv = net_random() % max_delay;

	im->tm_running = 1;
	if (!mod_timer(&im->timer, jiffies + tv + 2))
		atomic_inc(&im->refcnt);
}
Example No. 8
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
	int err = 0;
	long vm_wait = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	if (sk_stream_memory_free(sk))
		current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;

	while (1) {
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo_p)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;
		clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		if (sk_stream_memory_free(sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &current_timeo, sk->sk_err ||
						  (sk->sk_shutdown & SEND_SHUTDOWN) ||
						  (sk_stream_memory_free(sk) &&
						  !vm_wait));
		sk->sk_write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo_p;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
			    (current_timeo -= vm_wait) < 0)
				current_timeo = 0;
			vm_wait = 0;
		}
		*timeo_p = current_timeo;
	}
out:
	finish_wait(sk_sleep(sk), &wait);
	return err;

do_error:
	err = -EPIPE;
	goto out;
do_nonblock:
	err = -EAGAIN;
	goto out;
do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;
}
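The (net_random() % (HZ / 5)) + 2 expression above picks a small random initial wait, between 2 jiffies and roughly a fifth of a second, so writers that block on socket memory do not all wake and retry in lockstep. A sketch of the same jitter computation, assuming HZ = 100 (the tick rate and helper name are illustrative):

#include <stdint.h>

#define HZ 100	/* assumed tick rate for this sketch */

/* Jittered wait in [2, HZ/5 + 1] jiffies, mirroring sk_stream_wait_memory(). */
static long initial_vm_wait(uint32_t rnd)
{
	return (long)(rnd % (HZ / 5)) + 2;
}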
Example No. 9
void generateRand(UINT8 *Data, UINT32 length)
{
	UINT32 i;

	for (i = length; i--; )
	{
		Data[i] = net_random();
	}
}
Example No. 10
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
Example No. 11
/*  32-bit random number     */
u_int32_t generate32 (void)
{
	// should be seeded by main program
/* Kao
    return random();
*/
//	return rand();
//	return 1483;
	return net_random();
}
Example No. 12
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases  (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
	}

	return false;
}
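To make the comment above concrete, here is a compact user-space simulation of the same Gilbert-Elliot chain. Probabilities are pre-scaled to u32 thresholds as in the kernel struct; the rnd callback is a stand-in for net_random(), and the struct name is illustrative:

#include <stdbool.h>
#include <stdint.h>

struct gilbert_elliot {
	uint32_t a1, a2, a3, a4;	/* pre-scaled transition/loss thresholds */
	int state;			/* 1 = good, 2 = bad */
};

/* Returns true if the next packet is lost: one draw decides the state
 * transition, a second decides loss under the state's own threshold. */
static bool ge_loss(struct gilbert_elliot *ge, uint32_t (*rnd)(void))
{
	switch (ge->state) {
	case 1:
		if (rnd() < ge->a1)
			ge->state = 2;
		if (rnd() < ge->a4)
			return true;
		break;
	case 2:
		if (rnd() < ge->a2)
			ge->state = 1;
		if (rnd() < ge->a3)
			return true;
		break;
	}
	return false;
}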
Example No. 13
static __inline__ void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv;
	if (im->tm_running)
		return;
	tv = net_random() % max_delay;
	im->timer.expires = jiffies + tv + 2;
	im->tm_running = 1;
	add_timer(&im->timer);
}
Example No. 14
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}
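A small harness like the following can sanity-check either loss model by measuring the empirical loss rate over many draws; loss_fn is any of the model functions above rewritten against a user-space PRNG (the harness itself is illustrative):

#include <stdbool.h>

/* Feed n packets through a loss model and report the observed loss rate;
 * for loss_4state with well-chosen thresholds this should approach the
 * stationary loss probability of the Markov chain. */
static double loss_rate(bool (*loss_fn)(void), long n)
{
	long lost = 0;
	long i;

	for (i = 0; i < n; i++)
		if (loss_fn())
			lost++;
	return (double)lost / (double)n;
}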
Example No. 15
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;

	q->perturbation = net_random() & 0x1F;

	if (q->perturb_period) {
		q->perturb_timer.expires = jiffies + q->perturb_period;
		add_timer(&q->perturb_timer);
	}
}
Example No. 16
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = net_random();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		sfq_destroy(sch);
		return -ENOMEM;
	}
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
Example No. 17
static int
udp_unique_tuple(struct ip_conntrack_tuple *tuple,
		 const struct ip_nat_range *range,
		 enum ip_nat_manip_type maniptype,
		 const struct ip_conntrack *conntrack)
{
	static u_int16_t port;
	__be16 *portptr;
	unsigned int range_size, min, i;

	if (maniptype == IP_NAT_MANIP_SRC)
		portptr = &tuple->src.u.udp.port;
	else
		portptr = &tuple->dst.u.udp.port;

	/* If no range specified... */
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == IP_NAT_MANIP_DST)
			return 0;

		if (ntohs(*portptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*portptr)<512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min.udp.port);
		range_size = ntohs(range->max.udp.port) - min + 1;
	}

	/* Start from random port to avoid prediction */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
		port = net_random();

	for (i = 0; i < range_size; i++, port++) {
		*portptr = htons(min + port % range_size);
		if (!ip_nat_used_tuple(tuple, conntrack))
			return 1;
	}
	return 0;
}
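The port-selection pattern above starts from a random offset when IP_NAT_RANGE_PROTO_RANDOM is set, then probes every port in the range once, wrapping with a modulo. A self-contained sketch of that search; in_use() is a hypothetical predicate standing in for ip_nat_used_tuple(), and the sketch assumes min + range_size - 1 fits in 16 bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Returns 1 and writes the chosen port, or 0 if the range is exhausted.
 * The static counter persists across calls, like the file-static 'port'
 * in udp_unique_tuple(). */
static int pick_port(uint16_t min, uint32_t range_size, bool randomize,
		     bool (*in_use)(uint16_t), uint16_t *out)
{
	static uint16_t port;
	uint32_t i;

	if (randomize)
		port = (uint16_t)rand();	/* random starting offset */

	for (i = 0; i < range_size; i++, port++) {
		uint16_t candidate = min + port % range_size;

		if (!in_use(candidate)) {
			*out = candidate;
			return 1;
		}
	}
	return 0;
}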
Example No. 18
static int
tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_nat_range *range,
		 enum nf_nat_manip_type maniptype,
		 const struct nf_conn *ct)
{
	static u_int16_t port;
	__be16 *portptr;
	unsigned int range_size, min, i;

	if (maniptype == IP_NAT_MANIP_SRC)
		portptr = &tuple->src.u.tcp.port;
	else
		portptr = &tuple->dst.u.tcp.port;

	/* If no range specified... */
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == IP_NAT_MANIP_DST)
			return 0;

		/* Map privileged onto privileged. */
		if (ntohs(*portptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*portptr)<512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min.tcp.port);
		range_size = ntohs(range->max.tcp.port) - min + 1;
	}

	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
		port = net_random();

	for (i = 0; ; ++port) {
		*portptr = htons(min + port % range_size);
		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
			return 1;
	}
	return 0;
}
Example No. 19
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	size_t sz;
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	sz = sizeof(q->ht[0]) * q->divisor;
	q->ht = kmalloc(sz, GFP_KERNEL);
	if (!q->ht && sz > PAGE_SIZE)
		q->ht = vmalloc(sz);
	if (!q->ht)
		return -ENOMEM;
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
Example No. 20
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = net_random();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
Example No. 21
static int __init brc_init(void)
{
	int err;

	printk("Open vSwitch Bridge Compatibility, built "__DATE__" "__TIME__"\n");

	/* Set the bridge ioctl handler */
	brioctl_set(brc_ioctl_deviceless_stub);

	/* Set the openvswitch_mod device ioctl handler */
	dp_ioctl_hook = brc_dev_ioctl;

	/* Randomize the initial sequence number.  This is not a security
	 * feature; it only helps avoid crossed wires between userspace and
	 * the kernel when the module is unloaded and reloaded. */
	brc_seq = net_random();

	/* Register generic netlink family to communicate changes to
	 * userspace. */
	err = genl_register_family(&brc_genl_family);
	if (err)
		goto error;

	err = genl_register_ops(&brc_genl_family, &brc_genl_ops_query_dp);
	if (err != 0)
		goto err_unregister;

	err = genl_register_ops(&brc_genl_family, &brc_genl_ops_dp_result);
	if (err != 0)
		goto err_unregister;

	err = genl_register_ops(&brc_genl_family, &brc_genl_ops_set_proc);
	if (err != 0)
		goto err_unregister;

	strcpy(brc_mc_group.name, "brcompat");
	err = genl_register_mc_group(&brc_genl_family, &brc_mc_group);
	if (err < 0)
		goto err_unregister;

	return 0;

err_unregister:
	genl_unregister_family(&brc_genl_family);
error:
	pr_emerg("failed to install!\n");
	return err;
}
Example No. 22
static int
select_deleted_name(char * name, cpt_context_t *ctx)
{
	int i;

	for (i = 0; i < 100; i++) {
		struct nameidata nd;
		unsigned int rnd = net_random();

		sprintf(name, "/tmp/SOCK.%08x", rnd);

		if (path_lookup(name, 0, &nd) != 0)
			return 0;

		path_put(&nd.path);
	}

	eprintk_ctx("failed to allocate deleted socket inode\n");
	return -ELOOP;
}
Example No. 23
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	write_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
	 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			write_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	fl_ht[FL_HASH(fl->label)] = fl;
	atomic_inc(&fl_size);
	write_unlock_bh(&ip6_fl_lock);
	return NULL;
}
Example No. 24
/* returns -ve errno or +ve port */
static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
{
	unsigned long flags;
	int ret = -EADDRINUSE;
	u16 rover, last;

	if (*port != 0) {
		rover = be16_to_cpu(*port);
		last = rover;
	} else {
		rover = max_t(u16, net_random(), 2);
		last = rover - 1;
	}

	spin_lock_irqsave(&rds_bind_lock, flags);

	do {
		if (rover == 0)
			rover++;
		if (rds_bind_tree_walk(addr, cpu_to_be16(rover), rs) == NULL) {
			*port = cpu_to_be16(rover);
			ret = 0;
			break;
		}
	} while (rover++ != last);

	if (ret == 0)  {
		rs->rs_bound_addr = addr;
		rs->rs_bound_port = *port;
		rds_sock_addref(rs);

		rdsdebug("rs %p binding to %pI4:%d\n",
		  rs, &addr, (int)ntohs(*port));
	}

	spin_unlock_irqrestore(&rds_bind_lock, flags);

	return ret;
}
Example No. 25
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start timer for addresses with link/host scope */
	if (ipv6_addr_type(&ma->mca_addr)&(IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK))
		return;

	if (del_timer(&ma->mca_timer))
		delay = ma->mca_timer.expires - jiffies;

	if (delay >= resptime) {
		if (resptime)
			delay = net_random() % resptime;
		else
			delay = 1;
	}

	ma->mca_flags |= MAF_TIMER_RUNNING;
	ma->mca_timer.expires = jiffies + delay;
	add_timer(&ma->mca_timer);
}
Example No. 26
/* CHAOS functions */
static void
xt_chaos_total(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_chaos_tginfo *info = par->targinfo;
	const struct iphdr *iph = ip_hdr(skb);
	const int thoff         = 4 * iph->ihl;
	const int fragoff       = ntohs(iph->frag_off) & IP_OFFSET;
	typeof(xt_tarpit) destiny;
	bool ret;
	bool hotdrop = false;

	{
		struct xt_action_param local_par;
		local_par.in        = par->in;
		local_par.out       = par->out;
		local_par.match     = xm_tcp;
		local_par.matchinfo = &tcp_params;
		local_par.fragoff   = fragoff;
		local_par.thoff     = thoff;
		local_par.hotdrop   = false;
		ret = xm_tcp->match(skb, &local_par);
		hotdrop = local_par.hotdrop;
	}
	if (!ret || hotdrop || (unsigned int)net_random() > delude_percentage)
		return;

	destiny = (info->variant == XTCHAOS_TARPIT) ? xt_tarpit : xt_delude;
	{
		struct xt_action_param local_par;
		local_par.in       = par->in;
		local_par.out      = par->out;
		local_par.hooknum  = par->hooknum;
		local_par.target   = destiny;
		local_par.targinfo = par->targinfo;
		local_par.family   = par->family;
		destiny->target(skb, &local_par);
	}
}
Example No. 27
static void cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	dreq = req->defer(req);
	if (dreq == NULL)
		return;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		/* too much in the cache, randomly drop
		 * first or last
		 */
		if (net_random()&1) 
			dreq = list_entry(cache_defer_list.next,
					  struct cache_deferred_req,
					  recent);
		else
			dreq = list_entry(cache_defer_list.prev,
					  struct cache_deferred_req,
					  recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many */
		dreq->revisit(dreq, 1);
	}
}
Example No. 28
static bool
statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_statistic_info *info = (void *)par->matchinfo;
	bool ret = info->flags & XT_STATISTIC_INVERT;

	switch (info->mode) {
	case XT_STATISTIC_MODE_RANDOM:
		if ((net_random() & 0x7FFFFFFF) < info->u.random.probability)
			ret = !ret;
		break;
	case XT_STATISTIC_MODE_NTH:
		spin_lock_bh(&nth_lock);
		if (info->master->count++ == info->u.nth.every) {
			info->master->count = 0;
			ret = !ret;
		}
		spin_unlock_bh(&nth_lock);
		break;
	}

	return ret;
}
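In XT_STATISTIC_MODE_RANDOM the random word is masked to 31 bits, so the threshold userspace stores in u.random.probability is the match probability scaled by 0x80000000 (iptables computes it roughly as p * 2^31). A sketch of both sides of that contract; the helper names are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Userspace side: scale a probability in [0, 1] to a 31-bit threshold. */
static uint32_t prob_to_threshold(double p)
{
	return (uint32_t)(p * (double)0x80000000u);
}

/* Kernel side: the comparison statistic_mt() performs. */
static bool statistic_hit(uint32_t rnd, uint32_t threshold)
{
	return (rnd & 0x7FFFFFFFu) < threshold;
}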
Example No. 29
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;
	int addr_type;

	addr_type = ipv6_addr_type(&ma->mca_addr);

	if ((addr_type & (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK)))
		return;

	igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REPORT);

	delay = net_random() % IGMP6_UNSOLICITED_IVAL;
	start_bh_atomic();
	if (del_timer(&ma->mca_timer))
		delay = ma->mca_timer.expires - jiffies;

	ma->mca_timer.expires = jiffies + delay;

	add_timer(&ma->mca_timer);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	end_bh_atomic();
}
Example No. 30
static int fl_intern(struct ip6_flowlabel *fl, __be32 label)
{
	fl->label = label & IPV6_FLOWLABEL_MASK;

	write_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				struct ip6_flowlabel *lfl;
				lfl = __fl_lookup(fl->label);
				if (lfl == NULL)
					break;
			}
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	fl_ht[FL_HASH(fl->label)] = fl;
	atomic_inc(&fl_size);
	write_unlock_bh(&ip6_fl_lock);
	return 0;
}
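Both fl_intern() variants use the same allocate-by-retry idiom: draw random labels until the hash lookup misses, skipping zero because a zero label means "none". A user-space sketch of that idiom, with label_in_use() as a hypothetical stand-in for __fl_lookup() (the real code masks with IPV6_FLOWLABEL_MASK in network byte order; this sketch works in host order):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define FLOWLABEL_MASK 0x000FFFFFu	/* low 20 bits, host order in this sketch */

/* Keep drawing until we find a nonzero label the lookup does not know.
 * Termination is probabilistic, exactly as in the kernel loop. */
static uint32_t alloc_flowlabel(bool (*label_in_use)(uint32_t))
{
	uint32_t label;

	do {
		label = (((uint32_t)rand() << 16) ^ (uint32_t)rand())
			& FLOWLABEL_MASK;
	} while (label == 0 || label_in_use(label));

	return label;
}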