/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
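For context, a hedged sketch of the enqueue side that feeds the drain above: in the tiny-RCU style implied by these fields, a call_rcu()-like helper appends to ->rcucblist through ->curtail with interrupts off. The helper name is illustrative, not the kernel's.
static void example_enqueue_rcu_cb(struct rcu_head *head,
				   void (*func)(struct rcu_head *),
				   struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);		/* list may also be touched from irq context */
	*rcp->curtail = head;		/* append at the not-yet-done tail */
	rcp->curtail = &head->next;
	local_irq_restore(flags);
}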
Example #2
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
	struct sock *sk;

	local_bh_disable();

	sk = icmpv6_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return NULL;
	}
	return sk;
}
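The matching unlock is not part of the excerpt; a minimal sketch, assuming the usual counterpart, where spin_unlock_bh() drops the slock and re-enables bottom halves in one call:
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	/* undo both steps of icmpv6_xmit_lock() at once */
	spin_unlock_bh(&sk->sk_lock.slock);
}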
Example #3
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err) {
		ip_send_check(ip_hdr(skb));
		skb->ignore_df = 1;
	}

	return err;
}
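A hedged usage sketch (function name illustrative): callers typically defragment only actual fragments and treat a nonzero return as the skb having been stolen by the reassembly queue.
static unsigned int example_conntrack_defrag(struct sk_buff *skb,
					     u_int32_t user)
{
	if (ip_is_fragment(ip_hdr(skb))) {
		if (nf_ct_ipv4_gather_frags(skb, user))
			return NF_STOLEN;	/* skb queued for reassembly */
	}
	return NF_ACCEPT;
}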
Example #4
static void sync_backup_loop(void)
{
	struct socket *sock;
	char *buf;
	int len;

	if (!(buf=kmalloc(SYNC_MESG_MAX_SIZE, GFP_ATOMIC))) {
		IP_VS_ERR("sync_backup_loop: kmalloc error\n");
		return;
	}

	/* create the receiving multicast socket */
	sock = make_receive_sock();
	if (!sock)
		goto out;

	for (;;) {
		/* do we have data now? */
		while (!skb_queue_empty(&(sock->sk->receive_queue))) {
			if ((len=ip_vs_receive(sock, buf,
					       SYNC_MESG_MAX_SIZE))<=0) {
				IP_VS_ERR("receiving message error\n");
				break;
			}
			/* disable bottom half, because it accesses the data
			   shared by softirq while getting/creating conns */
			local_bh_disable();
			ip_vs_process_message(buf, len);
			local_bh_enable();
		}

		if (stop_sync)
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
		__set_current_state(TASK_RUNNING);
	}

	/* release the sending multicast socket */
	sock_release(sock);

  out:
	kfree(buf);
}
Example #5
/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	local_bh_disable();

	update_thread_flag(TIF_SVE,
			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		/* Clean guest FP state to memory and invalidate cpu view */
		fpsimd_save();
		fpsimd_flush_cpu_state();
	} else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		/* Ensure user trap controls are correctly restored */
		fpsimd_bind_task_to_cpu();
	}

	local_bh_enable();
}
Example #6
static void
ssh_interceptor_uninit_engine(SshInterceptor interceptor)
{
  /* Stop packet processing engine */
  if (interceptor->engine != NULL)
    {
      while (ssh_engine_stop(interceptor->engine) == FALSE)
	{
	  local_bh_enable();
	  schedule();
	  mdelay(300);
	  local_bh_disable();
	}
      interceptor->engine = NULL;
    }

  /* Free packet data structure */
  ssh_interceptor_packet_freelist_uninit(interceptor);
}
Example #7
static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_counter *this_cpu;
	seqcount_t *myseq;

	local_bh_disable();
	this_cpu = this_cpu_ptr(priv->counter);
	myseq = this_cpu_ptr(&nft_counter_seq);

	write_seqcount_begin(myseq);

	this_cpu->bytes += pkt->skb->len;
	this_cpu->packets++;

	write_seqcount_end(myseq);
	local_bh_enable();
}
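A hedged reader-side sketch for the same pattern (helper name illustrative): with bottom halves fencing off the local writer, readers rely on read_seqcount_begin()/read_seqcount_retry() to avoid observing a torn 64-bit update.
static void example_counter_fetch_one(const struct nft_counter *c,
				      const seqcount_t *seq,
				      u64 *bytes, u64 *packets)
{
	unsigned int s;

	do {
		s = read_seqcount_begin(seq);
		*bytes = c->bytes;		/* snapshot both counters */
		*packets = c->packets;
	} while (read_seqcount_retry(seq, s));	/* retry if writer raced */
}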
Example #8
/*
 * mdp4_dsi_cmd_dma_busy_wait: check dsi link activity
 * dsi link is a shared resource and it can only be used
 * while it is in idle state.
 * ov_mutex need to be acquired before call this function.
 */
void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
{
	unsigned long flag;
	int need_wait = 0;

	if (dsi_clock_timer.function) {
		if (time_after(jiffies, tout_expired)) {
			tout_expired = jiffies + TOUT_PERIOD;
			mod_timer(&dsi_clock_timer, tout_expired);
			tout_expired -= MS_100;
		}
	}

	pr_debug("%s: start pid=%d dsi_clk_on=%d\n",
			__func__, current->pid, mipi_dsi_clk_on);

	/* start dsi clock if necessary */
	if (mipi_dsi_clk_on == 0) {
		local_bh_disable();
		mipi_dsi_turn_on_clks();
		local_bh_enable();
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		if (busy_wait_cnt == 0)
			INIT_COMPLETION(mfd->dma->comp);
		busy_wait_cnt++;
		need_wait++;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (need_wait) {
		/* wait until DMA finishes the current job */
		pr_debug("%s: pending pid=%d dsi_clk_on=%d\n",
				__func__, current->pid, mipi_dsi_clk_on);
		wait_for_completion(&mfd->dma->comp);
	}
	pr_debug("%s: done pid=%d dsi_clk_on=%d\n",
			 __func__, current->pid, mipi_dsi_clk_on);
}
Example #9
static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
{
	int err;
	const struct iphdr *rxiph;
	struct sk_buff *skb;
	struct dst_entry *dst;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v4_ctl_sk;

	/* Never send a reset in response to a reset. */
	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (skb_rtable(rxskb)->rt_type != RTN_LOCAL)
		return;

	dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
	if (dst == NULL)
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		goto out;

	rxiph = ip_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
								 rxiph->daddr);
	skb_dst_set(skb, dst_clone(dst));

	local_bh_disable();
	bh_lock_sock(ctl_sk);
	err = ip_build_and_send_pkt(skb, ctl_sk,
				    rxiph->daddr, rxiph->saddr, NULL);
	bh_unlock_sock(ctl_sk);

	if (net_xmit_eval(err) == 0) {
		__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
		__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
	}
	local_bh_enable();
out:
	dst_release(dst);
}
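Worth noting for the block above: bh_lock_sock() is only the spinlock half and does not mask bottom halves itself, which is why local_bh_disable() wraps it here regardless of the caller's context. A sketch mirroring the usual definition (names prefixed to mark them illustrative):
#define example_bh_lock_sock(sk)	spin_lock(&(sk)->sk_lock.slock)
#define example_bh_unlock_sock(sk)	spin_unlock(&(sk)->sk_lock.slock)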
Example #10
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *sp;

	sp = container_of(wp, struct srcu_struct, srcu_work);
	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(sp->srcu_gp_running, true);
	local_irq_disable();
	lh = sp->srcu_cb_head;
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	local_irq_enable();
	idx = sp->srcu_idx;
	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(sp->srcu_gp_running, false);
	if (READ_ONCE(sp->srcu_cb_head))
		schedule_work(&sp->srcu_work);
}
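A hedged sketch of the enqueue side that srcu_drive_gp() drains, in the same tiny-SRCU style: append under local_irq_save() (callbacks may arrive from interrupt level, per the comment above) and kick the workqueue. Not the kernel's exact implementation.
static void example_call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
			      void (*func)(struct rcu_head *))
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);			/* may race with irq-level callers */
	*sp->srcu_cb_tail = rhp;		/* append to the callback list */
	sp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	if (!READ_ONCE(sp->srcu_gp_running))
		schedule_work(&sp->srcu_work);	/* kick the grace-period worker */
}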
Example #11
u32 ff_half_dpi_process(struct sk_buff *skb)
{
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->frag_list = NULL;
	skb_set_dse(skb, DSE_INVALID);

	(void)fpga_proc(skb);

	skb->ucpktaction = 0x0;
	skb->segment_id = 0;
	local_bh_disable();
	skb->ucpktaction = dpi_rcv(skb);
	local_bh_enable();

	return DPI_PACKET_NOT_BYPASS;
}
Example #12
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DECLARE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	lock_cpu_hotplug();
	down(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	up(&flow_flush_sem);
	unlock_cpu_hotplug();
}
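A hedged sketch of the per-cpu flush tasklet driven above (run directly for the local cpu, via smp_call_function() elsewhere): each cpu invalidates its private cache and the last finisher fires the completion. The invalidation body is elided and the name is prefixed to mark it illustrative.
static void example_flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (struct flow_flush_info *)data;

	/* ... walk and invalidate this cpu's private flow cache entries ... */

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);	/* last cpu wakes the waiter */
}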
Example #13
static noinline void nft_update_chain_stats(const struct nft_chain *chain,
					    const struct nft_pktinfo *pkt)
{
	struct nft_base_chain *base_chain;
	struct nft_stats *stats;

	base_chain = nft_base_chain(chain);
	if (!base_chain->stats)
		return;

	local_bh_disable();
	stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
	if (stats) {
		u64_stats_update_begin(&stats->syncp);
		stats->pkts++;
		stats->bytes += pkt->skb->len;
		u64_stats_update_end(&stats->syncp);
	}
	local_bh_enable();
}
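A hedged reader-side sketch (helper name illustrative; the exact u64_stats fetch spelling varies across kernel versions): summing the per-cpu counters written above without tearing on 32-bit machines.
static void example_chain_stats_sum(const struct nft_stats __percpu *stats,
				    u64 *pkts, u64 *bytes)
{
	const struct nft_stats *cpu_stats;
	unsigned int seq, cpu;
	u64 p, b;

	*pkts = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		cpu_stats = per_cpu_ptr(stats, cpu);
		do {
			seq = u64_stats_fetch_begin(&cpu_stats->syncp);
			p = cpu_stats->pkts;
			b = cpu_stats->bytes;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, seq));
		*pkts += p;		/* accumulate a consistent snapshot */
		*bytes += b;
	}
}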
Example #14
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu, cur_cpu;

	if (!flow->stats.is_percpu) {
		stats_reset(flow->stats.stat);
	} else {
		cur_cpu = get_cpu();

		for_each_possible_cpu(cpu) {

			if (cpu == cur_cpu)
				local_bh_disable();

			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));

			if (cpu == cur_cpu)
				local_bh_enable();
		}
		put_cpu();
	}
}
Example #15
void ssh_interceptor_proc_uninit(SshInterceptor interceptor)
{
  /* Enable softirqs. */
  SSH_ASSERT(in_softirq());
  local_bh_enable();
  SSH_ASSERT(!in_softirq());

  interceptor_ipm_proc_entry_uninit(interceptor);
#ifdef DEBUG_LIGHT
  interceptor_stats_proc_entry_uninit(interceptor);
  interceptor_debug_proc_entry_uninit(interceptor);
#endif /* DEBUG_LIGHT */
  interceptor_version_proc_entry_uninit(interceptor);

  if (interceptor->proc_dir)
    remove_proc_entry(interceptor->proc_dir->name, NULL);
  interceptor->proc_dir = NULL;

  /* Disable softirqs. */
  local_bh_disable();
  SSH_ASSERT(in_softirq());
}
Example #16
/* called from BPF program, therefore rcu_read_lock is held */
void bpf_channel_push_packet(struct bpf_context *pctx)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct dp_upcall_info upcall;
	struct sk_buff *nskb;
	struct plum *plum;

	if (unlikely(!ctx->skb))
		return;

	plum = rcu_dereference(ctx->dp->plums[pctx->plum_id]);
	if (unlikely(!plum))
		return;

	/* queue_gso_packets() inside ovs_dp_upcall() changes skb,
	 * so copy it here, since BPF program might still be using it
	 */
	nskb = skb_clone(ctx->skb, GFP_ATOMIC);
	if (unlikely(!nskb))
		return;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = NULL;
	upcall.userdata = NULL;
	upcall.portid = plum->upcall_pid;
	/* don't exit earlier even if upcall_pid is invalid,
	 * since we want 'lost' count to be incremented
	 */
	/* disable softirq to make sure that genlmsg_unicast()->gfp_any() picks
	 * GFP_ATOMIC flag
	 * note that bpf_channel_push_struct() doesn't need to do it,
	 * since skb==NULL
	 */
	local_bh_disable();
	ovs_dp_upcall(ctx->dp, nskb, &upcall);
	local_bh_enable();
	consume_skb(nskb);
}
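The comment above leans on gfp_any(); a sketch matching its usual include/net/sock.h shape: with bottom halves disabled, in_softirq() is true, so atomic allocation is picked.
static inline gfp_t gfp_any(void)
{
	/* local_bh_disable() raises the softirq count, so this is true */
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}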
Example #17
static void
ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
{
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;

	if (!txq)
		return;

	txqi = to_txq_info(txq);

	if (enable)
		set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
	else
		clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);

	clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
	local_bh_disable();
	rcu_read_lock();
	drv_wake_tx_queue(sta->sdata->local, txqi);
	rcu_read_unlock();
	local_bh_enable();
}
Example #18
static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	int status;

	if (!entry->next_outfn) {
		spin_lock_bh(&imq_nf_queue_lock);
		nf_reinject(entry, verdict);
		spin_unlock_bh(&imq_nf_queue_lock);
		return;
	}

	rcu_read_lock();
	local_bh_disable();
	status = entry->next_outfn(entry, entry->next_queuenum);
	local_bh_enable();
	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		kfree_skb(entry->skb);
		kfree(entry);
	}

	rcu_read_unlock();
}
Example #19
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu, cur_cpu;

	cur_cpu = get_cpu();
	for_each_possible_cpu(cpu) {
		struct sw_flow_stats *stats = &flow->stats[cpu];

		if (cpu == cur_cpu)
			local_bh_disable();

		spin_lock(&stats->lock);
		stats->used = 0;
		stats->packet_count = 0;
		stats->byte_count = 0;
		stats->tcp_flags = 0;
		spin_unlock(&stats->lock);

		if (cpu == cur_cpu)
			local_bh_enable();
	}
	put_cpu();
}
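A hedged sketch of why bottom halves are masked only for the local cpu above: the datapath updates these stats from softirq context with plain spin_lock(), so holding our own cpu's stats->lock while a local softirq could take it would deadlock, whereas remote cpus merely spin. Names follow the fields in the excerpt but the helper itself is illustrative.
static void example_flow_stats_update(struct sw_flow *flow,
				      const struct sk_buff *skb)
{
	struct sw_flow_stats *stats = &flow->stats[smp_processor_id()];

	spin_lock(&stats->lock);	/* softirq context: BHs already off */
	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += skb->len;
	spin_unlock(&stats->lock);
}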
Example #20
/**
 * selinux_netlbl_socket_connect - Label a client-side socket on connect
 * @sk: the socket to label
 * @addr: the destination address
 *
 * Description:
 * Attempt to label a connected socket with NetLabel using the given address.
 * Returns zero values on success, negative values on failure.
 *
 */
int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
{
	int rc;
	struct sk_security_struct *sksec = sk->sk_security;
	struct netlbl_lsm_secattr *secattr;

	if (sksec->nlbl_state != NLBL_REQSKB &&
	    sksec->nlbl_state != NLBL_CONNLABELED)
		return 0;

	local_bh_disable();
	bh_lock_sock_nested(sk);

	/* connected sockets are allowed to disconnect when the address family
	 * is set to AF_UNSPEC, if that is what is happening we want to reset
	 * the socket */
	if (addr->sa_family == AF_UNSPEC) {
		netlbl_sock_delattr(sk);
		sksec->nlbl_state = NLBL_REQSKB;
		rc = 0;
		goto socket_connect_return;
	}
	secattr = selinux_netlbl_sock_genattr(sk);
	if (secattr == NULL) {
		rc = -ENOMEM;
		goto socket_connect_return;
	}
	rc = netlbl_conn_setattr(sk, addr, secattr);
	if (rc == 0)
		sksec->nlbl_state = NLBL_CONNLABELED;

socket_connect_return:
	bh_unlock_sock(sk);
	local_bh_enable();
	return rc;
}
Example #21
int br_port_get_emuswitch(struct net_bridge_port *p, 
			   struct __emuswitch_info *info) {
	local_bh_disable();
#define READINFO(DIR,OFFS)					\
	do {						\
		info->direction[OFFS].maxQueueLen =	\
			p->DIR##_state.maxQueueLen;	\
		info->direction[OFFS].bytesPerSecond =	\
			p->DIR##_state.bytesPerSecond;	\
		info->direction[OFFS].useThrottle =	\
			p->DIR##_state.useThrottle;	\
	} while(0);

	if(info->directions & EMU_INPUT) {
		READINFO(input,0);
	}
	if(info->directions & EMU_OUTPUT) {
		READINFO(output,1);
	}
#undef READINFO
	local_bh_enable();
	printk("get emuswitch enabled %d = %d\n", p->input_state.useThrottle, info->direction[0].useThrottle);
	return 0;
}
Example #22
/* Interceptor uninitialization. Called by cleanup_module() with
   softirqs disabled. */
static int
ssh_interceptor_uninit(void)
{
  /* Uninitialize external interfaces. We leave softirqs enabled for
     this as we have to make calls into the netfilter API that will
     execute scheduling in Linux 2.6. */
  ssh_interceptor_uninit_external_interfaces(ssh_interceptor_context);

  /* Uninitialize engine. Via ssh_interceptor_stop() this
     function makes sure that no callouts to the interceptor
     are in progress after it returns. ssh_interceptor_stop()
     _WILL_ grab the interceptor_lock, so make sure that it
     is not held.*/
  local_bh_disable();
  ssh_interceptor_uninit_engine(ssh_interceptor_context);

  /* Uninitialize basic kernel services to the engine and the 
     interceptor. This frees all remaining memory. Note that all locks 
     are also freed here, so none of them can be held. */
  ssh_interceptor_uninit_kernel_services();
  local_bh_enable();

  return 0;
}
Example #23
int ipv6_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if (level == SOL_IP && sk->sk_type != SOCK_RAW)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if(level!=SOL_IPV6)
		goto out;

	if (optval == NULL)
		val=0;
	else if (get_user(val, (int __user *) optval))
		return -EFAULT;

	valbool = (val!=0);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_protocol != IPPROTO_UDP &&
			    sk->sk_protocol != IPPROTO_TCP)
				break;

			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !(ipv6_addr_type(&np->daddr) & IPV6_ADDR_MAPPED)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct tcp_sock *tp = tcp_sk(sk);

				local_bh_disable();
				sock_prot_dec_use(sk->sk_prot);
				sock_prot_inc_use(&tcp_prot);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				tp->af_specific = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, tp->pmtu_cookie);
			} else {
				local_bh_disable();
				sock_prot_dec_use(sk->sk_prot);
				sock_prot_inc_use(&udp_prot);
				local_bh_enable();
				sk->sk_prot = &udp_prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			if (pktopt)
				kfree_skb(pktopt);

			sk->sk_destruct = inet_sock_destruct;
#ifdef INET_REFCNT_DEBUG
			atomic_dec(&inet6_sock_nr);
#endif
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (inet_sk(sk)->num)
			goto e_inval;
		np->ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_PKTINFO:
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_HOPLIMIT:
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_RTHDR:
		if (val < 0 || val > 2)
			goto e_inval;
		np->rxopt.bits.srcrt = val;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_DSTOPTS:
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi fl;
		int junk;

		fl.fl6_flowlabel = 0;
		fl.oif = sk->sk_bound_dev_if;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void*)(opt+1);

		retv = datagram_send_ctl(&msg, &fl, opt, &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		if (sk->sk_type == SOCK_STREAM) {
			if (opt) {
				struct tcp_sock *tp = tcp_sk(sk);
				if (!((1 << sk->sk_state) &
				      (TCPF_LISTEN | TCPF_CLOSE))
				    && inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
					tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
					tcp_sync_mss(sk, tp->pmtu_cookie);
				}
			}
			opt = xchg(&np->opt, opt);
			sk_dst_reset(sk);
		} else {
			write_lock(&sk->sk_dst_lock);
			opt = xchg(&np->opt, opt);
			write_unlock(&sk->sk_dst_lock);
			sk_dst_reset(sk);
		}

done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
			goto e_inval;

		if (__dev_get_by_index(val) == NULL) {
			retv = -ENODEV;
			break;
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen != sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
				&psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
				&psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
				&psin6->sin6_addr);
			if (retv)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /*IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		extern int sysctl_optmem_max;
		extern int sysctl_mld_max_msf;
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = (struct group_filter *)kmalloc(optlen,GFP_KERNEL);
		if (gsf == 0) {
			retv = -ENOBUFS;
			break;
		}
		retv = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			kfree(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);

		break;
	}
	case IPV6_ROUTER_ALERT:
		retv = ip6_ra_control(sk, val, NULL);
		break;
	case IPV6_MTU_DISCOVER:
		if (val<0 || val>2)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

#ifdef CONFIG_NETFILTER
	default:
		retv = nf_setsockopt(sk, PF_INET6, optname, optval, 
					    optlen);
		break;
#endif

	}
	release_sock(sk);

out:
	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
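A hedged sketch of why the sock_prot_*_use() pair above sits inside local_bh_disable(): the in-use counters of that era are per-cpu and updated non-atomically, so the only writer to exclude is a softirq on the same cpu. The field layout shown is illustrative of old 2.6 kernels.
static inline void example_sock_prot_inc_use(struct proto *prot)
{
	/* per-cpu slot, plain increment -- safe once BHs are off locally */
	prot->stats[smp_processor_id()].inuse++;
}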
Example #24
static int mipi_dsi_off(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	struct msm_panel_info *pinfo;
#ifdef CONFIG_HUAWEI_KERNEL
	struct mipi_panel_info *mipi;
	unsigned int datamask = 0;
#endif

	mfd = platform_get_drvdata(pdev);
	pinfo = &mfd->panel_info;

	if (mdp_rev >= MDP_REV_41)
		mutex_lock(&mfd->dma->ov_mutex);
	else
		down(&mfd->dma->mutex);

	mdp4_overlay_dsi_state_set(ST_DSI_SUSPEND);

	/*
	 * Description: the dsi clock is needed to perform shutdown.
	 * mdp4_dsi_cmd_dma_busy_wait() will enable dsi clock if disabled.
	 * also, wait until dma (overlay and dmap) finish.
	 */
	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (mdp_rev >= MDP_REV_41) {
			mdp4_dsi_cmd_dma_busy_wait(mfd);
			mdp4_dsi_blt_dmap_busy_wait(mfd);
			mipi_dsi_mdp_busy_wait(mfd);
		} else {
			mdp3_dsi_cmd_dma_busy_wait(mfd);
		}
	}

	/*
	 * Description: change to DSI_CMD_MODE since it is needed to
	 * tx the DCS display off command to the panel
	 */
	mipi_dsi_op_mode_config(DSI_CMD_MODE);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (pinfo->lcd.vsync_enable) {
			if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
				if (MDP_REV_303 != mdp_rev)
					gpio_free(vsync_gpio);
			}
			mipi_dsi_set_tear_off(mfd);
		}
	}

	ret = panel_next_off(pdev);
#ifdef CONFIG_HUAWEI_KERNEL
	
	mipi  = &mfd->panel_info.mipi;
	if (mipi->data_lane3)
		datamask |= 1<<3;
	if (mipi->data_lane2)
		datamask |= 1<<2;
	if (mipi->data_lane1)
		datamask |= 1<<1;
	if (mipi->data_lane0)
		datamask |= 1<<0;
		
	MIPI_OUTP(MIPI_DSI_BASE + 0xA8, datamask );
	mdelay(1);
	MIPI_OUTP(MIPI_DSI_BASE + 0xA8, datamask|(1<<4));
	mdelay(1);
#endif
#ifdef CONFIG_MSM_BUS_SCALING
	mdp_bus_scale_update_request(0);
#endif

	local_bh_disable();
	mipi_dsi_clk_disable();
	local_bh_enable();

	/* disable dsi engine */
	MIPI_OUTP(MIPI_DSI_BASE + 0x0000, 0);

	mipi_dsi_phy_ctrl(0);


	local_bh_disable();
	mipi_dsi_ahb_ctrl(0);
	local_bh_enable();

	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
		mipi_dsi_pdata->dsi_power_save(0);

	if (mdp_rev >= MDP_REV_41)
		mutex_unlock(&mfd->dma->ov_mutex);
	else
		up(&mfd->dma->mutex);

	pr_debug("%s-:\n", __func__);

	return ret;
}
Example #25
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
Example #26
static int mipi_dsi_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_panel_info *pinfo;
	struct mipi_panel_info *mipi;
	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
	u32 ystride, bpp, data;
	u32 dummy_xres, dummy_yres;
	int target_type = 0;
#ifdef CONFIG_HUAWEI_KERNEL
	unsigned int datamask = 0;
#endif
	mfd = platform_get_drvdata(pdev);
	fbi = mfd->fbi;
	var = &fbi->var;
	pinfo = &mfd->panel_info;

	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
		mipi_dsi_pdata->dsi_power_save(1);

	local_bh_disable();
	mipi_dsi_ahb_ctrl(1);
	local_bh_enable();

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

/*
 * Because of the reset and clock ordering, the Qualcomm baseband can
 * issue a special waveform; this causes the lcd to enter a wrong state,
 * data lane 0 to be pulled low, and mipi dsi to work abnormally.
 */
#ifdef CONFIG_HUAWEI_KERNEL
	local_bh_disable();
	mipi_dsi_clk_enable();
	local_bh_enable();
#endif

	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 1);
	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 0);

	hbp = var->left_margin;
	hfp = var->right_margin;
	vbp = var->upper_margin;
	vfp = var->lower_margin;
	hspw = var->hsync_len;
	vspw = var->vsync_len;
	width = mfd->panel_info.xres;
	height = mfd->panel_info.yres;

	mipi_dsi_phy_ctrl(1);

	if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata)
		target_type = mipi_dsi_pdata->target_type;

	mipi_dsi_phy_init(0, &(mfd->panel_info), target_type);
/*
 * Because of the reset and clock ordering, the Qualcomm baseband can
 * issue a special waveform; this causes the lcd to enter a wrong state,
 * data lane 0 to be pulled low, and mipi dsi to work abnormally.
 */
#ifndef CONFIG_HUAWEI_KERNEL
	local_bh_disable();
	mipi_dsi_clk_enable();
	local_bh_enable();
#endif
	mipi  = &mfd->panel_info.mipi;
	if (mfd->panel_info.type == MIPI_VIDEO_PANEL) {
		dummy_xres = mfd->panel_info.mipi.xres_pad;
		dummy_yres = mfd->panel_info.mipi.yres_pad;

		if (mdp_rev >= MDP_REV_41) {
			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hspw + hbp + width + dummy_xres) << 16 |
				(hspw + hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vspw + vbp + height + dummy_yres) << 16 |
				(vspw + vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vspw + vbp + height + dummy_yres +
					vfp - 1) << 16 | (hspw + hbp +
					width + dummy_xres + hfp - 1));
		} else {
			/* DSI_LAN_SWAP_CTRL */
			MIPI_OUTP(MIPI_DSI_BASE + 0x00ac, mipi->dlane_swap);

			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hbp + width + dummy_xres) << 16 | (hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vbp + height + dummy_yres) << 16 | (vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vbp + height + dummy_yres + vfp) << 16 |
					(hbp + width + dummy_xres + hfp));
		}

		MIPI_OUTP(MIPI_DSI_BASE + 0x2c, (hspw << 16));
		MIPI_OUTP(MIPI_DSI_BASE + 0x30, 0);
		MIPI_OUTP(MIPI_DSI_BASE + 0x34, (vspw << 16));

	} else {		/* command mode */
		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
			bpp = 2;
		else
			bpp = 3;	/* Default format set to RGB888 */

		ystride = width * bpp + 1;

		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
		MIPI_OUTP(MIPI_DSI_BASE + 0x5c, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x54, data);

		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
		data = height << 16 | width;
		MIPI_OUTP(MIPI_DSI_BASE + 0x60, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x58, data);
	}

	mipi_dsi_host_init(mipi);

	if (mipi->force_clk_lane_hs) {
		u32 tmp;

		tmp = MIPI_INP(MIPI_DSI_BASE + 0xA8);
		tmp |= (1<<28);
		MIPI_OUTP(MIPI_DSI_BASE + 0xA8, tmp);
		wmb();
	}

	if (mdp_rev >= MDP_REV_41)
		mutex_lock(&mfd->dma->ov_mutex);
	else
		down(&mfd->dma->mutex);
#ifdef CONFIG_HUAWEI_KERNEL
	/* there was a wrong sequence before this point, so add 5 ms in the hope that the lcd panel can enter the right mode */
	mdelay(5);	
	/* request data line and clock line to exit the ulps mode */				
	if (mipi->data_lane3)
		datamask |= 1<<11;
	if (mipi->data_lane2)
		datamask |= 1<<10;
	if (mipi->data_lane1)
		datamask |= 1<<9;
	if (mipi->data_lane0)
		datamask |= 1<<8;				
	MIPI_OUTP(MIPI_DSI_BASE + 0xA8, datamask | (1<<12));
	/* the mipi spec requires at least 1 ms */
	mdelay(2);
	/* fully exit the ulps mode */
	MIPI_OUTP(MIPI_DSI_BASE + 0xA8, 0);
#endif

	ret = panel_next_on(pdev);

	mipi_dsi_op_mode_config(mipi->mode);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (pinfo->lcd.vsync_enable) {
			if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
				if (mdp_rev >= MDP_REV_41) {
					if (gpio_request(vsync_gpio,
						"MDP_VSYNC") == 0)
						gpio_direction_input(
							vsync_gpio);
					else
						pr_err("%s: unable to \
							request gpio=%d\n",
							__func__, vsync_gpio);
				} else if (mdp_rev == MDP_REV_303) {
					if (!tlmm_settings && gpio_request(
						vsync_gpio, "MDP_VSYNC") == 0) {
						ret = gpio_tlmm_config(
							GPIO_CFG(
							vsync_gpio, 1,
							GPIO_CFG_INPUT,
							GPIO_CFG_PULL_DOWN,
							GPIO_CFG_2MA),
							GPIO_CFG_ENABLE);

						if (ret) {
							pr_err(
							"%s: unable to config \
							tlmm = %d\n",
							__func__, vsync_gpio);
						}
						tlmm_settings = TRUE;

						gpio_direction_input(
							vsync_gpio);
					} else {
						if (!tlmm_settings) {
							pr_err(
							"%s: unable to request \
							gpio=%d\n",
							__func__, vsync_gpio);
						}
					}
				}
Example #27
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if (optval == NULL)
		val=0;
	else {
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else
			val = 0;
	}

	valbool = (val!=0);

	if (ip6_mroute_opt(optname))
		return ip6_mroute_setsockopt(sk, optname, optval, optlen);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_type == SOCK_RAW)
				break;

			if (sk->sk_protocol == IPPROTO_UDP ||
			    sk->sk_protocol == IPPROTO_UDPLITE) {
				struct udp_sock *up = udp_sk(sk);
				if (up->pending == AF_INET6) {
					retv = -EBUSY;
					break;
				}
			} else if (sk->sk_protocol != IPPROTO_TCP)
				break;

			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !ipv6_addr_v4mapped(&np->daddr)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			/*
			 * Sock is moving from IPv6 to IPv4 (sk_prot), so
			 * remove it from the refcnt debug socks count in the
			 * original family...
			 */
			sk_refcnt_debug_dec(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct inet_connection_sock *icsk = inet_csk(sk);
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, &tcp_prot, 1);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				icsk->icsk_af_ops = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
			} else {
				struct proto *prot = &udp_prot;

				if (sk->sk_protocol == IPPROTO_UDPLITE)
					prot = &udplite_prot;
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, prot, 1);
				local_bh_enable();
				sk->sk_prot = prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			kfree_skb(pktopt);

			sk->sk_destruct = inet_sock_destruct;
			/*
			 * ... and add it to the refcnt debug socks count
			 * in the new family. -acme
			 */
			sk_refcnt_debug_inc(sk);
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (optlen < sizeof(int) ||
		    inet_sk(sk)->inet_num)
			goto e_inval;
		np->ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_RECVPKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_2292PKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxoinfo = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxohlim = valbool;
		retv = 0;
		break;

	case IPV6_RECVRTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.srcrt = valbool;
		retv = 0;
		break;

	case IPV6_2292RTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.osrcrt = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.ohopopts = valbool;
		retv = 0;
		break;

	case IPV6_RECVDSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_2292DSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.odstopts = valbool;
		retv = 0;
		break;

	case IPV6_TCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < -1 || val > 0xff)
			goto e_inval;
		/* RFC 3542, 6.5: default traffic class of 0x0 */
		if (val == -1)
			val = 0;
		np->tclass = val;
		retv = 0;
		break;

	case IPV6_RECVTCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxtclass = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_RECVPATHMTU:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxpmtu = valbool;
		retv = 0;
		break;

	case IPV6_TRANSPARENT:
		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
			retv = -EPERM;
			break;
		}
		if (optlen < sizeof(int))
			goto e_inval;
		/* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
		inet_sk(sk)->transparent = valbool;
		retv = 0;
		break;

	case IPV6_RECVORIGDSTADDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxorigdstaddr = valbool;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
	case IPV6_RTHDRDSTOPTS:
	case IPV6_RTHDR:
	case IPV6_DSTOPTS:
	{
		struct ipv6_txoptions *opt;

		/* remove any sticky options header with a zero option
		 * length, per RFC3542.
		 */
		if (optlen == 0)
			optval = NULL;
		else if (optval == NULL)
			goto e_inval;
		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
			 optlen & 0x7 || optlen > 8 * 255)
			goto e_inval;

		/* hop-by-hop / destination options are privileged option */
		retv = -EPERM;
		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
			break;

		opt = ipv6_renew_options(sk, np->opt, optname,
					 (struct ipv6_opt_hdr __user *)optval,
					 optlen);
		if (IS_ERR(opt)) {
			retv = PTR_ERR(opt);
			break;
		}

		/* routing header option needs extra check */
		retv = -EINVAL;
		if (optname == IPV6_RTHDR && opt && opt->srcrt) {
			struct ipv6_rt_hdr *rthdr = opt->srcrt;
			switch (rthdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			case IPV6_SRCRT_TYPE_2:
				if (rthdr->hdrlen != 2 ||
				    rthdr->segments_left != 1)
					goto sticky_done;

				break;
#endif
			default:
				goto sticky_done;
			}
		}

		retv = 0;
		opt = ipv6_update_options(sk, opt);
sticky_done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}

	case IPV6_PKTINFO:
	{
		struct in6_pktinfo pkt;

		if (optlen == 0)
			goto e_inval;
		else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
			goto e_inval;

		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
				retv = -EFAULT;
				break;
		}
		if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
			goto e_inval;

		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
		np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
		retv = 0;
		break;
	}

	case IPV6_2292PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi6 fl6;
		int junk;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.flowi6_mark = sk->sk_mark;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void*)(opt+1);

		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
					     &junk, &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		opt = ipv6_update_options(sk, opt);
done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val != valbool)
			goto e_inval;
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			np->ucast_oif = 0;
			retv = 0;
			break;
		}

		dev = dev_get_by_index(net, ifindex);
		retv = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		retv = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		np->ucast_oif = ifindex;
		retv = 0;
		break;
	}

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;

		if (val) {
			struct net_device *dev;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
				goto e_inval;

			dev = dev_get_by_index(net, val);
			if (!dev) {
				retv = -ENODEV;
				break;
			}
			dev_put(dev);
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		if (optlen < sizeof(struct group_req))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
				&psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
				&psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen < sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
				&psin6->sin6_addr);
			/* prior join w/ different source is ok */
			if (retv && retv != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen,GFP_KERNEL);
		if (!gsf) {
			retv = -ENOBUFS;
			break;
		}
		retv = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			kfree(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);

		break;
	}
	case IPV6_ROUTER_ALERT:
		if (optlen < sizeof(int))
			goto e_inval;
		retv = ip6_ra_control(sk, val);
		break;
	case IPV6_MTU_DISCOVER:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		if (optlen < sizeof(int))
			goto e_inval;
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IPV6_ADDR_PREFERENCES:
	    {
		unsigned int pref = 0;
		unsigned int prefmask = ~0;

		if (optlen < sizeof(int))
			goto e_inval;

		retv = -EINVAL;

		/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
		switch (val & (IPV6_PREFER_SRC_PUBLIC|
			       IPV6_PREFER_SRC_TMP|
			       IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
		case IPV6_PREFER_SRC_PUBLIC:
			pref |= IPV6_PREFER_SRC_PUBLIC;
			break;
		case IPV6_PREFER_SRC_TMP:
			pref |= IPV6_PREFER_SRC_TMP;
			break;
		case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
			break;
		case 0:
			goto pref_skip_pubtmp;
		default:
			goto e_inval;
		}

		prefmask &= ~(IPV6_PREFER_SRC_PUBLIC|
			      IPV6_PREFER_SRC_TMP);
pref_skip_pubtmp:

		/* check HOME/COA conflicts */
		switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) {
		case IPV6_PREFER_SRC_HOME:
			break;
		case IPV6_PREFER_SRC_COA:
			pref |= IPV6_PREFER_SRC_COA;
		case 0:
			goto pref_skip_coa;
		default:
			goto e_inval;
		}

		prefmask &= ~IPV6_PREFER_SRC_COA;
pref_skip_coa:

		/* check CGA/NONCGA conflicts */
		switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
		case IPV6_PREFER_SRC_CGA:
		case IPV6_PREFER_SRC_NONCGA:
		case 0:
			break;
		default:
			goto e_inval;
		}

		np->srcprefs = (np->srcprefs & prefmask) | pref;
		retv = 0;

		break;
	    }
	case IPV6_MINHOPCOUNT:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		np->min_hopcount = val;
		retv = 0;
		break;
	case IPV6_DONTFRAG:
		np->dontfrag = valbool;
		retv = 0;
		break;
	}

	release_sock(sk);

	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
Example #28
static int rst_socket_tcp(struct cpt_sock_image *si, loff_t pos, struct sock *sk,
			  struct cpt_context *ctx)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	tp->pred_flags = si->cpt_pred_flags;
	tp->rcv_nxt = si->cpt_rcv_nxt;
	tp->snd_nxt = si->cpt_snd_nxt;
	tp->snd_una = si->cpt_snd_una;
	tp->snd_sml = si->cpt_snd_sml;
	tp->rcv_tstamp = tcp_jiffies_import(si->cpt_rcv_tstamp);
	tp->lsndtime = tcp_jiffies_import(si->cpt_lsndtime);
	tp->tcp_header_len = si->cpt_tcp_header_len;
	inet_csk(sk)->icsk_ack.pending = si->cpt_ack_pending;
	inet_csk(sk)->icsk_ack.quick = si->cpt_quick;
	inet_csk(sk)->icsk_ack.pingpong = si->cpt_pingpong;
	inet_csk(sk)->icsk_ack.blocked = si->cpt_blocked;
	inet_csk(sk)->icsk_ack.ato = si->cpt_ato;
	inet_csk(sk)->icsk_ack.timeout = jiffies_import(si->cpt_ack_timeout);
	inet_csk(sk)->icsk_ack.lrcvtime = tcp_jiffies_import(si->cpt_lrcvtime);
	inet_csk(sk)->icsk_ack.last_seg_size = si->cpt_last_seg_size;
	inet_csk(sk)->icsk_ack.rcv_mss = si->cpt_rcv_mss;
	tp->snd_wl1 = si->cpt_snd_wl1;
	tp->snd_wnd = si->cpt_snd_wnd;
	tp->max_window = si->cpt_max_window;
	inet_csk(sk)->icsk_pmtu_cookie = si->cpt_pmtu_cookie;
	tp->mss_cache = si->cpt_mss_cache;
	tp->rx_opt.mss_clamp = si->cpt_mss_clamp;
	inet_csk(sk)->icsk_ext_hdr_len = si->cpt_ext_header_len;
	inet_csk(sk)->icsk_ca_state = si->cpt_ca_state;
	inet_csk(sk)->icsk_retransmits = si->cpt_retransmits;
	tp->reordering = si->cpt_reordering;
	tp->frto_counter = si->cpt_frto_counter;
	tp->frto_highmark = si->cpt_frto_highmark;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	// // tp->adv_cong = si->cpt_adv_cong;
#endif
	inet_csk(sk)->icsk_accept_queue.rskq_defer_accept = si->cpt_defer_accept;
	inet_csk(sk)->icsk_backoff = si->cpt_backoff;
	tp->srtt = si->cpt_srtt;
	tp->mdev = si->cpt_mdev;
	tp->mdev_max = si->cpt_mdev_max;
	tp->rttvar = si->cpt_rttvar;
	tp->rtt_seq = si->cpt_rtt_seq;
	inet_csk(sk)->icsk_rto = si->cpt_rto;
	tp->packets_out = si->cpt_packets_out;
	tp->retrans_out = si->cpt_retrans_out;
	tp->lost_out = si->cpt_lost_out;
	tp->sacked_out = si->cpt_sacked_out;
	tp->fackets_out = si->cpt_fackets_out;
	tp->snd_ssthresh = si->cpt_snd_ssthresh;
	tp->snd_cwnd = si->cpt_snd_cwnd;
	tp->snd_cwnd_cnt = si->cpt_snd_cwnd_cnt;
	tp->snd_cwnd_clamp = si->cpt_snd_cwnd_clamp;
	tp->snd_cwnd_used = si->cpt_snd_cwnd_used;
	tp->snd_cwnd_stamp = tcp_jiffies_import(si->cpt_snd_cwnd_stamp);
	inet_csk(sk)->icsk_timeout = tcp_jiffies_import(si->cpt_timeout);
	tp->rcv_wnd = si->cpt_rcv_wnd;
	tp->rcv_wup = si->cpt_rcv_wup;
	tp->write_seq = si->cpt_write_seq;
	tp->pushed_seq = si->cpt_pushed_seq;
	tp->copied_seq = si->cpt_copied_seq;
	tp->rx_opt.tstamp_ok = si->cpt_tstamp_ok;
	tp->rx_opt.wscale_ok = si->cpt_wscale_ok;
	tp->rx_opt.sack_ok = si->cpt_sack_ok;
	tp->rx_opt.saw_tstamp = si->cpt_saw_tstamp;
	tp->rx_opt.snd_wscale = si->cpt_snd_wscale;
	tp->rx_opt.rcv_wscale = si->cpt_rcv_wscale;
	tp->nonagle = si->cpt_nonagle;
	tp->keepalive_probes = si->cpt_keepalive_probes;
	tp->rx_opt.rcv_tsval = si->cpt_rcv_tsval;
	tp->rx_opt.rcv_tsecr = si->cpt_rcv_tsecr;
	tp->rx_opt.ts_recent = si->cpt_ts_recent;
	tp->rx_opt.ts_recent_stamp = si->cpt_ts_recent_stamp;
	tp->rx_opt.user_mss = si->cpt_user_mss;
	tp->rx_opt.dsack = si->cpt_dsack;
	tp->duplicate_sack[0].start_seq = si->cpt_sack_array[0];
	tp->duplicate_sack[0].end_seq = si->cpt_sack_array[1];
	tp->selective_acks[0].start_seq = si->cpt_sack_array[2];
	tp->selective_acks[0].end_seq = si->cpt_sack_array[3];
	tp->selective_acks[1].start_seq = si->cpt_sack_array[4];
	tp->selective_acks[1].end_seq = si->cpt_sack_array[5];
	tp->selective_acks[2].start_seq = si->cpt_sack_array[6];
	tp->selective_acks[2].end_seq = si->cpt_sack_array[7];
	tp->selective_acks[3].start_seq = si->cpt_sack_array[8];
	tp->selective_acks[3].end_seq = si->cpt_sack_array[9];

	tp->window_clamp = si->cpt_window_clamp;
	tp->rcv_ssthresh = si->cpt_rcv_ssthresh;
	inet_csk(sk)->icsk_probes_out = si->cpt_probes_out;
	tp->rx_opt.num_sacks = si->cpt_num_sacks;
	tp->advmss = si->cpt_advmss;
	inet_csk(sk)->icsk_syn_retries = si->cpt_syn_retries;
	tp->ecn_flags = si->cpt_ecn_flags;
	tp->prior_ssthresh = si->cpt_prior_ssthresh;
	tp->high_seq = si->cpt_high_seq;
	tp->retrans_stamp = si->cpt_retrans_stamp;
	tp->undo_marker = si->cpt_undo_marker;
	tp->undo_retrans = si->cpt_undo_retrans;
	tp->urg_seq = si->cpt_urg_seq;
	tp->urg_data = si->cpt_urg_data;
	inet_csk(sk)->icsk_pending = si->cpt_pending;
	tp->snd_up = si->cpt_snd_up;
	tp->keepalive_time = si->cpt_keepalive_time;
	tp->keepalive_intvl = si->cpt_keepalive_intvl;
	tp->linger2 = si->cpt_linger2;

	sk->sk_send_head = NULL;
	for (skb = skb_peek(&sk->sk_write_queue);
	     skb && skb != (struct sk_buff*)&sk->sk_write_queue;
	     skb = skb->next) {
		if (!after(tp->snd_nxt, TCP_SKB_CB(skb)->seq)) {
			sk->sk_send_head = skb;
			break;
		}
	}

	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) {
		struct inet_sock *inet = inet_sk(sk);
		if (inet->num == 0) {
			cpt_object_t *lobj = NULL;

			if ((int)si->cpt_parent != -1)
				lobj = lookup_cpt_obj_byindex(CPT_OBJ_SOCKET, si->cpt_parent, ctx);

			if (lobj && lobj->o_obj) {
				inet->num = ntohs(inet->sport);
				local_bh_disable();
				__inet_inherit_port(lobj->o_obj, sk);
				local_bh_enable();
				dprintk_ctx("port inherited from parent\n");
			} else {
				struct sock *lsk = find_parent(inet->sport, ctx);
				if (lsk) {
					inet->num = ntohs(inet->sport);
					local_bh_disable();
					__inet_inherit_port(lsk, sk);
					local_bh_enable();
					dprintk_ctx("port inherited\n");
				} else {
					eprintk_ctx("we are kinda lost...\n");
				}
			}
		}

		sk->sk_prot->hash(sk);

		if (inet_csk(sk)->icsk_ack.pending&ICSK_ACK_TIMER)
			sk_reset_timer(sk, &inet_csk(sk)->icsk_delack_timer, inet_csk(sk)->icsk_ack.timeout);
		if (inet_csk(sk)->icsk_pending)
			sk_reset_timer(sk, &inet_csk(sk)->icsk_retransmit_timer,
				       inet_csk(sk)->icsk_timeout);
		if (sock_flag(sk, SOCK_KEEPOPEN)) {
			unsigned long expires = jiffies_import(si->cpt_ka_timeout);
			if (time_after(jiffies, expires))
				expires = jiffies + HZ;
			sk_reset_timer(sk, &sk->sk_timer, expires);
		}
	}

	if (sk->sk_family == AF_INET6)
		sk->sk_gso_type = SKB_GSO_TCPV6;
	else
		sk->sk_gso_type = SKB_GSO_TCPV4;

	return 0;
}
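
/*
 * A minimal sketch of the pattern shared by the examples in this listing:
 * state that a softirq handler also touches is only accessed between
 * local_bh_disable() and local_bh_enable(). The names below are
 * illustrative only, not real kernel symbols.
 */
static unsigned long demo_softirq_shared;	/* also updated from softirq */

static void demo_update(unsigned long delta)
{
	local_bh_disable();	/* keep softirqs off this CPU while we touch it */
	demo_softirq_shared += delta;
	local_bh_enable();	/* re-enable and run any pending softirqs */
}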
Example #29
int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
		    int optlen)
{
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if (level == SOL_IP && sk->type != SOCK_RAW)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if (level != SOL_IPV6)
		goto out;

	if (optval == NULL)
		val = 0;
	else if (get_user(val, (int *) optval))
		return -EFAULT;

	valbool = (val != 0);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->protocol != IPPROTO_UDP &&
			    sk->protocol != IPPROTO_TCP)
				break;

			if (sk->state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (!(ipv6_addr_type(&np->daddr) & IPV6_ADDR_MAPPED)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

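			/* Swap the per-protocol use counters and switch the
			 * socket over to the IPv4 protocol ops; the counters
			 * are presumably also touched from softirq context,
			 * hence the local_bh_disable()/local_bh_enable()
			 * pairs below. */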
			if (sk->protocol == IPPROTO_TCP) {
				struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

				local_bh_disable();
				sock_prot_dec_use(sk->prot);
				sock_prot_inc_use(&tcp_prot);
				local_bh_enable();
				sk->prot = &tcp_prot;
				tp->af_specific = &ipv4_specific;
				sk->socket->ops = &inet_stream_ops;
				sk->family = PF_INET;
				tcp_sync_mss(sk, tp->pmtu_cookie);
			} else {
				local_bh_disable();
				sock_prot_dec_use(sk->prot);
				sock_prot_inc_use(&udp_prot);
				local_bh_enable();
				sk->prot = &udp_prot;
				sk->socket->ops = &inet_dgram_ops;
				sk->family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			if (pktopt)
				kfree_skb(pktopt);

			sk->destruct = inet_sock_destruct;
#ifdef INET_REFCNT_DEBUG
			atomic_dec(&inet6_sock_nr);
#endif
			MOD_DEC_USE_COUNT;
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_PKTINFO:
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_HOPLIMIT:
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_RTHDR:
		if (val < 0 || val > 2)
			goto e_inval;
		np->rxopt.bits.srcrt = val;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_AUTHHDR:
		np->rxopt.bits.authhdr = valbool;
		retv = 0;
		break;

	case IPV6_DSTOPTS:
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi fl;
		int junk;

		fl.fl6_flowlabel = 0;
		fl.oif = sk->bound_dev_if;

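		/* The option buffer is parsed as ancillary data (cmsg) and
		 * converted into a freshly allocated ipv6_txoptions. */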
		if (optlen == 0)
			goto update;

		/* 1K would probably be excessive for a single header, but is
		 * surely not enough for the full set: at 2K per standard
		 * header type that comes to 16K, so allow up to 64K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void*)(opt+1);

		retv = datagram_send_ctl(&msg, &fl, opt, &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		if (sk->type == SOCK_STREAM) {
			if (opt) {
				struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
				if (!((1 << sk->state) & (TCPF_LISTEN | TCPF_CLOSE))
				    && sk->daddr != LOOPBACK4_IPV6) {
					tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
					tcp_sync_mss(sk, tp->pmtu_cookie);
				}
			}
			opt = xchg(&np->opt, opt);
			sk_dst_reset(sk);
		} else {
			write_lock(&sk->dst_lock);
			opt = xchg(&np->opt, opt);
			write_unlock(&sk->dst_lock);
			sk_dst_reset(sk);
		}

done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->type == SOCK_STREAM)
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_MULTICAST_IF:
		if (sk->type == SOCK_STREAM)
			goto e_inval;
		if (sk->bound_dev_if && sk->bound_dev_if != val)
			goto e_inval;

		if (__dev_get_by_index(val) == NULL) {
			retv = -ENODEV;
			break;
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_ROUTER_ALERT:
		retv = ip6_ra_control(sk, val, NULL);
		break;
	case IPV6_MTU_DISCOVER:
		if (val < 0 || val > 2)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;

#ifdef CONFIG_NETFILTER
	default:
		retv = nf_setsockopt(sk, PF_INET6, optname, optval,
				     optlen);
		break;
#endif

	}
	release_sock(sk);

out:
	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
Example #30
static int mipi_dsi_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_panel_info *pinfo;
	struct mipi_panel_info *mipi;
	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
	u32 ystride, bpp, data;
	u32 dummy_xres, dummy_yres;
	int target_type = 0;

	PR_DISP_DEBUG(" %s\n", __func__);

	mfd = platform_get_drvdata(pdev);
	fbi = mfd->fbi;
	var = &fbi->var;
	pinfo = &mfd->panel_info;

	if (mfd->init_mipi_lcd != 0) {
		if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
			mipi_dsi_pdata->dsi_power_save(1);
	}

	local_bh_disable();
	mipi_dsi_ahb_ctrl(1);
	local_bh_enable();

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

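	/* Pulse the controller reset; offset 0x114 is assumed here to be
	 * the DSI soft-reset register. */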
	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 1);
	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 0);

	hbp = var->left_margin;
	hfp = var->right_margin;
	vbp = var->upper_margin;
	vfp = var->lower_margin;
	hspw = var->hsync_len;
	vspw = var->vsync_len;
	width = mfd->panel_info.xres;
	height = mfd->panel_info.yres;

	mipi_dsi_phy_ctrl(1);

	if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata)
		target_type = mipi_dsi_pdata->target_type;

	mipi_dsi_phy_init(0, &(mfd->panel_info), target_type);

	local_bh_disable();
	mipi_dsi_clk_enable();
	local_bh_enable();

	mipi = &mfd->panel_info.mipi;
	if (mfd->panel_info.type == MIPI_VIDEO_PANEL) {
		dummy_xres = mfd->panel_info.mipi.xres_pad;
		dummy_yres = mfd->panel_info.mipi.yres_pad;

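		/* Program the active-area and total-period timing registers;
		 * MDP revisions >= 4.1 appear to account for the sync pulse
		 * widths explicitly, while older revisions do not. */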
		if (mdp_rev >= MDP_REV_41) {
			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hspw + hbp + width + dummy_xres) << 16 |
				(hspw + hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vspw + vbp + height + dummy_yres) << 16 |
				(vspw + vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vspw + vbp + height + dummy_yres +
					vfp - 1) << 16 | (hspw + hbp +
					width + dummy_xres + hfp - 1));
		} else {
			/* DSI_LANE_SWAP_CTRL */
			MIPI_OUTP(MIPI_DSI_BASE + 0x00ac, mipi->dlane_swap);

			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hbp + width + dummy_xres) << 16 | (hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vbp + height + dummy_yres) << 16 | (vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vbp + height + dummy_yres + vfp) << 16 |
					(hbp + width + dummy_xres + hfp));
		}

		MIPI_OUTP(MIPI_DSI_BASE + 0x2c, (hspw << 16));
		MIPI_OUTP(MIPI_DSI_BASE + 0x30, 0);
		MIPI_OUTP(MIPI_DSI_BASE + 0x34, (vspw << 16));

	} else {		/* command mode */
		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
			bpp = 2;
		else
			bpp = 3;	/* Default format set to RGB888 */

		ystride = width * bpp + 1;

		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
		MIPI_OUTP(MIPI_DSI_BASE + 0x5c, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x54, data);

		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
		data = height << 16 | width;
		MIPI_OUTP(MIPI_DSI_BASE + 0x60, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x58, data);
	}

	mipi_dsi_host_init(mipi);

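	/* Optionally force the clock lane to stay in HS mode; bit 28 of
	 * the lane-control register at offset 0xA8 is assumed to select
	 * this behaviour. */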
	if (mipi->force_clk_lane_hs) {
		u32 tmp;

		tmp = MIPI_INP(MIPI_DSI_BASE + 0xA8);
		tmp |= (1<<28);
		MIPI_OUTP(MIPI_DSI_BASE + 0xA8, tmp);
		wmb();
	}

	/* PrimoDS/DD Sharp panel workaround to fix the abnormal waveform */
	if ((panel_type == PANEL_ID_PRIMODS_SHARP || panel_type == PANEL_ID_PRIMODS_SHARP_C1 ||
		panel_type == PANEL_ID_PRIMODD_SHARP || panel_type == PANEL_ID_PRIMODD_SHARP_C1) &&
		mfd->init_mipi_lcd == 0) {
		mipi_orise_lcd_pre_off(pdev);
		if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
			mipi_dsi_pdata->dsi_power_save(1);
	}

	ret = panel_next_on(pdev);

	mipi_dsi_op_mode_config(mipi->mode);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (pinfo->lcd.vsync_enable) {
			if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
				if (mdp_rev >= MDP_REV_41) {
					if (gpio_request(vsync_gpio,
						"MDP_VSYNC") == 0)
						gpio_direction_input(
							vsync_gpio);
					else
						pr_err("%s: unable to \
							request gpio=%d\n",
							__func__, vsync_gpio);
				} else if (mdp_rev == MDP_REV_303) {
					if (!tlmm_settings && gpio_request(
						vsync_gpio, "MDP_VSYNC") == 0) {
						ret = gpio_tlmm_config(
							GPIO_CFG(
							vsync_gpio, 1,
							GPIO_CFG_INPUT,
							GPIO_CFG_PULL_DOWN,
							GPIO_CFG_2MA),
							GPIO_CFG_ENABLE);

						if (ret) {
							pr_err("%s: unable to config tlmm = %d\n",
							       __func__, vsync_gpio);
						}
						tlmm_settings = TRUE;

						gpio_direction_input(
							vsync_gpio);
					} else {
						if (!tlmm_settings) {
							pr_err("%s: unable to request gpio=%d\n",
							       __func__, vsync_gpio);
						}
					}
				}