Example #1
0
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}
Example #2
0
/* Returns 0 on success, or a negative error code from ip_defrag(). */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err)
		ip_send_check(ip_hdr(skb));

	return err;
}
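The pattern above — a local_bh_disable()/local_bh_enable() pair bracketing a call that touches state also reached from softirq context — recurs throughout these examples. A minimal hedged sketch of the same idiom (my_stats and my_stats_lock are illustrative names, not from the example):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_stats_lock);
static unsigned long my_stats;

/* Process-context update of data that a softirq handler also touches. */
static void my_stats_inc(void)
{
	local_bh_disable();		/* keep softirqs off this CPU */
	spin_lock(&my_stats_lock);	/* serialize against other CPUs */
	my_stats++;
	spin_unlock(&my_stats_lock);
	local_bh_enable();		/* run any softirqs that became pending */
}

The spin_lock_bh()/spin_unlock_bh() helpers combine the two steps, as several later examples do.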
Example #3
0
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
	 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
	 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		ret = IRQ_HANDLED;
		spin_lock_bh(&engine->lock);
		req = engine->req;
		spin_unlock_bh(&engine->lock);
		if (req) {
			ctx = crypto_tfm_ctx(req->tfm);
			res = ctx->ops->process(req, status & mask);
			if (res != -EINPROGRESS) {
				spin_lock_bh(&engine->lock);
				engine->req = NULL;
				mv_cesa_dequeue_req_unlocked(engine);
				spin_unlock_bh(&engine->lock);
				ctx->ops->cleanup(req);
				local_bh_disable();
				req->complete(req, res);
				local_bh_enable();
			} else {
				ctx->ops->step(req);
			}
		}
	}

	return ret;
}
Example #4
0
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail) {
		rcp->curtail = &rcp->rcucblist;
	}
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
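For context, the callbacks that __rcu_reclaim() invokes above are queued with call_rcu(). A hedged sketch of the producer side (my_obj and my_rcu_free are illustrative names):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int payload;
	struct rcu_head rcu;
};

/* Invoked after a grace period; frees the enclosing object. */
static void my_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

/* After unlinking obj so no new readers can find it: */
/* call_rcu(&obj->rcu, my_rcu_free); */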
Example #5
0
static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
			      struct scatterlist *sg, int blen)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;

	local_bh_disable();
	local_irq_disable();

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		if (enc)
			ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
		else
			ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);

		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		if (enc)
			ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
		else
			ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

out:
	local_irq_enable();
	local_bh_enable();

	if (ret == 0)
		printk("1 operation in %lu cycles (%d bytes)\n",
		       (cycles + 4) / 8, blen);

	return ret;
}
Example #6
0
/*******************************************************************************
 Function    : ddos_cpu_global_syscall
 Description : DDoS global syscall control
 Input       : cmd - syscall command number; buf - data passed down; size - length of the data
 Output      : none
 Return      : DDOS_GLOBAL_SYSCALL_SUCCESS - syscall succeeded
               DDOS_GLOBAL_SYSCALL_ERR     - syscall failed
--------------------------------------------------------------------------------
 Last modified:
 Author      : 田佳星
 Purpose     : newly added
 Date        : 2012-12-05
*******************************************************************************/
s32 ddos_cpu_global_syscall( unsigned int cmd, void* buf, u32 size )
{
    s32 ret = DDOS_GLOBAL_SYSCALL_SUCCESS;

    if( NULL == buf )
    {
        return DDOS_GLOBAL_SYSCALL_ERR;
    }
    
    local_bh_disable();

    switch ( cmd )
    {
        case DDOS_GLOBAL_SYN_FLOOD_DOWNLOAD:
        {
            ret = ddos_cpu_global_synflood_protect_cfg((u8 *)buf);
            break;
        }

        case DDOS_GLOBAL_TCP_STATE_PROTECT_DOWNLOAD:
        {
            ret = ddos_cpu_global_tcp_state_protect_cfg((ddos_global_tcp_state_sw_s *)buf);
            break;
        }

        case DDOS_FPGA_GLOBAL_SF_REG:
        {
            ret = ddos_fpga_download_global_synflood_reg( buf );
            break;
        }

        case DDOS_UMC_LOG_CFG_DOWNLOAD:
        {
            ret = ddos_umc_log_cfg( buf );
            break;
        }
 
        default:
        {
            ret = DDOS_GLOBAL_SYSCALL_ERR;
            break;
        }
    }

    local_bh_enable();

    return ret;
}
Example #7
0
static struct sk_buff *
nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	skb_orphan(skb);

	local_bh_disable();
	skb = ip_defrag(skb, user);
	local_bh_enable();

	if (skb)
		ip_send_check(ip_hdr(skb));

	return skb;
}
Example #8
0
static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *off)
{
	struct seq_file *seq = file->private_data;
	struct qdio_q *q = seq->private;

	if (!q)
		return 0;

	if (q->is_input_q)
		xchg(q->irq_ptr->dsci, 1);
	local_bh_disable();
	tasklet_schedule(&q->tasklet);
	local_bh_enable();
	return count;
}
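A hedged sketch of the other half of this pattern — declaring the tasklet that tasklet_schedule() queues above, using the classic (pre-5.9) API; the handler name and data argument are assumptions:

#include <linux/interrupt.h>

static void my_tasklet_fn(unsigned long data)
{
	/* Runs once per schedule, in softirq (BH) context. */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

/* e.g. from an interrupt handler: tasklet_schedule(&my_tasklet); */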
Example #9
0
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
	struct sock *sk;

	local_bh_disable();

	sk = icmpv6_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return NULL;
	}
	return sk;
}
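On success this returns with the per-net ICMPv6 socket's BH lock held and softirqs still disabled, so the caller must undo both. The matching unlock in the kernel is essentially a one-liner (sketch):

static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	/* spin_unlock_bh() pairs the spin_trylock() and local_bh_disable() above. */
	spin_unlock_bh(&sk->sk_lock.slock);
}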
Example #10
0
static void sync_backup_loop(void)
{
	struct socket *sock;
	char *buf;
	int len;

	if (!(buf=kmalloc(SYNC_MESG_MAX_SIZE, GFP_ATOMIC))) {
		IP_VS_ERR("sync_backup_loop: kmalloc error\n");
		return;
	}

	/* create the receiving multicast socket */
	sock = make_receive_sock();
	if (!sock)
		goto out;

	for (;;) {
		/* do we have data now? */
		while (!skb_queue_empty(&(sock->sk->receive_queue))) {
			if ((len=ip_vs_receive(sock, buf,
					       SYNC_MESG_MAX_SIZE))<=0) {
				IP_VS_ERR("receiving message error\n");
				break;
			}
			/* disable bottom halves, because ip_vs_process_message()
			   accesses data shared with softirq context while
			   getting/creating connections */
			local_bh_disable();
			ip_vs_process_message(buf, len);
			local_bh_enable();
		}

		if (stop_sync)
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
		__set_current_state(TASK_RUNNING);
	}

	/* release the receiving multicast socket */
	sock_release(sock);

  out:
	kfree(buf);
}
Example #11
0
/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	local_bh_disable();

	update_thread_flag(TIF_SVE,
			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		/* Clean guest FP state to memory and invalidate cpu view */
		fpsimd_save();
		fpsimd_flush_cpu_state();
	} else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		/* Ensure user trap controls are correctly restored */
		fpsimd_bind_task_to_cpu();
	}

	local_bh_enable();
}
Example #12
0
/* This function is called during module unload. */
static void
ssh_virtual_adapter_destroy_all(SshInterceptor interceptor)
{
  SshVirtualAdapter adapter;
  Boolean check_again;
  SshUInt32 i;

 restart:
  local_bh_disable();
  ssh_kernel_mutex_lock(interceptor->interceptor_lock);

  check_again = FALSE;
  for (i = 0; i < SSH_LINUX_MAX_VIRTUAL_ADAPTERS; i++)
    {
      adapter = interceptor->virtual_adapters[i];
      if (adapter == NULL)
	continue;

      if (!adapter->initialized)
	{
	  /* Initialization is underway, mark adapter to be destroyed. */
	  adapter->destroyed = 1;
	  check_again = TRUE;
	  continue;
	}
      
      /* Remove adapter from table. */
      interceptor->virtual_adapters[i] = NULL;

      /* Detach and destroy adapter. */
      ssh_virtual_adapter_destroy(interceptor, adapter);

      /* Unlock and restart. */
      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
      local_bh_enable();
      goto restart;
    }

  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
  local_bh_enable();

  if (check_again)
    goto restart;
}
Example #13
0
/*****************************************************************************
 Function    : acl_rfc_prepare_user_data
 Description : prepare to run the RFC algorithm and initialize its variables
 Input       : none
 Output      : cnt  ---- number of rules
 Return      : ERROR_SUCCESS ---- not found
               else          ---- action position of the matched rule
 -----------------------------------------------------------------------------
 Last modified:
 Author      : Fuzhiqing
 Purpose     : merge this function with acl_rfc_init
 Date        : 2011-09-24
*****************************************************************************/
s32 acl_rfc_prepare_user_data(void)
{
    /* Set acl_rfc_ready to 0 and sleep 100ms first, to make sure no packet is still accessing the RFC table before it is recompiled. */
    acl_rfc_ready = ACL_RFC_NOT_READY;
    /* msleep() sleeps, so softirqs must be re-enabled first. */
    __local_bh_enable();
    msleep(100);
    local_bh_disable();

    printk("<3>Start to compile RFC ACL...\n");

    gs_rfc_rule_num = 0;
    memset (gs_action_bitmap, 0, sizeof(gs_action_bitmap));
    memset (g_acl_rfc_result, 0, sizeof(g_acl_rfc_result));
    gs_current_num = 0;
    memset(&gs_cur_rule_prio, 0, sizeof(gs_cur_rule_prio));
    memset(&acl_rfc_k_size, 0, sizeof(acl_rfc_k_size));
    return ERROR_SUCCESS;
}
Example #14
0
static void
ssh_interceptor_uninit_engine(SshInterceptor interceptor)
{
  /* Stop packet processing engine */
  if (interceptor->engine != NULL)
    {
      while (ssh_engine_stop(interceptor->engine) == FALSE)
	{
	  local_bh_enable();
	  schedule();
	  mdelay(300);
	  local_bh_disable();
	}
      interceptor->engine = NULL;
    }

  /* Free packet data structure */
  ssh_interceptor_packet_freelist_uninit(interceptor);
}
Example #15
0
/*
 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
 * Return non-zero if an interrupt was processed, zero otherwise.
 */
static int cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	int irq_context;

	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	if (tpi(NULL) != 1)
		return 0;
	kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
	if (tpi_info->adapter_IO) {
		do_adapter_IO(tpi_info->isc);
		return 1;
	}
	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) != 0) {
		/* Not status pending or not operational. */
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch) {
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	irq_enter();
	spin_lock(sch->lock);
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	spin_unlock(sch->lock);
	irq_exit();
	if (!irq_context)
		_local_bh_enable();
	return 1;
}
Example #16
0
static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
{
	int err;
	const struct iphdr *rxiph;
	struct sk_buff *skb;
	struct dst_entry *dst;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v4_ctl_sk;

	/* Never send a reset in response to a reset. */
	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (skb_rtable(rxskb)->rt_type != RTN_LOCAL)
		return;

	dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
	if (dst == NULL)
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		goto out;

	rxiph = ip_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
								 rxiph->daddr);
	skb_dst_set(skb, dst_clone(dst));

	local_bh_disable();
	bh_lock_sock(ctl_sk);
	err = ip_build_and_send_pkt(skb, ctl_sk,
				    rxiph->daddr, rxiph->saddr, NULL);
	bh_unlock_sock(ctl_sk);

	if (net_xmit_eval(err) == 0) {
		__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
		__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
	}
	local_bh_enable();
out:
	dst_release(dst);
}
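For reference, the bh_lock_sock()/bh_unlock_sock() calls used above are thin wrappers over the socket's softirq spinlock; roughly (a sketch of the include/net/sock.h macros):

#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

which is why the explicit local_bh_disable()/local_bh_enable() pair is still needed around them here.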
Example #17
0
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *sp;

	sp = container_of(wp, struct srcu_struct, srcu_work);
	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(sp->srcu_gp_running, true);
	local_irq_disable();
	lh = sp->srcu_cb_head;
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	local_irq_enable();
	idx = sp->srcu_idx;
	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(sp->srcu_gp_running, false);
	if (READ_ONCE(sp->srcu_cb_head))
		schedule_work(&sp->srcu_work);
}
Example #18
0
static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_counter *this_cpu;
	seqcount_t *myseq;

	local_bh_disable();
	this_cpu = this_cpu_ptr(priv->counter);
	myseq = this_cpu_ptr(&nft_counter_seq);

	write_seqcount_begin(myseq);

	this_cpu->bytes += pkt->skb->len;
	this_cpu->packets++;

	write_seqcount_end(myseq);
	local_bh_enable();
}
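The per-cpu seqcount lets a reader on another CPU fetch a consistent bytes/packets pair without blocking this fast path. A hedged sketch of such a reader (the function name and loop shape are assumptions, modeled on the usual seqcount idiom):

static void fetch_one_cpu(struct nft_counter_percpu_priv *priv,
			  int cpu, u64 *bytes, u64 *packets)
{
	struct nft_counter *c = per_cpu_ptr(priv->counter, cpu);
	seqcount_t *seq = per_cpu_ptr(&nft_counter_seq, cpu);
	unsigned int start;

	do {
		start = read_seqcount_begin(seq);
		*bytes = c->bytes;
		*packets = c->packets;
	} while (read_seqcount_retry(seq, start));
}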
Example #19
0
/*
 * mdp4_dsi_cmd_dma_busy_wait: check dsi link activity
 * dsi link is a shared resource and it can only be used
 * while it is in idle state.
 * ov_mutex need to be acquired before call this function.
 */
void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
{
	unsigned long flag;
	int need_wait = 0;

	if (dsi_clock_timer.function) {
		if (time_after(jiffies, tout_expired)) {
			tout_expired = jiffies + TOUT_PERIOD;
			mod_timer(&dsi_clock_timer, tout_expired);
			tout_expired -= MS_100;
		}
	}

	pr_debug("%s: start pid=%d dsi_clk_on=%d\n",
			__func__, current->pid, mipi_dsi_clk_on);

	/* start dsi clock if necessary */
	if (mipi_dsi_clk_on == 0) {
		local_bh_disable();
		mipi_dsi_turn_on_clks();
		local_bh_enable();
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		if (busy_wait_cnt == 0)
			INIT_COMPLETION(mfd->dma->comp);
		busy_wait_cnt++;
		need_wait++;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (need_wait) {
		/* wait until DMA finishes the current job */
		pr_debug("%s: pending pid=%d dsi_clk_on=%d\n",
				__func__, current->pid, mipi_dsi_clk_on);
		wait_for_completion(&mfd->dma->comp);
	}
	pr_debug("%s: done pid=%d dsi_clk_on=%d\n",
			 __func__, current->pid, mipi_dsi_clk_on);
}
Example #20
0
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DECLARE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	lock_cpu_hotplug();
	down(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	up(&flow_flush_sem);
	unlock_cpu_hotplug();
}
Example #21
0
static noinline void nft_update_chain_stats(const struct nft_chain *chain,
					    const struct nft_pktinfo *pkt)
{
	struct nft_base_chain *base_chain;
	struct nft_stats *stats;

	base_chain = nft_base_chain(chain);
	if (!base_chain->stats)
		return;

	local_bh_disable();
	stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
	if (stats) {
		u64_stats_update_begin(&stats->syncp);
		stats->pkts++;
		stats->bytes += pkt->skb->len;
		u64_stats_update_end(&stats->syncp);
	}
	local_bh_enable();
}
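The u64_stats_update_begin()/u64_stats_update_end() pair above is the writer side of the u64_stats scheme. A hedged sketch of the matching reader (the function name is illustrative):

static void read_chain_stats(const struct nft_stats *stats,
			     u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*pkts  = stats->pkts;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}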
Example #22
0
u32 ff_half_dpi_process(struct sk_buff *skb)
{
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->frag_list = NULL;
	skb_set_dse(skb, DSE_INVALID);

	(void)fpga_proc(skb);

	skb->ucpktaction = 0x0;
	skb->segment_id = 0;
	local_bh_disable();
	skb->ucpktaction = dpi_rcv(skb);
	local_bh_enable();

	return DPI_PACKET_NOT_BYPASS;
}
Example #23
0
u32 netmode_bridge_rcv(u32 hooknum,
					struct sk_buff **pskb,
					const struct net_device *in,
					const struct net_device *out,
					int (*okfn)(struct sk_buff *))
{
    s32 ret;
    struct sk_buff *skb = *pskb;
    (void)hooknum;
    (void)in;
    (void)out;

    if((CONPLAT_SKB_FROM_WAN_OR_LAN != skb->ucpktflag) && (CONPLAT_BR_SKB_FROM_LOCAL != skb->ucpktflag))
    {
        return NF_ACCEPT;
    }

    ckdc_counter_inc(nm,netmode_rcv);

    ret = netmode_iftable_find(skb, NULL, NETMODE_BRIDGE_FLAG);
    if(ERROR_SUCCESS != ret)
    {
        ckdc_counter_inc(nm,netmode_send);
        return NF_ACCEPT;
    }

    local_bh_disable();

    ret = netmode_ucpktflag_proc(&skb, NETMODE_BRIDGE_FLAG, okfn);
    if(0 != ret)
    {
        goto dpi_process;
    }

dpi_process:

    netmode_skb_dpi_process(skb, ret, NETMODE_BRIDGE_FLAG, okfn);
    
    __local_bh_enable();
    return NF_STOLEN;
}
Example #24
0
asmlinkage void __do_softirq(void)
{
    struct softirq_action *h;
    __u32 pending;
    int max_restart = MAX_SOFTIRQ_RESTART;
    int cpu;

    pending = local_softirq_pending();

    local_bh_disable();
    cpu = smp_processor_id();
restart:
    /* Reset the pending bitmask before enabling irqs */
    set_softirq_pending(0);

    local_irq_enable();

    h = softirq_vec;

    do {
        if (pending & 1) {
            trace_kernel_soft_irq_entry((void*)(h - softirq_vec));
            h->action(h);
            trace_kernel_soft_irq_exit((void*)(h - softirq_vec));
            rcu_bh_qsctr_inc(cpu);
        }
        h++;
        pending >>= 1;
    } while (pending);

    local_irq_disable();

    pending = local_softirq_pending();
    if (pending && --max_restart)
        goto restart;

    if (pending)
        wakeup_softirqd();

    __local_bh_enable();
}
Example #25
0
void ssh_virtual_adapter_detach_all(SshInterceptor interceptor)
{
  SshVirtualAdapter adapter;
  SshUInt32 i = 0;
  SshVirtualAdapterDetachCB detach_cb;
  void *adapter_context;

 restart:
  local_bh_disable();
  ssh_kernel_mutex_lock(interceptor->interceptor_lock);
  for (; i < SSH_LINUX_MAX_VIRTUAL_ADAPTERS; i++)
    {
      adapter = interceptor->virtual_adapters[i];
      if (adapter == NULL)
	continue;
      
      detach_cb = NULL;
      adapter_context = NULL;
      if (adapter->detach_cb != NULL)
	{
	  detach_cb = adapter->detach_cb;
	  adapter_context = adapter->adapter_context;
	}

      ssh_virtual_adapter_clear(adapter);
      adapter->attached = 0;

      if (detach_cb != NULL)
	{
	  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
	  local_bh_enable();
	  
	  /* Destroy adapter_context. */
	  (*detach_cb)(adapter_context);
	  
	  goto restart;
	}
    }
  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
  local_bh_enable();
}
Example #26
0
void ssh_interceptor_proc_uninit(SshInterceptor interceptor)
{
  /* Enable softirqs. */
  SSH_ASSERT(in_softirq());
  local_bh_enable();
  SSH_ASSERT(!in_softirq());

  interceptor_ipm_proc_entry_uninit(interceptor);
#ifdef DEBUG_LIGHT
  interceptor_stats_proc_entry_uninit(interceptor);
  interceptor_debug_proc_entry_uninit(interceptor);
#endif /* DEBUG_LIGHT */
  interceptor_version_proc_entry_uninit(interceptor);

  if (interceptor->proc_dir)
    remove_proc_entry(interceptor->proc_dir->name, NULL);
  interceptor->proc_dir = NULL;

  /* Disable softirqs. */
  local_bh_disable();
  SSH_ASSERT(in_softirq());
}
Example #27
0
File: flow.c Project: IDM350/linux
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu, cur_cpu;

	if (!flow->stats.is_percpu) {
		stats_reset(flow->stats.stat);
	} else {
		cur_cpu = get_cpu();

		for_each_possible_cpu(cpu) {

			if (cpu == cur_cpu)
				local_bh_disable();

			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));

			if (cpu == cur_cpu)
				local_bh_enable();
		}
		put_cpu();
	}
}
Example #28
0
/* called from BPF program, therefore rcu_read_lock is held */
void bpf_channel_push_packet(struct bpf_context *pctx)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct dp_upcall_info upcall;
	struct sk_buff *nskb;
	struct plum *plum;

	if (unlikely(!ctx->skb))
		return;

	plum = rcu_dereference(ctx->dp->plums[pctx->plum_id]);
	if (unlikely(!plum))
		return;

	/* queue_gso_packets() inside ovs_dp_upcall() changes skb,
	 * so copy it here, since BPF program might still be using it
	 */
	nskb = skb_clone(ctx->skb, GFP_ATOMIC);
	if (unlikely(!nskb))
		return;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = NULL;
	upcall.userdata = NULL;
	upcall.portid = plum->upcall_pid;
	/* don't exit earlier even if upcall_pid is invalid,
	 * since we want 'lost' count to be incremented
	 */
	/* disable softirq to make sure that genlmsg_unicast()->gfp_any() picks
	 * GFP_ATOMIC flag
	 * note that bpf_channel_push_struct() doesn't need to do it,
	 * since skb==NULL
	 */
	local_bh_disable();
	ovs_dp_upcall(ctx->dp, nskb, &upcall);
	local_bh_enable();
	consume_skb(nskb);
}
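The gfp_any() helper the comment refers to picks the allocation mode from the calling context; in include/net/sock.h it is roughly (sketch):

static inline gfp_t gfp_any(void)
{
	/* With BH disabled above, in_softirq() is true, so GFP_ATOMIC is used. */
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}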
Example #29
0
s32 bypass_syscall_callback(unsigned int cmd, void* buf, unsigned int size)
{
    drv_bypass_vip_rule_s * iplist;
    u32 ret = ERROR_SUCCESS;
    u32 ipnum = 0;
    
    if(buf == NULL)
    {
        return 1;
    }
    
    local_bh_disable();
    switch(cmd)
    {
        case BYPASS_CMD_INIT_BYPASS:
            ret = bypass_timer_init();
            break;
        case BYPASS_CMD_WEB_EXEC_BYPASS:
            ret = bypass_web_exec();
            break;
        case BYPASS_CMD_COPY_IPLIST:
            iplist  = (drv_bypass_vip_rule_s *)buf;
            ipnum = size/sizeof(drv_bypass_vip_rule_s);
            ret = bypass_iplist_to_kernel(iplist, ipnum);
            break;
        case BYPASS_CMD_ENABLE:
            ret = bypass_enable();
            break;
        case BYPASS_CMD_DISABLE:
            ret = bypass_disable();
            break;
        default:
            DEBUG_PRINT("unkown cmd receive:%s,%dn",__FUNCTION__,__LINE__);
            break;
    }
    __local_bh_enable();
    return 0;
}
Example #30
0
static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	int status;

	if (!entry->next_outfn) {
		spin_lock_bh(&imq_nf_queue_lock);
		nf_reinject(entry, verdict);
		spin_unlock_bh(&imq_nf_queue_lock);
		return;
	}

	rcu_read_lock();
	local_bh_disable();
	status = entry->next_outfn(entry, entry->next_queuenum);
	local_bh_enable();
	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		kfree_skb(entry->skb);
		kfree(entry);
	}

	rcu_read_unlock();
}