Example #1
0
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user)
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}
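The call site implied by the comment above is the timer tick. A minimal hedged sketch, patterned on update_process_times() in kernels of this vintage (the surrounding tick work is elided, not reproduced from any source):
/* Sketch only: runs in hardirq context; user_tick says whether the
 * scheduling-clock interrupt arrived from user mode. */
void update_process_times(int user_tick)
{
	/* ... time accounting and timer expiry elided ... */
	rcu_check_callbacks(user_tick);
	/* ... scheduler tick elided ... */
}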
Example #2
0
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
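A typical consumer of rcu_read_lock_bh_held() is a lockdep-based debug assertion. A minimal sketch, assuming a hypothetical lookup helper; RCU_LOCKDEP_WARN() is the kernel's standard checker for exactly this kind of precondition:
static void lookup_entry_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "lookup_entry_bh() needs rcu_read_lock_bh()");
	/* ... lookup of data protected by RCU-bh ... */
}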
Example #3
0
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
  mutant_covered = 1;
 /* MUTANT (del_stmt) */ /* 	RCU_TRACE(check_cpu_stalls()); */ 
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}
Example #4
0
/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
}
Example #5
0
static void do_things(struct work_struct *work)
{
	printk(KERN_INFO "workqueue: I am in a workqueue\n");

	printk(KERN_INFO "workqueue: in_atomic=%d\n", in_atomic());
	printk(KERN_INFO "workqueue: in_interrupt=%lu\n", in_interrupt());
	printk(KERN_INFO "workqueue: in_irq=%lu\n", in_irq());
	printk(KERN_INFO "workqueue: in_softirq=%lu\n", in_softirq());
	printk(KERN_INFO "\n");
}
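For context, a hedged sketch of how do_things() might be driven; DECLARE_WORK() and schedule_work() are the standard workqueue APIs, and the init function here is illustrative. Since work items run in a kworker's process context, all four predicates above should print zero.
static DECLARE_WORK(things_work, do_things);

static int __init things_init(void)
{
	/* do_things() will run later, in process context */
	schedule_work(&things_work);
	return 0;
}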
Example #6
0
void ssh_interceptor_proc_uninit(SshInterceptor interceptor)
{
  /* Enable softirqs. */
  SSH_ASSERT(in_softirq());
  local_bh_enable();
  SSH_ASSERT(!in_softirq());

  interceptor_ipm_proc_entry_uninit(interceptor);
#ifdef DEBUG_LIGHT
  interceptor_stats_proc_entry_uninit(interceptor);
  interceptor_debug_proc_entry_uninit(interceptor);
#endif /* DEBUG_LIGHT */
  interceptor_version_proc_entry_uninit(interceptor);

  if (interceptor->proc_dir)
    remove_proc_entry(interceptor->proc_dir->name, NULL);
  interceptor->proc_dir = NULL;

  /* Disable softirqs. */
  local_bh_disable();
  SSH_ASSERT(in_softirq());
}
Example #7
0
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user || rcu_is_cpu_rrupt_from_idle()) {
		rcu_sched_qs();
	}
	else if (!in_softirq()) {
		rcu_bh_qs();
	}
	if (user) {
		rcu_note_voluntary_context_switch(current);
	}
}
Example #8
0
File: irq.c Project: 1youhun1/linux
void arch_local_irq_enable(void)
{

	unsigned long flags;
	flags = arch_local_save_flags();

	/* Allow both L1 and L2 at the onset */
	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);

	/* Called from hard ISR (between irq_enter and irq_exit) */
	if (in_irq()) {

		/* If in L2 ISR, don't re-enable any further IRQs as this can
		 * cause IRQ priorities to get turned upside down, e.g. it
		 * could allow L1 to be taken while in the L2 hard ISR, which
		 * is wrong not only in theory but can also cause the dreaded
		 * L1-L2-L1 scenario
		 */
		if (flags & STATUS_A2_MASK)
			flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);

		/* Even if in L1 ISR, allow higher-prio L2 IRQs */
		else if (flags & STATUS_A1_MASK)
			flags &= ~(STATUS_E1_MASK);
	}

	/* called from soft IRQ, ideally we want to re-enable all levels */

	else if (in_softirq()) {

		/* However, if this is the case of L1 interrupted by L2,
		 * re-enabling both may cause the whacko L1-L2-L1 scenario,
		 * because ARC700 allows level 1 to interrupt an active L2 ISR.
		 * Thus we disable both.
		 * However, some code executing in a soft ISR wants some IRQs
		 * to be enabled, so we re-enable L2 only.
		 *
		 * How do we determine L1 interrupted by L2?
		 *  -A2 is set (means in L2 ISR)
		 *  -E1 is set in this ISR's pt_regs->status32, which is the
		 *      saved copy of status32_l2 when the L2 ISR happened
		 */
		struct pt_regs *pt = get_irq_regs();
		if ((flags & STATUS_A2_MASK) && pt &&
		    (pt->status32 & STATUS_A1_MASK)) {
			/*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
			flags &= ~(STATUS_E1_MASK);
		}
	}

	arch_local_irq_restore(flags);
}
Example #9
0
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
 mutant_covered = 1;
 /* MUTANT (negate) */	if(! (user || rcu_is_cpu_rrupt_from_idle())) {
		rcu_sched_qs();
	}
	else if (!in_softirq()) {
		rcu_bh_qs();
	}
	if (user) {
		rcu_note_voluntary_context_switch(current);
	}
}
Example #10
0
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user || rcu_is_cpu_rrupt_from_idle()) {
		if (!__covered11) {__covered11 = 1; total_covered += 1;}
		rcu_sched_qs();
	}
	else if (!in_softirq()) {
		if (!__covered12) {__covered12 = 1; total_covered += 1;}
		rcu_bh_qs();
	}
	if (user) {
		if (!__covered13) {__covered13 = 1; total_covered += 1;}
		rcu_note_voluntary_context_switch(current);
	}
}
Example #11
0
static char *
trace_get_console_buffer(void)
{
        int  cpu = get_cpu();
        int  idx;

        if (in_irq()) {
                idx = 0;
        } else if (in_softirq()) {
                idx = 1;
        } else {
                idx = 2;
        }

        return trace_console_buffers[cpu][idx];
}
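trace_get_console_buffer() returns with preemption disabled by get_cpu(), so a matching release is required. A sketch of the counterpart, mirroring the Lustre tracefile code this appears to come from:
static void
trace_put_console_buffer(char *buffer)
{
        /* balances the get_cpu() in trace_get_console_buffer() */
        put_cpu();
}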
Example #12
0
Boolean ssh_interceptor_proc_init(SshInterceptor interceptor)
{
  char name[128];

  /* Softirqs are always enabled here. */
  SSH_ASSERT(!in_softirq());

  /* Create a directory under /proc/ */
  ssh_snprintf(name, sizeof(name), "%s%s", SSH_PROC_ROOT, ssh_device_suffix);
  interceptor->proc_dir = create_proc_entry(name, S_IFDIR, NULL);
  
  if (interceptor->proc_dir == NULL)
    goto error;

  if (interceptor_ipm_proc_entry_init(interceptor) == FALSE)
    goto error;

#ifdef DEBUG_LIGHT
  if (interceptor_stats_proc_entry_init(interceptor) == FALSE)
    goto error;

  if (interceptor_debug_proc_entry_init(interceptor) == FALSE)
    goto error;
#endif /* DEBUG_LIGHT */

  if (interceptor_version_proc_entry_init(interceptor) == FALSE)
    goto error;

  return TRUE;

 error:
  SSH_DEBUG(SSH_D_ERROR, ("Could not create /proc/%s", name));

  interceptor_ipm_proc_entry_uninit(interceptor);
#ifdef DEBUG_LIGHT
  interceptor_stats_proc_entry_uninit(interceptor);
  interceptor_debug_proc_entry_uninit(interceptor);
#endif /* DEBUG_LIGHT */
  interceptor_version_proc_entry_uninit(interceptor);

  if (interceptor->proc_dir)
    remove_proc_entry(interceptor->proc_dir->name, NULL);
  interceptor->proc_dir = NULL;

  return FALSE;
}
Example #13
0
static struct trace_cpu_data *
trace_get_tcd(void)
{
        int cpu;
        struct trace_cpu_data *tcd;

        cpu = get_cpu();
        if (in_irq())
                tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
        else if (in_softirq())
                tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
        else
                tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;

        trace_lock_tcd(tcd);

        return tcd;
}
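The matching release must undo both the lock and the get_cpu(). A hedged sketch, again mirroring the Lustre code this resembles:
static void
trace_put_tcd(struct trace_cpu_data *tcd)
{
        trace_unlock_tcd(tcd);
        put_cpu();      /* balances get_cpu() in trace_get_tcd() */
}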
Example #14
0
int ramster_remote_flush(struct tmem_xhandle *xh, int remotenode)
{
	int ret = -1, status;
	struct r2nm_node *node = NULL;
	struct kvec vec[1];
	size_t veclen = 1;

	node = r2nm_get_node_by_num(remotenode);
	BUG_ON(node == NULL);
	xh->client_id = r2nm_this_node(); /* which node is flushing */
	vec[0].iov_len = sizeof(*xh);
	vec[0].iov_base = xh;
	BUG_ON(irqs_disabled());
	BUG_ON(in_softirq());
	ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
					vec, veclen, remotenode, &status);
	r2nm_node_put(node);
	return ret;
}
Example #15
0
void
osl_long_delay(osl_t *osh, uint usec, bool yield)
{
	uint d;
	bool yielded = TRUE;
	int usec_to_delay = usec;
	unsigned long tick1, tick2, tick_diff = 0;

	
	while (usec_to_delay > 0) {
		if (!yield || !yielded) {
			d = MIN(usec_to_delay, 10);
			udelay(d);
			usec_to_delay -= d;
		}
		if (usec_to_delay > 0) {
			osh->oshsh->long_delay++;
			OSL_GETCYCLES(tick1);
			spin_unlock_bh(osh->oshsh->lock);
			if (usec_to_delay > 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
				schedule();
				yielded = TRUE;
			} else {
				yielded = FALSE;
			}
			spin_lock_bh(osh->oshsh->lock);
			OSL_GETCYCLES(tick2);

			if (yielded) {
				tick_diff = TICKDIFF(tick2, tick1);
				tick_diff = (tick_diff * 2)/(osh->oshsh->MIPS);
				if (tick_diff) {
					usec_to_delay -= tick_diff;
				} else
					yielded = FALSE;
			}
			osh->oshsh->long_delay--;
			ASSERT(osh->oshsh->long_delay >= 0);
		}
	}
}
Example #16
0
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;
	
	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		aalg_list[i].available = 1;
	}
	
	for (i = 0; i < ealg_entries(); i++) {
		ealg_list[i].available = 1;
	}
	
	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
Example #17
0
void hba_msleep(MV_U32 msec)
{
	MV_U32 tmp = 0;
	MV_U32 mod_msec = 2000;

	if (in_softirq() || in_interrupt() || irqs_disabled()) {
		mv_touch_nmi_watchdog();
		if (msec <= mod_msec) {
			ossw_mdelay(msec);
		} else {
			for (tmp = 0; tmp < msec / mod_msec; tmp++) {
				ossw_mdelay(mod_msec);
				mv_touch_nmi_watchdog();
			}
			if (msec % mod_msec)
				ossw_mdelay(msec % mod_msec);
		}
		mv_touch_nmi_watchdog();
	} else {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(msec));
	}
}
Example #18
0
/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 *
 * Softirqs (and preemption) must be disabled.
 */
void fpsimd_save(void)
{
	struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

	WARN_ON(!in_softirq() && !irqs_disabled());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(SIGKILL, SI_KERNEL, 0);
				return;
			}

			sve_save_state(sve_pffr(&current->thread), &st->fpsr);
		} else
			fpsimd_save_state(st);
	}
}
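A sketch of how a caller can satisfy the "softirqs (and preemption) must be disabled" precondition; the wrapper name is illustrative, but the local_bh_disable() bracketing matches how the arm64 code calls fpsimd_save():
static void save_current_fpsimd(void)
{
	/* disabling bottom halves also disables preemption */
	local_bh_disable();
	fpsimd_save();
	local_bh_enable();
}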
Example #19
0
Boolean ssh_interceptor_iface_init(SshInterceptor interceptor)
{
  SSH_ASSERT(!in_softirq());

  /* This will register notifier that notifies about bringing the
     interface up and down. */
  interceptor->nf->notifier_netdev.notifier_call = 
    ssh_interceptor_notifier_callback;
  interceptor->nf->notifier_netdev.priority = 1;
  interceptor->nf->notifier_netdev.next = NULL;
  register_netdevice_notifier(&interceptor->nf->notifier_netdev);

  /* This will register notifier that notifies when address of the
     interface changes without bringing the interface down. */
  interceptor->nf->notifier_inetaddr.notifier_call =
    ssh_interceptor_notifier_callback;
  interceptor->nf->notifier_inetaddr.priority = 1;
  interceptor->nf->notifier_inetaddr.next = NULL;
  register_inetaddr_notifier(&interceptor->nf->notifier_inetaddr);

#ifdef SSH_LINUX_INTERCEPTOR_IPV6
  interceptor->nf->notifier_inet6addr.notifier_call =
    ssh_interceptor_notifier_callback;
  interceptor->nf->notifier_inet6addr.priority = 1;
  interceptor->nf->notifier_inet6addr.next = NULL;
  register_inet6addr_notifier(&interceptor->nf->notifier_inet6addr);
#endif /* SSH_LINUX_INTERCEPTOR_IPV6 */

  interceptor->nf->iface_notifiers_installed = TRUE;

  /* Send interface information to engine. This causes the interceptor
     to grab reference to each net_device. On error cases 
     ssh_interceptor_clear_ifaces() or ssh_interceptor_iface_uninit() 
     must be called to release the references. */
  ssh_interceptor_receive_ifaces(interceptor);

  return TRUE;
}
Example #20
0
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return in_softirq() || irqs_disabled();
}
Example #21
0
void _mipi_csi2_unlock(struct mipi_csi2_info *info)
{
	if (!in_irq() && !in_softirq())
		mutex_unlock(&info->mutex_lock);
}
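A sketch of the lock side implied by the unlock above: mutexes may only be taken from process context, hence the same guard. The body mirrors the i.MX MIPI CSI-2 driver this appears to come from:
void _mipi_csi2_lock(struct mipi_csi2_info *info)
{
	if (!in_irq() && !in_softirq())
		mutex_lock(&info->mutex_lock);
}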
Example #22
0
static int display_secfp_proc_out_sa(char *page, char **start,
				off_t off, int count,
				int *eof, void *data)
{
	ASF_uint32_t ulVSGId = 0;
	ASF_uint32_t ulTunnelId = 0;
	struct SPDCILinkNode_s *pCINode;
	SPDOutContainer_t *pOutContainer = NULL;
	SPDOutSALinkNode_t *pOutSALinkNode;
	outSA_t *pOutSA = NULL;

	int bVal = in_softirq();

	if (!bVal)
		local_bh_disable();

	if (secFP_TunnelIfaces[ulVSGId][ulTunnelId].bInUse == 0) {
		printk(KERN_INFO"Tunnel Interface is not in use"\
			".TunnelId=%u, VSGId=%u\n",
			ulTunnelId, ulVSGId);
		if (!bVal)
			local_bh_enable();
		return ASF_IPSEC_TUNNEL_NOT_FOUND;
	}
	printk(KERN_INFO"\nVSGID= %d TUNNELID= %d, MAGIC NUM = %d\n",
		ulVSGId, ulTunnelId,
		secFP_TunnelIfaces[ulVSGId][ulTunnelId].ulTunnelMagicNumber);

	pCINode = secFP_TunnelIfaces[ulVSGId][ulTunnelId].pSPDCIOutList;
	for (; pCINode != NULL; pCINode = pCINode->pNext) {

		pOutContainer = (SPDOutContainer_t *)(ptrIArray_getData(
					&(secfp_OutDB),
					pCINode->ulIndex));
		if (!pOutContainer)
			continue;
		printk(KERN_INFO"=========OUT Policy==================\n");
		printk(KERN_INFO"Id=%d, Proto %d, Dscp %d "\
			"Flags:Udp(%d) RED(%d),ESN(%d),DSCP(%d),DF(%d)\n",
		pCINode->ulIndex,
		pOutContainer->SPDParams.ucProto,
		pOutContainer->SPDParams.ucDscp,
		pOutContainer->SPDParams.bUdpEncap,
		pOutContainer->SPDParams.bRedSideFrag,
		pOutContainer->SPDParams.bESN,
		pOutContainer->SPDParams.bCopyDscp,
		pOutContainer->SPDParams.handleDf);

		print_SPDPolPPStats(pOutContainer->PPStats);
		printk(KERN_INFO"--------------SA_LIST--------------------");
		for (pOutSALinkNode = pOutContainer->SAHolder.pSAList;
			pOutSALinkNode != NULL;
			pOutSALinkNode = pOutSALinkNode->pNext) {
			printk(KERN_INFO"\nSA-ID= %d ", pOutSALinkNode->ulSAIndex);
			pOutSA =
				(outSA_t *) ptrIArray_getData(&secFP_OutSATable,
					pOutSALinkNode->ulSAIndex);
			if (pOutSA) {
				ASFSAStats_t outParams = {0, 0};
				ASFIPSecGetSAQueryParams_t inParams;

				print_SAParams(&pOutSA->SAParams);

				inParams.ulVSGId = ulVSGId;
				inParams.ulTunnelId = ulTunnelId;
				inParams.ulSPDContainerIndex = pCINode->ulIndex;
				inParams.ulSPI = pOutSA->SAParams.ulSPI;
				inParams.gwAddr.bIPv4OrIPv6 = pOutSA->SAParams.tunnelInfo.bIPv4OrIPv6;
				if (pOutSA->SAParams.tunnelInfo.bIPv4OrIPv6)
					memcpy(inParams.gwAddr.ipv6addr, pOutSA->SAParams.tunnelInfo.addr.iphv6.daddr, 16);
				else
					inParams.gwAddr.ipv4addr =
						pOutSA->SAParams.tunnelInfo.addr.iphv4.daddr;

				inParams.ucProtocol =
						pOutSA->SAParams.ucProtocol;
				inParams.bDir = SECFP_OUT;
				ASFIPSecSAQueryStats(&inParams, &outParams);
				printk(KERN_INFO"Stats:ulBytes=%llu, ulPkts=%llu",
					outParams.ulBytes, outParams.ulPkts);

				printk(KERN_INFO"L2BlobLen = %d, Magic = %d\n",
					pOutSA->ulL2BlobLen,
				pOutSA->l2blobConfig.ulL2blobMagicNumber);
#ifdef ASF_QMAN_IPSEC
				printk(KERN_INFO"SecFQ=%d, RecvFQ=%d\n",
					pOutSA->ctx.SecFq->qman_fq.fqid,
					pOutSA->ctx.RecvFq->qman_fq.fqid);
#endif
			}
		}
		printk(KERN_INFO"\n");
	}
	if (!bVal)
		local_bh_enable();

	return 0;
}
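The bVal = in_softirq() idiom above generalizes: disable bottom halves only when they are not already disabled (and we are not already in softirq context), and restore them on every exit path. A minimal hedged sketch of the pattern, independent of the ASF data structures:
static void walk_table_bh_safe(void)
{
	int bh_was_off = in_softirq();

	if (!bh_was_off)
		local_bh_disable();

	/* ... walk data that is also touched from softirq context ... */

	if (!bh_was_off)
		local_bh_enable();
}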
Example #23
0
static int display_secfp_proc_in_spd(char *page, char **start,
				off_t off, int count,
				int *eof, void *data)
{
	int ulSAIndex;
	ASF_uint32_t ulVSGId = 0;
	ASF_uint32_t ulTunnelId = 0;
	struct SPDCILinkNode_s *pCINode;
	SPDInContainer_t *pInContainer = NULL;
	SPDInSPIValLinkNode_t *pSPILinkNode;

	int bVal = in_softirq();

	if (!bVal)
		local_bh_disable();

	if (secFP_TunnelIfaces[ulVSGId][ulTunnelId].bInUse == 0) {
		printk(KERN_INFO"\nTunnel Interface is not in use"\
			".TunnelId=%u, VSGId=%u\n",
			ulTunnelId, ulVSGId);
		if (!bVal)
			local_bh_enable();
		return ASF_IPSEC_TUNNEL_NOT_FOUND;
	}
	printk(KERN_INFO"\nVSGID= %d TUNNELID= %d, MAGIC NUM = %d",
		ulVSGId, ulTunnelId,
		secFP_TunnelIfaces[ulVSGId][ulTunnelId].ulTunnelMagicNumber);

	pCINode = secFP_TunnelIfaces[ulVSGId][ulTunnelId].pSPDCIInList;
	for (; pCINode != NULL; pCINode = pCINode->pNext) {

		pInContainer = (SPDInContainer_t *)(ptrIArray_getData(
					&(secfp_InDB),
					pCINode->ulIndex));
		if (!pInContainer)
			continue;
		printk(KERN_INFO"=========IN Policy==================\n");
		printk(KERN_INFO"Id=%d, Proto 0x%x, Dscp 0x%x "\
			"Flags:Udp(%d) ESN(%d),DSCP(%d),ECN(%d)\n",
		pCINode->ulIndex,
		pInContainer->SPDParams.ucProto,
		pInContainer->SPDParams.ucDscp,
		pInContainer->SPDParams.bUdpEncap,
		pInContainer->SPDParams.bESN,
		pInContainer->SPDParams.bCopyDscp,
		pInContainer->SPDParams.bCopyEcn);

		print_SPDPolPPStats(pInContainer->PPStats);

		printk(KERN_INFO"List IN SA -SPI Val:");

		for (pSPILinkNode = pInContainer->pSPIValList, ulSAIndex = 0;
			pSPILinkNode != NULL;
			pSPILinkNode = pSPILinkNode->pNext, ulSAIndex++) {

			printk(KERN_INFO"0x%x ", pSPILinkNode->ulSPIVal);
			if (ulSAIndex % 10)
				printk(KERN_INFO"\n");
		}
		printk(KERN_INFO"\n");
	}
	if (!bVal)
		local_bh_enable();
	return 0;
}
Example #24
0
static int display_secfp_proc_out_spd(char *page, char **start,
				off_t off, int count,
				int *eof, void *data)
{
	ASF_uint32_t ulVSGId = 0;
	ASF_uint32_t ulTunnelId = 0;
	struct SPDCILinkNode_s *pCINode;
	SPDOutContainer_t *pOutContainer = NULL;
	SPDOutSALinkNode_t *pOutSALinkNode;

	int bVal = in_softirq();

	if (!bVal)
		local_bh_disable();

	if (secFP_TunnelIfaces[ulVSGId][ulTunnelId].bInUse == 0) {
		printk(KERN_INFO"Tunnel Interface is not in use"\
			".TunnelId=%u, VSGId=%u\n",
			ulTunnelId, ulVSGId);
		if (!bVal)
			local_bh_enable();
		return ASF_IPSEC_TUNNEL_NOT_FOUND;
	}
	printk(KERN_INFO"\nVSGID= %d TUNNELID= %d, MAGIC NUM = %d\n",
		ulVSGId, ulTunnelId,
		secFP_TunnelIfaces[ulVSGId][ulTunnelId].ulTunnelMagicNumber);

	pCINode = secFP_TunnelIfaces[ulVSGId][ulTunnelId].pSPDCIOutList;
	for (; pCINode != NULL; pCINode = pCINode->pNext) {

		pOutContainer = (SPDOutContainer_t *)(ptrIArray_getData(
					&(secfp_OutDB),
					pCINode->ulIndex));
		if (!pOutContainer)
			continue;
		printk(KERN_INFO"=========OUT Policy==================\n");
		printk(KERN_INFO"Id=%d, Proto 0x%x, Dscp 0x%x"\
			"Flags:Udp(%d) RED(%d),ESN(%d),DSCP(%d),DF(%d)\n",
		pCINode->ulIndex,
		pOutContainer->SPDParams.ucProto,
		pOutContainer->SPDParams.ucDscp,
		pOutContainer->SPDParams.bUdpEncap,
		pOutContainer->SPDParams.bRedSideFrag,
		pOutContainer->SPDParams.bESN,
		pOutContainer->SPDParams.bCopyDscp,
		pOutContainer->SPDParams.handleDf);

		print_SPDPolPPStats(pOutContainer->PPStats);

		printk(KERN_INFO"List SA IDs:");
		for (pOutSALinkNode = pOutContainer->SAHolder.pSAList;
			pOutSALinkNode != NULL;
			pOutSALinkNode = pOutSALinkNode->pNext) {
			printk(KERN_INFO" %d ", pOutSALinkNode->ulSAIndex);
			if (pOutSALinkNode->ulSAIndex % 10)
				printk(KERN_INFO"\n\t");
		}
		printk(KERN_INFO"\n");
	}
	if (!bVal)
		local_bh_enable();
	return 0;
}
Example #25
0
void sm_pwrite_miss_copy_from_device_start(struct bittern_cache *bc,
					   struct work_item *wi)
{
	struct bio *bio = wi->wi_original_bio;
	struct cache_block *cache_block = wi->wi_cache_block;
	int val;
	struct page *cache_page;

	M_ASSERT(bio != NULL);
	ASSERT((wi->wi_flags & WI_FLAG_BIO_CLONED) != 0);
	ASSERT(wi->wi_original_bio != NULL);
	ASSERT(bio_is_request_single_cache_block(bio));
	ASSERT(cache_block->bcb_sector ==
	       bio_sector_to_cache_block_sector(bio));
	ASSERT(cache_block->bcb_state ==
	       S_CLEAN_P_WRITE_MISS_CPF_DEVICE_START ||
	       cache_block->bcb_state ==
	       S_DIRTY_P_WRITE_MISS_CPF_DEVICE_START);
	ASSERT(wi->wi_original_cache_block == NULL);

	pmem_data_get_page_write(bc,
				 cache_block,
				 &wi->wi_pmem_ctx);

	cache_page = pmem_context_data_page(&wi->wi_pmem_ctx);

	atomic_inc(&bc->bc_read_cached_device_requests);
	val = atomic_inc_return(&bc->bc_pending_cached_device_requests);
	atomic_set_if_higher(&bc->bc_highest_pending_cached_device_requests,
			     val);

	BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, wi->wi_cloned_bio,
		 "copy-from-device");
	ASSERT_BITTERN_CACHE(bc);
	ASSERT_CACHE_BLOCK(cache_block, bc);

	if (cache_block->bcb_state == S_CLEAN_P_WRITE_MISS_CPF_DEVICE_START) {
		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WT,
					S_CLEAN_P_WRITE_MISS_CPF_DEVICE_START,
					S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END);
	} else {
		ASSERT(cache_block->bcb_state ==
		       S_DIRTY_P_WRITE_MISS_CPF_DEVICE_START);
		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WB,
					S_DIRTY_P_WRITE_MISS_CPF_DEVICE_START,
					S_DIRTY_P_WRITE_MISS_CPF_DEVICE_END);
	}

	/*
	 * we are in the first state -- process context
	 */
	M_ASSERT(!in_irq() && !in_softirq());
	wi->wi_ts_workqueue = current_kernel_time_nsec();
	cached_dev_do_make_request(bc,
				   wi,
				   READ, /* datadir */
				   false); /* do not set original bio */
}
Example #26
0
void sm_pwrite_miss_copy_from_device_end(struct bittern_cache *bc,
					 struct work_item *wi,
					 int err)
{
	struct bio *bio = wi->wi_original_bio;
	struct cache_block *cache_block = wi->wi_cache_block;
	uint128_t hash_data;
	char *cache_vaddr;
	struct page *cache_page;

	M_ASSERT_FIXME(err == 0);

	M_ASSERT(bio != NULL);
	ASSERT((wi->wi_flags & WI_FLAG_BIO_CLONED) != 0);
	ASSERT(wi->wi_original_bio != NULL);
	ASSERT(bio_is_request_single_cache_block(bio));
	ASSERT(cache_block->bcb_sector ==
	       bio_sector_to_cache_block_sector(bio));
	ASSERT(cache_block->bcb_state ==
	       S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END ||
	       cache_block->bcb_state ==
	       S_DIRTY_P_WRITE_MISS_CPF_DEVICE_END);
	ASSERT(wi->wi_original_cache_block == NULL);

	BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, NULL, "endio");

	cache_vaddr = pmem_context_data_vaddr(&wi->wi_pmem_ctx);
	cache_page = pmem_context_data_page(&wi->wi_pmem_ctx);

	atomic_dec(&bc->bc_pending_cached_device_requests);

	/*
	 * we can check the original hash
	 */
	cache_track_hash_check_buffer(bc, cache_block, cache_vaddr);

	/*
	 * copy to cache from bio, aka userland writes
	 */
	bio_copy_to_cache(wi, bio, &hash_data);

	/* update the block's stored hash */
	cache_block->bcb_hash_data = hash_data;

	/* update the hash-tracking state to match */
	cache_track_hash_set(bc, cache_block, cache_block->bcb_hash_data);

	ASSERT(wi->wi_original_cache_block == NULL);

	if (cache_block->bcb_state ==
	    S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END) {
		int val;

		BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio,
			 wi->wi_cloned_bio, "copy-to-device");

		atomic_inc(&bc->bc_write_cached_device_requests);
		val = atomic_inc_return(&bc->bc_pending_cached_device_requests);
		atomic_set_if_higher(
				&bc->bc_highest_pending_cached_device_requests,
				val);

		ASSERT_BITTERN_CACHE(bc);
		ASSERT_CACHE_BLOCK(cache_block, bc);
		ASSERT_WORK_ITEM(wi, bc);

		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WT,
					S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END,
					S_CLEAN_P_WRITE_MISS_CPT_DEVICE_END);

		/*
		 * we are in the first state -- process context
		 */
		M_ASSERT(!in_irq() && !in_softirq());
		wi->wi_ts_workqueue = current_kernel_time_nsec();
		cached_dev_do_make_request(bc,
					   wi,
					   WRITE, /* datadir */
					   false); /* do not set original bio */

	} else {

		BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio,
			 wi->wi_cloned_bio, "copy-to-cache");
		/*
		 * for writeback we commit to cache and then we are done
		 */
		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WB,
					S_DIRTY_P_WRITE_MISS_CPF_DEVICE_END,
					S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);

		pmem_data_put_page_write(bc,
					 cache_block,
					 &wi->wi_pmem_ctx,
					 wi, /*callback context */
					 cache_put_page_write_callback,
					 S_DIRTY);

	}
}
Example #27
0
ASF_int32_t asfctrl_delete_dev_map(struct net_device *dev)
{
	ASF_int32_t  cii;
	ASF_uint32_t ulVSGId;
	ASFCTRL_FUNC_ENTRY;
#ifdef CONFIG_PPPOE
	if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_PPP)) {
#else
	if (dev->type == ARPHRD_ETHER) {
#endif
		cii = asfctrl_dev_get_cii(dev);
		if (cii < 0) {
			ASFCTRL_DBG("Failed to determine cii for device %s\n",
				dev->name);
			return T_FAILURE;
		}
		ASFCTRL_DBG("UNMAP interface %s\n",  dev->name);
		/* Get the VSG Id from asfctrl_netns Subha 11/3 */
		ulVSGId = asfctrl_netns_net_to_vsg(dev_net(dev));
		printk("UnRegister Device: ulVSGId = %d\n", ulVSGId);
		ASFUnBindDeviceToVSG(ulVSGId, cii);
		ASFUnMapInterface(cii);
		dev_put(dev);
		p_asfctrl_netdev_cii[cii] = NULL;
		return T_SUCCESS;
	}

	ASFCTRL_FUNC_EXIT;
	return T_FAILURE;
}
EXPORT_SYMBOL(asfctrl_delete_dev_map);

#if (ASFCTRL_DEBUG_LEVEL >= LOGS)
char *print_netevent(int event)
{
	switch (event) {
	case NETDEV_UP:
		return (char *)"NETDEV_UP";
	case NETDEV_DOWN:
		return (char *)"NETDEV_DOWN";
	case NETDEV_REBOOT:
		return (char *)"NETDEV_REBOOT";
	case NETDEV_CHANGE:
		return (char *)"NETDEV_CHANGE";
	case NETDEV_REGISTER:
		return (char *)"NETDEV_REGISTER";
	case NETDEV_UNREGISTER:
		return (char *)"NETDEV_UNREGISTER";
	case NETDEV_CHANGEMTU:
		return (char *)"NETDEV_CHANGEMTU";
	case NETDEV_CHANGEADDR:
		return (char *)"NETDEV_CHANGEADDR";
	case NETDEV_GOING_DOWN:
		return (char *)"NETDEV_GOING_DOWN";
	case NETDEV_CHANGENAME:
		return (char *)"NETDEV_CHANGENAME";
	case NETDEV_PRE_UP:
		return (char *)"NETDEV_PRE_UP";
	default:
		return (char *)"UNKNOWN";
	}
}
#endif

static int asfctrl_dev_notifier_fn(struct notifier_block *this,
				unsigned long event, void *ptr)
{
#if 0 /* Linux versions less than 3.8 */
	struct net_device *dev = (struct net_device *)(ptr);
#else
	/* Versions above 3.8 */
	struct net_device *dev = ((struct netdev_notifier_info *)(ptr))->dev;
#endif

	if (dev == NULL)
	{
		ASFCTRL_DBG("asfctrl_dev_notifier: NULL String for dev? \n");
		return NOTIFY_DONE;
	}

	ASFCTRL_FUNC_ENTRY;
	printk(KERN_INFO "Subha: asfctrl_dev_notifier called for dev = 0x%x\n", dev);
	ASFCTRL_DBG("%s - event %ld (%s)\n",
			dev->name, event, print_netevent(event));

	/* handle only ethernet, vlan, bridge and pppoe (ppp) interfaces */
	switch (event) {
	case NETDEV_REGISTER: /* A  new device is allocated*/
		printk(KERN_INFO "Subha: NETDEV_REGISTER\n");
		printk(KERN_INFO "dev->type = %d, ARPHDR_ETHER =%d device=%s\n", dev->type, ARPHRD_ETHER, dev->name);
		ASFCTRL_INFO("Register Device type %d mac %pM\n", dev->type,
			dev->dev_addr);
		if (dev->type == ARPHRD_ETHER)
			asfctrl_create_dev_map(dev, 1);
		break;

	case NETDEV_UNREGISTER:/* A new device is deallocated*/
		ASFCTRL_INFO("Unregister Device type %d mac %pM\n", dev->type,
			dev->dev_addr);
#ifdef CONFIG_PPPOE
		if (dev->type == ARPHRD_ETHER  || dev->type == ARPHRD_PPP)
#else
		if (dev->type == ARPHRD_ETHER)
#endif
			asfctrl_delete_dev_map(dev);
		break;

#ifdef CONFIG_PPPOE
	case NETDEV_UP:
		if (dev->type == ARPHRD_PPP)
			asfctrl_create_dev_map(dev, 1);
		break;
#endif
	}
	ASFCTRL_FUNC_EXIT;
	return NOTIFY_DONE;
}

int asfctrl_dev_fp_tx_hook(struct sk_buff *skb, struct net_device *dev)
{
	ASF_uint16_t	usEthType;
	ASF_int32_t		hh_len;
	ASF_boolean_t	bPPPoE = 0;
	struct iphdr       *iph = 0;
	struct ipv6hdr       *ipv6h;
	unsigned int proto;
	unsigned int  tun_hdr = 0;

	ASFCTRL_FUNC_ENTRY;

	if (!asfctrl_skb_is_dummy(skb))
		return AS_FP_PROCEED;

	asfctrl_skb_unmark_dummy(skb);

	if (dev->type != ARPHRD_ETHER)
		goto drop;

	ASFCTRL_INFO("asfctrl_dev_fp_tx: 2\n");

	usEthType = skb->protocol;
	hh_len = ETH_HLEN;

	if (usEthType == __constant_htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data+hh_len);
		ASFCTRL_TRACE("8021Q packet");
		hh_len += VLAN_HLEN;
		usEthType = vhdr->h_vlan_encapsulated_proto;
	}

	if (usEthType == __constant_htons(ETH_P_PPP_SES)) {
		unsigned char *poe_hdr = skb->data+hh_len;
		unsigned short ppp_proto;

		ASFCTRL_TRACE("PPPoE packet");

		/*PPPoE header is of 6 bytes */
		ppp_proto = *(unsigned short *)(poe_hdr+6);
		/* PPPOE: VER=1,TYPE=1,CODE=0 and  PPP:_PROTO=0x0021 (IP) */
		if ((poe_hdr[0] != 0x11) || (poe_hdr[1] != 0) ||
			(ppp_proto != __constant_htons(0x0021))) {
				goto drop;
		}

		hh_len += (8); /* 6+2 -- pppoe+ppp headers */
		usEthType = __constant_htons(ETH_P_IP);
		bPPPoE = 1;
	}

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 3\n");
	if (usEthType != __constant_htons(ETH_P_IP) &&
		usEthType != __constant_htons(ETH_P_IPV6))
		goto drop;

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 4\n");

	if (usEthType == __constant_htons(ETH_P_IP)) {
		iph = (struct iphdr *)(skb->data+hh_len);
		proto = iph->protocol;
		if (proto == IPPROTO_IPV6) {
			ipv6h = (struct ipv6hdr *)(skb->data+hh_len+sizeof(struct iphdr));
			proto = ipv6h->nexthdr;
			tun_hdr = sizeof(struct iphdr);
		}
	} else {
		ipv6h = (struct ipv6hdr *)(skb->data+hh_len);
		proto = ipv6h->nexthdr;
		if (proto == IPPROTO_IPIP) {
			iph = (struct iphdr *)(skb->data+hh_len+sizeof(struct ipv6hdr));
			proto = iph->protocol;
			tun_hdr = sizeof(struct ipv6hdr);
		}
	}

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 5\n");
	switch (proto) {
		asf_linux_L2blobPktData_t *pData;
		ASFFFPUpdateFlowParams_t  cmd;

	case ASFCTRL_IPPROTO_DUMMY_L2BLOB:

		/*
		 * If the packet is coming in on a PPP interface, the
		 * network header points to the start of the PPPoE header
		 * instead of the IP header, so always identify the start
		 * of the IP header dynamically!
		 */

		memset(&cmd, 0, sizeof(cmd));
		cmd.u.l2blob.bUpdatePPPoELen = bPPPoE;


		ASFCTRL_INFO(
			"DUMMY_L2BLOB: %pM:%pM..%02x%02x (skb->proto 0x%04x) "
			"data 0x%p nw_hdr 0x%p tr_hdr 0x%p\n",
			skb->data, skb->data+6, skb->data[12], skb->data[13],
			skb->protocol, skb->data, skb_network_header(skb),
			skb_transport_header(skb));

		if (usEthType == __constant_htons(ETH_P_IP)) {
			pData = (asf_linux_L2blobPktData_t *)(skb->data+hh_len +
							(iph->ihl * 4) + (tun_hdr ? sizeof(struct ipv6hdr) : 0));
			cmd.u.l2blob.tunnel.bIP6IP4Out = tun_hdr ? 1 : 0;
		} else {
			pData = (asf_linux_L2blobPktData_t *)(skb->data+hh_len +
							sizeof(struct ipv6hdr) + (tun_hdr ? sizeof(struct iphdr) : 0));
			cmd.u.l2blob.tunnel.bIP4IP6Out = tun_hdr ? 1 : 0;
		}

		memcpy(&cmd.tuple, &pData->tuple, sizeof(cmd.tuple));
		cmd.ulZoneId = pData->ulZoneId;
		cmd.bL2blobUpdate = 1;
		cmd.u.l2blob.ulDeviceId = asfctrl_dev_get_cii(dev);
		cmd.u.l2blob.ulPathMTU = pData->ulPathMTU;

		cmd.u.l2blob.ulL2blobMagicNumber = asfctrl_vsg_l2blobconfig_id;

		/* need to include PPPOE+PPP header if any */
		cmd.u.l2blob.l2blobLen = hh_len + tun_hdr;

		memcpy(cmd.u.l2blob.l2blob, skb->data, cmd.u.l2blob.l2blobLen);
#ifdef CONFIG_VLAN_8021Q
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
		if (vlan_tx_tag_present(skb)) {
#else
		if (skb_vlan_tag_present(skb)) {
#endif
			cmd.u.l2blob.bTxVlan = 1;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
			cmd.u.l2blob.usTxVlanId = (vlan_tx_tag_get(skb)
							| VLAN_TAG_PRESENT);
#else
			cmd.u.l2blob.usTxVlanId = (skb_vlan_tag_get(skb)
							| VLAN_TAG_PRESENT);
#endif
		} else
#endif
			cmd.u.l2blob.bTxVlan = 0;

		ASFFFPRuntime(pData->ulVsgId, ASF_FFP_MODIFY_FLOWS, &cmd,
			sizeof(cmd), NULL, 0);
		break;

#ifdef ASFCTRL_IPSEC_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_IPSEC_L2BLOB:
		ASFCTRL_INFO("DUMMY_IPSEC_L2BLOB");

		skb->protocol = usEthType;
		if (fn_ipsec_l2blob_update)
			fn_ipsec_l2blob_update(skb,
				hh_len, asfctrl_dev_get_cii(dev));

		break;
#endif

#ifdef ASFCTRL_FWD_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_FWD_L2BLOB:
		ASFCTRL_INFO("DUMMY_FWD_L2BLOB");

		if (fn_fwd_l2blob_update)
			fn_fwd_l2blob_update(skb, hh_len,
				asfctrl_dev_get_cii(dev));

		break;
#endif
#ifdef ASFCTRL_TERM_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_TERM_L2BLOB:
		ASFCTRL_INFO("DUMMY_TERM_L2BLOB");

		if (fn_term_l2blob_update)
			fn_term_l2blob_update(skb, hh_len,
				asfctrl_dev_get_cii(dev));

		break;
#endif
	}
drop:
	ASFCTRLKernelSkbFree(skb);
	ASFCTRL_FUNC_EXIT;
	return AS_FP_STOLEN;
}

static struct notifier_block asfctrl_dev_notifier = {
	.notifier_call = asfctrl_dev_notifier_fn,
};

ASF_void_t  asfctrl_fnInterfaceNotFound(
			ASFBuffer_t Buffer,
			genericFreeFn_t pFreeFn,
			ASF_void_t *freeArg)
{
	struct sk_buff  *skb;
	int bVal = in_softirq();

	ASFCTRL_FUNC_ENTRY;
	skb = AsfBuf2Skb(Buffer);

	if (!bVal)
		local_bh_disable();
	/* Send it to for normal path handling */
	ASFCTRL_netif_receive_skb(skb);

	if (!bVal)
		local_bh_enable();
	ASFCTRL_FUNC_EXIT;
}

ASF_void_t  asfctrl_fnVSGMappingNotFound(
			ASF_uint32_t ulCommonInterfaceId,
			ASFBuffer_t Buffer,
			genericFreeFn_t pFreeFn,
			ASF_void_t *freeArg)
{
	struct sk_buff  *skb;
	int bVal = in_softirq();

	ASFCTRL_FUNC_ENTRY;
	skb = AsfBuf2Skb(Buffer);

	if (!bVal)
		local_bh_disable();
	/* Send it to for normal path handling */
	ASFCTRL_netif_receive_skb(skb);

	if (!bVal)
		local_bh_enable();
	ASFCTRL_FUNC_EXIT;
}


static int __init asfctrl_init(void)
{
	int ret;
	ASFFFPConfigIdentity_t cmd;
	ASFFFPCallbackFns_t asfctrl_Cbs = {
		asfctrl_fnInterfaceNotFound,
		asfctrl_fnVSGMappingNotFound,
		asfctrl_fnZoneMappingNotFound,
		asfctrl_fnNoFlowFound,
		asfctrl_fnRuntime,
		asfctrl_fnFlowRefreshL2Blob,
		asfctrl_fnFlowActivityRefresh,
		asfctrl_fnFlowTcpSpecialPkts,
		asfctrl_fnFlowValidate,
		asfctrl_fnAuditLog
	};

	ASFCTRL_FUNC_ENTRY;

	memset(p_asfctrl_netdev_cii, 0, sizeof(p_asfctrl_netdev_cii));

	ASFGetCapabilities(&g_cap);

	if (!g_cap.bBufferHomogenous) {
		ASFCTRL_ERR("ASF capabilities: Non homogenous buffer");
		return -1;
	}
	asfctrl_vsg_config_id = jiffies;
	memset(&cmd, 0, sizeof(cmd));
	cmd.ulConfigMagicNumber = asfctrl_vsg_config_id;
	ASFFFPUpdateConfigIdentity(ASF_DEF_VSG, cmd);

	memset(&cmd, 0, sizeof(cmd));
	cmd.bL2blobMagicNumber = 1;
	cmd.l2blobConfig.ulL2blobMagicNumber = asfctrl_vsg_l2blobconfig_id;
	ASFFFPUpdateConfigIdentity(ASF_DEF_VSG, cmd);


	ASFFFPRegisterCallbackFns(&asfctrl_Cbs);

	ret = asfctrl_netns_vsg_init();
	printk("asfctrl_netns_vsg_init returned %d\n", ret);

	printk(KERN_INFO "Subha: before asfctrl_dev_notifier\r\n");
	register_netdevice_notifier(&asfctrl_dev_notifier);
	printk(KERN_INFO "Subha: before devfp_register_hook\r\n");
	printk(KERN_INFO "Subha: devfp_register_hook called asf_ffp_devfp_rx=0x%x, asfctrl_dev_fp_tx_hook=%x\r\n",
		(int)asf_ffp_devfp_rx, (int)asfctrl_dev_fp_tx_hook);
	//devfp_register_hook(asf_ffp_devfp_rx, asfctrl_dev_fp_tx_hook);
	devfp_register_rx_hook_veth(asf_ffp_devfp_rx_veth);
	devfp_register_tx_hook_veth(asfctrl_dev_fp_tx_hook);
	route_hook_fn_register(&asfctrl_l3_route_flush);
#ifdef ASF_IPV6_FP_SUPPORT
	ipv6_route_hook_fn_register(&asfctrl_l3_ipv6_route_flush);
#endif

	asfctrl_sysfs_init();


	if (g_cap.mode & fwMode)
		asfctrl_linux_register_ffp();

	if (ASFGetStatus() == 0)
		ASFDeploy();

	ASFCTRL_INFO("ASF Control Module - Core Loaded.\n");
	ASFCTRL_FUNC_EXIT;
	return 0;
}
Example #28
0
static int display_secfp_proc_in_sa(char *page, char **start,
				off_t off, int count,
				int *eof, void *data)
{
	int ulSAIndex;
	ASF_uint32_t ulVSGId = 0;
	ASF_uint32_t ulTunnelId = 0;
	struct SPDCILinkNode_s *pCINode;
	SPDInContainer_t *pInContainer = NULL;
	SPDInSPIValLinkNode_t *pSPILinkNode;
	inSA_t  *pInSA =  NULL;
	unsigned int ulHashVal;

	int bVal = in_softirq();

	if (!bVal)
		local_bh_disable();

	if (secFP_TunnelIfaces[ulVSGId][ulTunnelId].bInUse == 0) {
		printk(KERN_INFO"Tunnel Interface is not in use"\
			".TunnelId=%u, VSGId=%u\n",
			ulTunnelId, ulVSGId);
		if (!bVal)
			local_bh_enable();
		return ASF_IPSEC_TUNNEL_NOT_FOUND;
	}
	printk(KERN_INFO"\nVSGID= %d TUNNELID= %d, MAGIC NUM = %d\n",
		ulVSGId, ulTunnelId,
		secFP_TunnelIfaces[ulVSGId][ulTunnelId].ulTunnelMagicNumber);

	pCINode = secFP_TunnelIfaces[ulVSGId][ulTunnelId].pSPDCIInList;
	for (; pCINode != NULL; pCINode = pCINode->pNext) {

		pInContainer = (SPDInContainer_t *)(ptrIArray_getData(
					&(secfp_InDB),
					pCINode->ulIndex));
		if (!pInContainer)
			continue;

		printk(KERN_INFO"=========IN Policy==================\n");
		printk(KERN_INFO"Id=%d, Proto %d, Dscp %d "\
			"Flags:Udp(%d) ESN(%d),DSCP(%d),ECN(%d)\n",
		pCINode->ulIndex,
		pInContainer->SPDParams.ucProto,
		pInContainer->SPDParams.ucDscp,
		pInContainer->SPDParams.bUdpEncap,
		pInContainer->SPDParams.bESN,
		pInContainer->SPDParams.bCopyDscp,
		pInContainer->SPDParams.bCopyEcn);

		print_SPDPolPPStats(pInContainer->PPStats);
		printk(KERN_INFO"--------------SA_LIST--------------------");
		for (pSPILinkNode = pInContainer->pSPIValList, ulSAIndex = 0;
			pSPILinkNode != NULL;
			pSPILinkNode = pSPILinkNode->pNext, ulSAIndex++) {
			printk(KERN_INFO"\nSPI = 0x%x", pSPILinkNode->ulSPIVal);
			ulHashVal = secfp_compute_hash(pSPILinkNode->ulSPIVal);
			for (pInSA = secFP_SPIHashTable[ulHashVal].pHeadSA;
				pInSA != NULL; pInSA = pInSA->pNext) {

				ASFSAStats_t outParams = {0, 0};
				ASFIPSecGetSAQueryParams_t inParams;

				printk(KERN_INFO"SpdContId =%d",
					pInSA->ulSPDInContainerIndex);
				print_SAParams(&pInSA->SAParams);

				inParams.ulVSGId = ulVSGId;
				inParams.ulTunnelId = ulTunnelId;
				inParams.ulSPDContainerIndex = pCINode->ulIndex;
				inParams.ulSPI = pInSA->SAParams.ulSPI;
				inParams.gwAddr.bIPv4OrIPv6 = pInSA->SAParams.tunnelInfo.bIPv4OrIPv6;
				if (pInSA->SAParams.tunnelInfo.bIPv4OrIPv6)
					memcpy(inParams.gwAddr.ipv6addr, pInSA->SAParams.tunnelInfo.addr.iphv6.daddr, 16);
				else
					inParams.gwAddr.ipv4addr =
						pInSA->SAParams.tunnelInfo.addr.iphv4.daddr;

				inParams.ucProtocol =
					pInSA->SAParams.ucProtocol;
				inParams.bDir = SECFP_IN;
				ASFIPSecSAQueryStats(&inParams, &outParams);
				printk(KERN_INFO"Stats:ulBytes=%llu,ulPkts= %llu",
					outParams.ulBytes, outParams.ulPkts);
#ifdef ASF_QMAN_IPSEC
				printk(KERN_INFO"SecFQ=%d, RecvFQ=%d\n",
					pInSA->ctx.SecFq->qman_fq.fqid,
					pInSA->ctx.RecvFq->qman_fq.fqid);
#endif
			}
		}
		printk(KERN_INFO"\n");
	}
	if (!bVal)
		local_bh_enable();
	return 0;
}