Example #1
0
static int rt2870_probe(
	IN struct usb_interface *intf,
	IN struct usb_device *usb_dev,
	IN const USB_DEVICE_ID *dev_id,
	IN VOID **ppAd)
{
	struct net_device	*net_dev = NULL;
	VOID			*pAd = NULL;
	INT			status, rv;
	PVOID			handle;
	RTMP_OS_NETDEV_OP_HOOK	netDevHook;
	ULONG			OpMode;
#ifdef CONFIG_PM
#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	INT res = 1;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */
#endif /* CONFIG_PM */	
	DBGPRINT(RT_DEBUG_TRACE, ("===>rt2870_probe()!\n"));
	
#ifdef CONFIG_PM
#ifdef USB_SUPPORT_SELECTIVE_SUSPEND

	res = usb_autopm_get_interface(intf);
	if (res)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("rt2870_probe autopm_resume fail ------\n"));
		return -EIO;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
	atomic_set(&intf->pm_usage_cnt, 1);
	printk(" rt2870_probe ====> pm_usage_cnt %d \n", atomic_read(&intf->pm_usage_cnt));
#else
	intf->pm_usage_cnt = 1;
	printk(" rt2870_probe ====> pm_usage_cnt %d \n", intf->pm_usage_cnt);
#endif
	

#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */
#endif /* CONFIG_PM */



/*RtmpDevInit============================================= */
	/* Allocate the RTMP_ADAPTER adapter structure */
	os_alloc_mem(NULL, (UCHAR **)&handle, sizeof(struct os_cookie));
	if (handle == NULL)
	{
		printk("rt2870_probe(): Allocate memory for os handle failed!\n");
		return -ENOMEM;
	}
	memset(handle, 0, sizeof(struct os_cookie));

	((POS_COOKIE)handle)->pUsb_Dev = usb_dev;

#ifdef CONFIG_PM
#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	((POS_COOKIE)handle)->intf = intf;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */
#endif /* CONFIG_PM */



	/* set/get operators to/from DRIVER module */
#ifdef OS_ABL_FUNC_SUPPORT
	/* get DRIVER operations */
	RtmpNetOpsInit(pRtmpDrvNetOps);
	RTMP_DRV_OPS_FUNCTION(pRtmpDrvOps, pRtmpDrvNetOps, NULL, NULL);
	RtmpNetOpsSet(pRtmpDrvNetOps);
#endif /* OS_ABL_FUNC_SUPPORT */

	rv = RTMPAllocAdapterBlock(handle, &pAd);
	if (rv != NDIS_STATUS_SUCCESS) 
	{
		os_free_mem(NULL, handle);
		goto err_out;
	}

/*USBDevInit============================================== */
	if (USBDevConfigInit(usb_dev, intf, pAd) == FALSE)
		goto err_out_free_radev;

	RtmpRaDevCtrlInit(pAd, RTMP_DEV_INF_USB);
	
/*NetDevInit============================================== */
	net_dev = RtmpPhyNetDevInit(pAd, &netDevHook);
	if (net_dev == NULL)
		goto err_out_free_radev;
	
	/* The net_device structure is now set up with USB-specific parameters. */
#ifdef NATIVE_WPA_SUPPLICANT_SUPPORT
	/*
	 * For supporting Network Manager: set the sysfs physical device
	 * reference for the network logical device. If set prior to
	 * registration, this will cause a symlink during initialization.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
	SET_NETDEV_DEV(net_dev, &(usb_dev->dev));
#endif
#endif /* NATIVE_WPA_SUPPLICANT_SUPPORT */

#ifdef CONFIG_STA_SUPPORT
	RTMP_DRIVER_STA_DEV_TYPE_SET(pAd, net_dev->type);
#endif /* CONFIG_STA_SUPPORT */

/*All done, it's time to register the net device to linux kernel. */
	/* Register this device */
#ifdef RT_CFG80211_SUPPORT
	/*
	 * In 2.6.32, the cfg80211 registration must come before
	 * register_netdevice(); we cannot defer it to rt28xx_open(),
	 * or we hit a NULL pointer in the list_add of
	 * cfg80211_netdev_notifier_call().
	 */
	CFG80211_Register(pAd, &(usb_dev->dev), net_dev);
#endif /* RT_CFG80211_SUPPORT */

	RTMP_DRIVER_OP_MODE_GET(pAd, &OpMode);
	status = RtmpOSNetDevAttach(OpMode, net_dev, &netDevHook);
	if (status != 0)
		goto err_out_free_netdev;

	*ppAd = pAd;

#ifdef INF_PPA_SUPPORT
	RTMP_DRIVER_INF_PPA_INIT(pAd);
#endif /* INF_PPA_SUPPORT */

#ifdef PRE_ASSIGN_MAC_ADDR
	{
		UCHAR PermanentAddress[MAC_ADDR_LEN];

		RTMP_DRIVER_MAC_ADDR_GET(pAd, &PermanentAddress[0]);
		DBGPRINT(RT_DEBUG_TRACE,
			 ("@%s MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
			  __FUNCTION__, PermanentAddress[0], PermanentAddress[1],
			  PermanentAddress[2], PermanentAddress[3],
			  PermanentAddress[4], PermanentAddress[5]));
		/* Set up the MAC address */
		RtmpOSNetDevAddrSet(OpMode, net_dev, &PermanentAddress[0], NULL);
	}
#endif /* PRE_ASSIGN_MAC_ADDR */

#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_ANDROID_POWER)
	((PRTMP_ADAPTER)pAd)->early_suspend.suspend = NULL;
	RTRegisterEarlySuspend(pAd);
#endif

	DBGPRINT(RT_DEBUG_TRACE, ("<===rt2870_probe()!\n"));

	return 0;

	/* --------------------------- ERROR HANDLE --------------------------- */	
err_out_free_netdev:
	RtmpOSNetDevFree(net_dev);
	
err_out_free_radev:
	RTMPFreeAdapter(pAd);
	
err_out:
	*ppAd = NULL;

	return -1;
	
}
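The probe above unwinds errors with the classic goto-ladder: each failure jumps to a label that releases only what was already set up, in reverse order of acquisition. A minimal self-contained sketch of the same pattern follows; the three malloc()ed buffers and all names are illustrative stand-ins for handle/adapter/net_dev, not the driver's API.

#include <stdlib.h>

static int example_probe(void)
{
	char *handle = NULL, *adapter = NULL, *netdev = NULL;

	handle = malloc(64);
	if (!handle)
		goto err_out;

	adapter = malloc(64);
	if (!adapter)
		goto err_free_handle;

	netdev = malloc(64);
	if (!netdev)
		goto err_free_adapter;

	/* Success: all three resources are live; a real probe would keep them. */
	free(netdev);
	free(adapter);
	free(handle);
	return 0;

	/* Unwind in reverse order of acquisition. */
err_free_adapter:
	free(adapter);
err_free_handle:
	free(handle);
err_out:
	return -1;
}

int main(void)
{
	return example_probe();
}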
Example #2
0
int s5p_hpd_get_state(void)
{
	return atomic_read(&hpd_struct.state);
}
void mem_forced_cp_crash(struct mem_link_device *mld)
{
	struct link_device *ld = &mld->link_dev;
	struct modem_ctl *mc = ld->mc;
	bool duplicated = false;
	unsigned long flags;

	/* Disable normal IPC */
	set_magic(mld, MEM_CRASH_MAGIC);
	set_access(mld, 0);

	spin_lock_irqsave(&mld->lock, flags);
	if (atomic_read(&mc->forced_cp_crash))
		duplicated = true;
	else
		atomic_set(&mc->forced_cp_crash, 1);
	spin_unlock_irqrestore(&mld->lock, flags);

	if (duplicated) {
		evt_log(0, "%s: %s: ALREADY in progress <%pf>\n",
			FUNC, ld->name, CALLER);
		return;
	}

	if (!cp_online(mc)) {
		evt_log(0, "%s: %s: %s.state %s != ONLINE <%pf>\n",
			FUNC, ld->name, mc->name, mc_state(mc), CALLER);
		return;
	}

	if (mc->wake_lock) {
		if (!wake_lock_active(mc->wake_lock)) {
			wake_lock(mc->wake_lock);
			mif_err("%s->wake_lock locked\n", mc->name);
		}
	}

	if (mld->attrs & LINK_ATTR(LINK_ATTR_MEM_DUMP)) {
		stop_net_ifaces(ld);

		if (mld->debug_info)
			mld->debug_info();

		/*
		 * If no CRASH_ACK arrives from the CP within the timeout,
		 * handle_no_cp_crash_ack() will be executed.
		 */
		mif_add_timer(&mc->crash_ack_timer, FORCE_CRASH_ACK_TIMEOUT,
			      handle_no_cp_crash_ack, (unsigned long)mld);

		/* Send the CRASH_EXIT command to the CP */
		send_ipc_irq(mld, cmd2int(CMD_CRASH_EXIT));
	} else {
		modemctl_notify_event(MDM_EVENT_CP_FORCE_CRASH);
	}

	evt_log(0, "%s->%s: CP_CRASH_REQ <%pf>\n", ld->name, mc->name, CALLER);

#ifdef DEBUG_MODEM_IF
	if (in_interrupt())
		queue_work(system_nrt_wq, &mld->dump_work);
	else
		save_mem_dump(mld);
#endif
}
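The duplicated-crash guard at the top of mem_forced_cp_crash() is a test-and-set under a spinlock: exactly one caller wins and proceeds, later callers just log "ALREADY in progress". A hedged userspace sketch of the same only-the-first-caller-proceeds guard, using a single C11 atomic exchange in place of the spinlock-plus-flag (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int crash_in_progress;

static void forced_crash(void)
{
	/* atomic_exchange returns the old value: nonzero means we lost. */
	if (atomic_exchange(&crash_in_progress, 1)) {
		printf("ALREADY in progress\n");
		return;
	}
	printf("starting crash handling\n");
}

int main(void)
{
	forced_crash();	/* first caller: starts crash handling */
	forced_crash();	/* second caller: reports duplicate */
	return 0;
}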
Example #4
0
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add the new chunk of uncached memory to
 * @nid: node id of the node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool,
					   0, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
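The nchunks_added snapshot taken before mutex_lock_interruptible(), and rechecked after, is what stops several racing allocators from each adding a chunk: whoever wins the mutex does the expensive work, and the losers notice on wakeup that the count already moved and return without duplicating it. A compact pthread sketch of that snapshot-and-recheck shape (all names illustrative, the allocation body elided):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t add_chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nchunks_added;

static int add_chunk(void)
{
	int snapshot = nchunks_added;	/* racy read, validated under the lock */

	pthread_mutex_lock(&add_chunk_mutex);
	if (nchunks_added > snapshot) {
		/* someone added a chunk while we were waiting */
		pthread_mutex_unlock(&add_chunk_mutex);
		return 0;
	}
	/* ... expensive allocation/conversion would happen here ... */
	nchunks_added++;
	pthread_mutex_unlock(&add_chunk_mutex);
	return 0;
}

int main(void)
{
	add_chunk();
	printf("chunks: %d\n", nchunks_added);
	return 0;
}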
Example #5
0
static int crw_collect_info(void *unused)
{
	struct crw crw[2];
	int ccode, signal;
	unsigned int chain;

repeat:
	signal = wait_event_interruptible(crw_handler_wait_q,
					  atomic_read(&crw_nr_req) > 0);
	if (unlikely(signal))
		atomic_inc(&crw_nr_req);
	chain = 0;
	while (1) {
		crw_handler_t handler;

		if (unlikely(chain > 1)) {
			struct crw tmp_crw;

			printk(KERN_WARNING"%s: Code does not support more "
			       "than two chained crws; please report to "
			       "[email protected]!\n", __func__);
			ccode = stcrw(&tmp_crw);
			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			       __func__, tmp_crw.slct, tmp_crw.oflw,
			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
			       tmp_crw.erc, tmp_crw.rsid);
			printk(KERN_WARNING"%s: This was crw number %x in the "
			       "chain\n", __func__, chain);
			if (ccode != 0)
				break;
			chain = tmp_crw.chn ? chain + 1 : 0;
			continue;
		}
		ccode = stcrw(&crw[chain]);
		if (ccode != 0)
			break;
		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
		       crw[chain].rsid);
		/* Check for overflows. */
		if (crw[chain].oflw) {
			int i;

			pr_debug("%s: crw overflow detected!\n", __func__);
			mutex_lock(&crw_handler_mutex);
			for (i = 0; i < NR_RSCS; i++) {
				if (crw_handlers[i])
					crw_handlers[i](NULL, NULL, 1);
			}
			mutex_unlock(&crw_handler_mutex);
			chain = 0;
			continue;
		}
		if (crw[0].chn && !chain) {
			chain++;
			continue;
		}
		mutex_lock(&crw_handler_mutex);
		handler = crw_handlers[crw[chain].rsc];
		if (handler)
			handler(&crw[0], chain ? &crw[1] : NULL, 0);
		mutex_unlock(&crw_handler_mutex);
		/* chain is always 0 or 1 here. */
		chain = crw[chain].chn ? chain + 1 : 0;
	}
	if (atomic_dec_and_test(&crw_nr_req))
		wake_up(&crw_handler_wait_q);
	goto repeat;
	return 0;
}
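The crw_nr_req counter ties this worker loop to crw_wait_for_channel_report() in Example #23: producers bump the count and wake the worker, and the worker's final atomic_dec_and_test() wakes anyone waiting for the count to drain back to zero. A small pthread sketch of that drain-to-zero handshake, using a mutex and condition variable instead of the kernel's atomics and wait queue (illustrative, not the s390 implementation):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int nr_req;

static void submit_request(void)
{
	pthread_mutex_lock(&lock);
	nr_req++;
	pthread_mutex_unlock(&lock);
}

static void complete_request(void)
{
	pthread_mutex_lock(&lock);
	if (--nr_req == 0)
		pthread_cond_broadcast(&drained);	/* wake drain waiters */
	pthread_mutex_unlock(&lock);
}

static void wait_for_drain(void)
{
	pthread_mutex_lock(&lock);
	while (nr_req > 0)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	submit_request();
	complete_request();
	wait_for_drain();
	printf("all requests drained\n");
	return 0;
}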
Example #6
0
/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "PND");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
	seq_printf(p, "  Performance pending work\n");
#endif
	if (generic_interrupt_extension) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
# ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
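The "%*s"/"%10u" pairs are what keep the /proc/interrupts columns aligned: prec right-pads the row label to the width of the widest IRQ number (computed by the caller), and each per-CPU count gets a fixed ten-character field. A standalone demo of the same formatting, with made-up counts:

#include <stdio.h>

int main(void)
{
	int prec = 4;	/* width of the widest IRQ label */
	unsigned int counts[2] = { 1234567, 42 };
	int j;

	printf("%*s: ", prec, "NMI");
	for (j = 0; j < 2; j++)
		printf("%10u ", counts[j]);
	printf("  Non-maskable interrupts\n");
	return 0;
}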
Example #7
0
int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
{
	int err;
	__be32 seq;
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;
	int nexthdr;
	unsigned int nhoff;

	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
		goto drop;

	do {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				nexthdr != IPPROTO_IPIP ? nexthdr : IPPROTO_IPV6, AF_INET6);
		if (x == NULL)
			goto drop;
		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		nexthdr = x->type->input(x, skb);
		if (nexthdr <= 0)
			goto drop_unlock;

		skb_network_header(skb)[nhoff] = nexthdr;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++] = x;

		if (x->mode->input(x, skb))
			goto drop;

		if (x->props.mode == XFRM_MODE_TUNNEL) { /* XXX */
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;
		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;
	skb->ip_summed = CHECKSUM_NONE;

	nf_reset(skb);

	if (decaps) {
		dst_release(skb->dst);
		skb->dst = NULL;
		netif_rx(skb);
		return -1;
	} else {
#ifdef CONFIG_NETFILTER
		ipv6_hdr(skb)->payload_len = htons(skb->len);
		__skb_push(skb, skb->data - skb_network_header(skb));

		NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
			ip6_rcv_finish);
		return -1;
#else
		return 1;
#endif
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr]);
	kfree_skb(skb);
	return -1;
}
Example #8
0
File: procfs.c Project: 7799/linux
static int show_super(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;
	struct reiserfs_sb_info *r = REISERFS_SB(sb);

	seq_printf(m, "state: \t%s\n"
		   "mount options: \t%s%s%s%s%s%s%s%s%s%s%s\n"
		   "gen. counter: \t%i\n"
		   "s_disk_reads: \t%i\n"
		   "s_disk_writes: \t%i\n"
		   "s_fix_nodes: \t%i\n"
		   "s_do_balance: \t%i\n"
		   "s_unneeded_left_neighbor: \t%i\n"
		   "s_good_search_by_key_reada: \t%i\n"
		   "s_bmaps: \t%i\n"
		   "s_bmaps_without_search: \t%i\n"
		   "s_direct2indirect: \t%i\n"
		   "s_indirect2direct: \t%i\n"
		   "\n"
		   "max_hash_collisions: \t%i\n"
		   "breads: \t%lu\n"
		   "bread_misses: \t%lu\n"
		   "search_by_key: \t%lu\n"
		   "search_by_key_fs_changed: \t%lu\n"
		   "search_by_key_restarted: \t%lu\n"
		   "insert_item_restarted: \t%lu\n"
		   "paste_into_item_restarted: \t%lu\n"
		   "cut_from_item_restarted: \t%lu\n"
		   "delete_solid_item_restarted: \t%lu\n"
		   "delete_item_restarted: \t%lu\n"
		   "leaked_oid: \t%lu\n"
		   "leaves_removable: \t%lu\n",
		   SF(s_mount_state) == REISERFS_VALID_FS ?
		   "REISERFS_VALID_FS" : "REISERFS_ERROR_FS",
		   reiserfs_r5_hash(sb) ? "FORCE_R5 " : "",
		   reiserfs_rupasov_hash(sb) ? "FORCE_RUPASOV " : "",
		   reiserfs_tea_hash(sb) ? "FORCE_TEA " : "",
		   reiserfs_hash_detect(sb) ? "DETECT_HASH " : "",
		   reiserfs_no_border(sb) ? "NO_BORDER " : "BORDER ",
		   reiserfs_no_unhashed_relocation(sb) ?
		   "NO_UNHASHED_RELOCATION " : "",
		   reiserfs_hashed_relocation(sb) ? "UNHASHED_RELOCATION " : "",
		   reiserfs_test4(sb) ? "TEST4 " : "",
		   have_large_tails(sb) ? "TAILS " : have_small_tails(sb) ?
		   "SMALL_TAILS " : "NO_TAILS ",
		   replay_only(sb) ? "REPLAY_ONLY " : "",
		   convert_reiserfs(sb) ? "CONV " : "",
		   atomic_read(&r->s_generation_counter),
		   SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
		   SF(s_do_balance), SF(s_unneeded_left_neighbor),
		   SF(s_good_search_by_key_reada), SF(s_bmaps),
		   SF(s_bmaps_without_search), SF(s_direct2indirect),
		   SF(s_indirect2direct), SFP(max_hash_collisions), SFP(breads),
		   SFP(bread_miss), SFP(search_by_key),
		   SFP(search_by_key_fs_changed), SFP(search_by_key_restarted),
		   SFP(insert_item_restarted), SFP(paste_into_item_restarted),
		   SFP(cut_from_item_restarted),
		   SFP(delete_solid_item_restarted), SFP(delete_item_restarted),
		   SFP(leaked_oid), SFP(leaves_removable));

	return 0;
}
Example #9
0
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
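At its core the allocator above is a bitmap search: test_and_set_bit() claims a QPN, the claim only counts if the bit was previously clear, and the n_free counter lets it skip full pages. A miniature single-page version using GCC's __atomic builtins; the real code scans multiple map pages, grows the table, and reserves QPNs 0 and 1 for SMI/GSI, all of which is elided here:

#include <stdio.h>

#define BITS_PER_PAGE 64

static unsigned long page_bits;	/* one 64-entry bitmap "page" */

/* Claim the lowest free ID, or -1 if the page is full. */
static int alloc_id(void)
{
	int offset;

	for (offset = 2; offset < BITS_PER_PAGE; offset++) {	/* 0/1 reserved */
		unsigned long mask = 1UL << offset;
		unsigned long old = __atomic_fetch_or(&page_bits, mask,
						      __ATOMIC_ACQ_REL);
		if (!(old & mask))
			return offset;	/* bit was clear: we own this ID */
	}
	return -1;
}

int main(void)
{
	printf("first id: %d\n", alloc_id());	/* 2 */
	printf("next id:  %d\n", alloc_id());	/* 3 */
	return 0;
}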
Example #10
0
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 * We set it up to overflow again in 1/HZ seconds.
 */
void timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	unsigned long cpu = smp_processor_id();
	unsigned jiffy_stamp = last_jiffy_stamp(cpu);
	extern void do_IRQ(struct pt_regs *);

	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#if 0   /* added by 2.6.17 lttng patch */
 	trace_kernel_trap_entry(regs->trap, instruction_pointer(regs));
#endif
	irq_enter();

	while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
		jiffy_stamp += tb_ticks_per_jiffy;

		profile_tick(CPU_PROFILING, regs);
		update_process_times(user_mode(regs));

		if (smp_processor_id())
			continue;

		/* We are in an interrupt, no need to save/restore flags */
		write_seqlock(&xtime_lock);
		tb_last_stamp = jiffy_stamp;
#ifdef CONFIG_LTT
		ltt_reset_timestamp();
#endif /* CONFIG_LTT */
		do_timer(regs);

		/*
		 * Update the RTC when needed; this should be performed on the
		 * right fraction of a second. Half or full second?
		 * Full second works on mk48t59 clocks, others need testing.
		 * Note that this update is basically only used through
		 * the adjtimex system calls. Setting the HW clock in
		 * any other way is a /dev/rtc and userland business.
		 * This is still wrong by -0.5/+1.5 jiffies because of the
		 * timer interrupt resolution and possible delay, but here we
		 * hit a quantization limit which can only be solved by higher
		 * resolution timers and decoupling time management from timer
		 * interrupts. This is also wrong on the clocks
		 * which require being written at the half-second boundary.
		 * We should have an rtc call that only sets the minutes and
		 * seconds, like on Intel, to avoid problems with non-UTC clocks.
		 */
		if (ppc_md.set_rtc_time && ntp_synced() &&
		    xtime.tv_sec - last_rtc_update >= 659 &&
		    abs((xtime.tv_nsec / 1000) - (1000000 - 1000000/HZ)) < 500000/HZ &&
		    jiffies - wall_jiffies == 1) {
			if (ppc_md.set_rtc_time(xtime.tv_sec + 1 + timezone_offset) == 0)
				last_rtc_update = xtime.tv_sec + 1;
			else
				/* Try again one minute later */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}
	if (!disarm_decr[smp_processor_id()])
		set_dec(next_dec);
	last_jiffy_stamp(cpu) = jiffy_stamp;

	if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
		ppc_md.heartbeat();

	irq_exit();
#if 0   /* added by 2.6.17 lttng patch */
 	trace_kernel_trap_exit();
#endif
}
Example #11
0
File: procfs.c Project: 7799/linux
static int show_journal(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;
	struct reiserfs_sb_info *r = REISERFS_SB(sb);
	struct reiserfs_super_block *rs = r->s_rs;
	struct journal_params *jp = &rs->s_v1.s_journal;
	char b[BDEVNAME_SIZE];

	seq_printf(m,		/* on-disk fields */
		   "jp_journal_1st_block: \t%i\n"
		   "jp_journal_dev: \t%s[%x]\n"
		   "jp_journal_size: \t%i\n"
		   "jp_journal_trans_max: \t%i\n"
		   "jp_journal_magic: \t%i\n"
		   "jp_journal_max_batch: \t%i\n"
		   "jp_journal_max_commit_age: \t%i\n"
		   "jp_journal_max_trans_age: \t%i\n"
		   /* incore fields */
		   "j_1st_reserved_block: \t%i\n"
		   "j_state: \t%li\n"
		   "j_trans_id: \t%u\n"
		   "j_mount_id: \t%lu\n"
		   "j_start: \t%lu\n"
		   "j_len: \t%lu\n"
		   "j_len_alloc: \t%lu\n"
		   "j_wcount: \t%i\n"
		   "j_bcount: \t%lu\n"
		   "j_first_unflushed_offset: \t%lu\n"
		   "j_last_flush_trans_id: \t%u\n"
		   "j_trans_start_time: \t%li\n"
		   "j_list_bitmap_index: \t%i\n"
		   "j_must_wait: \t%i\n"
		   "j_next_full_flush: \t%i\n"
		   "j_next_async_flush: \t%i\n"
		   "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n"
		   /* reiserfs_proc_info_data_t.journal fields */
		   "in_journal: \t%12lu\n"
		   "in_journal_bitmap: \t%12lu\n"
		   "in_journal_reusable: \t%12lu\n"
		   "lock_journal: \t%12lu\n"
		   "lock_journal_wait: \t%12lu\n"
		   "journal_begin: \t%12lu\n"
		   "journal_relock_writers: \t%12lu\n"
		   "journal_relock_wcount: \t%12lu\n"
		   "mark_dirty: \t%12lu\n"
		   "mark_dirty_already: \t%12lu\n"
		   "mark_dirty_notjournal: \t%12lu\n"
		   "restore_prepared: \t%12lu\n"
		   "prepare: \t%12lu\n"
		   "prepare_retry: \t%12lu\n",
		   DJP(jp_journal_1st_block),
		   bdevname(SB_JOURNAL(sb)->j_dev_bd, b),
		   DJP(jp_journal_dev),
		   DJP(jp_journal_size),
		   DJP(jp_journal_trans_max),
		   DJP(jp_journal_magic),
		   DJP(jp_journal_max_batch),
		   SB_JOURNAL(sb)->j_max_commit_age,
		   DJP(jp_journal_max_trans_age),
		   JF(j_1st_reserved_block),
		   JF(j_state),
		   JF(j_trans_id),
		   JF(j_mount_id),
		   JF(j_start),
		   JF(j_len),
		   JF(j_len_alloc),
		   atomic_read(&r->s_journal->j_wcount),
		   JF(j_bcount),
		   JF(j_first_unflushed_offset),
		   JF(j_last_flush_trans_id),
		   JF(j_trans_start_time),
		   JF(j_list_bitmap_index),
		   JF(j_must_wait),
		   JF(j_next_full_flush),
		   JF(j_next_async_flush),
		   JF(j_cnode_used),
		   JF(j_cnode_free),
		   SFPJ(in_journal),
		   SFPJ(in_journal_bitmap),
		   SFPJ(in_journal_reusable),
		   SFPJ(lock_journal),
		   SFPJ(lock_journal_wait),
		   SFPJ(journal_being),
		   SFPJ(journal_relock_writers),
		   SFPJ(journal_relock_wcount),
		   SFPJ(mark_dirty),
		   SFPJ(mark_dirty_already),
		   SFPJ(mark_dirty_notjournal),
		   SFPJ(restore_prepared), SFPJ(prepare), SFPJ(prepare_retry)
	    );
	return 0;
}
Example #12
0
int main(int argc, char *argv[])
{
	int done;
	int i;
	int iter;

	smp_init();

	if (argc > 1) {
		nkids = strtoul(argv[1], NULL, 0);
		if (nkids > NR_THREADS) {
			fprintf(stderr, "nkids = %d too large, max = %d\n",
				nkids, NR_THREADS);
			usage(argv[0]);
		}
	}
	printf("Number of threads: %d\n", nkids);

	spin_lock_init(&mutex);
	goflag = GOFLAG_INIT;
	for (i = 0; i < nkids; i++)
		create_thread(init_test, NULL);

	for (iter = 0; iter < 100; iter++) {
		spin_lock(&mutex);
		for_each_thread(i) {
			per_thread(doneflag, i) = 0;
		}
		__get_thread_var(doneflag) = 1;
		atomic_set(&counter, 0);
		atomic_set(&start_count, 0);
		initialized = 0;
		spin_unlock(&mutex);
		spin_lock(&mutex);
		goflag = GOFLAG_START;
		spin_unlock(&mutex);
		poll(NULL, 0, 1);
		done = 0;
		while (!done) {
			done = 1;
			for (i = 0; i < nkids; i++)
				if (!per_thread(doneflag, i)) {
					done = 0;
					break;
				}
			poll(NULL, 0, 1);
		}
		if (atomic_read(&counter) != 1) {
			printf("Double initialization, counter = %d\n",
			       atomic_read(&counter));
			exit(-1);
		} else {
			printf("Iteration %d succeeded\n", iter);
		}
		spin_lock(&mutex);
		atomic_set(&counter, 0);
		spin_unlock(&mutex);
		spin_lock(&mutex);
		goflag = GOFLAG_INIT;
		while (atomic_read(&counter) < nkids)
			poll(NULL, 0, 1);
		spin_unlock(&mutex);
		spin_lock(&mutex);
		atomic_set(&counter, 0);
		spin_unlock(&mutex);
	}

	goflag = GOFLAG_STOP;

	wait_all_threads();

	exit(0);
}
Example #13
0
int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
    int			result;
    int			size = skb->len;
    int			pending;
    struct urb		*txurb;
    struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
    struct data_bridge	*dev = __dev[id];
    struct bridge		*brdg;

    if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
        return -ENODEV;

    brdg = dev->brdg;
    if (!brdg)
        return -ENODEV;

    dev_dbg(&dev->intf->dev, "%s: write (%d bytes)\n", __func__, skb->len);

    result = usb_autopm_get_interface(dev->intf);
    if (result < 0) {
        dev_dbg(&dev->intf->dev, "%s: resume failure\n", __func__);
        goto pm_error;
    }

    txurb = usb_alloc_urb(0, GFP_KERNEL);
    if (!txurb) {
        dev_err(&dev->intf->dev, "%s: error allocating tx urb\n",
                __func__);
        result = -ENOMEM;
        goto error;
    }

    /* store dev pointer in skb */
    info->dev = dev;
    info->tx_queued = get_timestamp();

    usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
                      skb->data, skb->len, data_bridge_write_cb, skb);

    txurb->transfer_flags |= URB_ZERO_PACKET;

    if (test_bit(SUSPENDED, &dev->flags)) {
        usb_anchor_urb(txurb, &dev->delayed);
        goto free_urb;
    }

    pending = atomic_inc_return(&dev->pending_txurbs);
    usb_anchor_urb(txurb, &dev->tx_active);

    if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
        txurb->transfer_flags |= URB_NO_INTERRUPT;

    result = usb_submit_urb(txurb, GFP_KERNEL);
    if (result < 0) {
        usb_unanchor_urb(txurb);
        atomic_dec(&dev->pending_txurbs);
        dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
                __func__, result);
        goto free_urb;
    }

    dev->to_modem++;
    dev_dbg(&dev->intf->dev, "%s: pending_txurbs: %u\n", __func__, pending);

    /* flow control: last urb submitted but return -EBUSY */
    if (fctrl_support && pending > fctrl_en_thld) {
        set_bit(TX_THROTTLED, &brdg->flags);
        dev->tx_throttled_cnt++;
        pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
                             __func__, pending);
        return -EBUSY;
    }

    return size;

free_urb:
    usb_free_urb(txurb);
error:
    dev->txurb_drp_cnt++;
    usb_autopm_put_interface(dev->intf);
pm_error:
    return result;
}
Example #14
0
void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	atomic_inc(&inc->i_refcount);
}
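Note that the atomic_read() in the debug line is only a racy snapshot for logging; the atomic_inc() is what actually pins the object. RDS presumably pairs this with a put function that frees on the final drop. A minimal userspace get/put pair in the same style, with C11 atomics and illustrative names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct inc {
	atomic_int refcount;
};

static void inc_addref(struct inc *inc)
{
	/* the load is only a debug snapshot; the add is what matters */
	printf("addref %p ref %d\n", (void *)inc,
	       atomic_load(&inc->refcount));
	atomic_fetch_add(&inc->refcount, 1);
}

static void inc_put(struct inc *inc)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&inc->refcount, 1) == 1)
		free(inc);
}

int main(void)
{
	struct inc *inc = malloc(sizeof(*inc));

	atomic_init(&inc->refcount, 1);
	inc_addref(inc);
	inc_put(inc);
	inc_put(inc);	/* last put frees */
	return 0;
}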
Example #15
0
void nvhost_syncpt_set_min_eq_max(struct nvhost_syncpt *sp, u32 id)
{
	atomic_set(&sp->min_val[id], atomic_read(&sp->max_val[id]));
	syncpt_op().reset(sp, id);
}
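The min-catches-up-to-max copy here is two independent atomic operations, not one atomic transaction, so it relies on the caller to serialize against concurrent max updates. Where no such serialization exists, the same effect can be had with a compare-exchange loop; a hedged C11 sketch of that alternative (not the nvhost code, names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint min_val, max_val;

/* Advance min to the current max, tolerating concurrent max bumps. */
static void set_min_eq_max(void)
{
	unsigned int min = atomic_load(&min_val);
	unsigned int max = atomic_load(&max_val);

	/* on CAS failure, min is refreshed; re-read max and retry */
	while (min < max &&
	       !atomic_compare_exchange_weak(&min_val, &min, max))
		max = atomic_load(&max_val);
}

int main(void)
{
	atomic_init(&min_val, 3);
	atomic_init(&max_val, 7);
	set_min_eq_max();
	printf("min=%u max=%u\n", atomic_load(&min_val),
	       atomic_load(&max_val));
	return 0;
}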
Example #16
0
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int lastwqe = 0;
	int ret;

	spin_lock_irq(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * don't allow invalid Path MTU values or greater than 2048
	 * unless we are configured for a 4KB MTU
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
		(ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
		(attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->timerwait))
				list_del_init(&qp->timerwait);
			if (!list_empty(&qp->piowait))
				list_del_init(&qp->piowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~IPATH_S_ANY_WAIT;
			spin_unlock_irq(&qp->s_lock);
			/* Stop the sending tasklet */
			tasklet_kill(&qp->s_task);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			spin_lock_irq(&qp->s_lock);
		}
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock_irq(&qp->s_lock);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irq(&qp->s_lock);
	ret = -EINVAL;

bail:
	return ret;
}
Example #17
0
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto free;

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
		goto free;

	/*
	 *	Ours perhaps?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers, leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
			goto free;

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore them? For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
		goto free;

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally, it
		 *	returns one; otherwise we free it immediately. This
		 *	routine itself wakes the user context layers, so we do
		 *	no further work.
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		goto free;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine)
			goto free;

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			goto free;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else {
free:
		kfree_skb(skb);
	}
	return 0;
}
Example #18
0
/*
 * This is the NFS server kernel thread
 */
static void
nfsd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct fs_struct *fsp;
	int		err;
	struct nfsd_list me;
	sigset_t shutdown_mask, allowed_mask;

	/* Lock module and set up kernel thread */
	lock_kernel();
	daemonize("nfsd");

	/* After daemonize() this kernel thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	fsp = copy_fs_struct(current->fs);
	if (!fsp) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}
	exit_fs(current);
	current->fs = fsp;
	current->fs->umask = 0;

	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
	siginitsetinv(&allowed_mask, ALLOWED_SIGS);

	nfsdstats.th_cnt++;

	lockd_up();				/* start lockd */

	me.task = current;
	list_add(&me.list, &nfsd_list);

	unlock_kernel();

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Block all but the shutdown signals */
		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(serv, rqstp,
				       60*60*HZ)) == -EAGAIN)
			;
		if (err < 0)
			break;
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_inc(&nfsd_busy);

		/* Lock the export hash tables for reading. */
		exp_readlock();

		/* Process request with signals blocked.  */
		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		exp_readunlock();
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_dec(&nfsd_busy);
	}

	if (err != -EINTR) {
		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
	} else {
		unsigned int	signo;

		for (signo = 1; signo <= _NSIG; signo++)
			if (sigismember(&current->pending.signal, signo) &&
			    !sigismember(&current->blocked, signo))
				break;
		err = signo;
	}

	lock_kernel();

	/* Release lockd */
	lockd_down();

	/* Check if this is last thread */
	if (serv->sv_nrthreads == 1) {
		printk(KERN_WARNING "nfsd: last server has exited\n");
		if (err != SIG_NOCLEAN) {
			printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
			nfsd_export_flush();
		}
		nfsd_serv = NULL;
		nfsd_racache_shutdown();	/* release read-ahead cache */
		nfs4_state_shutdown();
	}
	list_del(&me.list);
	nfsdstats.th_cnt--;

out:
	/* Release the thread */
	svc_exit_thread(rqstp);

	/* Release module */
	module_put_and_exit(0);
}
Example #19
0
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
		     xfrm_address_t *saddr, u8 proto)
{
	struct xfrm_state *x = NULL;
	int wildcard = 0;
	xfrm_address_t *xany;
	struct xfrm_state *xfrm_vec_one = NULL;
	int nh = 0;
	int i = 0;

	xany = (xfrm_address_t *)&in6addr_any;

	for (i = 0; i < 3; i++) {
		xfrm_address_t *dst, *src;
		switch (i) {
		case 0:
			dst = daddr;
			src = saddr;
			break;
		case 1:
			/* lookup state with wild-card source address */
			wildcard = 1;
			dst = daddr;
			src = xany;
			break;
		case 2:
		default:
			/* lookup state with wild-card addresses */
			wildcard = 1; /* XXX */
			dst = xany;
			src = xany;
			break;
		}

		x = xfrm_state_lookup_byaddr(dst, src, proto, AF_INET6);
		if (!x)
			continue;

		spin_lock(&x->lock);

		if (wildcard) {
			if ((x->props.flags & XFRM_STATE_WILDRECV) == 0) {
				spin_unlock(&x->lock);
				xfrm_state_put(x);
				x = NULL;
				continue;
			}
		}

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}
		if (xfrm_state_check_expire(x)) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}

		nh = x->type->input(x, skb);
		if (nh <= 0) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec_one = x;
		break;
	}

	if (!xfrm_vec_one)
		goto drop;

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;
		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	if (1 + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	skb->sp->xvec[skb->sp->len] = xfrm_vec_one;
	skb->sp->len++;

	return 1;
drop:
	if (xfrm_vec_one)
		xfrm_state_put(xfrm_vec_one);
	return -1;
}
Example #20
0
void s5p_mfc_clock_off(struct s5p_mfc_dev *dev)
{
	int state, val;
	unsigned long timeout, flags;
	int ret = 0;

	dev->pm.clock_off_steps = 1;

	MFC_TRACE_DEV("++ clock_off\n");
	if (IS_MFCV6(dev)) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		dev->pm.clock_off_steps = 2;
		if ((atomic_dec_return(&dev->clk_ref) == 0) &&
				FW_HAS_BUS_RESET(dev)) {
			s5p_mfc_write_reg(dev, 0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);

			timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
			/* Check bus status */
			do {
				if (time_after(jiffies, timeout)) {
					mfc_err_dev("Timeout while resetting MFC.\n");
					break;
				}
				val = s5p_mfc_read_reg(dev,
						S5P_FIMV_MFC_BUS_RESET_CTRL);
			} while ((val & 0x2) == 0);
			dev->pm.clock_off_steps = 3;
		}
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		atomic_dec_return(&dev->clk_ref);
	}

	dev->pm.clock_off_steps = 4;
	state = atomic_read(&dev->clk_ref);
	if (state < 0) {
		mfc_err_dev("Clock state is wrong(%d)\n", state);
		atomic_set(&dev->clk_ref, 0);
		dev->pm.clock_off_steps = 5;
	} else {
		if (dev->curr_ctx_drm && dev->is_support_smc) {
			mfc_debug(3, "Begin: disable protection\n");
			spin_lock_irqsave(&dev->pm.clklock, flags);
			dev->pm.clock_off_steps = 6;
			ret = exynos_smc(SMC_PROTECTION_SET, 0,
					dev->id, SMC_PROTECTION_DISABLE);
			if (!ret) {
				printk("Protection Disable failed! ret(%u)\n", ret);
				spin_unlock_irqrestore(&dev->pm.clklock, flags);
				clk_disable(dev->pm.clock);
				return;
			}
			mfc_debug(3, "End: disable protection\n");
			dev->pm.clock_off_steps = 7;
			spin_unlock_irqrestore(&dev->pm.clklock, flags);
		} else {
			dev->pm.clock_off_steps = 8;
			s5p_mfc_mem_suspend(dev->alloc_ctx[0]);
			dev->pm.clock_off_steps = 9;
		}
		dev->pm.clock_off_steps = 10;
		clk_disable(dev->pm.clock);
	}
	mfc_debug(2, "- %d\n", state);
	MFC_TRACE_DEV("-- clock_off: ref state(%d)\n", state);
	dev->pm.clock_off_steps = 11;
}
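The clk_ref handling above shows the usual "last reference does the heavy work" shape: atomic_dec_return() tells the caller atomically whether it was the final user, and the negative-count branch is purely defensive repair. A compact sketch of that shape (illustrative names, not the MFC driver):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int clk_ref;

static void clock_on(void)
{
	if (atomic_fetch_add(&clk_ref, 1) == 0)
		printf("enabling clock\n");	/* first user powers it up */
}

static void clock_off(void)
{
	int ref = atomic_fetch_sub(&clk_ref, 1) - 1;	/* new count */

	if (ref == 0)
		printf("disabling clock\n");	/* last user powers it down */
	else if (ref < 0)
		atomic_store(&clk_ref, 0);	/* defensive reset, as above */
}

int main(void)
{
	clock_on();
	clock_on();
	clock_off();
	clock_off();	/* prints "disabling clock" */
	return 0;
}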
Example #21
0
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->bts = NULL;

	p->stack_start = stack_start;

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * OK, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		__cleanup_signal(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
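The ERR_PTR() return on the failure path above is the standard kernel idiom for returning either a valid pointer or an encoded errno in the same value. A minimal caller-side sketch, assuming only the usual helpers from <linux/err.h> (the concrete copy_process() signature varies by kernel version, so the wrapper name and argument handling here are hypothetical):

#include <linux/err.h>
#include <linux/sched.h>

/* Hypothetical sketch: unpack a pointer-or-errno return from copy_process(). */
static long fork_caller_sketch(struct task_struct *p)
{
	if (IS_ERR(p))
		return PTR_ERR(p);	/* e.g. -ENOMEM or -ERESTARTNOINTR */
	/* p is a fully set up but not-yet-woken child at this point */
	return task_pid_vnr(p);
}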
Example #22
0
int s5p_mfc_get_power_ref_cnt(struct s5p_mfc_dev *dev)
{
	return atomic_read(&dev->pm.power);
}
Example #23
0
void crw_wait_for_channel_report(void)
{
	crw_handle_channel_report();
	wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}
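The wait_event() above sleeps until the pending-request counter drains to zero, which implies every request site pairs an atomic_inc() of crw_nr_req with a wake-up on the final decrement. A sketch of that completion side, assuming the same counter and wait queue (the real s390 handler does more than this):

/* Sketch only: the completion side that lets the wait_event() above return. */
static void crw_request_done_sketch(void)
{
	/* atomic_dec_and_test() returns true only for the final decrement */
	if (atomic_dec_and_test(&crw_nr_req))
		wake_up(&crw_handler_wait_q);
}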
Example #24
0
int s5p_mfc_get_clk_ref_cnt(struct s5p_mfc_dev *dev)
{
	return atomic_read(&dev->clk_ref);
}
Example #25
0
static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}
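Examples #22 through #25 are all read-side accessors over an atomic_t; they are only meaningful if every writer updates the same counter with atomic operations. A hedged sketch of the matching update side for rto_count (the scheduler's real rt_set_overload()/rt_clear_overload() also maintain rd->rto_mask and insert memory barriers, omitted here):

/* Sketch of the writer side that pairs with rt_overloaded() above. */
static inline void rt_set_overload_sketch(struct rq *rq)
{
	atomic_inc(&rq->rd->rto_count);	/* one more overloaded runqueue */
}

static inline void rt_clear_overload_sketch(struct rq *rq)
{
	atomic_dec(&rq->rd->rto_count);
}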
Example #26
0
File: irq.c Project: 168519/linux
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}
Example #27
0
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
	__be32 saddr = 0;
	u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
	struct net_device *dev = neigh->dev;
	__be32 target = *(__be32 *)neigh->primary_key;
	int probes = atomic_read(&neigh->probes);
	struct in_device *in_dev;
	struct dst_entry *dst = NULL;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}
	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
	default:
	case 0:		/* By default announce any local IP */
		if (skb && inet_addr_type_dev_table(dev_net(dev), dev,
					  ip_hdr(skb)->saddr) == RTN_LOCAL)
			saddr = ip_hdr(skb)->saddr;
		break;
	case 1:		/* Restrict announcements of saddr in same subnet */
		if (!skb)
			break;
		saddr = ip_hdr(skb)->saddr;
		if (inet_addr_type_dev_table(dev_net(dev), dev,
					     saddr) == RTN_LOCAL) {
			/* saddr should be known to target */
			if (inet_addr_onlink(in_dev, target, saddr))
				break;
		}
		saddr = 0;
		break;
	case 2:		/* Avoid secondary IPs, get a primary/preferred one */
		break;
	}
	rcu_read_unlock();

	if (!saddr)
		saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);

	probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
	if (probes < 0) {
		if (!(neigh->nud_state & NUD_VALID))
			pr_debug("trying to ucast probe in NUD_INVALID\n");
		neigh_ha_snapshot(dst_ha, neigh, dev);
		dst_hw = dst_ha;
	} else {
		probes -= NEIGH_VAR(neigh->parms, APP_PROBES);
		if (probes < 0) {
			neigh_app_ns(neigh);
			return;
		}
	}

	if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
		dst = skb_dst(skb);
	arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
		     dst_hw, dev->dev_addr, NULL, dst);
}
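The probe accounting in arp_solicit() walks the probe count down through two budgets: the first UCAST_PROBES attempts go unicast to the cached hardware address, the next APP_PROBES are delegated to userspace via neigh_app_ns(), and anything beyond falls through to a broadcast ARP request (dst_hw stays NULL). A standalone, purely illustrative version of that cascade (classify_probe() and the enum are hypothetical, not kernel code):

/* Hypothetical illustration of the probe-budget cascade above. */
enum probe_kind { PROBE_UCAST, PROBE_APP, PROBE_BCAST };

static enum probe_kind classify_probe(int probes, int ucast_budget, int app_budget)
{
	probes -= ucast_budget;
	if (probes < 0)
		return PROBE_UCAST;	/* still within the unicast budget */
	probes -= app_budget;
	if (probes < 0)
		return PROBE_APP;	/* hand off to userspace (neigh_app_ns) */
	return PROBE_BCAST;		/* dst_hw stays NULL: broadcast ARP */
}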
Example #28
0
File: irq.c Project: 168519/linux
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	seq_printf(p, "%*s: ", prec, "HYP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
	seq_puts(p, "  Hypervisor callback interrupts\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
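Every row above relies on the same printf trick: the '*' in "%*s" consumes the prec argument as the field width, so the arch-specific labels right-align with the numeric IRQ rows printed by the generic /proc/interrupts code. A standalone illustration in plain userspace C (not kernel code; the values are made up):

#include <stdio.h>

/* Demonstrates the "%*s" dynamic field width used by arch_show_interrupts(). */
int main(void)
{
	int prec = 4;

	printf("%*s: %10u  Non-maskable interrupts\n", prec, "NMI", 17u);
	printf("%*s: %10u  Spurious interrupts\n", prec, "SPU", 3u);
	return 0;
}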
Example #29
0
struct net_device *__init
c4_add_dev (hdw_info_t * hi, int brdno, unsigned long f0, unsigned long f1,
            int irq0, int irq1)
{
    struct net_device *ndev;
    ci_t       *ci;

    ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
    if (!ndev)
    {
        pr_warning("%s: no memory for struct net_device !\n", hi->devname);
        error_flag = ENOMEM;
        return NULL;
    }
    ci = (ci_t *)(netdev_priv(ndev));
    ndev->irq = irq0;

    ci->hdw_info = hi;
    ci->state = C_INIT;         /* mark as hardware not available */
    ci->next = c4_list;
    c4_list = ci;
    ci->brdno = ci->next ? ci->next->brdno + 1 : 0;

    if (CI == 0)
        CI = ci;                    /* DEBUG, only board 0 usage */

    strcpy (ci->devname, hi->devname);
    ci->release = &pmcc4_OSSI_release[0];

    /* tasklet */
#if defined(SBE_ISR_TASKLET)
    tasklet_init (&ci->ci_musycc_isr_tasklet,
                  (void (*) (unsigned long)) musycc_intr_bh_tasklet,
                  (unsigned long) ci);

    if (atomic_read (&ci->ci_musycc_isr_tasklet.count) == 0)
        tasklet_disable_nosync (&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
    ci->ci_musycc_isr_tq.routine = (void *) (unsigned long) musycc_intr_bh_tasklet;
    ci->ci_musycc_isr_tq.data = ci;
#endif

    if (register_netdev (ndev) ||
        (c4_init (ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS))
    {
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = ENODEV;
        return NULL;
    }
    /*************************************************************
     *  int request_irq(unsigned int irq,
     *                  void (*handler)(int, void *, struct pt_regs *),
     *                  unsigned long flags, const char *dev_name, void *dev_id);
     *  wherein:
     *  irq      -> The interrupt number that is being requested.
     *  handler  -> Pointer to handling function being installed.
     *  flags    -> A bit mask of options related to interrupt management.
     *  dev_name -> String used in /proc/interrupts to show owner of interrupt.
     *  dev_id   -> Pointer (for shared interrupt lines) to point to its own
     *              private data area (to identify which device is interrupting).
     *
     *  extern void free_irq(unsigned int irq, void *dev_id);
     **************************************************************/

    if (request_irq (irq0, &c4_linux_interrupt,
#if defined(SBE_ISR_TASKLET)
                     IRQF_DISABLED | IRQF_SHARED,
#elif defined(SBE_ISR_IMMEDIATE)
                     IRQF_DISABLED | IRQF_SHARED,
#elif defined(SBE_ISR_INLINE)
                     IRQF_SHARED,
#endif
                     ndev->name, ndev))
    {
        pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0);
        unregister_netdev (ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = EIO;
        return NULL;
    }
#ifdef CONFIG_SBE_PMCC4_NCOMM
    if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev))
    {
        pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
        unregister_netdev (ndev);
        free_irq (irq0, ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = EIO;
        return NULL;
    }
#endif

    /* setup board identification information */

    {
        u_int32_t   tmp;

        hdw_sn_get (hi, brdno);     /* also sets PROM format type (promfmt)
                                     * for later usage */

        switch (hi->promfmt)
        {
        case PROM_FORMAT_TYPE1:
            memcpy (ndev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
            memcpy (&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4);     /* unaligned data
                                                                         * acquisition */
            ci->brd_id = cpu_to_be32 (tmp);
            break;
        case PROM_FORMAT_TYPE2:
            memcpy (ndev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
            memcpy (&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4);     /* unaligned data
                                                                         * acquisition */
            ci->brd_id = cpu_to_be32 (tmp);
            break;
        default:
            ci->brd_id = 0;
            memset (ndev->dev_addr, 0, 6);
            break;
        }

        /* sbeid_set_bdtype(ci) is the alternative when hdw_bid is preset */
        sbeid_set_hdwbid (ci);      /* requires bid to be preset */

    }

#ifdef CONFIG_PROC_FS
    sbecom_proc_brd_init (ci);
#endif
#if defined(SBE_ISR_TASKLET)
    tasklet_enable (&ci->ci_musycc_isr_tasklet);
#endif

    if ((error_flag = c4_init2 (ci)) != SBE_DRVR_SUCCESS)
    {
#ifdef CONFIG_PROC_FS
        sbecom_proc_brd_cleanup (ci);
#endif
        unregister_netdev (ndev);
        free_irq (irq1, ndev);
        free_irq (irq0, ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        return NULL;                /* failure, error_flag is set */
    }
    return ndev;
}
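Each failure path in c4_add_dev() repeats the full unwind by hand, which is easy to get out of sync (note that the last block must release irq1 before irq0, while earlier blocks have no irq1 to release). The conventional alternative is goto-based cleanup in reverse acquisition order; a fragmentary sketch under the same names, not the driver's actual code:

    /* Sketch: goto-style unwind in reverse acquisition order. */
    if (c4_init2(ci) != SBE_DRVR_SUCCESS)
        goto err_free_irq1;
    return ndev;

err_free_irq1:
    free_irq(irq1, ndev);
    free_irq(irq0, ndev);
    unregister_netdev(ndev);
    OS_kfree(netdev_priv(ndev));
    OS_kfree(ndev);
    return NULL;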
Example #30
0
static int rt2870_resume(
	struct usb_interface *intf)
{
	struct net_device *net_dev;
	VOID *pAd = usb_get_intfdata(intf);

#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	struct usb_device		*pUsb_Dev;
	UCHAR Flag;
	INT 		pm_usage_cnt;
#endif

#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_ANDROID_POWER)
	if ((RT_IS_EARLYSUSPEND_REGISTERED((PRTMP_ADAPTER)pAd)) && (late_resume_flag == TRUE)) {
		DBGPRINT(RT_DEBUG_OFF, ("%s: earlysuspend already registered; resume is handled via VIRTUAL_IF_UP\n", __func__));
		return 0;
	}
#endif

#ifdef USB_SUPPORT_SELECTIVE_SUSPEND

	RTMP_DRIVER_USB_DEV_GET(pAd, &pUsb_Dev);
	RTMP_DRIVER_USB_INTF_GET(pAd, &intf);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
	pm_usage_cnt = atomic_read(&intf->pm_usage_cnt);
#else
	pm_usage_cnt = intf->pm_usage_cnt;
#endif

	if (pm_usage_cnt <= 0)
		usb_autopm_get_interface(intf);
	DBGPRINT(RT_DEBUG_ERROR, ("autosuspend===> rt2870_resume()\n"));
	/*RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_SUSPEND); */
	RTMP_DRIVER_ADAPTER_SUSPEND_CLEAR(pAd);

	RTMP_DRIVER_ADAPTER_CPU_SUSPEND_TEST(pAd, &Flag);
	if (Flag)
	/*if(RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_CPU_SUSPEND)) */
	{
		/*RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_CPU_SUSPEND); */
		RTMP_DRIVER_ADAPTER_CPU_SUSPEND_CLEAR(pAd);
	}

	/*RT28xxUsbAsicRadioOn(pAd); */
	RTMP_DRIVER_ADAPTER_RT28XX_USB_ASICRADIO_ON(pAd);
	DBGPRINT(RT_DEBUG_ERROR, ("autosuspend<===  rt2870_resume()\n"));
	return 0;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */


	DBGPRINT(RT_DEBUG_TRACE, ("===> rt2870_resume()\n"));
/*	pAd->PM_FlgSuspend = 0; */
	RTMP_DRIVER_USB_RESUME(pAd);

/*	net_dev = pAd->net_dev; */
	RTMP_DRIVER_NET_DEV_GET(pAd, &net_dev);
	netif_device_attach(net_dev);
	netif_start_queue(net_dev);
	netif_carrier_on(net_dev);
	netif_wake_queue(net_dev);

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2870_resume()\n"));
	return 0;
}
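rt2870_resume() takes a runtime-PM reference with usb_autopm_get_interface() but never drops it here; in the USB autosuspend model every successful get must eventually be balanced by usb_autopm_put_interface(), which this driver does elsewhere (e.g. when the interface goes down). A hedged sketch of the balancing call, with a hypothetical wrapper name:

/* Sketch only: release the PM reference taken during probe/resume so the
 * USB core may autosuspend the device again. */
static void rt2870_allow_autosuspend_sketch(struct usb_interface *intf)
{
	usb_autopm_put_interface(intf);
}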