static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}
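The per-page chunk size above is a bit identity: since PAGE_MASK is ~(PAGE_SIZE - 1), the expression (~to & ~PAGE_MASK) + 1 is exactly the number of bytes from `to` to the end of its page, so each memcpy stays within the single pinned page. A standalone userspace sketch (PAGE_SIZE and PAGE_MASK defined locally to mirror the kernel's) checking that identity:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr;

	for (addr = 0; addr < 4 * PAGE_SIZE; addr += 123) {
		/* bytes from addr up to and including the last byte of its page */
		unsigned long tocopy = (~addr & ~PAGE_MASK) + 1;

		assert(tocopy == PAGE_SIZE - (addr & ~PAGE_MASK));
		assert(tocopy >= 1 && tocopy <= PAGE_SIZE);
	}
	printf("identity holds\n");
	return 0;
}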
Example #2
void*
osl_pktget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
	gfp_t flags;

	flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	if ((skb = __dev_alloc_skb(len, flags))) {
		skb_put(skb, len);
		skb->priority = 0;


		osh->pub.pktalloced++;
	}

	return ((void*) skb);
}
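Picking GFP flags with in_atomic(), as above, is a heuristic: on kernels built without CONFIG_PREEMPT the preempt count is not bumped by spin_lock(), so in_atomic() can return false while a spinlock is held and the allocation would wrongly be allowed to sleep. The conventional fix is to have the caller state the context. A sketch under that assumption; osl_pktget_flags is a hypothetical name, not an existing OSL entry point:

void *
osl_pktget_flags(osl_t *osh, uint len, gfp_t flags)
{
	struct sk_buff *skb;

	/* the caller knows its own context, so it passes GFP_ATOMIC/GFP_KERNEL */
	if ((skb = __dev_alloc_skb(len, flags))) {
		skb_put(skb, len);
		skb->priority = 0;
		osh->pub.pktalloced++;
	}

	return ((void *)skb);
}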
Example #3
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
	osl_t *osh;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	if (!in_atomic())		
		osh = kmalloc(sizeof(osl_t), GFP_KERNEL);
	else
#endif
	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);

	ASSERT(osh);
	if (!osh)
		return NULL;

	bzero(osh, sizeof(osl_t));

	
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->magic = OS_HANDLE_MAGIC;
	osh->malloced = 0;
	osh->failed = 0;
	osh->dbgmem_list = NULL;
	osh->pdev = pdev;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;

	switch (bustype) {
		case PCI_BUS:
		case SI_BUS:
		case PCMCIA_BUS:
			osh->pub.mmbus = TRUE;
			break;
		case JTAG_BUS:
		case SDIO_BUS:
		case USB_BUS:
		case SPI_BUS:
			osh->pub.mmbus = FALSE;
			break;
		default:
			ASSERT(FALSE);
			break;
	}

	return osh;
}
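The version-gated allocation above recurs throughout this OSL (osl_malloc and osl_pktdup below use the same shape), so it could live in one helper. A sketch; osl_gfp is a hypothetical name:

/* Collapse the #if/else pattern into a single expression on kernels
 * new enough for in_atomic() to be usable for this purpose. */
static inline gfp_t osl_gfp(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
#else
	return GFP_ATOMIC;
#endif
}

With it, the allocation in osl_attach() would reduce to osh = kmalloc(sizeof(osl_t), osl_gfp());.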
Example #4
static void gta02_bl_set_intensity(int intensity)
{
	struct pcf50633 *pcf = gta02_pcf;
	int old_intensity;	/* read below, after the atomic-context check */

	/* We map 8-bit intensity to 6-bit intensity in hardware. */
	intensity >>= 2;

	/*
	 * This can happen during, eg, print of panic on blanked console,
	 * but we can't service i2c without interrupts active, so abort.
	 */
	if (in_atomic()) {
		printk(KERN_ERR "gta02_bl_set_intensity called while atomic\n");
		return;
	}

	old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
	if (intensity == old_intensity)
		return;

	/* We can't do this anywhere else. */
	pcf50633_reg_write(pcf, PCF50633_REG_LEDDIM, 5);

	if (!(pcf50633_reg_read(pcf, PCF50633_REG_LEDENA) & 3))
		old_intensity = 0;

	/*
	 * The PCF50633 cannot handle LEDOUT = 0 (datasheet p60)
	 * if seen, you have to re-enable the LED unit.
	 */
	if (!intensity || !old_intensity)
		pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 0);

	/* Illegal to set LEDOUT to 0. */
	if (!intensity)
		pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, 2);
	else
		pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f,
					  intensity);

	if (intensity)
		pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 2);

}
Example #5
int usbd_fifo_add_pointer(struct fifo_descriptor *pfifodesc, const void *buf, int bufsize, int user_mode_buffer)
{
	struct fifo_entry *pfifoentry;

	if( !pfifodesc ) return -EINVAL;
	if( !buf ) return -EINVAL;

	if( !bufsize ) return -EINVAL;

	if( in_atomic() )
	{
		/* TODO: in_atomic() cannot detect every atomic context (e.g. a
		 * spinlock held on a !CONFIG_PREEMPT kernel), so this choice is
		 * unreliable; the caller should pass the gfp_t instead. */
		pfifoentry = kmalloc(sizeof(*pfifoentry)+bufsize-1, GFP_ATOMIC);
	} else
	{
	   	pfifoentry = kmalloc(sizeof(*pfifoentry)+bufsize-1, GFP_KERNEL);
	}

   	if( pfifoentry )
   	{
		if( user_mode_buffer )
		{
			if( copy_from_user(pfifoentry->data, buf, bufsize) != 0 )
			{
				TRACE("usbd_fifo_add_pointer: cannot copy from user buffer\n");
				kfree(pfifoentry);
				return -EFAULT;
			}
		} else
		{
			memcpy(pfifoentry->data, buf, bufsize);
		}

   		pfifoentry->read = 0;
   		pfifoentry->size = bufsize;

		spin_lock(&pfifodesc->lock);
		list_add_tail(&pfifoentry->entry, &pfifodesc->entrylist);
		pfifodesc->num_entries++;
		spin_unlock(&pfifodesc->lock);

		wake_up(&pfifodesc->waitqueue);

		return 0;
   	}

   	return -ENOMEM;
}
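The sizeof(*pfifoentry) + bufsize - 1 sizing above implies struct fifo_entry ends in a one-byte array. A sketch of the same allocation with a C99 flexible array member, which drops the off-by-one arithmetic; the field layout is an assumption inferred from the members referenced above:

struct fifo_entry {
	struct list_head entry;
	int read;
	int size;
	char data[];		/* flexible array member, contributes 0 to sizeof */
};

static struct fifo_entry *fifo_entry_alloc(int bufsize, gfp_t flags)
{
	/* no "- 1" needed: sizeof() excludes the flexible member */
	return kmalloc(sizeof(struct fifo_entry) + bufsize, flags);
}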
Example #6
/**
 *	freeze_processes - tell processes to enter the refrigerator
 *
 *	Returns 0 on success, or the number of processes that didn't freeze,
 *	although they were told to.
 */
int freeze_processes(void)
{
	unsigned int nr_unfrozen;

	printk("Stopping tasks ... ");
	nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE);
	if (nr_unfrozen)
		return nr_unfrozen;

	sys_sync();
	nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
	if (nr_unfrozen)
		return nr_unfrozen;

	printk("done.\n");
	BUG_ON(in_atomic());
	return 0;
}
Example #7
void *
osl_pktdup(osl_t *osh, void *skb)
{
    void * p;
    gfp_t flags;

    flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
    if ((p = skb_clone((struct sk_buff*)skb, flags)) == NULL)
        return NULL;


    if (osh->pub.pkttag)
        bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);


    osh->pub.pktalloced++;
    return (p);
}
Example #8
v_VOID_t * vos_mem_malloc( v_SIZE_t size )
{
   gfp_t flags = GFP_KERNEL;
#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
    v_VOID_t* pmem;
#endif
   v_VOID_t* memPtr = NULL;
   unsigned long  time_before_kmalloc;

   if (size > (1024*1024) || size == 0)
   {
       VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
            "%s: called with invalid arg %u !!", __func__, size);
       return NULL;
   }
   if (in_interrupt() || irqs_disabled() || in_atomic())
   {
      flags = GFP_ATOMIC;
   }
#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
   if(size > WCNSS_PRE_ALLOC_GET_THRESHOLD)
   {
       pmem = wcnss_prealloc_get(size);
       if(NULL != pmem) {
           memset(pmem, 0, size);
           return pmem;
       }
   }
#endif
   time_before_kmalloc = vos_timer_get_system_time();
   memPtr = kmalloc(size, flags);
   /* If time taken by kmalloc is greater than
    * VOS_GET_MEMORY_TIME_THRESHOLD msec
    */
   if (vos_timer_get_system_time() - time_before_kmalloc >=
                                    VOS_GET_MEMORY_TIME_THRESHOLD)
       VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
           "%s: kmalloc took %lu msec for size %d from %pS",
           __func__,
           vos_timer_get_system_time() - time_before_kmalloc,
           size, (void *)_RET_IP_);
   return memPtr;
}
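The three-part context test above (in_interrupt() || irqs_disabled() || in_atomic()) also appears in nl_srv_bcast() further below, so the policy could be centralized. A sketch; vos_gfp_flags is a hypothetical name, not an existing VOS API:

static inline gfp_t vos_gfp_flags(void)
{
	/* any of these means the allocator must not sleep */
	if (in_interrupt() || irqs_disabled() || in_atomic())
		return GFP_ATOMIC;
	return GFP_KERNEL;
}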
Example #9
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	gfp_t flags;

	
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

	flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	if ((addr = kmalloc(size, flags)) == NULL) {
#else
	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
#endif 
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh)
		atomic_add(size, &osh->malloced);

	return (addr);
}

void
osl_mfree(osl_t *osh, void *addr, uint size)
{
	if (osh) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
		atomic_sub(size, &osh->malloced);
	}
	kfree(addr);
}

uint
osl_malloced(osl_t *osh)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	return (atomic_read(&osh->malloced));
}
Example #10
void *
osl_pktdup(osl_t *osh, void *skb)
{
	void * p;
	unsigned long irqflags;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	gfp_t flags;
#endif

	PKTCTFMAP(osh, skb);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	if ((p = skb_clone((struct sk_buff *)skb, flags)) == NULL)
#else
	if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)
#endif 
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;

		
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif 

	
	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);

	
	spin_lock_irqsave(&osh->pktalloc_lock, irqflags);
	osh->pub.pktalloced++;
	spin_unlock_irqrestore(&osh->pktalloc_lock, irqflags);
	return (p);
}
Example #11
/* read a value from the MII */
int mii_read(struct net_device *dev, int phy_id, int location) 
{
    BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
    volatile EmacRegisters *emac = pDevCtrl->emac;

    emac->mdioData = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (location << MDIO_REG_SHIFT);

#if defined (CONFIG_MIPS_BCM_NDVD)
	while ( ! (emac->intStatus & EMAC_MDIO_INT) ) {
		if (!in_atomic()) {
			yield();
		}
	}
#else
    while ( ! (emac->intStatus & EMAC_MDIO_INT) )
        ;
#endif
    emac->intStatus |= EMAC_MDIO_INT;
    return emac->mdioData & 0xffff;
}
Example #12
File: simd.c Project: avagin/linux
static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}
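For context, the encrypt path in crypto/simd.c is the line-for-line twin of the decrypt function above, differing only in the final call; a sketch of that twin, following the same shape:

static int simd_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	/* fall back to cryptd when SIMD is unusable or requests are queued */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_encrypt(subreq);
}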
Example #13
static int mtd_ipanic_block_write(struct mtd_ipanic_data *ctx, loff_t to, int bounce_len)
{
	int rc;
	size_t wlen;
	int panic = in_interrupt() | in_atomic();
	int index = to >> ctx->mtd->writesize_shift;

	if ((index < 0) || (index >= IPANIC_OOPS_BLOCK_COUNT)) {
		return -EINVAL;
	}

	if (bounce_len > ctx->mtd->writesize) {
		xlog_printk(ANDROID_LOG_ERROR, IPANIC_LOG_TAG, "aee-ipanic(%s) len too large\n", __func__);
		return -EINVAL;
	}
	if (panic && !ctx->mtd->_panic_write) {
		xlog_printk(ANDROID_LOG_ERROR, IPANIC_LOG_TAG, "%s: No panic_write available\n", __func__);
		return 0;
	} else if (!panic && !ctx->mtd->_write) {
		xlog_printk(ANDROID_LOG_ERROR, IPANIC_LOG_TAG, "%s: No write available\n", __func__);
		return 0;
	}

	if (bounce_len < ctx->mtd->writesize)
		memset(ctx->bounce + bounce_len, 0, ctx->mtd->writesize - bounce_len);
	ipanic_block_scramble(ctx->bounce, ctx->mtd->writesize);

	if (panic)
		rc = ctx->mtd->_panic_write(ctx->mtd, ctx->blk_offset[index], ctx->mtd->writesize, &wlen, ctx->bounce);
	else
		rc = ctx->mtd->_write(ctx->mtd, ctx->blk_offset[index], ctx->mtd->writesize, &wlen, ctx->bounce);

	if (rc) {
		xlog_printk(ANDROID_LOG_ERROR, IPANIC_LOG_TAG,
		       "%s: Error writing data to flash (%d)\n",
		       __func__, rc);
		return rc;
	}

	return wlen;
}
Example #14
/* write a value to the MII */
void mii_write(struct net_device *dev, int phy_id, int location, int val)
{
    BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
    volatile EmacRegisters *emac = pDevCtrl->emac;
    uint32 data;

    data = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | (location << MDIO_REG_SHIFT) | val;
    emac->mdioData = data;

#if defined (CONFIG_MIPS_BCM_NDVD)
	while ( ! (emac->intStatus & EMAC_MDIO_INT) ) {
		if (!in_atomic()) {
			yield();
		}
	}
#else
    while ( ! (emac->intStatus & EMAC_MDIO_INT) )
        ;
#endif
    emac->intStatus |= EMAC_MDIO_INT;
}
Example #15
void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
		    const char *modname, const char *fmt, ...)
{
	if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) &&
		     (level <= rtlpriv->dbg.global_debuglevel))) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_DEBUG "%s:%ps:<%lx-%x> %pV",
		       modname, __builtin_return_address(0),
		       in_interrupt(), in_atomic(),
		       &vaf);

		va_end(args);
	}
}
Example #16
/*************I2C functions*******************/
static int ds2482_get_i2c_bus(struct i2c_client *client)
{
    struct i2c_adapter *adap = client->adapter;
    int ret;

    if (adap->algo->master_xfer) {
        if (in_atomic() || irqs_disabled()) {
            ret = rt_mutex_trylock(&adap->bus_lock);
            if (!ret)
                /* I2C activity is ongoing. */
                return -EAGAIN;
        } else {
            rt_mutex_lock(&adap->bus_lock);
        }

        return 0;
    } else {
        dev_err(&client->dev, "I2C level transfers not supported\n");
        return -EOPNOTSUPP;
    }
}
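ds2482_get_i2c_bus() returns 0 with adap->bus_lock held, so every successful call needs a matching release; the trylock path returns -EAGAIN so atomic-context callers can retry instead of blocking. A sketch of the counterpart (ds2482_put_i2c_bus is a hypothetical name):

static void ds2482_put_i2c_bus(struct i2c_client *client)
{
    /* pairs with the rt_mutex_trylock()/rt_mutex_lock() in the get path */
    rt_mutex_unlock(&client->adapter->bus_lock);
}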
Example #17
void *
osl_pktdup(osl_t *osh, void *skb)
{
	void * p;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	if (!in_atomic())		
		p = skb_clone((struct sk_buff*)skb, GFP_KERNEL);
	else
#endif
	p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC);
	if (!p)
		return p;

	
	if (osh->pub.pkttag)
		bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);

	
	osh->pub.pktalloced++;
	return (p);
}
Example #18
/*
 * Copy as much as we can from the page to user space and return the number of
 * bytes which were successfully copied.  If a fault is encountered then return
 * the number of bytes copied up to that point.
 */
static size_t ii_iovec_copy_to_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	struct iovec *iov = (struct iovec *)i->data;
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = iov->iov_base + i->iov_offset;
		left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_to_user(kaddr + offset, iov,
					      i->iov_offset, bytes, 1);
	}
	kunmap_atomic(kaddr);

	return copied;
}
Example #19
/*
 * This API is to be used for asynchronous vendor events. This
 * shouldn't be used in response to a vendor command from its
 * do_it handler context (instead wl_cfgvendor_send_cmd_reply should
 * be used).
 */
int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
	struct net_device *dev, int event_id, const void  *data, int len)
{
	gfp_t kflags;
	struct sk_buff *skb;

	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;

	/* Alloc the SKB for vendor_event */
	skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags);
	if (!skb) {
		WL_ERR(("skb alloc failed"));
		return -ENOMEM;
	}

	/* Push the data to the skb */
	nla_put_nohdr(skb, len, data);

	cfg80211_vendor_event(skb, kflags);

	return 0;
}
Example #20
/*
 * This API is to be used for asynchronous vendor events. This
 * shouldn't be used in response to a vendor command from its
 * do_it handler context (instead rtw_cfgvendor_send_cmd_reply should
 * be used).
 */
int rtw_cfgvendor_send_async_event(struct wiphy *wiphy,
	struct net_device *dev, int event_id, const void  *data, int len)
{
	gfp_t kflags;
	struct sk_buff *skb;

	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;

	/* Alloc the SKB for vendor_event */
	skb = rtw_cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags);
	if (!skb) {
		DBG_871X_LEVEL(_drv_err_, FUNC_NDEV_FMT" skb alloc failed", FUNC_NDEV_ARG(dev));
		return -ENOMEM;
	}

	/* Push the data to the skb */
	nla_put_nohdr(skb, len, data);

	rtw_cfg80211_vendor_event(skb, kflags);

	return 0;
}
Example #21
/**
 * nl_srv_bcast() - wrapper function to do broadcast tx through cnss_logger
 * @skb:	the socket buffer to send
 *
 * The cnss_logger_nl_bcast() is used to transmit the socket buffer.
 *
 * Return: status of transmission
 */
int nl_srv_bcast(struct sk_buff *skb)
{
	int err = -EINVAL;
	int flags = GFP_KERNEL;

	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	/* sender's pid */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	NETLINK_CB(skb).pid = 0;
#else
	NETLINK_CB(skb).portid = 0;
#endif
	 /* destination group */
	NETLINK_CB(skb).dst_group = WLAN_NLINK_MCAST_GRP_ID;

	if (nl_srv_is_initialized() == 0)
		err = cnss_logger_nl_bcast(skb, WLAN_NLINK_MCAST_GRP_ID, flags);
	else
		dev_kfree_skb(skb);
	return err;
}
Example #22
/* 
 * Allocate BUF Rx packets.
 * Add 16 bytes before the data buffer for WSPI overhead!
 */
void* RxBufAlloc(TI_HANDLE hOs, TI_UINT32 len,PacketClassTag_e ePacketClassTag)
{
    TI_UINT32      alloc_len = len + WSPI_PAD_BYTES + PAYLOAD_ALIGN_PAD_BYTES + RX_HEAD_LEN_ALIGNED;
	struct sk_buff *skb;
	rx_head_t *rx_head;
	gfp_t flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;

	skb = alloc_skb(alloc_len, flags);
	if (!skb)
	{
        return NULL;
    }
	rx_head  = (rx_head_t *)skb->head;

	rx_head->skb = skb;
	skb_reserve(skb, RX_HEAD_LEN_ALIGNED+WSPI_PAD_BYTES);
/*
	printk("-->> RxBufAlloc(len=%d)  skb=0x%x skb->data=0x%x skb->head=0x%x skb->len=%d\n",
		   (int)len, (int)skb, (int)skb->data, (int)skb->head, (int)skb->len);
*/
	return skb->data;
    
}
Example #23
/** 
 * \<\<public\>\> Flushes all pending methods in the queue and submits a new method for execution.
 * This operation must be atomic - under a queue lock, all methods are
 * removed and a new wrapper is added to the method queue.
 *
 * @param *self - pointer to this task instance
 * @param *method - pointer to the method that is to be submitted
 * @param *data - argument for the submitted method
 * @param size - size of the argument in bytes
 */
void tcmi_task_flush_and_submit_method(struct tcmi_task *self, tcmi_method_t *method, 
				       void *data, int size)
{
	struct tcmi_method_wrapper *wr, *old_w;
	if (!(wr = tcmi_method_wrapper_new(method, data, size))) {
		mdbg(ERR3, "Can't create method wrapper");
		goto exit0;
	}

	tcmi_queue_lock(&self->method_queue);
	/* flush everything in the queue. */
	while (!tcmi_queue_empty(&self->method_queue)) {
		__tcmi_queue_remove_entry(&self->method_queue, old_w, node);
		tcmi_method_wrapper_put(old_w);
	}
	/* append the requested method into the queue. */
	__tcmi_queue_add(&self->method_queue, &wr->node);

	tcmi_queue_unlock(&self->method_queue);
	mdbg(INFO3, "Flushed method queue and submitted new method in atomic=%d", in_atomic());
 exit0:
	return;
}
Example #24
/*Write function in ISP memory management*/
int hmm_store(void *virt, const void *data, unsigned int bytes)
{
	unsigned int ptr;
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	ptr = (unsigned int)virt;

	bo = hmm_bo_device_search_in_range(&bo_device, ptr);
	ret = hmm_check_bo(bo, ptr);
	if (ret)
		return ret;

	src = (char *)data;
	while (bytes) {
		idx = (ptr - bo->vm_node->start) >> PAGE_SHIFT;
		offset = (ptr - bo->vm_node->start) - (idx << PAGE_SHIFT);

		if (in_atomic())
			des = (char *)kmap_atomic(bo->page_obj[idx].page);
		else
			des = (char *)kmap(bo->page_obj[idx].page);

		if (!des) {
			v4l2_err(&atomisp_dev,
				    "kmap buffer object page failed: "
				    "pg_idx = %d\n", idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		ptr += len;

#ifdef USE_SSSE3
		_ssse3_memcpy(des, src, len);
#else
		memcpy(des, src, len);
#endif
		src += len;

		if (in_atomic())
			/*
			 * Note: kunmap_atomic requires return addr from
			 * kmap_atomic, not the page. See linux/highmem.h
			 */
			kunmap_atomic(des - offset);
		else
			kunmap(bo->page_obj[idx].page);
	}

	return 0;
}
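The map/unmap choice in hmm_store() (kmap_atomic when atomic, kmap otherwise) could be wrapped so the in_atomic() decision lives in one place. A sketch; both helper names are hypothetical, not part of the hmm API. Note the caller must hold on to the unoffset mapping address for the unmap:

static void *hmm_kmap_page(struct page *page)
{
	return in_atomic() ? kmap_atomic(page) : kmap(page);
}

static void hmm_kunmap_page(struct page *page, void *vaddr)
{
	if (in_atomic())
		kunmap_atomic(vaddr);	/* wants the address from kmap_atomic */
	else
		kunmap(page);		/* wants the struct page */
}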
Example #25
/* 0 = success, else # of processes that we failed to stop */
int freeze_processes(void)
{
	int todo, nr_user, user_frozen;
	unsigned long start_time;
	struct task_struct *g, *p;

	printk( "Stopping tasks: " );
	start_time = jiffies;
	user_frozen = 0;
	do {
		nr_user = todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (!freezeable(p))
				continue;
			if (frozen(p))
				continue;
			if (p->state == TASK_TRACED && frozen(p->parent)) {
				cancel_freezing(p);
				continue;
			}
			if (p->mm && !(p->flags & PF_BORROWED_MM)) {
				/* The task is a user-space one.
				 * Freeze it unless there's a vfork completion
				 * pending
				 */
				if (!task_aux(p)->vfork_done)
					freeze_process(p);
				nr_user++;
			} else {
				/* Freeze only if the user space is frozen */
				if (user_frozen)
					freeze_process(p);
				todo++;
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		todo += nr_user;
		if (!user_frozen && !nr_user) {
			sys_sync();
			start_time = jiffies;
		}
		user_frozen = !nr_user;
		yield();			/* Yield is okay here */
		if (todo && time_after(jiffies, start_time + TIMEOUT))
			break;
	} while(todo);

	/* This does not unfreeze processes that are already frozen
	 * (we have slightly ugly calling convention in that respect,
	 * and caller must call thaw_processes() if something fails),
	 * but it cleans up leftover PF_FREEZE requests.
	 */
	if (todo) {
		printk( "\n" );
		printk(KERN_ERR " stopping tasks timed out "
			"after %d seconds (%d tasks remaining):\n",
			TIMEOUT / HZ, todo);
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (freezeable(p) && !frozen(p))
				printk(KERN_ERR "  %s\n", p->comm);
			cancel_freezing(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		return todo;
	}

	printk( "|\n" );
	BUG_ON(in_atomic());
	return 0;
}
Example #26
/*
 * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
 * segv().
 */
int handle_page_fault(unsigned long address, unsigned long ip,
                      int is_write, int is_user, int *code_out)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    int err = -EFAULT;

    *code_out = SEGV_MAPERR;

    /*
     * If the fault was during atomic operation, don't take the fault, just
     * fail.
     */
    if (in_atomic())
        goto out_nosemaphore;

    down_read(&mm->mmap_sem);
    vma = find_vma(mm, address);
    if (!vma)
        goto out;
    else if (vma->vm_start <= address)
        goto good_area;
    else if (!(vma->vm_flags & VM_GROWSDOWN))
        goto out;
    else if (is_user && !ARCH_IS_STACKGROW(address))
        goto out;
    else if (expand_stack(vma, address))
        goto out;

good_area:
    *code_out = SEGV_ACCERR;
    if (is_write && !(vma->vm_flags & VM_WRITE))
        goto out;

    /* Don't require VM_READ|VM_EXEC for write faults! */
    if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
        goto out;

    do {
        int fault;
survive:
        fault = handle_mm_fault(mm, vma, address, is_write);
        if (unlikely(fault & VM_FAULT_ERROR)) {
            if (fault & VM_FAULT_OOM) {
                err = -ENOMEM;
                goto out_of_memory;
            } else if (fault & VM_FAULT_SIGBUS) {
                err = -EACCES;
                goto out;
            }
            BUG();
        }
        if (fault & VM_FAULT_MAJOR)
            current->maj_flt++;
        else
            current->min_flt++;

        pgd = pgd_offset(mm, address);
        pud = pud_offset(pgd, address);
        pmd = pmd_offset(pud, address);
        pte = pte_offset_kernel(pmd, address);
    } while (!pte_present(*pte));
    err = 0;
    /*
     * The below warning was added in place of
     *	pte_mkyoung(); if (is_write) pte_mkdirty();
     * If it's triggered, we'd see normally a hang here (a clean pte is
     * marked read-only to emulate the dirty bit).
     * However, the generic code can mark a PTE writable but clean on a
     * concurrent read fault, triggering this harmlessly. So comment it out.
     */
#if 0
    WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
    flush_tlb_page(vma, address);
out:
    up_read(&mm->mmap_sem);
out_nosemaphore:
    return err;

    /*
     * We ran out of memory, or some other thing happened to us that made
     * us unable to handle the page fault gracefully.
     */
out_of_memory:
    if (is_global_init(current)) {
        up_read(&mm->mmap_sem);
        yield();
        down_read(&mm->mmap_sem);
        goto survive;
    }
    goto out;
}
Example #27
/* rk30_i2c_doxfer
 *
 * this starts an i2c transfer
*/
static int rk30_i2c_doxfer(struct rk30_i2c *i2c,
                           struct i2c_msg *msgs, int num)
{
    unsigned long timeout, flags;
    int error = 0;
    /* 32 -- max transfer bytes
     * 2 -- addr bytes * 2
     * 3 -- max reg addr bytes
     * 9 -- cycles per bytes
     * max cycles: (32 + 2 + 3) * 9 --> 400 cycles
     */
    int msleep_time = 400 * 1000/ i2c->scl_rate; // ms

    if (i2c->suspended) {
        dev_err(i2c->dev, "i2c is suspended\n");
        return -EIO;
    }

    spin_lock_irqsave(&i2c->lock, flags);
    if(rk30_i2c_set_master(i2c, msgs, num) < 0) {
        spin_unlock_irqrestore(&i2c->lock, flags);
        dev_err(i2c->dev, "addr[0x%02x] set master error\n", msgs[0].addr);
        return -EIO;
    }
    i2c->addr = msgs[0].addr;
    i2c->msg_ptr = 0;
    i2c->error = 0;
    i2c->is_busy = 1;
    i2c->state = STATE_START;
    i2c->complete_what = 0;
    i2c_writel(I2C_STARTIEN, i2c->regs + I2C_IEN);
    spin_unlock_irqrestore(&i2c->lock, flags);

    rk30_i2c_enable(i2c, (i2c->count > 32)?0:1); //if count > 32,  byte(32) send ack

    if (in_atomic()) {
        int tmo = I2C_WAIT_TIMEOUT * USEC_PER_MSEC;
        while(tmo-- && i2c->is_busy != 0)
            udelay(1);
        timeout = (tmo <= 0)?0:1;
    } else
        timeout = wait_event_timeout(i2c->wait, (i2c->is_busy == 0), msecs_to_jiffies(I2C_WAIT_TIMEOUT));

    spin_lock_irqsave(&i2c->lock, flags);
    i2c->state = STATE_IDLE;
    error = i2c->error;
    spin_unlock_irqrestore(&i2c->lock, flags);

    if (timeout == 0) {
        if(error < 0)
            i2c_dbg(i2c->dev, "error = %d\n", error);
        else if((i2c->complete_what !=COMPLETE_READ  && i2c->complete_what != COMPLETE_WRITE)) {
            dev_err(i2c->dev, "Addr[0x%02x] wait event timeout, state: %d, is_busy: %d, error: %d, complete_what: 0x%x, ipd: 0x%x\n",
                    msgs[0].addr, i2c->state, i2c->is_busy, error, i2c->complete_what, i2c_readl(i2c->regs + I2C_IPD));
            //rk30_show_regs(i2c);
            error = -ETIMEDOUT;
            msleep(msleep_time);
            rk30_i2c_send_stop(i2c);
            msleep(1);
        }
        else
            i2c_dbg(i2c->dev, "Addr[0x%02x] wait event timeout, but transfer complete\n", i2c->addr);
    }
    i2c_writel(I2C_IPD_ALL_CLEAN, i2c->regs + I2C_IPD);
    rk30_i2c_disable_irq(i2c);
    rk30_i2c_disable(i2c);

    if(error == -EAGAIN)
        i2c_dbg(i2c->dev, "No ack(complete_what: 0x%x), Maybe slave(addr: 0x%02x) not exist or abnormal power-on\n",
                i2c->complete_what, i2c->addr);
    return error;
}
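The dual wait above (a udelay() spin when atomic, wait_event_timeout() otherwise) is self-contained enough to factor out. A sketch; rk30_i2c_wait_idle is a hypothetical name, returning nonzero on completion and 0 on timeout to mirror wait_event_timeout():

static unsigned long rk30_i2c_wait_idle(struct rk30_i2c *i2c)
{
    if (in_atomic()) {
        /* cannot sleep: poll the busy flag in 1us steps */
        int tmo = I2C_WAIT_TIMEOUT * USEC_PER_MSEC;

        while (tmo-- && i2c->is_busy != 0)
            udelay(1);
        return (tmo <= 0) ? 0 : 1;
    }

    return wait_event_timeout(i2c->wait, i2c->is_busy == 0,
                              msecs_to_jiffies(I2C_WAIT_TIMEOUT));
}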
Example #28
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (esr & ESR_LNX_EXEC) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, esr, regs);
	return 0;
}
Example #29
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suprression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	if (notify_page_fault(regs))
		return 0;

	tsk = current;
	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a 
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
		address = __gmap_fault(address,
				     (struct gmap *) S390_lowcore.gmap);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	/*
	 * The instruction that caused the program check will
	 * be repeated. Don't signal single step via SIGTRAP.
	 */
	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
Example #30
int
wl_cfgnan_transmit_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	void *pxtlv;
	s32 ret = BCME_OK;
	u16 start, end;
	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	/*
	 * proceed only if mandatory arguments are present - subscriber id,
	 * publisher id, mac address
	 */
	if ((!cmd_data->sub_id) || (!cmd_data->pub_id) ||
		ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
		WL_ERR((" mandatory arguments are not present \n"));
		return -EINVAL;
	}

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan transmit <sub_id> <pub_id> <mac_addr> -info <hex_string>
	 *
	 * wpa_cli: DRIVER NAN_TRANSMIT SUB_ID=<sub_id> PUB_ID=<pub_id>
	 *          MAC_ADDR=<mac_addr> SVC_INFO=<hex_string>
	 */

	/* nan transmit */
	start = end = NAN_IOCTL_BUF_SIZE;
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_TRANSMIT);
	pxtlv = nanioc->data;

	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_INSTANCE_ID,
		sizeof(wl_nan_instance_id_t), &cmd_data->sub_id);
	if (unlikely(ret)) {
		goto fail;
	}
	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_REQUESTOR_ID,
		sizeof(wl_nan_instance_id_t), &cmd_data->pub_id);
	if (unlikely(ret)) {
		goto fail;
	}
	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_MAC_ADDR,
		ETHER_ADDR_LEN, &cmd_data->mac_addr.octet);
	if (unlikely(ret)) {
		goto fail;
	}
	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
		WL_DBG((" optional svc_info present, pack it \n"));
		ret = bcm_pack_xtlv_entry_from_hex_string(&pxtlv,
			&end, WL_NAN_XTLV_SVC_INFO, cmd_data->svc_info.data);
		if (unlikely(ret)) {
			goto fail;
		}
	}

	nanioc->len = start - end;
	nanioc_size = sizeof(wl_nan_ioc_t) + nanioc->len;
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan transmit failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan transmit successful \n"));
	}

fail:
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}