コード例 #1
/**
 * __cb_qd_chain - queue done callback for case DTC_CHAIN_MODE
 * @dma_hdl:	dma handle
 * @parg:	args registered with the cb function
 * @cause:	cause for this cb; DMA_CB_OK means the data transfer succeeded,
 * 		DMA_CB_ABORT means it was stopped before the transfer completed
 *
 * Returns 0 on success, or the failing line number on error.
 */
u32 __cb_qd_chain(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		pr_info("%s: DMA_CB_OK!\n", __func__);
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			printk("%s, line %d\n", __func__, __LINE__);
			/* NOTE: fatal bug: by the time g_acur_cnt is read here, it may
			   already have been changed elsewhere, 2012-12-2 */
			//ucur_saddr = g_src_addr + atomic_read(&g_acur_cnt) * DTC_ONE_LEN;
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
#if 0
		/*
		 * Enqueueing is complete, but that does not mean this is the last
		 * qd irq; in testing, the if (ucur_cnt == uloop_cnt) branch was
		 * sometimes never reached, i.e. enqueueing finished during the
		 * hd/fd callback.
		 */
		} else if(ucur_cnt == uloop_cnt){
			printk("%s, line %d\n", __func__, __LINE__);
			sw_dma_dump_chan(dma_hdl); /* for debug */

			/* maybe it's the last irq; or next will be the last irq, need think about */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[DTC_CHAIN_MODE]);
#endif
		} else {
			printk("%s, line %d\n", __func__, __LINE__);
			sw_dma_dump_chan(dma_hdl); /* for debug */

			/* maybe it's the last irq */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[DTC_CHAIN_MODE]);
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
コード例 #2
/**
 * of_device_make_bus_id - Use the device node data to assign a unique name
 * @dev: pointer to device structure that is linked to a device tree node
 *
 * This routine will first try using either the dcr-reg or the reg property
 * value to derive a unique name.  As a last resort it will use the node
 * name followed by a unique number.
 */
void of_device_make_bus_id(struct device *dev)
{
	static atomic_t bus_no_reg_magic;
	struct device_node *node = dev->of_node;
	const __be32 *reg;
	u64 addr;
	const __be32 *addrp;
	int magic;

#ifdef CONFIG_PPC_DCR
	/*
	 * If it's a DCR based device, use 'd' for native DCRs
	 * and 'D' for MMIO DCRs.
	 */
	reg = of_get_property(node, "dcr-reg", NULL);
	if (reg) {
#ifdef CONFIG_PPC_DCR_NATIVE
		dev_set_name(dev, "d%x.%s", *reg, node->name);
#else /* CONFIG_PPC_DCR_NATIVE */
		u64 addr = of_translate_dcr_address(node, *reg, NULL);
		if (addr != OF_BAD_ADDR) {
			dev_set_name(dev, "D%llx.%s",
				     (unsigned long long)addr, node->name);
			return;
		}
#endif /* !CONFIG_PPC_DCR_NATIVE */
	}
#endif /* CONFIG_PPC_DCR */

	/*
	 * For MMIO, get the physical address
	 */
	reg = of_get_property(node, "reg", NULL);
	if (reg) {
		if (of_can_translate_address(node)) {
			addr = of_translate_address(node, reg);
		} else {
			addrp = of_get_address(node, 0, NULL, NULL);
			if (addrp)
				addr = of_read_number(addrp, 1);
			else
				addr = OF_BAD_ADDR;
		}
		if (addr != OF_BAD_ADDR) {
			dev_set_name(dev, "%llx.%s",
				     (unsigned long long)addr, node->name);
			return;
		}
	}

	/*
	 * No BusID, use the node name and add a globally incremented
	 * counter (and pray...)
	 */
	magic = atomic_add_return(1, &bus_no_reg_magic);
	dev_set_name(dev, "%s.%d", node->name, magic - 1);
}
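The last-resort path above leans on a file-scope static atomic_t as a process-wide unique-ID source: atomic_add_return(1, ...) hands every caller a distinct value even under concurrency. A minimal userspace sketch of the same idiom using C11 atomics (the names here are illustrative, not from the kernel source):

#include <stdatomic.h>
#include <stdio.h>

/* process-wide counter; atomic_fetch_add() returns the OLD value,
 * so next_magic() matches atomic_add_return(1, &x) - 1 above */
static atomic_int bus_no_reg_magic;

static int next_magic(void)
{
	return atomic_fetch_add(&bus_no_reg_magic, 1);
}

int main(void)
{
	printf("serial.%d\n", next_magic());	/* serial.0 */
	printf("serial.%d\n", next_magic());	/* serial.1 */
	return 0;
}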
コード例 #3
ファイル: atomic.c プロジェクト: lcnight/lcnight
#include <stdio.h>

/* atomic_t, atomic_set(), atomic_inc(), atomic_add_return() and
 * atomic_read() are assumed to come from this project's userspace
 * atomic wrapper header, which the snippet does not show. */
int main(int argc, char *argv[])
{
    //atomic_t a = 123;  /* error: invalid initializer for atomic_t */
    atomic_t a;
    atomic_set(&a, 123);
    atomic_inc(&a);
    printf("%d\n", atomic_add_return(1, &a));
    printf("%d\n", atomic_read(&a));
    return 0;
}/* -- end of main  -- */
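Example #3 calls kernel-style atomic helpers from a userspace main(), so it only builds against that project's wrapper header. For comparison, a self-contained C11 sketch that prints the same values using only the standard library:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int a = 123;		/* C11 does allow direct initialization */

	atomic_fetch_add(&a, 1);			/* atomic_inc(&a): a = 124 */
	printf("%d\n", atomic_fetch_add(&a, 1) + 1);	/* add_return(1): prints 125 */
	printf("%d\n", atomic_load(&a));		/* atomic_read: prints 125 */
	return 0;
}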
コード例 #4
void jump_label_inc(struct jump_label_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_add_return(1, &key->enabled) == 1)
		jump_label_update(key, JUMP_LABEL_ENABLE);
	jump_label_unlock();
}
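The shape of jump_label_inc() is worth noting: atomic_inc_not_zero() is the lock-free fast path once the key is live, and the slow path takes the lock so that exactly one caller, the one whose atomic_add_return() yields 1, performs the one-time enable. A rough userspace sketch of that shape, assuming C11 atomics and a pthread mutex (enable() is a stand-in for jump_label_update()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int enabled;

static void enable(void) { /* one-time work, stand-in for jump_label_update() */ }

/* like the kernel's atomic_inc_not_zero(): increment unless the value is 0 */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);
	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	return false;
}

void key_inc(void)
{
	if (inc_not_zero(&enabled))
		return;				/* fast path: already enabled */

	pthread_mutex_lock(&lock);
	if (atomic_fetch_add(&enabled, 1) + 1 == 1)
		enable();			/* first reference does the work */
	pthread_mutex_unlock(&lock);
}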
コード例 #5
void q6audio_dsp_not_responding(void)
{
	if (cb_ptr)
		cb_ptr(DSP_STATE_CRASHED);
	if (atomic_add_return(1, &dsp_crash_count) != 1) {
		pr_err("q6audio_dsp_not_responding() \
			- parking additional crasher...\n");
		for (;;)
			msleep(1000);
	}
コード例 #6
ファイル: netlink.c プロジェクト: 020gzh/linux
void quota_send_warning(struct kqid qid, dev_t dev,
			const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		  "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		  "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
			  from_kqid_munged(&init_user_ns, qid));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
			  from_kuid_munged(&init_user_ns, current_uid()));
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);

	genlmsg_multicast(&quota_genl_family, skb, 0, 0, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
コード例 #7
static void flounder_enable_shared_gpios(void)
{
	if (1 == atomic_add_return(1, &shared_gpios_refcnt)) {
		gpio_set_value(CAM_VCM2V85_EN, 1);
		usleep_range(100, 120);
		gpio_set_value(CAM_1V2_EN, 1);
		gpio_set_value(CAM_A2V85_EN, 1);
		gpio_set_value(CAM_1V8_EN, 1);
		pr_debug("%s\n", __func__);
	}
}
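Examples #7, #17, #18 and #21 all rely on the same gate: atomic_add_return(1, ...) == 1 picks out the caller that moves the counter from 0 to 1, and only that caller touches the hardware. A compressed sketch of the enable/disable pair in C11 (hw_power() is a placeholder for the gpio_set_value() sequence):

#include <stdatomic.h>

static atomic_int refcnt;

static void hw_power(int on) { (void)on; /* placeholder for the GPIO writes */ }

void shared_get(void)
{
	if (atomic_fetch_add(&refcnt, 1) + 1 == 1)
		hw_power(1);	/* first user powers the rails up */
}

void shared_put(void)
{
	if (atomic_fetch_sub(&refcnt, 1) - 1 == 0)
		hw_power(0);	/* last user powers them down */
}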
コード例 #8
void __down_write_failed(int count, struct rw_semaphore *sem)
{
	do {
		if (count < 0 && count > -RW_LOCK_BIAS) {
			down_write_failed_biased(sem);
			break;
		}
		down_write_failed(sem);
		count = atomic_add_return(-RW_LOCK_BIAS, &sem->count);
	} while (count != 0);
}
コード例 #9
GED_ERROR ged_monitor_3D_fence_add(int fence_fd)
{
    int err;
    GED_MONITOR_3D_FENCE* psMonitor = (GED_MONITOR_3D_FENCE*)ged_alloc(sizeof(GED_MONITOR_3D_FENCE));

#ifdef GED_DEBUG_MONITOR_3D_FENCE
    ged_log_buf_print(ghLogBuf_GED, "[+]ged_monitor_3D_fence_add");
#endif

    if (!psMonitor)
    {
        return GED_ERROR_OOM;
    }

    sync_fence_waiter_init(&psMonitor->sSyncWaiter, ged_sync_cb);
    INIT_WORK(&psMonitor->sWork, ged_monitor_3D_fence_work_cb);
    psMonitor->psSyncFence = sync_fence_fdget(fence_fd);
    if (NULL == psMonitor->psSyncFence)
    {
        ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
        return GED_ERROR_INVALID_PARAMS;
    }

#ifdef GED_DEBUG_MONITOR_3D_FENCE
    ged_log_buf_print(ghLogBuf_GED, "[+]sync_fence_wait_async");
#endif

    err = sync_fence_wait_async(psMonitor->psSyncFence, &psMonitor->sSyncWaiter);

#ifdef GED_DEBUG_MONITOR_3D_FENCE
    ged_log_buf_print(ghLogBuf_GED, "[-]sync_fence_wait_async, err = %d", err);
#endif

    if ((1 == err) || (0 > err))
    {
        sync_fence_put(psMonitor->psSyncFence);
        ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
    }
    else if (0 == err)
    {
        int iCount = atomic_add_return (1, &g_i32Count);
        if (iCount > 1)
        {
            //mtk_set_bottom_gpu_freq(iCount + 1);
            mtk_set_bottom_gpu_freq(4);
        }
    }

#ifdef GED_DEBUG_MONITOR_3D_FENCE
    ged_log_buf_print(ghLogBuf_GED, "[-]ged_monitor_3D_fence_add, count = %d", atomic_read(&g_i32Count));
#endif

    return GED_OK;
}
コード例 #10
ファイル: atomic_add_return.c プロジェクト: fjrti/snippets
int __init atomic_add_return_init(void)
{
	int ret, i;

	atomic_set(&my_atomic, 5);
	i = 2;
	ret = atomic_add_return(i, &my_atomic);	/* atomically add i to my_atomic and return the new value */
	printk("<0>after atomic_add_return, my_atomic.counter = %d\n", atomic_read(&my_atomic));
	printk("<0>return ret = %d\n", ret);

	return 0;
}
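The init function above is only half of a loadable module. A minimal sketch of the missing surroundings, assuming the my_atomic definition and exit hook that the snippet does not show:

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static atomic_t my_atomic;	/* assumed; the snippet uses it without showing it */

static void __exit atomic_add_return_exit(void)
{
	printk("<0>atomic_add_return demo unloaded\n");
}

module_init(atomic_add_return_init);
module_exit(atomic_add_return_exit);
MODULE_LICENSE("GPL");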
コード例 #11
inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
{
	#ifdef PLATFORM_LINUX
	return atomic_add_return(i,v);
	#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v,i);
	#elif defined(PLATFORM_FREEBSD)
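	/* note: add-then-load is not one atomic operation; the load may
	 * observe additions made by other threads in between */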
	atomic_add_int(v,i);
	return atomic_load_acq_32(v);
	#endif
}
コード例 #12
void quota_send_warning(short type, unsigned int id, dev_t dev,
			const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		  "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		  "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);

	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
コード例 #13
ファイル: rxe_verbs.c プロジェクト: AK101111/linux
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (__user void *)
					    (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova		= (mask & WR_ATOMIC_MASK) ?
					atomic_wr(ibwr)->remote_addr :
					rdma_wr(ibwr)->remote_addr;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}
コード例 #14
static void dsps_restart_handler(struct dsps_data *drv)
{
	pr_debug("%s: Restart lvl %d\n",
		__func__, get_restart_level());

	if (atomic_add_return(1, &drv->crash_in_progress) > 1) {
		pr_err("%s: DSPS already resetting. Count %d\n", __func__,
		       atomic_read(&drv->crash_in_progress));
	} else {
		subsystem_restart_dev(drv->subsys);
	}
}
コード例 #15
ファイル: of_device.c プロジェクト: E-LLP/n900
static void of_device_make_bus_id(struct of_device *dev)
{
	static atomic_t bus_no_reg_magic;
	struct device_node *node = dev->node;
	char *name = dev->dev.bus_id;
	const u32 *reg;
	u64 addr;
	int magic;

	/*
	 * If it's a DCR based device, use 'd' for native DCRs
	 * and 'D' for MMIO DCRs.
	 */
#ifdef CONFIG_PPC_DCR
	reg = of_get_property(node, "dcr-reg", NULL);
	if (reg) {
#ifdef CONFIG_PPC_DCR_NATIVE
		snprintf(name, BUS_ID_SIZE, "d%x.%s",
			 *reg, node->name);
#else /* CONFIG_PPC_DCR_NATIVE */
		addr = of_translate_dcr_address(node, *reg, NULL);
		if (addr != OF_BAD_ADDR) {
			snprintf(name, BUS_ID_SIZE,
				 "D%llx.%s", (unsigned long long)addr,
				 node->name);
			return;
		}
#endif /* !CONFIG_PPC_DCR_NATIVE */
	}
#endif /* CONFIG_PPC_DCR */

	/*
	 * For MMIO, get the physical address
	 */
	reg = of_get_property(node, "reg", NULL);
	if (reg) {
		addr = of_translate_address(node, reg);
		if (addr != OF_BAD_ADDR) {
			snprintf(name, BUS_ID_SIZE,
				 "%llx.%s", (unsigned long long)addr,
				 node->name);
			return;
		}
	}

	/*
	 * No BusID, use the node name and add a globally incremented
	 * counter (and pray...)
	 */
	magic = atomic_add_return(1, &bus_no_reg_magic);
	snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
}
コード例 #16
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
コード例 #17
ファイル: huawei_cdc_ncm.c プロジェクト: 7799/linux
static int huawei_cdc_ncm_manage_power(struct usbnet *usbnet_dev, int on)
{
	struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
	int rv;

	if ((on && atomic_add_return(1, &drvstate->pmcount) == 1) ||
			(!on && atomic_dec_and_test(&drvstate->pmcount))) {
		rv = usb_autopm_get_interface(usbnet_dev->intf);
		usbnet_dev->intf->needs_remote_wakeup = on;
		if (!rv)
			usb_autopm_put_interface(usbnet_dev->intf);
	}
	return 0;
}
コード例 #18
ファイル: emd_ctl_chr.c プロジェクト: SelfImp/m75
int request_ext_md_reset()
{
    int ret = 0;

    if(atomic_add_return(1, &rst_on_going) == 1){
        ret = send_message_to_user(&drv_client[0], EMD_MSG_REQUEST_RST);
        if(ret!=0){
            EMD_MSG_INF("chr","request_ext_md_reset fail, msg does not send\n");
            atomic_dec(&rst_on_going);
        }
    }else{
        EMD_MSG_INF("chr","reset is on-going\n");
    }
    return ret;
}
コード例 #19
static int sbd_request_port(struct uart_port *uport)
{
	const char *err = KERN_ERR "sbd: Unable to reserve MMIO resource\n";
	struct sbd_duart *duart = to_sport(uport)->duart;
	int map_guard;
	int ret = 0;

	if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
				"sb1250-duart")) {
		printk(err);
		return -EBUSY;
	}
	map_guard = atomic_add_return(1, &duart->map_guard);
	if (map_guard == 1) {
		if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
					"sb1250-duart")) {
			atomic_add(-1, &duart->map_guard);
			printk(err);
			ret = -EBUSY;
		}
	}
	if (!ret) {
		ret = sbd_map_port(uport);
		if (ret) {
			map_guard = atomic_add_return(-1, &duart->map_guard);
			if (!map_guard)
				release_mem_region(duart->mapctrl,
						   DUART_CHANREG_SPACING);
		}
	}
	if (ret) {
		release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
		return ret;
	}
	return 0;
}
コード例 #20
static void sbd_release_port(struct uart_port *uport)
{
	struct sbd_port *sport = to_sport(uport);
	struct sbd_duart *duart = sport->duart;
	int map_guard;

	iounmap(sport->memctrl);
	sport->memctrl = NULL;
	iounmap(uport->membase);
	uport->membase = NULL;

	map_guard = atomic_add_return(-1, &duart->map_guard);
	if (!map_guard)
		release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
	release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
}
コード例 #21
ファイル: cdc_mbim.c プロジェクト: 7799/linux
/* using a counter to merge subdriver requests with our own into a combined state */
static int cdc_mbim_manage_power(struct usbnet *dev, int on)
{
	struct cdc_mbim_state *info = (void *)&dev->data;
	int rv = 0;

	dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);

	if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
		/* need autopm_get/put here to ensure the usbcore sees the new value */
		rv = usb_autopm_get_interface(dev->intf);
		dev->intf->needs_remote_wakeup = on;
		if (!rv)
			usb_autopm_put_interface(dev->intf);
	}
	return 0;
}
コード例 #22
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
		/*
		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
		 * seqno on retransmited data (non-QOS) frames. To workaround
		 * the problem let's generate seqno in software if QOS is
		 * disabled.
		 */
		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		else
			/* H/W will generate sequence number */
			return;
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}
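The 0x10 step is explained by the 802.11 seq_ctrl layout: the low 4 bits carry the fragment number (IEEE80211_SCTL_FRAG) and bits 4-15 the sequence number, so adding 0x10 advances the sequence field by one while the masking at the end preserves the fragment bits. A standalone illustration of that bit packing (mask values mirror mac80211's constants):

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F	/* fragment number, bits 0-3  */
#define SCTL_SEQ  0xFFF0	/* sequence number, bits 4-15 */

int main(void)
{
	uint16_t seq_ctrl = 0x0123;	/* seq 0x012, fragment 3 */
	uint16_t seqno = 0x0130;	/* counter after one 0x10 step from 0x0120 */

	seq_ctrl = (seq_ctrl & SCTL_FRAG) | (seqno & SCTL_SEQ);
	printf("seq=%u frag=%u\n", seq_ctrl >> 4, seq_ctrl & SCTL_FRAG);	/* seq=19 frag=3 */
	return 0;
}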
コード例 #23
void pvdrm_slot_request_async(struct pvdrm_device* pvdrm, struct pvdrm_slot* slot)
{
	struct pvdrm_slots* slots;
	uint32_t pos;
	struct pvdrm_mapped* mapped;

	slots = pvdrm->slots;
	mapped = slots->mapped;

	BUG_ON(!is_used(slot));

	/* Request slot, increment counter. */
	pos = ((uint32_t)atomic_add_return(1, &slots->put)) % PVDRM_SLOT_NR;
	mapped->ring[pos] = pvdrm_slot_id(mapped, slot);
	wmb();
	atomic_inc(&mapped->count);
}
コード例 #24
void of_device_make_bus_id(struct device *dev)
{
	static atomic_t bus_no_reg_magic;
	struct device_node *node = dev->of_node;
	const u32 *reg;
	u64 addr;
	const __be32 *addrp;
	int magic;

#ifdef CONFIG_PPC_DCR
	reg = of_get_property(node, "dcr-reg", NULL);
	if (reg) {
#ifdef CONFIG_PPC_DCR_NATIVE
		dev_set_name(dev, "d%x.%s", *reg, node->name);
#else 
		u64 addr = of_translate_dcr_address(node, *reg, NULL);
		if (addr != OF_BAD_ADDR) {
			dev_set_name(dev, "D%llx.%s",
				     (unsigned long long)addr, node->name);
			return;
		}
#endif 
	}
#endif 

	reg = of_get_property(node, "reg", NULL);
	if (reg) {
		if (of_can_translate_address(node)) {
			addr = of_translate_address(node, reg);
		} else {
			addrp = of_get_address(node, 0, NULL, NULL);
			if (addrp)
				addr = of_read_number(addrp, 1);
			else
				addr = OF_BAD_ADDR;
		}
		if (addr != OF_BAD_ADDR) {
			dev_set_name(dev, "%llx.%s",
				     (unsigned long long)addr, node->name);
			return;
		}
	}

	magic = atomic_add_return(1, &bus_no_reg_magic);
	dev_set_name(dev, "%s.%d", node->name, magic - 1);
}
コード例 #25
u32 __cb_qd_single_mode(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_1T_TOTAL_LEN / DTC_1T_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		g_qd_cnt++;
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			ucur_saddr = g_src_addr + ucur_cnt * DTC_1T_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_1T_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_1T_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
		} else if(ucur_cnt >= uloop_cnt){
			/* we have complete enqueueing, but not means it's the last qd irq */
			//if(true == sw_dma_sgmd_buflist_empty(dma_hdl)) {
			if(true) {
				/*
				 * Even here we cannot assume the transfer is done: in testing
				 * this point was reached twice. The cause: __dtc_single_mode
				 * increments cnt before its enqueue, but that enqueue is
				 * interrupted by the irq and stays pending until the irq's
				 * enqueue and transfer finish (so buflist_empty is of course
				 * true); only then can __dtc_single_mode complete its one
				 * outstanding enqueue, bringing us here a second time. So in
				 * this demo an empty qd list does not mean all data has been
				 * transferred, although in most other scenarios it can be
				 * taken as completion.
				 */
				/* maybe it's the last irq */
				atomic_set(&g_adma_done, 1);
				wake_up_interruptible(&g_dtc_queue[DTC_SINGLE_MODE]);
			}
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
コード例 #26
u32 __cb_qd_single_mode(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		g_qd_cnt++;
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
		} else if(ucur_cnt >= uloop_cnt){
			/* we have complete enqueueing, but not means it's the last qd irq */
			//if(true == sw_dma_sgmd_buflist_empty(dma_hdl)) {
			if(true) {
				/*
				 * Even here we cannot assume the transfer is done: in testing
				 * this point was reached twice. The cause: __dtc_single_mode
				 * increments cnt before its enqueue, but that enqueue is
				 * interrupted by the irq and stays pending until the irq's
				 * enqueue and transfer finish (so buflist_empty is of course
				 * true); only then can __dtc_single_mode complete its one
				 * outstanding enqueue, bringing us here a second time. So in
				 * this demo an empty qd list does not mean all data has been
				 * transferred, although in most other scenarios it can be
				 * taken as completion.
				 */
				/* maybe it's the last irq */
				atomic_set(&g_adma_done, 1);
				wake_up_interruptible(&g_dtc_queue[DTC_SINGLE_MODE]);
			}
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
コード例 #27
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned int curr;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
		rdp->dynticks_fqs++;
		return 1;
	}

	return rcu_implicit_offline_qs(rdp);
}
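Note the idiom on the first line of this function: atomic_add_return(0, ...) adds nothing, but unlike a plain atomic_read() it implies full memory barriers on both sides of the read, which the dyntick sampling depends on. The C11 analogue is a fetch-add of zero, which defaults to sequentially consistent ordering:

#include <stdatomic.h>

/* read *v with full ordering, like atomic_add_return(0, v) */
static inline unsigned int barriered_read(atomic_uint *v)
{
	return atomic_fetch_add(v, 0);	/* memory_order_seq_cst by default */
}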
コード例 #28
static enum hrtimer_restart hrtimer_handle(struct hrtimer *cur_timer)
{
	struct timeval tv_cur;

	if(atomic_add_return(1, &g_cur_cnt[1]) >= g_loopcnt[1]) {
		/* print cur time in sec */
		do_gettimeofday(&tv_cur);
		printk("%s: cur sec %d\n", __func__, (int)tv_cur.tv_sec);

		/* clear g_cur_cnt[1] */
		atomic_set(&g_cur_cnt[1], 0);
	}

	/* if we don't forward the timer here, hrtimer_handle would be called again immediately */
	hrtimer_forward(cur_timer, cur_timer->base->get_time(), g_ktime);
	return HRTIMER_RESTART;
}
コード例 #29
static void timer_handle(unsigned long arg)
{
	unsigned long ms = arg;
	struct timeval tv_cur;

	if(atomic_add_return(1, &g_cur_cnt[0]) >= g_loopcnt[0]) {
		/* print cur time in sec */
		do_gettimeofday(&tv_cur);
		printk("%s: cur sec %d\n", __func__, (int)tv_cur.tv_sec);

		/* clear g_cur_cnt[0] */
		atomic_set(&g_cur_cnt[0], 0);
	}

	/* set next trig */
	mod_timer(&g_timer, jiffies + (HZ * ms) / 1000);
}
コード例 #30
ファイル: dir.c プロジェクト: cilynx/dd-wrt
/**
 *	sysfs_deactivate - deactivate sysfs_dirent
 *	@sd: sysfs_dirent to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void sysfs_deactivate(struct sysfs_dirent *sd)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(sd->s_sibling || !(sd->s_flags & SYSFS_FLAG_REMOVED));
	sd->s_sibling = (void *)&wait;

	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated sd->s_sibling.
	 */
	v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);

	if (v != SD_DEACTIVATED_BIAS)
		wait_for_completion(&wait);

	sd->s_sibling = NULL;
}
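The SD_DEACTIVATED_BIAS trick folds two states into one counter: active references keep the value near zero, while adding a large negative bias simultaneously blocks new references and reveals, through the return value, whether any are still in flight. A compressed sketch of the idea in C11 (the constant and helper names are illustrative):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

#define DEACTIVATED_BIAS	(INT_MIN / 2)	/* far below any real refcount */

static atomic_int s_active;

bool get_active(void)
{
	if (atomic_fetch_add(&s_active, 1) >= 0)
		return true;			/* took a normal reference */
	atomic_fetch_sub(&s_active, 1);		/* deactivation in progress: back off */
	return false;
}

void deactivate(void)
{
	int v = atomic_fetch_add(&s_active, DEACTIVATED_BIAS) + DEACTIVATED_BIAS;

	if (v != DEACTIVATED_BIAS) {
		/* references still in flight; the real code blocks here with
		 * wait_for_completion() until the last put drops the count */
	}
}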