Example #1
0
/**
 * amdgpu_ih_process - interrupt handler
 *
 * @adev: amdgpu_device pointer
 *
 * Interrupt handler (VI), walk the IH ring.
 * Returns irq process return code.
 */
int amdgpu_ih_process(struct amdgpu_device *adev)
{
	struct amdgpu_iv_entry entry;
	u32 wptr;

	if (!adev->irq.ih.enabled || adev->shutdown)
		return IRQ_NONE;

	wptr = amdgpu_ih_get_wptr(adev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&adev->irq.ih.lock, 1))
		return IRQ_NONE;

	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	while (adev->irq.ih.rptr != wptr) {
		u32 ring_index = adev->irq.ih.rptr >> 2;

		/* Before dispatching irq to IP blocks, send it to amdkfd */
		amdgpu_amdkfd_interrupt(adev,
				(const void *) &adev->irq.ih.ring[ring_index]);

		entry.iv_entry = (const uint32_t *)
			&adev->irq.ih.ring[ring_index];
		amdgpu_ih_decode_iv(adev, &entry);
		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;

		amdgpu_irq_dispatch(adev, &entry);
	}
	amdgpu_ih_set_rptr(adev);
	atomic_set(&adev->irq.ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = amdgpu_ih_get_wptr(adev);
	if (wptr != adev->irq.ih.rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
Example #2
0
static
int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
{
	int result = 0;
	struct device *dev = &iface->dev;
	struct i2400mu *i2400mu = usb_get_intfdata(iface);
	unsigned is_autosuspend = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;

#ifdef CONFIG_PM
	if (PMSG_IS_AUTO(pm_msg))
		is_autosuspend = 1;
#endif

	d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
	rmb();		/* read a current i2400m->updown; presumably paired with a barrier where it is updated */
	if (i2400m->updown == 0)
		goto no_firmware;
	if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
		result = -EBADF;
		d_printf(1, dev, "fw up, link up, not-idle, autosuspend: "
			 "not entering powersave\n");
		goto error_not_now;
	}
	d_printf(1, dev, "fw up: entering powersave\n");
	atomic_dec(&i2400mu->do_autopm);
	result = i2400m_cmd_enter_powersave(i2400m);
	atomic_inc(&i2400mu->do_autopm);
	if (result < 0 && !is_autosuspend) {
		/* System suspend: failing to enter powersave is not fatal;
		 * clear the error and let resume reset the device. */
		dev_err(dev, "failed to suspend, will reset on resume\n");
		result = 0;
	}
	if (result < 0)
		goto error_enter_powersave;
	i2400mu_notification_release(i2400mu);
	d_printf(1, dev, "powersave requested\n");
error_enter_powersave:
error_not_now:
no_firmware:
	d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
		iface, pm_msg.event, result);
	return result;
}
Example #3
0
//----------------------------------------------------------------------------------------
// Procedure:   eeprom_wait_cmd_done
//
// Description: This routine waits for the EEPROM to finish its command.
//              Specifically, it waits for EEDO (data out) to go high.
// Returns:     true - If the command finished
//              false - If the command never finished (EEDO stayed low)
//----------------------------------------------------------------------------------------
static u16
eeprom_wait_cmd_done(struct e100_private *adapter)
{
	u16 x;
	unsigned long expiration_time = jiffies + HZ / 100 + 1;

	eeprom_stand_by(adapter);

	do {
		rmb();
		x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
		if (x & EEDO)
			return true;
		if (time_before(jiffies, expiration_time))
			yield();
		else
			return false;
	} while (true);
}
Example #4
0
void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
                              void *buf, int words)
{
    unsigned long flags;
    int offs;
    u32 *vals = buf;

    spin_lock_irqsave(&bus->reg_lock, flags);
    iwl_grab_nic_access(bus);

    iwl_write32(bus, HBUS_TARG_MEM_RADDR, addr);
    rmb();

    for (offs = 0; offs < words; offs++)
        vals[offs] = iwl_read32(bus, HBUS_TARG_MEM_RDAT);

    iwl_release_nic_access(bus);
    spin_unlock_irqrestore(&bus->reg_lock, flags);
}
Example #5
0
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void __rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
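	/* order the yield_count read above before re-checking rw->lock below */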
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
Example #6
0
void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	int data = 0;
	int vg_active = 0;
	struct mdp4_overlay_pipe *pipe;

	if (!mfd->panel_power_on)
		return;

	/* no need to power on cmd block since it's dsi mode */
	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += fbi->var.xoffset * bpp + fbi->var.yoffset * fbi->fix.line_length;

	mutex_lock(&mfd->dma->ov_mutex);

	pipe = dsi_pipe;
	pipe->srcp0_addr = (uint32) buf;
	mdp4_overlay_rgb_setup(pipe);
	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_overlay_dsi_video_vsync_push(mfd, pipe);

	/* Check whether any VG pipe is connected to layermixer 0 */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	data = inpdw(MDP_BASE + 0x10100);

	rmb();
	if (data & 0xff)
		vg_active = 1;
	else
		vg_active = 0;

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mutex_unlock(&mfd->dma->ov_mutex);

	if (vg_active)
		yield();
	mdp4_stat.kickoff_dsi++;
}
Example #7
0
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched()) {

			check_pgt_cache();
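			/* presumably so the cpu_is_offline() check below sees an up-to-date value */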
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_touch_nmi();
			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			start_critical_timings();
		}
		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #8
0
static void xdma_err_tasklet(unsigned long data)
{
	struct xdma_chan *chan = (struct xdma_chan *)data;

	if (chan->err) {
		/* If reset failed, need to hard reset
		 * Channel is no longer functional
		 */
		if (!dma_init(chan))
			chan->err = 0;
		else
			dev_err(chan->dev,
			    "DMA channel reset failed, please reset system\n");
	}

	rmb();
	xilinx_chan_desc_cleanup(chan);

	xilinx_chan_desc_reinit(chan);
}
Example #9
0
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */
static boolean_t
hv_ring_buffer_needsig_on_write(
	uint32_t			old_write_location,
	hv_vmbus_ring_buffer_info*	rbi)
{
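	/* make the preceding ring-buffer write visible before we read interrupt_mask */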
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return (FALSE);

	/* Read memory barrier */
	rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write_location == rbi->ring_buffer->read_index)
		return (TRUE);

	return (FALSE);
}
Example #10
0
int shm_signal_enable(struct shm_signal *s, int flags)
{
	struct shm_signal_irq *irq = &s->desc->irq[s->locale];
	unsigned long iflags;

	spin_lock_irqsave(&s->lock, iflags);

	irq->enabled = 1;
	wmb();

	if ((irq->dirty || irq->pending)
	    && !test_bit(shm_signal_in_wakeup, &s->flags)) {
		rmb();
		tasklet_schedule(&s->deferred_notify);
	}

	spin_unlock_irqrestore(&s->lock, iflags);

	return 0;
}
Example #11
0
static int
netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
{
        struct netmap_kring *txkring = rxkring->pipe;
	uint32_t oldhwcur = rxkring->nr_hwcur;

        ND("%s %x <- %s", rxkring->name, flags, txkring->name);
        rxkring->nr_hwcur = rxkring->rhead; /* recover user-released slots */
        ND(5, "hwcur %d hwtail %d cur %d head %d tail %d", rxkring->nr_hwcur, rxkring->nr_hwtail,
                rxkring->rcur, rxkring->rhead, rxkring->rtail);
        rmb(); /* paired with the first wmb() in txsync */
        nm_rxsync_finalize(rxkring);

	if (oldhwcur != rxkring->nr_hwcur) {
		/* we have released some slots, notify the other end */
		wmb(); /* make sure nr_hwcur is updated before notifying */
		txkring->na->nm_notify(txkring->na, txkring->ring_id, NR_TX, 0);
	}
        return 0;
}
Example #12
0
static
int i2400mu_resume(struct usb_interface *iface)
{
	int ret = 0;
	struct device *dev = &iface->dev;
	struct i2400mu *i2400mu = usb_get_intfdata(iface);
	struct i2400m *i2400m = &i2400mu->i2400m;

	d_fnstart(3, dev, "(iface %p)\n", iface);
	rmb();		/* read a current i2400m->updown */
	if (i2400m->updown == 0) {
		d_printf(1, dev, "fw was down, no resume needed\n");
		goto out;
	}
	d_printf(1, dev, "fw was up, resuming\n");
	i2400mu_notification_setup(i2400mu);
out:
	d_fnend(3, dev, "(iface %p) = %d\n", iface, ret);
	return ret;
}
Example #13
0
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used;
	unsigned i;
	void *ret;

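	/* Only read used-ring entries after the host has exposed them. */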
	rmb();

	last_used = (vq->last_used_idx & (vq->vring.num-1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	ret = vq->data[i];
	detach_buf(vq, i);

	vq->last_used_idx++;

	return ret;
}
Example #14
0
static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
                                  struct drm_crtc_state *old_crtc_state)
{
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);

	WARN_ON(omap_crtc->vblank_irq.registered);

	if (dispc_mgr_is_enabled(omap_crtc->channel)) {

		DBG("%s: GO", omap_crtc->name);

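		/* pick up the latest 'pending' value (presumably cleared by the vblank handler) before checking it */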
		rmb();
		WARN_ON(omap_crtc->pending);
		omap_crtc->pending = true;
		wmb();

		dispc_mgr_go(omap_crtc->channel);
		omap_irq_register(crtc->dev, &omap_crtc->vblank_irq);
	}
}
Example #15
0
static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}
Example #16
0
static int htifblk_segment(struct htifblk_device *dev,
	struct request *req)
{
	static struct htifblk_request pkt __aligned(HTIF_ALIGN);
	u64 offset, size, end;
	unsigned long cmd;

	offset = (blk_rq_pos(req) << SECTOR_SIZE_SHIFT);
	size = (blk_rq_cur_sectors(req) << SECTOR_SIZE_SHIFT);

	end = offset + size;
	if (unlikely(end < offset || end > dev->size)) {
		dev_err(&dev->dev->dev, "out-of-bounds access:"
			" offset=%llu size=%llu\n", offset, size);
		return -EINVAL;
	}

	rmb();
	pkt.addr = __pa(req->buffer);
	pkt.offset = offset;
	pkt.size = size;
	pkt.tag = dev->tag;

	switch (rq_data_dir(req)) {
	case READ:
		cmd = HTIF_CMD_READ;
		break;
	case WRITE:
		cmd = HTIF_CMD_WRITE;
		break;
	default:
		return -EINVAL;
	}

	dev->req = req;
	dev->msg_buf.dev = dev->dev->index;
	dev->msg_buf.cmd = cmd;
	dev->msg_buf.data = __pa(&pkt);
	htif_tohost(&dev->msg_buf);
	return 0;
}
Example #17
0
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
                     void *ioaddr)
{
    unsigned long dirty_tx, tx_left;

    assert(dev != NULL);
    assert(tp != NULL);
    assert(ioaddr != NULL);

    dirty_tx = tp->dirty_tx;
    tx_left = tp->cur_tx - dirty_tx;

    while (tx_left > 0) {
        int entry = dirty_tx % NUM_TX_DESC;
        struct sk_buff *skb = tp->Tx_skbuff[entry];
        u32 status;

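        /* order this status read after earlier reads; the NIC owns the descriptor while OWNbit is set */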
        rmb();
        status = le32_to_cpu(tp->TxDescArray[entry].status);
        if (status & OWNbit)
            break;

        /* FIXME: is it really accurate for TxErr ? */
        tp->stats.tx_bytes += skb->len >= ETH_ZLEN ?
                              skb->len : ETH_ZLEN;
        tp->stats.tx_packets++;
        rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + entry,
                             tp->TxDescArray + entry);
        dev_kfree_skb_irq(skb);
        tp->Tx_skbuff[entry] = NULL;
        dirty_tx++;
        tx_left--;
    }

    if (tp->dirty_tx != dirty_tx) {
        tp->dirty_tx = dirty_tx;
        if (netif_queue_stopped(dev))
            netif_wake_queue(dev);
    }
}
Example #18
0
void cpm2_pic_init(struct device_node *node)
{
	int i;

	cpm2_intctl = cpm2_map(im_intctl);

	/* Clear the CPM IRQ controller, in case it has any bits set
	 * from the bootloader
	 */

	/* Mask out everything */

	out_be32(&cpm2_intctl->ic_simrh, 0x00000000);
	out_be32(&cpm2_intctl->ic_simrl, 0x00000000);

	wmb();

	/* Ack everything */
	out_be32(&cpm2_intctl->ic_sipnrh, 0xffffffff);
	out_be32(&cpm2_intctl->ic_sipnrl, 0xffffffff);
	wmb();

	/* Dummy read of the vector */
	i = in_be32(&cpm2_intctl->ic_sivec);
	rmb();

	/* Initialize the default interrupt mapping priorities,
	 * in case the boot rom changed something on us.
	 */
	out_be16(&cpm2_intctl->ic_sicr, 0);
	out_be32(&cpm2_intctl->ic_scprrh, 0x05309770);
	out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);

	/* create a legacy host */
	cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       64, &cpm2_pic_host_ops, 64);
	if (cpm2_pic_host == NULL) {
		printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
		return;
	}
}
Example #19
0
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/* Initialize the boot status and give the secondary core
	 * the start address of the kernel, let the write buffer drain
	 */
	__raw_writel(0, OCM_HIGH_BASE + BOOT_STATUS_OFFSET);

	__raw_writel(virt_to_phys(secondary_startup),
					OCM_HIGH_BASE + BOOT_ADDR_OFFSET);
	wmb();

	/*
	 * Send an event to wake the secondary core from WFE state.
	 */
	sev();

	/*
	 * Wait for the other CPU to boot, but timeout if it doesn't
	 */
	timeout = jiffies + (1 * HZ);
	while ((__raw_readl(OCM_HIGH_BASE + BOOT_STATUS_OFFSET) !=
				BOOT_STATUS_CPU1_UP) &&
				(time_before(jiffies, timeout)))
		rmb();

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example #20
0
struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
{
	struct HvLpEvent * nextLpEvent = 
		(struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
	if ( nextLpEvent->xFlags.xValid ) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
				      LpEventAlign ) /
				      LpEventAlign ) *
				      LpEventAlign;
		/* Wrap to beginning if no room at end */
		if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
			lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
	}
	else 
		nextLpEvent = NULL;

	return nextLpEvent;
}
Example #21
0
int nvhost_read_module_regs(struct nvhost_device *ndev,
			u32 offset, int count, u32 *values)
{
	void __iomem *p = ndev->aperture + offset;
	int err;

	/* verify offset */
	err = validate_reg(ndev, offset, count);
	if (err)
		return err;

	nvhost_module_busy(ndev);
	while (count--) {
		*(values++) = readl(p);
		p += 4;
	}
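	/* presumably ensures the readl()s above have completed before the module is allowed to idle */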
	rmb();
	nvhost_module_idle(ndev);

	return 0;
}
Example #22
0
static inline int __insert_record(struct t_buf *buf,
                                  unsigned long event,
                                  int extra,
                                  int cycles,
                                  int rec_size,
                                  unsigned char *extra_data)
{
    struct t_rec *rec;
    unsigned char *dst;
    unsigned long extra_word = extra/sizeof(u32);
    int local_rec_size = calc_rec_size(cycles, extra);
    uint32_t next;

    BUG_ON(local_rec_size != rec_size);
    BUG_ON(extra & 3);

    /* Double-check once more that we have enough space.
     * Don't bugcheck here, in case the userland tool is doing
     * something stupid. */
    if ( calc_bytes_avail(buf) < rec_size )
    {
        printk("%s: %u bytes left (%u - ((%u - %u) %% %u) recsize %u.\n",
               __func__,
               calc_bytes_avail(buf),
               data_size, buf->prod, buf->cons, data_size, rec_size);
        return 0;
    }
    rmb();

    rec = next_record(buf);
    rec->event = event;
    rec->extra_u32 = extra_word;
    dst = (unsigned char *)rec->u.nocycles.extra_u32;
    if ( (rec->cycles_included = cycles) != 0 )
    {
        u64 tsc = (u64)get_cycles();
        rec->u.cycles.cycles_lo = (uint32_t)tsc;
        rec->u.cycles.cycles_hi = (uint32_t)(tsc >> 32);
        dst = (unsigned char *)rec->u.cycles.extra_u32;
    } 
Example #23
0
/**
  @brief wpalReadRegister provides a mechanism for a client
         to read data from a hardware data register

  @param  address:  Physical memory address of the register
  @param  data:     Return location for value that is read

  @return SUCCESS if the data was successfully read
*/
wpt_status wpalReadRegister
(
   wpt_uint32   address,
   wpt_uint32  *data
)
{
   /* if SSR is in progress, and WCNSS is not out of reset (re-init
    * not invoked), then do not access WCNSS registers */
   if (NULL == gpEnv ||
        (vos_is_logp_in_progress(VOS_MODULE_ID_WDI, NULL) &&
            !vos_is_reinit_in_progress(VOS_MODULE_ID_WDI, NULL))) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: invoked before subsystem initialized",
                 __func__);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if ((address < gpEnv->wcnss_memory->start) ||
       (address > gpEnv->wcnss_memory->end)) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: Register address 0x%0x out of range 0x%0x - 0x%0x",
                 __func__, address,
                 (u32) gpEnv->wcnss_memory->start,
                 (u32) gpEnv->wcnss_memory->end);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if (0 != (address & 0x3)) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: Register address 0x%0x is not word aligned",
                 __func__, address);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   *data = readl_relaxed(gpEnv->mmio + (address - WCNSS_BASE_ADDRESS));
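   /* readl_relaxed() has no implicit barrier; order this register read before any later reads */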
   rmb();

   return eWLAN_PAL_STATUS_SUCCESS;
}
Example #24
0
static int scsifront_ring_drain(struct vscsifrnt_info *info)
{
	struct vscsiif_response *ring_rsp;
	RING_IDX i, rp;
	int more_to_do = 0;

	rp = info->ring.sring->rsp_prod;
	rmb();	/* ordering required respective to dom0 */
	for (i = info->ring.rsp_cons; i != rp; i++) {
		ring_rsp = RING_GET_RESPONSE(&info->ring, i);
		scsifront_do_response(info, ring_rsp);
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
	else
		info->ring.sring->rsp_event = i + 1;

	return more_to_do;
}
Example #25
0
static void xenfb_on_fb_event(struct xenfb *xenfb)
{
	uint32_t prod, cons;
	struct xenfb_page *page = xenfb->fb.page;

	prod = page->out_prod;
	if (prod == page->out_cons)
		return;
	rmb();			/* ensure we see ring contents up to prod */
	for (cons = page->out_cons; cons != prod; cons++) {
		union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons);
		int x, y, w, h;

		switch (event->type) {
		case XENFB_TYPE_UPDATE:
			x = MAX(event->update.x, 0);
			y = MAX(event->update.y, 0);
			w = MIN(event->update.width, xenfb->width - x);
			h = MIN(event->update.height, xenfb->height - y);
			if (w < 0 || h < 0) {
				fprintf(stderr, "%s bogus update ignored\n",
					xenfb->fb.nodename);
				break;
			}
			if (x != event->update.x || y != event->update.y
			    || w != event->update.width
			    || h != event->update.height) {
				fprintf(stderr, "%s bogus update clipped\n",
					xenfb->fb.nodename);
				break;
			}
			xenfb_guest_copy(xenfb, x, y, w, h);
			break;
		}
	}
	mb();			/* ensure we're done with ring contents */
	page->out_cons = cons;
	xc_evtchn_notify(xenfb->evt_xch, xenfb->fb.port);
}
Example #26
0
void console_write(const char *msg, int len) {
	static int was_cr = 0;

	int sent = 0;
	while (sent < len)
	{
		XENCONS_RING_IDX cons, prod;
		cons = console.intf->out_cons;
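		/* order the out_cons read above before the out_prod read below */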
		rmb();
		prod = console.intf->out_prod;

//	while ((sent < len) && (prod - cons < sizeof(console.intf->out)))
//		console.intf->out[MASK_XENCONS_IDX(prod++, console.intf->out)] = msg[sent++];
//
// It may be possible to use stty or ESC sequence instead of this nastiness
// 

		while ((sent < len) && (prod - cons < sizeof(console.intf->out))) {
			if (msg[sent] == '\n' && !was_cr)
			{
				int idx = MASK_XENCONS_IDX(prod, console.intf->out);
				console.intf->out[idx] = '\r';
				prod++;
				if (prod - cons >= sizeof(console.intf->out))
					break;
			}
			was_cr = (msg[sent] == '\r');
			int idx = MASK_XENCONS_IDX(prod, console.intf->out);
			console.intf->out[idx] = msg[sent++];
			prod++;
		}

		console.intf->out_prod = prod;
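		/* make the ring writes and the new out_prod visible before kicking the backend */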
		wmb();

		event_kick(console.chan);
	}
	ssa(SYS_STATS_IO_OUTPUT, len);
}
Example #27
0
long long perf_mmap_aux_read(int which) {

	struct perf_event_mmap_page *control_page;
	long long head;
	int parent_mmap;

	parent_mmap=mmaps[which].parent_mmap;

	stats.mmap_aux_read_attempts++;

	if (!(mmaps[which].prot&PROT_READ)) return 0;

	if (!(mmaps[parent_mmap].prot&PROT_READ)) return 0;

	if (!mmaps[parent_mmap].active) return 0;

	if (mmaps[parent_mmap].addr==MAP_FAILED) return 0;

	if (mmaps[parent_mmap].size==0) return 0;

	control_page=(struct perf_event_mmap_page *)mmaps[parent_mmap].addr;

	if (control_page==NULL) {
		return -1;
	}
	head=control_page->aux_head;
	rmb(); /* Must always follow read of aux_head */

	/* Mark all as read */
	if (mmaps[parent_mmap].prot&PROT_WRITE) {
		control_page->aux_tail=head;
		mmaps[which].aux_head=head;
	}

	stats.mmap_aux_read_successful++;

	return head;

}
Example #28
0
void __spin_yield(__raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = lppaca[holder_cpu].yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
			((u64)holder_cpu << 32) | yield_count);
	else
		plpar_hcall_norets(H_CONFER,
			get_hard_smp_processor_id(holder_cpu), yield_count);
}
Example #29
0
/* Send a message to xenbus, in the same fashion as xb_write, and
   block waiting for a reply.  The reply is malloced and should be
   freed by the caller. */
struct xsd_sockmsg *
xenbus_msg_reply(int type,
		 xenbus_transaction_t trans,
		 struct write_req *io,
		 int nr_reqs)
{
    int id;
    struct xsd_sockmsg *rep;

    id = allocate_xenbus_id();
    xb_write(type, id, trans, io, nr_reqs);

    DEBUG("waiting on response to request %d\n", id);
    wait_event(&req_info[id].waitq, req_info[id].ready);
    DEBUG("woke up on response to request %d\n", id);

    rmb();
    rep = req_info[id].reply;
    BUG_ON(rep->req_id != id);
    release_xenbus_id(id);
    return rep;
}
Example #30
0
/**
 * Receive XenStore response raw data
 *
 * @v xen		Xen hypervisor
 * @v data		Data buffer, or NULL to discard data
 * @v len		Length of data
 */
static void xenstore_recv ( struct xen_hypervisor *xen, void *data,
			    size_t len ) {
	struct xenstore_domain_interface *intf = xen->store.intf;
	XENSTORE_RING_IDX cons = readl ( &intf->rsp_cons );
	XENSTORE_RING_IDX prod;
	XENSTORE_RING_IDX idx;
	char *bytes = data;
	size_t offset = 0;
	size_t fill;

	DBGCP ( intf, "XENSTORE raw response:\n" );

	/* Read one byte at a time */
	while ( offset < len ) {

		/* Wait for data to be ready */
		while ( 1 ) {
			prod = readl ( &intf->rsp_prod );
			fill = ( prod - cons );
			if ( fill > 0 )
				break;
			DBGC2 ( xen, "." );
			cpu_nap();
			rmb();
		}

		/* Read byte */
		idx = MASK_XENSTORE_IDX ( cons++ );
		if ( data )
			bytes[offset++] = readb ( &intf->rsp[idx] );
	}
	if ( data )
		DBGCP_HDA ( intf, MASK_XENSTORE_IDX ( cons - len ), data, len );

	/* Update consumer counter */
	writel ( cons, &intf->rsp_cons );
	wmb();
}