Example #1
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		/*
		 * If we fill up the unused list and then return before
		 * adding anything to the free_inc list, we'll skip writing
		 * prios/gens and just go back to allocating from the unused
		 * list:
		 */
		if (fifo_full(&ca->unused))
			return;

		if (!can_invalidate_bucket(ca, b))
			continue;

		if (!GC_SECTORS_USED(b) &&
		    bch_bucket_add_unused(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		invalidate_one_bucket(ca, b);
	}
}
Example #2
/*
 * Append a character to our SBP message fifo.
 * Returns 1 if char successfully appended to fifo.
 * Returns 0 if fifo is full.
 */
u8 fifo_write(char c) {
  if (fifo_full())
    return 0;

  sbp_msg_fifo[tail] = c;
  tail = (tail+1) % FIFO_LEN;
  return 1;
}
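None of these call sites shows fifo_full() itself. For a ring buffer like the SBP message fifo above, a minimal sketch of the missing half might look like this (the head cursor, fifo_read(), the FIFO_LEN value, and the one-slot-free convention are assumptions for illustration, not code from the original project):

#include <stdint.h>

typedef uint8_t u8;  /* matching the project's fixed-width alias */

#define FIFO_LEN 256  /* illustrative size */

static char sbp_msg_fifo[FIFO_LEN];
static volatile uint16_t head;  /* next slot to read */
static volatile uint16_t tail;  /* next slot to write */

/* Full when advancing tail would land on head; one slot is kept free
 * so that head == tail can unambiguously mean "empty". */
u8 fifo_full(void) {
  return (uint16_t)((tail + 1) % FIFO_LEN) == head;
}

/* Pop one byte into *c. Returns 1 on success, 0 if the fifo is empty. */
u8 fifo_read(char *c) {
  if (head == tail)
    return 0;

  *c = sbp_msg_fifo[head];
  head = (head + 1) % FIFO_LEN;
  return 1;
}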
Example #3
/*
 * Append a byte to the fifo.
 * Returns 0 on success, 1 if the fifo is full (note: the return
 * convention is inverted relative to Example #2).
 */
uint8_t fifo_write(fifo_t *fifo, uint8_t byte) {
    if(fifo_full(fifo))
        return 1;

    fifo->data[fifo->write] = byte;
    fifo->write++;
    if(fifo->write >= fifo->size)
        fifo->write = 0;
    return 0;
}
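The fifo_t type is not shown in the source. A plausible definition, with fifo_full() under the same one-slot-free convention, could be (all of this is our assumption, not the original project's header):

#include <stdint.h>

typedef struct {
    uint8_t  *data;  /* backing storage, `size` bytes */
    uint16_t size;   /* capacity; one slot is kept free */
    uint16_t read;   /* index of the next byte to pop */
    uint16_t write;  /* index of the next byte to push */
} fifo_t;

/* Full when advancing the write cursor would collide with read. */
static uint8_t fifo_full(fifo_t *fifo) {
    return (uint16_t)((fifo->write + 1) % fifo->size) == fifo->read;
}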
Example #4
/* Append pos to the movement fifo. Returns 1 on success, 0 if full. */
static int
push(position_t pos)
{
	if (!fifo_full())
	{
		movement[write] = pos;
		write = (write + 1) % (MOV_MAX + 1);
		last_op = 1;

		return 1;
	}
	else
	{
		return 0;
	}
}
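Here last_op disambiguates the read == write case, so all MOV_MAX + 1 slots can hold data: the buffer is full when the cursors meet after a push, and empty when they meet after a pop. Hypothetical counterparts (the read cursor and pop() are our own illustration, in the same style as the source):

static position_t movement[MOV_MAX + 1];
static int read, write, last_op;	/* last_op: 1 = push, 0 = pop */

static int
fifo_full(void)
{
	/* Cursors coincide and the last operation was a push. */
	return write == read && last_op == 1;
}

static int
pop(position_t *pos)
{
	if (write == read && last_op == 0)	/* empty */
	{
		return 0;
	}

	*pos = movement[read];
	read = (read + 1) % (MOV_MAX + 1);
	last_op = 0;

	return 1;
}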
Example #5
File: alloc.c  Project: 7799/linux
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}
Example #6
File: alloc.c  Project: 7799/linux
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;
		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}
Example #7
File: alloc.c  Project: 7799/linux
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}
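The LRU variant (here and in Example #1) works in two phases. The first loop maintains a bounded heap under bucket_max_cmp, so once the heap is full each new candidate displaces the root only if it compares ahead of it under that ordering. The second loop is Floyd's bottom-up heapify, starting at the last internal node (used/2 - 1), which rebuilds the same array under the opposite comparator, bucket_min_cmp, so heap_pop then hands buckets out in invalidation order. A standalone sketch of the same trick on plain ints, keeping the K smallest values and popping them in ascending order (all names here are ours, not bcache's):

#include <stdbool.h>
#include <stddef.h>

enum { K = 8 };

static int heap[K];
static size_t used;

/* ordered(parent, child) returns true when the pair needs no swap. */
static void sift_down(size_t i, bool (*ordered)(int, int))
{
	for (;;) {
		size_t l = 2 * i + 1, r = l + 1, m = i;

		if (l < used && !ordered(heap[m], heap[l]))
			m = l;
		if (r < used && !ordered(heap[m], heap[r]))
			m = r;
		if (m == i)
			return;

		int t = heap[i]; heap[i] = heap[m]; heap[m] = t;
		i = m;
	}
}

static bool max_cmp(int p, int c) { return p >= c; }	/* max-heap */
static bool min_cmp(int p, int c) { return p <= c; }	/* min-heap */

/* Phase 1: a bounded max-heap keeps the K smallest values seen so far;
 * the root is the worst survivor, so a better candidate replaces it. */
static void consider(int v)
{
	if (used < K) {
		size_t i = used++;

		heap[i] = v;
		while (i && !max_cmp(heap[(i - 1) / 2], heap[i])) {	/* sift up */
			int t = heap[i];
			heap[i] = heap[(i - 1) / 2];
			heap[(i - 1) / 2] = t;
			i = (i - 1) / 2;
		}
	} else if (v < heap[0]) {
		heap[0] = v;
		sift_down(0, max_cmp);
	}
}

/* Phase 2: Floyd heapify with the opposite comparator, then pop the
 * survivors smallest-first. */
static void reorder(void)
{
	for (size_t i = used / 2; i-- > 0; )
		sift_down(i, min_cmp);
}

static bool pop_min(int *out)
{
	if (!used)
		return false;

	*out = heap[0];
	heap[0] = heap[--used];
	sift_down(0, min_cmp);
	return true;
}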
Example #8
bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
{
	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));

	if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
		unsigned i;

		for (i = 0; i < RESERVE_NONE; i++)
			if (!fifo_full(&ca->free[i]))
				goto add;

		return false;
	}
add:
	b->prio = 0;

	if (can_inc_bucket_gen(b) &&
	    fifo_push(&ca->unused, b - ca->buckets)) {
		atomic_inc(&b->pin);
		return true;
	}

	return false;
}
Example #9
File: alloc.c  Project: 7799/linux
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.block_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
}
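allocator_wait() is not shown here. Crucially, it must put the thread to sleep with bucket_lock dropped, or the condition it waits on (free_inc draining, GC finishing, prio_blocked reaching zero) could never be made true by anyone else. A condensed sketch of how such a macro is commonly built; this is an approximation of the pattern, not the project's exact code:

#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		/* drop the lock so others can change the condition */	\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		schedule();		/* sleep until woken */		\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)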
Example #10
DSP_STATUS CHNLSM_InterruptDSP2(struct WMD_DEV_CONTEXT *pDevContext,
				u16 wMbVal)
{
	struct CFG_HOSTRES resources;
	DSP_STATUS status = DSP_SOK;
	unsigned long timeout;
	u32 temp;

	status = CFG_GetHostResources((struct CFG_DEVNODE *)
			DRV_GetFirstDevExtension(), &resources);
	if (DSP_FAILED(status))
		return DSP_EFAIL;

	if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION ||
	    pDevContext->dwBrdState == BRD_HIBERNATION) {
#ifdef CONFIG_BRIDGE_DVFS
		struct dspbridge_platform_data *pdata =
			omap_dspbridge_dev->dev.platform_data;
		/*
		 * When Smartreflex is ON, DSP requires at least OPP level 3
		 * to operate reliably. So boost lower OPP levels to OPP3.
		 */
		if (pdata->dsp_set_min_opp)
			(*pdata->dsp_set_min_opp)(min_active_opp);
#endif
		/* Restart the peripheral clocks */
		DSP_PeripheralClocks_Enable(pDevContext, NULL);

		/*
		 * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
		 *     in CM_AUTOIDLE_PLL_IVA2 register
		 */
		*(REG_UWORD32 *)(resources.dwCmBase + 0x34) = 0x1;

		/*
		 * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal freq set to
		 *     0.75 MHz - 1.0 MHz
		 * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
		 */
		temp = *(REG_UWORD32 *)(resources.dwCmBase + 0x4);
		temp = (temp & 0xFFFFFF08) | 0x37;
		*(REG_UWORD32 *)(resources.dwCmBase + 0x4) = temp;

		/*
		 * This delay is needed to avoid a mailbox timeout
		 * issue experienced while SmartReflex is ON.
		 * TODO: instead of a fixed 1 ms, calculate the proper value.
		 */
		mdelay(1);

		/* Restore mailbox settings */
		HW_MBOX_restoreSettings(resources.dwMboxBase);

		/* Access MMU SYS CONFIG register to generate a short wakeup */
		temp = *(REG_UWORD32 *)(resources.dwDmmuBase + 0x10);

		pDevContext->dwBrdState = BRD_RUNNING;
	}

	timeout = jiffies + msecs_to_jiffies(1);
	while (fifo_full((void __iomem *) resources.dwMboxBase, 0)) {
		if (time_after(jiffies, timeout)) {
			pr_err("dspbridge: timed out waiting for mailbox\n");
			return WMD_E_TIMEOUT;
		}
	}

	DBG_Trace(DBG_LEVEL3, "writing %x to Mailbox\n", wMbVal);
	HW_MBOX_MsgWrite(resources.dwMboxBase, MBOX_ARM2DSP, wMbVal);
	return DSP_SOK;
}
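The mailbox wait above is the standard bounded-poll idiom: compute a deadline once with jiffies, then spin until the condition clears or time_after() says the deadline has passed. One refinement worth considering in such loops is a cpu_relax() in the body. A generic sketch of the pattern (wait_not_full() and the -ETIMEDOUT mapping are illustrative, not dspbridge code):

static int wait_not_full(void __iomem *mbox, unsigned int msecs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(msecs);

	while (fifo_full(mbox, 0)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();	/* ease pressure on the bus/SMT sibling */
	}

	return 0;
}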
Example #11
DSP_STATUS CHNLSM_InterruptDSP2(struct WMD_DEV_CONTEXT *pDevContext,
				u16 wMbVal)
{
#ifdef CONFIG_BRIDGE_DVFS
	struct dspbridge_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;
	u32 opplevel = 0;
#endif
	struct CFG_HOSTRES resources;
	DSP_STATUS status = DSP_SOK;
	unsigned long timeout;
	u32 temp;

	status = CFG_GetHostResources((struct CFG_DEVNODE *)
			DRV_GetFirstDevExtension(), &resources);
	if (DSP_FAILED(status))
		return DSP_EFAIL;
#ifdef CONFIG_BRIDGE_DVFS
	if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION ||
	    pDevContext->dwBrdState == BRD_HIBERNATION) {
		if (pdata->dsp_get_opp)
			opplevel = (*pdata->dsp_get_opp)();
		if (opplevel == 1) {
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp)(opplevel+1);
		}
	}
#endif

	if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION ||
	    pDevContext->dwBrdState == BRD_HIBERNATION) {
		/* Restart the IVA clock that was disabled while
		 * the DSP initiated Hibernation. */
		status = CLK_Enable(SERVICESCLK_iva2_ck);
		if (DSP_FAILED(status))
			return status;

		/* Restart the peripheral clocks that were disabled only
		 * in the DSP initiated Hibernation case. */
		if (pDevContext->dwBrdState == BRD_DSP_HIBERNATION) {
			DSP_PeripheralClocks_Enable(pDevContext, NULL);
			/* Enabling DPLL in lock mode */
			temp = *(REG_UWORD32 *)(resources.dwCmBase + 0x34);
			temp = (temp & 0xFFFFFFFE) | 0x1;
			*(REG_UWORD32 *)(resources.dwCmBase + 0x34) = temp;

			temp = *(REG_UWORD32 *)(resources.dwCmBase + 0x4);
			temp = (temp & 0xFFFFFF08) | 0x37;
			*(REG_UWORD32 *)(resources.dwCmBase + 0x4) = temp;
		}
		/* Restore mailbox settings */
		HW_MBOX_restoreSettings(resources.dwMboxBase);

		/*  Access MMU SYS CONFIG register to generate a short wakeup */
		temp = (u32) *((REG_UWORD32 *) ((u32)
						(resources.dwDmmuBase) + 0x10));

		pDevContext->dwBrdState = BRD_RUNNING;
	}
	timeout = jiffies + msecs_to_jiffies(1);
	while (fifo_full((void __iomem *) resources.dwMboxBase, 0)) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "dspbridge: timed out waiting for mailbox\n");
			return WMD_E_TIMEOUT;
		}
	}
	DBG_Trace(DBG_LEVEL3, "writing %x to Mailbox\n", wMbVal);

	HW_MBOX_MsgWrite(resources.dwMboxBase, MBOX_ARM2DSP, wMbVal);
	return DSP_SOK;
}