Example no. 1
/**
 * Split a memory area of @len bytes from @ma.
 */
static MArea *
ma_split(MArea *ma, unsigned long len)
{
	unsigned long req_pages = len / PAGE_SIZE;
	MArea *ret;

	BUG_ON(ma->pages < req_pages);
	BUG_ON(!MA_FREE(ma));

	if (ma->pages == req_pages) {
		ma->flags |= MA_F_USED;
		return ma;
	}

	/* ma is larger than we need, split it. */
	ret = kmalloc(sizeof(MArea), GFP_KERNEL);
	if (!ret)
		return NULL;

	ma->pages -= req_pages;

	ret->pages = req_pages;
	ret->flags = MA_F_USED;
	/* @ret is the tail of @ma. */
	ret->start = ma->start + ma->pages * PAGE_SIZE;
	ret->prev = ma;
	ret->next = ma->next;
	if (ret->next)
		ret->next->prev = ret;
	ma->next = ret;

	return ret;
}
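For reference: the listing never shows the MArea type itself. Below is a minimal sketch consistent with the fields and macros used in these examples; the exact layout and flag values are assumptions, not taken from the source.

typedef struct MArea MArea;

#define MA_F_USED	0x1UL			/* area is allocated */
#define MA_FREE(ma)	(!((ma)->flags & MA_F_USED))

struct MArea {
	unsigned long	start;	/* address of the first byte of the area */
	unsigned long	pages;	/* area size in pages */
	unsigned long	flags;	/* MA_F_USED or 0 (free) */
	MArea		*prev;	/* neighbours in address order */
	MArea		*next;
};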
Example no. 2
/**
 * Merge two free memory areas; @left and @right must not be the same area.
 */
static MArea *
__ma_merge(MArea *left, MArea *right)
{
	if (!MA_FREE(left) || !MA_FREE(right))
		return left;
	BUG_ON(left->start == right->start);

	left->pages += right->pages;

	left->next = right->next;
	if (right->next)
		right->next->prev = left;

	kfree(right);

	return left;
}
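__ma_merge() on its own coalesces only one pair of areas; presumably the free path clears MA_F_USED and then tries to merge with both neighbours. A sketch of such a caller under the assumptions above (ma_free is a hypothetical name, not from the listing):

/* Hypothetical free path: mark @ma free and coalesce it with its neighbours. */
static void
ma_free(MArea *ma)
{
	ma->flags &= ~MA_F_USED;

	/* Absorb a free right neighbour into @ma... */
	if (ma->next)
		__ma_merge(ma, ma->next);
	/* ...then let a free left neighbour absorb @ma. */
	if (ma->prev)
		__ma_merge(ma->prev, ma);
}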
Example no. 3
/* Mark @ch acknowledged; unless DO_ACK is requested, release it to MA_FREE too. */
static void bpmp_ack_master(int ch, int flags)
{
	__raw_writel(MA_ACKD(ch), RES_SEMA_SHRD_SMP_SET);

	if (flags & DO_ACK)
		return;

	/*
	 * We have to violate the bit modification rule while
	 * moving from SL_QUED to MA_FREE (DO_ACK not set) so that
	 * the channel won't be in ACKD state forever.
	 */
	__raw_writel(MA_ACKD(ch) ^ MA_FREE(ch), RES_SEMA_SHRD_SMP_CLR);
}
Example no. 4
/**
 * Find and return the best-fit free memory area on @node.
 */
static MArea *
ma_get_best_fit(unsigned long len, int node)
{
	MArea *ma, *best_fit = NULL;
	unsigned long req_pages = len / PAGE_SIZE;

	for (ma = &mas[node]; ma; ma = ma->next) {
		if (MA_FREE(ma)
		    && ma->pages > req_pages
		    && (!best_fit || best_fit->pages > ma->pages))
			best_fit = ma;
	}

	return best_fit;
}
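ma_get_best_fit() and ma_split() are presumably combined along these lines in the allocation path; ma_alloc is a hypothetical name and any locking is omitted:

/* Hypothetical allocation path: take the best fitting free area and split it. */
static MArea *
ma_alloc(unsigned long len, int node)
{
	MArea *ma = ma_get_best_fit(len, node);

	if (!ma)
		return NULL;	/* no free area on @node is large enough */

	/* Exact fit: @ma itself is marked used; otherwise a used tail is carved off. */
	return ma_split(ma, len);
}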
Example no. 5
/* Return true if channel @ch is in the MA_FREE state, i.e. free for the master. */
bool bpmp_master_free(int ch)
{
	return bpmp_ch_sta(ch) == MA_FREE(ch);
}
Example no. 6
/* Move channel @ch from MA_ACKD to MA_FREE. */
void bpmp_free_master(int ch)
{
	__raw_writel(MA_ACKD(ch) ^ MA_FREE(ch), RES_SEMA_SHRD_SMP_CLR);
}
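Note that MA_FREE(ch) in the three BPMP snippets is an unrelated macro: a per-channel state mask in the shared semaphore register, not the allocator predicate used in the MArea examples. Assuming two state bits per channel with encodings along the lines below (the exact values are an assumption, not taken from the source), MA_ACKD(ch) ^ MA_FREE(ch) is a single bit, so writing it to the CLR register drops just that bit and moves the channel from ACKD to FREE:

/* Assumed per-channel state encoding: two bits per channel in the semaphore. */
#define CH_MASK(ch)	(0x3 << ((ch) * 2))
#define SL_QUED(ch)	(0x1 << ((ch) * 2))	/* slave: message queued   */
#define MA_FREE(ch)	(0x2 << ((ch) * 2))	/* master: channel is free */
#define MA_ACKD(ch)	(0x3 << ((ch) * 2))	/* master: message ack'd   */

/*
 * MA_ACKD(ch) ^ MA_FREE(ch) == 0x1 << ((ch) * 2), so the write to
 * RES_SEMA_SHRD_SMP_CLR clears only the low state bit: 0x3 (ACKD) -> 0x2 (FREE).
 */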