void cach_set_buf_addrs(struct cach_buf *buf, void *vaddr, u32 paddr)
{
	bool tmp;

	buf->vstart = vaddr;
	buf->pstart = paddr;

	if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
		/*
		 * Keep whatever is in the cache. This way we avoid an
		 * unnecessary synch if CPU is the first user.
		 */
		buf->range_in_cpu_cache.start = 0;
		buf->range_in_cpu_cache.end = buf->size;
		align_range_up(&buf->range_in_cpu_cache,
						get_dcache_granularity());
		buf->range_dirty_in_cpu_cache.start = 0;
		buf->range_dirty_in_cpu_cache.end = buf->size;
		align_range_up(&buf->range_dirty_in_cpu_cache,
						get_dcache_granularity());
	} else {
		flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false,
									&tmp);
		drain_cpu_write_buf();

		null_range(&buf->range_in_cpu_cache);
		null_range(&buf->range_dirty_in_cpu_cache);
	}
	null_range(&buf->range_invalid_in_cpu_cache);
}
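
The hwmem structure definitions are not part of this listing; the sketch below reconstructs struct cach_range and the struct cach_buf fields that cach_set_buf_addrs() touches, purely from how they are used above (the field names come from the code, the types and layout are assumptions).

/*
 * Illustrative sketch reconstructed from the accesses above; field types
 * and layout are assumptions and may differ from the real hwmem headers.
 */
struct cach_range {
	u32 start;	/* inclusive start offset within the buffer */
	u32 end;	/* exclusive end offset within the buffer */
};

struct cach_buf {
	void *vstart;		/* kernel virtual address of the buffer */
	u32 pstart;		/* physical address of the buffer */
	u32 size;		/* buffer size in bytes */
	u32 cache_settings;	/* HWMEM_ALLOC_HINT_* flags */

	/* Ranges tracked so cache maintenance can be kept to a minimum */
	struct cach_range range_in_cpu_cache;
	struct cach_range range_dirty_in_cpu_cache;
	struct cach_range range_invalid_in_cpu_cache;
};
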
static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range)
{
	struct cach_range intersection;

	intersect_range(&buf->range_in_cpu_cache, range, &intersection);
	if (is_non_empty_range(&intersection)) {
		bool flushed_everything;

		expand_range_2_edge(&intersection, &buf->range_in_cpu_cache);

		flush_cpu_dcache(
				offset_2_vaddr(buf, intersection.start),
				offset_2_paddr(buf, intersection.start),
				range_length(&intersection),
				buf->cache_settings &
					HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
							&flushed_everything);

		if (flushed_everything) {
			if (!speculative_data_prefetch())
				null_range(&buf->range_in_cpu_cache);
			null_range(&buf->range_dirty_in_cpu_cache);
			null_range(&buf->range_invalid_in_cpu_cache);
		} else {
			if (!speculative_data_prefetch())
				shrink_range(&buf->range_in_cpu_cache,
							 &intersection);
			shrink_range(&buf->range_dirty_in_cpu_cache,
								&intersection);
			shrink_range(&buf->range_invalid_in_cpu_cache,
								&intersection);
		}
	}
}
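
The range helpers are likewise defined elsewhere; their behaviour can be inferred from the call sites above. Below is a minimal sketch of the simpler ones, assuming exclusive-end offset ranges (expand_range_2_edge() and shrink_range() are omitted because their exact edge handling is not visible in this excerpt).

/* Sketches only; the real helpers may differ in detail. */
static void null_range(struct cach_range *range)
{
	range->start = 0;
	range->end = 0;
}

static bool is_non_empty_range(struct cach_range *range)
{
	return range->end > range->start;
}

static u32 range_length(struct cach_range *range)
{
	return range->end - range->start;
}

static void intersect_range(struct cach_range *range_1,
		struct cach_range *range_2, struct cach_range *intersection)
{
	intersection->start = range_1->start > range_2->start ?
					range_1->start : range_2->start;
	intersection->end = range_1->end < range_2->end ?
					range_1->end : range_2->end;
	if (intersection->start >= intersection->end)
		null_range(intersection);
}
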
Example no. 3
static void write_at_offset(const char *key, const void *buffer,
                            int buf_len, unsigned int sector_offset)
{
    int key_len = strlen(key) + 1;
    unsigned int record_size = key_len + buf_len + sizeof(record_size);
    unsigned int flash_addr = (unsigned int)STORAGE_ADDRESS + sector_offset;

    write_to_flash(flash_addr, (unsigned char *)&record_size, sizeof(record_size));
    write_to_flash(flash_addr+sizeof(record_size), (unsigned char *)key, key_len);
    write_to_flash(flash_addr+sizeof(record_size)+key_len, (unsigned char *)buffer, buf_len);
    flush_cpu_dcache();
}
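
The record layout written above is a size word followed by the NUL-terminated key and then the value, with record_size covering all three. Since the storage sector is memory mapped at STORAGE_ADDRESS, a matching reader only has to walk that layout. The function below is a hypothetical sketch, not part of the original code, and assumes <string.h> is available.

/* Hypothetical reader for the record format produced by write_at_offset(). */
static int read_at_offset(const char *key, void *buffer, int buf_len,
                          unsigned int sector_offset)
{
    const unsigned char *p = (const unsigned char *)STORAGE_ADDRESS + sector_offset;
    const char *stored_key = (const char *)(p + sizeof(unsigned int));
    unsigned int record_size;
    int value_len;

    memcpy(&record_size, p, sizeof(record_size));
    if (record_size == 0xffffffff)
        return -1;                                /* erased flash, no record here */

    value_len = (int)record_size - (int)(strlen(stored_key) + 1)
                                 - (int)sizeof(record_size);
    if (value_len < 0 || strcmp(stored_key, key) != 0)
        return -1;                                /* corrupt record or other key */

    if (value_len > buf_len)
        value_len = buf_len;                      /* truncate to caller's buffer */
    memcpy(buffer, stored_key + strlen(stored_key) + 1, value_len);
    return value_len;
}
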
Example no. 4
void snd_isr_dmaw(void)
{
	flush_cpu_dcache();

	if(record_level == 0) {
		printf("SND: stray DMAW irq\n");
		return;
	}
	
	/* NB. the callback can give us buffers by calling snd_record_refill() */
	record_callback(record_queue[record_consume], record_user);

	record_consume = (record_consume + 1) & RECORD_BUFQ_MASK;
	record_level--;

	irq_ack(IRQ_AC97DMAW);

	if(record_level > 0)
		record_start(record_queue[record_consume]);
	else
		record_overrun = 1;
}
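
record_callback() receives the filled buffer plus the opaque record_user pointer. A hypothetical callback, assuming a signature of callback(short *buffer, void *user) and that snd_record_refill() takes the buffer to requeue (both assumptions, not confirmed by this excerpt), could look like:

/*
 * Hypothetical record callback; the sample type and the
 * snd_record_refill() signature are assumptions.
 */
static void my_record_callback(short *buffer, void *user)
{
	/* Hand the captured samples to the application (hypothetical consumer). */
	process_samples(buffer, user);

	/* Requeue the buffer so capture can continue without an overrun. */
	snd_record_refill(buffer);
}
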
static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range)
{
	struct cach_range intersection;

	intersect_range(&buf->range_invalid_in_cpu_cache, range,
								&intersection);
	if (is_non_empty_range(&intersection)) {
		bool flushed_everything;

		expand_range_2_edge(&intersection,
					&buf->range_invalid_in_cpu_cache);

		/*
		 * Cache handler never uses invalidate to discard data in the
		 * cache so we can use flush instead which is considerably
		 * faster for large buffers.
		 */
		flush_cpu_dcache(
				offset_2_vaddr(buf, intersection.start),
				offset_2_paddr(buf, intersection.start),
				range_length(&intersection),
				buf->cache_settings &
					HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
							&flushed_everything);

		if (flushed_everything) {
			null_range(&buf->range_invalid_in_cpu_cache);
			null_range(&buf->range_dirty_in_cpu_cache);
		} else {
			/*
			 * No need to shrink range_in_cpu_cache as invalidate
			 * is only used when we can't keep track of what's in
			 * the CPU cache.
			 */
			shrink_range(&buf->range_invalid_in_cpu_cache,
								&intersection);
		}
	}
}
Example no. 6
int memtest_silent(void)
{
	volatile unsigned int *array = (unsigned int *)MAIN_RAM_BASE;
	int i;
	unsigned int seed_32;
	unsigned short seed_16;
	unsigned int error_cnt;

	error_cnt = 0;

	/* test data bus */
	for(i=0;i<128;i++) {
		array[i] = ONEZERO;
	}
	flush_cpu_dcache();
	flush_l2_cache();
	for(i=0;i<128;i++) {
		if(array[i] != ONEZERO)
			error_cnt++;
	}

	for(i=0;i<128;i++) {
		array[i] = ZEROONE;
	}
	flush_cpu_dcache();
	flush_l2_cache();
	for(i=0;i<128;i++) {
		if(array[i] != ZEROONE)
			error_cnt++;
	}

	/* test counter or random data */
	seed_32 = 0;
	for(i=0;i<TEST_DATA_SIZE/4;i++) {
		seed_32 = seed_to_data_32(seed_32, TEST_DATA_RANDOM);
		array[i] = seed_32;
	}

	seed_32 = 0;
	flush_cpu_dcache();
	flush_l2_cache();
	for(i=0;i<TEST_DATA_SIZE/4;i++) {
		seed_32 = seed_to_data_32(seed_32, TEST_DATA_RANDOM);
		if(array[i] != seed_32)
			error_cnt++;
	}

	/* test random addressing */
	seed_16 = 0;
	for(i=0;i<TEST_ADDR_SIZE/4;i++) {
		seed_16 = seed_to_data_16(seed_16, TEST_ADDR_RANDOM);
		array[(unsigned int) seed_16] = i;
	}

	seed_16 = 0;
	flush_cpu_dcache();
	flush_l2_cache();
	for(i=0;i<TEST_ADDR_SIZE/4;i++) {
		seed_16 = seed_to_data_16(seed_16, TEST_ADDR_RANDOM);
		if(array[(unsigned int) seed_16] != i)
			error_cnt++;
	}

	return error_cnt;
}
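
A minimal caller, shown only as a usage sketch (the reporting format is illustrative, assuming printf is available in this environment):

/* Usage sketch: run the silent memory test and report the error count. */
int memtest(void)
{
	int errors;

	errors = memtest_silent();
	if (errors != 0) {
		printf("Memory test failed: %d errors\n", errors);
		return 0;
	}
	printf("Memory test OK\n");
	return 1;
}
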
Example no. 7
void fs_erase(void)
{
    erase_flash_sector((unsigned int)STORAGE_ADDRESS);
    flush_cpu_dcache();
}