Example #1
0
/**
 *	Grow a cache by attaching one freshly allocated, page-aligned block.
 *
 *	@param	pCache	Cache to extend; its object size determines the block size.
 *	@return	BT_ERR_NONE on success, BT_ERR_NO_MEMORY if the page allocator
 *			is exhausted.
 */
static BT_ERROR extend_cache(BT_CACHE *pCache) {

	BT_u32 ulBlockSize = BT_PAGE_ALIGN(pCache->ulObjectSize);

	void *pPhys = (void *) bt_page_alloc(ulBlockSize);
	if(!pPhys) {
		return BT_ERR_NO_MEMORY;
	}

	// The page allocator hands back a physical address; map it into the
	// kernel's virtual space before touching it.
	struct block *pNewBlock = (struct block *) bt_phys_to_virt(pPhys);
	init_attach_block(pCache, pNewBlock, ulBlockSize);

	return BT_ERR_NONE;
}
Example #2
0
/**
 *	Kernel heap allocator.
 *
 *	Rounds the request up to a 4-byte boundary, prepends a MEM_TAG header
 *	(recording the owning cache — NULL for a direct page allocation — and
 *	the rounded size) and brackets the user region with magic guard tags.
 *
 *	@param	ulSize	Requested size in bytes; 0 returns NULL.
 *	@return	Pointer to ulSize usable bytes, or NULL on failure.
 */
void *BT_kMalloc(BT_u32 ulSize) {

	void *p;

	if(!ulSize) {
		return NULL;
	}

	// Reject requests large enough that the 4-byte round-up or the tag
	// overhead below would wrap BT_u32 and cause a severe under-allocation.
	if(ulSize > 0xFFFFFFFF - 3 - sizeof(struct MEM_TAG) - sizeof(struct MAGIC_TAG)) {
		return NULL;
	}

	ulSize=(ulSize+3)&0xFFFFFFFC;	// Round up to a 4-byte boundary.

	// Total footprint: header tag + user region + trailing guard tag.
	BT_u32 ulTotal = ulSize + sizeof(struct MEM_TAG) + sizeof(struct MAGIC_TAG);

	BT_CACHE *pCache = BT_GetSuitableCache(ulTotal);
	if(pCache) {
		p = BT_CacheAlloc(pCache);
	} else {
		// Too big for any cache: fall back to whole pages.
		bt_paddr_t phys = bt_page_alloc(ulTotal);
		if(!phys) {
			return NULL;
		}
		p = (void *) bt_phys_to_virt(phys);
	}

	if(!p) {
		return NULL;
	}

	struct MEM_TAG *tag = (struct MEM_TAG *) p;
	tag->pCache = pCache;	// NULL here means "free via the page allocator".
	tag->size = ulSize;
	set_magic(&tag->tag_0);
	set_magic(&tag->tag_1);
	// Trailing guard immediately after the user region, for overrun detection.
	struct MAGIC_TAG *mempost = (struct MAGIC_TAG *) ((BT_u8 *) (tag+1) + ulSize);
	set_magic(mempost);

	/*
	 *	Before the allocated memory we place a pointer to the pCache.
	 *	This will be 0 in the case of a page allocation!
	 */

	return ((void *) (tag + 1));
}
Example #3
0
/**
 *	Kernel heap allocator (simple variant).
 *
 *	Prepends a single BT_CACHE pointer to the allocation so BT_kFree can
 *	route the release back to the owning cache (NULL = page allocation).
 *
 *	@param	ulSize	Requested size in bytes; 0 returns NULL.
 *	@return	Pointer to ulSize usable bytes, or NULL on failure.
 */
void *BT_kMalloc(BT_u32 ulSize) {

	void *p;

	if(!ulSize) {
		return NULL;
	}

	BT_CACHE *pCache = BT_GetSuitableCache(ulSize+sizeof(BT_CACHE *));
	if(pCache) {
		p = BT_CacheAlloc(pCache);
		if(!p) {
			// Cache exhausted and could not be extended: *tag below would
			// otherwise dereference NULL.
			return NULL;
		}
	} else {
		// Check the PHYSICAL address for failure: bt_page_alloc() signals
		// failure with 0, and bt_phys_to_virt(0) need not map to NULL.
		bt_paddr_t phys = bt_page_alloc(ulSize+sizeof(BT_CACHE *));
		if(!phys) {
			return NULL;
		}
		p = (void *) bt_phys_to_virt(phys);
	}

	BT_CACHE **tag = (BT_CACHE **) p;
	*tag = pCache;

	/*
	 *	Before the allocated memory we place a pointer to the pCache.
	 *	This will be 0 in the case of a page allocation!
	 */

	return ((void *) (tag + 1));
}
Example #4
0
/**
 *	Create a new page directory for a process address space.
 *
 *	The fresh L1 table is seeded from the kernel's master table so that all
 *	kernel mappings are immediately valid in the new address space.
 *
 *	@return	Page directory pointer with the ASID encoded in the low bits,
 *			or 0 on allocation failure.
 */
bt_pgd_t bt_mmu_newmap(void) {
	bt_paddr_t pg;
	bt_pgd_t pgd;

	pg = create_pgd();
	if(!pg) {
		return 0;
	}

	pgd = (bt_pgd_t) bt_phys_to_virt(GET_PGD(pg));

	/*
	 *	At this point the kernel page table will point to valid page tables,
	 *	that were created during the bt_mmu_initialise routine.
	 *
	 *	The user-space section should all be 0, i.e. cause page faults.
	 *	This means process VMs always match the kernel ptes correctly,
	 *	as the kernel pgd will never be updated after mmu initialisation.
	 *
	 *	The copy covers the entire MMU_L1TBL_SIZE region, so no separate
	 *	memset() is needed beforehand — the previous zeroing pass was
	 *	redundant and has been removed.
	 */
	memcpy(pgd, g_MMUTable, MMU_L1TBL_SIZE);

	return (bt_pgd_t) ((BT_u32)pgd | (GET_ASID(pg)));
}
Example #5
0
/**
 *	Stream (part of) an FPGA bitstream into the device-configuration DMA engine.
 *
 *	This assumes a single write request will be generated.
 *
 *	Data is staged in a DMA-coherent bounce buffer together with up to 3
 *	residue bytes left over from the previous call (the DMA works in 32-bit
 *	words, so any non-word-aligned tail is carried forward). On the first
 *	chunk (offset == 0) the buffer is scanned for the bitstream sync word to
 *	detect byte order and to strip any header bytes preceding it.
 *
 *	@param	hDevcfg	Device handle; holds the register map, running offset,
 *					residue buffer and detected endianness.
 *	@param	ulFlags	Unused here.
 *	@param	ulSize	Number of user bytes in pBuffer.
 *	@param	pBuffer	User data to transfer.
 *	@return	Number of user bytes consumed, or BT_ERR_NO_MEMORY.
 **/
static BT_s32 devcfg_write(BT_HANDLE hDevcfg, BT_u32 ulFlags, BT_u32 ulSize, const void *pBuffer) {

	// Remember the caller's byte count: ulSize itself is adjusted below.
	BT_u32 user_count = ulSize;

	// Bounce buffer must also hold last call's 0..3 residue bytes.
	BT_u32 kmem_size = ulSize + hDevcfg->residue_len;
	bt_paddr_t kmem = bt_page_alloc_coherent(kmem_size);
	if(!kmem) {
		BT_kPrint("xdevcfg: Cannot allocate memory.");
		return BT_ERR_NO_MEMORY;
	}

	BT_u8 *buf = (BT_u8 *) bt_phys_to_virt(kmem);

	// Collect stragglers from last time (0 to 3 bytes).
	memcpy(buf, hDevcfg->residue_buf, hDevcfg->residue_len);

	// Copy the user data.
	memcpy(buf + hDevcfg->residue_len, pBuffer, ulSize);

	// Include straggles in total to be counted.
	ulSize += hDevcfg->residue_len;

	// Check if header?  Only scan the very first chunk of the stream.
	if(hDevcfg->offset == 0 && ulSize > 4) {
		BT_u32 i;
		for(i = 0; i < ulSize - 4; i++) {
			if(!memcmp(buf + i, "\x66\x55\x99\xAA", 4)) {
				BT_kPrint("xdevcfg: found normal sync word.");
				hDevcfg->bEndianSwap = 0;
				break;
			}

			if(!memcmp(buf + i, "\xAA\x99\x55\x66", 4)) {
				BT_kPrint("xdevcfg: found byte-swapped sync word.");
				hDevcfg->bEndianSwap = 1;
				break;
			}
		}

		// Sync word found before the scan ran off the end: drop the i
		// header bytes preceding it.
		if(i != ulSize - 4) {
			ulSize -= i;
			// ulSize has already been reduced by i, so this moves exactly
			// the remaining bytes; regions may overlap, hence memmove.
			memmove(buf, buf + i, ulSize);
		}
	}

	// Save stragglers for next time.
	hDevcfg->residue_len = ulSize % 4;
	ulSize -= hDevcfg->residue_len;	// ulSize is now a whole number of words.
	memcpy(hDevcfg->residue_buf, buf + ulSize, hDevcfg->residue_len);

	// Fixup the endianness: the PCAP expects a fixed word order, so swap
	// each 32-bit word if the byte-swapped sync word was detected.
	if(hDevcfg->bEndianSwap) {
		BT_u32 i;
		for (i = 0; i < ulSize; i += 4) {
			BT_u32 *p = (BT_u32 *) &buf[i];
			p[0] = __builtin_bswap32(p[0]);
		}
	}

	// Transfer the data.
	// NOTE(review): the source address is the PHYSICAL address of the
	// coherent buffer; the low bit presumably marks this as the last DMA
	// transfer of the command — confirm against the devcfg register spec.

	hDevcfg->pRegs->DMA_SRC_ADDR = (BT_u32 ) kmem | 1;
	hDevcfg->pRegs->DMA_DST_ADDR = 0xFFFFFFFF;

	// Length register is in 32-bit words, rounded up.
	// NOTE(review): ulSize is a multiple of 4 at this point (residue was
	// stripped above), so the round-up branch appears unreachable.
	BT_u32 transfer_len = 0;
	if(ulSize % 4) {
		transfer_len = (ulSize / 4) + 1;
	} else {
		transfer_len = (ulSize / 4);
	}

	hDevcfg->pRegs->DMA_SRC_LEN = transfer_len;
	hDevcfg->pRegs->DMA_DST_LEN = 0;

	// Busy-wait for DMA completion, yielding the CPU between polls.
	while(!(hDevcfg->pRegs->INT_STS & INT_STS_DMA_DONE_INT)) {
		BT_ThreadYield();
	}

	hDevcfg->pRegs->INT_STS = INT_STS_DMA_DONE_INT;	// Clear DMA_DONE status

	hDevcfg->offset += user_count;	// Track progress in USER bytes only.

	bt_page_free_coherent(kmem, kmem_size);

	return user_count;
}