Example #1
void dmaIRQHandler(uint32_t token) {
	uint32_t wholeSegment;
	uint32_t currentSegment;
	uint8_t segmentId;
	int channel = token;
	uint32_t channel_reg = channel << 12;
	DMAInfo* dma = &dmaInfo[channel];

	// Discarded read: likely decompilation residue, though it may be a
	// deliberate dummy read before sampling the interrupt status register.
	GET_REG(DMA + channel_reg);
	uint32_t status = GET_REG(DMA + channel_reg);
	//bufferPrintf("cdma: intsts 0x%08x\r\n", status);

	if (status & 0x40000)
		system_panic("CDMA: channel %d error interrupt\r\n", channel);

	if (status & 0x100000)
		system_panic("CDMA: channel %d spurious CIR\r\n", channel);

	SET_REG(DMA + channel_reg, 0x80000);

	if (dma->unsegmentedSize || GET_REG(DMA + DMA_SIZE + channel_reg)) {
		if (GET_REG(DMA + channel_reg) & 0x30000) {
			GET_REG(DMA + channel_reg); // discarded read, as above
			dma->unsegmentedSize = GET_REG(DMA + DMA_SIZE + channel_reg) + dma->unsegmentedSize;
			wholeSegment = dma->previousUnsegmentedSize - dma->unsegmentedSize;

			for (segmentId = 0; segmentId != 32; segmentId++) {
				currentSegment = GET_REG(8 * (dma->previousDmaSegmentNumber + segmentId) + dma->segmentationSetting + 4) - dma->previousSegmentOffset;
				if (wholeSegment <= currentSegment)
					break;
				wholeSegment -= currentSegment;
				dma->previousSegmentOffset = 0;
			}

			dma->dmaSegmentNumber = segmentId + dma->previousDmaSegmentNumber;
			dma->segmentOffset = dma->previousSegmentOffset + wholeSegment;
			dma->irq_state = 1;
		} else {
			dma->irq_state = 2;
		}
		dma_continue_async(channel);
	} else {
		dma->signalled = 1;
		
		//bufferPrintf("cmda: done\r\n");

		dma_set_aes(channel, 0);

		dma_channel_activate(channel, 0);

		if (dma->handler)
			dma->handler(dma->channel);
	}
}
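The handler walks what appears to be a table of 8-byte records at dma->segmentationSetting, reading the word at offset +4 of each record as a segment length. Under that reading, a sketch of the record layout; this is inferred purely from the access pattern above, not a confirmed structure:

// Inferred from GET_REG(8 * n + dma->segmentationSetting + 4) above; the
// field at offset 0 is assumed to hold the segment's offset/address.
typedef struct {
	uint32_t offset;	// +0 (assumed)
	uint32_t length;	// +4, the word the IRQ handler reads
} dmaSegmentInfo;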
Example #2
static error_t vfl_vsvfl_write_single_page(vfl_device_t *_vfl, uint32_t dwVpn, uint8_t* buffer, uint8_t* spare, int _scrub)
{
	vfl_vsvfl_device_t *vfl = CONTAINER_OF(vfl_vsvfl_device_t, vfl, _vfl);

	uint32_t pCE = 0, pPage = 0;
	int ret;

	ret = virtual_page_number_to_physical(vfl, dwVpn, &pCE, &pPage);

	if(FAILED(ret)) {
		bufferPrintf("vfl_vsvfl_write_single_page: virtual_page_number_to_physical returned an error (dwVpn %d)!\r\n", dwVpn);
		return ret;
	}

	ret = nand_device_write_single_page(vfl->device, pCE, 0, pPage, buffer, spare);

	if(FAILED(ret)) {
		if(!vfl_check_checksum(vfl, pCE))
			system_panic("vfl_vsfl_write_single_page: failed checksum\r\n");

		vfl->contexts[pCE].write_failure_count++;
		vfl_gen_checksum(vfl, pCE);

		// TODO: add block map support
		// vsvfl_mark_page_as_bad(pCE, pPage, ret);

		if(_scrub)
			add_block_to_scrub_list(vfl, pCE, pPage / vfl->geometry.pages_per_block); // Something like that, I think

		return ret;
	}

	return SUCCESS;
}
Example #3
int dma_cancel(int channel) {

	//bufferPrintf("cdma: dma_cancel.\r\n");

	DMAInfo* dma = &dmaInfo[channel];
	uint64_t startTime = timer_get_system_microtime();

	if (!dma->signalled)
		return channel;

	dma_channel_activate(channel, 1);

	uint32_t channel_reg = DMA + (channel << 12);
	if (GET_BITS(GET_REG(channel_reg), 16, 2) == 1) {
		SET_REG(channel_reg, 4);

		while (GET_BITS(GET_REG(channel_reg), 16, 2) == 1) {
			if (has_elapsed(startTime, 10000))
				system_panic("CDMA: channel %d timeout during abort\r\n", channel);
		}

		SET_REG(channel_reg, 2);
	}

	dma->signalled = 1;
	dma_set_aes(channel, 0);

	return dma_channel_activate(channel, 0);
}
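These routines lean on GET_REG, SET_REG, and GET_BITS without showing their definitions. A minimal sketch consistent with how they are used here (GET_BITS(x, 16, 2) extracts the two bits starting at bit 16, and the registers are memory-mapped), assuming the usual volatile MMIO idiom rather than quoting the real openiBoot headers:

#include <stdint.h>

#define GET_REG(addr)      (*(volatile uint32_t*)(addr))
#define SET_REG(addr, val) (*(volatile uint32_t*)(addr) = (val))
#define GET_BITS(x, start, length) \
	((((uint32_t)(x)) >> (start)) & ((1u << (length)) - 1u))

With these definitions, the loop above polls bits 16-17 of the channel register until the channel leaves state 1 or the 10 ms timeout elapses.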
Example #4
static error_t vsvfl_write_vfl_cxt_to_flash(vfl_vsvfl_device_t *_vfl, uint32_t _ce) {
	if(_ce >= _vfl->geometry.num_ce)
		return EINVAL;

	if(!vfl_check_checksum(_vfl, _ce))
		system_panic("vsvfl_write_vfl_cxt_to_flash: failed checksum\r\n");

	uint8_t* pageBuffer = memalign(0x40, _vfl->geometry.bytes_per_page);
	uint8_t* spareBuffer = memalign(0x40, _vfl->geometry.bytes_per_spare);
	if(pageBuffer == NULL || spareBuffer == NULL) {
		bufferPrintf("vfl: cannot allocate page and spare buffer\r\n");
		// free(NULL) is a no-op, so this releases whichever buffer was allocated.
		free(pageBuffer);
		free(spareBuffer);
		return ENOMEM;
	}
	memset(pageBuffer, 0x0, _vfl->geometry.bytes_per_page);

	vfl_vsvfl_context_t *curVFLCxt = &_vfl->contexts[_ce];
	curVFLCxt->usn_inc = _vfl->current_version++;
	uint32_t curPage = curVFLCxt->usn_page;
	curVFLCxt->usn_page += 8;
	curVFLCxt->usn_dec -= 1;
	vfl_gen_checksum(_vfl, _ce);

	memcpy(pageBuffer, curVFLCxt, 0x800);
	// The context block location is loop-invariant; compute it once for both
	// the write loop and the verification loop below.
	uint32_t bankStart = (curVFLCxt->vfl_context_block[curVFLCxt->usn_block] / _vfl->geometry.blocks_per_bank) * _vfl->geometry.bank_address_space;
	uint32_t blockOffset = curVFLCxt->vfl_context_block[curVFLCxt->usn_block] % _vfl->geometry.blocks_per_bank;
	int i;
	for (i = 0; i < 8; i++) {
		memset(spareBuffer, 0xFF, _vfl->geometry.bytes_per_spare);
		((uint32_t*)spareBuffer)[0] = curVFLCxt->usn_dec;
		spareBuffer[8] = 0;
		spareBuffer[9] = 0x80;
		int status = nand_device_write_single_page(_vfl->device, _ce, 0, (bankStart + blockOffset) * _vfl->geometry.pages_per_block_2 + curPage + i, pageBuffer, spareBuffer);
		if(FAILED(status)) {
			bufferPrintf("vfl_write_vfl_cxt_to_flash: Failed write\r\n");
			free(pageBuffer);
			free(spareBuffer);
			// vsvfl_mark_page_as_bad(_ce, (bankStart + blockOffset) * _vfl->geometry.pages_per_block_2 + curPage + i, status);
			return EIO;
		}
	}
	int fails = 0;
	for (i = 0; i < 8; i++) {
		if(FAILED(nand_device_read_single_page(_vfl->device, _ce, 0, (bankStart + blockOffset) * _vfl->geometry.pages_per_block_2 + curPage + i, pageBuffer, spareBuffer, 0))) {
			//vsvfl_store_block_map_single_page(_ce, (bankStart + blockOffset) * _vfl->geometry.pages_per_block_2 + curPage + i);
			fails++;
			continue;
		}
		if(memcmp(pageBuffer, curVFLCxt, 0x6E0) || ((uint32_t*)spareBuffer)[0] != curVFLCxt->usn_dec || spareBuffer[8] || spareBuffer[9] != 0x80)
			fails++;
	}
	free(pageBuffer);
	free(spareBuffer);
	if(fails > 3)
		return EIO;
	else
		return SUCCESS;
}
Example #5
void* yaftl_alloc(size_t size)
{
	void* buffer = memalign(0x40, size);

	if (!buffer)
		system_panic("yaftl_alloc failed\r\n");

	memset(buffer, 0, size);
	return buffer;
}
Example #6
static error_t vsvfl_store_vfl_cxt(vfl_vsvfl_device_t *_vfl, uint32_t _ce) {
	if(_ce >= _vfl->geometry.num_ce)
		system_panic("vfl: Can't store VFLCxt on non-existent CE\r\n");

	vfl_vsvfl_context_t *curVFLCxt = &_vfl->contexts[_ce];
	if(curVFLCxt->usn_page + 8 > _vfl->geometry.pages_per_block || FAILED(vsvfl_write_vfl_cxt_to_flash(_vfl, _ce))) {
		int startBlock = curVFLCxt->usn_block;
		int nextBlock = (curVFLCxt->usn_block + 1) % 4; // the VFLCxt ring has 4 candidate blocks
		while(startBlock != nextBlock) {
			if(curVFLCxt->vfl_context_block[nextBlock] != 0xFFFF) {
				int fail = 0;
				int i;
				for (i = 0; i < 4; i++) {
					uint32_t bankStart = (curVFLCxt->vfl_context_block[nextBlock] / _vfl->geometry.blocks_per_bank) * _vfl->geometry.bank_address_space;
					uint32_t blockOffset = curVFLCxt->vfl_context_block[nextBlock] % _vfl->geometry.blocks_per_bank;
					int status = nand_device_erase_single_block(_vfl->device, _ce, bankStart + blockOffset);
					if(SUCCEEDED(status))
						break;
					//vsvfl_mark_bad_vfl_block(_vfl, _ce, curVFLCxt->vfl_context_block[nextBlock], status);
					if(i == 3)
						fail = 1;
				}
				if(!fail) {
					if(!vfl_check_checksum(_vfl, _ce))
						system_panic("vsvfl_store_vfl_cxt: failed checksum\r\n");
					curVFLCxt->usn_block = nextBlock;
					curVFLCxt->usn_page = 0;
					vfl_gen_checksum(_vfl, _ce);
					int result = vsvfl_write_vfl_cxt_to_flash(_vfl, _ce);
					if(SUCCEEDED(result))
						return result;
				}
			}
			nextBlock = (nextBlock + 1) % 4;
		}
		return EIO;
	}
	return SUCCESS;
}
Example #7
// returns the sub-buffer offset
void* bufzone_alloc(bufzone_t* _zone, size_t size)
{
	size_t oldSizeRounded;

	if (_zone->state != 1)
		system_panic("bufzone_alloc: bad state\r\n");

	oldSizeRounded = ROUND_UP(_zone->size, 64);
	_zone->paddingsSize = _zone->paddingsSize + (oldSizeRounded - _zone->size);
	_zone->size = oldSizeRounded + size;
	_zone->numAllocs++;

	return (void*)oldSizeRounded;
}
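bufzone_alloc hands out 64-byte-aligned offsets rather than pointers, and the state check implies a multi-phase scheme: callers reserve offsets first, and those offsets only become usable pointers once the zone's backing buffer exists. A sketch of that second phase with hypothetical names; the rest of the bufzone API is not shown in these excerpts, so the fields and the rebase helper below are assumptions:

#include <stddef.h>
#include <stdint.h>

typedef struct {
	int state;           // 1 while offsets are being reserved (assumed meaning)
	size_t size;         // total bytes reserved so far
	size_t paddingsSize; // bytes lost to 64-byte alignment padding
	size_t numAllocs;
	uint8_t* buffer;     // backing storage, allocated after reservation (assumed field)
} bufzone_t;

// Hypothetical rebase step: turn a reserved offset into a real pointer.
static void* bufzone_rebase(bufzone_t* zone, void* offset) {
	return zone->buffer + (uintptr_t)offset;
}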
Example #8
static int add_block_to_scrub_list(vfl_vsvfl_device_t *_vfl, uint32_t _ce, uint32_t _block) {
	if(is_block_in_scrub_list(_vfl, _ce, _block))
			return 0;

	if(_vfl->contexts[_ce].scrub_list_length > 0x13) {
		bufferPrintf("vfl: too many scrubs!\r\n");
		return 0;
	}

	if(!vfl_check_checksum(_vfl, _ce))
		system_panic("vfl_add_block_to_scrub_list: failed checksum\r\n");

	_vfl->contexts[_ce].scrub_list[_vfl->contexts[_ce].scrub_list_length++] = _block;
	vfl_gen_checksum(_vfl, _ce);
	return vsvfl_store_vfl_cxt(_vfl, _ce);
}
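is_block_in_scrub_list is not shown in these excerpts. Given that add_block_to_scrub_list appends to contexts[_ce].scrub_list and bumps scrub_list_length, a plausible counterpart is a simple linear scan; a sketch, not necessarily the real implementation:

static int is_block_in_scrub_list(vfl_vsvfl_device_t *_vfl, uint32_t _ce, uint32_t _block) {
	uint32_t i;
	for (i = 0; i < _vfl->contexts[_ce].scrub_list_length; i++) {
		if (_vfl->contexts[_ce].scrub_list[i] == _block)
			return 1;
	}
	return 0;
}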
Example #9
static error_t vfl_vsvfl_write_context(vfl_device_t *_vfl, uint16_t *_control_block)
{
	vfl_vsvfl_device_t *vfl = CONTAINER_OF(vfl_vsvfl_device_t, vfl, _vfl);
	uint32_t ce = vfl->current_version % vfl->geometry.num_ce;
	uint32_t i;

	// check and update cxt of each CE
	for(i = 0; i < vfl->geometry.num_ce; i++) {
		if(vfl_check_checksum(vfl, i) == FALSE)
			system_panic("vsvfl: VFLCxt has bad checksum.\r\n");
		memmove(vfl->contexts[i].control_block, _control_block, 6);
		vfl_gen_checksum(vfl, i);
	}

	// write cxt on the ce with the oldest cxt
	if(FAILED(vsvfl_store_vfl_cxt(vfl, ce))) {
		bufferPrintf("vsvfl: context write fail!\r\n");
		return EIO;
	}

	return SUCCESS;
}
Example #10
static uint32_t remap_block(vfl_vsvfl_device_t *_vfl, uint32_t _ce, uint32_t _block, uint32_t *_isGood) {
	DebugPrintf("vsvfl: remap_block: CE %d, block %d\r\n", _ce, _block);

	if(vfl_is_good_block(_vfl->bbt[_ce], _block))
		return _block;

	DebugPrintf("vsvfl: remapping block...\r\n");

	if(_isGood)
		*_isGood = 0; // report that the block was bad and had to be remapped

	int pwDesPbn;
	for(pwDesPbn = 0; pwDesPbn < _vfl->geometry.blocks_per_ce - _vfl->contexts[_ce].reserved_block_pool_start * _vfl->geometry.banks_per_ce; pwDesPbn++)
	{
		if(_vfl->contexts[_ce].reserved_block_pool_map[pwDesPbn] == _block)
		{
			uint32_t vBank, vBlock, pBlock;

			/*
			if(pwDesPbn >= _vfl->geometry.blocks_per_ce)
				bufferPrintf("ftl: Destination physical block for remapping is greater than number of blocks per CE!");
			*/

			vBank = _ce + _vfl->geometry.num_ce * (pwDesPbn / (_vfl->geometry.blocks_per_bank_vfl - _vfl->contexts[_ce].reserved_block_pool_start));
			vBlock = _vfl->contexts[_ce].reserved_block_pool_start + (pwDesPbn % (_vfl->geometry.blocks_per_bank_vfl - _vfl->contexts[_ce].reserved_block_pool_start));

			if(FAILED(virtual_block_to_physical_block(_vfl, vBank, vBlock, &pBlock)))
				system_panic("vfl: failed to convert virtual reserved block to physical\r\n");

			return pBlock;
		}
	}

	bufferPrintf("vfl: failed to remap CE %d block 0x%04x\r\n", _ce, _block);
	return _block;
}
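The loop above translates an index into the per-CE reserved block pool back into a virtual bank and block. A worked example with assumed geometry, purely to illustrate the index math (none of these numbers come from the code):

// Assume num_ce = 4, blocks_per_bank_vfl = 1024 and
// reserved_block_pool_start = 1000, i.e. 24 reserved blocks per bank.
// For _ce = 1 and pwDesPbn = 30:
//   vBank  = 1 + 4 * (30 / 24) = 5      (the entry lives in CE 1's second bank)
//   vBlock = 1000 + (30 % 24)  = 1006   (the 7th reserved block of that bank)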
Example #11
int dma_init_channel(uint8_t direction, uint32_t channel, int segmentationSetting, uint32_t txrx_register, uint32_t size, uint32_t Setting1Index, uint32_t Setting2Index, void* handler) {
	int i = 0;
	DMAInfo* dma = &dmaInfo[channel];

	if (!dma->signalled) {
		dma->segmentBuffer = memalign(0x20, 32 * sizeof(*dma->segmentBuffer));
		if (!dma->segmentBuffer)
			system_panic("CDMA: can't allocate command chain\r\n");

		memset(dma->segmentBuffer, 0, 32 * sizeof(*dma->segmentBuffer));

		//bufferPrintf("cdma: new segment buffer 0x%08x.\r\n", dma->segmentBuffer);

		// Chain each descriptor to the physical address of its successor. For
		// i == 31 this points one element past the end of the buffer; the chain
		// is terminated through descriptor flags in dma_continue_async.
		for (i = 0; i != 32; i++)
			dma->segmentBuffer[i].address = get_physical_address((uint32_t)(&dma->segmentBuffer[i+1]));

		dma->signalled = 1;
		dma->txrx_register = 0;
		dma->unk_separator = 0;

		interrupt_set_int_type(DMA_CHANNEL_INTERRUPT_BASE + channel, 0);
		interrupt_install(DMA_CHANNEL_INTERRUPT_BASE + channel, dmaIRQHandler, channel);
		interrupt_enable(DMA_CHANNEL_INTERRUPT_BASE + channel);
	}

	dma->irq_state = 0;
	dma->dmaSegmentNumber = 0;
	dma->segmentationSetting = segmentationSetting;
	dma->segmentOffset = 0;
	dma->dataSize = size;
	dma->unsegmentedSize = size;
	dma->handler = handler;
	dma->channel = channel;

	uint8_t Setting1;
	uint8_t Setting2;

	// Both settings encode log2 of the given index into a bit field:
	// Setting1 into bits 2-3, Setting2 into bits 4-6.
	switch(Setting1Index)
	{
	case 1:
		Setting1 = 0 << 2;
		break;

	case 2:
		Setting1 = 1 << 2;
		break;

	case 4:
		Setting1 = 2 << 2;
		break;

	default:
		return -1;
	}

	switch (Setting2Index)
	{
	case 1:
		Setting2 = 0 << 4;
		break;

	case 2:
		Setting2 = 1 << 4;
		break;

	case 4:
		Setting2 = 2 << 4;
		break;

	case 8:
		Setting2 = 3 << 4;
		break;

	case 16:
		Setting2 = 4 << 4;
		break;

	case 32:
		Setting2 = 5 << 4;
		break;

	default:
		return -1;
	}

	uint32_t channel_reg = channel << 12;
	SET_REG(DMA + channel_reg, 2);

	uint8_t direction_setting;
	if (direction == 1) // if out
		direction_setting = 1 << 1;
	else
		direction_setting = 0 << 1;

	SET_REG(DMA + DMA_SETTINGS + channel_reg, dma->unk_separator | Setting1 | Setting2 | direction_setting);
	SET_REG(DMA + DMA_TXRX_REGISTER + channel_reg, txrx_register);
	SET_REG(DMA + DMA_SIZE + channel_reg, size);

	if (dma->dmaAESInfo)
		dma->current_segment = 0;

	dma_continue_async(channel);
	return 0;
}
Example #12
int dma_set_aes(int channel, dmaAES* dmaAESInfo) {
	//bufferPrintf("cdma: set_aes.\r\n");

	DMAInfo* dma = &dmaInfo[channel];
	uint32_t value;
	int i;

	dma->dmaAESInfo = dmaAESInfo;
	if(!dmaAESInfo)
		return 0;

	if (!dma->dmaAES_channel)
	{
		EnterCriticalSection();
		for (i = 2; i < 9; i++) {
			if (dmaAES_channel_used & (1 << i))
				continue;

			dmaAES_channel_used |= (1 << i);
			dma->dmaAES_channel = i;
			break;
		}
		LeaveCriticalSection();

		if (!dma->dmaAES_channel)
			system_panic("CDMA: no AES filter contexts: 0x%08x\r\n", dmaAES_channel_used);
	}

	uint32_t dmaAES_channel_reg = dma->dmaAES_channel << 12;

	value = ((channel & 0xFF) << 8) | 0x20000;
	if (!(dma->dmaAESInfo->inverse & 0xF))
		value |= 0x30000;

	switch(GET_BITS(dma->dmaAESInfo->type, 28, 4))
	{
		case 2: // AES 256
			value |= 0x80000;
			break;

		case 1: // AES 192
			value |= 0x40000;
			break;

		case 0: // AES 128
			break;

		default: // Fail
			return -1;
	}

	uint32_t someval = dma->dmaAESInfo->type & 0xFFF;
	if(someval == 0x200)
	{
		value |= 0x200000;
	}
	else if(someval == 0x201)
	{
		value |= 0x400000;
	}
	else if(someval == 0)
	{
		switch(GET_BITS(dma->dmaAESInfo->type, 28, 4))
		{
		case 2: // AES-256: load the top two key words, then fall through
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_7 + dmaAES_channel_reg, dma->dmaAESInfo->key[7]);
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_6 + dmaAES_channel_reg, dma->dmaAESInfo->key[6]);
			// fall through

		case 1: // AES-192: load the next two key words, then fall through
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_5 + dmaAES_channel_reg, dma->dmaAESInfo->key[5]);
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_4 + dmaAES_channel_reg, dma->dmaAESInfo->key[4]);
			// fall through

		case 0: // AES-128: load the low four key words
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_3 + dmaAES_channel_reg, dma->dmaAESInfo->key[3]);
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_2 + dmaAES_channel_reg, dma->dmaAESInfo->key[2]);
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_1 + dmaAES_channel_reg, dma->dmaAESInfo->key[1]);
			SET_REG(DMA + DMA_AES + DMA_AES_KEY_0 + dmaAES_channel_reg, dma->dmaAESInfo->key[0]);
			value |= 0x100000;
			break;

		default:
			return -1;
		}
	}
	else if(someval != 0x100)
		return -1;

	SET_REG(DMA + dmaAES_channel_reg + DMA_AES, value);
	return 0;
}
Example #13
void dma_continue_async(int channel) {

	//bufferPrintf("cdma: continue_async.\r\n");

	uint32_t endOffset;
	uint8_t segmentId;
	uint32_t segmentLength;
	uint32_t value;
	DMAInfo* dma = &dmaInfo[channel];

	if (!dma->unsegmentedSize)
		system_panic("CDMA: ASSERT FAILED\r\n");

	dma->previousUnsegmentedSize = dma->unsegmentedSize;
	dma->previousDmaSegmentNumber = dma->dmaSegmentNumber;
	dma->previousSegmentOffset = dma->segmentOffset;

	if (dma->dmaAESInfo)
	{
		endOffset = dma->segmentationSetting + 8 * dma->dmaSegmentNumber;
		for (segmentId = 0; segmentId != 28;) {
			if (!dma->unsegmentedSize)
				break;

			dma->segmentBuffer[segmentId].value = 2;
			dma->dmaAESInfo->ivGenerator(dma->dmaAESInfo->ivParameter, dma->current_segment, dma->segmentBuffer[segmentId].iv);
			segmentId++;
			dma->current_segment++;
			segmentLength = 0;

			int encryptedSegmentOffset;
			int encryptedSegmentOffsetEnd = 0;
			for (encryptedSegmentOffset = 0; encryptedSegmentOffset < dma->dmaAESInfo->dataSize; encryptedSegmentOffset += segmentLength) {
				encryptedSegmentOffsetEnd = dma->dmaAESInfo->dataSize;
				if (encryptedSegmentOffset >= encryptedSegmentOffsetEnd)
					break;

				segmentLength = endOffset + 4 - dma->segmentOffset;
				if (encryptedSegmentOffset + segmentLength > encryptedSegmentOffsetEnd)
					segmentLength = encryptedSegmentOffsetEnd - encryptedSegmentOffset;

				value = 0x10003;
				if (!encryptedSegmentOffset)
					value = 0x30003;

				dma->segmentBuffer[segmentId].value = value;
				dma->segmentBuffer[segmentId].offset = dma->segmentOffset + endOffset;
				dma->segmentBuffer[segmentId].length = segmentLength;

				if (!segmentLength)
					system_panic("Caught trying to generate zero-length cdma segment on channel %d, irqState: %d\r\n", channel, dma->irq_state);

				dma->segmentOffset += segmentLength;

				if (dma->segmentOffset >= endOffset + 4)
				{
					endOffset += 8;
					++dma->dmaSegmentNumber;
					dma->segmentOffset = 0;
				}

				++segmentId;
			}

			dma->unsegmentedSize -= encryptedSegmentOffsetEnd;
		}

		if (!dma->unsegmentedSize)
			dma->segmentBuffer[segmentId-1].value |= 0x100;

		dma->segmentBuffer[segmentId].value = 0;
	} else {
		for (segmentId = 0; segmentId < 31; segmentId++) {
			int segmentLength = dma->segmentationSetting + 8 * dma->dmaSegmentNumber + 4 - dma->segmentOffset;

			dma->segmentBuffer[segmentId].value = 3;
			dma->segmentBuffer[segmentId].offset = dma->segmentationSetting + 8 * dma->dmaSegmentNumber + dma->segmentOffset;
			dma->segmentBuffer[segmentId].length = segmentLength;

			if (!segmentLength)
				system_panic("Caught trying to generate zero-length cdma segment on channel %d, irqState: %d\r\n", channel, dma->irq_state);

			dma->segmentOffset = 0;

			if (segmentLength >= dma->unsegmentedSize) {
				dma->segmentBuffer[segmentId].value |= 0x100;
				dma->unsegmentedSize = 0;
				break;
			}

			dma->unsegmentedSize -= segmentLength;
			dma->dmaSegmentNumber++;
		}

		dma->segmentBuffer[segmentId+1].value = 0;
	}

	DataCacheOperation(1, (uint32_t)dma->segmentBuffer, 32 * sizeof(*dma->segmentBuffer));

	uint32_t channel_reg = channel << 12;
	SET_REG(DMA + channel_reg + 0x14, get_physical_address((uint32_t)dma->segmentBuffer));

	value = 0x1C0009;

	if (dma->dmaAESInfo)
		value |= (dma->dmaAES_channel << 8);

	//bufferPrintf("cdma: continue async 0x%08x.\r\n", value);
	SET_REG(DMA + channel_reg, value);
}
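The descriptor layout is implied by the fields touched here and in dma_init_channel: a link to the next descriptor's physical address, a flags word (2 for IV-carrying entries, 3 or 0x10003/0x30003 for data segments, with 0x100 OR'd in to mark the final segment and 0 terminating the chain), a buffer offset, a length, and a per-segment IV for the AES filter. A hedged reconstruction; the field names come from the code, but their order and exact widths are guesses:

#include <stdint.h>

typedef struct {
	uint32_t address; // physical address of the next descriptor in the chain
	uint32_t value;   // flags word
	uint32_t offset;  // buffer offset for this segment
	uint32_t length;  // byte count for this segment
	uint32_t iv[4];   // AES IV written by dmaAESInfo->ivGenerator
} dmaSegment;

At 32 bytes per entry, this layout would also match the memalign(0x20, 32 * sizeof(*dma->segmentBuffer)) alignment in dma_init_channel.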
Example #14
static error_t vfl_vsvfl_open(vfl_device_t *_vfl, nand_device_t *_nand)
{
    vfl_vsvfl_device_t *vfl = CONTAINER_OF(vfl_vsvfl_device_t, vfl, _vfl);

    if(vfl->device || !_nand)
        return EINVAL;

    vfl->device = _nand;
    error_t ret = vfl_vsvfl_setup_geometry(vfl);
    if(FAILED(ret))
        return ret;

    bufferPrintf("vsvfl: Opening %p.\r\n", _nand);

    vfl->contexts = malloc(vfl->geometry.num_ce * sizeof(vfl_vsvfl_context_t));
    memset(vfl->contexts, 0, vfl->geometry.num_ce * sizeof(vfl_vsvfl_context_t));

    vfl->pageBuffer = (uint32_t*) malloc(vfl->geometry.pages_per_block * sizeof(uint32_t));
    vfl->chipBuffer = (uint16_t*) malloc(vfl->geometry.pages_per_block * sizeof(uint16_t));
    vfl->blockBuffer = (uint16_t*) malloc(vfl->geometry.banks_total * sizeof(uint16_t));

    uint32_t ce = 0;
    for(ce = 0; ce < vfl->geometry.num_ce; ce++) {
        vfl->bbt[ce] = (uint8_t*) malloc(CEIL_DIVIDE(vfl->geometry.blocks_per_ce, 8));

        bufferPrintf("vsvfl: Checking CE %d.\r\n", ce);

        if(FAILED(nand_device_read_special_page(_nand, ce, "DEVICEINFOBBT\0\0\0",
                                                vfl->bbt[ce], CEIL_DIVIDE(vfl->geometry.blocks_per_ce, 8))))
        {
            bufferPrintf("vsvfl: Failed to find DEVICEINFOBBT!\r\n");
            return EIO;
        }

        if(ce >= vfl->geometry.num_ce)
            return EIO;

        vfl_vsvfl_context_t *curVFLCxt = &vfl->contexts[ce];
        uint8_t* pageBuffer = malloc(vfl->geometry.bytes_per_page);
        uint8_t* spareBuffer = malloc(vfl->geometry.bytes_per_spare);
        if(pageBuffer == NULL || spareBuffer == NULL) {
            bufferPrintf("ftl: cannot allocate page and spare buffer\r\n");
            // free(NULL) is a no-op, so this releases whichever buffer was allocated.
            free(pageBuffer);
            free(spareBuffer);
            return ENOMEM;
        }

        // Any VFLCxt page will contain an up-to-date list of all blocks used to store VFLCxt pages. Find any such
        // page in the system area.

        int i;
        for(i = vfl->geometry.reserved_blocks; i < vfl->geometry.fs_start_block; i++) {
            // The BBT is a bit array with one bit per block; a set bit marks a usable block.
            if(!(vfl->bbt[ce][i / 8] & (1 << (i  & 0x7))))
                continue;

            if(SUCCEEDED(nand_device_read_single_page(vfl->device, ce, i, 0, pageBuffer, spareBuffer, 0)))
            {
                memcpy(curVFLCxt->vfl_context_block, ((vfl_vsvfl_context_t*)pageBuffer)->vfl_context_block,
                       sizeof(curVFLCxt->vfl_context_block));
                break;
            }
        }

        if(i == vfl->geometry.fs_start_block) {
            bufferPrintf("vsvfl: cannot find readable VFLCxtBlock\r\n");
            free(pageBuffer);
            free(spareBuffer);
            return EIO;
        }

        // Since VFLCxtBlock is a ringbuffer, if blockA.page0.spare.usnDec < blockB.page0.usnDec, then for any page a
        // in blockA and any page b in blockB, a.spare.usNDec < b.spare.usnDec. Therefore, to begin finding the
        // page/VFLCxt with the lowest usnDec, we should just look at the first page of each block in the ring.
        uint32_t minUsn = 0xFFFFFFFF;
        int VFLCxtIdx = 4;
        for(i = 0; i < 4; i++) {
            uint16_t block = curVFLCxt->vfl_context_block[i];
            if(block == 0xFFFF)
                continue;

            if(FAILED(nand_device_read_single_page(vfl->device, ce, block, 0, pageBuffer, spareBuffer, 0)))
                continue;

            vfl_vsvfl_spare_data_t *spareData = (vfl_vsvfl_spare_data_t*)spareBuffer;

            if(spareData->meta.usnDec > 0 && spareData->meta.usnDec <= minUsn) {
                minUsn = spareData->meta.usnDec;
                VFLCxtIdx = i;
            }
        }

        if(VFLCxtIdx == 4) {
            bufferPrintf("vsvfl: cannot find readable VFLCxtBlock index in spares\r\n");
            free(pageBuffer);
            free(spareBuffer);
            return EIO;
        }

        // VFLCxts are stored in the block such that they are duplicated 8 times. Therefore, we only need to
        // read every 8th page, and nand_readvfl_cxt_page will try the 7 subsequent pages if the first was
        // no good. The last non-blank page will have the lowest spare.usnDec and highest usnInc for VFLCxt
        // in all the land (and is the newest).
        int page = 8;
        int last = 0;
        for(page = 8; page < vfl->geometry.pages_per_block; page += 8) {
            if(nand_device_read_single_page(vfl->device, ce, curVFLCxt->vfl_context_block[VFLCxtIdx], page, pageBuffer, spareBuffer, 0) != 0) {
                break;
            }

            last = page;
        }

        if(nand_device_read_single_page(vfl->device, ce, curVFLCxt->vfl_context_block[VFLCxtIdx], last, pageBuffer, spareBuffer, 0) != 0) {
            bufferPrintf("vsvfl: cannot find readable VFLCxt\n");
            free(pageBuffer);
            free(spareBuffer);
            return -1;
        }

        // Aha, so the upshot is that this finds the VFLCxt and copies it into vfl->contexts
        memcpy(&vfl->contexts[ce], pageBuffer, sizeof(vfl_vsvfl_context_t));

        // This is the newest VFLCxt across all CEs
        if(curVFLCxt->usn_inc >= vfl->current_version) {
            vfl->current_version = curVFLCxt->usn_inc;
        }

        free(pageBuffer);
        free(spareBuffer);

        // Verify the checksum
        if(vfl_check_checksum(vfl, ce) == FALSE)
        {
            bufferPrintf("vsvfl: VFLCxt has bad checksum.\r\n");
            return EIO;
        }
    }

    // retrieve some global parameters from the latest VFL across all CEs.
    vfl_vsvfl_context_t *latestCxt = get_most_updated_context(vfl);

    // Then we update the VFLCxts on every ce with that information.
    for(ce = 0; ce < vfl->geometry.num_ce; ce++) {
        // Don't copy over own data.
        if(&vfl->contexts[ce] != latestCxt) {
            // Copy the data, and generate the new checksum.
            memcpy(vfl->contexts[ce].control_block, latestCxt->control_block, sizeof(latestCxt->control_block));
            vfl->contexts[ce].usable_blocks_per_bank = latestCxt->usable_blocks_per_bank;
            vfl->contexts[ce].reserved_block_pool_start = latestCxt->reserved_block_pool_start;
            vfl->contexts[ce].ftl_type = latestCxt->ftl_type;
            memcpy(vfl->contexts[ce].field_6CA, latestCxt->field_6CA, sizeof(latestCxt->field_6CA));

            vfl_gen_checksum(vfl, ce);
        }
    }

    // Vendor-specific virtual-from/to-physical functions.
    // Note: support for some vendors is still missing.
    nand_device_t *nand = vfl->device;
    uint32_t vendorType = vfl->contexts[0].vendor_type;

    if(!vendorType)
        if(FAILED(nand_device_get_info(nand, diVendorType, &vendorType, sizeof(vendorType))))
            return EIO;

    switch(vendorType) {
    case 0x10001:
        vfl->geometry.banks_per_ce = 1;
        vfl->virtual_to_physical = virtual_to_physical_10001;
        break;

    case 0x100010:
    case 0x100014:
    case 0x120014:
        vfl->geometry.banks_per_ce = 2;
        vfl->virtual_to_physical = virtual_to_physical_100014;
        break;

    case 0x150011:
        vfl->geometry.banks_per_ce = 2;
        vfl->virtual_to_physical = virtual_to_physical_150011;
        break;

    default:
        bufferPrintf("vsvfl: unsupported vendor 0x%06x\r\n", vendorType);
        return EIO;
    }

    if(FAILED(nand_device_set_info(nand, diVendorType, &vendorType, sizeof(vendorType))))
        return EIO;

    vfl->geometry.pages_per_sublk = vfl->geometry.pages_per_block * vfl->geometry.banks_per_ce * vfl->geometry.num_ce;
    vfl->geometry.banks_total = vfl->geometry.num_ce * vfl->geometry.banks_per_ce;
    vfl->geometry.blocks_per_bank_vfl = vfl->geometry.blocks_per_ce / vfl->geometry.banks_per_ce;

    uint32_t banksPerCE = vfl->geometry.banks_per_ce;
    if(FAILED(nand_device_set_info(nand, diBanksPerCE_VFL, &banksPerCE, sizeof(banksPerCE))))
        return EIO;

    bufferPrintf("vsvfl: detected chip vendor 0x%06x\r\n", vendorType);

    // Now, discard the old scfg bad-block table, and set it using the VFL context's reserved block pool map.
    uint32_t bank, i;
    uint32_t num_reserved = vfl->contexts[0].reserved_block_pool_start;
    uint32_t num_non_reserved = vfl->geometry.blocks_per_bank_vfl - num_reserved;

    for(ce = 0; ce < vfl->geometry.num_ce; ce++) {
        memset(vfl->bbt[ce], 0xFF, CEIL_DIVIDE(vfl->geometry.blocks_per_ce, 8));

        for(bank = 0; bank < banksPerCE; bank++) {
            for(i = 0; i < num_non_reserved; i++) {
                uint16_t mapEntry = vfl->contexts[ce].reserved_block_pool_map[bank * num_non_reserved + i];
                uint32_t pBlock;

                if(mapEntry == 0xFFF0)
                    continue;

                if(mapEntry < vfl->geometry.blocks_per_ce) {
                    pBlock = mapEntry;
                } else if(mapEntry > 0xFFF0) {
                    virtual_block_to_physical_block(vfl, ce + bank * vfl->geometry.num_ce, num_reserved + i, &pBlock);
                } else {
                    system_panic("vsvfl: bad map table: CE %d, entry %d, value 0x%08x\r\n",
                                 ce, bank * num_non_reserved + i, mapEntry);
                }

                vfl->bbt[ce][pBlock / 8] &= ~(1 << (pBlock % 8));
            }
        }
    }

    bufferPrintf("vsvfl: VFL successfully opened!\r\n");

    return SUCCESS;
}
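The bad-block table here is a bit array with one bit per block, where a set bit marks a usable block: the VFLCxt scan tests bbt[ce][i / 8] & (1 << (i & 0x7)), and the rebuild loop at the end clears bits with &= ~(1 << (pBlock % 8)). vfl_is_good_block, used by remap_block above, is not shown; a sketch consistent with that layout, not necessarily the exact openiBoot helper:

#include <stdint.h>

static int vfl_is_good_block(uint8_t* bbt, uint32_t block) {
	return (bbt[block / 8] & (1 << (block % 8))) != 0;
}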
Example #15
static error_t vfl_vsvfl_erase_single_block(vfl_device_t *_vfl, uint32_t _vbn, int _replaceBadBlock) {
    vfl_vsvfl_device_t *vfl = CONTAINER_OF(vfl_vsvfl_device_t, vfl, _vfl);
    uint32_t bank;

    // In order to erase a single virtual block, we have to erase the matching
    // blocks across all banks.
    for (bank = 0; bank < vfl->geometry.banks_total; bank++) {
        uint32_t pBlock, pCE, blockRemapped;

        // Find the physical block before bad-block remapping.
        virtual_block_to_physical_block(vfl, bank, _vbn, &pBlock);
        pCE = bank % vfl->geometry.num_ce;
        vfl->blockBuffer[bank] = pBlock;

        if (is_block_in_scrub_list(vfl, pCE, pBlock)) {
            // TODO: this.
            system_panic("vsvfl: scrub list support not yet!\r\n");
        }

        // Remap the block and calculate its physical number (considering bank address space).
        blockRemapped = remap_block(vfl, pCE, pBlock, 0);
        vfl->blockBuffer[bank] = blockRemapped % vfl->geometry.blocks_per_bank
                                 + (blockRemapped / vfl->geometry.blocks_per_bank) * vfl->geometry.bank_address_space;
    }

    // TODO: H2FMI erase multiple blocks. Currently we erase the blocks one by one.
    // Actually, the block buffer is used for erase multiple blocks, so we won't use it here.
    uint32_t status = EINVAL;

    for (bank = 0; bank < vfl->geometry.banks_total; bank++) {
        uint32_t pBlock, pCE, tries;

        virtual_block_to_physical_block(vfl, bank, _vbn, &pBlock);
        pCE = bank % vfl->geometry.num_ce;

        // Try to erase each block at most 3 times.
        for (tries = 0; tries < 3; tries++) {
            uint32_t blockRemapped, bankStart, blockOffset;

            blockRemapped = remap_block(vfl, pCE, pBlock, 0);
            bankStart = (blockRemapped / vfl->geometry.blocks_per_bank) * vfl->geometry.bank_address_space;
            blockOffset = blockRemapped % vfl->geometry.blocks_per_bank;

            status = nand_device_erase_single_block(vfl->device, pCE, bankStart + blockOffset);
            if (status == 0)
                break;

            // TODO: add block map support.
            //mark_bad_block(vfl, pCE, pBlock, status);
            bufferPrintf("vfl: failed erasing physical block %d on bank %d. status: 0x%08x\r\n",
                         blockRemapped, bank, status);

            if (!_replaceBadBlock)
                return EINVAL;

            // TODO: complete bad block replacement.
            system_panic("vfl: found a bad block. we don't treat those for now. sorry!\r\n");
        }
    }

    if (status)
        system_panic("vfl: failed to erase virtual block %d!\r\n", _vbn);

    return 0;
}
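The bankStart/blockOffset computation used here (and in vsvfl_write_vfl_cxt_to_flash and vsvfl_store_vfl_cxt) maps a linear block number into the device's banked address space, where each bank spans bank_address_space block addresses but only blocks_per_bank of them are populated. A small worked example with assumed numbers:

// Assume blocks_per_bank = 4096 and bank_address_space = 8192.
// A remapped block number of 5000 falls in bank 1:
//   bankStart   = (5000 / 4096) * 8192 = 8192
//   blockOffset =  5000 % 4096         = 904
//   => device block address = 8192 + 904 = 9096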