Example 1
static void __meminit early_make_page_readonly(void *va, unsigned int feature)
{
	unsigned long addr, _va = (unsigned long)va;
	pte_t pte, *ptep;
	unsigned long *page = (unsigned long *) init_level4_pgt;

	BUG_ON(after_bootmem);

	if (xen_feature(feature))
		return;

	addr = (unsigned long) page[pgd_index(_va)];
	addr_to_page(addr, page);

	addr = page[pud_index(_va)];
	addr_to_page(addr, page);

	addr = page[pmd_index(_va)];
	addr_to_page(addr, page);

	ptep = (pte_t *) &page[pte_index(_va)];

	pte.pte = ptep->pte & ~_PAGE_RW;
	if (HYPERVISOR_update_va_mapping(_va, pte, 0))
		BUG();
}
Example 2
static void phy_pages_init(e820map_t *e820map)
{
        uint32_t phy_mem_length = 0;
        for (uint32_t i = 0; i < e820map->count; ++i){
                if (e820map->map[i].addr_low > ZONE_HIGHMEM_ADDR) {
                      break;
                }
                if (e820map->map[i].addr_low + e820map->map[i].length_low > ZONE_HIGHMEM_ADDR) {
                        phy_mem_length = ZONE_HIGHMEM_ADDR;
                        break;
                }
                phy_mem_length = e820map->map[i].length_low;
        }

        uint32_t pages_mem_length = sizeof(page_t) * (phy_mem_length / PMM_PAGE_SIZE);
        bzero(phy_pages, pages_mem_length);

        // Start address of the physical page management region
        pmm_addr_start = ((uint32_t)phy_pages - KERNBASE + pages_mem_length + PMM_PAGE_SIZE) & PMM_PAGE_MASK;

        for (uint32_t i = 0; i < e820map->count; ++i){
                uint32_t start_addr = e820map->map[i].addr_low;
                uint32_t end_addr = e820map->map[i].addr_low + e820map->map[i].length_low;
                if (start_addr < pmm_addr_start) {
                        start_addr = pmm_addr_start;
                }
                if (end_addr > ZONE_HIGHMEM_ADDR) {
                        end_addr = ZONE_HIGHMEM_ADDR;
                }
                for (uint32_t addr = start_addr; addr < end_addr; addr += PMM_PAGE_SIZE) {
                        phy_pages_count++;
                }
                pmm_addr_end = end_addr;
        }

        assert(pmm_addr_start == page_to_addr(&phy_pages[0]),
                        "phy_pages_init error pmm_start != &page[0]");
        assert(pmm_addr_end - PMM_PAGE_SIZE == page_to_addr(&phy_pages[phy_pages_count-1]),
                        "phy_pages_init error pmm_end != &page[n-1]");
        assert(&phy_pages[0] == addr_to_page(page_to_addr(&phy_pages[0])),
                        "phy_pages_init error addr_to_page error");
        assert(&phy_pages[1] == addr_to_page(page_to_addr(&phy_pages[1])),
                        "phy_pages_init error addr_to_page error");
}
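
The final asserts only hold if page_to_addr and addr_to_page are exact inverses that map phy_pages[i] to pmm_addr_start + i * PMM_PAGE_SIZE. A minimal sketch of helpers with that property follows; it reuses the names from the example (phy_pages, pmm_addr_start, PMM_PAGE_SIZE, page_t) but is an illustration, not the project's actual implementation.

/* Sketch only: assumes phy_pages[i] describes the frame at
 * pmm_addr_start + i * PMM_PAGE_SIZE, as the asserts above require. */
static inline uint32_t page_to_addr(page_t *page)
{
        return pmm_addr_start + (uint32_t)(page - phy_pages) * PMM_PAGE_SIZE;
}

static inline page_t *addr_to_page(uint32_t addr)
{
        return &phy_pages[(addr - pmm_addr_start) / PMM_PAGE_SIZE];
}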
Example 3
/*
 * Sanity checks to assure we're passed a valid address:
 * - Does given address reside in an allocated page?
 * - If yes, does it reside in a page allocated by us?
 * - If yes, does it reside in a buffer-aligned address?
 * - If yes, does it reside in a not-already-freed buffer?
 * Any NO above means an invalid address, thus a kernel bug
 */
void kfree(void *addr)
{
	struct page *page;
	struct bucket *bucket;
	int buf_size;
	char *buf;

	buf = addr;
	page = addr_to_page(buf);
	bucket = &kmembuckets[page->bucket_idx];

	if (page_is_free(page))
		panic("Bucket: Freeing address 0x%lx which resides in "
		      "an unallocated page frame", buf);

	if (!page->in_bucket)
		panic("Bucket: Freeing address 0x%lx which resides in "
		      "a foreign page frame (not allocated by us)", buf);

	buf_size = 1 << page->bucket_idx;
	if (!is_aligned((uintptr_t)buf, buf_size))
		panic("Bucket: Freeing invalidly-aligned 0x%lx address; "
		      "bucket buffer size = 0x%lx\n", buf, buf_size);

	if (is_free_buf(buf))
		panic("Bucket: Freeing already free buffer at 0x%lx, "
		      "with size = 0x%lx bytes", buf, buf_size);

	sign_buf(buf, FREEBUF_SIG);

	spin_lock(&bucket->lock);

	*(void **)buf = bucket->head;
	bucket->head = buf;
	bucket->totalfree++;

	spin_unlock(&bucket->lock);
}
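
After the checks, kfree() pushes the buffer back onto the bucket's intrusive free list by storing the old head in the buffer's first word. For context, a matching allocation fast path would pop from that same list; the sketch below assumes power-of-two buckets and uses the hypothetical helpers bucket_idx_for() and alloc_backing_page() — only kmembuckets, head, totalfree, and the lock come from the example.

/* Sketch only: the pop that mirrors kfree()'s push onto bucket->head. */
void *kmalloc_sketch(int size)
{
	struct bucket *bucket = &kmembuckets[bucket_idx_for(size)];	/* hypothetical index helper */
	void *buf;

	spin_lock(&bucket->lock);
	buf = bucket->head;
	if (buf) {
		bucket->head = *(void **)buf;	/* unlink the first free buffer */
		bucket->totalfree--;
	}
	spin_unlock(&bucket->lock);

	if (!buf)
		buf = alloc_backing_page(bucket);	/* refill path, hypothetical */

	return buf;
}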
Example 4
File: report.c Project: Lyude/linux
static void print_address_description(void *addr)
{
	struct page *page = addr_to_page(addr);

	dump_stack();
	pr_err("\n");

	if (page && PageSlab(page)) {
		struct kmem_cache *cache = page->slab_cache;
		void *object = nearest_obj(cache, page, addr);

		describe_object(cache, object, addr);
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
	}

	if (page) {
		pr_err("The buggy address belongs to the page:\n");
		dump_page(page, "kasan: bad access detected");
	}
}
Example 5
/* This function is the SIGSEGV handler for the virtual memory system.  If the
 * faulting address is within the user-space virtual memory pool then this
 * function responds appropriately to allow the faulting operation to be
 * retried.  If the faulting address falls outside of the virtual memory pool
 * then the segmentation fault is reported as an error.
 *
 * While a SIGSEGV is being processed, SIGALRM signals are blocked.  Therefore
 * a timer interrupt will never interrupt the segmentation-fault handler.
 */
static void sigsegv_handler(int signum, siginfo_t *infop, void *data) {
    void *addr;
    page_t page;

    /* Only handle SIGSEGVs whose addresses are in range */
    addr = infop->si_addr;
    if (addr < vmem_start || addr >= vmem_end) {
        fprintf(stderr, "segmentation fault at address %p\n", addr);
        abort();
    }

    num_faults++;

    /* Figure out what page generated the fault. */
    page = addr_to_page(addr);
    assert(page < NUM_PAGES);

#if VERBOSE
    fprintf(stderr,
        "================================================================\n");
    fprintf(stderr, "SIGSEGV:  Address %p, Page %u, Code %s (%d)\n",
           addr, page, signal_code(infop->si_code), infop->si_code);
#endif

    /* We really can't handle any other type of code.  On Linux this should be
     * fine though.
     */
    assert(infop->si_code == SEGV_MAPERR || infop->si_code == SEGV_ACCERR);

    /* Map the page into memory so that the fault can be resolved.  Of course,
     * this may result in some other page being unmapped.
     */

    /* Handle unmapped address (SEGV_MAPERR). */
    if (infop->si_code == SEGV_MAPERR) {
        /* Evict a page. */
        assert(num_resident <= max_resident);
        if (num_resident == max_resident) {
            page_t victim = choose_victim_page();
            assert(is_page_resident(victim));
            unmap_page(victim);
            assert(!is_page_resident(victim));
        }

        /*
         * There should now be space, so load the new page. No permissions
         * initially.
         */
        assert(num_resident < max_resident);
        map_page(page, PAGEPERM_NONE);
    }

    /* Handle unpermitted access (SEGV_ACCERR). */
    else {
        /* Regardless of attempted read or write, it is now accessed. */
        set_page_accessed(page);
        assert(is_page_accessed(page));

        switch(get_page_permission(page)) {
            case PAGEPERM_NONE:
                /*
                 * Tried to read or write. Give read access, if it was a write
                 * then it will segfault again and read-write access will be
                 * given then.
                 */
                set_page_permission(page, PAGEPERM_READ);
                break;
            case PAGEPERM_READ:
                /* Tried to write, so make it read-write access. */
                set_page_permission(page, PAGEPERM_RDWR);
                /* Since it is a write it is also dirty. */
                set_page_dirty(page);
                assert(is_page_dirty(page));
                break;
            case PAGEPERM_RDWR:
                fprintf(stderr, "sigsegv_handler: got unpermitted access error \
                    on page that already has read-write permission.\n");
                abort();
                break;
        }
    }
}
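
The header comment relies on SIGALRM being blocked while the handler runs; with sigaction() that property comes from sa_mask. A minimal installation sketch is shown below — the registration function name is ours, everything else is standard POSIX.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch only: install sigsegv_handler() so SIGALRM stays blocked while it runs. */
static void install_sigsegv_handler(void) {
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = sigsegv_handler;  /* three-argument (siginfo_t) form */
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    sigaddset(&sa.sa_mask, SIGALRM);    /* block timer signals during handling */

    if (sigaction(SIGSEGV, &sa, NULL) < 0) {
        perror("sigaction");
        abort();
    }
}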
Example 6
void async_fault_processing()
{
	struct nand_chip *chip;
	int i, j, k, l, plane, block, page;

	fm.power_fail_flag = 1;
	memset(damaged_block, 0xFF, sizeof(damaged_block));

	//handle faults for on-going operations
	for (i = 0; i < NUM_OF_BUS; i++)
	{
		for (j = 0; j < CHIPS_PER_BUS; j++)
		{
			if (fm_status->wq[i][j].status == OP_STARTED)
			{
				fm_status->wq[i][j].ftl_req.addr;

				chip = &fm.buses[i].chips[j];
				plane = addr_to_plane(fm_status->wq[i][j].ftl_req.addr);
				block = addr_to_block(fm_status->wq[i][j].ftl_req.addr);
				page = addr_to_page(fm_status->wq[i][j].ftl_req.addr);

				switch (fm_status->wq[i][j].ftl_req.cmd)
				{
				case PAGE_PROGRAM_FINISH:
					if (chip->cmd == PAGE_PROGRAM_MP)
					{
						for (k = 0; k < PLANES_PER_CHIP; k++)
						{
							damaged_block[i][j][k] = block;
							chip->planes[k].blocks[block].pages[page].state = page_state_transition(chip->planes[k].blocks[block].pages[page].state, PROGRAM_PF);
						}
					}
					else
					{
						damaged_block[i][j][plane] = block;
						chip->planes[plane].blocks[block].pages[page].state = page_state_transition(chip->planes[plane].blocks[block].pages[page].state, PROGRAM_PF);
					}
					break;

				case BLOCK_ERASE:
					if (chip->cmd == BLOCK_ERASE_MP)
					{
						for (k = 0; k < PLANES_PER_CHIP; k++)
						{
							damaged_block[i][j][k] = block;
							for (l = 0; l < PAGES_PER_BLOCK; l++)
							{
								chip->planes[k].blocks[block].pages[l].state = page_state_transition(chip->planes[k].blocks[block].pages[l].state, ERASE_PF);
							}
						}
					}
					else
					{
						damaged_block[i][j][plane] = block;
						for (l = 0; l < PAGES_PER_BLOCK; l++)
						{
							chip->planes[plane].blocks[block].pages[l].state = page_state_transition(chip->planes[plane].blocks[block].pages[l].state, ERASE_PF);
						}
					}
					break;

				default:
					break;
				}
			}
		}
	}

	//---reset nand flash emulator---
	//clear event queue
	while (eq->eq_size)
	{
		dequeue_event_queue(eq);
	}

	//clear external request queue
	while (ftl_to_nand->num_of_entries)
	{
		if_dequeue(ftl_to_nand);
	}

	//clear request queue
	for (i = 0; i < NUM_OF_BUS; i++)
	{
		for (j = 0; j < CHIPS_PER_BUS; j++)
		{
			while (request_queue_arr[i + NUM_OF_BUS * j].num_of_entry)
			{
				dequeue_request_queue(i, j, request_queue_arr);
			}

		}
	}
	//clear dynamic scheduler queue
	while (ds_queue->num_of_entries)
	{
		dequeue(ds_queue);
	}

	//clear flash operation queue
	while (fou_queue->num_of_entries)
	{
		dequeue(fou_queue);
	}

	//init chip and bus status
	reset_flash_module_status(fm_status);
	reset_flashmodule(&fm);

	async_fault_gen();
}
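
The NAND-emulator examples (6 through 8) decode one flat address with addr_to_channel/way/plane/block/page/sector_offset. The project's actual encoding is not shown here; a common approach is a packed bit layout like the sketch below, in which every field width is purely illustrative.

/* Sketch only: illustrative field widths, not the project's real layout. */
#define SECTOR_BITS	3
#define PAGE_BITS	7
#define BLOCK_BITS	12
#define PLANE_BITS	1
#define WAY_BITS	2

#define addr_to_sector_offset(a)	((a) & ((1u << SECTOR_BITS) - 1))
#define addr_to_page(a)		(((a) >> SECTOR_BITS) & ((1u << PAGE_BITS) - 1))
#define addr_to_block(a)	(((a) >> (SECTOR_BITS + PAGE_BITS)) & ((1u << BLOCK_BITS) - 1))
#define addr_to_plane(a)	(((a) >> (SECTOR_BITS + PAGE_BITS + BLOCK_BITS)) & ((1u << PLANE_BITS) - 1))
#define addr_to_way(a)		(((a) >> (SECTOR_BITS + PAGE_BITS + BLOCK_BITS + PLANE_BITS)) & ((1u << WAY_BITS) - 1))
#define addr_to_channel(a)	((a) >> (SECTOR_BITS + PAGE_BITS + BLOCK_BITS + PLANE_BITS + WAY_BITS))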
Example 7
//Updates the flash chip/bus status and
//controls the flash memory.
int run_nand_operation(int p_channel, int p_way)
{
	struct event_queue_node eq_node;
	struct ftl_request ftl_req;
	struct nand_chip *chip;
	struct dte_request dte_req;
	int plane, block, page;
	int op_result = 0;
	int i, j;
	int msb_page_flag = 0;
	int lsb_page_index;
	long long delay;
	int sector_offset;

	fm_status->wq[p_channel][p_way].status = OP_STARTED;
	ftl_req = fm_status->wq[p_channel][p_way].ftl_req;
	chip = &fm.buses[p_channel].chips[p_way];
	plane = addr_to_plane(ftl_req.addr);
	block = addr_to_block(ftl_req.addr);
	page = addr_to_page(ftl_req.addr);
	sector_offset = addr_to_sector_offset(ftl_req.addr);

	if (check_cmd_validity(ftl_req.cmd, chip->cmd) == FAIL)
	{
		chip->cmd = IDLE;
		fm_status->wq[p_channel][p_way].status = IDLE;
		if (ftl_req.cmd == READ_FINISH || ftl_req.cmd == PAGE_PROGRAM_FINISH || ftl_req.cmd == BLOCK_ERASE)
		{
			ftl_req.ack = INVALID_CMD_CHAIN;
			put_reorder_buffer(ftl_req);
		}
		set_bus_idle(p_channel);
		set_chip_idle(p_channel, p_way);

		dynamic_scheduling();
		return FAIL;
	}
	
	ftl_req.ack = SUCCESS;
	eq_node.ftl_req = ftl_req;
	QueryPerformanceCounter(&eq_node.time);
	//for debugging
	QueryPerformanceCounter(&eq_node.ftl_req.start_tick);
// 	cmd_to_char(ftl_req.cmd, char_cmd);
// 	printf("start tick\t: %16I64d, ID: %3d, cmd: %s\n", eq_node.ftl_req.start_tick.QuadPart, eq_node.ftl_req.id, char_cmd);
	delay = get_timing(ftl_req, ftl_req.addr);
	eq_node.time.QuadPart += delay;
	eq_node.time_offset = 0;
	eq_node.dst = FOU;
	
	enqueue_event_queue(eq, eq_node);

	msb_page_flag = is_msb_page(ftl_req.addr);
	
	switch (ftl_req.cmd)
	{
	case READ:
		op_result = sync_fault_gen(ftl_req.cmd, ftl_req.addr); //UECC_ERROR;
		chip->status = op_result;
		chip->planes[plane].reg_addr = ftl_req.addr;
#ifdef DATA_TRANSFER_ENGINE
		dte_req.deadline = 0;
		dte_req.dst = ftl_req.data;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = chip->planes[plane].blocks[block].pages[page].data + sector_offset * SIZE_OF_SECTOR;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif

		if (chip->cmd == READ_MP)
		{
			chip->cmd = READ;
			for (i = 0; i < PLANES_PER_CHIP; i++)
			{
				unreliable_read_violation(chip->planes[i].blocks[block].pages[page].state);
#ifdef MEMCPY
				memcpy(chip->planes[i].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
					chip->planes[i].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
					ftl_req.length * SIZE_OF_SECTOR);
#endif
			}
		}
		else
		{
			unreliable_read_violation(chip->planes[plane].blocks[block].pages[page].state);
			chip->cmd = READ;
#ifdef MEMCPY
			memcpy(chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
				chip->planes[plane].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
				ftl_req.length * SIZE_OF_SECTOR);
#endif
		}
		//chip->status = sync_fault()
		break;

	case READ_MP:
		chip->planes[plane].reg_addr = ftl_req.addr;
		chip->cmd = READ_MP;

#ifdef DATA_TRANSFER_ENGINE
		dte_req.deadline = 0;
		dte_req.dst = ftl_req.data;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = chip->planes[plane].blocks[block].pages[page].data + sector_offset * SIZE_OF_SECTOR;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;

	case DATA_OUT:
#ifdef MEMCPY
		memcpy(fm_status->wq[p_channel][p_way].ftl_req.data,
			chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
			ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
		pthread_mutex_lock(&dte_req_q->mutex);
		set_dte_request_deadline(dte_req_q, ftl_req.id * PLANES_PER_CHIP + plane, eq_node.time.QuadPart);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;

	case READ_FINISH:
#ifdef DATA_TRANSFER_ENGINE
		pthread_mutex_lock(&dte_req_q->mutex);
		if (is_data_transfer_done(dte_req_q, ftl_req.id * PLANES_PER_CHIP + plane) == 0)
		{
			printf("data transfer is not done!\n");
			assert(0);
		}
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		chip->cmd = IDLE;
		break;

	case BLOCK_ERASE:
		op_result = sync_fault_gen(ftl_req.cmd, ftl_req.addr); //UECC_ERROR;
		chip->status = op_result;
		chip->planes[plane].reg_addr = ftl_req.addr;

		if (chip->cmd == BLOCK_ERASE_MP)
		{
			for (j = 0; j < PLANES_PER_CHIP; j++)
			{
				chip->planes[j].blocks[block].last_programmed_page = 0;
				chip->planes[j].blocks[block].pecycle++;
				chip->planes[j].blocks[block].block_access_mode = chip->current_access_mode;

				for (i = 0; i < PAGES_PER_BLOCK; i++)
				{
#ifdef MEMCPY
					memset(chip->planes[j].blocks[block].pages[i].data, 0xff, SIZE_OF_PAGE);
#endif
					chip->planes[j].blocks[block].pages[i].nop = 0;
					chip->planes[j].blocks[block].pages[i].state = page_state_transition(chip->planes[j].blocks[block].pages[i].state, op_result);
				}
			}
		}
		else
		{
			chip->cmd = BLOCK_ERASE;
			for (i = 0; i < PAGES_PER_BLOCK; i++)
			{
				chip->planes[plane].blocks[block].last_programmed_page = 0;
				chip->planes[plane].blocks[block].pecycle++;
				chip->planes[plane].blocks[block].block_access_mode = chip->current_access_mode;
#ifdef MEMCPY
				memset(chip->planes[plane].blocks[block].pages[i].data, 0xff, SIZE_OF_PAGE);
#endif
				chip->planes[plane].blocks[block].pages[i].nop = 0;
				chip->planes[plane].blocks[block].pages[i].state = page_state_transition(chip->planes[plane].blocks[block].pages[i].state, op_result);
			}
		}
		//chip->status = sync_fault()
		break;

	case BLOCK_ERASE_MP:
		chip->cmd = BLOCK_ERASE_MP;
		chip->planes[plane].reg_addr = ftl_req.addr;	
		break;

	case READ_STATUS:
#ifdef MEMCPY
		memcpy(ftl_req.data, &chip->status, 1);
#endif
		break;

	case PAGE_PROGRAM:
		if (chip->cmd != PAGE_PROGRAM_MP)
		{
			chip->cmd = PAGE_PROGRAM;
		}
		chip->planes[plane].reg_addr = ftl_req.addr;
#ifdef MEMCPY
		memcpy(chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
			ftl_req.data,
			ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
		chip->planes[plane].shadow_buffer = (char *)malloc(SIZE_OF_PAGE);
		
		dte_req.deadline = 0;
		dte_req.dst = chip->planes[plane].shadow_buffer;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = ftl_req.data;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;
	
	case PAGE_PROGRAM_MP:
		chip->cmd = PAGE_PROGRAM_MP;
		chip->planes[plane].reg_addr = ftl_req.addr;
#ifdef MEMCPY
		memcpy(chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
			ftl_req.data,
			ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
		chip->planes[plane].shadow_buffer = (char *)malloc(SIZE_OF_PAGE);

		dte_req.deadline = 0;
		dte_req.dst = chip->planes[plane].shadow_buffer;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = ftl_req.data;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;

	case PAGE_PROGRAM_FINISH:
		op_result = sync_fault_gen(ftl_req.cmd, ftl_req.addr); //UECC_ERROR;
		chip->status = op_result;
		if (chip->cmd != PAGE_PROGRAM_MP)
		{
			ascending_order_program_violation(chip->planes[plane].blocks[block].last_programmed_page, page);
			program_after_erase_violation(chip->planes[plane].blocks[block].pages[page].state);
			nop_violation(chip->planes[plane].blocks[block].pages[page].nop, chip->planes[plane].blocks[block].block_access_mode);

			chip->planes[plane].blocks[block].last_programmed_page++;
			chip->planes[plane].blocks[block].pages[page].nop++;
#ifdef MEMCPY
			memcpy(chip->planes[plane].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
				chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
				ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
			pthread_mutex_lock(&dte_req_q->mutex);
			set_dte_request_deadline(dte_req_q, ftl_req.id * PLANES_PER_CHIP + plane, eq_node.time.QuadPart);
			pthread_mutex_unlock(&dte_req_q->mutex);
#endif

			chip->planes[plane].blocks[block].pages[page].state = page_state_transition(chip->planes[plane].blocks[block].pages[page].state, op_result);
			if (op_result == PROGRAM_PF || (op_result == PROGRAM_IF && msb_page_flag))
			{
				lsb_page_index = get_lsb_page(ftl_req.addr);
				chip->planes[plane].blocks[block].pages[lsb_page_index].state = lsb_page_state_transition(chip->planes[plane].blocks[block].pages[lsb_page_index].state, op_result);
			}
		}
		else
		{
			for (i = 0; i < PLANES_PER_CHIP; i++)
			{
				ascending_order_program_violation(chip->planes[i].blocks[block].last_programmed_page, page);
				program_after_erase_violation(chip->planes[i].blocks[block].pages[page].state);
				nop_violation(chip->planes[i].blocks[block].pages[page].nop, chip->planes[i].blocks[block].block_access_mode);

				chip->planes[i].blocks[block].last_programmed_page++;
				chip->planes[i].blocks[block].pages[page].nop++;
#ifdef MEMCPY
				memcpy(chip->planes[i].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
					chip->planes[i].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
					ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
				pthread_mutex_lock(&dte_req_q->mutex);
				set_dte_request_deadline(dte_req_q, ftl_req.id * PLANES_PER_CHIP + i, eq_node.time.QuadPart);
				pthread_mutex_unlock(&dte_req_q->mutex);
#endif

				chip->planes[i].blocks[block].pages[page].state = page_state_transition(chip->planes[i].blocks[block].pages[page].state, op_result);
				if (op_result == PROGRAM_PF || (op_result == PROGRAM_IF && msb_page_flag))
				{
					lsb_page_index = get_lsb_page(ftl_req.addr);
					chip->planes[i].blocks[block].pages[lsb_page_index].state = lsb_page_state_transition(chip->planes[i].blocks[block].pages[lsb_page_index].state, op_result);
				}
			}
		}

		break;

	case RESET:
		chip->cmd = IDLE;
		chip->status = IDLE;
		chip->current_access_mode = MLC_MODE;
		for (i = 0; i < PLANES_PER_CHIP; i++)
		{
			chip->planes[i].reg_addr = 0;
		}
		break;

	case CHANGE_ACCESS_MODE:
		chip->current_access_mode = *ftl_req.data;
		break;

	default :
		break;
	}

	return SUCCESS;
}
Example 8
void sync_nand_operation()
{
	//TODO: chip status update after operation completion still needs to be added
	struct ftl_request ftl_req;
	int channel, way, plane;
	char char_cmd[20];
	char *temp_page_buf;

	while (fou_queue->num_of_entries > 0)
	{
		//request decode
		ftl_req = dequeue(fou_queue);
		channel = addr_to_channel(ftl_req.addr);
		way = addr_to_way(ftl_req.addr);
		plane = addr_to_plane(ftl_req.addr);
		ftl_req.ack = fm.buses[channel].chips[way].status;
		fm_status->wq[channel][way].status = IDLE;

		//QueryPerformanceCounter(&ftl_req.elapsed_tick);
		ftl_req.elapsed_tick.QuadPart = ftl_req.elapsed_tick.QuadPart - ftl_req.start_tick.QuadPart;
		QueryPerformanceFrequency(&freq);
		cmd_to_char(ftl_req.cmd, char_cmd);
		printf("elapsed time(us)\t: %16I64d, ID: %3d, cmd: %s\n", ftl_req.elapsed_tick.QuadPart * 1000000 / 3318393, ftl_req.id, char_cmd);

		switch (ftl_req.cmd)
		{
		case READ:
			set_chip_idle(channel, way);
			break;

		case READ_MP:
			break;

		case DATA_OUT:
			set_bus_idle(channel);
			break;

		case READ_FINISH:
			put_reorder_buffer(ftl_req);
			break;

		case BLOCK_ERASE:
			set_chip_idle(channel, way);
			fm.buses[channel].chips[way].cmd = IDLE;

 			put_reorder_buffer(ftl_req);
			break;

		case BLOCK_ERASE_MP:
			break;

		case READ_STATUS:
			break;

		case PAGE_PROGRAM:
			set_bus_idle(channel);
			break;

		case PAGE_PROGRAM_MP:
			set_bus_idle(channel);
			break;

		case PAGE_PROGRAM_FINISH:
#ifdef DATA_TRANSFER_ENGINE
			pthread_mutex_lock(&dte_req_q->mutex);
			if (is_data_transfer_done(dte_req_q, ftl_req.id * PLANES_PER_CHIP + plane) == 0)
			{
				printf("data transfer is not done!\n");
				assert(0);
			}
			pthread_mutex_unlock(&dte_req_q->mutex);
			if (fm.buses[channel].chips[way].cmd == PAGE_PROGRAM_MP)
			{
				int i, block, page;

				for (i = 0; i < PLANES_PER_CHIP; i++)
				{
					block = addr_to_block(fm.buses[channel].chips[way].planes[i].reg_addr);
					page = addr_to_page(fm.buses[channel].chips[way].planes[i].reg_addr);

					temp_page_buf = fm.buses[channel].chips[way].planes[i].blocks[block].pages[page].data;
					fm.buses[channel].chips[way].planes[i].blocks[block].pages[page].data = fm.buses[channel].chips[way].planes[i].shadow_buffer;
					fm.buses[channel].chips[way].planes[i].shadow_buffer = NULL;
					free(temp_page_buf);
				}
			}
			else
			{
				int block, page;
				block = addr_to_block(fm.buses[channel].chips[way].planes[plane].reg_addr);
				page = addr_to_page(fm.buses[channel].chips[way].planes[plane].reg_addr);

				temp_page_buf = fm.buses[channel].chips[way].planes[plane].blocks[block].pages[page].data;
				fm.buses[channel].chips[way].planes[plane].blocks[block].pages[page].data = fm.buses[channel].chips[way].planes[plane].shadow_buffer;
				fm.buses[channel].chips[way].planes[plane].shadow_buffer = NULL;
				free(temp_page_buf);
			}
#endif

			fm.buses[channel].chips[way].cmd = IDLE;
			set_chip_idle(channel, way);

			put_reorder_buffer(ftl_req);
			break;

		case RESET:
			break;

		default:
			break;
		}
	}
}