Example #1
File: stats.c Project: kawamuray/criu
static void encode_time(int t, u_int32_t *to)
{
	struct timing *tm;

	tm = get_timing(t);
	*to = tm->total.tv_sec * USEC_PER_SEC + tm->total.tv_usec;
}
Example #2
File: stats.c Project: kawamuray/criu
void timing_start(int t)
{
	struct timing *tm;

	tm = get_timing(t);
	gettimeofday(&tm->start, NULL);
}
Example #3
void
test_using(const char *ctl, char *buf, int bytes, void (*copyf)(const void *s1, void *d, size_t bytes))
{
    int i;
    int loops;
    long long us;

    start_timing();
    for (i = 0; (i & 31) || stop_timing(0, NULL) == 0; ++i) {
	copyf(buf, buf + bytes, bytes);
    }

    loops = i * 2;
    start_timing();
    for (i = loops - 1; i >= 0; --i) {
	copyf(buf, buf + bytes, bytes);
    }
#if 0
    fpcleanup();
#endif
    stop_timing(loops, ctl);
    us = get_timing();
    printf("%s %d %5.2f MBytes/sec\n", ctl, bytes, 
	(double)loops * (double)bytes / (double)us);
}
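Example #3 (and Example #6 further down) leans on a small timing shim: start_timing() marks a reference point, stop_timing(0, NULL) during the first loop reports whether enough wall-clock time has passed so the loop count can be calibrated, and get_timing() returns the measured interval in microseconds for the MBytes/sec figure. The shim below is a guess at that contract; the one-second calibration window, the use of clock_gettime(CLOCK_MONOTONIC), and the dummy workload are assumptions for illustration, not the real helper library.

/* Sketch only: assumed contract for start_timing()/stop_timing()/get_timing(). */
#include <stdio.h>
#include <time.h>

static struct timespec ts_begin, ts_end;

static long long elapsed_us(void)
{
	return (ts_end.tv_sec - ts_begin.tv_sec) * 1000000LL
	     + (ts_end.tv_nsec - ts_begin.tv_nsec) / 1000;
}

void start_timing(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_begin);
}

/* loops == 0: calibration probe, non-zero once ~1 s has elapsed (assumed).
 * loops != 0: end of the measured run (report printing omitted here). */
int stop_timing(int loops, const char *ctl)
{
	(void)ctl;
	clock_gettime(CLOCK_MONOTONIC, &ts_end);
	if (loops == 0)
		return elapsed_us() >= 1000000;
	return 1;
}

long long get_timing(void)
{
	return elapsed_us();
}

int main(void)
{
	volatile unsigned long sink = 0;
	long i, loops;

	/* calibrate: run until stop_timing(0, NULL) says enough time passed */
	start_timing();
	for (i = 0; (i & 31) || stop_timing(0, NULL) == 0; ++i)
		sink += (unsigned long)i;
	loops = i * 2;

	/* measure exactly that many iterations, as Example #3 does */
	start_timing();
	for (i = loops - 1; i >= 0; --i)
		sink += (unsigned long)i;
	stop_timing((int)loops, "dummy");
	printf("%ld iterations in %lld us\n", loops, get_timing());
	return 0;
}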
Example #4
File: stats.c Project: kawamuray/criu
void timing_stop(int t)
{
	struct timing *tm;
	struct timeval now;

	tm = get_timing(t);
	gettimeofday(&now, NULL);
	timeval_accumulate(&tm->start, &now, &tm->total);
}
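A minimal, self-contained sketch of how the three stats.c helpers above fit together: timing_start() records a start timestamp, timing_stop() accumulates the elapsed interval into total (the role timeval_accumulate() plays in the original), and encode_time() converts the accumulated timeval into microseconds. The timings[] table, its size, the get_timing() accessor, and USEC_PER_SEC are illustrative stand-ins, not criu's actual definitions.

/* Sketch only: table, accessor and USEC_PER_SEC are assumed definitions. */
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

#define USEC_PER_SEC 1000000L

struct timing {
	struct timeval start;
	struct timeval total;
};

static struct timing timings[1];          /* hypothetical stats table */

static struct timing *get_timing(int t)   /* hypothetical accessor */
{
	return &timings[t];
}

static void timing_start(int t)           /* as in Example #2 */
{
	struct timing *tm = get_timing(t);

	gettimeofday(&tm->start, NULL);
}

static void timing_stop(int t)            /* as in Example #4 */
{
	struct timing *tm = get_timing(t);
	struct timeval now;

	gettimeofday(&now, NULL);
	/* inline stand-in for timeval_accumulate(&tm->start, &now, &tm->total) */
	tm->total.tv_sec  += now.tv_sec  - tm->start.tv_sec;
	tm->total.tv_usec += now.tv_usec - tm->start.tv_usec;
	if (tm->total.tv_usec < 0) {
		tm->total.tv_sec  -= 1;
		tm->total.tv_usec += USEC_PER_SEC;
	}
}

static void encode_time(int t, uint32_t *to)   /* as in Example #1 */
{
	struct timing *tm = get_timing(t);

	*to = tm->total.tv_sec * USEC_PER_SEC + tm->total.tv_usec;
}

int main(void)
{
	uint32_t us;

	timing_start(0);
	/* ... code being measured ... */
	timing_stop(0);
	encode_time(0, &us);
	printf("elapsed: %u us\n", us);
	return 0;
}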
Example #5
File: tssfp.c Project: timburrow/ovj3
pulsesequence()
{
	/* declaration of SGL kernel structures */
	SGL_KERNEL_INFO_T read, phase, slice, ss_pre, ss_post;


	/* declaration of internal variables */
	double freqlist[MAXNSLICE];
	double pe_steps;
	int shapelist1, table;
	double xtime, grad_duration, ror_pad,rod_pad;
	double temp_tr;

	double readAmp, phaseAmp, sliceAmp;
	double tepad, tepad2, temin2, htrmin, delayToRF, delayRFToAcq, delayAcqToRF;
	double rof_pad, delRof;

	double sliceRephTrim, sliceDephTrim;
	double readRephTrim, readDephTrim;

	int rfPhase[2] = {0,2};
	
	/* declaration of realtime variables */
	int  vpe_steps  = v1;
	int  vpe_ctr    = v2;
	int  vms_slices = v3;
	int  vms_ctr    = v4;
	int  vpe_offset = v5;
	int  vpe_index  = v6;
	int  vss        = v7;
	int  vssc       = v8;
	int  vacquire   = v9;
	int  vphase	= v10;
	
	settable(t2,2,rfPhase);

	/* setup phase encoding order */
	table = set_pe_order();

	init_mri();

	if( (sliceRephTrim = getvalnwarn("sliceRephTrim")) == 0.0 ) {
		sliceRephTrim = 1.0;
	}	
	
	if( (sliceDephTrim = getvalnwarn("sliceDephTrim")) == 0.0 ) {
		sliceDephTrim = 1.0;
	}	

	if( (readRephTrim = getvalnwarn("readRephTrim")) == 0.0 ) {
		readRephTrim = 1.0;
	}	
	
	if( (readDephTrim = getvalnwarn("readDephTrim")) == 0.0 ) {
		readDephTrim = 1.0;
	}	

	shape_rf( &p1_rf, "p1", p1pat, p1, flip1, rof1, rof2 );	// excitation pulse

	init_slice( &ss_grad, "ss", thk );					// slice gradient
	init_slice_refocus( &ssr_grad, "ssr" );				// slice refocus
	init_slice_refocus( &ssd_grad, "ssd" );				// slice dephase

	init_readout( &ro_grad, "ro", lro, np, sw );		// read gradient
	init_readout_refocus( &ror_grad, "ror" );			// read dephase
	init_readout_refocus( &rod_grad, "ror" );			// read dephase

	init_phase( &pe_grad, "pe", lpe, nv );				// phase gradient

	ss_grad.maxGrad = gmax * 0.57;
	ssr_grad.maxGrad = gmax * 0.57;
	ssd_grad.maxGrad = gmax * 0.57;
	ro_grad.maxGrad = gmax * 0.57;
	ror_grad.maxGrad = gmax * 0.57;
	rod_grad.maxGrad = gmax * 0.57;
	pe_grad.maxGrad = glimpe < 0.57? gmax*glimpe : gmax * 0.57;

	/* calculate the RF pulses, gradient pulses and their interdependencies */
	calc_rf( &p1_rf, "tpwr1", "tpwr1f" );
	calc_slice( &ss_grad, &p1_rf, NOWRITE, "gss" );

	ssr_grad.amp = ss_grad.amp;	
	ssr_grad.gmult = sliceRephTrim;
	ssr_grad.calcFlag = DURATION_FROM_MOMENT_AMPLITUDE;
	calc_slice_refocus( &ssr_grad, &ss_grad, NOWRITE, "gssr" );
	ssd_grad.amp = ss_grad.amp;	
	ssd_grad.gmult = sliceDephTrim; 
	ssd_grad.calcFlag = DURATION_FROM_MOMENT_AMPLITUDE;
	calc_slice_dephase( &ssd_grad, &ss_grad, NOWRITE, "gssd" ); 
	
	calc_readout( &ro_grad, NOWRITE, "gro", "sw", "at" );

	ror_grad.amp = ro_grad.amp;	
	ror_grad.calcFlag = DURATION_FROM_MOMENT_AMPLITUDE;

	rod_grad.amp = ro_grad.amp;	
	rod_grad.calcFlag = DURATION_FROM_MOMENT_AMPLITUDE;

	ror_grad.gmult = readRephTrim;
	calc_readout_refocus( &ror_grad, &ro_grad, NOWRITE, "gror" );
	rod_grad.gmult = readDephTrim;
	calc_readout_rephase( &rod_grad, &ro_grad, NOWRITE, "grod" );

	calc_phase( &pe_grad, NOWRITE, "gpe", "tpe" );

	/* work out the position of the markers */
	/* markerA */
	/* ss_grad.rfDelayFront indicates the starting point of the
	   RF pulse measured from the start of the slice gradient
       ( rof1:pulse length:rof2 ) */	

	double granulatedRFDelayFront = granularity( ss_grad.rfDelayFront, GRADIENT_RES );
	if( granulatedRFDelayFront > ss_grad.rfDelayFront ) {
		granulatedRFDelayFront -= GRADIENT_RES;
	}

	/* ss_grad.rfDelayBack indicates the end point of the
	   RF pulse measured to the end of the slice gradient
       ( rof1:pulse length:rof2 ) */	

	double granulatedRFDelayBack = granularity( ss_grad.rfDelayBack, GRADIENT_RES );
	if( granulatedRFDelayBack > ss_grad.rfDelayBack ) {
		granulatedRFDelayBack -= GRADIENT_RES;
	}
	
	double granulatedRFDelay = granulatedRFDelayFront < granulatedRFDelayBack ? granulatedRFDelayFront : granulatedRFDelayBack;

	double markerADelay = granulatedRFDelay;

	/* read and phase gradients can overlap the start or end of the slice gradient by a max of granulatedRFDelay */

	double granulatedATDelayFront = granularity(ro_grad.atDelayFront, GRADIENT_RES);
	if( granulatedATDelayFront > ro_grad.atDelayFront ) {
		granulatedATDelayFront -= GRADIENT_RES;
	}
	double granulatedATDelayBack = granularity(ro_grad.atDelayBack, GRADIENT_RES);
	if( granulatedATDelayBack > ro_grad.atDelayBack ) {
		granulatedATDelayBack -= GRADIENT_RES;
	}
	double granulatedATDelay = granulatedATDelayFront < granulatedATDelayBack ? granulatedATDelayFront : granulatedATDelayBack;

	/* longest gradient between RF pulse and acquire dominates */

	xtime = ssr_grad.duration + granulatedRFDelay;
	xtime = xtime > ssd_grad.duration + granulatedRFDelay ? xtime : ssd_grad.duration + granulatedRFDelay;
	xtime = xtime > ror_grad.duration + granulatedATDelay ? xtime : ror_grad.duration + granulatedATDelay;
	xtime = xtime > rod_grad.duration + granulatedATDelay ? xtime : rod_grad.duration + granulatedATDelay;
	xtime = xtime > pe_grad.duration ? xtime : pe_grad.duration;

	ror_pad = xtime - ror_grad.duration - granulatedATDelay;
	rod_pad = xtime - rod_grad.duration - granulatedATDelay;

	/* make a gradient list */
	start_kernel( &sk );
	add_gradient( (void*)&ss_grad,  "slice",    	SLICE, START_TIME,	"",         0.0,	PRESERVE );
	add_gradient( (void*)&ssr_grad, "sliceReph", 	SLICE, BEHIND,		"slice",    0.0,	INVERT );
	add_gradient( (void*)&ror_grad, "readDeph", 	READ,  BEHIND,		"slice",   -granulatedRFDelay + ror_pad, INVERT );
	add_gradient( (void*)&ro_grad,  "read",     	READ,  BEHIND,		"readDeph", 0.0,	PRESERVE );	
	add_gradient( (void*)&pe_grad,  "phase",    	PHASE, SAME_START,	"readDeph", 0.0,	PRESERVE );
	add_gradient( (void*)&rod_grad, "readReph", 	READ,  BEHIND,		"read",     0.0,	INVERT );
	add_gradient( (void*)&pe_grad,  "rewind",		PHASE, SAME_END,	"readReph", 0.0, INVERT );
	add_gradient( (void*)&ss_grad,	"nextSlice",	SLICE, BEHIND,		"readReph", rod_pad - granulatedRFDelay, PRESERVE );
	add_gradient( (void*)&ssd_grad,	"sliceDeph",	SLICE, BEFORE,		"nextSlice",    0, INVERT );

	add_marker( "markerA", SAME_START, "slice", granulatedRFDelay );
	add_marker( "markerB", SAME_START, "nextSlice", granulatedRFDelay );

	/* get the minimum echo time */
	temin = get_timing( FROM_RF_CENTER_OF, "slice", TO_ECHO_OF, "read" );
	temin2 = get_timing( FROM_ECHO_OF, "read", TO_RF_CENTER_OF, "nextSlice" );
	
	htrmin = MAX( temin, temin2 );
	
	if( minte[0] == 'y' ){
		te = htrmin;
	}
	
	tepad = granularity( te - temin, GRADIENT_RES );
	tepad2 = granularity( te - temin2, GRADIENT_RES );

	te = temin + tepad;	
	putCmd("setvalue('te', %f, 'current')\n", te );

	if( tepad>0.0 )		change_timing( "readDeph", tepad );
	if( tepad2>0.0 )	change_timing( "nextSlice", tepad2 );

	tr = get_timing( FROM_START_OF, "slice", TO_START_OF, "nextSlice" );
	putvalue("tr", tr );

	delayRFToAcq = get_timing( FROM_RF_PULSE_OF, "slice", TO_ACQ_OF, "read" );
	delayAcqToRF = get_timing( FROM_ACQ_OF, "read", TO_RF_PULSE_OF, "nextSlice" );

	set_comp_info( &ss_pre, "ss_pre" );
	write_comp_grads_snippet( NULL, NULL, &ss_pre, "START_OF_KERNEL", "markerA" );

	set_comp_info( &read, "ro" );
	set_comp_info( &phase, "pe" );
	set_comp_info( &slice, "ss" );
	write_comp_grads_snippet( &read, &phase, &slice, "markerA", "markerB" );

	set_comp_info( &ss_post, "ss_post" );
	write_comp_grads_snippet( NULL, NULL, &ss_post, "markerB", "END_OF_KERNEL" );

	/* Set up frequency offset pulse shape list ********/   	
	offsetlist(pss,ss_grad.ssamp,0,freqlist,ns,seqcon[1]);
	shapelist1 = shapelist(p1_rf.pulseName,ss_grad.rfDuration,freqlist,ns,ss_grad.rfFraction, seqcon[1]);

	/* Set pe_steps for profile or full image **********/   	
	pe_steps = prep_profile(profile[0],nv,&pe_grad,&pe_grad);/* profile[0] is n y or r */
	F_initval(pe_steps/2.0,vpe_offset);

	g_setExpTime(trmean*(ntmean*pe_steps*arraydim + (1+fabs(ssc))*arraydim));

	/* Shift DDR for pro *******************************/   	
	roff = -poffset(pro,ro_grad.roamp);

	/* PULSE SEQUENCE */
	status( A );
	rotate();
	triggerSelect(trigger);
	obsoffset( resto );
	delay( GRADIENT_RES );
	initval( 1+fabs( ssc ), vss );
	
	obspower( p1_rf.powerCoarse );
	obspwrf( p1_rf.powerFine );
	delay( GRADIENT_RES );

	assign(one,vacquire);         // real-time acquire flag
	setacqvar(vacquire);          // Turn on acquire when vacquire is zero 
					
	obl_shapedgradient(ss_pre.name,ss_pre.dur,0,0,ss_pre.amp,NOWAIT);		
	sp1on();
	delay(GRADIENT_RES);
	sp1off();
	delay(ss_pre.dur-GRADIENT_RES );
	msloop( seqcon[1], ns, vms_slices, vms_ctr );
		
		assign(vss,vssc);

		peloop( seqcon[2], pe_steps, vpe_steps, vpe_ctr );

			sub(vpe_ctr,vssc,vpe_ctr);     // vpe_ctr counts up from -ssc
			assign(zero,vssc);
			if (seqcon[2] == 's')
				assign(zero,vacquire); // Always acquire for non-compressed loop
			else {
				ifzero(vpe_ctr);
				assign(zero,vacquire); // Start acquiring when vpe_ctr reaches zero
				endif(vpe_ctr);
			}
		
			if (table)
				getelem(t1,vpe_ctr,vpe_index);
			else {
				ifzero(vacquire);
					sub(vpe_ctr,vpe_offset,vpe_index);
				elsenz(vacquire);
					sub(zero,vpe_offset,vpe_index);
				endif(vacquire);
			}		
			
			pe_shaped3gradient( read.name, phase.name, slice.name,
								read.dur, read.amp, 0, slice.amp,
								-pe_grad.increment, vpe_index, NOWAIT );
			delay(ss_grad.rfDelayFront - granulatedRFDelay);
			shapedpulselist( shapelist1, ss_grad.rfDuration, oph, rof1, rof2, seqcon[1], vms_ctr );

			delay( delayRFToAcq - alfa );
			startacq(alfa);
			acquire( np, 1/ro_grad.bandwidth );
			endacq();
			delay( delayAcqToRF - ss_grad.rfDelayFront + granulatedRFDelay - GRADIENT_RES );
			sp1on();
			delay(GRADIENT_RES);
			sp1off();
			
		endpeloop( seqcon[2], vpe_ctr ); 

	endmsloop( seqcon[1], vms_ctr );

	obl_shapedgradient(ss_post.name,ss_post.dur,0,0,ss_post.amp,WAIT);
}
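The marker arithmetic in Example #5 repeatedly applies one idiom: granularity() appears to round a duration up to the next multiple of GRADIENT_RES, and the following "if the result exceeds the input, subtract one step" turns that into a round-down to the hardware resolution. The helper below is a self-contained sketch of that pattern; the GRADIENT_RES value and the assumed behaviour of granularity() are illustrative, not the SGL library's definitions.

/* Sketch of the round-down-to-gradient-resolution idiom used for
 * granulatedRFDelayFront/Back and the AT delays above. */
#include <math.h>
#include <stdio.h>

#define GRADIENT_RES 4e-6   /* hypothetical 4 us gradient resolution */

/* round up to a multiple of res, as granularity() appears to do */
static double granularity_up(double t, double res)
{
	return ceil(t / res) * res;
}

/* round up, then step back if we overshot: net effect is a round-down */
static double floor_to_res(double t, double res)
{
	double g = granularity_up(t, res);
	if (g > t)
		g -= res;
	return g;
}

int main(void)
{
	double rf_delay = 10.3e-6;

	printf("floored delay: %.1f us\n",
	       floor_to_res(rf_delay, GRADIENT_RES) * 1e6);
	return 0;
}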
Example #6
int
main(int ac, char **av)
{
    long long count = 0;
    long long max;
    char c;
    int j;
    int loops;
    int bytes;
    int ppri = 999;
    int fds[2];
    char *buf;
    char *ptr;
    char *msg = "datarate";

    if (ac == 1) {
	fprintf(stderr, "%s blocksize[k,m] [pipe_writer_pri] [msg]\n", av[0]);
	exit(1);
    }
    bytes = strtol(av[1], &ptr, 0);
    if (*ptr == 'k' || *ptr == 'K') {
	bytes *= 1024;
    } else if (*ptr == 'm' || *ptr == 'M') {
	bytes *= 1024 * 1024;
    } else if (*ptr) {
	fprintf(stderr, "Illegal numerical suffix: %s\n", ptr);
	exit(1);
    }
    if (bytes <= 0) {
	fprintf(stderr, "I can't handle %d sized buffers\n", bytes);
	exit(1);
    }
    if (ac >= 3)
	ppri = strtol(av[2], NULL, 0);
    if (ac >= 4)
	msg = av[3];

    buf = mmap(NULL, bytes * 2 + PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANON, -1, 0);
    if (buf == MAP_FAILED) {
	perror("mmap/buffer");
	exit(1);
    }

    bzero(buf, bytes * 2 + PAGE_SIZE);

    printf("tests one-way pipe using direct-write buffer\n");
    if (pipe(fds)) {
	perror("pipe");
	exit(1);
    }
    if (fork() == 0) {
	/*
	 * child process
	 */
	int n;
	int i;

	close(fds[0]);
	buf += (bytes + PAGE_MASK) & ~PAGE_MASK;
	i = 0;
	for (;;) {
	    n = read(fds[1], buf + i, bytes - i);
	    if (n <= 0)
		break;
	    if (n + i == bytes)
		i = 0;
	    else
		i += n;
	}
	_exit(0);
    } else {
	/* 
	 * parent process.
	 */
	if (ppri != 999) {
	    if (setpriority(PRIO_PROCESS, getpid(), ppri) < 0) {
		perror("setpriority");
		exit(1);
	    }
	}
	close(fds[1]);

	/*
	 * Figure out how many loops it takes for 1 second's worth.
	 */
	start_timing();
	for (j = 0; ; ++j) {
	    if (write(fds[0], buf, bytes) != bytes) {
		perror("write");
		exit(1);
	    }
	    if ((j & 31) == 0 && stop_timing(0, NULL))
		break;
	}
	loops = j * 2 + 1;
	loops *= 2;
	usleep(1000000 / 10);
	start_timing();

	for (j = loops; j; --j) {
	    if (write(fds[0], buf, bytes) != bytes) {
		perror("write");
		exit(1);
	    }
	}
	close(fds[0]);
	while(wait(NULL) >= 0)
	    ;
	stop_timing(loops, "full duplex pipe / %dK bufs:", bytes / 1024);
	printf("%s: blkSize %d %5.2f MBytes/sec\n",
		msg,
		bytes,
		(double)loops * bytes * 1000000.0 / 
		(1024.0 * 1024.0 * get_timing()));
    }
    return(0);
}
Example #7
void TooltipTT::launch(GdkEventMotion event, Gtk::Widget* win)
{
	hide() ;
	// disconnect last timeout
	timeout.disconnect() ;
	// launch new
	timeout = Glib::signal_timeout().connect(sigc::bind<GdkEventMotion,Gtk::Widget*>(sigc::mem_fun(this, &TooltipTT::display), event, win), get_timing()) ;
}
Example #8
//Changes the flash chip/bus status and
//controls the flash memory.
int run_nand_operation(int p_channel, int p_way)
{
	struct event_queue_node eq_node;
	struct ftl_request ftl_req;
	struct nand_chip *chip;
	struct dte_request dte_req;
	int plane, block, page;
	int op_result = 0;
	int i, j;
	int msb_page_flag = 0;
	int lsb_page_index;
	long long delay;
	int sector_offset;

	fm_status->wq[p_channel][p_way].status = OP_STARTED;
	ftl_req = fm_status->wq[p_channel][p_way].ftl_req;
	chip = &fm.buses[p_channel].chips[p_way];
	plane = addr_to_plane(ftl_req.addr);
	block = addr_to_block(ftl_req.addr);
	page = addr_to_page(ftl_req.addr);
	sector_offset = addr_to_sector_offset(ftl_req.addr);

	if (check_cmd_validity(ftl_req.cmd, chip->cmd) == FAIL)
	{
		chip->cmd = IDLE;
		fm_status->wq[p_channel][p_way].status = IDLE;
		if (ftl_req.cmd == READ_FINISH || ftl_req.cmd == PAGE_PROGRAM_FINISH || ftl_req.cmd == BLOCK_ERASE)
		{
			ftl_req.ack = INVALID_CMD_CHAIN;
			put_reorder_buffer(ftl_req);
		}
		set_bus_idle(p_channel);
		set_chip_idle(p_channel, p_way);

		dynamic_scheduling();
		return FAIL;
	}
	
	ftl_req.ack = SUCCESS;
	eq_node.ftl_req = ftl_req;
	QueryPerformanceCounter(&eq_node.time);
	//for debugging
	QueryPerformanceCounter(&eq_node.ftl_req.start_tick);
// 	cmd_to_char(ftl_req.cmd, char_cmd);
// 	printf("start tick\t: %16I64d, ID: %3d, cmd: %s\n", eq_node.ftl_req.start_tick.QuadPart, eq_node.ftl_req.id, char_cmd);
	delay = get_timing(ftl_req, ftl_req.addr);
	eq_node.time.QuadPart += delay;
	eq_node.time_offset = 0;
	eq_node.dst = FOU;
	
	enqueue_event_queue(eq, eq_node);

	msb_page_flag = is_msb_page(ftl_req.addr);
	
	switch (ftl_req.cmd)
	{
	case READ:
		op_result = sync_fault_gen(ftl_req.cmd, ftl_req.addr); //UECC_ERROR;
		chip->status = op_result;
		chip->planes[plane].reg_addr = ftl_req.addr;
#ifdef DATA_TRANSFER_ENGINE
		dte_req.deadline = 0;
		dte_req.dst = ftl_req.data;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = chip->planes[plane].blocks[block].pages[page].data + sector_offset * SIZE_OF_SECTOR;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif

		if (chip->cmd == READ_MP)
		{
			chip->cmd = READ;
			for (i = 0; i < PLANES_PER_CHIP; i++)
			{
				unreliable_read_violation(chip->planes[i].blocks[block].pages[page].state);
#ifdef MEMCPY
				memcpy(chip->planes[i].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
					chip->planes[i].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
					ftl_req.length * SIZE_OF_SECTOR);
#endif
			}
		}
		else
		{
			unreliable_read_violation(chip->planes[plane].blocks[block].pages[page].state);
			chip->cmd = READ;
#ifdef MEMCPY
			memcpy(chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
				chip->planes[plane].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
				ftl_req.length * SIZE_OF_SECTOR);
#endif
		}
		//chip->status = sync_fault()
		break;

	case READ_MP:
		chip->planes[plane].reg_addr = ftl_req.addr;
		chip->cmd = READ_MP;

#ifdef DATA_TRANSFER_ENGINE
		dte_req.deadline = 0;
		dte_req.dst = ftl_req.data;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = chip->planes[plane].blocks[block].pages[page].data + sector_offset * SIZE_OF_SECTOR;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;

	case DATA_OUT:
#ifdef MEMCPY
		memcpy(fm_status->wq[p_channel][p_way].ftl_req.data,
			chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
			ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
		pthread_mutex_lock(&dte_req_q->mutex);
		set_dte_request_deadline(dte_req_q, ftl_req.id * PLANES_PER_CHIP + plane, eq_node.time.QuadPart);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;

	case READ_FINISH:
#ifdef DATA_TRANSFER_ENGINE
		pthread_mutex_lock(&dte_req_q->mutex);
		if (is_data_transfer_done(dte_req_q, ftl_req.id * PLANES_PER_CHIP + plane) == 0)
		{
			printf("data transfer is not done!\n");
			assert(0);
		}
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		chip->cmd = IDLE;
		break;

	case BLOCK_ERASE:
		op_result = sync_fault_gen(ftl_req.cmd, ftl_req.addr); //UECC_ERROR;
		chip->status = op_result;
		chip->planes[plane].reg_addr = ftl_req.addr;

		if (chip->cmd == BLOCK_ERASE_MP)
		{
			for (j = 0; j < PLANES_PER_CHIP; j++)
			{
				chip->planes[j].blocks[block].last_programmed_page = 0;
				chip->planes[j].blocks[block].pecycle++;
				chip->planes[j].blocks[block].block_access_mode = chip->current_access_mode;

				for (i = 0; i < PAGES_PER_BLOCK; i++)
				{
#ifdef MEMCPY
					memset(chip->planes[j].blocks[block].pages[i].data, 0xff, SIZE_OF_PAGE);
#endif
					chip->planes[j].blocks[block].pages[i].nop = 0;
					chip->planes[j].blocks[block].pages[i].state = page_state_transition(chip->planes[j].blocks[block].pages[i].state, op_result);
				}
			}
		}
		else
		{
			chip->cmd = BLOCK_ERASE;
			for (i = 0; i < PAGES_PER_BLOCK; i++)
			{
				chip->planes[plane].blocks[block].last_programmed_page = 0;
				chip->planes[plane].blocks[block].pecycle++;
				chip->planes[plane].blocks[block].block_access_mode = chip->current_access_mode;
#ifdef MEMCPY
				memset(chip->planes[plane].blocks[block].pages[i].data, 0xff, SIZE_OF_PAGE);
#endif
				chip->planes[plane].blocks[block].pages[i].nop = 0;
				chip->planes[plane].blocks[block].pages[i].state = page_state_transition(chip->planes[plane].blocks[block].pages[i].state, op_result);
			}
		}
		//chip->status = sync_fault()
		break;

	case BLOCK_ERASE_MP:
		chip->cmd = BLOCK_ERASE_MP;
		chip->planes[plane].reg_addr = ftl_req.addr;	
		break;

	case READ_STATUS:
#ifdef MEMCPY
		memcpy(ftl_req.data, &chip->status, 1);
#endif
		break;

	case PAGE_PROGRAM:
		if (chip->cmd != PAGE_PROGRAM_MP)
		{
			chip->cmd = PAGE_PROGRAM;
		}
		chip->planes[plane].reg_addr = ftl_req.addr;
#ifdef MEMCPY
		memcpy(chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
			ftl_req.data,
			ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
		chip->planes[plane].shadow_buffer = (char *)malloc(SIZE_OF_PAGE);
		
		dte_req.deadline = 0;
		dte_req.dst = chip->planes[plane].shadow_buffer;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = ftl_req.data;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;
	
	case PAGE_PROGRAM_MP:
		chip->cmd = PAGE_PROGRAM_MP;
		chip->planes[plane].reg_addr = ftl_req.addr;
#ifdef MEMCPY
		memcpy(chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
			ftl_req.data,
			ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
		chip->planes[plane].shadow_buffer = (char *)malloc(SIZE_OF_PAGE);

		dte_req.deadline = 0;
		dte_req.dst = chip->planes[plane].shadow_buffer;
		dte_req.id = ftl_req.id * PLANES_PER_CHIP + plane;
		dte_req.size = ftl_req.length * SIZE_OF_SECTOR;
		dte_req.src = ftl_req.data;

		pthread_mutex_lock(&dte_req_q->mutex);
		dte_request_enqueue(dte_req_q, dte_req);
		pthread_mutex_unlock(&dte_req_q->mutex);
#endif
		break;

	case PAGE_PROGRAM_FINISH:
		op_result = sync_fault_gen(ftl_req.cmd, ftl_req.addr); //UECC_ERROR;
		chip->status = op_result;
		if (chip->cmd != PAGE_PROGRAM_MP)
		{
			ascending_order_program_violation(chip->planes[plane].blocks[block].last_programmed_page, page);
			program_after_erase_violation(chip->planes[plane].blocks[block].pages[page].state);
			nop_violation(chip->planes[plane].blocks[block].pages[page].nop, chip->planes[plane].blocks[block].block_access_mode);

			chip->planes[plane].blocks[block].last_programmed_page++;
			chip->planes[plane].blocks[block].pages[page].nop++;
#ifdef MEMCPY
			memcpy(chip->planes[plane].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
				chip->planes[plane].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
				ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
			pthread_mutex_lock(&dte_req_q->mutex);
			set_dte_request_deadline(dte_req_q, ftl_req.id * PLANES_PER_CHIP + plane, eq_node.time.QuadPart);
			pthread_mutex_unlock(&dte_req_q->mutex);
#endif

			chip->planes[plane].blocks[block].pages[page].state = page_state_transition(chip->planes[plane].blocks[block].pages[page].state, op_result);
			if (op_result == PROGRAM_PF || op_result == PROGRAM_IF && msb_page_flag)
			{
				lsb_page_index = get_lsb_page(ftl_req.addr);
				chip->planes[plane].blocks[block].pages[lsb_page_index].state = lsb_page_state_transition(chip->planes[plane].blocks[block].pages[lsb_page_index].state, op_result);
			}
		}
		else
		{
			for (i = 0; i < PLANES_PER_CHIP; i++)
			{
				ascending_order_program_violation(chip->planes[i].blocks[block].last_programmed_page, page);
				program_after_erase_violation(chip->planes[i].blocks[block].pages[page].state);
				nop_violation(chip->planes[i].blocks[block].pages[page].nop, chip->planes[i].blocks[block].block_access_mode);

				chip->planes[i].blocks[block].last_programmed_page++;
				chip->planes[i].blocks[block].pages[page].nop++;
#ifdef MEMCPY
				memcpy(chip->planes[i].blocks[block].pages[page].data + (sector_offset * SIZE_OF_SECTOR / 4),
					chip->planes[i].page_buffer + (sector_offset * SIZE_OF_SECTOR / 4),
					ftl_req.length * SIZE_OF_SECTOR);
#endif
#ifdef DATA_TRANSFER_ENGINE
				pthread_mutex_lock(&dte_req_q->mutex);
				set_dte_request_deadline(dte_req_q, ftl_req.id * PLANES_PER_CHIP + i, eq_node.time.QuadPart);
				pthread_mutex_unlock(&dte_req_q->mutex);
#endif

				chip->planes[i].blocks[block].pages[page].state = page_state_transition(chip->planes[i].blocks[block].pages[page].state, op_result);
				if (op_result == PROGRAM_PF || op_result == PROGRAM_IF && msb_page_flag)
				{
					lsb_page_index = get_lsb_page(ftl_req.addr);
					chip->planes[i].blocks[block].pages[lsb_page_index].state = lsb_page_state_transition(chip->planes[i].blocks[block].pages[lsb_page_index].state, op_result);
				}
			}
		}

		break;

	case RESET:
		chip->cmd = IDLE;
		chip->status = IDLE;
		chip->current_access_mode = MLC_MODE;
		for (i = 0; i < PLANES_PER_CHIP; i++)
		{
			chip->planes[i].reg_addr = 0;
		}
		break;

	case CHANGE_ACCESS_MODE:
		chip->current_access_mode = *ftl_req.data;
		break;

	default :
		break;
	}

	return SUCCESS;
}
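In Example #8, get_timing(ftl_req, ftl_req.addr) returns the simulated latency of the requested NAND command, which is added to the current QueryPerformanceCounter() tick to timestamp the completion event before it is enqueued. The sketch below shows that schedule-a-completion-event pattern in portable C; the command set, the latency table and the nanosecond clock are illustrative stand-ins for the simulator's real types and tick source.

/* Sketch only: latency table, command enum and clock are assumptions. */
#include <stdio.h>
#include <time.h>

enum nand_cmd { NAND_READ, NAND_PAGE_PROGRAM, NAND_BLOCK_ERASE };

/* hypothetical per-command latencies in microseconds */
static const long long nand_latency_us[] = {
	[NAND_READ]         = 60,
	[NAND_PAGE_PROGRAM] = 800,
	[NAND_BLOCK_ERASE]  = 3000,
};

struct event_node {
	enum nand_cmd cmd;
	long long     due_ns;   /* absolute completion time */
};

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* stand-in for get_timing(ftl_req, ftl_req.addr) */
static long long get_timing(enum nand_cmd cmd)
{
	return nand_latency_us[cmd] * 1000LL;
}

int main(void)
{
	struct event_node ev = { .cmd = NAND_PAGE_PROGRAM };

	/* eq_node.time.QuadPart += delay in the simulator */
	ev.due_ns = now_ns() + get_timing(ev.cmd);
	printf("program completion event due at %lld ns\n", ev.due_ns);
	/* enqueue_event_queue(eq, eq_node) would follow here */
	return 0;
}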