/*
 * stheno_request_thread - kernel thread that drains the block request queue.
 *
 * Sleeps on stheno_wait_q until either kthread_should_stop() or the
 * stheno_wakeup flag is raised (presumably set by the queue's request_fn;
 * not visible here - confirm against the caller).  Each wakeup drains the
 * queue completely, handing one current segment at a time to the
 * euryale_read_process()/euryale_write_process() backends.
 *
 * Locking: blk_fetch_request() and __blk_end_request_cur() must be called
 * with the queue lock held, while the euryale_* transfers are done
 * unlocked, hence the lock/unlock bracketing around every queue call.
 *
 * Returns 0 when the thread is asked to stop (or interrupted by a signal).
 */
static int stheno_request_thread( void *arg )
{
    struct request *req;
    int ret;

    while( 1 ){
        /* Sleep until woken; a non-zero return means a signal arrived,
         * in which case the thread exits. */
        ret = wait_event_interruptible( stheno_wait_q, (kthread_should_stop() || stheno_wakeup == 1) );
        if( ret != 0 ) break;

        /* Consume the wakeup flag before draining the queue. */
        stheno_wakeup = 0;

        if( kthread_should_stop() ) break;

        /* Drain every request currently on the queue. */
        while( 1 ){
            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
        next_segment:
            /* Queue empty: go back to sleep. */
            if( req == NULL ) break;

            /* Fail anything that is not a filesystem request.
             * __blk_end_request_cur() returns true while the request
             * still has segments left, so jump back and process the
             * next segment of the same request. */
            if( !blk_fs_request( req ) ){
                /*blk_end_request_cur( req, -EIO );*/
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            /* Lazily read sector 0 to establish stheno_lbaoffset;
             * on failure the request cannot be addressed correctly,
             * so fail the current segment. */
            if( stheno_read_sector0() != 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            /* Zero-length requests are malformed: fail them. */
            if( blk_rq_sectors( req ) == 0 || blk_rq_cur_sectors( req ) == 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            /* rq_data_dir() == 0 is READ; transfer only the current
             * segment (blk_rq_cur_sectors) at the partition-relative
             * position shifted by stheno_lbaoffset. */
            if( rq_data_dir( req ) == 0 ){
                ret = euryale_read_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }else{
                ret = euryale_write_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }
            /*blk_end_request_cur( req, ret == 0 ? 0 : -EIO );*/
            /* Complete the segment with the transfer's status; loop on
             * the same request while segments remain. */
            spin_lock_irq( stheno_queue->queue_lock );
            ret = __blk_end_request_cur( req, ret == 0 ? 0 : -EIO );
            spin_unlock_irq( stheno_queue->queue_lock );
            if( ret == true ) goto next_segment;
        }
    }
    print_debug("stheno_request_thread was terminated.\n");
    return 0;
}
static int stheno_read_sector0( void ) { char boot_sector[SECTOR_SIZE]; int ret; if( stheno_read_sector0_flag == 0 ) return 0; ret = euryale_api_init(); if( ret != 0 ){ print_error( "stheno euryale_api_init failed.\n" ); return -1; } #if defined( BACKWARD_COMPATIBILITY ) if( euryale_need_backwardcompatibility() ){ print_info( "stheno backward compatibility enable.\n" ); stheno_lbaoffset = 0; goto exit; } #endif ret = euryale_read_process( 0, 1, boot_sector ); if( ret != 0 ){ print_error( "stheno cannot read sector #0.\n" ); return -1; } if( (boot_sector[0] == 0xEB && boot_sector[2] == 0x90) || boot_sector[0] == 0xE9 ){ /* Boot Sector(FAT12/16/32) */ stheno_lbaoffset = 0; }else{ /* Master Boot Record */ /* PT_LbaOfs in MBR_Partation1 */ memcpy( &stheno_lbaoffset, &boot_sector[446+8], sizeof( unsigned long ) ); stheno_lbaoffset = le32_to_cpu( stheno_lbaoffset ); } print_info( "stheno boot sector = %x %x %x\n", (int)boot_sector[0] & 0xFF, (int)boot_sector[1] & 0xFF, (int)boot_sector[2] & 0xFF ); print_info( "stheno LBA offset = %ld\n", stheno_lbaoffset ); exit: stheno_read_sector0_flag = 0; return 0; }
/*
 * stheno_do_request - worker thread with a segment-coalescing buffer.
 *
 * Sleeps on stheno_process_q until stheno_processing is raised (presumably
 * by the queue's request_fn - confirm against the caller), then drains the
 * queue.  Multi-segment requests are staged in the file-scope seg_info
 * buffer so the euryale backend sees one large transfer instead of many
 * per-segment ones:
 *   - WRITE: segments are accumulated into seg_info.buffer and flushed in
 *     one euryale_write_process() call when the last segment arrives (or
 *     when a non-matching request forces a flush).
 *   - READ: the whole request's sectors are read at once into
 *     seg_info.buffer, then copied out segment by segment.
 * seg_info.dirty marks a partially staged request; seg_info.cmd records
 * its direction.  Requests that do not continue the staged segment fall
 * through to the direct per-segment path at the bottom.
 *
 * Locking: blk_fetch_request()/__blk_end_request_cur() are called under
 * the queue lock; euryale transfers run unlocked.
 * __blk_end_request_cur() returning true means more segments remain, so
 * control jumps back to next_segment for the same request.
 *
 * NOTE(review): unlike stheno_request_thread, this loop has no
 * kthread_should_stop() check and ignores the wait_event_interruptible()
 * return value, and the transfers do not apply stheno_lbaoffset -
 * presumably only one of the two threads is active in a given
 * configuration; confirm at the call sites.
 *
 * Always returns 0 (never reached in normal operation).
 */
static int stheno_do_request(void* arg)
{
    while(1) {
        wait_event_interruptible(stheno_process_q, stheno_processing == 1);
        /* Consume the wakeup flag before draining the queue. */
        stheno_processing = 0;
        do {
            struct request *req;
            int ret;

            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
        next_segment:
            /* Queue empty: go back to sleep. */
            if(req == NULL)break;
            /* ignore not fs cmd */
            if( ! blk_fs_request( req ) ) {
                printk( KERN_ERR "skip no fs request\n" );
                spin_lock_irq( stheno_queue->queue_lock );
                ret=__blk_end_request_cur(req, -EIO);
                spin_unlock_irq( stheno_queue->queue_lock );
                if(ret==true) goto next_segment;
                continue;
            }
            /* ignore nr_sectors == 0 request */
            if( blk_rq_sectors(req) == 0 || blk_rq_cur_sectors(req) == 0) {
                printk( KERN_ERR "skip nr_sectors == (%d) current_nr_sectors == (%d) request\n",
                        (int)blk_rq_sectors(req), (int)blk_rq_cur_sectors(req) );
                spin_lock_irq( stheno_queue->queue_lock );
                ret=__blk_end_request_cur(req, -EIO);
                spin_unlock_irq( stheno_queue->queue_lock );
                if(ret==true) goto next_segment;
                continue;
            }
            stheno_printk( KERN_NOTICE "stheno_request: REQUEST START! %s sect(%d) curr_count(%d) count(%d)\n",
                           (rq_data_dir( req ) == WRITE) ? "write" : "read",
                           (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
            if( seg_info.dirty ) {
                stheno_printk( KERN_NOTICE "stheno_request: %s dirty(%d) seg_start_sect(%d) seg_pos(%d) seg_nr_count(%d)\n",
                               (rq_data_dir( req ) == WRITE) ? "write" : "read",
                               (int)seg_info.dirty, (int)seg_info.start_sector,
                               (int)seg_info.current_pos, (int)seg_info.nr_sectors );
            }
            /* Direction changed while a segment is staged: flush a staged
             * WRITE to the backend (a staged READ can simply be dropped).
             * On flush failure the *current* request's segment is failed
             * with -EIO; note seg_info.dirty stays set on that path. */
            if( seg_info.dirty && rq_data_dir( req ) != seg_info.cmd ) {
                if( seg_info.cmd == WRITE ) {
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n",
                                   (int)seg_info.start_sector, (int)seg_info.current_pos);
                    ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) ret(%d) end\n",
                                   (int)seg_info.start_sector, (int)seg_info.current_pos, ret);
                    if( ret < 0 ) {
                        printk( KERN_ERR "stheno_request: segment write sect(%d) count(%d) ERROR\n",
                                (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        spin_lock_irq( stheno_queue->queue_lock );
                        ret=__blk_end_request_cur(req, -EIO);
                        spin_unlock_irq( stheno_queue->queue_lock );
                        if(ret==true) goto next_segment;
                        continue;
                    }
                }
                seg_info.dirty = 0;
            }
            if( rq_data_dir( req ) == WRITE ) {
                /* Multi-segment write and nothing staged yet: start a new
                 * staging buffer with this first segment and complete the
                 * segment immediately (data reaches the backend only on
                 * flush). */
                if( !seg_info.dirty && blk_rq_cur_sectors(req) < blk_rq_sectors(req) ) {
                    stheno_printk( KERN_NOTICE "stheno_request: new_segment(%d) cur_count(%d) count(%d)\n",
                                   (int)seg_info.dirty, (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    seg_info.cmd = rq_data_dir( req );
                    seg_info.start_sector = blk_rq_pos(req);
                    seg_info.nr_sectors = blk_rq_sectors(req);
                    memcpy( seg_info.buffer, req->buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos = blk_rq_cur_sectors(req);
                    seg_info.dirty = 1;
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret=__blk_end_request_cur(req, 0);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if(ret==true) goto next_segment;
                    continue;
                /* Segment continues the staged write (contiguous position
                 * and matching remaining size): append it, and flush the
                 * whole staged buffer once it is complete. */
                } else if( seg_info.dirty
                           && seg_info.start_sector + seg_info.current_pos == blk_rq_pos(req)
                           && seg_info.nr_sectors - seg_info.current_pos == blk_rq_sectors(req)
                           && seg_info.nr_sectors - seg_info.current_pos >= blk_rq_cur_sectors(req) ) {
                    stheno_printk( KERN_NOTICE "stheno_request: add_segment(%d) cur_count(%d) count(%d)\n",
                                   (int)seg_info.dirty, (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    memcpy( seg_info.buffer + (seg_info.current_pos * SECT_SIZE), req->buffer,
                            blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos += blk_rq_cur_sectors(req);
                    if( seg_info.current_pos == seg_info.nr_sectors ) {
                        stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n",
                                       (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                        stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) end\n",
                                       (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        seg_info.dirty = 0;
                        if( ret < 0 ) {
                            printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) ERROR\n",
                                    (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                            spin_lock_irq( stheno_queue->queue_lock );
                            ret=__blk_end_request_cur(req, -EIO);
                            spin_unlock_irq( stheno_queue->queue_lock );
                            if(ret==true) goto next_segment;
                            continue;
                        }
                    }
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret=__blk_end_request_cur(req, 0);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if(ret==true) goto next_segment;
                    continue;
                /* Staged write does not match this request: flush what was
                 * accumulated, then fall through to the direct path. */
                } else if( seg_info.dirty ) {
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n",
                                   (int)seg_info.start_sector, (int)seg_info.current_pos);
                    ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) end\n",
                                   (int)seg_info.start_sector, (int)seg_info.current_pos);
                    seg_info.dirty = 0;
                    if( ret < 0 ) {
                        printk( KERN_ERR "stheno_request: segment write sect(%d) count(%d) ERROR\n",
                                (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        spin_lock_irq( stheno_queue->queue_lock );
                        ret=__blk_end_request_cur(req, -EIO);
                        spin_unlock_irq( stheno_queue->queue_lock );
                        if(ret==true) goto next_segment;
                        continue;
                    }
                }
            } else /* READ */ {
                /* Multi-segment read and nothing staged: read the whole
                 * request in one backend call, then serve this and the
                 * following segments out of seg_info.buffer. */
                if( !seg_info.dirty && blk_rq_cur_sectors(req) < blk_rq_sectors(req) ) {
                    seg_info.cmd = rq_data_dir( req );
                    seg_info.start_sector = blk_rq_pos(req);
                    seg_info.nr_sectors = blk_rq_sectors(req);
                    stheno_printk( KERN_NOTICE "stheno_request: segment read sect(%d) count(%d) start\n",
                                   (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                    ret = euryale_read_process(seg_info.start_sector, seg_info.nr_sectors, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment read sect(%d) count(%d) end\n",
                                   (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                    if( ret < 0 ) {
                        printk( KERN_ERR "stheno_request: segment read sect(%d) count(%d) ERROR\n",
                                (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        spin_lock_irq( stheno_queue->queue_lock );
                        ret=__blk_end_request_cur(req, -EIO);
                        spin_unlock_irq( stheno_queue->queue_lock );
                        if(ret==true) goto next_segment;
                        continue;
                    }
                    memcpy(req->buffer, seg_info.buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos = blk_rq_cur_sectors(req);
                    seg_info.dirty = 1;
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret=__blk_end_request_cur(req, 0);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if(ret==true) goto next_segment;
                    continue;
                /* Segment continues the staged read: copy the next slice
                 * out of the staging buffer; release it when exhausted. */
                } else if( seg_info.dirty
                           && seg_info.start_sector + seg_info.current_pos == blk_rq_pos(req)
                           && seg_info.nr_sectors - seg_info.current_pos == blk_rq_sectors(req)
                           && seg_info.nr_sectors - seg_info.current_pos >= blk_rq_cur_sectors(req) ) {
                    memcpy(req->buffer, seg_info.buffer + (seg_info.current_pos * SECT_SIZE),
                           blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos += blk_rq_cur_sectors(req);
                    if( seg_info.current_pos == seg_info.nr_sectors ) {
                        seg_info.dirty = 0;
                    }
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret=__blk_end_request_cur(req, 0);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if(ret==true) goto next_segment;
                    continue;
                /* Staged data does not match: discard it (reads need no
                 * flush) and fall through to the direct path. */
                } else {
                    seg_info.dirty = 0;
                }
            }
            /* Direct path: transfer just the current segment. */
            if( rq_data_dir( req ) == WRITE ) {
                stheno_printk( KERN_NOTICE "stheno_request: write sect(%d) cur_count(%d) count(%d) start\n",
                               (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                ret = euryale_write_process(blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer);
                stheno_printk( KERN_NOTICE "stheno_request: write sect(%d) cur_count(%d) count(%d) end\n",
                               (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                if( ret < 0 ) {
                    printk( KERN_ERR "stheno_request: write sect(%d) cur_count(%d) count(%d) ERROR\n",
                            (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret=__blk_end_request_cur(req, -EIO);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if(ret==true) goto next_segment;
                    continue;
                }
            } else {
                stheno_printk( KERN_NOTICE "stheno_request: read sect(%d) cur_count(%d) count(%d) start\n",
                               (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                ret = euryale_read_process(blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer);
                stheno_printk( KERN_NOTICE "stheno_request: read sect(%d) cur_count(%d) count(%d) end\n",
                               (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                if( ret < 0 ) {
                    printk( KERN_ERR "stheno_request: read sect(%d) cur_count(%d) count(%d) ERROR\n",
                            (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret=__blk_end_request_cur(req, -EIO);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if(ret==true) goto next_segment;
                    continue;
                }
            }
            /* Segment done: complete it and loop while more remain. */
            spin_lock_irq( stheno_queue->queue_lock );
            ret=__blk_end_request_cur(req, 0);
            spin_unlock_irq( stheno_queue->queue_lock );
            if(ret==true) goto next_segment;
        } while(1);
    }
    return 0;
}