bool
ide_wait( ide_device_info *device, int mask, int not_mask, bool check_err,
	bigtime_t timeout )
{
	ide_bus_info *bus = device->bus;
	bigtime_t start_time = system_time();

	while( 1 ) {
		bigtime_t elapsed_time;
		int status;

		cpu_spin( 1 );

		status = bus->controller->get_altstatus( bus->channel );

		if( (status & mask) == mask && (status & not_mask) == 0 )
			return true;

		if( check_err && (status & ide_status_err) != 0 ) {
			set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
			return false;
		}

		elapsed_time = system_time() - start_time;

		if( elapsed_time > timeout ) {
			set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
			return false;
		}

		if( elapsed_time > 3000 )
			thread_snooze( elapsed_time / 10 );
	}
}
bool
device_start_service(ide_device_info *device, int *tag)
{
	ide_bus_info *bus = device->bus;

	device->tf.write.command = IDE_CMD_SERVICE;
	device->tf.queued.mode = ide_mode_lba;

	if (bus->active_device != device) {
		// don't apply any precautions in terms of IRQ
		// -> the bus is in accessing state, so IRQs are ignored anyway
		if (bus->controller->write_command_block_regs(bus->channel_cookie,
				&device->tf, ide_mask_device_head) != B_OK)
			// on error, pretend that this device asks for service
			// -> the disappeared controller will be recognized soon ;)
			return true;

		bus->active_device = device;

		// give one clock (400 ns) to take notice
		spin(1);
	}

	// here we go...
	if (bus->controller->write_command_block_regs(bus->channel_cookie,
			&device->tf, ide_mask_command) != B_OK)
		goto err;

	// we need to wait for the device as we want to read the tag
	if (!ide_wait(device, ide_status_drdy, ide_status_bsy, false, 1000000))
		return false;

	// read tag
	if (bus->controller->read_command_block_regs(bus->channel_cookie,
			&device->tf, ide_mask_sector_count) != B_OK)
		goto err;

	if (device->tf.queued.release) {
		// bus release is the wrong answer to a service request
		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
		return false;
	}

	*tag = device->tf.queued.tag;

	return true;

err:
	set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
	return false;
}
void
ata_dpc_DMA(ide_qrequest *qrequest)
{
	ide_device_info *device = qrequest->device;
	bool dma_success, dev_err;

	dma_success = finish_dma(device);
	dev_err = check_rw_error(device, qrequest);

	if (dma_success && !dev_err) {
		// reset error count if DMA worked
		device->DMA_failures = 0;
		device->CQ_failures = 0;
		qrequest->request->data_resid = 0;
		finish_checksense(qrequest);
	} else {
		SHOW_ERROR0( 2, "Error in DMA transmission" );

		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);

		if (++device->DMA_failures >= MAX_DMA_FAILURES) {
			SHOW_ERROR0( 2, "Disabled DMA because of too many errors" );
			device->DMA_enabled = false;
		}

		// reset queue in case queuing is active
		finish_reset_queue(qrequest);
	}
}
static void
finish_setsense( ide_device_info *device, ide_qrequest *qrequest,
	int sense_key, int sense_asc )
{
	set_sense( device, sense_key, sense_asc );
	finish_checksense( qrequest );
}
int cmd_smell(string str)
{
	set_sense("smell");
	set_stealth(1);
	set_msg_failenvdef("You notice no interesting odors.");
	set_msg_failobdef("You don't smell anything from $O.");
	set_msg_toenvdef("$N smells around.");
	return ::cmd(str);
}
/*!	Emulate INQUIRY command */
static void
ata_inquiry(ide_device_info *device, ide_qrequest *qrequest)
{
	scsi_ccb *request = qrequest->request;
	scsi_res_inquiry data;
	scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)request->cdb;
	uint32 allocation_length = cmd->allocation_length;
	uint32 transfer_size;

	if (cmd->evpd || cmd->page_code) {
		set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	memset(&data, 0, sizeof(data));

	data.device_type = scsi_dev_direct_access;
	data.device_qualifier = scsi_periph_qual_connected;

	data.device_type_modifier = 0;
	data.removable_medium = false;

	data.ansi_version = 2;
	data.ecma_version = 0;
	data.iso_version = 0;

	data.response_data_format = 2;
	data.term_iop = false;
		// to be changed if we support TERM I/O

	data.additional_length = sizeof(scsi_res_inquiry) - 4;

	data.soft_reset = false;
	data.cmd_queue = device->queue_depth > 1;
	data.linked = false;

	// these values are free-style
	data.sync = false;
	data.write_bus16 = true;
	data.write_bus32 = false;

	data.relative_address = false;

	// the following fields are *much* too small, sigh...
	memcpy(data.vendor_ident, device->infoblock.model_number,
		sizeof(data.vendor_ident));
	memcpy(data.product_ident, device->infoblock.model_number + 8,
		sizeof(data.product_ident));
	memcpy(data.product_rev, " ", sizeof(data.product_rev));

	copy_sg_data(request, 0, allocation_length, &data, sizeof(data), false);

	transfer_size = min(sizeof(data), allocation_length);
	transfer_size = min(transfer_size, request->data_length);

	request->data_resid = request->data_length - transfer_size;
}
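The tail of ata_inquiry() only does clamping arithmetic: the number of bytes actually transferred is the minimum of the emulated INQUIRY data size, the CDB's allocation length, and the request buffer size, and the remainder of the buffer is reported back as data_resid. A tiny standalone illustration of that calculation; the min_sz() helper and the sample values are made up, not taken from the driver:

#include <stddef.h>
#include <stdio.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t inquiry_data_size = 36;	// standard INQUIRY response, example only
	size_t allocation_length = 96;	// what the initiator asked for in the CDB
	size_t data_length = 96;	// size of the request's data buffer

	size_t transferred = min_sz(min_sz(inquiry_data_size, allocation_length),
		data_length);

	printf("transferred=%zu resid=%zu\n", transferred, data_length - transferred);
	return 0;
}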
static bool
check_rw_status( ide_device_info *device, ide_qrequest *qrequest,
	bool drq_required )
{
	ide_bus_info *bus = device->bus;
	int status;

	status = bus->controller->get_altstatus( bus->channel );

	if( (status & ide_status_bsy) != 0 ) {
		set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
		return false;
	}

	if( drq_required != ((status & ide_status_drq) != 0) ) {
		set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
		return false;
	}

	return true;
}
static int setup_rx(int new_in_pin)
{
	int ret, irq;

	if (gpio_in_pin == new_in_pin)
		return 0; // do not set up, pin not changed

	if (gpio_in_pin != 0) {
		// we had an rx pin set up. Free it so others can use it!
		dprintk(": trying to free old in pin index %d: %s\n",
			gpio_in_pin, gpiochip->names[RX_OFFSET_GPIOCHIP]);
		gpio_free(gpio_in_pin);
		irqchip = NULL;
		irqdata = NULL;
	}

	gpio_in_pin = new_in_pin;

	if (gpio_in_pin == 0)
		return 0; // do not set up, RX disabled

	dprintk(": trying to claim new in pin index %d: %s\n",
		gpio_in_pin, gpiochip->names[RX_OFFSET_GPIOCHIP]);

	ret = gpio_request(gpio_in_pin, LIRC_DRIVER_NAME " ir/in");
	if (ret) {
		printk(KERN_ALERT LIRC_DRIVER_NAME
			": can't claim gpio pin %d with code %d\n", gpio_in_pin, ret);
		ret = -ENODEV;
		goto exit_disable_rx;
	}

	gpiochip->direction_input(gpiochip, RX_OFFSET_GPIOCHIP);

	/* try to set up interrupt data */
	irq = gpiochip->to_irq(gpiochip, RX_OFFSET_GPIOCHIP);
	dprintk("to_irq %d for pin %d\n", irq, gpio_in_pin);
	irqdata = irq_get_irq_data(irq);

	if (irqdata && irqdata->chip) {
		irqchip = irqdata->chip;
	} else {
		ret = -ENODEV;
		goto exit_gpio_free_in_pin;
	}

	set_sense();

	return 0; // successfully set up

exit_gpio_free_in_pin:
	// interrupt setup failed, so free the pin
	gpio_free(gpio_in_pin);

exit_disable_rx:
	// could not claim new pin
	gpio_in_pin = 0; // disable rx

	return ret;
}
bool
ide_wait(ide_device_info *device, int mask, int not_mask, bool check_err,
	bigtime_t timeout)
{
	ide_bus_info *bus = device->bus;
	bigtime_t start_time = system_time();

	while (1) {
		bigtime_t elapsed_time;
		int status;

		// do spin before test as the device needs 400 ns
		// to update its status register
		spin(1);

		status = bus->controller->get_altstatus(bus->channel_cookie);

		if ((status & mask) == mask && (status & not_mask) == 0)
			return true;

		if (check_err && (status & ide_status_err) != 0) {
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR,
				SCSIS_ASC_INTERNAL_FAILURE);
			return false;
		}

		elapsed_time = system_time() - start_time;

		if (elapsed_time > timeout) {
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
			return false;
		}

		// if we've waited more than 5 ms, we start passive waiting
		// to reduce system load
		if (elapsed_time > 5000)
			snooze(elapsed_time / 10);
	}
}
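The loop above is a generic poll-with-backoff: spin on the status register, and once a few milliseconds have passed, sleep for a fraction of the elapsed time instead of burning CPU, until the caller's timeout expires. A minimal standalone sketch of the same pattern, with a fake status register and POSIX timing calls standing in for get_altstatus(), system_time() and snooze(); all names and values here are illustrative, not part of the driver:

/* build: cc -std=c99 wait_demo.c */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define STATUS_BSY 0x80
#define STATUS_DRQ 0x08

static int64_t now_us(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* fake device: busy for the first 20 ms, then DRQ set */
static int64_t power_on;

static int read_status(void)
{
	return now_us() - power_on < 20000 ? STATUS_BSY : STATUS_DRQ;
}

/* wait until all bits in 'mask' are set and all bits in 'not_mask' are clear */
static bool wait_for_status(int mask, int not_mask, int64_t timeout_us)
{
	int64_t start = now_us();

	while (1) {
		int status = read_status();

		if ((status & mask) == mask && (status & not_mask) == 0)
			return true;

		int64_t elapsed = now_us() - start;
		if (elapsed > timeout_us)
			return false;

		// after ~5 ms of busy-waiting, sleep a growing fraction of the
		// elapsed time so the poll interval backs off gradually
		if (elapsed > 5000) {
			struct timespec delay = { 0, (long)(elapsed / 10) * 1000 };
			nanosleep(&delay, NULL);
		}
	}
}

int main(void)
{
	power_on = now_us();
	printf("ready: %d\n", wait_for_status(STATUS_DRQ, STATUS_BSY, 1000000));
	return 0;
}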
/*!	Transmit physically contiguous data */
static inline status_t
transfer_PIO_physcont(ide_device_info *device, addr_t physicalAddress,
	int length, bool write, int *transferred)
{
	// we must split up the chunk into B_PAGE_SIZE blocks as we can map only
	// one page into address space at once
	while (length > 0) {
		addr_t virtualAddress;
		void* handle;
		int page_left, cur_len;
		status_t err;
		struct thread* thread = thread_get_current_thread();

		SHOW_FLOW(4, "Transmitting to/from physical address %lx, %d bytes left",
			physicalAddress, length);

		thread_pin_to_current_cpu(thread);
		if (vm_get_physical_page_current_cpu(physicalAddress, &virtualAddress,
				&handle) != B_OK) {
			thread_unpin_from_current_cpu(thread);
			// ouch: this should never ever happen
			set_sense(device, SCSIS_KEY_HARDWARE_ERROR,
				SCSIS_ASC_INTERNAL_FAILURE);
			return B_ERROR;
		}

		// if the chunk starts in the middle of a page, we have even less than
		// a page left
		page_left = B_PAGE_SIZE - physicalAddress % B_PAGE_SIZE;

		SHOW_FLOW(4, "page_left=%d", page_left);

		cur_len = min(page_left, length);

		SHOW_FLOW(4, "cur_len=%d", cur_len);

		err = transfer_PIO_virtcont(device, (uint8 *)virtualAddress,
			cur_len, write, transferred);

		vm_put_physical_page_current_cpu(virtualAddress, handle);
		thread_unpin_from_current_cpu(thread);

		if (err != B_OK)
			return err;

		length -= cur_len;
		physicalAddress += cur_len;
	}

	return B_OK;
}
int cmd_listen(string str)
{
	string name, what;

	set_sense("listen");
	set_verb("listen to");
	set_msg_failenvdef("You hear nothing unusual.");
	set_msg_failenv("You don't hear that here.");
	set_msg_failobdef("There is nothing on $O to hear.");
	set_msg_failob("You don't hear that on $O.");
	set_msg_totgt("$N listens to you.");
	set_msg_toenvdef("$N listens intently.");
	set_msg_toenv("$N listens to $I.");
	set_msg_toenvown("$N listens to $P $R.");
	set_msg_toenvob("$N listens to $O.");

	if (str && str[0..2] == "to ")
		str = str[3..<1];

	if (str && sscanf(str, "%s about %s", name, what) == 2)
		str = what + " from " + name;

	return ::cmd(str);
}
/*!	Load or eject medium
	load = true - load medium
*/
static bool
ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
{
	if (load) {
		// ATA doesn't support loading
		set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST,
			SCSIS_ASC_PARAM_NOT_SUPPORTED);
		return false;
	}

	device->tf_param_mask = 0;
	device->tf.lba.command = IDE_CMD_MEDIA_EJECT;

	if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
		return false;

	wait_for_sync(device->bus);

	return check_output(device, true, ide_error_abrt | ide_error_nm, false);
}
/*!	Emulate READ CAPACITY command */
static void
read_capacity(ide_device_info *device, ide_qrequest *qrequest)
{
	scsi_ccb *request = qrequest->request;
	scsi_res_read_capacity data;
	scsi_cmd_read_capacity *cmd = (scsi_cmd_read_capacity *)request->cdb;
	uint32 lastBlock;

	if (cmd->pmi || cmd->lba) {
		set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	// TODO: 512 bytes fixed block size?
	data.block_size = B_HOST_TO_BENDIAN_INT32(512);

	lastBlock = device->total_sectors - 1;
	data.lba = B_HOST_TO_BENDIAN_INT32(lastBlock);

	copy_sg_data(request, 0, request->data_length, &data, sizeof(data), false);
	request->data_resid = max(request->data_length - sizeof(data), 0);
}
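read_capacity() above packs two 32-bit fields, the last addressable LBA and the block size, in big-endian order via B_HOST_TO_BENDIAN_INT32(). A standalone sketch of the same encoding without the Haiku byte-order macros; put_be32() and the sector count are illustrative only, not part of the driver:

#include <stdint.h>
#include <stdio.h>

// portable stand-in for B_HOST_TO_BENDIAN_INT32: serialize a 32-bit value
// into big-endian bytes, as the READ CAPACITY(10) response requires
static void put_be32(uint8_t *out, uint32_t value)
{
	out[0] = value >> 24;
	out[1] = value >> 16;
	out[2] = value >> 8;
	out[3] = value;
}

int main(void)
{
	uint32_t total_sectors = 1000215216;	// example sector count
	uint8_t data[8];

	put_be32(data, total_sectors - 1);	// last addressable LBA
	put_be32(data + 4, 512);		// block length in bytes

	for (int i = 0; i < 8; i++)
		printf("%02x ", data[i]);
	printf("\n");
	return 0;
}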
// new_state must be either accessing, async_waiting or sync_waiting
// param_mask must not include command register
bool
send_command( ide_device_info *device, bool need_drdy, bigtime_t timeout,
	ide_bus_state new_state )
{
	ide_bus_info *bus = device->bus;
	bigtime_t irq_disabled_at = 0;	// make compiler happy
	bool irq_guard = bus->num_running_reqs > 1;

	SHOW_FLOW0( 3, "" );

	reset_timeouts( device );

	if( irq_guard ) {
		if( bus->controller->write_device_control( bus->channel,
				ide_devctrl_nien | ide_devctrl_bit3 ) != NO_ERROR )
			goto err;

		irq_disabled_at = system_time();
	}

	if( bus->controller->write_command_block_regs( bus->channel,
			&device->tf, ide_mask_device_head ) != NO_ERROR )
		goto err;

	SHOW_FLOW0( 3, "1" );

	bus->active_device = device;

	if( !ide_wait( device, 0, ide_status_bsy | ide_status_drq, false, 50000 )) {
		uint8 status;

		SHOW_FLOW0( 1, "device is not ready" );

		status = bus->controller->get_altstatus( bus->channel );
		if( status == 0xff ) {
			// this shouldn't happen unless the device has died
			// as we only submit commands to existing devices
			// (only detection routines shoot at will)
			set_sense( device, SCSIS_KEY_HARDWARE_ERROR,
				SCSIS_ASC_INTERNAL_FAILURE );
			return false;
		}

		if( !reset_bus( device ))
			return false;

		SHOW_FLOW0( 1, "retrying" );

		if( bus->controller->write_command_block_regs( bus->channel,
				&device->tf, ide_mask_device_head ) != NO_ERROR )
			goto err;

		bus->active_device = device;

		if( !ide_wait( device, 0, ide_status_bsy | ide_status_drq, false, 50000 )) {
			// XXX this is not a device but a bus error, we should return
			// CAM_SEL_TIMEOUT instead
			SHOW_FLOW0( 1, "device is dead" );
			set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST,
				SCSIS_ASC_LUN_SEL_FAILED );
			return false;
		}
	}

	SHOW_FLOW0( 3, "3" );

	if( need_drdy
		&& (bus->controller->get_altstatus( bus->channel ) & ide_status_drdy) == 0 ) {
		SHOW_FLOW0( 3, "drdy not set" );
		set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
		return false;
	}

	SHOW_FLOW0( 3, "4" );

	if( bus->controller->write_command_block_regs( bus->channel,
			&device->tf, device->tf_param_mask ) != NO_ERROR )
		goto err;

	SHOW_FLOW0( 3, "5" );

	if( irq_guard ) {
		// enable IRQs now
		// an IRQ may be fired by service requests and by the process of
		// disabling(!) them (I heard this is caused by edge-triggered PCI IRQs)

		// wait at least 50 µs to catch all pending IRQs
		// (on my system, up to 30 µs elapsed)

		// additionally, old drives (at least my IBM-DTTA-351010) lose
		// sync if they are pushed too hard - on heavy overlapped write
		// stress this drive tends to forget outstanding requests;
		// waiting at least 50 µs seems(!) to solve this
		while( system_time() - irq_disabled_at < MAX_IRQ_DELAY )
			cpu_spin( 1 );
	}

	SHOW_FLOW0( 3, "6" );

	if( new_state != ide_state_accessing ) {
		IDE_LOCK( bus );
	}

	SHOW_FLOW( 3, "Writing command %x", (int)device->tf.write.command );

	if( bus->controller->write_command_block_regs( bus->channel,
			&device->tf, ide_mask_command ) != NO_ERROR )
		goto err2;

	SHOW_FLOW0( 3, "7" );

	if( irq_guard ) {
		if( bus->controller->write_device_control( bus->channel,
				ide_devctrl_bit3 ) != NO_ERROR )
			goto err1;
	}

	SHOW_FLOW0( 3, "8" );

	if( new_state != ide_state_accessing ) {
		start_waiting( bus, timeout, new_state );
	}

	SHOW_FLOW0( 3, "9" );

	return true;

err2:
	if( irq_guard )
		bus->controller->write_device_control( bus->channel, ide_devctrl_bit3 );

err1:
	if( timeout > 0 ) {
		bus->state = ide_state_accessing;
		IDE_UNLOCK( bus );
	}

err:
	set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
	return false;
}
//-------------------------------------------------------------------------
// Purpose       : Constructor
//
// Special Notes :
//
// Creator       : Xuechen Liu
//
// Creation Date : 08/02/96
//-------------------------------------------------------------------------
CoVertex::CoVertex(RefVertex* vertexPtr)
{
   attach_basic_topology_entity(vertexPtr) ;
   set_sense(CUBIT_FORWARD) ;
}
bool
device_start_service( ide_device_info *device, int *tag )
{
	ide_bus_info *bus = device->bus;
	bigtime_t irq_disabled_at = 0;	// makes compiler happy
	bool irq_guard = bus->num_running_reqs > 1;

	device->tf.write.command = IDE_CMD_SERVICE;
	device->tf.queued.mode = ide_mode_lba;

	reset_timeouts( device );
	//set_irq_state( device, ide_irq_state_ignore );

	if( irq_guard ) {
		irq_disabled_at = system_time();

		if( bus->controller->write_device_control( bus->channel,
				ide_devctrl_nien | ide_devctrl_bit3 ) != NO_ERROR )
			goto err;
	}

	if( bus->controller->write_command_block_regs( bus->channel,
			&device->tf, ide_mask_device_head ) != NO_ERROR )
		goto err;

	bus->active_device = device;

	if( !ide_wait( device, 0, ide_status_bsy | ide_status_drq, false, 50000 )) {
		// XXX this is not a device but a bus error, we should return
		// CAM_SEL_TIMEOUT instead
		set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
		return false;
	}

	if( irq_guard ) {
		// see send_ata
		while( system_time() - irq_disabled_at < MAX_IRQ_DELAY )
			cpu_spin( 1 );

		if( bus->controller->write_device_control( bus->channel,
				ide_devctrl_bit3 ) != NO_ERROR )
			goto err;
	}

	// here we go...
	if( bus->controller->write_command_block_regs( bus->channel,
			&device->tf, ide_mask_command ) != NO_ERROR )
		goto err;

	if( !ide_wait( device, ide_status_drdy, ide_status_bsy, false, 1000000 )) {
		set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
		return false;
	}

	if( bus->controller->read_command_block_regs( bus->channel,
			&device->tf, ide_mask_sector_count ) != NO_ERROR )
		goto err;

	if( device->tf.queued.release ) {
		// bus release is the wrong answer to a service request
		set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
		return false;
	}

	*tag = device->tf.queued.tag;

	return true;

err:
	set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
	return false;
}
bool
reset_bus(ide_device_info *device, ide_qrequest *ignore)
{
	ide_bus_info *bus = device->bus;
	ide_controller_interface *controller = bus->controller;
	ide_channel_cookie channel = bus->channel_cookie;

	dprintf("ide: reset_bus() device %p, bus %p\n", device, bus);

	if (device->reconnect_timer_installed) {
		cancel_timer(&device->reconnect_timer.te);
		device->reconnect_timer_installed = false;
	}

	if (device->other_device->reconnect_timer_installed) {
		cancel_timer(&device->other_device->reconnect_timer.te);
		device->other_device->reconnect_timer_installed = false;
	}

	// activate srst signal for 5 µs
	// also, deactivate IRQ
	// (as usual, we will get an IRQ on disabling, but as we leave them
	// disabled for 2 ms, this false report is ignored)
	if (controller->write_device_control(channel,
			ide_devctrl_nien | ide_devctrl_srst | ide_devctrl_bit3) != B_OK)
		goto err0;

	spin(5);

	if (controller->write_device_control(channel,
			ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
		goto err0;

	// let devices wake up
	snooze(2000);

	// ouch, we have to wait up to 31 seconds!
	if (!ide_wait(device, 0, ide_status_bsy, true, 31000000)) {
		// as we don't know which of the devices is broken,
		// we leave them both alive
		if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
			goto err0;

		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
		goto err1;
	}

	if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
		goto err0;

	finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
	finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);

	dprintf("ide: reset_bus() device %p, bus %p success\n", device, bus);
	return true;

err0:
	set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);

err1:
	finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
	finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);

	//xpt->call_async( bus->xpt_cookie, -1, -1, AC_BUS_RESET, NULL, 0 );

	dprintf("ide: reset_bus() device %p, bus %p failed\n", device, bus);
	return false;
}
// we need a device to store sense information
// (we could just take device 0, but this would not be fair if the reset
// was done because of a device 1 failure)
bool
reset_bus( ide_device_info *device )
{
	ide_bus_info *bus = device->bus;
	ide_controller_interface *controller = bus->controller;
	ide_channel_cookie channel = bus->channel;

	SHOW_FLOW0( 3, "" );

	// activate srst signal for 5 µs
	reset_timeouts( device );
	reset_timeouts( device->other_device );

	//set_irq_state( device, ide_irq_state_ignore );

	SHOW_FLOW0( 3, "1" );

	if( controller->write_device_control( channel,
			ide_devctrl_nien | ide_devctrl_srst | ide_devctrl_bit3 ) != NO_ERROR )
		goto err0;

	SHOW_FLOW0( 3, "2" );

	cpu_spin( 5 );

	if( controller->write_device_control( channel,
			ide_devctrl_nien | ide_devctrl_bit3 ) != NO_ERROR )
		goto err0;

	SHOW_FLOW0( 3, "3" );

	// let devices wake up
	thread_snooze( 2000 );

	SHOW_FLOW0( 3, "4" );

	// ouch, we have to wait up to 31 seconds!
	if( !ide_wait( device, 0, ide_status_bsy, false, 31000000 )) {
		// we don't know which of the devices is broken
		// so we don't disable them
		if( controller->write_device_control( channel,
				ide_devctrl_bit3 ) != NO_ERROR )
			goto err0;

		set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
		goto err1;
	}

	SHOW_FLOW0( 3, "5" );

	if( controller->write_device_control( channel, ide_devctrl_bit3 ) != NO_ERROR )
		goto err0;

	SHOW_FLOW0( 3, "6" );

	finish_all_requests( bus->devices[0], CAM_SCSI_BUS_RESET );
	finish_all_requests( bus->devices[1], CAM_SCSI_BUS_RESET );

	SHOW_FLOW0( 3, "7" );

	xpt->call_async( bus->xpt_cookie, -1, -1, AC_BUS_RESET, NULL, 0 );

	SHOW_FLOW0( 3, "8" );

	return true;

err0:
	set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );

err1:
	finish_all_requests( bus->devices[0], CAM_SCSI_BUS_RESET );
	finish_all_requests( bus->devices[1], CAM_SCSI_BUS_RESET );

	xpt->call_async( bus->xpt_cookie, -1, -1, AC_BUS_RESET, NULL, 0 );

	return false;
}
static void send_command(void *opaque, struct usb_msd_cbw *cbw, uint8_t *data, uint32_t len)
{
	MSDState *s = (MSDState *)opaque;
	DPRINTF("Command: lun=%d tag=0x%x len %zd data=0x%02x\n",
		cbw->lun, cbw->tag, cbw->data_len, cbw->cmd[0]);

	uint32_t lba;
	uint32_t xfer_len;
	s->last_cmd = cbw->cmd[0];

	switch (cbw->cmd[0]) {
	case TEST_UNIT_READY:
		//Do something?
		s->result = GOOD;
		set_sense(s, NO_SENSE, 0);
		/* If error */
		//s->result = CHECK_CONDITION;
		//set_sense(s, NOT_READY, 0);
		break;
	case REQUEST_SENSE:
		//device shall keep old sense data
		s->result = GOOD;
		//memcpy_s(s->buf, s->data_len, s->sense_buf, sizeof(s->sense_buf)); //not on !WINDOWS
		memcpy(s->buf, s->sense_buf, /* TODO or error out instead? */
			s->data_len < sizeof(s->sense_buf) ? s->data_len : sizeof(s->sense_buf));
		break;
	case INQUIRY:
		set_sense(s, NO_SENSE, 0);
		memset(s->buf, 0, sizeof(s->buf));
		s->off = 0;
		s->buf[0] = 0; //0x0 - direct access device, 0x1f - no fdd
		s->buf[1] = 1 << 7; //removable
		s->buf[3] = 1; //UFI response data format
		//inq data len can be zero
		memcpy(&s->buf[8], "QEMU", 4); //8 bytes vendor
		memcpy(&s->buf[16], "USB Drive", 9); //16 bytes product
		memcpy(&s->buf[32], "1", 1); //4 bytes product revision
		s->result = 0;
		break;
	case READ_CAPACITY:
	{
		long cur_tell, end_tell;
		uint32_t *last_lba, *blk_len;

		set_sense(s, NO_SENSE, 0);
		memset(s->buf, 0, sizeof(s->buf));
		s->off = 0;

		cur_tell = ftell(s->hfile);
		fseek(s->hfile, 0, SEEK_END);
		end_tell = ftell(s->hfile);
		fseek(s->hfile, cur_tell, SEEK_SET);

		last_lba = (uint32_t*)&s->buf[0];
		blk_len = (uint32_t*)&s->buf[4]; //in bytes
		//right?
		*blk_len = LBA_BLOCK_SIZE; //descriptor is currently max 64 bytes for bulk though
		*last_lba = end_tell / *blk_len;

		DPRINTF("read capacity lba=0x%x, block=0x%x\n", *last_lba, *blk_len);

		*last_lba = bswap32(*last_lba);
		*blk_len = bswap32(*blk_len);
		s->result = GOOD;
		break;
	}
	case READ_12:
	case READ_10:
		s->result = GOOD;
		s->off = 0;
		set_sense(s, NO_SENSE, 0);

		lba = bswap32(*(uint32_t *)&cbw->cmd[2]);
		if (cbw->cmd[0] == READ_10)
			xfer_len = bswap16(*(uint16_t *)&cbw->cmd[7]);
		else
			xfer_len = bswap32(*(uint32_t *)&cbw->cmd[6]);

		DPRINTF("read lba=0x%x, len=0x%x\n", lba, xfer_len * LBA_BLOCK_SIZE);

		if (xfer_len == 0) //TODO nothing to do
			break;

		if (fseek(s->hfile, lba * LBA_BLOCK_SIZE, SEEK_SET)) {
			s->result = 0x2; //?
			set_sense(s, MEDIUM_ERROR, 0);
			return;
		}

		memset(s->buf, 0, sizeof(s->buf));
		//Or do actual reading in USB_MSDM_DATAIN?
		//TODO probably don't set data_len to read length
		if (!(s->data_len = fread(s->buf, 1, /*s->data_len*/ xfer_len * LBA_BLOCK_SIZE, s->hfile))) {
			s->result = 0x2; //?
			set_sense(s, MEDIUM_ERROR, 0);
		}
		break;
	case WRITE_12:
	case WRITE_10:
		s->result = GOOD; //everything is fine
		s->off = 0;
		set_sense(s, NO_SENSE, 0);

		lba = bswap32(*(uint32_t *)&cbw->cmd[2]);
		if (cbw->cmd[0] == WRITE_10)
			xfer_len = bswap16(*(uint16_t *)&cbw->cmd[7]);
		else
			xfer_len = bswap32(*(uint32_t *)&cbw->cmd[6]);

		DPRINTF("write lba=0x%x, len=0x%x\n", lba, xfer_len * LBA_BLOCK_SIZE);

		//if (xfer_len == 0) //nothing to do
		//	break;

		if (fseek(s->hfile, lba * LBA_BLOCK_SIZE, SEEK_SET)) {
			s->result = 0x2; //?
			set_sense(s, MEDIUM_ERROR, 0);
			return;
		}

		s->data_len = xfer_len * LBA_BLOCK_SIZE;
		//Actual write comes with next command in USB_MSDM_DATAOUT
		break;
	default:
		s->result = 0x1; //COMMAND_FAILED
		set_sense(s, ILLEGAL_REQUEST, INVALID_COMMAND_OPERATION);
		break;
	}
}
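The READ/WRITE paths above pull the LBA and transfer length out of the CDB with bswap32()/bswap16() applied to casted pointers, which relies on a little-endian host and tolerant unaligned access. A small standalone sketch of the same field layout decoded byte-by-byte instead; the helper names and the sample CDB bytes are made up for illustration:

#include <stdint.h>
#include <stdio.h>

// read big-endian CDB fields one byte at a time; unlike the bswap32()
// casts above, this makes no assumptions about host byte order or alignment
static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
		| ((uint32_t)p[2] << 8) | p[3];
}

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	// READ(10): opcode 0x28, LBA in bytes 2-5, transfer length in bytes 7-8
	const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x00, 0x12, 0x34, 0, 0x00, 0x08, 0 };

	uint32_t lba = get_be32(&cdb[2]);
	uint16_t blocks = get_be16(&cdb[7]);

	printf("lba=0x%x blocks=%u\n", lba, blocks);
	return 0;
}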
/*!	Execute SCSI command */
void
ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
{
	scsi_ccb *request = qrequest->request;

	SHOW_FLOW(3, "command=%x", request->cdb[0]);

	// ATA devices have one LUN only
	if (request->target_lun != 0) {
		request->subsys_status = SCSI_SEL_TIMEOUT;
		finish_request(qrequest, false);
		return;
	}

	// starting a request means deleting sense, so don't do it if
	// the command wants to read it
	if (request->cdb[0] != SCSI_OP_REQUEST_SENSE)
		start_request(device, qrequest);

	switch (request->cdb[0]) {
		case SCSI_OP_TEST_UNIT_READY:
			ata_test_unit_ready(device, qrequest);
			break;

		case SCSI_OP_REQUEST_SENSE:
			ide_request_sense(device, qrequest);
			return;

		case SCSI_OP_FORMAT: /* FORMAT UNIT */
			// we could forward the request to the disk, but modern disks cannot
			// be formatted anyway, so we just refuse the request
			// (exceptions are removable media devices, but to my knowledge
			// they don't have to be formatted as well)
			set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_INQUIRY:
			ata_inquiry(device, qrequest);
			break;

		case SCSI_OP_MODE_SELECT_10:
			ata_mode_select_10(device, qrequest);
			break;

		case SCSI_OP_MODE_SENSE_10:
			ata_mode_sense_10(device, qrequest);
			break;

		case SCSI_OP_MODE_SELECT_6:
		case SCSI_OP_MODE_SENSE_6:
			// we've told the SCSI bus manager to emulate these commands
			set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_RESERVE:
		case SCSI_OP_RELEASE:
			// though mandatory, this doesn't make much sense in a
			// single initiator environment; so what
			set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_START_STOP:
		{
			scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;

			// with no LoEj bit set, we should only allow/deny further access
			// we ignore that (unsupported for ATA)
			// with the LoEj bit set, we should additionally either load or
			// eject the medium (start = 0 - eject; start = 1 - load)

			if (!cmd->start)
				// we must always flush cache if start = 0
				ata_flush_cache(device, qrequest);

			if (cmd->load_eject)
				ata_load_eject(device, qrequest, cmd->start);

			break;
		}

		case SCSI_OP_PREVENT_ALLOW:
		{
			scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;

			ata_prevent_allow(device, cmd->prevent);
			break;
		}

		case SCSI_OP_READ_CAPACITY:
			read_capacity(device, qrequest);
			break;

		case SCSI_OP_VERIFY:
			// does anyone use this function?
			// effectively, it does a read-and-compare, which IDE doesn't support
			set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_SYNCHRONIZE_CACHE:
			// we ignore range and immediate bit, we always immediately flush everything
			ata_flush_cache(device, qrequest);
			break;

		// sadly, there are two possible read/write operation codes;
		// at least the third one, read/write(12), is not valid for DAS
		case SCSI_OP_READ_6:
		case SCSI_OP_WRITE_6:
		{
			scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
			uint32 pos;
			size_t length;

			pos = ((uint32)cmd->high_lba << 16) | ((uint32)cmd->mid_lba << 8)
				| (uint32)cmd->low_lba;

			length = cmd->length != 0 ? cmd->length : 256;

			SHOW_FLOW(3, "READ6/WRITE6 pos=%lx, length=%lx", pos, length);

			ata_send_rw(device, qrequest, pos, length,
				cmd->opcode == SCSI_OP_WRITE_6);
			return;
		}

		case SCSI_OP_READ_10:
		case SCSI_OP_WRITE_10:
		{
			scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
			uint32 pos;
			size_t length;

			pos = B_BENDIAN_TO_HOST_INT32(cmd->lba);
			length = B_BENDIAN_TO_HOST_INT16(cmd->length);

			if (length != 0) {
				ata_send_rw(device, qrequest, pos, length,
					cmd->opcode == SCSI_OP_WRITE_10);
			} else {
				// we cannot transfer zero blocks (apart from LBA48)
				finish_request(qrequest, false);
			}
			return;
		}

		default:
			set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
	}

	finish_checksense(qrequest);
}
static void
ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
{
	scsi_ccb *request = qrequest->request;
	scsi_cmd_mode_sense_10 *cmd = (scsi_cmd_mode_sense_10 *)request->cdb;
	scsi_mode_param_header_10 param_header;
	scsi_modepage_control control;
	scsi_mode_param_block_desc block_desc;
	size_t totalLength = sizeof(scsi_mode_param_header_10)
		+ sizeof(scsi_mode_param_block_desc)
		+ sizeof(scsi_modepage_control);
	scsi_mode_param_dev_spec_da devspec = {
		_res0_0 : 0,
		dpo_fua : 0,
		_res0_6 : 0,
		write_protected : 0
	};
	uint32 allocationLength;

	SHOW_FLOW0(1, "Hi!");

	allocationLength = B_BENDIAN_TO_HOST_INT16(cmd->allocation_length);

	// we answer control page requests and "all pages" requests
	// (as the latter are the same as the first)
	if ((cmd->page_code != SCSI_MODEPAGE_CONTROL
			&& cmd->page_code != SCSI_MODEPAGE_ALL)
		|| (cmd->page_control != SCSI_MODE_SENSE_PC_CURRENT
			&& cmd->page_control != SCSI_MODE_SENSE_PC_SAVED)) {
		set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	//param_header = (scsi_mode_param_header_10 *)request->data;
	param_header.mode_data_length = B_HOST_TO_BENDIAN_INT16(totalLength - 1);
	param_header.medium_type = 0;		// XXX standard is a bit vague here
	param_header.dev_spec_parameter = *(uint8 *)&devspec;
	param_header.block_desc_length
		= B_HOST_TO_BENDIAN_INT16(sizeof(scsi_mode_param_block_desc));

	copy_sg_data(request, 0, allocationLength, &param_header,
		sizeof(param_header), false);

	/*block_desc = (scsi_mode_param_block_desc *)(request->data
		+ sizeof(*param_header));*/
	memset(&block_desc, 0, sizeof(block_desc));
	// density is reserved (0), descriptor apply to entire medium (num_blocks=0)
	// remains the blocklen to be set
	block_desc.high_blocklen = 0;
	block_desc.med_blocklen = 512 >> 8;
	block_desc.low_blocklen = 512 & 0xff;

	copy_sg_data(request, sizeof(param_header), allocationLength,
		&block_desc, sizeof(block_desc), false);

	/*contr = (scsi_modepage_contr *)(request->data
		+ sizeof(*param_header)
		+ ((uint16)param_header->high_block_desc_len << 8)
		+ param_header->low_block_desc_len);*/
	memset(&control, 0, sizeof(control));
	control.RLEC = false;
	control.DQue = !device->CQ_enabled;
	control.QErr = false;
		// when a command fails we requeue all
		// lost commands automagically
	control.QAM = SCSI_QAM_UNRESTRICTED;

	copy_sg_data(request, sizeof(param_header)
			+ B_BENDIAN_TO_HOST_INT16(param_header.block_desc_length),
		allocationLength, &control, sizeof(control), false);

	// the number of bytes that were transferred to buffer is
	// restricted by allocation length and by request data buffer size
	totalLength = min(totalLength, allocationLength);
	totalLength = min(totalLength, request->data_length);

	request->data_resid = request->data_length - totalLength;
}


/*!	Emulate modifying control page */
static bool
ata_mode_select_control_page(ide_device_info *device, ide_qrequest *qrequest,
	scsi_modepage_control *page)
{
	if (page->header.page_length != sizeof(*page) - sizeof(page->header)) {
		set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST,
			SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
		return false;
	}

	// we only support enabling/disabling command queuing
	enable_CQ(device, !page->DQue);
	return true;
}


/*!	Emulate MODE SELECT 10 command */
static void
ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
{
	scsi_ccb *request = qrequest->request;
	scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)request->cdb;
	scsi_mode_param_header_10 param_header;
	scsi_modepage_header page_header;
	uint32 totalLength;
	uint32 modepageOffset;
	char modepage_buffer[64];	// !!! enlarge this to support longer mode pages

	if (cmd->save_pages || cmd->pf != 1) {
		set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	totalLength = min(request->data_length,
		B_BENDIAN_TO_HOST_INT16(cmd->param_list_length));

	// first, retrieve page header to get size of different chunks
	//param_header = (scsi_mode_param_header_10 *)request->data;
	if (!copy_sg_data(request, 0, totalLength, &param_header,
			sizeof(param_header), true))
		goto err;

	totalLength = min(totalLength,
		B_BENDIAN_TO_HOST_INT16(param_header.mode_data_length) + 1UL);

	// this is the start of the first mode page;
	// we ignore the block descriptor silently
	modepageOffset = sizeof(param_header)
		+ B_BENDIAN_TO_HOST_INT16(param_header.block_desc_length);

	// go through list of pages
	while (modepageOffset < totalLength) {
		uint32 pageLength;

		// get header to know how long the page is
		if (!copy_sg_data(request, modepageOffset, totalLength,
				&page_header, sizeof(page_header), true))
			goto err;

		// get size of one page and copy it to buffer
		pageLength = page_header.page_length + sizeof(scsi_modepage_header);

		// the buffer has a maximum size - this is not really standard
		// compliant but sufficient for our needs
		if (pageLength > sizeof(modepage_buffer))
			goto err;

		if (!copy_sg_data(request, modepageOffset, totalLength,
				&modepage_buffer, min(pageLength, sizeof(modepage_buffer)), true))
			goto err;

		// modify page;
		// currently, we only support the control mode page
		switch (page_header.page_code) {
			case SCSI_MODEPAGE_CONTROL:
				if (!ata_mode_select_control_page(device, qrequest,
						(scsi_modepage_control *)modepage_buffer))
					return;
				break;

			default:
				set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST,
					SCSIS_ASC_INV_PARAM_LIST_FIELD);
				return;
		}

		modepageOffset += pageLength;
	}

	if (modepageOffset != totalLength)
		goto err;

	request->data_resid = request->data_length - totalLength;
	return;

	// if we arrive here, data length was incorrect
err:
	set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST,
		SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
}
/*!	Emulate PREVENT ALLOW command */
static bool
ata_prevent_allow(ide_device_info *device, bool prevent)
{
	set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_ILL_FUNCTION);
	return false;
}