/*
 * Flush every TLB entry belonging to @mm on the local CPU.  All of an
 * mm's entries share one ASID, so retiring that ASID (drop_mmu_context)
 * invalidates the whole set at once.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int this_cpu = smp_processor_id();

	if (cpu_context(this_cpu, mm) == 0)
		return;		/* mm never ran on this CPU: nothing cached */

#ifdef DEBUG_TLB
	printk("[tlbmm<%lu>]", (unsigned long)cpu_context(this_cpu, mm));
#endif
	drop_mmu_context(mm, this_cpu);
}
/*
 * Per-CPU mm-wide TLB flush: allocating a fresh context for @mm makes
 * every cached translation unreachable without touching entries one by
 * one.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int this_cpu = smp_processor_id();

	if (cpu_context(this_cpu, mm))
		drop_mmu_context(mm, this_cpu);
}
/*
 * Invalidate the TLB entry mapping @page within @vma's address space on
 * the local CPU.  Uses the TFP-style CP0 interface (vaddr/tlbset
 * registers); the probe/overwrite sequence below is order-sensitive.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	/* No context on this CPU means no stale entries can exist. */
	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)		/* no matching entry: nothing to kill */
		goto finish;

	/* Overwrite the hit with an invalid mapping whose EntryHi
	 * (CKSEG0 + set index) keeps entries mutually distinct. */
	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);	/* restore the previous ASID */
	local_irq_restore(flags);
}
void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, mm); } else { cpumask_t mask = cpu_online_map; unsigned int cpu; cpu_clear(smp_processor_id(), mask); for_each_cpu_mask(cpu, mask) if (cpu_context(cpu, mm)) cpu_context(cpu, mm) = 0; } local_flush_tlb_mm(mm); preempt_enable(); }
/*
 * Flush TLB entries covering [start, end) for @mm on the local CPU
 * (old mm-based API).  TLB entries pair two pages (entrylo0/entrylo1),
 * hence the halving of @size and the (PAGE_MASK << 1) alignment.
 */
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK), start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;		/* pages -> even/odd entry pairs */
		if (size <= NTLB_ENTRIES_HALF) {
			int oldpid = (read_c0_entryhi() & ASID_MASK);
			int newpid = (cpu_context(smp_processor_id(), mm) & ASID_MASK);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* no matching entry */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			/* Range too large: cheaper to retire the whole ASID. */
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
	/* NOTE(review): the function's closing brace is not visible in this
	 * excerpt -- the definition appears to continue past this chunk. */
/*
 * Flush TLB entries covering [start, end) in @vma's mm on the local
 * CPU.  Single-entrylo variant (only entrylo0 is written); iterates
 * page by page.  /* BARRIER */ marks document pipeline hazards after
 * CP0 writes.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_context(cpu, mm) & ASID_MASK;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			/* Whole TLB would be scanned anyway: drop the context. */
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
	/* NOTE(review): the function's closing brace is not visible in this
	 * excerpt -- the definition appears to continue past this chunk. */
/*
 * mm-wide SMP flush (old smp_call_function(fn, arg, retry, wait) API).
 * A shared or foreign mm is flushed on other CPUs via IPI; a private
 * current mm only needs its remote contexts zeroed.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	int shared = (atomic_read(&mm->mm_users) != 1) || (current->mm != mm);

	if (shared) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int cpu;

		for (cpu = 0; cpu < smp_num_cpus; cpu++)
			if (cpu != smp_processor_id())
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);
}
/*
 * Range flush for NLM/XLR-style cores: the hardware page-walker is
 * stopped around the update (disable_pgwalker), explicit mtc0/tlbp
 * hazards are honoured, and the instruction TLB is flushed at the end.
 * Entries pair two pages, hence the halving of @size.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		unsigned long config6_flags;

		ENTER_CRITICAL(flags);
		disable_pgwalker(config6_flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;		/* pages -> entry pairs */
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)		/* no matching entry */
					continue;
				/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
#else
				__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx)));
#endif
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			/* Range too large: retire the whole ASID instead. */
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		enable_pgwalker(config6_flags);
		EXIT_CRITICAL(flags);
	}
	/* NOTE(review): the function's closing brace is not visible in this
	 * excerpt -- the definition appears to continue past this chunk. */
/*
 * All entries common to one mm share a single ASID; to flush them all
 * at once we simply bump that ASID.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int this_cpu;

	preempt_disable();
	this_cpu = smp_processor_id();
	if (cpu_context(this_cpu, mm))
		drop_mmu_context(mm, this_cpu);
	preempt_enable();
}
/*
 * SH-style range flush.  Small ranges are invalidated page by page via
 * local_flush_tlb_one() under the mm's ASID (temporarily switching
 * ASIDs if @mm is not current); large ranges just drop the context.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
			cpu_context(cpu, mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm, cpu);	/* take a new ASID now */
		} else {
			unsigned long asid;
			unsigned long saved_asid = MMU_NO_ASID;

			asid = cpu_asid(cpu, mm);
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				/* Borrow @mm's ASID for the duration of the walk. */
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				local_flush_tlb_one(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);	/* restore our own ASID */
		}
		local_irq_restore(flags);
	}
	/* NOTE(review): the function's closing brace is not visible in this
	 * excerpt -- the definition appears to continue past this chunk. */
/* Dump the controller's data buffer to the error log (verbose builds only),
 * truncating the hex dump at one disk sector. */
static void log_data(const omti8621_state *state)
{
	int i;

	if (verbose <= 0)
		return;

	logerror("%s: OMTI data (length=%02x)", cpu_context(state->device),
		state->data_length);
	for (i = 0; i < state->data_length && i < OMTI_DISK_SECTOR_SIZE; i++)
		logerror(" %02x", state->data_buffer[i]);
	if (i < state->data_length)
		logerror(" ...");	/* dump was truncated */
	logerror("\n");
}
/*
 * 64-bit range flush (old mm-based API).  Entries pair two pages, so
 * @size is halved and addresses aligned to (PAGE_MASK << 1).  Victim
 * entries are rewritten with distinct XKPHYS addresses so no two
 * entries match; BARRIER covers CP0 write hazards.
 */
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK), start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;		/* pages -> entry pairs */
		if(size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while(start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if(idx < 0)		/* no matching entry */
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(XKPHYS+idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			write_c0_entryhi(oldpid);
		} else {
			/* Range too large: retire the ASID wholesale. */
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
	/* NOTE(review): the function's closing brace is not visible in this
	 * excerpt -- the definition appears to continue past this chunk. */
/*
 * TFP-style range flush: probe each page address via the vaddr
 * register, and overwrite hits with an invalid mapping whose EntryHi
 * (CKSEG0 + set index) keeps entries mutually distinct.  Ranges larger
 * than half the TLB just drop the whole context.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid, size;

	/* No context on this CPU: nothing cached for this mm. */
	if (!cpu_context(cpu, mm))
		return;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;		/* pages -> entry pairs */

	local_irq_save(flags);

	if (size > TFP_TLB_SIZE / 2) {
		drop_mmu_context(mm, cpu);
		goto out_restore;
	}

	oldpid = read_c0_entryhi();
	newpid = cpu_asid(cpu, mm);

	write_c0_entrylo(0);		/* all victims become invalid */

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)		/* not present */
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}
	write_c0_entryhi(oldpid);	/* restore the previous ASID */

out_restore:
	local_irq_restore(flags);
}
/* Dump the current data buffer to the error log (verbose builds only),
 * truncating the hex dump at one disk sector. */
void omti8621_device::log_data()
{
	if (verbose <= 0)
		return;

	logerror("%s: OMTI data (length=%02x)", cpu_context(this), data_length);
	int i;
	for (i = 0; i < data_length && i < OMTI_DISK_SECTOR_SIZE; i++)
		logerror(" %02x", data_buffer[i]);
	if (i < data_length)
		logerror(" ...");	/* dump was truncated */
	logerror("\n");
}
/* * The following tlb flush calls are invoked when old translations are * being torn down, or pte attributes are changing. For single threaded * address spaces, a new context is obtained on the current cpu, and tlb * context on other cpus are invalidated to force a new context allocation * at switch_mm time, should the mm ever be used on other cpus. For * multithreaded address spaces, intercpu interrupts have to be sent. * Another case where intercpu interrupts are required is when the target * mm might be active on another cpu (eg debuggers doing the flushes on * behalf of debugees, kswapd stealing pages from another process etc). * Kanoj 07/00. */ void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); } else { int i; for_each_online_cpu(i) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } local_flush_tlb_mm(mm); preempt_enable(); }
/*
 * All entries common to one mm share a single ASID, so flushing them
 * all is done by bumping that ASID.  The hardware page-table walker is
 * disabled (disable_pgwalker) while the context changes.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int this_cpu;
	unsigned long config6_flags;

	preempt_disable();
	this_cpu = smp_processor_id();
	disable_pgwalker(config6_flags);

	if (cpu_context(this_cpu, mm))
		drop_mmu_context(mm, this_cpu);

	enable_pgwalker(config6_flags);
	preempt_enable();
}
/*
 * Range flush with hugetlb awareness: huge VMAs are rounded to HPAGE
 * boundaries, regular ones (presumably) to small pages.
 * NOTE(review): this function is truncated in this excerpt -- the body
 * continues past the last visible line.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		int huge = is_vm_hugetlb_page(vma);

		ENTER_CRITICAL(flags);
		if (huge) {
			start = round_down(start, HPAGE_SIZE);
			end = round_up(end, HPAGE_SIZE);
			size = (end - start) >> HPAGE_SHIFT;
		} else {
/*
 * SMP single-page flush (old 4-argument smp_call_function API): IPI the
 * other CPUs when the mm may be live elsewhere, otherwise just
 * invalidate their contexts.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int shared = (atomic_read(&vma->vm_mm->mm_users) != 1) ||
		     (current->mm != vma->vm_mm);

	if (shared) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int cpu;

		for (cpu = 0; cpu < smp_num_cpus; cpu++)
			if (cpu != smp_processor_id())
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
}
void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm); } else { int i; for (i = 0; i < num_online_cpus(); i++) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } local_flush_tlb_mm(mm); preempt_enable(); }
/*
 * Modern r4k-style range flush: range rounded to even/odd entry pairs,
 * hardware walker stopped (htw_stop) during the update, micro TLB
 * flushed at the end.  Cores with an FTLB compare against tlbsize/8
 * (per-set capacity) rather than half the TLB.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)		/* no matching entry */
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			/* Range too large: retire the whole ASID instead. */
			drop_mmu_context(mm, cpu);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
	/* NOTE(review): the function's closing brace is not visible in this
	 * excerpt -- the definition appears to continue past this chunk. */
void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) { if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd; fd.mm = mm; fd.addr1 = start; fd.addr2 = end; smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1); } else { int i; for (i = 0; i < smp_num_cpus; i++) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } local_flush_tlb_range(mm, start, end); }
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long flags; int size; ENTER_CRITICAL(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; size = (size + 1) >> 1; local_irq_save(flags); if (size <= current_cpu_data.tlbsize/2) { int oldpid = read_c0_entryhi(); int newpid = cpu_asid(cpu, mm); start &= (PAGE_MASK << 1); end += ((PAGE_SIZE << 1) - 1); end &= (PAGE_MASK << 1); while (start < end) { int idx; write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entrylo1(0); if (idx < 0) continue; /* Make sure all entries differ. */ write_c0_entryhi(UNIQUE_ENTRYHI(idx)); mtc0_tlbw_hazard(); tlb_write_indexed(); } tlbw_use_hazard(); write_c0_entryhi(oldpid); } else { drop_mmu_context(mm, cpu); } EXIT_CRITICAL(flags); }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { preempt_disable(); if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { struct flush_tlb_data fd; fd.vma = vma; fd.addr1 = page; smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd); } else { int i; for (i = 0; i < num_online_cpus(); i++) if (smp_processor_id() != i) cpu_context(i, vma->vm_mm) = 0; } local_flush_tlb_page(vma, page); preempt_enable(); }
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd; fd.vma = vma; fd.addr1 = start; fd.addr2 = end; __on_other_cores(flush_tlb_range_ipi, (void *)&fd); } else { int i; for (i = 0; i < num_online_cpus(); i++) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } local_flush_tlb_range(vma, start, end); preempt_enable(); }
/* Log the first bytes of the current tape block (verbose builds only),
 * flagging a partially filled block buffer. */
void sc499_device::log_block(const char *text)
{
	int data_length = 16;

	if (verbose <= 0)
		return;

	logerror("%s: %s %d -", cpu_context(), text, m_tape_pos);
	int i;
	for (i = 0; i < data_length && i < SC499_CTAPE_BLOCK_SIZE; i++)
		logerror(" %02x", m_ctape_block_buffer[i]);
	if (i < SC499_CTAPE_BLOCK_SIZE)
		logerror(" ...");	/* only a prefix was dumped */
	if (m_ctape_block_index > 0 && m_ctape_block_index != SC499_CTAPE_BLOCK_SIZE)
		logerror(" block_index = %d !!!", m_ctape_block_index);
	logerror("\n");
}
/*
 * SH-style single-page flush: invalidate the entry for @page under the
 * mm's ASID, temporarily switching to that ASID when @vma's mm is not
 * current.  The set_asid ordering around local_flush_tlb_one is
 * deliberate.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned int cpu = smp_processor_id();

	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = cpu_asid(cpu, vma->vm_mm);
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			/* Borrow the target mm's ASID for the flush. */
			saved_asid = get_asid();
			set_asid(asid);
		}
		local_flush_tlb_one(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);	/* restore our own ASID */
		local_irq_restore(flags);
	}
}
/*
 * Log the host command currently held in m_command_buffer (verbose
 * builds only): a symbolic name for the opcode in byte 0, then the raw
 * bytes.  CMD_TRANSMIT_PACKET_F9 carries no length byte, hence the
 * second switch.
 */
void threecom3c505_device::log_command()
{
	if (verbose > 0)
	{
		int i;
		logerror("%s: Command ", cpu_context());
		switch (m_command_buffer[0])
		{
		case CMD_RESET: // 0x00
			logerror("!!! unexpected CMD_RESET");
			break;
		case CMD_CONFIGURE_ADAPTER_MEMORY: // 0x01
			logerror("CMD_CONFIGURE_ADAPTER_MEMORY");
			break;
		case CMD_CONFIGURE_82586: // 0x02
			logerror("CMD_CONFIGURE_82586");
			break;
		case CMD_RECEIVE_PACKET: // 0x08
			logerror("CMD_RECEIVE_PACKET");
			break;
		case CMD_TRANSMIT_PACKET: // 0x09
			logerror("CMD_TRANSMIT_PACKET");
			break;
		case CMD_NETWORK_STATISTICS: // 0x0a
			logerror("CMD_NETWORK_STATISTICS");
			break;
		case CMD_LOAD_MULTICAST_LIST: // 0x0b,
			logerror("CMD_LOAD_MULTICAST_LIST");
			break;
		case CMD_CLEAR_PROGRAM: // 0x0c
			logerror("!!! unexpected CMD_CLEAR_PROGRAM");
			break;
		case CMD_DOWNLOAD_PROGRAM: // 0x0d
			logerror("CMD_DOWNLOAD_PROGRAM");
			break;
		case CMD_EXECUTE_PROGRAM: // 0x0e
			logerror("CMD_EXECUTE_PROGRAM");
			break;
		case CMD_SET_STATION_ADDRESS: // 0x10
			logerror("CMD_SET_STATION_ADDRESS");
			break;
		case CMD_ADAPTER_INFO: // 0x11
			logerror("CMD_ADAPTER_INFO");
			break;
		case CMD_MC_17: // 0x17
			logerror("CMD_MC_17");
			break;
		case CMD_TRANSMIT_PACKET_18: // 0x18
			logerror("CMD_TRANSMIT_PACKET_18");
			break;
		case CMD_MC_F8: // 0xf8
			logerror("!!! CMD_MC_F8");
			break;
		case CMD_TRANSMIT_PACKET_F9: // 0xf9
			logerror("CMD_TRANSMIT_PACKET_F9");
			break;
		case CMD_MC_FA: // 0xfa
			logerror("!!! CMD_MC_FA");
			break;
		default:
			logerror("!!! unexpected Command");
		}
		/* Raw dump; F9 transmit packets have no length byte. */
		switch (m_command_buffer[0])
		{
		case CMD_TRANSMIT_PACKET_F9: // 0xf9
			logerror(" (%02x, length=00)", m_command_buffer[0]);
			break;
		default:
			logerror(" (%02x, length=%02x)", m_command_buffer[0], m_command_buffer[1]);
			for (i = 2; i < m_command_index; i++)
			{
				logerror(" %02x", m_command_buffer[i]);
			}
			break;
		}
		logerror("\n");
	}
}
/*
 * Log the response PCB about to be returned to the host (verbose builds
 * only): a symbolic name for m_response.command, then its raw payload
 * bytes.
 */
void threecom3c505_device::log_response()
{
	if (verbose > 0)
	{
		int i;
		logerror("%s: Response ", cpu_context());
		switch (m_response.command)
		{
		case CMD_RESET_RESPONSE: // 0x30
			logerror("CMD_RESET_RESPONSE");
			break;
		case CMD_CONFIGURE_ADAPTER_RESPONSE: // 0x31
			logerror("CMD_CONFIGURE_ADAPTER_RESPONSE");
			break;
		case CMD_CONFIGURE_82586_RESPONSE: // 0x32
			logerror("CMD_CONFIGURE_82586_RESPONSE");
			break;
		case CMD_RECEIVE_PACKET_COMPLETE: // 0x38
			logerror("CMD_RECEIVE_PACKET_COMPLETE");
			break;
		case CMD_TRANSMIT_PACKET_COMPLETE: // 0x39
			logerror("CMD_TRANSMIT_PACKET_COMPLETE");
			break;
		case CMD_NETWORK_STATISTICS_RESPONSE: // 0x3a
			logerror("CMD_NETWORK_STATISTICS_RESPONSE");
			break;
		case CMD_LOAD_MULTICAST_RESPONSE: // 0x3b
			logerror("CMD_LOAD_MULTICAST_RESPONSE");
			break;
		case CMD_DOWNLOAD_PROGRAM_RESPONSE: // 0x3d
			logerror("CMD_DOWNLOAD_PROGRAM_RESPONSE");
			break;
		case CMD_EXECUTE_PROGRAM_RESPONSE: // 0x3e
			logerror("CMD_EXECUTE_PROGRAM_RESPONSE");
			break;
		case CMD_SET_ADDRESS_RESPONSE: // 0x40
			logerror("CMD_SET_ADDRESS_RESPONSE");
			break;
		case CMD_ADAPTER_INFO_RESPONSE: // 0x41
			logerror("CMD_ADAPTER_INFO_RESPONSE");
			break;
		case CMD_MC_17_COMPLETE: // 0x47
			logerror("CMD_MC_17_COMPLETE");
			break;
		case CMD_TRANSMIT_PACKET_18_COMPLETE: // 0x48
			logerror("CMD_TRANSMIT_PACKET_18_COMPLETE");
			break;
		case CMD_MC_E1_RESPONSE: // 0xe1
			logerror("!!! CMD_MC_E1_RESPONSE");
			break;
		case CMD_MC_E2_RESPONSE: // 0xe2
			logerror("!!! CMD_MC_E2_RESPONSE");
			break;
		default:
			logerror("!!! unexpected Response");
		}
		logerror(" (%02x, length=%02x)", m_response.command, m_response.length);
		for (i = 0; i < m_response.length; i++)
		{
			logerror(" %02x", m_response.data.raw[i]);
		}
		logerror("\n");
	}
}
/*
 * Log an OMTI command descriptor block (verbose builds only): a
 * symbolic name for the opcode in cdb[0], then the raw CDB bytes, and
 * for disk-addressed commands the decoded disk address and count.
 * NOTE(review): the default-case string literal was split across lines
 * in the extracted source and has been rejoined here.
 */
void omti8621_device::log_command(const UINT8 cdb[], const UINT16 cdb_length)
{
	if (verbose > 0)
	{
		int i;
		logerror("%s: OMTI command ", cpu_context(this));
		switch (cdb[0])
		{
		case OMTI_CMD_TEST_DRIVE_READY: // 0x00
			logerror("Test Drive Ready");
			break;
		case OMTI_CMD_RECALIBRATE: // 0x01
			logerror("Recalibrate");
			break;
		case OMTI_CMD_REQUEST_SENSE: // 0x03
			logerror("Request Sense");
			break;
		case OMTI_CMD_READ_VERIFY: // 0x05
			logerror("Read Verify");
			break;
		case OMTI_CMD_FORMAT_TRACK: // 0x06
			logerror("Format Track");
			break;
		case OMTI_CMD_FORMAT_BAD_TRACK: // 0x07
			logerror("Format Bad Track");
			break;
		case OMTI_CMD_READ: // 0x08
			logerror("Read");
			break;
		case OMTI_CMD_WRITE: // 0x0A
			logerror("Write");
			break;
		case OMTI_CMD_SEEK: // 0x0B
			logerror("Seek");
			break;
		case OMTI_CMD_READ_SECTOR_BUFFER: // 0x0E
			logerror("Read Sector Buffer");
			break;
		case OMTI_CMD_WRITE_SECTOR_BUFFER: // 0x0F
			logerror("Write Sector Buffer");
			break;
		case OMTI_CMD_ASSIGN_ALTERNATE_TRACK: // 0x11
			logerror("Assign Alternate Track");
			break;
		case OMTI_CMD_READ_DATA_TO_BUFFER: // 0x1E
			logerror("Read Data to Buffer");
			break;
		case OMTI_CMD_WRITE_DATA_FROM_BUFFER: // 0x1F
			logerror("Write Data from Buffer");
			break;
		case OMTI_CMD_COPY: // 0x20
			logerror("Copy");
			break;
		case OMTI_CMD_READ_ESDI_DEFECT_LIST: // 0x37
			logerror("Read ESDI Defect List");
			break;
		case OMTI_CMD_RAM_DIAGNOSTICS: // 0xE0
			logerror("RAM. Diagnostic");
			break;
		case OMTI_CMD_CONTROLLER_INT_DIAGNOSTIC: // 0xE4
			logerror("Controller Int. Diagnostic");
			break;
		case OMTI_CMD_READ_LONG: // 0xE5
			logerror("Read Long");
			break;
		case OMTI_CMD_WRITE_LONG: // 0xE6
			logerror("Write Long");
			break;
		case OMTI_CMD_READ_CONFIGURATION: // 0xEC
			logerror("Read Configuration");
			break;
		case OMTI_CMD_INVALID_COMMAND: // 0xFF
			logerror("Invalid Command");
			break;
		default:
			logerror("!!! Unexpected Command !!!");
		}
		// logerror(" (%02x, length=%02x)", cdb[0], cdb_length);
		for (i = 0; i < cdb_length; i++)
		{
			logerror(" %02x", cdb[i]);
		}
		/* Disk-addressed commands also get a decoded address/count. */
		switch (cdb[0])
		{
		case OMTI_CMD_READ_VERIFY: // 0x05
		case OMTI_CMD_READ: // 0x08
		case OMTI_CMD_WRITE: // 0x0a
		case OMTI_CMD_SEEK: // 0x0b
		case OMTI_CMD_READ_DATA_TO_BUFFER: // 0x1E
		case OMTI_CMD_WRITE_DATA_FROM_BUFFER: // 0x1F
		case OMTI_CMD_COPY: // 0x20
			logerror(" (diskaddr=%x count=%x)", get_disk_address(cdb), cdb[4]);
			break;
		}
		logerror("\n");
	}
}