errno_t wtty_putc(wtty_t *w, int c)
{
    errno_t ret = 0;

    assert(w);
    hal_mutex_lock(&w->mutex);
    wtty_wrap(w);

    SHOW_FLOW( 11, "wtty putc %p", w );

    if(!w->started) { ret = EPIPE; goto exit; }

    while( _wtty_is_full(w) )
    {
        hal_cond_wait( &w->wcond, &w->mutex );
        if(!w->started) { ret = EPIPE; goto exit; }
    }

    wtty_doputc(w, c);

exit:
    hal_mutex_unlock(&w->mutex);
    return ret;
}
static void put_some(int times)
{
    if( times == 0 )
        return;

    put_some(times-1);

    int cnt = IGS;
    while(cnt--)
    {
        if( igp >= IGS-1 )
            igp = -1;

        if( igot[++igp].size == 0 )
            continue;

        SHOW_FLOW( 10, "put %7d @ %7d", igot[igp].size, igot[igp].pos);
        phantom_phys_free_region( &pm_map, igot[igp].pos, igot[igp].size );
        igot[igp].size = 0;
        return;
    }
}
void recvReadReply(trfs_pkt_t *rq)
{
    trfs_fio_t *fio = &(rq->readReply.info);
    void *data = rq->readReply.data;

    SHOW_FLOW( 2, "read reply for fid %d ioid %d nSect %d start %ld",
               fio->fileId, fio->ioId, fio->nSectors, fio->startSector);

    trfs_queue_t *qe = findRequest( fio, TRFS_QEL_TYPE_READ );
    if( qe == 0 )
    {
        SHOW_ERROR0( 0, "TRFS: No request for read reply");
        return;
    }

    trfs_process_received_data(qe, fio, data);
    trfs_mark_recombine_map(qe, fio->startSector, fio->nSectors);

    if( trfs_request_complete(qe) )
    {
        removeRequest(qe);
        trfs_signal_done(qe);
    }
}
int mips_irq_dispatch(struct trap_state *ts, u_int32_t pending)
{
    unsigned mask = mips_read_cp0_status();
    mask >>= 8;
    mask &= 0xFF;

    SHOW_FLOW( 8, "irq pending %x mask %x", pending, mask );

    pending &= mask;

    // Have software IRQ requests? Clear 'em BEFORE servicing,
    // or they'll fire again as soon as interrupts are open
    if( pending & 0x3 )
    {
        int ie = hal_save_cli();

        unsigned int cause = mips_read_cp0_cause();
        cause &= ~(0x3 << 8); // reset software irq 0 & 1
        mips_write_cp0_cause( cause );

        if(ie) hal_sti();
    }

    u_int32_t irqs = pending;
    int nirq = 0;

    while( irqs )
    {
        if( irqs & 0x1 )
            process_irq(ts, nirq);

        irqs >>= 1;
        nirq++;
    }

    return 0; // We're ok
}
int usys_run( int *err, uuprocess_t *u, const char *fname,
              const char **uav, const char **uep, int flags )
{
    if(flags)
        SHOW_ERROR( 0, "flags not impl: %x", flags );

    int pid = uu_create_process(u->pid);
    if( pid < 0 )
    {
        SHOW_ERROR( 0, "out of processes running %s", fname );
        *err = EPROCLIM; // TODO is it?
        return -1;
    }

    SHOW_FLOW( 7, "run '%s' setargs", fname );
    uu_proc_setargs( pid, uav, uep );

    *err = uu_run_file( pid, fname );
    // TODO if run file failed, process still exists!

    return *err ? -1 : pid;
}
cache_t * cache_init( size_t page_size )
{
    SHOW_FLOW( 2, "Cache init blksize %d", page_size );

    cache_t *c = calloc(1, sizeof(cache_t));
    //assert(c);
    if( c == 0 )
        return 0;

    errno_t ret = cache_do_init( c, page_size );
    //if( ret ) panic("can't init cache");
    if( ret )
    {
        free(c);
        return 0;
    }

    return c;
}
static void w_do_deliver_event(window_handle_t w)
{
    //if(w != 0 && w->eventDeliverSema) hal_sem_release(w->eventDeliverSema);

    if(w != 0 && w->inKernelEventProcess)
    {
        struct ui_event e;
        int got = ev_w_get_event( w, &e, 0 );

        while(got)
        {
            struct ui_event e2;

            if( ev_w_get_event( w, &e2, 0 ) )
            {
                // 2 repaints follow
                if((e.type == e2.type) && (e.w.info == e2.w.info) && (e.focus == e2.focus))
                {
                    if((e.w.info == UI_EVENT_WIN_REPAINT) || (e.w.info == UI_EVENT_WIN_REDECORATE))
                    {
                        SHOW_FLOW0( 1, "combined repaint" );
                        // Choose more powerful spell
                        //e.w.info = UI_EVENT_WIN_REDECORATE;
                        // Eat one
                        //e = e2;
                        continue;
                    }
                }
            }
            else
                got = 0;

            SHOW_FLOW(8, "%p, w=%p, us=%p", &e, e.focus, w);
            w->inKernelEventProcess(w, &e);

            e = e2;
        }
    }
}
trfs_queue_t *findRequest( trfs_fio_t *recvFio, u_int32_t type )
{
    SHOW_FLOW( 6, "look for req such as fid %d ioid %d nSect %d start %ld",
               recvFio->fileId, recvFio->ioId, recvFio->nSectors, recvFio->startSector);

    trfs_queue_t *elt;
    hal_mutex_lock(&lock);

    queue_iterate( &requests, elt, trfs_queue_t *, chain)
    {
        if( elt->type != type )
            continue;

        if( (elt->fio.fileId != recvFio->fileId) || (elt->fio.ioId != recvFio->ioId) )
            continue;

        u_int64_t our_start = elt->fio.startSector;
        u_int64_t our_end   = our_start + elt->fio.nSectors; // one after

        if( (recvFio->startSector < our_start) || (recvFio->startSector >= our_end) )
        {
            SHOW_ERROR( 0, "reply is out of req bounds, our %ld to %ld, got %ld",
                        our_start, our_end, recvFio->startSector );
            continue;
        }

        u_int64_t his_end = recvFio->startSector + recvFio->nSectors;
        if( his_end > our_end )
            SHOW_ERROR( 0, "warning: reply brought too many sectors (%ld against %ld)",
                        his_end, our_end );

        hal_mutex_unlock(&lock);
        return elt;
    }

    hal_mutex_unlock(&lock);
    return 0;
}
static int mipsnet_write(phantom_device_t * dev, const void *buf, int in_buflen )
{
    struct mipsnet *pvt = dev->drv_private;

    SHOW_FLOW( 3, "write %d bytes", in_buflen );
    //hexdump( buf, in_buflen, "pkt", 0 );

    int buflen = in_buflen;

    //assert( buflen < 1514 );
    if( buflen > 1514 )
        buflen = 1514;

    if(buflen < 0)
        return ERR_INVALID_ARGS;

    hal_sem_acquire( &(pvt->send_interrupt_sem) );

    mipsnet_pio_write( dev, buf, buflen);

    return buflen;
}
static errno_t cd_read_sectors( cdfs_t *impl, void *buf, int cd_sector, size_t nsectors )
{
    phantom_disk_partition_t *p = impl->p;

    // CD sectors are 2048 bytes while the partition layer counts 512 byte
    // sectors, hence the *4 below
    SHOW_FLOW( 10, "CDFS disk read @ sect %d, nsect %d", cd_sector * 4, nsectors * 4 );

#if CD_CACHE
    if( impl->cache )
    {
        if( 0 == cache_get_multiple( impl->cache, cd_sector, nsectors, buf ) )
            return 0;
    }

    errno_t rc = phantom_sync_read_sector( p, buf, cd_sector * 4, nsectors * 4 );

    if( impl->cache && !rc )
        cache_put_multiple( impl->cache, cd_sector, nsectors, buf );

    return rc;
#else
    return phantom_sync_read_sector( p, buf, cd_sector * 4, nsectors * 4 );
#endif
}
void hal_page_control_etc( physaddr_t p, void *page_start_addr,
                           page_mapped_t mapped, page_access_t access,
                           u_int32_t flags )
{
    assert(PAGE_ALIGNED(p));
    assert(PAGE_ALIGNED((unsigned)page_start_addr));
    assert((flags & INTEL_PTE_PFN) == 0);

    if(mapped == page_unmap)
        access = page_noaccess;

    int bits = INTEL_PTE_USER | flags; // We need it for V86 mode - REDO IN A MORE SPECIFIC WAY, so that only VM86 pages are user accessible

    if(mapped == page_map)
        bits |= INTEL_PTE_VALID;

    if(mapped == page_map_io)
        bits |= INTEL_PTE_VALID|INTEL_PTE_WTHRU|INTEL_PTE_NCACHE;

    if(access == page_rw)
        bits |= INTEL_PTE_WRITE;

    pt_entry_t pte;

    pte = create_pte(p, bits);

    SHOW_FLOW( 7, "Mapping VA 0x%X to PA 0x%X, pte is 0x%X\n", page_start_addr, p, (long)pte );

    if(mapped != page_unmap )
        phantom_map_page( (linaddr_t)page_start_addr, pte );
    else
        phantom_unmap_page( (linaddr_t)page_start_addr );

    ftlbentry((int)page_start_addr);
}
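/*
 * Usage sketch (illustrative only, not from this source): map one page of
 * device registers write-through/uncached at an already allocated kernel
 * virtual address; dev_phys_addr and dev_virt_addr are hypothetical names.
 *
 *   hal_page_control_etc( dev_phys_addr, dev_virt_addr, page_map_io, page_rw, 0 );
 *
 * and the same call with page_unmap/page_noaccess tears the mapping down.
 */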
errno_t user_args_load( int mina, int maxa, char **oav, int omax, const char **iav )
{
    omax--; // leave one for zero ptr

    while( omax-- )
    {
        if( (0 == iav) || (0 == *iav) )
            break;

        // todo 64 bit bug mina/maxa must be 64 bits
        // do (long for 64 bits) integer ops for else
        // iav will be incremented by mina * sizeof(void*)
        *oav = (void *) (((long int)(*iav)) + mina);

        if( ((long int)*oav) > maxa )
            return EFAULT;

        SHOW_FLOW( 7, "argv %s", *oav );

        oav++;
        iav++;
    }

    *oav = 0;
    return 0;
}
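/*
 * Usage sketch (hypothetical, not taken from the callers): rebase a user
 * supplied argv by mina and reject any pointer that ends up above maxa.
 * The buffer size and variable names below are made up for illustration.
 *
 *   char *kav[32];
 *   if( user_args_load( mina, maxa, kav, 32, (const char **)user_argv ) )
 *       return EFAULT;   // some relocated pointer fell outside the window
 */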
static errno_t cdfs_stat( struct uufile *f, struct stat *dest )
{
    cdfs_file_t *fi = f->impl;
    //cdfs_t *impl = f->fs->impl;

    SHOW_FLOW( 10, "stat '%s'", f->name );

    memset( dest, 0, sizeof(struct stat) );

    dest->st_nlink = 1;
    dest->st_uid = -1;
    dest->st_gid = -1;

    dest->st_size = fi->e.dataLength[0];

    dest->st_mode = 0555; // r-xr-xr-x

    if(fi->e.flags & CD_ENTRY_FLAG_DIR)
        dest->st_mode |= S_IFDIR;
    else
        dest->st_mode |= _S_IFREG;

    return 0;
}
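/*
 * Background note (assumption - cd_marker's definition is not shown here):
 * an ISO9660 volume descriptor begins with a type byte, the "CD001"
 * signature and a version byte, so cd_marker presumably holds those seven
 * bytes, something like
 *
 *   static const char cd_marker[] = { 0x01, 'C', 'D', '0', '0', '1', 0x01 };
 *
 * which is what fs_probe_cd() below compares against, starting at CD
 * sector 16 where the first volume descriptor lives.
 */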
errno_t fs_probe_cd(phantom_disk_partition_t *p)
{
    char buf[PAGE_SIZE];
    //phantom_disk_superblock *sb = (phantom_disk_superblock *)&buf;

    int cd_sector = 16;

    // Have some limit
    while(cd_sector < 64)
    {
        if( phantom_sync_read_sector( p, buf, cd_sector * 4, 4 ) )
            return EINVAL;

        if( strncmp( buf, cd_marker, 7 ) || (buf[7] != 0) )
            return EINVAL;

        SHOW_FLOW( 3, "CDFS marker found @ sector %d", cd_sector );
        return 0;
    }

    return EINVAL;
}
static int boot_cpu(imps_processor *proc)
{
    int apicid = proc->apic_id, success = 1, to;
    unsigned accept_status;
    unsigned bios_reset_vector = (int)PHYS_TO_VIRTUAL(BIOS_RESET_VECTOR);

    int ver = IMPS_LAPIC_READ(LAPIC_VER);
    SHOW_FLOW( 0, "APIC ver = 0x%x (%d)", ver, APIC_VERSION(ver) );

    // TODO define size? guard page?
    ap_stack = calloc(1, 64*1024);

    /*
     * Copy boot code for secondary CPUs here. Find it in between
     * "patch_code_start" and "patch_code_end" symbols. The other CPUs
     * will start there in 16-bit real mode under the 1MB boundary.
     * "patch_code_start" should be placed at a 4K-aligned address
     * under the 1MB boundary.
     */

    //return 0;
    //panic("boot SMP cpu code is not ready");

    //extern char patch_code_start[];
    //extern char patch_code_end[];
    //bootaddr = (512-64)*1024;
    //memcpy((char *)bootaddr, patch_code_start, patch_code_end - patch_code_start);

    install_ap_tramp((void *)bootaddr);

    smp_ap_booted = 0;

    //dump_mp_gdt((void *)&MP_GDT);

    /*
     * Generic CPU startup sequence starts here.
     */

    /* set BIOS reset vector */
    CMOS_WRITE_BYTE(CMOS_RESET_CODE, CMOS_RESET_JUMP);

    //*((volatile unsigned *) bios_reset_vector) = ((bootaddr & 0xFF000) << 12);
    *((volatile unsigned short *) 0x469) = bootaddr >> 4;
    *((volatile unsigned short *) 0x467) = bootaddr & 0xf;

    /* clear the APIC error register */
    IMPS_LAPIC_WRITE(LAPIC_ESR, 0);
    accept_status = IMPS_LAPIC_READ(LAPIC_ESR);

    /* assert INIT IPI */
    send_ipi(apicid, LAPIC_ICR_TM_LEVEL | LAPIC_ICR_LEVELASSERT | LAPIC_ICR_DM_INIT);

    //UDELAY(10000);
    phantom_spinwait(10);

    /* de-assert INIT IPI */
    send_ipi(apicid, LAPIC_ICR_TM_LEVEL | LAPIC_ICR_DM_INIT);

    phantom_spinwait(10);
    //UDELAY(10000);

#if 1
    /*
     * Send Startup IPIs if not an old pre-integrated APIC.
     */
    if (proc->apic_ver >= APIC_VER_NEW)
    {
        int i;

        for (i = 1; i <= 2; i++)
        {
            send_ipi(apicid, LAPIC_ICR_DM_SIPI | ((bootaddr >> 12) & 0xFF));
            //UDELAY(1000);
            phantom_spinwait(1);
        }
    }
phantom_device_t *driver_virtio_disk_probe( pci_cfg_t *pci, int stage )
{
    (void) stage;

    if(vdev.pci)
    {
        printf("Just one drv instance yet\n");
        return 0;
    }

    vdev.interrupt = driver_virtio_disk_interrupt;
    vdev.name = "VirtIODisk0";

    // Say we need it. Not sure, really, that we do. :)
    vdev.guest_features = VIRTIO_BLK_F_BARRIER;

    if( virtio_probe( &vdev, pci ) )
        return 0;

    //u_int8_t status = virtio_get_status( &vdev );
    //printf("Status is: 0x%x\n", status );

    SHOW_FLOW( 1, "Features are: %b", vdev.host_features,
               "\020\1BARRIER\2SIZE_MAX\3SEG_MAX\5GEOM\6RDONLY\7BLK_SIZE" );

    rodisk = vdev.host_features & (1<<VIRTIO_BLK_F_RO);
    if(rodisk)
        SHOW_FLOW0( 1, "Disk is RDONLY");

    SHOW_FLOW( 1, "Registered at IRQ %d IO 0x%X", vdev.irq, vdev.basereg );

    phantom_device_t * dev = (phantom_device_t *)malloc(sizeof(phantom_device_t));

    dev->name = "VirtIO Disk";
    dev->seq_number = seq_number++;
    dev->drv_private = &vdev;

    virtio_set_status( &vdev, VIRTIO_CONFIG_S_DRIVER );

    struct virtio_blk_config cfg;
    virtio_get_config_struct( &vdev, &cfg, sizeof(cfg) );

    SHOW_FLOW( 1, "VIRTIO disk size is %d Mb", cfg.capacity/2048 );

    virtio_set_status( &vdev, VIRTIO_CONFIG_S_DRIVER|VIRTIO_CONFIG_S_DRIVER_OK );

#if 0
    printf("Will write to disk\n");
    //getchar();

    static char test[512] = "Hello virtio disk";

    physaddr_t pa;
    void *va;
    hal_pv_alloc( &pa, &va, sizeof(test) );

    strlcpy( va, test, sizeof(test) );

    driver_virtio_disk_write( &vdev, pa, sizeof(test), 0, 0 );

    printf("Write to disk requested\n");
    //getchar();
#endif

    phantom_disk_partition_t *p = phantom_create_virtio_partition_struct( cfg.capacity, &vdev );
    (void) p;

#if 0
    errno_t ret = phantom_register_disk_drive(p);
    if( ret )
        SHOW_ERROR( 0, "Can't register VirtIO drive: %d", ret );
#endif

    return dev;
}
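/*
 * For reference (standard PS/2 mouse protocol, not taken from this source):
 * a 3 byte movement packet looks like
 *
 *   byte 0:  Yovf Xovf Ysign Xsign 1 MB RB LB
 *   byte 1:  X movement (low 8 bits of a 9 bit two's complement delta)
 *   byte 2:  Y movement (low 8 bits of a 9 bit two's complement delta)
 *
 * which is why the handler below resynchronizes on bit 3 of byte 0, keeps
 * the low three bits as button state and masks 0x10/0x20 as the X/Y sign bits.
 */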
static void ps2ms_int_handler( void *arg )
{
    (void) arg;

    static int inbytepos = 0;

    signed char mousedata = inb( PS2_DATA_ADDR );

    SHOW_FLOW( 10, "%2X ", mousedata & 0xFFu );

    switch(inbytepos)
    {
    case 0:
        // first byte has one in this pos
        if(1 && ! (0x8 & mousedata) )
        {
            //inbytepos = -1; break;
            inbytepos = 0;
            return;
        }
        ps2ms_state_buttons = 0x7 & mousedata;
        xsign = 0x10 & mousedata;
        ysign = 0x20 & mousedata;
        break;

    case 1:
        xval = mousedata;
        break;

    case 2:
        yval = mousedata;
        break;

    case 3:
        break;

    case 4:
        break;
    }

    inbytepos++;
    inbytepos %= 3;
    //inbytepos %= 4;

    if(inbytepos != 0)
        return;

    xval = insert_bit9( xval, xsign );
    yval = insert_bit9( yval, ysign );

    ps2ms_state_xpos += xval;
    ps2ms_state_ypos += yval;

    if( ps2ms_state_xpos < 0 ) ps2ms_state_xpos = 0;
    if( ps2ms_state_ypos < 0 ) ps2ms_state_ypos = 0;

    if( ps2ms_state_xpos > video_drv->xsize ) ps2ms_state_xpos = video_drv->xsize;
    if( ps2ms_state_ypos > video_drv->ysize ) ps2ms_state_ypos = video_drv->ysize;

    //printf("ms %d %d %x\n", ps2ms_state_xpos, ps2ms_state_ypos, ps2ms_state_buttons );

    if(NULL != video_drv)
    {
        video_drv->mouse_x = ps2ms_state_xpos;
        video_drv->mouse_y = ps2ms_state_ypos;

        struct ui_event e;

        e.type = UI_EVENT_TYPE_MOUSE;
        e.time = fast_time();
        e.focus = 0;

        e.m.buttons = ps2ms_state_buttons;
        e.abs_x = ps2ms_state_xpos;
        e.abs_y = ps2ms_state_ypos;

        put_buf(&e);
        hal_sem_release( &mouse_sem );
    }
}
bool send_command(ide_device_info *device, ide_qrequest *qrequest,
    bool need_drdy, uint32 timeout, ide_bus_state new_state)
{
    ide_bus_info *bus = device->bus;
    bigtime_t irq_disabled_at = 0; // make compiler happy
    uint8 num_retries = 0;
    bool irq_guard;

retry:
    irq_guard = bus->num_running_reqs > 1;

    SHOW_FLOW(3, "qrequest=%p, request=%p", qrequest,
        qrequest ? qrequest->request : NULL);

    // if there are pending requests, IRQs must be disabled to
    // not mix up IRQ reasons
    // XXX can we avoid that with the IDE_LOCK trick? It would
    // save some work and the bug workaround!
    if (irq_guard) {
        if (bus->controller->write_device_control(bus->channel_cookie,
                ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
            goto err;

        irq_disabled_at = system_time();
    }

    // select device
    if (bus->controller->write_command_block_regs(bus->channel_cookie,
            &device->tf, ide_mask_device_head) != B_OK)
        goto err;

    bus->active_device = device;

    if (!ide_wait(device, 0, ide_status_bsy | ide_status_drq, false, 50000)) {
        uint8 status;

        SHOW_FLOW0(1, "device is not ready");

        status = bus->controller->get_altstatus(bus->channel_cookie);
        if (status == 0xff) {
            // there is no device (should happen during detection only)
            SHOW_FLOW0(1, "there is no device");

            // device detection recognizes this code as "all hope lost", so
            // neither replace it nor use it anywhere else
            device->subsys_status = SCSI_TID_INVALID;
            return false;
        }

        // reset device and retry
        if (reset_device(device, qrequest) && ++num_retries <= MAX_FAILED_SEND) {
            SHOW_FLOW0(1, "retrying");
            goto retry;
        }

        SHOW_FLOW0(1, "giving up");

        // reset too often - abort request
        device->subsys_status = SCSI_SEL_TIMEOUT;
        return false;
    }

    if (need_drdy
        && (bus->controller->get_altstatus(bus->channel_cookie) & ide_status_drdy) == 0) {
        SHOW_FLOW0(3, "drdy not set");
        device->subsys_status = SCSI_SEQUENCE_FAIL;
        return false;
    }

    // write parameters
    if (bus->controller->write_command_block_regs(bus->channel_cookie,
            &device->tf, device->tf_param_mask) != B_OK)
        goto err;

    if (irq_guard) {
        // IRQ may be fired by service requests and by the process of disabling(!)
        // them (I heard this is caused by edge triggered PCI IRQs)
        // wait at least 50 µs to catch all pending irq's
        // (at my system, up to 30 µs elapsed)
        // additionally, old drives (at least my IBM-DTTA-351010) lose
        // sync if they are pushed too hard - on heavy overlapped write
        // stress this drive tends to forget outstanding requests,
        // waiting at least 50 µs seems(!) to solve this
        while (system_time() - irq_disabled_at < MAX_IRQ_DELAY)
            spin(1);
    }

    // if we will start waiting once the command is sent, we have to
    // lock the bus before sending; this way, IRQs that are fired
    // shortly before/after sending of command are delayed until the
    // command is really sent (start_waiting unlocks the bus) and then
    // the IRQ handler can check safely whether the IRQ really signals
    // finishing of command or not by testing the busy-signal of the device
    if (new_state != ide_state_accessing) {
        IDE_LOCK(bus);
    }

    if (irq_guard) {
        // now it's clear why IRQs get fired, so we can enable them again
        if (bus->controller->write_device_control(bus->channel_cookie,
                ide_devctrl_bit3) != B_OK)
            goto err1;
    }

    // write command code - this will start the actual command
    SHOW_FLOW(3, "Writing command 0x%02x", (int)device->tf.write.command);
    if (bus->controller->write_command_block_regs(bus->channel_cookie,
            &device->tf, ide_mask_command) != B_OK)
        goto err1;

    // start waiting now; also un-blocks IRQ handler (see above)
    if (new_state != ide_state_accessing)
        start_waiting(bus, timeout, new_state);

    return true;

err1:
    if (timeout > 0) {
        bus->state = ide_state_accessing;
        IDE_UNLOCK(bus);
    }

err:
    device->subsys_status = SCSI_HBA_ERR;
    return false;
}
ssize_t rtl8169_rx(rtl8169 *r, char *buf, ssize_t buf_len)
{
    //uint32 tail;
    ssize_t len;
    int rc;
    bool release_sem = false;

    SHOW_FLOW0(3, "rtl8169_rx: entry\n");

    if(buf_len < 1500)
        return -1;

restart:
    hal_sem_acquire(&r->rx_sem);
    mutex_lock(&r->lock);

    int_disable_interrupts();
    acquire_spinlock(&r->reg_spinlock);

    /* look at the descriptor pointed to by rx_idx_free */
    if (r->rxdesc[r->rx_idx_free].flags & RTL_DESC_OWN) {
        /* for some reason it's owned by the card, wait for more packets */
        release_spinlock(&r->reg_spinlock);
        int_restore_interrupts();
        mutex_unlock(&r->lock);
        goto restart;
    }

    /* process this packet */
    len = r->rxdesc[r->rx_idx_free].frame_len & 0x3fff;
    SHOW_FLOW(3, "rtl8169_rx: desc idx %d: len %d\n", r->rx_idx_free, len);

    if (len > buf_len) {
        rc = ERR_TOO_BIG;
        release_sem = true;
        goto out;
    }

    memcpy(buf, RXBUF(r, r->rx_idx_free), len);
    rc = len;

#if debug_level_flow >= 3
    hexdump(RXBUF(r, r->rx_idx_free), len, 0, 0);
#endif

    /* stick it back in the free list */
    r->rxdesc[r->rx_idx_free].buffer_size = BUFSIZE_PER_FRAME;
    r->rxdesc[r->rx_idx_free].flags = (r->rxdesc[r->rx_idx_free].flags & RTL_DESC_EOR) | RTL_DESC_OWN;
    inc_rx_idx_free(r);

    /* see if there are more packets pending */
    if ((r->rxdesc[r->rx_idx_free].flags & RTL_DESC_OWN) == 0)
        release_sem = true; // if so, release the rx sem so the next reader gets a shot

out:
    release_spinlock(&r->reg_spinlock);
    int_restore_interrupts();

    if(release_sem)
        hal_sem_release(&r->rx_sem);

    mutex_unlock(&r->lock);

    return rc;
}
static void common_thread_init(phantom_thread_t *t, int stacksize )
{
    //t->thread_flags = 0;
    t->priority = THREAD_PRIO_NORM;

    t->cpu_id = GET_CPU_ID();

#if CONF_NEW_CTTY
    t_make_ctty( t );
#else
    if( 0 == t->ctty )
        t->ctty = wtty_init( WTTY_SMALL_BUF );
#endif

    // malloc uses mutex, so we have to use physalloc which is protected with spinlocks
    physaddr_t pa;

    t->stack_size = stacksize;
    //t->stack = calloc( 1, stacksize );
    hal_pv_alloc( &pa, &(t->stack), stacksize+PAGE_SIZE );
    hal_page_control( pa, t->stack, page_unmap, page_noaccess ); // poor man's guard page - TODO support in page fault
    t->stack_pa = pa;

    SHOW_FLOW( 5, "main stk va %p pa %p", t->stack, (void *)pa );
    //assert(t->stack != 0);

    t->kstack_size = stacksize;
    //t->kstack = calloc( 1, stacksize );
    hal_pv_alloc( &pa, &(t->kstack), stacksize+PAGE_SIZE );
    hal_page_control( pa, t->kstack, page_unmap, page_noaccess ); // poor man's guard page - TODO support in page fault
    t->kstack_pa = pa;

    SHOW_FLOW( 5, "kern stk va %p pa %p", t->kstack, (void *)pa );

#if ARCH_mips
    // On mips we need an unmapped kernel stack, for mapping on MIPS is
    // done with exceptions too and an unmapped stack faults forever.
    // We achieve this by setting the stack virtual address to its
    // physical address | 0x80000000 - this virt mem area is direct
    // mapped to physmem at 0
    assert( (addr_t)phystokv(t->kstack_pa) > 0x80000000 );
    assert( (addr_t)phystokv(t->kstack_pa) < 0xC0000000 );

    t->kstack_top = phystokv(t->kstack_pa) + t->kstack_size - 4; // Why -4?
#else
    t->kstack_top = t->kstack + t->kstack_size - 4; // Why -4?
#endif

    //assert(t->kstack != 0);

    t->owner = 0;
    //t->u = 0;
    t->pid = NO_PID;
    t->thread_flags = 0;

    t->waitcond = 0;

    hal_spin_init( &(t->waitlock));

    queue_init(&(t->chain));
    queue_init(&(t->runq_chain));

    t->sw_unlock = 0;

    t->preemption_disabled = 0;
}
/*! Transfer virtually continuous data */
static inline status_t transfer_PIO_virtcont(ide_device_info *device,
    uint8 *virtualAddress, int length, bool write, int *transferred)
{
    ide_bus_info *bus = device->bus;
    ide_controller_interface *controller = bus->controller;
    void *channel_cookie = bus->channel_cookie;

    if (write) {
        // if there is a byte left from last chunk, transmit it together
        // with the first byte of the current chunk (IDE requires 16 bits
        // to be transmitted at once)
        if (device->has_odd_byte) {
            uint8 buffer[2];

            buffer[0] = device->odd_byte;
            buffer[1] = *virtualAddress++;

            controller->write_pio(channel_cookie, (uint16 *)buffer, 1, false);

            --length;
            *transferred += 2;
        }

        controller->write_pio(channel_cookie, (uint16 *)virtualAddress,
            length / 2, false);

        // take care if chunk size was odd, which means that 1 byte remains
        virtualAddress += length & ~1;
        *transferred += length & ~1;

        device->has_odd_byte = (length & 1) != 0;

        if (device->has_odd_byte)
            device->odd_byte = *virtualAddress;
    } else {
        // if we read one byte too much last time, push it into current chunk
        if (device->has_odd_byte) {
            *virtualAddress++ = device->odd_byte;
            --length;
        }

        SHOW_FLOW(4, "Reading PIO to %p, %d bytes", virtualAddress, length);

        controller->read_pio(channel_cookie, (uint16 *)virtualAddress,
            length / 2, false);

        // take care of odd chunk size;
        // in this case we read 1 byte too few!
        virtualAddress += length & ~1;
        *transferred += length & ~1;

        device->has_odd_byte = (length & 1) != 0;

        if (device->has_odd_byte) {
            uint8 buffer[2];

            // now read the missing byte; as we have to read 2 bytes at once,
            // we'll read one byte too much
            controller->read_pio(channel_cookie, (uint16 *)buffer, 1, false);

            *virtualAddress = buffer[0];
            device->odd_byte = buffer[1];

            *transferred += 2;
        }
    }

    return B_OK;
}
/*! Check result of request
 *  1. check SCSI subsystem problems
 *  2. if request hit device, check SCSI status
 *  3. if request got executed, check sense
 */
err_res periph_check_error(scsi_periph_device_info *device, scsi_ccb *request)
{
    SHOW_FLOW(4, "%d", request->subsys_status & SCSI_SUBSYS_STATUS_MASK);

    switch (request->subsys_status & SCSI_SUBSYS_STATUS_MASK) {
        // everything is ok
        case SCSI_REQ_CMP:
            return MK_ERROR(err_act_ok, B_OK);

        // no device
        case SCSI_LUN_INVALID:
        case SCSI_TID_INVALID:
        case SCSI_PATH_INVALID:
        case SCSI_DEV_NOT_THERE:
        case SCSI_NO_HBA:
            SHOW_ERROR0(2, "No device");
            return MK_ERROR(err_act_fail, B_DEV_BAD_DRIVE_NUM);

        // device temporarily unavailable
        case SCSI_SEL_TIMEOUT:
        case SCSI_BUSY:
        case SCSI_SCSI_BUSY:
        case SCSI_HBA_ERR:
        case SCSI_MSG_REJECT_REC:
        case SCSI_NO_NEXUS:
        case SCSI_FUNC_NOTAVAIL:
        case SCSI_RESRC_UNAVAIL:
            // take a deep breath and hope device becomes ready
            snooze(1000000);
            return MK_ERROR(err_act_retry, B_DEV_TIMEOUT);

        // data transmission went wrong
        case SCSI_DATA_RUN_ERR:
        case SCSI_UNCOR_PARITY:
            SHOW_ERROR0(2, "Data transmission failed");
            // retry immediately
            return MK_ERROR(err_act_retry, B_DEV_READ_ERROR);

        // request broken
        case SCSI_REQ_INVALID:
            SHOW_ERROR0(2, "Invalid request");
            return MK_ERROR(err_act_fail, B_ERROR);

        // request aborted
        case SCSI_REQ_ABORTED:
        case SCSI_SCSI_BUS_RESET:
        case SCSI_REQ_TERMIO:
        case SCSI_UNEXP_BUSFREE:
        case SCSI_BDR_SENT:
        case SCSI_CMD_TIMEOUT:
        case SCSI_IID_INVALID:
        case SCSI_UNACKED_EVENT:
        case SCSI_IDE:
        case SCSI_SEQUENCE_FAIL:
            // take a small breath and retry
            snooze(100000);
            return MK_ERROR(err_act_retry, B_DEV_TIMEOUT);

        // device error
        case SCSI_REQ_CMP_ERR:
            return check_scsi_status(device, request);

        // device error, but we don't know what happened
        case SCSI_AUTOSENSE_FAIL:
            SHOW_ERROR0(2, "Auto-sense failed, don't know what really happened");
            return MK_ERROR(err_act_fail, B_ERROR);

        // should not happen, give up
        case SCSI_BUS_RESET_DENIED:
        case SCSI_PROVIDE_FAIL:
        case SCSI_UA_TERMIO:
        case SCSI_CDB_RECVD:
        case SCSI_LUN_ALLREADY_ENAB:
            // supposed to fall through

        default:
            return MK_ERROR(err_act_fail, B_ERROR);
    }
}
int rtl8169_init(rtl8169 *r)
{
    bigtime_t time;
    int err = -1;
    addr_t temp;
    int i;

    SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

    r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region",
        (void **)&r->virt_base, REGION_ADDR_ANY_ADDRESS, r->phys_size,
        LOCK_KERNEL|LOCK_RW, r->phys_base);
    if(r->region < 0) {
        SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
        err = -1;
        goto err;
    }
    SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

    /* create regions for tx and rx descriptors */
    r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc",
        (void **)&r->rxdesc, REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN,
        REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->rxdesc_phys = vtophys(r->rxdesc);
    SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);

    r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc",
        (void **)&r->txdesc, REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN,
        REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->txdesc_phys = vtophys(r->txdesc);
    SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);

    r->reg_spinlock = 0;

    /* create a large tx and rx buffer for the descriptors to point to */
    r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf",
        (void **)&r->rxbuf, REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME,
        REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
    r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf",
        (void **)&r->txbuf, REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME,
        REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);

    /* create a receive sem */
    r->rx_sem = sem_create(0, "rtl8169 rx_sem");

    /* transmit sem */
    r->tx_sem = sem_create(1, "rtl8169 tx_sem");

    /* reset the chip */
    time = system_time();
    RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
    do {
        thread_snooze(10000); // 10ms
        if(system_time() - time > 1000000) {
            break;
        }
    } while(RTL_READ_8(r, REG_CR) & (1<<4));

    /* read in the mac address */
    r->mac_addr[0] = RTL_READ_8(r, REG_IDR0);
    r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
    r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
    r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
    r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
    r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);

    SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
        r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
        r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

    /* some voodoo from BSD driver */
    RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
    RTL_SETBITS_16(r, REG_CCR, 0x3);

    /* mask all interrupts */
    RTL_WRITE_16(r, REG_IMR, 0);

    /* set up the tx/rx descriptors */
    rtl8169_setup_descriptors(r);

    /* enable tx/rx */
    RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

    /* set up the rx state */
    /* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
    RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0));
    RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
    RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

    /* set up the tx state */
    RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
    RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

    /* set up the interrupt handler */
    int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");

    /* clear all pending interrupts */
    RTL_WRITE_16(r, REG_ISR, 0xffff);

    /* unmask interesting interrupts */
    RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

    return 0;

err1:
    vm_delete_region(vm_get_kernel_aspace_id(), r->region);
err:
    return err;
}
int fat_rmdir(fs_cookie _fs, fs_vnode _base_dir, const char *name)
{
    SHOW_FLOW(3, "fs %p, dir %p, name '%s'", _fs, _base_dir, name);

    return ERR_NOT_IMPLEMENTED;
}
static status_t get_set_volume(cd_driver_info *info, scsi_volume *volume, bool set)
{
    scsi_cmd_mode_sense_6 cmd;
    scsi_mode_param_header_6 header;
    size_t len;
    void *buffer;
    scsi_modepage_audio *page;
    status_t res;

    TRACE("get_set_volume()\n");

    // determine size of block descriptor
    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode = SCSI_OP_MODE_SENSE_6;
    cmd.page_code = SCSI_MODEPAGE_AUDIO;
    cmd.page_control = SCSI_MODE_SENSE_PC_CURRENT;
    cmd.allocation_length = sizeof(header);

    memset(&header, -2, sizeof(header));

    res = sSCSIPeripheral->simple_exec(info->scsi_periph_device,
        &cmd, sizeof(cmd), &header, sizeof(header), SCSI_DIR_IN);
    if (res != B_OK)
        return res;

    TRACE("  block_desc_len=%d", header.block_desc_length);
#if 0
    // ToDo: why this??
    return B_ERROR;
#endif

    // retrieve param header, block descriptor and actual codepage
    len = sizeof(header) + header.block_desc_length
        + sizeof(scsi_modepage_audio);

    buffer = malloc(len);
    if (buffer == NULL)
        return B_NO_MEMORY;

    memset(buffer, -1, len);

    cmd.allocation_length = len;

    res = sSCSIPeripheral->simple_exec(info->scsi_periph_device,
        &cmd, sizeof(cmd), buffer, len, SCSI_DIR_IN);
    if (res != B_OK) {
        free(buffer);
        return res;
    }

    TRACE("  mode_data_len=%d, block_desc_len=%d",
        ((scsi_mode_param_header_6 *)buffer)->mode_data_length,
        ((scsi_mode_param_header_6 *)buffer)->block_desc_length);

    // find control page and retrieve values
    page = (scsi_modepage_audio *)((char *)buffer + sizeof(header)
        + header.block_desc_length);

    TRACE("  page=%p, codepage=%d", page, page->header.page_code);

    if (!set) {
        volume->port0_channel = page->ports[0].channel;
        volume->port0_volume  = page->ports[0].volume;
        volume->port1_channel = page->ports[1].channel;
        volume->port1_volume  = page->ports[1].volume;
        volume->port2_channel = page->ports[2].channel;
        volume->port2_volume  = page->ports[2].volume;
        volume->port3_channel = page->ports[3].channel;
        volume->port3_volume  = page->ports[3].volume;

#if 0
        SHOW_FLOW(3, "1: %d - %d", volume->port0_channel, volume->port0_volume);
        SHOW_FLOW(3, "2: %d - %d", volume->port1_channel, volume->port1_volume);
        SHOW_FLOW(3, "3: %d - %d", volume->port2_channel, volume->port2_volume);
        SHOW_FLOW(3, "4: %d - %d", volume->port3_channel, volume->port3_volume);
#endif

        res = B_OK;
    } else {
        scsi_cmd_mode_select_6 cmd;

        if (volume->flags & 0x01)
            page->ports[0].channel = volume->port0_channel;
        if (volume->flags & 0x02)
            page->ports[0].volume = volume->port0_volume;
        if (volume->flags & 0x04)
            page->ports[1].channel = volume->port1_channel;
        if (volume->flags & 0x08)
            page->ports[1].volume = volume->port1_volume;
        if (volume->flags & 0x10)
            page->ports[2].channel = volume->port2_channel;
        if (volume->flags & 0x20)
            page->ports[2].volume = volume->port2_volume;
        if (volume->flags & 0x40)
            page->ports[3].channel = volume->port3_channel;
        if (volume->flags & 0x80)
            page->ports[3].volume = volume->port3_volume;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = SCSI_OP_MODE_SELECT_6;
        cmd.pf = 1;
        cmd.param_list_length = sizeof(header) + header.block_desc_length
            + sizeof(*page);

        res = sSCSIPeripheral->simple_exec(info->scsi_periph_device,
            &cmd, sizeof(cmd), buffer, len, SCSI_DIR_OUT);
    }

    free(buffer);
    return res;
}
/*! Execute SCSI command */
void ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
{
    scsi_ccb *request = qrequest->request;

    SHOW_FLOW(3, "command=%x", request->cdb[0]);

    // ATA devices have one LUN only
    if (request->target_lun != 0) {
        request->subsys_status = SCSI_SEL_TIMEOUT;
        finish_request(qrequest, false);
        return;
    }

    // starting a request means deleting sense, so don't do it if
    // the command wants to read it
    if (request->cdb[0] != SCSI_OP_REQUEST_SENSE)
        start_request(device, qrequest);

    switch (request->cdb[0]) {
        case SCSI_OP_TEST_UNIT_READY:
            ata_test_unit_ready(device, qrequest);
            break;

        case SCSI_OP_REQUEST_SENSE:
            ide_request_sense(device, qrequest);
            return;

        case SCSI_OP_FORMAT: /* FORMAT UNIT */
            // we could forward request to disk, but modern disks cannot
            // be formatted anyway, so we just refuse request
            // (exceptions are removable media devices, but to my knowledge
            // they don't have to be formatted as well)
            set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
            break;

        case SCSI_OP_INQUIRY:
            ata_inquiry(device, qrequest);
            break;

        case SCSI_OP_MODE_SELECT_10:
            ata_mode_select_10(device, qrequest);
            break;

        case SCSI_OP_MODE_SENSE_10:
            ata_mode_sense_10(device, qrequest);
            break;

        case SCSI_OP_MODE_SELECT_6:
        case SCSI_OP_MODE_SENSE_6:
            // we've told the SCSI bus manager to emulate these commands
            set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
            break;

        case SCSI_OP_RESERVE:
        case SCSI_OP_RELEASE:
            // though mandatory, this doesn't make much sense in a
            // single initiator environment; so what
            set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
            break;

        case SCSI_OP_START_STOP: {
            scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;

            // with no LoEj bit set, we should only allow/deny further access
            // we ignore that (unsupported for ATA)
            // with LoEj bit set, we should additionally either load or eject the medium
            // (start = 0 - eject; start = 1 - load)

            if (!cmd->start)
                // we must always flush cache if start = 0
                ata_flush_cache(device, qrequest);

            if (cmd->load_eject)
                ata_load_eject(device, qrequest, cmd->start);

            break;
        }

        case SCSI_OP_PREVENT_ALLOW: {
            scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;

            ata_prevent_allow(device, cmd->prevent);
            break;
        }

        case SCSI_OP_READ_CAPACITY:
            read_capacity(device, qrequest);
            break;

        case SCSI_OP_VERIFY:
            // does anyone use this function?
            // effectively, it does a read-and-compare, which IDE doesn't support
            set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
            break;

        case SCSI_OP_SYNCHRONIZE_CACHE:
            // we ignore range and immediate bit, we always immediately flush everything
            ata_flush_cache(device, qrequest);
            break;

        // sadly, there are two possible read/write operation codes;
        // at least, the third one, read/write(12), is not valid for DAS
        case SCSI_OP_READ_6:
        case SCSI_OP_WRITE_6: {
            scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
            uint32 pos;
            size_t length;

            pos = ((uint32)cmd->high_lba << 16) | ((uint32)cmd->mid_lba << 8)
                | (uint32)cmd->low_lba;

            length = cmd->length != 0 ? cmd->length : 256;

            SHOW_FLOW(3, "READ6/WRITE6 pos=%lx, length=%lx", pos, length);

            ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_6);
            return;
        }

        case SCSI_OP_READ_10:
        case SCSI_OP_WRITE_10: {
            scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
            uint32 pos;
            size_t length;

            pos = B_BENDIAN_TO_HOST_INT32(cmd->lba);
            length = B_BENDIAN_TO_HOST_INT16(cmd->length);

            if (length != 0) {
                ata_send_rw(device, qrequest, pos, length,
                    cmd->opcode == SCSI_OP_WRITE_10);
            } else {
                // we cannot transfer zero blocks (apart from LBA48)
                finish_request(qrequest, false);
            }
            return;
        }

        default:
            set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
    }

    finish_checksense(qrequest);
}
static int rtl8169_init(rtl8169 *r)
{
    //bigtime_t time;
    int err = -1;
    //addr_t temp;
    //int i;

    hal_mutex_init(&r->lock, DEBUG_MSG_PREFIX);

    SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

    /*
    r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region",
        (void **)&r->virt_base, REGION_ADDR_ANY_ADDRESS, r->phys_size,
        LOCK_KERNEL|LOCK_RW, r->phys_base);
    if(r->region < 0) {
        SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
        err = -1;
        goto err;
    }*/

    size_t n_pages = BYTES_TO_PAGES(r->phys_size);

    hal_alloc_vaddress( (void **)&r->virt_base, n_pages); // alloc address of a page, but not memory
    hal_pages_control_etc( r->phys_base, (void *)r->virt_base, n_pages, page_map_io, page_rw, 0 );

    SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

#if 0
    /* create regions for tx and rx descriptors */
    r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc",
        (void **)&r->rxdesc, REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN,
        REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->rxdesc_phys = vtophys(r->rxdesc);
    SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);

    r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc",
        (void **)&r->txdesc, REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN,
        REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->txdesc_phys = vtophys(r->txdesc);
    SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);

    r->reg_spinlock = 0;

    /* create a large tx and rx buffer for the descriptors to point to */
    r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf",
        (void **)&r->rxbuf, REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME,
        REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
    r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf",
        (void **)&r->txbuf, REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME,
        REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
#endif

    hal_pv_alloc( &r->rxdesc_phys, (void**)&r->rxdesc, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN );
    hal_pv_alloc( &r->txdesc_phys, (void**)&r->txdesc, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN );

    SHOW_INFO(2, "rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    SHOW_INFO(2, "tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);

    hal_pv_alloc( &r->rxbuf_phys, (void**)&r->rxbuf, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME );
    hal_pv_alloc( &r->txbuf_phys, (void**)&r->txbuf, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME );

    /* create a receive sem */
    hal_sem_init( &r->rx_sem, "rtl8169 rx_sem");

    /* transmit sem */
    hal_sem_init( &r->tx_sem, "rtl8169 tx_sem");

    /* reset the chip */
    int repeats = 100;
    RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
    do {
        hal_sleep_msec(10); // 10ms
        if(repeats-- <= 0)
            break;
    } while(RTL_READ_8(r, REG_CR) & (1<<4));

    /* read in the mac address */
    r->mac_addr[0] = RTL_READ_8(r, REG_IDR0);
    r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
    r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
    r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
    r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
    r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);

    SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
        r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
        r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

    /* some voodoo from BSD driver */
    RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
    RTL_SETBITS_16(r, REG_CCR, 0x3);

    /* mask all interrupts */
    RTL_WRITE_16(r, REG_IMR, 0);

    /* set up the tx/rx descriptors */
    rtl8169_setup_descriptors(r);

    /* enable tx/rx */
    RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

    /* set up the rx state */
    /* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
    RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0));
    RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
    RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

    /* set up the tx state */
    RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
    RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

    /* set up the interrupt handler */
    //int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");
    if(hal_irq_alloc( r->irq, &rtl8169_int, r, HAL_IRQ_SHAREABLE ))
    {
        SHOW_ERROR( 0, "unable to allocate irq %d", r->irq );
        goto err1;
    }

    /* clear all pending interrupts */
    RTL_WRITE_16(r, REG_ISR, 0xffff);

    /* unmask interesting interrupts */
    RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

    return 0;

err1:
    // TODO free what?
    //vm_delete_region(vm_get_kernel_aspace_id(), r->region);
//err:
    return err;
}
errno_t name2ip( in_addr_t *out, const char *name, int flags )
{
    int ia, ib, ic, id;

    if( 4 == sscanf( name, "%d.%d.%d.%d", &ia, &ib, &ic, &id ) )
    {
        // No resolver required, ip4 addr given
        ipv4_addr iaddr = IPV4_DOTADDR_TO_ADDR( ia, ib, ic, id );
        *out = htonl( iaddr );
        SHOW_FLOW( 2, "parsed %s to %s", name, inet_ntoa(* (struct in_addr*)out) );
        return 0;
    }

    if(!inited)
        return ENXIO;

    int tries = 20;
    if(flags & RESOLVER_FLAG_NORETRY)
        tries = 1;

    ipv4_addr result;
    //ipv4_addr next_servers[MAX_DNS_SERVERS];

    SHOW_FLOW( 1, "request '%s'", name );

    ipv4_addr *sptr = servers;
    int sleft = MAX_DNS_SERVERS;

    if( !(flags & RESOLVER_FLAG_NORCACHE) )
        if( lookup_cache( out, name ) == 0 )
        {
            SHOW_FLOW0( 1, "got from cache");
            return 0;
        }

    // On OS stop don't produce network traffic
    if( (flags & RESOLVER_FLAG_NOWAIT) || phantom_stop_level )
        return ESRCH;

    while(tries--)
    {
        ipv4_addr server = *sptr++;

        if(sleft-- <= 0 || server == 0)
        {
            SHOW_ERROR0( 1, "No more places to look in, give up\n");
            return ENOENT;
        }

        SHOW_FLOW( 2, "look in %s", inet_ntoa(* (struct in_addr*)&server) );

        errno_t res = dns_request( (const unsigned char *)name, server, &result );

        if( res == 0 ) //|| result != 0 )
        {
            SHOW_FLOW( 1, "answer is %s", inet_ntoa(* (struct in_addr*)&result) );
            *out = result;

            if( !(flags & RESOLVER_FLAG_NOWCACHE) )
                store_to_cache( result, name );

            return 0;
        }
    }

    return ENOENT;
}
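/*
 * Usage sketch (illustrative only): resolve a host name; the dotted-quad
 * fast path above is taken when a literal address is passed instead.
 *
 *   in_addr_t ip;
 *   if( 0 == name2ip( &ip, "example.com", 0 ) )
 *       SHOW_FLOW( 1, "resolved to %s", inet_ntoa( *(struct in_addr *)&ip ) );
 */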
// add request to device queue, using elevator sort
static void scsi_insert_new_request( scsi_device_info *device,
    scsi_ccb *new_request )
{
    scsi_ccb *first, *last, *before, *next;

    SHOW_FLOW( 3, "inserting new_request=%p, pos=%Ld", new_request, new_request->sort );

    first = device->queued_reqs;

    if( first == NULL ) {
        SHOW_FLOW0( 1, "no other queued request" );
        scsi_add_req_queue_first( new_request );
        return;
    }

    SHOW_FLOW( 3, "first=%p, pos=%Ld, last_pos=%Ld",
        first, first->sort, device->last_sort );

    // don't let syncs bypass others
    if( new_request->ordered ) {
        SHOW_FLOW0( 1, "adding synced request to tail" );
        scsi_add_req_queue_last( new_request );
        return;
    }

    if( new_request->sort < 0 ) {
        SHOW_FLOW0( 1, "adding unsortable request to tail" );
        scsi_add_req_queue_last( new_request );
        return;
    }

    // to reduce head seek time, we have two goals:
    // - sort requests ascendingly according to head position,
    //   as disks tend to read ahead and not backwards
    // - ordered accesses can neither get overtaken by nor overtake other requests
    //
    // in general, we only have the block position, so head, track or
    // whatever specific optimizations can only be done by the disk's
    // firmware;
    //
    // thus, sorting is done ascendingly with only a few exceptions:
    // - if the position of the request to be inserted is between the current
    //   (i.e. last) position and the position of the first queued request,
    //   insert it as first queue entry; i.e. we get descending order
    // - if the position of the first queued request is before the current position
    //   and the position of the new req is before the first queued request, add it
    //   as first queue entry; i.e. the new and the (previously) first
    //   request are sorted monotonically increasing
    //
    // the first exception should help if the queue is short (not sure
    // whether this actually hurts if we have a long queue), the
    // second one maximizes monotonic ranges
    last = first->prev;

    if( (device->last_sort <= new_request->sort &&
         new_request->sort <= first->sort) ||
        (first->sort < device->last_sort &&
         new_request->sort <= first->sort) )
    {
        // these are the exceptions described above
        SHOW_FLOW0( 3, "trying to insert req at head of device req queue" );

        // we should have a new first request, make sure we don't bypass syncs
        for( before = last; !before->ordered; ) {
            before = before->prev;
            if( before == last )
                break;
        }

        if( !before->ordered ) {
            SHOW_FLOW0( 1, "scheduled request in front of all other reqs of device" );
            scsi_add_req_queue_first( new_request );
            return;
        } else
            SHOW_FLOW0( 1, "req would bypass ordered request" );
    }

    // the insertion sort loop ignores the ordered flag of the last request,
    // so check that here
    if( last->ordered ) {
        SHOW_FLOW0( 1, "last entry is ordered, adding new request as last" );
        scsi_add_req_queue_last( new_request );
        return;
    }

    SHOW_FLOW0( 3, "performing insertion sort" );

    // insertion sort starts with last entry to avoid unnecessary overtaking
    for( before = last->prev, next = last;
         before != last && !before->ordered;
         next = before, before = before->prev )
    {
        if( before->sort <= new_request->sort && new_request->sort <= next->sort )
            break;
    }

    // if we bumped into an ordered request, append new request at tail
    if( before->ordered ) {
        SHOW_FLOW0( 1, "overtaking ordered request in sorting - adding as last" );
        scsi_add_req_queue_last( new_request );
        return;
    }

    SHOW_FLOW( 1, "inserting after %p (pos=%Ld) and before %p (pos=%Ld)",
        before, before->sort, next, next->sort );

    // if we haven't found a proper position, we automatically insert
    // new request as last because the request list is circular;
    // don't check whether we added the request as first as this is impossible
    new_request->next = next;
    new_request->prev = before;
    next->prev = new_request;
    before->next = new_request;
}
static void com_interrupt( void *_dev )
{
    phantom_device_t * dev = _dev;

    int unit = dev->seq_number;
    int addr = dev->iobase;
    com_port_t *cp = dev->drv_private;

    (void) unit;

    SHOW_FLOW( 9, "com port %d interrupt", unit );

    //register struct tty *tp = &com_tty[unit];
    //static char comoverrun = 0;
    //char c, line, intr_id;
    char intr_id;
    //int modem_stat;
    int line_stat;

    while (! ((intr_id=(inb(INTR_ID(addr))&MASKi)) & 1))
    {
        switch (intr_id)
        {
        case MODi:
            /* modem change */
            //int ms = inb(MODEM_STAT(addr));
            //commodem_intr(unit, ms);
            break;

        case TRAi:
            hal_sem_release( &(cp->wsem) );
            //comtimer_state[unit] = 0;
            //tp->t_state &= ~(TS_BUSY|TS_FLUSH);
            //tt_write_wakeup(tp);
            //(void) comstart(tp);
            break;

        case RECi:
        case CTIi: /* Character timeout indication */
            hal_sem_release( &(cp->rsem) );
            break;

        case LINi:
            line_stat = inb(LINE_STAT(addr));
            (void) line_stat;
#if 0
            if ((line_stat & iPE) &&
                ((tp->t_flags&(EVENP|ODDP)) == EVENP ||
                 (tp->t_flags&(EVENP|ODDP)) == ODDP))
            {
                /* parity error */;
            }
            else if (line&iOR && !comoverrun)
            {
                printf("com%d: overrun\n", unit);
                comoverrun = 1;
            }
            else if (line_stat & (iFE | iBRKINTR))
            {
                /* framing error or break */
                ttyinput(tp->t_breakc, tp);
            }
#endif
            break;
        }
    }
}