// Synchronous block I/O on a partition.
//
// Wraps the partition's asynchronous I/O entry point (p->asyncIo) in a
// sleep/wakeup protocol: the request is flagged flag_sleep, submitted, and
// the calling thread blocks until the completion path wakes it.
//
// p        - partition to do I/O on
// to       - caller's buffer (virtual address), nBlocks * p->block_size bytes
// blockNo  - starting block number on the partition
// nBlocks  - number of blocks to transfer
// isWrite  - nonzero: write from 'to' to disk; zero: read into 'to'
//
// Returns 0 on success, the submission error from p->asyncIo, or the
// request completion code rq.rc.
static errno_t startSync( phantom_disk_partition_t *p, void *to, long blockNo, int nBlocks, int isWrite )
{
    // NOTE(review): a bounce buffer of nBlocks * block_size bytes is
    // allocated below; presumably this guards the assumption that one
    // block fits in a page — TODO confirm why PAGE_SIZE is the limit.
    assert( p->block_size < PAGE_SIZE );
    SHOW_FLOW( 3, "blk %d", blockNo );

    pager_io_request rq;
    pager_io_request_init( &rq );

    // NOTE(review): overwritten by hal_pv_alloc() below, so this does
    // look redundant — kept as-is.
    rq.phys_page = (physaddr_t)phystokv(to); // why? redundant?
    rq.disk_page = blockNo;
    rq.blockNo = blockNo;
    rq.nSect = nBlocks;
    rq.rc = 0;

    if(isWrite)
        rq.flag_pageout = 1;
    else
        rq.flag_pagein = 1;

    STAT_INC_CNT(STAT_CNT_BLOCK_SYNC_IO);
    STAT_INC_CNT( STAT_CNT_DISK_Q_SIZE ); // Will decrement on io done

    // Bounce buffer: the controller does DMA to/from freshly allocated
    // physical memory, which is copied to/from the caller's buffer here.
    void *va;
    hal_pv_alloc( &rq.phys_page, &va, nBlocks * p->block_size );

    errno_t ret = EINVAL;

    if(isWrite)
        memcpy( va, to, nBlocks * p->block_size );

    // Disable interrupts and take the request lock BEFORE submitting, so
    // the completion path can't wake us before thread_block() sleeps.
    int ei = hal_save_cli();
    hal_spin_lock(&(rq.lock));
    rq.flag_sleep = 1; // Don't return until done
    rq.sleep_tid = GET_CURRENT_THREAD()->tid;

    SHOW_FLOW0( 3, "start io" );

    if( (ret = p->asyncIo( p, &rq )) )
    {
        // Submission failed: undo the sleep marking, drop the lock, and
        // fall through to the common cleanup path with asyncIo's error.
        rq.flag_sleep = 0;
        hal_spin_unlock(&(rq.lock));
        if( ei ) hal_sti();
        //return ret;
        goto ret;
    }

    // NOTE(review): thread_block() presumably releases rq.lock and sleeps
    // until the io-done path wakes sleep_tid — confirm against its impl.
    thread_block( THREAD_SLEEP_IO, &(rq.lock) );
    SHOW_FLOW0( 3, "unblock" );
    if( ei ) hal_sti();

    if(!isWrite)
        memcpy( to, va, nBlocks * p->block_size );

    ret = rq.rc;

    //return partAsyncIo( p, &rq );
    //return p->asyncIo( p, rq );

ret:
    // Common exit: free the bounce buffer on both success and failure.
    hal_pv_free( rq.phys_page, va, nBlocks * p->block_size );
    return ret;
}
// One-time profiler setup: allocate the sample map, mark the profiler
// ready, and register the "profile" debugger command that dumps it.
static void profiler_init(void)
{
    // The physical address is not kept; only the mapped virtual address
    // (the global 'map') is used afterwards.
    physaddr_t map_pa;
    hal_pv_alloc( &map_pa, (void **)&map, PROFILER_MAP_SIZE*sizeof(profiler_entry_t) );

    profiler_inited = 1;

    dbg_add_command(&profiler_dump_map_cmd, "profile", "Display current profiler output");
}
// Lazily allocate the one-page I/O buffer backing this request.
// Idempotent: repeated calls after the first are no-ops.
void disk_page_io_allocate(disk_page_io *me)
{
    if( !me->mem_allocated )
    {
        // Fills in both the request's physical page and the virtual mapping.
        hal_pv_alloc( &(me->req.phys_page), &(me->mem), PAGE_SIZE );
        me->mem_allocated = 1;
    }
}
// Called per each CPU except for boot one. void phantom_import_cpu_thread(int ncpu) { assert(threads_inited); // No malloc on new CPU before thread is imported! Malloc has mutex! physaddr_t pa; // unused phantom_thread_t *t; // = calloc(1, sizeof(phantom_thread_t)); hal_pv_alloc( &pa, (void **)&t, sizeof(phantom_thread_t) ); memset( t, 0, sizeof(phantom_thread_t) ); // Can't be run yet t->sleep_flags = THREAD_SLEEP_LOCKED; t->tid = find_tid(t); t->start_func_arg = 0; t->start_func = 0; common_thread_init(t, DEF_STACK_SIZE ); phantom_thread_state_init(t); t->thread_flags |= THREAD_FLAG_UNDEAD; t->thread_flags |= THREAD_FLAG_NOSCHEDULE; // Let it be elegible to run t->sleep_flags &= ~THREAD_SLEEP_LOCKED; //GET_CURRENT_THREAD() = t; SET_CURRENT_THREAD(t); char *name = calloc(1, 20); snprintf( name, 20, "CPU %d idle", ncpu ); t_current_set_name(name); free(name); t->priority = THREAD_PRIO_IDLE; GET_IDLEST_THREAD() = t; }
// Initialize an RTL8169 NIC instance: map its registers, allocate DMA
// descriptor rings and frame buffers, reset the chip, read the MAC
// address, program rx/tx state and hook the interrupt handler.
//
// r - driver instance with phys_base/phys_size/irq already filled in
//     by PCI probe (presumably — TODO confirm against the probe code).
//
// Returns 0 on success, -1 (err) on failure.
static int rtl8169_init(rtl8169 *r)
{
    //bigtime_t time;
    int err = -1;
    //addr_t temp;
    //int i;

    hal_mutex_init(&r->lock,DEBUG_MSG_PREFIX);

    SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

    /*
    r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region", (void **)&r->virt_base,
        REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
    if(r->region < 0) {
        SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
        err = -1;
        goto err;
    }*/

    // Map the device register window: reserve a virtual range, then map
    // it onto the device's physical base as uncached I/O pages.
    size_t n_pages = BYTES_TO_PAGES(r->phys_size);
    hal_alloc_vaddress( (void **)&r->virt_base, n_pages); // alloc address of a page, but not memory
    hal_pages_control_etc( r->phys_base, (void *)r->virt_base, n_pages, page_map_io, page_rw, 0 );

    SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

#if 0
    /* create regions for tx and rx descriptors */
    r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc", (void **)&r->rxdesc,
        REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->rxdesc_phys = vtophys(r->rxdesc);
    SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc", (void **)&r->txdesc,
        REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->txdesc_phys = vtophys(r->txdesc);
    SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
    r->reg_spinlock = 0;
    /* create a large tx and rx buffer for the descriptors to point to */
    r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf", (void **)&r->rxbuf,
        REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
    r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf", (void **)&r->txbuf,
        REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
#endif

    // DMA memory for descriptor rings and frame buffers: hal_pv_alloc
    // returns both the physical address (programmed into the chip) and
    // the kernel virtual mapping used by the driver.
    hal_pv_alloc( &r->rxdesc_phys, (void**)&r->rxdesc, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN );
    hal_pv_alloc( &r->txdesc_phys, (void**)&r->txdesc, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN );

    SHOW_INFO(2, "rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    SHOW_INFO(2, "tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);

    hal_pv_alloc( &r->rxbuf_phys, (void**)&r->rxbuf, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME );
    hal_pv_alloc( &r->txbuf_phys, (void**)&r->txbuf, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME );

    /* create a receive sem */
    hal_sem_init( &r->rx_sem, "rtl8169 rx_sem");

    /* transmit sem */
    hal_sem_init( &r->tx_sem, "rtl8169 tx_sem");

    /* reset the chip */
    // Poll the self-clearing reset bit, giving up after ~1s (100 * 10ms).
    // NOTE(review): a timeout here is silently ignored — init proceeds
    // even if the chip never leaves reset.
    int repeats = 100;
    RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
    do {
        hal_sleep_msec(10); // 10ms
        if(repeats -- <= 0 )
            break;
    } while(RTL_READ_8(r, REG_CR) & (1<<4));

    /* read in the mac address */
    r->mac_addr[0] = RTL_READ_8(r, REG_IDR0);
    r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
    r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
    r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
    r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
    r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);

    SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
        r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
        r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

    /* some voodoo from BSD driver */
    RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
    RTL_SETBITS_16(r, REG_CCR, 0x3);

    /* mask all interrupts */
    RTL_WRITE_16(r, REG_IMR, 0);

    /* set up the tx/rx descriptors */
    rtl8169_setup_descriptors(r);

    /* enable tx/rx */
    RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

    /* set up the rx state */
    /* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
    RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0));
    RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
    RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

    /* set up the tx state */
    RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
    RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

    /* set up the interrupt handler */
    //int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");
    if(hal_irq_alloc( r->irq, &rtl8169_int, r, HAL_IRQ_SHAREABLE ))
    {
        SHOW_ERROR( 0, "unable to allocate irq %d", r->irq );
        goto err1;
    }

    /* clear all pending interrupts */
    RTL_WRITE_16(r, REG_ISR, 0xffff);

    /* unmask interesting interrupts */
    RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

    return 0;

err1:
    // TODO free what?
    // NOTE(review): DMA memory and the register mapping leak on this path.
    //vm_delete_region(vm_get_kernel_aspace_id(), r->region);
//err:
    return err;
}
// Common part of thread construction: fills in default scheduling fields,
// allocates user and kernel stacks (each with a leading guard page), and
// initializes wait/queue state.
//
// t         - zeroed/fresh thread structure to initialize
// stacksize - size of each stack (user and kernel), guard page not included
static void common_thread_init(phantom_thread_t *t, int stacksize )
{
    //t->thread_flags = 0;
    t->priority = THREAD_PRIO_NORM;

    t->cpu_id = GET_CPU_ID();

#if CONF_NEW_CTTY
    t_make_ctty( t );
#else
    if( 0 == t->ctty ) t->ctty = wtty_init( WTTY_SMALL_BUF );
#endif

    // malloc uses mutex, so we have to use physalloc which is protected with spinlocks
    physaddr_t pa;

    t->stack_size = stacksize;
    //t->stack = calloc( 1, stacksize );
    // Allocate one extra page and unmap the first one so that running off
    // the bottom of the stack faults instead of corrupting memory.
    hal_pv_alloc( &pa, &(t->stack), stacksize+PAGE_SIZE );
    hal_page_control( pa, t->stack, page_unmap, page_noaccess ); // poor man's guard page - TODO support in page fault
    t->stack_pa = pa;
    SHOW_FLOW( 5, "main stk va %p pa %p", t->stack, (void *)pa );
    //assert(t->stack != 0);

    t->kstack_size = stacksize;
    //t->kstack = calloc( 1, stacksize );
    // Kernel stack gets the same extra-page + unmap guard treatment.
    hal_pv_alloc( &pa, &(t->kstack), stacksize+PAGE_SIZE );
    hal_page_control( pa, t->kstack, page_unmap, page_noaccess ); // poor man's guard page - TODO support in page fault
    t->kstack_pa = pa;
    SHOW_FLOW( 5, "kern stk va %p pa %p", t->kstack, (void *)pa );

#if ARCH_mips
    // On mips we need unmapped kernel stack for mapping on MIPS is
    // done with exceptions too and unmapped stack is fault forever.
    // We achieve this by setting stack virtual address to its
    // physical address | 0x8000000 - this virt mem area is direct
    // mapped to physmem at 0
    assert( (addr_t)phystokv(t->kstack_pa) > 0x80000000 );
    assert( (addr_t)phystokv(t->kstack_pa) < 0xC0000000 );
    t->kstack_top = phystokv(t->kstack_pa) +t->kstack_size-4; // Why -4?
#else
    t->kstack_top = t->kstack+t->kstack_size-4; // Why -4?
#endif
    //assert(t->kstack != 0);

    t->owner = 0;
    //t->u = 0;
    t->pid = NO_PID;
    t->thread_flags = 0;;

    t->waitcond = 0;

    hal_spin_init( &(t->waitlock));

    queue_init(&(t->chain));
    queue_init(&(t->runq_chain));

    t->sw_unlock = 0;
    t->preemption_disabled = 0;
}
// Probe/attach the (single) VirtIO disk device.
//
// pci   - PCI config of the candidate device
// stage - probe stage, currently unused
//
// Returns a freshly allocated phantom_device_t on success, 0 if the device
// is already claimed, the virtio probe fails, or allocation fails.
phantom_device_t *driver_virtio_disk_probe( pci_cfg_t *pci, int stage )
{
    (void) stage;

    // Single static vdev instance - only one disk supported for now.
    if(vdev.pci)
    {
        printf("Just one drv instance yet\n");
        return 0;
    }

    vdev.interrupt = driver_virtio_disk_interrupt;
    vdev.name = "VirtIODisk0";

    // Say we need it. Not sure, really, that we do. :)
    vdev.guest_features = VIRTIO_BLK_F_BARRIER;

    if( virtio_probe( &vdev, pci ) )
        return 0;

    //u_int8_t status = virtio_get_status( &vdev );
    //printf("Status is: 0x%x\n", status );

    SHOW_FLOW( 1, "Features are: %b", vdev.host_features, "\020\1BARRIER\2SIZE_MAX\3SEG_MAX\5GEOM\6RDONLY\7BLK_SIZE" );

    rodisk = vdev.host_features & (1<<VIRTIO_BLK_F_RO);
    if(rodisk) SHOW_FLOW0( 1, "Disk is RDONLY");

    SHOW_FLOW( 1, "Registered at IRQ %d IO 0x%X", vdev.irq, vdev.basereg );

    // FIX: the original malloc() result was dereferenced without a NULL
    // check. Use calloc so unset fields are zeroed, and bail out on OOM.
    phantom_device_t * dev = (phantom_device_t *)calloc(1, sizeof(phantom_device_t));
    if( 0 == dev )
    {
        SHOW_ERROR0( 0, "out of memory allocating phantom_device_t" );
        return 0;
    }

    dev->name = "VirtIO Disk";
    dev->seq_number = seq_number++;
    dev->drv_private = &vdev;

    virtio_set_status( &vdev, VIRTIO_CONFIG_S_DRIVER );

    struct virtio_blk_config cfg;
    virtio_get_config_struct( &vdev, &cfg, sizeof(cfg) );

    // capacity is in 512-byte sectors, hence /2048 for megabytes.
    SHOW_FLOW( 1, "VIRTIO disk size is %d Mb", cfg.capacity/2048 );

    virtio_set_status( &vdev, VIRTIO_CONFIG_S_DRIVER|VIRTIO_CONFIG_S_DRIVER_OK );

#if 0
    printf("Will write to disk\n");
    //getchar();
    static char test[512] = "Hello virtio disk";
    physaddr_t pa;
    void *va;
    hal_pv_alloc( &pa, &va, sizeof(test) );
    strlcpy( va, test, sizeof(test) );
    driver_virtio_disk_write( &vdev, pa, sizeof(test), 0, 0 );
    printf("Write to disk requested\n");
    //getchar();
#endif

    phantom_disk_partition_t *p = phantom_create_virtio_partition_struct( cfg.capacity, &vdev );
    (void) p;
#if 0
    errno_t ret = phantom_register_disk_drive(p);
    if( ret )
        SHOW_ERROR( 0, "Can't register VirtIO drive: %d", ret );
#endif

    return dev;
}
// Self-test for physical memory copy primitives (memcpy_p2v / memcpy_v2p).
//
// Allocates an MSIZE phys/virt pair, then verifies that copies through the
// physical address show up at the virtual mapping (and vice versa), at
// offset 0, at small offsets, and across a page boundary. The ordering of
// memset/copy/check triples is significant — each stage checks both the
// written range and that the surrounding bytes stayed untouched.
//
// test_parm - unused
// Returns 0 on success; failures abort via test_fail_msg().
int do_test_physmem(const char *test_parm)
{
    (void) test_parm;

#if !defined(ARCH_arm)
    void *va;
    physaddr_t pa;
    char buf[MSIZE];

    hal_pv_alloc( &pa, &va, MSIZE );

    test_check_true( va != 0 );
    test_check_true( pa != 0 );

    // phys -> virt: zeroed region must read back as zeroes.
    memset( va, 0, MSIZE );
    memcpy_p2v( buf, pa, MSIZE );
    if( memnotchar( buf, 0, MSIZE ) ) test_fail_msg( EINVAL, "not 0");

    // virt -> phys: full-size copy must be visible via the mapping.
    memset( buf, 0xFF, MSIZE );
    memcpy_v2p( pa, buf, MSIZE );
    if( memnotchar( va, 0xFF, MSIZE ) ) test_fail_msg( EINVAL, "not 1");

    // Short copy at offset 0: written bytes change, the rest stays zero.
    memset( va, 0, MSIZE );
    memcpy_v2p( pa, "AAA", 3 );
    if( memnotchar( va, 'A', 3 ) ) test_fail_msg( EINVAL, "not A");
    if( memnotchar( va+3, 0, MSIZE-3 ) ) test_fail_msg( EINVAL, "not A0");

    // Short copy at a small unaligned offset.
    memset( va, 0, MSIZE );
    memcpy_v2p( pa+10, "BBB", 3 );
    if( memnotchar( va+10, 'B', 3 ) ) test_fail_msg( EINVAL, "not B");
    if( memnotchar( va, 0, 10 ) ) test_fail_msg( EINVAL, "not B0-");
    if( memnotchar( va+13, 0, MSIZE-13 ) ) test_fail_msg( EINVAL, "not B0+");

    // Cross page
    // Copy straddling a 4K page boundary (4 bytes before, 4 after).
    memset( va, 0, MSIZE );
#define SH (4096-4)
    memcpy_v2p( pa+SH, "EEEEEEEE", 8 );
    if( memnotchar( va+SH, 'E', 8 ) ) test_fail_msg( EINVAL, "not E");
    if( memnotchar( va, 0, SH ) ) test_fail_msg( EINVAL, "not E0-");
    if( memnotchar( va+SH+8, 0, MSIZE-SH-8 ) ) test_fail_msg( EINVAL, "not E0+");

#if 0
    // not impl
    // Offset phys -> virt reads are not implemented yet.
    memset( va, 0, MSIZE );
    memset( va+20, 'C', 3 );
    memcpy_p2v( buf, pa+20, 3 );
    if( memnotchar( buf, 'C', 3 ) ) test_fail_msg( EINVAL, "not C");
#endif

    hal_pv_free( pa, va, MSIZE );
#endif //!defined(ARCH_arm)
    return 0;
}