Example #1
void
ata_dpc_DMA(ide_qrequest *qrequest)
{
	ide_device_info *device = qrequest->device;
	bool dma_success, dev_err;

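	// finish_dma() tells us whether the DMA engine completed the transfer;
	// check_rw_error() additionally inspects the device's own error status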
	dma_success = finish_dma(device);
	dev_err = check_rw_error(device, qrequest);

	if (dma_success && !dev_err) {
		// reset error count if DMA worked
		device->DMA_failures = 0;
		device->CQ_failures = 0;
		qrequest->request->data_resid = 0;
		finish_checksense(qrequest);
	} else {
		SHOW_ERROR0( 2, "Error in DMA transmission" );

		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);

		if (++device->DMA_failures >= MAX_DMA_FAILURES) {
			SHOW_ERROR0( 2, "Disabled DMA because of too many errors" );
			device->DMA_enabled = false;
		}

		// reset queue in case queuing is active
		finish_reset_queue(qrequest);
	}
}
Example #2
static status_t createGARTBuffer( GART_info *gart, size_t size )
{
	physical_entry map[1];
	void *unaligned_addr, *aligned_phys;

	SHOW_FLOW0( 3, "" );

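	// round the requested size up to a whole number of pages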
	gart->buffer.size = size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// we allocate a contiguous area of twice the requested size
	// so we can find an aligned, contiguous range within it;
	// the graphics card doesn't care, but the CPU cannot
	// make an arbitrary area write-combined, at least older CPUs can't
	// question: is this necessary for a PCI GART because of bus snooping?
	gart->buffer.unaligned_area = create_area( "Radeon PCI GART buffer",
		&unaligned_addr, B_ANY_KERNEL_ADDRESS,
		2 * size, B_CONTIGUOUS/*B_FULL_LOCK*/, B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA );
		// TODO: Physical aligning can be done without waste using the
		// private create_area_etc().
	if (gart->buffer.unaligned_area < 0) {
		SHOW_ERROR( 1, "cannot create PCI GART buffer (%s)",
			strerror( gart->buffer.unaligned_area ));
		return gart->buffer.unaligned_area;
	}

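	// look up the physical address of the first page of the unaligned allocation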
	get_memory_map( unaligned_addr, B_PAGE_SIZE, map, 1 );

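	// round the physical address up to the next size-aligned boundary
	// (the mask trick assumes 'size' is a power of two)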
	aligned_phys =
		(void **)((map[0].address + size - 1) & ~(size - 1));

	SHOW_FLOW( 3, "aligned_phys=%p", aligned_phys );

	gart->buffer.area = map_physical_memory( "Radeon aligned PCI GART buffer",
		(addr_t)aligned_phys,
		size, B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr );

	if( gart->buffer.area < 0 ) {
		SHOW_ERROR0( 3, "cannot map buffer with WC" );
		gart->buffer.area = map_physical_memory( "Radeon aligned PCI GART buffer",
			(addr_t)aligned_phys,
			size, B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr );
	}

	if( gart->buffer.area < 0 ) {
		SHOW_ERROR0( 1, "cannot map GART buffer" );
		delete_area( gart->buffer.unaligned_area );
		gart->buffer.unaligned_area = -1;
		return gart->buffer.area;
	}

	memset( gart->buffer.ptr, 0, size );

	return B_OK;
}
Example #3
errno_t wtty_putc_nowait(wtty_t *w, int c)
{
    int ret = 0;

    assert(w);
    if(!w->started) return EPIPE;

    hal_mutex_lock(&w->mutex);
    wtty_wrap(w);

    SHOW_FLOW( 11, "wtty putc %p", w );

    if( _wtty_is_full(w) )
    {
        SHOW_ERROR0( 10, "wtty putc fail" );
        ret = ENOMEM;
    }
    else
    {
        wtty_doputc(w, c);
    }

    hal_mutex_unlock(&w->mutex);
    return ret;
}
Example #4
static int ps2ms_do_init( void )
{


    int tries = 10000;
    // Purge buffer
    while( tries-- > 0 && inb( PS2_CTRL_ADDR ) & 0x01 )
        inb( PS2_DATA_ADDR );

    if( tries <= 0 ) goto notfound;

    ps2ms_send_cmd(PS2_CMD_DEV_INIT);

    // hangs
    //ps2ms_send_aux(PS2_CMD_RESET_MOUSE);    ps2ms_aux_wait_ack(); // ignore result

    ps2ms_send_aux(PS2_CMD_ENABLE_MOUSE);
    if( ps2ms_aux_wait_ack() ) goto notfound;

    return 0;

notfound:
    SHOW_ERROR0( 1, "PS/2 mouse not found\n" );
    return ENXIO;

}
ipv4_addr ngethostbyname(unsigned char *host)
{
    int tries = 20;

    ipv4_addr 	result = 0;   // initialized so the check below never reads garbage
    //ipv4_addr 	next_servers[MAX_DNS_SERVERS];

    ipv4_addr *	sptr = servers;
    int         sleft = MAX_DNS_SERVERS;

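    // ask each configured DNS server in turn until one answers or we run out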
    while(tries--)
    {

        ipv4_addr 	server = *sptr++;

        if(sleft-- <= 0 || server == 0)
        {
            SHOW_ERROR0( 1, "No more places to look in, give up");
            return 0;
        }


        SHOW_FLOW( 2, "look in %s", inet_ntoa(* (struct in_addr*)&server) );
        errno_t res = dns_request(host, server, &result );

        if( res == 0 || result != 0 )
        {
            SHOW_FLOW( 2, "answer is %s", inet_ntoa(* (struct in_addr*)&result) );
            return result;
        }
    }

    return 0;

}
static int rtl8169_rxint(rtl8169 *r, uint16 int_status)
{
    int rc = INT_NO_RESCHEDULE;

    if (int_status & (IMR_ROK|IMR_RER)) {
        int i;

        /* see how many frames we got, adjust our index */
        i = 0;
        while ((r->rxdesc[r->rx_idx_full].flags & RTL_DESC_OWN) == 0) {
            i++;
            inc_rx_idx_full(r);
            if (r->rx_idx_full == r->rx_idx_free) {
                /* we just used up the last descriptor */
                SHOW_ERROR0(1, "rtl8169_rxint: used up last descriptor, chip is gonna blow.\n");
                /* XXX deal with this somehow */
                break; // no more frames left on the ring
            }
        }
        SHOW_FLOW(3, "rxint: got %d frames, idx_full = %d, idx_free = %d\n", i, r->rx_idx_full, r->rx_idx_free);

        if (i > 0) {
//TODO #warning SEM_FLAG_NO_RESCHED
            //hal_sem_release_etc( &r->rx_sem, 1, SEM_FLAG_NO_RESCHED);
            hal_sem_release( &r->rx_sem );
            rc = INT_RESCHEDULE;
        }
    }

    return rc;
}
Example #7
bool
create_temp_sg(scsi_ccb *ccb)
{
	physical_entry *temp_sg;
	status_t res;

	SHOW_FLOW(3, "ccb=%p, data=%p, data_length=%" B_PRIu32, ccb, ccb->data,
		ccb->data_length);

	ccb->sg_list = temp_sg = (physical_entry*)locked_pool->alloc(temp_sg_pool);
	if (temp_sg == NULL) {
		SHOW_ERROR0(2, "cannot allocate memory for IO request!");
		return false;
	}

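	// pin the caller's buffer in memory so the controller can DMA to or from it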
	res = lock_memory(ccb->data, ccb->data_length, B_DMA_IO
		| ((ccb->flags & SCSI_DIR_MASK) == SCSI_DIR_IN ? B_READ_DEVICE : 0));

	if (res != B_OK) {
		SHOW_ERROR(2, "cannot lock memory for IO request (%s)", strerror(res));
		goto err;
	}

	if (fill_temp_sg(ccb))
		// this is the success path
		return true;

	unlock_memory(ccb->data, ccb->data_length, B_DMA_IO
		| ((ccb->flags & SCSI_DIR_MASK) == SCSI_DIR_IN ? B_READ_DEVICE : 0));

err:
	locked_pool->free(temp_sg_pool, temp_sg);
	return false;
}
Example #8
static errno_t threads_test()
{

    hal_cond_init(&c, "threadTest");
    hal_mutex_init(&m, "threadTest");
    hal_sem_init(&s, "threadTest");

    int i = 40;
    n_t_empty = i;
    while(i-- > 0)
        phantom_create_thread( t_empty, "Empty", 0 );

    pressEnter("will create thread");
    phantom_create_thread( thread1, "__T1__", 0 );
    phantom_create_thread( thread1, "__T2__", 0 );
    //phantom_create_thread( thread1, "__T3__" );

    //phantom_create_thread( t_wait, "__TW__" );
    int tid = hal_start_kernel_thread_arg( t_wait, "__TW__" );

    i = 40;
    while(i-- > 0)
    {
        if(TEST_CHATTY) pressEnter("will yield");
        YIELD();

        if(TEST_CHATTY) printf("!! back in main\n");
    }

    t_kill_thread( tid );
    hal_sleep_msec( 30 );

    thread_stop_request = 1;
    hal_sleep_msec( 10 );

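    // zero the counter and sample it after a delay; any activity in that
    // window means the thread ignored the stop request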
    thread_activity_counter = 0;
    hal_sleep_msec( 1000 );
    if( thread_activity_counter )
    {
        SHOW_ERROR0( 0, "Can't stop thread" );
        return -1;
    }

    while(n_t_empty > 0)
    {
        SHOW_FLOW( 0, "wait for %d threads", n_t_empty );
        hal_sleep_msec(500);
    }

    if(p._ah.refCount != 1)
    {
        SHOW_ERROR( 0, "p._ah.refCount = %d", p._ah.refCount );
        test_fail_msg( -1, "refcount" );
    }
    else
        SHOW_ERROR( 0, "p._ah.refCount = %d, SUCCESS", p._ah.refCount );

    return 0;
}
errno_t plain_lzma_decode( void *dest, size_t *dest_len, void *src, size_t *src_len, int logLevel )
{
    Byte 		propData[5];
    ELzmaStatus 	status;

    SRes rc = LzmaDecode( dest, dest_len, src, src_len,
                          propData, sizeof(propData), LZMA_FINISH_END,
                          &status, &alloc);

    switch(rc)
    {
    case SZ_OK: break;

    case SZ_ERROR_DATA:
        SHOW_ERROR0( logLevel, "Broken data" );
        return EFTYPE;

    case SZ_ERROR_MEM:
        SHOW_ERROR0( logLevel, "Out of mem" );
        return ENOMEM;

    case SZ_ERROR_UNSUPPORTED:
        SHOW_ERROR0( logLevel, "Unsupported props" );
        return EINVAL;

    case SZ_ERROR_INPUT_EOF:
        SHOW_ERROR0( logLevel, "Premature data end" );
        return ENOSPC;
    }


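    // SZ_OK alone is not enough: the detailed status must still be checked
    // to catch input that ended prematurely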
    switch(status)
    {
    case LZMA_STATUS_FINISHED_WITH_MARK: break;

    case LZMA_STATUS_NOT_SPECIFIED: // impossible
    case LZMA_STATUS_NEEDS_MORE_INPUT:
    case LZMA_STATUS_NOT_FINISHED:
    case LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK:
        SHOW_ERROR0( logLevel, "Premature data end" );
        return ENOSPC;
    }

    return 0;
}
int rtl8169_detect(rtl8169 **rtl8169_list)
{
    unsigned int i, j;
    //pci_module_hooks *pci;
    //pci_info pinfo;
    rtl8169 *r;

    *rtl8169_list = NULL;
    if(module_get(PCI_BUS_MODULE_NAME, 0, (void **)(void *)&pci) < 0) {
        SHOW_INFO0(1, "rtl8169_detect: no pci bus found..\n");
        return -1;
    }

    for (i = 0; pci->get_nth_pci_info(i, &pinfo) >= NO_ERROR; i++) {
        for (j = 0; j < sizeof(match)/sizeof(match[0]); j++) {
            if (pinfo.vendor_id == match[j].vendor && pinfo.device_id == match[j].device) {
                // we found one
                SHOW_INFO(1, "rtl8169_detect: found device at pci %d:%d:%d\n", pinfo.bus, pinfo.device, pinfo.function);

                r = kmalloc(sizeof(rtl8169));
                if (r == NULL) {
                    SHOW_ERROR0(1, "rtl8169_detect: error allocating memory for rtl8169 structure\n");
                    continue;
                }

                memset(r, 0, sizeof(rtl8169));
                r->irq = pinfo.u.h0.interrupt_line;
                // find the memory-mapped base
                int range;
                for (range = 0; range < 6; range++) {
                    if (pinfo.u.h0.base_registers[range] > 0xffff) {
                        r->phys_base = pinfo.u.h0.base_registers[range];
                        r->phys_size = pinfo.u.h0.base_register_sizes[range];
                        break;
                    } else if (pinfo.u.h0.base_registers[range] > 0) {
                        r->io_port = pinfo.u.h0.base_registers[range];
                    }
                }
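                // no memory-mapped BAR was found - skip this device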
                if (r->phys_base == 0) {
                    kfree(r);
                    r = NULL;
                    continue;
                }

                SHOW_INFO(1, "detected rtl8169 at irq %d, memory base 0x%lx, size 0x%lx, io base 0x%lx\n", r->irq, r->phys_base, r->phys_size, r->io_port);

                // add it to the list
                r->next = *rtl8169_list;
                *rtl8169_list = r;
            }
        }
    }

    module_put(PCI_BUS_MODULE_NAME);

    return *rtl8169_list ? 0 : ERR_NOT_FOUND;
}
Example #11
static void checkEnterMutex()
{
    inmutex++;
    if(inmutex > 1)
    {
        SHOW_ERROR0( 0, "mutex reentered");
        test_fail( -1 );
    }
}
Example #12
static void dpc_serve2( void *arg )
{
    if( strcmp( arg, DPC_ARG2 ) )
    {
        SHOW_ERROR0( 0, "DPC 2 arg is wrong" );
        test_fail( -1 );
    }
    dpc2_triggered = 1;
}
Example #13
void trfs_process_received_data(trfs_queue_t *qe, trfs_fio_t *fio, void *data)
{
    u_int64_t firstReq = qe->fio.startSector;
    u_int64_t oneAfterReq = qe->fio.startSector + qe->fio.nSectors;

    u_int64_t firstIn = fio->startSector;
    u_int64_t oneAfterIn = fio->startSector + fio->nSectors;

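    // clip the received sector range to the range that was actually requested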
    if( oneAfterIn > oneAfterReq )
        oneAfterIn = oneAfterReq;

    if( firstIn < firstReq )
        firstIn = firstReq;

    if( firstIn >= oneAfterReq )
    {
        SHOW_ERROR0( 0, "TRFS: firstIn >= oneAfterReq" );
        return;
    }
    SHOW_FLOW0( 7, "got data" );

    int64_t _len = (int64_t) (oneAfterIn - firstIn);

    if( _len < 0 )
    {
        SHOW_ERROR0( 0, "TRFS: len < 0" );
        return;
    }

    if( _len > 32 )
    {
        SHOW_ERROR0( 0, "TRFS: len > 32");
        return;
    }

    int len = (int)_len*TRFS_SECTOR_SIZE;
    int shift = (int)(firstIn-firstReq)*TRFS_SECTOR_SIZE;

    // TODO crashes, fix
    memcpy_v2p( (qe->orig_request->phys_page) + shift, data, len );

}
Example #14
static void dpc_serve1( void *arg )
{
    if( strcmp( arg, DPC_ARG1 ) )
    {
        SHOW_ERROR0( 0, "DPC 1 arg is wrong" );
        test_fail( -1 );
    }

    dpc1_triggered = 1;
    hal_sleep_msec(2000);
}
Example #15
static void trfs_recv_thread(void *arg)
{
    (void) arg;

    u_int8_t    buf[TRFS_MAX_PKT];

    t_current_set_name("TRFS Recv");

    while(connect_trfs())
    {
        SHOW_ERROR0( 1, "Unable to connect" );
        hal_sleep_msec(20000);
        //return;
    }
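    // main receive loop: validate each packet, then dispatch on its type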
    while(1)
    {
        int rc;
        if( ( rc = trfs_recv( &buf, TRFS_MAX_PKT)) <= 0 )
        {
            SHOW_ERROR( 1, "recv err %d", rc );
        again:
            hal_sleep_msec( 100 ); // Against tight loop
            continue;
        }

        if( rc < (int)sizeof(trfs_pkt_t) )
        {
            SHOW_ERROR( 1, "recv pkt size %d < required %d", rc, (int)sizeof(trfs_pkt_t) );
            goto again;
        }

        trfs_pkt_t *rq = (trfs_pkt_t *)buf;

        SHOW_FLOW( 6, "got pkt type %d", rq->type );

        if(rq->sessionId != sessionId)
        {
            trfs_reset_session(rq->sessionId);
            if(rq->type != PKT_T_Error)
                continue;
        }

        switch(rq->type)
        {
        case PKT_T_Error:       recvError(rq);          break;
        case PKT_T_ReadReply:   recvReadReply(rq);      break;
        case PKT_T_FindReply:   recvFindReply(rq);      break;

        default:
            SHOW_ERROR( 0, "TRFS: unknown packet type %d", rq->type);

        }
    }
}
Example #16
void wtty_destroy(wtty_t * w)
{
    //wtty_stop(w); // not really a good idea
    // Check unstopped
    if( w->started ) SHOW_ERROR0( 0, "unstopped" );
    hal_mutex_destroy( &w->mutex );
    hal_cond_destroy( &w->rcond );
    hal_cond_destroy( &w->wcond );
#if CONF_WTTY_SIZE
    free(w->buf);
#endif
    free(w);
}
Example #17
void
periph_media_changed(scsi_periph_device_info *device, scsi_ccb *request)
{
	uint32 backup_flags;
	uint8 backup_cdb[SCSI_MAX_CDB_SIZE];
	uchar backup_cdb_len;
	int64 backup_sort;
	bigtime_t backup_timeout;
	uchar *backup_data;
	const physical_entry *backup_sg_list;
	uint16 backup_sg_count;
	uint32 backup_data_len;

	// if there is no hook, the driver doesn't handle removable devices
	if (!device->removable) {
		SHOW_ERROR0( 1, "Driver doesn't support medium changes, but one occurred!?" );
		return;
	}

	// when medium has changed, tell all handles
	periph_media_changed_public(device);

	// the peripheral driver may need a fresh ccb; sadly, we cannot allocate one,
	// as that could deadlock if all ccbs are already in use; thus, we have to
	// back up all relevant data of the current ccb and reuse it instead of a
	// new one - not pretty, but it works (and performance is not an issue in
	// this path)
	backup_flags = request->flags;
	memcpy(backup_cdb, request->cdb, SCSI_MAX_CDB_SIZE);
	backup_cdb_len = request->cdb_length;
	backup_sort = request->sort;
	backup_timeout = request->timeout;
	backup_data = request->data;
	backup_sg_list = request->sg_list;
	backup_sg_count = request->sg_count;
	backup_data_len = request->data_length;

	if (device->callbacks->media_changed != NULL)
		device->callbacks->media_changed(device->periph_device, request);

	request->flags = backup_flags;
	memcpy(request->cdb, backup_cdb, SCSI_MAX_CDB_SIZE);
	request->cdb_length = backup_cdb_len;
	request->sort = backup_sort;
	request->timeout = backup_timeout;
	request->data = backup_data;
	request->sg_list = backup_sg_list;
	request->sg_count = backup_sg_count;
	request->data_length = backup_data_len;
}
Example #18
// Create a file struct for given path
static uufile_t *  httpfs_namei(uufs_t *fs, const char *filename)
{
    int ip0, ip1, ip2, ip3, port;

    (void) fs;

    if( 5 != sscanf( filename, "%d.%d.%d.%d:%d", &ip0, &ip1, &ip2, &ip3, &port ) )
    {
        return 0;
    }

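    // build the peer address from the dotted quad and port we just parsed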
    sockaddr addr;
    addr.port = port;

    addr.addr.len = 4;
    addr.addr.type = ADDR_TYPE_IP;
    NETADDR_TO_IPV4(addr.addr) = IPV4_DOTADDR_TO_ADDR(ip0, ip1, ip2, ip3);

    struct uusocket *us = calloc(1, sizeof(struct uusocket));
    if(us == 0)  return 0;

    us->addr = addr;


    if( tcp_open(&(us->prot_data)) )
    {
        SHOW_ERROR0(0, "can't prepare endpoint");
fail:
        free(us);
        return 0;
    }

    if( tcp_connect( us->prot_data, &us->addr) )
    {
        SHOW_ERROR(0, "can't connect to %s", filename);
        goto fail;
    }


    uufile_t *ret = create_uufile();

    ret->ops = &httpfs_fops;

    ret->pos = 0;
    ret->fs = &http_fs;
    ret->impl = us;
    ret->flags = UU_FILE_FLAG_NET|UU_FILE_FLAG_TCP|UU_FILE_FLAG_OPEN; // TODO wrong! open in open!

    return ret;
}
Example #19
void activate_all_threads()
{
    int nthreads  = get_array_size(pvm_root.threads_list.data);

    if( nthreads == 0 )
        SHOW_ERROR0( 0, "There are 0 live threads in image, system must be dead :(" );

    SHOW_FLOW( 3, "Activating %d threads", nthreads);

    while(nthreads--)
    {
        struct pvm_object th =  pvm_get_array_ofield(pvm_root.threads_list.data, nthreads );
        pvm_check_is_thread( th );
        start_new_vm_thread( th );
    }

    all_threads_started = 1;
}
errno_t
acpi_reboot(void)
{
    ACPI_STATUS status;

    status = AcpiReset();
    if (status == AE_NOT_EXIST)
        return ENXIO;

    if (status != AE_OK) {
        SHOW_ERROR( 0, "Reset failed, status = %d", status );
        return ENXIO;
    }

    phantom_spinwait(10000); // 10 sec
    SHOW_ERROR0( 0, "Reset failed, timeout" );
    return ENXIO;
}
static int phantom_window_getc(void)
{
    //SHOW_FLOW0( 11, "window getc" );
    //wtty_t *tty = &(GET_CURRENT_THREAD()->ctty);
#if CONF_NEW_CTTY
    wtty_t *tty = GET_CURRENT_THREAD()->ctty_w;
#else
    wtty_t *tty = GET_CURRENT_THREAD()->ctty;
#endif
    if(tty == 0)
    {
        SHOW_ERROR0( 0, "No wtty, phantom_window_getc loops forever" );
        while(1)
            hal_sleep_msec(10000);
    }

    return wtty_getc( tty );
}
Example #22
phantom_device_t * driver_pl192_probe( int port, int irq, int stage )
{
    (void) irq;
    (void) stage;

    if(seq_number)
    {
        SHOW_ERROR0( 0, "Just one" );
        return 0;
    }

    SHOW_FLOW( 1, "probe @ %x", port );

    phantom_device_t * dev = malloc(sizeof(phantom_device_t));

    dev->iomem = port;
    dev->iomemsize = 0;
    dev->iobase = port;
    dev->irq = 0;

    if( check_pl192_sanity(dev->iomem) )
        goto free;

    dev->name = DEBUG_MSG_PREFIX;
    dev->seq_number = seq_number++;

    //pl192_t *es = calloc(1,sizeof(pl192_t));
    //assert(es);
    dev->drv_private = 0;

    if( init_pl192(dev) )
        goto free1;


    return dev;
free1:
    //free(es);

free:
    free(dev);
    return 0;
}
Example #23
static errno_t trfs_send(void *pkt, int pktsize)
{
    if(trfs_failed || !phantom_tcpip_active)
        return ENOTCONN;

    if(debug_level_flow >= 11) hexdump( pkt, pktsize, "TRFS send pkt", 0 );

    SHOW_FLOW0( 1, "sending" );
    int rc;
    if( 0 == (rc = udp_sendto(trfs_socket, pkt, pktsize, &trfs_addr)) )
        return 0;

    if(rc == ERR_NET_NO_ROUTE)
    {
        SHOW_ERROR0( 0, "No route" );
        return EHOSTUNREACH;
    }
    else
        SHOW_ERROR( 0, "can't send, rc = %d", rc);
    return EIO;
}
Example #24
void recvReadReply(trfs_pkt_t *rq)
{
    trfs_fio_t *fio = &(rq->readReply.info);
    void *data = rq->readReply.data;

    SHOW_FLOW( 2, "read reply for fid %d ioid %d nSect %d start %ld", fio->fileId, fio->ioId, fio->nSectors, fio->startSector);

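    // find the outstanding read request this reply belongs to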
    trfs_queue_t *qe = findRequest( fio, TRFS_QEL_TYPE_READ );
    if( qe == 0 )
    {
        SHOW_ERROR0( 0, "TRFS: No request for read reply");
        return;
    }

    trfs_process_received_data(qe, fio, data);
    trfs_mark_recombine_map(qe, fio->startSector, fio->nSectors);
    if( trfs_request_complete(qe) )
    {
        removeRequest(qe);
        trfs_signal_done(qe);
    }
}
Example #25
static void thread_death_handler( phantom_thread_t *t )
{
    //struct pvm_object current_thread = *((struct pvm_object *)arg);
    n_vm_threads--;

    printf("thread_death_handler called\n");

    pvm_object_storage_t *os = t->owner;
    if( os == 0 )
    {
        SHOW_ERROR0( 0, "!!! thread_death_handler - no pointer to Vm thread object!" );
        return;
    }

    assert( os->_ah.object_start_marker == PVM_OBJECT_START_MARKER );

    //struct data_area_4_thread * tda = ((struct data_area_4_thread *)&(os->da));


    remove_vm_thread_from_list(os);

}
Example #26
phantom_device_t * driver_etc_smbios_probe( const char *name, int stage )
{
    (void) stage;
    struct smbios_eps * ep = smbios_identify();

    if( seq_number || ep == 0 ) return 0;

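    // verify the entry point checksum before trusting the SMBIOS tables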
    if(smbios_cksum(ep))
    {
        SHOW_ERROR0( 0, "SMBios checksum failed");
        //error = ENXIO;
        return 0;
    }

    SHOW_INFO( 0, "SMBios Version: %u.%u",
               ep->SMBIOS_Major, ep->SMBIOS_Minor);

    if(bcd2bin(ep->SMBIOS_BCD_Revision))
        SHOW_INFO( 4, "SMBios BCD Revision: %u.%u",
                   bcd2bin(ep->SMBIOS_BCD_Revision >> 4),
                   bcd2bin(ep->SMBIOS_BCD_Revision & 0x0f));


    phantom_device_t * dev = malloc(sizeof(phantom_device_t));
    dev->name = name;
    dev->seq_number = seq_number++;
    dev->drv_private = ep;

    dev->dops.read = smbios_read;
    /*
    dev->dops.stop = beep_stop;

    dev->dops.write = beep_write;
    */

    return dev;
}
Example #27
static int connect_trfs(void)
{
    if( udp_open(&trfs_socket) )
    {
        SHOW_ERROR0( 0, "UDP trfs - can't open endpoint");
        return -1;
    }

    trfs_addr.port = TRFS_PORT; // local port to be the same

    trfs_addr.addr.len = 4;
    trfs_addr.addr.type = ADDR_TYPE_IP;
    NETADDR_TO_IPV4(trfs_addr.addr) = IPV4_DOTADDR_TO_ADDR(192, 168, 1, 106);

    int rc;
    if( 0 != (rc = udp_bind(trfs_socket, &trfs_addr)) )
        return rc;

    trfs_addr.port = TRFS_PORT; // Remote port

    SHOW_FLOW0( 1, "started" );

    return 0;
}
Example #28
static void remove_vm_thread_from_list(pvm_object_storage_t *os)
{
    // TODO check that it is a thread

    int nthreads  = get_array_size(pvm_root.threads_list.data);

    if( !nthreads )
        SHOW_ERROR0( 0, "There were 0 live threads in image, and some thread is dead. Now -1?" );

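    // replace every list slot that still references the dead thread's object
    // with a null object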
    int nkill = 0;
    while(nthreads--)
    {
        struct pvm_object th =  pvm_get_array_ofield(pvm_root.threads_list.data, nthreads );
        pvm_check_is_thread( th );
        if( th.data == os )
        {
            pvm_set_array_ofield(pvm_root.threads_list.data, nthreads, pvm_create_null_object() );
            nkill++;
        }
    }

    if(1 != nkill)
        printf("Nkill = %d\n", nkill);
}
phantom_device_t * driver_rtl_8169_probe( pci_cfg_t *pci, int stage )
{
    (void) stage;
    rtl8169 * nic = NULL;
    static int seq_number = 0;

    return 0; // behaves strangely on real hw, so the driver is disabled for now

    SHOW_FLOW0( 1, "probe" );

    //nic = rtl8169_new();
    nic = calloc(1, sizeof(rtl8169));
    if (nic == NULL)
    {
        SHOW_ERROR0( 0, "out of mem");
        return 0;
    }

    nic->irq = pci->interrupt;

    int i;
    for (i = 0; i < 6; i++)
    {
        if (pci->base[i] > 0xffff)
        {
            nic->phys_base = pci->base[i];
            nic->phys_size = pci->size[i];
            SHOW_INFO( 0,  "base 0x%lx, size 0x%lx", nic->phys_base, nic->phys_size );
        } else if( pci->base[i] > 0) {
            nic->io_port = pci->base[i];
            SHOW_INFO( 0,  "io_port 0x%x", nic->io_port );
        }
    }

#if 0
    SHOW_FLOW0( 1, "stop" );
    rtl8169_stop(nic);
    hal_sleep_msec(10);
#endif

    SHOW_FLOW0( 1, "init");
    if (rtl8169_init(nic) < 0)
    {
        SHOW_ERROR0( 0, "init failed");
        return 0;
    }



    phantom_device_t * dev = malloc(sizeof(phantom_device_t));
    dev->name = "rtl8169";
    dev->seq_number = seq_number++;
    dev->drv_private = nic;

    dev->dops.read = rtl8169_read;
    dev->dops.write = rtl8169_write;
    dev->dops.get_address = rtl8169_get_address;

    ifnet *interface;
    if( if_register_interface( IF_TYPE_ETHERNET, &interface, dev) )
    {
        SHOW_ERROR( 0,  "Failed to register interface for %s", dev->name );
    }
    else
    {
        if_simple_setup( interface, WIRED_ADDRESS, WIRED_NETMASK, WIRED_BROADCAST, WIRED_NET, WIRED_ROUTER, DEF_ROUTE_ROUTER );
    }

    return dev;

}
Example #30
// init GATT (could be used for both PCI and AGP)
static status_t initGATT( GART_info *gart )
{
	area_id map_area;
	uint32 map_area_size;
	physical_entry *map;
	physical_entry PTB_map[1];
	size_t map_count;
	uint32 i;
	uint32 *gatt_entry;
	size_t num_pages;

	SHOW_FLOW0( 3, "" );

	num_pages = (gart->buffer.size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	// GART must be contiguous
	gart->GATT.area = create_area("Radeon GATT", (void **)&gart->GATT.ptr,
		B_ANY_KERNEL_ADDRESS,
		(num_pages * sizeof( uint32 ) + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1),
		B_32_BIT_CONTIGUOUS,
			// TODO: Physical address is cast to 32 bit below! Use B_CONTIGUOUS,
			// when that is (/can be) fixed!
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
		// TODO: really user read/write?
		B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA
#else
		0
#endif
		);

	if (gart->GATT.area < 0) {
		SHOW_ERROR(1, "cannot create GATT table (%s)",
			strerror(gart->GATT.area));
		return gart->GATT.area;
	}

	get_memory_map(gart->GATT.ptr, B_PAGE_SIZE, PTB_map, 1);
	gart->GATT.phys = PTB_map[0].address;

	SHOW_INFO(3, "GATT_ptr=%p, GATT_phys=%p", gart->GATT.ptr,
		(void *)gart->GATT.phys);

	// get address mapping
	memset(gart->GATT.ptr, 0, num_pages * sizeof(uint32));

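	// one entry more than pages, so the "too large" check after the fill loop
	// can detect an unexpectedly fragmented buffer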
	map_count = num_pages + 1;

	// align size to B_PAGE_SIZE
	map_area_size = map_count * sizeof(physical_entry);
	if ((map_area_size / B_PAGE_SIZE) * B_PAGE_SIZE != map_area_size)
		map_area_size = ((map_area_size / B_PAGE_SIZE) + 1) * B_PAGE_SIZE;

	// temporary area where we fill in the memory map (deleted below)
	map_area = create_area("pci_gart_map_area", (void **)&map, B_ANY_ADDRESS,
		map_area_size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA);
		// TODO: We actually have a working malloc() in the kernel. Why create
		// an area?
	dprintf("pci_gart_map_area: %ld\n", map_area);

	get_memory_map( gart->buffer.ptr, gart->buffer.size, map, map_count );

	// the following looks a bit strange as the kernel
	// combines successive entries
	gatt_entry = gart->GATT.ptr;

	for( i = 0; i < map_count; ++i ) {
		phys_addr_t addr = map[i].address;
		size_t size = map[i].size;

		if( size == 0 )
			break;

		while( size > 0 ) {
			*gatt_entry++ = addr;
			//SHOW_FLOW( 3, "%lx", *(gart_entry-1) );
			addr += ATI_PCIGART_PAGE_SIZE;
			size -= ATI_PCIGART_PAGE_SIZE;
		}
	}

	delete_area(map_area);

	if( i == map_count ) {
		// this case should never happen
		SHOW_ERROR0( 0, "memory map of GART buffer too large!" );
		delete_area( gart->GATT.area );
		gart->GATT.area = -1;
		return B_ERROR;
	}

	// this might be a bit more than needed, as
	// 1. Intel CPUs have "processor order", i.e. writes appear to external
	//    devices in program order, so a simple final write should be sufficient
	// 2. if it is a PCI GART, bus snooping should provide cache coherence
	// 3. this function is a no-op :(
	clear_caches( gart->GATT.ptr, num_pages * sizeof( uint32 ),
		B_FLUSH_DCACHE );

	// back to real life - some chipsets have write buffers that
	// prove all previous assumptions wrong
	// (don't know whether this really helps though)
	asm volatile ( "wbinvd" ::: "memory" );
	return B_OK;
}