Example No. 1
int scsi_register_driver( scsi_periph_callbacks *callbacks, 
	periph_cookie periph_cookie, scsi_driver_info **driver_out )
{
	scsi_driver_info *driver;
	
	SHOW_FLOW0( 3, "" );
	
	driver = kmalloc( sizeof( *driver ));
	if( driver == NULL )
		return ERR_NO_MEMORY;
		
	driver->callbacks = callbacks;
	driver->periph_cookie = periph_cookie;
	driver->devices = NULL;
	
	driver->xpt_cookie = xpt->register_driver( &periph_interface, 
		(cam_periph_cookie)driver );
	if( driver->xpt_cookie == NULL ) {
		kfree( driver );
		return ERR_NO_MEMORY;
	}
	
	SHOW_FLOW0( 3, "1" );
	
	*driver_out = driver;
	
	return NO_ERROR;
}
Example No. 2
static errno_t startSync( phantom_disk_partition_t *p, void *to, long blockNo, int nBlocks, int isWrite )
{
    assert( p->block_size < PAGE_SIZE );
    SHOW_FLOW( 3, "blk %d", blockNo );

    pager_io_request rq;

    pager_io_request_init( &rq );

    rq.phys_page = (physaddr_t)phystokv(to); // overwritten by hal_pv_alloc() below; seemingly redundant
    rq.disk_page = blockNo;

    rq.blockNo = blockNo;
    rq.nSect   = nBlocks;

    rq.rc = 0;

    if(isWrite) rq.flag_pageout = 1;
    else rq.flag_pagein = 1;

    STAT_INC_CNT(STAT_CNT_BLOCK_SYNC_IO);
    STAT_INC_CNT( STAT_CNT_DISK_Q_SIZE ); // Will decrement on io done

    void *va;
    hal_pv_alloc( &rq.phys_page, &va, nBlocks * p->block_size );

    errno_t ret = EINVAL;

    if(isWrite) memcpy( va, to, nBlocks * p->block_size );

    int ei = hal_save_cli();
    hal_spin_lock(&(rq.lock));
    rq.flag_sleep = 1; // Don't return until done
    rq.sleep_tid = GET_CURRENT_THREAD()->tid;

    SHOW_FLOW0( 3, "start io" );
    if( (ret = p->asyncIo( p, &rq )) )
    {
        rq.flag_sleep = 0;
        hal_spin_unlock(&(rq.lock));
        if( ei ) hal_sti();
        goto ret;
    }
    thread_block( THREAD_SLEEP_IO, &(rq.lock) );
    SHOW_FLOW0( 3, "unblock" );
    if( ei ) hal_sti();

    if(!isWrite) memcpy( to, va, nBlocks * p->block_size );
    ret = rq.rc;

ret:
    hal_pv_free( rq.phys_page, va, nBlocks * p->block_size );
    return ret;
}
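
A minimal usage sketch (hypothetical helper; assumes a valid phantom_disk_partition_t and a caller-supplied buffer of at least one block): read block 0 synchronously via the pagein path.

static errno_t read_first_block( phantom_disk_partition_t *p, void *buf )
{
    // blockNo = 0, nBlocks = 1, isWrite = 0 selects the flag_pagein path
    return startSync( p, buf, 0, 1, 0 );
}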
Example No. 3
static errno_t
bootprecv( struct bootp_state *bstate, void *udp_sock, struct bootp *bp, size_t len, size_t *retlen )
{
    SHOW_FLOW0( 3, "bootp_recv");

    sockaddr dest_addr;
    dest_addr.port = IPPORT_BOOTPS; // dest port

    dest_addr.addr.len = 4;
    dest_addr.addr.type = ADDR_TYPE_IP;
    // INADDR_BROADCAST
    //NETADDR_TO_IPV4(dest_addr.addr) = IPV4_DOTADDR_TO_ADDR(0xFF, 0xFF, 0xFF, 0xFF);
    NETADDR_TO_IPV4(dest_addr.addr) = IPV4_DOTADDR_TO_ADDR(0, 0, 0, 0);

    int n = udp_recvfrom(udp_sock, bp, len, &dest_addr, SOCK_FLAG_TIMEOUT, 2000000l);

    if( 0 >= n )
    {
        SHOW_ERROR( 0, "UDP recv err = %d", n);
        return ETIMEDOUT; // TODO errno
    }

    if (n < (int)(sizeof(struct bootp) - BOOTP_VENDSIZE))
        goto bad;

    SHOW_FLOW( 3, "bootprecv: recv %d bytes", n);

    if (bp->bp_xid != htonl(xid)) {
        SHOW_ERROR( 1, "bootprecv: expected xid 0x%x, got 0x%x",
                   xid, ntohl(bp->bp_xid));
        goto bad;
    }

    SHOW_FLOW0( 3, "bootprecv: got one!");

    /* Suck out vendor info */
    if (bcmp(vm_rfc1048, bp->bp_vend, sizeof(vm_rfc1048)) == 0) {
        if(vend_rfc1048(bstate, bp->bp_vend, sizeof(bp->bp_vend)) != 0)
            goto bad;
    }
#ifdef BOOTP_VEND_CMU
    else if (bcmp(vm_cmu, bp->bp_vend, sizeof(vm_cmu)) == 0)
        vend_cmu(bstate,bp->bp_vend);
#endif
    else
        SHOW_ERROR( 0, "bootprecv: unknown vendor 0x%lx", (long)bp->bp_vend);

    if(retlen) *retlen = n;

    return 0;
bad:
    //errno = 0;
    return EINVAL;
}
Example No. 4
phantom_thread_t *
phantom_create_thread( void (*func)(void *), void *arg, int flags )
{
    assert(threads_inited);
    assert( ! (flags & ~CREATION_POSSIBLE_FLAGS) );

#if NEW_SNAP_SYNC
    // No thread starts in snap, sorry
    snap_lock();
#endif

    SHOW_FLOW( 7, "flags = %b", flags, "\020\1USER\2VM\3JIT\4NATIVE\5KERNEL\6?PF\7?PA\10?CH\11TIMEOUT\12UNDEAD\13NOSCHED" );
    phantom_thread_t *t = calloc(1, sizeof(phantom_thread_t));
    //phantom_thread_t *t = calloc_aligned(1, sizeof(phantom_thread_t),16); // align at 16 bytes for ia32 fxsave
    assert(t != NULL); // calloc failure during thread creation would be fatal anyway

    // Can't be run yet
    t->sleep_flags = THREAD_SLEEP_LOCKED; 

    t->tid = find_tid(t);
    SHOW_FLOW( 7, "tid = %d", t->tid );

#if CONF_NEW_CTTY
    t_inherit_ctty( t );
#else
    // inherit ctty
    t->ctty = GET_CURRENT_THREAD()->ctty;
#endif

    common_thread_init(t, DEF_STACK_SIZE );
    //t->priority = THREAD_PRIO_NORM;

    SHOW_FLOW( 7, "cpu = %d", t->cpu_id );


    t->start_func_arg = arg;
    t->start_func = func;

    phantom_thread_state_init(t);
    SHOW_FLOW0( 7, "phantom_thread_state_init done" );

    t->thread_flags |= flags;
    // Let it be eligible to run
    t->sleep_flags &= ~THREAD_SLEEP_LOCKED;

    t_enqueue_runq(t);
    SHOW_FLOW0( 7, "on run q" );

#if NEW_SNAP_SYNC
    snap_unlock();
#endif

    return t;

}
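
A minimal usage sketch (the THREAD_FLAG_KERNEL name is an assumption; the real flag values live behind CREATION_POSSIBLE_FLAGS): spawn a kernel worker thread.

static void worker( void *arg )
{
    (void) arg;
    // ... periodic kernel work ...
}

// during init, hypothetically:
//     phantom_thread_t *t = phantom_create_thread( worker, 0, THREAD_FLAG_KERNEL );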
Example No. 5
bool
reset_device(ide_device_info *device, ide_qrequest *ignore)
{
	ide_bus_info *bus = device->bus;
	status_t res;
	uint8 orig_command;

	dprintf("ide: reset_device() device %p\n", device);

	SHOW_FLOW0(3, "");

	if (!device->is_atapi)
		goto err;

	if (device->reconnect_timer_installed) {
		cancel_timer(&device->reconnect_timer.te);
		device->reconnect_timer_installed = false;
	}

	// select device
	if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
			ide_mask_device_head) != B_OK)
		goto err;

	// save the original command to let the caller restart it
	orig_command = device->tf.write.command;

	// send device reset, independent of current device state
	// (that's the point of a reset)
	device->tf.write.command = IDE_CMD_DEVICE_RESET;
	res = bus->controller->write_command_block_regs(bus->channel_cookie,
		&device->tf, ide_mask_command);
	device->tf.write.command = orig_command;

	if (res != B_OK)
		goto err;

	// don't know how long to wait, but 31 seconds, like soft reset,
	// should be enough
	if (!ide_wait(device, 0, ide_status_bsy, true, 31000000))
		goto err;

	// alright, resubmit all requests
	finish_all_requests(device, ignore, SCSI_SCSI_BUS_RESET, true);

	SHOW_FLOW0(3, "done");
	dprintf("ide: reset_device() device %p success\n", device);
	return true;

err:
	// do the hard way
	dprintf("ide: reset_device() device %p failed, calling reset_bus\n", device);
	return reset_bus(device, ignore);
}
Example No. 6
static void path_inquiry( ide_bus_info *bus, CCB_HEADER *ccb )
{
	CCB_PATHINQ *request = (CCB_PATHINQ *)ccb;
	
	SHOW_FLOW0( 3, "" );
	
	request->cam_version_num = 12;	// XXX 
	
	request->cam_hba_inquiry = PI_TAG_ABLE | PI_WIDE_16;
	
	request->cam_target_sprt = false;
	request->cam_hba_misc = 0;
	request->cam_hba_eng_cnt = 0;
	memset( request->cam_vuhba_flags, 0, sizeof( request->cam_vuhba_flags ));
	request->cam_sim_priv = SIM_PRIV;
	request->cam_async_flags = /*AC_SENT_BDR |*/	// XXX true for ATAPI ?
		AC_BUS_RESET;
		
	request->cam_hpath_id = 0;
	request->cam_initiator_id = 2;	// well, there is no initiator for IDE,
									// but according to SCSI it's needed for scanning
	strncpy( request->cam_sim_vid, "NewOS", SIM_ID );
	strncpy( request->cam_hba_vid, bus->controller_name, HBA_ID );
	
	ccb->cam_status = CAM_REQ_CMP;
	xpt->done( &request->cam_ch );
}
Example No. 7
// hammer CRTC registers
void Radeon_ProgramCRTCRegisters( accelerator_info *ai, int crtc_idx, 
	crtc_regs *values )
{
	vuint8 *regs = ai->regs;
	
	SHOW_FLOW0( 2, "" );

	if( crtc_idx == 0 ) {
		OUTREGP( regs, RADEON_CRTC_GEN_CNTL, values->crtc_gen_cntl,
			RADEON_CRTC_EXT_DISP_EN );
		
		OUTREG( regs, RADEON_CRTC_H_TOTAL_DISP, values->crtc_h_total_disp );
		OUTREG( regs, RADEON_CRTC_H_SYNC_STRT_WID, values->crtc_h_sync_strt_wid );
		OUTREG( regs, RADEON_CRTC_V_TOTAL_DISP, values->crtc_v_total_disp );
		OUTREG( regs, RADEON_CRTC_V_SYNC_STRT_WID, values->crtc_v_sync_strt_wid );
		OUTREG( regs, RADEON_CRTC_OFFSET_CNTL, values->crtc_offset_cntl );
		OUTREG( regs, RADEON_CRTC_PITCH, values->crtc_pitch );

	} else {
		OUTREGP( regs, RADEON_CRTC2_GEN_CNTL, values->crtc_gen_cntl,
			RADEON_CRTC2_VSYNC_DIS |
			RADEON_CRTC2_HSYNC_DIS |
			RADEON_CRTC2_DISP_DIS |
			RADEON_CRTC2_CRT2_ON );

		OUTREG( regs, RADEON_CRTC2_H_TOTAL_DISP, values->crtc_h_total_disp );
		OUTREG( regs, RADEON_CRTC2_H_SYNC_STRT_WID, values->crtc_h_sync_strt_wid );
		OUTREG( regs, RADEON_CRTC2_V_TOTAL_DISP, values->crtc_v_total_disp );
		OUTREG( regs, RADEON_CRTC2_V_SYNC_STRT_WID, values->crtc_v_sync_strt_wid );
		OUTREG( regs, RADEON_CRTC2_OFFSET_CNTL, values->crtc_offset_cntl );
		OUTREG( regs, RADEON_CRTC2_PITCH, values->crtc_pitch );
	}
}
Example No. 8
// SIM signals that it can handle further requests for this device
void scsi_cont_send_device( scsi_device_info *device )
{
	scsi_bus_info *bus = device->bus;
	bool was_servicable, start_retry;
	
	SHOW_FLOW0( 3, "" );
	
	ACQUIRE_BEN( &bus->mutex );
	
	was_servicable = scsi_can_service_bus( bus );
	
	if( device->sim_overflow ) {			
		device->sim_overflow = false;
		--device->lock_count;
		
		// add to bus queue if not locked explicitly anymore and requests are waiting
		if( device->lock_count == 0 && device->queued_reqs != NULL )
			scsi_add_device_queue_last( device );
	}

	// no device overflow implies no bus overflow
	// (and if not, we'll detect that on next submit)
	scsi_clear_bus_overflow( bus );
	
	start_retry = !was_servicable && scsi_can_service_bus( bus );
				
	RELEASE_BEN( &bus->mutex );
	
	// tell service thread if there are pending requests which
	// weren't pending before	
	if( start_retry )
		release_sem_etc( bus->start_service, 1, 0/*B_DO_NOT_RESCHEDULE*/ );
}
Example No. 9
// cleanup PCI GART
void Radeon_CleanupPCIGART( device_info *di )
{
	vuint8 *regs = di->regs;

	SHOW_FLOW0( 3, "" );

	// perhaps we should wait for FIFO space before messing around with registers, but
	// 1. I don't want to add all the sync stuff to the kernel driver
	// 2. I doubt that these regs are buffered by FIFO
	// but still: in the worst case the CP has written some commands to the
	// register FIFO, which can do all kinds of nasty things

	// disable CP BM
	OUTREG( regs, RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS );
	// read-back for flushing
	INREG( regs, RADEON_CP_CSQ_CNTL );

	// disable bus mastering
	OUTREGP( regs, RADEON_BUS_CNTL, RADEON_BUS_MASTER_DIS, ~RADEON_BUS_MASTER_DIS );
	// disable PCI GART
	OUTREGP( regs, RADEON_AIC_CNTL, 0, ~RADEON_PCIGART_TRANSLATE_EN );

	destroyGATT( &di->pci_gart );
	destroyGARTBuffer( &di->pci_gart );
}
Example No. 10
status_t
periph_simple_exec(scsi_periph_device_info* device, void* cdb, uchar cdbLength,
	void* data, size_t dataLength, int ccb_flags)
{
	SHOW_FLOW0( 0, "" );

	scsi_ccb* ccb = device->scsi->alloc_ccb(device->scsi_device);
	if (ccb == NULL)
		return B_NO_MEMORY;

	ccb->flags = ccb_flags;

	memcpy(ccb->cdb, cdb, cdbLength);
	ccb->cdb_length = cdbLength;

	ccb->sort = -1;
	ccb->timeout = device->std_timeout;

	ccb->data = (uint8*)data;
	ccb->sg_list = NULL;
	ccb->data_length = dataLength;

	status_t status = periph_safe_exec(device, ccb);

	device->scsi->free_ccb(ccb);

	return status;
}
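
A minimal sketch of how a peripheral driver might use this helper (scsi_cmd_tur and SCSI_OP_TEST_UNIT_READY appear in the scan code of Example No. 16; SCSI_DIR_NONE marks a data-less command):

static status_t
send_tur(scsi_periph_device_info* device)
{
	scsi_cmd_tur cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_OP_TEST_UNIT_READY;

	// no data phase, so data = NULL and dataLength = 0
	return periph_simple_exec(device, &cmd, sizeof(cmd), NULL, 0,
		SCSI_DIR_NONE);
}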
Example No. 11
/*! Emulate TEST UNIT READY */
static bool
ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
{
	SHOW_FLOW0(3, "");

	if (!device->infoblock.RMSN_supported
		|| device->infoblock._127_RMSN_support != 1)
		return true;

	// ask device about status		
	device->tf_param_mask = 0;
	device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;

	if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
		return false;

	// bits ide_error_mcr | ide_error_mc | ide_error_wp are also valid
	// but not requested by TUR; ide_error_wp can safely be ignored, but
	// we don't want to lose media change (request) reports
	if (!check_output(device, true,
			ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
			false)) {
		// SCSI spec is unclear here: we shouldn't report "media change (request)"
		// but what to do if there is one? anyway - we report them
		;
	}

	return true;
}
Example No. 12
static status_t
raw_device_added(device_node_handle node)
{
	uint8 path_id, target_id, target_lun;
	char name[100];

	SHOW_FLOW0(3, "");

	// compose name	
	if (pnp->get_attr_uint8(node, SCSI_BUS_PATH_ID_ITEM, &path_id, true) != B_OK
		|| pnp->get_attr_uint8(node, SCSI_DEVICE_TARGET_ID_ITEM, &target_id, true) != B_OK
		|| pnp->get_attr_uint8(node, SCSI_DEVICE_TARGET_LUN_ITEM, &target_lun, true) != B_OK)
		return B_ERROR;

	sprintf(name, "bus/scsi/%d/%d/%d/raw",
		path_id, target_id, target_lun);

	SHOW_FLOW(3, "name=%s", name);

	// ready to register
	{
		device_attr attrs[] = {
			{ B_DRIVER_MODULE, B_STRING_TYPE, { string: SCSI_RAW_MODULE_NAME }},

			// default connection is used by peripheral drivers, and as we don't
			// want to kick them out, we use concurrent "raw" connection
			// (btw: this shows nicely that something goes wrong: one device
			// and two drivers means begging for trouble)
			{ PNP_DRIVER_CONNECTION, B_STRING_TYPE, { string: "raw" }},
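			// the excerpt is truncated here - the lines below are an assumed
			// completion: terminate the attribute list and register the node
			{ NULL }
		};

		return pnp->register_device(node, attrs, NULL, &node);
	}
}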
Example No. 13
static void video_post_start()
{

    scr_zbuf_init();
    drv_video_init_windows();

    // We have the VESA driver; add a companion accelerator if possible
    if( was_enforced )
        select_accel_driver();

    SHOW_FLOW0( 3, "Video console init" );
    phantom_init_console_window();

    SHOW_FLOW0( 3, "Video mouse cursor init" );
    scr_mouse_set_cursor(drv_video_get_default_mouse_bmp());
}
Example No. 14
static status_t
free_hook(void *dev)
{
	device_info *di = (device_info *)dev;

	SHOW_FLOW0( 0, "" );

	ACQUIRE_BEN( devices->kernel );

	mem_freetag( di->memmgr[mt_local], dev );
	
	if( di->memmgr[mt_PCI] )
		mem_freetag( di->memmgr[mt_PCI], dev );

	if( di->memmgr[mt_AGP] )
		mem_freetag( di->memmgr[mt_AGP], dev );
	
	if( di->is_open == 1 )
		Radeon_LastClose( di );

	di->is_open--;
	RELEASE_BEN( devices->kernel );

	return B_OK;
}
Example No. 15
status_t
vpd_page_get(scsi_periph_device_info *device, uint8 page, void* data,
	uint16 length)
{
	SHOW_FLOW0(0, "");

	status_t status = vpd_page_inquiry(device, 0, data, length);
	if (status != B_OK)
		return status; // or B_BAD_VALUE

	if (page == 0)
		return B_OK;

	scsi_page_list *list_data = (scsi_page_list*)data;
	int page_length = min_c(list_data->page_length, length -
		offsetof(scsi_page_list, pages));
	for (int i = 0; i < page_length; i++) {
		if (list_data->pages[i] == page)
			return vpd_page_inquiry(device, page, data, length);
	}

	// TODO: buffer might not be big enough

	return B_BAD_VALUE;
}
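
For reference, the layout this code implies for scsi_page_list (a sketch of the standard INQUIRY VPD page 0 format; the real definition lives in the SCSI headers):

typedef struct scsi_page_list_sketch {
	uint8 device_type;		// peripheral device type
	uint8 page_code;		// 0 for the "supported pages" page
	uint8 _reserved;
	uint8 page_length;		// number of entries in pages[]
	uint8 pages[1];			// supported VPD page codes, one per byte
} scsi_page_list_sketch;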
Example No. 16
/*! send TUR
	result: true, if device answered
		false, if there is no device
*/
static bool
scsi_scan_send_tur(scsi_ccb *worker_req)
{
	scsi_cmd_tur *cmd = (scsi_cmd_tur *)worker_req->cdb;

	SHOW_FLOW0( 3, "" );

	memset( cmd, 0, sizeof( *cmd ));
	cmd->opcode = SCSI_OP_TEST_UNIT_READY;

	worker_req->sg_list = NULL;
	worker_req->data = NULL;
	worker_req->data_length = 0;
	worker_req->cdb_length = sizeof(*cmd);
	worker_req->timeout = 0;
	worker_req->sort = -1;
	worker_req->flags = SCSI_DIR_NONE;

	scsi_sync_io( worker_req );

	SHOW_FLOW( 3, "status=%x", worker_req->subsys_status );

	// as this command was only for syncing, we ignore almost all errors
	switch (worker_req->subsys_status) {
		case SCSI_SEL_TIMEOUT:
			// there seems to be no device around
			return false;

		default:
			return true;
	}
}
Example No. 17
status_t
init_driver(void)
{
	SHOW_FLOW0(3, "");

	if (get_module(B_PCI_MODULE_NAME, (module_info **)&pci_bus) != B_OK)
		return B_ERROR;

	/* get a handle for the agp bus if it exists */
	get_module(B_AGP_GART_MODULE_NAME, (module_info **)&sAGP);

	/* driver private data */
	devices = (radeon_devices *)calloc(1, sizeof(radeon_devices));
	if (devices == NULL) {
		put_module(B_PCI_MODULE_NAME);
		if (sAGP != NULL)
			put_module(B_AGP_GART_MODULE_NAME);
		return B_ERROR;
	}

	(void)INIT_BEN(devices->kernel, "Radeon Kernel");

	GetDriverSettings();
	Radeon_ProbeDevices();
	return B_OK;
}
Example No. 18
static errno_t sendReadRq( trfs_queue_t *qe )
{
    trfs_pkt_t rq;

    assert( qe->type == TRFS_QEL_TYPE_READ );

    rq.type = PKT_T_ReadRQ;
    rq.sessionId = sessionId;

    rq.readRq.nRequests = 1;
    rq.readRq.request[0] = qe->fio;

    // If we have already received some sectors, shift the request forward.
    // This covers the situation where the server sends us back just the
    // first few sectors of a request.
    {
        unsigned int start = 0;

        for( start = 0; start < sizeof(u_int32_t)*8; start++ )
        {
            if( qe->recombine_map & (1u << start) )
            {
                rq.readRq.request[0].startSector++;
                rq.readRq.request[0].nSectors--;
            }
            else
                break;
        }
    }

    SHOW_FLOW0( 1, "send read rq" );
    return trfs_send(&rq, sizeof(rq)) ? EIO : 0;
}
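
A small self-contained illustration of the recombine_map walk above: bits set from bit 0 upward mark sectors already received, so the retried request is shifted past the leading run of set bits.

static unsigned int leading_received( u_int32_t recombine_map )
{
    unsigned int n = 0;

    while( n < sizeof(u_int32_t)*8 && (recombine_map & (1u << n)) )
        n++;
    return n;
}

// leading_received(0x07) == 3 - sectors 0..2 arrived, retry from sector 3
// leading_received(0x0B) == 2 - sector 2 is still missing, retry from there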
Example No. 19
// add request to the beginning of the device queue and the device to the bus queue
// used only for auto-sense request
void scsi_add_queued_request_first( scsi_ccb *request )
{
	scsi_device_info *device = request->device;
	
	SHOW_FLOW0( 3, "" );

	request->state = SCSI_STATE_QUEUED;
	scsi_add_req_queue_first( request );

	// if device is not deliberately locked, mark it as waiting
	if( device->lock_count == 0 ) {
		SHOW_FLOW0( 3, "mark device as waiting" );
		// make device first in bus queue to execute sense ASAP
		scsi_add_device_queue_first( device );
	}
}
Example No. 20
// copy from graphics memory to other memory via DMA
// 	src		- offset in graphics mem
//	target	- target address
//	size	- number of bytes to copy
//	lock_mem - true if the memory is not locked yet (it will be locked/unlocked around the transfer)
//	contiguous - true, if memory is physically contiguous (implies lock_mem=false)
status_t Radeon_DMACopy( 
	device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
{
	status_t res;
	
	/*SHOW_FLOW( 0, "src=%ld, target=%p, size=%ld, lock_mem=%d, contiguous=%d",
		src, target, size, lock_mem, contiguous );*/
	
	res =  Radeon_PrepareDMA( di, src, target, size, lock_mem, contiguous );
	if( res != B_OK )
		return res;
		
	//SHOW_FLOW0( 0, "2" );

	OUTREG( di->regs, RADEON_DMA_VID_TABLE_ADDR, di->si->memory[mt_local].virtual_addr_start +
		di->dma_desc_offset );
		
	res = acquire_sem_etc( di->dma_sem, 1, B_RELATIVE_TIMEOUT, 1000000 );
	
	// be sure that transmission is really finished
	while( (INREG( di->regs, RADEON_DMA_VID_STATUS ) & RADEON_DMA_STATUS_ACTIVE) != 0 ) {
		SHOW_FLOW0( 0, "DMA transmission still active" );
		snooze( 1000 );
	}
		
	Radeon_FinishDMA( di, src, target, size, lock_mem, contiguous );
	
	//SHOW_FLOW0( 0, "3" );
	
	return res;
}
Example No. 21
static status_t
vpd_page_inquiry(scsi_periph_device_info *device, uint8 page, void* data,
	uint16 length)
{
	SHOW_FLOW0(0, "");

	scsi_ccb* ccb = device->scsi->alloc_ccb(device->scsi_device);
	if (ccb == NULL)
		return B_NO_MEMORY;

	scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)ccb->cdb;
	memset(cmd, 0, sizeof(scsi_cmd_inquiry));
	cmd->opcode = SCSI_OP_INQUIRY;
	cmd->lun = ccb->target_lun;
	cmd->evpd = 1;
	cmd->page_code = page;
	cmd->allocation_length = length;

	ccb->flags = SCSI_DIR_IN;
	ccb->cdb_length = sizeof(scsi_cmd_inquiry);

	ccb->sort = -1;
	ccb->timeout = device->std_timeout;

	ccb->data = (uint8*)data;
	ccb->sg_list = NULL;
	ccb->data_length = length;

	status_t status = periph_safe_exec(device, ccb);

	device->scsi->free_ccb(ccb);

	return status;
}
Example No. 22
int dev_bootstrap(void)
{
	int err = 0;
	pcnet32 *nic = NULL;
	pci_module_hooks *bus = NULL;

	SHOW_FLOW0(3, "entry");
	
	err = module_get(PCI_BUS_MODULE_NAME, 0, (void **)&bus);
	if(err < 0)
	{
		SHOW_FLOW(3, "Error finding PCI bus module: %d", err);
		return err;
	}
	SHOW_FLOW(3, "Got bus module: %p", bus);

	nic = pcnet32_new(bus,
		PCNET_INIT_MODE0 | PCNET_INIT_RXLEN_128 | PCNET_INIT_TXLEN_32,
		2048, 2048);

	if (nic == NULL)
	{
		SHOW_FLOW0(3, "pcnet_new returned 0.");
		return ERR_GENERAL;
	}

	if (pcnet32_detect(nic) > -1)
	{
		if (pcnet32_init(nic) < 0)
		{
			SHOW_FLOW0(3, "pcnet_init failed.");
			
			pcnet32_delete(nic);
			return ERR_GENERAL;
		}

		pcnet32_start(nic);

		if (devfs_publish_indexed_device("net/pcnet32", nic, &pcnet32_hooks) < 0)
		{
			SHOW_FLOW0(3, "failed to register device /dev/net/pcnet32/0");
			return ERR_GENERAL;
		}
	}

	return 0;
}
Example No. 23
status_t
periph_register_device(periph_device_cookie periph_device,
	scsi_periph_callbacks *callbacks, scsi_device scsi_device,
	scsi_device_interface *scsi, device_node *node,
	bool removable, int preferredCcbSize, scsi_periph_device *driver)
{
	SHOW_FLOW0(3, "");

	scsi_periph_device_info *device
		= (scsi_periph_device_info *)malloc(sizeof(*device));
	if (device == NULL)
		return B_NO_MEMORY;

	memset(device, 0, sizeof(*device));

	mutex_init(&device->mutex, "SCSI_PERIPH");
	device->scsi_device = scsi_device;
	device->scsi = scsi;
	device->periph_device = periph_device;
	device->removal_requested = false;
	device->callbacks = callbacks;
	device->node = node;
	device->removable = removable;
	device->std_timeout = SCSI_PERIPH_STD_TIMEOUT;

	// set some default options
	device->next_tag_action = 0;
	device->preferred_ccb_size = preferredCcbSize;
	device->rw10_enabled = true;

	// launch sync daemon
	status_t status = register_kernel_daemon(periph_sync_queue_daemon, device,
		60*10);
	if (status != B_OK)
		goto err1;

	*driver = device;

	SHOW_FLOW0(3, "done");

	return B_OK;

err1:
	mutex_destroy(&device->mutex);
	free(device);
	return status;
}
Example No. 24
/*!	Check whether the S/G list of a request is supported by the DMA controller */
static bool
is_sg_list_dma_safe(scsi_ccb *request)
{
	scsi_bus_info *bus = request->bus;
	const physical_entry *sg_list = request->sg_list;
	uint32 sg_count = request->sg_count;
	uint32 dma_boundary = bus->dma_params.dma_boundary;
	uint32 alignment = bus->dma_params.alignment;
	uint32 max_sg_block_size = bus->dma_params.max_sg_block_size;
	uint32 cur_idx;

	// not too many S/G list entries
	if (sg_count > bus->dma_params.max_sg_blocks) {
		SHOW_FLOW0(1, "S/G-list too long");
		return false;
	}

	// if there are no further restrictions - be happy
	if (dma_boundary == ~0UL && alignment == 0 && max_sg_block_size == 0)
		return true;

	// argh - controller is a bit picky, so make sure it likes us
	for (cur_idx = sg_count; cur_idx >= 1; --cur_idx, ++sg_list) {
		addr_t max_len;

		// calculate space up to the next DMA boundary crossing and
		// verify that it isn't crossed
		max_len = (dma_boundary + 1) -
			((addr_t)sg_list->address & dma_boundary);

		if (max_len < sg_list->size) {
			SHOW_FLOW(0, "S/G-entry crosses DMA boundary @0x%x",
				(int)sg_list->address + (int)max_len);
			return false;
		}

		// check both begin and end of entry for alignment
		if (((addr_t)sg_list->address & alignment) != 0) {
			SHOW_FLOW(0, "S/G-entry has bad alignment @0x%x",
				(int)sg_list->address);
			return false;
		}

		if ((((addr_t)sg_list->address + sg_list->size) & alignment) != 0) {
			SHOW_FLOW(0, "end of S/G-entry has bad alignment @0x%x",
				(int)sg_list->address + (int)sg_list->size);
			return false;
		}

		// verify entry size
		if (sg_list->size > max_sg_block_size) {
			SHOW_FLOW(0, "S/G-entry is too long (%d/%d bytes)",
				(int)sg_list->size, (int)max_sg_block_size);
			return false;
		}
	}

	return true;
}
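
A worked numeric check of the boundary math above (illustrative values): with a 4 KB boundary, dma_boundary is 0xFFF, and an entry starting at physical address 0xFF00 can extend at most 0x100 bytes before crossing.

static bool crosses_dma_boundary( addr_t address, addr_t size, addr_t dma_boundary )
{
	// space left up to the next boundary crossing
	addr_t max_len = (dma_boundary + 1) - (address & dma_boundary);

	return max_len < size;
}

// crosses_dma_boundary(0xFF00, 0x100, 0xFFF) == false (ends exactly at the boundary)
// crosses_dma_boundary(0xFF00, 0x200, 0xFFF) == true  (would be rejected above)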
Example No. 25
// public function: number of overlay units
uint32 OVERLAY_COUNT( const display_mode *dm )
{
	SHOW_FLOW0( 3, "" );

	(void) dm;
	
	return 1;
}
Example No. 26
// public function: return list of supported overlay colour spaces 
//	dm - display mode where overlay is to be used
const uint32 *OVERLAY_SUPPORTED_SPACES( const display_mode *dm )
{
	SHOW_FLOW0( 3, "" );
	
	(void) dm;

	return overlay_colorspaces;
}
Example No. 27
static void scan_device( ide_bus_info *bus, int device )
{
	SHOW_FLOW0( 3, "" );
	
	schedule_synced_pc( bus, &bus->scan_bus_syncinfo, (void *)device );
	
	sem_acquire( bus->scan_device_sem, 1 );
}
Example No. 28
/*!	Abort DMA transmission
	must be called _before_ start_dma_wait
*/
void
abort_dma(ide_device_info *device, ide_qrequest *qrequest)
{
	ide_bus_info *bus = device->bus;

	SHOW_FLOW0(0, "");

	bus->controller->finish_dma(bus->channel_cookie);
}
Example No. 29
// public function: allocate overlay unit
overlay_token ALLOCATE_OVERLAY( void )
{
	shared_info *si = ai->si;
	virtual_card *vc = ai->vc;
	
	SHOW_FLOW0( 3, "" );

	if( atomic_or( &si->overlay_mgr.inuse, 1 ) != 0 ) {
		SHOW_FLOW0( 3, "already in use" );
		return NULL;
	}
	
	SHOW_FLOW0( 3, "success" );
	
	vc->uses_overlay = true;

	return (void *)++si->overlay_mgr.token;
}
Example No. 30
static void phantom_select_video_driver()
{
    long selected_sq = 0;
    struct drv_video_screen_t *selected_drv = NULL;

    SHOW_FLOW0( 2, "Look for video driver" );

    unsigned int i;
    for( i = 0; i < (sizeof(video_drivers)/sizeof(struct drv_video_screen_t *)); i++ )
    {
        struct drv_video_screen_t *drv = video_drivers[i];

        SHOW_FLOW( 2, "Probing %s video driver: ", drv->name);
        if( drv->probe() != VIDEO_PROBE_SUCCESS )
        {
            SHOW_FLOW( 2, "Video driver %s : No", drv->name);
            continue;
        }
        SHOW_FLOW( 2, "Video driver %s : Yes", drv->name);

        long sq = drv->xsize * drv->ysize;

        if( sq > selected_sq )
        {
            selected_drv = drv;
            selected_sq = sq;
        }
    }

    if( selected_drv == NULL )
    {
        SHOW_FLOW0( 1, "No video driver found!");
    }
    else
    {
        if(video_drv != NULL)
            video_drv->stop();

        SHOW_FLOW( 1, "The best is %s video driver", selected_drv->name);
        video_drv = selected_drv;
        set_video_defaults();
    }

}