// open_hook - open one published device instance.
// Looks the device up by its published name (two names are published per
// physical device, hence the /2), runs first-open hardware init if needed,
// and hands back the device_info as the cookie.
static status_t open_hook(const char *name, uint32 flags, void **cookie) {
	device_info *di;
	status_t result = B_OK;
	int32 i = 0;

	SHOW_FLOW( 3, "name=%s, flags=%ld, cookie=0x%08lx", name, flags, (uint32)cookie );

	// locate the matching published name; we are never passed an unknown one
	for (; devices->device_names[i] != NULL; i++) {
		if (strcmp(name, devices->device_names[i]) == 0)
			break;
	}
	di = &devices->di[i / 2];

	ACQUIRE_BEN(devices->kernel);

	// first opener initializes the hardware
	if (!di->is_open)
		result = Radeon_FirstOpen(di);

	if (result == B_OK) {
		++di->is_open;
		*cookie = di;
	}

	RELEASE_BEN(devices->kernel);

	SHOW_FLOW(3, "returning 0x%08lx", result);
	return result;
}
/*!	Copy data back and release DMA buffer;
	you must have called cleanup_tmp_sg before.

	Undoes what scsi_get_dma_buffer() did: copies the bounced data back to
	the caller's buffer for successful reads, restores the original data
	pointers on the request, and returns the (single, shared) bounce buffer
	to the pool.
*/
void scsi_release_dma_buffer(scsi_ccb *request) {
	scsi_device_info *device = request->device;
	dma_buffer *buffer = request->dma_buffer;

	SHOW_FLOW(1, "Buffering finished, %x, %" B_PRIx32,
		request->subsys_status & SCSI_SUBSYS_STATUS_MASK,
		(request->flags & SCSI_DIR_MASK));

	// copy data from buffer if required and if operation succeeded
	if ((request->subsys_status & SCSI_SUBSYS_STATUS_MASK) == SCSI_REQ_CMP
		&& (request->flags & SCSI_DIR_MASK) == SCSI_DIR_IN)
		scsi_copy_dma_buffer(request,
			request->data_length - request->data_resid, false);

	// restore request: put the caller's original data/SG pointers back
	request->data = buffer->orig_data;
	request->sg_list = buffer->orig_sg_list;
	request->sg_count = buffer->orig_sg_count;

	// free buffer; last_use feeds the clean-up daemon's idle timer
	ACQUIRE_BEN(&device->dma_buffer_lock);

	buffer->last_use = system_time();
	buffer->inuse = false;

	RELEASE_BEN(&device->dma_buffer_lock);

	// only after the buffer is marked free may the next waiter proceed
	release_sem(device->dma_buffer_owner);

	request->buffered = false;
}
// free_hook - called when a cookie handed out by open_hook is freed.
// Releases all graphics memory that was tagged with this cookie and shuts
// the card down on last close.
static status_t free_hook(void *dev) {
	device_info *di = (device_info *)dev;

	SHOW_FLOW0( 0, "" );

	ACQUIRE_BEN( devices->kernel );

	// free every allocation this client made (tagged with its cookie)
	mem_freetag( di->memmgr[mt_local], dev );

	// PCI/AGP memory managers only exist on some configurations
	if( di->memmgr[mt_PCI] )
		mem_freetag( di->memmgr[mt_PCI], dev );
	if( di->memmgr[mt_AGP] )
		mem_freetag( di->memmgr[mt_AGP], dev );

	// last opener going away: undo Radeon_FirstOpen's setup
	if( di->is_open == 1 )
		Radeon_LastClose( di );

	di->is_open--;
	RELEASE_BEN( devices->kernel );

	return B_OK;
}
// SIM signals that it can handle further requests for this device void scsi_cont_send_device( scsi_device_info *device ) { scsi_bus_info *bus = device->bus; bool was_servicable, start_retry; SHOW_FLOW0( 3, "" ); ACQUIRE_BEN( &bus->mutex ); was_servicable = scsi_can_service_bus( bus ); if( device->sim_overflow ) { device->sim_overflow = false; --device->lock_count; // add to bus queue if not locked explicitly anymore and requests are waiting if( device->lock_count == 0 && device->queued_reqs != NULL ) scsi_add_device_queue_last( device ); } // no device overflow implicits no bus overflow // (and if not, we'll detect that on next submit) scsi_clear_bus_overflow( bus ); start_retry = !was_servicable && scsi_can_service_bus( bus ); RELEASE_BEN( &bus->mutex ); // tell service thread if there are pending requests which // weren't pending before if( start_retry ) release_sem_etc( bus->start_service, 1, 0/*B_DO_NOT_RESCHEDULE*/ ); }
// Report the media status for one handle.
// Pending removal requests and media-change errors are delivered once per
// handle; otherwise the device itself is queried via TEST UNIT READY
// (wait_for_ready). Returns B_DEV_MEDIA_CHANGE_REQUESTED,
// a stored per-handle error, B_NO_MEMORY, or the device's status.
status_t periph_get_media_status(scsi_periph_handle_info *handle)
{
	scsi_periph_device_info *device = handle->device;
	scsi_ccb *request;
	err_res res;
	status_t err;

	ACQUIRE_BEN(&device->mutex);

	// removal requests are returned to exactly one handle
	// (no real problem, as no one checks media status "by mistake")
	if (device->removal_requested) {
		device->removal_requested = false;
		err = B_DEV_MEDIA_CHANGE_REQUESTED;
		goto err;
	}

	// if there is a pending error (read: media has changed), return it once per handle
	err = handle->pending_error;
	if (err != B_OK) {
		handle->pending_error = B_OK;
		goto err;
	}

	SHOW_FLOW0( 3, "" );

	RELEASE_BEN(&device->mutex);

	// finally, ask the device itself
	request = device->scsi->alloc_ccb(device->scsi_device);
	if (request == NULL)
		return B_NO_MEMORY;

	res = wait_for_ready(device, request);

	device->scsi->free_ccb(request);

	SHOW_FLOW(3, "error_code: %x", (int)res.error_code);

	return res.error_code;

err:
	// error exits from the locked section release the mutex here
	RELEASE_BEN(&device->mutex);
	return err;
}
// RELEASE_ENGINE - hand the acceleration engine back to the driver.
// If the caller supplied a sync token, fill it in first so the caller can
// later wait for the commands it issued; then drop the engine lock.
status_t RELEASE_ENGINE(engine_token * et, sync_token * st)
{
	if (st != NULL)
		GET_SYNC_TOKEN(et, st);

	RELEASE_BEN(gSi->engineLock);

	return B_OK;
}
// FifoEndWrite - commit a batch of FIFO writes.
// Publishes the new NEXT_CMD offset so the host processes the queued
// commands, then releases the FIFO lock.
void FifoEndWrite(void)
{
	gSi->fifo[SVGA_FIFO_NEXT_CMD] = gSi->fifoNext;
	RELEASE_BEN(gSi->fifoLock);
}
// explicitly block bus static void scsi_block_bus_int( scsi_bus_info *bus, bool by_SIM ) { SHOW_FLOW0( 3, "" ); ACQUIRE_BEN( &bus->mutex ); scsi_block_bus_nolock( bus, by_SIM ); RELEASE_BEN( &bus->mutex ); }
// free_hook - close down the device.
// On the last close: disables interrupts, removes the interrupt handler,
// deletes semaphores, unmaps the card and deletes all shared/DMA areas.
// Earlier closes just decrement the open count.
static status_t free_hook(void* dev) {
	device_info *di = (device_info *)dev;
	shared_info *si = di->si;
	vuint32 *regs = di->regs;

	/* lock the driver */
	AQUIRE_BEN(pd->kernel);

	/* if opened multiple times, decrement the open count and exit */
	if (di->is_open > 1)
		goto unlock_and_exit;

	/* disable and clear any pending interrupts */
	//fixme:
	//distinguish between crtc1/crtc2 once all heads get separate driver instances!
	disable_vbi_all(regs);

	if (si->ps.int_assigned) {
		/* remove interrupt handler */
		remove_io_interrupt_handler(di->pcii.u.h0.interrupt_line, nv_interrupt, di);

		/* delete the semaphores, ignoring any errors ('cause the owning
		   team may have died on us) */
		delete_sem(si->vblank);
		si->vblank = -1;
	}

	/* free regs and framebuffer areas */
	unmap_device(di);

	/* clean up our aligned DMA area */
	delete_area(si->dma_area);
	si->dma_area = -1;
	si->dma_buffer = NULL;

	/* clean up our unaligned DMA area */
	delete_area(si->unaligned_dma_area);
	si->unaligned_dma_area = -1;
	si->dma_buffer_pci = NULL;

	/* clean up our shared area */
	delete_area(di->shared_area);
	di->shared_area = -1;
	di->si = NULL;

unlock_and_exit:
	/* mark the device available */
	di->is_open--;
	/* unlock the driver */
	RELEASE_BEN(pd->kernel);
	/* all done */
	return B_OK;
}
// find VIP channel of a device // return: >= 0 channel of device // < 0 no device found int Radeon_FindVIPDevice( device_info *di, uint32 device_id ) { uint channel; uint32 cur_device_id; // if card has no VIP port, let hardware detection fail; // in this case, noone will bother us again if( !di->has_vip ) { SHOW_FLOW0( 3, "This Device has no VIP Bus."); return -1; } ACQUIRE_BEN( di->si->cp.lock ); Radeon_VIPReset( di, false ); // there are up to 4 devices, connected to one of 4 channels for( channel = 0; channel < 4; ++channel ) { // read device id if( !Radeon_VIPRead( di, channel, RADEON_VIP_VENDOR_DEVICE_ID, &cur_device_id, false )) { SHOW_FLOW( 3, "No device found on channel %d", channel); continue; } // compare device id directly if( cur_device_id == device_id ) { SHOW_FLOW( 3, "Device %08lx found on channel %d", device_id, channel); RELEASE_BEN( di->si->cp.lock ); return channel; } } RELEASE_BEN( di->si->cp.lock ); // couldn't find device return -1; }
// The medium of this device has changed: flag the change on every open
// handle so each one reports B_DEV_MEDIA_CHANGED exactly once.
// (The per-handle update must be atomic, hence the mutex.)
void periph_media_changed_public(scsi_periph_device_info *device)
{
	scsi_periph_handle_info *cur;

	ACQUIRE_BEN(&device->mutex);

	for (cur = device->handles; cur != NULL; cur = cur->next)
		cur->pending_error = B_DEV_MEDIA_CHANGED;

	RELEASE_BEN(&device->mutex);
}
// explicitly block device static void scsi_block_device_int( scsi_device_info *device, bool by_SIM ) { scsi_bus_info *bus = device->bus; SHOW_FLOW0( 3, "" ); ACQUIRE_BEN( &bus->mutex ); scsi_block_device_nolock( device, by_SIM ); // remove device from bus queue as it cannot be processed anymore scsi_remove_device_queue( device ); RELEASE_BEN( &bus->mutex ); }
// reset VIP void Radeon_VIPReset( device_info *di, bool lock ) { vuint8 *regs = di->regs; if( lock ) ACQUIRE_BEN( di->si->cp.lock ); Radeon_WaitForFifo( di, 5 ); // Radeon_WaitForIdle( di, false, false ); switch(di->asic){ case rt_r200: case rt_rs200: case rt_rv200: case rt_rs100: case rt_rv100: case rt_r100: OUTREG( regs, RADEON_VIPH_CONTROL, 4 | (15 << RADEON_VIPH_CONTROL_VIPH_MAX_WAIT_SHIFT) | RADEON_VIPH_CONTROL_VIPH_DMA_MODE | RADEON_VIPH_CONTROL_VIPH_EN ); // slowest, timeout in 16 phases OUTREG( regs, RADEON_VIPH_TIMEOUT_STAT, (INREG( regs, RADEON_VIPH_TIMEOUT_STAT) & 0xFFFFFF00) | RADEON_VIPH_TIMEOUT_STAT_VIPH_REGR_DIS); OUTREG( regs, RADEON_VIPH_DV_LAT, 0xff | (4 << RADEON_VIPH_DV_LAT_VIPH_DV0_LAT_SHIFT) | (4 << RADEON_VIPH_DV_LAT_VIPH_DV1_LAT_SHIFT) | (4 << RADEON_VIPH_DV_LAT_VIPH_DV2_LAT_SHIFT) | (4 << RADEON_VIPH_DV_LAT_VIPH_DV3_LAT_SHIFT)); // set timeslice OUTREG( regs, RADEON_VIPH_DMA_CHUNK, 0x151); OUTREG( regs, RADEON_TEST_DEBUG_CNTL, INREG( regs, RADEON_TEST_DEBUG_CNTL) & (~RADEON_TEST_DEBUG_CNTL_OUT_EN)); default: OUTREG( regs, RADEON_VIPH_CONTROL, 9 | (15 << RADEON_VIPH_CONTROL_VIPH_MAX_WAIT_SHIFT) | RADEON_VIPH_CONTROL_VIPH_DMA_MODE | RADEON_VIPH_CONTROL_VIPH_EN ); // slowest, timeout in 16 phases OUTREG( regs, RADEON_VIPH_TIMEOUT_STAT, (INREG( regs, RADEON_VIPH_TIMEOUT_STAT) & 0xFFFFFF00) | RADEON_VIPH_TIMEOUT_STAT_VIPH_REGR_DIS); OUTREG( regs, RADEON_VIPH_DV_LAT, 0xff | (4 << RADEON_VIPH_DV_LAT_VIPH_DV0_LAT_SHIFT) | (4 << RADEON_VIPH_DV_LAT_VIPH_DV1_LAT_SHIFT) | (4 << RADEON_VIPH_DV_LAT_VIPH_DV2_LAT_SHIFT) | (4 << RADEON_VIPH_DV_LAT_VIPH_DV3_LAT_SHIFT)); // set timeslice OUTREG( regs, RADEON_VIPH_DMA_CHUNK, 0x0); OUTREG( regs, RADEON_TEST_DEBUG_CNTL, INREG( regs, RADEON_TEST_DEBUG_CNTL) & (~RADEON_TEST_DEBUG_CNTL_OUT_EN)); break; } if( lock ) RELEASE_BEN( di->si->cp.lock ); }
// public function: pan display status_t MOVE_DISPLAY( uint16 h_display_start, uint16 v_display_start ) { shared_info *si = ai->si; status_t result; ACQUIRE_BEN( si->engine.lock ); // TBD: we should probably lock card first; in this case, we must // split this function into locking and worker part, as this // function is used internally as well result = Radeon_MoveDisplay( ai, h_display_start, v_display_start ); RELEASE_BEN( si->engine.lock ); return result; }
// Read a block of data from the VIP FIFO.
// When `lock` is true the CP benaphore is taken around the transfer;
// pass false if the caller already holds it. Returns true on success.
bool Radeon_VIPFifoRead(device_info *di, uint8 channel, uint32 address, uint32 count, uint8 *buffer, bool lock)
{
	bool success;

	if( lock )
		ACQUIRE_BEN( di->si->cp.lock );

	success = do_VIPFifoRead( di, channel, address, count, buffer );

	if( lock )
		RELEASE_BEN( di->si->cp.lock );

	return success;
}
// public function: write data to VIP bool Radeon_VIPWrite(device_info *di, uint8 channel, uint address, uint32 data, bool lock ) { bool res; //SHOW_FLOW( 2, "address=%x, data=%lx, lock=%d", address, data, lock ); if( lock ) ACQUIRE_BEN( di->si->cp.lock ); res = do_VIPWrite( di, channel, address, data ); if( lock ) RELEASE_BEN( di->si->cp.lock ); return res; }
// FreeHook - release one open of the device.
// The mappings and shared area are torn down only when the last opener
// goes away; otherwise just the open count is decremented.
static status_t FreeHook(void *dev) {
	TRACE("FreeHook\n");

	ACQUIRE_BEN(gPd->kernel);

	// last close (count will drop to 0): undo OpenHook's setup
	if (gPd->isOpen < 2) {
		UnmapDevice();
		FreeShared();
	}
	gPd->isOpen--;

	RELEASE_BEN(gPd->kernel);

	TRACE("FreeHook ends\n");
	return B_OK;
}
/*!	Clean-up daemon for the per-device DMA bounce buffer.
	Called periodically; frees the buffer (and its saved SG list) once it
	has been idle for longer than SCSI_DMA_BUFFER_CLEANUP_DELAY.
	\a counter is unused.
*/
void scsi_dma_buffer_daemon(void *dev, int counter)
{
	scsi_device_info *device = (scsi_device_info*)dev;
	dma_buffer *buffer;

	ACQUIRE_BEN(&device->dma_buffer_lock);

	buffer = &device->dma_buffer;

	// bug fix: idle time is now - last_use; the original operand order
	// yielded a negative value, so the buffer was never released
	if (!buffer->inuse
		&& system_time() - buffer->last_use > SCSI_DMA_BUFFER_CLEANUP_DELAY) {
		scsi_free_dma_buffer(buffer);
		scsi_free_dma_buffer_sg_orig(buffer);
	}

	RELEASE_BEN(&device->dma_buffer_lock);
}
// public function: discard overlay buffer status_t RELEASE_OVERLAY_BUFFER( const overlay_buffer *ob ) { virtual_card *vc = ai->vc; shared_info *si = ai->si; overlay_buffer_node *node; radeon_free_mem fm; status_t result; SHOW_FLOW0( 3, "" ); node = (overlay_buffer_node *)((char *)ob - offsetof( overlay_buffer_node, buffer )); if( si->active_overlay.on == node || si->active_overlay.prev_on ) Radeon_HideOverlay( ai ); // free memory fm.magic = RADEON_PRIVATE_DATA_MAGIC; fm.handle = node->mem_handle; fm.memory_type = mt_local; fm.global = false; result = ioctl( ai->fd, RADEON_FREE_MEM, &fm ); if( result != B_OK ) { SHOW_FLOW( 3, "ups - couldn't free memory (handle=%x, status=%s)", node->mem_handle, strerror( result )); } ACQUIRE_BEN( si->engine.lock ); // remove from list if( node->next ) node->next->prev = node->prev; if( node->prev ) node->prev->next = node->next; else vc->overlay_buffers = node->next; RELEASE_BEN( si->engine.lock ); SHOW_FLOW0( 3, "success" ); return B_OK; }
// explictely unblock bus static void scsi_unblock_bus_int( scsi_bus_info *bus, bool by_SIM ) { bool was_servicable, start_retry; SHOW_FLOW0( 3, "" ); ACQUIRE_BEN( &bus->mutex ); was_servicable = scsi_can_service_bus( bus ); scsi_unblock_bus_noresume( bus, by_SIM ); start_retry = !was_servicable && scsi_can_service_bus( bus ); RELEASE_BEN( &bus->mutex ); if( start_retry ) release_sem( bus->start_service ); }
// SIM signals that it can handle further requests for this bus void scsi_cont_send_bus( scsi_bus_info *bus ) { bool was_servicable, start_retry; SHOW_FLOW0( 3, "" ); ACQUIRE_BEN( &bus->mutex ); was_servicable = scsi_can_service_bus( bus ); scsi_clear_bus_overflow( bus ); start_retry = !was_servicable && scsi_can_service_bus( bus ); RELEASE_BEN( &bus->mutex ); if( start_retry ) release_sem_etc( bus->start_service, 1, 0/*B_DO_NOT_RESCHEDULE*/ ); }
// OpenHook - open the device.
// The first open enables PCI memory/IO access, creates the shared area,
// checks the hardware and maps it; subsequent opens just bump the count.
// On success the global per-device data is handed back as the cookie.
static status_t OpenHook(const char *name, uint32 flags, void **cookie) {
	status_t ret = B_OK;
	pci_info *pcii = &gPd->pcii;
	uint32 tmpUlong;

	TRACE("OpenHook (%s, %ld)\n", name, flags);

	ACQUIRE_BEN(gPd->kernel);

	// already set up by an earlier open - only the count changes
	if (gPd->isOpen)
		goto markAsOpen;

	/* Enable memory mapped IO and VGA I/O */
	tmpUlong = get_pci(PCI_command, 2);
	tmpUlong |= PCI_command_memory;
	tmpUlong |= PCI_command_io;
	set_pci(PCI_command, 2, tmpUlong);

	if ((ret = CreateShared()) != B_OK)
		goto done;
	if ((ret = CheckCapabilities()) != B_OK)
		goto freeShared;
	if ((ret = MapDevice()) != B_OK)
		goto freeShared;

markAsOpen:
	gPd->isOpen++;
	*cookie = gPd;
	goto done;

freeShared:
	// undo CreateShared() on any failure after it succeeded
	FreeShared();

done:
	RELEASE_BEN(gPd->kernel);
	TRACE("OpenHook: %ld\n", ret);
	return ret;
}
/* ----------- free_hook - close down the device -----------
   On the last close this disables interrupts, removes the interrupt
   handler, deletes the vblank semaphore, unmaps the card and deletes the
   shared area; earlier closes just decrement the open count. */
static status_t free_hook (void* dev) {
	device_info *di = (device_info *)dev;
	shared_info *si = di->si;
	vuint32 *regs = di->regs;

	/* lock the driver */
	AQUIRE_BEN(pd->kernel);

	/* if opened multiple times, decrement the open count and exit */
	if (di->is_open > 1)
		goto unlock_and_exit;

	/* disable and clear any pending interrupts */
	disable_vbi(regs);

	/* remove interrupt handler */
	remove_io_interrupt_handler(di->pcii.u.h0.interrupt_line, eng_interrupt, di);

	/* delete the semaphores, ignoring any errors ('cause the owning
	   team may have died on us) */
	delete_sem(si->vblank);
	si->vblank = -1;

	/* free regs and framebuffer areas */
	unmap_device(di);

	/* clean up our shared area */
	delete_area(di->shared_area);
	di->shared_area = -1;
	di->si = NULL;

unlock_and_exit:
	/* mark the device available */
	di->is_open--;
	/* unlock the driver */
	RELEASE_BEN(pd->kernel);
	/* all done */
	return B_OK;
}
// public function: show/hide overlay status_t CONFIGURE_OVERLAY( overlay_token ot, const overlay_buffer *ob, const overlay_window *ow, const overlay_view *ov ) { shared_info *si = ai->si; status_t result; SHOW_FLOW0( 4, "" ); if ( (uintptr_t)ot != si->overlay_mgr.token ) return B_BAD_VALUE; if ( !si->overlay_mgr.inuse ) return B_BAD_VALUE; if ( ow == NULL || ov == NULL ) { SHOW_FLOW0( 3, "hide only" ); Radeon_HideOverlay( ai ); return B_OK; } if ( ob == NULL ) return B_ERROR; ACQUIRE_BEN( si->engine.lock ); // store whished values si->pending_overlay.ot = ot; si->pending_overlay.ob = *ob; si->pending_overlay.ow = *ow; si->pending_overlay.ov = *ov; si->pending_overlay.on = (overlay_buffer_node *)((char *)ob - offsetof( overlay_buffer_node, buffer )); result = Radeon_UpdateOverlay( ai ); RELEASE_BEN( si->engine.lock ); return result; }
/*
 * et6000FreeHook - close down the device.
 * On the last close: clears and disables ACL interrupts, removes the
 * interrupt handler, unmaps the framebuffer and deletes the shared area.
 * Earlier closes just decrement the open count.
 */
static status_t et6000FreeHook(void* dev) {
	ET6000DeviceInfo *di = (ET6000DeviceInfo *)dev;
	ET6000SharedInfo *si = di->si;

	ddprintf(("SKD et6000FreeHook() begins...\n"));

	/* lock the driver */
	AQUIRE_BEN(pd->kernel);

	/* if opened multiple times, decrement the open count and exit */
	if (di->isOpen > 1)
		goto unlock_and_exit;

	/* Clear any pending interrupts and disable interrupts. */
	et6000aclReadInterruptClear(si->mmRegs);
	et6000aclWriteInterruptClear(si->mmRegs);
	et6000aclMasterInterruptDisable(si->mmRegs);

	/* Remove the interrupt handler */
	remove_io_interrupt_handler(di->pcii.u.h0.interrupt_line, et6000Interrupt, di);

	/* free framebuffer area */
	et6000UnmapDevice(di);

	/* clean up our shared area */
	delete_area(di->sharedArea);
	di->sharedArea = -1;
	di->si = NULL;

unlock_and_exit:
	/* mark the device available */
	di->isOpen--;
	/* unlock the driver */
	RELEASE_BEN(pd->kernel);

	ddprintf(("SKD et6000FreeHook() ends.\n"));
	/* all done */
	return B_OK;
}
// explicitly unblock device static void scsi_unblock_device_int( scsi_device_info *device, bool by_SIM ) { scsi_bus_info *bus = device->bus; bool was_servicable, start_retry; SHOW_FLOW0( 3, "" ); ACQUIRE_BEN( &bus->mutex ); was_servicable = scsi_can_service_bus( bus ); scsi_unblock_device_noresume( device, by_SIM ); // add to bus queue if not locked explicitly anymore and requests are waiting if( device->lock_count == 0 && device->queued_reqs != NULL ) scsi_add_device_queue_last( device ); start_retry = !was_servicable && scsi_can_service_bus( bus ); RELEASE_BEN( &bus->mutex ); if( start_retry ) release_sem( bus->start_service ); }
/* ----------- control_hook - where the real work is done -----------
   ioctl dispatcher. B_GET_ACCELERANT_SIGNATURE is the only public code;
   everything else is private to the matching accelerant (magic-checked).
   Unknown codes return B_DEV_INVALID_IOCTL. */
static status_t control_hook (void* dev, uint32 msg, void *buf, size_t len) {
	device_info *di = (device_info *)dev;
	status_t result = B_DEV_INVALID_IOCTL;
	uint32 tmpUlong;

	switch (msg) {
		/* the only PUBLIC ioctl */
		case B_GET_ACCELERANT_SIGNATURE: {
			char *sig = (char *)buf;
			strcpy(sig, current_settings.accelerant);
			result = B_OK;
		} break;

		/* PRIVATE ioctl from here on */
		/* hand the shared-info area id to the accelerant */
		case ENG_GET_PRIVATE_DATA: {
			eng_get_private_data *gpd = (eng_get_private_data *)buf;
			if (gpd->magic == VIA_PRIVATE_DATA_MAGIC) {
				gpd->shared_info_area = di->shared_area;
				result = B_OK;
			}
		} break;

		/* read a PCI configuration register */
		case ENG_GET_PCI: {
			eng_get_set_pci *gsp = (eng_get_set_pci *)buf;
			if (gsp->magic == VIA_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);
				gsp->value = get_pci(gsp->offset, gsp->size);
				result = B_OK;
			}
		} break;

		/* write a PCI configuration register */
		case ENG_SET_PCI: {
			eng_get_set_pci *gsp = (eng_get_set_pci *)buf;
			if (gsp->magic == VIA_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);
				set_pci(gsp->offset, gsp->size, gsp->value);
				result = B_OK;
			}
		} break;

		/* report the kernel device name (needed for cloning) */
		case ENG_DEVICE_NAME: { // apsed
			eng_device_name *dn = (eng_device_name *)buf;
			if (dn->magic == VIA_PRIVATE_DATA_MAGIC) {
				strcpy(dn->name, di->name);
				result = B_OK;
			}
		} break;

		/* enable or disable the vertical-blank interrupt */
		case ENG_RUN_INTERRUPTS: {
			eng_set_bool_state *ri = (eng_set_bool_state *)buf;
			if (ri->magic == VIA_PRIVATE_DATA_MAGIC) {
				vuint32 *regs = di->regs;
				if (ri->do_it) {
					enable_vbi(regs);
				} else {
					disable_vbi(regs);
				}
				result = B_OK;
			}
		} break;

		/* query the nth AGP device, if an AGP bus manager is present */
		case ENG_GET_NTH_AGP_INFO: {
			eng_nth_agp_info *nai = (eng_nth_agp_info *)buf;
			if (nai->magic == VIA_PRIVATE_DATA_MAGIC) {
				nai->exist = false;
				nai->agp_bus = false;
				if (agp_bus) {
					nai->agp_bus = true;
					if ((*agp_bus->get_nth_agp_info)(nai->index, &(nai->agpi)) == B_NO_ERROR) {
						nai->exist = true;
					}
				}
				result = B_OK;
			}
		} break;

		/* program an AGP mode, if an AGP bus manager is present */
		case ENG_ENABLE_AGP: {
			eng_cmd_agp *nca = (eng_cmd_agp *)buf;
			if (nca->magic == VIA_PRIVATE_DATA_MAGIC) {
				if (agp_bus) {
					nca->agp_bus = true;
					nca->cmd = agp_bus->set_agp_mode(nca->cmd);
				}
				else {
					nca->agp_bus = false;
					nca->cmd = 0;
				}
				result = B_OK;
			}
		} break;

		/* write an ISA I/O port on behalf of the accelerant */
		case ENG_ISA_OUT: {
			eng_in_out_isa *io_isa = (eng_in_out_isa *)buf;
			if (io_isa->magic == VIA_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);

				/* lock the driver:
				 * no other graphics card may have ISA I/O enabled when we enter */
				AQUIRE_BEN(pd->kernel);

				/* enable ISA I/O access */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong |= PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);

				if (io_isa->size == 1)
					isa_bus->write_io_8(io_isa->adress, (uint8)io_isa->data);
				else
					isa_bus->write_io_16(io_isa->adress, io_isa->data);
				result = B_OK;

				/* disable ISA I/O access */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong &= ~PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);

				/* end of critical section */
				RELEASE_BEN(pd->kernel);
			}
		} break;

		/* read an ISA I/O port on behalf of the accelerant */
		case ENG_ISA_IN: {
			eng_in_out_isa *io_isa = (eng_in_out_isa *)buf;
			if (io_isa->magic == VIA_PRIVATE_DATA_MAGIC) {
				pci_info *pcii = &(di->pcii);

				/* lock the driver:
				 * no other graphics card may have ISA I/O enabled when we enter */
				AQUIRE_BEN(pd->kernel);

				/* enable ISA I/O access */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong |= PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);

				if (io_isa->size == 1)
					io_isa->data = isa_bus->read_io_8(io_isa->adress);
				else
					io_isa->data = isa_bus->read_io_16(io_isa->adress);
				result = B_OK;

				/* disable ISA I/O access */
				tmpUlong = get_pci(PCI_command, 2);
				tmpUlong &= ~PCI_command_io;
				set_pci(PCI_command, 2, tmpUlong);

				/* end of critical section */
				RELEASE_BEN(pd->kernel);
			}
		} break;
	}
	return result;
}
// control_hook - ioctl dispatcher for the Radeon kernel driver.
// B_GET_ACCELERANT_SIGNATURE is public; all other codes are private to
// the accelerant and validated via RADEON_PRIVATE_DATA_MAGIC.
// Unknown or magic-failed codes return B_DEV_INVALID_IOCTL.
static status_t control_hook(void *dev, uint32 msg, void *buf, size_t len) {
	device_info *di = (device_info *)dev;
	status_t result = B_DEV_INVALID_IOCTL;

	switch (msg) {
		// needed by app_server to load accelerant
		case B_GET_ACCELERANT_SIGNATURE: {
			char *sig = (char *)buf;
			strcpy(sig, "radeon.accelerant");
			result = B_OK;
		} break;

		// needed to share data between kernel and accelerant
		case RADEON_GET_PRIVATE_DATA: {
			radeon_get_private_data *gpd = (radeon_get_private_data *)buf;
			if (gpd->magic == RADEON_PRIVATE_DATA_MAGIC) {
				gpd->shared_info_area = di->shared_area;
				gpd->virtual_card_area = di->virtual_card_area;
				result = B_OK;
			}
		} break;

		// needed for cloning
		case RADEON_DEVICE_NAME: {
			radeon_device_name *dn = (radeon_device_name *)buf;
			if( dn->magic == RADEON_PRIVATE_DATA_MAGIC ) {
				strncpy( dn->name, di->name, MAX_RADEON_DEVICE_NAME_LENGTH );
				result = B_OK;
			}
		} break;

		// graphics mem manager: allocate a block, optionally tagged to this
		// open (non-global allocations are auto-freed in free_hook)
		case RADEON_ALLOC_MEM: {
			radeon_alloc_mem *am = (radeon_alloc_mem *)buf;
			memory_type_e memory_type;

			if( am->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;
			if( am->memory_type > mt_last )
				break;

			// mt_nonlocal maps to whatever non-local type the card has (PCI/AGP)
			memory_type = am->memory_type == mt_nonlocal ? di->si->nonlocal_type : am->memory_type;

			result = mem_alloc( di->memmgr[memory_type], am->size, am->global ? 0 : dev, &am->handle, &am->offset );
		} break;

		case RADEON_FREE_MEM: {
			radeon_free_mem *fm = (radeon_free_mem *)buf;
			memory_type_e memory_type;

			if( fm->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;
			if( fm->memory_type > mt_last )
				break;

			memory_type = fm->memory_type == mt_nonlocal ? di->si->nonlocal_type : fm->memory_type;

			result = mem_free( di->memmgr[memory_type], fm->handle, fm->global ? 0 : dev );
		} break;

		// wait until the engine is idle
		case RADEON_WAITFORIDLE: {
			radeon_wait_for_idle *wfi = (radeon_wait_for_idle *)buf;

			if( wfi->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			Radeon_WaitForIdle( di, true, wfi->keep_lock );
			result = B_OK;
		} break;

		// wait for free command FIFO entries
		case RADEON_WAITFORFIFO: {
			radeon_wait_for_fifo *wff = (radeon_wait_for_fifo *)buf;

			if( wff->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			Radeon_WaitForFifo( di, wff->entries );
			result = B_OK;
		} break;

		// reset the acceleration engine (under the CP lock)
		case RADEON_RESETENGINE: {
			radeon_no_arg *na = (radeon_no_arg *)buf;

			if( na->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			ACQUIRE_BEN( di->si->cp.lock );
			Radeon_ResetEngine( di );
			RELEASE_BEN( di->si->cp.lock );

			result = B_OK;
		} break;

		// VIP bus register access
		case RADEON_VIPREAD: {
			radeon_vip_read *vr = (radeon_vip_read *)buf;

			if( vr->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			result = Radeon_VIPRead( di, vr->channel, vr->address, &vr->data,
				vr->lock ) ? B_OK : B_ERROR;
		} break;

		case RADEON_VIPWRITE: {
			radeon_vip_write *vw = (radeon_vip_write *)buf;

			if( vw->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			result = Radeon_VIPWrite( di, vw->channel, vw->address, vw->data,
				vw->lock ) ? B_OK : B_ERROR;
		} break;

		case RADEON_VIPFIFOREAD: {
			radeon_vip_fifo_read *vr = (radeon_vip_fifo_read *)buf;

			if( vr->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			result = Radeon_VIPFifoRead( di, vr->channel, vr->address, vr->count,
				vr->data, vr->lock ) ? B_OK : B_ERROR;
		} break;

		case RADEON_VIPFIFOWRITE: {
			radeon_vip_fifo_write *vw = (radeon_vip_fifo_write *)buf;

			if( vw->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			result = Radeon_VIPFifoWrite( di, vw->channel, vw->address, vw->count,
				vw->data, vw->lock ) ? B_OK : B_ERROR;
		} break;

		// probe the VIP bus for a device with the given id
		case RADEON_FINDVIPDEVICE: {
			radeon_find_vip_device *fvd = (radeon_find_vip_device *)buf;

			if( fvd->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			fvd->channel = Radeon_FindVIPDevice( di, fvd->device_id );
			result = B_OK;
		} break;

		case RADEON_VIPRESET: {
			radeon_vip_reset *fvd = (radeon_vip_reset *)buf;

			if( fvd->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			Radeon_VIPReset( di, fvd->lock );
			result = B_OK;
		} break;

		// wait for a capture interrupt, then read its status atomically
		case RADEON_WAIT_FOR_CAP_IRQ: {
			radeon_wait_for_cap_irq *wvc = (radeon_wait_for_cap_irq *)buf;

			if( wvc->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			// restrict wait time to 1 sec to get not stuck here in kernel
			result = acquire_sem_etc( di->cap_sem, 1, B_RELATIVE_TIMEOUT,
				min( wvc->timeout, 1000000 ));

			if( result == B_OK ) {
				// snapshot must be consistent with the interrupt handler,
				// hence spinlock with interrupts disabled
				cpu_status prev_irq_state = disable_interrupts();
				acquire_spinlock( &di->cap_spinlock );

				wvc->timestamp = di->cap_timestamp;
				wvc->int_status = di->cap_int_status;
				wvc->counter = di->cap_counter;

				release_spinlock( &di->cap_spinlock );
				restore_interrupts( prev_irq_state );
			}
		} break;

		case RADEON_DMACOPY: {
			radeon_dma_copy *dc = (radeon_dma_copy *)buf;

			if( dc->magic != RADEON_PRIVATE_DATA_MAGIC )
				break;

			result = Radeon_DMACopy( di, dc->src, dc->target, dc->size, dc->lock_mem, dc->contiguous );
		} break;

#ifdef ENABLE_LOGGING
#ifdef LOG_INCLUDE_STARTUP
		// interface to log data
		case RADEON_GET_LOG_SIZE:
			*(uint32 *)buf = log_getsize( di->si->log );
			result = B_OK;
			break;

		case RADEON_GET_LOG_DATA:
			log_getcopy( di->si->log, buf, ((uint32 *)buf)[0] );
			result = B_OK;
			break;
#endif
#endif
	}

	if( result == B_DEV_INVALID_IOCTL )
		SHOW_ERROR( 3, "Invalid ioctl call: code=0x%lx", msg );

	return result;
}
// open_hook - open an instance of a published device.
// On first open this creates the shared area, maps the card, sets up the
// vblank semaphore and installs the interrupt handler; errors unwind in
// reverse order via the goto ladder. Later opens just bump the count.
static status_t open_hook (const char* name, uint32 flags, void** cookie) {
	int32 index = 0;
	device_info *di;
	shared_info *si;
	thread_id thid;
	thread_info thinfo;
	status_t result = B_OK;
	vuint32 *regs;
	char shared_name[B_OS_NAME_LENGTH];

	/* find the device name in the list of devices */
	/* we're never passed a name we didn't publish */
	while (pd->device_names[index] && (strcmp(name, pd->device_names[index]) != 0))
		index++;

	/* for convienience */
	di = &(pd->di[index]);

	/* make sure no one else has write access to the common data */
	AQUIRE_BEN(pd->kernel);

	/* if it's already open for writing */
	if (di->is_open) {
		/* mark it open another time */
		goto mark_as_open;
	}

	/* create the shared area */
	sprintf(shared_name, DEVICE_FORMAT " shared", di->pcii.vendor_id,
		di->pcii.device_id, di->pcii.bus, di->pcii.device, di->pcii.function);
	/* create this area with NO user-space read or write permissions,
	   to prevent accidental dammage */
	di->shared_area = create_area(shared_name, (void **)&(di->si),
		B_ANY_KERNEL_ADDRESS,
		((sizeof(shared_info) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1)),
		B_FULL_LOCK, 0);
	if (di->shared_area < 0) {
		/* return the error */
		result = di->shared_area;
		goto done;
	}

	/* save a few dereferences */
	si = di->si;

	/* save the vendor and device IDs */
	si->vendor_id = di->pcii.vendor_id;
	si->device_id = di->pcii.device_id;
	si->revision = di->pcii.revision;
	si->bus = di->pcii.bus;
	si->device = di->pcii.device;
	si->function = di->pcii.function;

	/* device at bus #0, device #0, function #0 holds byte value at byte-index 0xf6 */
	si->ps.chip_rev = ((*pci_bus->read_pci_config)(0, 0, 0, 0xf6, 1));

	/* map the device */
	result = map_device(di);
	if (result < 0)
		goto free_shared;
	result = B_OK;

	/* create a semaphore for vertical blank management */
	si->vblank = create_sem(0, di->name);
	if (si->vblank < 0) {
		result = si->vblank;
		goto unmap;
	}

	/* change the owner of the semaphores to the opener's team */
	/* this is required because apps can't aquire kernel semaphores */
	thid = find_thread(NULL);
	get_thread_info(thid, &thinfo);
	set_sem_owner(si->vblank, thinfo.team);

	/* assign local regs pointer for SAMPLExx() macros */
	regs = di->regs;

	/* disable and clear any pending interrupts */
	disable_vbi(regs);

	/* If there is a valid interrupt line assigned then set up interrupts */
	if ((di->pcii.u.h0.interrupt_pin == 0x00) ||
	    (di->pcii.u.h0.interrupt_line == 0xff) ||	/* no IRQ assigned */
	    (di->pcii.u.h0.interrupt_line <= 0x02))		/* system IRQ assigned */
	{
		/* we are aborting! */
		/* Note: the R4 graphics driver kit lacks this statement!! */
		result = B_ERROR;
		/* interrupt does not exist so exit without installing our handler */
		goto delete_the_sem;
	}
	else {
		/* otherwise install our interrupt handler */
		result = install_io_interrupt_handler(di->pcii.u.h0.interrupt_line,
			eng_interrupt, (void *)di, 0);
		/* bail if we couldn't install the handler */
		if (result != B_OK)
			goto delete_the_sem;
	}

mark_as_open:
	/* mark the device open */
	di->is_open++;

	/* send the cookie to the opener */
	*cookie = di;

	goto done;

delete_the_sem:
	delete_sem(si->vblank);

unmap:
	unmap_device(di);

free_shared:
	/* clean up our shared area */
	delete_area(di->shared_area);
	di->shared_area = -1;
	di->si = NULL;

done:
	/* end of critical section */
	RELEASE_BEN(pd->kernel);

	/* all done, return the status */
	return result;
}
/*!	Make the request's data DMA-safe, bouncing it through the device's
	single shared DMA buffer if necessary.
	Returns true on success; on true with request->buffered set, the
	caller must later call scsi_release_dma_buffer().
*/
bool scsi_get_dma_buffer(scsi_ccb *request) {
	scsi_device_info *device = request->device;
	dma_buffer *buffer;

	request->buffered = false;

	// perhaps we have luck and no buffering is needed
	if( is_sg_list_dma_safe( request ))
		return true;

	SHOW_FLOW0(1, "Buffer is not DMA safe" );

	dump_sg_table(request->sg_list, request->sg_count);

	// only one buffer at a time; block until it is ours
	acquire_sem(device->dma_buffer_owner);

	// make sure, clean-up daemon doesn't bother us
	ACQUIRE_BEN(&device->dma_buffer_lock);

	// there is only one buffer, so no further management
	buffer = &device->dma_buffer;

	buffer->inuse = true;

	RELEASE_BEN(&device->dma_buffer_lock);

	// memorize buffer for cleanup
	request->dma_buffer = buffer;

	// enlarge buffer if too small
	if (buffer->size < request->data_length) {
		if (!scsi_alloc_dma_buffer(buffer, &device->bus->dma_params,
				request->data_length))
			goto err;
	}

	// create S/G to original data (necessary for copying from-buffer on end
	// of request, but also used during copying to-buffer in a second because
	// of lazyness)
	scsi_dma_buffer_compose_sg_orig(&device->dma_buffer, request);

	// copy data to buffer for write requests
	if ((request->flags & SCSI_DIR_MASK) == SCSI_DIR_OUT) {
		if (!scsi_copy_dma_buffer( request, request->data_length, true))
			goto err;
	}

	// replace data address, so no one notices that a buffer is used
	buffer->orig_data = request->data;
	buffer->orig_sg_list = request->sg_list;
	buffer->orig_sg_count = request->sg_count;

	request->data = buffer->address;
	request->sg_list = buffer->sg_list;
	request->sg_count = buffer->sg_count;

	SHOW_INFO(1, "bytes: %" B_PRIu32, request->data_length);
	SHOW_INFO0(3, "we can start now");

	request->buffered = true;
	return true;

err:
	SHOW_INFO0(3, "error setting up DMA buffer");

	ACQUIRE_BEN(&device->dma_buffer_lock);

	// some of this is probably not required, but I'm paranoid
	buffer->inuse = false;

	RELEASE_BEN(&device->dma_buffer_lock);
	release_sem(device->dma_buffer_owner);

	return false;
}