/*
 * Install the virtual system-register device into the given VM.
 *
 * Allocates the per-device private state, maps the physical register frame
 * of the (template) sysreg device into the VMM's vspace, and registers the
 * device with the VM so accesses can be trapped and emulated.
 *
 * Returns 0 on success, -1 on allocation/mapping/registration failure.
 * On failure no memory is leaked and no device is left registered.
 */
int vm_install_vsysreg(vm_t* vm)
{
    struct sysreg_priv *sysreg_data;
    struct device d;
    vspace_t* vmm_vspace;
    int err;

    /* Start from the static device template; we fill in priv below. */
    d = dev_sysreg;
    vmm_vspace = vm->vmm_vspace;

    /* Initialise the virtual device state (zeroed). */
    sysreg_data = calloc(1, sizeof(*sysreg_data));
    if (sysreg_data == NULL) {
        return -1;
    }
    sysreg_data->vm = vm;

    /* Map the device frame so the VMM can emulate guest accesses. */
    sysreg_data->regs = map_device(vmm_vspace, vm->vka, vm->simple,
                                   d.pstart, 0, seL4_AllRights);
    if (sysreg_data->regs == NULL) {
        /* Fix: the original leaked sysreg_data on this error path. */
        free(sysreg_data);
        return -1;
    }

    d.priv = sysreg_data;
    err = vm_add_device(vm, &d);
    if (err) {
        free(sysreg_data);
        return -1;
    }
    return 0;
}
/*
 * Initialize the PRUSS (Programmable Real-time Unit SubSystem):
 * locate and map the UIO device, verify the PRUSS ID register, halt any
 * running PRU, clear the cycle/stall counters, load the microcode from
 * 'ucodename' (version info returned via 'signature'), scrub register
 * and SRAM space with known patterns, and finally reset the PRU with the
 * program counter set to the code's start address.
 *
 * Returns 0 on success, -1 on any failure (device not found, bad ID,
 * code-load failure, or PC not accepted).
 */
int pruss_init( const char* ucodename, struct ucode_signature* signature)
{
  char uio_name[ NAME_MAX];
  char drv_name[ NAME_MAX];
  int start_addr_arg = 0;    // set option defaults
  unsigned int start_addr = 0;

  if (locate_pruss_device( UIO_DRIVER, drv_name, sizeof( drv_name), uio_name, sizeof( uio_name))) {
    if (debug_flags & DEBUG_PRUSS) {
      printf( "Located driver '%s' for the 'pruss' device '%s'.\n", drv_name, uio_name);
    }
    // Read PRUSS version register as simple check to verify proper mapping
    map_device( uio_name);
    // Note that PRUSS_BASE is 0 because it is remapped!
#define MM_PRUSS_REVID (PRUSS_CFG_OFFSET + 0x00000000UL)
#define MM_PRUSS_SYSCFG (PRUSS_CFG_OFFSET + 0x00000004UL)
#define MM_PRUSS_GPCFG0 (PRUSS_CFG_OFFSET + 0x00000008UL)
#define MM_PRUSS_GPCFG1 (PRUSS_CFG_OFFSET + 0x0000000CUL)
    // Enable clock for the outgoing OCP_HP0&1 (L3_FAST) busses
    // This will only work if the PRUSS is operational (not reset)!
    if (pruss_rd32( MM_PRUSS_REVID) == 0x47000000UL) {
      if (debug_flags & DEBUG_PRUSS) {
        printf( "Valid PRUSS ID found.\n");
      }
      int status = pruss_rd32( MM_PRUSS_SYSCFG);
      // Bit 4 is STANDBY_INIT: clearing it enables the OCP master ports.
      if ((status & 0x10) != 0) {
        pruss_wr32( MM_PRUSS_SYSCFG, status & ~(1 << 4));
        printf( "PRUSS enabled OCP master ports.\n");
      }
    } else {
      fprintf( stderr, "PRUSS ID is not found.\n");
      return -1;
    }
    // If the PRU is currently executing, halt it before reprogramming.
    if (pruss_rd32( PRUSS_PRU_CTRL_CONTROL) & PRUSS_PRU_CTRL_CONTROL_RUNSTATE) {
      if (debug_flags & DEBUG_PRUSS) {
        printf( "Found running PRU%d, disable it...", PRU_NR);
      }
      pruss_halt_pruss();
      if (debug_flags & DEBUG_PRUSS) {
        printf( " done.\n");
      }
    } else {
      if (debug_flags & DEBUG_PRUSS) {
        printf( "Found halted/idle PRU%d\n", PRU_NR);
      }
    }
  } else {
    printf( "PRUSS driver not found, bailing out\n");
    return -1;
  }

  // Reset PRUSS counters
  if (debug_flags & DEBUG_PRUSS) {
    printf( "Clearing PRUSS counters, old: cycle = %u, stall = %u\n",
            pruss_rd32( PRUSS_PRU_CTRL_CYCLE), pruss_rd32( PRUSS_PRU_CTRL_STALL));
  }
  pruss_wr32( PRUSS_PRU_CTRL_CYCLE, 0);
  pruss_wr32( PRUSS_PRU_CTRL_STALL, 0);

  if (debug_flags & DEBUG_PRUSS) {
    printf( "Loading microcode from file '%s'\n", ucodename);
  }
  // start_addr_arg is always 0 here, so the load fills in start_addr.
  if (pruss_load_code( ucodename, (start_addr_arg) ? NULL : &start_addr, signature) < 0) {
    return -1;
  }

  // Scrub the 30 general-purpose PRU registers (R0..R29) to zero.
  if (debug_flags & DEBUG_PRUSS) {
    printf( "Clearing register space...\n");
  }
  for (int i = 0 ; i < 30 ; ++i) {
    pruss_wr32( PRUSS_DBG_OFFSET + 4 * i, 0);
  }

  // Fill all 8KB (2048 words) of data SRAM with a recognizable pattern.
  if (debug_flags & DEBUG_PRUSS) {
    printf( "Initializing 8KB SRAM with deadbeef pattern...\n");
  }
  for (int i = 0 ; i < 2048 ; ++i) {
    pruss_wr32( PRUSS_RAM_OFFSET + 4 * i, 0xdeadbeef);
  }

  // Tag the SRAM buffer region (8 slots of 4 words at offset 256) with
  // unique per-slot/per-word markers for later debugging.
  if (debug_flags & DEBUG_PRUSS) {
    printf( "Initializing SRAM buffer with fixed patterns...\n");
  }
  for (int i = 0 ; i < 8 ; ++i) {
    for (int j = 0 ; j < 4 ; ++j) {
      pruss_wr32( PRUSS_RAM_OFFSET + 256 + 4 * (4 * i + j), 0xcafe0000 + 256 * i + j);
    }
  }

  if (debug_flags & DEBUG_PRUSS) {
    printf( "Reset PRU%d and set program counter to %d\n", PRU_NR, start_addr);
  }
  /* clear bit 0 to reset the PRU, this is a self clearing (setting) bit! */
  pruss_wr32( PRUSS_PRU_CTRL_CONTROL, (start_addr << 16) | 0x00000000);  // pc + #softreset
  // Verify the PC was accepted by reading it back from the status register.
  if (pruss_rd32( PRUSS_PRU_CTRL_STATUS) != start_addr) {
    fprintf( stderr, "Failed to set PRUSS code start address (PC)\n");
    return -1;
  }
  return 0;
}
/*
 * Device open hook.
 *
 * On first open: creates the kernel-only shared_info area, records PCI
 * identity, maps the device registers, creates the vblank semaphore (owned
 * by the opener's team), and installs the interrupt handler. Subsequent
 * opens just bump the open count and reuse the existing state.
 *
 * NOTE(review): this driver treats a missing/unusable IRQ line as a hard
 * failure (B_ERROR) — unlike drivers that fall back to non-INT operation.
 * All setup/teardown runs under the pd->kernel benaphore; the goto labels
 * unwind resources in reverse order of acquisition.
 */
static status_t open_hook (const char* name, uint32 flags, void** cookie) {
  int32 index = 0;
  device_info *di;
  shared_info *si;
  thread_id thid;
  thread_info thinfo;
  status_t result = B_OK;
  vuint32 *regs;
  char shared_name[B_OS_NAME_LENGTH];

  /* find the device name in the list of devices */
  /* we're never passed a name we didn't publish */
  while (pd->device_names[index] && (strcmp(name, pd->device_names[index]) != 0)) index++;

  /* for convenience */
  di = &(pd->di[index]);

  /* make sure no one else has write access to the common data */
  AQUIRE_BEN(pd->kernel);

  /* if it's already open for writing */
  if (di->is_open) {
    /* mark it open another time */
    goto mark_as_open;
  }

  /* create the shared area */
  sprintf(shared_name, DEVICE_FORMAT " shared", di->pcii.vendor_id, di->pcii.device_id, di->pcii.bus, di->pcii.device, di->pcii.function);
  /* create this area with NO user-space read or write permissions, to prevent accidental damage */
  di->shared_area = create_area(shared_name, (void **)&(di->si), B_ANY_KERNEL_ADDRESS, ((sizeof(shared_info) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1)), B_FULL_LOCK, 0);
  if (di->shared_area < 0) {
    /* return the error (negative area_id doubles as the error code) */
    result = di->shared_area;
    goto done;
  }

  /* save a few dereferences */
  si = di->si;

  /* save the vendor and device IDs */
  si->vendor_id = di->pcii.vendor_id;
  si->device_id = di->pcii.device_id;
  si->revision = di->pcii.revision;
  si->bus = di->pcii.bus;
  si->device = di->pcii.device;
  si->function = di->pcii.function;

  /* device at bus #0, device #0, function #0 holds byte value at byte-index 0xf6 */
  si->ps.chip_rev = ((*pci_bus->read_pci_config)(0, 0, 0, 0xf6, 1));

  /* map the device registers/framebuffer into the kernel */
  result = map_device(di);
  if (result < 0) goto free_shared;
  result = B_OK;

  /* create a semaphore for vertical blank management */
  si->vblank = create_sem(0, di->name);
  if (si->vblank < 0) {
    result = si->vblank;
    goto unmap;
  }

  /* change the owner of the semaphores to the opener's team */
  /* this is required because apps can't acquire kernel semaphores */
  thid = find_thread(NULL);
  get_thread_info(thid, &thinfo);
  set_sem_owner(si->vblank, thinfo.team);

  /* assign local regs pointer for SAMPLExx() macros */
  regs = di->regs;

  /* disable and clear any pending interrupts */
  disable_vbi(regs);

  /* If there is a valid interrupt line assigned then set up interrupts */
  if ((di->pcii.u.h0.interrupt_pin == 0x00) ||
      (di->pcii.u.h0.interrupt_line == 0xff) || /* no IRQ assigned */
      (di->pcii.u.h0.interrupt_line <= 0x02))   /* system IRQ assigned */
  {
    /* we are aborting! */
    /* Note: the R4 graphics driver kit lacks this statement!! */
    result = B_ERROR;
    /* interrupt does not exist so exit without installing our handler */
    goto delete_the_sem;
  }
  else
  {
    /* otherwise install our interrupt handler */
    result = install_io_interrupt_handler(di->pcii.u.h0.interrupt_line, eng_interrupt, (void *)di, 0);
    /* bail if we couldn't install the handler */
    if (result != B_OK) goto delete_the_sem;
  }

mark_as_open:
  /* mark the device open */
  di->is_open++;

  /* send the cookie to the opener */
  *cookie = di;

  goto done;

/* error unwind: labels run in reverse order of resource acquisition */
delete_the_sem:
  delete_sem(si->vblank);

unmap:
  unmap_device(di);

free_shared:
  /* clean up our shared area */
  delete_area(di->shared_area);
  di->shared_area = -1;
  di->si = NULL;

done:
  /* end of critical section */
  RELEASE_BEN(pd->kernel);

  /* all done, return the status */
  return result;
}
/*
 * Device open hook (NV driver variant).
 *
 * On first open: creates the user-cloneable shared_info area, allocates and
 * physically aligns a 1MB DMA command buffer (mapped write-combined when
 * possible), records PCI identity and UMA memory size for integrated GPUs,
 * maps the device, and sets up the vblank semaphore plus interrupt handler.
 * A missing/unusable IRQ is NOT fatal: the driver falls back to non-INT
 * operation (si->ps.int_assigned stays false) and the open still succeeds.
 * Subsequent opens just bump the open count.
 *
 * All setup/teardown runs under the pd->kernel benaphore; the goto labels
 * unwind resources in reverse order of acquisition.
 */
static status_t open_hook(const char* name, uint32 flags, void** cookie) {
	int32 index = 0;
	device_info *di;
	shared_info *si;
	thread_id thid;
	thread_info thinfo;
	status_t result = B_OK;
	char shared_name[B_OS_NAME_LENGTH];
	physical_entry map[1];
	size_t net_buf_size;
	void *unaligned_dma_buffer;

	/* find the device name in the list of devices */
	/* we're never passed a name we didn't publish */
	while (pd->device_names[index] && (strcmp(name, pd->device_names[index]) != 0))
		index++;

	/* for convenience */
	di = &(pd->di[index]);

	/* make sure no one else has write access to the common data */
	AQUIRE_BEN(pd->kernel);

	/* if it's already open for writing */
	if (di->is_open) {
		/* mark it open another time */
		goto mark_as_open;
	}

	/* create the shared_info area */
	sprintf(shared_name, DEVICE_FORMAT " shared",
		di->pcii.vendor_id, di->pcii.device_id,
		di->pcii.bus, di->pcii.device, di->pcii.function);
	/* create this area with NO user-space read or write permissions, to prevent
	 * accidental damage */
	di->shared_area = create_area(shared_name, (void **)&(di->si), B_ANY_KERNEL_ADDRESS,
		((sizeof(shared_info) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1)), B_FULL_LOCK,
		B_USER_CLONEABLE_AREA);
	if (di->shared_area < 0) {
		/* return the error (negative area_id doubles as the error code) */
		result = di->shared_area;
		goto done;
	}

	/* save a few dereferences */
	si = di->si;

	/* create the DMA command buffer area */
	//fixme? for R4.5 a workaround for cloning would be needed!

	/* we want to setup a 1Mb buffer (size must be multiple of B_PAGE_SIZE) */
	net_buf_size = ((1 * 1024 * 1024) + (B_PAGE_SIZE-1)) & ~(B_PAGE_SIZE-1);
	/* create the area that will hold the DMA command buffer */
	si->unaligned_dma_area =
		create_area("NV DMA cmd buffer",
			(void **)&unaligned_dma_buffer,
			B_ANY_KERNEL_ADDRESS,
			2 * net_buf_size, /* take twice the net size so we can have MTRR-WC even on old systems */
			B_32_BIT_CONTIGUOUS, /* GPU always needs access */
			B_USER_CLONEABLE_AREA | B_READ_AREA | B_WRITE_AREA);
	// TODO: Physical aligning can be done without waste using the
	// private create_area_etc().
	/* on error, abort */
	if (si->unaligned_dma_area < 0)
	{
		/* free the already created shared_info area, and return the error */
		result = si->unaligned_dma_area;
		goto free_shared;
	}
	/* we (also) need the physical adress our DMA buffer is at, as this needs to be
	 * fed into the GPU's engine later on. Get an aligned adress so we can use MTRR-WC
	 * even on older CPU's. */
	get_memory_map(unaligned_dma_buffer, B_PAGE_SIZE, map, 1);
	si->dma_buffer_pci = (void*)
		((map[0].address + net_buf_size - 1) & ~(net_buf_size - 1));

	/* map the net DMA command buffer into vmem, using Write Combining */
	si->dma_area = map_physical_memory(
		"NV aligned DMA cmd buffer", (addr_t)si->dma_buffer_pci,
		net_buf_size, B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA | B_WRITE_AREA, &(si->dma_buffer));
	/* if failed with write combining try again without */
	if (si->dma_area < 0) {
		si->dma_area = map_physical_memory(
			"NV aligned DMA cmd buffer", (addr_t)si->dma_buffer_pci,
			net_buf_size, B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA, &(si->dma_buffer));
	}

	/* if there was an error, delete our other areas and pass on error*/
	if (si->dma_area < 0)
	{
		/* free the already created areas, and return the error */
		result = si->dma_area;
		goto free_shared_and_uadma;
	}

	/* save the vendor and device IDs */
	si->vendor_id = di->pcii.vendor_id;
	si->device_id = di->pcii.device_id;
	si->revision = di->pcii.revision;
	si->bus = di->pcii.bus;
	si->device = di->pcii.device;
	si->function = di->pcii.function;

	/* ensure that the accelerant's INIT_ACCELERANT function can be executed */
	si->accelerant_in_use = false;
	/* preset singlehead card to prevent early INT routine calls (once installed) to
	 * wrongly identify the INT request coming from us! */
	si->ps.secondary_head = false;

	/* note the amount of system RAM the system BIOS assigned to the card if applicable:
	 * unified memory architecture (UMA) */
	switch ((((uint32)(si->device_id)) << 16) | si->vendor_id)
	{
	case 0x01a010de: /* Nvidia GeForce2 Integrated GPU */
		/* device at bus #0, device #0, function #1 holds value at byte-index 0x7C */
		si->ps.memory_size = 1024 * 1024 *
			(((((*pci_bus->read_pci_config)(0, 0, 1, 0x7c, 4)) & 0x000007c0) >> 6) + 1);
		/* last 64kB RAM is used for the BIOS (or something else?) */
		si->ps.memory_size -= (64 * 1024);
		break;
	case 0x01f010de: /* Nvidia GeForce4 MX Integrated GPU */
		/* device at bus #0, device #0, function #1 holds value at byte-index 0x84 */
		si->ps.memory_size = 1024 * 1024 *
			(((((*pci_bus->read_pci_config)(0, 0, 1, 0x84, 4)) & 0x000007f0) >> 4) + 1);
		/* last 64kB RAM is used for the BIOS (or something else?) */
		si->ps.memory_size -= (64 * 1024);
		break;
	default:
		/* all other cards have own RAM: the amount of which is determined in the
		 * accelerant. */
		break;
	}

	/* map the device */
	result = map_device(di);
	if (result < 0) goto free_shared_and_alldma;

	/* we will be returning OK status for sure now */
	result = B_OK;

	/* disable and clear any pending interrupts */
	//fixme:
	//distinquish between crtc1/crtc2 once all heads get seperate driver instances!
	disable_vbi_all(di->regs);

	/* preset we can't use INT related functions */
	si->ps.int_assigned = false;

	/* create a semaphore for vertical blank management */
	si->vblank = create_sem(0, di->name);
	/* on failure we simply continue without INT support (non-fatal) */
	if (si->vblank < 0) goto mark_as_open;

	/* change the owner of the semaphores to the opener's team */
	/* this is required because apps can't acquire kernel semaphores */
	thid = find_thread(NULL);
	get_thread_info(thid, &thinfo);
	set_sem_owner(si->vblank, thinfo.team);

	/* If there is a valid interrupt line assigned then set up interrupts */
	if ((di->pcii.u.h0.interrupt_pin == 0x00) ||
	    (di->pcii.u.h0.interrupt_line == 0xff) || /* no IRQ assigned */
	    (di->pcii.u.h0.interrupt_line <= 0x02))   /* system IRQ assigned */
	{
		/* delete the semaphore as it won't be used */
		delete_sem(si->vblank);
		si->vblank = -1;
	}
	else
	{
		/* otherwise install our interrupt handler */
		result = install_io_interrupt_handler(di->pcii.u.h0.interrupt_line,
			nv_interrupt, (void *)di, 0);
		/* bail if we couldn't install the handler */
		if (result != B_OK)
		{
			/* delete the semaphore as it won't be used */
			delete_sem(si->vblank);
			si->vblank = -1;
			/* BUG FIX: a failed handler install is non-fatal (we fall back to
			 * non-INT operation just like the paths above), so the open must
			 * still report success; previously the error leaked through while
			 * the device was nevertheless marked open. */
			result = B_OK;
		}
		else
		{
			/* inform accelerant(s) we can use INT related functions */
			si->ps.int_assigned = true;
		}
	}

mark_as_open:
	/* mark the device open */
	di->is_open++;

	/* send the cookie to the opener */
	*cookie = di;

	goto done;

/* error unwind: labels run in reverse order of resource acquisition */
free_shared_and_alldma:
	/* clean up our aligned DMA area */
	delete_area(si->dma_area);
	si->dma_area = -1;
	si->dma_buffer = NULL;

free_shared_and_uadma:
	/* clean up our unaligned DMA area */
	delete_area(si->unaligned_dma_area);
	si->unaligned_dma_area = -1;
	si->dma_buffer_pci = NULL;

free_shared:
	/* clean up our shared area */
	delete_area(di->shared_area);
	di->shared_area = -1;
	di->si = NULL;

done:
	/* end of critical section */
	RELEASE_BEN(pd->kernel);

	/* all done, return the status */
	return result;
}