/*!	Per-device initialization of the intel_extreme kernel driver.

	Maps the GART aperture and the MMIO registers, creates the shared info
	area used to communicate with the accelerant, programs the register
	block layout for the detected hardware generation, and allocates the
	ring buffer, overlay registers, hardware status page, and (optionally)
	hardware cursor memory from graphics memory.

	\param info Per-device driver state; \c info.pci and
		\c info.device_type must already be initialized by the caller.
	\return \c B_OK on success, or the first failing area/aperture error
		code. On failure all partially acquired resources are released
		(the AreaKeepers delete their areas unless Detach()ed).
*/
status_t
intel_extreme_init(intel_info &info)
{
	CALLED();

	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
		info.pci->function, 0, &info.aperture_base);
	if (info.aperture < B_OK) {
		ERROR("error: could not map GART aperture!\n");
		return info.aperture;
	}

	// Shared info area: one intel_shared_info plus three extra pages
	// (overlay registers, status page, and cursor share this area).
	AreaKeeper sharedCreator;
	info.shared_area = sharedCreator.Create("intel extreme shared info",
		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
		B_FULL_LOCK, 0);
	if (info.shared_area < B_OK) {
		ERROR("error: could not create shared area!\n");
		gGART->unmap_aperture(info.aperture);
		return info.shared_area;
	}

	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));

	// BAR layout differs between generations.
	// NOTE(review): fbIndex is currently unused in this function (the
	// frame buffer is reached through the GART aperture) — kept for
	// documentation of the BAR layout; confirm against callers/history.
	int fbIndex = 0;
	int mmioIndex = 1;
	if (info.device_type.InFamily(INTEL_TYPE_9xx)) {
		// For some reason Intel saw the need to change the order of the
		// mappings with the introduction of the i9xx family
		mmioIndex = 0;
		fbIndex = 2;
	}

	// evaluate driver settings, if any
	bool hardwareCursor;
	read_settings(hardwareCursor);

	// memory mapped I/O
	// TODO: registers are mapped twice (by us and intel_gart), maybe we
	// can share it between the drivers
	AreaKeeper mmioMapper;
	info.registers_area = mmioMapper.Map("intel extreme mmio",
		info.pci->u.h0.base_registers[mmioIndex],
		info.pci->u.h0.base_register_sizes[mmioIndex],
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void**)&info.registers);
	if (mmioMapper.InitCheck() < B_OK) {
		ERROR("error: could not map memory I/O!\n");
		gGART->unmap_aperture(info.aperture);
		return info.registers_area;
		// (shared area is cleaned up by sharedCreator's destructor)
	}

	uint32* blocks = info.shared_info->register_blocks;
	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;

	// setup the register blocks for the different architectures
	if (info.device_type.HasPlatformControlHub()) {
		// PCH based platforms (IronLake and up)
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= PCH_NORTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= PCH_SOUTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
	} else {
		// (G)MCH/ICH based platforms
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= MCH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= MCH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= MCH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= ICH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= ICH_PORT_REGISTER_BASE;
	}

	// make sure bus master, memory-mapped I/O, and frame buffer is enabled
	set_pci_config(info.pci, PCI_command, 2,
		get_pci_config(info.pci, PCI_command, 2)
			| PCI_command_io | PCI_command_memory | PCI_command_master);

	// reserve ring buffer memory (currently, this memory is placed in
	// the graphics memory), but this could bring us problems with
	// write combining...
	ring_buffer &primary = info.shared_info->primary_ring_buffer;
	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
			(addr_t*)&primary.base) == B_OK) {
		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
		primary.size = 16 * B_PAGE_SIZE;
		primary.offset = (addr_t)primary.base - info.aperture_base;
	}

	// Enable clock gating
	intel_en_gating(info);

	// Enable automatic gpu downclocking if we can to save power
	intel_en_downclock(info);

	// no errors, so keep areas and mappings
	sharedCreator.Detach();
	mmioMapper.Detach();

	aperture_info apertureInfo;
	gGART->get_aperture_info(info.aperture, &apertureInfo);

	// publish everything the accelerant needs through the shared area
	info.shared_info->registers_area = info.registers_area;
	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
	info.shared_info->graphics_memory_size = apertureInfo.size;
	info.shared_info->frame_buffer = 0;
	info.shared_info->dpms_mode = B_DPMS_ON;

	info.shared_info->got_vbt = get_lvds_mode_from_bios(
		&info.shared_info->current_mode);

	/* at least 855gm can't drive more than one head at time */
	if (info.device_type.InFamily(INTEL_TYPE_8xx))
		info.shared_info->single_head_locked = 1;

	// PLL limits; all frequencies below are stored in kHz
	if (info.device_type.InFamily(INTEL_TYPE_9xx)) {
		info.shared_info->pll_info.reference_frequency = 96000;	// 96 MHz
		info.shared_info->pll_info.max_frequency = 400000;
			// 400 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else {
		info.shared_info->pll_info.reference_frequency = 48000;	// 48 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	}

	info.shared_info->pll_info.divisor_register
		= INTEL_DISPLAY_A_PLL_DIVISOR_0;

	info.shared_info->device_type = info.device_type;
#ifdef __HAIKU__
	strlcpy(info.shared_info->device_identifier, info.device_identifier,
		sizeof(info.shared_info->device_identifier));
#else
	strcpy(info.shared_info->device_identifier, info.device_identifier);
#endif

	// setup overlay registers
	status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0,
		intel_uses_physical_overlay(*info.shared_info)
			? B_APERTURE_NEED_PHYSICAL : 0,
		(addr_t*)&info.overlay_registers,
		&info.shared_info->physical_overlay_registers);
	if (status == B_OK) {
		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
			- info.aperture_base;
		init_overlay_registers(info.overlay_registers);
	} else {
		ERROR("error: could not allocate overlay memory! %s\n",
			strerror(status));
	}

	// Allocate hardware status page and the cursor memory
	// Fixed: the status page member's *address* must be passed here
	// (every sibling call passes &...); the old code passed the zeroed
	// member value cast to addr_t*, i.e. a NULL destination.
	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->status_page,
			&info.shared_info->physical_status_page) == B_OK) {
		// TODO: set status page
	}
	if (hardwareCursor) {
		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->cursor_memory,
			&info.shared_info->physical_cursor_memory);
	}

	init_interrupt_handler(info);

	TRACE("%s: completed successfully!\n", __func__);
	return B_OK;
}
/*!	Driver ioctl hook: services requests from the accelerant.

	\param data The cookie set up by the open hook (an intel_info*).
	\param op The ioctl opcode.
	\param buffer Userland buffer whose layout depends on \a op.
	\param bufferLength Size of \a buffer as passed by the caller.
	\return \c B_OK on success, \c B_BAD_ADDRESS on a failed user copy,
		\c B_BAD_VALUE on a magic mismatch, the allocator's status for
		memory requests, or \c B_DEV_INVALID_IOCTL for unknown opcodes.
*/
static status_t
device_ioctl(void *data, uint32 op, void *buffer, size_t bufferLength)
{
	struct intel_info *info = (intel_info *)data;

	switch (op) {
		case B_GET_ACCELERANT_SIGNATURE:
#ifdef __HAIKU__
			// \a buffer points into userland — use the checked copy
			// (consistent with INTEL_GET_DEVICE_NAME below) instead of
			// a raw strcpy() to an unverified address.
			if (user_strlcpy((char *)buffer, INTEL_ACCELERANT_NAME,
					B_PATH_NAME_LENGTH) < B_OK)
				return B_BAD_ADDRESS;
#else
			strcpy((char *)buffer, INTEL_ACCELERANT_NAME);
#endif
			TRACE((DEVICE_NAME ": accelerant: %s\n", INTEL_ACCELERANT_NAME));
			return B_OK;

		// needed to share data between kernel and accelerant
		case INTEL_GET_PRIVATE_DATA:
		{
			// renamed from "data" to avoid shadowing the cookie parameter
			// NOTE(review): this dereferences the user buffer directly;
			// consider user_memcpy here as well — confirm callers.
			intel_get_private_data *privateData
				= (intel_get_private_data *)buffer;
			if (privateData->magic == INTEL_PRIVATE_DATA_MAGIC) {
				privateData->shared_info_area = info->shared_area;
				return B_OK;
			}
			break;
		}

		// needed for cloning
		case INTEL_GET_DEVICE_NAME:
#ifdef __HAIKU__
			if (user_strlcpy((char *)buffer, gDeviceNames[info->id],
					B_PATH_NAME_LENGTH) < B_OK)
				return B_BAD_ADDRESS;
#else
			strncpy((char *)buffer, gDeviceNames[info->id],
				B_PATH_NAME_LENGTH);
			((char *)buffer)[B_PATH_NAME_LENGTH - 1] = '\0';
#endif
			return B_OK;

		// graphics mem manager
		case INTEL_ALLOCATE_GRAPHICS_MEMORY:
		{
			intel_allocate_graphics_memory allocMemory;
#ifdef __HAIKU__
			if (user_memcpy(&allocMemory, buffer,
					sizeof(intel_allocate_graphics_memory)) < B_OK)
				return B_BAD_ADDRESS;
#else
			memcpy(&allocMemory, buffer,
				sizeof(intel_allocate_graphics_memory));
#endif
			if (allocMemory.magic != INTEL_PRIVATE_DATA_MAGIC)
				return B_BAD_VALUE;

			status_t status = intel_allocate_memory(*info, allocMemory.size,
				allocMemory.alignment, allocMemory.flags,
				(addr_t *)&allocMemory.buffer_base);
			if (status == B_OK) {
				// copy result back so the caller learns the base address
#ifdef __HAIKU__
				if (user_memcpy(buffer, &allocMemory,
						sizeof(intel_allocate_graphics_memory)) < B_OK)
					return B_BAD_ADDRESS;
#else
				memcpy(buffer, &allocMemory,
					sizeof(intel_allocate_graphics_memory));
#endif
			}
			return status;
		}

		case INTEL_FREE_GRAPHICS_MEMORY:
		{
			intel_free_graphics_memory freeMemory;
#ifdef __HAIKU__
			if (user_memcpy(&freeMemory, buffer,
					sizeof(intel_free_graphics_memory)) < B_OK)
				return B_BAD_ADDRESS;
#else
			memcpy(&freeMemory, buffer,
				sizeof(intel_free_graphics_memory));
#endif
			if (freeMemory.magic == INTEL_PRIVATE_DATA_MAGIC)
				return intel_free_memory(*info, freeMemory.buffer_base);
			break;
		}

		default:
			TRACE((DEVICE_NAME ": ioctl() unknown message %ld (length = %ld)\n",
				op, bufferLength));
			break;
	}

	return B_DEV_INVALID_IOCTL;
}
/*!	This is the common accelerant_info initializer. It is called by both,
	the first accelerant and all clones: it allocates the global \c gInfo,
	asks the kernel driver for its shared info area, clones that area and
	the MMIO register area into this team, allocates the i965 3D context
	if applicable, and instantiates the display pipe objects.
*/
static status_t
init_common(int device, bool isClone)
{
	// initialize global accelerant info structure

	// Number of register dumps we have... taken.
	gDumpCount = 0;

	gInfo = (accelerant_info*)malloc(sizeof(accelerant_info));
	if (gInfo == NULL)
		return B_NO_MEMORY;

	memset(gInfo, 0, sizeof(accelerant_info));

	gInfo->is_clone = isClone;
	gInfo->device = device;

	// get basic info from driver
	intel_get_private_data data;
	data.magic = INTEL_PRIVATE_DATA_MAGIC;

	if (ioctl(device, INTEL_GET_PRIVATE_DATA, &data,
			sizeof(intel_get_private_data)) != 0) {
		free(gInfo);
		return B_ERROR;
	}

	// Clone the kernel driver's shared info area into our address space.
	// AreaCloner deletes the clone on destruction unless Keep() is called,
	// so the early-return paths below only need to free gInfo.
	AreaCloner sharedCloner;
	gInfo->shared_info_area = sharedCloner.Clone("intel extreme shared info",
		(void**)&gInfo->shared_info, B_ANY_ADDRESS,
		B_READ_AREA | B_WRITE_AREA, data.shared_info_area);
	status_t status = sharedCloner.InitCheck();
	if (status < B_OK) {
		free(gInfo);
		return status;
	}

	// Clone the memory-mapped register area as well.
	AreaCloner regsCloner;
	gInfo->regs_area = regsCloner.Clone("intel extreme regs",
		(void**)&gInfo->registers, B_ANY_ADDRESS,
		B_READ_AREA | B_WRITE_AREA, gInfo->shared_info->registers_area);
	status = regsCloner.InitCheck();
	if (status < B_OK) {
		free(gInfo);
		return status;
	}

	// success — both clones survive this function
	sharedCloner.Keep();
	regsCloner.Keep();

	// The overlay registers, hardware status, and cursor memory share
	// a single area with the shared_info
	if (gInfo->shared_info->overlay_offset != 0) {
		gInfo->overlay_registers = (struct overlay_registers*)
			(gInfo->shared_info->graphics_memory
				+ gInfo->shared_info->overlay_offset);
	}

	if (gInfo->shared_info->device_type.InGroup(INTEL_GROUP_96x)) {
		// allocate some extra memory for the 3D context
		// (best effort: failure is not fatal, context_offset stays 0)
		if (intel_allocate_memory(INTEL_i965_3D_CONTEXT_SIZE,
				B_APERTURE_NON_RESERVED, gInfo->context_base) == B_OK) {
			gInfo->context_offset = gInfo->context_base
				- (addr_t)gInfo->shared_info->graphics_memory;
		}
	}

	gInfo->pipe_count = 0;

	// Allocate all of our pipes
	// NOTE: gInfo was zeroed above, so an unhandled index (default case)
	// leaves pipes[i] NULL and is reported below without being counted.
	for (int i = 0; i < MAX_PIPES; i++) {
		switch (i) {
			case 0:
				gInfo->pipes[i] = new(std::nothrow) Pipe(INTEL_PIPE_A);
				break;
			case 1:
				gInfo->pipes[i] = new(std::nothrow) Pipe(INTEL_PIPE_B);
				break;
			default:
				ERROR("%s: Unknown pipe %d\n", __func__, i);
		}
		if (gInfo->pipes[i] == NULL)
			ERROR("%s: Error allocating pipe %d\n", __func__, i);
		else
			gInfo->pipe_count++;
	}

	return B_OK;
}
/*!	Shared initialization path used by the primary accelerant and every
	clone: sets up the global \c gInfo, retrieves the kernel driver's
	shared info area via ioctl, and clones both that area and the MMIO
	register area into the current team.
*/
static status_t
init_common(int device, bool isClone)
{
	gInfo = (accelerant_info*)malloc(sizeof(accelerant_info));
	if (gInfo == NULL)
		return B_NO_MEMORY;

	memset(gInfo, 0, sizeof(accelerant_info));

	gInfo->is_clone = isClone;
	gInfo->device = device;

	// ask the kernel driver which area holds the shared info
	intel_get_private_data privateData;
	privateData.magic = INTEL_PRIVATE_DATA_MAGIC;

	if (ioctl(device, INTEL_GET_PRIVATE_DATA, &privateData,
			sizeof(intel_get_private_data)) != 0) {
		free(gInfo);
		return B_ERROR;
	}

	// clone the shared info area; the cloner cleans up on early return
	AreaCloner sharedInfoCloner;
	gInfo->shared_info_area = sharedInfoCloner.Clone(
		"intel extreme shared info", (void**)&gInfo->shared_info,
		B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA,
		privateData.shared_info_area);
	status_t result = sharedInfoCloner.InitCheck();
	if (result < B_OK) {
		free(gInfo);
		return result;
	}

	// clone the memory-mapped register window, too
	AreaCloner registerCloner;
	gInfo->regs_area = registerCloner.Clone("intel extreme regs",
		(void**)&gInfo->registers, B_ANY_ADDRESS,
		B_READ_AREA | B_WRITE_AREA, gInfo->shared_info->registers_area);
	result = registerCloner.InitCheck();
	if (result < B_OK) {
		free(gInfo);
		return result;
	}

	// everything worked — hold on to both clones
	sharedInfoCloner.Keep();
	registerCloner.Keep();

	// The overlay registers, hardware status, and cursor memory share
	// a single area with the shared_info
	gInfo->overlay_registers = (struct overlay_registers*)
		(gInfo->shared_info->graphics_memory
			+ gInfo->shared_info->overlay_offset);

	if (gInfo->shared_info->device_type.InGroup(INTEL_TYPE_96x)) {
		// allocate some extra memory for the 3D context
		if (intel_allocate_memory(INTEL_i965_3D_CONTEXT_SIZE,
				B_APERTURE_NON_RESERVED, gInfo->context_base) == B_OK) {
			gInfo->context_offset = gInfo->context_base
				- (addr_t)gInfo->shared_info->graphics_memory;
		}
	}

	return B_OK;
}