/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        unsigned int nr = DRM_IOCTL_NR(cmd);
        drm_ioctl_compat_t *fn = NULL;
        int ret;

        if (nr < DRM_COMMAND_BASE)
                return drm_compat_ioctl(filp, cmd, arg);

        if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
                fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];

        if (fn != NULL)
                ret = (*fn) (filp, cmd, arg);
        else
                ret = drm_ioctl(filp, cmd, arg);

        return ret;
}
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        unsigned int nr = DRM_IOCTL_NR(cmd);
        drm_ioctl_compat_t *fn = NULL;
        int ret;

        if (nr < DRM_COMMAND_BASE)
                return drm_compat_ioctl(filp, cmd, arg);

#if 0
        if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
                fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
        if (fn != NULL)
                ret = (*fn)(filp, cmd, arg);
        else
                ret = nouveau_drm_ioctl(filp, cmd, arg);

        return ret;
}
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        unsigned int nr = DRM_IOCTL_NR(cmd);
        drm_ioctl_compat_t *fn = NULL;
        int ret;

        if (nr < DRM_COMMAND_BASE)
                return drm_compat_ioctl(filp, cmd, arg);

        if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
                fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];

        lock_kernel();          /* XXX for now */
        if (fn != NULL)
                ret = (*fn) (filp, cmd, arg);
        else
                ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
        unlock_kernel();

        return ret;
}
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        unsigned int nr = DRM_IOCTL_NR(cmd);
        drm_ioctl_compat_t *fn = NULL;
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        int ret;

        i915_rpm_get_ioctl(dev);
        if (nr < DRM_COMMAND_BASE) {
                ret = drm_compat_ioctl(filp, cmd, arg);
                goto out;
        }

        if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
                fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];

        if (fn != NULL) {
                ret = (*fn) (filp, cmd, arg);
        } else {
#ifdef CONFIG_DRM_VXD_BYT
                struct drm_i915_private *dev_priv = dev->dev_private;

                if ((nr >= DRM_COMMAND_VXD_BASE) &&
                    (nr < DRM_COMMAND_VXD_BASE + 0x10)) {
                        BUG_ON(!dev_priv->vxd_ioctl);
                        ret = dev_priv->vxd_ioctl(filp, cmd, arg);
                } else
#endif
                {
                        ret = drm_ioctl(filp, cmd, arg);
                }
        }

out:
        i915_rpm_put_ioctl(dev);
        return ret;
}
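/* All four compat handlers above share one pattern: command numbers below
 * DRM_COMMAND_BASE are core DRM ioctls and are forwarded to
 * drm_compat_ioctl(), while driver-range numbers are looked up in a
 * per-driver translation table. A minimal wiring sketch for a hypothetical
 * "foo" driver follows; the handler is only reachable when CONFIG_COMPAT
 * is set, i.e. when 32-bit user space can run on this kernel. */
static const struct file_operations foo_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = foo_compat_ioctl,     /* 32-bit entry point */
#endif
        .mmap           = drm_mmap,
        .poll           = drm_poll,
};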
/* On Haswell, DDI port buffers must be programmed with correct values
 * in advance. The buffer values are different for FDI and DP modes,
 * but the HDMI/DVI fields are shared among those. So we program the DDI
 * in either FDI or DP modes only, as HDMI connections will work with both
 * of those */
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                                      bool use_fdi_mode)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg;
        int i;
        const u32 *ddi_translations = (use_fdi_mode) ?
                hsw_ddi_translations_fdi :
                hsw_ddi_translations_dp;

        DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
                         port_name(port),
                         use_fdi_mode ? "FDI" : "DP");

        if (use_fdi_mode && (port != PORT_E))
                DRM_DEBUG_KMS("Programming port %c in FDI mode, this probably will not work.\n",
                              port_name(port));

        /* Both translation tables are expected to have the same number of
         * entries, so the FDI table size is used for either mode. */
        for (i = 0, reg = DDI_BUF_TRANS(port);
             i < DRM_ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
                I915_WRITE(reg, ddi_translations[i]);
                reg += 4;
        }
}
int intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
        struct intel_plane *intel_plane;
        unsigned long possible_crtcs;
        int ret;

        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;

        intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS,
            M_WAITOK | M_ZERO);

        if (IS_GEN6(dev)) {
                intel_plane->max_downscale = 16;
                intel_plane->update_plane = snb_update_plane;
                intel_plane->disable_plane = snb_disable_plane;
                intel_plane->update_colorkey = snb_update_colorkey;
                intel_plane->get_colorkey = snb_get_colorkey;
        } else if (IS_GEN7(dev)) {
                intel_plane->max_downscale = 2;
                intel_plane->update_plane = ivb_update_plane;
                intel_plane->disable_plane = ivb_disable_plane;
                intel_plane->update_colorkey = ivb_update_colorkey;
                intel_plane->get_colorkey = ivb_get_colorkey;
        }

        intel_plane->pipe = pipe;
        possible_crtcs = (1 << pipe);
        ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
            &intel_plane_funcs, snb_plane_formats,
            DRM_ARRAY_SIZE(snb_plane_formats), false);
        if (ret)
                free(intel_plane, DRM_MEM_KMS);

        return ret;
}
        nouveau_channel_free(chan);
        return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
        DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
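/* The DRM core reaches a table like this by offsetting the decoded command
 * number: driver-private commands start at DRM_COMMAND_BASE, so the entry
 * is nouveau_ioctls[nr - DRM_COMMAND_BASE]. An illustrative lookup sketch
 * (not the DRM core's actual drm_ioctl() implementation): */
static inline const struct drm_ioctl_desc *
nouveau_lookup_ioctl_sketch(unsigned int cmd)
{
        unsigned int nr = DRM_IOCTL_NR(cmd);

        if (nr < DRM_COMMAND_BASE ||
            nr >= DRM_COMMAND_BASE + nouveau_max_ioctl)
                return NULL;    /* core ioctl, or out of range */

        /* flags such as DRM_AUTH or DRM_ROOT_ONLY are checked against the
         * caller before the descriptor's handler is finally invoked */
        return &nouveau_ioctls[nr - DRM_COMMAND_BASE];
}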
        int param;
        u32 value;
} drm_i915_getparam32_t;

static int compat_i915_getparam(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_getparam32_t *req32 = data;
        drm_i915_getparam_t request;

        request.param = req32->param;
        request.value = (void *)(unsigned long)req32->value;

        return i915_getparam(dev, (void *)&request, file_priv);
}

typedef struct drm_i915_mem_alloc32 {
        int region;
        int alignment;
        int size;
        u32 region_offset;      /* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;

drm_ioctl_desc_t i915_compat_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, compat_i915_batchbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, compat_i915_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GETPARAM, compat_i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, compat_i915_irq_emit, DRM_AUTH)
};

int i915_compat_ioctls_nr = DRM_ARRAY_SIZE(i915_compat_ioctls);
#endif
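/* The drm_i915_mem_alloc32 typedef above implies the same thunking pattern
 * as compat_i915_getparam(): copy the fixed-width fields and widen the
 * 32-bit value into the kernel's native pointer width. A hypothetical
 * wrapper (the real one is not part of this excerpt) might look like: */
static int compat_i915_alloc_sketch(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        drm_i915_mem_alloc32_t *req32 = data;
        drm_i915_mem_alloc_t request;

        request.region = req32->region;
        request.alignment = req32->alignment;
        request.size = req32->size;
        /* widen the 32-bit user offset into a native user pointer */
        request.region_offset = (void *)(unsigned long)req32->region_offset;

        return i915_mem_alloc(dev, (void *)&request, file_priv);
}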
        }

        return 0;
}

#if !defined(SUPPORT_DRI_DRM_EXT)
struct drm_ioctl_desc sPVRDrmIoctls[] = {
        DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
        DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
        DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
#if defined(PDUMP)
        DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
#endif
};

static IMG_INT pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);

static struct drm_driver sPVRDrmDriver = {
        .driver_features = 0,
        .dev_priv_size = 0,
        .load = PVRSRVDrmLoad,
        .unload = PVRSRVDrmUnload,
        .open = PVRSRVDrmOpen,
        .postclose = PVRSRVDrmPostClose,
        .suspend = PVRSRVDriverSuspend,
        .resume = PVRSRVDriverResume,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = sPVRDrmIoctls,
        .fops =
        DRM_IOCTL_DEF(DRM_VIA_CHROME9_RESTORE_PRIMARY,
                      via_chrome9_ioctl_restore_primary, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CHROME9_FLUSH_CACHE,
                      via_chrome9_ioctl_flush_cache, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CHROME9_ALLOCMEM,
                      via_chrome9_ioctl_allocate_mem_base, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CHROME9_FREEMEM,
                      via_chrome9_ioctl_freemem_base, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CHROME9_CHECKVIDMEMSIZE,
                      via_chrome9_ioctl_check_vidmem_size, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CHROME9_PCIEMEMCTRL,
                      via_chrome9_ioctl_pciemem_ctrl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_VIA_CHROME9_AUTH_MAGIC, via_chrome9_drm_authmagic, 0)
};

int via_chrome9_max_ioctl = DRM_ARRAY_SIZE(via_chrome9_ioctls);

static struct pci_device_id pciidlist[] = {
        via_chrome9DRV_PCI_IDS
};

int via_chrome9_driver_open(struct drm_device *dev, struct drm_file *priv)
{
        priv->authenticated = 1;
        return 0;
}

static struct drm_driver driver = {
        .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
                DRIVER_HAVE_DMA | DRIVER_FB_DMA | DRIVER_USE_MTRR,
                        retval = setNext(set, &item);
                }
                setDestroy(set);

                /* AGP Memory */
                set = global_ppriv[i].sets[1];
                retval = setFirst(set, &item);
                while (retval) {
                        DRM_DEBUG("free agp memory 0x%lx\n", item);
                        mmFreeMem((PMemBlock) item);
                        retval = setNext(set, &item);
                }
                setDestroy(set);

                global_ppriv[i].used = 0;
        }

        return 1;
}

drm_ioctl_desc_t sis_ioctls[] = {
        DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_fb_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init,
                      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_ioctl_agp_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init,
                      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY)
};

int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
        0x00000000,
};

const u32 cayman_ps[] = {
        0x00000004,
        0xa00c0000,
        0x00000008,
        0x80400000,
        0x00000000,
        0x95000688,
        0x00000000,
        0x88000000,
        0x00380400,
        0x00146b10,
        0x00380000,
        0x20146b10,
        0x00380400,
        0x40146b00,
        0x80380000,
        0x60146b00,
        0x00000010,
        0x000d1000,
        0xb0800000,
        0x00000000,
};

const u32 cayman_ps_size = DRM_ARRAY_SIZE(cayman_ps);
const u32 cayman_vs_size = DRM_ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = DRM_ARRAY_SIZE(cayman_default_state);
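/* All of the *_size constants in these files rely on DRM_ARRAY_SIZE()
 * yielding the element count at compile time. In the DRM headers it is
 * the usual sizeof ratio (on Linux it simply forwards to ARRAY_SIZE()):
 *
 *      #define DRM_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 *
 * which is only valid on true arrays, not on pointers to their first
 * element. */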
static int __init armada_drm_init(void)
{
        armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
        return platform_driver_register(&armada_drm_platform_driver);
}
void intel_ddi_mode_set(struct drm_encoder *encoder,
                        struct drm_display_mode *mode,
                        struct drm_display_mode *adjusted_mode)
{
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        int port = intel_hdmi->ddi_port;
        int pipe = intel_crtc->pipe;
        int p, n2, r2, valid = 0;
        u32 temp, i;

        /* On Haswell, we need to enable the clocks and prepare DDI function to
         * work in HDMI mode for this pipe. */
        DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n",
                      port_name(port), pipe_name(pipe));

        for (i = 0; i < DRM_ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
                if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
                        p = wrpll_tmds_clock_table[i].p;
                        n2 = wrpll_tmds_clock_table[i].n2;
                        r2 = wrpll_tmds_clock_table[i].r2;

                        DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
                                      crtc->mode.clock, p, n2, r2);

                        valid = 1;
                        break;
                }
        }

        if (!valid) {
                DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
                          crtc->mode.clock);
                return;
        }

        /* Enable LCPLL if disabled */
        temp = I915_READ(LCPLL_CTL);
        if (temp & LCPLL_PLL_DISABLE)
                I915_WRITE(LCPLL_CTL, temp & ~LCPLL_PLL_DISABLE);

        /* Configure WR PLL 1, program the correct divider values for
         * the desired frequency and wait for warmup */
        I915_WRITE(WRPLL_CTL1,
                   WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
                   WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
                   WRPLL_DIVIDER_POST(p));
        DELAY(20);

        /* Use WRPLL1 clock to drive the output to the port, and tell the
         * pipe to use this port for connection. */
        I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_WRPLL1);
        I915_WRITE(PIPE_CLK_SEL(pipe), PIPE_CLK_SEL_PORT(port));
        DELAY(20);

        if (intel_hdmi->has_audio) {
                /* Proper support for digital audio needs a new logic and a
                 * new set of registers, so we leave it for future patch
                 * bombing. */
                DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
                                 pipe_name(intel_crtc->pipe));
        }

        /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
        temp = I915_READ(DDI_FUNC_CTL(pipe));
        temp &= ~PIPE_DDI_PORT_MASK;
        temp &= ~PIPE_DDI_BPC_12;
        temp |= PIPE_DDI_SELECT_PORT(port) |
                PIPE_DDI_MODE_SELECT_HDMI |
                ((intel_crtc->bpp > 24) ? PIPE_DDI_BPC_12 : PIPE_DDI_BPC_8) |
                PIPE_DDI_FUNC_ENABLE;

        I915_WRITE(DDI_FUNC_CTL(pipe), temp);

        intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
        intel_hdmi_set_spd_infoframe(encoder);
}
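/* The WR PLL lookup above implies a table entry carrying a target pixel
 * clock plus the three divider values. Assumed layout, inferred from the
 * field accesses in intel_ddi_mode_set(); the real definition is not part
 * of this excerpt: */
struct wrpll_tmds_clock {
        u32 clock;      /* pixel clock in kHz, compared against mode.clock */
        u16 p;          /* post divider */
        u16 n2;         /* feedback divider */
        u16 r2;         /* reference divider */
};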
void hsw_fdi_link_train(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 reg, temp, i;

        /* Configure CPU PLL, wait for warmup */
        I915_WRITE(SPLL_CTL,
                   SPLL_PLL_ENABLE |
                   SPLL_PLL_FREQ_1350MHz |
                   SPLL_PLL_SCC);

        /* Use SPLL to drive the output when in FDI mode */
        I915_WRITE(PORT_CLK_SEL(PORT_E), PORT_CLK_SEL_SPLL);
        I915_WRITE(PIPE_CLK_SEL(pipe), PIPE_CLK_SEL_PORT(PORT_E));

        DELAY(20);

        /* Start the training iterating through available voltages and emphasis */
        for (i = 0; i < DRM_ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
                /* Configure DP_TP_CTL with auto-training */
                I915_WRITE(DP_TP_CTL(PORT_E),
                           DP_TP_CTL_FDI_AUTOTRAIN |
                           DP_TP_CTL_ENHANCED_FRAME_ENABLE |
                           DP_TP_CTL_LINK_TRAIN_PAT1 |
                           DP_TP_CTL_ENABLE);

                /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
                temp = I915_READ(DDI_BUF_CTL(PORT_E));
                temp = (temp & ~DDI_BUF_EMP_MASK);
                I915_WRITE(DDI_BUF_CTL(PORT_E),
                           temp |
                           DDI_BUF_CTL_ENABLE |
                           DDI_PORT_WIDTH_X2 |
                           hsw_ddi_buf_ctl_values[i]);

                DELAY(600);

                /* Enable CPU FDI Receiver with auto-training */
                reg = FDI_RX_CTL(pipe);
                I915_WRITE(reg,
                           I915_READ(reg) |
                           FDI_LINK_TRAIN_AUTO |
                           FDI_RX_ENABLE |
                           FDI_LINK_TRAIN_PATTERN_1_CPT |
                           FDI_RX_ENHANCE_FRAME_ENABLE |
                           FDI_PORT_WIDTH_2X_LPT |
                           FDI_RX_PLL_ENABLE);
                POSTING_READ(reg);
                DELAY(100);

                temp = I915_READ(DP_TP_STATUS(PORT_E));
                if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
                        DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);

                        /* Enable normal pixel sending for FDI */
                        I915_WRITE(DP_TP_CTL(PORT_E),
                                   DP_TP_CTL_FDI_AUTOTRAIN |
                                   DP_TP_CTL_LINK_TRAIN_NORMAL |
                                   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
                                   DP_TP_CTL_ENABLE);

                        /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in
                         * FDI mode */
                        temp = I915_READ(DDI_FUNC_CTL(pipe));
                        temp &= ~PIPE_DDI_PORT_MASK;
                        temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
                                PIPE_DDI_MODE_SELECT_FDI |
                                PIPE_DDI_FUNC_ENABLE |
                                PIPE_DDI_PORT_WIDTH_X2;
                        I915_WRITE(DDI_FUNC_CTL(pipe), temp);
                        break;
                } else {
                        DRM_ERROR("Error training BUF_CTL %d\n", i);

                        /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
                        I915_WRITE(DP_TP_CTL(PORT_E),
                                   I915_READ(DP_TP_CTL(PORT_E)) &
                                   ~DP_TP_CTL_ENABLE);
                        I915_WRITE(FDI_RX_CTL(pipe),
                                   I915_READ(FDI_RX_CTL(pipe)) &
                                   ~FDI_RX_PLL_ENABLE);
                        continue;
                }
        }

        DRM_DEBUG_KMS("FDI train done.\n");
}
        0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
        0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
        0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
        0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */

        0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,

        0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
        0xffffffff,

        0xc0026900,
        0x00000316,
        0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        0x00000010, /*  */
};

const u32 si_default_size = DRM_ARRAY_SIZE(si_default_state);
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
 * Currently we activate the HQV interrupts of Unichrome Pro group A.
 */
static maskarray_t via_pro_group_a_irqs[] = {
        {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
         0x00000000},
        {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
         0x00000000},
        {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
         VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
        {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
         VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = DRM_ARRAY_SIZE(via_pro_group_a_irqs);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};

static maskarray_t via_unichrome_irqs[] = {
        {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
         VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
        {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
         VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = DRM_ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};

static unsigned time_diff(struct timeval *now, struct timeval *then)
{
        return (now->tv_usec >= then->tv_usec) ?
        .dumb_map_offset = PVRSRVGEMDumbMapOffset,
        .dumb_destroy = PVRSRVGEMDumbDestroy,
#endif
#if defined(SUPPORT_DRM_DC_MODULE) || defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING)
        .irq_handler = PVRSRVDRMIRQHandler,
#endif
#endif
        .gem_free_object = PVRSRVGEMFreeObject,
#if defined(PVR_DRM_USE_PRIME)
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = PVRSRVPrimeExport,
        .gem_prime_import = PVRSRVPrimeImport,
#endif
        .ioctls = sPVRDRMIoctls,
        .num_ioctls = DRM_ARRAY_SIZE(sPVRDRMIoctls),
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
        .fops = &sPVRFileOps,
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) */
        .fops = {
                .owner = THIS_MODULE,
                .open = drm_open,
                .release = drm_release,
                .unlocked_ioctl = drm_ioctl,
#if defined(CONFIG_COMPAT)
                .compat_ioctl = PVRSRVDRMCompatIoctl,
#endif
                .mmap = MMapPMR,
                .poll = drm_poll,
                .fasync = drm_fasync,
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); mga_driver_fence_wait(dev, & fence); if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) { DRM_ERROR("copy_to_user\n"); return DRM_ERR(EFAULT); } return 0; } drm_ioctl_desc_t mga_ioctls[] = { [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1}, [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0}, [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1}, }; int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
        0x00004000,
        0x14200b1a,
        0x00000000,
        0x00000000,
        0x3c000000,
        0x68cd1000,
#ifdef __BIG_ENDIAN
        0x000a0000,
#else
        0x00080000,
#endif
        0x00000000,
};

const u32 r6xx_ps[] = {
        0x00000002,
        0x80800000,
        0x00000000,
        0x94200688,
        0x00000010,
        0x000d1000,
        0xb0800000,
        0x00000000,
};

const u32 r6xx_ps_size = DRM_ARRAY_SIZE(r6xx_ps);
const u32 r6xx_vs_size = DRM_ARRAY_SIZE(r6xx_vs);
const u32 r6xx_default_size = DRM_ARRAY_SIZE(r6xx_default_state);
const u32 r7xx_default_size = DRM_ARRAY_SIZE(r7xx_default_state);
        if (!dma->buflist)
                return;

        /*i830_flush_queue(dev); */
        for (i = 0; i < dma->buf_count; i++) {
                struct drm_buf *buf = dma->buflist[i];
                drm_savage_buf_priv_t *buf_priv = buf->dev_private;

                if (buf->file_priv == file_priv && buf_priv &&
                    buf_priv->next == NULL && buf_priv->prev == NULL) {
                        uint16_t event;
                        DRM_DEBUG("reclaimed from client\n");
                        event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
                        SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
                        savage_freelist_put(dev, buf);
                }
        }

        drm_core_reclaim_buffers(dev, file_priv);
}

struct drm_ioctl_desc savage_ioctls[] = {
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
        .pretakedown = i830_driver_pretakedown,
        .release = i830_driver_release,
        .dma_quiescent = i830_driver_dma_quiescent,
        .reclaim_buffers = i830_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
#if USE_IRQS
        .irq_preinstall = i830_driver_irq_preinstall,
        .irq_postinstall = i830_driver_irq_postinstall,
        .irq_uninstall = i830_driver_irq_uninstall,
        .irq_handler = i830_driver_irq_handler,
#endif
        .postinit = postinit,
        .version = version,
        .ioctls = ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(ioctls),
        .fops = {
                .owner = THIS_MODULE,
                .open = drm_open,
                .release = drm_release,
                .ioctl = drm_ioctl,
                .mmap = i830_mmap_buffers,
                .poll = drm_poll,
                .fasync = drm_fasync,
        },
        .pci_driver = {
                .name = DRIVER_NAME,
                .id_table = pciidlist,
                .probe = probe,
                .remove = __devexit_p(drm_cleanup_pci),
        }
}

#define PVR_DRM_FOPS_IOCTL .unlocked_ioctl

struct drm_ioctl_desc sPVRDrmIoctls[] = {
        DRM_IOCTL_DEF_DRV(PVR_SRVKM, PVRSRV_BridgeDispatchKM, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(PVR_IS_MASTER, PVRDRMIsMaster, DRM_MASTER | DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(PVR_UNPRIV, PVRDRMUnprivCmd, DRM_UNLOCKED),
#if defined(PDUMP)
        DRM_IOCTL_DEF_DRV(PVR_DBGDRV, dbgdrv_ioctl, DRM_UNLOCKED),
#endif
};

static int pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);

static struct drm_driver sPVRDrmDriver = {
        .driver_features = 0,
        .dev_priv_size = 0,
        .load = PVRSRVDrmLoad,
        .unload = PVRSRVDrmUnload,
        .open = PVRSRVDrmOpen,
        .postclose = PVRSRVDrmPostClose,
        .suspend = PVRSRVDriverSuspend,
        .resume = PVRSRVDriverResume,
        .ioctls = sPVRDrmIoctls,
        .fops = {
                .owner = THIS_MODULE,
                .open = drm_open,
                .release = drm_release,
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct qxl_device *qdev = dev->dev_private;
        struct drm_qxl_update_area *update_area = data;
        struct qxl_rect area = {.left = update_area->left,
                                .top = update_area->top,
                                .right = update_area->right,
                                .bottom = update_area->bottom};
        int ret;
        struct drm_gem_object *gobj = NULL;
        struct qxl_bo *qobj = NULL;

        if (update_area->left >= update_area->right ||
            update_area->top >= update_area->bottom)
                return -EINVAL;

        gobj = drm_gem_object_lookup(dev, file, update_area->handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_qxl_bo(gobj);

        ret = qxl_bo_reserve(qobj, false);
        if (ret)
                goto out;

        if (!qobj->pin_count) {
                qxl_ttm_placement_from_domain(qobj, qobj->type, false);
                ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                                      true, false);
                if (unlikely(ret))
                        goto out;
        }

        ret = qxl_bo_check_id(qdev, qobj);
        if (ret)
                goto out2;
        if (!qobj->surface_id)
                DRM_ERROR("got update area for surface with no id %d\n",
                          update_area->handle);
        ret = qxl_io_update_area(qdev, qobj, &area);

out2:
        qxl_bo_unreserve(qobj);

out:
        drm_gem_object_unreference_unlocked(gobj);
        return ret;
}

static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct qxl_device *qdev = dev->dev_private;
        struct drm_qxl_getparam *param = data;

        switch (param->param) {
        case QXL_PARAM_NUM_SURFACES:
                param->value = qdev->rom->n_surfaces;
                break;
        case QXL_PARAM_MAX_RELOCS:
                param->value = QXL_MAX_RES;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct qxl_device *qdev = dev->dev_private;
        struct drm_qxl_clientcap *param = data;
        int byte, idx;

        byte = param->index / 8;
        idx = param->index % 8;

        if (qdev->pdev->revision < 4)
                return -ENOSYS;

        if (byte >= 58)
                return -ENOSYS;

        if (qdev->rom->client_capabilities[byte] & (1 << idx))
                return 0;
        return -ENOSYS;
}

static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct qxl_device *qdev = dev->dev_private;
        struct drm_qxl_alloc_surf *param = data;
        struct qxl_bo *qobj;
        int handle;
        int ret;
        int size, actual_stride;
        struct qxl_surface surf;

        /* work out size allocate bo with handle */
        actual_stride = param->stride < 0 ? -param->stride : param->stride;
        size = actual_stride * param->height + actual_stride;

        surf.format = param->format;
        surf.width = param->width;
        surf.height = param->height;
        surf.stride = param->stride;
        surf.data = 0;

        ret = qxl_gem_object_create_with_handle(qdev, file,
                                                QXL_GEM_DOMAIN_SURFACE,
                                                size,
                                                &surf,
                                                &qobj, &handle);
        if (ret) {
                DRM_ERROR("%s: failed to create gem ret=%d\n",
                          __func__, ret);
                return -ENOMEM;
        } else
                param->handle = handle;
        return ret;
}

const struct drm_ioctl_desc qxl_ioctls[] = {
        DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
                          DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
                          DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
                          DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
                          DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
                          DRM_AUTH|DRM_UNLOCKED),
};

int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
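/* Note on the size computation in qxl_alloc_surf_ioctl() above: the stride
 * may be negative for bottom-up surfaces, so its magnitude is used, and one
 * extra stride's worth of bytes is reserved beyond height * stride,
 * presumably as slack for addressing the final scanline. */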
        DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
        /* KMS */
        DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};

int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
        }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        r = drm_vblank_init(dev, 1);
        if (r)
                dev_err(&dev->pdev->dev, "Fatal error during vblank init\n");
#endif

out:
        if (r)
                vboxvideo_driver_unload(dev);
        return r;
}

int vboxvideo_driver_unload(struct drm_device *dev)
{
        struct vboxvideo_device *gdev = dev->dev_private;

        if (gdev == NULL)
                return 0;
        vboxvideo_modeset_fini(gdev);
        vboxvideo_device_fini(gdev);
        kfree(gdev);
        dev->dev_private = NULL;
        return 0;
}

struct drm_ioctl_desc vboxvideo_ioctls[] = {
};

int vboxvideo_max_ioctl = DRM_ARRAY_SIZE(vboxvideo_ioctls);

#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */