/** * sti_gdp_commit_layer * @lay: gdp layer * * Update the NVN field of the 'right' field of the current GDP node (being * used by the HW) with the address of the updated ('free') top field GDP node. * - In interlaced mode the 'right' field is the bottom field as we update * frames starting from their top field * - In progressive mode, we update both bottom and top fields which are * equal nodes. * At the next VSYNC, the updated node list will be used by the HW. * * RETURNS: * 0 on success. */ static int sti_gdp_commit_layer(struct sti_layer *layer) { struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer); struct sti_gdp_node *updated_top_node = updated_list->top_field; struct sti_gdp_node *updated_btm_node = updated_list->btm_field; struct sti_gdp *gdp = to_sti_gdp(layer); u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node); u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node); struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, sti_layer_to_str(layer), updated_top_node, updated_btm_node); dev_dbg(layer->dev, "Current NVN:0x%X\n", readl(layer->regs + GAM_GDP_NVN_OFFSET)); dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n", (unsigned long)layer->paddr, readl(layer->regs + GAM_GDP_PML_OFFSET)); if (curr_list == NULL) { /* First update or invalid node should directly write in the * hw register */ DRM_DEBUG_DRIVER("%s first update (or invalid node)", sti_layer_to_str(layer)); writel(gdp->is_curr_top == true ? 
dma_updated_btm : dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); return 0; } if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) { if (gdp->is_curr_top == true) { /* Do not update in the middle of the frame, but * postpone the update after the bottom field has * been displayed */ curr_list->btm_field->gam_gdp_nvn = dma_updated_top; } else { /* Direct update to avoid one frame delay */ writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); } } else { /* Direct update for progressive to avoid one frame delay */ writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); } return 0; }
/** * sti_gdp_get_current_nodes * @layer: GDP layer * * Look for GDP nodes that are currently read by the HW. * * RETURNS: * Pointer to the current GDP node list */ static struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) { int hw_nvn; void *virt_nvn; struct sti_gdp *gdp = to_sti_gdp(layer); unsigned int i; hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); if (!hw_nvn) goto end; virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); for (i = 0; i < GDP_NODE_NB_BANK; i++) if ((virt_nvn == gdp->node_list[i].btm_field) || (virt_nvn == gdp->node_list[i].top_field)) return &gdp->node_list[i]; end: DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", hw_nvn, sti_layer_to_str(layer)); return NULL; }
/** * sti_gdp_get_free_nodes * @layer: gdp layer * * Look for a GDP node list that is not currently read by the HW. * * RETURNS: * Pointer to the free GDP node list */ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) { int hw_nvn; void *virt_nvn; struct sti_gdp *gdp = to_sti_gdp(layer); unsigned int i; hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); if (!hw_nvn) goto end; virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); for (i = 0; i < GDP_NODE_NB_BANK; i++) if ((virt_nvn != gdp->node_list[i].btm_field) && (virt_nvn != gdp->node_list[i].top_field)) return &gdp->node_list[i]; /* in hazardious cases restart with the first node */ DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", sti_layer_to_str(layer), hw_nvn); end: return &gdp->node_list[0]; }
int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer) { int layer_id = 0, depth = layer->zorder; u32 mask, val; if (depth >= GAM_MIXER_NB_DEPTH_LEVEL) return 1; switch (layer->desc) { case STI_GDP_0: layer_id = GAM_DEPTH_GDP0_ID; break; case STI_GDP_1: layer_id = GAM_DEPTH_GDP1_ID; break; case STI_GDP_2: layer_id = GAM_DEPTH_GDP2_ID; break; case STI_GDP_3: layer_id = GAM_DEPTH_GDP3_ID; break; case STI_VID_0: layer_id = GAM_DEPTH_VID0_ID; break; case STI_VID_1: layer_id = GAM_DEPTH_VID1_ID; break; default: DRM_ERROR("Unknown layer %d\n", layer->desc); return 1; } mask = GAM_DEPTH_MASK_ID << (3 * depth); layer_id = layer_id << (3 * depth); DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer), sti_layer_to_str(layer), depth); dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n", layer_id, mask); val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB); val &= ~mask; val |= layer_id; sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val); dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n", sti_mixer_reg_read(mixer, GAM_MIXER_CRB)); return 0; }
/*
 * Enable or disable a layer on the mixer by setting/clearing its bit(s)
 * in GAM_MIXER_CTL.
 *
 * Returns 0 on success, -EINVAL when the layer has no known mask.
 */
int sti_mixer_set_layer_status(struct sti_mixer *mixer,
			       struct sti_layer *layer, bool status)
{
	u32 mask;
	u32 ctl;

	DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
			 sti_mixer_to_str(mixer), sti_layer_to_str(layer));

	mask = sti_mixer_get_layer_mask(layer);
	if (!mask) {
		DRM_ERROR("Can not find layer mask\n");
		return -EINVAL;
	}

	/* Clear the layer's bits, then set them back only when enabling */
	ctl = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
	ctl &= ~mask;
	if (status)
		ctl |= mask;
	sti_mixer_reg_write(mixer, GAM_MIXER_CTL, ctl);

	return 0;
}
/** * sti_gdp_disable_layer * @lay: gdp layer * * Disable a GDP. * * RETURNS: * 0 on success. */ static int sti_gdp_disable_layer(struct sti_layer *layer) { unsigned int i; struct sti_gdp *gdp = to_sti_gdp(layer); struct sti_compositor *compo = dev_get_drvdata(layer->dev); DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); /* Set the nodes as 'to be ignored on mixer' */ for (i = 0; i < GDP_NODE_NB_BANK; i++) { gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; } if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ? compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); if (gdp->clk_pix) clk_disable_unprepare(gdp->clk_pix); return 0; }
/**
 * sti_gdp_prepare_layer
 * @layer: gdp layer
 * @first_prepare: true if it is the first time this function is called
 *
 * Update the free GDP node list according to the layer properties: build
 * the top field node from the layer parameters, clone it into the bottom
 * field node and chain the two together via their NVN fields. On the first
 * call, also register the VTG callback and configure the pixel clock.
 *
 * RETURNS:
 * 0 on success, 1 on error (unsupported format, VTG registration or clock
 * setup failure).
 */
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
{
	struct sti_gdp_node_list *list;
	struct sti_gdp_node *top_field, *btm_field;
	struct drm_display_mode *mode = layer->mode;
	struct device *dev = layer->dev;
	struct sti_gdp *gdp = to_sti_gdp(layer);
	struct sti_compositor *compo = dev_get_drvdata(dev);
	int format;
	unsigned int depth, bpp;
	int rate = mode->clock * 1000; /* mode->clock is in kHz */
	int res;
	u32 ydo, xdo, yds, xds;

	/* Work on the bank the HW is NOT currently reading */
	list = sti_gdp_get_free_nodes(layer);
	top_field = list->top_field;
	btm_field = list->btm_field;

	dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
			sti_layer_to_str(layer), top_field, btm_field);

	/* Build the top field from layer params */
	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
	format = sti_gdp_fourcc2format(layer->format);
	if (format == -1) {
		/* %.4s prints the fourcc code as four ASCII chars */
		DRM_ERROR("Format not supported by GDP %.4s\n",
			  (char *)&layer->format);
		return 1;
	}
	top_field->gam_gdp_ctl |= format;
	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
	/* Re-arm the node in case a previous disable flagged it ignored */
	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

	/* pixel memory location: base + plane offset + (src_x, src_y) */
	drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
	top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
	top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
	top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];

	/* input parameters: pitch and clamped source size (h << 16 | w) */
	top_field->gam_gdp_pmp = layer->pitches[0];
	top_field->gam_gdp_size = clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
				  clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);

	/* output parameters: destination window in VTG line/pixel numbers */
	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
	yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
	xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
	top_field->gam_gdp_vps = (yds << 16) | xds;

	/* Same content and chained together: the bottom node is a copy of
	 * the fully-built top node; each node's NVN points at the other,
	 * forming the two-node ring the HW walks every field */
	memcpy(btm_field, top_field, sizeof(*btm_field));
	top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
	btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);

	/* Interlaced mode: bottom field starts one line (pitch) further */
	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
					 layer->pitches[0];

	if (first_prepare) {
		/* Register gdp callback on the VTG driving this mixer */
		if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
				compo->vtg_main : compo->vtg_aux,
				&gdp->vtg_field_nb, layer->mixer_id)) {
			DRM_ERROR("Cannot register VTG notifier\n");
			return 1;
		}

		/* Set and enable gdp clock */
		if (gdp->clk_pix) {
			res = clk_set_rate(gdp->clk_pix, rate);
			if (res < 0) {
				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
						rate);
				return 1;
			}

			if (clk_prepare_enable(gdp->clk_pix)) {
				DRM_ERROR("Failed to prepare/enable gdp\n");
				return 1;
			}
		}
	}

	return 0;
}