/* Check/create surface that can be accessed by the hardware.
 *
 * Copies *in to *out. If either plane of *in has no physical mapping
 * (i.e. the hardware cannot reach it), a single hardware-visible buffer
 * is allocated from the UIO memory pool and *out's plane pointers are
 * redirected into it (Y first, C immediately after the Y plane).
 *
 * Returns 0 on success (including the no-op cases), -1 if the
 * replacement buffer could not be allocated.
 */
static int get_hw_surface(
	UIOMux * uiomux,
	uiomux_resource_t resource,
	struct ren_vid_surface *out,
	const struct ren_vid_surface *in)
{
	int needs_hw_copy = 0;

	if (!in || !out)
		return 0;

	*out = *in;

	/* A plane is unusable by the hardware if it has no
	   virtual-to-physical mapping registered with UIOMux. */
	if (in->py && !uiomux_all_virt_to_phys(in->py))
		needs_hw_copy = 1;
	if (in->pc && !uiomux_all_virt_to_phys(in->pc))
		needs_hw_copy = 1;

	if (!needs_hw_copy)
		return 0;

	/* One of the supplied buffers is not usable by the hardware!
	   Allocate one contiguous buffer covering Y (and C, if present). */
	{
		size_t total = size_y(in->format, in->h * in->w);

		if (in->pc)
			total += size_c(in->format, in->h * in->w);

		out->py = uiomux_malloc(uiomux, resource, total, 32);
		if (!out->py)
			return -1;

		/* C plane sits directly after the Y plane. */
		if (in->pc)
			out->pc = out->py + size_y(in->format, in->h * in->w);
	}

	return 0;
}
/* Wait for the VEU to finish the current operation.
 *
 * Sleeps on the UIO interrupt, acknowledges it, and — if the
 * end-of-operation event fired — copies the hardware output back to the
 * user's destination surface, releases any bounce buffers that
 * get_hw_surface() allocated, and unlocks the engine.
 *
 * Returns 1 when the operation completed, 0 otherwise (spurious wakeup
 * or a different event bit).
 */
int shveu_wait(SHVEU *veu)
{
	void *base_addr = veu->uio_mmio.iomem;
	uint32_t vevtr;
	int complete = 0;

	uiomux_sleep(veu->uiomux, veu->uiores);

	vevtr = read_reg(base_addr, VEVTR);
	write_reg(base_addr, 0, VEVTR);   /* ack interrupts */

	/* End of VEU operation? */
	if (vevtr & 1) {
		dbg(__func__, __LINE__, "src_hw", &veu->src_hw);
		dbg(__func__, __LINE__, "dst_hw", &veu->dst_hw);
		copy_surface(&veu->dst_user, &veu->dst_hw);

		/* Free locally allocated bounce surfaces. The length must
		   match what get_hw_surface() allocated: Y always, plus C
		   only when the surface actually has a C plane. */
		if (veu->src_hw.py != veu->src_user.py) {
			size_t len = size_y(veu->src_hw.format, veu->src_hw.h * veu->src_hw.w);
			if (veu->src_hw.pc)
				len += size_c(veu->src_hw.format, veu->src_hw.h * veu->src_hw.w);
			uiomux_free(veu->uiomux, veu->uiores, veu->src_hw.py, len);
		}
		if (veu->dst_hw.py != veu->dst_user.py) {
			size_t len = size_y(veu->dst_hw.format, veu->dst_hw.h * veu->dst_hw.w);
			if (veu->dst_hw.pc)
				len += size_c(veu->dst_hw.format, veu->dst_hw.h * veu->dst_hw.w);
			uiomux_free(veu->uiomux, veu->uiores, veu->dst_hw.py, len);
		}

		uiomux_unlock(veu->uiomux, veu->uiores);
		complete = 1;
	}

	return complete;
}
/* Total image size in bytes for a w x h surface in the given colorspace:
 * luma plane plus chroma plane(s).
 *
 * Fix: size_y()/size_c() take two arguments everywhere else in this file
 * (format, pixel count); the stray third argument has been removed.
 */
static off_t imgsize (ren_vid_format_t colorspace, int w, int h)
{
	return (off_t)(size_y(colorspace, w*h) + size_c(colorspace, w*h));
}
/* Program the VEU for a scale/rotate/convert operation from src_surface
 * to dst_surface.
 *
 * Validates formats and scale limits, substitutes hardware-accessible
 * bounce buffers for any user buffer the hardware cannot reach (see
 * get_hw_surface), locks the engine, then performs a module reset and
 * writes the source/destination addresses, geometry, swap, transform,
 * clip, scale and filter registers. The operation itself is started
 * elsewhere; shveu_wait() releases the lock and bounce buffers.
 *
 * @veu             Engine handle.
 * @src_surface     Source surface description (not modified).
 * @dst_surface     Destination surface description (not modified).
 * @filter_control  Rotation/mirror selector, written through to VFMCR.
 *
 * Returns 0 on success, -1 on invalid arguments, unsupported format,
 * out-of-range scale factor, or buffer allocation failure.
 *
 * NOTE(review): if get_hw_surface() succeeds for src but fails for dst,
 * a bounce buffer allocated for src is leaked — there is no unwind on
 * that path; confirm and add cleanup.
 * NOTE(review): the 'fail:' label at the end is never targeted by any
 * goto in this function (dead code / compiler warning).
 */
int shveu_setup(
	SHVEU *veu,
	const struct ren_vid_surface *src_surface,
	const struct ren_vid_surface *dst_surface,
	shveu_rotation_t filter_control)
{
	float scale_x, scale_y;
	uint32_t temp;
	uint32_t Y, C;                     /* physical plane addresses */
	const struct veu_format_info *src_info;
	const struct veu_format_info *dst_info;
	struct ren_vid_surface local_src;  /* hardware-usable src (may be a bounce buffer) */
	struct ren_vid_surface local_dst;  /* hardware-usable dst (may be a bounce buffer) */
	struct ren_vid_surface *src = &local_src;
	struct ren_vid_surface *dst = &local_dst;
	void *base_addr;

	if (!veu || !src_surface || !dst_surface) {
		debug_info("ERR: Invalid input - need src and dest");
		return -1;
	}

	src_info = fmt_info(src_surface->format);
	dst_info = fmt_info(dst_surface->format);

	dbg(__func__, __LINE__, "src_user", src_surface);
	dbg(__func__, __LINE__, "dst_user", dst_surface);

	/* scale factors */
	scale_x = (float)dst_surface->w / src_surface->w;
	scale_y = (float)dst_surface->h / src_surface->h;

	if (!format_supported(src_surface->format) ||
	    !format_supported(dst_surface->format)) {
		debug_info("ERR: Invalid surface format!");
		return -1;
	}

	/* Scaling limits: VEU2H caps upscale at 8x, later variants at 16x;
	   downscale is limited to 1/16 on all variants. */
	if (veu_is_veu2h(veu)) {
		if ((scale_x > 8.0) || (scale_y > 8.0)) {
			debug_info("ERR: Outside scaling limits!");
			return -1;
		}
	} else {
		if ((scale_x > 16.0) || (scale_y > 16.0)) {
			debug_info("ERR: Outside scaling limits!");
			return -1;
		}
	}
	if ((scale_x < 1.0/16.0) || (scale_y < 1.0/16.0)) {
		debug_info("ERR: Outside scaling limits!");
		return -1;
	}

	/* source - use a buffer the hardware can access */
	if (get_hw_surface(veu->uiomux, veu->uiores, src, src_surface) < 0) {
		debug_info("ERR: src is not accessible by hardware");
		return -1;
	}
	/* If src was redirected to a bounce buffer, fill it with the
	   user's pixels. */
	copy_surface(src, src_surface);

	/* destination - use a buffer the hardware can access */
	if (get_hw_surface(veu->uiomux, veu->uiores, dst, dst_surface) < 0) {
		debug_info("ERR: dest is not accessible by hardware");
		return -1;
	}

	uiomux_lock (veu->uiomux, veu->uiores);

	base_addr = veu->uio_mmio.iomem;

	/* Keep track of the requested surfaces */
	veu->src_user = *src_surface;
	veu->dst_user = *dst_surface;

	/* Keep track of the actual surfaces used (shveu_wait frees the
	   bounce buffers by comparing these against the *_user copies) */
	veu->src_hw = local_src;
	veu->dst_hw = local_dst;

	/* Software reset: stop the engine if running, then spin until the
	   start bit clears. */
	if (read_reg(base_addr, VESTR) & 0x1)
		write_reg(base_addr, 0, VESTR);
	while (read_reg(base_addr, VESTR) & 1)
		;

	/* Clear VEU end interrupt flag */
	write_reg(base_addr, 0, VEVTR);

	/* VEU Module reset */
	write_reg(base_addr, 0x100, VBSRR);

	/* default to not using bundle mode */
	write_reg(base_addr, 0, VBSSR);

	/* source: physical Y/C addresses, size, and line stride in bytes */
	Y = uiomux_all_virt_to_phys(src->py);
	C = uiomux_all_virt_to_phys(src->pc);
	write_reg(base_addr, Y, VSAYR);
	write_reg(base_addr, C, VSACR);
	write_reg(base_addr, (src->h << 16) | src->w, VESSR);
	write_reg(base_addr, size_y(src->format, src->pitch), VESWR);

	/* destination */
	Y = uiomux_all_virt_to_phys(dst->py);
	C = uiomux_all_virt_to_phys(dst->pc);

	/* For mirror/rotate modes the hardware writes "backwards" from the
	   programmed address, so offset the destination start address to
	   the appropriate corner of the output image.
	   NOTE(review): several branches offset the C plane with size_y()
	   rather than size_c(), and the Vertical Mirror branch uses
	   (src->h-2) for C vs (src->h-1) for Y — possibly intentional for
	   subsampled chroma, but worth confirming against the VEU
	   datasheet. */
	if (filter_control & 0xFF) {
		if ((filter_control & 0xFF) == 0x10) {
			/* Horizontal Mirror (A) */
			Y += size_y(dst->format, src->w);
			C += size_y(dst->format, src->w);
		} else if ((filter_control & 0xFF) == 0x20) {
			/* Vertical Mirror (B) */
			Y += size_y(dst->format, (src->h-1) * dst->pitch);
			C += size_c(dst->format, (src->h-2) * dst->pitch);
		} else if ((filter_control & 0xFF) == 0x30) {
			/* Rotate 180 (C) */
			Y += size_y(dst->format, src->w);
			C += size_y(dst->format, src->w);
			Y += size_y(dst->format, src->h * dst->pitch);
			C += size_c(dst->format, src->h * dst->pitch);
		} else if ((filter_control & 0xFF) == 1) {
			/* Rotate 90 (D) */
			Y += size_y(dst->format, src->h-16);
			C += size_y(dst->format, src->h-16);
		} else if ((filter_control & 0xFF) == 2) {
			/* Rotate 270 (E) */
			Y += size_y(dst->format, (src->w-16) * dst->pitch);
			C += size_c(dst->format, (src->w-16) * dst->pitch);
		} else if ((filter_control & 0xFF) == 0x11) {
			/* Rotate 90 & Mirror Horizontal (F) */
			/* Nothing to do */
		} else if ((filter_control & 0xFF) == 0x21) {
			/* Rotate 90 & Mirror Vertical (G) */
			Y += size_y(dst->format, src->h-16);
			C += size_y(dst->format, src->h-16);
			Y += size_y(dst->format, (src->w-16) * dst->pitch);
			C += size_c(dst->format, (src->w-16) * dst->pitch);
		}
	}
	write_reg(base_addr, Y, VDAYR);
	write_reg(base_addr, C, VDACR);
	write_reg(base_addr, size_y(dst->format, dst->pitch), VEDWR);

	/* byte/word swapping */
	temp = 0;
#ifdef __LITTLE_ENDIAN__
	temp |= src_info->vswpr;
	temp |= dst_info->vswpr << 4;
#endif
	write_reg(base_addr, temp, VSWPR);

	/* transform control: format bits plus colorspace-conversion flags */
	temp = src_info->vtrcr_src;
	temp |= dst_info->vtrcr_dst;
	if (is_rgb(src_surface->format))
		temp |= VTRCR_RY_SRC_RGB;
	if (different_colorspace(src_surface->format, dst_surface->format))
		temp |= VTRCR_TE_BIT_SET;
	if (veu->bt709)
		temp |= VTRCR_BT709;
	if (veu->full_range)
		temp |= VTRCR_FULL_COLOR_CONV;
	write_reg(base_addr, temp, VTRCR);

	if (veu_is_veu2h(veu)) {
		/* color conversion matrix (fixed-point coefficients; VEU2H
		   needs them programmed explicitly) */
		write_reg(base_addr, 0x0cc5, VMCR00);
		write_reg(base_addr, 0x0950, VMCR01);
		write_reg(base_addr, 0x0000, VMCR02);
		write_reg(base_addr, 0x397f, VMCR10);
		write_reg(base_addr, 0x0950, VMCR11);
		write_reg(base_addr, 0x3cdd, VMCR12);
		write_reg(base_addr, 0x0000, VMCR20);
		write_reg(base_addr, 0x0950, VMCR21);
		write_reg(base_addr, 0x1023, VMCR22);
		write_reg(base_addr, 0x00800010, VCOFFR);
	}

	/* Clipping */
	write_reg(base_addr, 0, VRFSR);
	set_clip(base_addr, 0, dst->w);
	set_clip(base_addr, 1, dst->h);

	/* Scaling */
	write_reg(base_addr, 0, VRFCR);
	if (!(filter_control & 0x3)) {
		/* Not a rotate operation */
		set_scale(veu, base_addr, 0, src->w, dst->w, 0);
		set_scale(veu, base_addr, 1, src->h, dst->h, 0);
	}

	/* Filter control - directly pass user arg to register */
	write_reg(base_addr, filter_control, VFMCR);

	return 0;

fail:
	uiomux_unlock(veu->uiomux, veu->uiores);
	return -1;
}