static void flush_imgs(struct mdp_blit_req *req, struct ppp_regs *regs,
		       struct file *src_file, struct file *dst_file)
{
#ifdef CONFIG_ANDROID_PMEM
	uint32_t src0_len, src1_len, dst0_len, dst1_len;

	if (!(req->flags & MDP_BLIT_NON_CACHED)) {
		/* flush src images to memory before dma to mdp */
		get_len(&req->src, &req->src_rect, regs->src_bpp,
			&src0_len, &src1_len);
		flush_pmem_file(src_file, req->src.offset, src0_len);
		if (IS_PSEUDOPLNR(req->src.format))
			flush_pmem_file(src_file,
					req->src.offset + src0_len, src1_len);

		/* flush dst images */
		get_len(&req->dst, &req->dst_rect, regs->dst_bpp,
			&dst0_len, &dst1_len);
		flush_pmem_file(dst_file, req->dst.offset, dst0_len);
		if (IS_PSEUDOPLNR(req->dst.format))
			flush_pmem_file(dst_file,
					req->dst.offset + dst0_len, dst1_len);
	}
#endif
}
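/*
 * Illustrative sketch (not part of the driver): why a pseudo-planar
 * (NV12-style) frame needs two flushes in flush_imgs() above. The
 * names here (fake_img, fake_flush, flush_nv12) are hypothetical, and
 * the full-frame plane-size math is a simplifying assumption; the real
 * get_len() derives lengths from the blit rectangle and bpp.
 */
#include <stdint.h>

struct fake_img {
	uint32_t width;
	uint32_t height;
	uint32_t offset;	/* offset of the frame inside the pmem file */
};

/* Stand-in for flush_pmem_file(): flush 'len' bytes at 'offset'. */
static void fake_flush(uint32_t offset, uint32_t len)
{
	(void)offset;
	(void)len;
}

static void flush_nv12(const struct fake_img *img)
{
	uint32_t luma_len   = img->width * img->height;	/* Y plane */
	uint32_t chroma_len = luma_len / 2;		/* interleaved CbCr */

	/* The luma plane starts at the buffer offset... */
	fake_flush(img->offset, luma_len);
	/*
	 * ...and the chroma plane immediately follows it, which is why
	 * flush_imgs() issues a second flush at offset + plane0_len for
	 * IS_PSEUDOPLNR formats.
	 */
	fake_flush(img->offset + luma_len, chroma_len);
}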
static int q6_encode(struct q6venc_dev *q6venc, struct encode_param *enc_param)
{
	struct q6_encode_param *q6_param = &enc_param->q6_encode_param;
	struct file *file;
	struct buf_info *buf;
	int i;
	int ret;
	int rlc_buf_index;

	pr_debug("y_addr fd=%d offset=0x%08lx uv_offset=0x%08lx\n",
		 enc_param->y_addr.fd, enc_param->y_addr.offset,
		 enc_param->uv_offset);

	file = fget(enc_param->y_addr.fd);
	if (!file) {
		pr_err("%s: invalid encode buffer fd %d\n", __func__,
		       enc_param->y_addr.fd);
		return -EBADF;
	}

	mutex_lock(&q6venc->lock);

	for (i = 0; i < q6venc->num_enc_bufs; i++) {
		buf = &q6venc->enc_bufs[i];
		if (buf->file == file &&
		    buf->venc_buf.offset == enc_param->y_addr.offset)
			break;
	}

	if (i == q6venc->num_enc_bufs) {
		if (q6venc->num_enc_bufs == VENC_MAX_BUF_NUM) {
			pr_err("%s: too many input buffers\n", __func__);
			ret = -ENOMEM;
			goto done;
		}

		buf = &q6venc->enc_bufs[q6venc->num_enc_bufs];
		ret = get_buf_info(buf, &enc_param->y_addr);
		if (ret) {
			pr_err("%s: can't get encode buffer\n", __func__);
			ret = -EINVAL;
			goto done;
		}

		if (!IS_ALIGNED(buf->paddr, PAGE_SIZE)) {
			pr_err("%s: input buffer not 4k aligned\n", __func__);
			put_buf_info(buf);
			ret = -EINVAL;
			goto done;
		}
		q6venc->num_enc_bufs++;
	}

	/*
	 * We must invalidate the buffer that the DSP will write to
	 * to ensure that a dirty cache line doesn't get flushed on
	 * top of the data that the DSP is writing.
	 * Unfortunately, we have to predict which rlc_buf index the
	 * DSP is going to write to. We assume it will write to buf
	 * 0 the first time we call q6_encode, and alternate afterwards.
	 */
	rlc_buf_index = q6venc->rlc_buf_index;
	dmac_inv_range((const void *)q6venc->rlc_bufs[rlc_buf_index].vaddr,
		       (const void *)(q6venc->rlc_bufs[rlc_buf_index].vaddr +
				      q6venc->rlc_buf_len));
	q6venc->rlc_buf_index = (q6venc->rlc_buf_index + 1) % RLC_MAX_BUF_NUM;

	q6_param->luma_addr = buf->paddr;
	q6_param->chroma_addr = q6_param->luma_addr + enc_param->uv_offset;
	pr_debug("luma_addr=0x%08x chroma_addr=0x%08x\n",
		 q6_param->luma_addr, q6_param->chroma_addr);

	/*
	 * Ideally, each ioctl that passed in a data buffer would include the
	 * size of the input buffer, so we can properly flush the cache on it.
	 * Since userspace does not fill in the size fields, we have to assume
	 * the size based on the encoder configuration for now.
	 */
	flush_pmem_file(buf->file, enc_param->y_addr.offset,
			q6venc->enc_buf_size);

	ret = dal_call_f5(q6venc->venc, VENC_DALRPC_ENCODE, q6_param,
			  sizeof(struct q6_encode_param));
	if (ret) {
		pr_err("%s: encode rpc failed\n", __func__);
		goto done;
	}

	ret = 0;

done:
	mutex_unlock(&q6venc->lock);
	fput(file);
	return ret;
}
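/*
 * Illustrative sketch (not part of the driver): the round-robin index
 * prediction used in q6_encode() above, shown in isolation. The names
 * here (rlc_state, invalidate, prepare_next_rlc_buf, RLC_BUF_COUNT) are
 * hypothetical stand-ins; RLC_BUF_COUNT plays the role of
 * RLC_MAX_BUF_NUM and invalidate() the role of dmac_inv_range().
 */
#include <stddef.h>
#include <stdint.h>

#define RLC_BUF_COUNT 2		/* stand-in for RLC_MAX_BUF_NUM */

struct rlc_state {
	uintptr_t vaddr[RLC_BUF_COUNT];	/* start address of each RLC buffer */
	size_t len;			/* length of each buffer */
	int next;			/* index the DSP is predicted to fill next */
};

static void invalidate(uintptr_t start, uintptr_t end)
{
	/*
	 * Stand-in for dmac_inv_range(): a real driver would invalidate
	 * the CPU cache over [start, end) so no dirty line can be
	 * written back on top of the DSP's output.
	 */
	(void)start;
	(void)end;
}

/*
 * Invalidate the buffer the DSP is expected to write, then advance the
 * prediction, mirroring the alternation in q6_encode(): buffer 0 is
 * assumed first, then the index wraps modulo the buffer count.
 */
static void prepare_next_rlc_buf(struct rlc_state *s)
{
	int i = s->next;

	invalidate(s->vaddr[i], s->vaddr[i] + s->len);
	s->next = (s->next + 1) % RLC_BUF_COUNT;
}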