void cx18_stream_rotate_idx_mdls(struct cx18 *cx) { struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX]; struct cx18_mdl *mdl; if (!cx18_stream_enabled(s)) return; /* Return if the firmware is not running low on MDLs */ if ((atomic_read(&s->q_free.depth) + atomic_read(&s->q_busy.depth)) >= CX18_ENC_STREAM_TYPE_IDX_FW_MDL_MIN) return; /* Return if there are no MDLs to rotate back to the firmware */ if (atomic_read(&s->q_full.depth) < 2) return; /* * Take the oldest IDX MDL still holding data, and discard its index * entries by scheduling the MDL to go back to the firmware */ mdl = cx18_dequeue(s, &s->q_full); if (mdl != NULL) cx18_enqueue(s, mdl, &s->q_free); }
/*
 * Hand an MDL to the firmware for use in capture, when the stream state
 * allows it; otherwise park the MDL on q_free.
 *
 * Returns the queue the MDL actually ended up on: q_busy when it was
 * scheduled with the firmware, q_free otherwise.
 */
static struct cx18_queue *_cx18_stream_put_mdl_fw(struct cx18_stream *s,
						  struct cx18_mdl *mdl)
{
	struct cx18 *cx = s->cx;
	struct cx18_queue *q;

	/* Don't give it to the firmware, if we're not running a capture */
	if (s->handle == CX18_INVALID_TASK_HANDLE ||
	    test_bit(CX18_F_S_STOPPING, &s->s_flags) ||
	    !test_bit(CX18_F_S_STREAMING, &s->s_flags))
		return cx18_enqueue(s, mdl, &s->q_free);

	q = cx18_enqueue(s, mdl, &s->q_busy);
	if (q != &s->q_busy)
		return q; /* The firmware has the max MDLs it can handle */

	/* Sync CPU caches before the device DMAs into the MDL's buffers */
	cx18_mdl_sync_for_device(s, mdl);
	/* Tell the firmware about the MDL via its SCB mailbox offset */
	cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
		  (void __iomem *) &cx->scb->cpu_mdl[mdl->id] - cx->enc_mem,
		  s->bufs_per_mdl, mdl->id, s->mdl_size);
	return q;
}
/*
 * Hand an MDL to the firmware for use in capture, when the stream state
 * allows it; otherwise park the MDL on q_free.
 *
 * Returns the queue the MDL actually ended up on: q_busy when it was
 * scheduled with the firmware, q_free otherwise.
 */
static struct cx18_queue *_cx18_stream_put_mdl_fw(struct cx18_stream *s,
						  struct cx18_mdl *mdl)
{
	struct cx18 *cx = s->cx;
	struct cx18_queue *q;

	/* Don't give the MDL to the firmware if we're not running a capture */
	if (s->handle == CX18_INVALID_TASK_HANDLE ||
	    test_bit(CX18_F_S_STOPPING, &s->s_flags) ||
	    !test_bit(CX18_F_S_STREAMING, &s->s_flags))
		return cx18_enqueue(s, mdl, &s->q_free);

	/* q_busy may be depth-limited; a different queue means "firmware full" */
	q = cx18_enqueue(s, mdl, &s->q_busy);
	if (q != &s->q_busy)
		return q;

	/* Sync CPU caches before the device DMAs into the MDL's buffers */
	cx18_mdl_sync_for_device(s, mdl);
	/* Tell the firmware about the MDL via its SCB mailbox offset */
	cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
		  (void __iomem *) &cx->scb->cpu_mdl[mdl->id] - cx->enc_mem,
		  s->bufs_per_mdl, mdl->id, s->mdl_size);
	return q;
}
/*
 * Recycle the oldest full IDX MDL back to the firmware when the firmware
 * is running low on MDLs for the index stream.  The index entries held in
 * the discarded MDL are lost.
 */
void cx18_stream_rotate_idx_mdls(struct cx18 *cx)
{
	struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
	struct cx18_mdl *mdl;

	if (!cx18_stream_enabled(s))
		return;

	/* Return if the firmware is not running low on MDLs */
	if ((atomic_read(&s->q_free.depth) + atomic_read(&s->q_busy.depth)) >=
	    CX18_ENC_STREAM_TYPE_IDX_FW_MDL_MIN)
		return;

	/* Return if there are no MDLs to rotate back to the firmware;
	 * requiring >= 2 leaves at least one full MDL behind after rotation */
	if (atomic_read(&s->q_full.depth) < 2)
		return;

	/* Take the oldest IDX MDL still holding data, and discard its index
	 * entries by scheduling the MDL to go back to the firmware */
	mdl = cx18_dequeue(s, &s->q_full);
	if (mdl != NULL)
		cx18_enqueue(s, mdl, &s->q_free);
}
/*
 * Copy captured stream data to userspace.
 *
 * @s:		stream to read from
 * @ubuf:	userspace destination buffer
 * @tot_count:	maximum number of bytes to copy
 * @non_block:	nonzero if the file was opened with O_NONBLOCK
 *
 * Returns the number of bytes copied, 0 at end of stream, or a negative
 * errno (-EIO if capture was never started, -EAGAIN/-EINTR/copy errors
 * propagated from cx18_get_buffer()/cx18_copy_buf_to_user()).
 */
static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
			 size_t tot_count, int non_block)
{
	struct cx18 *cx = s->cx;
	size_t tot_written = 0;
	int single_frame = 0;

	if (atomic_read(&cx->ana_capturing) == 0 && s->id == -1) {
		/* shouldn't happen */
		CX18_DEBUG_WARN("Stream %s not initialized before read\n",
				s->name);
		return -EIO;
	}

	/* Each VBI buffer is one frame, the v4l2 API says that for VBI the
	   frames should arrive one-by-one, so make sure we never output more
	   than one VBI frame at a time */
	if (s->type == CX18_ENC_STREAM_TYPE_VBI &&
	    cx->vbi.sliced_in->service_set)
		single_frame = 1;

	for (;;) {
		struct cx18_buffer *buf;
		int rc;

		buf = cx18_get_buffer(s, non_block, &rc);
		/* if there is no data available... */
		if (buf == NULL) {
			/* if we got data, then return that regardless */
			if (tot_written)
				break;
			/* EOS condition */
			if (rc == 0) {
				clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
				clear_bit(CX18_F_S_APPL_IO, &s->s_flags);
				cx18_release_stream(s);
			}
			/* set errno */
			return rc;
		}

		rc = cx18_copy_buf_to_user(s, buf, ubuf + tot_written,
					   tot_count - tot_written);

		if (buf != &cx->vbi.sliced_mpeg_buf) {
			/* A real hardware buffer: when fully consumed, hand it
			   back to the firmware; otherwise keep the leftover on
			   q_io for the next read */
			if (buf->readpos == buf->bytesused) {
				cx18_buf_sync_for_device(s, buf);
				cx18_enqueue(s, buf, &s->q_free);
				cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5,
					  s->handle,
					  (void __iomem *)&cx->scb->cpu_mdl[buf->id] -
						cx->enc_mem,
					  1, buf->id, s->buf_size);
			} else
				cx18_enqueue(s, buf, &s->q_io);
		} else if (buf->readpos == buf->bytesused) {
			/* The special sliced-VBI-in-MPEG buffer is fully
			   consumed: account for the inserted frame and reset
			   its slot size */
			int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;

			cx->vbi.sliced_mpeg_size[idx] = 0;
			cx->vbi.inserted_frame++;
			cx->vbi_data_inserted += buf->bytesused;
		}
		if (rc < 0)
			return rc;
		tot_written += rc;

		if (tot_written == tot_count || single_frame)
			break;
	}
	return tot_written;
}
/*
 * Fetch the next buffer holding stream data for a read, blocking if
 * permitted until data arrives.
 *
 * @s:		stream being read
 * @non_block:	nonzero if the file was opened with O_NONBLOCK
 * @err:	out: 0 on EOS, -EAGAIN (would block) or -EINTR (signal)
 *		when NULL is returned for a non-EOS reason
 *
 * Returns a buffer with data, or NULL with *err set as above.
 */
static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s,
					   int non_block, int *err)
{
	struct cx18 *cx = s->cx;
	struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
	struct cx18_buffer *buf;
	DEFINE_WAIT(wait);

	*err = 0;
	while (1) {
		if (s->type == CX18_ENC_STREAM_TYPE_MPG) {
			/* Periodic (~1s) dual-audio watch while reading MPG */
			if (time_after(jiffies,
				       cx->dualwatch_jiffies +
					       msecs_to_jiffies(1000))) {
				cx->dualwatch_jiffies = jiffies;
				cx18_dualwatch(cx);
			}
			/* Drain internally-captured VBI buffers so they can be
			   recycled; nobody is reading the VBI device directly */
			if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
			    !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
				while ((buf = cx18_dequeue(s_vbi,
							   &s_vbi->q_full))) {
					/* byteswap and process VBI data */
					/* cx18_process_vbi_data(cx, buf,
						s_vbi->dma_pts, s_vbi->type); */
					cx18_enqueue(s_vbi, buf,
						     &s_vbi->q_free);
				}
			}
			/* Pending sliced VBI data to embed in the MPG stream? */
			buf = &cx->vbi.sliced_mpeg_buf;
			if (buf->readpos != buf->bytesused)
				return buf;
		}

		/* do we have leftover data? */
		buf = cx18_dequeue(s, &s->q_io);
		if (buf)
			return buf;

		/* do we have new data? */
		buf = cx18_dequeue(s, &s->q_full);
		if (buf) {
			/* Freshly DMA'd buffers still need a byteswap, done
			   lazily here on first dequeue */
			if (!test_and_clear_bit(CX18_F_B_NEED_BUF_SWAP,
						&buf->b_flags))
				return buf;
			if (s->type == CX18_ENC_STREAM_TYPE_MPG)
				/* byteswap MPG data */
				cx18_buf_swap(buf);
			else {
				/* byteswap and process VBI data */
				cx18_process_vbi_data(cx, buf, s->dma_pts,
						      s->type);
			}
			return buf;
		}

		/* return if end of stream */
		if (!test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
			CX18_DEBUG_INFO("EOS %s\n", s->name);
			return NULL;
		}

		/* return if file was opened with O_NONBLOCK */
		if (non_block) {
			*err = -EAGAIN;
			return NULL;
		}

		/* wait for more data to arrive */
		prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
		/* New buffers might have become available before we were added
		   to the waitqueue */
		/* NOTE(review): this reads q_full.buffers while other code in
		   this file reads q_*.depth — confirm against the cx18_queue
		   struct definition which field is current */
		if (!atomic_read(&s->q_full.buffers))
			schedule();
		finish_wait(&s->waitq, &wait);
		if (signal_pending(current)) {
			/* return if a signal was received */
			CX18_DEBUG_INFO("User stopped %s\n", s->name);
			*err = -EINTR;
			return NULL;
		}
	}
}
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) { u32 handle, mdl_ack_count, id; struct cx18_mailbox *mb; struct cx18_mdl_ack *mdl_ack; struct cx18_stream *s; struct cx18_buffer *buf; int i; mb = &order->mb; handle = mb->args[0]; s = cx18_handle_to_stream(cx, handle); if (s == NULL) { CX18_WARN("Got DMA done notification for unknown/inactive" " handle %d, %s mailbox seq no %d\n", handle, (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? "stale" : "good", mb->request); return; } mdl_ack_count = mb->args[2]; mdl_ack = order->mdl_ack; for (i = 0; i < mdl_ack_count; i++, mdl_ack++) { id = mdl_ack->id; /* * Simple integrity check for processing a stale (and possibly * inconsistent mailbox): make sure the buffer id is in the * valid range for the stream. * * We go through the trouble of dealing with stale mailboxes * because most of the time, the mailbox data is still valid and * unchanged (and in practice the firmware ping-pongs the * two mdl_ack buffers so mdl_acks are not stale). * * There are occasions when we get a half changed mailbox, * which this check catches for a handle & id mismatch. If the * handle and id do correspond, the worst case is that we * completely lost the old buffer, but pick up the new buffer * early (but the new mdl_ack is guaranteed to be good in this * case as the firmware wouldn't point us to a new mdl_ack until * it's filled in). * * cx18_queue_get buf() will detect the lost buffers * and send them back to q_free for fw rotation eventually. */ if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && !(id >= s->mdl_offset && id < (s->mdl_offset + s->buffers))) { CX18_WARN("Fell behind! Ignoring stale mailbox with " " inconsistent data. 
Lost buffer for mailbox " "seq no %d\n", mb->request); break; } buf = cx18_queue_get_buf(s, id, mdl_ack->data_used); CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id); if (buf == NULL) { CX18_WARN("Could not find buf %d for stream %s\n", id, s->name); continue; } CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n", s->name, buf->bytesused); if (s->type != CX18_ENC_STREAM_TYPE_TS) cx18_enqueue(s, buf, &s->q_full); else { if (s->dvb.enabled) dvb_dmx_swfilter(&s->dvb.demux, buf->buf, buf->bytesused); cx18_enqueue(s, buf, &s->q_free); } } /* Put as many buffers as possible back into fw use */ cx18_stream_load_fw_queue(s); wake_up(&cx->dma_waitq); if (s->id != -1) wake_up(&s->waitq); }