void DeCsaTsBuffer::Action(void)
{
  if (ringBuffer) {
    bool firstRead = true;
    cPoller Poller(f);
    while (Running()) {
      if (firstRead || Poller.Poll(100)) {
        firstRead = false;
        int r = ringBuffer->Read(f);
        if (r < 0 && FATALERRNO) {
          if (errno == EOVERFLOW)
            ERRORLOG("driver buffer overflow on device %d", cardIndex);
          else {
            LOG_ERROR;
            break;
          }
        }
      }
    }
  }
}
int cKbdRemote::ReadKey(void)
{
  cPoller Poller(STDIN_FILENO);
  if (Poller.Poll(50)) {
    uchar ch = 0;
    int r = safe_read(STDIN_FILENO, &ch, 1);
    if (r == 1)
      return ch;
    if (r < 0)
      LOG_ERROR_STR("cKbdRemote");
  }
  return -1;
}
bool cDvbTuner::GetFrontendStatus(fe_status_t &Status, int TimeoutMs)
{
  if (TimeoutMs) {
    cPoller Poller(fd_frontend);
    if (Poller.Poll(TimeoutMs)) {
      dvb_frontend_event Event;
      while (ioctl(fd_frontend, FE_GET_EVENT, &Event) == 0)
        ; // just to clear the event queue - we'll read the actual status below
    }
  }
  do {
    int stat = ioctl(fd_frontend, FE_READ_STATUS, &Status);
    if (stat == 0)
      return true;
    if (stat < 0 && errno == EINTR)
      continue; // interrupted by a signal - retry the ioctl
    break;
  } while (1);
  return false;
}
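The three call sites above share the same read-side pattern: construct a cPoller on the descriptor, block in Poll(TimeoutMs), and only then issue the read() or ioctl(). The following is a minimal standalone sketch of that pattern, not code from VDR itself; the PollAndRead name, the plain read() call and the return convention are illustrative assumptions.

#include <errno.h>
#include <unistd.h>
#include <vdr/tools.h>   // cPoller, uchar

// Illustrative sketch: wait for fd to become readable, then read once.
// Returns bytes read, 0 on timeout or transient error, -1 on a fatal error.
static int PollAndRead(int fd, uchar *Buf, size_t Size, int TimeoutMs)
{
  cPoller Poller(fd);              // read-side poller, as in the snippets above
  if (!Poller.Poll(TimeoutMs))     // nothing readable within TimeoutMs
    return 0;
  for (;;) {
    ssize_t n = read(fd, Buf, Size);
    if (n >= 0)
      return (int)n;
    if (errno == EINTR)            // interrupted by a signal - retry
      continue;
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return 0;                    // spurious wakeup on a non-blocking fd
    return -1;                     // genuine error - caller decides how to log
  }
}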
void cStreamdevFilters::Action(void)
{
  int fails = 0;
  while (Running()) {
    const uchar *block = m_TSBuffer->Get();
    if (block) {
      u_short pid = (((u_short)block[1] & PID_MASK_HI) << 8) | block[2];
      u_char tid = block[3];
      bool Pusi = block[1] & 0x40;
      // proprietary extension
      int len = block[4];
#if 0
      if (block[1] == 0xff && block[2] == 0xff && block[3] == 0xff && block[4] == 0x7f)
        isyslog("*********** TRANSPONDER -> %s **********", block + 5);
#endif
      LOCK_THREAD;
      cStreamdevFilter *f = First();
      while (f) {
        cStreamdevFilter *next = Next(f);
        if (f->Matches(pid, tid)) {
          if (f->PutSection(block + 5, len, Pusi))
            break;
          if (errno != ECONNREFUSED && errno != ECONNRESET && errno != EPIPE) {
            Dprintf("FATAL ERROR: %m\n");
            esyslog("streamdev-client: couldn't send section packet: %m");
          }
          ClientSocket.SetFilter(f->Pid(), f->Tid(), f->Mask(), false);
          Del(f);
          // Filter was closed.
          // - need to check remaining filters for another match
        }
        f = next;
      }
    }
    else {
#if 1
      // TODO: this should be fixed in vdr cTSBuffer
      // Check disconnection
      int fd = *ClientSocket.DataSocket(siLiveFilter);
      if (fd < 0)
        break;
      cPoller Poller(fd);
      if (Poller.Poll()) {
        char tmp[1];
        errno = 0;
        Dprintf("cStreamdevFilters::Action(): checking connection");
        if (recv(fd, tmp, 1, MSG_PEEK) == 0 && errno != EAGAIN) {
          ++fails;
          if (fails >= 10) {
            esyslog("cStreamdevFilters::Action(): stream disconnected ?");
            ClientSocket.CloseDataConnection(siLiveFilter);
            break;
          }
        }
        else {
          fails = 0;
        }
      }
      else {
        fails = 0;
      }
      cCondWait::SleepMs(10);
#endif
    }
  }
  DELETENULL(m_TSBuffer);
  dsyslog("StreamdevFilters::Action() ended");
}
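The else branch above probes for a dropped connection: when the poller reports the data socket readable but a recv() with MSG_PEEK returns 0 bytes, the peer has performed an orderly shutdown. A condensed standalone version of just that check could look as follows; the IsPeerDisconnected name is an illustrative assumption, not streamdev API.

#include <errno.h>
#include <sys/socket.h>
#include <vdr/tools.h>   // cPoller

// Illustrative sketch: true when the peer has closed the connection.
// A socket that polls readable but yields 0 bytes on a MSG_PEEK recv() is at EOF.
static bool IsPeerDisconnected(int fd)
{
  cPoller Poller(fd);
  if (!Poller.Poll())              // default timeout 0: return immediately
    return false;                  // nothing pending - connection still quiet
  char tmp[1];
  errno = 0;
  ssize_t r = recv(fd, tmp, sizeof(tmp), MSG_PEEK);
  return r == 0 && errno != EAGAIN;
}

Note that the loop above only treats the stream as gone after ten consecutive positives, which smooths over transient zero-byte reads.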
void cRawWriter::Action(void)
{
  uint64_t NextHeaderPos = 0ULL;
  uint64_t GetPos = 0ULL;
  cPoller Poller(m_fd, true);

  while (Running()) {

    if (!Poller.Poll(100))
      continue;

    uint64_t StartPos;
    int Count = 0;
    int n;
    uchar *Data = m_RingBuffer.Get(Count);

    if (!Data || Count <= 0)
      continue;

    Lock(); // uint64_t m_DiscardStart can not be read atomically (IA32)
    StartPos = m_DiscardEnd;
    Unlock();

    // Next frame ?
    if (NextHeaderPos == GetPos) {

      // Discard data ?
      if (StartPos > GetPos) {
        // we're at frame boundary
        Count = min(Count, (int)(StartPos - GetPos));
        m_RingBuffer.Del(Count);
        GetPos += Count;
        NextHeaderPos = GetPos;
        continue;
      }

      // Next frame
      if (Count < 6)
        LOGMSG("cBackgroundWriter @NextHeaderPos: Count < header size !");

      int packlen = DATA_IS_TS(Data) ? TS_SIZE : pes_packet_len(Data, Count);

      if (Count < packlen)
        ; //LOGMSG("Count = %d < %d", Count,
          //       header->len + sizeof(stream_tcp_header_t));
      else
        Count = packlen;

      NextHeaderPos = GetPos + packlen;

    } else {
      // end of prev frame
      Count = min(Count, (int)(NextHeaderPos - GetPos));
    }

    errno = 0;
    n = write(m_fd, Data, Count);

    if (n <= 0) {
      if (n == 0) {
        LOGERR("cBackgroundWriter: Client disconnected data stream ?");
        break;
      }
      if (errno == EINTR || errno == EWOULDBLOCK)
        continue;
      LOGERR("cBackgroundWriter: TCP write error");
      break;
    }

    GetPos += n;
    m_RingBuffer.Del(n);
  }

  m_RingBuffer.Clear();
}
void cTcpWriter::Action(void)
{
  uint64_t NextHeaderPos = 0;
  uint64_t GetPos = 0;
  cPoller Poller(m_fd, true);
  bool CorkReq = false;

  while (Running()) {

    if (!Poller.Poll(100))
      continue;

    if (CorkReq && m_RingBuffer.Available() <= 0) {
      // Force TCP packet to avoid delaying control messages
      Cork();
      CorkReq = false;
    }

    uint64_t StartPos;
    int Count = 0;
    int n;
    uchar *Data = m_RingBuffer.Get(Count);

    if (!Data || Count <= 0)
      continue;

    Lock(); // uint64_t m_DiscardStart can not be read atomically (IA32)
    StartPos = m_DiscardEnd;
    Unlock();

    // Next frame ?
    if (NextHeaderPos == GetPos) {

      // Discard data ?
      if (StartPos > GetPos) {
        // we're at frame boundary
        // drop only data packets, not control messages
        stream_tcp_header_t *header = (stream_tcp_header_t *)Data;
        if (eStreamId(header->stream) == sidVdr) {
          Count = min(Count, (int)(StartPos - GetPos));
          // size of next (complete) packet.
          // drop only one packet at time.
          int pkt_len = ntohl(header->len) + sizeof(stream_tcp_header_t);
          if (Count >= pkt_len) {
            // drop only complete packets.
            // some packets are not dropped (packets overlapping end of ringbuffer)
            Count = pkt_len;
            m_RingBuffer.Del(Count);
            GetPos += Count;
            NextHeaderPos = GetPos;
            CorkReq = true; // force sending last frame
            continue;
          }
        }
      }

      // Next frame
      if (Count < (int)sizeof(stream_tcp_header_t))
        LOGMSG("cBackgroundWriter @NextHeaderPos: Count < header size !");

      // limit single write to size of next (complete) packet.
      // (we need to track packet boundaries)
      stream_tcp_header_t *header = (stream_tcp_header_t *)Data;
      int pkt_len = ntohl(header->len) + sizeof(stream_tcp_header_t);
      if (Count > pkt_len)
        Count = pkt_len;

      // next packet start position in stream
      NextHeaderPos = GetPos + pkt_len;

      // check for control message
      if (eStreamId(header->stream) == sidControl)
        CorkReq = true;

    } else {
      // end of prev frame
      Count = min(Count, (int)(NextHeaderPos - GetPos));
    }

    errno = 0;
    n = write(m_fd, Data, Count);

    if (n <= 0) {
      if (n == 0) {
        LOGERR("cBackgroundWriter: Client disconnected data stream ?");
        break;
      }
      if (errno == EINTR || errno == EWOULDBLOCK)
        continue;
      LOGERR("cBackgroundWriter: TCP write error");
      break;
    }

    GetPos += n;
    m_RingBuffer.Del(n);
  }

  m_RingBuffer.Clear();
}
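Both writer threads above use the same write-side skeleton: a cPoller constructed with Out = true, a bounded Poll(100), and a write() whose result is classified into progress, retryable (EINTR / EWOULDBLOCK), disconnect (0), or fatal error. Below is a reduced sketch of just that error-handling core, detached from the ring buffer and framing logic; the WriteChunk name and its return convention are illustrative assumptions, not part of the code above.

#include <errno.h>
#include <unistd.h>
#include <vdr/tools.h>   // cPoller, uchar

// Illustrative sketch: write one chunk to a non-blocking socket, classifying
// the result the same way the n <= 0 handling above does.
// Returns bytes written, 0 if the caller should simply retry later, -1 to stop.
static int WriteChunk(int fd, const uchar *Data, int Count)
{
  cPoller Poller(fd, true);        // Out = true: wait until fd is writable
  if (!Poller.Poll(100))
    return 0;                      // not writable yet - poll again next round
  errno = 0;
  ssize_t n = write(fd, Data, Count);
  if (n > 0)
    return (int)n;                 // caller advances GetPos and deletes n bytes
  if (n == 0)
    return -1;                     // peer closed the data stream
  if (errno == EINTR || errno == EWOULDBLOCK)
    return 0;                      // transient condition - retry
  return -1;                       // real TCP write error
}

The partial-write case (n > 0 but less than Count) is handled by the callers above: they delete only n bytes from the ring buffer, so the remainder is retried on the next loop iteration.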