void
ToneProducer::LateNoticeReceived(const media_source& what, bigtime_t how_much,
	bigtime_t performance_time)
{
	FPRINTF(stderr, "ToneProducer::LateNoticeReceived\n");

	// If we're late, we need to catch up.  Respond in a manner appropriate
	// to our current run mode.
	if (what == mOutput.source) {
		if (RunMode() == B_RECORDING) {
			// A hardware capture node can't adjust; it simply emits buffers
			// at appropriate points.  We (partially) simulate this by not
			// adjusting our behavior upon receiving late notices -- after
			// all, the hardware can't choose to capture "sooner"....
		} else if (RunMode() == B_INCREASE_LATENCY) {
			// We're late, and our run mode dictates that we try to produce
			// buffers earlier in order to catch up.  This argues that the
			// downstream nodes are not properly reporting their latency,
			// but there's not much we can do about that at the moment, so
			// we try to start producing buffers earlier to compensate.
			mInternalLatency += how_much;
			SetEventLatency(mLatency + mInternalLatency);

			FPRINTF(stderr, "\tincreasing latency to %Ld\n",
				mLatency + mInternalLatency);
		} else {
			// The other run modes dictate various strategies for sacrificing
			// data quality in the interests of timely data delivery.  The
			// way *we* do this is to skip a buffer, which catches us up in
			// time by one buffer duration.
			size_t nSamples = mOutput.format.u.raw_audio.buffer_size
				/ sizeof(float);
			mFramesSent += nSamples;

			FPRINTF(stderr, "\tskipping a buffer to try to catch up\n");
		}
	}
}
void
StepMotionBlurFilter::BufferReceived(BBuffer* pBuffer)
{
	ASSERT(pBuffer);

	// check buffer destination
	if (pBuffer->Header()->destination != m_input.destination.id) {
		PRINT(("StepMotionBlurFilter::BufferReceived():\n"
			"\tBad destination.\n"));
		pBuffer->Recycle();
		return;
	}

	if ((RunMode() != B_OFFLINE)
		&& (pBuffer->Header()->time_source != TimeSource()->ID())) {
		PRINT(("* timesource mismatch\n"));
	}

	// check output
	if (m_output.destination == media_destination::null || !m_outputEnabled) {
		pBuffer->Recycle();
		return;
	}

	// remember the start time now; once the buffer has been sent (or
	// recycled), its header must no longer be touched
	bigtime_t startTime = pBuffer->Header()->start_time;

	// process and retransmit buffer
	filterBuffer(pBuffer);

	status_t err = SendBuffer(pBuffer, m_output.source, m_output.destination);
	if (err < B_OK) {
		PRINT(("StepMotionBlurFilter::BufferReceived():\n"
			"\tSendBuffer() failed: %s\n", strerror(err)));
		pBuffer->Recycle();
	}

	if (RunMode() == B_OFFLINE)
		SetOfflineTime(startTime);

	// sent!
}
void
AudioProducer::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
	bigtime_t performanceTime)
{
	TRACE("%p->AudioProducer::LateNoticeReceived(%lld, %lld)\n", this,
		howMuch, performanceTime);

	// If we're late, we need to catch up.  Respond in a manner appropriate
	// to our current run mode.
	if (what == fOutput.source) {
		// Ignore notices for buffers we already sent out (or whose events
		// we already scheduled) before we processed the last notice.
		if (fLastLateNotice > performanceTime)
			return;

		fLastLateNotice = fNextScheduledBuffer;

		if (RunMode() == B_RECORDING) {
			// ...
		} else if (RunMode() == B_INCREASE_LATENCY) {
			fInternalLatency += howMuch;

			// At some point a too large latency gets annoying, so clamp it.
			if (fInternalLatency > kMaxLatency)
				fInternalLatency = kMaxLatency;

			SetEventLatency(fLatency + fInternalLatency);
		} else {
			// Skip one buffer ahead in the audio data.
			size_t sampleSize = fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK;
			size_t samplesPerBuffer = fOutput.format.u.raw_audio.buffer_size
				/ sampleSize;
			size_t framesPerBuffer = samplesPerBuffer
				/ fOutput.format.u.raw_audio.channel_count;
			fFramesSent += framesPerBuffer;
		}
	}
}
BBuffer*
GameProducer::FillNextBuffer(bigtime_t event_time)
{
	// get a buffer from our buffer group
	BBuffer* buf = fBufferGroup->RequestBuffer(fBufferSize, BufferDuration());

	// if we fail to get a buffer (for example, if the request times out),
	// we skip this buffer and go on to the next, to avoid locking up the
	// control thread.
	if (!buf)
		return NULL;

	// we need to describe the buffer
	int64 frames = int64(fBufferSize / fFrameSize);
	memset(buf->Data(), 0, fBufferSize);

	// now fill the buffer with data, continuing where the last buffer
	// left off
	fObject->Play(buf->Data(), frames);

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = fBufferSize;
	hdr->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, we stamp with the capture time.  We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been
		// created.
		stamp = event_time;
	} else {
		// Okay, we're in one of the "live" performance run modes.  In these
		// modes, we stamp the buffer with the time at which the buffer
		// should be rendered to the output, not with the capture time.
		// fStartTime is the cached value of the first buffer's performance
		// time; we calculate this buffer's performance time as an offset
		// from that time, based on the amount of media we've created so far.
		// Recalculating every buffer like this avoids accumulation of error.
		stamp = fStartTime + bigtime_t(double(fFramesSent)
			/ double(fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = stamp;

	return buf;
}
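// The "avoids accumulation of error" remark above is worth making concrete:
// adding a rounded per-buffer duration to a running timestamp lets the
// rounding error grow without bound, while recomputing each stamp from the
// total frame count keeps the error bounded by a single rounding step.  A
// minimal standalone sketch of the difference -- the frame rate and buffer
// size here are illustrative, not taken from GameProducer:
#include <cstdint>
#include <cstdio>

int main()
{
	const double frameRate = 44100.0;     // assumed sample rate
	const int64_t framesPerBuffer = 440;  // assumed buffer size in frames

	int64_t accumulated = 0;  // naive: add a truncated duration per buffer
	int64_t framesSent = 0;

	for (int i = 0; i < 100000; i++) {
		accumulated += int64_t(framesPerBuffer / frameRate * 1000000.0);
		framesSent += framesPerBuffer;
	}

	// recomputed: derive the stamp from the total frame count, as the
	// producers in this section do
	int64_t recomputed = int64_t(double(framesSent) / frameRate * 1000000.0);

	// the accumulated stamp has drifted by tens of milliseconds;
	// the recomputed one is off by less than a microsecond
	printf("accumulated: %lld us, recomputed: %lld us\n",
		(long long)accumulated, (long long)recomputed);
	return 0;
}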
void
VideoRecorderNode::BufferReceived(BBuffer* inBuffer)
{
	INFO("VideoRecorderNode::BufferReceived():\n");

	if (RunMode() == B_OFFLINE) {
		// int32 destinationID = inBuffer->Header()->destination;
		SetOfflineTime(inBuffer->Header()->start_time);
	}

	media_timed_event event(inBuffer->Header()->start_time,
		BTimedEventQueue::B_HANDLE_BUFFER, inBuffer,
		BTimedEventQueue::B_RECYCLE_BUFFER);
	status_t err = EventQueue()->AddEvent(event);
	if (err != B_OK)
		inBuffer->Recycle();
}
void
MediaReader::LateNoticeReceived(const media_source& what, bigtime_t how_much,
	bigtime_t performance_time)
{
	CALLED();

	if (what == output.source) {
		switch (RunMode()) {
			case B_OFFLINE:
				// nothing to do
				break;
			case B_RECORDING:
				// nothing to do
				break;
			case B_INCREASE_LATENCY:
				fInternalLatency += how_much;
				SetEventLatency(fDownstreamLatency + fInternalLatency);
				break;
			case B_DECREASE_PRECISION:
				// XXX: shorten our buffer period.  We could opt to just not
				// wait, but we should probably gradually shorten the period
				// so we don't starve others.  Also, we need to make sure we
				// are catching up!  We may have some sort of time goal for
				// how long it takes us to catch up, as well.
				break;
			case B_DROP_DATA:
				// Okay, you asked for it: we'll skip ahead in the file!
				// We'll drop one buffer's worth.
				if (GetCurrentFile() == 0) {
					PRINT("MediaReader::LateNoticeReceived called without "
						"a GetCurrentFile() (!)\n");
				} else {
					GetCurrentFile()->Seek(
						output.format.u.multistream.max_chunk_size, SEEK_CUR);
				}
				break;
			default:
				// huh?? there aren't any more run modes.
				PRINT("MediaReader::LateNoticeReceived with unexpected "
					"run mode.\n");
				break;
		}
	}
}
void
SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
	bigtime_t performanceTime)
{
	CALLED();

	TRACE("SoundPlayNode::LateNoticeReceived, %" B_PRId64 " too late at %"
		B_PRId64 "\n", howMuch, performanceTime);

	// is this our output?
	if (what != fOutput.source) {
		TRACE("SoundPlayNode::LateNoticeReceived returning\n");
		return;
	}

	if (RunMode() != B_DROP_DATA) {
		// We're late, and our run mode dictates that we try to produce
		// buffers earlier in order to catch up.  This argues that the
		// downstream nodes are not properly reporting their latency, but
		// there's not much we can do about that at the moment, so we try
		// to start producing buffers earlier to compensate.
		fInternalLatency += howMuch;
		if (fInternalLatency > 30000) {
			// avoid getting a too high latency
			fInternalLatency = 30000;
		}
		SetEventLatency(fLatency + fInternalLatency);

		TRACE("SoundPlayNode::LateNoticeReceived: increasing latency to %"
			B_PRId64 "\n", fLatency + fInternalLatency);
	} else {
		// The other run modes dictate various strategies for sacrificing
		// data quality in the interests of timely data delivery.  The way
		// *we* do this is to skip a buffer, which catches us up in time by
		// one buffer duration.
		size_t nFrames = fOutput.format.u.raw_audio.buffer_size
			/ ((fOutput.format.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK)
			* fOutput.format.u.raw_audio.channel_count);
		fFramesSent += nFrames;

		TRACE("SoundPlayNode::LateNoticeReceived: skipping a buffer to try "
			"to catch up\n");
	}
}
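// Several producers above derive "one buffer's worth" of frames from the
// raw-audio format in slightly different ways (ToneProducer assumes mono
// float samples; SoundPlayNode and AudioProducer handle arbitrary sample
// sizes and channel counts).  A small helper capturing the general
// computation might look like this sketch; it assumes the standard Haiku
// media_raw_audio_format fields and is not taken from any node above:
#include <MediaDefs.h>

// One buffer's worth of frames: each frame holds one sample per channel,
// and the sample size is encoded in the low bits of the format constant
// (B_AUDIO_SIZE_MASK).
static size_t
frames_per_buffer(const media_raw_audio_format& format)
{
	size_t sampleSize = format.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	if (sampleSize == 0 || format.channel_count == 0)
		return 0;
	return format.buffer_size / (sampleSize * format.channel_count);
}

// The catch-up gained by skipping one buffer, in microseconds of
// performance time.
static bigtime_t
buffer_duration(const media_raw_audio_format& format)
{
	if (format.frame_rate <= 0.0)
		return 0;
	return bigtime_t(double(frames_per_buffer(format))
		/ double(format.frame_rate) * 1000000.0);
}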
void
OffsetFilter::filterBuffer(BBuffer* inBuffer)
{
	if (!inBuffer)
		return;

	/* here is where we do all of the real work */
	if (RunMode() != B_OFFLINE)
		CALL("FilterBuffer now: %Ld\n", TimeSource()->Now());
	else
		CALL("FilterBuffer now: %Ld\n", OfflineTime());

	media_header* inHeader = inBuffer->Header();
	CALL("now: %Ld start_time: %Ld\n", TimeSource()->Now(),
		inHeader->start_time);

	uint32* inData = (uint32*)inBuffer->Data();

	/* without a BBitmap: shift the frame in place via a scratch copy */
	uint32* po = inData;
	uint32* pi = (uint32*)malloc(inHeader->size_used);
	if (pi == NULL)
		return;
	uint32* last = inData + inHeader->size_used / 4;

	memcpy(pi, inData, inHeader->size_used);

	const uint32 C = m_format.u.raw_video.display.line_width;
	const uint32 L = m_format.u.raw_video.display.line_count;
	const uint32 deltax = C * DELTA_X / 1000;
	const uint32 deltay = L * DELTA_Y / 1000;

	// copy each pixel from its offset source position, wrapping around
	// the frame edges
	uint32 i = 0;
	while (po < last) {
		uint32 line = (i / C + deltay) % L;
		uint32 col = (i % C + deltax) % C;
		*po++ = pi[line * C + col];
		i++;
	}

	free(pi);
	/* end of the no-BBitmap path */
}
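// The wrap-around mapping above is easiest to verify on a tiny frame.  The
// following standalone sketch (not part of OffsetFilter; the 4x4 size and
// the offsets are arbitrary) applies the same mapping to a frame of pixel
// indices and prints the shifted result:
#include <cstdint>
#include <cstdio>

int main()
{
	const uint32_t C = 4, L = 4;           // tiny 4x4 "frame"
	const uint32_t deltax = 1, deltay = 2; // arbitrary shift in pixels

	uint32_t src[L * C], dst[L * C];
	for (uint32_t i = 0; i < L * C; i++)
		src[i] = i;  // each pixel holds its own index

	// same mapping as OffsetFilter: pixel (line, col) is taken from
	// source pixel ((line + deltay) % L, (col + deltax) % C)
	for (uint32_t line = 0; line < L; line++)
		for (uint32_t col = 0; col < C; col++)
			dst[line * C + col]
				= src[((line + deltay) % L) * C + (col + deltax) % C];

	for (uint32_t line = 0; line < L; line++) {
		for (uint32_t col = 0; col < C; col++)
			printf("%2u ", dst[line * C + col]);
		printf("\n");
	}
	return 0;
}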
BBuffer*
ClientNode::FillNextBuffer(bigtime_t eventTime, JackPort* port)
{
	//printf("FillNextBuffer\n");

	BBuffer* buffer = port->CurrentBuffer();

	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->size_used = fFormat.u.raw_audio.buffer_size;
	header->time_source = TimeSource()->ID();

	bigtime_t start;
	if (RunMode() == B_RECORDING)
		start = eventTime;
	else {
		start = fTime + bigtime_t(double(fFramesSent)
			/ double(fFormat.u.raw_audio.frame_rate) * 1000000.0);
	}
	header->start_time = start;

	return buffer;
}
status_t
BMediaEventLooper::SetPriority(int32 priority)
{
	CALLED();

	// clamp to a valid value
	if (priority < 5)
		priority = 5;
	if (priority > 120)
		priority = 120;

	fSetPriority = priority;
	fCurrentPriority = (RunMode() == B_OFFLINE)
		? min_c(B_NORMAL_PRIORITY, fSetPriority) : fSetPriority;

	if (fControlThread > 0) {
		set_thread_priority(fControlThread, fCurrentPriority);

/*		fSchedulingLatency = estimate_max_scheduling_latency(fControlThread);
		printf("BMediaEventLooper: SchedulingLatency is %Ld\n",
			fSchedulingLatency);
*/
	}

	return B_OK;
}
static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;

	/* XXX configure ports for input */
	writel(0x0000, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));

	if (1) {
		/* enable fifos A B C D */
		writeb(0x0f, devpriv->mite->daq_io_addr + Data_Path);

		/* set transfer width to 32 bits */
		writeb(TransferWidth(0) | TransferLength(0),
		       devpriv->mite->daq_io_addr + Transfer_Size_Control);
	} else {
		writeb(0x03, devpriv->mite->daq_io_addr + Data_Path);
		writeb(TransferWidth(3) | TransferLength(0),
		       devpriv->mite->daq_io_addr + Transfer_Size_Control);
	}

	/* protocol configuration */
	if (cmd->scan_begin_src == TRIG_TIMER) {
		/* page 4-5, "input with internal REQs" */
		writeb(0, devpriv->mite->daq_io_addr + OpMode);
		writeb(0x00, devpriv->mite->daq_io_addr + ClockReg);
		writeb(1, devpriv->mite->daq_io_addr + Sequence);
		writeb(0x04, devpriv->mite->daq_io_addr + ReqReg);
		writeb(4, devpriv->mite->daq_io_addr + BlockMode);
		writeb(3, devpriv->mite->daq_io_addr + LinePolarities);
		writeb(0xc0, devpriv->mite->daq_io_addr + AckSer);
		writel(ni_pcidio_ns_to_timer(&cmd->scan_begin_arg,
					     TRIG_ROUND_NEAREST),
		       devpriv->mite->daq_io_addr + StartDelay);
		writeb(1, devpriv->mite->daq_io_addr + ReqDelay);
		writeb(1, devpriv->mite->daq_io_addr + ReqNotDelay);
		writeb(1, devpriv->mite->daq_io_addr + AckDelay);
		writeb(0x0b, devpriv->mite->daq_io_addr + AckNotDelay);
		writeb(0x01, devpriv->mite->daq_io_addr + Data1Delay);
		/* manual, page 4-5: ClockSpeed comment is incorrectly listed
		 * on DAQOptions */
		writew(0, devpriv->mite->daq_io_addr + ClockSpeed);
		writeb(0, devpriv->mite->daq_io_addr + DAQOptions);
	} else {
		/* TRIG_EXT */
		/* page 4-5, "input with external REQs" */
		writeb(0, devpriv->mite->daq_io_addr + OpMode);
		writeb(0x00, devpriv->mite->daq_io_addr + ClockReg);
		writeb(0, devpriv->mite->daq_io_addr + Sequence);
		writeb(0x00, devpriv->mite->daq_io_addr + ReqReg);
		writeb(4, devpriv->mite->daq_io_addr + BlockMode);
		writeb(0, devpriv->mite->daq_io_addr + LinePolarities);
		writeb(0x00, devpriv->mite->daq_io_addr + AckSer);
		writel(1, devpriv->mite->daq_io_addr + StartDelay);
		writeb(1, devpriv->mite->daq_io_addr + ReqDelay);
		writeb(1, devpriv->mite->daq_io_addr + ReqNotDelay);
		writeb(1, devpriv->mite->daq_io_addr + AckDelay);
		writeb(0x0C, devpriv->mite->daq_io_addr + AckNotDelay);
		writeb(0x10, devpriv->mite->daq_io_addr + Data1Delay);
		writew(0, devpriv->mite->daq_io_addr + ClockSpeed);
		writeb(0x60, devpriv->mite->daq_io_addr + DAQOptions);
	}

	if (cmd->stop_src == TRIG_COUNT) {
		writel(cmd->stop_arg,
		       devpriv->mite->daq_io_addr + Transfer_Count);
	} else {
		/* XXX */
	}

#ifdef USE_DMA
	writeb(ClearPrimaryTC | ClearSecondaryTC,
	       devpriv->mite->daq_io_addr + Group_1_First_Clear);

	{
		int retval = setup_mite_dma(dev, s);
		if (retval)
			return retval;
	}
#else
	writeb(0x00, devpriv->mite->daq_io_addr + DMA_Line_Control_Group1);
#endif
	writeb(0x00, devpriv->mite->daq_io_addr + DMA_Line_Control_Group2);

	/* clear and enable interrupts */
	writeb(0xff, devpriv->mite->daq_io_addr + Group_1_First_Clear);
	/* writeb(ClearExpired,
	       devpriv->mite->daq_io_addr + Group_1_Second_Clear); */

	writeb(IntEn, devpriv->mite->daq_io_addr + Interrupt_Control);
	writeb(0x03,
	       devpriv->mite->daq_io_addr + Master_DMA_And_Interrupt_Control);

	if (cmd->stop_src == TRIG_NONE) {
		devpriv->OpModeBits = DataLatching(0) | RunMode(7);
	} else {
		/* TRIG_COUNT */
		devpriv->OpModeBits = Numbered | RunMode(7);
	}
	if (cmd->start_src == TRIG_NOW) {
		/* start */
		writeb(devpriv->OpModeBits,
		       devpriv->mite->daq_io_addr + OpMode);
		s->async->inttrig = NULL;
	} else {
		/* TRIG_INT */
		s->async->inttrig = ni_pcidio_inttrig;
	}

	DPRINTK("ni_pcidio: command started\n");

	return 0;
}
BBuffer*
AudioProducer::_FillNextBuffer(bigtime_t eventTime)
{
	BBuffer* buffer = fBufferGroup->RequestBuffer(
		fOutput.format.u.raw_audio.buffer_size, BufferDuration());

	if (!buffer) {
		ERROR("AudioProducer::_FillNextBuffer() - no buffer\n");
		return NULL;
	}

	size_t sampleSize = fOutput.format.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t numSamples = fOutput.format.u.raw_audio.buffer_size / sampleSize;
		// number of samples in the buffer

	// fill in the buffer header
	media_header* header = buffer->Header();
	header->type = B_MEDIA_RAW_AUDIO;
	header->time_source = TimeSource()->ID();
	buffer->SetSizeUsed(fOutput.format.u.raw_audio.buffer_size);

	bigtime_t performanceTime = bigtime_t(double(fFramesSent)
		* 1000000.0 / double(fOutput.format.u.raw_audio.frame_rate));

	// fill in data from audio supplier
	int64 frameCount = numSamples / fOutput.format.u.raw_audio.channel_count;
	bigtime_t startTime = performanceTime;
	bigtime_t endTime = bigtime_t(double(fFramesSent + frameCount)
		* 1000000.0 / fOutput.format.u.raw_audio.frame_rate);

	if (!fSupplier || fSupplier->InitCheck() != B_OK
		|| fSupplier->GetFrames(buffer->Data(), frameCount, startTime,
			endTime) != B_OK) {
		ERROR("AudioProducer::_FillNextBuffer() - supplier error -> "
			"silence\n");
		memset(buffer->Data(), 0, buffer->SizeUsed());
	}

	// stamp buffer
	if (RunMode() == B_RECORDING)
		header->start_time = eventTime;
	else
		header->start_time = fStartTime + performanceTime;

#if DEBUG_TO_FILE
	BMediaTrack* track;
	if (BMediaFile* file = init_media_file(fOutput.format, &track))
		track->WriteFrames(buffer->Data(), frameCount);
#endif // DEBUG_TO_FILE

	if (fPeakListener
		&& fOutput.format.u.raw_audio.format
			== media_raw_audio_format::B_AUDIO_FLOAT) {
		// TODO: extend the peak notifier for other sample formats
		int32 channels = fOutput.format.u.raw_audio.channel_count;
		float max[channels];
		float min[channels];
		for (int32 i = 0; i < channels; i++) {
			max[i] = -1.0;
			min[i] = 1.0;
		}
		float* sample = (float*)buffer->Data();
		for (uint32 i = 0; i < frameCount; i++) {
			for (int32 k = 0; k < channels; k++) {
				if (*sample < min[k])
					min[k] = *sample;
				if (*sample > max[k])
					max[k] = *sample;
				sample++;
			}
		}
		BMessage message(MSG_PEAK_NOTIFICATION);
		for (int32 i = 0; i < channels; i++) {
			float maxAbs = max_c(fabs(min[i]), fabs(max[i]));
			message.AddFloat("max", maxAbs);
		}
		bigtime_t realTime = TimeSource()->RealTimeFor(
			fStartTime + performanceTime, 0);
		MessageEvent* event = new (std::nothrow) MessageEvent(realTime,
			fPeakListener, message);
		if (event != NULL)
			EventQueue::Default().AddEvent(event);
	}

	return buffer;
}
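// The TODO above notes that the peak scan only handles B_AUDIO_FLOAT.  A
// minimal sketch of the same per-channel min/max scan for 16-bit signed
// samples (B_AUDIO_SHORT), normalized into the same -1.0..1.0 range -- an
// illustration of the extension, not code from AudioProducer:
#include <algorithm>
#include <cmath>
#include <cstdint>

// Per-channel absolute peak for interleaved 16-bit samples: 'data' holds
// frameCount frames of channelCount samples each; 'peaks' receives one
// value per channel.
static void
scan_peaks_short(const int16_t* data, int64_t frameCount,
	int32_t channelCount, float* peaks)
{
	for (int32_t k = 0; k < channelCount; k++)
		peaks[k] = 0.0f;

	for (int64_t i = 0; i < frameCount; i++) {
		for (int32_t k = 0; k < channelCount; k++) {
			// normalize to the float range before taking the magnitude
			float value = std::fabs(*data++ / 32768.0f);
			peaks[k] = std::max(peaks[k], value);
		}
	}
}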
BBuffer*
ToneProducer::FillNextBuffer(bigtime_t event_time)
{
	// get a buffer from our buffer group
	BBuffer* buf = mBufferGroup->RequestBuffer(
		mOutput.format.u.raw_audio.buffer_size, BufferDuration());

	// if we fail to get a buffer (for example, if the request times out),
	// we skip this buffer and go on to the next, to avoid locking up the
	// control thread
	if (!buf)
		return NULL;

	// now fill it with data, continuing where the last buffer left off
	// 20sep99: multichannel support

	size_t numFrames = mOutput.format.u.raw_audio.buffer_size
		/ (sizeof(float) * mOutput.format.u.raw_audio.channel_count);
	bool stereo = (mOutput.format.u.raw_audio.channel_count == 2);
	if (!stereo) {
		ASSERT(mOutput.format.u.raw_audio.channel_count == 1);
	}
//	PRINT(("buffer: %ld, %ld frames, %s\n",
//		mOutput.format.u.raw_audio.buffer_size, numFrames,
//		stereo ? "stereo" : "mono"));

	float* data = (float*)buf->Data();

	switch (mWaveform) {
		case SINE_WAVE:
			FillSineBuffer(data, numFrames, stereo);
			break;

		case TRIANGLE_WAVE:
			FillTriangleBuffer(data, numFrames, stereo);
			break;

		case SAWTOOTH_WAVE:
			FillSawtoothBuffer(data, numFrames, stereo);
			break;
	}

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
	hdr->time_source = TimeSource()->ID();

	bigtime_t stamp;
	if (RunMode() == B_RECORDING) {
		// In B_RECORDING mode, we stamp with the capture time.  We're not
		// really a hardware capture node, but we simulate it by using the
		// (precalculated) time at which this buffer "should" have been
		// created.
		stamp = event_time;
	} else {
		// Okay, we're in one of the "live" performance run modes.  In
		// these modes, we stamp the buffer with the time at which the
		// buffer should be rendered to the output, not with the capture
		// time.  mStartTime is the cached value of the first buffer's
		// performance time; we calculate this buffer's performance time
		// as an offset from that time, based on the amount of media we've
		// created so far.  Recalculating every buffer like this avoids
		// accumulation of error.
		stamp = mStartTime + bigtime_t(double(mFramesSent)
			/ double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
	}
	hdr->start_time = stamp;

	return buf;
}
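// FillSineBuffer() and its siblings are declared elsewhere and not shown in
// this section.  The following is a minimal sketch of what such a fill
// function could look like, using a running phase accumulator so that
// consecutive buffers join without a click.  The parameter names and the
// gain constant are assumptions for illustration, not ToneProducer's actual
// implementation (there, the phase and its increment would be member state):
#include <cmath>
#include <cstddef>

static const double kTwoPi = 6.283185307179586;

// Writes numFrames frames of float samples; in stereo the same signal is
// duplicated on both channels.  thetaIncrement is 2*pi*frequency/frame_rate,
// and theta carries the phase across buffers.
void
fill_sine_buffer(float* data, size_t numFrames, bool stereo,
	double& theta, double thetaIncrement)
{
	const float gain = 0.8f;  // assumed fixed amplitude
	for (size_t i = 0; i < numFrames; i++) {
		float sample = gain * float(std::sin(theta));
		*data++ = sample;
		if (stereo)
			*data++ = sample;  // same signal on the second channel

		theta += thetaIncrement;
		if (theta > kTwoPi)
			theta -= kTwoPi;  // keep the phase bounded
	}
}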
void
FlipTransition::BufferReceived(BBuffer* pBuffer)
{
	ASSERT(pBuffer);

	// check buffer destination
	if ((pBuffer->Header()->destination != first_input.destination.id)
		&& (pBuffer->Header()->destination != second_input.destination.id)) {
		PRINT(("FlipTransition::BufferReceived():\n"
			"\tBad destination.\n"));
		pBuffer->Recycle();
		return;
	}

	if ((RunMode() != B_OFFLINE)
		&& (pBuffer->Header()->time_source != TimeSource()->ID())) {
		PRINT(("* timesource mismatch\n"));
	}

	// check output
	if (m_output.destination == media_destination::null || !m_outputEnabled) {
		pBuffer->Recycle();
		return;
	}

	if (pBuffer->Header()->destination == first_input.destination.id) {
		// the buffer comes from the first input
		firstInputBufferHere = true;
		firstBuffer = pBuffer;
		PRINT(("First Buffer Received\n"));
	} else {
		// the buffer comes from the second input
		secondInputBufferHere = true;
		secondBuffer = pBuffer;
		PRINT(("Second Buffer Received\n"));
	}

	// what happens if one of the producers is no longer valid?
	if (firstInputBufferHere && secondInputBufferHere) {
		// process and retransmit buffer
		MakeTransition(firstBuffer, secondBuffer);

		if (transitionBuffer != NULL) {
			// remember the start time now; once the buffer has been sent,
			// its header must no longer be touched
			bigtime_t startTime = transitionBuffer->Header()->start_time;

			status_t err = SendBuffer(transitionBuffer, m_output.source,
				m_output.destination);
			if (err < B_OK) {
				PRINT(("FlipTransition::BufferReceived():\n"
					"\tSendBuffer() failed: %s\n", strerror(err)));
				transitionBuffer->Recycle();
			}

			if (RunMode() == B_OFFLINE) {
				SetOfflineTime(startTime);
				// RequestAdditionalBuffer(first_input.source, OfflineTime());
				// RequestAdditionalBuffer(second_input.source, OfflineTime());
			}
			// sent!
		}

		firstBuffer->Recycle();
		secondBuffer->Recycle();
		firstInputBufferHere = false;
		secondInputBufferHere = false;
	}
}
void
FlipTransition::MakeTransition(BBuffer* inBuffer, BBuffer* inBuffer2)
{
	status_t err;

	if (!inBuffer || !inBuffer2)
		return;

	/* here is where we do all of the real work */
	if (RunMode() != B_OFFLINE)
		CALL("FilterBuffer now: %Ld\n", TimeSource()->Now());
	else
		CALL("FilterBuffer now: %Ld\n", OfflineTime());

	media_header* firstHeader = inBuffer->Header();
//	CALL("now: %Ld start_time: %Ld\n", TimeSource()->Now(),
//		firstHeader->start_time);

	transitionBuffer = buffers->RequestBuffer(
		4 * m_format.u.raw_video.display.line_width
			* m_format.u.raw_video.display.line_count, 10000);
	if (transitionBuffer == NULL) {
		err = buffers->RequestError();
		if (err == B_ERROR)
			printf("Error requesting buffer\n");
		else if (err == B_MEDIA_BUFFERS_NOT_RECLAIMED)
			printf("Buffers not reclaimed\n");
		return;
	}

	uint32* p1 = (uint32*)inBuffer->Data();
	uint32* p2 = (uint32*)inBuffer2->Data();
	uint32* finalData = (uint32*)transitionBuffer->Data();
	uint32* po = finalData;

	const uint32 C = m_format.u.raw_video.display.line_width;
	const uint32 L = m_format.u.raw_video.display.line_count;
	uint32 c, l, dc, dl;
	uint32 Co, Lo, lino, colo, lini, coli;
	uint32 bgc = (Red << 16) + (Green << 8) + Blue;

	// clear the output frame to the background color
	while (po < finalData + firstHeader->size_used / 4)
		*po++ = bgc;
	po = finalData;

	// TState runs from 0 to 100: the first frame shrinks away during the
	// first half of the transition, then the second frame grows back during
	// the second half.  The (Co > 1)/(Lo > 1) guards skip drawing when the
	// scaled extent collapses, which also avoids a division by zero in the
	// source-index scaling.
	switch (Mode) {
		case 0:
			// horizontal flip
			if (TState <= 50) {
				Co = (50 - TState) * C / 50;
				dc = Dx * (C - Co) / 100;
				if (Co > 1) {
					for (l = 0; l < L; l++)
						for (c = 0; c < Co; c++)
							po[l * C + c + dc]
								= p1[l * C + c * (C - 1) / (Co - 1)];
				}
			} else {
				Co = (TState - 50) * C / 50;
				dc = Dx * (C - Co) / 100;
				if (Co > 1) {
					for (l = 0; l < L; l++)
						for (c = 0; c < Co; c++)
							po[l * C + c + dc]
								= p2[l * C + c * (C - 1) / (Co - 1)];
				}
			}
			break;

		case 1:
			// vertical flip
			if (TState <= 50) {
				Lo = (50 - TState) * L / 50;
				dl = Dy * (L - Lo) / 100;
				if (Lo > 1) {
					for (l = 0; l < Lo; l++)
						for (c = 0; c < C; c++)
							po[(l + dl) * C + c]
								= p1[(l * (L - 1) / (Lo - 1)) * C + c];
				}
			} else {
				Lo = (TState - 50) * L / 50;
				dl = Dy * (L - Lo) / 100;
				if (Lo > 1) {
					for (l = 0; l < Lo; l++)
						for (c = 0; c < C; c++)
							po[(l + dl) * C + c]
								= p2[(l * (L - 1) / (Lo - 1)) * C + c];
				}
			}
			break;

		case 2:
			// both directions at once
			if (TState <= 50) {
				Lo = (50 - TState) * L / 50;
				Co = (50 - TState) * C / 50;
				dl = Dy * (L - Lo) / 100;
				dc = Dx * (C - Co) / 100;
				if (Lo > 1 && Co > 1) {
					for (l = 0; l < Lo; l++)
						for (c = 0; c < Co; c++) {
							lini = l * (L - 1) / (Lo - 1);
							coli = c * (C - 1) / (Co - 1);
							lino = l + dl;
							colo = c + dc;
							po[lino * C + colo] = p1[lini * C + coli];
						}
				}
			} else {
				Lo = (TState - 50) * L / 50;
				Co = (TState - 50) * C / 50;
				dl = Dy * (L - Lo) / 100;
				dc = Dx * (C - Co) / 100;
				if (Lo > 1 && Co > 1) {
					for (l = 0; l < Lo; l++)
						for (c = 0; c < Co; c++) {
							lini = l * (L - 1) / (Lo - 1);
							coli = c * (C - 1) / (Co - 1);
							lino = l + dl;
							colo = c + dc;
							po[lino * C + colo] = p2[lini * C + coli];
						}
				}
			}
			break;
	}

	// advance the transition state
	if (TState < 100)
		TState++;

	// fill in the output buffer header
	media_header* h = transitionBuffer->Header();
	h->type = B_MEDIA_RAW_VIDEO;
	h->size_used = firstHeader->size_used;
	h->start_time = firstHeader->start_time;	// FIXME: to be changed!! IMPORTANT
	h->file_pos = firstHeader->file_pos;
	memcpy(&h->u.raw_video, &m_format.u.raw_video, sizeof(media_video_header));
//	memcpy(h, firstHeader, sizeof(media_header));
}
void
VideoConsumer::_HandleBuffer(BBuffer* buffer)
{
	if (RunState() != B_STARTED || !fConnectionActive) {
		TRACE("RunState() != B_STARTED\n");
		buffer->Recycle();
		return;
	}

	// See if this is one of our BBitmap buffers
	uint32 index = 0;
	fOurBuffers = true;
	while (index < kBufferCount) {
		if (buffer->ID() == fBufferMap[index]->ID())
			break;
		else
			index++;
	}
	if (index == kBufferCount) {
		// Buffers belong to consumer
		// NOTE: We maintain this in a member variable, since we still need
		// to recycle this buffer later on, in case it was the last buffer
		// received before shutting down.
		fOurBuffers = false;
		index = (fLastBufferIndex + 1) % kBufferCount;
	}

	bool recycle = true;
	bigtime_t now = TimeSource()->Now();
	if (RunMode() == B_OFFLINE
		|| now < buffer->Header()->start_time + kMaxBufferLateness) {
		// Only display the buffer if it's not too late, or if we are
		// in B_OFFLINE run-mode.
		if (!fOurBuffers) {
			memcpy(fBitmap[index]->Bits(), buffer->Data(),
				fBitmap[index]->BitsLength());
		}
		bigtime_t tooEarly = buffer->Header()->start_time - now;
		if (tooEarly > 3000)
			snooze(tooEarly);

		fTargetLock.Lock();
		if (fTarget) {
			fTarget->SetBitmap(fBitmap[index]);
			if (fOurBuffers) {
				// recycle the previous but not the current buffer
				if (fLastBufferIndex >= 0)
					fBufferMap[fLastBufferIndex]->Recycle();
				recycle = false;
			}
			fLastBufferIndex = index;
		}
		fTargetLock.Unlock();
	} else {
		// Drop the buffer if it's too late.
		if (fManager->LockWithTimeout(10000) == B_OK) {
			fManager->FrameDropped();
			fManager->Unlock();
		}
		PROGRESS("VideoConsumer::HandleEvent - DROPPED FRAME\n"
			"	start_time: %lld, current: %lld, latency: %lld\n",
			buffer->Header()->start_time, TimeSource()->Now(),
			SchedulingLatency());
	}

	if (recycle)
		buffer->Recycle();
}