Example #1
/*! \brief Loads the next video chunk into fVideoChunkBuffer and assigns it
		(including the start time) to fTempPacket, but only if fTempPacket
		is empty.

	\returns B_OK
		1. meaning: The next video chunk was loaded.
		2. meaning: There was no need to load and assign anything. Proceed
		   as usual.
	\returns B_LAST_BUFFER_ERROR No more video chunks are available.
		fVideoChunkBuffer and fTempPacket are left untouched.
	\returns Other errors The caller should bail out, because
		fVideoChunkBuffer and fTempPacket are in unknown states. Normal
		operation cannot be guaranteed.
*/
status_t
AVCodecDecoder::_LoadNextVideoChunkIfNeededAndAssignStartTime()
{
	// TODO: Rename fVideoChunkBuffer to fChunkBuffer, once the audio path is
	// responsible for releasing the chunk buffer, too.

	if (fTempPacket.size > 0)
		return B_OK;

	const void* chunkBuffer = NULL;
	size_t chunkBufferSize = 0;
		// Local variables are used so that fChunkBufferSize is left
		// untouched in case GetNextChunk() returns an error.
	media_header chunkMediaHeader;

	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer,
		&chunkBufferSize, &chunkMediaHeader);
	if (getNextChunkStatus != B_OK)
		return getNextChunkStatus;

	status_t chunkBufferPaddingStatus
		= _CopyChunkToVideoChunkBufferAndAddPadding(chunkBuffer,
		chunkBufferSize);
	if (chunkBufferPaddingStatus != B_OK)
		return chunkBufferPaddingStatus;

	fTempPacket.data = fVideoChunkBuffer;
	fTempPacket.size = fChunkBufferSize;
	fTempPacket.dts = chunkMediaHeader.start_time;
		// Let FFMPEG handle the correct relationship between start_time and
		// the decoded video frame. By doing so we simply follow the way it
		// is implemented in ffplay.c
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
		//
		// FIXME: Research how to establish a meaningful relationship
		// between start_time and the decoded video frame when the received
		// chunk buffer contains partial video frames. Maybe some data
		// formats contain time stamps (aka pts / dts fields) that can
		// be evaluated by FFMPEG. But as long as I don't have such
		// video data to test it, it makes no sense to implement it.
		//
		// FIXME: Implement tracking start_time of video frames
		// originating in data chunks that encode more than one video
		// frame at a time. In that case one would increment the
		// start_time for each consecutive frame of such a data chunk
		// (like it is done for audio frame decoding). But as long as
		// I don't have such video data to test it, it makes no sense
		// to implement it.

#ifdef LOG_STREAM_TO_FILE
	if (sDumpedPackets < 100) {
		sStreamLogFile.Write(chunkBuffer, chunkBufferSize);
		printf("wrote %ld bytes\n", chunkBufferSize);
		sDumpedPackets++;
	} else if (sDumpedPackets == 100)
		sStreamLogFile.Unset();
#endif

	return B_OK;
}
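
A hypothetical caller sketch (not part of the original source) illustrating the three-way return contract documented above; _ExampleDecodeStep() and _DecodeSomething() are made-up names, while _FlushOneVideoFrameFromDecoderBuffer() is the drain path that appears in Example #5.

// Hypothetical caller - only to show how the documented return values
// might be handled; this method does not exist in AVCodecDecoder.
status_t
AVCodecDecoder::_ExampleDecodeStep()
{
	status_t loadStatus = _LoadNextVideoChunkIfNeededAndAssignStartTime();

	if (loadStatus == B_OK) {
		// Either a fresh chunk was loaded or fTempPacket already held data;
		// in both cases decoding can proceed as usual.
		return _DecodeSomething();
			// hypothetical next step, e.g. feeding fTempPacket to the codec
	}

	if (loadStatus == B_LAST_BUFFER_ERROR) {
		// No more chunks; fVideoChunkBuffer and fTempPacket are untouched,
		// so draining the frames still buffered inside the decoder is safe.
		return _FlushOneVideoFrameFromDecoderBuffer();
	}

	// Any other error leaves fVideoChunkBuffer and fTempPacket in unknown
	// states; bail out and propagate the error.
	return loadStatus;
}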
Example #2
uint32 FChunkWriter::FQueuedChunkWriter::Run()
{
	// Loop until there are no more chunks
	while ( ShouldBeRunning() )
	{
		FChunkFile* ChunkFile = GetNextChunk();
		if( ChunkFile != NULL )
		{
			const FGuid& ChunkGuid = ChunkFile->ChunkHeader.Guid;
			const uint64& ChunkHash = ChunkFile->ChunkHeader.RollingHash;
#if SAVE_OLD_CHUNKDATA_FILENAMES
			const FString OldChunkFilename = FBuildPatchUtils::GetChunkOldFilename( ChunkDirectory, ChunkGuid );
#endif
			const FString NewChunkFilename = FBuildPatchUtils::GetChunkNewFilename( EBuildPatchAppManifestVersion::GetLatestVersion(), ChunkDirectory, ChunkGuid, ChunkHash );

			// To be a bit safer, make a few attempts at writing chunks
			int32 RetryCount = 5;
			bool bChunkSaveSuccess = false;
			while ( RetryCount > 0 )
			{
				// Write out chunks
				bChunkSaveSuccess = WriteChunkData( NewChunkFilename, ChunkFile, ChunkGuid );
#if SAVE_OLD_CHUNKDATA_FILENAMES
				bChunkSaveSuccess = bChunkSaveSuccess && WriteChunkData( OldChunkFilename, ChunkFile, ChunkGuid );
#endif
				// Check success
				if( bChunkSaveSuccess )
				{
					RetryCount = 0;
				}
				else
				{
					// Retry after a second if failed
					--RetryCount;
					FPlatformProcess::Sleep( 1.0f );
				}
			}

			// If we really could not save out chunk data successfully, this build will never work, so panic flush logs and then cause a hard error.
			if( !bChunkSaveSuccess )
			{
				GLog->PanicFlushThreadedLogs();
				check( bChunkSaveSuccess );
			}

			// Delete the data memory
			delete ChunkFile;
		}
		FPlatformProcess::Sleep( 0.0f );
	}
	return 0;
}
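
A stand-alone sketch of the bounded-retry pattern used in the loop above, written in plain C++ without Unreal types; RetryWithDelay and TryWrite are illustrative names, not part of FBuildPatchUtils.

#include <chrono>
#include <functional>
#include <thread>

// Try an operation up to MaxAttempts times, sleeping between failed
// attempts, and report whether it ever succeeded. Mirrors the
// RetryCount/Sleep loop in FQueuedChunkWriter::Run() above.
static bool RetryWithDelay(const std::function<bool()>& TryWrite,
	int MaxAttempts = 5, std::chrono::seconds Delay = std::chrono::seconds(1))
{
	for (int Attempt = 0; Attempt < MaxAttempts; ++Attempt)
	{
		if (TryWrite())
			return true;                        // saved successfully
		std::this_thread::sleep_for(Delay);     // wait before retrying
	}
	return false;                               // caller must treat as fatal
}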
Example #3
status_t
RawDecoder::Decode(void *buffer, int64 *frameCount,
				   media_header *mediaHeader, media_decode_info *info /* = 0 */)
{
	char *output_buffer = (char *)buffer;
	mediaHeader->start_time = fStartTime;
	*frameCount = 0;
	while (*frameCount < fOutputBufferFrameCount) {
		if (fChunkSize == 0) {
			media_header mh;
			status_t err;
			err = GetNextChunk(&fChunkBuffer, &fChunkSize, &mh);
			if (err != B_OK || fChunkSize < fInputFrameSize) {
				fChunkSize = 0;
				break;
			}
			if (fSwapInput)
				fSwapInput(const_cast<void *>(fChunkBuffer), fChunkSize / fInputSampleSize); // XXX TODO! FIX THIS, we write to a const buffer!!!
			fStartTime = mh.start_time;
			continue;
		}
		int32 frames = min_c(fOutputBufferFrameCount - *frameCount, fChunkSize / fInputFrameSize);
		if (frames == 0)
			break;

		int32 samples = frames * fInputFormat.u.raw_audio.channel_count;
		fConvert(output_buffer, fChunkBuffer, samples);
		fChunkBuffer = (const char *)fChunkBuffer + frames * fInputFrameSize;
		fChunkSize -= frames * fInputFrameSize;
		output_buffer += frames * fOutputFrameSize;
		*frameCount += frames;
		fStartTime +=  (1000000LL * frames) / fFrameRate;
	}
	// XXX should change channel order here for
	// B_AUDIO_FORMAT_CHANNEL_ORDER_WAVE and B_AUDIO_FORMAT_CHANNEL_ORDER_AIFF

	if (fSwapOutput)
		fSwapOutput(buffer, *frameCount * fInputFormat.u.raw_audio.channel_count);
	
	TRACE("framecount %Ld, time %Ld\n",*frameCount, mediaHeader->start_time);
		
	return *frameCount ? B_OK : B_ERROR;
}
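
A stand-alone sketch (illustrative values, not from the original file) of the timing arithmetic in the loop above: each batch of converted frames advances fStartTime by its duration in microseconds.

#include <cstdint>
#include <cstdio>

int main()
{
	const int64_t frameRate = 44100;  // frames per second (assumed example)
	const int64_t frames = 1024;      // frames converted in one pass
	int64_t startTime = 0;            // microseconds, like fStartTime

	// Same formula as fStartTime += (1000000LL * frames) / fFrameRate;
	startTime += (1000000LL * frames) / frameRate;

	printf("start time advanced to %lld us after %lld frames\n",
		(long long)startTime, (long long)frames);
	return 0;
}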
Example #4
    }

    bool map_reader::Load(string Filename,map_data * data)
    {
        // Open the map file for reading
        FILE * Stream;
        Stream = fopen(Filename.c_str(), "rb");
        if (Stream == NULL)
        {
            std::cerr << "Couldn't find LMU map: " << Filename << std::endl;
            exit(1);
        }
        string Header = ReadString(Stream); // read the header
        if (Header != "LcfMapUnit") // compare against the map header
        { // if it does not match, print an error and stop
            printf("Reading error: File is not a valid RPG2000 map\n");
            fclose(Stream);
            return false;
        }
        // Set default data of the map
        data->ChipsetID = 1;
        data->MapWidth = 20;
        data->MapHeight = 15;
        data->TypeOfLoop = 0;
        data->ParallaxBackground = false;
        data->BackgroundName = "None";
        data->HorizontalPan = false;
        data->HorizontalAutoPan = false;
        data->HorizontalPanSpeed = 0;
        data->VerticalPan = false;
        data->VerticalAutoPan = false;
        data->VerticalPanSpeed = 0;
        data->LowerLayer = NULL;
        data->UpperLayer = NULL;
        data->NumEvents = 0;
        GetNextChunk(Stream,data);// Get data from map
        fclose(Stream);
Example #5
/*! \brief Decode next video frame

    We decode exactly one video frame into fDecodedData. To achieve this goal,
    we might need to request several chunks of encoded data resulting in a
    variable execution time of this function.

    The length of the decoded video frame is stored in
    fDecodedDataSizeInBytes. If this variable is greater than zero, you can
    assert that there is a valid video frame available in fDecodedData.

    The decoded video frame in fDecodedData has color space conversion and
    deinterlacing already applied.

    For every decoded video frame a media_header is populated in fHeader,
    containing the corresponding video frame properties.

	Normally every decoded video frame has a start_time field populated in
	the associated fHeader that determines the presentation time of the
	frame. This relationship only holds true when each data chunk provided
	via GetNextChunk() contains data for exactly one encoded video frame
	(one complete frame) - not more and not less.

	We can decode data chunks that contain partial video frame data, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to establish a meaningful relationship
	between an incomplete frame and the start time at which it should be
	presented. Though this might change in the future.

	We can decode data chunks that contain more than one video frame, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to track the start_time across multiple
	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
	and the start time at which it should be presented isn't established at
	the moment. Though this might change in the future.

    Moreover, the fOutputFrameRate variable is updated for every decoded
    video frame.

	On the first call, the member variables fSwsContext / fFormatConversionFunc
	are initialized.

	\return B_OK when we successfully decoded one video frame.
 */
status_t
AVCodecDecoder::_DecodeNextVideoFrame()
{
	assert(fTempPacket.size >= 0);

	while (true) {
		media_header chunkMediaHeader;

		if (fTempPacket.size == 0) {
			// Our packet buffer is empty, so fill it now.
			status_t getNextChunkStatus = GetNextChunk(&fChunkBuffer,
				&fChunkBufferSize, &chunkMediaHeader);
			switch (getNextChunkStatus) {
				case B_OK:
					break;

				case B_LAST_BUFFER_ERROR:
					return _FlushOneVideoFrameFromDecoderBuffer();

				default:
					TRACE("AVCodecDecoder::_DecodeNextVideoFrame(): error from "
						"GetNextChunk(): %s\n", strerror(err));
					return getNextChunkStatus;
			}

			fTempPacket.data = static_cast<uint8_t*>(const_cast<void*>(
				fChunkBuffer));
			fTempPacket.size = fChunkBufferSize;

			fContext->reordered_opaque = chunkMediaHeader.start_time;
				// Let ffmpeg handle the relationship between start_time and
				// decoded video frame.
				//
				// Explanation:
				// The received chunk buffer may not contain the next video
				// frame to be decoded, due to frame reordering (e.g. MPEG1/2
				// provides encoded video frames in a different order than the
				// decoded video frame).
				//
				// FIXME: Research how to establish a meaningful relationship
				// between start_time and decoded video frame when the received
				// chunk buffer contains partial video frames. Maybe some data
				// formats contain time stamps (aka pts / dts fields) that can
				// be evaluated by FFMPEG. But as long as I don't have such
				// video data to test it, it makes no sense to implement it.
				//
				// FIXME: Implement tracking start_time of video frames
				// originating in data chunks that encode more than one video
				// frame at a time. In that case one would increment the
				// start_time for each consecutive frame of such a data chunk
				// (like it is done for audio frame decoding). But as long as
				// I don't have such video data to test it, it makes no sense
				// to implement it.

#ifdef LOG_STREAM_TO_FILE
			if (sDumpedPackets < 100) {
				sStreamLogFile.Write(fChunkBuffer, fChunkBufferSize);
				printf("wrote %ld bytes\n", fChunkBufferSize);
				sDumpedPackets++;
			} else if (sDumpedPackets == 100)
				sStreamLogFile.Unset();
#endif
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFMPEG 0.10.2 code example decoding_encoding.c, the
		// length returned by avcodec_decode_video2() is used to update the
		// packet buffer size (here it is fTempPacket.size). This way the
		// packet buffer is allowed to contain incomplete frames so we are
		// required to buffer the packets between different calls to
		// _DecodeNextVideoFrame().
		int gotPicture = 0;
		int decodedDataSizeInBytes = avcodec_decode_video2(fContext,
			fRawDecodedPicture, &gotPicture, &fTempPacket);
		if (decodedDataSizeInBytes < 0) {
			TRACE("[v] AVCodecDecoder: ignoring error in decoding frame %lld:"
				" %d\n", fFrame, len);
			// NOTE: An error from avcodec_decode_video2() is ignored by the
			// FFMPEG 0.10.2 example decoding_encoding.c. Only the packet
			// buffers are flushed accordingly
			fTempPacket.data = NULL;
			fTempPacket.size = 0;
			continue;
		}

		fTempPacket.size -= decodedDataSizeInBytes;
		fTempPacket.data += decodedDataSizeInBytes;

		bool gotNoPictureYet = gotPicture == 0;
		if (gotNoPictureYet) {
			TRACE("frame %lld - no picture yet, decodedDataSizeInBytes: %d, "
				"chunk size: %ld\n", fFrame, decodedDataSizeInBytes,
				fChunkBufferSize);
			continue;
		}

#if DO_PROFILING
		bigtime_t formatConversionStart = system_time();
#endif

		_HandleNewVideoFrameAndUpdateSystemState();

#if DO_PROFILING
		bigtime_t doneTime = system_time();
		decodingTime += formatConversionStart - startTime;
		conversionTime += doneTime - formatConversionStart;
		profileCounter++;
		if (!(fFrame % 5)) {
			if (info) {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, info->time_to_decode);
			} else {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, bigtime_t(1000000LL / fOutputFrameRate));
			}
			decodingTime = 0;
			conversionTime = 0;
			profileCounter = 0;
		}
#endif
		return B_OK;
	}
}
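
A condensed sketch (assuming the pre-3.0 FFmpeg API used above, i.e. avcodec_decode_video2 and AVFrame::reordered_opaque; DecodeOnePacket is a made-up helper, not part of AVCodecDecoder) of how the start time survives frame reordering: the value stored in AVCodecContext::reordered_opaque before decoding is attached by FFmpeg to whichever frame eventually comes out.

extern "C" {
	#include <libavcodec/avcodec.h>
}
#include <stdint.h>

// Made-up helper condensing the start_time hand-off shown above.
static bool
DecodeOnePacket(AVCodecContext* context, AVFrame* picture, AVPacket* packet,
	int64_t startTime, int64_t* _frameStartTime)
{
	// Tag the packet with its presentation time; FFmpeg copies this into
	// the frame that was actually decoded from it, even when frames are
	// emitted in a different order than they were fed in.
	context->reordered_opaque = startTime;

	int gotPicture = 0;
	if (avcodec_decode_video2(context, picture, &gotPicture, packet) < 0
		|| gotPicture == 0)
		return false;  // decode error, or no complete frame available yet

	*_frameStartTime = picture->reordered_opaque;
	return true;
}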
Example #6
status_t
AVCodecDecoder::_DecodeAudio(void* _buffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
		mediaHeader->start_time / 1000000.0);

	*outFrameCount = 0;

	uint8* buffer = reinterpret_cast<uint8*>(_buffer);
	while (*outFrameCount < fOutputFrameCount) {
		// Check conditions which would hint at broken code below.
		if (fOutputBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the output buffer! "
				"%ld\n", fOutputBufferSize);
			fOutputBufferSize = 0;
		}
		if (fChunkBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the chunk buffer! "
				"%ld\n", fChunkBufferSize);
			fChunkBufferSize = 0;
		}

		if (fOutputBufferSize > 0) {
			// We still have decoded audio frames from the last
			// invocation, which start at fOutputBufferOffset
			// and are fOutputBufferSize bytes long. Copy those into the
			// buffer, but not more than it can hold.
			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
				fOutputBufferSize / fOutputFrameSize);
			if (frames == 0)
				debugger("fOutputBufferSize not multiple of frame size!");
			size_t remainingSize = frames * fOutputFrameSize;
			memcpy(buffer, fOutputFrame->data[0] + fOutputBufferOffset,
				remainingSize);
			fOutputBufferOffset += remainingSize;
			fOutputBufferSize -= remainingSize;
			buffer += remainingSize;
			*outFrameCount += frames;
			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
			continue;
		}
		if (fChunkBufferSize == 0) {
			// Time to read the next chunk buffer. We use a separate
			// media_header, since the chunk header may not belong to
			// the start of the decoded audio frames we return. For
			// example we may have used frames from a previous invocation,
			// or we may have to read several chunks until we fill up the
			// output buffer.
			media_header chunkMediaHeader;
			status_t err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
				&chunkMediaHeader);
			if (err == B_LAST_BUFFER_ERROR) {
				TRACE_AUDIO("  Last Chunk with chunk size %ld\n",
					fChunkBufferSize);
				fChunkBufferSize = 0;
				return err;
			}
			if (err != B_OK || fChunkBufferSize < 0) {
				printf("GetNextChunk error %ld\n",fChunkBufferSize);
				fChunkBufferSize = 0;
				break;
			}
			fChunkBufferOffset = 0;
			fStartTime = chunkMediaHeader.start_time;
		}

		fTempPacket.data = (uint8_t*)fChunkBuffer + fChunkBufferOffset;
		fTempPacket.size = fChunkBufferSize;

		avcodec_get_frame_defaults(fOutputFrame);
		int gotFrame = 0;
		int usedBytes = avcodec_decode_audio4(fContext,
			fOutputFrame, &gotFrame, &fTempPacket);
		if (usedBytes < 0 && !fAudioDecodeError) {
			// Report failure if not done already
			printf("########### audio decode error, "
				"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
				fChunkBufferSize, fChunkBufferOffset);
			fAudioDecodeError = true;
		}
		if (usedBytes <= 0) {
			// Error or failure to produce decompressed output.
			// Skip the chunk buffer data entirely.
			usedBytes = fChunkBufferSize;
			fOutputBufferSize = 0;
			// Assume the audio decoded until now is broken.
			memset(_buffer, 0, buffer - (uint8*)_buffer);
		} else {
			// Success
			fAudioDecodeError = false;
			if (gotFrame == 1) {
				fOutputBufferSize = av_samples_get_buffer_size(NULL,
					fContext->channels, fOutputFrame->nb_samples,
					fContext->sample_fmt, 1);
				if (fOutputBufferSize < 0)
					fOutputBufferSize = 0;
			} else
				fOutputBufferSize = 0;
		}
//printf("  chunk size: %d, decoded: %d, used: %d\n",
//fTempPacket.size, decodedBytes, usedBytes);

		fChunkBufferOffset += usedBytes;
		fChunkBufferSize -= usedBytes;
		fOutputBufferOffset = 0;
	}
	fFrame += *outFrameCount;
	TRACE_AUDIO("  frame count: %lld current: %lld\n", *outFrameCount, fFrame);

	return B_OK;
}
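
A stand-alone sketch of the carry-over logic commented at the top of the loop above: decoded frames left from the previous call are copied first, but never more than the output buffer can still hold. All names are illustrative.

#include <algorithm>
#include <cstdint>
#include <cstring>

// Copy as many whole leftover frames as fit into the remaining output
// capacity and return how many were copied; the caller advances its
// offsets by frames * frameSize, like _DecodeAudio() does.
static int32_t
CopyLeftoverFrames(uint8_t* out, int32_t outCapacityFrames,
	const uint8_t* leftover, int32_t leftoverBytes, int32_t frameSize)
{
	int32_t frames = std::min(outCapacityFrames, leftoverBytes / frameSize);
	memcpy(out, leftover, frames * frameSize);
	return frames;
}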
Example #7
status_t
mp3Decoder::DecodeNextChunk()
{
	const void *chunkBuffer;
	size_t chunkSize;
	media_header mh;
	int outsize;
	int result;
	status_t err;

	// decode residual data that is still in the decoder first
	result = decodeMP3(&fMpgLibPrivate, 0, 0, (char *)fDecodeBuffer, DECODE_BUFFER_SIZE, &outsize);
	if (result == MP3_OK) {
		fResidualBuffer = fDecodeBuffer;
		fResidualBytes = outsize;
		return B_OK;
	}
	
	// get another chunk and push it to the decoder
	err = GetNextChunk(&chunkBuffer, &chunkSize, &mh);
	if (err != B_OK)
		return err;

	fStartTime = mh.start_time;
//	TRACE("mp3Decoder: fStartTime reset to %.6f\n", fStartTime / 1000000.0);

	// resync after a seek		
	if (fNeedSync) {
		TRACE("mp3Decoder::DecodeNextChunk: Syncing...\n");
		if (chunkSize < 4) {
			TRACE("mp3Decoder::DecodeNextChunk: Sync failed, frame too small\n");
			return B_ERROR;
		}
		int len = GetFrameLength(chunkBuffer);
		TRACE("mp3Decoder::DecodeNextChunk: chunkSize %ld, first frame length %d\n", chunkSize, len);
		// len == -1 when not at frame start
		// len == chunkSize for mp3 reader (delivers single frames)
		if (len < (int)chunkSize) {
//			FIXME: join a few chunks to create a larger (2k) buffer, and use:
//			while (chunkSize > 100) {
//				if (IsValidStream((uint8 *)chunkBuffer, chunkSize))
			while (chunkSize >= 4) {
				if (GetFrameLength(chunkBuffer) > 0)
					break;
				chunkBuffer = (uint8 *)chunkBuffer + 1;
				chunkSize--;
			}
//			if (chunkSize <= 100) {
			if (chunkSize == 3) {	
				TRACE("mp3Decoder::DecodeNextChunk: Sync failed\n");
				return B_ERROR;
			}
			TRACE("mp3Decoder::DecodeNextChunk: new chunkSize %ld, first frame length %d\n", chunkSize, GetFrameLength(chunkBuffer));
		}
		fNeedSync = false;
	}
	
	result = decodeMP3(&fMpgLibPrivate, (char *)chunkBuffer, chunkSize, (char *)fDecodeBuffer, DECODE_BUFFER_SIZE, &outsize);
	if (result == MP3_NEED_MORE) {
		TRACE("mp3Decoder::DecodeNextChunk: decodeMP3 returned MP3_NEED_MORE\n");
		fResidualBuffer = NULL;
		fResidualBytes = 0;
		return B_OK;
	} else if (result != MP3_OK) {
		TRACE("mp3Decoder::DecodeNextChunk: decodeMP3 returned error %d\n", result);
		return B_ERROR;
//		fNeedSync = true;
//		fResidualBuffer = NULL;
//		fResidualBytes = 0;
//		return B_OK;
	}
		
	//printf("mp3Decoder::Decode: decoded %d bytes into %d bytes\n",chunkSize, outsize);
		
	fResidualBuffer = fDecodeBuffer;
	fResidualBytes = outsize;
	
	return B_OK;
}
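
A stand-alone sketch of the resync scan performed above after a seek: the chunk is advanced one byte at a time until a plausible frame header is found. The GetFrameLength callback stands in for mp3Decoder::GetFrameLength(), which is assumed to return a positive length only at a valid frame start.

#include <cstddef>
#include <cstdint>

// Scan forward for the first plausible MPEG audio frame header; returns
// NULL when fewer than 4 bytes remain and no header was found.
static const uint8_t*
ResyncToFrameStart(const uint8_t* buffer, size_t& size,
	int (*GetFrameLength)(const void* header))
{
	while (size >= 4) {                 // a frame header needs 4 bytes
		if (GetFrameLength(buffer) > 0)
			return buffer;              // looks like a frame boundary
		buffer++;                       // otherwise skip one byte...
		size--;                         // ...and keep scanning
	}
	return NULL;
}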
Example #8
// Called on start/end of macro examination
void XLPOnMacro(int macroId, int no_args, bool start)
{
  wxChar buf[100];
  switch (macroId)
  {
  case ltCHAPTER:
  case ltCHAPTERSTAR:
  case ltCHAPTERHEADING:
  {
    if (!start)
    {
      sectionNo = 0;
      figureNo = 0;
      subsectionNo = 0;
      subsubsectionNo = 0;
      if (macroId != ltCHAPTERSTAR)
        chapterNo ++;

      SetCurrentOutputs(Contents, Chapters);
      long id1 = NewBlockId();
      currentBlockId = NewBlockId();

      startedSections = true;
      wxFprintf(Contents, _T("\\hy-%d{%ld}{"), hyBLOCK_SMALL_HEADING, id1);
      wxFprintf(Chapters, _T("\n\\hy-%d{%ld}{"), hyBLOCK_LARGE_VISIBLE_SECTION, currentBlockId);
      wxFprintf(Index, _T("%ld %ld\n"), id1, currentBlockId);

      OutputCurrentSection(); // Repeat section header

      wxFprintf(Contents, _T("}\n\n"));
      wxFprintf(Chapters, _T("}\n\n"));
      SetCurrentOutput(Chapters);
      wxChar *topicName = FindTopicName(GetNextChunk());
      hyperLabels.Append(topicName, (wxObject *)currentBlockId);
    }
    break;
  }
  case ltSECTION:
  case ltSECTIONSTAR:
  case ltSECTIONHEADING:
  case ltGLOSS:
  {
    if (!start)
    {
      subsectionNo = 0;
      subsubsectionNo = 0;

      if (macroId != ltSECTIONSTAR)
        sectionNo ++;

      SetCurrentOutputs(Chapters, Sections);
      long id1 = NewBlockId();
      currentBlockId = NewBlockId();

      startedSections = true;

      if (DocumentStyle == LATEX_ARTICLE)
        wxFprintf(Contents, _T("\\hy-%d{%ld}{"), hyBLOCK_LARGE_HEADING, id1);
      else
        wxFprintf(Chapters, _T("\\hy-%d{%ld}{"), hyBLOCK_BOLD, id1);
      wxFprintf(Sections, _T("\n\\hy-%d{%ld}{"), hyBLOCK_LARGE_VISIBLE_SECTION, currentBlockId);
      wxFprintf(Index, _T("%ld %ld\n"), id1, currentBlockId);

      OutputCurrentSection(); // Repeat section header

      if (DocumentStyle == LATEX_ARTICLE)
        wxFprintf(Contents, _T("}\n\n"));
      else
        wxFprintf(Chapters, _T("}\n\n"));
      wxFprintf(Sections, _T("}\n\n"));
      SetCurrentOutput(Sections);
      wxChar *topicName = FindTopicName(GetNextChunk());
      hyperLabels.Append(topicName, (wxObject *)currentBlockId);
    }
    break;
  }
  case ltSUBSECTION:
  case ltSUBSECTIONSTAR:
  case ltMEMBERSECTION:
  case ltFUNCTIONSECTION:
  {
    if (!start)
    {
      subsubsectionNo = 0;

      if (macroId != ltSUBSECTIONSTAR)
        subsectionNo ++;

      SetCurrentOutputs(Sections, Subsections);
      long id1 = NewBlockId();
      currentBlockId = NewBlockId();
      wxFprintf(Sections, _T("\\hy-%d{%ld}{"), hyBLOCK_BOLD, id1);
      wxFprintf(Subsections, _T("\n\\hy-%d{%ld}{"), hyBLOCK_LARGE_VISIBLE_SECTION, currentBlockId);
      wxFprintf(Index, _T("%ld %ld\n"), id1, currentBlockId);

      OutputCurrentSection(); // Repeat section header

      wxFprintf(Sections, _T("}\n\n"));
      wxFprintf(Subsections, _T("}\n\n"));
      SetCurrentOutput(Subsections);
      wxChar *topicName = FindTopicName(GetNextChunk());
      hyperLabels.Append(topicName, (wxObject *)currentBlockId);
    }
    break;
  }
  case ltSUBSUBSECTION:
  case ltSUBSUBSECTIONSTAR:
  {
    if (!start)
    {
      if (macroId != ltSUBSUBSECTIONSTAR)
        subsubsectionNo ++;

      SetCurrentOutputs(Subsections, Subsubsections);
      long id1 = NewBlockId();
      currentBlockId = NewBlockId();
      wxFprintf(Subsections, _T("\\hy-%d{%ld}{"), hyBLOCK_BOLD, id1);
      wxFprintf(Subsubsections, _T("\n\\hy-%d{%ld}{"), hyBLOCK_LARGE_VISIBLE_SECTION, currentBlockId);
      wxFprintf(Index, _T("%ld %ld\n"), id1, currentBlockId);

      OutputCurrentSection(); // Repeat section header

      wxFprintf(Subsections, _T("}\n\n"));
      wxFprintf(Subsubsections, _T("}\n\n"));
      SetCurrentOutput(Subsubsections);
      wxChar *topicName = FindTopicName(GetNextChunk());
      hyperLabels.Append(topicName, (wxObject *)currentBlockId);
    }
    break;
  }
  case ltFUNC:
  case ltPFUNC:
  case ltMEMBER:
  {
    SetCurrentOutput(Subsections);
    if (start)
    {
      long id = NewBlockId();
      wxFprintf(Subsections, _T("\\hy-%d{%ld}{"), hyBLOCK_BOLD, id);
    }
    else
      wxFprintf(Subsections, _T("}"));
    break;
  }
  case ltVOID:
//    if (start)
//      TexOutput(_T("void"), true);
    break;
  case ltBACKSLASHCHAR:
    if (start)
      TexOutput(_T("\n"), true);
    break;
  case ltPAR:
  {
    if (start)
    {
      if (ParSkip > 0)
        TexOutput(_T("\n"), true);
      TexOutput(_T("\n"), true);
    }
    break;
  }
  case ltRMFAMILY:
  case ltTEXTRM:
  case ltRM:
  {
    break;
  }
  case ltTEXTBF:
  case ltBFSERIES:
  case ltBF:
  {
    if (start)
    {
      wxChar buf[100];
      long id = NewBlockId();
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_BOLD, id);
      TexOutput(buf);
    }
    else TexOutput(_T("}"));
    break;
  }
  case ltTEXTIT:
  case ltITSHAPE:
  case ltIT:
  {
    if (start)
    {
      wxChar buf[100];
      long id = NewBlockId();
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_ITALIC, id);
      TexOutput(buf);
    }
    else TexOutput(_T("}"));
    break;
  }
  case ltTTFAMILY:
  case ltTEXTTT:
  case ltTT:
  {
    if (start)
    {
      long id = NewBlockId();
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_TELETYPE, id);
      TexOutput(buf);
    }
    else TexOutput(_T("}"));
    break;
  }
  case ltSMALL:
  {
    if (start)
    {
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_SMALL_TEXT, NewBlockId());
      TexOutput(buf);
    }
    else TexOutput(_T("}"));
    break;
  }
  case ltTINY:
  {
    if (start)
    {
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_SMALL_TEXT, NewBlockId());
      TexOutput(buf);
    }
    else TexOutput(_T("}"));
    break;
  }
  case ltNORMALSIZE:
  {
    if (start)
    {
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_NORMAL, NewBlockId());
      TexOutput(buf);
    }
    else TexOutput(_T("}"));
    break;
  }
  case ltlarge:
  {
    if (start)
    {
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_SMALL_HEADING, NewBlockId());
      TexOutput(buf);
    }
    else TexOutput(_T("}\n"));
    break;
  }
  case ltLARGE:
  {
    if (start)
    {
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_LARGE_HEADING, NewBlockId());
      TexOutput(buf);
    }
    else TexOutput(_T("}\n"));
    break;
  }
  case ltITEMIZE:
  case ltENUMERATE:
  case ltDESCRIPTION:
  case ltTWOCOLLIST:
  {
    if (start)
    {
//      tabCount ++;

//      if (indentLevel > 0)
//        TexOutput(_T("\\par\\par\n"));
      indentLevel ++;
      int listType;
      if (macroId == ltENUMERATE)
        listType = LATEX_ENUMERATE;
      else if (macroId == ltITEMIZE)
        listType = LATEX_ITEMIZE;
      else
        listType = LATEX_DESCRIPTION;
      itemizeStack.Insert(new ItemizeStruc(listType));

    }
    else
    {
      indentLevel --;

      if (itemizeStack.GetFirst())
      {
        ItemizeStruc *struc = (ItemizeStruc *)itemizeStack.GetFirst()->GetData();
        delete struc;
        delete itemizeStack.GetFirst();
      }
    }
    break;
  }
  case ltITEM:
  {
    wxNode *node = itemizeStack.GetFirst();
    if (node)
    {
      ItemizeStruc *struc = (ItemizeStruc *)node->GetData();
      if (!start)
      {
        struc->currentItem += 1;
        wxChar indentBuf[30];

        switch (struc->listType)
        {
          case LATEX_ENUMERATE:
          {
            wxSnprintf(indentBuf, sizeof(indentBuf), _T("\\hy-%d{%ld}{%d.} "),
              hyBLOCK_BOLD, NewBlockId(), struc->currentItem);
            TexOutput(indentBuf);
            break;
          }
          case LATEX_ITEMIZE:
          {
            wxSnprintf(indentBuf, sizeof(indentBuf), _T("\\hy-%d{%ld}{o} "),
              hyBLOCK_BOLD, NewBlockId());
            TexOutput(indentBuf);
            break;
          }
          default:
          case LATEX_DESCRIPTION:
          {
            if (descriptionItemArg)
            {
              wxSnprintf(indentBuf, sizeof(indentBuf), _T("\\hy-%d{%ld}{"),
                 hyBLOCK_BOLD, NewBlockId());
              TexOutput(indentBuf);
              TraverseChildrenFromChunk(descriptionItemArg);
              TexOutput(_T("} "));
              descriptionItemArg = NULL;
            }
            break;
          }
        }
      }
    }
    break;
  }
  case ltMAKETITLE:
  {
    if (start && DocumentTitle && DocumentAuthor)
    {
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_LARGE_HEADING, NewBlockId());
      TexOutput(buf);
      TraverseChildrenFromChunk(DocumentTitle);
      TexOutput(_T("}\n\n"));
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_SMALL_HEADING, NewBlockId());
      TexOutput(buf);
      TraverseChildrenFromChunk(DocumentAuthor);
      TexOutput(_T("}\n\n"));
      if (DocumentDate)
      {
        TraverseChildrenFromChunk(DocumentDate);
        TexOutput(_T("\n"));
      }
    }
    break;
  }
  case ltTABLEOFCONTENTS:
  {
    if (start)
    {
      FILE *fd = wxFopen(ContentsName, _T("r"));
      if (fd)
      {
        int ch = getc(fd);
        while (ch != EOF)
        {
          wxPutc(ch, Chapters);
          ch = getc(fd);
        }
        fclose(fd);
      }
      else
      {
        TexOutput(_T("RUN TEX2RTF AGAIN FOR CONTENTS PAGE\n"));
        OnInform(_T("Run Tex2RTF again to include contents page."));
      }
    }
    break;
  }
  case ltHARDY:
  {
    if (start)
      TexOutput(_T("HARDY"), true);
    break;
  }
  case ltWXCLIPS:
  {
    if (start)
      TexOutput(_T("wxCLIPS"), true);
    break;
  }
  case ltVERBATIM:
  {
    if (start)
    {
      wxChar buf[100];
      long id = NewBlockId();
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_TELETYPE, id);
      TexOutput(buf);
    }
    else TexOutput(_T("}"));
    break;
  }
  case ltHRULE:
  {
    if (start)
    {
      TexOutput(_T("\n------------------------------------------------------------------"), true);
    }
    break;
  }
  case ltHLINE:
  {
    if (start)
    {
      TexOutput(_T("--------------------------------------------------------------------------------"), true);
    }
    break;
  }
  case ltSPECIALAMPERSAND:
  {
    if (start)
    {
      currentTab ++;
      int tabPos = (80/noColumns)*currentTab;
      PadToTab(tabPos);
    }
    break;
  }
  case ltTABULAR:
  case ltSUPERTABULAR:
  {
    if (start)
    {
      wxSnprintf(buf, sizeof(buf), _T("\\hy-%d{%ld}{"), hyBLOCK_TELETYPE, NewBlockId());
      TexOutput(buf);
    }
    else
      TexOutput(_T("}"));
    break;
  }
  case ltNUMBEREDBIBITEM:
  {
    if (!start)
      TexOutput(_T("\n\n"), true);
    break;
  }
  case ltCAPTION:
  case ltCAPTIONSTAR:
  {
    if (start)
    {
      figureNo ++;

      wxChar figBuf[40];
      if (DocumentStyle != LATEX_ARTICLE)
        wxSnprintf(figBuf, sizeof(figBuf), _T("Figure %d.%d: "), chapterNo, figureNo);
      else
        wxSnprintf(figBuf, sizeof(figBuf), _T("Figure %d: "), figureNo);

      TexOutput(figBuf);
    }
    else
    {
      wxChar *topicName = FindTopicName(GetNextChunk());

      AddTexRef(topicName, NULL, NULL,
           ((DocumentStyle != LATEX_ARTICLE) ? chapterNo : figureNo),
            ((DocumentStyle != LATEX_ARTICLE) ? figureNo : 0));
    }
    break;
  }
  default:
  {
    DefaultOnMacro(macroId, no_args, start);
    break;
  }
  }
}