Example #1
0
int main(int argc, char const *argv[])
{
	// Demonstrate a Huffman round trip on a fixed sample string:
	// build the tree, encode, print, decode, print.
	const char *sample = "aabcdaaa";
	const int tree_size = HuffmanInit(sample);

	// The encoder fills in the per-character code table as a side effect.
	std::map<char, std::string> table;
	const std::string compressed = HuffmanEncode(tree_size, sample, table);
	std::cout << "encoded string : " << compressed << std::endl;

	// Decoding the compressed bits should reproduce the original sample.
	const std::string restored = HuffmanDecode(compressed);
	std::cout << "decoded string : " << restored << std::endl;

	return 0;
}
// Decompress one CIN video frame.
//
// Input sample layout: a 4-byte command DWORD, optionally followed by a
// 256-entry RGB palette (3 bytes per entry) when the command is
// CIN_COMMAND_PALETTE, followed by the Huffman-compressed frame data.
// The decoded frame is one byte per pixel (palettized), so the output
// length is dwVideoWidth * dwVideoHeight.
//
// Parameters:
//   pIn  - input (compressed) media sample
//   pOut - output (decompressed) media sample
// Returns NOERROR on success, E_POINTER / E_UNEXPECTED / E_FAIL or a
// propagated HRESULT on failure.
HRESULT CCINVideoDecompressor::Transform(
	IMediaSample *pIn,
	IMediaSample *pOut
)
{
	// Check and validate the pointers
	CheckPointer(pIn, E_POINTER);
	ValidateReadPtr(pIn, sizeof(IMediaSample));
	CheckPointer(pOut, E_POINTER);
	ValidateReadPtr(pOut, sizeof(IMediaSample));

	// Get the input sample's buffer
	BYTE *pbInBuffer = NULL;
	HRESULT hr = pIn->GetPointer(&pbInBuffer);
	if (FAILED(hr))
		return hr;

	// The sample must contain at least the 4-byte frame command
	// before we may dereference it below
	LONG lInDataLength = pIn->GetActualDataLength();
	if (lInDataLength < (LONG)sizeof(DWORD))
		return E_UNEXPECTED;

	// Check if the frame contains new palette
	BYTE *pbHuffmanData = NULL;
	if (*((DWORD*)pbInBuffer) == CIN_COMMAND_PALETTE) {

		// A palette frame must also carry a full 256-entry RGB palette
		if (lInDataLength < (LONG)(sizeof(DWORD) + 256 * 3))
			return E_UNEXPECTED;

		// Get the palette pointer from the input data
		BYTE *pbPalette = pbInBuffer + 4;

		// Get the output media type format
		CMediaType mt((AM_MEDIA_TYPE)m_pOutput->CurrentMediaType());
		VIDEOINFO *pVideoInfo = (VIDEOINFO*)mt.Format();

		// Fill in the output media type format palette.
		// The in-stream palette is stored as R,G,B triplets
		for (int i = 0; i < 256; i++) {
			pVideoInfo->bmiColors[i].rgbRed			= *pbPalette++;
			pVideoInfo->bmiColors[i].rgbGreen		= *pbPalette++;
			pVideoInfo->bmiColors[i].rgbBlue		= *pbPalette++;
			pVideoInfo->bmiColors[i].rgbReserved	= 0;
		}

		// Set the changed media type for the output sample
		hr = pOut->SetMediaType(&mt);
		if (FAILED(hr))
			return hr;

		// Set up Huffman data pointer (pbPalette now points
		// just past the 768 palette bytes)
		pbHuffmanData = pbPalette;

	} else
		pbHuffmanData = pbInBuffer + 4;

	// Set up Huffman count: whatever of the sample follows the
	// command (and optional palette). The length checks above
	// guarantee this is non-negative
	LONG lHuffmanCount = lInDataLength;
	lHuffmanCount -= (LONG)(pbHuffmanData - pbInBuffer);

	// Get the output sample's buffer
	BYTE *pbOutBuffer = NULL;
	hr = pOut->GetPointer(&pbOutBuffer);
	if (FAILED(hr))
		return hr;

	// The decoder writes one byte per pixel of the uncompressed
	// frame — make sure the output buffer can hold it before
	// decoding, so a mis-negotiated allocator cannot be overrun
	LONG lOutDataLength = m_pFormat->dwVideoWidth * m_pFormat->dwVideoHeight;
	if (pOut->GetSize() < lOutDataLength)
		return E_UNEXPECTED;

	// Call the decoder function to decompress the frame
	if (!HuffmanDecode(pbHuffmanData, lHuffmanCount, pbOutBuffer))
		return E_FAIL;

	// Set the data length for the output sample. 
	// The data length is the uncompressed frame size
	hr = pOut->SetActualDataLength(lOutDataLength);
	if (FAILED(hr))
		return hr;

	// Each RGB frame is a sync point
	hr = pOut->SetSyncPoint(TRUE);
	if (FAILED(hr))
		return hr;

	// RGB sample should never be a preroll one
	hr = pOut->SetPreroll(FALSE);
	if (FAILED(hr))
		return hr;

	// We rely on the upstream filter (which is most likely 
	// a parser or splitter) in the matter of stream and media 
	// times setting. As to the discontinuity property, we should
	// not drop samples, so we just retain this property's value 
	// set by the upstream filter

	return NOERROR;
}