Example #1
void LLImageRaw::composite( LLImageRaw* src )
{
	LLImageRaw* dst = this;  // Just for clarity.

	llassert(3 == src->getComponents() || 4 == src->getComponents());
	llassert(3 == dst->getComponents());

	if( 3 == dst->getComponents() )
	{
		if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
		{
			// No scaling needed
			if( 3 == src->getComponents() )
			{
				copyUnscaled( src );  // alpha is one so just copy the data.
			}
			else
			{
				compositeUnscaled4onto3( src );
			}
		}
		else
		{
			if( 3 == src->getComponents() )
			{
				copyScaled( src );  // alpha is one so just copy the data.
			}
			else
			{
				compositeScaled4onto3( src );
			}
		}
	}
}
// Src and dst can be any size.  Src has 4 components.  Dst has 3 components.
void LLImageRaw::compositeScaled4onto3(LLImageRaw* src)
{
	LLMemType mt1(mMemType);
	llinfos << "compositeScaled4onto3" << llendl;

	LLImageRaw* dst = this;  // Just for clarity.

	llassert( (4 == src->getComponents()) && (3 == dst->getComponents()) );

	S32 temp_data_size = src->getWidth() * dst->getHeight() * src->getComponents();
	llassert_always(temp_data_size > 0);
	std::vector<U8> temp_buffer(temp_data_size);

	// Vertical: scale but no composite
	for( S32 col = 0; col < src->getWidth(); col++ )
	{
		copyLineScaled( src->getData() + (src->getComponents() * col), &temp_buffer[0] + (src->getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
	}

	// Horizontal: scale and composite
	for( S32 row = 0; row < dst->getHeight(); row++ )
	{
		compositeRowScaled4onto3( &temp_buffer[0] + (src->getComponents() * src->getWidth() * row), dst->getData() + (dst->getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth() );
	}
}
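The two loops above implement a separable resize: a vertical pass into a temporary buffer, then a horizontal pass (with compositing) into the destination. The helper copyLineScaled() is defined elsewhere in llimage and performs filtered scaling; purely as an illustration of the same two-pass structure, a self-contained nearest-neighbour version on an interleaved buffer might look like this (the typedefs stand in for the viewer's lltypes.h):

#include <cstddef>
#include <vector>

typedef unsigned char U8;   // stand-ins for the viewer's lltypes.h
typedef int S32;

// Illustration only: separable scaling, vertical pass into a temp buffer, then horizontal.
static void scaleNearestSeparable(const U8* src, S32 sw, S32 sh,
                                  U8* dst, S32 dw, S32 dh, S32 comps)
{
	std::vector<U8> temp((size_t)sw * dh * comps);

	// Vertical: src (sw x sh) -> temp (sw x dh)
	for (S32 y = 0; y < dh; y++)
	{
		S32 sy = (S32)((long long)y * sh / dh);
		for (S32 x = 0; x < sw * comps; x++)
		{
			temp[(size_t)y * sw * comps + x] = src[(size_t)sy * sw * comps + x];
		}
	}

	// Horizontal: temp (sw x dh) -> dst (dw x dh)
	for (S32 y = 0; y < dh; y++)
	{
		for (S32 x = 0; x < dw; x++)
		{
			S32 sx = (S32)((long long)x * sw / dw);
			for (S32 c = 0; c < comps; c++)
			{
				dst[((size_t)y * dw + x) * comps + c] = temp[((size_t)y * sw + sx) * comps + c];
			}
		}
	}
}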
Example #3
void LocalAssetBrowser::PerformSculptUpdates(LocalBitmap* unit)
{

	/* looking only for objects that use this sculptmap */
	std::vector<affected_object> object_list = unit->getUsingObjects(false, false, true);
	if (object_list.empty()) { return; }

	for( std::vector<affected_object>::iterator iter = object_list.begin();
		 iter != object_list.end(); iter++ )
	{
		affected_object aobj = *iter;
		if ( aobj.object )
		{
			if ( !aobj.local_sculptmap ) { continue; } // should never get here. only in case of misuse.
			
			// update code [begin]
			if ( unit->volume_dirty )
			{
				LLImageRaw* rawimage = gTextureList.findImage( unit->getID() )->getCachedRawImage();

				aobj.object->getVolume()->sculpt(rawimage->getWidth(), rawimage->getHeight(), 
												  rawimage->getComponents(), rawimage->getData(), 0);	
				unit->volume_dirty = false;
			}

			// tell the affected drawable it has been updated
			aobj.object->mDrawable->getVOVolume()->setSculptChanged( true );
			aobj.object->mDrawable->getVOVolume()->markForUpdate( true );
			// update code [end]
		}
			
	}

}
Example #4
// Src and dst can be any size.  Src has 4 components.  Dst has 3 components.
void LLImageRaw::compositeScaled4onto3(LLImageRaw* src)
{
	LLMemType mt1((LLMemType::EMemType)mMemType);
	llinfos << "compositeScaled4onto3" << llendl;

	LLImageRaw* dst = this;  // Just for clarity.

	llassert( (4 == src->getComponents()) && (3 == dst->getComponents()) );

	// Vertical: scale but no composite
	S32 temp_data_size = src->getWidth() * dst->getHeight() * src->getComponents();
	U8* temp_buffer = new U8[ temp_data_size ];
	for( S32 col = 0; col < src->getWidth(); col++ )
	{
		copyLineScaled( src->getData() + (src->getComponents() * col), temp_buffer + (src->getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
	}

	// Horizontal: scale and composite
	for( S32 row = 0; row < dst->getHeight(); row++ )
	{
		compositeRowScaled4onto3( temp_buffer + (src->getComponents() * src->getWidth() * row), dst->getData() + (dst->getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth() );
	}

	// Clean up
	delete[] temp_buffer;
}
Example #5
// Src and dst are same size.  Src and dst have same number of components.
void LLImageRaw::copyUnscaled(LLImageRaw* src)
{
	LLImageRaw* dst = this;  // Just for clarity.

	llassert( (1 == src->getComponents()) || (3 == src->getComponents()) || (4 == src->getComponents()) );
	llassert( src->getComponents() == dst->getComponents() );
	llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );

	memcpy( dst->getData(), src->getData(), getWidth() * getHeight() * getComponents() );	/* Flawfinder: ignore */
}
Example #6
// Src and dst are same size.  Src has 4 components.  Dst has 3 components.
void LLImageRaw::compositeUnscaled4onto3( LLImageRaw* src )
{
	/*
	//test fastFractionalMult()
	{
		U8 i = 255;
		U8 j = 255;
		do
		{
			do
			{
				llassert( fastFractionalMult(i, j) == (U8)(255*(i/255.f)*(j/255.f) + 0.5f) );
			} while( j-- );
		} while( i-- );
	}
	*/

	LLImageRaw* dst = this;  // Just for clarity.

	llassert( (3 == dst->getComponents()) && (4 == src->getComponents()) );
	llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );


	U8* src_data = src->getData();
	U8* dst_data = dst->getData();
	S32 pixels = getWidth() * getHeight();
	while( pixels-- )
	{
		U8 alpha = src_data[3];
		if( alpha )
		{
			if( 255 == alpha )
			{
				dst_data[0] = src_data[0];
				dst_data[1] = src_data[1];
				dst_data[2] = src_data[2];
			}
			else
			{

				U8 transparency = 255 - alpha;
				dst_data[0] = fastFractionalMult( dst_data[0], transparency ) + fastFractionalMult( src_data[0], alpha );
				dst_data[1] = fastFractionalMult( dst_data[1], transparency ) + fastFractionalMult( src_data[1], alpha );
				dst_data[2] = fastFractionalMult( dst_data[2], transparency ) + fastFractionalMult( src_data[2], alpha );
			}
		}

		src_data += 4;
		dst_data += 3;
	}
}
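fastFractionalMult() is defined elsewhere in the viewer; the disabled self-test above pins down its contract: fastFractionalMult(a, b) == (U8)(255*(a/255.f)*(b/255.f) + 0.5f), i.e. round(a*b/255). One branch-free sketch that satisfies that identity (not necessarily the viewer's own definition):

typedef unsigned char U8;   // stand-ins for the viewer's lltypes.h
typedef unsigned int  U32;

inline U8 fastFractionalMult(U8 a, U8 b)
{
	U32 i = a * b + 128U;               // a*b/255 with rounding, no divide
	return (U8)((i + (i >> 8)) >> 8);
}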
Example #7
// Src and dst can be any size.  Src and dst have same number of components.
void LLImageRaw::copyScaled( LLImageRaw* src )
{
	LLImageRaw* dst = this;  // Just for clarity.

	llassert_always( (1 == src->getComponents()) || (3 == src->getComponents()) || (4 == src->getComponents()) );
	llassert_always( src->getComponents() == dst->getComponents() );

	if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
	{
		memcpy( dst->getData(), src->getData(), getWidth() * getHeight() * getComponents() );	/* Flawfinder: ignore */
		return;
	}

	S32 temp_data_size = src->getWidth() * dst->getHeight() * getComponents();
	llassert_always(temp_data_size > 0);
	std::vector<U8> temp_buffer(temp_data_size);

	// Vertical
	for( S32 col = 0; col < src->getWidth(); col++ )
	{
		copyLineScaled( src->getData() + (getComponents() * col), &temp_buffer[0] + (getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
	}

	// Horizontal
	for( S32 row = 0; row < dst->getHeight(); row++ )
	{
		copyLineScaled( &temp_buffer[0] + (getComponents() * src->getWidth() * row), dst->getData() + (getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth(), 1, 1 );
	}
}
Example #8
// Src and dst can be any size.  Src and dst have same number of components.
void LLImageRaw::copyScaled( LLImageRaw* src )
{
	LLMemType mt1((LLMemType::EMemType)mMemType);
	LLImageRaw* dst = this;  // Just for clarity.

	llassert_always( (1 == src->getComponents()) || (3 == src->getComponents()) || (4 == src->getComponents()) );
	llassert_always( src->getComponents() == dst->getComponents() );

	if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
	{
		memcpy( dst->getData(), src->getData(), getWidth() * getHeight() * getComponents() );	/* Flawfinder: ignore */
		return;
	}

	// Vertical
	S32 temp_data_size = src->getWidth() * dst->getHeight() * getComponents();
	llassert_always(temp_data_size > 0);
	U8* temp_buffer = new U8[ temp_data_size ];
	for( S32 col = 0; col < src->getWidth(); col++ )
	{
		copyLineScaled( src->getData() + (getComponents() * col), temp_buffer + (getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
	}

	// Horizontal
	for( S32 row = 0; row < dst->getHeight(); row++ )
	{
		copyLineScaled( temp_buffer + (getComponents() * src->getWidth() * row), dst->getData() + (getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth(), 1, 1 );
	}

	// Clean up
	delete[] temp_buffer;
}
Example #9
// Src and dst can be any size.  Src and dst can each have 3 or 4 components.
void LLImageRaw::copy(LLImageRaw* src)
{
	if (!src)
	{
		LL_WARNS() << "LLImageRaw::copy called with a null src pointer" << LL_ENDL;
		return;
	}

	LLImageRaw* dst = this;  // Just for clarity.

	if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
	{
		// No scaling needed
		if( src->getComponents() == dst->getComponents() )
		{
			copyUnscaled( src );
		}
		else
		if( 3 == src->getComponents() )
		{
			copyUnscaled3onto4( src );
		}
		else
		{
			// 4 == src->getComponents()
			copyUnscaled4onto3( src );
		}
	}
	else
	{
		// Scaling needed
		if( src->getComponents() == dst->getComponents() )
		{
			copyScaled( src );
		}
		else
		if( 3 == src->getComponents() )
		{
			copyScaled3onto4( src );
		}
		else
		{
			// 4 == src->getComponents()
			copyScaled4onto3( src );
		}
	}
}
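A hypothetical usage sketch of the dispatch above. It assumes the usual LLImageRaw(width, height, components) constructor and the viewer's LLPointer smart pointer; those names are taken on trust from the surrounding codebase and are not shown in this listing.

// Hypothetical usage; constructor signature assumed from the viewer's llimage API.
LLPointer<LLImageRaw> src = new LLImageRaw(256, 256, 4);  // RGBA source
LLPointer<LLImageRaw> dst = new LLImageRaw(128, 128, 3);  // smaller RGB destination
dst->copy(src);  // sizes differ and src has 4 components, so this dispatches to copyScaled4onto3()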
Example #10
// Src and dst are same size.  Src has 4 components.  Dst has 3 components.
void LLImageRaw::copyUnscaled4onto3( LLImageRaw* src )
{
	LLImageRaw* dst = this;  // Just for clarity.

	llassert( (3 == dst->getComponents()) && (4 == src->getComponents()) );
	llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );

	S32 pixels = getWidth() * getHeight();
	U8* src_data = src->getData();
	U8* dst_data = dst->getData();
	for( S32 i=0; i<pixels; i++ )
	{
		dst_data[0] = src_data[0];
		dst_data[1] = src_data[1];
		dst_data[2] = src_data[2];
		src_data += 4;
		dst_data += 3;
	}
}
Example #11
void LLImageRaw::copyUnscaledAlphaMask( LLImageRaw* src, const LLColor4U& fill)
{
	LLImageRaw* dst = this;  // Just for clarity.

	llassert( 1 == src->getComponents() );
	llassert( 4 == dst->getComponents() );
	llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );

	S32 pixels = getWidth() * getHeight();
	U8* src_data = src->getData();
	U8* dst_data = dst->getData();
	for ( S32 i = 0; i < pixels; i++ )
	{
		dst_data[0] = fill.mV[0];
		dst_data[1] = fill.mV[1];
		dst_data[2] = fill.mV[2];
		dst_data[3] = src_data[0];
		src_data += 1;
		dst_data += 4;
	}
}
void LLLocalBitmapBrowser::performSculptUpdates(LLLocalBitmap* unit)
{

	/* looking only for objects that use this sculptmap */
	std::vector<LLAffectedObject> object_list = unit->getUsingObjects(false, false, true);
	if (object_list.empty()) { return; }

	for( std::vector<LLAffectedObject>::iterator iter = object_list.begin();
		 iter != object_list.end(); iter++ )
	{
		LLAffectedObject aobj = *iter;
		if ( aobj.object )
		{
			if ( !aobj.local_sculptmap ) { continue; } // should never get here. only in case of misuse.
			
			// update code [begin]   
			if ( unit->mVolumeDirty )
			{
				LLImageRaw* rawimage = gTextureList.findImage( unit->getID() )->getCachedRawImage();

				LLVolumeParams params = aobj.object->getVolume()->getParams();
				LLVolumeLODGroup* lodgroup = aobj.object->mDrawable->getVOVolume()->getVolumeManager()->getGroup(params);
			
				for (S32 i = 0; i < LLVolumeLODGroup::NUM_LODS; i++)
				{
					LLVolume* vol = lodgroup->getVolByLOD(i);

					if (vol)
						{ vol->sculpt(rawimage->getWidth(), rawimage->getHeight(), rawimage->getComponents(), rawimage->getData(), 0); }
				}

				// doing this again to fix the weirdness with selected-for-edit objects not updating otherwise.
				aobj.object->getVolume()->sculpt(rawimage->getWidth(), rawimage->getHeight(), 
												  rawimage->getComponents(), rawimage->getData(), 0);

				unit->mVolumeDirty = false;
			}

			aobj.object->mDrawable->getVOVolume()->setSculptChanged( true ); 
			aobj.object->mDrawable->getVOVolume()->markForUpdate( true );
			// update code [end]
		}
			
	}

}
Example #13
// Src and dst can be any size.  Src and dst can each have 3 or 4 components.
void LLImageRaw::copy(LLImageRaw* src)
{
	if (!src)
	{
		llwarns << "LLImageRaw::copy called with a null src pointer" << llendl;
		return;
	}

	LLImageRaw* dst = this;  // Just for clarity.

	llassert( (3 == src->getComponents()) || (4 == src->getComponents()) );
	llassert( (3 == dst->getComponents()) || (4 == dst->getComponents()) );

	if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
	{
		// No scaling needed
		if( src->getComponents() == dst->getComponents() )
		{
			copyUnscaled( src );
		}
		else
		if( 3 == src->getComponents() )
		{
			copyUnscaled3onto4( src );
		}
		else
		{
			// 4 == src->getComponents()
			copyUnscaled4onto3( src );
		}
	}
	else
	{
		// Scaling needed
		if( src->getComponents() == dst->getComponents() )
		{
			copyScaled( src );
		}
		else
		if( 3 == src->getComponents() )
		{
			copyScaled3onto4( src );
		}
		else
		{
			// 4 == src->getComponents()
			copyScaled4onto3( src );
		}
	}
}
// Src and dst can be any size.  Src and dst can each have 3 or 4 components.
void LLImageRaw::copy(LLImageRaw* src)
{
	LLImageRaw* dst = this;  // Just for clarity.

	llassert( (3 == src->getComponents()) || (4 == src->getComponents()) );
	llassert( (3 == dst->getComponents()) || (4 == dst->getComponents()) );

	if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
	{
		// No scaling needed
		if( src->getComponents() == dst->getComponents() )
		{
			copyUnscaled( src );
		}
		else
		if( 3 == src->getComponents() )
		{
			copyUnscaled3onto4( src );
		}
		else
		{
			// 4 == src->getComponents()
			copyUnscaled4onto3( src );
		}
	}
	else
	{
		// Scaling needed
		if( src->getComponents() == dst->getComponents() )
		{
			copyScaled( src );
		}
		else
		if( 3 == src->getComponents() )
		{
			copyScaled3onto4( src );
		}
		else
		{
			// 4 == src->getComponents()
			copyScaled4onto3( src );
		}
	}
}
BOOL LLImageJ2COJ::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, S32 first_channel, S32 max_channel_count)
{
	raw_image.decodedComment = LLImageMetaDataReader::ExtractKDUUploadComment(base.getData(), base.getDataSize());

	LLTimer decode_timer;

	opj_dparameters_t parameters;	/* decompression parameters */
	opj_event_mgr_t event_mgr;		/* event manager */
	opj_image_t *image = NULL;

	opj_dinfo_t* dinfo = NULL;	/* handle to a decompressor */
	opj_cio_t *cio = NULL;


	/* configure the event callbacks (not required) */
	memset(&event_mgr, 0, sizeof(opj_event_mgr_t));
	event_mgr.error_handler = error_callback;
	event_mgr.warning_handler = warning_callback;
	event_mgr.info_handler = info_callback;

	/* set decoding parameters to default values */
	opj_set_default_decoder_parameters(&parameters);

	parameters.cp_reduce = base.getRawDiscardLevel();

	/* decode the code-stream */
	/* ---------------------- */

	/* JPEG-2000 codestream */

	/* get a decoder handle */
	dinfo = opj_create_decompress(CODEC_J2K);

	/* catch events using our callbacks and give a local context */
	opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr);			

	/* setup the decoder decoding parameters using user parameters */
	opj_setup_decoder(dinfo, &parameters);

	/* open a byte stream */
	cio = opj_cio_open((opj_common_ptr)dinfo, base.getData(), base.getDataSize());

	/* decode the stream and fill the image structure.
	   Also fill in an additional structure to get the decoding result.
	   This structure is a bit unusual in that it is not received through
	   opj, but still has some dynamically allocated fields that need to
	   be cleaned up at the end by calling a destroy function. */
	opj_codestream_info_t cinfo;
	memset(&cinfo, 0, sizeof(opj_codestream_info_t));
	image = opj_decode_with_info(dinfo, cio, &cinfo);

	/* close the byte stream */
	opj_cio_close(cio);

	/* free remaining structures */
	if(dinfo)
	{
		opj_destroy_decompress(dinfo);
	}

	// The image decode failed if the return was NULL or the component
	// count was zero.  The latter is just a sanity check before we
	// dereference the array.
	if(!image) 
	{
		LL_WARNS ("Openjpeg")  << "Failed to decode image at discard: " << (S32)base.getRawDiscardLevel() << ". No image." << LL_ENDL;
		if (base.getRawDiscardLevel() == 0)
		{
			base.decodeFailed();
		}
		return TRUE; // done
	}

	S32 img_components = image->numcomps;

	if( !img_components ) // < 1 ||img_components > 4 )
	{
		LL_WARNS("Openjpeg") << "Failed to decode image at discard: " << (S32)base.getRawDiscardLevel() << ". Wrong number of components: " << img_components << LL_ENDL;
		if (image)
		{
			opj_destroy_cstr_info(&cinfo);
			opj_image_destroy(image);
		}
		if (base.getRawDiscardLevel() == 0)
		{
			base.decodeFailed();
		}
		return TRUE; // done
	}

	// sometimes we get bad data out of the cache - check to see if the decode succeeded
	int decompdifference = 0;
	if (cinfo.numdecompos) // sanity
	{
		for (int comp = 0; comp < image->numcomps; comp++)
		{	
			/* get maximum decomposition level difference, first
			   field is from the COD header and the second
			   is what is actually met in the codestream, NB: if
			   everything was ok, this calculation will return
			   what was set in the cp_reduce value! */
			decompdifference = llmax(decompdifference, cinfo.numdecompos[comp] - image->comps[comp].resno_decoded);
		}
		if (decompdifference < 0) // sanity
		{
			decompdifference = 0;
		}
	}
	

	/* if OpenJPEG failed to decode all requested decomposition levels
	   the difference will be greater than this level */
	if (decompdifference > base.getRawDiscardLevel())
	{
		LL_WARNS("Openjpeg") << "Not enough data for requested discard level " << (S32)base.getRawDiscardLevel() << ", difference: " << (decompdifference - base.getRawDiscardLevel()) << llendl;
		opj_destroy_cstr_info(&cinfo);
		opj_image_destroy(image);
		if (base.getRawDiscardLevel() == 0)
		{
			base.decodeFailed();
		}
		return TRUE;
	}

	if(img_components <= first_channel)
	{
		LL_WARNS("Openjpeg") << "Trying to decode more channels than are present in image, numcomps: " << img_components << " first_channel: " << first_channel << LL_ENDL;
		if (image)
		{
			opj_destroy_cstr_info(&cinfo);
			opj_image_destroy(image);
		}
		if (base.getRawDiscardLevel() == 0)
		{
			base.decodeFailed();
		}
		return TRUE;
	}

	// Copy image data into our raw image format (instead of the separate channel format)


	S32 channels = img_components - first_channel;
	if( channels > max_channel_count )
		channels = max_channel_count;

	// Component buffers are allocated in an image width by height buffer.
	// The image placed in that buffer is ceil(width/2^factor) by
	// ceil(height/2^factor) and if the factor isn't zero it will be at the
	// top left of the buffer with black filled in the rest of the pixels.
	// It is integer math, so the formula is written with ceildivpow2.
	// (Assuming all the components have the same width, height and
	// factor.)
	S32 comp_width = image->comps[0].w;
	S32 f=image->comps[0].factor;
	S32 width = ceildivpow2(image->x1 - image->x0, f);
	S32 height = ceildivpow2(image->y1 - image->y0, f);
	raw_image.resize(width, height, channels);
	U8 *rawp = raw_image.getData();

	// first_channel is what channel to start copying from
	// dest is what channel to copy to.  first_channel comes from the
	// argument, dest always starts writing at channel zero.
	for (S32 comp = first_channel, dest=0; comp < first_channel + channels;
		comp++, dest++)
	{
		if (image->comps[comp].data)
		{
			S32 offset = dest;
			for (S32 y = (height - 1); y >= 0; y--)
			{
				for (S32 x = 0; x < width; x++)
				{
					rawp[offset] = image->comps[comp].data[y*comp_width + x];
					offset += channels;
				}
			}
		}
		else // Some rare OpenJPEG versions have this bug.
		{
			LL_WARNS("Openjpeg") << "Failed to decode image! (NULL comp data - OpenJPEG bug)" << LL_ENDL;
			opj_destroy_cstr_info(&cinfo);
			opj_image_destroy(image);

			if (base.getRawDiscardLevel() == 0)
			{
				base.decodeFailed();
			}
			return TRUE; // done
		}
	}

	/* free opj data structures */
	if (image)
	{
		opj_destroy_cstr_info(&cinfo);
		opj_image_destroy(image);
	}
	
	return TRUE; // done
}
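ceildivpow2() is the integer helper the comment above refers to: it computes ceil(a / 2^b) without floating point. A definition consistent with that usage:

// ceil(a / 2^b) in integer math, e.g. ceildivpow2(200, 3) == 25
inline int ceildivpow2(int a, int b)
{
	return (a + (1 << b) - 1) >> b;
}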
BOOL LLImageJ2COJ::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, S32 first_channel, S32 max_channel_count)
{
	LLTimer decode_timer;

	/* Extract metadata */
	/* ---------------- */
	U8* c_data = base.getData();
	size_t c_size =  base.getDataSize();
	size_t position = 0;
	
	while (position < 1024 && position < (c_size - 7)) // the comment field should be in the first 1024 bytes.
	{
		if (c_data[position] == 0xff && c_data[position + 1] == 0x64)
		{
			U8 high_byte = c_data[position + 2];
			U8 low_byte = c_data[position + 3];
			S32 c_length = (high_byte * 256) + low_byte; // This size also counts the markers, 00 01 and itself
			if (c_length > 200) // sanity check
			{
				// While comments can be very long, anything longer than 200 is suspect.
				break;
			}
			
			if (position + 2 + c_length > c_size)
			{
				// comment extends past the end of the data: corruption, or not all data retrieved yet.
				break;
			}
			
			// if the comment block does not end at the end of data, check to see if the next
			// block starts with 0xFF
			if (position + 2 + c_length < c_size && c_data[position + 2 + c_length] != 0xff)
			{
				// invalid comment block
				break;
			}
			
			// extract the comment minus the markers, 00 01
			raw_image.mComment.assign((char*)c_data + position + 6, c_length - 4);
			break;
		}
		++position;
	}
	
	opj_dparameters_t parameters;	/* decompression parameters */
	opj_event_mgr_t event_mgr = { };		/* event manager */
	opj_image_t *image = nullptr;

	opj_dinfo_t* dinfo = nullptr;	/* handle to a decompressor */
	opj_cio_t *cio = nullptr;


	/* configure the event callbacks (not required) */
	event_mgr.error_handler = error_callback;
	event_mgr.warning_handler = warning_callback;
	event_mgr.info_handler = info_callback;

	/* set decoding parameters to default values */
	opj_set_default_decoder_parameters(&parameters);

	parameters.cp_reduce = base.getRawDiscardLevel();

	if(parameters.cp_reduce == 0 && *(U16*)(base.getData() + base.getDataSize() - 2) != 0xD9FF)
	{
		bool failed = true;
		for(S32 i = base.getDataSize()-1; i > 42; --i)
		{
			if(base.getData()[i] != 0x00)
			{
				failed = *(U16*)(base.getData()+i-1) != 0xD9FF;
				break;
			}
		}
		if(failed)
		{
			opj_image_destroy(image);
			base.decodeFailed();
			return TRUE;
		}
	}


	/* decode the code-stream */
	/* ---------------------- */

	/* JPEG-2000 codestream */

	/* get a decoder handle */
	dinfo = opj_create_decompress(CODEC_J2K);

	/* catch events using our callbacks and give a local context */
	opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr);			

	/* setup the decoder decoding parameters using user parameters */
	opj_setup_decoder(dinfo, &parameters);

	/* open a byte stream */
	cio = opj_cio_open((opj_common_ptr)dinfo, base.getData(), base.getDataSize());

	/* decode the stream and fill the image structure */
	image = opj_decode(dinfo, cio);

	/* close the byte stream */
	opj_cio_close(cio);

	/* free remaining structures */
	if(dinfo)
	{
		opj_destroy_decompress(dinfo);
	}

	// The image decode failed if the return was NULL or the component
	// count was zero.  The latter is just a sanity check before we
	// dereference the array.
	if(!image || !image->numcomps)
	{
		LL_DEBUGS("Texture") << "ERROR -> decodeImpl: failed to decode image!" << LL_ENDL;
		if (image)
		{
			opj_image_destroy(image);
		}
		base.decodeFailed();
		return TRUE; // done
	}

	// sometimes we get bad data out of the cache - check to see if the decode succeeded
	for (S32 i = 0; i < image->numcomps; i++)
	{
		if (image->comps[i].factor != base.getRawDiscardLevel())
		{
			// if we didn't get the discard level we're expecting, fail
			opj_image_destroy(image);
			base.decodeFailed();
			return TRUE;
		}
	}
	
	if(image->numcomps <= first_channel)
	{
		LL_WARNS("Texture") << "trying to decode more channels than are present in image: numcomps: " << image->numcomps << " first_channel: " << first_channel << LL_ENDL;
		if (image)
		{
			opj_image_destroy(image);
		}

		base.decodeFailed();
		return TRUE;
	}

	// Copy image data into our raw image format (instead of the separate channel format)

	S32 img_components = image->numcomps;
	S32 channels = img_components - first_channel;
	if( channels > max_channel_count )
		channels = max_channel_count;

	// Component buffers are allocated in an image width by height buffer.
	// The image placed in that buffer is ceil(width/2^factor) by
	// ceil(height/2^factor) and if the factor isn't zero it will be at the
	// top left of the buffer with black filled in the rest of the pixels.
	// It is integer math, so the formula is written with ceildivpow2.
	// (Assuming all the components have the same width, height and
	// factor.)
	S32 comp_width = image->comps[0].w;
	S32 f=image->comps[0].factor;
	S32 width = ceildivpow2(image->x1 - image->x0, f);
	S32 height = ceildivpow2(image->y1 - image->y0, f);
	raw_image.resize(width, height, channels);
	U8 *rawp = raw_image.getData();
	if (!rawp)
	{
		opj_image_destroy(image);
		base.setLastError("Memory error");
		base.decodeFailed();
		return TRUE; // done
	}

	// first_channel is what channel to start copying from
	// dest is what channel to copy to.  first_channel comes from the
	// argument, dest always starts writing at channel zero.
	for (S32 comp = first_channel, dest=0; comp < first_channel + channels;
		comp++, dest++)
	{
		if (image->comps[comp].data)
		{
			S32 offset = dest;
			for (S32 y = (height - 1); y >= 0; y--)
			{
				for (S32 x = 0; x < width; x++)
				{
					rawp[offset] = image->comps[comp].data[y*comp_width + x];
					offset += channels;
				}
			}
		}
		else // Some rare OpenJPEG versions have this bug.
		{
			LL_DEBUGS("Texture") << "ERROR -> decodeImpl: failed to decode image! (NULL comp data - OpenJPEG bug)" << LL_ENDL;
			if (image)
			{
				opj_image_destroy(image);
			}
			base.decodeFailed();
			return TRUE; // done
		}
	}

	/* free image data structure */
	opj_image_destroy(image);

	return TRUE; // done
}
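The scan loop at the top of the function above looks for a JPEG 2000 COM (comment) marker segment: 0xFF 0x64, a big-endian length that covers the length field itself plus the two-byte registration value and the text, which is why the text starts at position + 6 and has length c_length - 4. An illustrative builder for such a segment (a standalone sketch, not viewer code):

#include <cstdint>
#include <string>
#include <vector>

// Build a minimal JPEG 2000 COM segment so the +6 / -4 offsets above are easy to verify.
std::vector<uint8_t> make_com_segment(const std::string& text)
{
	const uint16_t seg_len = (uint16_t)(2 /*length field*/ + 2 /*Rcom*/ + text.size());
	std::vector<uint8_t> seg;
	seg.push_back(0xFF); seg.push_back(0x64);         // COM marker
	seg.push_back((uint8_t)(seg_len >> 8));           // segment length, big-endian
	seg.push_back((uint8_t)(seg_len & 0xFF));
	seg.push_back(0x00); seg.push_back(0x01);         // Rcom = 1 (Latin text)
	seg.insert(seg.end(), text.begin(), text.end());  // the comment body
	return seg;
}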
Example #17
BOOL LLImageJ2COJ::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, const char* comment_text, F32 encode_time, BOOL reversible)
{
	const S32 MAX_COMPS = 5;
	opj_cparameters_t parameters;	/* compression parameters */
	opj_event_mgr_t event_mgr;		/* event manager */


	/* 
	configure the event callbacks (not required)
	setting of each callback is optional 
	*/
	memset(&event_mgr, 0, sizeof(opj_event_mgr_t));
	event_mgr.error_handler = error_callback;
	event_mgr.warning_handler = warning_callback;
	event_mgr.info_handler = info_callback;

	/* set encoding parameters to default values */
	opj_set_default_encoder_parameters(&parameters);
	parameters.cod_format = 0;
	parameters.cp_disto_alloc = 1;

	if (reversible)
	{
		parameters.tcp_numlayers = 1;
		parameters.tcp_rates[0] = 0.0f;
	}
	else
	{
		parameters.tcp_numlayers = 5;
		parameters.tcp_rates[0] = 1920.0f;
		parameters.tcp_rates[1] = 480.0f;
		parameters.tcp_rates[2] = 120.0f;
		parameters.tcp_rates[3] = 30.0f;
		parameters.tcp_rates[4] = 10.0f;
		parameters.irreversible = 1;
		if (raw_image.getComponents() >= 3)
		{
			parameters.tcp_mct = 1;
		}
	}

	if (!comment_text)
	{
		parameters.cp_comment = (char *) "";
	}
	else
	{
		// Awful hacky cast, too lazy to copy right now.
		parameters.cp_comment = (char *) comment_text;
	}

	//
	// Fill in the source image from our raw image
	//
	OPJ_COLOR_SPACE color_space = CLRSPC_SRGB;
	opj_image_cmptparm_t cmptparm[MAX_COMPS];
	opj_image_t * image = NULL;
	S32 numcomps = raw_image.getComponents();
	S32 width = raw_image.getWidth();
	S32 height = raw_image.getHeight();

	memset(&cmptparm[0], 0, MAX_COMPS * sizeof(opj_image_cmptparm_t));
	for(S32 c = 0; c < numcomps; c++) {
		cmptparm[c].prec = 8;
		cmptparm[c].bpp = 8;
		cmptparm[c].sgnd = 0;
		cmptparm[c].dx = parameters.subsampling_dx;
		cmptparm[c].dy = parameters.subsampling_dy;
		cmptparm[c].w = width;
		cmptparm[c].h = height;
	}

	/* create the image */
	image = opj_image_create(numcomps, &cmptparm[0], color_space);

	image->x1 = width;
	image->y1 = height;

	S32 i = 0;
	const U8 *src_datap = raw_image.getData();
	for (S32 y = height - 1; y >= 0; y--)
	{
		for (S32 x = 0; x < width; x++)
		{
			const U8 *pixel = src_datap + (y*width + x) * numcomps;
			for (S32 c = 0; c < numcomps; c++)
			{
				image->comps[c].data[i] = *pixel;
				pixel++;
			}
			i++;
		}
	}



	/* encode the destination image */
	/* ---------------------------- */

	int codestream_length;
	opj_cio_t *cio = NULL;

	/* get a J2K compressor handle */
	opj_cinfo_t* cinfo = opj_create_compress(CODEC_J2K);

	/* catch events using our callbacks and give a local context */
	opj_set_event_mgr((opj_common_ptr)cinfo, &event_mgr, stderr);			

	/* setup the encoder parameters using the current image and using user parameters */
	opj_setup_encoder(cinfo, &parameters, image);

	/* open a byte stream for writing */
	/* allocate memory for all tiles */
	cio = opj_cio_open((opj_common_ptr)cinfo, NULL, 0);

	/* encode the image */
	bool bSuccess = opj_encode(cinfo, cio, image, NULL);
	if (!bSuccess)
	{
		opj_cio_close(cio);
		// free the compressor and image so the early return does not leak them
		opj_destroy_compress(cinfo);
		opj_image_destroy(image);
		LL_DEBUGS("Texture") << "Failed to encode image." << LL_ENDL;
		return FALSE;
	}
	codestream_length = cio_tell(cio);

	base.copyData(cio->buffer, codestream_length);
	base.updateData(); // set width, height

	/* close and free the byte stream */
	opj_cio_close(cio);

	/* free remaining compression structures */
	opj_destroy_compress(cinfo);


	/* free user parameters structure */
	if(parameters.cp_matrice) free(parameters.cp_matrice);

	/* free image data */
	opj_image_destroy(image);
	return TRUE;
}
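The fixed tcp_rates above are OpenJPEG compression ratios (uncompressed size over compressed size), one per quality layer. A rough per-layer byte budget they imply for a hypothetical 512x512 RGB source:

#include <cstdio>

int main()
{
	const double uncompressed = 512.0 * 512.0 * 3.0;                  // bytes, illustrative source
	const double rates[5]     = { 1920.0, 480.0, 120.0, 30.0, 10.0 }; // tcp_rates[0..4] from above
	for (int i = 0; i < 5; ++i)
	{
		std::printf("layer %d (ratio %.0f:1): ~%.0f bytes\n", i, rates[i], uncompressed / rates[i]);
	}
	return 0;
}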
Example #18
BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, const char* comment_text, F32 encode_time, BOOL reversible)
{
	// Declare and set simple arguments
	bool transpose = false;
	bool vflip = true;
	bool hflip = false;

	try
	{
		// Set up input image files
		siz_params siz;
		
		// Should set rate someplace here
		LLKDUMemIn mem_in(raw_image.getData(),
			raw_image.getDataSize(),
			raw_image.getWidth(),
			raw_image.getHeight(),
			raw_image.getComponents(),
			&siz);

		base.setSize(raw_image.getWidth(), raw_image.getHeight(), raw_image.getComponents());

		int num_components = raw_image.getComponents();

		siz.set(Scomponents,0,0,num_components);
		siz.set(Sdims,0,0,base.getHeight());  // Height of first image component
		siz.set(Sdims,0,1,base.getWidth());   // Width of first image component
		siz.set(Sprecision,0,0,8);  // Image samples have original bit-depth of 8
		siz.set(Ssigned,0,0,false); // Image samples are originally unsigned

		kdu_params *siz_ref = &siz; 
		siz_ref->finalize();
		siz_params transformed_siz; // Use this one to construct code-stream
		transformed_siz.copy_from(&siz,-1,-1,-1,0,transpose,false,false);

		// Construct the `kdu_codestream' object and parse all remaining arguments
		U32 max_output_size = base.getWidth()*base.getHeight()*base.getComponents();
		max_output_size = (max_output_size < 1000 ? 1000 : max_output_size);
		U8 *output_buffer = new U8[max_output_size];
		U32 output_size = 0; // Address updated by LLKDUMemTarget to give the final compressed buffer size
		LLKDUMemTarget output(output_buffer, output_size, max_output_size);

		kdu_codestream codestream;
		codestream.create(&transformed_siz,&output);

		if (comment_text)
		{
			// Set the comments for the codestream
			kdu_codestream_comment comment = codestream.add_comment();
			comment.put_text(comment_text);
		}

		// Set codestream options
		int num_layer_specs = 0;

		kdu_long layer_bytes[64];
		U32 max_bytes = 0;

		if (num_components >= 3)
		{
			// Note that we always use YCC and not YUV
			// *TODO: Verify this doesn't screw up reversible textures (like sculpties) as YCC is not reversible but YUV is...
			set_default_colour_weights(codestream.access_siz());
		}

		if (reversible)
		{
			codestream.access_siz()->parse_string("Creversible=yes");
			// *TODO: we should use yuv in reversible mode and one level since those images are small. 
			// Don't turn this on now though as both create problems on decoding for the moment
			//codestream.access_siz()->parse_string("Clevels=1");
			//codestream.access_siz()->parse_string("Cycc=no");
			// If we're doing reversible (i.e. lossless) compression, assume we're not using quality layers.
			// *TODO: this is incorrect and unnecessary. Try using the regular layer setting.
			codestream.access_siz()->parse_string("Clayers=1");
			num_layer_specs = 1;
			layer_bytes[0] = 0;
		}
		else
		{
			// Rate is the argument passed into the LLImageJ2C which
			// specifies the target compression rate.  The default is 8:1.
			// Possibly if max_bytes < 500, we should just use the default setting?
			// *TODO: mRate is actually always 8:1 in the viewer. Test different values. Also force to reversible for small (< 500 bytes) textures.
			if (base.mRate != 0.f)
			{
				max_bytes = (U32)(base.mRate*base.getWidth()*base.getHeight()*base.getComponents());
			}
			else
			{
				max_bytes = (U32)(base.getWidth()*base.getHeight()*base.getComponents()*0.125);
			}

			const U32 min_bytes = FIRST_PACKET_SIZE;
			if (max_bytes > min_bytes)
			{
				U32 i;
				// This code is where we specify the target number of bytes for
				// each layer.  Not sure if we should do this for small images
				// or not.  The goal is to have this roughly align with
				// different quality levels that we decode at.
				for (i = min_bytes; i < max_bytes; i*=4)
				{
					if (i == min_bytes * 4)
					{
						i = 2000;
					}
					layer_bytes[num_layer_specs] = i;
					num_layer_specs++;
				}
				layer_bytes[num_layer_specs] = max_bytes;
				num_layer_specs++;

				std::string layer_string = llformat("Clayers=%d",num_layer_specs);
				codestream.access_siz()->parse_string(layer_string.c_str());
			}
			else
			{
				layer_bytes[0] = min_bytes;
				num_layer_specs = 1;
				std::string layer_string = llformat("Clayers=%d",num_layer_specs);
				codestream.access_siz()->parse_string(layer_string.c_str());
			}
		}
		
		// Set up data ordering, markers, etc... if precincts or blocks specified
		if ((mBlocksSize != -1) || (mPrecinctsSize != -1))
		{
			if (mPrecinctsSize != -1)
			{
				std::string precincts_string = llformat("Cprecincts={%d,%d}",mPrecinctsSize,mPrecinctsSize);
				codestream.access_siz()->parse_string(precincts_string.c_str());
			}
			if (mBlocksSize != -1)
			{
				std::string blocks_string = llformat("Cblk={%d,%d}",mBlocksSize,mBlocksSize);
				codestream.access_siz()->parse_string(blocks_string.c_str());
			}
			std::string ordering_string = llformat("Corder=RPCL");
			codestream.access_siz()->parse_string(ordering_string.c_str());
			std::string PLT_string = llformat("ORGgen_plt=yes");
			codestream.access_siz()->parse_string(PLT_string.c_str());
			std::string Parts_string = llformat("ORGtparts=R");
			codestream.access_siz()->parse_string(Parts_string.c_str());
		}
		if (mLevels != 0)
		{
			std::string levels_string = llformat("Clevels=%d",mLevels);
			codestream.access_siz()->parse_string(levels_string.c_str());
		}
		
		codestream.access_siz()->finalize_all();
		codestream.change_appearance(transpose,vflip,hflip);

		// Now we are ready for sample data processing.
		kdc_flow_control *tile = new kdc_flow_control(&mem_in,codestream);
		bool done = false;
		while (!done)
		{ 
			// Process line by line
			if (tile->advance_components())
			{
				tile->process_components();
			}
			else
			{
				done = true;
			}
		}

		// Produce the compressed output
		codestream.flush(layer_bytes,num_layer_specs);

		// Cleanup
		delete tile;
		codestream.destroy();

		// Now that we're done encoding, create the new data buffer for the compressed
		// image and stick it there.
		base.copyData(output_buffer, output_size);
		base.updateData(); // set width, height
		delete[] output_buffer;
	}
	catch(const char* msg)
	{
		base.setLastError(ll_safe_string(msg));
		return FALSE;
	}
	catch( ... )
	{
		base.setLastError( "Unknown J2C error" );
		return FALSE;
	}

	return TRUE;
}
BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, const char* comment_text, F32 encode_time, BOOL reversible)
{
	// Declare and set simple arguments
	bool transpose = false;
	bool vflip = true;
	bool hflip = false;

	try
	{
		// Set up input image files
		siz_params siz;
		
		// Should set rate someplace here
		LLKDUMemIn mem_in(raw_image.getData(),
			raw_image.getDataSize(),
			raw_image.getWidth(),
			raw_image.getHeight(),
			raw_image.getComponents(),
			&siz);

		base.setSize(raw_image.getWidth(), raw_image.getHeight(), raw_image.getComponents());

		int num_components = raw_image.getComponents();

		siz.set(Scomponents,0,0,num_components);
		siz.set(Sdims,0,0,base.getHeight());  // Height of first image component
		siz.set(Sdims,0,1,base.getWidth());   // Width of first image component
		siz.set(Sprecision,0,0,8);  // Image samples have original bit-depth of 8
		siz.set(Ssigned,0,0,false); // Image samples are originally unsigned

		kdu_params *siz_ref = &siz; 
		siz_ref->finalize();
		siz_params transformed_siz; // Use this one to construct code-stream
		transformed_siz.copy_from(&siz,-1,-1,-1,0,transpose,false,false);

		// Construct the `kdu_codestream' object and parse all remaining arguments
		U32 max_output_size = base.getWidth()*base.getHeight()*base.getComponents();
		max_output_size = (max_output_size < 1000 ? 1000 : max_output_size);
		U8 *output_buffer = new U8[max_output_size];
		U32 output_size = 0; // Address updated by LLKDUMemTarget to give the final compressed buffer size
		LLKDUMemTarget output(output_buffer, output_size, max_output_size);

		kdu_codestream codestream;
		codestream.create(&transformed_siz,&output);

		if (comment_text)
		{
			// Set the comments for the codestream
			kdu_codestream_comment comment = codestream.add_comment();
			comment.put_text(comment_text);
		}

		if (num_components >= 3)
		{
			// Note that we always use YCC and not YUV
			// *TODO: Verify this doesn't screw up reversible textures (like sculpties) as YCC is not reversible but YUV is...
			set_default_colour_weights(codestream.access_siz());
		}

		// Set codestream options
		int nb_layers = 0;
		kdu_long layer_bytes[MAX_NB_LAYERS];
		U32 max_bytes = (U32)(base.getWidth() * base.getHeight() * base.getComponents());

		// Rate is the argument passed into the LLImageJ2C which specifies the target compression rate. The default is 8:1.
		// *TODO: mRate is actually always 8:1 in the viewer. Test different values.
		llassert (base.mRate > 0.f);
		max_bytes = (U32)((F32)(max_bytes) * base.mRate);
		
		// This code is where we specify the target number of bytes for each quality layer.
		// We're using a logarithmic spacing rule that fits with our way of fetching texture data.
		// Note: For more info on this layers business, read kdu_codestream::flush() doc in kdu_compressed.h
		layer_bytes[nb_layers++] = FIRST_PACKET_SIZE;
		U32 i = MIN_LAYER_SIZE;
		while ((i < max_bytes) && (nb_layers < (MAX_NB_LAYERS-1)))
		{
			layer_bytes[nb_layers++] = i;
			i *= 4;
		}
		// Note: for small images, we can have (max_bytes < FIRST_PACKET_SIZE), hence the test
		if (layer_bytes[nb_layers-1] < max_bytes)
		{
			// Set the last quality layer so to fit the preset compression ratio
			layer_bytes[nb_layers++] = max_bytes;
		}

		if (reversible)
		{
			// Use 0 for a last quality layer for reversible images so all remaining code blocks will be flushed
			// Hack: KDU encoding for reversible images has a bug for small images that leads to j2c images that
			// cannot be opened or are very blurry. Skipping that last layer prevents the problem.
			if ((base.getWidth() >= 32) || (base.getHeight() >= 32))
			{
				layer_bytes[nb_layers++] = 0;
			}
			codestream.access_siz()->parse_string("Creversible=yes");
			// *TODO: we should use yuv in reversible mode
			// Don't turn this on now though as it creates problems on decoding for the moment
			//codestream.access_siz()->parse_string("Cycc=no");
		}
		
		std::string layer_string = llformat("Clayers=%d",nb_layers);
		codestream.access_siz()->parse_string(layer_string.c_str());
		
		// Set up data ordering, markers, etc... if precincts or blocks specified
		if ((mBlocksSize != -1) || (mPrecinctsSize != -1))
		{
			if (mPrecinctsSize != -1)
			{
				std::string precincts_string = llformat("Cprecincts={%d,%d}",mPrecinctsSize,mPrecinctsSize);
				codestream.access_siz()->parse_string(precincts_string.c_str());
			}
			if (mBlocksSize != -1)
			{
				std::string blocks_string = llformat("Cblk={%d,%d}",mBlocksSize,mBlocksSize);
				codestream.access_siz()->parse_string(blocks_string.c_str());
			}
			std::string ordering_string = llformat("Corder=LRCP");
			codestream.access_siz()->parse_string(ordering_string.c_str());
			std::string PLT_string = llformat("ORGgen_plt=yes");
			codestream.access_siz()->parse_string(PLT_string.c_str());
			std::string Parts_string = llformat("ORGtparts=R");
			codestream.access_siz()->parse_string(Parts_string.c_str());
		}
		
		// Set the number of wavelets subresolutions (aka levels) 
		if (mLevels != 0)
		{
			std::string levels_string = llformat("Clevels=%d",mLevels);
			codestream.access_siz()->parse_string(levels_string.c_str());
		}
		
		// Complete the encode settings
		codestream.access_siz()->finalize_all();
		codestream.change_appearance(transpose,vflip,hflip);

		// Now we are ready for sample data processing
		kdc_flow_control *tile = new kdc_flow_control(&mem_in,codestream);
		bool done = false;
		while (!done)
		{ 
			// Process line by line
			if (tile->advance_components())
			{
				tile->process_components();
			}
			else
			{
				done = true;
			}
		}

		// Produce the compressed output
		codestream.flush(layer_bytes,nb_layers);

		// Cleanup
		delete tile;
		codestream.destroy();

		// Now that we're done encoding, create the new data buffer for the compressed
		// image and stick it there.
		base.copyData(output_buffer, output_size);
		base.updateData(); // set width, height
		delete[] output_buffer;
	}
	catch(const char* msg)
	{
		base.setLastError(ll_safe_string(msg));
		return FALSE;
	}
	catch( ... )
	{
		base.setLastError( "Unknown J2C error" );
		return FALSE;
	}

	return TRUE;
}
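The quality-layer schedule above starts at FIRST_PACKET_SIZE, then quadruples from MIN_LAYER_SIZE until it reaches the rate-derived maximum. Those constants are defined elsewhere in the viewer; the standalone sketch below uses placeholder values purely to show the resulting progression:

#include <cstdio>

int main()
{
	// Placeholder values; the viewer defines FIRST_PACKET_SIZE, MIN_LAYER_SIZE and MAX_NB_LAYERS elsewhere.
	const unsigned FIRST_PACKET_SIZE = 600;
	const unsigned MIN_LAYER_SIZE    = 2000;
	const unsigned MAX_NB_LAYERS     = 64;
	const unsigned max_bytes         = 512u * 512u * 3u / 8u;   // 8:1 target for a 512x512 RGB image

	unsigned layer_bytes[MAX_NB_LAYERS];
	unsigned nb_layers = 0;

	layer_bytes[nb_layers++] = FIRST_PACKET_SIZE;
	for (unsigned i = MIN_LAYER_SIZE; i < max_bytes && nb_layers < MAX_NB_LAYERS - 1; i *= 4)
	{
		layer_bytes[nb_layers++] = i;
	}
	if (layer_bytes[nb_layers - 1] < max_bytes)
	{
		layer_bytes[nb_layers++] = max_bytes;   // last layer meets the preset compression ratio
	}

	for (unsigned i = 0; i < nb_layers; ++i)
	{
		std::printf("layer %u: %u bytes\n", i, layer_bytes[i]);
	}
	return 0;
}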
// Returns TRUE to mean done, whether successful or not.
BOOL LLImageJ2CKDU::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, S32 first_channel, S32 max_channel_count)
{
	ECodeStreamMode mode = MODE_FAST;

	LLTimer decode_timer;

	if (!mCodeStreamp)
	{
		if (!initDecode(base, raw_image, decode_time, mode, first_channel, max_channel_count))
		{
			// Initializing the J2C decode failed, bail out.
			cleanupCodeStream();
			return TRUE; // done
		}
	}

	// These can probably be grabbed from what's saved in the class.
	kdu_dims dims;
	mCodeStreamp->get_dims(0,dims);

	// Now we are ready to walk through the tiles processing them one-by-one.
	kdu_byte *buffer = raw_image.getData();

	while (mTPosp->y < mTileIndicesp->size.y)
	{
		while (mTPosp->x < mTileIndicesp->size.x)
		{
			try
			{
				if (!mDecodeState)
				{
					kdu_tile tile = mCodeStreamp->open_tile(*(mTPosp)+mTileIndicesp->pos);

					// Find the region of the buffer occupied by this
					// tile.  Note that we have no control over
					// sub-sampling factors which might have been used
					// during compression and so it can happen that tiles
					// (at the image component level) actually have
					// different dimensions.  For this reason, we cannot
					// figure out the buffer region occupied by a tile
					// directly from the tile indices.  Instead, we query
					// the highest resolution of the first tile-component
					// concerning its location and size on the canvas --
					// the `dims' object already holds the location and
					// size of the entire image component on the same
					// canvas coordinate system.  Comparing the two tells
					// us where the current tile is in the buffer.
					S32 channels = base.getComponents() - first_channel;
					if (channels > max_channel_count)
					{
						channels = max_channel_count;
					}
					kdu_resolution res = tile.access_component(0).access_resolution();
					kdu_dims tile_dims; res.get_dims(tile_dims);
					kdu_coords offset = tile_dims.pos - dims.pos;
					int row_gap = channels*dims.size.x; // inter-row separation
					kdu_byte *buf = buffer + offset.y*row_gap + offset.x*channels;
					mDecodeState = new LLKDUDecodeState(tile, buf, row_gap);
				}
				// Do the actual processing
				F32 remaining_time = decode_time - decode_timer.getElapsedTimeF32();
				// This is where we do the actual decode.  If we run out of time, return false.
				if (mDecodeState->processTileDecode(remaining_time, (decode_time > 0.0f)))
				{
					delete mDecodeState;
					mDecodeState = NULL;
				}
				else
				{
					// Not finished decoding yet.
					//					setLastError("Ran out of time while decoding");
					return FALSE;
				}
			}
			catch (const char* msg)
			{
				base.setLastError(ll_safe_string(msg));
				base.decodeFailed();
				cleanupCodeStream();
				return TRUE; // done
			}
			catch (...)
			{
				base.setLastError( "Unknown J2C error" );
				base.decodeFailed();
				cleanupCodeStream();
				return TRUE; // done
			}


			mTPosp->x++;
		}
		mTPosp->y++;
		mTPosp->x = 0;
	}

	cleanupCodeStream();

	return TRUE;
}
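A worked instance of the tile-placement arithmetic described in the long comment inside the function above, with purely illustrative numbers:

#include <cstdio>

int main()
{
	const int channels   = 4;      // RGBA
	const int image_w    = 256;    // dims.size.x
	const int tile_off_x = 128;    // tile_dims.pos.x - dims.pos.x
	const int tile_off_y = 64;     // tile_dims.pos.y - dims.pos.y

	const int row_gap = channels * image_w;                          // inter-row separation in bytes
	const int start   = tile_off_y * row_gap + tile_off_x * channels;
	std::printf("tile data starts %d bytes into the interleaved buffer\n", start);
	return 0;
}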
BOOL LLImageJ2CKDU::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, ECodeStreamMode mode, S32 first_channel, S32 max_channel_count, int discard_level, int* region)
{
	base.resetLastError();

	// *FIX: kdu calls our callback function if there's an error, and then bombs.
	// To regain control, we throw an exception, and catch it here.
	try
	{
		// Merov : Test!! DO NOT COMMIT!!
		//findDiscardLevelsBoundaries(base);

		base.updateRawDiscardLevel();
		setupCodeStream(base, TRUE, mode);

		mRawImagep = &raw_image;
		mCodeStreamp->change_appearance(false, true, false);

		// Apply loading discard level and cropping if required
		kdu_dims* region_kdu = NULL;
		if (region != NULL)
		{
			region_kdu = new kdu_dims;
			region_kdu->pos.x  = region[0];
			region_kdu->pos.y  = region[1];
			region_kdu->size.x = region[2] - region[0];
			region_kdu->size.y = region[3] - region[1];
		}
		int discard = (discard_level != -1 ? discard_level : base.getRawDiscardLevel());
		//llinfos << "Merov debug : initDecode, discard used = " << discard << ", asked = " << discard_level << llendl;
		// Apply loading restrictions
		mCodeStreamp->apply_input_restrictions( first_channel, max_channel_count, discard, 0, region_kdu);
		
		// Clean-up
		if (region_kdu)
		{
			delete region_kdu;
			region_kdu = NULL;
		}

		// Resize raw_image according to the image to be decoded
		kdu_dims dims; mCodeStreamp->get_dims(0,dims);
		S32 channels = base.getComponents() - first_channel;
		channels = llmin(channels,max_channel_count);
		raw_image.resize(dims.size.x, dims.size.y, channels);

		if (!mTileIndicesp)
		{
			mTileIndicesp = new kdu_dims;
		}
		mCodeStreamp->get_valid_tiles(*mTileIndicesp);
		if (!mTPosp)
		{
			mTPosp = new kdu_coords;
			mTPosp->y = 0;
			mTPosp->x = 0;
		}
	}
	catch (const char* msg)
	{
		base.setLastError(ll_safe_string(msg));
		return FALSE;
	}
	catch (...)
	{
		base.setLastError("Unknown J2C error");
		return FALSE;
	}

	return TRUE;
}
Example #22
BOOL LLImageJ2COJ::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, S32 first_channel, S32 max_channel_count)
{
	//
	// FIXME: Get the comment field out of the texture
	//

	LLTimer decode_timer;

	opj_dparameters_t parameters;	/* decompression parameters */
	opj_event_mgr_t event_mgr;		/* event manager */
	opj_image_t *image = NULL;

	opj_dinfo_t* dinfo = NULL;	/* handle to a decompressor */
	opj_cio_t *cio = NULL;


	/* configure the event callbacks (not required) */
	memset(&event_mgr, 0, sizeof(opj_event_mgr_t));
	event_mgr.error_handler = error_callback;
	event_mgr.warning_handler = warning_callback;
	event_mgr.info_handler = info_callback;

	/* set decoding parameters to default values */
	opj_set_default_decoder_parameters(&parameters);

	parameters.cp_reduce = base.getRawDiscardLevel();

	/* decode the code-stream */
	/* ---------------------- */

	/* JPEG-2000 codestream */

	/* get a decoder handle */
	dinfo = opj_create_decompress(CODEC_J2K);

	/* catch events using our callbacks and give a local context */
	opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr);			

	/* setup the decoder decoding parameters using user parameters */
	opj_setup_decoder(dinfo, &parameters);

	/* open a byte stream */
	cio = opj_cio_open((opj_common_ptr)dinfo, base.getData(), base.getDataSize());

	/* decode the stream and fill the image structure */
	image = opj_decode(dinfo, cio);

	/* close the byte stream */
	opj_cio_close(cio);

	/* free remaining structures */
	if(dinfo)
	{
		opj_destroy_decompress(dinfo);
	}

	// The image decode failed if the return was NULL or the component
	// count was zero.  The latter is just a sanity check before we
	// dereference the array.
	if(!image || !image->numcomps)
	{
		LL_DEBUGS("Texture") << "ERROR -> decodeImpl: failed to decode image!" << LL_ENDL;
		if (image)
		{
			opj_image_destroy(image);
		}

		return TRUE; // done
	}

	// sometimes we get bad data out of the cache - check to see if the decode succeeded
	for (S32 i = 0; i < image->numcomps; i++)
	{
		if (image->comps[i].factor != base.getRawDiscardLevel())
		{
			// if we didn't get the discard level we're expecting, fail
			opj_image_destroy(image);
			base.mDecoding = FALSE;
			return TRUE;
		}
	}
	
	if(image->numcomps <= first_channel)
	{
		llwarns << "trying to decode more channels than are present in image: numcomps: " << image->numcomps << " first_channel: " << first_channel << llendl;
		if (image)
		{
			opj_image_destroy(image);
		}
			
		return TRUE;
	}

	// Copy image data into our raw image format (instead of the separate channel format)

	S32 img_components = image->numcomps;
	S32 channels = img_components - first_channel;
	if( channels > max_channel_count )
		channels = max_channel_count;

	// Component buffers are allocated in an image width by height buffer.
	// The image placed in that buffer is ceil(width/2^factor) by
	// ceil(height/2^factor) and if the factor isn't zero it will be at the
	// top left of the buffer with black filled in the rest of the pixels.
	// It is integer math, so the formula is written with ceildivpow2.
	// (Assuming all the components have the same width, height and
	// factor.)
	S32 comp_width = image->comps[0].w;
	S32 f=image->comps[0].factor;
	S32 width = ceildivpow2(image->x1 - image->x0, f);
	S32 height = ceildivpow2(image->y1 - image->y0, f);
	raw_image.resize(width, height, channels);
	U8 *rawp = raw_image.getData();

	// first_channel is what channel to start copying from
	// dest is what channel to copy to.  first_channel comes from the
	// argument, dest always starts writing at channel zero.
	for (S32 comp = first_channel, dest=0; comp < first_channel + channels;
		comp++, dest++)
	{
		if (image->comps[comp].data)
		{
			S32 offset = dest;
			for (S32 y = (height - 1); y >= 0; y--)
			{
				for (S32 x = 0; x < width; x++)
				{
					rawp[offset] = image->comps[comp].data[y*comp_width + x];
					offset += channels;
				}
			}
		}
		else // Some rare OpenJPEG versions have this bug.
		{
			LL_DEBUGS("Texture") << "ERROR -> decodeImpl: failed to decode image! (NULL comp data - OpenJPEG bug)" << LL_ENDL;
			opj_image_destroy(image);

			return TRUE; // done
		}
	}

	/* free image data structure */
	opj_image_destroy(image);

	return TRUE; // done
}
Example #23
BOOL LLImageJ2COJ::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, const char* comment_text, F32 encode_time, BOOL reversible)
{
	const S32 MAX_COMPS = 5;
	opj_cparameters_t parameters;	/* compression parameters */
	opj_event_mgr_t event_mgr;		/* event manager */


	/* 
	configure the event callbacks (not required)
	setting of each callback is optional 
	*/
	memset(&event_mgr, 0, sizeof(opj_event_mgr_t));
	event_mgr.error_handler = error_callback;
	event_mgr.warning_handler = warning_callback;
	event_mgr.info_handler = info_callback;

	/* set encoding parameters to default values */
	opj_set_default_encoder_parameters(&parameters);
	parameters.cod_format = 0;
	parameters.cp_disto_alloc = 1;

	if (reversible)
	{
		parameters.tcp_numlayers = 1;
		parameters.tcp_rates[0] = 0.0f;
	}
	else
	{
		parameters.tcp_numlayers = 5;
		parameters.tcp_rates[0] = 1920.0f;
		parameters.tcp_rates[1] = 480.0f;
		parameters.tcp_rates[2] = 120.0f;
		parameters.tcp_rates[3] = 30.0f;
		parameters.tcp_rates[4] = 10.0f;
		parameters.irreversible = 1;
		if (raw_image.getComponents() >= 3)
		{
			parameters.tcp_mct = 1;
		}
	}

	std::string comment_metadata;
	if (!comment_text)
	{
		//Inserting owner id, upload time, and dimensions 
		//See http://wiki.secondlife.com/wiki/Texture_meta-data for details.
		extern LLUUID gAgentID;
		time_t now = time(NULL);
		tm * ptime = gmtime(&now);
		//std::string color_avg(llformat("c=%02x%02x%02x%02x")); //Perhaps do this some day...
		std::string timestr(llformat("z=%04i%02i%02i%02i%02i%02i",ptime->tm_year+1900,ptime->tm_mon+1,ptime->tm_mday,ptime->tm_hour,ptime->tm_min,ptime->tm_sec));
		comment_metadata=llformat("a=%s&%s&h=%u&w=%u",gAgentID.asString().c_str(),timestr.c_str(),(U32)raw_image.getHeight(),(U32)raw_image.getWidth());
		parameters.cp_comment = (char *) comment_metadata.c_str();
	}
	else
	{
		// Awful hacky cast, too lazy to copy right now.
		parameters.cp_comment = (char *) comment_text;
	}

	//
	// Fill in the source image from our raw image
	//
	OPJ_COLOR_SPACE color_space = CLRSPC_SRGB;
	opj_image_cmptparm_t cmptparm[MAX_COMPS];
	opj_image_t * image = NULL;
	S32 numcomps = llmin((S32)raw_image.getComponents(),(S32)MAX_COMPS); //Clamp avoid overrunning buffer -Shyotl
	S32 width = raw_image.getWidth();
	S32 height = raw_image.getHeight();

	memset(&cmptparm[0], 0, MAX_COMPS * sizeof(opj_image_cmptparm_t));
	for(S32 c = 0; c < numcomps; c++) {
		cmptparm[c].prec = 8;
		cmptparm[c].bpp = 8;
		cmptparm[c].sgnd = 0;
		cmptparm[c].dx = parameters.subsampling_dx;
		cmptparm[c].dy = parameters.subsampling_dy;
		cmptparm[c].w = width;
		cmptparm[c].h = height;
	}

	/* create the image */
	image = opj_image_create(numcomps, &cmptparm[0], color_space);

	image->x1 = width;
	image->y1 = height;

	S32 i = 0;
	const U8 *src_datap = raw_image.getData();
	for (S32 y = height - 1; y >= 0; y--)
	{
		for (S32 x = 0; x < width; x++)
		{
			const U8 *pixel = src_datap + (y*width + x) * numcomps;
			for (S32 c = 0; c < numcomps; c++)
			{
				image->comps[c].data[i] = *pixel;
				pixel++;
			}
			i++;
		}
	}



	/* encode the destination image */
	/* ---------------------------- */

	int codestream_length;
	opj_cio_t *cio = NULL;

	/* get a J2K compressor handle */
	opj_cinfo_t* cinfo = opj_create_compress(CODEC_J2K);

	/* catch events using our callbacks and give a local context */
	opj_set_event_mgr((opj_common_ptr)cinfo, &event_mgr, stderr);			

	/* setup the encoder parameters using the current image and using user parameters */
	opj_setup_encoder(cinfo, &parameters, image);

	/* open a byte stream for writing */
	/* allocate memory for all tiles */
	cio = opj_cio_open((opj_common_ptr)cinfo, NULL, 0);

	/* encode the image */
	bool bSuccess = opj_encode(cinfo, cio, image, NULL);
	if (!bSuccess)
	{
		opj_cio_close(cio);
		// free the compressor and image so the early return does not leak them
		opj_destroy_compress(cinfo);
		opj_image_destroy(image);
		llwarns << "Failed to encode image." << llendl;
		return FALSE;
	}
	codestream_length = cio_tell(cio);

	base.copyData(cio->buffer, codestream_length);
	base.updateData(); // set width, height

	/* close and free the byte stream */
	opj_cio_close(cio);

	/* free remaining compression structures */
	opj_destroy_compress(cinfo);


	/* free user parameters structure */
	if(parameters.cp_matrice) free(parameters.cp_matrice);

	/* free image data */
	opj_image_destroy(image);
	return TRUE;
}
Example #24
BOOL LLImageJ2CKDU::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, ECodeStreamMode mode, S32 first_channel, S32 max_channel_count, int discard_level, int* region)
{
	base.resetLastError();

	// *FIX: kdu calls our callback function if there's an error, and then bombs.
	// To regain control, we throw an exception, and catch it here.
	try
	{
		base.updateRawDiscardLevel();
		setupCodeStream(base, TRUE, mode);

		mRawImagep = &raw_image;
		mCodeStreamp->change_appearance(false, true, false);

		// Apply loading discard level and cropping if required
		kdu_dims* region_kdu = NULL;
		if (region != NULL)
		{
			region_kdu = new kdu_dims;
			region_kdu->pos.x  = region[0];
			region_kdu->pos.y  = region[1];
			region_kdu->size.x = region[2] - region[0];
			region_kdu->size.y = region[3] - region[1];
		}
		int discard = (discard_level != -1 ? discard_level : base.getRawDiscardLevel());
		
		// Apply loading restrictions
		mCodeStreamp->apply_input_restrictions( first_channel, max_channel_count, discard, 0, region_kdu);
		
		// Clean-up
		if (region_kdu)
		{
			delete region_kdu;
			region_kdu = NULL;
		}

		// Resize raw_image according to the image to be decoded
		kdu_dims dims; mCodeStreamp->get_dims(0,dims);
		// *TODO: Use the real number of levels read from the file throughout the code instead of relying on an inferred value from the dimensions
		//S32 levels = mCodeStreamp->get_min_dwt_levels();
		S32 channels = base.getComponents() - first_channel;
		channels = llmin(channels,max_channel_count);
		raw_image.resize(dims.size.x, dims.size.y, channels);
		//llinfos << "j2c image dimension: width = " << dims.size.x << ", height = " << dims.size.y << ", channels = " << channels << ", levels = " << levels << llendl;

		if (!mTileIndicesp)
		{
			mTileIndicesp = new kdu_dims;
		}
		mCodeStreamp->get_valid_tiles(*mTileIndicesp);
		if (!mTPosp)
		{
			mTPosp = new kdu_coords;
			mTPosp->y = 0;
			mTPosp->x = 0;
		}
	}
	catch (const char* msg)
	{
		base.setLastError(ll_safe_string(msg));
		return FALSE;
	}
	catch (...)
	{
		base.setLastError("Unknown J2C error");
		return FALSE;
	}

	return TRUE;
}