Example #1
static boolean load_collection(
	short collection_index,
	boolean strip)
{
	struct collection_header *header= get_collection_header(collection_index);
	Handle collection= NULL, shading_tables= NULL;
	OSErr error= noErr;
	
	if (bit_depth==8 || header->offset16==-1)
	{
		vassert(header->offset!=-1, csprintf(temporary, "collection #%d does not exist.", collection_index));
		collection= read_handle_from_file(shapes_file_refnum, header->offset, header->length);
	}
	else
	{
		collection= read_handle_from_file(shapes_file_refnum, header->offset16, header->length16);
	}

	if (collection)
	{
		if (strip) strip_collection_handle((struct collection_definition **) collection);
		MoveHHi(collection), HLock(collection);
		header->collection= (struct collection_definition **) collection;
	
		/* allocate enough space for this collection's shading tables */
		if (strip)
		{
			shading_tables= NewHandle(0);
		}
		else
		{
			struct collection_definition *definition= get_collection_definition(collection_index);
			
			shading_tables= NewHandle(get_shading_table_size(collection_index)*definition->clut_count +
				shading_table_size*NUMBER_OF_TINT_TABLES);
			if ((error= MemError())==noErr)
			{
				assert(shading_tables);
				MoveHHi(shading_tables), HLock(shading_tables);
			}
		}
		
		header->shading_tables= shading_tables;
	}
	else
	{
		error= MemError();
//		vhalt(csprintf(temporary, "couldn't load collection #%d (error==#%d)", collection_index, error));
	}

	/* if any errors occurred, free whatever memory we used */
	if (error!=noErr)
	{
		if (collection) DisposeHandle(collection);
		if (shading_tables) DisposeHandle(shading_tables);
	}
	
	return error==noErr ? TRUE : FALSE;
}
Example #2
static void lock_collection(
	struct collection_header *header)
{
	assert(header->collection);
	
	MoveHHi((Handle)header->collection);
	HLock((Handle)header->collection);

	MoveHHi((Handle)header->shading_tables);
	HLock((Handle)header->shading_tables);
	
	return;
}
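Both collection loaders above follow the same Memory Manager idiom: MoveHHi pushes the relocatable block toward the top of the heap before HLock pins it in place, so the locked block does not fragment the middle of the heap while its dereferenced pointer is in use. A minimal sketch of that allocate/lock/use/unlock/dispose lifecycle, using only the standard Memory Manager calls shown in the examples (use_buffer is a hypothetical caller-supplied routine):

static OSErr with_locked_handle(
	Size size)
{
	Handle h= NewHandle(size);
	OSErr error= MemError();

	if (error==noErr && h)
	{
		/* move the block high, then lock it so *h stays valid across
			calls that might compact the heap */
		MoveHHi(h);
		HLock(h);

		use_buffer(*h, GetHandleSize(h)); /* hypothetical; operates on the locked block */

		HUnlock(h);
		DisposeHandle(h);
	}

	return error;
}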
Example #3
File: macutils.c Project: zzhzz/atac_95230
short int *load_table (int id)
{
	Handle h;
	
	if ((h = GetResource ('YYst', id)) == NULL) {
		fputs ("error loading static table resource", stderr);
		putc ('\n', stderr);
		exit (1);
		}
	MoveHHi (h);
	HLock (h);
	return (short int *) *h;
	}
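load_table returns a pointer into the locked block but discards the Handle, so the resource stays locked for the life of the program. If a caller ever needed to give the table back, one hedged possibility is to recover the Handle from the pointer and release the resource; RecoverHandle and ReleaseResource are standard Memory/Resource Manager calls, but unload_table itself is hypothetical and assumes the pointer still addresses the start of the locked block:

static void unload_table (short int *table)
{
	Handle h = RecoverHandle ((Ptr) table);

	if (h != NULL) {
		HUnlock (h);
		ReleaseResource (h);
		}
	}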
Example #4
File: sapapi.c Project: BygoneWorlds/anet
pascal short
AdvertiseService(unsigned short serviceType, char *serviceName,
                 unsigned short serviceSocket, unsigned long *adv_handle)
{
    T_AdvertisingStruct *ASp;
    Handle				h;
    SAPpb				pb;
    short				status;
    unsigned int		i;
    char				*tmp;

    *adv_handle = 0L;

    if ((h = NewHandleClear(sizeof(T_AdvertisingStruct))) == (Handle)0) {
        return (SAP_NO_MEM);
    }
    MoveHHi(h);
    HLock(h);

    ASp = (T_AdvertisingStruct *)*h;
    ASp->signature = ADVERTISING_SIGNATURE;
    ASp->ASServerIDpacket.SAPPacketType = IntSwap( PERIODIC_ID_PACKET );
    ASp->ASServerIDpacket.serverType = IntSwap( serviceType );
    /*
     * Make sure the serviceName isn't longer than 47 characters.
     */
    for(i=0, tmp=serviceName; i<sizeof(ASp->ASServerIDpacket.serverName); i++, tmp++) {
        if (*tmp == 0) {
            break;
        }
    }
    if (i >= sizeof(ASp->ASServerIDpacket.serverName)) {
        serviceName[sizeof(ASp->ASServerIDpacket.serverName)-1] = 0;
    }
    strncpy((char *)ASp->ASServerIDpacket.serverName, serviceName, sizeof(ASp->ASServerIDpacket.serverName));
    ASp->ASServerIDpacket.serverSocket = serviceSocket;
    ASp->ASServerIDpacket.interveningNetworks = IntSwap( 1 );
    pb.csCode = ADVERTISESERVICE;
    pb.csParams.adv_service.h = h;
    status = call_sap(&pb);
    if (status) {
        DisposeHandle(h);
        return (status);
    }
    *adv_handle = (unsigned long)h;
    return (SAP_SUCCESSFUL);
}
Example #5
bool QTImportFileHandle::Import(TrackFactory *trackFactory, Track ***outTracks,
                                  int *outNumTracks)
{
   OSErr err = noErr;

   //
   // Determine the file format.
   //

   // GetMediaSampleDescription takes a SampleDescriptionHandle, but apparently
   // if the media is a sound (which presumably we know it is) then it will treat
   // it as a SoundDescriptionHandle (which in addition to the format of single
   // samples, also tells you sample rate, number of channels, etc.)
   // Pretty messed up interface, if you ask me.
   SoundDescriptionHandle soundDescription = (SoundDescriptionHandle)NewHandle(0);
   GetMediaSampleDescription(mMedia, 1, (SampleDescriptionHandle)soundDescription);

   // If this is a compressed format, it may have out-of-stream compression
   // parameters that need to be passed to the sound converter.  We retrieve
   // these in the form of an audio atom.  To do this, however we have to
   // get the data by way of a handle, then copy it manually from the handle to
   // the atom.  These interfaces get worse all the time!
   Handle decompressionParamsHandle = NewHandle(0);
   AudioFormatAtomPtr decompressionParamsAtom = NULL;
   err = GetSoundDescriptionExtension(soundDescription, &decompressionParamsHandle,
                                      siDecompressionParams);
   if(err == noErr)
   {
      // this stream has decompression parameters.  copy from the handle to the atom.
      int paramsSize = GetHandleSize(decompressionParamsHandle);
      HLock(decompressionParamsHandle);
      decompressionParamsAtom = (AudioFormatAtomPtr)NewPtr(paramsSize);
      //err = MemError();
      BlockMoveData(*decompressionParamsHandle, decompressionParamsAtom, paramsSize);
      HUnlock(decompressionParamsHandle);
   }

   if(decompressionParamsHandle)
      DisposeHandle(decompressionParamsHandle);

   //
   // Now we set up a sound converter to decompress the data if it is compressed.
   //

   SoundComponentData inputFormat;
   SoundComponentData outputFormat;
   SoundConverter     soundConverter = NULL;

   inputFormat.flags       = outputFormat.flags       = 0;
   inputFormat.sampleCount = outputFormat.sampleCount = 0;
   inputFormat.reserved    = outputFormat.reserved    = 0;
   inputFormat.buffer      = outputFormat.buffer      = NULL;
   inputFormat.numChannels = outputFormat.numChannels = (*soundDescription)->numChannels;
   inputFormat.sampleSize  = outputFormat.sampleSize  = (*soundDescription)->sampleSize;
   inputFormat.sampleRate  = outputFormat.sampleRate  = (*soundDescription)->sampleRate;

   inputFormat.format = (*soundDescription)->dataFormat;
   outputFormat.format = kSoundNotCompressed;

   err = SoundConverterOpen(&inputFormat, &outputFormat, &soundConverter);

   //
   // Create the Audacity WaveTracks to house the new data
   //

   *outNumTracks = outputFormat.numChannels;
   WaveTrack **channels = new WaveTrack *[*outNumTracks];

   // determine sample format

   sampleFormat format;
   int bytesPerSample;

   // TODO: do we know for sure that 24 and 32 bit samples are the same kind
   // of 24 and 32 bit samples we expect?
   switch(outputFormat.sampleSize) {
      case 16:
         format = int16Sample;
         bytesPerSample = 2;
         break;

      case 24:
         format = int24Sample;
         bytesPerSample = 3;
         break;

      case 32:
         format = floatSample;
         bytesPerSample = 4;
         break;

      default:
         printf("I can't import a %d-bit file!\n", outputFormat.sampleSize);
         return false;
   }

   int c;
   for (c = 0; c < *outNumTracks; c++)
   {
      channels[c] = trackFactory->NewWaveTrack(format);
      channels[c]->SetRate(outputFormat.sampleRate / 65536.0);

      if(*outNumTracks == 2)
      {
         if(c == 0)
         {
            channels[c]->SetChannel(Track::LeftChannel);
            channels[c]->SetLinked(true);
         }
         else if(c == 1)
         {
            channels[c]->SetChannel(Track::RightChannel);
         }
      }
   }

   //
   // Give the converter the decompression atom.
   //

   // (judging from the sample code, it's OK if the atom is NULL, which
   // it will be if there was no decompression information)

   err = SoundConverterSetInfo(soundConverter, siDecompressionParams, decompressionParamsAtom);
   if(err == siUnknownInfoType)
   {
      // the decompressor didn't need the decompression atom, but that's ok.
      err = noErr;
   }

   // Tell the converter we're cool with VBR audio
   SoundConverterSetInfo(soundConverter, siClientAcceptsVBR, Ptr(true));

   //
   // Determine buffer sizes and allocate output buffer
   //

   int inputBufferSize = 655360;
   int outputBufferSize = 524288;
   char *outputBuffer = new char[outputBufferSize];

   //
   // Populate the structure of data that is passed to the callback
   //

   CallbackData cbData;
   memset(&cbData.compData, 0, sizeof(ExtendedSoundComponentData));

   cbData.isSourceVBR        = ((*soundDescription)->compressionID == variableCompression);
   cbData.sourceMedia        = mMedia;
   cbData.getMediaAtThisTime = 0;
   cbData.sourceDuration     = GetMediaDuration(mMedia);
   cbData.isThereMoreSource  = true;
   cbData.maxBufferSize      = inputBufferSize;

   // allocate source media buffer
   cbData.hSource            = NewHandle((long)cbData.maxBufferSize);
   MoveHHi(cbData.hSource);
   HLock(cbData.hSource);

   cbData.compData.desc = inputFormat;
   cbData.compData.desc.buffer = (BytePtr)*cbData.hSource;

   cbData.compData.desc.flags = kExtendedSoundData;
   cbData.compData.extendedFlags = kExtendedSoundBufferSizeValid |
                                   kExtendedSoundSampleCountNotValid;
   if(cbData.isSourceVBR)
      cbData.compData.extendedFlags |= kExtendedSoundCommonFrameSizeValid;

   cbData.compData.bufferSize = 0; // filled in during callback

   // this doesn't make sense to me, but it is taken from sample code
   cbData.compData.recordSize = sizeof(ExtendedSoundComponentData);


   //
   // Begin the Conversion
   //

   err = SoundConverterBeginConversion(soundConverter);

   SoundConverterFillBufferDataUPP fillBufferUPP;
   fillBufferUPP = NewSoundConverterFillBufferDataUPP(SoundConverterFillBufferCallback);

   bool done = false;
   bool cancelled = false;
   sampleCount samplesSinceLastCallback = 0;
   UInt32 outputFrames;
   UInt32 outputBytes;
   UInt32 outputFlags;

#define SAMPLES_PER_CALLBACK 10000

   while(!done && !cancelled)
   {
      err = SoundConverterFillBuffer(soundConverter,    // a sound converter
                                     fillBufferUPP,     // the callback
                                     &cbData,           // refCon passed to FillDataProc
                                     outputBuffer,      // the buffer to decompress into
                                     outputBufferSize,  // size of that buffer
                                     &outputBytes,      // number of bytes actually output
                                     &outputFrames,     // number of frames actually output
                                     &outputFlags);     // fillbuffer returned advisory flags
      if (err)
         break;

      if((outputFlags & kSoundConverterHasLeftOverData) == false)
         done = true;

      for(c = 0; c < *outNumTracks; c++)
         channels[c]->Append(outputBuffer + (c*bytesPerSample),
                             format,
                             outputFrames,
                             *outNumTracks);

      samplesSinceLastCallback += outputFrames;
      if( samplesSinceLastCallback > SAMPLES_PER_CALLBACK )
      {
         if( mProgressCallback )
            cancelled = mProgressCallback(mUserData,
                                          (float)cbData.getMediaAtThisTime /
                                          cbData.sourceDuration);
         samplesSinceLastCallback -= SAMPLES_PER_CALLBACK;
      }
   }

   HUnlock(cbData.hSource);

   // Flush any remaining data to the output buffer.
   // It appears that we have no way of telling this routine how big the output
   // buffer is!  We had better hope that there isn't more data left than
   // the buffer is big.
   SoundConverterEndConversion(soundConverter, outputBuffer, &outputFrames, &outputBytes);

   for(c = 0; c < *outNumTracks; c++)
   {
       channels[c]->Append(outputBuffer + (c*bytesPerSample),
                           format,
                           outputFrames,
                           *outNumTracks);
      channels[c]->Flush();
   }

   delete[] outputBuffer;
   DisposeHandle(cbData.hSource);
   SoundConverterClose(soundConverter);
   DisposeMovie(mMovie);

   if (cancelled || err != noErr) {
      for (c = 0; c < *outNumTracks; c++)
         delete channels[c];
      delete[] channels;

      return false;
   }
   else {
      *outTracks = new Track *[*outNumTracks];
      for(c = 0; c < *outNumTracks; c++)
         (*outTracks)[c] = channels[c];
      delete[] channels;

      return true;
   }
}
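The per-channel Append calls above read outputBuffer as interleaved frames: channel c starts at byte offset c*bytesPerSample and the final argument (*outNumTracks) is the stride between successive samples of that channel. A minimal sketch of the same de-interleave for the 16-bit case (extract_channel is illustrative only, not part of the Audacity API):

// copy one channel out of an interleaved int16 buffer; this mirrors
// Append(outputBuffer + c*bytesPerSample, int16Sample, outputFrames, *outNumTracks)
static void extract_channel(const short *interleaved, short *mono,
                            int frames, int numChannels, int channel)
{
   for (int i = 0; i < frames; i++)
      mono[i] = interleaved[i * numChannels + channel];
}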
Example #6
static void QTVideo_AddVideoSamplesToMedia (Media theMedia, const Rect *trackFrame)
{
	long maxCompressedSize;
	GWorldPtr theGWorld = nil;
	long curSample;
	Handle compressedData = nil;
	Ptr compressedDataPtr;
	ImageDescriptionHandle imageDesc = nil;
	CGrafPtr oldPort;
	GDHandle oldGDeviceH;
	OSErr err = noErr;



		err = NewGWorld (&theGWorld, 
						kPixelDepth,	/* pixel depth */
						trackFrame, 
						nil, 
						nil, 
						(GWorldFlags) 0 );
		CheckError (err, "NewGWorld error");

		LockPixels (theGWorld->portPixMap);
		err = GetMaxCompressionSize(theGWorld->portPixMap,
									trackFrame, 
									kMgrChoose, /* let ICM choose depth */
									codecNormalQuality, 
									kAnimationCodecType, 
									(CompressorComponent) anyCodec,
									&maxCompressedSize);
		CheckError (err, "GetMaxCompressionSize error" );

		compressedData = NewHandle(maxCompressedSize);
		CheckError( MemError(), "NewHandle error" );

		MoveHHi( compressedData );
		HLock( compressedData );
		compressedDataPtr = StripAddress( *compressedData );

		imageDesc = (ImageDescriptionHandle)NewHandle(4);
		CheckError( MemError(), "NewHandle error" );

		GetGWorld (&oldPort, &oldGDeviceH);
		SetGWorld (theGWorld, nil);

		for (curSample = 1; curSample <= kNumVideoFrames; curSample++) 
		{
			EraseRect (trackFrame);

			QTVideo_DrawFrame(trackFrame, curSample);

			err = CompressImage (theGWorld->portPixMap, 
								trackFrame, 
								codecNormalQuality,
								kAnimationCodecType,
								imageDesc, 
								compressedDataPtr );
			CheckError( err, "CompressImage error" );

			err = AddMediaSample(theMedia, 
								compressedData,
								kNoOffset,	/* no offset in data */
								(**imageDesc).dataSize, 
								kSampleDuration,	/* frame duration = 1/10 sec */
								(SampleDescriptionHandle)imageDesc, 
								kAddOneVideoSample,	/* one sample */
								kSyncSample,	/* self-contained samples */
								nil);
			CheckError( err, "AddMediaSample error" );
		}
		SetGWorld (oldPort, oldGDeviceH);

		if (imageDesc)
		{
			DisposeHandle ((Handle)imageDesc);
		}
		if (compressedData)
		{
			DisposeHandle (compressedData);
		}
		if (theGWorld)
		{
			DisposeGWorld (theGWorld);
		}
} 
Example #7
void TLevelWriter3gp::save(const TImageP &img, int frameIndex)
{
	if (m_cancelled)
		return;

	TRasterImageP image(img);
	int lx = image->getRaster()->getLx();
	int ly = image->getRaster()->getLy();
	//void *buffer = image->getRaster()->getRawData();
	int pixSize = image->getRaster()->getPixelSize();
	if (pixSize != 4)
		throw TImageException(getFilePath(), "Unsupported pixel type");

	QMutexLocker sl(&m_mutex);

	if (!m_properties)
		m_properties = new Tiio::MovWriterProperties();

	Tiio::MovWriterProperties *prop = (Tiio::MovWriterProperties *)(m_properties);

	//CodecType compression = StandardCompressionType;  prop->getCurrentCodec();
	//CodecQ quality = StandardQualityType;  prop->getCurrentQuality();

	if (!m_initDone) {
		//FSSpec fspec;
		Rect frame;
		long max_compressed_size;
		QDErr err;

		m_videoTrack = NewMovieTrack(m_movie, FixRatio((short)lx, 1), FixRatio((short)ly, 1), kNoVolume);

		if ((err = GetMoviesError()) != noErr)
			throw TImageException(getFilePath(), "can't create video track");

		m_dataRef = nil;
		m_hMovieData = NewHandle(0);

		// Construct the Handle data reference
		err = PtrToHand(&m_hMovieData, &m_dataRef, sizeof(Handle));

		if ((err = GetMoviesError()) != noErr)
			throw TImageException(getFilePath(), "can't create Data Ref");

		m_videoMedia = NewTrackMedia(m_videoTrack, VideoMediaType, (TINT32)m_frameRate, m_dataRef, HandleDataHandlerSubType);

		OpenADefaultComponent(MovieExportType, '3gpp', &m_myExporter);

		//  err = (short)MovieExportDoUserDialog(m_myExporter, m_movie, 0, 0, 0, &m_cancelled);

		//  if (m_cancelled)
		//	  throw TImageException(getFilePath(), "User abort of 3GP render");
		if ((err = GetMoviesError()) != noErr)
			throw TImageException(getFilePath(), "can't create video media");
		if ((err = BeginMediaEdits(m_videoMedia)) != noErr)
			throw TImageException(getFilePath(), "can't begin edit video media");
		frame.left = 0;
		frame.top = 0;
		frame.right = lx;
		frame.bottom = ly;

#if 0
  if ((err = NewGWorld(&(m_gworld), pixSize * 8, &frame, 0, 0, 0))!=noErr)
#else /* Mac OSX 10.7 later */
		if ((err = QTNewGWorld(&(m_gworld), pixSize * 8, &frame, 0, 0, 0)) != noErr)
#endif
		throw TImageException(getFilePath(), "can't create movie buffer");
#ifdef WIN32
		LockPixels(m_gworld->portPixMap);
		if ((err = GetMaxCompressionSize(m_gworld->portPixMap, &frame, 0,
										 quality, compression, anyCodec,
										 &max_compressed_size)) != noErr)
			throw TImageException(getFilePath(), "can't get max compression size");

#else

#if 0
  PixMapHandle pixmapH = GetPortPixMap (m_gworld);
  LockPixels(pixmapH);
#else
		PixMapHandle pixmapH = NULL;
#endif
		max_compressed_size = lx * ly * 4 * 20;

/*if ((err = GetMaxCompressionSize(pixmapH, &frame, 0, 
                                quality,  compression,anyCodec, 
				 &max_compressed_size))!=noErr)
    throw TImageException(getFilePath(), "can't get max compression size");*/
#endif

		m_compressedData = NewHandle(max_compressed_size);

		if ((err = MemError()) != noErr)
			throw TImageException(getFilePath(), "can't allocate compressed data for movie");

		MoveHHi(m_compressedData);
		HLock(m_compressedData);
		if ((err = MemError()) != noErr)
			throw TImageException(getFilePath(), "can't allocate img handle");

#if 0
  m_pixmap = GetGWorldPixMap(m_gworld);
  
  
  if (!LockPixels(m_pixmap))
    throw TImageException(getFilePath(), "can't lock pixels");

  buf    = (PixelXRGB*) GetPixBaseAddr(m_pixmap);
#else
		m_pixmap = NULL;
		buf = NULL;
#endif
		buf_lx = lx;
		buf_ly = ly;

		m_initDone = true;
	}

	unsigned short rowBytes = (unsigned short)(((short)(*(m_pixmap))->rowBytes & ~(3 << 14)));

	Rect frame;
	ImageDescriptionHandle img_descr;
	Ptr compressed_data_ptr;
	QDErr err;

	frame.left = 0;
	frame.top = 0;
	frame.right = lx;
	frame.bottom = ly;

	TRasterP ras = image->getRaster();
#ifdef WIN32
	compressed_data_ptr = StripAddress(*(m_compressedData));
	copy(ras, buf, buf_lx, buf_ly);
#else
	compressed_data_ptr = *m_compressedData;
	copy(ras, buf, buf_lx, buf_ly, rowBytes);
#endif
	img_descr = (ImageDescriptionHandle)NewHandle(4);

#ifdef WIN32
	if ((err = CompressImage(m_gworld->portPixMap,
							 &frame,
							 quality, compression,
							 img_descr, compressed_data_ptr)) != noErr)
		throw TImageException(getFilePath(), "can't compress image");
#else

#if 0
 PixMapHandle pixmapH = GetPortPixMap (m_gworld);
 if ((err = CompressImage(pixmapH, 
	                 &frame, 
  			 codecNormalQuality, kJPEGCodecType,
			 img_descr, compressed_data_ptr))!=noErr)
	{
  throw TImageException(getFilePath(), "can't compress image");
}
#endif
#endif

	if ((err = AddMediaSample(m_videoMedia, m_compressedData, 0,
							  (*img_descr)->dataSize, 1,
							  (SampleDescriptionHandle)img_descr,
							  1, 0, 0)) != noErr)
		throw TImageException(getFilePath(), "can't add image to movie media");

	DisposeHandle((Handle)img_descr);
}
Example #8
PixMapHandle get_shape_pixmap(
	short shape,
	boolean force_copy)
{
	OSErr error;
	struct collection_definition *collection;
	struct low_level_shape_definition *low_level_shape;
	struct bitmap_definition *bitmap;
	short collection_index, low_level_shape_index, clut_index;

	collection_index= GET_COLLECTION(GET_DESCRIPTOR_COLLECTION(shape));
	clut_index= GET_COLLECTION_CLUT(GET_DESCRIPTOR_COLLECTION(shape));
	low_level_shape_index= GET_DESCRIPTOR_SHAPE(shape);
 	collection= get_collection_definition(collection_index);

	switch (interface_bit_depth)
	{
		case 8:
			/* if the ctSeed of our offscreen pixmap is different from the ctSeed of the world
				device then the color environment has changed since the last call to our routine,
				and we just HandToHand the device's ctTable and throw away our old one. */
			if ((*(*(*world_device)->gdPMap)->pmTable)->ctSeed!=(*(*hollow_pixmap)->pmTable)->ctSeed)
			{
				DisposeHandle((Handle)(*hollow_pixmap)->pmTable);
				
				(*hollow_pixmap)->pmTable= (*(*world_device)->gdPMap)->pmTable;	
				HLock((Handle)hollow_pixmap);
				error= HandToHand((Handle *)&(*hollow_pixmap)->pmTable);
				HUnlock((Handle)hollow_pixmap);
				
				assert(error==noErr);
				
				/* this is a device color table so we don't clear ctFlags (well, it isn't a device
					color table anymore, but it's formatted like one) */
			}
			break;
		
		case 16:
		case 32:
			if (!hollow_pixmap_color_table)
			{
				hollow_pixmap_color_table= (CTabHandle) NewHandle(sizeof(ColorTable)+PIXEL8_MAXIMUM_COLORS*sizeof(ColorSpec));
				MoveHHi((Handle)hollow_pixmap_color_table);
				HLock((Handle)hollow_pixmap_color_table);
				assert(hollow_pixmap_color_table);
			}
			
			(*hollow_pixmap_color_table)->ctSeed= GetCTSeed();
			(*hollow_pixmap_color_table)->ctSize= collection->color_count-NUMBER_OF_PRIVATE_COLORS-1;
			(*hollow_pixmap_color_table)->ctFlags= 0;
			
			BlockMove(get_collection_colors(collection_index, clut_index)+NUMBER_OF_PRIVATE_COLORS, &(*hollow_pixmap_color_table)->ctTable,
				(collection->color_count-NUMBER_OF_PRIVATE_COLORS)*sizeof(ColorSpec));
			
			(*hollow_pixmap)->pmTable= hollow_pixmap_color_table;
			
			break;
		
		default:
			halt();
	}

	low_level_shape= get_low_level_shape_definition(collection_index, low_level_shape_index);
	bitmap= get_bitmap_definition(collection_index, low_level_shape->bitmap_index);
	
	/* setup the pixmap (can't wait to change this for Copland) */
	SetRect(&(*hollow_pixmap)->bounds, 0, 0, bitmap->width, bitmap->height);
	(*hollow_pixmap)->rowBytes= bitmap->width|0x8000;
	(*hollow_pixmap)->baseAddr= (Ptr)bitmap->row_addresses[0];
	
	if (bitmap->bytes_per_row==NONE) /* is this a compressed shape? */
	{
		register pixel8 *read, *write;
		register short run_count;
		short x;

		/* for now all RLE shapes are in column-order */
		assert(bitmap->flags&_COLUMN_ORDER_BIT);
		
		/* don't overflow the buffer */
		assert(bitmap->width*bitmap->height<=HOLLOW_PIXMAP_BUFFER_SIZE);
		
		/* decompress column-order shape into row-order buffer */
		for (x=0;x<bitmap->width;x+=1)
		{
			short bytes_per_row= bitmap->width;
			
			write= hollow_data+x;
			read= bitmap->row_addresses[x];
			while ((run_count= *(short *)read)!=0)
			{
				read+= sizeof(short); /* advance past the run-length word we just read */
				if (run_count<0) while ((run_count+=1)<=0) *write= iBLACK, write+= bytes_per_row; /* fill transparent areas with black */
					else while ((run_count-=1)>=0) *write= *read++, write+= bytes_per_row; /* copy shape data */
			}
		}

		(*hollow_pixmap)->baseAddr= (Ptr)hollow_data;
	}
	else
	{
		/* if this is a raw, row-order shape then only copy it if we've been asked to */
		if (force_copy)
		{
			assert(bitmap->width*bitmap->height<=HOLLOW_PIXMAP_BUFFER_SIZE);
			BlockMove(bitmap->row_addresses[0], hollow_data, bitmap->width*bitmap->height);
			(*hollow_pixmap)->baseAddr= (Ptr)hollow_data;
		}
	}
	
	return hollow_pixmap;
}
Example #9
PixMapHandle editor_get_shape_pixmap(
	short shape)
{
	OSErr error;
	struct collection_definition *collection;
	struct low_level_shape_definition *low_level_shape;
	struct bitmap_definition *bitmap;
	short collection_index, low_level_shape_index, clut_index;

	collection_index= GET_COLLECTION(GET_DESCRIPTOR_COLLECTION(shape));
	clut_index= GET_COLLECTION_CLUT(GET_DESCRIPTOR_COLLECTION(shape));
	low_level_shape_index= GET_DESCRIPTOR_SHAPE(shape);
 	collection= get_collection_definition(collection_index);

	switch (interface_bit_depth)
	{
		case 8:
			/* if the ctSeed of our offscreen pixmap is different from the ctSeed of the world
				device then the color environment has changed since the last call to our routine,
				and we just HandToHand the device's ctTable and throw away our old one. */
			if ((*(*(*world_device)->gdPMap)->pmTable)->ctSeed!=(*(*hollow_pixmap)->pmTable)->ctSeed)
			{
				DisposeHandle((Handle)(*hollow_pixmap)->pmTable);
				
				(*hollow_pixmap)->pmTable= (*(*world_device)->gdPMap)->pmTable;	
				HLock((Handle)hollow_pixmap);
				error= HandToHand((Handle *)&(*hollow_pixmap)->pmTable);
				HUnlock((Handle)hollow_pixmap);
				
				assert(error==noErr);
				
				/* this is a device color table so we don't clear ctFlags (well, it isn't a device
					color table anymore, but it's formatted like one) */
			}
			break;
		
		case 16:
		case 32:
			if (!hollow_pixmap_color_table)
			{
				hollow_pixmap_color_table= (CTabHandle) NewHandle(sizeof(ColorTable)+PIXEL8_MAXIMUM_COLORS*sizeof(ColorSpec));
				MoveHHi((Handle)hollow_pixmap_color_table);
				HLock((Handle)hollow_pixmap_color_table);
				assert(hollow_pixmap_color_table);
			}
			
			(*hollow_pixmap_color_table)->ctSeed= GetCTSeed();
			(*hollow_pixmap_color_table)->ctSize= collection->color_count-NUMBER_OF_PRIVATE_COLORS-1;
			(*hollow_pixmap_color_table)->ctFlags= 0;
			
			BlockMove(get_collection_colors(collection_index, clut_index)+NUMBER_OF_PRIVATE_COLORS, &(*hollow_pixmap_color_table)->ctTable,
				(collection->color_count-NUMBER_OF_PRIVATE_COLORS)*sizeof(ColorSpec));
			
			(*hollow_pixmap)->pmTable= hollow_pixmap_color_table;
			
			break;
		
		default:
			halt();
	}

	low_level_shape= get_low_level_shape_definition(collection_index, low_level_shape_index);
	bitmap= get_bitmap_definition(collection_index, low_level_shape->bitmap_index);
	
	/* setup the pixmap (can't wait to change this for Copland) */
	SetRect(&(*hollow_pixmap)->bounds, 0, 0, bitmap->width, bitmap->height);
	(*hollow_pixmap)->rowBytes= bitmap->width|0x8000;
	(*hollow_pixmap)->baseAddr= (Ptr)bitmap->row_addresses[0];
	
	/* Rotate if necessary */
	if ((bitmap->flags&_COLUMN_ORDER_BIT) && bitmap->width==128 && bitmap->height==128)
	{
		static char *buffer= NULL;

		if(!buffer)
		{	
			buffer= (char *)malloc(bitmap->width*bitmap->height*sizeof(pixel8));
		}
	
		if(buffer)
		{
			short x, y;
			pixel8 *dest= (pixel8 *) buffer;

			/* copy the column-order shape into a row-order buffer */
			for (x=0;x<bitmap->width;x+=1)
			{
				for(y= 0; y<bitmap->height; y+=1)
				{
					*dest++= bitmap->row_addresses[y][x];
				}
			}

			(*hollow_pixmap)->baseAddr= buffer;
		}
	} 
	
	return hollow_pixmap;
}