Example #1
File: track.c Project: one-k/rmov
/*
  call-seq: scale(width, height)
  
  Scale the track's size by width and height respectively.
  
  The values passed are relative floats, where "1" is the current size.
*/
static VALUE track_scale(VALUE obj, VALUE width, VALUE height)
{
  MatrixRecord matrix;
  GetTrackMatrix(TRACK(obj), &matrix);
  ScaleMatrix(&matrix, FloatToFixed(NUM2DBL(width)), FloatToFixed(NUM2DBL(height)), 0, 0);
  SetTrackMatrix(TRACK(obj), &matrix);
  return obj;
}
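All of the QuickTime examples on this page feed floating-point values through FloatToFixed, which in the Carbon/QuickTime headers converts to a signed 16.16 Fixed value (fixed1 == 0x00010000). The following is a minimal standalone sketch of that conversion using a local ToFixed16 helper rather than the real FixMath.h macro, so the bit patterns passed to ScaleMatrix above are easy to see.

/* Minimal sketch, not the real FixMath.h macro: QuickTime's Fixed is a signed
   16.16 value, so a scale factor of "1" (the current size in track_scale
   above) maps to 0x00010000. */
#include <stdio.h>
#include <stdint.h>

static int32_t ToFixed16(double v)      /* stand-in for FloatToFixed */
{
    return (int32_t)(v * 65536.0);      /* 65536 == fixed1 == 0x00010000 */
}

int main(void)
{
    printf("1.0  -> 0x%08X\n", (unsigned)ToFixed16(1.0));   /* 0x00010000 */
    printf("1.5  -> 0x%08X\n", (unsigned)ToFixed16(1.5));   /* 0x00018000 */
    printf("0.25 -> 0x%08X\n", (unsigned)ToFixed16(0.25));  /* 0x00004000 */
    return 0;
}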
Example #2
File: track.c Project: one-k/rmov
/*
  call-seq: translate(x, y)
  
  Offset a track's position by x and y values respectively.
  
  Values should be in pixels.
*/
static VALUE track_translate(VALUE obj, VALUE x, VALUE y)
{
  MatrixRecord matrix;
  GetTrackMatrix(TRACK(obj), &matrix);
  TranslateMatrix(&matrix, FloatToFixed(NUM2DBL(x)), FloatToFixed(NUM2DBL(y)));
  SetTrackMatrix(TRACK(obj), &matrix);
  return obj;
}
Example #3
static ATSUStyle
CreateSizedCopyOfStyle(ATSUStyle inStyle, cairo_matrix_t *scale)
{
    ATSUStyle style;
    OSStatus err;


    // Set the style's size
    CGAffineTransform theTransform =
        CGAffineTransformMakeWithCairoFontScale(scale);
    Fixed theSize =
        FloatToFixed(CGSizeApplyAffineTransform
                     (CGSizeMake(1.0, 1.0), theTransform).height);
    const ATSUAttributeTag theFontStyleTags[] = { kATSUSizeTag };
    const ByteCount theFontStyleSizes[] = { sizeof(Fixed) };
    ATSUAttributeValuePtr theFontStyleValues[] = { &theSize };

    err = ATSUCreateAndCopyStyle(inStyle, &style);

    err = ATSUSetAttributes(style,
                            sizeof(theFontStyleTags) /
                            sizeof(ATSUAttributeTag), theFontStyleTags,
                            theFontStyleSizes, theFontStyleValues);

    return style;
}
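The function above applies the cairo scale matrix to a unit CGSize and stores the resulting height under kATSUSizeTag as a Fixed. A minimal sketch of just that size computation, assuming the cairo font matrix is a pure scale (the real code builds a CGAffineTransform from it via CGAffineTransformMakeWithCairoFontScale); the yy value of 12 is illustrative only.

/* Hedged sketch of the size math in CreateSizedCopyOfStyle: transforming the
   unit size (1.0, 1.0) by a pure yy scale of 12 yields a height of 12.0,
   which FloatToFixed turns into the 16.16 value stored under kATSUSizeTag. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    double yy = 12.0;                               /* hypothetical cairo scale.yy */
    double height = 1.0 * yy;                       /* CGSizeApplyAffineTransform on (1,1) */
    int32_t theSize = (int32_t)(height * 65536.0);  /* FloatToFixed */
    printf("kATSUSizeTag value = 0x%08X (%.1f pt)\n", (unsigned)theSize, height);
    return 0;
}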
/** Internal helper function: Returns the fps rate of a movie and optionally
 *  the total number of video frames in the movie. The frame count is determined
 *  by stepping through the whole movie and counting frames, which can take
 *  significant time on big movie files.
 *
 *  Always returns fps as a double. Counts and returns the full frame count
 *  only if nrframes is non-NULL.
 */
double PsychDetermineMovieFramecountAndFps(Movie theMovie, int* nrframes)
{
    // Count total number of videoframes: This code is derived from Apple
    // example code.
    long		myCount = -1;
    short		myFlags;
    TimeValue           myTime = 0;
    TimeValue           myDuration = 0;
    OSType		myTypes[1];
    // We want video samples.
    myTypes[0] = VisualMediaCharacteristic;
    // We want to begin with the first frame in the movie:
    myFlags = nextTimeStep + nextTimeEdgeOK;
    
    // We count only the first 3 frames if nrframes==NULL, i.e., only the
    // fps is requested; if a frame count is requested, we count all frames.
    while (myTime >= 0 && (myCount<2 || nrframes!=NULL)) {
        myCount++;        
        // look for the next frame in the track; when there are no more frames,
        // myTime is set to -1, so we'll exit the while loop
        GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, myTime, FloatToFixed(1), &myTime, &myDuration);        
        // after the first interesting time, don't include the time we're currently at
        myFlags = nextTimeStep;
    }    
    
    // Return optional count of frames:
    if (nrframes) *nrframes = (int) myCount;
    
    GoToBeginningOfMovie(theMovie);
    MoviesTask(theMovie, 0);
    
    // Compute and return frame rate in fps as (Ticks per second / Duration of single frame in ticks): 
    return((double) GetMovieTimeScale(theMovie) / (double) myDuration);    
}
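A worked example of the return expression above, with illustrative numbers only: QuickTime expresses time in "time scale" units per second, so a movie whose time scale is 600 units per second and whose last stepped frame lasted 20 units is reported as 600 / 20 = 30 fps.

#include <stdio.h>

int main(void)
{
    long timeScale  = 600;   /* hypothetical GetMovieTimeScale() result */
    long myDuration = 20;    /* hypothetical duration of one frame in those units */
    printf("fps = %f\n", (double)timeScale / (double)myDuration);  /* 30.0 */
    return 0;
}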
static void exportCGImageToFileWithQT(CGImageRef image, CFURLRef url,
					    CFStringRef outputFormat,
					    float dpi)
{
	Handle						dataRef = NULL;
	OSType						dataRefType;
	GraphicsExportComponent		graphicsExporter;
	unsigned long				sizeWritten;
	ComponentResult				result;
	OSType						imageExportType;

	if(CFStringCompare(outputFormat, kUTTypeTIFF, kCFCompareCaseInsensitive) == kCFCompareEqualTo){
	    imageExportType = kQTFileTypeTIFF;
	}else if(CFStringCompare(outputFormat, kUTTypePNG, kCFCompareCaseInsensitive) == kCFCompareEqualTo){
	    imageExportType = kQTFileTypePNG;
	}else if(CFStringCompare(outputFormat, kUTTypeJPEG, kCFCompareCaseInsensitive) == kCFCompareEqualTo){
	    imageExportType = kQTFileTypeJPEG;
	}else{
	    char formatName[128] = "";
	    CFStringGetCString(outputFormat, formatName, sizeof(formatName), kCFStringEncodingUTF8);
	    fprintf(stderr, "Requested image export format %s unsupported\n", formatName);
	    return;
	}
	
	result = QTNewDataReferenceFromCFURL(url, 0,  &dataRef, &dataRefType);
	if(!result){
		result = OpenADefaultComponent(GraphicsExporterComponentType,
				    imageExportType, &graphicsExporter);
		if(!result){
			result = GraphicsExportSetInputCGImage(graphicsExporter, 
						image);
			if(!result)
				result = GraphicsExportSetResolution(graphicsExporter, 
							FloatToFixed(dpi), FloatToFixed(dpi));
			if(!result)
				result = GraphicsExportSetOutputDataReference(
							graphicsExporter, dataRef, dataRefType);
			if(!result)
				result = GraphicsExportDoExport(
							graphicsExporter, &sizeWritten);
			CloseComponent(graphicsExporter);
		}
	}
	if(dataRef)
		DisposeHandle(dataRef);
	if(result)
		fprintf(stderr, "QT export got bad result = %d!\n", (int)result);
	return;
}
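A hedged usage sketch for the exporter above, assuming the same QuickTime/ApplicationServices headers the example itself needs and a CGImageRef named image produced elsewhere; the path and dpi are illustrative only.

/* Write an existing CGImageRef to /tmp/out.png at 144 dpi. */
CFURLRef url = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                             CFSTR("/tmp/out.png"),
                                             kCFURLPOSIXPathStyle, false);
if (url) {
    exportCGImageToFileWithQT(image, url, kUTTypePNG, 144.0f);
    CFRelease(url);
}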
Example #6
File: track.c Project: one-k/rmov
/*
  call-seq: rotate(degrees)
  
  Rotate the track by the given number of degrees.
*/
static VALUE track_rotate(VALUE obj, VALUE degrees)
{
  MatrixRecord matrix;
  GetTrackMatrix(TRACK(obj), &matrix);
  RotateMatrix(&matrix, FloatToFixed(NUM2DBL(degrees)), 0, 0);
  SetTrackMatrix(TRACK(obj), &matrix);
  return obj;
}
Example #7
File: ff_private.c Project: klimooo/perian
static void get_track_dimensions_for_codec(AVStream *st, Fixed *fixedWidth, Fixed *fixedHeight)
{
    AVCodecContext *codec = st->codec;
    *fixedHeight = IntToFixed(codec->height);

    if (!st->sample_aspect_ratio.num) *fixedWidth = IntToFixed(codec->width);
    else *fixedWidth = FloatToFixed(codec->width * av_q2d(st->sample_aspect_ratio));
}
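A worked example of the aspect-ratio branch above, with illustrative numbers: a stream coded 720 pixels wide with a hypothetical 4:3 sample aspect ratio should display at 720 * 4/3 = 960 pixels, and that value is handed to QuickTime as a 16.16 Fixed.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int    coded_width = 720;
    double sar         = 4.0 / 3.0;   /* what av_q2d(st->sample_aspect_ratio) would return */
    double disp_width  = coded_width * sar;
    int32_t fixedWidth = (int32_t)(disp_width * 65536.0);   /* FloatToFixed */
    printf("display width = %.0f px, Fixed = 0x%08X\n", disp_width, (unsigned)fixedWidth);
    return 0;
}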
Example #8
File: Font.cpp Project: 1vanK/Urho3D
FontFace* Font::GetFaceBitmap(float pointSize)
{
    SharedPtr<FontFace> newFace(new FontFaceBitmap(this));
    if (!newFace->Load(&fontData_[0], fontDataSize_, pointSize))
        return nullptr;

    int key = FloatToFixed(pointSize);
    faces_[key] = newFace;
    return newFace;
}
Example #9
File: feedback.c Project: Ionic/nx-libs
/*
 * Put a vertex into the feedback buffer.
 */
void _mesa_feedback_vertex( GLcontext *ctx,
                            const GLfloat win[4],
                            const GLfloat color[4],
                            GLfloat index,
                            const GLfloat texcoord[4] )
{
#if 0
   {
      /* snap window x, y to fractional pixel position */
      const GLint snapMask = ~((FIXED_ONE / (1 << SUB_PIXEL_BITS)) - 1);
      GLfixed x, y;
      x = FloatToFixed(win[0]) & snapMask;
      y = FloatToFixed(win[1]) & snapMask;
      FEEDBACK_TOKEN(ctx, FixedToFloat(x));
      FEEDBACK_TOKEN(ctx, FixedToFloat(y) );
   }
#else
   FEEDBACK_TOKEN( ctx, win[0] );
   FEEDBACK_TOKEN( ctx, win[1] );
#endif
   if (ctx->Feedback._Mask & FB_3D) {
      FEEDBACK_TOKEN( ctx, win[2] );
   }
   if (ctx->Feedback._Mask & FB_4D) {
      FEEDBACK_TOKEN( ctx, win[3] );
   }
   if (ctx->Feedback._Mask & FB_INDEX) {
      FEEDBACK_TOKEN( ctx, (GLfloat) index );
   }
   if (ctx->Feedback._Mask & FB_COLOR) {
      FEEDBACK_TOKEN( ctx, color[0] );
      FEEDBACK_TOKEN( ctx, color[1] );
      FEEDBACK_TOKEN( ctx, color[2] );
      FEEDBACK_TOKEN( ctx, color[3] );
   }
   if (ctx->Feedback._Mask & FB_TEXTURE) {
      FEEDBACK_TOKEN( ctx, texcoord[0] );
      FEEDBACK_TOKEN( ctx, texcoord[1] );
      FEEDBACK_TOKEN( ctx, texcoord[2] );
      FEEDBACK_TOKEN( ctx, texcoord[3] );
   }
}
Example #10
void P_SetSlope (secplane_t *plane, bool setCeil, int xyangi, int zangi, const DVector3 &pos)
{
	DAngle xyang;
	DAngle zang;

	if (zangi >= 180)
	{
		zang = 179.;
	}
	else if (zangi <= 0)
	{
		zang = 1.;
	}
	else
	{
		zang = (double)zangi;
	}
	if (setCeil)
	{
		zang += 180.;
	}

	xyang = (double)xyangi;

	DVector3 norm;

	if (ib_compatflags & BCOMPATF_SETSLOPEOVERFLOW)
	{
		// We have to consider an integer multiplication overflow here.
		norm[0] = FixedToFloat(FloatToFixed(zang.Cos()) * FloatToFixed(xyang.Cos()));
		norm[1] = FixedToFloat(FloatToFixed(zang.Cos()) * FloatToFixed(xyang.Sin()));
	}
	else
	{
		norm[0] = zang.Cos() * xyang.Cos();
		norm[1] = zang.Cos() * xyang.Sin();
	}
	norm[2] = zang.Sin();
	norm.MakeUnit();
	double dist = -norm[0] * pos.X - norm[1] * pos.Y - norm[2] * pos.Z;
	plane->set(norm[0], norm[1], norm[2], dist);
}
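The BCOMPATF_SETSLOPEOVERFLOW branch above deliberately reproduces an old bug: multiplying two 16.16 fixed-point cosines in 32-bit arithmetic keeps an extra factor of 65536 and can wrap around, whereas a correct fixed-point multiply would widen and shift the product right by 16. A minimal sketch, assuming the common FloatToFixed(f) == f * 65536 and FixedToFloat(x) == x / 65536 definitions (the engine's real conversions may differ in detail):

#include <stdio.h>
#include <stdint.h>

static int32_t ToFixed(double f)  { return (int32_t)(f * 65536.0); }
static double  ToFloat(int32_t x) { return x / 65536.0; }

int main(void)
{
    double a = 0.9, b = 0.8;                      /* stand-ins for zang.Cos(), xyang.Cos() */

    /* Correct 16.16 multiply: widen, then shift the product back down by 16. */
    int64_t wide    = (int64_t)ToFixed(a) * (int64_t)ToFixed(b);
    double  correct = ToFloat((int32_t)(wide >> 16));            /* ~0.72 */

    /* Compat path: 32-bit product keeps the extra 65536 factor and wraps. */
    uint32_t wrapped = (uint32_t)ToFixed(a) * (uint32_t)ToFixed(b);
    double   compat  = ToFloat((int32_t)wrapped);                /* wildly off, on purpose */

    printf("correct = %f, compat = %f\n", correct, compat);
    return 0;
}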
Example #11
File: getset.c Project: ogochan/libmondai
extern	Bool
SetValueFloat(
	ValueStruct	*val,
	double		fval)
{
	time_t	wt;
	Bool	rc;
	char	str[SIZE_NUMBUF+1];

	if		(  val  ==  NULL  ) {
		fprintf(stderr,"no ValueStruct\n");
		rc = FALSE;
	} else {
		ValueIsNonNil(val);
		switch	(ValueType(val)) {
		  case	GL_TYPE_CHAR:
		  case	GL_TYPE_VARCHAR:
		  case	GL_TYPE_DBCODE:
		  case	GL_TYPE_TEXT:
		  case	GL_TYPE_SYMBOL:
			sprintf(str,"%f",fval);
			rc = SetValueString(val,str,NULL);
			break;
		  case	GL_TYPE_NUMBER:
			FloatToFixed(&ValueFixed(val),fval);
			rc = TRUE;
			break;
		  case	GL_TYPE_INT:
			ValueInteger(val) = (int)fval;
			rc = TRUE;
			break;
		  case	GL_TYPE_FLOAT:
			ValueFloat(val) = fval;
			rc = TRUE;
			break;
		  case	GL_TYPE_BOOL:
			ValueBool(val) = ( fval == 0 ) ? FALSE : TRUE;
			rc = TRUE;
			break;
		  case	GL_TYPE_TIMESTAMP:
		  case	GL_TYPE_DATE:
		  case	GL_TYPE_TIME:
			wt = (time_t)fval;
			rc = (  localtime_r(&wt,&ValueDateTime(val))  !=  NULL  ) ? TRUE : FALSE;
			break;
		  default:
			ValueIsNil(val);
			rc = FALSE;
			break;
		}
	}
	return	(rc);
}
Example #12
static struct opengl_sc_bounding_box_t *opengl_sc_bounding_box_create(struct opengl_pa_triangle_t *triangle)
{
	struct opengl_sc_bounding_box_t *bbox;
	float xmin;
	float xmax;
	float ymin;
	float ymax;
	float span;
	int snapMask;
	int s;
	int k;

	/* Allocate */
	bbox = xcalloc(1, sizeof(struct opengl_sc_bounding_box_t));	

	/* Find the length of the span */
	xmin = MIN(triangle->vtx0->pos[X_COMP], MIN(triangle->vtx1->pos[X_COMP], triangle->vtx2->pos[X_COMP]));
	xmax = MAX(triangle->vtx0->pos[X_COMP], MAX(triangle->vtx1->pos[X_COMP], triangle->vtx2->pos[X_COMP]));
	ymin = MIN(triangle->vtx0->pos[Y_COMP], MIN(triangle->vtx1->pos[Y_COMP], triangle->vtx2->pos[Y_COMP]));
	ymax = MAX(triangle->vtx0->pos[Y_COMP], MAX(triangle->vtx1->pos[Y_COMP], triangle->vtx2->pos[Y_COMP]));
	span = MAX(xmax - xmin, ymax - ymin);
	s = IROUND(span);

	/* Calculate bounding box size */
	k = 0;
	do
	{
		k++;
	} while (s > (2<<k));

	/* Snapping to nearest subpixel grid */
	snapMask = ~((FIXED_ONE / (1 << SUB_PIXEL_BITS)) - 1); 
	bbox->x0 = FixedToInt(FloatToFixed(xmin - 0.5F) & snapMask);
	bbox->y0 = FixedToInt(FloatToFixed(ymin - 0.5F) & snapMask);
	bbox->size = 2<<k;

	/* Return */
	return bbox;
}
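The snapMask line above rounds the fixed-point minimum coordinates down onto the sub-pixel grid. A minimal sketch of that masking with assumed constants FIXED_ONE == 1 << 16 and SUB_PIXEL_BITS == 4 (the rasterizer's actual fixed-point format may differ): the mask clears the low 12 bits, so coordinates land on multiples of 1/16 pixel.

#include <stdio.h>
#include <stdint.h>

#define ASSUMED_FIXED_ONE      (1 << 16)   /* assumed 16.16 fixed-point format */
#define ASSUMED_SUB_PIXEL_BITS 4           /* assumed 1/16-pixel grid */

int main(void)
{
    int snapMask = ~((ASSUMED_FIXED_ONE / (1 << ASSUMED_SUB_PIXEL_BITS)) - 1);  /* ~0xFFF */
    float   xmin = 10.30f;
    int32_t fx   = (int32_t)((xmin - 0.5f) * ASSUMED_FIXED_ONE);  /* FloatToFixed(xmin - 0.5F) */
    int32_t snapped = fx & snapMask;
    /* 9.80 in 16.16 is 0x0009CCCC; masking gives 0x0009C000 == 9.75 == 9 + 12/16 pixel. */
    printf("raw = 0x%08X, snapped = 0x%08X (%.4f px)\n",
           (unsigned)fx, (unsigned)snapped, snapped / (float)ASSUMED_FIXED_ONE);
    return 0;
}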
Example #13
void SkConvolutionFilter1D::AddFilter(int filterOffset,
                                      const float* filterValues,
                                      int filterLength) {
    SkASSERT(filterLength > 0);

    SkTArray<ConvolutionFixed> fixedValues;
    fixedValues.reset(filterLength);

    for (int i = 0; i < filterLength; ++i) {
        fixedValues.push_back(FloatToFixed(filterValues[i]));
    }

    AddFilter(filterOffset, &fixedValues[0], filterLength);
}
Example #14
/******************************************************************************
*
*        Name: SendAllocateCMMessage
*
* Description: Creates and sends an Allocate CM message to the MC/CM.
* 
*  Attributes: azimuth - azimuth to direct the CM
*              jamCode - jamCode to direct the CM to use
*
*      Return: True if the message was sent successfully, false otherwise.
*
*      Author: Austin Dionne
*
******************************************************************************/
bool MCCMTransceiver::SendAllocateCMMessage(float azimuth, int jamCode)
{
  bool success;
  struct AllocateCMMessage message;
  message.header.length = sizeof(message);
  message.header.ID = ALLOCATE_CM_ID;
  message.data.azimuth = FloatToFixed(azimuth);
  message.data.jamCode = jamCode;

  sendSemaphore.Obtain(Semaphore::WAIT_FOREVER);
  success = socket.Send((char*)&message, sizeof(message));
  if (!success)
  {
    MessageLog::Instance()->
      LogMessage("Failed sending AllocateCMMessage in MCCMTransceiver");
  }
  sendSemaphore.Release();
  return success;
}
Example #15
void
qtCanvas::Impl::setupSCComponent()
{
	OSErr err;
	
    err = EnterMovies();
    CheckError (err, "EnterMovies error" );

	mSCComponent = OpenDefaultComponent(
						StandardCompressionType,
						StandardCompressionSubType);
	CheckMoviesError("OpenDefaultComponent");

	if (sSCSettings) {
		SCSetInfo(mSCComponent, scSettingsStateType, &sSCSettings);
	}
	else {
		SCSpatialSettings	spatial;
		SCTemporalSettings	temporal;
		long				preference;
		CodecFlags			codecFlags;
		
		spatial.codecType = kAnimationCodecType;
		spatial.codec = NULL;
		spatial.depth = 32; // reset when the preview is set up
		spatial.spatialQuality = codecNormalQuality;
		
		temporal.temporalQuality = codecNormalQuality;
		temporal.frameRate = FloatToFixed(15.0);
		temporal.keyFrameRate = FixedToInt(temporal.frameRate) * 2;
		
		preference = scListEveryCodec;
		//preference |= scShowBestDepth;
		//preference |= scUseMovableModal;
		
		codecFlags = codecFlagUpdatePreviousComp;
		
		SCSetInfo(mSCComponent, scSpatialSettingsType, &spatial);
		SCSetInfo(mSCComponent, scTemporalSettingsType, &temporal);
		SCSetInfo(mSCComponent, scPreferenceFlagsType, &preference);
		SCSetInfo(mSCComponent, scCodecFlagsType, &codecFlags);
	}
}
Example #16
/******************************************************************************
*
*        Name: SendUnknownThreatMessage
*
* Description: Creates and sends an Unknown Threat message to the MC/CM.
* 
*  Attributes: azimuth - azimuth of the unknown threat.
*
*      Return: True if the message was sent successfully, false otherwise.
*
*      Author: Austin Dionne
*
******************************************************************************/
bool MCCMTransceiver::SendUnknownThreatMessage(float azimuth)
{
  bool success;
  struct UnknownThreatMessage message;
  message.header.length = sizeof(message);
  message.header.ID = UNKNOWN_THREAT_ID;
  message.data.azimuth = FloatToFixed(azimuth);

  sendSemaphore.Obtain(Semaphore::WAIT_FOREVER);
  success = socket.Send((char*)&message, sizeof(message));
  Sleep(100);
  if (!success)
  {
    MessageLog::Instance()->
      LogMessage("Failed sending UnknownThreatMessage in MCCMTransceiver");
  }
  Sleep(100);
  sendSemaphore.Release();
  return success;
}
Example #17
File: Font.cpp Project: 1vanK/Urho3D
FontFace* Font::GetFace(float pointSize)
{
    // In headless mode, always return null
    auto* graphics = GetSubsystem<Graphics>();
    if (!graphics)
        return nullptr;

    // For bitmap font type, always return the same font face provided by the font's bitmap file regardless of the actual requested point size
    if (fontType_ == FONT_BITMAP)
        pointSize = 0;
    else
        pointSize = Clamp(pointSize, MIN_POINT_SIZE, MAX_POINT_SIZE);

    // For outline fonts, we return the nearest size in 1/64th increments, as that's what FreeType supports.
    int key = FloatToFixed(pointSize);
    HashMap<int, SharedPtr<FontFace> >::Iterator i = faces_.Find(key);
    if (i != faces_.End())
    {
        if (!i->second_->IsDataLost())
            return i->second_;
        else
        {
            // Erase and reload face if texture data lost (OpenGL mode only)
            faces_.Erase(i);
        }
    }

    URHO3D_PROFILE(GetFontFace);

    switch (fontType_)
    {
    case FONT_FREETYPE:
        return GetFaceFreeType(pointSize);

    case FONT_BITMAP:
        return GetFaceBitmap(pointSize);

    default:
        return nullptr;
    }
}
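Per the comment in GetFace above, the face cache is keyed on the point size quantized to 1/64 pt steps (FreeType's 26.6 format). A minimal sketch of that keying using a hypothetical PointSizeKey helper; rounding is assumed here, and the engine's own FloatToFixed may truncate instead.

#include <stdio.h>

/* Hypothetical stand-in for the FloatToFixed keying used above: quantize the
   requested point size to 1/64 pt steps. */
static int PointSizeKey(float pt)
{
    return (int)(pt * 64.0f + 0.5f);
}

int main(void)
{
    printf("12.000 pt -> key %d\n", PointSizeKey(12.000f));  /* 768 */
    printf("12.004 pt -> key %d\n", PointSizeKey(12.004f));  /* 768: same FontFace reused */
    printf("12.020 pt -> key %d\n", PointSizeKey(12.020f));  /* 769: a new face gets loaded */
    return 0;
}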
/*
 *  PsychQTPlaybackRate() - Start and stop movie playback, set playback parameters.
 *
 *  moviehandle = Movie to start/stop.
 *  playbackrate = zero == Stop playback, non-zero == Play movie at the specified rate,
 *                 e.g., 1 = forward, 2 = double speed forward, -1 = backward, ...
 *  loop = 0 == Play once. 1 == Loop, i.e., rewind at end of movie and restart.
 *  soundvolume = 0 == Mute sound playback, between 0.0 and 1.0 == Set volume to 0 - 100 %.
 *  Returns the number of frames that had to be dropped to keep playback in sync.
 */
int PsychQTPlaybackRate(int moviehandle, double playbackrate, int loop, double soundvolume)
{
    int dropped = 0;
    Movie   theMovie;
    
    if (moviehandle < 0 || moviehandle >= PSYCH_MAX_MOVIES) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided!");
    }
        
    // Fetch references to objects we need:
    theMovie = movieRecordBANK[moviehandle].theMovie;    
    if (theMovie == NULL) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided. No movie associated with this handle !!!");
    }
    
    if (playbackrate != 0) {
        // Start playback of movie:
        SetMovieAudioMute(theMovie, (soundvolume==0) ? TRUE : FALSE, 0);
        SetMovieVolume(theMovie, (short) (soundvolume * 255.0));
        movieRecordBANK[moviehandle].loopflag = loop;
        movieRecordBANK[moviehandle].last_pts = -1.0;
        movieRecordBANK[moviehandle].nr_droppedframes = 0;
        SetMoviePreferredRate(theMovie, FloatToFixed(playbackrate));
        StartMovie(theMovie);
        MoviesTask(theMovie, 10000);
    }
    else {
        // Stop playback of movie:
        StopMovie(theMovie);
        QTVisualContextTask(movieRecordBANK[moviehandle].QTMovieContext);

        // Output count of dropped frames:
        if ((dropped=movieRecordBANK[moviehandle].nr_droppedframes) > 0) {
            if (PsychPrefStateGet_Verbosity()>2) printf("PTB-INFO: Movie playback had to drop %i frames of movie %i to keep playback in sync.\n", movieRecordBANK[moviehandle].nr_droppedframes, moviehandle); 
        }
    }
    
    return(dropped);
}
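A hedged usage sketch of the function above, as it might appear elsewhere in the same file (stdio.h assumed available there): start looped playback at normal speed and half volume, then stop and collect the dropped-frame count. The handle value is hypothetical and would come from the movie-opening code.

static void demoPlayback(void)
{
    int moviehandle = 0;   /* hypothetical handle from the movie-opening code */
    int dropped;

    PsychQTPlaybackRate(moviehandle, 1.0, 1, 0.5);   /* play forward, loop, 50% volume */
    /* ... fetch and draw movie textures here ... */
    dropped = PsychQTPlaybackRate(moviehandle, 0.0, 0, 0.0);   /* stop playback */
    if (dropped > 0) printf("Playback dropped %d frames.\n", dropped);
}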
Example #19
File: s_bitmap.c Project: aosm/X11
/*
 * Render a bitmap.
 */
void
_swrast_Bitmap( GLcontext *ctx, GLint px, GLint py,
		GLsizei width, GLsizei height,
		const struct gl_pixelstore_attrib *unpack,
		const GLubyte *bitmap )
{
   SWcontext *swrast = SWRAST_CONTEXT(ctx);
   GLint row, col;
   GLuint count = 0;
   struct sw_span span;

   ASSERT(ctx->RenderMode == GL_RENDER);
   ASSERT(bitmap);

   RENDER_START(swrast,ctx);

   if (SWRAST_CONTEXT(ctx)->NewState)
      _swrast_validate_derived( ctx );

   INIT_SPAN(span, GL_BITMAP, width, 0, SPAN_XY);

   if (ctx->Visual.rgbMode) {
      span.interpMask |= SPAN_RGBA;
      span.red   = FloatToFixed(ctx->Current.RasterColor[0] * CHAN_MAXF);
      span.green = FloatToFixed(ctx->Current.RasterColor[1] * CHAN_MAXF);
      span.blue  = FloatToFixed(ctx->Current.RasterColor[2] * CHAN_MAXF);
      span.alpha = FloatToFixed(ctx->Current.RasterColor[3] * CHAN_MAXF);
      span.redStep = span.greenStep = span.blueStep = span.alphaStep = 0;
   }
   else {
      span.interpMask |= SPAN_INDEX;
      span.index = ChanToFixed(ctx->Current.RasterIndex);
      span.indexStep = 0;
   }

   if (ctx->Depth.Test)
      _mesa_span_default_z(ctx, &span);
   if (ctx->Fog.Enabled)
      _mesa_span_default_fog(ctx, &span);
   if (ctx->Texture._EnabledUnits)
      _mesa_span_default_texcoords(ctx, &span);

   for (row = 0; row < height; row++, span.y++) {
      const GLubyte *src = (const GLubyte *) _mesa_image_address( unpack,
                 bitmap, width, height, GL_COLOR_INDEX, GL_BITMAP, 0, row, 0 );

      if (unpack->LsbFirst) {
         /* Lsb first */
         GLubyte mask = 1U << (unpack->SkipPixels & 0x7);
         for (col = 0; col < width; col++) {
            if (*src & mask) {
               span.array->x[count] = px + col;
               span.array->y[count] = py + row;
               count++;
            }
            if (mask == 128U) {
               src++;
               mask = 1U;
            }
            else {
               mask = mask << 1;
            }
         }

         /* get ready for next row */
         if (mask != 1)
            src++;
      }
      else {
         /* Msb first */
         GLubyte mask = 128U >> (unpack->SkipPixels & 0x7);
         for (col = 0; col < width; col++) {
            if (*src & mask) {
               span.array->x[count] = px + col;
               span.array->y[count] = py + row;
               count++;
            }
            if (mask == 1U) {
               src++;
               mask = 128U;
            }
            else {
               mask = mask >> 1;
            }
         }

         /* get ready for next row */
         if (mask != 128)
            src++;
      }

      if (count + width >= MAX_WIDTH || row + 1 == height) {
         /* flush the span */
         span.end = count;
         if (ctx->Visual.rgbMode)
            _mesa_write_rgba_span(ctx, &span);
         else
            _mesa_write_index_span(ctx, &span);
         span.end = 0;
         count = 0;
      }
   }

   RENDER_FINISH(swrast,ctx);
}
Example #20
File: s_span.c Project: hoangduit/reactos
/**
 * Set default fragment attributes for the span using the
 * current raster values.  Used prior to glDraw/CopyPixels
 * and glBitmap.
 */
void
_swrast_span_default_attribs(struct gl_context *ctx, SWspan *span)
{
   GLchan r, g, b, a;
   /* Z*/
   {
      const GLfloat depthMax = ctx->DrawBuffer->_DepthMaxF;
      if (ctx->DrawBuffer->Visual.depthBits <= 16)
         span->z = FloatToFixed(ctx->Current.RasterPos[2] * depthMax + 0.5F);
      else {
         GLfloat tmpf = ctx->Current.RasterPos[2] * depthMax; 
         tmpf = MIN2(tmpf, depthMax);
         span->z = (GLint)tmpf;
      }
      span->zStep = 0;
      span->interpMask |= SPAN_Z;
   }

   /* W (for perspective correction) */
   span->attrStart[FRAG_ATTRIB_WPOS][3] = 1.0;
   span->attrStepX[FRAG_ATTRIB_WPOS][3] = 0.0;
   span->attrStepY[FRAG_ATTRIB_WPOS][3] = 0.0;

   /* primary color, or color index */
   UNCLAMPED_FLOAT_TO_CHAN(r, ctx->Current.RasterColor[0]);
   UNCLAMPED_FLOAT_TO_CHAN(g, ctx->Current.RasterColor[1]);
   UNCLAMPED_FLOAT_TO_CHAN(b, ctx->Current.RasterColor[2]);
   UNCLAMPED_FLOAT_TO_CHAN(a, ctx->Current.RasterColor[3]);
#if CHAN_TYPE == GL_FLOAT
   span->red = r;
   span->green = g;
   span->blue = b;
   span->alpha = a;
#else
   span->red   = IntToFixed(r);
   span->green = IntToFixed(g);
   span->blue  = IntToFixed(b);
   span->alpha = IntToFixed(a);
#endif
   span->redStep = 0;
   span->greenStep = 0;
   span->blueStep = 0;
   span->alphaStep = 0;
   span->interpMask |= SPAN_RGBA;

   COPY_4V(span->attrStart[FRAG_ATTRIB_COL], ctx->Current.RasterColor);
   ASSIGN_4V(span->attrStepX[FRAG_ATTRIB_COL], 0.0, 0.0, 0.0, 0.0);
   ASSIGN_4V(span->attrStepY[FRAG_ATTRIB_COL], 0.0, 0.0, 0.0, 0.0);

   /* fog */
   {
      const SWcontext *swrast = SWRAST_CONTEXT(ctx);
      GLfloat fogVal; /* a coord or a blend factor */
      if (swrast->_PreferPixelFog) {
         /* fog blend factors will be computed from fog coordinates per pixel */
         fogVal = ctx->Current.RasterDistance;
      }
      else {
         /* fog blend factor should be computed from fogcoord now */
         fogVal = _swrast_z_to_fogfactor(ctx, ctx->Current.RasterDistance);
      }
      span->attrStart[FRAG_ATTRIB_FOGC][0] = fogVal;
      span->attrStepX[FRAG_ATTRIB_FOGC][0] = 0.0;
      span->attrStepY[FRAG_ATTRIB_FOGC][0] = 0.0;
   }

   /* texcoords */
   {
      const GLuint attr = FRAG_ATTRIB_TEX;
      const GLfloat *tc = ctx->Current.RasterTexCoords;
      if (tc[3] > 0.0F) {
         /* use (s/q, t/q, r/q, 1) */
         span->attrStart[attr][0] = tc[0] / tc[3];
         span->attrStart[attr][1] = tc[1] / tc[3];
         span->attrStart[attr][2] = tc[2] / tc[3];
         span->attrStart[attr][3] = 1.0;
      }
      else {
         ASSIGN_4V(span->attrStart[attr], 0.0F, 0.0F, 0.0F, 1.0F);
      }
      ASSIGN_4V(span->attrStepX[attr], 0.0F, 0.0F, 0.0F, 0.0F);
      ASSIGN_4V(span->attrStepY[attr], 0.0F, 0.0F, 0.0F, 0.0F);
   }
}
Example #21
void raster_tri_rgb_swap_rb(CRasterRGBTriangle2DInfo& info)
{
	if (info.clip_left < 0)
		info.clip_left = 0;
	if (info.clip_right > info.dest_width)
		info.clip_right = info.dest_width;
	if (info.clip_top < 0)
		info.clip_top = 0;
	if (info.clip_bottom > info.dest_height)
		info.clip_bottom = info.dest_height;

	typedef struct
	{
		float* v0, *v1;			/* Y(v0) < Y(v1) */
		float* tv0, *tv1;
		BYTE* cv0, *cv1;
		float dx;				/* X(v1) - X(v0) */
		float dy;				/* Y(v1) - Y(v0) */
		int fdxdy;				/* dx/dy in fixed-point */
		int fsx;				/* first sample point x coord */
		int fsy;
		float adjy;				/* adjust from v[0]->fy to fsy, scaled */
		int lines;				/* number of lines to be sampled on this edge */
		int fx0;				/* fixed pt X of lower endpoint */
	} EdgeT;

	EdgeT eMaj, eTop, eBot;
	float oneOverArea;

	float* vMin, *vMid, *vMax;       /* vertex indices:  Y(vMin)<=Y(vMid)<=Y(vMax) */
	float* tvMin, *tvMid, *tvMax;
	BYTE* cvMin, *cvMid, *cvMax;

	/* find the order of the 3 vertices along the Y axis */
	float y0 = info.v0[1];
	float y1 = info.v1[1];
	float y2 = info.v2[1];

	if (y0 <= y1) {
		if (y1 <= y2) {
			vMin = info.v0;   vMid = info.v1;   vMax = info.v2;   /* y0<=y1<=y2 */
			tvMin = info.tv0;   tvMid = info.tv1;   tvMax = info.tv2;   /* y0<=y1<=y2 */
			cvMin = info.cv0;   cvMid = info.cv1;   cvMax = info.cv2;   /* y0<=y1<=y2 */
		}
		else if (y2 <= y0) {
			vMin = info.v2;   vMid = info.v0;   vMax = info.v1;   /* y2<=y0<=y1 */
			tvMin = info.tv2;   tvMid = info.tv0;   tvMax = info.tv1;   /* y2<=y0<=y1 */
			cvMin = info.cv2;   cvMid = info.cv0;   cvMax = info.cv1;   /* y2<=y0<=y1 */
		}
		else {
			vMin = info.v0;   vMid = info.v2;   vMax = info.v1;   /* y0<=y2<=y1 */
			tvMin = info.tv0;   tvMid = info.tv2;   tvMax = info.tv1;   /* y0<=y2<=y1 */
			cvMin = info.cv0;   cvMid = info.cv2;   cvMax = info.cv1;   /* y0<=y2<=y1 */
		}
	}
	else {
		if (y0 <= y2) {
			vMin = info.v1;   vMid = info.v0;   vMax = info.v2;   /* y1<=y0<=y2 */
			tvMin = info.tv1;   tvMid = info.tv0;   tvMax = info.tv2;   /* y1<=y0<=y2 */
			cvMin = info.cv1;   cvMid = info.cv0;   cvMax = info.cv2;   /* y1<=y0<=y2 */
		}
		else if (y2 <= y1) {
			vMin = info.v2;   vMid = info.v1;   vMax = info.v0;   /* y2<=y1<=y0 */
			tvMin = info.tv2;  tvMid = info.tv1;   tvMax = info.tv0;   /* y2<=y1<=y0 */
			cvMin = info.cv2;  cvMid = info.cv1;   cvMax = info.cv0;   /* y2<=y1<=y0 */
		}
		else {
			vMin = info.v1;   vMid = info.v2;   vMax = info.v0;   /* y1<=y2<=y0 */
			tvMin = info.tv1;   tvMid = info.tv2;   tvMax = info.tv0;   /* y1<=y2<=y0 */
			cvMin = info.cv1;   cvMid = info.cv2;   cvMax = info.cv0;   /* y1<=y2<=y0 */
		}
	}

	/* vertex/edge relationship */
	eMaj.v0 = vMin;   eMaj.v1 = vMax;
	eTop.v0 = vMid;   eTop.v1 = vMax;
	eBot.v0 = vMin;   eBot.v1 = vMid;

	eMaj.tv0 = tvMin;   eMaj.tv1 = tvMax;
	eTop.tv0 = tvMid;   eTop.tv1 = tvMax;
	eBot.tv0 = tvMin;   eBot.tv1 = tvMid;

	eMaj.cv0 = cvMin;   eMaj.cv1 = cvMax;
	eTop.cv0 = cvMid;   eTop.cv1 = cvMax;
	eBot.cv0 = cvMin;   eBot.cv1 = cvMid;

	/* compute deltas for each edge:  vertex[v1] - vertex[v0] */
	eMaj.dx = vMax[0] - vMin[0];
	eMaj.dy = vMax[1] - vMin[1];
	eTop.dx = vMax[0] - vMid[0];
	eTop.dy = vMax[1] - vMid[1];
	eBot.dx = vMid[0] - vMin[0];
	eBot.dy = vMid[1] - vMin[1];

	float area = eMaj.dx * eBot.dy - eBot.dx * eMaj.dy;
	if (area > -0.05f && area < 0.05f)
		return;  /* very small; CULLED */

	oneOverArea = 1.0F / area;

	/* fixed point Y coordinates */
	int vMin_fx = FloatToFixed(vMin[0] + 0.5F);
	int vMin_fy = FloatToFixed(vMin[1] - 0.5F);
	int vMid_fx = FloatToFixed(vMid[0] + 0.5F);
	int vMid_fy = FloatToFixed(vMid[1] - 0.5F);
	int vMax_fy = FloatToFixed(vMax[1] - 0.5F);

	eMaj.fsy = FixedCeil(vMin_fy);
	eMaj.lines = FixedToInt(vMax_fy + FIXED_ONE - FIXED_EPSILON - eMaj.fsy);
	if (eMaj.lines > 0)
	{
		float dxdy = eMaj.dx / eMaj.dy;
		eMaj.fdxdy = SignedFloatToFixed(dxdy);
		eMaj.adjy = (float)(eMaj.fsy - vMin_fy);  /* SCALED! */
		eMaj.fx0 = vMin_fx;
		eMaj.fsx = eMaj.fx0 + (int)(eMaj.adjy * dxdy);
	}
	else
		return;  /*CULLED*/

	eTop.fsy = FixedCeil(vMid_fy);
	eTop.lines = FixedToInt(vMax_fy + FIXED_ONE - FIXED_EPSILON - eTop.fsy);
	if (eTop.lines > 0) {
		float dxdy = eTop.dx / eTop.dy;
		eTop.fdxdy = SignedFloatToFixed(dxdy);
		eTop.adjy = (float)(eTop.fsy - vMid_fy); /* SCALED! */
		eTop.fx0 = vMid_fx;
		eTop.fsx = eTop.fx0 + (int)(eTop.adjy * dxdy);
	}

	eBot.fsy = FixedCeil(vMin_fy);
	eBot.lines = FixedToInt(vMid_fy + FIXED_ONE - FIXED_EPSILON - eBot.fsy);
	if (eBot.lines > 0) {
		float dxdy = eBot.dx / eBot.dy;
		eBot.fdxdy = SignedFloatToFixed(dxdy);
		eBot.adjy = (float)(eBot.fsy - vMin_fy);  /* SCALED! */
		eBot.fx0 = vMin_fx;
		eBot.fsx = eBot.fx0 + (int)(eBot.adjy * dxdy);
	}

	int ltor = (oneOverArea < 0.0F);

	float drdx, drdy;      int fdrdx;
	float dgdx, dgdy;      int fdgdx;
	float dbdx, dbdy;      int fdbdx;
	float dadx, dady;      int fdadx;
	{
		float eMaj_dr, eBot_dr;
		eMaj_dr = (int)cvMax[0] - (int)cvMin[0];
		eBot_dr = (int)cvMid[0] - (int)cvMin[0];
		drdx = oneOverArea * (eMaj_dr * eBot.dy - eMaj.dy * eBot_dr);
		fdrdx = SignedFloatToFixed(drdx);
		drdy = oneOverArea * (eMaj.dx * eBot_dr - eMaj_dr * eBot.dx);
	}
	{
		float eMaj_dg, eBot_dg;
		eMaj_dg = (int)cvMax[1] - (int)cvMin[1];
		eBot_dg = (int)cvMid[1] - (int)cvMin[1];
		dgdx = oneOverArea * (eMaj_dg * eBot.dy - eMaj.dy * eBot_dg);
		fdgdx = SignedFloatToFixed(dgdx);
		dgdy = oneOverArea * (eMaj.dx * eBot_dg - eMaj_dg * eBot.dx);
	}
	{
		float eMaj_db, eBot_db;
		eMaj_db = (int)cvMax[2] - (int)cvMin[2];
		eBot_db = (int)cvMid[2] - (int)cvMin[2];
		dbdx = oneOverArea * (eMaj_db * eBot.dy - eMaj.dy * eBot_db);
		fdbdx = SignedFloatToFixed(dbdx);
		dbdy = oneOverArea * (eMaj.dx * eBot_db - eMaj_db * eBot.dx);
	}

	{
		float eMaj_da, eBot_da;
		eMaj_da = (int)cvMax[3] - (int)cvMin[3];
		eBot_da = (int)cvMid[3] - (int)cvMin[3];
		dadx = oneOverArea * (eMaj_da * eBot.dy - eMaj.dy * eBot_da);
		fdadx = SignedFloatToFixed(dadx);
		dady = oneOverArea * (eMaj.dx * eBot_da - eMaj_da * eBot.dx);
	}



	float wMax = 1.0F;
	float wMin = 1.0F;
	float wMid = 1.0F;

	float eMaj_ds = tvMax[0] * wMax - tvMin[0] * wMin;
	float eBot_ds = tvMid[0] * wMid - tvMin[0] * wMin;
	float eMaj_dt = tvMax[1] * wMax - tvMin[1] * wMin;
	float eBot_dt = tvMid[1] * wMid - tvMin[1] * wMin;

	float dsdx = oneOverArea * (eMaj_ds * eBot.dy - eMaj.dy * eBot_ds);
	float dsdy = oneOverArea * (eMaj.dx * eBot_ds - eMaj_ds * eBot.dx);
	float dtdx = oneOverArea * (eMaj_dt * eBot.dy - eMaj.dy * eBot_dt);
	float dtdy = oneOverArea * (eMaj.dx * eBot_dt - eMaj_dt * eBot.dx);

	int fx, fxLeftEdge, fxRightEdge, fdxLeftEdge, fdxRightEdge;
	int fdxOuter;
	int idxOuter;
	float dxOuter;
	int fError, fdError;
	float adjx, adjy;
	int fy;
	int iy;

	int fr, fdrOuter, fdrInner;
	int fg, fdgOuter, fdgInner;
	int fb, fdbOuter, fdbInner;
	int fa, fdaOuter, fdaInner;

	float sLeft, dsOuter, dsInner;
	float tLeft, dtOuter, dtInner;


	for (int subTriangle = 0; subTriangle <= 1; subTriangle++)
	{
		EdgeT *eLeft, *eRight;
		int setupLeft, setupRight;
		int lines;

		if (subTriangle == 0) {
			/* bottom half */
			if (ltor) {
				eLeft = &eMaj;
				eRight = &eBot;
				lines = eRight->lines;
				setupLeft = 1;
				setupRight = 1;
			}
			else {
				eLeft = &eBot;
				eRight = &eMaj;
				lines = eLeft->lines;
				setupLeft = 1;
				setupRight = 1;
			}
		}
		else {
			/* top half */
			if (ltor)
			{
				eLeft = &eMaj;
				eRight = &eTop;
				lines = eRight->lines;
				setupLeft = 0;
				setupRight = 1;
			}
			else
			{
				eLeft = &eTop;
				eRight = &eMaj;
				lines = eLeft->lines;
				setupLeft = 1;
				setupRight = 0;
			}
			if (lines == 0) return;
		}

		if (setupLeft && eLeft->lines > 0)
		{
			int fsx = eLeft->fsx;
			fx = FixedCeil(fsx);
			fError = fx - fsx - FIXED_ONE;
			fxLeftEdge = fsx - FIXED_EPSILON;
			fdxLeftEdge = eLeft->fdxdy;
			fdxOuter = FixedFloor(fdxLeftEdge - FIXED_EPSILON);
			fdError = fdxOuter - fdxLeftEdge + FIXED_ONE;
			idxOuter = FixedToInt(fdxOuter);
			dxOuter = (float)idxOuter;

			fy = eLeft->fsy;
			iy = FixedToInt(fy);

			adjx = (float)(fx - eLeft->fx0);  /* SCALED! */
			adjy = eLeft->adjy;				 /* SCALED! */

			float* vLower = eLeft->v0;
			float* tvLower = eLeft->tv0;
			BYTE* cvLower = eLeft->cv0;

			sLeft = tvLower[0] * 1.0F + (dsdx * adjx + dsdy * adjy) * (1.0F / FIXED_SCALE);
			dsOuter = dsdy + dxOuter * dsdx;

			tLeft = tvLower[1] * 1.0F + (dtdx * adjx + dtdy * adjy) * (1.0F / FIXED_SCALE);
			dtOuter = dtdy + dxOuter * dtdx;

			fr = (int)(IntToFixed(cvLower[0]) + drdx * adjx + drdy * adjy) + FIXED_HALF;
			fdrOuter = SignedFloatToFixed(drdy + dxOuter * drdx);

			fg = (int)(IntToFixed(cvLower[1]) + dgdx * adjx + dgdy * adjy) + FIXED_HALF;
			fdgOuter = SignedFloatToFixed(dgdy + dxOuter * dgdx);

			fb = (int)(IntToFixed(cvLower[2]) + dbdx * adjx + dbdy * adjy) + FIXED_HALF;
			fdbOuter = SignedFloatToFixed(dbdy + dxOuter * dbdx);

			fa = (int)(IntToFixed(cvLower[3]) + dadx * adjx + dady * adjy) + FIXED_HALF;
			fdaOuter = SignedFloatToFixed(dady + dxOuter * dadx);
		}

		if (setupRight && eRight->lines > 0)
		{
			fxRightEdge = eRight->fsx - FIXED_EPSILON;
			fdxRightEdge = eRight->fdxdy;
		}

		if (lines == 0)
			continue;

		fdrInner = fdrOuter + fdrdx;
		fdgInner = fdgOuter + fdgdx;
		fdbInner = fdbOuter + fdbdx;
		fdaInner = fdaOuter + fdadx;

		dsInner = dsOuter + dsdx;
		dtInner = dtOuter + dtdx;

		if (iy + lines >= info.clip_bottom)
			lines = info.clip_bottom - iy;

		while (lines > 0)
		{
			float ss = sLeft, tt = tLeft;
			int ffr = fr, ffg = fg, ffb = fb;
			int ffa = fa;

			int left = FixedToInt(fxLeftEdge);
			int right = FixedToInt(fxRightEdge);

			int ffrend = ffr + (right - left - 1)*fdrdx;
			int ffgend = ffg + (right - left - 1)*fdgdx;
			int ffbend = ffb + (right - left - 1)*fdbdx;
			if (ffrend < 0) ffr -= ffrend;
			if (ffgend < 0) ffg -= ffgend;
			if (ffbend < 0) ffb -= ffbend;
			if (ffr < 0) ffr = 0;
			if (ffg < 0) ffg = 0;
			if (ffb < 0) ffb = 0;

			int ffaend = ffa + (right - left - 1)*fdadx;
			if (ffaend < 0) ffa -= ffaend;
			if (ffa < 0) ffa = 0;

			int i;
			int n = right - left;

			if (iy >= info.clip_top)
			{

				if (n > 0)
					if (left < info.clip_left)
					{
						int diff = info.clip_left - left;
						ss += dsdx	 *diff;
						tt += dtdx	 *diff;
						ffr += fdrdx *diff;
						ffg += fdgdx *diff;
						ffb += fdbdx *diff;
						ffa += fdadx *diff;

						n -= diff;

						left = info.clip_left;
					}

				if (n > 0)
					if (left + n >= info.clip_right)
					{
						n = info.clip_right - left;
					}

				if (n > 0)
					for (i = 0; i < n; i++)
					{
						int i0, j0, i1, j1;
						float u, v;

						u = ss * info.tex_width;
						v = tt * info.tex_height;



						i0 = (int)floor(u - 0.5F);
						i1 = i0 + 1;
						j0 = (int)floor(v - 0.5F);
						j1 = j0 + 1;


						float a = frac(u - 0.5F);
						float b = frac(v - 0.5F);

						int w00 = (int)((1.0F - a)*(1.0F - b) * 256.0F);
						int w10 = (int)(a *(1.0F - b) * 256.0F);
						int w01 = (int)((1.0F - a)*      b  * 256.0F);
						int w11 = (int)(a *      b  * 256.0F);

						unsigned char red00, green00, blue00, alpha00;
						unsigned char red10, green10, blue10, alpha10;
						unsigned char red01, green01, blue01, alpha01;
						unsigned char red11, green11, blue11, alpha11;

						const unsigned char* texel;

						if (info.tex_repeat)
						{
							if (i0 < 0)
								i0 = info.tex_width + i0;
							else
								if (i0 >= info.tex_width)
									i0 = i0 - info.tex_width;

							if (i1 < 0)
								i1 = info.tex_width + i1;
							else
								if (i1 >= info.tex_width)
									i1 = i1 - info.tex_width;

							if (j0 < 0)
								j0 = info.tex_height + j0;
							else
								if (j0 >= info.tex_height)
									j0 = j0 - info.tex_height;

							if (j1 < 0)
								j1 = info.tex_height + j1;
							else
								if (j1 >= info.tex_height)
									j1 = j1 - info.tex_height;
						}

						BYTE* ppix = (BYTE*)(info.dest + (iy*info.dest_width + left) * 4);
						BYTE pix_r = ppix[i * 4 + 0];
						BYTE pix_g = ppix[i * 4 + 1];
						BYTE pix_b = ppix[i * 4 + 2];
						BYTE pix_a = ppix[i * 4 + 3];

						BYTE border_r = pix_r; //mesa3d cannot do this :)
						BYTE border_g = pix_g;
						BYTE border_b = pix_b;
						BYTE border_a = 0;

						bool i0_border = (i0 < 0 || i0 >= info.tex_width);
						bool i1_border = (i1 < 0 || i1 >= info.tex_width);
						bool j0_border = (j0 < 0 || j0 >= info.tex_height);
						bool j1_border = (j1 < 0 || j1 >= info.tex_height);

						if (i0_border | j0_border)
						{
							red00 = border_r;
							green00 = border_g;
							blue00 = border_b;
							alpha00 = border_a;
						}
						else
						{
							texel = info.tex + (info.tex_width * j0 + i0) * 4;
							red00 = texel[0];
							green00 = texel[1];
							blue00 = texel[2];
							alpha00 = texel[3];
						}

						if (i1_border | j0_border)
						{
							red10 = border_r;
							green10 = border_g;
							blue10 = border_b;
							alpha10 = border_a;
						}
						else
						{
							texel = info.tex + (info.tex_width * j0 + i1) * 4;
							red10 = texel[0];
							green10 = texel[1];
							blue10 = texel[2];
							alpha10 = texel[3];
						}

						if (j1_border | i0_border)
						{
							red01 = border_r;
							green01 = border_g;
							blue01 = border_b;
							alpha01 = border_a;
						}
						else
						{
							texel = info.tex + (info.tex_width * j1 + i0) * 4;
							red01 = texel[0];
							green01 = texel[1];
							blue01 = texel[2];
							alpha01 = texel[3];
						}

						if (i1_border | j1_border)
						{
							red11 = border_r;
							green11 = border_g;
							blue11 = border_b;
							alpha11 = border_a;
						}
						else
						{
							texel = info.tex + (info.tex_width * j1 + i1) * 4;
							red11 = texel[0];
							green11 = texel[1];
							blue11 = texel[2];
							alpha11 = texel[3];
						}

						BYTE _r = (w00*red00 + w10*red10 + w01*red01 + w11*red11) / 256;
						BYTE _g = (w00*green00 + w10*green10 + w01*green01 + w11*green11) / 256;
						BYTE _b = (w00*blue00 + w10*blue10 + w01*blue01 + w11*blue11) / 256;
						BYTE _a = (w00*alpha00 + w10*alpha10 + w01*alpha01 + w11*alpha11) / 256;


						_r = (_r * FixedToInt(ffr)) / 256;
						_g = (_g * FixedToInt(ffg)) / 256;
						_b = (_b * FixedToInt(ffb)) / 256;
						_a = (_a * FixedToInt(ffa)) / 256;

						int t = _a;
						int s = 255 - t;

						ppix[i * 4 + 0] = (_b * t + pix_r * s) / 256; //swap rb
						ppix[i * 4 + 1] = (_g * t + pix_g * s) / 256;
						ppix[i * 4 + 2] = (_r * t + pix_b * s) / 256;
						ppix[i * 4 + 3] = (_a * t + pix_a * s) / 256;

						ss += dsdx;
						tt += dtdx;
						ffr += fdrdx;
						ffg += fdgdx;
						ffb += fdbdx;
						ffa += fdadx;
					}
			}

			iy++;
			lines--;

			fxLeftEdge += fdxLeftEdge;
			fxRightEdge += fdxRightEdge;


			fError += fdError;
			if (fError >= 0)
			{
				fError -= FIXED_ONE;

				sLeft += dsOuter;
				tLeft += dtOuter;
				fr += fdrOuter;
				fg += fdgOuter;
				fb += fdbOuter;
				fa += fdaOuter;
			}
			else {
				sLeft += dsInner;
				tLeft += dtInner;
				fr += fdrInner;
				fg += fdgInner;
				fb += fdbInner;
				fa += fdaInner;
			}
		}
	}
}
Example #22
//------------------------------------------------------------------------
ATSUTextLayout nsATSUIToolkit::GetTextLayout(short aFontNum, short aSize, PRBool aBold, PRBool aItalic, nscolor aColor)
{ 
	ATSUTextLayout txLayout = nsnull;
	OSStatus err;
	if (nsATSUIUtils::gTxLayoutCache->Get(aFontNum, aSize, aBold, aItalic, aColor, &txLayout))
		return txLayout;
		
	UniChar dmy[1];
	err = ::ATSUCreateTextLayoutWithTextPtr (dmy, 0,0,0,0,NULL, NULL, &txLayout);
	if(noErr != err) {
		NS_WARNING("ATSUCreateTextLayoutWithTextPtr failed");
    // goto errorDone;
    return nsnull;
	}

	ATSUStyle				theStyle;
	err = ::ATSUCreateStyle(&theStyle);
	if(noErr != err) {
		NS_WARNING("ATSUCreateStyle failed");
    // goto errorDoneDestroyTextLayout;
  	err = ::ATSUDisposeTextLayout(txLayout);
    return nsnull;
	}

	ATSUAttributeTag 		theTag[ATTR_CNT];
	ByteCount				theValueSize[ATTR_CNT];
	ATSUAttributeValuePtr 	theValue[ATTR_CNT];

	//--- Font ID & Face -----		
	ATSUFontID atsuFontID;
	
	// The use of ATSUFONDtoFontID is not recommended, see
	// http://developer.apple.com/documentation/Carbon/Reference/ATSUI_Reference/atsu_reference_Reference/chapter_1.2_section_19.html
	FMFontStyle fbStyle;
	if (::FMGetFontFromFontFamilyInstance(aFontNum, 0, &atsuFontID, &fbStyle) == kFMInvalidFontErr) {
		NS_WARNING("FMGetFontFromFontFamilyInstance failed");
    // goto errorDoneDestroyStyle;
  	err = ::ATSUDisposeStyle(theStyle);
  	err = ::ATSUDisposeTextLayout(txLayout);
    return nsnull;
	}
	
	theTag[0] = kATSUFontTag;
	theValueSize[0] = (ByteCount) sizeof(ATSUFontID);
	theValue[0] = (ATSUAttributeValuePtr) &atsuFontID;
	//--- Font ID & Face  -----		
	
	//--- Size -----		
	float  dev2app;
	short fontsize = aSize;

	dev2app = mContext->DevUnitsToAppUnits();
  //	Fixed size = FloatToFixed( roundf(float(fontsize) / dev2app));
  Fixed size = FloatToFixed( (float) rint(float(fontsize) / dev2app));
	if( FixRound ( size ) < 9  && !nsFontUtils::DisplayVerySmallFonts())
		size = X2Fix(9);

	theTag[1] = kATSUSizeTag;
	theValueSize[1] = (ByteCount) sizeof(Fixed);
	theValue[1] = (ATSUAttributeValuePtr) &size;
	//--- Size -----		
	
	//--- Color -----		
	RGBColor color;

	#define COLOR8TOCOLOR16(color8)	 ((color8 << 8) | color8) 		

	color.red = COLOR8TOCOLOR16(NS_GET_R(aColor));
	color.green = COLOR8TOCOLOR16(NS_GET_G(aColor));
	color.blue = COLOR8TOCOLOR16(NS_GET_B(aColor));				
	theTag[2] = kATSUColorTag;
	theValueSize[2] = (ByteCount) sizeof(RGBColor);
	theValue[2] = (ATSUAttributeValuePtr) &color;
	//--- Color -----		

	//--- Bold -----
	Boolean isBold = aBold ? true : false;
	theTag[3] = kATSUQDBoldfaceTag;
	theValueSize[3] = (ByteCount) sizeof(Boolean);
	theValue[3] = (ATSUAttributeValuePtr) &isBold;
	//--- Bold -----

	//--- Italic -----
	Boolean isItalic = aItalic ? true : false;
	theTag[4] = kATSUQDItalicTag;
	theValueSize[4] = (ByteCount) sizeof(Boolean);
	theValue[4] = (ATSUAttributeValuePtr) &isItalic;
	//--- Italic -----

	err =  ::ATSUSetAttributes(theStyle, ATTR_CNT, theTag, theValueSize, theValue);
	if(noErr != err) {
		NS_WARNING("ATSUSetAttributes failed");
    // goto errorDoneDestroyStyle;
  	err = ::ATSUDisposeStyle(theStyle);
  	err = ::ATSUDisposeTextLayout(txLayout);
    return nsnull;
	}
	 	
	err = ::ATSUSetRunStyle(txLayout, theStyle, kATSUFromTextBeginning, kATSUToTextEnd);
	if(noErr != err) {
		NS_WARNING("ATSUSetRunStyle failed");
    // goto errorDoneDestroyStyle;
  	err = ::ATSUDisposeStyle(theStyle);
  	err = ::ATSUDisposeTextLayout(txLayout);
    return nsnull;
	}
	
    err = ::ATSUSetTransientFontMatching(txLayout, true);
	if(noErr != err) {
		NS_WARNING( "ATSUSetTransientFontMatching failed");
    // goto errorDoneDestroyStyle;
  	err = ::ATSUDisposeStyle(theStyle);
  	err = ::ATSUDisposeTextLayout(txLayout);
    return nsnull;
	}
    	
	nsATSUIUtils::gTxLayoutCache->Set(aFontNum, aSize, aBold, aItalic, aColor,  txLayout);	

	return txLayout;
}
Example #23
/**
 * Draw large (size >= 1) non-AA point.  RGB or CI mode.
 */
static void
large_point(GLcontext *ctx, const SWvertex *vert)
{
   SWcontext *swrast = SWRAST_CONTEXT(ctx);
   const GLboolean ciMode = !ctx->Visual.rgbMode;
   SWspan span;
   GLfloat size;

   CULL_INVALID(vert);

   /* z coord */
   if (ctx->DrawBuffer->Visual.depthBits <= 16)
      span.z = FloatToFixed(vert->attrib[FRAG_ATTRIB_WPOS][2] + 0.5F);
   else
      span.z = (GLuint) (vert->attrib[FRAG_ATTRIB_WPOS][2] + 0.5F);
   span.zStep = 0;

   size = get_size(ctx, vert, GL_FALSE);

   /* span init */
   INIT_SPAN(span, GL_POINT);
   span.arrayMask = SPAN_XY;
   span.facing = swrast->PointLineFacing;

   if (ciMode) {
      span.interpMask = SPAN_Z | SPAN_INDEX;
      span.index = FloatToFixed(vert->attrib[FRAG_ATTRIB_CI][0]);
      span.indexStep = 0;
   }
   else {
      span.interpMask = SPAN_Z | SPAN_RGBA;
      span.red   = ChanToFixed(vert->color[0]);
      span.green = ChanToFixed(vert->color[1]);
      span.blue  = ChanToFixed(vert->color[2]);
      span.alpha = ChanToFixed(vert->color[3]);
      span.redStep = 0;
      span.greenStep = 0;
      span.blueStep = 0;
      span.alphaStep = 0;
   }

   /* need these for fragment programs */
   span.attrStart[FRAG_ATTRIB_WPOS][3] = 1.0F;
   span.attrStepX[FRAG_ATTRIB_WPOS][3] = 0.0F;
   span.attrStepY[FRAG_ATTRIB_WPOS][3] = 0.0F;

   ATTRIB_LOOP_BEGIN
      COPY_4V(span.attrStart[attr], vert->attrib[attr]);
      ASSIGN_4V(span.attrStepX[attr], 0, 0, 0, 0);
      ASSIGN_4V(span.attrStepY[attr], 0, 0, 0, 0);
   ATTRIB_LOOP_END

   /* compute pos, bounds and render */
   {
      const GLfloat x = vert->attrib[FRAG_ATTRIB_WPOS][0];
      const GLfloat y = vert->attrib[FRAG_ATTRIB_WPOS][1];
      GLint iSize = (GLint) (size + 0.5F);
      GLint xmin, xmax, ymin, ymax, ix, iy;
      GLint iRadius;

      iSize = MAX2(1, iSize);
      iRadius = iSize / 2;

      if (iSize & 1) {
         /* odd size */
         xmin = (GLint) (x - iRadius);
         xmax = (GLint) (x + iRadius);
         ymin = (GLint) (y - iRadius);
         ymax = (GLint) (y + iRadius);
      }
      else {
         /* even size */
         /* 0.501 factor allows conformance to pass */
         xmin = (GLint) (x + 0.501) - iRadius;
         xmax = xmin + iSize - 1;
         ymin = (GLint) (y + 0.501) - iRadius;
         ymax = ymin + iSize - 1;
      }

      /* generate fragments */
      span.end = 0;
      for (iy = ymin; iy <= ymax; iy++) {
         for (ix = xmin; ix <= xmax; ix++) {
            span.array->x[span.end] = ix;
            span.array->y[span.end] = iy;
            span.end++;
         }
      }
      assert(span.end <= MAX_WIDTH);
      _swrast_write_rgba_span(ctx, &span);
   }
}
Example #24
int qCreateDecoderAPI(QDecoder **dptr, char* args, int semaIndex, int width, int height)
{
	QDecoder* decoder;			// the decompressor to be initialized
	ImageDescription* imDesc;	// description of input frame images
	ImageDescriptionHandle myImHandle;
	
	CFNumberRef number = NULL;
	CFMutableDictionaryRef pixelBufferAttributes = NULL;
	ICMDecompressionTrackingCallbackRecord trackingCallbackRecord;
	QVideoArgs* decoderArgs = (QVideoArgs*)args;
	CodecInfo codecInfo;
	
	OSType pixelFormat = Q_PIXEL_FORMAT;
	
	OSStatus err;             // status of QuickTime functions

	fprintf(QSTDERR, "\nqCreateDecoderQT: START DECODER CREATION! (width: %d height: %d)", width, height);

	decoder = (QDecoder*)malloc(sizeof(QDecoder));
	if (decoder == NULL) {
		fprintf(QSTDERR, "\nqCreateDecoderQT: failed to malloc decoder struct");
		return -2; 
	}

	// Get codec info
	decoder->codecType = *((CodecType*)(decoderArgs->codecType));
	err = GetCodecInfo(&codecInfo, decoder->codecType, NULL);
	if (err != noErr) {
		fprintf(QSTDERR, "\nqCreateDecoderQT: cannot get codec info");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		free(decoder);
		return -5;
	}

	// We have found an available camera to initialize.
	decoder->timeScale = decoderArgs->timeScale;
	decoder->semaIndex = semaIndex;
	decoder->width = width;
	decoder->height = height;
	decoder->inFrameCount = decoder->outFrameCount = 0;
	qInitCallbackData(&(decoder->callbackData));
	
	fprintf(QSTDERR, "\nqCreateDecoderQT: INITIALIZED STRUCTURE");
	
	imDesc = &(decoder->imDesc);
	imDesc->idSize          = 86;                     // grabbed from camera (total size of ImageDescription including extra data)
	imDesc->cType			= decoder->codecType;
	//imDesc->resvd1;                                 // reserved for Apple use
	//imDesc->resvd2;                                 // reserved for Apple use
	imDesc->dataRefIndex    = 0;                      // docs say that this must be set to zero
	imDesc->version			= codecInfo.version;
	imDesc->revisionLevel   = codecInfo.revisionLevel;
	imDesc->vendor          = codecInfo.vendor;         
	imDesc->temporalQuality = codecNormalQuality;
	imDesc->spatialQuality  = codecNormalQuality;
	imDesc->width           = width;                  // in pixels
	imDesc->height          = height;                 // in pixels
	imDesc->hRes            = FloatToFixed(72.0);     // DPI, I presume                
	imDesc->vRes            = FloatToFixed(72.0);     // ditto
	imDesc->dataSize        = 0;                      // every frame will have a different size
	imDesc->frameCount      = 0;                      // # of frames this desc applies to (is '1' what we want?)
	memcpy(imDesc->name, codecInfo.typeName, 32 );
	imDesc->depth           = 24;                     // might eventually want to support 32 (to support an alpha-channel)
	imDesc->clutID          = -1;                     // no color-lookup table
		
	fprintf(QSTDERR, "\nqCreateDecoderQT: INITIALIZED IMAGE DESCRIPTION");
	
	pixelBufferAttributes = CFDictionaryCreateMutable( NULL, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks );

		number = CFNumberCreate( NULL, kCFNumberIntType, &width );
		CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferWidthKey, number );
		CFRelease( number );
		
		number = CFNumberCreate( NULL, kCFNumberIntType, &height );
		CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferHeightKey, number );
		CFRelease( number );
		
		number = CFNumberCreate( NULL, kCFNumberSInt32Type, &pixelFormat );
		CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferPixelFormatTypeKey, number );
		CFRelease( number );
		
		CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferCGBitmapContextCompatibilityKey, kCFBooleanTrue );
		CFDictionaryAddValue( pixelBufferAttributes, kCVPixelBufferCGImageCompatibilityKey, kCFBooleanTrue );

	fprintf(QSTDERR, "\nqCreateDecoderQT: SET UP PIXEL-BUFFER ATTRIBUTES");

	trackingCallbackRecord.decompressionTrackingCallback = (ICMDecompressionTrackingCallback)qQuickTimeDecoderCallback;
	trackingCallbackRecord.decompressionTrackingRefCon = decoder;
	
	fprintf(QSTDERR, "\nqCreateDecoderQT: SET UP CALLBACK RECORD");
	
	// Actually create the session.  First, we need to copy the image description into a
	// newly allocated handle (QuickTime requires this... we dispose of the handle immediately afterward).
	myImHandle = (ImageDescriptionHandle)NewHandle(sizeof(ImageDescription));
	**myImHandle = *imDesc;
	fprintf(QSTDERR, "\nDECOMPRESSOR IMAGE DESCRIPTION:");
	qPrintImageDescription(myImHandle);
	
	//  err = ICMDecompressionSessionCreate(kCFAllocatorDefault, myImHandle, NULL, pixelBufferAttributes, &trackingCallbackRecord, &(decomp->session));
	err = ICMDecompressionSessionCreate(NULL, myImHandle, NULL, pixelBufferAttributes, &trackingCallbackRecord, &(decoder->session));

	DisposeHandle((Handle)myImHandle);
	
	if (err != noErr) {
		fprintf(QSTDERR, "\nqCreateDecoderQT: cannot create session");
		fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
		free(decoder);
		return -5;
	}
	
	fprintf(QSTDERR, "\nqCreateDecoderQT: FINISHED!!!");
	
	*dptr = decoder;
	return 0;
}
Example #25
File: getset.c Project: ogochan/libmondai
extern	Fixed	*
ValueToFixed(
	ValueStruct	*val)
{
	Fixed	*ret;
	Fixed	*xval;

	if		(  val  ==  NULL  ) {
		ret = NewFixed(0,0);
	} else
	switch	(ValueType(val)) {
	  case	GL_TYPE_CHAR:
	  case	GL_TYPE_VARCHAR:
	  case	GL_TYPE_TEXT:
	  case	GL_TYPE_SYMBOL:
		ret = NewFixed(0,0);
		IntToFixed(ret,StrToInt(ValueString(val),strlen(ValueString(val))));
		break;
	  case	GL_TYPE_NUMBER:
		xval = &ValueFixed(val);
		ret = NewFixed(xval->flen,xval->slen);
		strcpy(ret->sval,xval->sval);
		break;
	  case	GL_TYPE_INT:
		ret = NewFixed(0,0);
		IntToFixed(ret,ValueInteger(val));
		break;
	  case	GL_TYPE_FLOAT:
		ret = NewFixed(SIZE_NUMBUF,(SIZE_NUMBUF / 2));
		FloatToFixed(ret,ValueFloat(val));
		break;
	  case	GL_TYPE_BOOL:
		ret = NewFixed(0,0);
		IntToFixed(ret,(int)ValueBool(val));
		break;
	  case	GL_TYPE_TIMESTAMP:
		ret = NewFixed(14,0);
		sprintf(ret->sval,"%04d%02d%02d%02d%02d%02d",
				ValueDateTimeYear(val),
				ValueDateTimeMon(val) + 1,
				ValueDateTimeMDay(val),
				ValueDateTimeHour(val),
				ValueDateTimeMin(val),
				ValueDateTimeSec(val));
		break;
	  case	GL_TYPE_DATE:
		ret = NewFixed(8,0);
		sprintf(ret->sval,"%04d%02d%02d",
				ValueDateTimeYear(val),
				ValueDateTimeMon(val) + 1,
				ValueDateTimeMDay(val));
		break;
	  case	GL_TYPE_TIME:
		ret = NewFixed(6,0);
		sprintf(ret->sval,"%02d%02d%02d",
				ValueDateTimeHour(val),
				ValueDateTimeMin(val),
				ValueDateTimeSec(val));
		break;
	  default:
		ret = NewFixed(0,0);
		break;
	}
	return	(ret);
}
/*
 *  PsychQTGetTextureFromMovie() -- Create an OpenGL texture map from a specific video frame of a given movie object.
 *
 *  win = Window pointer of the onscreen window for which an OpenGL texture should be created.
 *  moviehandle = Handle to the movie object.
 *  checkForImage = true == Just check if new image available, false == really retrieve the image, blocking if necessary.
 *  timeindex = When not in playback mode, this allows specification of a requested frame by presentation time.
 *              If set to -1, or if in realtime playback mode, this parameter is ignored and the next video frame is returned.
 *  out_texture = Pointer to the Psychtoolbox texture-record where the new texture should be stored.
 *  presentation_timestamp = A ptr to a double variable, where the presentation timestamp of the returned frame should be stored.
 *
 *  Returns true (1) on success, false (0) if no new image available, -1 if no new image available and there won't be any in future.
 */
int PsychQTGetTextureFromMovie(PsychWindowRecordType *win, int moviehandle, int checkForImage, double timeindex, PsychWindowRecordType *out_texture, double *presentation_timestamp)
{
	static TimeValue myNextTimeCached = -2;
	static TimeValue nextFramesTimeCached = -2;
    TimeValue		myCurrTime;
    TimeValue		myNextTime;
    TimeValue       nextFramesTime=0;
    short		myFlags;
    OSType		myTypes[1];
    OSErr		error = noErr;
    Movie               theMovie;
    CVOpenGLTextureRef newImage = NULL;
    QTVisualContextRef  theMoviecontext;
    unsigned int failcount=0;
    float lowerLeft[2];
    float lowerRight[2];    
    float upperRight[2];    
    float upperLeft[2];
    GLuint texid;
    Rect rect;
    float rate;
    double targetdelta, realdelta, frames;
	PsychRectType outRect;

    if (!PsychIsOnscreenWindow(win)) {
        PsychErrorExitMsg(PsychError_user, "Need onscreen window ptr!!!");
    }
    
    // Activate OpenGL context of target window:
    PsychSetGLContext(win);

    // Explicitly disable Apple's Client storage extensions. For now they are not really useful to us.
    glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_FALSE);
    
    if (moviehandle < 0 || moviehandle >= PSYCH_MAX_MOVIES) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided.");
    }
    
    if ((timeindex!=-1) && (timeindex < 0 || timeindex >= 10000.0)) {
        PsychErrorExitMsg(PsychError_user, "Invalid timeindex provided.");
    }
    
    if (NULL == out_texture && !checkForImage) {
        PsychErrorExitMsg(PsychError_internal, "NULL-Ptr instead of out_texture ptr passed!!!");
    }
    
    // Fetch references to objects we need:
    theMovie = movieRecordBANK[moviehandle].theMovie;
    theMoviecontext = movieRecordBANK[moviehandle].QTMovieContext;

    if (theMovie == NULL) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided. No movie associated with this handle.");
    }

    // Check if end of movie is reached. Rewind, if so...
    if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag > 0) {
        if (GetMovieRate(theMovie)>0) {
            GoToBeginningOfMovie(theMovie);
        } else {
            GoToEndOfMovie(theMovie);
        }
    }
    
    // Is movie actively playing (automatic async playback, possibly with synced sound)?
    // If so, then we ignore the 'timeindex' parameter, because the automatic playback
    // process determines which frames should be delivered to PTB when. This function will
    // simply wait or poll for arrival/presence of a new frame that hasn't been fetched
    // in previous calls.
    if (0 == GetMovieRate(theMovie)) {
        // Movie playback inactive. We are in "manual" mode: No automatic async playback,
        // no synced audio output. The user just wants to manually fetch movie frames into
        // textures for manual playback in a standard Matlab-loop.

		// First pass - checking for new image?
		if (checkForImage) {
			// Image for specific point in time requested?
			if (timeindex >= 0) {
				// Yes. We try to retrieve the next possible image for requested timeindex.
				myCurrTime = (TimeValue) ((timeindex * (double) GetMovieTimeScale(theMovie)) + 0.5f);
			}
			else {
				// No. We just retrieve the next frame, given the current movie time.
				myCurrTime = GetMovieTime(theMovie, NULL);
			}
            
			// Retrieve timeindex of the closest image sample after myCurrTime:
			myFlags = nextTimeStep + nextTimeEdgeOK;	// We want the next frame in the movie's media.
			myTypes[0] = VisualMediaCharacteristic;		// We want video samples.
			GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, myCurrTime, FloatToFixed(1), &myNextTime, &nextFramesTime);
			error = GetMoviesError();
			if (error != noErr) {
				PsychErrorExitMsg(PsychError_internal, "Failed to fetch texture from movie for given timeindex!");
			}
			
			// Found useful event?
			if (myNextTime == -1) {
				if (PsychPrefStateGet_Verbosity() > 3) printf("PTB-WARNING: Bogus timevalue in movie track for movie %i. Trying to keep going.\n", moviehandle);
				
				// No. Just push timestamp to current time plus a little bit in the hope
				// this will get us unstuck:
				myNextTime = myCurrTime + (TimeValue) 1;
				nextFramesTime = (TimeValue) 0;
			}
			
			if (myNextTime != myNextTimeCached) {
				// Set movies current time to myNextTime, so the next frame will be fetched from there:
				SetMovieTimeValue(theMovie, myNextTime);
				
				// nextFramesTime is the timeindex to which we need to advance for retrieval of next frame: (see code below)
				nextFramesTime=myNextTime + nextFramesTime;
				
				if (PsychPrefStateGet_Verbosity() > 5) printf("PTB-DEBUG: Current timevalue in movie track for movie %i is %lf secs.\n", moviehandle, (double) myNextTime / (double) GetMovieTimeScale(theMovie));
				if (PsychPrefStateGet_Verbosity() > 5) printf("PTB-DEBUG: Next timevalue in movie track for movie %i is %lf secs.\n", moviehandle, (double) nextFramesTime / (double) GetMovieTimeScale(theMovie));
				
				// Cache values for 2nd pass:
				myNextTimeCached = myNextTime;
				nextFramesTimeCached = nextFramesTime;
			}
			else {
				// Somehow got stuck at the same time value? Nudge the time index a bit forward in the hope that this helps:
				if (PsychPrefStateGet_Verbosity() > 5) printf("PTB-DEBUG: Seem to be a bit stuck at timevalue [for movie %i] of %lf secs. Nudging a bit forward...\n", moviehandle, (double) myNextTime / (double) GetMovieTimeScale(theMovie));
				SetMovieTimeValue(theMovie, GetMovieTime(theMovie, NULL) + 1);
			}
		}
		else {
			// This is the 2nd pass: Image fetching. Use cached values from first pass:
			// Caching in a static works because we're always called immediately for 2nd
			// pass after successful return from 1st pass, and we're not multi-threaded,
			// i.e., don't need to be reentrant or thread-safe here:
			myNextTime = myNextTimeCached;
			nextFramesTime = nextFramesTimeCached;
			myNextTimeCached = -2;
		}
	}
    else {
        // myNextTime unavailable if in autoplayback-mode:
        myNextTime=-1;
    }
    
    // Presentation timestamp requested?
    if (presentation_timestamp) {
        // Already available?
        if (myNextTime==-1) {
            // Retrieve the exact presentation timestamp of the retrieved frame (in movietime):
            myFlags = nextTimeStep + nextTimeEdgeOK;            // We want the next frame in the movie's media.
            myTypes[0] = VisualMediaCharacteristic;		// We want video samples.
                                                                // We search backward for the closest available image for the current time. Either we get the current time
                                                                // if we happen to fetch a frame exactly when it becomes ready, or we get a bit earlier timestamp, which is
                                                                // the optimal presentation timestamp for this frame:
            GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, GetMovieTime(theMovie, NULL), FloatToFixed(-1), &myNextTime, NULL);
        }
        // Convert pts (in Quicktime ticks) to pts in seconds since start of movie and return it:
        *presentation_timestamp = (double) myNextTime / (double) GetMovieTimeScale(theMovie);
    }

    // Allow quicktime visual context task to do its internal bookkeeping and cleanup work:
    if (theMoviecontext) QTVisualContextTask(theMoviecontext);

    // Perform decompress-operation:
    if (checkForImage) MoviesTask(theMovie, 0);
    
    // Should we just check for new image? If so, just return availability status:
    if (checkForImage) {
        if (PSYCH_USE_QT_GWORLDS) {
            // We use GWorlds. In this case we either succeed immediately due to the
            // synchronous nature of GWorld rendering, or we fail completely at end
            // of non-looping movie:
            if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag == 0) {
                // No new frame available and there won't be any in the future, because this is a non-looping
                // movie that has reached its end.
                return(-1);
            }
            
            // Is this the special case of a movie without video, but only sound? In that case,
            // we always return 'false' because there is no image to return.
            if (movieRecordBANK[moviehandle].QTMovieGWorld == NULL) return(false);

            // Success!
            return(true);
        }
        
        // Code which uses QTVisualContextTasks...
        if (QTVisualContextIsNewImageAvailable(theMoviecontext, NULL)) {
            // New frame ready!
            return(true);
        }
        else if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag == 0) {
            // No new frame available and there won't be any in the future, because this is a non-looping
            // movie that has reached its end.
            return(-1);
        }
        else {
            // No new frame available yet:
            return(false);
        }
    }
    
    if (!PSYCH_USE_QT_GWORLDS) {
        // Blocking wait-code for non-GWorld mode:
        // Try up to 1000 iterations for arrival of requested image data in wait-mode:
        failcount=0;
        while ((failcount < 1000) && !QTVisualContextIsNewImageAvailable(theMoviecontext, NULL)) {
            PsychWaitIntervalSeconds(0.005);
            MoviesTask(theMovie, 0);
            failcount++;
        }
        
        // No new frame available and there won't be any in the future, because this is a non-looping
        // movie that has reached its end.
        if ((failcount>=1000) && IsMovieDone(theMovie) && (movieRecordBANK[moviehandle].loopflag == 0)) {
            return(-1);
        }
        
        // Fetch new OpenGL texture with the new movie image frame:
        error = QTVisualContextCopyImageForTime(theMoviecontext, kCFAllocatorDefault, NULL, &newImage);
        if ((error!=noErr) || newImage == NULL) {
            PsychErrorExitMsg(PsychError_internal, "OpenGL<->Quicktime texture fetch failed!!!");
        }
    
        // Disable client storage, if it was enabled:
        glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_FALSE);
        
        // Build a standard PTB texture record:    
        CVOpenGLTextureGetCleanTexCoords (newImage, lowerLeft, lowerRight, upperRight, upperLeft);
        texid = CVOpenGLTextureGetName(newImage);
        
        // Assign texture rectangle:
        PsychMakeRect(outRect, upperLeft[0], upperLeft[1], lowerRight[0], lowerRight[1]);    
        
        // Set texture orientation as if it were an inverted Offscreen window: Upside-down.
        out_texture->textureOrientation = (CVOpenGLTextureIsFlipped(newImage)) ? 3 : 4;

        // Assign OpenGL texture id:
        out_texture->textureNumber = texid;
        
        // Store special texture object as part of the PTB texture record:
        out_texture->targetSpecific.QuickTimeGLTexture = newImage;
    }
    else {
        // Synchronous texture fetch code for GWorld rendering mode:
        // At this point, the GWorld should contain the source image for creating a
        // standard OpenGL texture:
        
        // Disable client storage, if it was enabled:
        glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_FALSE);
        
        // Build a standard PTB texture record:    

        // Assign texture rectangle:
        GetMovieBox(theMovie, &rect);

        // Hack: Need to extend rect by 4 pixels, because GWorld image rows are
        // padded to a 4-pixel alignment:
        rect.right = rect.right + 4;
        PsychMakeRect(out_texture->rect, rect.left, rect.top, rect.right, rect.bottom);    
        
        // No special texture object in GWorld mode: Set it to NULL in the PTB texture record:
        out_texture->targetSpecific.QuickTimeGLTexture = NULL;

        // Set texture orientation as if it were an inverted Offscreen window: Upside-down.
        out_texture->textureOrientation = 3;
        
        // Setup a pointer to our GWorld as texture data pointer:
        out_texture->textureMemorySizeBytes = 0;

		// Quicktime textures are aligned on 4-byte boundaries:
		out_texture->textureByteAligned = 4;

        // Lock GWorld:
        if(!LockPixels(GetGWorldPixMap(movieRecordBANK[moviehandle].QTMovieGWorld))) {
            // Locking surface failed! We abort.
            PsychErrorExitMsg(PsychError_internal, "PsychQTGetTextureFromMovie(): Locking GWorld pixmap surface failed!!!");
        }
        
        // This will retrieve an OpenGL-compatible pointer to the GWorld's pixel data and assign it to our texmemptr:
        out_texture->textureMemory = (GLuint*) GetPixBaseAddr(GetGWorldPixMap(movieRecordBANK[moviehandle].QTMovieGWorld));
            
        // Let PsychCreateTexture() do the rest of the job of creating, setting up and
        // filling an OpenGL texture with the GWorld's content:
        PsychCreateTexture(out_texture);
        
        // Undo hack from above after texture creation: Now we need the real width of the
        // texture for proper texture coordinate assignments in drawing code et al.
        rect.right = rect.right - 4;
        PsychMakeRect(outRect, rect.left, rect.top, rect.right, rect.bottom);    

        // Unlock GWorld surface. A glFinish() could be done here first for safety,
        // but it is currently disabled:
        //glFinish();
        UnlockPixels(GetGWorldPixMap(movieRecordBANK[moviehandle].QTMovieGWorld));

        // Ready to use the texture... We're done.
    }
    
	// Normalize texture rectangle and assign it:
	PsychNormalizeRect(outRect, out_texture->rect);
	
    rate = FixedToFloat(GetMovieRate(theMovie));
    
    // Detection of dropped frames: This is a heuristic. We'll see how well it works out...
    if (rate && presentation_timestamp) {
        // Try to check for dropped frames in playback mode:

        // Expected delta between successive presentation timestamps:
        targetdelta = 1.0f / (movieRecordBANK[moviehandle].fps * rate);
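        // Illustrative (hypothetical) numbers: with fps = 30 and rate = 1.0,
        // targetdelta = 1/30 s (~0.0333 s). If the measured realdelta between two
        // returned frames is ~0.0667 s, then frames evaluates to ~2.0, and
        // (int) (frames - 1 + 0.5) below adds 1 to the dropped-frame count.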

        // Compute real delta, given rate and playback direction:
        if (rate>0) {
            realdelta = *presentation_timestamp - movieRecordBANK[moviehandle].last_pts;
            if (realdelta<0) realdelta = 0;
        }
        else {
            realdelta = -1.0 * (*presentation_timestamp - movieRecordBANK[moviehandle].last_pts);
            if (realdelta<0) realdelta = 0;
        }
        
        frames = realdelta / targetdelta;
        // Dropped frames?
        if (frames > 1 && movieRecordBANK[moviehandle].last_pts>=0) {
            movieRecordBANK[moviehandle].nr_droppedframes += (int) (frames - 1 + 0.5);
        }

        movieRecordBANK[moviehandle].last_pts = *presentation_timestamp;
    }
    
    // Manually advance movie time, if in fetch mode:
    if (0 == GetMovieRate(theMovie)) {
        // We are in manual fetch mode: Need to manually advance movie time to next
        // media sample:
		if (nextFramesTime == myNextTime) {
			// Invalid value? Try to hack something that gets us unstuck:
			myNextTime = GetMovieTime(theMovie, NULL);
			nextFramesTime = myNextTime + (TimeValue) 1;
		}

        SetMovieTimeValue(theMovie, nextFramesTime);        
    }
    
    // Check if end of movie is reached. Rewind, if so...
    if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag > 0) {
        if (GetMovieRate(theMovie)>0) {
            GoToBeginningOfMovie(theMovie);
        } else {
            GoToEndOfMovie(theMovie);
        }
    }

    return(TRUE);
}
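A minimal caller sketch (a hypothetical helper, not part of the source) illustrating the two-pass protocol described in the comments above: first poll with checkForImage = TRUE, then, on success, immediately fetch the texture with checkForImage = FALSE so the time values cached between the two passes are still valid.

// Hypothetical helper sketching the intended calling sequence; real code would
// also create and manage the texture record and handle errors.
static int FetchNextMovieFrameSketch(PsychWindowRecordType *win, int moviehandle,
                                     PsychWindowRecordType *out_texture, double *pts)
{
    int rc;

    // Pass 1: Only poll for availability of a new frame (no texture returned here).
    rc = PsychQTGetTextureFromMovie(win, moviehandle, TRUE, -1, NULL, NULL);
    if (rc != TRUE) return(rc);   // -1 = end of non-looping movie, 0 = nothing new yet.

    // Pass 2: Must follow a successful pass 1 immediately, because the function
    // caches the next frame's time values in statics between the two passes.
    return(PsychQTGetTextureFromMovie(win, moviehandle, FALSE, -1, out_texture, pts));
}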
Example #27
/**
 * Draw a point sprite
 */
static void
sprite_point(GLcontext *ctx, const SWvertex *vert)
{
   SWcontext *swrast = SWRAST_CONTEXT(ctx);
   SWspan span;
   GLfloat size;
   GLuint tCoords[MAX_TEXTURE_COORD_UNITS + 1];
   GLuint numTcoords = 0;
   GLfloat t0, dtdy;

   CULL_INVALID(vert);

   /* z coord */
   if (ctx->DrawBuffer->Visual.depthBits <= 16)
      span.z = FloatToFixed(vert->attrib[FRAG_ATTRIB_WPOS][2] + 0.5F);
   else
      span.z = (GLuint) (vert->attrib[FRAG_ATTRIB_WPOS][2] + 0.5F);
   span.zStep = 0;

   size = get_size(ctx, vert, GL_FALSE);

   /* span init */
   INIT_SPAN(span, GL_POINT);
   span.interpMask = SPAN_Z | SPAN_RGBA;

   span.facing = swrast->PointLineFacing;

   span.red   = ChanToFixed(vert->color[0]);
   span.green = ChanToFixed(vert->color[1]);
   span.blue  = ChanToFixed(vert->color[2]);
   span.alpha = ChanToFixed(vert->color[3]);
   span.redStep = 0;
   span.greenStep = 0;
   span.blueStep = 0;
   span.alphaStep = 0;

   /* need these for fragment programs */
   span.attrStart[FRAG_ATTRIB_WPOS][3] = 1.0F;
   span.attrStepX[FRAG_ATTRIB_WPOS][3] = 0.0F;
   span.attrStepY[FRAG_ATTRIB_WPOS][3] = 0.0F;

   {
      GLfloat s, r, dsdx;

      /* texcoord / pointcoord interpolants */
      s = 0.0;
      dsdx = 1.0 / size;
      if (ctx->Point.SpriteOrigin == GL_LOWER_LEFT) {
         dtdy = 1.0 / size;
         t0 = 0.5 * dtdy;
      }
      else {
         /* GL_UPPER_LEFT */
         dtdy = -1.0 / size;
         t0 = 1.0 + 0.5 * dtdy;
      }
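      /* Illustrative numbers (not from the source): for size = 4 with
       * GL_UPPER_LEFT origin, dtdy = -0.25 and t0 = 0.875, so successive
       * rows get t = 0.875, 0.625, 0.375, 0.125; with GL_LOWER_LEFT the
       * same rows get t = 0.125, 0.375, 0.625, 0.875.
       */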

      ATTRIB_LOOP_BEGIN
         if (attr >= FRAG_ATTRIB_TEX0 && attr < FRAG_ATTRIB_VAR0) {
            const GLuint u = attr - FRAG_ATTRIB_TEX0;
            /* a texcoord */
            if (ctx->Point.CoordReplace[u]) {
               tCoords[numTcoords++] = attr;

               if (ctx->Point.SpriteRMode == GL_ZERO)
                  r = 0.0F;
               else if (ctx->Point.SpriteRMode == GL_S)
                  r = vert->attrib[attr][0];
               else /* GL_R */
                  r = vert->attrib[attr][2];

               span.attrStart[attr][0] = s;
               span.attrStart[attr][1] = 0.0; /* overwritten below */
               span.attrStart[attr][2] = r;
               span.attrStart[attr][3] = 1.0;

               span.attrStepX[attr][0] = dsdx;
               span.attrStepX[attr][1] = 0.0;
               span.attrStepX[attr][2] = 0.0;
               span.attrStepX[attr][3] = 0.0;

               span.attrStepY[attr][0] = 0.0;
               span.attrStepY[attr][1] = dtdy;
               span.attrStepY[attr][2] = 0.0;
               span.attrStepY[attr][3] = 0.0;

               continue;
            }
         }
         else if (attr == FRAG_ATTRIB_FOGC) {
            /* GLSL gl_PointCoord is stored in fog.zw */
            span.attrStart[FRAG_ATTRIB_FOGC][2] = 0.0;
            span.attrStart[FRAG_ATTRIB_FOGC][3] = 0.0; /* t0 set below */
            span.attrStepX[FRAG_ATTRIB_FOGC][2] = dsdx;
            span.attrStepX[FRAG_ATTRIB_FOGC][3] = 0.0;
            span.attrStepY[FRAG_ATTRIB_FOGC][2] = 0.0;
            span.attrStepY[FRAG_ATTRIB_FOGC][3] = dtdy;
            tCoords[numTcoords++] = FRAG_ATTRIB_FOGC;
            continue;
         }
         /* use vertex's texcoord/attrib */
         COPY_4V(span.attrStart[attr], vert->attrib[attr]);
         ASSIGN_4V(span.attrStepX[attr], 0, 0, 0, 0);
         ASSIGN_4V(span.attrStepY[attr], 0, 0, 0, 0);
      ATTRIB_LOOP_END;
   }

   /* compute pos, bounds and render */
   {
      const GLfloat x = vert->attrib[FRAG_ATTRIB_WPOS][0];
      const GLfloat y = vert->attrib[FRAG_ATTRIB_WPOS][1];
      GLint iSize = (GLint) (size + 0.5F);
      GLint xmin, xmax, ymin, ymax, iy;
      GLint iRadius;
      GLfloat tcoord = t0;

      iSize = MAX2(1, iSize);
      iRadius = iSize / 2;

      if (iSize & 1) {
         /* odd size */
         xmin = (GLint) (x - iRadius);
         xmax = (GLint) (x + iRadius);
         ymin = (GLint) (y - iRadius);
         ymax = (GLint) (y + iRadius);
      }
      else {
         /* even size */
         /* 0.501 factor allows conformance to pass */
         xmin = (GLint) (x + 0.501) - iRadius;
         xmax = xmin + iSize - 1;
         ymin = (GLint) (y + 0.501) - iRadius;
         ymax = ymin + iSize - 1;
      }
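      /* Illustrative numbers: for x = 10.3, an odd iSize of 5 (iRadius 2) gives
       * xmin = 8 and xmax = 12 (5 pixels), while an even iSize of 4 (iRadius 2)
       * gives xmin = (GLint) (10.801) - 2 = 8 and xmax = 11 (4 pixels).
       */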

      /* render spans */
      for (iy = ymin; iy <= ymax; iy++) {
         GLuint i;
         /* setup texcoord T for this row */
         for (i = 0; i < numTcoords; i++) {
            if (tCoords[i] == FRAG_ATTRIB_FOGC)
               span.attrStart[FRAG_ATTRIB_FOGC][3] = tcoord;
            else
               span.attrStart[tCoords[i]][1] = tcoord;
         }

         /* these might get changed by span clipping */
         span.x = xmin;
         span.y = iy;
         span.end = xmax - xmin + 1;

         _swrast_write_rgba_span(ctx, &span);

         tcoord += dtdy;
      }
   }
}
Example #28
void drawPlugin(NPP instance, NPCocoaEvent* event)
{
  if (!browserUAString)
    return;

  PluginInstance* currentInstance = (PluginInstance*)(instance->pdata);
  CGContextRef cgContext = event->data.draw.context;
  if (!cgContext)
    return;

  float windowWidth = currentInstance->window.width;
  float windowHeight = currentInstance->window.height;
  
  // save the cgcontext gstate
  CGContextSaveGState(cgContext);
  
  // we get a flipped context
  CGContextTranslateCTM(cgContext, 0.0, windowHeight);
  CGContextScaleCTM(cgContext, 1.0, -1.0);
  
  // draw a gray background for the plugin
  CGContextAddRect(cgContext, CGRectMake(0, 0, windowWidth, windowHeight));
  CGContextSetGrayFillColor(cgContext, 0.5, 1.0);
  CGContextDrawPath(cgContext, kCGPathFill);
  
  // draw a black frame around the plugin
  CGContextAddRect(cgContext, CGRectMake(0, 0, windowWidth, windowHeight));
  CGContextSetGrayStrokeColor(cgContext, 0.0, 1.0);
  CGContextSetLineWidth(cgContext, 6.0);
  CGContextStrokePath(cgContext);
  
  // draw the UA string using ATSUI
  CGContextSetGrayFillColor(cgContext, 0.0, 1.0);
  ATSUStyle atsuStyle;
  ATSUCreateStyle(&atsuStyle);
  CFIndex stringLength = CFStringGetLength(browserUAString);
  UniChar* unicharBuffer = (UniChar*)malloc((stringLength + 1) * sizeof(UniChar));
  CFStringGetCharacters(browserUAString, CFRangeMake(0, stringLength), unicharBuffer);
  UniCharCount runLengths = kATSUToTextEnd;
  ATSUTextLayout atsuLayout;
  ATSUCreateTextLayoutWithTextPtr(unicharBuffer,
                                  kATSUFromTextBeginning,
                                  kATSUToTextEnd,
                                  stringLength,
                                  1,
                                  &runLengths,
                                  &atsuStyle,
                                  &atsuLayout);
  ATSUAttributeTag contextTag = kATSUCGContextTag;
  ByteCount byteSize = sizeof(CGContextRef);
  ATSUAttributeValuePtr contextATSUPtr = &cgContext;
  ATSUSetLayoutControls(atsuLayout, 1, &contextTag, &byteSize, &contextATSUPtr);
  ATSUTextMeasurement lineAscent, lineDescent;
  ATSUGetLineControl(atsuLayout,
                    kATSUFromTextBeginning,
                    kATSULineAscentTag,
                    sizeof(ATSUTextMeasurement),
                    &lineAscent,
                    &byteSize);
  ATSUGetLineControl(atsuLayout,
                    kATSUFromTextBeginning,
                    kATSULineDescentTag,
                    sizeof(ATSUTextMeasurement),
                    &lineDescent,
                    &byteSize);
  float lineHeight = FixedToFloat(lineAscent) + FixedToFloat(lineDescent);  
  ItemCount softBreakCount;
  ATSUBatchBreakLines(atsuLayout,
                      kATSUFromTextBeginning,
                      stringLength,
                      FloatToFixed(windowWidth - 10.0),
                      &softBreakCount);
  ATSUGetSoftLineBreaks(atsuLayout,
                        kATSUFromTextBeginning,
                        kATSUToTextEnd,
                        0, NULL, &softBreakCount);
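  // The ATSUGetSoftLineBreaks call above (0 / NULL buffer) only queries the break
  // count; the call below fills the allocated offset array with the break positions.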
  UniCharArrayOffset* softBreaks = (UniCharArrayOffset*)malloc(softBreakCount * sizeof(UniCharArrayOffset));
  ATSUGetSoftLineBreaks(atsuLayout,
                        kATSUFromTextBeginning,
                        kATSUToTextEnd,
                        softBreakCount, softBreaks, &softBreakCount);
  UniCharArrayOffset currentDrawOffset = kATSUFromTextBeginning;
  int i = 0;
  while (i < softBreakCount) {
    ATSUDrawText(atsuLayout, currentDrawOffset, softBreaks[i], FloatToFixed(5.0), FloatToFixed(windowHeight - 5.0 - (lineHeight * (i + 1.0))));
    currentDrawOffset = softBreaks[i];
    i++;
  }
  ATSUDrawText(atsuLayout, currentDrawOffset, kATSUToTextEnd, FloatToFixed(5.0), FloatToFixed(windowHeight - 5.0 - (lineHeight * (i + 1.0))));
  free(unicharBuffer);
  free(softBreaks);
  
  // restore the cgcontext gstate
  CGContextRestoreGState(cgContext);
}
/*
 *  PsychQTSetMovieTimeIndex()  -- Set current playback time of movie.
 */
double PsychQTSetMovieTimeIndex(int moviehandle, double timeindex, psych_bool indexIsFrames)
{
    Movie		theMovie;
    double		oldtime;
	long		targetIndex, myIndex;
    short		myFlags;
    TimeValue	myTime;
    OSType		myTypes[1];
    
    if (moviehandle < 0 || moviehandle >= PSYCH_MAX_MOVIES) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided!");
    }
    
    // Fetch references to objects we need:
    theMovie = movieRecordBANK[moviehandle].theMovie;    
    if (theMovie == NULL) {
        PsychErrorExitMsg(PsychError_user, "Invalid moviehandle provided. No movie associated with this handle !!!");
    }
    
    // Retrieve current timeindex:
    oldtime = (double) GetMovieTime(theMovie, NULL) / (double) GetMovieTimeScale(theMovie);
    
	// Index based or target time based seeking?
	if (indexIsFrames) {
		// Index based seeking:
		
		// Seek to given targetIndex:
		targetIndex = (long) (timeindex + 0.5);

		// We want video samples.
		myTypes[0] = VisualMediaCharacteristic;
		
		// We want to begin with the first frame in the movie:
		myFlags = nextTimeStep + nextTimeEdgeOK;
		
		// Start with iteration at beginning:
		myTime = 0;
		myIndex = -1;
		
		// We iterate until end of movie (myTime < 0) or targetIndex reached:
		while ((myTime >= 0) && (myIndex < targetIndex)) {
			// Increment our index position:
			myIndex++;
			
			// Look for the next frame in the track; when there are no more frames,
			// myTime is set to -1, so we'll exit the while loop
			GetMovieNextInterestingTime(theMovie, myFlags, 1, myTypes, myTime, FloatToFixed(1), &myTime, NULL);

			// after the first interesting time, don't include the time we're currently at
			myFlags = nextTimeStep;
		}    
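		// Note: this seek steps through video samples one at a time from the start
		// of the movie, so seeking to a large frame index can take noticeable time.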
		
		// Valid time for existing target frame?
		if (myTime >= 0) {
			// Yes. Seek to it:
			SetMovieTimeValue(theMovie, myTime);
		}

		// Done with seek.
	}
	else {
		// Time based seeking:

		// Set new timeindex as time in seconds:
		SetMovieTimeValue(theMovie, (TimeValue) (((timeindex * (double) GetMovieTimeScale(theMovie))) + 0.5f));

		// Done with seek.
	}

    // Check if end of movie is reached. Rewind, if so...
    if (IsMovieDone(theMovie) && movieRecordBANK[moviehandle].loopflag > 0) {
        if (GetMovieRate(theMovie) > 0) {
            GoToBeginningOfMovie(theMovie);
        } else {
            GoToEndOfMovie(theMovie);
        }
    }

	// Yield some processing time to Quicktime to update properly:
    MoviesTask(theMovie, 0);
    
    // Return old time value of previous position:
    return(oldtime);
}
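A brief, hypothetical usage sketch of the seek routine above, assuming moviehandle 0 refers to an already opened movie: seek first to video frame 100, then to the 3.5 second mark; each call returns the previous playback position in seconds.

    double previous;
    previous = PsychQTSetMovieTimeIndex(0, 100.0, TRUE);   // Frame-index based seek to frame 100.
    previous = PsychQTSetMovieTimeIndex(0, 3.5, FALSE);    // Time based seek to 3.5 seconds.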
Example #30
/**
 * Draw smooth/antialiased point.  RGB or CI mode.
 */
static void
smooth_point(GLcontext *ctx, const SWvertex *vert)
{
   SWcontext *swrast = SWRAST_CONTEXT(ctx);
   const GLboolean ciMode = !ctx->Visual.rgbMode;
   SWspan span;
   GLfloat size, alphaAtten;

   CULL_INVALID(vert);

   /* z coord */
   if (ctx->DrawBuffer->Visual.depthBits <= 16)
      span.z = FloatToFixed(vert->attrib[FRAG_ATTRIB_WPOS][2] + 0.5F);
   else
      span.z = (GLuint) (vert->attrib[FRAG_ATTRIB_WPOS][2] + 0.5F);
   span.zStep = 0;

   size = get_size(ctx, vert, GL_TRUE);

   /* alpha attenuation / fade factor */
   if (ctx->Multisample._Enabled) {
      if (vert->pointSize >= ctx->Point.Threshold) {
         alphaAtten = 1.0F;
      }
      else {
         GLfloat dsize = vert->pointSize / ctx->Point.Threshold;
         alphaAtten = dsize * dsize;
      }
   }
   else {
      alphaAtten = 1.0;
   }
   (void) alphaAtten; /* not used */

   /* span init */
   INIT_SPAN(span, GL_POINT);
   span.interpMask = SPAN_Z | SPAN_RGBA;
   span.arrayMask = SPAN_COVERAGE | SPAN_MASK;

   span.facing = swrast->PointLineFacing;

   span.red   = ChanToFixed(vert->color[0]);
   span.green = ChanToFixed(vert->color[1]);
   span.blue  = ChanToFixed(vert->color[2]);
   span.alpha = ChanToFixed(vert->color[3]);
   span.redStep = 0;
   span.greenStep = 0;
   span.blueStep = 0;
   span.alphaStep = 0;

   /* need these for fragment programs */
   span.attrStart[FRAG_ATTRIB_WPOS][3] = 1.0F;
   span.attrStepX[FRAG_ATTRIB_WPOS][3] = 0.0F;
   span.attrStepY[FRAG_ATTRIB_WPOS][3] = 0.0F;

   ATTRIB_LOOP_BEGIN
      COPY_4V(span.attrStart[attr], vert->attrib[attr]);
      ASSIGN_4V(span.attrStepX[attr], 0, 0, 0, 0);
      ASSIGN_4V(span.attrStepY[attr], 0, 0, 0, 0);
   ATTRIB_LOOP_END

   /* compute pos, bounds and render */
   {
      const GLfloat x = vert->attrib[FRAG_ATTRIB_WPOS][0];
      const GLfloat y = vert->attrib[FRAG_ATTRIB_WPOS][1];
      const GLfloat radius = 0.5F * size;
      const GLfloat rmin = radius - 0.7071F;  /* 0.7071 = sqrt(2)/2 */
      const GLfloat rmax = radius + 0.7071F;
      const GLfloat rmin2 = MAX2(0.0F, rmin * rmin);
      const GLfloat rmax2 = rmax * rmax;
      const GLfloat cscale = 1.0F / (rmax2 - rmin2);
      const GLint xmin = (GLint) (x - radius);
      const GLint xmax = (GLint) (x + radius);
      const GLint ymin = (GLint) (y - radius);
      const GLint ymax = (GLint) (y + radius);
      GLint ix, iy;
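      /* Illustrative numbers: for size = 4, radius = 2, rmin2 ~= 1.672 and
       * rmax2 ~= 7.328, so cscale ~= 0.177; a pixel center at distance 2 from
       * the point center (dist2 = 4) gets coverage ~= 1 - (4 - 1.672) * 0.177
       * ~= 0.59.
       */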

      for (iy = ymin; iy <= ymax; iy++) {

         /* these might get changed by span clipping */
         span.x = xmin;
         span.y = iy;
         span.end = xmax - xmin + 1;

         /* compute coverage for each pixel in span */
         for (ix = xmin; ix <= xmax; ix++) {
            const GLfloat dx = ix - x + 0.5F;
            const GLfloat dy = iy - y + 0.5F;
            const GLfloat dist2 = dx * dx + dy * dy;
            GLfloat coverage;

            if (dist2 < rmax2) {
               if (dist2 >= rmin2) {
                  /* compute partial coverage */
                  coverage = 1.0F - (dist2 - rmin2) * cscale;
                  if (ciMode) {
                     /* coverage in [0,15] */
                     coverage *= 15.0;
                  }
               }
               else {
                  /* full coverage */
                  coverage = 1.0F;
               }
               span.array->mask[ix - xmin] = 1;
            }
            else {
               /* zero coverage - fragment outside the radius */
               coverage = 0.0;
               span.array->mask[ix - xmin] = 0;
            }
            span.array->coverage[ix - xmin] = coverage;
         }

         /* render span */
         _swrast_write_rgba_span(ctx, &span);

      }
   }
}