Code example #1
static void
validate_resources(const struct subtest_t st, GLuint prog, bool *pass)
{
	GLsizei max_size = 0, size, i;
	char * name;

	/* Do not run the test for GL_ATOMIC_COUNTER_BUFFER.
	 * From the GL_ARB_program_interface_query extension:
	 *
	 * "The error INVALID_OPERATION is generated if <programInterface>
	 * is ATOMIC_COUNTER_BUFFER, since active atomic counter buffer
	 * resources are not assigned name strings."
	 */
	if (st.programInterface == GL_ATOMIC_COUNTER_BUFFER)
		return;

	name = (char *) malloc(st.max_length_name);
	for (i = 0; i < st.active_resources; i++) {
		GLuint index;

		glGetProgramResourceName(prog, st.programInterface,
					 i, st.max_length_name,
					 &size, name);
		piglit_check_gl_error(GL_NO_ERROR);

		/* keep track of the maximum size */
		if (size > max_size) {
			max_size = size;
		}

		/* Check the names. Transform feedback requires the order to be
		 * the same as the one given in glTransformFeedbackVaryings.
		 * From the GL_ARB_program_interface_query extension:
		 *
		 * "The order of the active resource list is
		 * implementation-dependent for all interfaces except for
		 * TRANSFORM_FEEDBACK_VARYING. For TRANSFORM_FEEDBACK_VARYING,
		 * the active resource list will use the variable order
		 * specified in the most recent call to
		 * TransformFeedbackVaryings before the last call to
		 * LinkProgram."
		 */
		if (st.resources && !is_resource_in_list(st.resources, name, i,
			st.programInterface == GL_TRANSFORM_FEEDBACK_VARYING)) {
			fprintf(stderr, "Resource '%s' not found in '%s' "
					"resource list or found at the wrong "
					"index\n", name,
				st.programInterface_str);
			*pass = false;
		}

		/* Check the position of the arguments and see if it matches
		 * with the current position we are in.
		 */
		index = glGetProgramResourceIndex(prog, st.programInterface,
						  name);
		if (index != i) {
			fprintf(stderr, "%s: Resource '%s' is not at the "
					"position reported by "
					"glGetProgramResourceIndex (%i instead "
					"of %i)\n",
				st.programInterface_str, name, index, i);
			*pass = false;
		}

		/* check the equivalence with the old API */
		if (!consistency_check(prog, st.programInterface, name,
				       index)) {
			*pass = false;
		}
	}
	free(name);

	/* glGetProgramResourceName does not count the null terminator as part
	 * of the returned size, unlike glGetProgramInterfaceiv.
	 * From the GL_ARB_program_interface_query extension:
	 *
	 * "void GetProgramInterfaceiv(uint program, enum programInterface,
	 *                             enum pname, int *params);
	 * [...]
	 * If <pname> is MAX_NAME_LENGTH, the value returned is the length of
	 * the longest active name string for an active resource in
	 * <programInterface>. This length includes an extra character for the
	 * null terminator."
	 *
	 * "void GetProgramResourceName(uint program, enum programInterface,
	 *                              uint index, sizei bufSize,
	 *                              sizei *length, char *name);
	 * [...]
	 * The actual number of characters written into <name>, excluding the
	 * null terminator, is returned in <length>."
	 */
	if (max_size != MAX2(0, st.max_length_name - 1)) {
		fprintf(stderr, "'%s actual max length' expected %i but got "
				"%i\n", st.programInterface_str,
			st.max_length_name - 1,	max_size);
		*pass = false;
	}
}
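
A brief note on two things the test relies on. MAX2 is not shown in this snippet; below is a minimal sketch of the usual definition (an assumption here, the real one lives in a shared piglit/Mesa header) together with the length relation the final check exercises:

/* Sketch (assumption): MAX2 as the usual maximum-of-two macro. */
#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* The final check relies on the two quotes above: GL_MAX_NAME_LENGTH counts
 * the terminating NUL, while the per-name length written by
 * glGetProgramResourceName() does not, so for a non-empty interface the
 * largest observed length must equal max_length_name - 1.
 */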
Code example #2
File: maxwell.c  Project: victorliu/mpb
/* Set the current k point for the Maxwell solver.  k is given in the
   basis of the reciprocal lattice vectors, G1, G2, and G3. */
void update_maxwell_data_k(maxwell_data *d, real k[3],
			   real G1[3], real G2[3], real G3[3])
{
     int nx = d->nx, ny = d->ny, nz = d->nz;
     int cx = MAX2(1,d->nx/2), cy = MAX2(1,d->ny/2), cz = MAX2(1,d->nz/2);
     k_data *kpG = d->k_plus_G;
     real *kpGn2 = d->k_plus_G_normsqr;
     int x, y, z;
     real kx, ky, kz;

     kx = G1[0]*k[0] + G2[0]*k[1] + G3[0]*k[2];
     ky = G1[1]*k[0] + G2[1]*k[1] + G3[1]*k[2];
     kz = G1[2]*k[0] + G2[2]*k[1] + G3[2]*k[2];

     d->zero_k = kx == 0.0 && ky == 0.0 && kz == 0.0;

     d->current_k[0] = kx;
     d->current_k[1] = ky;
     d->current_k[2] = kz;

     /* make sure current parity is still valid: */
     set_maxwell_data_parity(d, d->parity);

     for (x = d->local_x_start; x < d->local_x_start + d->local_nx; ++x) {
	  int kxi = (x >= cx) ? (x - nx) : x;
	  for (y = 0; y < ny; ++y) {
	       int kyi = (y >= cy) ? (y - ny) : y;
	       for (z = 0; z < nz; ++z, kpG++, kpGn2++) {
		    int kzi = (z >= cz) ? (z - nz) : z;
		    real kpGx, kpGy, kpGz, a, b, c, leninv;

		    /* Compute k+G (noting that G is negative because
		       of the choice of sign in the FFTW Fourier transform): */
		    kpGx = kx - (G1[0]*kxi + G2[0]*kyi + G3[0]*kzi);
		    kpGy = ky - (G1[1]*kxi + G2[1]*kyi + G3[1]*kzi);
		    kpGz = kz - (G1[2]*kxi + G2[2]*kyi + G3[2]*kzi);

		    a = kpGx*kpGx + kpGy*kpGy + kpGz*kpGz;
		    kpG->kmag = sqrt(a);
		    *kpGn2 = a;
		    
		    /* Now, compute the two normal vectors: */
		    /* (Note that we choose them so that m has odd/even
		       parity in z/y, and n is even/odd in z/y.) */

		    if (a == 0) {
			 kpG->nx = 0.0; kpG->ny = 1.0; kpG->nz = 0.0;
			 kpG->mx = 0.0; kpG->my = 0.0; kpG->mz = 1.0;
		    }
		    else {
			 if (kpGx == 0.0 && kpGy == 0.0) {
			      /* put n in the y direction if k+G is in z: */
			      kpG->nx = 0.0;
			      kpG->ny = 1.0;
			      kpG->nz = 0.0;
			 }
			 else {
			      /* otherwise, let n = z x (k+G), normalized: */
			      compute_cross(&a, &b, &c,
					    0.0, 0.0, 1.0,
					    kpGx, kpGy, kpGz);
			      leninv = 1.0 / sqrt(a*a + b*b + c*c);
			      kpG->nx = a * leninv;
			      kpG->ny = b * leninv;
			      kpG->nz = c * leninv;
			 }
			 
			 /* m = n x (k+G), normalized */
			 compute_cross(&a, &b, &c,
				       kpG->nx, kpG->ny, kpG->nz,
				       kpGx, kpGy, kpGz);
			 leninv = 1.0 / sqrt(a*a + b*b + c*c);
			 kpG->mx = a * leninv;
			 kpG->my = b * leninv;
			 kpG->mz = c * leninv;
		    }

#ifdef DEBUG
#define DOT(u0,u1,u2,v0,v1,v2) ((u0)*(v0) + (u1)*(v1) + (u2)*(v2))

		    /* check orthogonality */
		    CHECK(fabs(DOT(kpGx, kpGy, kpGz,
				   kpG->nx, kpG->ny, kpG->nz)) < 1e-6,
			  "vectors not orthogonal!");
		    CHECK(fabs(DOT(kpGx, kpGy, kpGz,
				   kpG->mx, kpG->my, kpG->mz)) < 1e-6,
			  "vectors not orthogonal!");
		    CHECK(fabs(DOT(kpG->mx, kpG->my, kpG->mz,
				   kpG->nx, kpG->ny, kpG->nz)) < 1e-6,
			  "vectors not orthogonal!");

		    /* check normalization */
		    CHECK(fabs(DOT(kpG->nx, kpG->ny, kpG->nz,
				   kpG->nx, kpG->ny, kpG->nz) - 1.0) < 1e-6,
			  "vectors not unit vectors!");
		    CHECK(fabs(DOT(kpG->mx, kpG->my, kpG->mz,
				   kpG->mx, kpG->my, kpG->mz) - 1.0) < 1e-6,
			  "vectors not unit vectors!");
#endif
	       }
	  }
     }
}
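
compute_cross() is defined elsewhere in maxwell.c; a minimal sketch consistent with the call sites above (the signature is assumed from those calls: destination components first, then the two operand vectors), computing a = b x c:

static void compute_cross(real *a0, real *a1, real *a2,
                          real b0, real b1, real b2,
                          real c0, real c1, real c2)
{
     /* standard 3-D cross product, written out component by component */
     *a0 = b1 * c2 - b2 * c1;
     *a1 = b2 * c0 - b0 * c2;
     *a2 = b0 * c1 - b1 * c0;
}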
Code example #3
File: nv50_program.c  Project: xranby/mesa
static int
nv50_fragprog_assign_slots(struct nv50_ir_prog_info *info)
{
   struct nv50_program *prog = (struct nv50_program *)info->driverPriv;
   unsigned i, n, m, c;
   unsigned nvary;
   unsigned nflat;
   unsigned nintp = 0;

   /* count recorded non-flat inputs */
   for (m = 0, i = 0; i < info->numInputs; ++i) {
      switch (info->in[i].sn) {
      case TGSI_SEMANTIC_POSITION:
      case TGSI_SEMANTIC_FACE:
         continue;
      default:
         m += info->in[i].flat ? 0 : 1;
         break;
      }
   }
   /* careful: id may be != i in info->in[prog->in[i].id] */

   /* Fill prog->in[] so that non-flat inputs are first and
    * kick out special inputs that don't use the RESULT_MAP.
    */
   for (n = 0, i = 0; i < info->numInputs; ++i) {
      if (info->in[i].sn == TGSI_SEMANTIC_POSITION) {
         prog->fp.interp |= info->in[i].mask << 24;
         for (c = 0; c < 4; ++c)
            if (info->in[i].mask & (1 << c))
               info->in[i].slot[c] = nintp++;
      } else
      if (info->in[i].sn == TGSI_SEMANTIC_FACE) {
         info->in[i].slot[0] = 255;
      } else {
         unsigned j = info->in[i].flat ? m++ : n++;

         if (info->in[i].sn == TGSI_SEMANTIC_COLOR)
            prog->vp.bfc[info->in[i].si] = j;
         else if (info->in[i].sn == TGSI_SEMANTIC_PRIMID)
            prog->vp.attrs[2] |= NV50_3D_VP_GP_BUILTIN_ATTR_EN_PRIMITIVE_ID;

         prog->in[j].id = i;
         prog->in[j].mask = info->in[i].mask;
         prog->in[j].sn = info->in[i].sn;
         prog->in[j].si = info->in[i].si;
         prog->in[j].linear = info->in[i].linear;

         prog->in_nr++;
      }
   }
   if (!(prog->fp.interp & (8 << 24))) {
      ++nintp;
      prog->fp.interp |= 8 << 24;
   }

   for (i = 0; i < prog->in_nr; ++i) {
      int j = prog->in[i].id;

      prog->in[i].hw = nintp;
      for (c = 0; c < 4; ++c)
         if (prog->in[i].mask & (1 << c))
            info->in[j].slot[c] = nintp++;
   }
   /* (n == m) if m never increased, i.e. no flat inputs */
   nflat = (n < m) ? (nintp - prog->in[n].hw) : 0;
   nintp -= bitcount4(prog->fp.interp >> 24); /* subtract position inputs */
   nvary = nintp - nflat;

   prog->fp.interp |= nvary << NV50_3D_FP_INTERPOLANT_CTRL_COUNT_NONFLAT__SHIFT;
   prog->fp.interp |= nintp << NV50_3D_FP_INTERPOLANT_CTRL_COUNT__SHIFT;

   /* put front/back colors right after HPOS */
   prog->fp.colors = 4 << NV50_3D_SEMANTIC_COLOR_FFC0_ID__SHIFT;
   for (i = 0; i < 2; ++i)
      if (prog->vp.bfc[i] < 0xff)
         prog->fp.colors += bitcount4(prog->in[prog->vp.bfc[i]].mask) << 16;

   /* FP outputs */

   if (info->prop.fp.numColourResults > 1)
      prog->fp.flags[0] |= NV50_3D_FP_CONTROL_MULTIPLE_RESULTS;

   for (i = 0; i < info->numOutputs; ++i) {
      prog->out[i].id = i;
      prog->out[i].sn = info->out[i].sn;
      prog->out[i].si = info->out[i].si;
      prog->out[i].mask = info->out[i].mask;

      if (i == info->io.fragDepth || i == info->io.sampleMask)
         continue;
      prog->out[i].hw = info->out[i].si * 4;

      for (c = 0; c < 4; ++c)
         info->out[i].slot[c] = prog->out[i].hw + c;

      prog->max_out = MAX2(prog->max_out, prog->out[i].hw + 4);
   }

   if (info->io.sampleMask < PIPE_MAX_SHADER_OUTPUTS) {
      info->out[info->io.sampleMask].slot[0] = prog->max_out++;
      prog->fp.has_samplemask = 1;
   }

   if (info->io.fragDepth < PIPE_MAX_SHADER_OUTPUTS)
      info->out[info->io.fragDepth].slot[2] = prog->max_out++;

   if (!prog->max_out)
      prog->max_out = 4;

   return 0;
}
Code example #4
File: svga_link.c  Project: MIPS/external-mesa3d
/**
 * Examine input and output shaders info to link outputs from the
 * output shader to inputs from the input shader.
 * Basically, we'll remap input shader's input slots to new numbers
 * based on semantic name/index of the outputs from the output shader.
 */
void
svga_link_shaders(const struct tgsi_shader_info *outshader_info,
                  const struct tgsi_shader_info *inshader_info,
                  struct shader_linkage *linkage)
{
   unsigned i, free_slot;

   for (i = 0; i < ARRAY_SIZE(linkage->input_map); i++) {
      linkage->input_map[i] = INVALID_INDEX;
   }

   /* Assign input slots for input shader inputs.
    * Basically, we want to use the same index for the output shader's outputs
    * and the input shader's inputs that should be linked together.
    * We'll modify the input shader's inputs to match the output shader.
    */
   assert(inshader_info->num_inputs <=
          ARRAY_SIZE(inshader_info->input_semantic_name));

   /* free register index that can be used for built-in varyings */
   free_slot = outshader_info->num_outputs + 1;

   for (i = 0; i < inshader_info->num_inputs; i++) {
      unsigned sem_name = inshader_info->input_semantic_name[i];
      unsigned sem_index = inshader_info->input_semantic_index[i];
      unsigned j;
      /**
       * Get the clip distance inputs from the output shader's
       * clip distance shadow copy.
       */
      if (sem_name == TGSI_SEMANTIC_CLIPDIST) {
         linkage->input_map[i] = outshader_info->num_outputs + 1 + sem_index;
         /* make sure free_slot includes this extra output */
         free_slot = MAX2(free_slot, linkage->input_map[i] + 1);
      }
      else {
         /* search output shader outputs for same item */
         for (j = 0; j < outshader_info->num_outputs; j++) {
            assert(j < ARRAY_SIZE(outshader_info->output_semantic_name));
            if (outshader_info->output_semantic_name[j] == sem_name &&
                outshader_info->output_semantic_index[j] == sem_index) {
               linkage->input_map[i] = j;
               break;
            }
         }
      }
   }

   linkage->num_inputs = inshader_info->num_inputs;

   /* Things like the front-face register are handled here */
   for (i = 0; i < inshader_info->num_inputs; i++) {
      if (linkage->input_map[i] == INVALID_INDEX) {
         unsigned j = free_slot++;
         linkage->input_map[i] = j;
      }
   }

   /* Debug */
   if (SVGA_DEBUG & DEBUG_TGSI) {
      unsigned reg = 0;
      debug_printf("### linkage info:\n");

      for (i = 0; i < linkage->num_inputs; i++) {

         assert(linkage->input_map[i] != INVALID_INDEX);

         debug_printf("   input[%d] slot %u  %s %u %s\n",
                      i,
                      linkage->input_map[i],
                      tgsi_semantic_names[inshader_info->input_semantic_name[i]],
                      inshader_info->input_semantic_index[i],
                      tgsi_interpolate_names[inshader_info->input_interpolate[i]]);

         /* make sure no repeating register index */
         if (reg & 1 << linkage->input_map[i]) {
            assert(0);
         }
         reg |= 1 << linkage->input_map[i];
      }
   }
}
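
A worked example may make the mapping concrete (hypothetical shaders, not taken from the source):

   /* Hypothetical link:
    *   output shader outputs : 0 = POSITION, 1 = COLOR0, 2 = TEXCOORD0
    *   input shader inputs   : 0 = TEXCOORD0, 1 = COLOR0, 2 = FOG
    * Matching by semantic name/index gives input_map[0] = 2 and
    * input_map[1] = 1.  FOG has no matching output, so the final loop hands
    * it the next free slot, free_slot = num_outputs + 1 = 4.
    */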
Code example #5
File: globalDefinitions.hpp  Project: eregon/jvmci
template<class T> inline T MAX3(T a, T b, T c)      { return MAX2(MAX2(a, b), c); }
Code example #6
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the space are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another
    // space, and a failure of the check may not correctly indicate
    // which space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
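
An illustrative sizing (the numbers are assumptions, not from the source) shows how the three regions end up laid out back to back in the committed space:

  // Illustration only: committed_size = 64 MB, compute_survivor_size() = 8 MB
  //   eden_size = 64 MB - 2 * 8 MB = 48 MB
  //   eden : [low,          low + 48 MB)
  //   from : [low + 48 MB,  low + 56 MB)
  //   to   : [low + 56 MB,  low + 64 MB)  == _virtual_space.high()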
Code example #7
static void
copy_image_with_memcpy(struct brw_context *brw,
                       struct intel_mipmap_tree *src_mt, int src_level,
                       int src_x, int src_y, int src_z,
                       struct intel_mipmap_tree *dst_mt, int dst_level,
                       int dst_x, int dst_y, int dst_z,
                       int src_width, int src_height)
{
   bool same_slice;
   void *mapped, *src_mapped, *dst_mapped;
   ptrdiff_t src_stride, dst_stride, cpp;
   int map_x1, map_y1, map_x2, map_y2;
   GLuint src_bw, src_bh;

   cpp = _mesa_get_format_bytes(src_mt->format);
   _mesa_get_format_block_size(src_mt->format, &src_bw, &src_bh);

   assert(src_width % src_bw == 0);
   assert(src_height % src_bh == 0);
   assert(src_x % src_bw == 0);
   assert(src_y % src_bh == 0);

   /* If we are on the same miptree, same level, and same slice, then
    * intel_miptree_map won't let us map it twice.  We have to do things a
    * bit differently.  In particular, we do a single map large enough for
    * both portions and in read-write mode.
    */
   same_slice = src_mt == dst_mt && src_level == dst_level && src_z == dst_z;

   if (same_slice) {
      assert(dst_x % src_bw == 0);
      assert(dst_y % src_bh == 0);

      map_x1 = MIN2(src_x, dst_x);
      map_y1 = MIN2(src_y, dst_y);
      map_x2 = MAX2(src_x, dst_x) + src_width;
      map_y2 = MAX2(src_y, dst_y) + src_height;

      intel_miptree_map(brw, src_mt, src_level, src_z,
                        map_x1, map_y1, map_x2 - map_x1, map_y2 - map_y1,
                        GL_MAP_READ_BIT | GL_MAP_WRITE_BIT,
                        &mapped, &src_stride);

      dst_stride = src_stride;

      /* Set the offsets here so we don't have to think about it while looping */
      src_mapped = mapped + ((src_y - map_y1) / src_bh) * src_stride +
                            ((src_x - map_x1) / src_bw) * cpp;
      dst_mapped = mapped + ((dst_y - map_y1) / src_bh) * dst_stride +
                            ((dst_x - map_x1) / src_bw) * cpp;
   } else {
      intel_miptree_map(brw, src_mt, src_level, src_z,
                        src_x, src_y, src_width, src_height,
                        GL_MAP_READ_BIT, &src_mapped, &src_stride);
      intel_miptree_map(brw, dst_mt, dst_level, dst_z,
                        dst_x, dst_y, src_width, src_height,
                        GL_MAP_WRITE_BIT, &dst_mapped, &dst_stride);
   }

   src_width /= (int)src_bw;
   src_height /= (int)src_bh;

   for (int i = 0; i < src_height; ++i) {
      memcpy(dst_mapped, src_mapped, src_width * cpp);
      src_mapped += src_stride;
      dst_mapped += dst_stride;
   }

   if (same_slice) {
      intel_miptree_unmap(brw, src_mt, src_level, src_z);
   } else {
      intel_miptree_unmap(brw, dst_mt, dst_level, dst_z);
      intel_miptree_unmap(brw, src_mt, src_level, src_z);
   }
}
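
For block-compressed formats the divisions at the end switch the copy loop from pixels to blocks; a hypothetical example (format details assumed, e.g. a DXT1-style 4x4 block with 8 bytes per block):

   /* Assumed example: 4x4 blocks, cpp = 8 bytes per block, copying a
    * 64x64-pixel region:
    *   src_width  = 64 / 4 = 16 blocks per row
    *   src_height = 64 / 4 = 16 block rows
    * so the loop performs 16 memcpy() calls of 16 * 8 = 128 bytes each.
    */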
Code example #8
File: NBNetBuilder.cpp  Project: cbrafter/sumo
void
NBNetBuilder::compute(OptionsCont& oc,
                      const std::set<std::string>& explicitTurnarounds,
                      bool removeElements) {
    GeoConvHelper& geoConvHelper = GeoConvHelper::getProcessing();


    const bool lefthand = oc.getBool("lefthand");
    if (lefthand) {
        mirrorX();
    }

    // MODIFYING THE SETS OF NODES AND EDGES

    // Removes edges that are connecting the same node
    long before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Removing self-loops");
    myNodeCont.removeSelfLoops(myDistrictCont, myEdgeCont, myTLLCont);
    PROGRESS_TIME_MESSAGE(before);
    //
    if (oc.exists("remove-edges.isolated") && oc.getBool("remove-edges.isolated")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Finding isolated roads");
        myNodeCont.removeIsolatedRoads(myDistrictCont, myEdgeCont, myTLLCont);
        PROGRESS_TIME_MESSAGE(before);
    }
    //
    if (oc.exists("keep-edges.postload") && oc.getBool("keep-edges.postload")) {
        if (oc.isSet("keep-edges.explicit") || oc.isSet("keep-edges.input-file")) {
            before = SysUtils::getCurrentMillis();
            PROGRESS_BEGIN_MESSAGE("Removing unwished edges");
            myEdgeCont.removeUnwishedEdges(myDistrictCont);
            PROGRESS_TIME_MESSAGE(before);
        }
    }
    if (oc.getBool("junctions.join") || (oc.exists("ramps.guess") && oc.getBool("ramps.guess"))) {
        // preliminary geometry computations to determine the length of edges
        // This depends on turning directions and sorting of edge list
        // in case junctions are joined geometry computations have to be repeated
        // preliminary roundabout computations to avoid damaging roundabouts via junctions.join or ramps.guess
        NBTurningDirectionsComputer::computeTurnDirections(myNodeCont, false);
        NBNodesEdgesSorter::sortNodesEdges(myNodeCont);
        myEdgeCont.computeLaneShapes();
        myNodeCont.computeNodeShapes();
        myEdgeCont.computeEdgeShapes();
        if (oc.getBool("roundabouts.guess")) {
            myEdgeCont.guessRoundabouts();
        }
        const std::set<EdgeSet>& roundabouts = myEdgeCont.getRoundabouts();
        for (std::set<EdgeSet>::const_iterator it_round = roundabouts.begin();
                it_round != roundabouts.end(); ++it_round) {
            std::vector<std::string> nodeIDs;
            for (EdgeSet::const_iterator it_edge = it_round->begin(); it_edge != it_round->end(); ++it_edge) {
                nodeIDs.push_back((*it_edge)->getToNode()->getID());
            }
            myNodeCont.addJoinExclusion(nodeIDs);
        }
    }
    // join junctions (may create new "geometry"-nodes so it needs to come before removing these)
    if (oc.exists("junctions.join-exclude") && oc.isSet("junctions.join-exclude")) {
        myNodeCont.addJoinExclusion(oc.getStringVector("junctions.join-exclude"));
    }
    unsigned int numJoined = myNodeCont.joinLoadedClusters(myDistrictCont, myEdgeCont, myTLLCont);
    if (oc.getBool("junctions.join")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Joining junction clusters");
        numJoined += myNodeCont.joinJunctions(oc.getFloat("junctions.join-dist"), myDistrictCont, myEdgeCont, myTLLCont);
        PROGRESS_TIME_MESSAGE(before);
    }
    if (oc.getBool("junctions.join") || (oc.exists("ramps.guess") && oc.getBool("ramps.guess"))) {
        // reset geometry to avoid influencing subsequent steps (ramps.guess)
        myEdgeCont.computeLaneShapes();
    }
    if (numJoined > 0) {
        // bit of a misnomer since we're already done
        WRITE_MESSAGE(" Joined " + toString(numJoined) + " junction cluster(s).");
    }
    //
    if (removeElements) {
        unsigned int no = 0;
        const bool removeGeometryNodes = oc.exists("geometry.remove") && oc.getBool("geometry.remove");
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Removing empty nodes" + std::string(removeGeometryNodes ? " and geometry nodes" : ""));
        // removeUnwishedNodes needs turnDirections. @todo: try to call this less often
        NBTurningDirectionsComputer::computeTurnDirections(myNodeCont, false);
        no = myNodeCont.removeUnwishedNodes(myDistrictCont, myEdgeCont, myTLLCont, removeGeometryNodes);
        PROGRESS_TIME_MESSAGE(before);
        WRITE_MESSAGE("   " + toString(no) + " nodes removed.");
    }

    // MOVE TO ORIGIN
    // compute new boundary after network modifications have taken place
    Boundary boundary;
    for (std::map<std::string, NBNode*>::const_iterator it = myNodeCont.begin(); it != myNodeCont.end(); ++it) {
        boundary.add(it->second->getPosition());
    }
    for (std::map<std::string, NBEdge*>::const_iterator it = myEdgeCont.begin(); it != myEdgeCont.end(); ++it) {
        boundary.add(it->second->getGeometry().getBoxBoundary());
    }
    geoConvHelper.setConvBoundary(boundary);

    if (!oc.getBool("offset.disable-normalization") && oc.isDefault("offset.x") && oc.isDefault("offset.y")) {
        moveToOrigin(geoConvHelper, lefthand);
    }
    geoConvHelper.computeFinal(lefthand); // information needed for location element fixed at this point

    if (oc.exists("geometry.min-dist") && oc.isSet("geometry.min-dist")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Reducing geometries");
        myEdgeCont.reduceGeometries(oc.getFloat("geometry.min-dist"));
        PROGRESS_TIME_MESSAGE(before);
    }
    // @note: removing geometry can create similar edges so joinSimilarEdges  must come afterwards
    // @note: likewise splitting can destroy similarities so joinSimilarEdges must come before
    if (removeElements && oc.getBool("edges.join")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Joining similar edges");
        myNodeCont.joinSimilarEdges(myDistrictCont, myEdgeCont, myTLLCont);
        PROGRESS_TIME_MESSAGE(before);
    }
    if (oc.getBool("opposites.guess")) {
        PROGRESS_BEGIN_MESSAGE("guessing opposite direction edges");
        myEdgeCont.guessOpposites();
        PROGRESS_DONE_MESSAGE();
    }
    //
    if (oc.exists("geometry.split") && oc.getBool("geometry.split")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Splitting geometry edges");
        myEdgeCont.splitGeometry(myNodeCont);
        PROGRESS_TIME_MESSAGE(before);
    }
    // turning direction
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing turning directions");
    NBTurningDirectionsComputer::computeTurnDirections(myNodeCont);
    PROGRESS_TIME_MESSAGE(before);
    // correct edge geometries to avoid overlap
    myNodeCont.avoidOverlap();
    // guess ramps
    if ((oc.exists("ramps.guess") && oc.getBool("ramps.guess")) || (oc.exists("ramps.set") && oc.isSet("ramps.set"))) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Guessing and setting on-/off-ramps");
        NBNodesEdgesSorter::sortNodesEdges(myNodeCont);
        NBRampsComputer::computeRamps(*this, oc);
        PROGRESS_TIME_MESSAGE(before);
    }
    // guess sidewalks
    if (oc.getBool("sidewalks.guess") || oc.getBool("sidewalks.guess.from-permissions")) {
        const int sidewalks = myEdgeCont.guessSidewalks(oc.getFloat("default.sidewalk-width"),
                              oc.getFloat("sidewalks.guess.min-speed"),
                              oc.getFloat("sidewalks.guess.max-speed"),
                              oc.getBool("sidewalks.guess.from-permissions"));
        WRITE_MESSAGE("Guessed " + toString(sidewalks) + " sidewalks.");
    }

    // check whether any connections that could not previously be set may be set now
    myEdgeCont.recheckPostProcessConnections();

    // remap ids if wished
    if (oc.getBool("numerical-ids")) {
        int numChangedEdges = myEdgeCont.mapToNumericalIDs();
        int numChangedNodes = myNodeCont.mapToNumericalIDs();
        if (numChangedEdges + numChangedNodes > 0) {
            WRITE_MESSAGE("Remapped " + toString(numChangedEdges) + " edge IDs and " + toString(numChangedNodes) + " node IDs.");
        }
    }

    //
    if (oc.exists("geometry.max-angle")) {
        myEdgeCont.checkGeometries(
            DEG2RAD(oc.getFloat("geometry.max-angle")),
            oc.getFloat("geometry.min-radius"),
            oc.getBool("geometry.min-radius.fix"));
    }

    // GEOMETRY COMPUTATION
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Sorting nodes' edges");
    NBNodesEdgesSorter::sortNodesEdges(myNodeCont);
    PROGRESS_TIME_MESSAGE(before);
    myEdgeCont.computeLaneShapes();
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing node shapes");
    if (oc.exists("geometry.junction-mismatch-threshold")) {
        myNodeCont.computeNodeShapes(oc.getFloat("geometry.junction-mismatch-threshold"));
    } else {
        myNodeCont.computeNodeShapes();
    }
    PROGRESS_TIME_MESSAGE(before);
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing edge shapes");
    myEdgeCont.computeEdgeShapes();
    PROGRESS_TIME_MESSAGE(before);
    // resort edges based on the node and edge shapes
    NBNodesEdgesSorter::sortNodesEdges(myNodeCont, true);
    NBTurningDirectionsComputer::computeTurnDirections(myNodeCont, false);

    // APPLY SPEED MODIFICATIONS
    if (oc.exists("speed.offset")) {
        const SUMOReal speedOffset = oc.getFloat("speed.offset");
        const SUMOReal speedFactor = oc.getFloat("speed.factor");
        if (speedOffset != 0 || speedFactor != 1 || oc.isSet("speed.minimum")) {
            const SUMOReal speedMin = oc.isSet("speed.minimum") ? oc.getFloat("speed.minimum") : -std::numeric_limits<SUMOReal>::infinity();
            before = SysUtils::getCurrentMillis();
            PROGRESS_BEGIN_MESSAGE("Applying speed modifications");
            for (std::map<std::string, NBEdge*>::const_iterator i = myEdgeCont.begin(); i != myEdgeCont.end(); ++i) {
                (*i).second->setSpeed(-1, MAX2((*i).second->getSpeed() * speedFactor + speedOffset, speedMin));
            }
            PROGRESS_TIME_MESSAGE(before);
        }
    }

    // CONNECTIONS COMPUTATION
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing node types");
    NBNodeTypeComputer::computeNodeTypes(myNodeCont);
    PROGRESS_TIME_MESSAGE(before);
    //
    bool haveCrossings = false;
    if (oc.getBool("crossings.guess")) {
        haveCrossings = true;
        int crossings = 0;
        for (std::map<std::string, NBNode*>::const_iterator i = myNodeCont.begin(); i != myNodeCont.end(); ++i) {
            crossings += (*i).second->guessCrossings();
        }
        WRITE_MESSAGE("Guessed " + toString(crossings) + " pedestrian crossings.");
    }
    if (!haveCrossings) {
        // recheck whether we had crossings in the input
        for (std::map<std::string, NBNode*>::const_iterator i = myNodeCont.begin(); i != myNodeCont.end(); ++i) {
            if (i->second->getCrossings().size() > 0) {
                haveCrossings = true;
                break;
            }
        }
    }

    if (oc.isDefault("no-internal-links") && !haveCrossings && myHaveLoadedNetworkWithoutInternalEdges) {
        oc.set("no-internal-links", "true");
    }

    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing priorities");
    NBEdgePriorityComputer::computeEdgePriorities(myNodeCont);
    PROGRESS_TIME_MESSAGE(before);
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing approached edges");
    myEdgeCont.computeEdge2Edges(oc.getBool("no-left-connections"));
    PROGRESS_TIME_MESSAGE(before);
    //
    if (oc.getBool("roundabouts.guess")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Guessing and setting roundabouts");
        const int numGuessed = myEdgeCont.guessRoundabouts();
        if (numGuessed > 0) {
            WRITE_MESSAGE(" Guessed " + toString(numGuessed) + " roundabout(s).");
        }
        PROGRESS_TIME_MESSAGE(before);
    }
    myEdgeCont.markRoundabouts();
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing approaching lanes");
    myEdgeCont.computeLanes2Edges();
    PROGRESS_TIME_MESSAGE(before);
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Dividing of lanes on approached lanes");
    myNodeCont.computeLanes2Lanes();
    myEdgeCont.sortOutgoingLanesConnections();
    PROGRESS_TIME_MESSAGE(before);
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Processing turnarounds");
    if (!oc.getBool("no-turnarounds")) {
        myEdgeCont.appendTurnarounds(oc.getBool("no-turnarounds.tls"));
    } else {
        myEdgeCont.appendTurnarounds(explicitTurnarounds, oc.getBool("no-turnarounds.tls"));
    }
    PROGRESS_TIME_MESSAGE(before);
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Rechecking of lane endings");
    myEdgeCont.recheckLanes();
    PROGRESS_TIME_MESSAGE(before);

    if (haveCrossings && !oc.getBool("no-internal-links")) {
        for (std::map<std::string, NBNode*>::const_iterator i = myNodeCont.begin(); i != myNodeCont.end(); ++i) {
            i->second->buildCrossingsAndWalkingAreas();
        }
    }

    // GUESS TLS POSITIONS
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Assigning nodes to traffic lights");
    if (oc.isSet("tls.set")) {
        std::vector<std::string> tlControlledNodes = oc.getStringVector("tls.set");
        TrafficLightType type = SUMOXMLDefinitions::TrafficLightTypes.get(oc.getString("tls.default-type"));
        for (std::vector<std::string>::const_iterator i = tlControlledNodes.begin(); i != tlControlledNodes.end(); ++i) {
            NBNode* node = myNodeCont.retrieve(*i);
            if (node == 0) {
                WRITE_WARNING("Building a tl-logic for junction '" + *i + "' is not possible." + "\n The junction '" + *i + "' is not known.");
            } else {
                myNodeCont.setAsTLControlled(node, myTLLCont, type);
            }
        }
    }
    myNodeCont.guessTLs(oc, myTLLCont);
    PROGRESS_TIME_MESSAGE(before);
    //
    if (oc.getBool("tls.join")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Joining traffic light nodes");
        myNodeCont.joinTLS(myTLLCont, oc.getFloat("tls.join-dist"));
        PROGRESS_TIME_MESSAGE(before);
    }


    // COMPUTING RIGHT-OF-WAY AND TRAFFIC LIGHT PROGRAMS
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing traffic light control information");
    myTLLCont.setTLControllingInformation(myEdgeCont, myNodeCont);
    PROGRESS_TIME_MESSAGE(before);
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing node logics");
    myNodeCont.computeLogics(myEdgeCont, oc);
    PROGRESS_TIME_MESSAGE(before);
    //
    before = SysUtils::getCurrentMillis();
    PROGRESS_BEGIN_MESSAGE("Computing traffic light logics");
    std::pair<unsigned int, unsigned int> numbers = myTLLCont.computeLogics(oc);
    PROGRESS_TIME_MESSAGE(before);
    std::string progCount = "";
    if (numbers.first != numbers.second) {
        progCount = "(" + toString(numbers.second) + " programs) ";
    }
    WRITE_MESSAGE(" " + toString(numbers.first) + " traffic light(s) " + progCount + "computed.");
    //
    if (oc.isSet("street-sign-output")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Generating street signs");
        myEdgeCont.generateStreetSigns();
        PROGRESS_TIME_MESSAGE(before);
    }

    // FINISHING INNER EDGES
    if (!oc.getBool("no-internal-links")) {
        before = SysUtils::getCurrentMillis();
        PROGRESS_BEGIN_MESSAGE("Building inner edges");
        for (std::map<std::string, NBEdge*>::const_iterator i = myEdgeCont.begin(); i != myEdgeCont.end(); ++i) {
            (*i).second->sortOutgoingConnectionsByIndex();
        }
        // walking areas shall only be built if crossings are wished as well
        for (std::map<std::string, NBNode*>::const_iterator i = myNodeCont.begin(); i != myNodeCont.end(); ++i) {
            (*i).second->buildInnerEdges();
        }
        PROGRESS_TIME_MESSAGE(before);
    }
    if (lefthand) {
        mirrorX();
    }

    // report
    WRITE_MESSAGE("-----------------------------------------------------");
    WRITE_MESSAGE("Summary:");
    myNodeCont.printBuiltNodesStatistics();
    WRITE_MESSAGE(" Network boundaries:");
    WRITE_MESSAGE("  Original boundary  : " + toString(geoConvHelper.getOrigBoundary()));
    WRITE_MESSAGE("  Applied offset     : " + toString(geoConvHelper.getOffsetBase()));
    WRITE_MESSAGE("  Converted boundary : " + toString(geoConvHelper.getConvBoundary()));
    WRITE_MESSAGE("-----------------------------------------------------");
    NBRequest::reportWarnings();
    // report on very large networks
    if (MAX2(geoConvHelper.getConvBoundary().xmax(), geoConvHelper.getConvBoundary().ymax()) > 1000000 ||
            MIN2(geoConvHelper.getConvBoundary().xmin(), geoConvHelper.getConvBoundary().ymin()) < -1000000) {
        WRITE_WARNING("Network contains very large coordinates and will probably flicker in the GUI. Check for outlying nodes and make sure the network is shifted to the coordinate origin");
    }
}
Code example #9
File: shader_query.cpp  Project: ashmew2/kolibriosSVN
unsigned
_mesa_program_resource_prop(struct gl_shader_program *shProg,
                            struct gl_program_resource *res, GLuint index,
                            const GLenum prop, GLint *val, const char *caller)
{
   GET_CURRENT_CONTEXT(ctx);

#define VALIDATE_TYPE(type)\
   if (res->Type != type)\
      goto invalid_operation;

   switch(prop) {
   case GL_NAME_LENGTH:
      if (res->Type == GL_ATOMIC_COUNTER_BUFFER)
         goto invalid_operation;
      /* Base name +3 if array '[0]' + terminator. */
      *val = strlen(_mesa_program_resource_name(res)) +
         (_mesa_program_resource_array_size(res) > 0 ? 3 : 0) + 1;
      return 1;
   case GL_TYPE:
      switch (res->Type) {
      case GL_UNIFORM:
         *val = RESOURCE_UNI(res)->type->gl_type;
         return 1;
      case GL_PROGRAM_INPUT:
      case GL_PROGRAM_OUTPUT:
         *val = RESOURCE_VAR(res)->type->gl_type;
         return 1;
      case GL_TRANSFORM_FEEDBACK_VARYING:
         *val = RESOURCE_XFB(res)->Type;
         return 1;
      default:
         goto invalid_operation;
      }
   case GL_ARRAY_SIZE:
      switch (res->Type) {
      case GL_UNIFORM:
         *val = MAX2(RESOURCE_UNI(res)->array_elements, 1);
         return 1;
      case GL_PROGRAM_INPUT:
      case GL_PROGRAM_OUTPUT:
         *val = MAX2(RESOURCE_VAR(res)->type->length, 1);
         return 1;
      case GL_TRANSFORM_FEEDBACK_VARYING:
         *val = MAX2(RESOURCE_XFB(res)->Size, 1);
         return 1;
      default:
         goto invalid_operation;
      }
   case GL_OFFSET:
      VALIDATE_TYPE(GL_UNIFORM);
      *val = RESOURCE_UNI(res)->offset;
      return 1;
   case GL_BLOCK_INDEX:
      VALIDATE_TYPE(GL_UNIFORM);
      *val = RESOURCE_UNI(res)->block_index;
      return 1;
   case GL_ARRAY_STRIDE:
      VALIDATE_TYPE(GL_UNIFORM);
      *val = RESOURCE_UNI(res)->array_stride;
      return 1;
   case GL_MATRIX_STRIDE:
      VALIDATE_TYPE(GL_UNIFORM);
      *val = RESOURCE_UNI(res)->matrix_stride;
      return 1;
   case GL_IS_ROW_MAJOR:
      VALIDATE_TYPE(GL_UNIFORM);
      *val = RESOURCE_UNI(res)->row_major;
      return 1;
   case GL_ATOMIC_COUNTER_BUFFER_INDEX:
      VALIDATE_TYPE(GL_UNIFORM);
      *val = RESOURCE_UNI(res)->atomic_buffer_index;
      return 1;
   case GL_BUFFER_BINDING:
   case GL_BUFFER_DATA_SIZE:
   case GL_NUM_ACTIVE_VARIABLES:
   case GL_ACTIVE_VARIABLES:
      return get_buffer_property(shProg, res, prop, val, caller);
   case GL_REFERENCED_BY_COMPUTE_SHADER:
      if (!_mesa_has_compute_shaders(ctx))
         goto invalid_enum;
      /* fallthrough */
   case GL_REFERENCED_BY_VERTEX_SHADER:
   case GL_REFERENCED_BY_GEOMETRY_SHADER:
   case GL_REFERENCED_BY_FRAGMENT_SHADER:
      switch (res->Type) {
      case GL_UNIFORM:
      case GL_PROGRAM_INPUT:
      case GL_PROGRAM_OUTPUT:
      case GL_UNIFORM_BLOCK:
      case GL_ATOMIC_COUNTER_BUFFER:
         *val = is_resource_referenced(shProg, res, index,
                                       stage_from_enum(prop));
         return 1;
      default:
         goto invalid_operation;
      }
   case GL_LOCATION:
      switch (res->Type) {
      case GL_UNIFORM:
      case GL_PROGRAM_INPUT:
      case GL_PROGRAM_OUTPUT:
         *val = program_resource_location(shProg, res,
                                          _mesa_program_resource_name(res));
         return 1;
      default:
         goto invalid_operation;
      }
   case GL_LOCATION_INDEX:
      if (res->Type != GL_PROGRAM_OUTPUT)
         goto invalid_operation;
      *val = RESOURCE_VAR(res)->data.index;
      return 1;

   /* GL_ARB_tessellation_shader */
   case GL_IS_PER_PATCH:
   case GL_REFERENCED_BY_TESS_CONTROL_SHADER:
   case GL_REFERENCED_BY_TESS_EVALUATION_SHADER:
   default:
      goto invalid_enum;
   }

#undef VALIDATE_TYPE

invalid_enum:
   _mesa_error(ctx, GL_INVALID_ENUM, "%s(%s prop %s)", caller,
               _mesa_lookup_enum_by_nr(res->Type),
               _mesa_lookup_enum_by_nr(prop));
   return 0;

invalid_operation:
   _mesa_error(ctx, GL_INVALID_OPERATION, "%s(%s prop %s)", caller,
               _mesa_lookup_enum_by_nr(res->Type),
               _mesa_lookup_enum_by_nr(prop));
   return 0;
}
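
The GL_NAME_LENGTH case adds three characters for the "[0]" suffix reported for arrays plus one for the terminator; a hypothetical example:

   /* Hypothetical: a uniform declared as 'vec4 lights[8]' is reported as
    * "lights[0]", so *val = strlen("lights") + 3 + 1 = 10.
    */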
Code example #10
File: vbo_split.c  Project: rib/mesa
void vbo_split_prims( struct gl_context *ctx,
                      const struct gl_client_array *arrays[],
                      const struct _mesa_prim *prim,
                      GLuint nr_prims,
                      const struct _mesa_index_buffer *ib,
                      GLuint min_index,
                      GLuint max_index,
                      vbo_draw_func draw,
                      const struct split_limits *limits )
{
    GLint max_basevertex = prim->basevertex;
    GLuint i;

    for (i = 1; i < nr_prims; i++)
        max_basevertex = MAX2(max_basevertex, prim[i].basevertex);

    /* XXX max_basevertex is computed but not used, why? */

    if (ib) {
        if (limits->max_indices == 0) {
            /* Could traverse the indices, re-emitting vertices in turn.
             * But it's hard to see why this case would be needed - for
             * software tnl, it is better to convert to non-indexed
             * rendering after transformation is complete.  Are there any devices
             * with hardware tnl that cannot do indexed rendering?
             *
             * For now, this path is disabled.
             */
            assert(0);
        }
        else if (max_index - min_index >= limits->max_verts) {
            /* The vertex buffers are too large for hardware (or the
             * swtnl module).  Traverse the indices, re-emitting vertices
             * in turn.  Use a vertex cache to preserve some of the
             * sharing from the original index list.
             */
            vbo_split_copy(ctx, arrays, prim, nr_prims, ib,
                           draw, limits );
        }
        else if (ib->count > limits->max_indices) {
            /* The index buffer is too large for hardware.  Try to split
             * on whole-primitive boundaries, otherwise try to split the
             * individual primitives.
             */
            vbo_split_inplace(ctx, arrays, prim, nr_prims, ib,
                              min_index, max_index, draw, limits );
        }
        else {
            /* Why were we called? */
            assert(0);
        }
    }
    else {
        if (max_index - min_index >= limits->max_verts) {
            /* The vertex buffer is too large for hardware (or the swtnl
             * module).  Try to split on whole-primitive boundaries,
             * otherwise try to split the individual primitives.
             */
            vbo_split_inplace(ctx, arrays, prim, nr_prims, ib,
                              min_index, max_index, draw, limits );
        }
        else {
            /* Why were we called? */
            assert(0);
        }
    }
}
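
A hypothetical pair of draws (limit values assumed) shows which branch each one takes:

    /* Assume limits->max_verts = 65536 and limits->max_indices = 65536.
     *  - An indexed draw spanning min_index = 0 .. max_index = 1000000
     *    exceeds max_verts, so it goes through vbo_split_copy().
     *  - An indexed draw touching only 10000 vertices but with
     *    ib->count = 300000 exceeds max_indices, so it goes through
     *    vbo_split_inplace().
     */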
Code example #11
static void
compute_blend_ref(const struct pipe_blend_state *blend,
                  const double *src,
                  const double *dst,
                  const double *con,
                  double *res)
{
   double src_term[4];
   double dst_term[4];

   compute_blend_ref_term(blend->rt[0].rgb_src_factor, blend->rt[0].alpha_src_factor,
                          src, src, dst, con, src_term);
   compute_blend_ref_term(blend->rt[0].rgb_dst_factor, blend->rt[0].alpha_dst_factor,
                          dst, src, dst, con, dst_term);

   /*
    * Combine RGB terms
    */
   switch (blend->rt[0].rgb_func) {
   case PIPE_BLEND_ADD:
      res[0] = src_term[0] + dst_term[0]; /* R */
      res[1] = src_term[1] + dst_term[1]; /* G */
      res[2] = src_term[2] + dst_term[2]; /* B */
      break;
   case PIPE_BLEND_SUBTRACT:
      res[0] = src_term[0] - dst_term[0]; /* R */
      res[1] = src_term[1] - dst_term[1]; /* G */
      res[2] = src_term[2] - dst_term[2]; /* B */
      break;
   case PIPE_BLEND_REVERSE_SUBTRACT:
      res[0] = dst_term[0] - src_term[0]; /* R */
      res[1] = dst_term[1] - src_term[1]; /* G */
      res[2] = dst_term[2] - src_term[2]; /* B */
      break;
   case PIPE_BLEND_MIN:
      res[0] = MIN2(src_term[0], dst_term[0]); /* R */
      res[1] = MIN2(src_term[1], dst_term[1]); /* G */
      res[2] = MIN2(src_term[2], dst_term[2]); /* B */
      break;
   case PIPE_BLEND_MAX:
      res[0] = MAX2(src_term[0], dst_term[0]); /* R */
      res[1] = MAX2(src_term[1], dst_term[1]); /* G */
      res[2] = MAX2(src_term[2], dst_term[2]); /* B */
      break;
   default:
      assert(0);
   }

   /*
    * Combine A terms
    */
   switch (blend->rt[0].alpha_func) {
   case PIPE_BLEND_ADD:
      res[3] = src_term[3] + dst_term[3]; /* A */
      break;
   case PIPE_BLEND_SUBTRACT:
      res[3] = src_term[3] - dst_term[3]; /* A */
      break;
   case PIPE_BLEND_REVERSE_SUBTRACT:
      res[3] = dst_term[3] - src_term[3]; /* A */
      break;
   case PIPE_BLEND_MIN:
      res[3] = MIN2(src_term[3], dst_term[3]); /* A */
      break;
   case PIPE_BLEND_MAX:
      res[3] = MAX2(src_term[3], dst_term[3]); /* A */
      break;
   default:
      assert(0);
   }
}
Code example #12
File: collectorPolicy.cpp  Project: netroby/jdk9-dev
// Minimum sizes of the generations may be different than
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  _initial_young_size = NewSize;
  _max_young_size = MaxNewSize;
  _initial_old_size = OldSize;

  // Determine maximum size of the young generation.

  if (FLAG_IS_DEFAULT(MaxNewSize)) {
    _max_young_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    _max_young_size = MIN2(MAX2(_max_young_size, _initial_young_size), MaxNewSize);
  }

  // Given the maximum young size, determine the initial and
  // minimum young sizes.

  if (_max_heap_byte_size == _initial_heap_byte_size) {
    // The maximum and initial heap sizes are the same so the generation's
    // initial size must be the same as its maximum size. Use NewSize as the
    // size if set on command line.
    _max_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : _max_young_size;
    _initial_young_size = _max_young_size;

    // Also update the minimum size if min == initial == max.
    if (_max_heap_byte_size == _min_heap_byte_size) {
      _min_young_size = _max_young_size;
    }
  } else {
    if (FLAG_IS_CMDLINE(NewSize)) {
      // If NewSize is set on the command line, we should use it as
      // the initial size, but make sure it is within the heap bounds.
      _initial_young_size =
        MIN2(_max_young_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
      _min_young_size = bound_minus_alignment(_initial_young_size, _min_heap_byte_size);
    } else {
      // For the case where NewSize is not set on the command line, use
      // NewRatio to size the initial generation size. Use the current
      // NewSize as the floor, because if NewRatio is overly large, the resulting
      // size can be too small.
      _initial_young_size =
        MIN2(_max_young_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
    }
  }

  log_trace(gc, heap)("1: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
                      _min_young_size, _initial_young_size, _max_young_size);

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of the young generation have been determined.
  // The maximum old size can be determined from the maximum young
  // and maximum heap size since no explicit flags exist
  // for setting the old generation maximum.
  _max_old_size = MAX2(_max_heap_byte_size - _max_young_size, _gen_alignment);

  // If no explicit command line flag has been set for the
  // old generation size, use what is left.
  if (!FLAG_IS_CMDLINE(OldSize)) {
    // The user has not specified any value but the ergonomics
    // may have chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the young sizes and the overall heap sizes.
    _min_old_size = _gen_alignment;
    _initial_old_size = MIN2(_max_old_size, MAX2(_initial_heap_byte_size - _initial_young_size, _min_old_size));
    // _max_old_size has already been made consistent above.
  } else {
    // OldSize has been explicitly set on the command line. Use it
    // for the initial size but make sure the minimum allows a young
    // generation to fit as well.
    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one generation alignment.
    if (_initial_old_size > _max_old_size) {
      log_warning(gc, ergo)("Inconsistency between maximum heap size and maximum "
                            "generation sizes: using maximum heap = " SIZE_FORMAT
                            ", -XX:OldSize flag is being ignored",
                            _max_heap_byte_size);
      _initial_old_size = _max_old_size;
    }

    _min_old_size = MIN2(_initial_old_size, _min_heap_byte_size - _min_young_size);
  }

  // The initial generation sizes should match the initial heap size,
  // if not issue a warning and resize the generations. This behavior
  // differs from JDK8 where the generation sizes have higher priority
  // than the initial heap size.
  if ((_initial_old_size + _initial_young_size) != _initial_heap_byte_size) {
    log_warning(gc, ergo)("Inconsistency between generation sizes and heap size, resizing "
                          "the generations to fit the heap.");

    size_t desired_young_size = _initial_heap_byte_size - _initial_old_size;
    if (_initial_heap_byte_size < _initial_old_size) {
      // Old wants all memory, use minimum for young and rest for old
      _initial_young_size = _min_young_size;
      _initial_old_size = _initial_heap_byte_size - _min_young_size;
    } else if (desired_young_size > _max_young_size) {
      // Need to increase both young and old generation
      _initial_young_size = _max_young_size;
      _initial_old_size = _initial_heap_byte_size - _max_young_size;
    } else if (desired_young_size < _min_young_size) {
      // Need to decrease both young and old generation
      _initial_young_size = _min_young_size;
      _initial_old_size = _initial_heap_byte_size - _min_young_size;
    } else {
      // The young generation boundaries allow us to only update the
      // young generation.
      _initial_young_size = desired_young_size;
    }

    log_trace(gc, heap)("2: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
                    _min_young_size, _initial_young_size, _max_young_size);
  }

  // Write back to flags if necessary.
  if (NewSize != _initial_young_size) {
    FLAG_SET_ERGO(size_t, NewSize, _initial_young_size);
  }

  if (MaxNewSize != _max_young_size) {
    FLAG_SET_ERGO(size_t, MaxNewSize, _max_young_size);
  }

  if (OldSize != _initial_old_size) {
    FLAG_SET_ERGO(size_t, OldSize, _initial_old_size);
  }

  log_trace(gc, heap)("Minimum old " SIZE_FORMAT "  Initial old " SIZE_FORMAT "  Maximum old " SIZE_FORMAT,
                  _min_old_size, _initial_old_size, _max_old_size);

  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}
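
A hypothetical walk through the default-MaxNewSize path (assuming scale_by_NewRatio_aligned() amounts to dividing by NewRatio + 1 and aligning):

  // Assumed flags: -Xmx3g, NewRatio = 2, MaxNewSize left at its default.
  //   _max_young_size = scale_by_NewRatio_aligned(3 GB) = 3 GB / 3 = 1 GB
  //   then bounded:    MIN2(MAX2(1 GB, _initial_young_size), MaxNewSize)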
Code example #13
File: collectorPolicy.cpp  Project: netroby/jdk9-dev
void GenCollectorPolicy::initialize_flags() {
  CollectorPolicy::initialize_flags();

  assert(_gen_alignment != 0, "Generation alignment not set up properly");
  assert(_heap_alignment >= _gen_alignment,
         "heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT,
         _heap_alignment, _gen_alignment);
  assert(_gen_alignment % _space_alignment == 0,
         "gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT,
         _gen_alignment, _space_alignment);
  assert(_heap_alignment % _gen_alignment == 0,
         "heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT,
         _heap_alignment, _gen_alignment);

  // All generational heaps have a young gen; handle those flags here

  // Make sure the heap is large enough for two generations
  size_t smallest_new_size = young_gen_size_lower_bound();
  size_t smallest_heap_size = align_size_up(smallest_new_size + old_gen_size_lower_bound(),
                                           _heap_alignment);
  if (MaxHeapSize < smallest_heap_size) {
    FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
    _max_heap_byte_size = MaxHeapSize;
  }
  // If needed, synchronize _min_heap_byte_size and _initial_heap_byte_size
  if (_min_heap_byte_size < smallest_heap_size) {
    _min_heap_byte_size = smallest_heap_size;
    if (InitialHeapSize < _min_heap_byte_size) {
      FLAG_SET_ERGO(size_t, InitialHeapSize, smallest_heap_size);
      _initial_heap_byte_size = smallest_heap_size;
    }
  }

  // Make sure NewSize allows an old generation to fit even if set on the command line
  if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) {
    log_warning(gc, ergo)("NewSize was set larger than initial heap size, will use initial heap size.");
    FLAG_SET_ERGO(size_t, NewSize, bound_minus_alignment(NewSize, _initial_heap_byte_size));
  }

  // Now take the actual NewSize into account. We will silently increase NewSize
  // if the user specified a smaller or unaligned value.
  size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
  bounded_new_size = MAX2(smallest_new_size, (size_t)align_size_down(bounded_new_size, _gen_alignment));
  if (bounded_new_size != NewSize) {
    FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
  }
  _min_young_size = smallest_new_size;
  _initial_young_size = NewSize;

  if (!FLAG_IS_DEFAULT(MaxNewSize)) {
    if (MaxNewSize >= MaxHeapSize) {
      // Make sure there is room for an old generation
      size_t smaller_max_new_size = MaxHeapSize - _gen_alignment;
      if (FLAG_IS_CMDLINE(MaxNewSize)) {
        log_warning(gc, ergo)("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire "
                              "heap (" SIZE_FORMAT "k).  A new max generation size of " SIZE_FORMAT "k will be used.",
                              MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K);
      }
      FLAG_SET_ERGO(size_t, MaxNewSize, smaller_max_new_size);
      if (NewSize > MaxNewSize) {
        FLAG_SET_ERGO(size_t, NewSize, MaxNewSize);
        _initial_young_size = NewSize;
      }
    } else if (MaxNewSize < _initial_young_size) {
      FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size);
    } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
      FLAG_SET_ERGO(size_t, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
    }
    _max_young_size = MaxNewSize;
  }

  if (NewSize > MaxNewSize) {
    // At this point this should only happen if the user specifies a large NewSize and/or
    // a small (but not too small) MaxNewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
                            "A new max generation size of " SIZE_FORMAT "k will be used.",
                            NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
    _max_young_size = MaxNewSize;
  }

  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid young gen ratio specified");
  }

  if (OldSize < old_gen_size_lower_bound()) {
    FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound());
  }
  if (!is_size_aligned(OldSize, _gen_alignment)) {
    FLAG_SET_ERGO(size_t, OldSize, align_size_down(OldSize, _gen_alignment));
  }

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment);
    FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
    _max_heap_byte_size = MaxHeapSize;
    FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
    _initial_heap_byte_size = InitialHeapSize;
  }

  // Adjust NewSize and OldSize or MaxHeapSize to match each other
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // Somebody has set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      size_t calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      size_t smaller_new_size = align_size_down((size_t)(NewSize * shrink_factor), _gen_alignment);
      FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
      _initial_young_size = NewSize;

      // OldSize is already aligned because above we aligned MaxHeapSize to
      // _heap_alignment, and we just made sure that NewSize is aligned to
      // _gen_alignment. In initialize_flags() we verified that _heap_alignment
      // is a multiple of _gen_alignment.
      FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
    } else {
      FLAG_SET_ERGO(size_t, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment));
      _max_heap_byte_size = MaxHeapSize;
    }
  }

  // Update NewSize, if possible, to avoid sizing the young gen too small when only
  // OldSize is set on the command line.
  if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
    if (OldSize < _initial_heap_byte_size) {
      size_t new_size = _initial_heap_byte_size - OldSize;
      // Need to compare against the flag value for max since _max_young_size
      // might not have been set yet.
      if (new_size >= _min_young_size && new_size <= MaxNewSize) {
        FLAG_SET_ERGO(size_t, NewSize, new_size);
        _initial_young_size = NewSize;
      }
    }
  }

  always_do_update_barrier = UseConcMarkSweepGC;

  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
}
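
The flag bounding above keeps repeating one pattern: clamp a requested size against a maximum, round it down to the generation alignment, and never go below a minimum. A minimal standalone sketch of that pattern, assuming power-of-two alignments; the helper names are hypothetical and not part of the HotSpot API:

#include <assert.h>
#include <stddef.h>

/* Round size down to a multiple of a power-of-two alignment. */
static size_t align_down(size_t size, size_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  return size & ~(alignment - 1);
}

/* Clamp a requested size into [minimum, maximum] and align it. */
static size_t bound_and_align(size_t requested, size_t minimum,
                              size_t maximum, size_t alignment) {
  size_t bounded = requested > maximum ? maximum : requested;
  bounded = align_down(bounded, alignment);
  return bounded < minimum ? minimum : bounded;
}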
Code example #14
File: vc4_draw.c  Project: Echelon9/mesa
static void
vc4_emit_gl_shader_state(struct vc4_context *vc4,
                         const struct pipe_draw_info *info,
                         uint32_t extra_index_bias)
{
        struct vc4_job *job = vc4->job;
        /* VC4_DIRTY_VTXSTATE */
        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        /* VC4_DIRTY_VTXBUF */
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);
        /* Emit the shader record. */
        struct vc4_cl_out *shader_rec =
                cl_start_shader_reloc(&job->shader_rec, 3 + num_elements_emit);
        /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
        cl_u16(&shader_rec,
               VC4_SHADER_FLAG_ENABLE_CLIPPING |
               (vc4->prog.fs->fs_threaded ?
                0 : VC4_SHADER_FLAG_FS_SINGLE_THREAD) |
               ((info->mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex) ?
                VC4_SHADER_FLAG_VS_POINT_SIZE : 0));

        /* VC4_DIRTY_COMPILED_FS */
        cl_u8(&shader_rec, 0); /* fs num uniforms (unused) */
        cl_u8(&shader_rec, vc4->prog.fs->num_inputs);
        cl_reloc(job, &job->shader_rec, &shader_rec, vc4->prog.fs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        /* VC4_DIRTY_COMPILED_VS */
        cl_u16(&shader_rec, 0); /* vs num uniforms */
        cl_u8(&shader_rec, vc4->prog.vs->vattrs_live);
        cl_u8(&shader_rec, vc4->prog.vs->vattr_offsets[8]);
        cl_reloc(job, &job->shader_rec, &shader_rec, vc4->prog.vs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        /* VC4_DIRTY_COMPILED_CS */
        cl_u16(&shader_rec, 0); /* cs num uniforms */
        cl_u8(&shader_rec, vc4->prog.cs->vattrs_live);
        cl_u8(&shader_rec, vc4->prog.cs->vattr_offsets[8]);
        cl_reloc(job, &job->shader_rec, &shader_rec, vc4->prog.cs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        uint32_t max_index = 0xffff;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer);
                /* not vc4->dirty tracked: vc4->last_index_bias */
                uint32_t offset = (vb->buffer_offset +
                                   elem->src_offset +
                                   vb->stride * (info->index_bias +
                                                 extra_index_bias));
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_reloc(job, &job->shader_rec, &shader_rec, rsc->bo, offset);
                cl_u8(&shader_rec, elem_size - 1);
                cl_u8(&shader_rec, vb->stride);
                cl_u8(&shader_rec, vc4->prog.vs->vattr_offsets[i]);
                cl_u8(&shader_rec, vc4->prog.cs->vattr_offsets[i]);

                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");
                cl_reloc(job, &job->shader_rec, &shader_rec, bo, 0);
                cl_u8(&shader_rec, 16 - 1); /* element size */
                cl_u8(&shader_rec, 0); /* stride */
                cl_u8(&shader_rec, 0); /* VS VPM offset */
                cl_u8(&shader_rec, 0); /* CS VPM offset */
                vc4_bo_unreference(&bo);
        }
        cl_end(&job->shader_rec, shader_rec);

        struct vc4_cl_out *bcl = cl_start(&job->bcl);
        /* the actual draw call. */
        cl_u8(&bcl, VC4_PACKET_GL_SHADER_STATE);
        assert(vtx->num_elements <= 8);
        /* Note that number of attributes == 0 in the packet means 8
         * attributes.  This field also contains the offset into shader_rec.
         */
        cl_u32(&bcl, num_elements_emit & 0x7);
        cl_end(&job->bcl, bcl);

        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        vc4->last_index_bias = info->index_bias + extra_index_bias;
        vc4->max_index = max_index;
        job->shader_rec_count++;
}
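
As the comment before the GL_SHADER_STATE packet notes, the attribute-count field is only three bits wide, so eight attributes are encoded as zero. A hypothetical helper that restates just that encoding (not part of the vc4 command-list API):

static unsigned encode_attr_count(unsigned num_attrs) {
  /* valid inputs are 1..8; 8 & 0x7 == 0, which the hardware reads as 8 */
  return num_attrs & 0x7;
}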
Code example #15
File: prog_optimize.c  Project: jay8muel/Renderfusion
/**
 * This function implements "Linear Scan Register Allocation" to reduce
 * the number of temporary registers used by the program.
 *
 * We compute the "live interval" for all temporary registers then
 * examine the overlap of the intervals to allocate new registers.
 * Basically, if two intervals do not overlap, they can use the same register.
 */
static void
_mesa_reallocate_registers(struct gl_program *prog)
{
   struct interval_list liveIntervals;
   GLint registerMap[REG_ALLOCATE_MAX_PROGRAM_TEMPS];
   GLboolean usedRegs[REG_ALLOCATE_MAX_PROGRAM_TEMPS];
   GLuint i;
   GLint maxTemp = -1;

   if (dbg) {
      printf("Optimize: Begin live-interval register reallocation\n");
      _mesa_print_program(prog);
   }

   for (i = 0; i < REG_ALLOCATE_MAX_PROGRAM_TEMPS; i++){
      registerMap[i] = -1;
      usedRegs[i] = GL_FALSE;
   }

   if (!find_live_intervals(prog, &liveIntervals)) {
      if (dbg)
         printf("Aborting register reallocation\n");
      return;
   }

   {
      struct interval_list activeIntervals;
      activeIntervals.Num = 0;

      /* loop over live intervals, allocating a new register for each */
      for (i = 0; i < liveIntervals.Num; i++) {
         const struct interval *live = liveIntervals.Intervals + i;

         if (dbg)
            printf("Consider register %u\n", live->Reg);

         /* Expire old intervals.  Intervals which have ended with respect
          * to the live interval can have their remapped registers freed.
          */
         {
            GLint j;
            for (j = 0; j < (GLint) activeIntervals.Num; j++) {
               const struct interval *inv = activeIntervals.Intervals + j;
               if (inv->End >= live->Start) {
                  /* Stop now.  Since the activeInterval list is sorted
                   * we know we don't have to go further.
                   */
                  break;
               }
               else {
                  /* Interval 'inv' has expired */
                  const GLint regNew = registerMap[inv->Reg];
                  ASSERT(regNew >= 0);

                  if (dbg)
                     printf("  expire interval for reg %u\n", inv->Reg);

                  /* remove interval j from active list */
                  remove_interval(&activeIntervals, inv);
                  j--;  /* counter-act j++ in for-loop above */

                  /* return register regNew to the free pool */
                  if (dbg)
                     printf("  free reg %d\n", regNew);
                  ASSERT(usedRegs[regNew] == GL_TRUE);
                  usedRegs[regNew] = GL_FALSE;
               }
            }
         }

         /* find a free register for this live interval */
         {
            const GLint k = alloc_register(usedRegs);
            if (k < 0) {
               /* out of registers, give up */
               return;
            }
            registerMap[live->Reg] = k;
            maxTemp = MAX2(maxTemp, k);
            if (dbg)
               printf("  remap register %u -> %d\n", live->Reg, k);
         }

         /* Insert this live interval into the active list which is sorted
          * by increasing end points.
          */
         insert_interval_by_end(&activeIntervals, live);
      }
   }

   if (maxTemp + 1 < (GLint) liveIntervals.Num) {
      /* OK, we've reduced the number of registers needed.
       * Scan the program and replace all the old temporary register
       * indexes with the new indexes.
       */
      replace_regs(prog, PROGRAM_TEMPORARY, registerMap);

      prog->NumTemporaries = maxTemp + 1;
   }

   if (dbg) {
      printf("Optimize: End live-interval register reallocation\n");
      printf("Num temp regs before: %u  after: %u\n",
                   liveIntervals.Num, maxTemp + 1);
      _mesa_print_program(prog);
   }
}
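
The header comment states the core rule of linear-scan allocation: two temporaries can share a register only if their live intervals do not overlap. A minimal, self-contained restatement of that predicate, with an illustrative interval type rather than Mesa's:

typedef struct {
  int start;  /* first instruction where the value is live */
  int end;    /* last instruction where the value is live */
} interval_t;

/* Closed intervals overlap iff neither one ends before the other begins. */
static int intervals_overlap(const interval_t *a, const interval_t *b) {
  return a->start <= b->end && b->start <= a->end;
}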
Code example #16
File: texture_ocean.c  Project: Walid-Shouman/Blender
/* ***** actual texture sampling ***** */
int ocean_texture(Tex *tex, const float texvec[2], TexResult *texres)
{
	OceanTex *ot = tex->ot;
	ModifierData *md;
	OceanModifierData *omd;

	texres->tin = 0.0f;

	if ( !(ot) ||
	     !(ot->object) ||
	     !(md  = (ModifierData *)modifiers_findByType(ot->object, eModifierType_Ocean)) ||
	     !(omd = (OceanModifierData *)md)->ocean)
	{
		return 0;
	}
	else {
		const int do_normals = (omd->flag & MOD_OCEAN_GENERATE_NORMALS);
		int cfra = R.r.cfra;
		int retval = TEX_INT;

		OceanResult ocr;
		const float u = 0.5f + 0.5f * texvec[0];
		const float v = 0.5f + 0.5f * texvec[1];

		if (omd->oceancache && omd->cached == true) {

			CLAMP(cfra, omd->bakestart, omd->bakeend);
			cfra -= omd->bakestart;	/* shift to 0 based */

			BKE_ocean_cache_eval_uv(omd->oceancache, &ocr, cfra, u, v);

		}
		else {	/* non-cached */

			if (G.is_rendering)
				BKE_ocean_eval_uv_catrom(omd->ocean, &ocr, u, v);
			else
				BKE_ocean_eval_uv(omd->ocean, &ocr, u, v);

			ocr.foam = BKE_ocean_jminus_to_foam(ocr.Jminus, omd->foam_coverage);
		}

		switch (ot->output) {
			case TEX_OCN_DISPLACEMENT:
				/* XYZ displacement */
				texres->tr = 0.5f + 0.5f * ocr.disp[0];
				texres->tg = 0.5f + 0.5f * ocr.disp[2];
				texres->tb = 0.5f + 0.5f * ocr.disp[1];

				texres->tr = MAX2(0.0f, texres->tr);
				texres->tg = MAX2(0.0f, texres->tg);
				texres->tb = MAX2(0.0f, texres->tb);

				BRICONTRGB;

				retval = TEX_RGB;
				break;

			case TEX_OCN_EMINUS:
				/* -ve eigenvectors ? */
				texres->tr = ocr.Eminus[0];
				texres->tg = ocr.Eminus[2];
				texres->tb = ocr.Eminus[1];
				retval = TEX_RGB;
				break;

			case TEX_OCN_EPLUS:
				/* -ve eigenvectors ? */
				texres->tr = ocr.Eplus[0];
				texres->tg = ocr.Eplus[2];
				texres->tb = ocr.Eplus[1];
				retval = TEX_RGB;
				break;

			case TEX_OCN_JPLUS:
				texres->tin = ocr.Jplus;
				retval = TEX_INT;
				break;

			case TEX_OCN_FOAM:

				texres->tin = ocr.foam;

				BRICONT;

				retval = TEX_INT;
				break;
		}

		/* if normals needed */

		if (texres->nor && do_normals) {
			normalize_v3_v3(texres->nor, ocr.normal);
			retval |= TEX_NOR;
		}

		texres->ta = 1.0f;

		return retval;
	}
}
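
The TEX_OCN_DISPLACEMENT case maps each signed displacement component into color space with 0.5 + 0.5 * d and clamps the negative tail at zero. A tiny standalone version of that remap (illustrative only, not Blender's API):

static float remap_displacement(float d) {
  float c = 0.5f + 0.5f * d;   /* center the signed displacement around 0.5 */
  return c > 0.0f ? c : 0.0f;  /* same effect as the MAX2(0.0f, ...) calls */
}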
Code example #17
static void
i945_texture_layout_2d(struct i915_texture *tex)
{
   struct pipe_resource *pt = &tex->b.b;
   int align_x = 4, align_y = 2;
   unsigned level;
   unsigned x = 0;
   unsigned y = 0;
   unsigned width = util_next_power_of_two(pt->width0);
   unsigned height = util_next_power_of_two(pt->height0);
   unsigned nblocksx = util_format_get_nblocksx(pt->format, width);
   unsigned nblocksy = util_format_get_nblocksy(pt->format, height);

   if (util_format_is_s3tc(pt->format)) {
      align_x = 1;
      align_y = 1;
   }

   tex->stride = align(util_format_get_stride(pt->format, width), 4);

   /* May need to adjust pitch to accommodate the placement of
    * the 2nd mipmap level.  This occurs when the alignment
    * constraints of mipmap placement push the right edge of the
    * 2nd mipmap level out past the width of its parent.
    */
   if (pt->last_level > 0) {
      unsigned mip1_nblocksx =
         align_nblocksx(pt->format, u_minify(width, 1), align_x) +
         util_format_get_nblocksx(pt->format, u_minify(width, 2));

      if (mip1_nblocksx > nblocksx)
         tex->stride = mip1_nblocksx * util_format_get_blocksize(pt->format);
   }

   /* Pitch must be a whole number of dwords
    */
   tex->stride = align(tex->stride, 64);
   tex->total_nblocksy = 0;

   for (level = 0; level <= pt->last_level; level++) {
      i915_texture_set_level_info(tex, level, 1);
      i915_texture_set_image_offset(tex, level, 0, x, y);

      /* Because the images are packed better, the final offset
       * might not be the maximal one:
       */
      tex->total_nblocksy = MAX2(tex->total_nblocksy, y + nblocksy);

      /* Layout_below: step right after second mipmap level.
       */
      if (level == 1) {
         x += nblocksx;
      } else {
         y += nblocksy;
      }

      width  = u_minify(width, 1);
      height = u_minify(height, 1);
      nblocksx = align_nblocksx(pt->format, width, align_x);
      nblocksy = align_nblocksy(pt->format, height, align_y);
   }
}
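
The stride adjustment in the middle of this function covers the case where mip level 1, with mip level 2 packed to its right, sticks out past the base level's width. A simplified sketch of that rule in block units, ignoring format block sizes and using hypothetical names:

static unsigned adjusted_stride_blocks(unsigned base_width_blocks,
                                       unsigned mip1_width_blocks,
                                       unsigned mip2_width_blocks) {
  /* in the layout-below scheme, mip 2 sits beside mip 1 */
  unsigned needed = mip1_width_blocks + mip2_width_blocks;
  return needed > base_width_blocks ? needed : base_width_blocks;
}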
Code example #18
SUMOTime
MSCalibrator::execute(SUMOTime currentTime) {
    // get current simulation values (valid for the last simulation second)
    // XXX could we miss vehicle movements if this is called less often than every DELTA_T (default) ?
    updateMeanData();
    const bool hadRemovals = removePending();
    // check whether an adaptation value exists
    if (isCurrentStateActive(currentTime)) {
        myAmActive = true;
        // all happens in isCurrentStateActive()
    } else {
        myAmActive = false;
        reset();
        if (!mySpeedIsDefault) {
            // reset speed to default
            if (myLane == nullptr) {
                myEdge->setMaxSpeed(myDefaultSpeed);
            } else {
                myLane->setMaxSpeed(myDefaultSpeed);
            }
            mySpeedIsDefault = true;
        }
        if (myCurrentStateInterval == myIntervals.end()) {
            // keep calibrator alive for gui but do not call again
            return TIME2STEPS(86400);
        }
        return myFrequency;
    }
    // we are active
    if (!myDidSpeedAdaption && myCurrentStateInterval->v >= 0) {
        if (myLane == nullptr) {
            myEdge->setMaxSpeed(myCurrentStateInterval->v);
        } else {
            myLane->setMaxSpeed(myCurrentStateInterval->v);
        }
        mySpeedIsDefault = false;
        myDidSpeedAdaption = true;
    }

    const bool calibrateFlow = myCurrentStateInterval->q >= 0;
    const int totalWishedNum = totalWished();
    int adaptedNum = passed() + myClearedInJam;
#ifdef MSCalibrator_DEBUG
    std::cout << time2string(currentTime) << " " << myID
              << " q=" << myCurrentStateInterval->q
              << " totalWished=" << totalWishedNum
              << " adapted=" << adaptedNum
              << " jam=" << invalidJam(myLane == 0 ? -1 : myLane->getIndex())
              << " entered=" << myEdgeMeanData.nVehEntered
              << " departed=" << myEdgeMeanData.nVehDeparted
              << " arrived=" << myEdgeMeanData.nVehArrived
              << " left=" << myEdgeMeanData.nVehLeft
              << " waitSecs=" << myEdgeMeanData.waitSeconds
              << " vaporized=" << myEdgeMeanData.nVehVaporized
              << "\n";
#endif
    if (calibrateFlow && adaptedNum < totalWishedNum && !hadRemovals) {
        // we need to insert some vehicles
        const double hourFraction = STEPS2TIME(currentTime - myCurrentStateInterval->begin + DELTA_T) / (double) 3600.;
        const int wishedNum = (int)std::floor(myCurrentStateInterval->q * hourFraction + 0.5); // round to closest int
        // only the difference between inflow and aspiredFlow should be added, thus
        // we should not count vehicles vaporized from a jam here
        // if we have enough time left we can add missing vehicles later
        const int relaxedInsertion = (int)std::floor(STEPS2TIME(myCurrentStateInterval->end - currentTime) / 3);
        const int insertionSlack = MAX2(0, adaptedNum + relaxedInsertion - totalWishedNum);
        // increase number of vehicles
#ifdef MSCalibrator_DEBUG
        std::cout
                << "   wished:" << wishedNum
                << " slack:" << insertionSlack
                << " before:" << adaptedNum
                << "\n";
#endif
        while (wishedNum > adaptedNum + insertionSlack) {
            SUMOVehicleParameter* pars = myCurrentStateInterval->vehicleParameter;
            const MSRoute* route = myProbe != nullptr ? myProbe->getRoute() : nullptr;
            if (route == nullptr) {
                route = MSRoute::dictionary(pars->routeid);
            }
            if (route == nullptr) {
                WRITE_WARNING("No valid routes in calibrator '" + myID + "'.");
                break;
            }
            if (!route->contains(myEdge)) {
                WRITE_WARNING("Route '" + route->getID() + "' in calibrator '" + myID + "' does not contain edge '" + myEdge->getID() + "'.");
                break;
            }
            const int routeIndex = (int)std::distance(route->begin(),
                                   std::find(route->begin(), route->end(), myEdge));
            MSVehicleType* vtype = MSNet::getInstance()->getVehicleControl().getVType(pars->vtypeid);
            assert(route != 0 && vtype != 0);
            // build the vehicle
            SUMOVehicleParameter* newPars = new SUMOVehicleParameter(*pars);
            newPars->id = myID + "." + toString((int)STEPS2TIME(myCurrentStateInterval->begin)) + "." + toString(myInserted);
            newPars->depart = currentTime;
            newPars->routeid = route->getID();
            MSVehicle* vehicle;
            try {
                vehicle = dynamic_cast<MSVehicle*>(MSNet::getInstance()->getVehicleControl().buildVehicle(
                                                       newPars, route, vtype, true, false));
            } catch (const ProcessError& e) {
                if (!MSGlobals::gCheckRoutes) {
                    WRITE_WARNING(e.what());
                    vehicle = nullptr;
                    break;
                } else {
                    throw e;
                }
            }
#ifdef MSCalibrator_DEBUG
            std::cout << " resetting route pos: " << routeIndex << "\n";
#endif
            vehicle->resetRoutePosition(routeIndex);
            if (myEdge->insertVehicle(*vehicle, currentTime)) {
                if (!MSNet::getInstance()->getVehicleControl().addVehicle(vehicle->getID(), vehicle)) {
                    throw ProcessError("Emission of vehicle '" + vehicle->getID() + "' in calibrator '" + getID() + "' failed!");
                }
                myInserted++;
                adaptedNum++;
#ifdef MSCalibrator_DEBUG
                std::cout << "I ";
#endif
            } else {
                // could not insert vehicle
#ifdef MSCalibrator_DEBUG
                std::cout << "F ";
#endif
                MSNet::getInstance()->getVehicleControl().deleteVehicle(vehicle, true);
                break;
            }
        }
    }
    if (myCurrentStateInterval->end <= currentTime + myFrequency) {
        writeXMLOutput();
    }
    return myFrequency;
}
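
The insertion target above is the hourly flow q scaled by the time elapsed since the interval began (plus one simulation step), expressed as a fraction of an hour and rounded to the nearest integer. A plain-C restatement of that computation; the helper is hypothetical, not SUMO's API:

#include <math.h>

static int wished_vehicles(double q_per_hour, double elapsed_seconds) {
  const double hour_fraction = elapsed_seconds / 3600.0;
  return (int)floor(q_per_hour * hour_fraction + 0.5);  /* round to closest int */
}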
Code example #19
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
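
The sizing step above reduces to: take old_size / NewRatio, add a per-thread increase, align up, then clamp into [min_new_size, max_new_size]. A standalone sketch of that rule, assuming power-of-two alignment; this is not HotSpot code:

static size_t align_up_pow2(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

static size_t desired_young_size(size_t old_size, size_t new_ratio,
                                 size_t thread_count, size_t thread_increase,
                                 size_t min_new, size_t max_new,
                                 size_t alignment) {
  size_t desired = old_size / new_ratio + thread_count * thread_increase;
  desired = align_up_pow2(desired, alignment);
  if (desired > max_new) desired = max_new;  /* MIN2(desired, max_new_size) */
  if (desired < min_new) desired = min_new;  /* MAX2(...,    min_new_size) */
  return desired;
}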
Code example #20
File: lp_setup.c  Project: MIPS/external-mesa3d
static boolean
begin_binning( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   boolean need_zsload = FALSE;
   boolean ok;

   assert(scene);
   assert(scene->fence == NULL);

   /* Always create a fence:
    */
   scene->fence = lp_fence_create(MAX2(1, setup->num_threads));
   if (!scene->fence)
      return FALSE;

   ok = try_update_scene_state(setup);
   if (!ok)
      return FALSE;

   if (setup->fb.zsbuf &&
       ((setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
        util_format_is_depth_and_stencil(setup->fb.zsbuf->format))
      need_zsload = TRUE;

   LP_DBG(DEBUG_SETUP, "%s color clear bufs: %x depth: %s\n", __FUNCTION__,
          setup->clear.flags >> 2,
          need_zsload ? "clear": "load");

   if (setup->clear.flags & PIPE_CLEAR_COLOR) {
      unsigned cbuf;
      for (cbuf = 0; cbuf < setup->fb.nr_cbufs; cbuf++) {
         assert(PIPE_CLEAR_COLOR0 == 1 << 2);
         if (setup->clear.flags & (1 << (2 + cbuf))) {
            union lp_rast_cmd_arg clearrb_arg;
            struct lp_rast_clear_rb *cc_scene =
               (struct lp_rast_clear_rb *)
                  lp_scene_alloc(scene, sizeof(struct lp_rast_clear_rb));

            if (!cc_scene) {
               return FALSE;
            }

            cc_scene->cbuf = cbuf;
            cc_scene->color_val = setup->clear.color_val[cbuf];
            clearrb_arg.clear_rb = cc_scene;

            if (!lp_scene_bin_everywhere(scene,
                                         LP_RAST_OP_CLEAR_COLOR,
                                         clearrb_arg))
               return FALSE;
         }
      }
   }

   if (setup->fb.zsbuf) {
      if (setup->clear.flags & PIPE_CLEAR_DEPTHSTENCIL) {
         ok = lp_scene_bin_everywhere( scene,
                                       LP_RAST_OP_CLEAR_ZSTENCIL,
                                       lp_rast_arg_clearzs(
                                          setup->clear.zsvalue,
                                          setup->clear.zsmask));
         if (!ok)
            return FALSE;
      }
   }

   setup->clear.flags = 0;
   setup->clear.zsmask = 0;
   setup->clear.zsvalue = 0;

   scene->had_queries = !!setup->active_binned_queries;

   LP_DBG(DEBUG_SETUP, "%s done\n", __FUNCTION__);
   return TRUE;
}
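
The color-clear loop above relies on PIPE_CLEAR_COLOR0 being bit 2, so color buffer cbuf owns bit 2 + cbuf. A one-line helper restating that test (illustrative only):

static int cbuf_needs_clear(unsigned clear_flags, unsigned cbuf) {
  return (clear_flags & (1u << (2 + cbuf))) != 0;  /* PIPE_CLEAR_COLOR0 == 1 << 2 */
}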
Code example #21
SUMOReal
MSCFModel_KraussOrig1::dawdle(SUMOReal speed) const throw() {
    return MAX2(SUMOReal(0), speed - ACCEL2SPEED(myDawdle * myAccel * RandHelper::rand()));
}
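
The dawdle rule above subtracts a random fraction of one acceleration step from the current speed and never lets it drop below zero. A self-contained approximation in plain C, using rand() in place of SUMO's RandHelper and an explicit step length in place of ACCEL2SPEED:

#include <stdlib.h>

static double dawdle(double speed, double dawdle_factor,
                     double accel, double step_seconds) {
  double r = (double)rand() / (double)RAND_MAX;           /* uniform in [0, 1] */
  double reduced = speed - dawdle_factor * accel * step_seconds * r;
  return reduced > 0.0 ? reduced : 0.0;                   /* MAX2(0, ...) */
}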
Code example #22
File: lp_setup.c  Project: MIPS/external-mesa3d
/**
 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
 */
void
lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_view **views)
{
   unsigned i, max_tex_num;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   max_tex_num = MAX2(num, setup->fs.current_tex_num);

   for (i = 0; i < max_tex_num; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &setup->fs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&setup->fs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - setup array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
              jit_tex->base = lp_tex->data;
            }

            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;

               if (llvmpipe_resource_is_texture(res)) {
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures, we don't have first_layer, instead
                      * adjust last_layer (stored as depth) plus the mip level offsets
                      * (as we have a mip-first layout we can't just adjust the base ptr).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have "offset", instead adjust
                   * the size (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.size / view_blocksize;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.offset;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.offset + view->u.buf.size <= res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                         PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            assert(jit_tex->base);
         }
      }
      else {
         pipe_resource_reference(&setup->fs.current_tex[i], NULL);
      }
   }
   setup->fs.current_tex_num = num;

   setup->dirty |= LP_SETUP_NEW_FS;
}
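
The array-texture branch above shifts every mip level's offset by first_layer times that level's image stride, because the data is laid out mip-first. A standalone restatement of that adjustment with a hypothetical signature:

static void apply_first_layer(unsigned *mip_offsets,
                              const unsigned *img_stride,
                              unsigned first_level, unsigned last_level,
                              unsigned first_layer) {
  unsigned j;
  for (j = first_level; j <= last_level; j++)
    mip_offsets[j] += first_layer * img_stride[j];  /* skip the leading layers */
}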
Code example #23
File: st_cb_bitmap.c  Project: chemecse/mesa
/**
 * Setup pipeline state prior to rendering the bitmap textured quad.
 */
static void
setup_render_state(struct gl_context *ctx,
                   struct pipe_sampler_view *sv,
                   const GLfloat *color,
                   bool atlas)
{
   struct st_context *st = st_context(ctx);
   struct cso_context *cso = st->cso_context;
   struct st_fp_variant *fpv;
   struct st_fp_variant_key key;

   memset(&key, 0, sizeof(key));
   key.st = st->has_shareable_shaders ? NULL : st;
   key.bitmap = GL_TRUE;
   key.clamp_color = st->clamp_frag_color_in_shader &&
                     ctx->Color._ClampFragmentColor;

   fpv = st_get_fp_variant(st, st->fp, &key);

   /* As an optimization, Mesa's fragment programs will sometimes get the
    * primary color from a statevar/constant rather than a varying variable.
    * When that's the case, we need to ensure that we use the 'color'
    * parameter and not the current attribute color (which may have changed
    * through glRasterPos and state validation).
    * So, we force the proper color here.  Not elegant, but it works.
    */
   {
      GLfloat colorSave[4];
      COPY_4V(colorSave, ctx->Current.Attrib[VERT_ATTRIB_COLOR0]);
      COPY_4V(ctx->Current.Attrib[VERT_ATTRIB_COLOR0], color);
      st_upload_constants(st, &st->fp->Base);
      COPY_4V(ctx->Current.Attrib[VERT_ATTRIB_COLOR0], colorSave);
   }

   cso_save_state(cso, (CSO_BIT_RASTERIZER |
                        CSO_BIT_FRAGMENT_SAMPLERS |
                        CSO_BIT_FRAGMENT_SAMPLER_VIEWS |
                        CSO_BIT_VIEWPORT |
                        CSO_BIT_STREAM_OUTPUTS |
                        CSO_BIT_VERTEX_ELEMENTS |
                        CSO_BIT_AUX_VERTEX_BUFFER_SLOT |
                        CSO_BITS_ALL_SHADERS));


   /* rasterizer state: just scissor */
   st->bitmap.rasterizer.scissor = ctx->Scissor.EnableFlags & 1;
   cso_set_rasterizer(cso, &st->bitmap.rasterizer);

   /* fragment shader state: TEX lookup program */
   cso_set_fragment_shader_handle(cso, fpv->driver_shader);

   /* vertex shader state: position + texcoord pass-through */
   cso_set_vertex_shader_handle(cso, st->bitmap.vs);

   /* disable other shaders */
   cso_set_tessctrl_shader_handle(cso, NULL);
   cso_set_tesseval_shader_handle(cso, NULL);
   cso_set_geometry_shader_handle(cso, NULL);

   /* user samplers, plus our bitmap sampler */
   {
      struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
      uint num = MAX2(fpv->bitmap_sampler + 1,
                      st->state.num_frag_samplers);
      uint i;
      for (i = 0; i < st->state.num_frag_samplers; i++) {
         samplers[i] = &st->state.frag_samplers[i];
      }
      if (atlas)
         samplers[fpv->bitmap_sampler] = &st->bitmap.atlas_sampler;
      else
         samplers[fpv->bitmap_sampler] = &st->bitmap.sampler;
      cso_set_samplers(cso, PIPE_SHADER_FRAGMENT, num,
                       (const struct pipe_sampler_state **) samplers);
   }

   /* user textures, plus the bitmap texture */
   {
      struct pipe_sampler_view *sampler_views[PIPE_MAX_SAMPLERS];
      uint num = MAX2(fpv->bitmap_sampler + 1,
                      st->state.num_sampler_views[PIPE_SHADER_FRAGMENT]);
      memcpy(sampler_views, st->state.frag_sampler_views,
             sizeof(sampler_views));
      sampler_views[fpv->bitmap_sampler] = sv;
      cso_set_sampler_views(cso, PIPE_SHADER_FRAGMENT, num, sampler_views);
   }

   /* viewport state: viewport matching window dims */
   cso_set_viewport_dims(cso, st->state.fb_width,
                         st->state.fb_height,
                         st->state.fb_orientation == Y_0_TOP);

   cso_set_vertex_elements(cso, 3, st->util_velems);

   cso_set_stream_outputs(st->cso_context, 0, NULL, NULL);
}
Code example #24
File: MSRouteHandler.cpp  Project: cathyyul/sumo-0.18
void
MSRouteHandler::addStop(const SUMOSAXAttributes& attrs) {
    bool ok = true;
    std::string errorSuffix;
    if (myActiveRouteID != "") {
        errorSuffix = " in route '" + myActiveRouteID + "'.";
    } else if (myActivePlan) {
        errorSuffix = " in person '" + myVehicleParameter->id + "'.";
    } else {
        errorSuffix = " in vehicle '" + myVehicleParameter->id + "'.";
    }
    SUMOVehicleParameter::Stop stop;
    SUMOVehicleParserHelper::parseStop(stop, attrs);
    // try to parse the assigned bus stop
    stop.busstop = attrs.getOpt<std::string>(SUMO_ATTR_BUS_STOP, 0, ok, "");
    if (stop.busstop != "") {
        // ok, we obviously have a bus stop
        MSBusStop* bs = MSNet::getInstance()->getBusStop(stop.busstop);
        if (bs != 0) {
            const MSLane& l = bs->getLane();
            stop.lane = l.getID();
            stop.endPos = bs->getEndLanePosition();
            stop.startPos = bs->getBeginLanePosition();
        } else {
            WRITE_ERROR("The bus stop '" + stop.busstop + "' is not known" + errorSuffix);
            return;
        }
    } else {
        // no, the lane and the position should be given
        // get the lane
        stop.lane = attrs.getOpt<std::string>(SUMO_ATTR_LANE, 0, ok, "");
        if (ok && stop.lane != "") {
            if (MSLane::dictionary(stop.lane) == 0) {
                WRITE_ERROR("The lane '" + stop.lane + "' for a stop is not known" + errorSuffix);
                return;
            }
        } else {
            WRITE_ERROR("A stop must be placed on a bus stop or a lane" + errorSuffix);
            return;
        }
        if (myActivePlan &&
                !myActivePlan->empty() &&
                &myActivePlan->back()->getDestination() != &MSLane::dictionary(stop.lane)->getEdge()) {
            throw ProcessError("Disconnected plan for person '" + myVehicleParameter->id + "' (" + MSLane::dictionary(stop.lane)->getEdge().getID() + "!=" + myActivePlan->back()->getDestination().getID() + ").");
        }
        if (myActivePlan && myActivePlan->empty()) {
            myActivePlan->push_back(new MSPerson::MSPersonStage_Waiting(
                                        MSLane::dictionary(stop.lane)->getEdge(), -1, myVehicleParameter->depart, myVehicleParameter->departPos, "start"));
        }
        stop.endPos = attrs.getOpt<SUMOReal>(SUMO_ATTR_ENDPOS, 0, ok, MSLane::dictionary(stop.lane)->getLength());
        if (attrs.hasAttribute(SUMO_ATTR_POSITION)) {
            WRITE_WARNING("Deprecated attribute 'pos' in description of stop" + errorSuffix);
            stop.endPos = attrs.getOpt<SUMOReal>(SUMO_ATTR_POSITION, 0, ok, stop.endPos);
        }
        stop.startPos = attrs.getOpt<SUMOReal>(SUMO_ATTR_STARTPOS, 0, ok, MAX2(0., stop.endPos - 2 * POSITION_EPS));
        const bool friendlyPos = attrs.getOpt<bool>(SUMO_ATTR_FRIENDLY_POS, 0, ok, false);
        if (!ok || !checkStopPos(stop.startPos, stop.endPos, MSLane::dictionary(stop.lane)->getLength(), POSITION_EPS, friendlyPos)) {
            WRITE_ERROR("Invalid start or end position for stop on lane '" + stop.lane + "'" + errorSuffix);
            return;
        }
    }

    // get the standing duration
    if (!attrs.hasAttribute(SUMO_ATTR_DURATION) && !attrs.hasAttribute(SUMO_ATTR_UNTIL)) {
        stop.triggered = attrs.getOpt<bool>(SUMO_ATTR_TRIGGERED, 0, ok, true);
        stop.duration = -1;
        stop.until = -1;
    } else {
        stop.duration = attrs.getOptSUMOTimeReporting(SUMO_ATTR_DURATION, 0, ok, -1);
        stop.until = attrs.getOptSUMOTimeReporting(SUMO_ATTR_UNTIL, 0, ok, -1);
        if (!ok || (stop.duration < 0 && stop.until < 0)) {
            WRITE_ERROR("Invalid duration or end time is given for a stop on lane '" + stop.lane + "'" + errorSuffix);
            return;
        }
        stop.triggered = attrs.getOpt<bool>(SUMO_ATTR_TRIGGERED, 0, ok, false);
    }
    stop.parking = attrs.getOpt<bool>(SUMO_ATTR_PARKING, 0, ok, stop.triggered);
    if (!ok) {
        WRITE_ERROR("Invalid bool for 'triggered' or 'parking' for stop on lane '" + stop.lane + "'" + errorSuffix);
        return;
    }

    // expected persons
    std::string expectedStr = attrs.getOpt<std::string>(SUMO_ATTR_EXPECTED, 0, ok, "");
    std::set<std::string> personIDs;
    SUMOSAXAttributes::parseStringSet(expectedStr, personIDs);
    stop.awaitedPersons = personIDs;

    const std::string idx = attrs.getOpt<std::string>(SUMO_ATTR_INDEX, 0, ok, "end");
    if (idx == "end") {
        stop.index = STOP_INDEX_END;
    } else if (idx == "fit") {
        stop.index = STOP_INDEX_FIT;
    } else {
        stop.index = attrs.get<int>(SUMO_ATTR_INDEX, 0, ok);
        if (!ok || stop.index < 0) {
            WRITE_ERROR("Invalid 'index' for stop on lane '" + stop.lane + "'" + errorSuffix);
            return;
        }
    }
    if (myActiveRouteID != "") {
        myActiveRouteStops.push_back(stop);
    } else if (myActivePlan) {
        std::string actType = attrs.getOpt<std::string>(SUMO_ATTR_ACTTYPE, 0, ok, "waiting");
        myActivePlan->push_back(new MSPerson::MSPersonStage_Waiting(
                                    MSLane::dictionary(stop.lane)->getEdge(), stop.duration, stop.until, stop.startPos, actType));
    } else {
        myVehicleParameter->stops.push_back(stop);
    }
}
Code example #25
File: globalDefinitions.hpp  Project: eregon/jvmci
template<class T> inline T MAX4(T a, T b, T c, T d) { return MAX2(MAX3(a, b, c), d); }
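
MAX4 above just chains the smaller helpers. For context, C macro equivalents of the MAX2/MAX3 building blocks it relies on (the actual HotSpot helpers are templates; macros are used here purely for illustration):

#define MAX2(a, b)       ((a) > (b) ? (a) : (b))
#define MAX3(a, b, c)    MAX2(MAX2((a), (b)), (c))
#define MAX4(a, b, c, d) MAX2(MAX3((a), (b), (c)), (d))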
Code example #26
File: r600_texture.c  Project: TechnoMancer/mesa
/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surface fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.bo_size = 0;
	fmask.nsamples = 1;
	fmask.flags |= RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}
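
Two of the derived values above are easy to restate in isolation: slice_tile_max is the number of 64-block tile groups minus one, and the alignment is the surface alignment clamped to at least 256. A small sketch, not the radeon surface API:

static unsigned fmask_slice_tile_max(unsigned nblk_x, unsigned nblk_y) {
  unsigned tiles = (nblk_x * nblk_y) / 64;
  return tiles ? tiles - 1 : 0;
}

static unsigned fmask_alignment(unsigned bo_alignment) {
  return bo_alignment > 256 ? bo_alignment : 256;  /* MAX2(256, bo_alignment) */
}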
Code example #27
void
x11_fplot(const XDpy * dpy, XAnim * anim, XFall * fall,
	color_t * pal, uint pal_len,
	int x, int y, uint w, uint h,
	double *xdat, double *ydat, uint l,
	double samp_rate,
	const char *xtitle, const char *ytitle,
	const char *fx_title, const char *fy_title,
	int is_log,
	box2d * taxx, box2d * faxx, uint axxhint)
{
  int x1, y1, w1, h1;
  int x2, y2, w2, h2;
  int x3, y3, w3, h3;

  w1 = w;
  w2 = w;
  w3 = w;

  h1 = h / 3;
  h2 = h / 3;
  h3 = h - h1 - h2;

  x1 = x;
  x2 = x;
  x3 = x;

  y1 = y;
  y2 = y + h1;
  y3 = y + 2 * h1;

  if (taxx) {
    double xmin, xmax;
    double ymin, ymax;

    /* get extremes and spans */
    darray_extremes(xdat, l, &xmin, &xmax);
    darray_extremes(ydat, l, &ymin, &ymax);

    if (axxhint == 0) {
      box2d_set(taxx, xmin, ymin, xmax, ymax);
    } else {
      /* extend it */
      taxx->l.x = MIN2(taxx->l.x, xmin);
      taxx->l.y = MIN2(taxx->l.y, ymin);
      taxx->u.x = MAX2(taxx->u.x, xmax);
      taxx->u.y = MAX2(taxx->u.y, ymax);
    }

    x11_plot(dpy, anim, x1, y1, w1, h1, xdat, ydat, l,
	   taxx->l.x, taxx->u.x,
	   taxx->l.y, taxx->u.y,
	   xtitle, ytitle);
  } else {
    x11_plot_autoaxis(dpy, anim, x1, y1, w1, h1, xdat, ydat, l,
		    xtitle, ytitle);
  }

  double *xi = darray_calloc(l);	/* zeros */
  double *tre = darray_malloc(l);	/* real part of time domain data */
  double *tim = darray_malloc(l);	/* imag part of time domain data */
  double *abs = darray_malloc(l);

  uint lo2 = l / 2;
  double *fscale = darray_malloc(lo2);
  darray_ramp(fscale, lo2, 0, samp_rate / 2 / lo2);

  double *ydat_f = darray_malloc(l);

  uint i;

  /* windowing filter */
  for (i = 0; i < l; i++) {
    ydat_f[i] = ydat[i] * double_blackman(i, l);
  }

  if (IS_BINPOW(l)) {		/* only if length is a power of two */
    /* can we perform an FFT */
    darray_FFT(tre, tim, ydat_f, xi, l);
    darray_eud2D(abs, tre, tim, l);
    if (is_log) {
      uint i;
      for (i = 0; i < l; i++) {
	abs[i] = log10(abs[i]);
      }
    }

    if (faxx) {
      double xmin, xmax;
      double ymin, ymax;

      /* get extremes and spans */
      darray_extremes(fscale, lo2, &xmin, &xmax);
      darray_extremes(abs, l, &ymin, &ymax);

      if (axxhint == 0) {
	box2d_set(faxx, xmin, ymin, xmax, ymax);
      } else {
	/* extend it */
	faxx->l.x = MIN2(faxx->l.x, xmin);
	faxx->l.y = MIN2(faxx->l.y, ymin);
	faxx->u.x = MAX2(faxx->u.x, xmax);
	faxx->u.y = MAX2(faxx->u.y, ymax);
      }

      x11_plot(dpy, anim,
	     x2, y2, w2, h2, fscale, abs, lo2,
	     faxx->l.x, faxx->u.x,
	     faxx->l.y, faxx->u.y,
	     fx_title, fy_title);
    } else {
      x11_plot_autoaxis(dpy, anim,
		      x2, y2, w2, h2, fscale, abs, lo2,
		      fx_title, fy_title);
    }

    double ymin, ymax;
    darray_extremes(abs, l, &ymin, &ymax);
    double yspan = ymax - ymin;
    color_t * cols = (color_t*)malloc(lo2 * sizeof(color_t));
    /* Waterfall */
    for (i = 0; i < lo2; i++) {
      uint val =
	uint_clamp(0,
		   (uint)((abs[i] - ymin) / yspan * (pal_len - 1)),
		   pal_len - 1);
      cols[i] = pal[val];
    }
    x11_appFall(dpy, anim, fall, cols, lo2);
    free(cols);
    x11_dWFall(dpy, anim, fall, x3, y3);

  } else {
    leprintf("%s: n=%u is not a power of two. Returning.\n", __FUNCTION__, l);
  }

  free(xi);
  free(tre);
  free(tim);
  free(abs);
  free(fscale);
  free(ydat_f);
}
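
The FFT branch above runs only when the sample count is a power of two, and each sample is weighted by a Blackman window first. Both pieces are easy to restate on their own; the window below uses the classic 0.42/0.5/0.08 coefficients, which may differ slightly from the double_blackman used here:

#include <math.h>

static int is_power_of_two(unsigned n) {
  return n != 0 && (n & (n - 1)) == 0;
}

static double blackman_window(unsigned i, unsigned n) {
  const double pi = 3.14159265358979323846;
  const double x = 2.0 * pi * (double)i / (double)(n - 1);
  return 0.42 - 0.5 * cos(x) + 0.08 * cos(2.0 * x);
}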
Code example #28
File: render_skybox.cpp  Project: kapilkd13/stk-code
void IrrDriver::generateDiffuseCoefficients()
{
    if (!m_SH_dirty)
        return;
    m_SH_dirty = false;
    const unsigned texture_permutation[] = { 2, 3, 0, 1, 5, 4 };

    if (SphericalHarmonicsTextures.size() == 6)
    {
        unsigned sh_w = 0, sh_h = 0;
        for (unsigned i = 0; i < 6; i++)
        {
            sh_w = MAX2(sh_w, SphericalHarmonicsTextures[i]->getOriginalSize().Width);
            sh_h = MAX2(sh_h, SphericalHarmonicsTextures[i]->getOriginalSize().Height);
        }

        unsigned char *sh_rgba[6];
        for (unsigned i = 0; i < 6; i++)
            sh_rgba[i] = new unsigned char[sh_w * sh_h * 4];
        for (unsigned i = 0; i < 6; i++)
        {
            unsigned idx = texture_permutation[i];

            video::IImage* image = getVideoDriver()->createImageFromData(
                SphericalHarmonicsTextures[idx]->getColorFormat(),
                SphericalHarmonicsTextures[idx]->getSize(),
                SphericalHarmonicsTextures[idx]->lock(),
                false
                );
            SphericalHarmonicsTextures[idx]->unlock();

            image->copyToScaling(sh_rgba[i], sh_w, sh_h);
            delete image;
        }

        testSH(sh_rgba, sh_w, sh_h, blueSHCoeff, greenSHCoeff, redSHCoeff);

        for (unsigned i = 0; i < 6; i++)
            delete[] sh_rgba[i];
    }
    else
    {
        int sh_w = 16;
        int sh_h = 16;

        video::SColor ambient = m_scene_manager->getAmbientLight().toSColor();

        unsigned char *sh_rgba[6];
        for (unsigned i = 0; i < 6; i++)
        {
            sh_rgba[i] = new unsigned char[sh_w * sh_h * 4];

            for (int j = 0; j < sh_w * sh_h * 4; j += 4)
            {
                sh_rgba[i][j] = ambient.getBlue();
                sh_rgba[i][j + 1] = ambient.getGreen();
                sh_rgba[i][j + 2] = ambient.getRed();
                sh_rgba[i][j + 3] = 255;
            }
        }

        testSH(sh_rgba, sh_w, sh_h, blueSHCoeff, greenSHCoeff, redSHCoeff);

        // Diffuse env map is x 0.25, compensate
        for (unsigned i = 0; i < 9; i++)
        {
            blueSHCoeff[i] *= 4;
            greenSHCoeff[i] *= 4;
            redSHCoeff[i] *= 4;
        }

        for (unsigned i = 0; i < 6; i++)
            delete[] sh_rgba[i];
    }

    /*for (unsigned i = 0; i < 6; i++)
    {
    glBindTexture(GL_TEXTURE_CUBE_MAP, ConvolutedSkyboxCubeMap);
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_SRGB_ALPHA, w, h, 0, GL_BGRA, GL_UNSIGNED_BYTE, (GLvoid*)rgba[i]);
    }

    glBindTexture(GL_TEXTURE_CUBE_MAP, 0);*/

}
Code example #29
File: svga_pipe_sampler.c  Project: jon-turney/mesa
/**
 * Define a vgpu10 sampler state.
 */
static void
define_sampler_state_object(struct svga_context *svga,
                            struct svga_sampler_state *ss,
                            const struct pipe_sampler_state *ps)
{
   uint8_t max_aniso = (uint8_t) 255; /* XXX fix me */
   boolean anisotropic;
   uint8 compare_func;
   SVGA3dFilter filter;
   SVGA3dRGBAFloat bcolor;
   unsigned try;
   float min_lod, max_lod;

   assert(svga_have_vgpu10(svga));

   anisotropic = ss->aniso_level > 1.0f;

   filter = translate_filter_mode(ps->min_mip_filter,
                                  ps->min_img_filter,
                                  ps->mag_img_filter,
                                  anisotropic,
                                  ss->compare_mode);

   compare_func = translate_comparison_func(ss->compare_func);

   COPY_4V(bcolor.value, ps->border_color.f);

   assert(ps->min_lod <= ps->max_lod);

   if (ps->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) {
      /* just use the base level image */
      min_lod = max_lod = 0.0f;
   }
   else {
      min_lod = ps->min_lod;
      max_lod = ps->max_lod;
   }

   /* If shadow comparisons are enabled, create two sampler states: one
    * with the given shadow compare mode, another with shadow comparison off.
    * We need the latter because in some cases, we have to do the shadow
    * compare in the shader.  So, we don't want to do it twice.
    */
   STATIC_ASSERT(PIPE_TEX_COMPARE_NONE == 0);
   STATIC_ASSERT(PIPE_TEX_COMPARE_R_TO_TEXTURE == 1);
   ss->id[1] = SVGA3D_INVALID_ID;

   unsigned i;
   for (i = 0; i <= ss->compare_mode; i++) {
      ss->id[i] = util_bitmask_add(svga->sampler_object_id_bm);

      /* Loop in case command buffer is full and we need to flush and retry */
      for (try = 0; try < 2; try++) {
         enum pipe_error ret =
            SVGA3D_vgpu10_DefineSamplerState(svga->swc,
                                             ss->id[i],
                                             filter,
                                             ss->addressu,
                                             ss->addressv,
                                             ss->addressw,
                                             ss->lod_bias, /* float */
                                             max_aniso,
                                             compare_func,
                                             bcolor,
                                             min_lod,       /* float */
                                             max_lod);      /* float */
         if (ret == PIPE_OK)
            break;
         svga_context_flush(svga, NULL);
      }

      /* turn off the shadow compare option for second iteration */
      filter &= ~SVGA3D_FILTER_COMPARE;
   }
}


static void *
svga_create_sampler_state(struct pipe_context *pipe,
                          const struct pipe_sampler_state *sampler)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_sampler_state *cso = CALLOC_STRUCT( svga_sampler_state );

   if (!cso)
      return NULL;

   cso->mipfilter = translate_mip_filter(sampler->min_mip_filter);
   cso->magfilter = translate_img_filter( sampler->mag_img_filter );
   cso->minfilter = translate_img_filter( sampler->min_img_filter );
   cso->aniso_level = MAX2( sampler->max_anisotropy, 1 );
   if (sampler->max_anisotropy)
      cso->magfilter = cso->minfilter = SVGA3D_TEX_FILTER_ANISOTROPIC;
   cso->lod_bias = sampler->lod_bias;
   cso->addressu = translate_wrap_mode(sampler->wrap_s);
   cso->addressv = translate_wrap_mode(sampler->wrap_t);
   cso->addressw = translate_wrap_mode(sampler->wrap_r);
   cso->normalized_coords = sampler->normalized_coords;
   cso->compare_mode = sampler->compare_mode;
   cso->compare_func = sampler->compare_func;

   {
      uint32 r = float_to_ubyte(sampler->border_color.f[0]);
      uint32 g = float_to_ubyte(sampler->border_color.f[1]);
      uint32 b = float_to_ubyte(sampler->border_color.f[2]);
      uint32 a = float_to_ubyte(sampler->border_color.f[3]);

      cso->bordercolor = (a << 24) | (r << 16) | (g << 8) | b;
   }

   /* No SVGA3D support for min/max LOD clamping */
   cso->min_lod = 0;
   cso->view_min_lod = MAX2((int) (sampler->min_lod + 0.5), 0);
   cso->view_max_lod = MAX2((int) (sampler->max_lod + 0.5), 0);

   /* Debug option: force sampling from a single mipmap level */
   if (svga->debug.use_min_mipmap) {
      if (cso->view_min_lod == cso->view_max_lod) {
         cso->min_lod = cso->view_min_lod;
         cso->view_min_lod = 0;
         cso->view_max_lod = 1000; /* Just a high number */
         cso->mipfilter = SVGA3D_TEX_FILTER_NONE;
      }
   }

   if (svga_have_vgpu10(svga)) {
      define_sampler_state_object(svga, cso, sampler);
   }

   SVGA_DBG(DEBUG_SAMPLERS,
            "New sampler: min %u, view(min %u, max %u) lod, mipfilter %s\n",
            cso->min_lod, cso->view_min_lod, cso->view_max_lod,
            cso->mipfilter == SVGA3D_TEX_FILTER_NONE ? "SVGA3D_TEX_FILTER_NONE" : "SOMETHING");

   svga->hud.num_sampler_objects++;
   SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
                        SVGA_STATS_COUNT_SAMPLER);

   return cso;
}


static void
svga_bind_sampler_states(struct pipe_context *pipe,
                         enum pipe_shader_type shader,
                         unsigned start,
                         unsigned num,
                         void **samplers)
{
   struct svga_context *svga = svga_context(pipe);
   unsigned i;
   boolean any_change = FALSE;

   assert(shader < PIPE_SHADER_TYPES);
   assert(start + num <= PIPE_MAX_SAMPLERS);

   /* Pre-VGPU10 only supports FS textures */
   if (!svga_have_vgpu10(svga) && shader != PIPE_SHADER_FRAGMENT)
      return;

   for (i = 0; i < num; i++) {
      if (svga->curr.sampler[shader][start + i] != samplers[i])
         any_change = TRUE;
      svga->curr.sampler[shader][start + i] = samplers[i];
   }

   if (!any_change) {
      return;
   }

   /* find highest non-null sampler[] entry */
   {
      unsigned j = MAX2(svga->curr.num_samplers[shader], start + num);
      while (j > 0 && svga->curr.sampler[shader][j - 1] == NULL)
         j--;
      svga->curr.num_samplers[shader] = j;
   }

   svga->dirty |= SVGA_NEW_SAMPLER;
}


static void
svga_delete_sampler_state(struct pipe_context *pipe, void *sampler)
{
   struct svga_sampler_state *ss = (struct svga_sampler_state *) sampler;
   struct svga_context *svga = svga_context(pipe);

   if (svga_have_vgpu10(svga)) {
      unsigned i;
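      /* Destroy both sampler state variants: with and without shadow compare */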
      for (i = 0; i < 2; i++) {
         enum pipe_error ret;

         if (ss->id[i] != SVGA3D_INVALID_ID) {
            svga_hwtnl_flush_retry(svga);

            ret = SVGA3D_vgpu10_DestroySamplerState(svga->swc, ss->id[i]);
            if (ret != PIPE_OK) {
               svga_context_flush(svga, NULL);
               ret = SVGA3D_vgpu10_DestroySamplerState(svga->swc, ss->id[i]);
            }
            util_bitmask_clear(svga->sampler_object_id_bm, ss->id[i]);
         }
      }
   }

   FREE(sampler);
   svga->hud.num_sampler_objects--;
}


static struct pipe_sampler_view *
svga_create_sampler_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_pipe_sampler_view *sv = CALLOC_STRUCT(svga_pipe_sampler_view);

   if (!sv) {
      return NULL;
   }

   sv->base = *templ;
   sv->base.reference.count = 1;
   sv->base.texture = NULL;
   pipe_resource_reference(&sv->base.texture, texture);

   sv->base.context = pipe;
   sv->id = SVGA3D_INVALID_ID;
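   /* No host shader resource view is defined yet; the id stays invalid until
    * the view is actually used for rendering.
    */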

   svga->hud.num_samplerview_objects++;
   SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
                        SVGA_STATS_COUNT_SAMPLERVIEW);

   return &sv->base;
}


static void
svga_sampler_view_destroy(struct pipe_context *pipe,
                          struct pipe_sampler_view *view)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_pipe_sampler_view *sv = svga_pipe_sampler_view(view);

   if (svga_have_vgpu10(svga) && sv->id != SVGA3D_INVALID_ID) {
      if (view->context != pipe) {
         /* The SVGA3D device will generate an error (and on Linux, cause
          * us to abort) if we try to destroy a shader resource view from
          * a context other than the one it was created with.  Skip the
          * SVGA3D_vgpu10_DestroyShaderResourceView() and leak the sampler
          * view for now.  This should only happen occasionally, when a
          * shared texture is deleted.
          */
         _debug_printf("context mismatch in %s\n", __func__);
      }
      else {
         enum pipe_error ret;

         svga_hwtnl_flush_retry(svga); /* XXX is this needed? */

         ret = SVGA3D_vgpu10_DestroyShaderResourceView(svga->swc, sv->id);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_vgpu10_DestroyShaderResourceView(svga->swc, sv->id);
         }
         util_bitmask_clear(svga->sampler_view_id_bm, sv->id);
      }
   }

   pipe_resource_reference(&sv->base.texture, NULL);

   FREE(sv);
   svga->hud.num_samplerview_objects--;
}


static void
svga_set_sampler_views(struct pipe_context *pipe,
                       enum pipe_shader_type shader,
                       unsigned start,
                       unsigned num,
                       struct pipe_sampler_view **views)
{
   struct svga_context *svga = svga_context(pipe);
   unsigned flag_1d = 0;
   unsigned flag_srgb = 0;
   uint i;
   boolean any_change = FALSE;

   assert(shader < PIPE_SHADER_TYPES);
   assert(start + num <= ARRAY_SIZE(svga->curr.sampler_views[shader]));

   /* Pre-VGPU10 only supports FS textures */
   if (!svga_have_vgpu10(svga) && shader != PIPE_SHADER_FRAGMENT)
      return;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_SETSAMPLERVIEWS);

   /* This bit of code works around a quirk in the CSO module.
    * If start=num=0 it means all sampler views should be released.
    * Note that the CSO module treats sampler views for fragment shaders
    * differently than other shader types.
    */
   if (start == 0 && num == 0 && svga->curr.num_sampler_views[shader] > 0) {
      for (i = 0; i < svga->curr.num_sampler_views[shader]; i++) {
         pipe_sampler_view_release(pipe, &svga->curr.sampler_views[shader][i]);
      }
      any_change = TRUE;
   }

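   /* Bind the new views, tracking sRGB and 1D usage along the way */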
   for (i = 0; i < num; i++) {
      enum pipe_texture_target target;

      if (svga->curr.sampler_views[shader][start + i] != views[i]) {
         /* Note: we're using pipe_sampler_view_release() here to work around
          * a possible crash when the old view belongs to another context that
          * was already destroyed.
          */
         pipe_sampler_view_release(pipe, &svga->curr.sampler_views[shader][start + i]);
         pipe_sampler_view_reference(&svga->curr.sampler_views[shader][start + i],
                                     views[i]);
         any_change = TRUE;
      }

      if (!views[i])
         continue;

      if (util_format_is_srgb(views[i]->format))
         flag_srgb |= 1 << (start + i);

      target = views[i]->target;
      if (target == PIPE_TEXTURE_1D) {
         flag_1d |= 1 << (start + i);
      } else if (target == PIPE_TEXTURE_RECT) {
         /* If the size of the bound texture changes, we need to emit new
          * const buffer values.
          */
         svga->dirty |= SVGA_NEW_TEXTURE_CONSTS;
      } else if (target == PIPE_BUFFER) {
         /* If the size of the bound buffer changes, we need to emit new
          * const buffer values.
          */
         svga->dirty |= SVGA_NEW_TEXTURE_CONSTS;
      }
   }

   if (!any_change) {
      goto done;
   }

   /* find highest non-null sampler_views[] entry */
   {
      unsigned j = MAX2(svga->curr.num_sampler_views[shader], start + num);
      while (j > 0 && svga->curr.sampler_views[shader][j - 1] == NULL)
         j--;
      svga->curr.num_sampler_views[shader] = j;
   }

   svga->dirty |= SVGA_NEW_TEXTURE_BINDING;

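   /* Update the cached sRGB/1D masks and flag dependent state for re-emission */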
   if (flag_srgb != svga->curr.tex_flags.flag_srgb ||
       flag_1d != svga->curr.tex_flags.flag_1d) {
      svga->dirty |= SVGA_NEW_TEXTURE_FLAGS;
      svga->curr.tex_flags.flag_1d = flag_1d;
      svga->curr.tex_flags.flag_srgb = flag_srgb;
   }

   /* Check if any of the sampler view resources collide with the framebuffer
    * color buffers or depth stencil resource. If so, set the NEW_FRAME_BUFFER
    * dirty bit so that emit_framebuffer can be invoked to create a backed
    * view for the conflicting surface view.
    */
   if (svga_check_sampler_framebuffer_resource_collision(svga, shader)) {
      svga->dirty |= SVGA_NEW_FRAME_BUFFER;
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
}

/**
 * Clean up sampler, sampler view state at context destruction time
 */
void
svga_cleanup_sampler_state(struct svga_context *svga)
{
   enum pipe_shader_type shader;

   for (shader = 0; shader <= PIPE_SHADER_GEOMETRY; shader++) {
      unsigned i;

      for (i = 0; i < svga->state.hw_draw.num_sampler_views[shader]; i++) {
         pipe_sampler_view_release(&svga->pipe,
                                   &svga->state.hw_draw.sampler_views[shader][i]);
      }
   }
   
   /* free polygon stipple state */
   if (svga->polygon_stipple.sampler) {
      svga->pipe.delete_sampler_state(&svga->pipe, svga->polygon_stipple.sampler);
   }

   if (svga->polygon_stipple.sampler_view) {
      svga->pipe.sampler_view_destroy(&svga->pipe,
                                      &svga->polygon_stipple.sampler_view->base);
   }
   pipe_resource_reference(&svga->polygon_stipple.texture, NULL);
}

void
svga_init_sampler_functions( struct svga_context *svga )
{
   svga->pipe.create_sampler_state = svga_create_sampler_state;
   svga->pipe.bind_sampler_states = svga_bind_sampler_states;
   svga->pipe.delete_sampler_state = svga_delete_sampler_state;
   svga->pipe.set_sampler_views = svga_set_sampler_views;
   svga->pipe.create_sampler_view = svga_create_sampler_view;
   svga->pipe.sampler_view_destroy = svga_sampler_view_destroy;
}
}
Code example #30
0
void ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  const size_t alignment = min_alignment();

  // Check alignments
  // NEEDS_CLEANUP   The default TwoGenerationCollectorPolicy uses
  //   NewRatio;  it should check UseAdaptiveSizePolicy. Changes from
  //   generationSizer could move to the common code.
  size_t young_size = align_size_up(flag_parser.young_gen_size(), alignment);
  size_t max_young_size = align_size_up(flag_parser.max_young_gen_size(),
                                        alignment);

  size_t old_size = align_size_up(flag_parser.old_gen_size(), alignment);
  size_t max_old_size = align_size_up(flag_parser.max_old_gen_size(),
                                      alignment);

  size_t perm_size = align_size_up(flag_parser.perm_gen_size(), alignment);
  size_t max_perm_size = align_size_up(flag_parser.max_perm_gen_size(),
                                       alignment);

  // Calculate the total size.
  size_t total_reserved = max_young_size + max_old_size + max_perm_size;

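  // With ISM large pages, round the total reservation up to a multiple of
  // the large page size.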
  if (UseISM || UsePermISM) {
    total_reserved = round_to(total_reserved, LargePageSizeInBytes);
  }

  ReservedSpace heap_rs(total_reserved, alignment, UseISM || UsePermISM);
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  HeapWord* boundary = (HeapWord*)(heap_rs.base() + max_young_size);
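  // Set up the card-table barrier set covering the entire reserved region.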
  CardTableExtension* card_table_barrier_set = new CardTableExtension(_reserved, 3);
  if (card_table_barrier_set == NULL) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "barrier set");
  }
  _barrier_set = card_table_barrier_set;
  oopDesc::set_bs(_barrier_set);

  // Initial young gen size is 4 MB, clamped to the [young_size, max_young_size] range.
  size_t init_young_size = align_size_up(4 * M, alignment);
  init_young_size = MAX2(MIN2(init_young_size, max_young_size), young_size);

  ReservedSpace generation_rs = heap_rs.first_part(max_young_size);
  _young_gen = new PSYoungGen(generation_rs, 
                              init_young_size, 
                              young_size, 
                              max_young_size);
  heap_rs = heap_rs.last_part(max_young_size);

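  // The remainder of the reservation is carved up for the old and perm gens.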
  generation_rs = heap_rs.first_part(max_old_size);
  _old_gen = new PSOldGen(generation_rs,
                          old_size,
                          old_size,
                          max_old_size,
                          "old", 1);

  heap_rs = heap_rs.last_part(max_old_size);

  _perm_gen = new PSPermGen(heap_rs,
                            perm_size,
                            perm_size,
                            max_perm_size,
                            "perm", 2);

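  // Seed the adaptive size policy with the initial eden, old-gen and
  // to-space capacities.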
  _size_policy = new AdaptiveSizePolicy(young_gen()->eden_space()->capacity_in_bytes(),
                                        old_gen()->capacity_in_bytes(),
                                        young_gen()->to_space()->capacity_in_bytes(),
                                        max_young_size,
                                        max_old_size,
                                        alignment);

  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters = new GCPolicyCounters(PERF_GC, "ParScav:MSC", 2, 3);
}