Example #1
/* Texture parameter */
void glTexParameteri(GLenum target, GLenum pname, GLint param) {	
	assert(target == GL_TEXTURE_2D);

	switch(pname) {
		case GL_TEXTURE_FILTER:
			switch(param) {
				case GL_FILTER_NONE:
					gl_cur_texture->txr.filter = PVR_FILTER_NONE;
					break;
				case GL_FILTER_BILINEAR:
					gl_cur_texture->txr.filter = PVR_FILTER_BILINEAR;
					break;
				default:
					assert_msg(0, "Unknown texture filter.");
					break;
			}
			break;
		case GL_TEXTURE_WRAP_S: /* adjust state of UVCLAMP_U */
			switch(param) {
				case GL_REPEAT:
					if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_UV)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_V;
					else if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_U)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_NONE;
					break;
				case GL_CLAMP:
					if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_NONE)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_U;
					else if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_V)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_UV;
					break;
				default:
					assert_msg(0, "Unknown texture wrap mode.");
					break;
			}
			break;
		case GL_TEXTURE_WRAP_T: /* adjust state of UVCLAMP_V */
			switch(param) {
				case GL_REPEAT:
					if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_UV)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_U;
					else if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_V)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_NONE;
					break;
				case GL_CLAMP:
					if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_NONE)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_V;
					else if (gl_cur_texture->txr.uv_clamp == PVR_UVCLAMP_U)
						gl_cur_texture->txr.uv_clamp = PVR_UVCLAMP_UV;
					break;
				default:
					assert_msg(0, "Unknown texture wrap mode.");
					break;
			}
			break;
		default:
			assert_msg(0, "Unknown parameter name (pname).");
			break;

	}
	gl_pbuf_submitted = GL_FALSE;
}
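A minimal usage sketch for the entry point above (hedged: glGenTextures()/glBindTexture() are assumed from the same KOS libgl and are not part of this example; the constants are the ones the switch already handles):

GLuint txr_id;

glGenTextures(1, &txr_id);
glBindTexture(GL_TEXTURE_2D, txr_id);

/* Bilinear filtering, clamp U, keep V repeating. */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_FILTER, GL_FILTER_BILINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);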
Example #2
  void ProjBasedSelector::calc_projection_errors(Element* e, const CandsInfo& info_h, const CandsInfo& info_p, const CandsInfo& info_aniso, Solution* rsln, CandElemProjError herr[4], CandElemProjError perr, CandElemProjError anisoerr[4]) {
    assert_msg(info_h.is_empty() || (H2D_GET_H_ORDER(info_h.max_quad_order) <= H2DRS_MAX_ORDER && H2D_GET_V_ORDER(info_h.max_quad_order) <= H2DRS_MAX_ORDER), "Maximum allowed order of a son of H-candidate is %d but order (H:%d,V:%d) requested.", H2DRS_MAX_ORDER, H2D_GET_H_ORDER(info_h.max_quad_order), H2D_GET_V_ORDER(info_h.max_quad_order));
    assert_msg(info_p.is_empty() || (H2D_GET_H_ORDER(info_p.max_quad_order) <= H2DRS_MAX_ORDER && H2D_GET_V_ORDER(info_p.max_quad_order) <= H2DRS_MAX_ORDER), "Maximum allowed order of a son of P-candidate is %d but order (H:%d,V:%d) requested.", H2DRS_MAX_ORDER, H2D_GET_H_ORDER(info_p.max_quad_order), H2D_GET_V_ORDER(info_p.max_quad_order));
    assert_msg(info_aniso.is_empty() || (H2D_GET_H_ORDER(info_aniso.max_quad_order) <= H2DRS_MAX_ORDER && H2D_GET_V_ORDER(info_aniso.max_quad_order) <= H2DRS_MAX_ORDER), "Maximum allowed order of a son of ANISO-candidate is %d but order (H:%d,V:%d) requested.", H2DRS_MAX_ORDER, H2D_GET_H_ORDER(info_aniso.max_quad_order), H2D_GET_V_ORDER(info_aniso.max_quad_order));

    int mode = e->get_mode();

    // select quadrature, obtain integration points and weights
    Quad2D* quad = &g_quad_2d_std;
    quad->set_mode(mode);
    rsln->set_quad_2d(quad);
    double3* gip_points = quad->get_points(H2DRS_INTR_GIP_ORDER);
    int num_gip_points = quad->get_num_points(H2DRS_INTR_GIP_ORDER);

    // everything is done on the reference domain
    rsln->enable_transform(false);

    // obtain reference solution values on all four refined sons
    scalar** rval[H2D_MAX_ELEMENT_SONS];
    Element* base_element = rsln->get_mesh()->get_element(e->id);
    assert(!base_element->active);
    for (int son = 0; son < H2D_MAX_ELEMENT_SONS; son++)
    {
      //set element
      Element* e = base_element->sons[son];
      assert(e != NULL);

      //obtain precalculated values
      rval[son] = precalc_ref_solution(son, rsln, e, H2DRS_INTR_GIP_ORDER);
    }

    //retrieve transformations
    Trf* trfs = NULL;
    int num_noni_trfs = 0;
    if (mode == H2D_MODE_TRIANGLE) {
      trfs = tri_trf;
      num_noni_trfs = H2D_TRF_TRI_NUM;
    }
    else {
      trfs = quad_trf;
      num_noni_trfs = H2D_TRF_QUAD_NUM;
    }

    // precalculate values of shape functions
    TrfShape empty_shape_vals;
    if (!cached_shape_vals_valid[mode]) {
      precalc_ortho_shapes(gip_points, num_gip_points, trfs, num_noni_trfs, shape_indices[mode], max_shape_inx[mode], cached_shape_ortho_vals[mode]);
      precalc_shapes(gip_points, num_gip_points, trfs, num_noni_trfs, shape_indices[mode], max_shape_inx[mode], cached_shape_vals[mode]);
      cached_shape_vals_valid[mode] = true;

      //issue a warning if ortho values are defined and the selected cand_list could benefit from them, but cannot because elements do not have uniform orders
      if (!warn_uniform_orders && mode == H2D_MODE_QUAD && !cached_shape_ortho_vals[mode][H2D_TRF_IDENTITY].empty()) {
        warn_uniform_orders = true;
        if (cand_list == H2D_H_ISO || cand_list == H2D_H_ANISO || cand_list == H2D_P_ISO || cand_list == H2D_HP_ISO || cand_list == H2D_HP_ANISO_H) {
          warn_if(!info_h.uniform_orders || !info_aniso.uniform_orders || !info_p.uniform_orders, "Possible inefficiency: %s might be more efficient if the input mesh contained only elements with uniform orders.", get_cand_list_str(cand_list));
        }
      }
    }
    TrfShape& svals = cached_shape_vals[mode];
    TrfShape& ortho_svals = cached_shape_ortho_vals[mode];

    //H-candidates
    if (!info_h.is_empty()) {
      Trf* p_trf_identity[1] = { &trfs[H2D_TRF_IDENTITY] };
      std::vector<TrfShapeExp>* p_trf_svals[1] = { &svals[H2D_TRF_IDENTITY] };
      std::vector<TrfShapeExp>* p_trf_ortho_svals[1] = { &ortho_svals[H2D_TRF_IDENTITY] };
      for(int son = 0; son < H2D_MAX_ELEMENT_SONS; son++) {
        scalar **sub_rval[1] = { rval[son] };
        calc_error_cand_element(mode, gip_points, num_gip_points
          , 1, &base_element->sons[son], p_trf_identity, sub_rval
          , p_trf_svals, p_trf_ortho_svals
          , info_h, herr[son]);
      }
    }

    //ANISO-candidates
    if (!info_aniso.is_empty()) {
      const int sons[4][2] = { {0,1}, {3,2}, {0,3}, {1,2} }; //indices of sons for sub-areas
      const int tr[4][2]   = { {6,7}, {6,7}, {4,5}, {4,5} }; //indices of ref. domain transformations for sub-areas
      for(int version = 0; version < 4; version++) { // 2 elements for vertical split, 2 elements for horizontal split
        Trf* sub_trfs[2] = { &trfs[tr[version][0]], &trfs[tr[version][1]] };
        Element* sub_domains[2] = { base_element->sons[sons[version][0]], base_element->sons[sons[version][1]] };
        scalar **sub_rval[2] = { rval[sons[version][0]], rval[sons[version][1]] };
        std::vector<TrfShapeExp>* sub_svals[2] = { &svals[tr[version][0]], &svals[tr[version][1]] };
        std::vector<TrfShapeExp>* sub_ortho_svals[2] = { &ortho_svals[tr[version][0]], &ortho_svals[tr[version][1]] };
        calc_error_cand_element(mode, gip_points, num_gip_points
          , 2, sub_domains, sub_trfs, sub_rval
          , sub_svals, sub_ortho_svals
          , info_aniso, anisoerr[version]);
      }
    }

    //P-candidates
    if (!info_p.is_empty()) {
      Trf* sub_trfs[4] = { &trfs[0], &trfs[1], &trfs[2], &trfs[3] };
      scalar **sub_rval[4] = { rval[0], rval[1], rval[2], rval[3] };
      std::vector<TrfShapeExp>* sub_svals[4] = { &svals[0], &svals[1], &svals[2], &svals[3] };
      std::vector<TrfShapeExp>* sub_ortho_svals[4] = { &ortho_svals[0], &ortho_svals[1], &ortho_svals[2], &ortho_svals[3] };

      calc_error_cand_element(mode, gip_points, num_gip_points
        , 4, base_element->sons, sub_trfs, sub_rval
        , sub_svals, sub_ortho_svals
        , info_p, perr);
    }
  }
Example #3
  void ProjBasedSelector::calc_error_cand_element(const int mode
    , double3* gip_points, int num_gip_points
    , const int num_sub, Element** sub_domains, Trf** sub_trfs, scalar*** sub_rvals
    , std::vector<TrfShapeExp>** sub_nonortho_svals, std::vector<TrfShapeExp>** sub_ortho_svals
    , const CandsInfo& info
    , CandElemProjError errors_squared
    ) {
    //allocate space
    int max_num_shapes = next_order_shape[mode][current_max_order];
    scalar* right_side = new scalar[max_num_shapes];
    int* shape_inxs = new int[max_num_shapes];
    int* indx = new int[max_num_shapes]; //solver data
    double* d = new double[max_num_shapes]; //solver data
    double** proj_matrix = new_matrix<double>(max_num_shapes, max_num_shapes);
    ProjMatrixCache& proj_matrices = proj_matrix_cache[mode];
    std::vector<ShapeInx>& full_shape_indices = shape_indices[mode];

    //check whether ortho-svals are available
    bool ortho_svals_available = true;
    for(int i = 0; i < num_sub && ortho_svals_available; i++)
      ortho_svals_available &= !sub_ortho_svals[i]->empty();

    //cleanup of the cache
    for(int i = 0; i <= max_shape_inx[mode]; i++) {
      nonortho_rhs_cache[i] = ValueCacheItem<scalar>();
      ortho_rhs_cache[i] = ValueCacheItem<scalar>();
    }

    //calculate for all orders
    double sub_area_corr_coef = 1.0 / num_sub;
    OrderPermutator order_perm(info.min_quad_order, info.max_quad_order, mode == H2D_MODE_TRIANGLE || info.uniform_orders);
    do {
      int quad_order = order_perm.get_quad_order();
      int order_h = H2D_GET_H_ORDER(quad_order), order_v = H2D_GET_V_ORDER(quad_order);

      //build a list of shape indices from the full list
      int num_shapes = 0;
      unsigned int inx_shape = 0;
      while (inx_shape < full_shape_indices.size()) {
        ShapeInx& shape = full_shape_indices[inx_shape];
        if (order_h >= shape.order_h && order_v >= shape.order_v) {
          assert_msg(num_shapes < max_num_shapes, "more shapes than predicted, possible inconsistency");
          shape_inxs[num_shapes] = shape.inx;
          num_shapes++;
        }
        inx_shape++;
      }

      //continue only if there are shapes to process
      if (num_shapes > 0) {
        bool use_ortho = ortho_svals_available && order_perm.get_order_h() == order_perm.get_order_v();
        //error_if(!use_ortho, "Non-ortho"); //DEBUG

        //select a cache
        std::vector< ValueCacheItem<scalar> >& rhs_cache = use_ortho ? ortho_rhs_cache : nonortho_rhs_cache;
        std::vector<TrfShapeExp>** sub_svals = use_ortho ? sub_ortho_svals : sub_nonortho_svals;

        //calculate projection matrix iff no ortho is used
        if (!use_ortho) {
          //error_if(!use_ortho, "Non-ortho"); //DEBUG
          if (proj_matrices[order_h][order_v] == NULL)
            proj_matrices[order_h][order_v] = build_projection_matrix(gip_points, num_gip_points, shape_inxs, num_shapes);
          copy_matrix(proj_matrix, proj_matrices[order_h][order_v], num_shapes, num_shapes); //copy projection matrix because original matrix will be modified
        }

        //build right side (fill cache values that are missing)
        for(int inx_sub = 0; inx_sub < num_sub; inx_sub++) {
          Element* this_sub_domain = sub_domains[inx_sub];
          ElemSubTrf this_sub_trf = { sub_trfs[inx_sub], 1 / sub_trfs[inx_sub]->m[0], 1 / sub_trfs[inx_sub]->m[1] };
          ElemGIP this_sub_gip = { gip_points, num_gip_points, sub_rvals[inx_sub] };
          std::vector<TrfShapeExp>& this_sub_svals = *(sub_svals[inx_sub]);

          for(int k = 0; k < num_shapes; k++) {
            int shape_inx = shape_inxs[k];
            ValueCacheItem<scalar>& shape_rhs_cache = rhs_cache[shape_inx];
            if (!shape_rhs_cache.is_valid()) {
              TrfShapeExp empty_sub_vals;
              ElemSubShapeFunc this_sub_shape = { shape_inx, this_sub_svals.empty() ? empty_sub_vals : this_sub_svals[shape_inx] };
              shape_rhs_cache.set(shape_rhs_cache.get() + evaluate_rhs_subdomain(this_sub_domain, this_sub_gip, this_sub_trf, this_sub_shape));
            }
          }
        }

        //copy values from cache and apply area correction coefficient
        for(int k = 0; k < num_shapes; k++) {
          ValueCacheItem<scalar>& rhs_cache_value = rhs_cache[shape_inxs[k]];
          right_side[k] = sub_area_corr_coef * rhs_cache_value.get();
          rhs_cache_value.mark();
        }

        //solve iff no ortho is used
        if (!use_ortho) {
          //error_if(!use_ortho, "Non-ortho"); //DEBUG
          ludcmp(proj_matrix, num_shapes, indx, d);
          lubksb<scalar>(proj_matrix, num_shapes, indx, right_side);
        }

        //calculate error
        double error_squared = 0;
        for(int inx_sub = 0; inx_sub < num_sub; inx_sub++) {
          Element* this_sub_domain = sub_domains[inx_sub];
          ElemSubTrf this_sub_trf = { sub_trfs[inx_sub], 1 / sub_trfs[inx_sub]->m[0], 1 / sub_trfs[inx_sub]->m[1] };
          ElemGIP this_sub_gip = { gip_points, num_gip_points, sub_rvals[inx_sub] };
          ElemProj elem_proj = { shape_inxs, num_shapes, *(sub_svals[inx_sub]), right_side, quad_order };

          error_squared += evaluate_error_squared_subdomain(this_sub_domain, this_sub_gip, this_sub_trf, elem_proj);
        }
        errors_squared[order_h][order_v] = error_squared * sub_area_corr_coef; //apply area correction coefficient
      }
    } while (order_perm.next());

    //cleanup
    delete[] proj_matrix;
    delete[] right_side;
    delete[] shape_inxs;
    delete[] indx;
    delete[] d;
  }
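The "solve iff no ortho is used" logic above is the usual L2-projection pattern: with selected shape functions \varphi_i and the reference solution u, the coefficients c_j satisfy the normal equations

    \sum_j (\varphi_i, \varphi_j)\, c_j = (u, \varphi_i),

where the matrix cached by build_projection_matrix() is presumably the Gram matrix (\varphi_i, \varphi_j), and right_side accumulates the load terms (u, \varphi_i) over the sub-domains, scaled by the 1/num_sub area-correction coefficient. When orthonormalized shape values are available the Gram matrix reduces to the identity, so right_side already holds the coefficients and the ludcmp()/lubksb() solve is skipped; errors_squared[order_h][order_v] is then the sum of the per-sub-domain squared projection errors, scaled by the same coefficient.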
Example #4
/* Compile a polygon context into a polygon header that is affected by
   modifier volumes */
void pvr_poly_mod_compile(pvr_poly_mod_hdr_t *dst, pvr_poly_cxt_t *src) {
    int u, v;
    uint32  txr_base;

    /* Basically we just take each parameter, clip it, shift it
       into place, and OR it into the final result. */

    /* The base values for CMD */
    dst->cmd = PVR_CMD_POLYHDR;

    if(src->txr.enable == PVR_TEXTURE_ENABLE)
        dst->cmd |= 8;

    /* Or in the list type, shading type, color and UV formats */
    dst->cmd |= (src->list_type << PVR_TA_CMD_TYPE_SHIFT) & PVR_TA_CMD_TYPE_MASK;
    dst->cmd |= (src->fmt.color << PVR_TA_CMD_CLRFMT_SHIFT) & PVR_TA_CMD_CLRFMT_MASK;
    dst->cmd |= (src->gen.shading << PVR_TA_CMD_SHADE_SHIFT) & PVR_TA_CMD_SHADE_MASK;
    dst->cmd |= (src->fmt.uv << PVR_TA_CMD_UVFMT_SHIFT) & PVR_TA_CMD_UVFMT_MASK;
    dst->cmd |= (src->gen.clip_mode << PVR_TA_CMD_USERCLIP_SHIFT) & PVR_TA_CMD_USERCLIP_MASK;
    dst->cmd |= (src->fmt.modifier << PVR_TA_CMD_MODIFIER_SHIFT) & PVR_TA_CMD_MODIFIER_MASK;
    dst->cmd |= (src->gen.modifier_mode << PVR_TA_CMD_MODIFIERMODE_SHIFT) & PVR_TA_CMD_MODIFIERMODE_MASK;
    dst->cmd |= (src->gen.specular << PVR_TA_CMD_SPECULAR_SHIFT) & PVR_TA_CMD_SPECULAR_MASK;

    /* Polygon mode 1 */
    dst->mode1  = (src->depth.comparison << PVR_TA_PM1_DEPTHCMP_SHIFT) & PVR_TA_PM1_DEPTHCMP_MASK;
    dst->mode1 |= (src->gen.culling << PVR_TA_PM1_CULLING_SHIFT) & PVR_TA_PM1_CULLING_MASK;
    dst->mode1 |= (src->depth.write << PVR_TA_PM1_DEPTHWRITE_SHIFT) & PVR_TA_PM1_DEPTHWRITE_MASK;
    dst->mode1 |= (src->txr.enable << PVR_TA_PM1_TXRENABLE_SHIFT) & PVR_TA_PM1_TXRENABLE_MASK;

    /* Polygon mode 2 (outside volume) */
    dst->mode2_0  = (src->blend.src << PVR_TA_PM2_SRCBLEND_SHIFT) & PVR_TA_PM2_SRCBLEND_MASK;
    dst->mode2_0 |= (src->blend.dst << PVR_TA_PM2_DSTBLEND_SHIFT) & PVR_TA_PM2_DSTBLEND_MASK;
    dst->mode2_0 |= (src->blend.src_enable << PVR_TA_PM2_SRCENABLE_SHIFT) & PVR_TA_PM2_SRCENABLE_MASK;
    dst->mode2_0 |= (src->blend.dst_enable << PVR_TA_PM2_DSTENABLE_SHIFT) & PVR_TA_PM2_DSTENABLE_MASK;
    dst->mode2_0 |= (src->gen.fog_type << PVR_TA_PM2_FOG_SHIFT) & PVR_TA_PM2_FOG_MASK;
    dst->mode2_0 |= (src->gen.color_clamp << PVR_TA_PM2_CLAMP_SHIFT) & PVR_TA_PM2_CLAMP_MASK;
    dst->mode2_0 |= (src->gen.alpha << PVR_TA_PM2_ALPHA_SHIFT) & PVR_TA_PM2_ALPHA_MASK;

    if(src->txr.enable == PVR_TEXTURE_DISABLE) {
        dst->mode3_0 = 0;
    }
    else {
        dst->mode2_0 |= (src->txr.alpha << PVR_TA_PM2_TXRALPHA_SHIFT) & PVR_TA_PM2_TXRALPHA_MASK;
        dst->mode2_0 |= (src->txr.uv_flip << PVR_TA_PM2_UVFLIP_SHIFT) & PVR_TA_PM2_UVFLIP_MASK;
        dst->mode2_0 |= (src->txr.uv_clamp << PVR_TA_PM2_UVCLAMP_SHIFT) & PVR_TA_PM2_UVCLAMP_MASK;
        dst->mode2_0 |= (src->txr.filter << PVR_TA_PM2_FILTER_SHIFT) & PVR_TA_PM2_FILTER_MASK;
        dst->mode2_0 |= (src->txr.mipmap_bias << PVR_TA_PM2_MIPBIAS_SHIFT) & PVR_TA_PM2_MIPBIAS_MASK;
        dst->mode2_0 |= (src->txr.env << PVR_TA_PM2_TXRENV_SHIFT) & PVR_TA_PM2_TXRENV_MASK;

        switch(src->txr.width) {
            case 8:
                u = 0;
                break;
            case 16:
                u = 1;
                break;
            case 32:
                u = 2;
                break;
            case 64:
                u = 3;
                break;
            case 128:
                u = 4;
                break;
            case 256:
                u = 5;
                break;
            case 512:
                u = 6;
                break;
            case 1024:
                u = 7;
                break;
            default:
                assert_msg(0, "Invalid texture U size");
                u = 0;
                break;
        }

        switch(src->txr.height) {
            case 8:
                v = 0;
                break;
            case 16:
                v = 1;
                break;
            case 32:
                v = 2;
                break;
            case 64:
                v = 3;
                break;
            case 128:
                v = 4;
                break;
            case 256:
                v = 5;
                break;
            case 512:
                v = 6;
                break;
            case 1024:
                v = 7;
                break;
            default:
                assert_msg(0, "Invalid texture V size");
                v = 0;
                break;
        }

        dst->mode2_0 |= (u << PVR_TA_PM2_USIZE_SHIFT) & PVR_TA_PM2_USIZE_MASK;
        dst->mode2_0 |= (v << PVR_TA_PM2_VSIZE_SHIFT) & PVR_TA_PM2_VSIZE_MASK;

        /* Polygon mode 3 (outside volume) */
        dst->mode3_0  = (src->txr.mipmap << PVR_TA_PM3_MIPMAP_SHIFT) & PVR_TA_PM3_MIPMAP_MASK;
        dst->mode3_0 |= (src->txr.format << PVR_TA_PM3_TXRFMT_SHIFT) & PVR_TA_PM3_TXRFMT_MASK;

        /* Convert the texture address */
        txr_base = (uint32)src->txr.base;
        txr_base = (txr_base & 0x00fffff8) >> 3;
        dst->mode3_0 |= txr_base;
    }

    /* Polygon mode 2 (within volume) */
    dst->mode2_1  = (src->blend.src2 << PVR_TA_PM2_SRCBLEND_SHIFT) & PVR_TA_PM2_SRCBLEND_MASK;
    dst->mode2_1 |= (src->blend.dst2 << PVR_TA_PM2_DSTBLEND_SHIFT) & PVR_TA_PM2_DSTBLEND_MASK;
    dst->mode2_1 |= (src->blend.src_enable2 << PVR_TA_PM2_SRCENABLE_SHIFT) & PVR_TA_PM2_SRCENABLE_MASK;
    dst->mode2_1 |= (src->blend.dst_enable2 << PVR_TA_PM2_DSTENABLE_SHIFT) & PVR_TA_PM2_DSTENABLE_MASK;
    dst->mode2_1 |= (src->gen.fog_type2 << PVR_TA_PM2_FOG_SHIFT) & PVR_TA_PM2_FOG_MASK;
    dst->mode2_1 |= (src->gen.color_clamp2 << PVR_TA_PM2_CLAMP_SHIFT) & PVR_TA_PM2_CLAMP_MASK;
    dst->mode2_1 |= (src->gen.alpha2 << PVR_TA_PM2_ALPHA_SHIFT) & PVR_TA_PM2_ALPHA_MASK;

    if(src->txr2.enable == PVR_TEXTURE_DISABLE) {
        dst->mode3_1 = 0;
    }
    else {
        dst->mode2_1 |= (src->txr2.alpha << PVR_TA_PM2_TXRALPHA_SHIFT) & PVR_TA_PM2_TXRALPHA_MASK;
        dst->mode2_1 |= (src->txr2.uv_flip << PVR_TA_PM2_UVFLIP_SHIFT) & PVR_TA_PM2_UVFLIP_MASK;
        dst->mode2_1 |= (src->txr2.uv_clamp << PVR_TA_PM2_UVCLAMP_SHIFT) & PVR_TA_PM2_UVCLAMP_MASK;
        dst->mode2_1 |= (src->txr2.filter << PVR_TA_PM2_FILTER_SHIFT) & PVR_TA_PM2_FILTER_MASK;
        dst->mode2_1 |= (src->txr2.mipmap_bias << PVR_TA_PM2_MIPBIAS_SHIFT) & PVR_TA_PM2_MIPBIAS_MASK;
        dst->mode2_1 |= (src->txr2.env << PVR_TA_PM2_TXRENV_SHIFT) & PVR_TA_PM2_TXRENV_MASK;

        switch(src->txr2.width) {
            case 8:
                u = 0;
                break;
            case 16:
                u = 1;
                break;
            case 32:
                u = 2;
                break;
            case 64:
                u = 3;
                break;
            case 128:
                u = 4;
                break;
            case 256:
                u = 5;
                break;
            case 512:
                u = 6;
                break;
            case 1024:
                u = 7;
                break;
            default:
                assert_msg(0, "Invalid texture U size");
                u = 0;
                break;
        }

        switch(src->txr2.height) {
            case 8:
                v = 0;
                break;
            case 16:
                v = 1;
                break;
            case 32:
                v = 2;
                break;
            case 64:
                v = 3;
                break;
            case 128:
                v = 4;
                break;
            case 256:
                v = 5;
                break;
            case 512:
                v = 6;
                break;
            case 1024:
                v = 7;
                break;
            default:
                assert_msg(0, "Invalid texture V size");
                v = 0;
                break;
        }

        dst->mode2_1 |= (u << PVR_TA_PM2_USIZE_SHIFT) & PVR_TA_PM2_USIZE_MASK;
        dst->mode2_1 |= (v << PVR_TA_PM2_VSIZE_SHIFT) & PVR_TA_PM2_VSIZE_MASK;

        /* Polygon mode 3 (within volume) */
        dst->mode3_1  = (src->txr2.mipmap << PVR_TA_PM3_MIPMAP_SHIFT) & PVR_TA_PM3_MIPMAP_MASK;
        dst->mode3_1 |= (src->txr2.format << PVR_TA_PM3_TXRFMT_SHIFT) & PVR_TA_PM3_TXRFMT_MASK;

        /* Convert the texture address */
        txr_base = (uint32)src->txr2.base;
        txr_base = (txr_base & 0x00fffff8) >> 3;
        dst->mode3_1 |= txr_base;
    }

    dst->d1 = dst->d2 = 0xffffffff;
}
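A minimal submission sketch for the header compiled above (hedged: pvr_poly_cxt_col_mod(), pvr_prim(), pvr_list_begin() and PVR_LIST_OP_POLY are assumed from the KOS PVR API and are not shown in this example):

pvr_poly_cxt_t cxt;
pvr_poly_mod_hdr_t hdr;

pvr_poly_cxt_col_mod(&cxt, PVR_LIST_OP_POLY); /* untextured, modifier-affected context */
pvr_poly_mod_compile(&hdr, &cxt);             /* pack both volumes into the TA header */
pvr_prim(&hdr, sizeof(hdr));                  /* submit inside an open pvr_list_begin() */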
Example #5
void pvr_sprite_compile(pvr_sprite_hdr_t *dst, pvr_sprite_cxt_t *src) {
    int u, v;
    uint32 txr_base;

    /* Basically we just take each parameter, clip it, shift it
       into place, and OR it into the final result. */

    /* The base values for CMD */
    dst->cmd = PVR_CMD_SPRITE;

    if(src->txr.enable == PVR_TEXTURE_ENABLE)
        dst->cmd |= 8;

    /* Or in the list type, clipping mode, and UV formats */
    dst->cmd |= (src->list_type << PVR_TA_CMD_TYPE_SHIFT) & PVR_TA_CMD_TYPE_MASK;
    dst->cmd |= (PVR_UVFMT_16BIT << PVR_TA_CMD_UVFMT_SHIFT) & PVR_TA_CMD_UVFMT_MASK;
    dst->cmd |= (src->gen.clip_mode << PVR_TA_CMD_USERCLIP_SHIFT) & PVR_TA_CMD_USERCLIP_MASK;
    dst->cmd |= (src->gen.specular << PVR_TA_CMD_SPECULAR_SHIFT) & PVR_TA_CMD_SPECULAR_MASK;

    /* Polygon mode 1 */
    dst->mode1  = (src->depth.comparison << PVR_TA_PM1_DEPTHCMP_SHIFT) & PVR_TA_PM1_DEPTHCMP_MASK;
    dst->mode1 |= (src->gen.culling << PVR_TA_PM1_CULLING_SHIFT) & PVR_TA_PM1_CULLING_MASK;
    dst->mode1 |= (src->depth.write << PVR_TA_PM1_DEPTHWRITE_SHIFT) & PVR_TA_PM1_DEPTHWRITE_MASK;
    dst->mode1 |= (src->txr.enable << PVR_TA_PM1_TXRENABLE_SHIFT) & PVR_TA_PM1_TXRENABLE_MASK;

    /* Polygon mode 2 */
    dst->mode2  = (src->blend.src << PVR_TA_PM2_SRCBLEND_SHIFT) & PVR_TA_PM2_SRCBLEND_MASK;
    dst->mode2 |= (src->blend.dst << PVR_TA_PM2_DSTBLEND_SHIFT) & PVR_TA_PM2_DSTBLEND_MASK;
    dst->mode2 |= (src->blend.src_enable << PVR_TA_PM2_SRCENABLE_SHIFT) & PVR_TA_PM2_SRCENABLE_MASK;
    dst->mode2 |= (src->blend.dst_enable << PVR_TA_PM2_DSTENABLE_SHIFT) & PVR_TA_PM2_DSTENABLE_MASK;
    dst->mode2 |= (src->gen.fog_type << PVR_TA_PM2_FOG_SHIFT) & PVR_TA_PM2_FOG_MASK;
    dst->mode2 |= (src->gen.color_clamp << PVR_TA_PM2_CLAMP_SHIFT) & PVR_TA_PM2_CLAMP_MASK;
    dst->mode2 |= (src->gen.alpha << PVR_TA_PM2_ALPHA_SHIFT) & PVR_TA_PM2_ALPHA_MASK;

    if(src->txr.enable == PVR_TEXTURE_DISABLE)  {
        dst->mode3 = 0;
    }
    else    {
        dst->mode2 |= (src->txr.alpha << PVR_TA_PM2_TXRALPHA_SHIFT) & PVR_TA_PM2_TXRALPHA_MASK;
        dst->mode2 |= (src->txr.uv_flip << PVR_TA_PM2_UVFLIP_SHIFT) & PVR_TA_PM2_UVFLIP_MASK;
        dst->mode2 |= (src->txr.uv_clamp << PVR_TA_PM2_UVCLAMP_SHIFT) & PVR_TA_PM2_UVCLAMP_MASK;
        dst->mode2 |= (src->txr.filter << PVR_TA_PM2_FILTER_SHIFT) & PVR_TA_PM2_FILTER_MASK;
        dst->mode2 |= (src->txr.mipmap_bias << PVR_TA_PM2_MIPBIAS_SHIFT) & PVR_TA_PM2_MIPBIAS_MASK;
        dst->mode2 |= (src->txr.env << PVR_TA_PM2_TXRENV_SHIFT) & PVR_TA_PM2_TXRENV_MASK;

        switch(src->txr.width) {
            case 8:
                u = 0;
                break;
            case 16:
                u = 1;
                break;
            case 32:
                u = 2;
                break;
            case 64:
                u = 3;
                break;
            case 128:
                u = 4;
                break;
            case 256:
                u = 5;
                break;
            case 512:
                u = 6;
                break;
            case 1024:
                u = 7;
                break;
            default:
                assert_msg(0, "Invalid texture U size");
                u = 0;
                break;
        }

        switch(src->txr.height) {
            case 8:
                v = 0;
                break;
            case 16:
                v = 1;
                break;
            case 32:
                v = 2;
                break;
            case 64:
                v = 3;
                break;
            case 128:
                v = 4;
                break;
            case 256:
                v = 5;
                break;
            case 512:
                v = 6;
                break;
            case 1024:
                v = 7;
                break;
            default:
                assert_msg(0, "Invalid texture V size");
                v = 0;
                break;
        }

        dst->mode2 |= (u << PVR_TA_PM2_USIZE_SHIFT) & PVR_TA_PM2_USIZE_MASK;
        dst->mode2 |= (v << PVR_TA_PM2_VSIZE_SHIFT) & PVR_TA_PM2_VSIZE_MASK;

        /* Polygon mode 3 */
        dst->mode3  = (src->txr.mipmap << PVR_TA_PM3_MIPMAP_SHIFT) & PVR_TA_PM3_MIPMAP_MASK;
        dst->mode3 |= (src->txr.format << PVR_TA_PM3_TXRFMT_SHIFT) & PVR_TA_PM3_TXRFMT_MASK;

        txr_base = (uint32)src->txr.base;
        txr_base = (txr_base & 0x00fffff8) >> 3;
        dst->mode3 |= txr_base;
    }

    dst->argb = 0xFFFFFFFF;
    dst->oargb = 0x00000000;
}
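A minimal usage sketch (hedged: pvr_sprite_cxt_txr(), pvr_mem_malloc() and pvr_prim() are assumed from the KOS PVR API; the 256x256 ARGB4444 texture is assumed to be already uploaded to PVR memory at txr):

pvr_sprite_cxt_t cxt;
pvr_sprite_hdr_t hdr;
pvr_ptr_t txr = pvr_mem_malloc(256 * 256 * 2);

pvr_sprite_cxt_txr(&cxt, PVR_LIST_TR_POLY, PVR_TXRFMT_ARGB4444,
                   256, 256, txr, PVR_FILTER_BILINEAR);
pvr_sprite_compile(&hdr, &cxt);  /* fills cmd, mode1-3, argb, oargb */
pvr_prim(&hdr, sizeof(hdr));     /* followed by sprite vertex data */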
Example #6
void Vectorizer::calc_vertices_aabb(double* min_x, double* max_x, double* min_y, double* max_y) const {
  assert_msg(verts != NULL, "Cannot calculate AABB from NULL vertices");
  calc_aabb(&verts[0][0], &verts[0][1], sizeof(double4), nv, min_x, max_x, min_y, max_y);
}
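A minimal call sketch (hedged: vec stands for a Vectorizer whose vertex array has already been filled, e.g. by a prior process_solution() call; the surrounding names are illustrative only):

double min_x, max_x, min_y, max_y;
vec.calc_vertices_aabb(&min_x, &max_x, &min_y, &max_y);
printf("AABB: [%g, %g] x [%g, %g]\n", min_x, max_x, min_y, max_y);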
Example #7
File: Aggregate.cpp  Project: alviano/wasp
bool
Aggregate::onLiteralFalse(
    Solver& solver,
    Literal currentLiteral,
    PropagatorData p )
{
    int position = p.position();
    assert_msg( abs( position ) > 0 && abs( position ) < static_cast< int >( literals.size() ), abs( position ) << " >= " << literals.size() );
    assert_msg( currentLiteral == ( position < 0 ? literals[ -position ].getOppositeLiteral() : literals[ position ] ), currentLiteral << " != " << ( position < 0 ? literals[ -position ].getOppositeLiteral() : literals[ position ] ) );
    trace_msg( aggregates, 10, "Aggregate: " << *this << ". Literal: " << currentLiteral.getOppositeLiteral() << " is true. Position: " << position );
    int ac = ( position < 0 ? POS : NEG );
    Literal aggrLiteral = ( ac == POS ? literals[ 1 ].getOppositeLiteral() : literals[ 1 ] );
    
    if( solver.isTrue( aggrLiteral ) || active + ac == 0 )
    {        
        trace_msg( aggregates, 2, "Return. AggrLiteral: " << aggrLiteral << " - Active: " << active << " - Ac: " << ac );
        return false;
    }
    
    unsigned int index = ( position > 0 ? position : -position ); 
    int64_t& counter = ( position > 0 ? counterW2 : counterW1 );

    trace_msg( aggregates, 2, "Updating counter. Old value: " << counter << " - New value: " << counter - weights[ index ] );
    
    if( counter < ( int64_t ) weights[ index ] )
    {
        assert_msg( solver.getDecisionLevel( currentLiteral ) == 0, "Literal " << currentLiteral << " in " << *this << " has a decision level " << solver.getDecisionLevel( currentLiteral ) );
        trace_msg( aggregates, 3, "A conflict happened." );        
        solver.assignLiteral( currentLiteral, this );
        return false;
    }
    assert( counter >= ( int64_t ) weights[ index ] );
    counter -= weights[ index ];
    watched[ index ] = false;
    
    if( solver.getDecisionLevel( currentLiteral ) != 0 )
        trail.push_back( position );

    trace_msg( aggregates, 2, "Umax: " << umax << " - size: " << size() );
    while( umax < literals.size() && ( int64_t ) weights[ umax ] > counter )
    {
        if( watched[ umax ] )
        {
            if( literalOfUnroll == Literal::null )
                literalOfUnroll = currentLiteral;
            active = ac;
            Literal lit = ( ac == POS ? literals[ umax ].getOppositeLiteral() : literals[ umax ] );
            if( !solver.isTrue( lit ) )
            {                
                //Maybe we don't need to add the position of this literal
                trail.push_back( umax * ac );
            
                trace_msg( aggregates, 9, "Inferring " << lit << " as true" );
//                createClauseFromTrail( lit );
                solver.assignLiteral( lit, this );
                if( solver.conflictDetected() )
                    return true;
            }
            else
            {
                trace_msg( aggregates, 9, "Skipping true literal " << lit );
            }
        }
        
        ++umax;
        trace_msg( aggregates, 3, "Updated umax. New Value: " << umax );        
    }
    
    return true;
}
Example #8
uint64_t exclusive_MESIBottomCC::processAccess(Address lineAddr, uint32_t lineId, AccessType type, uint64_t cycle, uint32_t srcId, uint32_t flags) {
    uint64_t respCycle = cycle;
    if ((int) lineId == -1){
        assert( type == GETS || type == GETX );
        if (type == GETS) profGETSMiss.inc();
        else profGETXMissIM.inc();
        if (!(flags & MemReq::INNER_COPY)){ //i.e. if line was found in inner levels in case of excl llc
           MESIState dummyState = I; // does this affect race conditions ?
           MemReq req = {lineAddr, type, selfId, &dummyState, cycle, &ccLock, dummyState , srcId, flags};
           uint32_t parentId = getParentId(lineAddr);
           uint32_t nextLevelLat = parents[parentId]->access(req) - cycle;
           uint32_t netLat = parentRTTs[parentId];
           profGETNextLevelLat.inc(nextLevelLat);
           profGETNetLat.inc(netLat);
           respCycle += nextLevelLat + netLat;
        }
        assert_msg(respCycle >= cycle, "XXX %ld %ld", respCycle, cycle);
        return respCycle;
    }

    MESIState* state = &array[lineId];
    switch (type) {
        // A PUTS/PUTX does nothing w.r.t. higher coherence levels --- it dies here
        case PUTS: //Clean writeback, nothing to do (except profiling)
            assert(*state == I); //we can't assert this, as a copy of the data
                                 //may still be there in the cache from
                                 //somewhere else
            if (flags & MemReq::INNER_COPY) { assert(*state == I); }
            else *state = E; //receive the data in exclusive state
                             //for multithreaded applications, may need to
                             //receive data in shared state also
            profPUTS.inc();
            break;
        case PUTX: //Dirty writeback
            assert(*state == I);
            //Silent transition, record that block was written to
            if (flags & MemReq::INNER_COPY) { assert(*state == I); }
            else
                *state = M;
            profPUTX.inc();
            break;
        case GETS:
            if (*state == I && (!(flags & MemReq::INNER_COPY))) {
                uint32_t parentId = getParentId(lineAddr);
                MESIState dummyState = I; // does this affect race conditions ?
                MemReq req = {lineAddr, GETS, selfId, &dummyState, cycle, &ccLock, dummyState, srcId, flags};
                uint32_t nextLevelLat = parents[parentId]->access(req) - cycle;
                uint32_t netLat = parentRTTs[parentId];
                profGETNextLevelLat.inc(nextLevelLat);
                profGETNetLat.inc(netLat);
                respCycle += nextLevelLat + netLat;
                profGETSMiss.inc();
            } else {
                profGETSHit.inc();
            }
            if (!(flags & MemReq::PREFETCH))
               *state = I;
            else
               *state = E; //this is prefetched data, brought in E state
            break;

        case GETX:
            if ((*state == I || *state == S) && (!(flags & MemReq::INNER_COPY)))  {
                //Profile before access, state changes
                if (*state == I) profGETXMissIM.inc();
                else profGETXMissSM.inc();
                uint32_t parentId = getParentId(lineAddr);
                MemReq req = {lineAddr, GETX, selfId, state, cycle, &ccLock, *state, srcId, flags};
                uint32_t nextLevelLat = parents[parentId]->access(req) - cycle;
                uint32_t netLat = parentRTTs[parentId];
                profGETNextLevelLat.inc(nextLevelLat);
                profGETNetLat.inc(netLat);
                respCycle += nextLevelLat + netLat;
            } else { //means state is E or M
                profGETXHit.inc();

            }
            if (!(flags & MemReq::PREFETCH))
                *state=I; //inv because cache is exclusive
            else
                panic("We should not do GETX for exclusive LLC, which is a prefetch");
            break;

        default: panic("!?");
    }
    assert_msg(respCycle >= cycle, "XXX %ld %ld", respCycle, cycle);
    return respCycle;
}