예제 #1
0
파일: tdfx_tex.c 프로젝트: aosm/X11
/*
 * Compute various texture image parameters.
 * Input:  w, h - source texture width and height
 * Output:  lodlevel - Glide lod level token for the larger texture dimension
 *          aspectratio - Glide aspect ratio token
 *          sscale - S scale factor used during triangle setup
 *          tscale - T scale factor used during triangle setup
 *          wscale - OpenGL -> Glide image width scale factor
 *          hscale - OpenGL -> Glide image height scale factor
 *
 * Sample results:
 *      w    h       lodlevel               aspectRatio
 *     128  128  GR_LOD_LOG2_128 (=7)  GR_ASPECT_LOG2_1x1 (=0)
 *      64   64  GR_LOD_LOG2_64 (=6)   GR_ASPECT_LOG2_1x1 (=0)
 *      64   32  GR_LOD_LOG2_64 (=6)   GR_ASPECT_LOG2_2x1 (=1)
 *      32   64  GR_LOD_LOG2_64 (=6)   GR_ASPECT_LOG2_1x2 (=-1)
 *      32   32  GR_LOD_LOG2_32 (=5)   GR_ASPECT_LOG2_1x1 (=0)
 */
static void
tdfxTexGetInfo(const GLcontext *ctx, int w, int h,
               GrLOD_t *lodlevel, GrAspectRatio_t *aspectratio,
               float *sscale, float *tscale,
               int *wscale, int *hscale)
{
    int logw, logh, ar, lod, ws, hs;
    float s, t;

    ASSERT(w >= 1);
    ASSERT(h >= 1);

    logw = logbase2(w);
    logh = logbase2(h);
    ar = logw - logh;  /* aspect ratio = difference in log dimensions */

    /* Hardware only allows a maximum aspect ratio of 8x1, so handle
       |ar| > 3 by scaling the image and using an 8x1 aspect ratio */
    if (ar >= 0) {
        /* Width is the larger (or equal) dimension here.
           BUG FIX: original asserted on undeclared identifiers
           'width'/'height'; the parameters are named 'w'/'h'. */
        ASSERT(w >= h);
        lod = logw;
        s = 256.0;
        ws = 1;
        if (ar <= GR_ASPECT_LOG2_8x1) {
            /* Aspect fits in hardware range: T spans 256 >> ar texels. */
            t = 256 >> ar;
            hs = 1;
        }
예제 #2
0
int main()
{
    int caseIdx, cases;
    long long int n, m, k, moveCount, logSum;

    /* Number of test cases, then one (n, m, k) triple per case. */
    cases = scan();
    for (caseIdx = 1; caseIdx <= cases; caseIdx++) {
        n = scan();
        m = scan();
        k = scan();

        /* Answer 1: total cells minus one.
           Answer 2: sum of floor(log2) of each dimension. */
        moveCount = n * m * k - 1;
        logSum = logbase2(n) + logbase2(m) + logbase2(k);

        printf("Case #%d: %lld %lld\n", caseIdx, moveCount, logSum);
    }
    return 0;
}
예제 #3
0
파일: memzone.c 프로젝트: tdz/opsys
int
memzone_init(struct memzone *mz,
             struct vmem *as, enum vmem_area_name areaname)
{
        const struct vmem_area *area;
        ssize_t pgindex;
        size_t memsz;           /* size of the zone's memory, in bytes */

        /* Look up the backing area and compute how much memory it covers.
           NOTE(review): assumes the named area always exists — the lookup
           result is dereferenced unchecked; confirm against callers. */
        area = vmem_area_get_by_name(areaname);
        memsz = page_memory(area->npages);

        /* Map one largepage (1024 pages) worth of flag storage. */
        pgindex = vmem_helper_alloc_pages_in_area(as, areaname, 1024,
                                                  PTE_FLAG_PRESENT |
                                                  PTE_FLAG_WRITEABLE);
        if (pgindex < 0)
                return pgindex; /* negative error code from the allocator */

        mz->flagslen = 1024 * PAGE_SIZE;
        mz->flags = page_address(pgindex);

        /* First estimate the chunk count from the flag bitmap capacity,
           size chunks to the next power of two above memsz/nchunks, then
           recompute the exact chunk count for that chunk size. */
        mz->nchunks = BITS_PER_LARGEPAGE / BITS_PER_PAGE;
        mz->chunksize = 1 << (logbase2(memsz / mz->nchunks) + 1);
        mz->nchunks = memsz / mz->chunksize;

        mz->as = as;
        mz->areaname = areaname;

        /* All chunks start out unallocated. */
        memset(mz->flags, 0, mz->flagslen);

        console_printf("%s:%x mz->chunksize=%x\n", __FILE__, __LINE__,
                       mz->chunksize);

        return 0;
}
/* Otherwise, store it in memory if (Border != 0) or (any dimension ==
 * 1).
 *    
 * Otherwise, if max_level >= level >= min_level, create tree with
 * space for textures from min_level down to max_level.
 *
 * Otherwise, create tree with space for textures from (level
 * 0)..(1x1).  Consider pruning this tree at a validation if the
 * saving is worth it.
 */
static void
guess_and_alloc_mipmap_tree(struct intel_context *intel,
                            struct intel_texture_object *intelObj,
                            struct intel_texture_image *intelImage)
{
   GLuint baseLevel;
   GLuint topLevel;
   GLuint w = intelImage->base.Width;
   GLuint h = intelImage->base.Height;
   GLuint d = intelImage->base.Depth;
   GLuint lvl, cbytes = 0;

   DBG("%s\n", __FUNCTION__);

   /* Bordered images never go into a miptree. */
   if (intelImage->base.Border)
      return;

   /* A 1-texel dimension on a non-base level gives no information about
    * the base-level size, so don't guess from it. */
   if (intelImage->level > intelObj->base.BaseLevel &&
       (intelImage->base.Width == 1 ||
        (intelObj->base.Target != GL_TEXTURE_1D &&
         intelImage->base.Height == 1) ||
        (intelObj->base.Target == GL_TEXTURE_3D &&
         intelImage->base.Depth == 1)))
      return;

   /* If this image disrespects BaseLevel, allocate from level zero;
    * otherwise start the tree at BaseLevel (usually 0). */
   baseLevel = (intelImage->level < intelObj->base.BaseLevel)
      ? 0 : intelObj->base.BaseLevel;

   /* Scale this image's dimensions up to what they'd be at baseLevel. */
   for (lvl = intelImage->level; lvl > baseLevel; lvl--) {
      w <<= 1;
      if (h != 1)
         h <<= 1;
      if (d != 1)
         d <<= 1;
   }

   /* Guess a reasonable last level: with non-mipmapped filtering at the
    * base level only one level is needed; otherwise reserve the full
    * chain down to 1x1.  The guess may be wrong and force reallocation
    * later. */
   if ((intelObj->base.MinFilter == GL_NEAREST ||
        intelObj->base.MinFilter == GL_LINEAR) &&
       intelImage->level == baseLevel) {
      topLevel = baseLevel;
   }
   else {
      topLevel = baseLevel +
         MAX2(MAX2(logbase2(w), logbase2(h)), logbase2(d));
   }

   assert(!intelObj->mt);

   if (intelImage->base.IsCompressed)
      cbytes = intel_compressed_num_bytes(intelImage->base.TexFormat->MesaFormat);

   intelObj->mt = intel_miptree_create(intel,
                                       intelObj->base.Target,
                                       intelImage->base.InternalFormat,
                                       baseLevel,
                                       topLevel,
                                       w,
                                       h,
                                       d,
                                       intelImage->base.TexFormat->TexelBytes,
                                       cbytes);

   DBG("%s - success\n", __FUNCTION__);
}