Example #1
void lru_put_into_cache(char *name, char *ip){

    int index, temp;
    node *ptr = get(name);
    if(ptr){
        printf("strange, it should not exist\n");
        return;
    }
    if(ehead){
        /* Take a free node from the empty list and grow the cache. */
        ptr = ehead;
        if(ehead == etail)
            ehead = etail = NULL;
        else{
            ehead = ehead->next;
            ptr->next = NULL;
        }
        index = cache_num++;
    }
    else{
        /* Cache is full: evict the least-recently-used node at the tail
         * (assumes the list holds at least two nodes) and reuse its slot. */
        ptr = tail;
        tail = tail->prev;
        tail->next = NULL;
        ptr->prev = NULL;
        temp = ptr->hash_index;
        removebykey(temp);
        index = ptr->cache_index;
    }
    cache[index] = ip;
    ptr->cache_index = index;
    temp = put(name, ptr);
    ptr->hash_index = temp;
    move_to_head(ptr);
}
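/*
 * Every example in this list calls move_to_head() without defining it.  For
 * the hand-rolled LRU list above, a minimal sketch could look like the
 * following; the node layout and the global head/tail names are assumptions
 * inferred from the calling code, not the project's actual definitions.
 */
typedef struct node {
    struct node *prev, *next;
    int cache_index;            /* hypothetical: slot in cache[] */
    int hash_index;             /* hypothetical: slot in the name hash table */
} node;

static node *head, *tail;       /* MRU and LRU ends of the list */

static void move_to_head(node *ptr){
    if(ptr == head)
        return;                 /* already the most recently used */
    /* Unlink from the current position, if linked. */
    if(ptr->prev)
        ptr->prev->next = ptr->next;
    if(ptr->next)
        ptr->next->prev = ptr->prev;
    if(ptr == tail)
        tail = ptr->prev;
    /* Relink at the head. */
    ptr->prev = NULL;
    ptr->next = head;
    if(head)
        head->prev = ptr;
    head = ptr;
    if(!tail)
        tail = ptr;             /* list was empty */
}

Example #2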
/**
 * Update fragment state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void 
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant = NULL;
   struct lp_fs_variant_list_item *li;

   make_variant_key(lp, shader, &key);

   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      if (memcmp(&li->base->key, &key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      move_to_head(&lp->fs_variants_list, &variant->list_item_global);
   }
   else {
      int64_t t0, t1;
      int64_t dt;
      unsigned i;
      if (lp->nr_fs_variants >= LP_MAX_SHADER_VARIANTS) {
         struct pipe_context *pipe = &lp->pipe;

         /*
          * XXX: we need to flush the context until we have some sort of
          * reference counting in fragment shaders as they may still be
          * binned.  Flushing alone might not be sufficient; we need to
          * wait on it too.
          */
         llvmpipe_finish(pipe, __FUNCTION__);

         /* Free 25% of the LRU to avoid using too much memory. */
         for (i = 0; i < LP_MAX_SHADER_VARIANTS / 4; i++) {
            struct lp_fs_variant_list_item *item;
            if (is_empty_list(&lp->fs_variants_list))
               break;
            item = last_elem(&lp->fs_variants_list);
            remove_shader_variant(lp, item->base);
         }
      }
      t0 = os_time_get();

      variant = generate_variant(lp, shader, &key);

      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */

      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->fs_variants_list, &variant->list_item_global);
         lp->nr_fs_variants++;
         shader->variants_cached++;
      }
   }

   lp_setup_set_fs_variant(lp->setup, variant);
}
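/*
 * The Mesa/Gallium examples above and below use first_elem(), at_end(),
 * next_elem(), last_elem(), is_empty_list(), insert_at_head() and
 * move_to_head().  These are sentinel-based doubly-linked-list macros in the
 * style of Mesa's util/simple_list.h; the sketch below is a rough
 * reconstruction for reference, not the exact upstream header.
 */
#define make_empty_list(sentinel)       \
do {                                    \
   (sentinel)->next = (sentinel);       \
   (sentinel)->prev = (sentinel);       \
} while (0)

#define remove_from_list(elem)          \
do {                                    \
   (elem)->next->prev = (elem)->prev;   \
   (elem)->prev->next = (elem)->next;   \
} while (0)

#define insert_at_head(list, elem)      \
do {                                    \
   (elem)->prev = (list);               \
   (elem)->next = (list)->next;         \
   (list)->next->prev = (elem);         \
   (list)->next = (elem);               \
} while (0)

/* An LRU "touch" is just unlink + reinsert at the head. */
#define move_to_head(list, elem)        \
do {                                    \
   remove_from_list(elem);              \
   insert_at_head(list, elem);          \
} while (0)

#define first_elem(list)    ((list)->next)
#define last_elem(list)     ((list)->prev)
#define next_elem(elem)     ((elem)->next)
#define at_end(list, elem)  ((elem) == (list))
#define is_empty_list(list) ((list)->next == (list))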
Example #3
char* lru_search_cache(char *name){

    int index;
    char *ip;
    node *result = get(name);       /* hash lookup: name -> list node */
    if(!result)
        return NULL;                /* cache miss */
    index = result->cache_index;
    ip = cache[index];
    move_to_head(result);           /* mark as most recently used */
    return ip;
}
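/*
 * A plausible caller for the pair of LRU functions above: search first and
 * insert only on a miss.  resolve_name() and this wrapper are hypothetical;
 * only lru_search_cache() and lru_put_into_cache() come from the examples.
 */
extern char *resolve_name(char *name);   /* hypothetical slow path */

char *lookup(char *name){
    char *ip = lru_search_cache(name);   /* a hit also refreshes LRU order */
    if(!ip){
        ip = resolve_name(name);
        if(ip)
            lru_put_into_cache(name, ip);   /* may evict the LRU entry */
    }
    return ip;
}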
Example #4
File: link.hpp Project: qmc/dsqss
template <class C>
RingIterator<C> Ring<C>::sort_min(){
  RingIterator<C> p1(*this);
  RingIterator<C> p2(*this);
  ++p1; ++p1;
  ++p2;
  /* Walk once around the ring, keeping p2 on the smallest element seen so
   * far and rotating it to the head of the ring. */
  while ( ! p1.atOrigin() ){
    if( ( (*p1) < (*p2) ) || ( (*p1).V_x == (*p2).V_x ) ){
      p2 = p1;
      ++p1;
      move_to_head(p2);
    }else{
      ++p1;
    }
  }
  return p2;
}
Example #5
void *
util_cache_get(struct util_cache *cache,
               const void *key)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return NULL;

   /* Only dereference the cache for its hash callback after the NULL check. */
   hash = cache->hash(key);

   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      return NULL;

   /* A hit counts as a use: bump the entry to the front of the LRU. */
   if (entry->state == FILLED)
      move_to_head(&cache->lru, entry);

   return entry->value;
}
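/*
 * Sketch of how util_cache_get() above might be used, assuming the companion
 * entry points of Gallium's u_cache module (util_cache_create,
 * util_cache_set, util_cache_destroy) with caller-supplied hash, compare and
 * destroy callbacks; treat the exact signatures here as assumptions.
 */
#include <stdlib.h>
#include <string.h>

static uint32_t hash_u32(const void *key)
{
   return *(const uint32_t *)key;            /* toy hash for a u32 key */
}

static int compare_u32(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(uint32_t));    /* 0 when equal */
}

static void destroy_pair(void *key, void *value)
{
   free(key);
   free(value);
}

static void cache_example(void)
{
   struct util_cache *cache =
      util_cache_create(hash_u32, compare_u32, destroy_pair, 32 /* size */);
   uint32_t *key = malloc(sizeof *key);

   *key = 42;
   util_cache_set(cache, key, strdup("cached value"));

   /* Each successful get bumps the entry to the head of the LRU list. */
   (void) util_cache_get(cache, key);

   util_cache_destroy(cache);
}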
static void
llvm_middle_end_prepare_gs(struct llvm_middle_end *fpme)
{
   struct draw_context *draw = fpme->draw;
   struct draw_geometry_shader *gs = draw->gs.geometry_shader;
   struct draw_gs_llvm_variant_key *key;
   struct draw_gs_llvm_variant *variant = NULL;
   struct draw_gs_llvm_variant_list_item *li;
   struct llvm_geometry_shader *shader = llvm_geometry_shader(gs);
   char store[DRAW_GS_LLVM_MAX_VARIANT_KEY_SIZE];
   unsigned i;

   key = draw_gs_llvm_make_variant_key(fpme->llvm, store);

   /* Search shader's list of variants for the key */
   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      /* found the variant, move to head of global list (for LRU) */
      move_to_head(&fpme->llvm->gs_variants_list,
                   &variant->list_item_global);
   }
   else {
      /* Need to create new variant */

      /* First check if we've created too many variants.  If so, free
       * 25% of the LRU to avoid using too much memory.
       */
      if (fpme->llvm->nr_gs_variants >= DRAW_MAX_SHADER_VARIANTS) {
         /*
          * XXX: should we flush here ?
          */
         for (i = 0; i < DRAW_MAX_SHADER_VARIANTS / 4; i++) {
            struct draw_gs_llvm_variant_list_item *item;
            if (is_empty_list(&fpme->llvm->gs_variants_list)) {
               break;
            }
            item = last_elem(&fpme->llvm->gs_variants_list);
            assert(item);
            assert(item->base);
            draw_gs_llvm_destroy_variant(item->base);
         }
      }

      variant = draw_gs_llvm_create_variant(fpme->llvm, gs->info.num_outputs, key);

      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&fpme->llvm->gs_variants_list,
                        &variant->list_item_global);
         fpme->llvm->nr_gs_variants++;
         shader->variants_cached++;
      }
   }

   gs->current_variant = variant;
}
/**
 * Prepare/validate middle part of the vertex pipeline.
 * NOTE: if you change this function, also look at the non-LLVM
 * function fetch_pipeline_prepare() for similar changes.
 */
static void
llvm_middle_end_prepare( struct draw_pt_middle_end *middle,
                         unsigned in_prim,
                         unsigned opt,
                         unsigned *max_vertices )
{
   struct llvm_middle_end *fpme = llvm_middle_end(middle);
   struct draw_context *draw = fpme->draw;
   struct draw_vertex_shader *vs = draw->vs.vertex_shader;
   struct draw_geometry_shader *gs = draw->gs.geometry_shader;
   const unsigned out_prim = gs ? gs->output_primitive :
      u_assembled_prim(in_prim);
   unsigned point_clip = draw->rasterizer->fill_front == PIPE_POLYGON_MODE_POINT ||
                         out_prim == PIPE_PRIM_POINTS;
   unsigned nr;

   fpme->input_prim = in_prim;
   fpme->opt = opt;

   draw_pt_post_vs_prepare( fpme->post_vs,
                            draw->clip_xy,
                            draw->clip_z,
                            draw->clip_user,
                            point_clip ? draw->guard_band_points_xy :
                                         draw->guard_band_xy,
                            draw->identity_viewport,
                            draw->rasterizer->clip_halfz,
                            (draw->vs.edgeflag_output ? TRUE : FALSE) );

   draw_pt_so_emit_prepare( fpme->so_emit, gs == NULL );

   if (!(opt & PT_PIPELINE)) {
      draw_pt_emit_prepare( fpme->emit,
                            out_prim,
                            max_vertices );

      *max_vertices = MAX2( *max_vertices, 4096 );
   }
   else {
      /* limit max fetches by limiting max_vertices */
      *max_vertices = 4096;
   }

   /* Get the number of float[4] attributes per vertex.
    * Note: this must be done after draw_pt_emit_prepare() since that
    * can affect the vertex size.
    */
   nr = MAX2(vs->info.num_inputs, draw_total_vs_outputs(draw));

   /* Always leave room for the vertex header whether we need it or
    * not.  It's hard to get rid of it in particular because of the
    * viewport code in draw_pt_post_vs.c.
    */
   fpme->vertex_size = sizeof(struct vertex_header) + nr * 4 * sizeof(float);

   /* return even number */
   *max_vertices = *max_vertices & ~1;

   /* Find/create the vertex shader variant */
   {
      struct draw_llvm_variant_key *key;
      struct draw_llvm_variant *variant = NULL;
      struct draw_llvm_variant_list_item *li;
      struct llvm_vertex_shader *shader = llvm_vertex_shader(vs);
      char store[DRAW_LLVM_MAX_VARIANT_KEY_SIZE];
      unsigned i;

      key = draw_llvm_make_variant_key(fpme->llvm, store);

      /* Search shader's list of variants for the key */
      li = first_elem(&shader->variants);
      while (!at_end(&shader->variants, li)) {
         if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
            variant = li->base;
            break;
         }
         li = next_elem(li);
      }

      if (variant) {
         /* found the variant, move to head of global list (for LRU) */
         move_to_head(&fpme->llvm->vs_variants_list,
                      &variant->list_item_global);
      }
      else {
         /* Need to create new variant */

         /* First check if we've created too many variants.  If so, free
          * 25% of the LRU to avoid using too much memory.
          */
         if (fpme->llvm->nr_variants >= DRAW_MAX_SHADER_VARIANTS) {
            /*
             * XXX: should we flush here ?
             */
            for (i = 0; i < DRAW_MAX_SHADER_VARIANTS / 4; i++) {
               struct draw_llvm_variant_list_item *item;
               if (is_empty_list(&fpme->llvm->vs_variants_list)) {
                  break;
               }
               item = last_elem(&fpme->llvm->vs_variants_list);
               assert(item);
               assert(item->base);
               draw_llvm_destroy_variant(item->base);
            }
         }

         variant = draw_llvm_create_variant(fpme->llvm, nr, key);

         if (variant) {
            insert_at_head(&shader->variants, &variant->list_item_local);
            insert_at_head(&fpme->llvm->vs_variants_list,
                           &variant->list_item_global);
            fpme->llvm->nr_variants++;
            shader->variants_cached++;
         }
      }

      fpme->current_variant = variant;
   }

   if (gs) {
      llvm_middle_end_prepare_gs(fpme);
   }
}
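/*
 * The vertex_size computation above, worked through: each post-VS vertex is
 * a struct vertex_header followed by nr float[4] attribute slots.  With a
 * hypothetical 16-byte header and nr = 8 attributes:
 *
 *    vertex_size = sizeof(struct vertex_header) + nr * 4 * sizeof(float)
 *                = 16 + 8 * 4 * 4
 *                = 144 bytes per vertex
 *
 * The real sizeof(struct vertex_header) depends on the build configuration.
 */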
static void
llvm_middle_end_prepare( struct draw_pt_middle_end *middle,
                         unsigned in_prim,
                         unsigned opt,
                         unsigned *max_vertices )
{
    struct llvm_middle_end *fpme = (struct llvm_middle_end *)middle;
    struct draw_context *draw = fpme->draw;
    struct llvm_vertex_shader *shader =
        llvm_vertex_shader(draw->vs.vertex_shader);
    char store[DRAW_LLVM_MAX_VARIANT_KEY_SIZE];
    struct draw_llvm_variant_key *key;
    struct draw_llvm_variant *variant = NULL;
    struct draw_llvm_variant_list_item *li;
    unsigned i;
    unsigned instance_id_index = ~0;
    const unsigned out_prim = (draw->gs.geometry_shader ?
                               draw->gs.geometry_shader->output_primitive :
                               in_prim);

    /* Add one to num_outputs because the pipeline occasionally tags on
     * an additional texcoord, eg for AA lines.
     */
    const unsigned nr = MAX2( shader->base.info.num_inputs,
                              shader->base.info.num_outputs + 1 );

    /* Scan for instanceID system value.
     * XXX but we never use instance_id_index?!
     */
    for (i = 0; i < shader->base.info.num_inputs; i++) {
        if (shader->base.info.input_semantic_name[i] == TGSI_SEMANTIC_INSTANCEID) {
            instance_id_index = i;
            break;
        }
    }

    fpme->input_prim = in_prim;
    fpme->opt = opt;

    /* Always leave room for the vertex header whether we need it or
     * not.  It's hard to get rid of it in particular because of the
     * viewport code in draw_pt_post_vs.c.
     */
    fpme->vertex_size = sizeof(struct vertex_header) + nr * 4 * sizeof(float);


    /* XXX: it's not really gl rasterization rules we care about here,
     * but gl vs dx9 clip spaces.
     */
    draw_pt_post_vs_prepare( fpme->post_vs,
                             draw->clip_xy,
                             draw->clip_z,
                             draw->clip_user,
                             draw->identity_viewport,
                             (boolean)draw->rasterizer->gl_rasterization_rules,
                             (draw->vs.edgeflag_output ? TRUE : FALSE) );

    draw_pt_so_emit_prepare( fpme->so_emit );

    if (!(opt & PT_PIPELINE)) {
        draw_pt_emit_prepare( fpme->emit,
                              out_prim,
                              max_vertices );

        *max_vertices = MAX2( *max_vertices, 4096 );
    }
    else {
        /* limit max fetches by limiting max_vertices */
        *max_vertices = 4096;
    }

    /* return even number */
    *max_vertices = *max_vertices & ~1;

    key = draw_llvm_make_variant_key(fpme->llvm, store);

    /* Search shader's list of variants for the key */
    li = first_elem(&shader->variants);
    while (!at_end(&shader->variants, li)) {
        if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
            variant = li->base;
            break;
        }
        li = next_elem(li);
    }

    if (variant) {
        /* found the variant, move to head of global list (for LRU) */
        move_to_head(&fpme->llvm->vs_variants_list, &variant->list_item_global);
    }
    else {
        /* Need to create new variant */

        /* First check if we've created too many variants.  If so, free
         * 25% of the LRU to avoid using too much memory.
         */
        if (fpme->llvm->nr_variants >= DRAW_MAX_SHADER_VARIANTS) {
            /*
             * XXX: should we flush here ?
             */
            for (i = 0; i < DRAW_MAX_SHADER_VARIANTS / 4; i++) {
                struct draw_llvm_variant_list_item *item;
                if (is_empty_list(&fpme->llvm->vs_variants_list))
                    break;
                item = last_elem(&fpme->llvm->vs_variants_list);
                draw_llvm_destroy_variant(item->base);
            }
        }

        variant = draw_llvm_create_variant(fpme->llvm, nr, key);

        if (variant) {
            insert_at_head(&shader->variants, &variant->list_item_local);
            insert_at_head(&fpme->llvm->vs_variants_list, &variant->list_item_global);
            fpme->llvm->nr_variants++;
            shader->variants_cached++;
        }
    }

    fpme->current_variant = variant;

    /* XXX: we only support one constant buffer */
    fpme->llvm->jit_context.vs_constants =
        draw->pt.user.vs_constants[0];
    fpme->llvm->jit_context.gs_constants =
        draw->pt.user.gs_constants[0];
    fpme->llvm->jit_context.planes =
        (float (*) [12][4]) draw->pt.user.planes[0];
    fpme->llvm->jit_context.viewport =
        (float *)draw->viewport.scale;

}
void radeonUpdateTextureState( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[0];

   if ( texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT) ) {
      struct gl_texture_object *tObj = texUnit->_Current;
      radeonTexObjPtr t = (radeonTexObjPtr) tObj->DriverData;

      /* Upload teximages (not pipelined)
       */
      if ( t->dirty_images ) {
	 RADEON_FIREVERTICES( rmesa );
	 radeonSetTexImages( rmesa, tObj );
      }

      /* Update state if this is a different texture object to last
       * time.
       */
      if ( rmesa->state.texture.unit[0].texobj != t ) {
	 rmesa->state.texture.unit[0].texobj = t;
	 t->dirty_state |= 1<<0;
	 move_to_head( &rmesa->texture.objects[0], t );
      }

      if (t->dirty_state) {
	 GLuint *cmd = RADEON_DB_STATE( tex[0] );

	 cmd[TEX_PP_TXFILTER] &= ~TEXOBJ_TXFILTER_MASK;
	 cmd[TEX_PP_TXFORMAT] &= ~TEXOBJ_TXFORMAT_MASK;
	 cmd[TEX_PP_TXFILTER] |= t->pp_txfilter & TEXOBJ_TXFILTER_MASK;
	 cmd[TEX_PP_TXFORMAT] |= t->pp_txformat & TEXOBJ_TXFORMAT_MASK;
	 cmd[TEX_PP_TXOFFSET] = t->pp_txoffset;
	 cmd[TEX_PP_BORDER_COLOR] = t->pp_border_color;
	 
	 RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.tex[0] );
	 t->dirty_state = 0;
      }

      /* Newly enabled?
       */
      if (!(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & RADEON_TEX_0_ENABLE)) {
	 RADEON_STATECHANGE( rmesa, ctx );
	 rmesa->hw.ctx.cmd[CTX_PP_CNTL] |= (RADEON_TEX_0_ENABLE | 
					    RADEON_TEX_BLEND_0_ENABLE);

	 RADEON_STATECHANGE( rmesa, tcl );
	 rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_ST0;
      }

      radeonUpdateTextureEnv( ctx, 0 );
   }
   else if (rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (RADEON_TEX_0_ENABLE<<0)) {
      /* Texture unit disabled */
      rmesa->state.texture.unit[0].texobj = 0;
      RADEON_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= 
	 ~((RADEON_TEX_0_ENABLE | RADEON_TEX_BLEND_0_ENABLE) << 0);

      RADEON_STATECHANGE( rmesa, tcl );
      rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &= ~(RADEON_TCL_VTX_ST0 |
						RADEON_TCL_VTX_Q0);
   }
}
/**
 * \brief Upload texture images.
 *
 * This might require removing our own and/or other client's texture objects to
 * make room for these images.
 *
 * \param rmesa Radeon context.
 * \param tObj texture object to upload.
 *
 * Sets the matching hardware texture format. Calculates which mipmap levels to
 * send, depending of the base image size, GL_TEXTURE_MIN_LOD,
 * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL and the
 * Radeon offset rules. Kicks out textures until the requested texture fits,
 * sets the texture hardware state and, while holding the hardware lock,
 * uploads any images that are new.
 */
static void radeonSetTexImages( radeonContextPtr rmesa,
				struct gl_texture_object *tObj )
{
   radeonTexObjPtr t = (radeonTexObjPtr)tObj->DriverData;
   const struct gl_texture_image *baseImage = tObj->Image[0][tObj->BaseLevel];
   GLint totalSize;
   GLint texelsPerDword = 0, blitWidth = 0, blitPitch = 0;
   GLint x, y, width, height;
   GLint i;
   GLint firstLevel, lastLevel, numLevels;
   GLint log2Width, log2Height;
   GLuint txformat = 0;

   /* This code cannot be reached once we have lost focus
    */
   assert(rmesa->radeonScreen->buffers);

   /* Set the hardware texture format
    */
   switch (baseImage->TexFormat->MesaFormat) {
   case MESA_FORMAT_I8:
      txformat = RADEON_TXFORMAT_I8;
      texelsPerDword = 4;
      blitPitch = 64;
      break;
   case MESA_FORMAT_RGBA8888:
      txformat = RADEON_TXFORMAT_RGBA8888 | RADEON_TXFORMAT_ALPHA_IN_MAP;
      texelsPerDword = 1;
      blitPitch = 16;
      break;
   case MESA_FORMAT_RGB565:
      txformat = RADEON_TXFORMAT_RGB565;
      texelsPerDword = 2;
      blitPitch = 32;
      break;
   default:
      _mesa_problem(NULL, "unexpected texture format in radeonTexImage2D");
      return;
   }

   t->pp_txformat &= ~(RADEON_TXFORMAT_FORMAT_MASK |
		       RADEON_TXFORMAT_ALPHA_IN_MAP);
   t->pp_txformat |= txformat;


   /* Select the larger of the two widths for our global texture image
    * coordinate space.  As the Radeon has very strict offset rules, we
    * can't upload mipmaps directly and have to reference their location
    * from the aligned start of the whole image.
    */
   blitWidth = MAX2( baseImage->Width, blitPitch );

   /* Calculate mipmap offsets and dimensions.
    */
   totalSize = 0;
   x = 0;
   y = 0;

   /* Compute which mipmap levels we really want to send to the hardware.
    * This depends on the base image size, GL_TEXTURE_MIN_LOD,
    * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
    * Yes, this looks overly complicated, but it's all needed.
    */
   firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
   firstLevel = MAX2(firstLevel, tObj->BaseLevel);
   lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
   lastLevel = MAX2(lastLevel, tObj->BaseLevel);
   lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
   lastLevel = MIN2(lastLevel, tObj->MaxLevel);
   lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */

   /* save these values */
   t->firstLevel = firstLevel;
   t->lastLevel = lastLevel;

   numLevels = lastLevel - firstLevel + 1;

   log2Width = tObj->Image[0][firstLevel]->WidthLog2;
   log2Height = tObj->Image[0][firstLevel]->HeightLog2;

   for ( i = 0 ; i < numLevels ; i++ ) {
      const struct gl_texture_image *texImage = tObj->Image[0][i + firstLevel];
      if ( !texImage )
	 break;

      width = texImage->Width;
      height = texImage->Height;

      /* Texture images have a minimum pitch of 32 bytes (half of the
       * 64-byte minimum pitch for blits).  For images that have a
       * width smaller than this, we must pad each texture image
       * scanline out to this amount.
       */
      if ( width < blitPitch / 2 ) {
	 width = blitPitch / 2;
      }

      totalSize += width * height * baseImage->TexFormat->TexelBytes;
      ASSERT( (totalSize & 31) == 0 );

      while ( width < blitWidth && height > 1 ) {
	 width *= 2;
	 height /= 2;
      }

      ASSERT(i < RADEON_MAX_TEXTURE_LEVELS);
      t->image[i].x = x;
      t->image[i].y = y;
      t->image[i].width  = width;
      t->image[i].height = height;

      /* While blits must have a pitch of at least 64 bytes, mipmaps
       * must be aligned on a 32-byte boundary (just like each texture
       * image scanline).
       */
      if ( width >= blitWidth ) {
	 y += height;
      } else {
	 x += width;
	 if ( x >= blitWidth ) {
	    x = 0;
	    y++;
	 }
      }
   }

   /* Align the total size of texture memory block.
    */
   t->totalSize = (totalSize + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

   /* Hardware state:
    */
   t->pp_txfilter &= ~RADEON_MAX_MIP_LEVEL_MASK;
   t->pp_txfilter |= (numLevels - 1) << RADEON_MAX_MIP_LEVEL_SHIFT;

   t->pp_txformat &= ~(RADEON_TXFORMAT_WIDTH_MASK |
		       RADEON_TXFORMAT_HEIGHT_MASK);
   t->pp_txformat |= ((log2Width << RADEON_TXFORMAT_WIDTH_SHIFT) |
		      (log2Height << RADEON_TXFORMAT_HEIGHT_SHIFT));
   t->dirty_state = TEX_ALL;

   /* Update the local texture LRU.
    */
   move_to_head( &rmesa->texture.objects[0], t );

   LOCK_HARDWARE( rmesa );

   /* Kick out textures until the requested texture fits */
   while ( !t->memBlock ) {
      t->memBlock = mmAllocMem( rmesa->texture.heap[0], t->totalSize, 12, 0 );

      if (!t->memBlock)
	 radeonSwapOutTexObj( rmesa, rmesa->texture.objects[0].prev );
   }

   /* Set the base offset of the texture image */
   t->bufAddr = rmesa->radeonScreen->texOffset[0] + t->memBlock->ofs;
   t->pp_txoffset = t->bufAddr;

   /* Upload any images that are new 
    */
   for ( i = 0 ; i < numLevels ; i++ ) {
      if ( t->dirty_images & (1 << i) ) {
	 radeonUploadSubImage( rmesa, t, i, 0, 0,
			       t->image[i].width, t->image[i].height );
      }
   }

   rmesa->texture.age[0] = ++rmesa->sarea->texAge[0]; 
   UNLOCK_HARDWARE( rmesa );
   t->dirty_images = 0;
}
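/*
 * The firstLevel/lastLevel clamping above, worked through with hypothetical
 * values: BaseLevel = 0, MinLod = 1.3, MaxLod = 10.0, MaxLevel = 9, and a
 * 256x256 base image (MaxLog2 = 8):
 *
 *    firstLevel = 0 + (GLint)(1.3 + 0.5)  = 1
 *    lastLevel  = 0 + (GLint)(10.0 + 0.5) = 10
 *    lastLevel  = MIN2(10, 0 + 8) = 8      (a 256x256 image has levels 0..8)
 *    lastLevel  = MIN2(8, 9)      = 8
 *    numLevels  = 8 - 1 + 1       = 8 mipmap levels are sent to hardware
 */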