Example #1
static int cavan_ext4_find_file_handler(struct ext2_desc *desc, void *block, size_t count, struct cavan_ext2_traversal_option *_option)
{
	struct ext2_directory_entry *entry, *entry_end;
	struct cavan_ext4_find_file_option *option = (struct cavan_ext4_find_file_option *) _option;

	entry = block;
	entry_end = ADDR_ADD(entry, desc->block_size * count);

	while (entry < entry_end)
	{
		/* on-disk entry names are not NUL-terminated; terminate in place before comparing */
		entry->name[entry->name_len] = 0;

#if CAVAN_EXT2_DEBUG
		show_ext2_directory_entry(entry);
#endif

		if (text_cmp(option->filename, entry->name) == 0)
		{
			mem_copy(option->entry, entry, EXT2_DIR_ENTRY_HEADER_SIZE);
			text_ncopy(option->entry->name, entry->name, entry->name_len);
			return CAVAN_EXT2_TRAVERSAL_FOUND;
		}

		entry = ADDR_ADD(entry, entry->rec_len);
	}

	return CAVAN_EXT2_TRAVERSAL_CONTINUE;
}
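The ADDR_ADD used above (and in the other cavan examples) behaves as byte-granular pointer arithmetic: it lets the loop step through variable-length directory entries by rec_len without a cast at every call site. A minimal sketch of such a macro and of the traversal pattern, using hypothetical definitions rather than the project's actual ones:

#include <stdio.h>

/* Assumed definition: advance a pointer by a byte count, yielding void *. */
#define ADDR_ADD(addr, off)  ((void *) ((char *) (addr) + (off)))

struct demo_entry {          /* stand-in for struct ext2_directory_entry */
	unsigned short rec_len;  /* distance in bytes to the next entry */
	unsigned char name_len;
	char name[13];
};

int main(void)
{
	struct demo_entry block[3] = {
		{ sizeof(struct demo_entry), 1, "a" },
		{ sizeof(struct demo_entry), 2, "bb" },
		{ sizeof(struct demo_entry), 3, "ccc" },
	};
	struct demo_entry *entry = block;
	struct demo_entry *entry_end = ADDR_ADD(entry, sizeof(block));

	/* same walk as the handler above: hop from entry to entry by rec_len */
	while (entry < entry_end) {
		printf("%.*s\n", entry->name_len, entry->name);
		entry = ADDR_ADD(entry, entry->rec_len);
	}

	return 0;
}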
Example #2
File: sha.c  Project: FuangCao/cavan
void cavan_sha_update(struct cavan_sha_context *context, const void *buff, size_t size)
{
	size_t remain;
	const void *buff_end = ADDR_ADD(buff, size);

	if (context->remain > 0) {
		size_t padding;

		padding = sizeof(context->buff) - context->remain;
		if (padding <= size) {
			mem_copy(context->buff + context->remain, buff, padding);
			context->transform(context->digest, context->dwbuff);
			buff = ADDR_ADD(buff, padding);
			context->remain = 0;
		}
	}

	while (1) {
		remain = ADDR_SUB2(buff_end, buff);
		if (remain < sizeof(context->buff)) {
			break;
		}

		context->transform(context->digest, buff);
		buff = ADDR_ADD(buff, sizeof(context->buff));
	}

	if (remain) {
		mem_copy(context->buff + context->remain, buff, remain);
		context->remain += remain;
	}

	context->count += size;
}
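cavan_sha_update follows the standard streaming-hash update pattern: top up a partially filled block left over from the previous call, transform as many whole blocks as possible straight from the caller's buffer, then stash the tail in context->buff for later. A hedged sketch of the same pattern, where the ADDR_ADD/ADDR_SUB2 definitions and the toy_ctx/toy_update names are assumptions for illustration (ADDR_SUB2 is taken to be the byte-distance counterpart of ADDR_ADD):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Assumed counterparts of the cavan macros. */
#define ADDR_ADD(addr, off)  ((const void *) ((const char *) (addr) + (off)))
#define ADDR_SUB2(a, b)      ((size_t) ((const char *) (a) - (const char *) (b)))

struct toy_ctx {
	unsigned char buff[8]; /* one block */
	size_t remain;         /* bytes pending in buff */
	size_t blocks;         /* whole blocks consumed so far */
};

static void toy_update(struct toy_ctx *ctx, const void *buff, size_t size)
{
	const void *buff_end = ADDR_ADD(buff, size);
	size_t tail;

	if (ctx->remain > 0) {
		size_t padding = sizeof(ctx->buff) - ctx->remain;

		if (padding <= size) {             /* pending block can be completed */
			memcpy(ctx->buff + ctx->remain, buff, padding);
			ctx->blocks++;                 /* stands in for context->transform(...) */
			buff = ADDR_ADD(buff, padding);
			ctx->remain = 0;
		}
	}

	while (ADDR_SUB2(buff_end, buff) >= sizeof(ctx->buff)) {
		ctx->blocks++;                     /* transform directly from the caller's buffer */
		buff = ADDR_ADD(buff, sizeof(ctx->buff));
	}

	tail = ADDR_SUB2(buff_end, buff);
	if (tail > 0) {                        /* keep the tail for the next call */
		memcpy(ctx->buff + ctx->remain, buff, tail);
		ctx->remain += tail;
	}
}

int main(void)
{
	struct toy_ctx ctx = { {0}, 0, 0 };

	toy_update(&ctx, "0123456789", 10);    /* one whole block consumed, 2 bytes pending */
	toy_update(&ctx, "abcdef", 6);         /* completes the pending block */
	printf("blocks=%zu remain=%zu\n", ctx.blocks, ctx.remain);
	return 0;
}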
Example #3
static mps_addr_t MPS_CALL ps_typed_skip( mps_addr_t object )
{
  TypeTag tag;
  size_t size;

  tag = ( (struct generic_typed_object *)object )->typetag;
  HQASSERT( DWORD_IS_ALIGNED( object ), "unaligned object" );
  switch (tag) {
  case tag_NCACHE: {
    size = sizeof(NAMECACHE) + ((NAMECACHE *)object)->len;
  } break;
  default:
    HQFAIL("Invalid tag in skip");
    size = 4; /* No value correct here; this to silence the compiler. */
  }
  return ADDR_ADD( object, SIZE_ALIGN_UP( size, MM_PS_TYPED_ALIGNMENT ));
}
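ps_typed_skip must return the address just past the object, so the raw size (the NAMECACHE header plus its inline name) is rounded up to the pool's alignment before being added. A plausible definition of that rounding step, assumed here rather than taken from the source:

#include <assert.h>
#include <stddef.h>

/* Assumed: round size up to the next multiple of align, where align is a power of two. */
#define SIZE_ALIGN_UP(size, align)  (((size) + (align) - 1) & ~((size_t) (align) - 1))

int main(void)
{
	assert(SIZE_ALIGN_UP(13, 8) == 16);
	assert(SIZE_ALIGN_UP(16, 8) == 16);
	return 0;
}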
Example #4
mps_res_t MPS_CALL ps_scan(mps_ss_t scan_state, mps_addr_t base, mps_addr_t limit)
{
  register OBJECT *obj;
  OBJECT *obj_limit;
  register mps_addr_t ref;
  size_t len = 0;

  obj_limit = limit;
  MPS_SCAN_BEGIN( scan_state )
    for ( obj = base; obj < obj_limit; obj++ ) {
      ref = (mps_addr_t)oOther( *obj );
      switch ( oType( *obj )) {
      case ONAME:
        MPS_RETAIN( (mps_addr_t *)&oName( *obj ), TRUE );
        continue;
      case OSAVE:
        continue;
      case ODICTIONARY:
        NOTREACHED;
        break;
      case OSTRING: {
        mps_addr_t ref_limit;

        ref_limit = ADDR_ADD( ref, theLen(*obj));
        /* ref could point into the middle of a string, so align it. */
        ref = PTR_ALIGN_DOWN( mps_addr_t, ref, MM_PS_ALIGNMENT );
        len = ADDR_OFFSET( ref, ref_limit );
      } break;
      case OFILE:
        NOTREACHED;
        break;
      case OARRAY:
      case OPACKEDARRAY:
        len = theLen(*obj) * sizeof( OBJECT );
        break;
      case OGSTATE:
      case OLONGSTRING:
        NOTREACHED;
        break;
      default: continue; /* not a composite object */
      }
      PS_MARK_BLOCK( scan_state, ref, len );
    }
  MPS_SCAN_END(scan_state);
  return MPS_RES_OK;
}
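In the OSTRING case the reference may point into the middle of a string, so ps_scan aligns it down to the pool alignment and then measures the byte span up to the end of the string. Hedged sketches of what PTR_ALIGN_DOWN and ADDR_OFFSET might expand to (assumed definitions for illustration only):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed: align a pointer down to a power-of-two boundary, and measure a byte offset. */
#define PTR_ALIGN_DOWN(type, ptr, align) \
	((type) ((uintptr_t) (ptr) & ~((uintptr_t) (align) - 1)))
#define ADDR_OFFSET(base, limit)  ((size_t) ((char *) (limit) - (char *) (base)))

int main(void)
{
	char pool[32];
	char *mid = pool + 5;                          /* points into the middle of a string */
	char *ref = PTR_ALIGN_DOWN(char *, mid, 4);    /* rounded down to a 4-byte boundary */

	assert(((uintptr_t) ref & 3) == 0);
	assert(ADDR_OFFSET(ref, mid) <= 4);
	return 0;
}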
Example #5
static mps_res_t MPS_CALL ps_typed_scan(mps_ss_t scan_state,
                                        mps_addr_t base, mps_addr_t limit)
{
  TypeTag tag;
  mps_addr_t obj;
  size_t size;
  mps_res_t res = MPS_RES_OK;

  MPS_SCAN_BEGIN(scan_state)
    obj = base;
    while ( obj < limit ) {
      /* The tag is in the same place in all types, that's the point of it. */
      tag = ( (struct generic_typed_object *)obj )->typetag;
      switch (tag) {
      case tag_NCACHE: {
        NAMECACHE *nc = (NAMECACHE *)obj;

        /* The next, dictobj and dictcpy fields are not fixed because we
         * don't use them in the test.  The dictsid field is not fixed,
         * because it's a weak reference cleared by finalization.
         *
         * The length calculation is relying on the name being allocated
         * as a part of the NAMECACHE object. */
        if (!MPS_IS_RETAINED((mps_addr_t*)&nc->dictval, TRUE))
          nc->dictval = NULL;
        size = sizeof(NAMECACHE) + nc->len;
      } break;
      default: {
        HQFAIL("Invalid tag in scan");
        res = MPS_RES_FAIL;
        size = 4; /* No value correct here; this to silence the compiler. */
        break;
      }
      }
      obj = ADDR_ADD( obj, SIZE_ALIGN_UP( size, MM_PS_TYPED_ALIGNMENT ));
    }
  MPS_SCAN_END(scan_state);
  return res;
}
Example #6
int GGLAssembler::scanline_core(const needs_t& needs, context_t const* c)
{
    int64_t duration = ggl_system_time();

    mBlendFactorCached = 0;
    mBlending = 0;
    mMasking = 0;
    mAA        = GGL_READ_NEEDS(P_AA, needs.p);
    mDithering = GGL_READ_NEEDS(P_DITHER, needs.p);
    mAlphaTest = GGL_READ_NEEDS(P_ALPHA_TEST, needs.p) + GGL_NEVER;
    mDepthTest = GGL_READ_NEEDS(P_DEPTH_TEST, needs.p) + GGL_NEVER;
    mFog       = GGL_READ_NEEDS(P_FOG, needs.p) != 0;
    mSmooth    = GGL_READ_NEEDS(SHADE, needs.n) != 0;
    mBuilderContext.needs = needs;
    mBuilderContext.c = c;
    mBuilderContext.Rctx = reserveReg(R0); // context always in R0
    mCbFormat = c->formats[ GGL_READ_NEEDS(CB_FORMAT, needs.n) ];

    // ------------------------------------------------------------------------

    decodeLogicOpNeeds(needs);

    decodeTMUNeeds(needs, c);

    mBlendSrc  = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_SRC, needs.n));
    mBlendDst  = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_DST, needs.n));
    mBlendSrcA = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_SRCA, needs.n));
    mBlendDstA = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_DSTA, needs.n));

    if (!mCbFormat.c[GGLFormat::ALPHA].h) {
        if ((mBlendSrc == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendSrc == GGL_DST_ALPHA)) {
            mBlendSrc = GGL_ONE;
        }
        if ((mBlendSrcA == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendSrcA == GGL_DST_ALPHA)) {
            mBlendSrcA = GGL_ONE;
        }
        if ((mBlendDst == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendDst == GGL_DST_ALPHA)) {
            mBlendDst = GGL_ONE;
        }
        if ((mBlendDstA == GGL_ONE_MINUS_DST_ALPHA) ||
            (mBlendDstA == GGL_DST_ALPHA)) {
            mBlendDstA = GGL_ONE;
        }
    }

    // if we need the framebuffer, read it now
    const int blending =    blending_codes(mBlendSrc, mBlendDst) |
                            blending_codes(mBlendSrcA, mBlendDstA);

    // XXX: handle special cases, destination not modified...
    if ((mBlendSrc==GGL_ZERO) && (mBlendSrcA==GGL_ZERO) &&
        (mBlendDst==GGL_ONE) && (mBlendDstA==GGL_ONE)) {
        // Destination unmodified (beware of logic ops)
    } else if ((mBlendSrc==GGL_ZERO) && (mBlendSrcA==GGL_ZERO) &&
        (mBlendDst==GGL_ZERO) && (mBlendDstA==GGL_ZERO)) {
        // Destination is zero (beware of logic ops)
    }
    
    int fbComponents = 0;
    const int masking = GGL_READ_NEEDS(MASK_ARGB, needs.n);
    for (int i=0 ; i<4 ; i++) {
        const int mask = 1<<i;
        component_info_t& info = mInfo[i];
        int fs = i==GGLFormat::ALPHA ? mBlendSrcA : mBlendSrc;
        int fd = i==GGLFormat::ALPHA ? mBlendDstA : mBlendDst;
        if (fs==GGL_SRC_ALPHA_SATURATE && i==GGLFormat::ALPHA)
            fs = GGL_ONE;
        info.masked =   !!(masking & mask);
        info.inDest =   !info.masked && mCbFormat.c[i].h && 
                        ((mLogicOp & LOGIC_OP_SRC) || (!mLogicOp));
        if (mCbFormat.components >= GGL_LUMINANCE &&
                (i==GGLFormat::GREEN || i==GGLFormat::BLUE)) {
            info.inDest = false;
        }
        info.needed =   (i==GGLFormat::ALPHA) && 
                        (isAlphaSourceNeeded() || mAlphaTest != GGL_ALWAYS);
        info.replaced = !!(mTextureMachine.replaced & mask);
        info.iterated = (!info.replaced && (info.inDest || info.needed)); 
        info.smooth =   mSmooth && info.iterated;
        info.fog =      mFog && info.inDest && (i != GGLFormat::ALPHA);
        info.blend =    (fs != int(GGL_ONE)) || (fd > int(GGL_ZERO));

        mBlending |= (info.blend ? mask : 0);
        mMasking |= (mCbFormat.c[i].h && info.masked) ? mask : 0;
        fbComponents |= mCbFormat.c[i].h ? mask : 0;
    }

    mAllMasked = (mMasking == fbComponents);
    if (mAllMasked) {
        mDithering = 0;
    }
    
    fragment_parts_t parts;

    // ------------------------------------------------------------------------
    prolog();
    // ------------------------------------------------------------------------

    build_scanline_prolog(parts, needs);

    if (registerFile().status())
        return registerFile().status();

    // ------------------------------------------------------------------------
    label("fragment_loop");
    // ------------------------------------------------------------------------
    {
        Scratch regs(registerFile());

        if (mDithering) {
            // update the dither index.
            MOV(AL, 0, parts.count.reg,
                    reg_imm(parts.count.reg, ROR, GGL_DITHER_ORDER_SHIFT));
            ADD(AL, 0, parts.count.reg, parts.count.reg,
                    imm( 1 << (32 - GGL_DITHER_ORDER_SHIFT)));
            MOV(AL, 0, parts.count.reg,
                    reg_imm(parts.count.reg, ROR, 32 - GGL_DITHER_ORDER_SHIFT));
        }

        // XXX: could we do an early alpha-test here in some cases?
        // It would probably be used only with smooth-alpha and no texture
        // (or no alpha component in the texture).

        // Early z-test
        if (mAlphaTest==GGL_ALWAYS) {
            build_depth_test(parts, Z_TEST|Z_WRITE);
        } else {
            // we cannot do the z-write here, because
            // it might be killed by the alpha-test later
            build_depth_test(parts, Z_TEST);
        }

        { // texture coordinates
            Scratch scratches(registerFile());

            // texel generation
            build_textures(parts, regs);
            if (registerFile().status())
                return registerFile().status();
        }

        if ((blending & (FACTOR_DST|BLEND_DST)) || 
                (mMasking && !mAllMasked) ||
                (mLogicOp & LOGIC_OP_DST)) 
        {
            // blending / logic_op / masking need the framebuffer
            mDstPixel.setTo(regs.obtain(), &mCbFormat);

            // load the framebuffer pixel
            comment("fetch color-buffer");
            load(parts.cbPtr, mDstPixel);
        }

        if (registerFile().status())
            return registerFile().status();

        pixel_t pixel;
        int directTex = mTextureMachine.directTexture;
        if (directTex | parts.packed) {
            // note: we can't have both here
            // iterated color or direct texture
            pixel = directTex ? parts.texel[directTex-1] : parts.iterated;
            pixel.flags &= ~CORRUPTIBLE;
        } else {
            if (mDithering) {
                const int ctxtReg = mBuilderContext.Rctx;
                const int mask = GGL_DITHER_SIZE-1;
                parts.dither = reg_t(regs.obtain());
                AND(AL, 0, parts.dither.reg, parts.count.reg, imm(mask));
                ADDR_ADD(AL, 0, parts.dither.reg, ctxtReg, parts.dither.reg);
                LDRB(AL, parts.dither.reg, parts.dither.reg,
                        immed12_pre(GGL_OFFSETOF(ditherMatrix)));
            }
        
            // allocate a register for the resulting pixel
            pixel.setTo(regs.obtain(), &mCbFormat, FIRST);

            build_component(pixel, parts, GGLFormat::ALPHA,    regs);

            if (mAlphaTest!=GGL_ALWAYS) {
                // only handle the z-write part here. We know z-test
                // was successful, as well as alpha-test.
                build_depth_test(parts, Z_WRITE);
            }

            build_component(pixel, parts, GGLFormat::RED,      regs);
            build_component(pixel, parts, GGLFormat::GREEN,    regs);
            build_component(pixel, parts, GGLFormat::BLUE,     regs);

            pixel.flags |= CORRUPTIBLE;
        }

        if (registerFile().status())
            return registerFile().status();
        
        if (pixel.reg == -1) {
            // be defensive here. if we're here it's probably
            // that this whole fragment is a no-op.
            pixel = mDstPixel;
        }
        
        if (!mAllMasked) {
            // logic operation
            build_logic_op(pixel, regs);
    
            // masking
            build_masking(pixel, regs); 
    
            comment("store");
            store(parts.cbPtr, pixel, WRITE_BACK);
        }
    }

    if (registerFile().status())
        return registerFile().status();

    // update the iterated color...
    if (parts.reload != 3) {
        build_smooth_shade(parts);
    }

    // update iterated z
    build_iterate_z(parts);

    // update iterated fog
    build_iterate_f(parts);

    SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
    B(PL, "fragment_loop");
    label("epilog");
    epilog(registerFile().touched());

    if ((mAlphaTest!=GGL_ALWAYS) || (mDepthTest!=GGL_ALWAYS)) {
        if (mDepthTest!=GGL_ALWAYS) {
            label("discard_before_textures");
            build_iterate_texture_coordinates(parts);
        }
        label("discard_after_textures");
        build_smooth_shade(parts);
        build_iterate_z(parts);
        build_iterate_f(parts);
        if (!mAllMasked) {
            ADDR_ADD(AL, 0, parts.cbPtr.reg, parts.cbPtr.reg, imm(parts.cbPtr.size>>3));
        }
        SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
        B(PL, "fragment_loop");
        epilog(registerFile().touched());
    }

    return registerFile().status();
}
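In this example ADDR_ADD is not the pointer macro of the earlier listings: GGLAssembler uses it as a code-generation helper that emits an add on an address register. The call near the end of the discard path advances the color-buffer pointer by one pixel, i.e. by its size in bits shifted down to bytes. A hedged C sketch of the arithmetic the emitted instruction performs, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the value the emitted ADDR_ADD computes when the fragment
 * loop steps the color-buffer pointer to the next pixel (imm(parts.cbPtr.size >> 3)). */
static uint8_t *advance_cb_ptr(uint8_t *cb_ptr, unsigned pixel_size_bits)
{
	return cb_ptr + (pixel_size_bits >> 3);
}

int main(void)
{
	uint8_t framebuffer[8] = { 0 };
	uint8_t *p = advance_cb_ptr(framebuffer, 16); /* e.g. a 16-bit 565 color buffer */

	printf("advanced by %td bytes\n", p - framebuffer);
	return 0;
}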
Example #7
static int ext4_find_file_base(struct ext2_desc *desc, const char *filename, struct ext4_extent_header *header, struct ext2_directory_entry *entry)
{
	ssize_t rdlen;

	show_ext4_extent_header(header);

	if (header->depth > 0)
	{
		struct ext4_extent_index *index_end;
		struct ext4_extent_index *index = (struct ext4_extent_index *) (header + 1);

		for (index_end = index + header->entries; index < index_end; index++)
		{
			char buff[desc->block_size];

			show_ext4_extent_index(index);

			rdlen = desc->read_block(desc, (u64) index->leaf_hi << 32 | index->leaf_lo, buff, 1);
			if (rdlen < 0)
			{
				pr_error_info("desc->read_block");
				return rdlen;
			}

			if (ext4_find_file_base(desc, filename, (struct ext4_extent_header *) buff, entry) == 0)
			{
				return 0;
			}
		}
	}
	else
	{
		struct ext2_directory_entry *p, *p_end;
		struct ext4_extent_leaf *leaf_end;
		struct ext4_extent_leaf *leaf = (struct ext4_extent_leaf *) (header + 1);
		void *block = alloca(desc->block_size);

		p_end = ADDR_ADD(block, desc->block_size);

		for (leaf_end = leaf + header->entries; leaf < leaf_end; leaf++)
		{
			show_ext4_extent_leaf(leaf);

			rdlen = desc->read_block(desc, (u64) leaf->start_hi << 32 | leaf->start_lo, block, 1);
			if (rdlen < 0)
			{
				pr_error_info("desc->read_block");
				return rdlen;
			}

			/* p was advanced past p_end on the previous leaf; restart at the block base */
			p = block;

			while (p < p_end)
			{
#if CAVAN_EXT2_DEBUG
				p->name[p->name_len] = 0;
				show_ext2_directory_entry(p);
#endif
				if (text_ncmp(filename, p->name, p->name_len) == 0)
				{
					mem_copy(entry, p, EXT2_DIR_ENTRY_HEADER_SIZE + p->name_len);
					entry->name[entry->name_len] = 0;
					return 0;
				}

				p = ADDR_ADD(p, p->rec_len);
			}
		}
	}

	return -ENOENT;
}