Example #1
/* If dependence 'index' in 'uinst' is a memory operand, return its size in
 * bytes; otherwise, return 0. Through 'mem_regular_dep', also return the
 * regular dependence corresponding to the memory dependence data type. */
static int x86_uinst_mem_dep_size(struct x86_uinst_t *uinst, int index, 
		struct x86_ctx_t *ctx, enum x86_dep_t *mem_regular_dep)
{
	int dep;

	assert(index >= 0 && index < X86_UINST_MAX_DEPS);
	dep = uinst->dep[index];

	switch (dep)
	{

	case x86_dep_rm8:
	case x86_dep_rm16:
	case x86_dep_rm32:

		/* The 'modrm_mod' field indicates whether it's actually a memory
		 * dependence or a register. */
		if (ctx->inst.modrm_mod == 3)
			return 0;

		PTR_ASSIGN(mem_regular_dep, x86_dep_data);
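		/* rm8, rm16, and rm32 are consecutive enum values, so the shift
		 * yields 1, 2, or 4 bytes respectively. */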
		return 1 << (dep - x86_dep_rm8);

	case x86_dep_mem8:
	case x86_dep_mem16:
	case x86_dep_mem32:
	case x86_dep_mem64:
	case x86_dep_mem128:

		PTR_ASSIGN(mem_regular_dep, x86_dep_data);
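		/* mem8 through mem128 are consecutive enum values, so the shift
		 * yields 1, 2, 4, 8, or 16 bytes. */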
		return 1 << (dep - x86_dep_mem8);

	case x86_dep_mem80:

		PTR_ASSIGN(mem_regular_dep, x86_dep_data);
		return 10;

	case x86_dep_xmmm32:
	case x86_dep_xmmm64:
	case x86_dep_xmmm128:
		
		/* The 'modrm_mod' field indicates whether it's actually a memory
		 * dependence or a register. */
		if (ctx->inst.modrm_mod == 3)
			return 0;

		PTR_ASSIGN(mem_regular_dep, x86_dep_xmm_data);
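		/* The '+ 2' offsets the shift so that xmmm32, xmmm64, and xmmm128
		 * map to 4, 8, and 16 bytes. */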
		return 1 << (dep - x86_dep_xmmm32 + 2);

	default:
		return 0;
	}
}
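
As a minimal usage sketch (not part of the original source), the helper above could be called once per dependence slot to total the bytes a micro-instruction touches in memory. The function name 'x86_uinst_total_mem_bytes' is hypothetical; the types and constants are assumed from the surrounding Multi2Sim code.

/* Hypothetical sketch: sum the sizes of all memory dependences of a
 * micro-instruction. Non-memory slots contribute 0. */
static int x86_uinst_total_mem_bytes(struct x86_uinst_t *uinst,
		struct x86_ctx_t *ctx)
{
	enum x86_dep_t regular_dep;
	int total = 0;
	int index;

	for (index = 0; index < X86_UINST_MAX_DEPS; index++)
		total += x86_uinst_mem_dep_size(uinst, index, ctx, &regular_dep);
	return total;
}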
int X86ThreadLookupTraceCache(X86Thread *self, unsigned int eip, int pred,
	int *ptr_mop_count, unsigned int **ptr_mop_array, unsigned int *ptr_neip)
{
	struct x86_trace_cache_t *trace_cache = self->trace_cache;
	struct x86_trace_cache_entry_t *entry;
	struct x86_trace_cache_entry_t *found_entry;

	unsigned int neip;

	int set;
	int way;
	int taken;

	FILE *f;

	/* Debug */
	if (x86_trace_cache_debugging())
	{
		f = debug_file(x86_trace_cache_debug_category);
		fprintf(f, "** Lookup **\n");
		fprintf(f, "eip = 0x%x, pred = ", eip);
		x86_trace_cache_pred_dump(pred, x86_trace_cache_branch_max, f);
		fprintf(f, "\n");
	}

	/* Look for trace cache line */
	found_entry = NULL;
	set = eip % x86_trace_cache_num_sets;
	for (way = 0; way < x86_trace_cache_assoc; way++)
	{
		entry = X86_TRACE_CACHE_ENTRY(set, way);
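		/* A hit requires a matching tag and a prediction that agrees with
		 * the trace's recorded branch directions on every masked branch. */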
		if (entry->tag == eip && ((pred & entry->branch_mask) == entry->branch_flags))
		{
			found_entry = entry;
			break;
		}
	}

	/* Statistics */
	trace_cache->accesses++;
	if (found_entry)
		trace_cache->hits++;

	/* Miss */
	if (!found_entry)
	{
		x86_trace_cache_debug("Miss\n");
		x86_trace_cache_debug("\n");
		return 0;
	}
	
	/* Calculate address of the next instruction to fetch after this trace.
	 * The 'neip' value will be the trace 'target' if the last instruction in
	 * the trace is a branch and 'pred' predicts it taken. */
	taken = found_entry->target && (pred & (1 << found_entry->branch_count));
	neip = taken ? found_entry->target : found_entry->fall_through;

	/* Debug */
	if (x86_trace_cache_debugging())
	{
		f = debug_file(x86_trace_cache_debug_category);
		fprintf(f, "Hit - Set = %d, Way = %d\n", set, way);
		X86ThreadDumpTraceCacheEntry(self, found_entry, f);
		fprintf(f, "Next trace prediction = %c\n", taken ? 'T' : 'n');
		fprintf(f, "Next fetch address = 0x%x\n", neip);
		fprintf(f, "\n");
	}

	/* Return fields. */
	PTR_ASSIGN(ptr_mop_count, found_entry->mop_count);
	PTR_ASSIGN(ptr_mop_array, found_entry->mop_array);
	PTR_ASSIGN(ptr_neip, neip);

	/* Hit */
	return 1;
}
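
A hypothetical caller, sketched under the assumption of the surrounding Multi2Sim types; the function name and the fallback path are illustrative, not from the source:

/* Hypothetical sketch: consult the trace cache during fetch. On a hit,
 * 'mop_array' holds 'mop_count' macro-instruction addresses and fetch
 * continues at 'neip'; on a miss, fall back to the regular fetch path. */
static void x86_fetch_trace_cache_sketch(X86Thread *self,
		unsigned int eip, int pred)
{
	int mop_count;
	unsigned int *mop_array;
	unsigned int neip;

	if (X86ThreadLookupTraceCache(self, eip, pred,
			&mop_count, &mop_array, &neip))
	{
		/* ... feed 'mop_count' instructions from 'mop_array' into the
		 * pipeline, then redirect fetch to 'neip'. */
	}
	else
	{
		/* ... fetch from the instruction cache at 'eip' as usual. */
	}
}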