Example 1
void evg_compute_unit_map_work_group(struct evg_compute_unit_t *compute_unit, struct evg_work_group_t *work_group)
{
	struct evg_ndrange_t *ndrange = work_group->ndrange;
	struct evg_wavefront_t *wavefront;
	int wavefront_id;

	/* Map work-group */
	assert(compute_unit->work_group_count < evg_gpu->work_groups_per_compute_unit);
	assert(!work_group->id_in_compute_unit);
	while (work_group->id_in_compute_unit < evg_gpu->work_groups_per_compute_unit
		&& compute_unit->work_groups[work_group->id_in_compute_unit])
		work_group->id_in_compute_unit++;
	assert(work_group->id_in_compute_unit < evg_gpu->work_groups_per_compute_unit);
	compute_unit->work_groups[work_group->id_in_compute_unit] = work_group;
	compute_unit->work_group_count++;

	/* If compute unit reached its maximum load, remove it from 'ready' list.
	 * Otherwise, move it to the end of the 'ready' list. */
	assert(DOUBLE_LINKED_LIST_MEMBER(evg_gpu, ready, compute_unit));
	DOUBLE_LINKED_LIST_REMOVE(evg_gpu, ready, compute_unit);
	if (compute_unit->work_group_count < evg_gpu->work_groups_per_compute_unit)
		DOUBLE_LINKED_LIST_INSERT_TAIL(evg_gpu, ready, compute_unit);
	
	/* If this is the first scheduled work-group, insert to 'busy' list. */
	if (!DOUBLE_LINKED_LIST_MEMBER(evg_gpu, busy, compute_unit))
		DOUBLE_LINKED_LIST_INSERT_TAIL(evg_gpu, busy, compute_unit);

	/* Assign wavefront identifiers within the compute unit */
	EVG_FOREACH_WAVEFRONT_IN_WORK_GROUP(work_group, wavefront_id)
	{
		wavefront = ndrange->wavefronts[wavefront_id];
		wavefront->id_in_compute_unit = work_group->id_in_compute_unit *
			ndrange->wavefronts_per_work_group + wavefront->id_in_work_group;
	}
}
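
All of these examples manipulate intrusive doubly linked lists through Multi2Sim's DOUBLE_LINKED_LIST_MEMBER / _INSERT_TAIL / _REMOVE macros. Their real definitions are not reproduced here; the following is a minimal, self-contained sketch of how such token-pasting macros typically work. The macro names (DLL_*) and the field naming convention (name##_list_head, name##_list_prev, and so on) are assumptions chosen for illustration, not the verified Multi2Sim code.

/* Sketch of intrusive double-linked-list macros in the style of the
 * DOUBLE_LINKED_LIST_* family used above. The container keeps head/tail/count,
 * and each element carries its own prev/next links for that list. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define DLL_MEMBER(cont, name, elem) \
	((cont)->name##_list_head == (elem) || \
	 (elem)->name##_list_prev || (elem)->name##_list_next)

#define DLL_INSERT_TAIL(cont, name, elem) do { \
	assert(!DLL_MEMBER((cont), name, (elem))); \
	(elem)->name##_list_prev = (cont)->name##_list_tail; \
	(elem)->name##_list_next = NULL; \
	if ((cont)->name##_list_tail) \
		(cont)->name##_list_tail->name##_list_next = (elem); \
	else \
		(cont)->name##_list_head = (elem); \
	(cont)->name##_list_tail = (elem); \
	(cont)->name##_list_count++; \
} while (0)

#define DLL_REMOVE(cont, name, elem) do { \
	assert(DLL_MEMBER((cont), name, (elem))); \
	if ((elem)->name##_list_prev) \
		(elem)->name##_list_prev->name##_list_next = (elem)->name##_list_next; \
	else \
		(cont)->name##_list_head = (elem)->name##_list_next; \
	if ((elem)->name##_list_next) \
		(elem)->name##_list_next->name##_list_prev = (elem)->name##_list_prev; \
	else \
		(cont)->name##_list_tail = (elem)->name##_list_prev; \
	(elem)->name##_list_prev = NULL; \
	(elem)->name##_list_next = NULL; \
	(cont)->name##_list_count--; \
} while (0)

/* Tiny container/element pair mirroring the 'ready' list usage above */
struct cu_t { struct cu_t *ready_list_prev, *ready_list_next; };
struct gpu_t { struct cu_t *ready_list_head, *ready_list_tail; int ready_list_count; };

int main(void)
{
	struct gpu_t gpu = { 0 };
	struct cu_t cu = { 0 };

	DLL_INSERT_TAIL(&gpu, ready, &cu);
	printf("member after insert: %d\n", DLL_MEMBER(&gpu, ready, &cu));
	DLL_REMOVE(&gpu, ready, &cu);
	printf("member after remove: %d\n", DLL_MEMBER(&gpu, ready, &cu));
	return 0;
}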
Example 2
void si_compute_unit_map_work_group(struct si_compute_unit_t *compute_unit, 
	struct si_work_group_t *work_group)
{
	struct si_ndrange_t *ndrange = work_group->ndrange;
	struct si_wavefront_t *wavefront;
	int wavefront_id;
	int ib_id;

	assert(compute_unit->work_group_count < 
		si_gpu->work_groups_per_compute_unit);
	assert(!work_group->id_in_compute_unit);

	/* Find an available slot */
	while (work_group->id_in_compute_unit < 
		si_gpu->work_groups_per_compute_unit &&
		compute_unit->work_groups[work_group->id_in_compute_unit])
	{
		work_group->id_in_compute_unit++;
	}
	assert(work_group->id_in_compute_unit < 
		si_gpu->work_groups_per_compute_unit);
	compute_unit->work_groups[work_group->id_in_compute_unit] = work_group;
	compute_unit->work_group_count++;

	/* If compute unit reached its maximum load, remove it from 
	 * 'compute_unit_ready' list.  Otherwise, move it to the end of 
	 * the 'compute_unit_ready' list. */
	assert(DOUBLE_LINKED_LIST_MEMBER(si_gpu, compute_unit_ready, 
		compute_unit));
	DOUBLE_LINKED_LIST_REMOVE(si_gpu, compute_unit_ready, compute_unit);
	if (compute_unit->work_group_count < 
		si_gpu->work_groups_per_compute_unit)
	{
		DOUBLE_LINKED_LIST_INSERT_TAIL(si_gpu, compute_unit_ready, 
			compute_unit);
	}
	
	/* If this is the first scheduled work-group, insert to 
	 * 'compute_unit_busy' list. */
	if (!DOUBLE_LINKED_LIST_MEMBER(si_gpu, compute_unit_busy, compute_unit))
	{
		DOUBLE_LINKED_LIST_INSERT_TAIL(si_gpu, compute_unit_busy, 
			compute_unit);
	}

	/* Assign wavefront identifiers within the compute unit */
	SI_FOREACH_WAVEFRONT_IN_WORK_GROUP(work_group, wavefront_id)
	{
		wavefront = ndrange->wavefronts[wavefront_id];
		wavefront->id_in_compute_unit = work_group->id_in_compute_unit *
			ndrange->wavefronts_per_work_group + 
			wavefront->id_in_work_group;
	}
}
Example 3
static struct gpu_wavefront_t *gpu_schedule_round_robin(struct gpu_compute_unit_t *compute_unit)
{
	struct gpu_wavefront_t *wavefront, *temp_wavefront;
	struct linked_list_t *wavefront_pool = compute_unit->wavefront_pool;

	/* Select current position in pool as initial candidate wavefront */
	if (!linked_list_get(wavefront_pool))
		linked_list_head(wavefront_pool);
	wavefront = linked_list_get(wavefront_pool);
	temp_wavefront = wavefront;

	/* Look for a valid candidate */
	for (;;)
	{
		/* Wavefront must be running,
		 * and the corresponding slot in fetch buffer must be free. */
		assert(wavefront->id_in_compute_unit < gpu->wavefronts_per_compute_unit);
		if (DOUBLE_LINKED_LIST_MEMBER(wavefront->work_group, running, wavefront) &&
			!compute_unit->cf_engine.fetch_buffer[wavefront->id_in_compute_unit])
			break;

		/* Current candidate is not valid - go to next.
		 * If we went through the whole pool, no fetch. */
		linked_list_next_circular(wavefront_pool);
		wavefront = linked_list_get(wavefront_pool);
		if (wavefront == temp_wavefront)
			return NULL;
	}

	/* Wavefront found, remove from pool and return. */
	assert(wavefront->clause_kind == GPU_CLAUSE_CF);
	linked_list_remove(wavefront_pool);
	return wavefront;
}
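
The round-robin scheduler above starts at the pool's current position and walks it circularly (linked_list_next_circular) until it either finds a wavefront that is running and has a free fetch-buffer slot, or wraps back to its starting candidate, in which case nothing is fetched this cycle. A stand-alone sketch of that wrap-around search, reduced to a plain array of ready flags (names and types are illustrative):

/* Circular (round-robin) candidate search: return the index of the first
 * ready slot at or after 'start', wrapping around, or -1 if none is ready. */
#include <stdio.h>

static int round_robin_pick(const int *ready, int count, int start)
{
	int i = start;

	do
	{
		if (ready[i])
			return i;
		i = (i + 1) % count;   /* circular advance, like linked_list_next_circular */
	} while (i != start);

	return -1;   /* walked the whole pool without finding a candidate */
}

int main(void)
{
	int ready[5] = { 0, 0, 1, 0, 1 };

	printf("pick from 3 -> %d\n", round_robin_pick(ready, 5, 3));   /* prints 4 */
	printf("pick from 0 -> %d\n", round_robin_pick(ready, 5, 0));   /* prints 2 */
	return 0;
}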
Example 4
/* Enqueue access in port wait list. */
void mod_stack_wait_in_port(struct mod_stack_t *stack,
	struct mod_port_t *port, int event)
{
	assert(port == stack->port);
	assert(!DOUBLE_LINKED_LIST_MEMBER(port, waiting, stack));
	stack->waiting_list_event = event;
	DOUBLE_LINKED_LIST_INSERT_TAIL(port, waiting, stack);
}
Example 5
/* Enqueue access in module wait list. */
void mod_stack_wait_in_mod(struct mod_stack_t *stack,
	struct mod_t *mod, int event)
{
	assert(mod == stack->mod);
	assert(!DOUBLE_LINKED_LIST_MEMBER(mod, waiting, stack));
	stack->waiting_list_event = event;
	DOUBLE_LINKED_LIST_INSERT_TAIL(mod, waiting, stack);
}
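
The two wait helpers above (and the per-stack variant shown later) share the same idea: the access records the event that should resume it in waiting_list_event and then parks itself on the owner's intrusive waiting list. The stored event only pays off in the matching wake-up step; a minimal sketch of that step follows, with hypothetical types and a schedule_event() stand-in rather than Multi2Sim's real event-engine API.

/* Sketch of the wake-up counterpart to the wait helpers: once the port,
 * module, or master stack frees up, each waiter is dequeued and the event
 * it recorded is scheduled again. All names here are illustrative only. */
#include <stddef.h>
#include <stdio.h>

struct stack_t
{
	int waiting_list_event;            /* event to resume this access with */
	struct stack_t *waiting_list_next; /* intrusive waiting-list link */
};

struct owner_t
{
	struct stack_t *waiting_list_head;
};

/* Stand-in for the event engine's scheduling call */
static void schedule_event(int event, struct stack_t *stack)
{
	printf("resume stack %p with event %d\n", (void *) stack, event);
}

static void wake_up_all(struct owner_t *owner)
{
	struct stack_t *s;

	while ((s = owner->waiting_list_head))
	{
		owner->waiting_list_head = s->waiting_list_next;   /* dequeue head */
		s->waiting_list_next = NULL;
		schedule_event(s->waiting_list_event, s);          /* resume waiter */
	}
}

int main(void)
{
	struct stack_t a = { 7, NULL };
	struct stack_t b = { 9, &a };          /* waiting list: b -> a */
	struct owner_t owner = { &b };

	wake_up_all(&owner);
	return 0;
}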
Example 6
static void evg_cf_engine_fetch(struct evg_compute_unit_t *compute_unit)
{
	struct evg_ndrange_t *ndrange = evg_gpu->ndrange;
	struct evg_wavefront_t *wavefront;

	char str[MAX_LONG_STRING_SIZE];
	char str_trimmed[MAX_LONG_STRING_SIZE];

	struct evg_inst_t *inst;

	struct evg_uop_t *uop;
	struct evg_work_item_uop_t *work_item_uop;
	struct evg_work_item_t *work_item;
	int work_item_id;

	/* Schedule wavefront */
	wavefront = evg_compute_unit_schedule(compute_unit);
	if (!wavefront)
		return;

	/* Emulate CF instruction */
	evg_wavefront_execute(wavefront);
	inst = &wavefront->cf_inst;

	/* Create uop */
	uop = evg_uop_create();
	uop->wavefront = wavefront;
	uop->work_group = wavefront->work_group;
	uop->compute_unit = compute_unit;
	uop->id_in_compute_unit = compute_unit->gpu_uop_id_counter++;
	uop->alu_clause_trigger = wavefront->clause_kind == EVG_CLAUSE_ALU;
	uop->tex_clause_trigger = wavefront->clause_kind == EVG_CLAUSE_TEX;
	uop->no_clause_trigger = wavefront->clause_kind == EVG_CLAUSE_CF;
	uop->last = DOUBLE_LINKED_LIST_MEMBER(wavefront->work_group, finished, wavefront);
	uop->wavefront_last = uop->last && uop->no_clause_trigger;
	uop->global_mem_read = wavefront->global_mem_read;
	uop->global_mem_write = wavefront->global_mem_write;
	uop->active_mask_update = wavefront->active_mask_update;
	uop->active_mask_push = wavefront->active_mask_push;
	uop->active_mask_pop = wavefront->active_mask_pop;
	uop->active_mask_stack_top = wavefront->stack_top;
	uop->vliw_slots = 1;

	/* If debugging active mask, store active state for work-items */
	if (debug_status(evg_stack_debug_category))
		evg_uop_save_active_mask(uop);

	/* If instruction is a global memory write, record addresses */
	if (uop->global_mem_write)
	{
		assert((inst->info->flags & EVG_INST_FLAG_MEM_WRITE));
		EVG_FOREACH_WORK_ITEM_IN_WAVEFRONT(wavefront, work_item_id)
		{
			work_item = ndrange->work_items[work_item_id];
			work_item_uop = &uop->work_item_uop[work_item->id_in_wavefront];
			work_item_uop->global_mem_access_addr = work_item->global_mem_access_addr;
			work_item_uop->global_mem_access_size = work_item->global_mem_access_size;
		}
	}
}
Example 7
/* Enqueue access in stack wait list. */
void mod_stack_wait_in_stack(struct mod_stack_t *stack,
	struct mod_stack_t *master_stack, int event)
{
	assert(master_stack != stack);
	assert(!DOUBLE_LINKED_LIST_MEMBER(master_stack, waiting, stack));

	stack->waiting_list_event = event;
	DOUBLE_LINKED_LIST_INSERT_TAIL(master_stack, waiting, stack);
}
Example 8
void si_ndrange_free(struct si_ndrange_t *ndrange)
{
	int i;

	/* Set event status to complete if an event was set. */
	if (ndrange->event)
		ndrange->event->status = SI_OPENCL_EVENT_STATUS_COMPLETE;

	/* Clear task from command queue */
	if (ndrange->command_queue && ndrange->command)
	{
		si_opencl_command_queue_complete(ndrange->command_queue, ndrange->command);
		si_opencl_command_free(ndrange->command);
	}

	/* Clear all states that affect lists. */
	si_ndrange_clear_status(ndrange, si_ndrange_pending);
	si_ndrange_clear_status(ndrange, si_ndrange_running);
	si_ndrange_clear_status(ndrange, si_ndrange_finished);

	/* Extract from ND-Range list in Southern Islands emulator */
	assert(DOUBLE_LINKED_LIST_MEMBER(si_emu, ndrange, ndrange));
	DOUBLE_LINKED_LIST_REMOVE(si_emu, ndrange, ndrange);

	/* Free lists */
	list_free(ndrange->uav_list);

	/* Free work-groups */
	for (i = 0; i < ndrange->work_group_count; i++)
		si_work_group_free(ndrange->work_groups[i]);
	free(ndrange->work_groups);

	/* Free wavefronts */
	for (i = 0; i < ndrange->wavefront_count; i++)
	{
		si_wavefront_free(ndrange->wavefronts[i]);
		si_work_item_free(ndrange->scalar_work_items[i]);
	}
	free(ndrange->wavefronts);
	free(ndrange->scalar_work_items);

	/* Free work-items */
	for (i = 0; i < ndrange->work_item_count; i++)
		si_work_item_free(ndrange->work_items[i]);
	free(ndrange->work_items);

	/* Free instruction histogram */
	if (ndrange->inst_histogram)
		free(ndrange->inst_histogram);

	/* Free ndrange */
	free(ndrange->name);
	free(ndrange);
}
Example 9
/* Enqueue access in stack wait list. */
void mod_stack_wait_in_stack(struct mod_stack_t *stack,
	struct mod_stack_t *master_stack, int event)
{
	assert(master_stack != stack);
	assert(!DOUBLE_LINKED_LIST_MEMBER(master_stack, waiting, stack));

	stack->waiting_list_event = event;
	DOUBLE_LINKED_LIST_INSERT_TAIL(master_stack, waiting, stack);
    
	/* Debug trace for a specific address range (left disabled) */
	if (stack->addr >= 0x2F20 && stack->addr <= 0x2F2C)
	{
		/* fprintf(stderr, "      wait master %x, %x, %d, %lld\n",
			master_stack->addr, stack->addr, event, esim_time); */
	}
}
Example 10
void X86ContextDestroy(X86Context *self)
{
	X86Emu *emu = self->emu;

	/* If the context is not finished/zombie yet, finish it first.
	 * This removes all references to the context being freed. */
	if (!X86ContextGetState(self, X86ContextFinished | X86ContextZombie))
		X86ContextFinish(self, 0);
	
	/* Remove context from finished contexts list. This should
	 * be the only list the context is in right now. */
	assert(!DOUBLE_LINKED_LIST_MEMBER(emu, running, self));
	assert(!DOUBLE_LINKED_LIST_MEMBER(emu, suspended, self));
	assert(!DOUBLE_LINKED_LIST_MEMBER(emu, zombie, self));
	assert(DOUBLE_LINKED_LIST_MEMBER(emu, finished, self));
	DOUBLE_LINKED_LIST_REMOVE(emu, finished, self);
		
	/* Free private structures */
	x86_regs_free(self->regs);
	x86_regs_free(self->backup_regs);
	x86_signal_mask_table_free(self->signal_mask_table);
	spec_mem_free(self->spec_mem);
	bit_map_free(self->affinity);

	/* Unlink shared structures */
	x86_loader_unlink(self->loader);
	x86_signal_handler_table_unlink(self->signal_handler_table);
	x86_file_desc_table_unlink(self->file_desc_table);
	mem_unlink(self->mem);

	/* Remove context from contexts list and free */
	DOUBLE_LINKED_LIST_REMOVE(emu, context, self);
	X86ContextDebug("inst %lld: context %d freed\n",
			asEmu(emu)->instructions, self->pid);

	/* Static instruction */
	delete_static(&self->inst);
}
Example 11
void si_ndrange_free(struct si_ndrange_t *ndrange)
{
	int i;

	/* Run free notify call-back */
	if (ndrange->free_notify_func)
		ndrange->free_notify_func(ndrange->free_notify_data);

	/* Clear all states that affect lists. */
	si_ndrange_clear_status(ndrange, si_ndrange_pending);
	si_ndrange_clear_status(ndrange, si_ndrange_running);
	si_ndrange_clear_status(ndrange, si_ndrange_finished);

	/* Extract from ND-Range list in Southern Islands emulator */
	assert(DOUBLE_LINKED_LIST_MEMBER(si_emu, ndrange, ndrange));
	DOUBLE_LINKED_LIST_REMOVE(si_emu, ndrange, ndrange);

	/* Free work-groups */
	for (i = 0; i < ndrange->work_group_count; i++)
		si_work_group_free(ndrange->work_groups[i]);
	free(ndrange->work_groups);

	/* Free wavefronts */
	for (i = 0; i < ndrange->wavefront_count; i++)
	{
		si_wavefront_free(ndrange->wavefronts[i]);
		si_work_item_free(ndrange->scalar_work_items[i]);
	}
	free(ndrange->wavefronts);
	free(ndrange->scalar_work_items);

	/* Free work-items */
	for (i = 0; i < ndrange->work_item_count; i++)
		si_work_item_free(ndrange->work_items[i]);
	free(ndrange->work_items);

	/* Free instruction histogram */
	if (ndrange->inst_histogram)
		free(ndrange->inst_histogram);

	/* Free instruction buffer */
	if (ndrange->inst_buffer)
		free(ndrange->inst_buffer);

	/* Free ndrange */
	free(ndrange->name);
	free(ndrange);
}
Example 12
File: emu.c, Project: ajithcj/miaow
int x86_emu_list_member(enum x86_emu_list_kind_t list, struct x86_ctx_t *ctx)
{
	switch (list) {
	case x86_emu_list_context: return DOUBLE_LINKED_LIST_MEMBER(x86_emu, context, ctx);
	case x86_emu_list_running: return DOUBLE_LINKED_LIST_MEMBER(x86_emu, running, ctx);
	case x86_emu_list_finished: return DOUBLE_LINKED_LIST_MEMBER(x86_emu, finished, ctx);
	case x86_emu_list_zombie: return DOUBLE_LINKED_LIST_MEMBER(x86_emu, zombie, ctx);
	case x86_emu_list_suspended: return DOUBLE_LINKED_LIST_MEMBER(x86_emu, suspended, ctx);
	case x86_emu_list_alloc: return DOUBLE_LINKED_LIST_MEMBER(x86_emu, alloc, ctx);
	}
	return 0;
}
Example 13
int MIPSEmuListMember(MIPSEmu *self, enum mips_emu_list_kind_t list,
		struct mips_ctx_t *ctx)
{
	switch (list) {
	case mips_emu_list_context: return DOUBLE_LINKED_LIST_MEMBER(self, context, ctx);
	case mips_emu_list_running: return DOUBLE_LINKED_LIST_MEMBER(self, running, ctx);
	case mips_emu_list_finished: return DOUBLE_LINKED_LIST_MEMBER(self, finished, ctx);
	case mips_emu_list_zombie: return DOUBLE_LINKED_LIST_MEMBER(self, zombie, ctx);
	case mips_emu_list_suspended: return DOUBLE_LINKED_LIST_MEMBER(self, suspended, ctx);
	case mips_emu_list_alloc: return DOUBLE_LINKED_LIST_MEMBER(self, alloc, ctx);
	}
	return 0;
}
Example 14
void evg_ndrange_free(struct evg_ndrange_t *ndrange)
{
	int i;

	/* Clear task from command queue */
	if (ndrange->command_queue && ndrange->command)
	{
		evg_opencl_command_queue_complete(ndrange->command_queue, ndrange->command);
		evg_opencl_command_free(ndrange->command);
	}

	/* Clear all states that affect lists. */
	evg_ndrange_clear_status(ndrange, evg_ndrange_pending);
	evg_ndrange_clear_status(ndrange, evg_ndrange_running);
	evg_ndrange_clear_status(ndrange, evg_ndrange_finished);

	/* Extract from ND-Range list in Evergreen emulator */
	assert(DOUBLE_LINKED_LIST_MEMBER(evg_emu, ndrange, ndrange));
	DOUBLE_LINKED_LIST_REMOVE(evg_emu, ndrange, ndrange);

	/* Free work-groups */
	for (i = 0; i < ndrange->work_group_count; i++)
		evg_work_group_free(ndrange->work_groups[i]);
	free(ndrange->work_groups);

	/* Free wavefronts */
	for (i = 0; i < ndrange->wavefront_count; i++)
		evg_wavefront_free(ndrange->wavefronts[i]);
	free(ndrange->wavefronts);

	/* Free work-items */
	for (i = 0; i < ndrange->work_item_count; i++)
		evg_work_item_free(ndrange->work_items[i]);
	free(ndrange->work_items);

	/* Free instruction histogram */
	if (ndrange->inst_histogram)
		free(ndrange->inst_histogram);

	/* Free ND-Range */
	free(ndrange->name);
	free(ndrange);
}
Example 15
static struct gpu_wavefront_t *gpu_schedule_greedy(struct gpu_compute_unit_t *compute_unit)
{
	struct gpu_wavefront_t *wavefront, *temp_wavefront;
	struct linked_list_t *wavefront_pool = compute_unit->wavefront_pool;

	/* Check all candidates */
	temp_wavefront = NULL;
	LINKED_LIST_FOR_EACH(wavefront_pool)
	{
		/* Get wavefront from list */
		wavefront = linked_list_get(wavefront_pool);
		
		/* Wavefront must be running,
		 * and the corresponding slot in fetch buffer must be free. */
		assert(wavefront->id_in_compute_unit < gpu->wavefronts_per_compute_unit);
		if (!DOUBLE_LINKED_LIST_MEMBER(wavefront->work_group, running, wavefront) ||
			compute_unit->cf_engine.fetch_buffer[wavefront->id_in_compute_unit])
			continue;

		/* Select current wavefront temporarily */
		if (!temp_wavefront || temp_wavefront->sched_when < wavefront->sched_when)
			temp_wavefront = wavefront;
	}

	/* No wavefront found */
	if (!temp_wavefront)
		return NULL;

	/* Wavefront found, remove from pool and return. */
	assert(temp_wavefront->clause_kind == GPU_CLAUSE_CF);
	linked_list_find(wavefront_pool, temp_wavefront);
	assert(!wavefront_pool->error_code);
	linked_list_remove(wavefront_pool);
	temp_wavefront->sched_when = gpu->cycle;
	return temp_wavefront;
}
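
In contrast to the round-robin policy of Example 3, the greedy scheduler above scans every entry in the pool and keeps the valid candidate with the largest sched_when, i.e. it prefers to keep fetching from the wavefront it touched most recently. A reduced sketch of that selection rule, with illustrative types and field names:

/* Greedy selection: among runnable candidates, pick the one scheduled
 * most recently (largest sched_when). Returns -1 if none is runnable. */
#include <stdio.h>

struct candidate
{
	int runnable;          /* stands in for the running / free-fetch-slot check */
	long long sched_when;  /* cycle when this wavefront was last scheduled */
};

static int greedy_pick(const struct candidate *c, int count)
{
	int best = -1;
	int i;

	for (i = 0; i < count; i++)
	{
		if (!c[i].runnable)
			continue;
		if (best < 0 || c[best].sched_when < c[i].sched_when)
			best = i;
	}
	return best;
}

int main(void)
{
	struct candidate pool[3] = { { 1, 10 }, { 0, 99 }, { 1, 42 } };

	printf("greedy pick -> %d\n", greedy_pick(pool, 3));   /* prints 2 */
	return 0;
}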
Example 16
/* Execute one instruction in the wavefront */
void si_wavefront_execute(struct si_wavefront_t *wavefront)
{
    struct si_ndrange_t *ndrange;
    struct si_work_group_t *work_group;
    struct si_work_item_t *work_item;
    struct si_inst_t *inst;

    char inst_dump[MAX_INST_STR_SIZE];
    unsigned int pc;

    int work_item_id;

    /* Get current work-group */
    ndrange = wavefront->ndrange;
    work_group = wavefront->work_group;
    work_item = NULL;
    inst = NULL;
    assert(!DOUBLE_LINKED_LIST_MEMBER(work_group, finished, wavefront));

    /* Reset instruction flags */
    wavefront->vector_mem_write = 0;
    wavefront->vector_mem_read = 0;
    wavefront->scalar_mem_read = 0;
    wavefront->local_mem_write = 0;
    wavefront->local_mem_read = 0;
    wavefront->pred_mask_update = 0;
    wavefront->mem_wait = 0;
    wavefront->barrier = 0;

    assert(!wavefront->finished);

    /* Grab the next instruction and update the pointer */
    wavefront->inst_size = si_inst_decode(wavefront->wavefront_pool, &wavefront->inst, 0);

    /* Stats */
    si_emu->inst_count++;
    wavefront->emu_inst_count++;
    wavefront->inst_count++;

    /* Set the current instruction */
    inst = &wavefront->inst;
    pc = wavefront->wavefront_pool - wavefront->wavefront_pool_start;

    /* MIAOW start - print a per-instruction trace line to the ISA debug stream */
    si_isa_debug("\n###%d_%d_%d_%u_%d", kernel_config_count - 1, wavefront->work_group->id,
                 wavefront->id_in_work_group, pc, wavefront->inst_size);
    /* MIAOW stop */

    /* Execute the current instruction */
    switch (inst->info->fmt)
    {

    /* Scalar ALU Instructions */
    case SI_FMT_SOP1:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_sop1(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        si_emu->scalar_alu_inst_count++;
        wavefront->scalar_alu_inst_count++;

        /* Only one work item executes the instruction */
        work_item = wavefront->scalar_work_item;
        (*si_isa_inst_func[inst->info->inst])(work_item, inst);

        if (debug_status(si_isa_debug_category))
        {
            si_isa_debug("\n");
        }

        break;
    }

    case SI_FMT_SOP2:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_sop2(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        si_emu->scalar_alu_inst_count++;
        wavefront->scalar_alu_inst_count++;

        /* Only one work item executes the instruction */
        work_item = wavefront->scalar_work_item;
        (*si_isa_inst_func[inst->info->inst])(work_item, inst); //Calling a function pointer in machine.c

        if (debug_status(si_isa_debug_category))
        {
            si_isa_debug("\n");
        }

        break;
    }

    case SI_FMT_SOPP:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_sopp(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        if (wavefront->inst.micro_inst.sopp.op > 1 &&
                wavefront->inst.micro_inst.sopp.op < 10)
        {
            si_emu->branch_inst_count++;
            wavefront->branch_inst_count++;
        } else
        {
            si_emu->scalar_alu_inst_count++;
            wavefront->scalar_alu_inst_count++;
        }

        /* Only one work item executes the instruction */
        work_item = wavefront->scalar_work_item;
        (*si_isa_inst_func[inst->info->inst])(work_item, inst);

        if (debug_status(si_isa_debug_category))
        {
            si_isa_debug("\n");
        }

        break;
    }

    case SI_FMT_SOPC:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_sopc(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        si_emu->scalar_alu_inst_count++;
        wavefront->scalar_alu_inst_count++;

        /* Only one work item executes the instruction */
        work_item = wavefront->scalar_work_item;
        (*si_isa_inst_func[inst->info->inst])(work_item, inst);

        if (debug_status(si_isa_debug_category))
        {
            si_isa_debug("\n");
        }

        break;
    }

    case SI_FMT_SOPK:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_sopk(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        si_emu->scalar_alu_inst_count++;
        wavefront->scalar_alu_inst_count++;

        /* Only one work item executes the instruction */
        work_item = wavefront->scalar_work_item;
        (*si_isa_inst_func[inst->info->inst])(work_item, inst);

        if (debug_status(si_isa_debug_category))
        {
            si_isa_debug("\n");
        }

        break;
    }

    /* Scalar Memory Instructions */
    case SI_FMT_SMRD:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_smrd(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        si_emu->scalar_mem_inst_count++;
        wavefront->scalar_mem_inst_count++;

        /* Only one work item executes the instruction */
        work_item = wavefront->scalar_work_item;
        (*si_isa_inst_func[inst->info->inst])(work_item, inst);

        if (debug_status(si_isa_debug_category))
        {
            si_isa_debug("\n");
        }

        break;
    }

    /* Vector ALU Instructions */
    case SI_FMT_VOP2:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_vop2(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        si_emu->vector_alu_inst_count++;
        wavefront->vector_alu_inst_count++;

        /* Execute the instruction */
        SI_FOREACH_WORK_ITEM_IN_WAVEFRONT(wavefront, work_item_id)
        {
            work_item = ndrange->work_items[work_item_id];
            if(si_wavefront_work_item_active(wavefront, work_item->id_in_wavefront))
                (*si_isa_inst_func[inst->info->inst])(work_item, inst);
        }

        if (debug_status(si_isa_debug_category))
        {
            si_isa_debug("\n");
        }

        break;
    }

    case SI_FMT_VOP1:
    {
        /* Dump instruction string when debugging */
        if (debug_status(si_isa_debug_category))
        {
            si_inst_dump_vop1(inst, wavefront->inst_size, pc, wavefront->wavefront_pool, inst_dump,
                              MAX_INST_STR_SIZE);
            si_isa_debug("\n%s", inst_dump);
        }

        /* Stats */
        si_emu->vector_alu_inst_count++;
        wavefront->vector_alu_inst_count++;

        if (inst->micro_inst.vop1.op == 2)
        {
            /* Instruction ignores execution mask and is only executed on one work item.
             * Execute on the first active work item from the least significant bit in EXEC.
             * (if exec is 0, execute work item 0) */
            work_item = ndrange->work_items[wavefront->work_item_id_first];
            if (si_isa_read_sreg(work_item, SI_EXEC) == 0 && si_isa_read_sreg(work_item, SI_EXEC + 1) == 0)
            {
                (*si_isa_inst_func[inst->info->inst])(work_item, inst);
            }
            else {
                SI_FOREACH_WORK_ITEM_IN_WAVEFRONT(wavefront, work_item_id)
                {
                    work_item = ndrange->work_items[work_item_id];
                    if(si_wavefront_work_item_active(wavefront, work_item->id_in_wavefront))
                    {
                        (*si_isa_inst_func[inst->info->inst])(work_item, inst);
                        break;
                    }
                }
            }
        }

        break;
    }

    /* Remaining instruction formats are omitted in this excerpt */
    default:
        break;
    }
}
Example 17
static void X86ContextUpdateState(X86Context *self, X86ContextState state)
{
	X86Emu *emu = self->emu;

	X86ContextState status_diff;
	char state_str[MAX_STRING_SIZE];

	/* Remove contexts from the following lists:
	 *   running, suspended, zombie */
	if (DOUBLE_LINKED_LIST_MEMBER(emu, running, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, running, self);
	if (DOUBLE_LINKED_LIST_MEMBER(emu, suspended, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, suspended, self);
	if (DOUBLE_LINKED_LIST_MEMBER(emu, zombie, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, zombie, self);
	if (DOUBLE_LINKED_LIST_MEMBER(emu, finished, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, finished, self);
	
	/* If the old and new states differ in any bit other than
	 * X86ContextSpecMode, mark a reschedule. */
	status_diff = self->state ^ state;
	if (status_diff & ~X86ContextSpecMode)
		emu->schedule_signal = 1;
	
	/* Update state */
	self->state = state;
	if (self->state & X86ContextFinished)
		self->state = X86ContextFinished
				| (state & X86ContextAlloc)
				| (state & X86ContextMapped);
	if (self->state & X86ContextZombie)
		self->state = X86ContextZombie
				| (state & X86ContextAlloc)
				| (state & X86ContextMapped);
	if (!(self->state & X86ContextSuspended) &&
		!(self->state & X86ContextFinished) &&
		!(self->state & X86ContextZombie) &&
		!(self->state & X86ContextLocked))
		self->state |= X86ContextRunning;
	else
		self->state &= ~X86ContextRunning;
	
	/* Insert context into the corresponding lists. */
	if (self->state & X86ContextRunning)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, running, self);
	if (self->state & X86ContextZombie)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, zombie, self);
	if (self->state & X86ContextFinished)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, finished, self);
	if (self->state & X86ContextSuspended)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, suspended, self);
	
	/* Dump new state (ignore changes that only affect X86ContextSpecMode, too frequent) */
	if (debug_status(x86_context_debug_category) && (status_diff & ~X86ContextSpecMode))
	{
		str_map_flags(&x86_context_state_map, self->state, state_str, sizeof state_str);
		X86ContextDebug("inst %lld: ctx %d changed state to %s\n",
			asEmu(emu)->instructions, self->pid, state_str);
	}

	/* Start/stop x86 timer depending on whether there are any contexts
	 * currently running. */
	if (emu->running_list_count)
		m2s_timer_start(asEmu(emu)->timer);
	else
		m2s_timer_stop(asEmu(emu)->timer);
}
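
The reschedule decision in X86ContextUpdateState relies on a small bit trick: XOR-ing the old and new state exposes exactly the bits that changed, and masking out the spec-mode bit ignores transitions that only toggle speculative mode. A self-contained sketch of that check, with made-up flag values rather than the real X86ContextState encoding:

/* State-diff test: XOR gives the changed bits, then the bits we do not
 * care about (spec-mode) are masked away. Flag values are illustrative. */
#include <stdio.h>

enum
{
	CTX_RUNNING   = 1 << 0,
	CTX_SUSPENDED = 1 << 1,
	CTX_FINISHED  = 1 << 2,
	CTX_SPEC_MODE = 1 << 3
};

static int needs_reschedule(unsigned old_state, unsigned new_state)
{
	unsigned diff = old_state ^ new_state;            /* bits that changed */

	return (diff & ~(unsigned) CTX_SPEC_MODE) != 0;   /* ignore spec-mode-only changes */
}

int main(void)
{
	/* Only spec-mode toggled: no reschedule needed */
	printf("%d\n", needs_reschedule(CTX_RUNNING, CTX_RUNNING | CTX_SPEC_MODE));   /* 0 */

	/* Running -> suspended: reschedule */
	printf("%d\n", needs_reschedule(CTX_RUNNING, CTX_SUSPENDED));                 /* 1 */
	return 0;
}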