void evg_compute_unit_map_work_group(struct evg_compute_unit_t *compute_unit,
	struct evg_work_group_t *work_group)
{
	struct evg_ndrange_t *ndrange = work_group->ndrange;
	struct evg_wavefront_t *wavefront;
	int wavefront_id;

	/* Map work-group */
	assert(compute_unit->work_group_count < evg_gpu->work_groups_per_compute_unit);
	assert(!work_group->id_in_compute_unit);
	while (work_group->id_in_compute_unit < evg_gpu->work_groups_per_compute_unit
		&& compute_unit->work_groups[work_group->id_in_compute_unit])
		work_group->id_in_compute_unit++;
	assert(work_group->id_in_compute_unit < evg_gpu->work_groups_per_compute_unit);
	compute_unit->work_groups[work_group->id_in_compute_unit] = work_group;
	compute_unit->work_group_count++;

	/* If compute unit reached its maximum load, remove it from 'ready' list.
	 * Otherwise, move it to the end of the 'ready' list. */
	assert(DOUBLE_LINKED_LIST_MEMBER(evg_gpu, ready, compute_unit));
	DOUBLE_LINKED_LIST_REMOVE(evg_gpu, ready, compute_unit);
	if (compute_unit->work_group_count < evg_gpu->work_groups_per_compute_unit)
		DOUBLE_LINKED_LIST_INSERT_TAIL(evg_gpu, ready, compute_unit);

	/* If this is the first scheduled work-group, insert to 'busy' list. */
	if (!DOUBLE_LINKED_LIST_MEMBER(evg_gpu, busy, compute_unit))
		DOUBLE_LINKED_LIST_INSERT_TAIL(evg_gpu, busy, compute_unit);

	/* Assign wavefront identifiers in compute unit */
	EVG_FOREACH_WAVEFRONT_IN_WORK_GROUP(work_group, wavefront_id)
	{
		wavefront = ndrange->wavefronts[wavefront_id];
		wavefront->id_in_compute_unit = work_group->id_in_compute_unit *
			ndrange->wavefronts_per_work_group + wavefront->id_in_work_group;
	}
}

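/*
 * Note: every routine in this section manipulates its lists through the
 * DOUBLE_LINKED_LIST_* macros. The block below is NOT the project's actual
 * definition; it is a minimal sketch of the pattern these call sites assume:
 * a container that declares <name>_list_head, <name>_list_tail and
 * <name>_list_count fields, and elements that declare <name>_list_prev and
 * <name>_list_next pointers. The field names are assumptions made purely for
 * illustration, so the sketch is disabled with '#if 0'.
 */
#if 0
#define DOUBLE_LINKED_LIST_INSERT_TAIL(container, name, elem) do { \
	(elem)->name##_list_prev = (container)->name##_list_tail; \
	(elem)->name##_list_next = NULL; \
	if ((container)->name##_list_tail) \
		(container)->name##_list_tail->name##_list_next = (elem); \
	else \
		(container)->name##_list_head = (elem); \
	(container)->name##_list_tail = (elem); \
	(container)->name##_list_count++; \
} while (0)

#define DOUBLE_LINKED_LIST_REMOVE(container, name, elem) do { \
	if ((elem)->name##_list_prev) \
		(elem)->name##_list_prev->name##_list_next = (elem)->name##_list_next; \
	else \
		(container)->name##_list_head = (elem)->name##_list_next; \
	if ((elem)->name##_list_next) \
		(elem)->name##_list_next->name##_list_prev = (elem)->name##_list_prev; \
	else \
		(container)->name##_list_tail = (elem)->name##_list_prev; \
	(elem)->name##_list_prev = NULL; \
	(elem)->name##_list_next = NULL; \
	(container)->name##_list_count--; \
} while (0)
#endif
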
/* Wake up accesses waiting in a stack's wait list. */
void mod_stack_wakeup_stack(struct mod_stack_t *master_stack)
{
	struct mod_stack_t *stack;
	int event;

	/* No access to wake up */
	if (!master_stack->waiting_list_count)
		return;

	/* Debug */
	mem_debug(" %lld %lld 0x%x wake up accesses:", esim_time,
		master_stack->id, master_stack->addr);

	/* Wake up all coalesced accesses */
	while (master_stack->waiting_list_head)
	{
		stack = master_stack->waiting_list_head;
		event = stack->waiting_list_event;
		DOUBLE_LINKED_LIST_REMOVE(master_stack, waiting, stack);
		stack->master_stack = NULL;
		esim_schedule_event(event, stack, 0);
		mem_debug(" %lld", stack->id);
	}

	/* Debug */
	mem_debug("\n");
}

void frm_grid_clear_status(struct frm_grid_t *grid, enum frm_grid_status_t status)
{
	/* Get only the bits that are set */
	status &= grid->status;

	/* Remove grid from lists */
	if (status & frm_grid_pending)
		DOUBLE_LINKED_LIST_REMOVE(frm_emu, pending_grid, grid);
	if (status & frm_grid_running)
		DOUBLE_LINKED_LIST_REMOVE(frm_emu, running_grid, grid);
	if (status & frm_grid_finished)
		DOUBLE_LINKED_LIST_REMOVE(frm_emu, finished_grid, grid);

	/* Update status */
	grid->status &= ~status;
}

/* Wake up accesses waiting in a stack's wait list. */
void mod_stack_wakeup_stack(struct mod_stack_t *master_stack)
{
	struct mod_stack_t *stack;
	int event;

	/* No access to wake up */
	if (!master_stack->waiting_list_count)
		return;

	/* Debug */
	mem_debug(" %lld %lld 0x%x wake up accesses:", esim_time,
		master_stack->id, master_stack->addr);

	/* Reset coalesced access counter */
	master_stack->coalesced_count = 0;

	/* Wake up all coalesced accesses */
	while (master_stack->waiting_list_head)
	{
		stack = master_stack->waiting_list_head;
		event = stack->waiting_list_event;
		stack->waiting_list_event = 0;
		DOUBLE_LINKED_LIST_REMOVE(master_stack, waiting, stack);
		stack->state = master_stack->state;
		esim_schedule_event(event, stack, 0);
		mem_debug(" %lld", stack->id);
	}

	/* Debug */
	mem_debug("\n");
}

void si_work_group_clear_status(struct si_work_group_t *work_group,
	enum si_work_group_status_t status)
{
	struct si_ndrange_t *ndrange = work_group->ndrange;

	/* Get only the bits that are set */
	status &= work_group->status;

	/* Remove work-group from lists */
	if (status & si_work_group_pending)
		DOUBLE_LINKED_LIST_REMOVE(ndrange, pending, work_group);
	if (status & si_work_group_running)
		DOUBLE_LINKED_LIST_REMOVE(ndrange, running, work_group);
	if (status & si_work_group_finished)
		DOUBLE_LINKED_LIST_REMOVE(ndrange, finished, work_group);

	/* Update status */
	work_group->status &= ~status;
}

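/*
 * The clear_status routine above is assumed to have a symmetric counterpart
 * that inserts the work-group into the ND-Range lists when a status bit is
 * newly set. The disabled sketch below illustrates that pattern under the
 * same field names; it is not claimed to be the project's actual
 * si_work_group_set_status implementation.
 */
#if 0
void si_work_group_set_status(struct si_work_group_t *work_group,
	enum si_work_group_status_t status)
{
	struct si_ndrange_t *ndrange = work_group->ndrange;

	/* Keep only the bits that are not already set */
	status &= ~work_group->status;

	/* Add work-group to the corresponding lists */
	if (status & si_work_group_pending)
		DOUBLE_LINKED_LIST_INSERT_TAIL(ndrange, pending, work_group);
	if (status & si_work_group_running)
		DOUBLE_LINKED_LIST_INSERT_TAIL(ndrange, running, work_group);
	if (status & si_work_group_finished)
		DOUBLE_LINKED_LIST_INSERT_TAIL(ndrange, finished, work_group);

	/* Update status */
	work_group->status |= status;
}
#endif
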
void net_msg_free(struct net_msg_t *msg)
{
	while (msg->packet_list_head)
	{
		struct net_packet_t *pkt;

		pkt = msg->packet_list_head;
		DOUBLE_LINKED_LIST_REMOVE(msg, packet, pkt);
		net_packet_free(pkt);
	}
	free(msg);
}

void si_ndrange_free(struct si_ndrange_t *ndrange)
{
	int i;

	/* Set event status to complete if an event was set. */
	if (ndrange->event)
		ndrange->event->status = SI_OPENCL_EVENT_STATUS_COMPLETE;

	/* Clear task from command queue */
	if (ndrange->command_queue && ndrange->command)
	{
		si_opencl_command_queue_complete(ndrange->command_queue, ndrange->command);
		si_opencl_command_free(ndrange->command);
	}

	/* Clear all states that affect lists. */
	si_ndrange_clear_status(ndrange, si_ndrange_pending);
	si_ndrange_clear_status(ndrange, si_ndrange_running);
	si_ndrange_clear_status(ndrange, si_ndrange_finished);

	/* Extract from ND-Range list in Southern Islands emulator */
	assert(DOUBLE_LINKED_LIST_MEMBER(si_emu, ndrange, ndrange));
	DOUBLE_LINKED_LIST_REMOVE(si_emu, ndrange, ndrange);

	/* Free lists */
	list_free(ndrange->uav_list);

	/* Free work-groups */
	for (i = 0; i < ndrange->work_group_count; i++)
		si_work_group_free(ndrange->work_groups[i]);
	free(ndrange->work_groups);

	/* Free wavefronts */
	for (i = 0; i < ndrange->wavefront_count; i++)
	{
		si_wavefront_free(ndrange->wavefronts[i]);
		si_work_item_free(ndrange->scalar_work_items[i]);
	}
	free(ndrange->wavefronts);
	free(ndrange->scalar_work_items);

	/* Free work-items */
	for (i = 0; i < ndrange->work_item_count; i++)
		si_work_item_free(ndrange->work_items[i]);
	free(ndrange->work_items);

	/* Free instruction histogram */
	if (ndrange->inst_histogram)
		free(ndrange->inst_histogram);

	/* Free ndrange */
	free(ndrange->name);
	free(ndrange);
}

void si_compute_unit_map_work_group(struct si_compute_unit_t *compute_unit,
	struct si_work_group_t *work_group)
{
	struct si_ndrange_t *ndrange = work_group->ndrange;
	struct si_wavefront_t *wavefront;
	int wavefront_id;
	int ib_id;

	assert(compute_unit->work_group_count < si_gpu->work_groups_per_compute_unit);
	assert(!work_group->id_in_compute_unit);

	/* Find an available slot */
	while (work_group->id_in_compute_unit < si_gpu->work_groups_per_compute_unit
		&& compute_unit->work_groups[work_group->id_in_compute_unit])
	{
		work_group->id_in_compute_unit++;
	}
	assert(work_group->id_in_compute_unit < si_gpu->work_groups_per_compute_unit);
	compute_unit->work_groups[work_group->id_in_compute_unit] = work_group;
	compute_unit->work_group_count++;

	/* If compute unit reached its maximum load, remove it from
	 * 'compute_unit_ready' list. Otherwise, move it to the end of
	 * the 'compute_unit_ready' list. */
	assert(DOUBLE_LINKED_LIST_MEMBER(si_gpu, compute_unit_ready, compute_unit));
	DOUBLE_LINKED_LIST_REMOVE(si_gpu, compute_unit_ready, compute_unit);
	if (compute_unit->work_group_count < si_gpu->work_groups_per_compute_unit)
	{
		DOUBLE_LINKED_LIST_INSERT_TAIL(si_gpu, compute_unit_ready, compute_unit);
	}

	/* If this is the first scheduled work-group, insert to
	 * 'compute_unit_busy' list. */
	if (!DOUBLE_LINKED_LIST_MEMBER(si_gpu, compute_unit_busy, compute_unit))
	{
		DOUBLE_LINKED_LIST_INSERT_TAIL(si_gpu, compute_unit_busy, compute_unit);
	}

	/* Assign wavefront identifiers in compute unit */
	SI_FOREACH_WAVEFRONT_IN_WORK_GROUP(work_group, wavefront_id)
	{
		wavefront = ndrange->wavefronts[wavefront_id];
		wavefront->id_in_compute_unit = work_group->id_in_compute_unit *
			ndrange->wavefronts_per_work_group + wavefront->id_in_work_group;
	}
}

/* Wake up accesses waiting in module wait list. */
void mod_stack_wakeup_mod(struct mod_t *mod)
{
	struct mod_stack_t *stack;
	int event;

	while (mod->waiting_list_head)
	{
		stack = mod->waiting_list_head;
		event = stack->waiting_list_event;
		DOUBLE_LINKED_LIST_REMOVE(mod, waiting, stack);
		esim_schedule_event(event, stack, 0);
	}
}

/* Wake up accesses waiting in a port wait list. */
void mod_stack_wakeup_port(struct mod_port_t *port)
{
	struct mod_stack_t *stack;
	int event;

	while (port->waiting_list_head)
	{
		stack = port->waiting_list_head;
		event = stack->waiting_list_event;
		DOUBLE_LINKED_LIST_REMOVE(port, waiting, stack);
		esim_schedule_event(event, stack, 0);
	}
}

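/*
 * The wakeup routines above drain wait lists that are assumed to be populated
 * by matching "wait" helpers: an access records the event it wants scheduled
 * on wakeup and links itself into the target's wait list. The disabled sketch
 * below illustrates that producer side for the module case; the function name
 * and exact interface are assumptions made for illustration, not the
 * project's actual helper.
 */
#if 0
void mod_stack_wait_in_mod(struct mod_stack_t *stack, struct mod_t *mod, int event)
{
	/* Remember which event to schedule when the module wakes up this access */
	stack->waiting_list_event = event;

	/* Link the access at the tail of the module's wait list */
	DOUBLE_LINKED_LIST_INSERT_TAIL(mod, waiting, stack);
}
#endif
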
void X86ContextDestroy(X86Context *self)
{
	X86Emu *emu = self->emu;

	/* If context is not finished/zombie, finish it first.
	 * This removes all references to current freed context. */
	if (!X86ContextGetState(self, X86ContextFinished | X86ContextZombie))
		X86ContextFinish(self, 0);

	/* Remove context from finished contexts list. This should
	 * be the only list the context is in right now. */
	assert(!DOUBLE_LINKED_LIST_MEMBER(emu, running, self));
	assert(!DOUBLE_LINKED_LIST_MEMBER(emu, suspended, self));
	assert(!DOUBLE_LINKED_LIST_MEMBER(emu, zombie, self));
	assert(DOUBLE_LINKED_LIST_MEMBER(emu, finished, self));
	DOUBLE_LINKED_LIST_REMOVE(emu, finished, self);

	/* Free private structures */
	x86_regs_free(self->regs);
	x86_regs_free(self->backup_regs);
	x86_signal_mask_table_free(self->signal_mask_table);
	spec_mem_free(self->spec_mem);
	bit_map_free(self->affinity);

	/* Unlink shared structures */
	x86_loader_unlink(self->loader);
	x86_signal_handler_table_unlink(self->signal_handler_table);
	x86_file_desc_table_unlink(self->file_desc_table);
	mem_unlink(self->mem);

	/* Remove context from contexts list and free */
	DOUBLE_LINKED_LIST_REMOVE(emu, context, self);
	X86ContextDebug("inst %lld: context %d freed\n",
		asEmu(emu)->instructions, self->pid);

	/* Static instruction */
	delete_static(&self->inst);
}

void si_ndrange_free(struct si_ndrange_t *ndrange)
{
	int i;

	/* Run free notify call-back */
	if (ndrange->free_notify_func)
		ndrange->free_notify_func(ndrange->free_notify_data);

	/* Clear all states that affect lists. */
	si_ndrange_clear_status(ndrange, si_ndrange_pending);
	si_ndrange_clear_status(ndrange, si_ndrange_running);
	si_ndrange_clear_status(ndrange, si_ndrange_finished);

	/* Extract from ND-Range list in Southern Islands emulator */
	assert(DOUBLE_LINKED_LIST_MEMBER(si_emu, ndrange, ndrange));
	DOUBLE_LINKED_LIST_REMOVE(si_emu, ndrange, ndrange);

	/* Free work-groups */
	for (i = 0; i < ndrange->work_group_count; i++)
		si_work_group_free(ndrange->work_groups[i]);
	free(ndrange->work_groups);

	/* Free wavefronts */
	for (i = 0; i < ndrange->wavefront_count; i++)
	{
		si_wavefront_free(ndrange->wavefronts[i]);
		si_work_item_free(ndrange->scalar_work_items[i]);
	}
	free(ndrange->wavefronts);
	free(ndrange->scalar_work_items);

	/* Free work-items */
	for (i = 0; i < ndrange->work_item_count; i++)
		si_work_item_free(ndrange->work_items[i]);
	free(ndrange->work_items);

	/* Free instruction histogram */
	if (ndrange->inst_histogram)
		free(ndrange->inst_histogram);

	/* Free instruction buffer */
	if (ndrange->inst_buffer)
		free(ndrange->inst_buffer);

	/* Free ndrange */
	free(ndrange->name);
	free(ndrange);
}

void x86_emu_list_remove(enum x86_emu_list_kind_t list, struct x86_ctx_t *ctx)
{
	assert(x86_emu_list_member(list, ctx));
	switch (list)
	{
	case x86_emu_list_context:
		DOUBLE_LINKED_LIST_REMOVE(x86_emu, context, ctx);
		break;

	case x86_emu_list_running:
		DOUBLE_LINKED_LIST_REMOVE(x86_emu, running, ctx);
		break;

	case x86_emu_list_finished:
		DOUBLE_LINKED_LIST_REMOVE(x86_emu, finished, ctx);
		break;

	case x86_emu_list_zombie:
		DOUBLE_LINKED_LIST_REMOVE(x86_emu, zombie, ctx);
		break;

	case x86_emu_list_suspended:
		DOUBLE_LINKED_LIST_REMOVE(x86_emu, suspended, ctx);
		break;

	case x86_emu_list_alloc:
		DOUBLE_LINKED_LIST_REMOVE(x86_emu, alloc, ctx);
		break;
	}
}

void MIPSEmuListRemove(MIPSEmu *self, enum mips_emu_list_kind_t list,
	struct mips_ctx_t *ctx)
{
	assert(MIPSEmuListMember(self, list, ctx));
	switch (list)
	{
	case mips_emu_list_context:
		DOUBLE_LINKED_LIST_REMOVE(self, context, ctx);
		break;

	case mips_emu_list_running:
		DOUBLE_LINKED_LIST_REMOVE(self, running, ctx);
		break;

	case mips_emu_list_finished:
		DOUBLE_LINKED_LIST_REMOVE(self, finished, ctx);
		break;

	case mips_emu_list_zombie:
		DOUBLE_LINKED_LIST_REMOVE(self, zombie, ctx);
		break;

	case mips_emu_list_suspended:
		DOUBLE_LINKED_LIST_REMOVE(self, suspended, ctx);
		break;

	case mips_emu_list_alloc:
		DOUBLE_LINKED_LIST_REMOVE(self, alloc, ctx);
		break;
	}
}

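/*
 * MIPSEmuListRemove and x86_emu_list_remove above dispatch on the list kind;
 * insertion is assumed to follow the same dispatch pattern. The disabled
 * function below is a hypothetical illustration of that counterpart for the
 * MIPS case (name and existence are assumptions, not the project's API).
 */
#if 0
static void mips_emu_list_insert_tail_sketch(MIPSEmu *self,
	enum mips_emu_list_kind_t list, struct mips_ctx_t *ctx)
{
	/* A context must not already be in the list it is inserted into */
	assert(!MIPSEmuListMember(self, list, ctx));
	switch (list)
	{
	case mips_emu_list_context:
		DOUBLE_LINKED_LIST_INSERT_TAIL(self, context, ctx);
		break;

	case mips_emu_list_running:
		DOUBLE_LINKED_LIST_INSERT_TAIL(self, running, ctx);
		break;

	case mips_emu_list_finished:
		DOUBLE_LINKED_LIST_INSERT_TAIL(self, finished, ctx);
		break;

	case mips_emu_list_zombie:
		DOUBLE_LINKED_LIST_INSERT_TAIL(self, zombie, ctx);
		break;

	case mips_emu_list_suspended:
		DOUBLE_LINKED_LIST_INSERT_TAIL(self, suspended, ctx);
		break;

	case mips_emu_list_alloc:
		DOUBLE_LINKED_LIST_INSERT_TAIL(self, alloc, ctx);
		break;
	}
}
#endif
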
void evg_ndrange_free(struct evg_ndrange_t *ndrange)
{
	int i;

	/* Clear task from command queue */
	if (ndrange->command_queue && ndrange->command)
	{
		evg_opencl_command_queue_complete(ndrange->command_queue, ndrange->command);
		evg_opencl_command_free(ndrange->command);
	}

	/* Clear all states that affect lists. */
	evg_ndrange_clear_status(ndrange, evg_ndrange_pending);
	evg_ndrange_clear_status(ndrange, evg_ndrange_running);
	evg_ndrange_clear_status(ndrange, evg_ndrange_finished);

	/* Extract from ND-Range list in Evergreen emulator */
	assert(DOUBLE_LINKED_LIST_MEMBER(evg_emu, ndrange, ndrange));
	DOUBLE_LINKED_LIST_REMOVE(evg_emu, ndrange, ndrange);

	/* Free work-groups */
	for (i = 0; i < ndrange->work_group_count; i++)
		evg_work_group_free(ndrange->work_groups[i]);
	free(ndrange->work_groups);

	/* Free wavefronts */
	for (i = 0; i < ndrange->wavefront_count; i++)
		evg_wavefront_free(ndrange->wavefronts[i]);
	free(ndrange->wavefronts);

	/* Free work-items */
	for (i = 0; i < ndrange->work_item_count; i++)
		evg_work_item_free(ndrange->work_items[i]);
	free(ndrange->work_items);

	/* Free instruction histogram */
	if (ndrange->inst_histogram)
		free(ndrange->inst_histogram);

	/* Free ND-Range */
	free(ndrange->name);
	free(ndrange);
}

static void X86ContextUpdateState(X86Context *self, X86ContextState state)
{
	X86Emu *emu = self->emu;
	X86ContextState status_diff;
	char state_str[MAX_STRING_SIZE];

	/* Remove context from the following lists:
	 * running, suspended, zombie, finished */
	if (DOUBLE_LINKED_LIST_MEMBER(emu, running, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, running, self);
	if (DOUBLE_LINKED_LIST_MEMBER(emu, suspended, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, suspended, self);
	if (DOUBLE_LINKED_LIST_MEMBER(emu, zombie, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, zombie, self);
	if (DOUBLE_LINKED_LIST_MEMBER(emu, finished, self))
		DOUBLE_LINKED_LIST_REMOVE(emu, finished, self);

	/* If the difference between the old and new state involves any bit
	 * other than 'X86ContextSpecMode', mark a reschedule. */
	status_diff = self->state ^ state;
	if (status_diff & ~X86ContextSpecMode)
		emu->schedule_signal = 1;

	/* Update state */
	self->state = state;
	if (self->state & X86ContextFinished)
		self->state = X86ContextFinished
			| (state & X86ContextAlloc)
			| (state & X86ContextMapped);
	if (self->state & X86ContextZombie)
		self->state = X86ContextZombie
			| (state & X86ContextAlloc)
			| (state & X86ContextMapped);
	if (!(self->state & X86ContextSuspended)
		&& !(self->state & X86ContextFinished)
		&& !(self->state & X86ContextZombie)
		&& !(self->state & X86ContextLocked))
		self->state |= X86ContextRunning;
	else
		self->state &= ~X86ContextRunning;

	/* Insert context into the corresponding lists. */
	if (self->state & X86ContextRunning)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, running, self);
	if (self->state & X86ContextZombie)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, zombie, self);
	if (self->state & X86ContextFinished)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, finished, self);
	if (self->state & X86ContextSuspended)
		DOUBLE_LINKED_LIST_INSERT_HEAD(emu, suspended, self);

	/* Dump new state (ignore 'X86ContextSpecMode', it's too frequent) */
	if (debug_status(x86_context_debug_category)
		&& (status_diff & ~X86ContextSpecMode))
	{
		str_map_flags(&x86_context_state_map, self->state,
			state_str, sizeof state_str);
		X86ContextDebug("inst %lld: ctx %d changed state to %s\n",
			asEmu(emu)->instructions, self->pid, state_str);
	}

	/* Start/stop x86 timer depending on whether there are any contexts
	 * currently running. */
	if (emu->running_list_count)
		m2s_timer_start(asEmu(emu)->timer);
	else
		m2s_timer_stop(asEmu(emu)->timer);
}
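
/*
 * X86ContextUpdateState above is static; callers are assumed to reach it
 * through thin set/clear wrappers that compute the new state word from the
 * current one. The disabled pair below is a minimal sketch of that
 * convention, not necessarily the exact wrappers in the project.
 */
#if 0
void X86ContextSetState(X86Context *self, X86ContextState state)
{
	/* Add the given bits, then recompute list membership */
	X86ContextUpdateState(self, self->state | state);
}

void X86ContextClearState(X86Context *self, X86ContextState state)
{
	/* Remove the given bits, then recompute list membership */
	X86ContextUpdateState(self, self->state & ~state);
}
#endif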