/* Try to inject a message into the network. The availability check is done
 * first via 'net_can_send_ev' (which receives 'retry_event'/'retry_stack',
 * presumably to schedule a retry when the network is busy — confirm against
 * its definition); on failure NULL is returned and no message is sent.
 * Otherwise the message is injected with 'net_send_ev', which will notify
 * 'receive_event' with 'receive_stack' upon delivery. */
struct net_msg_t *net_try_send_ev(struct net_t *net, struct net_node_t *src_node,
	struct net_node_t *dst_node, int size, int receive_event,
	void *receive_stack, int retry_event, void *retry_stack)
{
	int can_send;

	/* Network availability check - bail out early when the message
	 * cannot be accepted right now. */
	can_send = net_can_send_ev(net, src_node, dst_node, size,
		retry_event, retry_stack);
	if (!can_send)
		return NULL;

	/* Network is available - inject the message. */
	return net_send_ev(net, src_node, dst_node, size,
		receive_event, receive_stack);
}
/* Event handler for externally injected network commands (used by the
 * stand-alone network test driver). On EV_NET_COMMAND it parses one command
 * line from 'stack->command' and either performs a "Send", verifies a
 * "Receive", or runs one of the position checks ("InBufferCheck",
 * "OutBufferCheck", "NodeCheck", "ExactPosCheck"), printing a
 * passed/failed verdict to stderr. On EV_NET_COMMAND_RCV it completes the
 * reception of a message previously sent by a "Send" command. */
void net_command_handler(int event, void *data)
{
	struct net_command_stack_t *stack= data;
	struct net_t *net = stack->net;

	/* Buffers for the one-line check summary and the failure details. */
	char out_msg[MAX_STRING_SIZE];
	char msg_detail[MAX_STRING_SIZE];
	char *msg_str = out_msg;
	int msg_size = sizeof out_msg;
	char *msg_detail_str = msg_detail;
	int msg_detail_size = sizeof msg_detail;
	int test_failed;

	test_failed = 0;
	*msg_str = '\0';
	*msg_detail_str = '\0';

	long long cycle = esim_domain_cycle(net_domain_index);

	if (event == EV_NET_COMMAND)
	{
		char *command_line = stack->command;
		char command[MAX_STRING_SIZE];
		struct list_t *token_list;

		/* Split command in tokens, skip command */
		token_list = str_token_list_create(command_line, " ");
		assert(list_count(token_list));

		/* First token is the cycle the command applies to. If it lies
		 * in the future, re-schedule this same event for then. */
		long long command_cycle = net_command_get_llint(token_list,
			command_line, "cycle value");
		if (command_cycle > cycle)
		{
			str_token_list_free(token_list);
			esim_schedule_event(event, stack, command_cycle - cycle);
			return;
		}

		/* Second token is the command name. */
		net_command_get_string(token_list, command_line, command,
			sizeof command);
		if (!strcasecmp(command, "Send"))
		{
			/* NOTE(review): this 'msg_size' shadows the outer
			 * 'msg_size' used by str_printf above; harmless here
			 * because the Send branch produces no check output. */
			int msg_size;
			long long int msg_id;
			struct net_node_t *src_node;
			struct net_node_t *dst_node;

			/* Getting the Source Node from the command */
			src_node = net_command_get_node(net, token_list,
				command_line, net_node_end);

			/* Getting the Destination Node from the command */
			dst_node = net_command_get_node(net, token_list,
				command_line, net_node_end);

			/* Getting the Message Size; or Default values */
			msg_size = (int) net_command_get_def(token_list,
				command_line, "Message size value",
				(long long) net_msg_size);

			/* Getting the Message ID; or default value. The
			 * driver requires ids to appear in sequence. */
			msg_id = net_command_get_def(token_list, command_line,
				"Message id value", net->msg_id_counter + 1);
			if (msg_id != net->msg_id_counter + 1)
			{
				fatal("%s: The message id is out of order. "
					"\n\t\"You are out o' order. "
					"The whole Trial is out o' order \" \n"
					"\tBear with us. We are being "
					"cute here \n\t> %s",
					__FUNCTION__, command_line);
			}

			/* Inject the message; EV_NET_COMMAND_RCV will fire
			 * on this same stack when it is delivered. */
			stack->msg = net_send_ev(net, src_node, dst_node,
				msg_size, EV_NET_COMMAND_RCV, stack);
			fprintf(stderr, "\n Message %lld sent at %lld \n\n",
				msg_id, cycle);
		}
		else if (!strcasecmp(command, "Receive"))
		{
			long long msg_id;
			struct net_node_t *dst_node;
			struct net_msg_t *msg;

			dst_node = net_command_get_node(net, token_list,
				command_line, net_node_end);
			msg_id = net_command_get_llint(token_list, command_line,
				"Message id value");

			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: Receive in node %s for message"
				" %lld ", cycle, dst_node->name, msg_id);

			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			/* Checking : the node indicated in the receive command
			 * is the destination node for the message */
			else
			{
				if (dst_node != msg->dst_node)
					warning("%s: The node %s in the receive "
						"command is \n\tnot intended node "
						"%s in the send command\n > %s",
						__FUNCTION__, dst_node->name,
						msg->dst_node->name, command_line);

				/* Message must currently reside in the node
				 * named by the command... */
				if (msg->node != dst_node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\t Message expected to be "
						"in node %s, but found in %s\n",
						dst_node->name, msg->node->name);
				}
				/* ...and be at the head of its buffer, i.e.
				 * actually ready to be received. */
				else if (msg != list_get(msg->buffer->msg_list, 0))
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage expected to be "
						"ready for receive in node %s \n\tbut "
						"not in the buffer head\n",
						dst_node->name);
				}
			}

			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg,
				test_failed ? "failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}
		else if (!strcasecmp(command, "InBufferCheck"))
		{
			long long int msg_id;
			struct net_msg_t *msg;
			struct net_node_t *node;
			struct net_buffer_t *buffer;

			node = net_command_get_node(net, token_list,
				command_line, 0);
			msg_id = net_command_get_llint(token_list, command_line,
				"invalid message id");

			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in input buffer of "
				"node %s", cycle, msg_id, node->name);

			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				buffer = msg->buffer;

				/* Message must be in the named node, and its
				 * buffer must be one of the node's input
				 * buffers (looked up by buffer index). */
				if (msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage expected to be in node %s"
						"but found in node %s \n",
						node->name, msg->node->name);
				}
				else if (buffer != list_get(node->input_buffer_list,
					buffer->index))
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\t Message is not in any of input "
						"buffers of the node %s\n",
						node->name);
				}
			}

			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg,
				test_failed ? "failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}
		else if (!strcasecmp(command, "OutBufferCheck"))
		{
			long long int msg_id;
			struct net_msg_t *msg;
			struct net_node_t *node;
			struct net_buffer_t *buffer;

			node = net_command_get_node(net, token_list,
				command_line, 0);
			msg_id = net_command_get_llint(token_list, command_line,
				"invalid message id");

			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in one of output buffers"
				" of node %s", cycle, msg_id, node->name);

			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				buffer = msg->buffer;

				/* Same structure as InBufferCheck, but against
				 * the node's output buffer list. */
				if (msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage expected to be in node %s"
						"but found in node %s\n",
						node->name, msg->node->name);
				}
				else if (buffer != list_get(node->output_buffer_list,
					buffer->index))
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\t Message is not in any of output"
						"buffers of the node %s\n",
						node->name);
				}
			}

			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg,
				test_failed ? "failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}
		else if (!strcasecmp(command, "NodeCheck"))
		{
			long long int msg_id;
			struct net_msg_t *msg;
			struct net_node_t *node;

			node = net_command_get_node(net, token_list,
				command_line, 0);
			msg_id = net_command_get_llint(token_list, command_line,
				"invalid message id");

			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in node %s",
				cycle, msg_id, node->name);

			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				/* Only the current node is checked here - any
				 * buffer within that node is acceptable. */
				if (msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"\tMessage is not in the node %s\n",
						node->name);
				}
			}

			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg,
				test_failed ? "failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}
		else if (!strcasecmp(command, "ExactPosCheck"))
		{
			long long int msg_id;
			struct net_msg_t *msg;
			struct net_node_t *node;
			struct net_buffer_t *buffer;

			node = net_command_get_node(net, token_list,
				command_line, 0);
			buffer = net_command_get_buffer(node, token_list,
				command_line);
			msg_id = net_command_get_llint(token_list, command_line,
				"invalid message id");

			/* Output */
			str_printf(&msg_str, &msg_size,
				"CHECK: Cycle %lld: message %lld is in buffer %s"
				" of node %s", cycle, msg_id, buffer->name,
				node->name);

			/* Checks */
			msg = net_msg_table_get(net, msg_id);
			if (!msg)
			{
				test_failed = 1;
				str_printf(&msg_detail_str, &msg_detail_size,
					"\t Message is either invalid, "
					"not yet send or already received\n");
			}
			else
			{
				/* Strictest check: both the exact buffer and
				 * the node must match. */
				if (msg->buffer != buffer || msg->node != node)
				{
					test_failed = 1;
					str_printf(&msg_detail_str, &msg_detail_size,
						"Message expected to be in buffer %s"
						"(node %s) \n\tbut found in buffer %s"
						"(node %s)\n", buffer->name,
						node->name, msg->buffer->name,
						msg->node->name);
				}
			}

			/* Output */
			fprintf(stderr, ">>> %s - %s\n", out_msg,
				test_failed ? "failed" : "passed");
			fprintf(stderr, "%s", msg_detail);
			net_command_stack_return(stack);
		}
		else
			fatal("%s: %s: invalid command.\n\t> %s",
				__FUNCTION__, command, command_line);

		/* Command fully processed - release the command line and the
		 * token list. NOTE(review): 'command_line' aliases
		 * 'stack->command'; freeing it here assumes the stack does
		 * not free it again - confirm against net_command_stack_return. */
		free(command_line);
		str_token_list_free(token_list);
	}
	else if (event == EV_NET_COMMAND_RCV)
	{
		struct net_msg_t *msg;
		struct net_node_t *dst_node;

		/* Message injected by a "Send" command has arrived at its
		 * destination - drain it from the network. */
		msg = stack->msg;
		dst_node = msg->dst_node;
		assert(dst_node == msg->node);
		fprintf(stderr, "\n Message %lld received at %lld \n\n",
			msg->id, cycle);
		net_receive(net, dst_node, msg);
		net_command_stack_return(stack);
	}
}
/* Event chain handling a MOESI read request between two adjacent cache
 * levels. The request may travel up-down (requester 'ccache' asks its lower
 * level 'target') or down-up (lower level asks an upper-level owner for the
 * data back). The chain: REQUEST -> RECEIVE (find-and-lock) -> ACTION ->
 * UPDOWN[/MISS]/DOWNUP -> *_FINISH -> REPLY -> FINISH. Return values for the
 * parent stack: 'ret->err' (lock failure, only possible up-down) and
 * 'ret->shared' (block is shared by another cache). */
void moesi_handler_read_request(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	uint32_t dir_entry_tag, z;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;

	if (event == EV_MOESI_READ_REQUEST)
	{
		struct net_t *net;
		int src, dest;

		cache_debug(" %lld %lld 0x%x %s read request\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Default return values */
		ret->shared = 0;
		ret->err = 0;

		/* Send request to target. Direction decides which network
		 * and node ids to use: up-down goes through ccache's lower
		 * network, down-up through its higher network. The request
		 * message itself is 8 bytes. */
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? ccache->loid : 0;
		dest = ccache->next == target ? 0 : target->loid;
		net_send_ev(net, src, dest, 8, EV_MOESI_READ_REQUEST_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_RECEIVE)
	{
		cache_debug(" %lld %lld 0x%x %s read request receive\n", CYCLE, ID,
			stack->addr, target->name);

		/* Find and lock the block in 'target'. Down-up requests use a
		 * blocking lock to avoid deadlock with the opposite direction. */
		newstack = moesi_stack_create(stack->id, target, stack->addr,
			EV_MOESI_READ_REQUEST_ACTION, stack);
		newstack->blocking = target->next == ccache;
		newstack->read = 1;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_ACTION)
	{
		cache_debug(" %lld %lld 0x%x %s read request action\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check block locking error. If read request is down-up, there should
		 * not have been any error while locking. */
		if (stack->err)
		{
			assert(ccache->next == target);
			ret->err = 1;
			stack->response = 8;
			esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Dispatch on request direction. */
		esim_schedule_event(ccache->next == target ?
			EV_MOESI_READ_REQUEST_UPDOWN :
			EV_MOESI_READ_REQUEST_DOWNUP, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN)
	{
		struct ccache_t *owner;

		cache_debug(" %lld %lld 0x%x %s read request updown\n", CYCLE, ID,
			stack->tag, target->name);

		/* 'pending' counts this event plus each forwarded sub-request;
		 * UPDOWN_FINISH only proceeds when it drops to zero. */
		stack->pending = 1;

		if (stack->status)
		{
			/* Status = M/O/E/S
			 * Check: addr multiple of requester's bsize
			 * Check: no subblock requested by ccache is already owned by ccache */
			assert(stack->addr % ccache->bsize == 0);
			dir = ccache_get_dir(target, stack->tag);
			for (z = 0; z < dir->zsize; z++)
			{
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (dir_entry_tag < stack->addr ||
					dir_entry_tag >= stack->addr + ccache->bsize)
					continue;
				dir_entry = ccache_get_dir_entry(target,
					stack->set, stack->way, z);
				assert(dir_entry->owner != ccache->loid);
			}

			/* Send read request to owners other than ccache for all
			 * subblocks. One request per owner, issued only on the
			 * first subblock falling in the owner's block. */
			for (z = 0; z < dir->zsize; z++)
			{
				dir_entry = ccache_get_dir_entry(target,
					stack->set, stack->way, z);
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (!dir_entry->owner)  /* no owner */
					continue;
				if (dir_entry->owner == ccache->loid)  /* owner is ccache */
					continue;
				owner = net_get_node_data(target->hinet,
					dir_entry->owner);
				if (dir_entry_tag % owner->bsize)  /* not the first owner subblock */
					continue;

				/* Send read request */
				stack->pending++;
				newstack = moesi_stack_create(stack->id, target,
					dir_entry_tag,
					EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack);
				newstack->target = owner;
				esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
			}
			esim_schedule_event(EV_MOESI_READ_REQUEST_UPDOWN_FINISH,
				stack, 0);
		}
		else
		{
			/* Status = I: miss in 'target', forward the read request
			 * to the next lower level. */
			assert(!dir_entry_group_shared_or_owned(target->dir,
				stack->set, stack->way));
			newstack = moesi_stack_create(stack->id, target, stack->tag,
				EV_MOESI_READ_REQUEST_UPDOWN_MISS, stack);
			newstack->target = target->next;
			esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
		}
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN_MISS)
	{
		cache_debug(" %lld %lld 0x%x %s read request updown miss\n",
			CYCLE, ID, stack->tag, target->name);

		/* Check error - the lower-level request may have failed. */
		if (stack->err)
		{
			dir_lock_unlock(stack->dir_lock);
			ret->err = 1;
			stack->response = 8;
			esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Set block state to excl/shared depending on the return value 'shared'
		 * that comes from a read request into the next cache level.
		 * Also set the tag of the block. */
		cache_set_block(target->cache, stack->set, stack->way, stack->tag,
			stack->shared ? moesi_status_shared : moesi_status_exclusive);
		esim_schedule_event(EV_MOESI_READ_REQUEST_UPDOWN_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_UPDOWN_FINISH)
	{
		int shared;

		/* Ignore while pending requests */
		assert(stack->pending > 0);
		stack->pending--;
		if (stack->pending)
			return;

		cache_debug(" %lld %lld 0x%x %s read request updown finish\n",
			CYCLE, ID, stack->tag, target->name);

		/* Set owner to 0 for all directory entries not owned by ccache. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++)
		{
			dir_entry = ccache_get_dir_entry(target, stack->set,
				stack->way, z);
			if (dir_entry->owner != ccache->loid)
				dir_entry->owner = 0;
		}

		/* For each subblock requested by ccache, set ccache as sharer, and
		 * check whether there is other cache sharing it. */
		shared = 0;
		for (z = 0; z < dir->zsize; z++)
		{
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->addr ||
				dir_entry_tag >= stack->addr + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set,
				stack->way, z);
			dir_entry_set_sharer(dir, dir_entry, ccache->loid);
			if (dir_entry->sharers > 1)
				shared = 1;
		}

		/* If no subblock requested by ccache is shared by other cache, set ccache
		 * as owner of all of them. Otherwise, notify requester that the block is
		 * shared by setting the 'shared' return value to true. */
		ret->shared = shared;
		if (!shared)
		{
			for (z = 0; z < dir->zsize; z++)
			{
				dir_entry_tag = stack->tag + z * cache_min_block_size;
				if (dir_entry_tag < stack->addr ||
					dir_entry_tag >= stack->addr + ccache->bsize)
					continue;
				dir_entry = ccache_get_dir_entry(target,
					stack->set, stack->way, z);
				dir_entry->owner = ccache->loid;
			}
		}

		/* Respond with data, update LRU, unlock */
		stack->response = ccache->bsize + 8;
		if (target->cache)
			cache_access_block(target->cache, stack->set, stack->way);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_DOWNUP)
	{
		struct ccache_t *owner;

		cache_debug(" %lld %lld 0x%x %s read request downup\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check: status must not be invalid.
		 * By default, only one pending request.
		 * Response depends on status: E/S answer with the 8-byte
		 * control message; M/O must send the whole block back. */
		assert(stack->status != moesi_status_invalid);
		stack->pending = 1;
		stack->response = stack->status == moesi_status_exclusive ||
			stack->status == moesi_status_shared ?
			8 : target->bsize + 8;

		/* Send a read request to the owner of each subblock. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++)
		{
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			dir_entry = ccache_get_dir_entry(target, stack->set,
				stack->way, z);
			if (!dir_entry->owner)  /* no owner */
				continue;
			owner = net_get_node_data(target->hinet, dir_entry->owner);
			if (dir_entry_tag % owner->bsize)  /* not the first subblock */
				continue;
			stack->pending++;
			stack->response = target->bsize + 8;
			newstack = moesi_stack_create(stack->id, target,
				dir_entry_tag,
				EV_MOESI_READ_REQUEST_DOWNUP_FINISH, stack);
			newstack->target = owner;
			esim_schedule_event(EV_MOESI_READ_REQUEST, newstack, 0);
		}
		esim_schedule_event(EV_MOESI_READ_REQUEST_DOWNUP_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_DOWNUP_FINISH)
	{
		/* Ignore while pending requests */
		assert(stack->pending > 0);
		stack->pending--;
		if (stack->pending)
			return;

		cache_debug(" %lld %lld 0x%x %s read request downup finish\n",
			CYCLE, ID, stack->tag, target->name);

		/* Set owner of subblocks to 0. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++)
		{
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			dir_entry = ccache_get_dir_entry(target, stack->set,
				stack->way, z);
			dir_entry->owner = 0;
		}

		/* Set status to S, update LRU, unlock */
		cache_set_block(target->cache, stack->set, stack->way, stack->tag,
			moesi_status_shared);
		cache_access_block(target->cache, stack->set, stack->way);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_READ_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_REPLY)
	{
		struct net_t *net;
		int src, dest;

		cache_debug(" %lld %lld 0x%x %s read request reply\n", CYCLE, ID,
			stack->tag, target->name);
		assert(stack->response);

		/* Send the reply back over the same network, with source and
		 * destination roles reversed with respect to the request. */
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? 0 : target->loid;
		dest = ccache->next == target ? ccache->loid : 0;
		net_send_ev(net, src, dest, stack->response,
			EV_MOESI_READ_REQUEST_FINISH, stack);
		return;
	}

	if (event == EV_MOESI_READ_REQUEST_FINISH)
	{
		cache_debug(" %lld %lld 0x%x %s read request finish\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Return to the parent stack ('ret->err'/'ret->shared' set). */
		moesi_stack_return(stack);
		return;
	}

	/* Unknown event - internal error. */
	abort();
}
/* Event chain evicting a block (stack->set/way) from cache 'ccache' into its
 * lower level. Upper-level copies are invalidated first; M/O blocks are
 * written back (data-sized message), S/E blocks only notify (8 bytes). The
 * chain: EVICT -> ACTION -> RECEIVE (find-and-lock in lower level) ->
 * WRITEBACK[(_EXCLUSIVE)|_FINISH] -> PROCESS -> REPLY -> REPLY_RECEIVE ->
 * FINISH. 'ret->err' reports a lock failure in the lower level. */
void moesi_handler_evict(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;
	uint32_t dir_entry_tag, z;

	if (event == EV_MOESI_EVICT)
	{
		/* Default ret value */
		ret->err = 0;

		/* Get block info */
		ccache_get_block(ccache, stack->set, stack->way, &stack->tag,
			&stack->status);
		assert(stack->status || !dir_entry_group_shared_or_owned(
			ccache->dir, stack->set, stack->way));
		cache_debug(" %lld %lld 0x%x %s evict (set=%d, way=%d, status=%d)\n",
			CYCLE, ID, stack->tag, ccache->name, stack->set,
			stack->way, stack->status);

		/* Save some data. The source coordinates are kept because
		 * set/way/tag get reused for the lower-level block later. */
		stack->src_set = stack->set;
		stack->src_way = stack->way;
		stack->src_tag = stack->tag;
		stack->target = target = ccache->next;

		/* Send write request to all sharers (invalidate upper levels
		 * before giving up the block). */
		newstack = moesi_stack_create(stack->id, ccache, 0,
			EV_MOESI_EVICT_ACTION, stack);
		newstack->except = NULL;
		newstack->set = stack->set;
		newstack->way = stack->way;
		esim_schedule_event(EV_MOESI_INVALIDATE, newstack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_ACTION)
	{
		cache_debug(" %lld %lld 0x%x %s evict action\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* status = I: nothing to transfer, finish right away. */
		if (stack->status == moesi_status_invalid)
		{
			esim_schedule_event(EV_MOESI_EVICT_FINISH, stack, 0);
			return;
		}

		/* status = M/O: dirty block, send data and flag writeback. */
		if (stack->status == moesi_status_modified ||
			stack->status == moesi_status_owned)
		{
			net_send_ev(ccache->lonet, ccache->loid, 0,
				ccache->bsize + 8, EV_MOESI_EVICT_RECEIVE, stack);
			stack->writeback = 1;
			return;
		}

		/* status = S/E: clean block, notification only. */
		net_send_ev(ccache->lonet, ccache->loid, 0, 8,
			EV_MOESI_EVICT_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_EVICT_RECEIVE)
	{
		cache_debug(" %lld %lld 0x%x %s evict receive\n", CYCLE, ID,
			stack->tag, target->name);

		/* Find and lock the block in the lower level. */
		newstack = moesi_stack_create(stack->id, target, stack->src_tag,
			EV_MOESI_EVICT_WRITEBACK, stack);
		newstack->blocking = 0;
		newstack->read = 0;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_WRITEBACK)
	{
		cache_debug(" %lld %lld 0x%x %s evict writeback\n", CYCLE, ID,
			stack->tag, target->name);

		/* Error locking block */
		if (stack->err)
		{
			ret->err = 1;
			esim_schedule_event(EV_MOESI_EVICT_REPLY, stack, 0);
			return;
		}

		/* No writeback */
		if (!stack->writeback)
		{
			esim_schedule_event(EV_MOESI_EVICT_PROCESS, stack, 0);
			return;
		}

		/* Writeback: invalidate other upper-level copies of the
		 * lower-level block (all sharers except 'ccache'). */
		newstack = moesi_stack_create(stack->id, target, 0,
			EV_MOESI_EVICT_WRITEBACK_EXCLUSIVE, stack);
		newstack->except = ccache;
		newstack->set = stack->set;
		newstack->way = stack->way;
		esim_schedule_event(EV_MOESI_INVALIDATE, newstack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_WRITEBACK_EXCLUSIVE)
	{
		cache_debug(" %lld %lld 0x%x %s evict writeback exclusive\n",
			CYCLE, ID, stack->tag, target->name);

		/* Status = O/S/I: lower-level block is not exclusive; gain
		 * write permission from the next level first. */
		assert(stack->status != moesi_status_invalid);
		if (stack->status == moesi_status_owned ||
			stack->status == moesi_status_shared)
		{
			newstack = moesi_stack_create(stack->id, target,
				stack->tag, EV_MOESI_EVICT_WRITEBACK_FINISH, stack);
			newstack->target = target->next;
			esim_schedule_event(EV_MOESI_WRITE_REQUEST, newstack, 0);
			return;
		}

		/* Status = M/E: already exclusive, writeback can proceed. */
		esim_schedule_event(EV_MOESI_EVICT_WRITEBACK_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_WRITEBACK_FINISH)
	{
		cache_debug(" %lld %lld 0x%x %s evict writeback finish\n",
			CYCLE, ID, stack->tag, target->name);

		/* Error in write request */
		if (stack->err)
		{
			ret->err = 1;
			dir_lock_unlock(stack->dir_lock);
			esim_schedule_event(EV_MOESI_EVICT_REPLY, stack, 0);
			return;
		}

		/* Set tag, status and lru: block in the lower level becomes
		 * modified after absorbing the written-back data. */
		if (target->cache)
		{
			cache_set_block(target->cache, stack->set, stack->way,
				stack->tag, moesi_status_modified);
			cache_access_block(target->cache, stack->set, stack->way);
		}
		esim_schedule_event(EV_MOESI_EVICT_PROCESS, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_PROCESS)
	{
		cache_debug(" %lld %lld 0x%x %s evict process\n", CYCLE, ID,
			stack->tag, target->name);

		/* Remove sharer, owner, and unlock: clear ccache from the
		 * lower-level directory entries covering the evicted block. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++)
		{
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->src_tag ||
				dir_entry_tag >= stack->src_tag + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set,
				stack->way, z);
			dir_entry_clear_sharer(dir, dir_entry, ccache->loid);
			if (dir_entry->owner == ccache->loid)
				dir_entry->owner = 0;
		}
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_EVICT_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_REPLY)
	{
		cache_debug(" %lld %lld 0x%x %s evict reply\n", CYCLE, ID,
			stack->tag, target->name);

		/* Acknowledge the evicting cache over the higher network. */
		net_send_ev(target->hinet, 0, ccache->loid, 8,
			EV_MOESI_EVICT_REPLY_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_EVICT_REPLY_RECEIVE)
	{
		cache_debug(" %lld %lld 0x%x %s evict reply receive\n", CYCLE, ID,
			stack->tag, ccache->name);

		/* Invalidate block if there was no error. */
		if (!stack->err)
			cache_set_block(ccache->cache, stack->src_set,
				stack->src_way, 0, moesi_status_invalid);
		assert(!dir_entry_group_shared_or_owned(ccache->dir,
			stack->src_set, stack->src_way));
		esim_schedule_event(EV_MOESI_EVICT_FINISH, stack, 0);
		return;
	}

	if (event == EV_MOESI_EVICT_FINISH)
	{
		cache_debug(" %lld %lld 0x%x %s evict finish\n", CYCLE, ID,
			stack->tag, ccache->name);
		moesi_stack_return(stack);
		return;
	}

	/* Unknown event - internal error. */
	abort();
}
/* Event chain handling a MOESI write request between two adjacent cache
 * levels (requester 'ccache', provider 'target'). Either up-down (gain
 * exclusive ownership from the lower level) or down-up (revoke an upper
 * level's copy). The chain: REQUEST -> RECEIVE (find-and-lock) -> ACTION ->
 * EXCLUSIVE (invalidate other sharers) -> UPDOWN[/FINISH] or DOWNUP ->
 * REPLY -> FINISH. 'ret->err' reports a lock/write failure (up-down only). */
void moesi_handler_write_request(int event, void *data)
{
	struct moesi_stack_t *stack = data, *ret = stack->retstack, *newstack;
	struct ccache_t *ccache = stack->ccache, *target = stack->target;
	struct dir_t *dir;
	struct dir_entry_t *dir_entry;
	uint32_t dir_entry_tag, z;

	if (event == EV_MOESI_WRITE_REQUEST)
	{
		struct net_t *net;
		int src, dest;

		cache_debug(" %lld %lld 0x%x %s write request\n", CYCLE, ID,
			stack->addr, ccache->name);

		/* Default return values */
		ret->err = 0;

		/* Send request to target. Network and node ids depend on the
		 * request direction; the request message is 8 bytes. */
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? ccache->loid : 0;
		dest = ccache->next == target ? 0 : target->loid;
		net_send_ev(net, src, dest, 8, EV_MOESI_WRITE_REQUEST_RECEIVE, stack);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_RECEIVE)
	{
		cache_debug(" %lld %lld 0x%x %s write request receive\n", CYCLE, ID,
			stack->addr, target->name);

		/* Find and lock the block in 'target'; down-up requests use a
		 * blocking lock. */
		newstack = moesi_stack_create(stack->id, target, stack->addr,
			EV_MOESI_WRITE_REQUEST_ACTION, stack);
		newstack->blocking = target->next == ccache;
		newstack->read = 0;
		newstack->retry = 0;
		esim_schedule_event(EV_MOESI_FIND_AND_LOCK, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_ACTION)
	{
		cache_debug(" %lld %lld 0x%x %s write request action\n", CYCLE, ID,
			stack->tag, target->name);

		/* Check lock error. If write request is down-up, there should
		 * have been no error. */
		if (stack->err)
		{
			assert(ccache->next == target);
			ret->err = 1;
			stack->response = 8;
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Invalidate the rest of upper level sharers */
		newstack = moesi_stack_create(stack->id, target, 0,
			EV_MOESI_WRITE_REQUEST_EXCLUSIVE, stack);
		newstack->except = ccache;
		newstack->set = stack->set;
		newstack->way = stack->way;
		esim_schedule_event(EV_MOESI_INVALIDATE, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_EXCLUSIVE)
	{
		cache_debug(" %lld %lld 0x%x %s write request exclusive\n",
			CYCLE, ID, stack->tag, target->name);

		/* Dispatch on request direction. */
		if (ccache->next == target)
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_UPDOWN, stack, 0);
		else
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_DOWNUP, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_UPDOWN)
	{
		cache_debug(" %lld %lld 0x%x %s write request updown\n", CYCLE, ID,
			stack->tag, target->name);

		/* status = M/E: target already has exclusive ownership. */
		if (stack->status == moesi_status_modified ||
			stack->status == moesi_status_exclusive)
		{
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH,
				stack, 0);
			return;
		}

		/* status = O/S/I: must first gain exclusivity from the next
		 * lower level via a recursive write request. */
		newstack = moesi_stack_create(stack->id, target, stack->tag,
			EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH, stack);
		newstack->target = target->next;
		esim_schedule_event(EV_MOESI_WRITE_REQUEST, newstack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_UPDOWN_FINISH)
	{
		cache_debug(" %lld %lld 0x%x %s write request updown finish\n",
			CYCLE, ID, stack->tag, target->name);

		/* Error in write request to next cache level */
		if (stack->err)
		{
			ret->err = 1;
			stack->response = 8;
			dir_lock_unlock(stack->dir_lock);
			esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
			return;
		}

		/* Check that addr is a multiple of ccache.bsize.
		 * Set ccache as sharer and owner. After the earlier
		 * invalidation, ccache must be the only sharer left. */
		dir = ccache_get_dir(target, stack->tag);
		for (z = 0; z < dir->zsize; z++)
		{
			assert(stack->addr % ccache->bsize == 0);
			dir_entry_tag = stack->tag + z * cache_min_block_size;
			if (dir_entry_tag < stack->addr ||
				dir_entry_tag >= stack->addr + ccache->bsize)
				continue;
			dir_entry = ccache_get_dir_entry(target, stack->set,
				stack->way, z);
			dir_entry_set_sharer(dir, dir_entry, ccache->loid);
			dir_entry->owner = ccache->loid;
			assert(dir_entry->sharers == 1);
		}

		/* Update LRU, set status: M->M, O/E/S/I->E */
		if (target->cache)
		{
			cache_access_block(target->cache, stack->set, stack->way);
			if (stack->status != moesi_status_modified)
				cache_set_block(target->cache, stack->set,
					stack->way, stack->tag,
					moesi_status_exclusive);
		}

		/* Unlock, response is the data of the size of the requester's block. */
		dir_lock_unlock(stack->dir_lock);
		stack->response = ccache->bsize + 8;
		esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_DOWNUP)
	{
		cache_debug(" %lld %lld 0x%x %s write request downup\n", CYCLE, ID,
			stack->tag, target->name);

		/* Compute response, set status to I, unlock. M/O copies must
		 * send the data back; E/S only acknowledge (8 bytes). */
		assert(stack->status != moesi_status_invalid);
		assert(!dir_entry_group_shared_or_owned(target->dir,
			stack->set, stack->way));
		stack->response = stack->status == moesi_status_modified ||
			stack->status == moesi_status_owned ?
			target->bsize + 8 : 8;
		cache_set_block(target->cache, stack->set, stack->way, 0,
			moesi_status_invalid);
		dir_lock_unlock(stack->dir_lock);
		esim_schedule_event(EV_MOESI_WRITE_REQUEST_REPLY, stack, 0);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_REPLY)
	{
		struct net_t *net;
		int src, dest;

		cache_debug(" %lld %lld 0x%x %s write request reply\n", CYCLE, ID,
			stack->tag, target->name);
		assert(stack->response);

		/* Reply travels the same network with roles reversed. */
		assert(ccache->next == target || target->next == ccache);
		net = ccache->next == target ? ccache->lonet : ccache->hinet;
		src = ccache->next == target ? 0 : target->loid;
		dest = ccache->next == target ? ccache->loid : 0;
		net_send_ev(net, src, dest, stack->response,
			EV_MOESI_WRITE_REQUEST_FINISH, stack);
		return;
	}

	if (event == EV_MOESI_WRITE_REQUEST_FINISH)
	{
		cache_debug(" %lld %lld 0x%x %s write request finish\n", CYCLE, ID,
			stack->tag, ccache->name);
		moesi_stack_return(stack);
		return;
	}

	/* Unknown event - internal error. */
	abort();
}
/* Inject a message into the network with no completion notification.
 * The caller is responsible for first verifying, via 'net_can_send', that
 * the network can accept the message. Once the message reaches 'dst_node'
 * it is drained from its input buffer automatically, at which point the
 * 'net_msg_t' object returned here becomes invalid. */
struct net_msg_t *net_send(struct net_t *net, struct net_node_t *src_node,
	struct net_node_t *dst_node, int size)
{
	struct net_msg_t *msg;

	/* Delegate to the event-based variant, requesting no event
	 * (ESIM_EV_NONE) and passing no event stack. */
	msg = net_send_ev(net, src_node, dst_node, size, ESIM_EV_NONE, NULL);
	return msg;
}