void thread2() {
    mbox_t fromMe, to0, to1;
    int status;

    fromMe = make_mailbox(2, &status);
    to0 = get_mailbox(0);
    to1 = get_mailbox(1);
    send(fromMe, "Y", to1);
    send(fromMe, "Z", to0);
}
/* pre-condition:
 * tcb is waiting or receiving on another cpu
 */
dword_t smp_start_short_ipc(tcb_t * tcb, tcb_t * current)
{
    XIPC_PRINTF("sending start_short_ipc ipi (current=%p)\n", current);
    cpu_mailbox_t * mailbox = get_mailbox();
    mailbox->tcb = tcb;
    mailbox->param[0] = (dword_t)current;
    dword_t status = mailbox->send_command(tcb->cpu, SMP_CMD_IPC_SHORT);

    /*
     * ok - delivery can start now
     * partner cpu spins in mailbox loop and waits for message.
     */
    if (status == MAILBOX_OK)
        return 1;

    IPI_PRINTF("%d smp_start_short_ipc failed (%x (%d, %x) -> %x (%d, %x))\n",
               get_cpu_id(), current, current->cpu, current->thread_state,
               tcb, tcb->cpu, tcb->thread_state);

    /* ipc failed - check whether we have pending requests */
    IPI_PRINTF("pending requests = %x\n", mailbox->pending_requests);
    mailbox->handle_pending_requests();
    return 0;
}
int smp_delete_all_threads(space_t * space)
{
    //IPI_PRINTF("%s (%x)\n", __FUNCTION__, victim);
    cpu_mailbox_t * mailbox = get_mailbox();

    for (dword_t cpu = 0; cpu < CONFIG_SMP_MAX_CPU; cpu++)
    {
        if (cpu == get_cpu_id())
            continue;
        if (!is_cpu_online(cpu))
            continue;

        mailbox->param[0] = (dword_t)space;
        dword_t status = mailbox->send_command(cpu, SMP_CMD_DELETE_ALL_THREADS);

        switch(status) {
        case MAILBOX_OK:
            return 1;
        case MAILBOX_UNWIND_REMOTE:
            /* we have to perform a remote unwind */
            IPI_PRINTF("%s: remote unwind %x\n", __FUNCTION__, mailbox->tcb);
            unwind_ipc(mailbox->tcb);
            break;
        case MAILBOX_ERROR:
            enter_kdebug("smp_delete_task: error deleting task");
            break;
        default:
            enter_kdebug("smp_delete_task: unexpected return value");
            break;
        }
    }
    return 0;
}
/*
 * Join the ring
 */
static int join(node_t node)
{
  task_data_t req_data = xbt_new0(s_task_data_t, 1);
  req_data->type = TASK_JOIN;
  req_data->sender_id = node->id;
  req_data->answer_id = node->id;
  req_data->steps = 0;
  get_mailbox(node->id, req_data->answer_to);

  char mailbox[MAILBOX_NAME_SIZE];
  get_mailbox(node->known_id, mailbox);

  msg_task_t task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, req_data);
  XBT_DEBUG("Trying to join Pastry ring... (with node %s)", mailbox);
  MSG_task_send_with_timeout(task_sent, mailbox, timeout);
  return 1;
}
void thread1() {
    mbox_t me, to0;
    int status;
    msg_t C;

    me = make_mailbox(1, &status);
    to0 = get_mailbox(0);
    recv(me, C);
    send(me, "X", to0);
    printf("C=%s\n", C);
}
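/* Hypothetical counterpart to thread1()/thread2() above -- not part of the
 * original source. A minimal sketch, assuming the same toy mailbox API
 * (make_mailbox/recv), of the thread that owns mailbox 0 and drains the
 * "Z" message from thread2 and the "X" message from thread1, in whichever
 * order they arrive. */
void thread0() {
    mbox_t me;
    int status;
    msg_t A, B;

    me = make_mailbox(0, &status);
    recv(me, A);
    recv(me, B);
    printf("A=%s B=%s\n", A, B);
}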
/**
 * send message to given destination
 */
asmlinkage long sys_SendMsg(pid_t dest, void *a_msg, int len, bool block)
{
    pid_t my_pid = current->pid;
    void* msg;
    message* this_mail;
    mailbox* dest_mailbox;
    signal* dest_signal;
    struct task_struct* dest_ts;
    int existence;

    if ((len > MAX_MSG_SIZE) || (len < 0))
        return MSG_LENGTH_ERROR;

    /* allocate the kernel buffer only after validating the length,
       so an invalid length cannot leak the allocation */
    msg = new_msg();
    if (copy_from_user(msg, a_msg, len))
        return MSG_ARG_ERROR;

    /* check if destination is valid */
    if (dest <= 0)
        return MAILBOX_INVALID;

    /* find task struct for destination pid */
    dest_ts = pid_task(find_vpid(dest), PIDTYPE_PID); // find_task_by_vpid(dest);
    if (dest_ts == NULL)
        return MAILBOX_INVALID;

    /* state not 0 or kernel task: invalid destination */
    existence = dest_ts->state;
    if ((existence != 0) || (dest_ts->mm == NULL))
        return MAILBOX_INVALID;

    /* get destination mailbox, creating it if needed */
    dest_signal = get_signal(dest);
    if (dest_signal == NULL)
        dest_signal = create_signal(dest, TRUE);
    dest_mailbox = get_mailbox(dest);
    if (dest_mailbox == NULL) {
        dest_mailbox = create_mailbox(dest);
        if (dest_mailbox == NULL)
            return MAILBOX_ERROR;
    }
    wake_up(&(dest_signal->wait_null));

    if ((block == TRUE) && (dest_mailbox->full == TRUE)) {
        /* TODO: wait until not full and send message */
    } else if (block == FALSE && (dest_mailbox->full == TRUE)) {
        return MAILBOX_FULL;
    }
    if (dest_mailbox->stop)
        return MAILBOX_STOPPED;

    this_mail = create_message(my_pid, len, msg);
    spin_lock(&(dest_mailbox->lock));
    add_message(&dest_mailbox, &this_mail);
    spin_unlock(&(dest_mailbox->lock));

    /* successfully sent */
    return 0;
}
asmlinkage long sys_mb_exit_group(int error_code)
{
    pid_t mypid = current->pid;
    mailbox *mb = get_mailbox(mypid);

    if (mb != NULL) {
        free_mail(mb->msg);
        /* mb is known non-NULL here; the second check was redundant */
        kmem_cache_free(mbCache, mb);
    }
    (*ref_sys_exit_group)(error_code);
    return 0;
}
/**
 * receive message from given sender
 */
asmlinkage long sys_RcvMsg(pid_t *sender, void *msg, int *len, bool block)
{
    pid_t my_pid = current->pid;
    mailbox* mb = NULL;
    signal* signal = NULL;
    message* this_mail;
    pid_t *a_sender;
    void *a_msg;
    int *a_len;

    signal = get_signal(my_pid);
    if (signal == NULL) {
        signal = create_signal(my_pid, TRUE);
        /* re-evaluate the mailbox lookup on each wakeup; testing the
           stale local pointer would sleep forever */
        wait_event(signal->wait_null, get_mailbox(my_pid) != NULL);
    }
    mb = get_mailbox(my_pid);
    if (mb == NULL)
        return MAILBOX_ERROR;

    if ((mb->stop) && (mb->size == 0))
        return MAILBOX_STOPPED;
    if ((block == NO_BLOCK) && (mb->size == 0))
        return MAILBOX_EMPTY;
    if ((block == BLOCK) && (mb->size == 0))
        wait_event(mb->wait_empty, mb->size != 0);

    spin_lock(&(mb->lock));
    this_mail = get_msg(&mb);
    spin_unlock(&(mb->lock));
    if (this_mail == NULL)
        return MAILBOX_ERROR;

    a_sender = &(this_mail->sender);
    a_msg = this_mail->content;
    a_len = &(this_mail->len);
    if (((*a_len) > MAX_MSG_SIZE) || ((*a_len) < 0))
        return MSG_LENGTH_ERROR;

    if (copy_to_user(sender, a_sender, sizeof(pid_t)))
        return MSG_ARG_ERROR;
    if (copy_to_user(msg, a_msg, *a_len))
        return MSG_ARG_ERROR;
    if (copy_to_user(len, a_len, sizeof(int)))
        return MSG_ARG_ERROR;

    spin_lock(&(mb->lock));
    rm_message(&mb);
    spin_unlock(&(mb->lock));

    /* successful */
    return 0;
}
void smp_handle_requests()
{
    cpu_mailbox_t * mailbox = get_mailbox();
    while (mailbox->pending_requests)
    {
        for (dword_t i = 0; i < CONFIG_SMP_MAX_CPU; i++)
            if (mailbox->pending_requests & (1 << i))
            {
                mailbox->clear_request(i);
                smp_handle_request(i);
            }
    }
}
void thread1() {
    int status, i;
    mbox_t fromMe, to0;
    msg_t msg;

    fromMe = make_mailbox(1, &status);
    assert(status == STATUS_OK);
    to0 = get_mailbox(0);
    for (i = 0; i < N; i++) {
        sprintf(msg, "Testing message #%d", i);
        send(fromMe, msg, to0);
    }
}
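/* Hypothetical receiving side for the test loop above -- an assumption,
 * not part of the original source. A minimal sketch, using only the toy
 * API shown above (make_mailbox/recv, STATUS_OK, N), of a thread0 that
 * owns mailbox 0 and drains the N test messages sent by thread1(). */
void thread0() {
    int status, i;
    mbox_t me;
    msg_t msg;

    me = make_mailbox(0, &status);
    assert(status == STATUS_OK);
    for (i = 0; i < N; i++) {
        recv(me, msg);
        printf("received: %s\n", msg);
    }
}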
void smp_end_short_ipc(tcb_t * to_tcb)
{
    cpu_mailbox_t * mailbox;
    XIPC_PRINTF("smp_end_short_ipc (tcb=%x)\n", to_tcb);

    if (to_tcb->thread_state == TS_XCPU_LOCKED_RUNNING)
        mailbox = &cpu_mailbox[to_tcb->cpu];
    else
        mailbox = get_mailbox();

    XIPC_PRINTF("notifying mailbox %x\n", mailbox);

    /* ok notification for partner cpu that transfer is complete */
    mailbox->set_status(MAILBOX_DONE);
}
dword_t smp_start_receive_ipc(tcb_t * from_tcb, tcb_t * current)
{
    cpu_mailbox_t * mailbox = get_mailbox();
    mailbox->tcb = from_tcb;
    mailbox->param[0] = (dword_t)current;
    dword_t status = mailbox->send_command(from_tcb->cpu, SMP_CMD_IPC_RECEIVE);

    if (status == MAILBOX_ERROR) {
        //enter_kdebug("smp_start_receive_ipc failed");
        return 0;
    }
    switch_to_idle(current);
    return 1;
}
void smp_flush_tlb()
{
#warning inefficient implementation of tlb shootdown
    cpu_mailbox_t * mailbox = get_mailbox();

    for (dword_t cpu = 0; cpu < CONFIG_SMP_MAX_CPU; cpu++)
    {
        if (cpu == get_cpu_id())
            continue;
        if (!is_cpu_online(cpu))
            continue;

        dword_t status = mailbox->send_command(cpu, SMP_CMD_FLUSH_TLB);
        if (status != MAILBOX_OK)
            enter_kdebug("smp_flush_tlb");
    }
}
/**
 * functions for maintaining mailboxes
 */
asmlinkage long sys_ManageMailbox(bool stop, int *count)
{
    pid_t my_pid = current->pid;
    mailbox* mb;
    int a_count;

    mb = get_mailbox(my_pid);
    if (mb == NULL)
        return MAILBOX_ERROR;

    spin_lock(&(mb->lock));
    if (stop) {
        mb->stop = TRUE;
        wake_up_all(&(mb->wait_full));
    }
    a_count = mb->size;
    spin_unlock(&(mb->lock));

    if (copy_to_user(count, &a_count, sizeof(int)))
        return MSG_ARG_ERROR;
    return 0;
}
dword_t smp_end_ipc(tcb_t * to_tcb, tcb_t * current)
{
    XIPC_PRINTF("sending end_ipc ipi (to_tcb=%p)\n", to_tcb);
    cpu_mailbox_t * mailbox = get_mailbox();
    mailbox->tcb = to_tcb;
    mailbox->param[0] = (dword_t)current;
    dword_t status = mailbox->send_command(to_tcb->cpu, SMP_CMD_IPC_END);

    if (status == MAILBOX_OK)
        return 1;

    IPI_PRINTF("%d smp_end_ipc failed (%x (%d, %x))\n",
               get_cpu_id(), to_tcb, to_tcb->cpu, to_tcb->thread_state);
    return 0;
}
dword_t smp_start_ipc(tcb_t * to_tcb, tcb_t * current)
{
    XIPC_PRINTF("sending start_ipc ipi (current=%p)\n", current);
    cpu_mailbox_t * mailbox = get_mailbox();
    mailbox->tcb = to_tcb;
    mailbox->param[0] = (dword_t)current;
    dword_t status = mailbox->send_command(to_tcb->cpu, SMP_CMD_IPC_START);

    if (status == MAILBOX_OK)
        return 1;

    IPI_PRINTF("%d smp_start_ipc failed (%x (%d, %x) -> %x (%d, %x))\n",
               get_cpu_id(), current, current->cpu, current->thread_state,
               to_tcb, to_tcb->cpu, to_tcb->thread_state);
    return 0;
}
int smp_ex_regs(tcb_t * tcb, dword_t * uip, dword_t * usp,
                l4_threadid_t * pager, dword_t * flags)
{
    cpu_mailbox_t * mailbox = get_mailbox();

    IPI_PRINTF("xcpu ex_regs tcb=%x ip=%x sp=%x pager=%x\n",
               tcb, *uip, *usp, *pager);

    mailbox->tcb = tcb;
    mailbox->param[0] = *uip;
    mailbox->param[1] = *usp;
    mailbox->tid = *pager;

    switch(mailbox->send_command(tcb->cpu, SMP_CMD_THREAD_EX_REGS)) {
    case MAILBOX_ERROR:
        IPI_PRINTF("xcpu ex_regs failed\n");
        return 0;
    case MAILBOX_UNWIND_REMOTE:
        enter_kdebug("smp_ex_regs unwind remote");
        *uip = mailbox->param[0];
        *usp = mailbox->param[1];
        *flags = mailbox->param[3];
        *pager = mailbox->tid;
        unwind_ipc(mailbox->tcb);
        return 1;
    case MAILBOX_OK:
        IPI_PRINTF("xcpu ex_regs done\n");
        *uip = mailbox->param[0];
        *usp = mailbox->param[1];
        *flags = mailbox->param[3];
        *pager = mailbox->tid;
        return 1;
    default:
        printf("smp_ex_regs: unknown response\n");
    }
    return 0;
}
int smp_unwind_ipc(tcb_t * tcb)
{
    IPI_PRINTF("smp_unwind_ipc(%x), partner=%x, current=%x, xcpu_mb=%x\n",
               tcb, tcb->partner, get_current_tcb(), xcpu_unwind_mailbox);

    /* check if we are already performing an unwind ipc operation */
    if (xcpu_unwind_mailbox) {
        xcpu_unwind_mailbox->tcb = tcb;
        xcpu_unwind_mailbox->set_status(MAILBOX_UNWIND_REMOTE);
        return 1;
    }

    dword_t status;
    cpu_mailbox_t * mailbox = get_mailbox();
    mailbox->tcb = tcb;
    status = mailbox->send_command(tcb->cpu, SMP_CMD_UNWIND);

    switch(status) {
    case MAILBOX_UNWIND_REMOTE:
        /* we have to perform a remote unwind */
        unwind_ipc(mailbox->tcb);
        break;
    case MAILBOX_OK:
        /* unwind done */
        break;
    case MAILBOX_ERROR:
        /* thread may have moved */
        return 0;
    default:
        enter_kdebug("smp_unwind: unexpected return value");
        return 0;
    }
    return 1;
}
static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
{
    struct ivtv_mailbox_data *mbdata = (cmd >= 128) ? &itv->enc_mbox : &itv->dec_mbox;
    volatile struct ivtv_mailbox __iomem *mbox;
    int api_timeout = msecs_to_jiffies(1000);
    int flags, mb, i;
    unsigned long then;

    /* sanity checks */
    if (NULL == mbdata) {
        IVTV_ERR("No mailbox allocated\n");
        return -ENODEV;
    }
    if (args < 0 || args > CX2341X_MBOX_MAX_DATA ||
        cmd < 0 || cmd > 255 || api_info[cmd].name == NULL) {
        IVTV_ERR("Invalid MB call: cmd = 0x%02x, args = %d\n", cmd, args);
        return -EINVAL;
    }

    if (api_info[cmd].flags & API_HIGH_VOL) {
        IVTV_DEBUG_HI_MB("MB Call: %s\n", api_info[cmd].name);
    } else {
        IVTV_DEBUG_MB("MB Call: %s\n", api_info[cmd].name);
    }

    /* clear possibly uninitialized part of data array */
    for (i = args; i < CX2341X_MBOX_MAX_DATA; i++)
        data[i] = 0;

    /* If this command was issued within the last 30 minutes and with identical
       data, then just return 0 as there is no need to issue this command again.
       Just an optimization to prevent unnecessary use of mailboxes. */
    if (itv->api_cache[cmd].last_jiffies &&
        time_before(jiffies, itv->api_cache[cmd].last_jiffies +
                    msecs_to_jiffies(1800000)) &&
        !memcmp(data, itv->api_cache[cmd].data, sizeof(itv->api_cache[cmd].data))) {
        itv->api_cache[cmd].last_jiffies = jiffies;
        return 0;
    }

    flags = api_info[cmd].flags;

    if (flags & API_DMA) {
        for (i = 0; i < 100; i++) {
            mb = i % (mbdata->max_mbox + 1);
            if (try_mailbox(itv, mbdata, mb)) {
                write_mailbox(&mbdata->mbox[mb], cmd, args, data);
                clear_bit(mb, &mbdata->busy);
                return 0;
            }
            IVTV_DEBUG_WARN("%s: mailbox %d not free %08x\n",
                            api_info[cmd].name, mb, readl(&mbdata->mbox[mb].flags));
        }
        IVTV_WARN("Could not find free DMA mailbox for %s\n", api_info[cmd].name);
        clear_all_mailboxes(itv, mbdata);
        return -EBUSY;
    }

    if ((flags & API_FAST_RESULT) == API_FAST_RESULT)
        api_timeout = msecs_to_jiffies(100);

    mb = get_mailbox(itv, mbdata, flags);
    if (mb < 0) {
        IVTV_DEBUG_WARN("No free mailbox found (%s)\n", api_info[cmd].name);
        clear_all_mailboxes(itv, mbdata);
        return -EBUSY;
    }
    mbox = &mbdata->mbox[mb];
    write_mailbox(mbox, cmd, args, data);
    if (flags & API_CACHE) {
        memcpy(itv->api_cache[cmd].data, data, sizeof(itv->api_cache[cmd].data));
        itv->api_cache[cmd].last_jiffies = jiffies;
    }
    if ((flags & API_RESULT) == 0) {
        clear_bit(mb, &mbdata->busy);
        return 0;
    }

    /* Get results */
    then = jiffies;

    if (!(flags & API_NO_POLL)) {
        /* First try to poll, then switch to delays */
        for (i = 0; i < 100; i++) {
            if (readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)
                break;
        }
    }
    while (!(readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)) {
        if (time_after(jiffies, then + api_timeout)) {
            IVTV_DEBUG_WARN("Could not get result (%s)\n", api_info[cmd].name);
            /* reset the mailbox, but it is likely too late already */
            write_sync(0, &mbox->flags);
            clear_bit(mb, &mbdata->busy);
            return -EIO;
        }
        if (flags & API_NO_WAIT_RES)
            mdelay(1);
        else
            ivtv_msleep_timeout(1, 0);
    }
    if (time_after(jiffies, then + msecs_to_jiffies(100)))
        IVTV_DEBUG_WARN("%s took %u jiffies\n",
                        api_info[cmd].name, jiffies_to_msecs(jiffies - then));

    for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
        data[i] = readl(&mbox->data[i]);
    write_sync(0, &mbox->flags);
    clear_bit(mb, &mbdata->busy);
    return 0;
}
/** Handle a given task */
static void handle_task(node_t node, msg_task_t task) {
  XBT_DEBUG("Handling task %p", task);
  char mailbox[MAILBOX_NAME_SIZE];
  int i;
  int j;
  int min;
  int max;
  int next;
  msg_task_t task_sent;
  task_data_t req_data;
  task_data_t task_data = (task_data_t) MSG_task_get_data(task);
  e_task_type_t type = task_data->type;

  // If the node is not ready, keep the task for later
  if (node->ready != 0 && !(type==TASK_JOIN_LAST_REPLY || type==TASK_JOIN_REPLY)) {
    XBT_DEBUG("Task pending %u", type);
    xbt_dynar_push(node->pending_tasks, &task);
    return;
  }

  switch (type) {
  /* Try to join the ring */
  case TASK_JOIN:
    next = routing_next(node, task_data->answer_id);
    XBT_DEBUG("Join request from %08x forwarding to %08x",
              (unsigned)task_data->answer_id, (unsigned)next);
    type = TASK_JOIN_LAST_REPLY;

    req_data = xbt_new0(s_task_data_t,1);
    req_data->answer_id = task_data->sender_id;
    req_data->steps = task_data->steps + 1;

    // if next is different from the current node, forward the join
    if (next!=node->id) {
      get_mailbox(next, mailbox);
      task_data->sender_id = node->id;
      task_data->steps++;
      task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, task_data);
      if (MSG_task_send_with_timeout(task_sent, mailbox, timeout) == MSG_TIMEOUT) {
        XBT_DEBUG("Timeout expired when forwarding join to next %d", next);
        task_free(task_sent);
      }
      type = TASK_JOIN_REPLY;
    }

    // send back the current node state to the joining node
    req_data->type = type;
    req_data->sender_id = node->id;
    get_mailbox(node->id, req_data->answer_to);
    req_data->state = node_get_state(node);
    task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, req_data);
    if (MSG_task_send_with_timeout(task_sent, task_data->answer_to, timeout) == MSG_TIMEOUT) {
      XBT_DEBUG("Timeout expired when sending back the current node state to the joining node to %d", node->id);
      task_free(task_sent);
    }
    break;

  /* Join reply from all the nodes touched by the join */
  case TASK_JOIN_LAST_REPLY:
    // if the last node touched replies, copy its namespace set
    // TODO: this works only if the two nodes are side by side (is it really the case?)
    j = (task_data->sender_id < node->id) ? -1 : 0;
    for (i=0; i<NAMESPACE_SIZE/2; i++) {
      node->namespace_set[i] = task_data->state->namespace_set[i-j];
      node->namespace_set[NAMESPACE_SIZE-1-i] =
          task_data->state->namespace_set[NAMESPACE_SIZE-1-i-j-1];
    }
    node->namespace_set[NAMESPACE_SIZE/2+j] = task_data->sender_id;
    node->ready += task_data->steps + 1;
    /* no break */
  case TASK_JOIN_REPLY:
    XBT_DEBUG("Joining Reply");
    // if the first node touched replies, copy its neighborhood set
    if (task_data->sender_id == node->known_id) {
      node->neighborhood_set[0] = task_data->sender_id;
      for (i=1; i<NEIGHBORHOOD_SIZE; i++)
        node->neighborhood_set[i] = task_data->state->neighborhood_set[i-1];
    }
    // copy the corresponding routing table levels
    min = (node->id==task_data->answer_id) ? 0 : shl(node->id, task_data->answer_id);
    max = shl(node->id, task_data->sender_id)+1;
    for (i=min; i<max; i++) {
      int d = domain(node->id, i);
      for (j=0; j<LEVEL_SIZE; j++)
        if (d!=j)
          node->routing_table[i][j] = task_data->state->routing_table[i][j];
    }
    node->ready--;

    // if the node is ready, do all the pending tasks and send updates to known nodes
    if (node->ready==0) {
      XBT_DEBUG("Node %i is ready!!!", node->id);
      while (!xbt_dynar_is_empty(node->pending_tasks)) {
        msg_task_t task;
        xbt_dynar_shift(node->pending_tasks, &task);
        handle_task(node, task);
      }
      for (i=0; i<NAMESPACE_SIZE; i++) {
        j = node->namespace_set[i];
        if (j!=-1) {
          XBT_DEBUG("Send update to %i", j);
          get_mailbox(j, mailbox);

          req_data = xbt_new0(s_task_data_t,1);
          req_data->answer_id = node->id;
          req_data->steps = 0;
          req_data->type = TASK_UPDATE;
          req_data->sender_id = node->id;
          get_mailbox(node->id, req_data->answer_to);
          req_data->state = node_get_state(node);
          task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, req_data);
          if (MSG_task_send_with_timeout(task_sent, mailbox, timeout) == MSG_TIMEOUT) {
            XBT_DEBUG("Timeout expired when sending update to %d", j);
            task_free(task_sent);
          }
        }
      }
    }
    break;

  /* Received an update of state */
  case TASK_UPDATE:
    XBT_DEBUG("Task update %i !!!", node->id);

    /* Update namespace set */
    XBT_INFO("Task update from %i !!!", task_data->sender_id);
    XBT_INFO("Node:");
    print_node_id(node);
    print_node_namespace_set(node);
    int curr_namespace_set[NAMESPACE_SIZE];
    int task_namespace_set[NAMESPACE_SIZE+1];

    // Copy the current namespace and the task state namespace with state->id in the middle
    i=0;
    for (; i<NAMESPACE_SIZE/2; i++){
      curr_namespace_set[i] = node->namespace_set[i];
      task_namespace_set[i] = task_data->state->namespace_set[i];
    }
    task_namespace_set[i] = task_data->state->id;
    for (; i<NAMESPACE_SIZE; i++){
      curr_namespace_set[i] = node->namespace_set[i];
      task_namespace_set[i+1] = task_data->state->namespace_set[i];
    }

    // get the index of values before and after node->id in task_namespace
    min = -1;
    max = -1;
    for (i=0; i<=NAMESPACE_SIZE; i++) {
      j = task_namespace_set[i];
      if (j != -1 && j < node->id) min = i;
      if (j != -1 && max == -1 && j > node->id) max = i;
    }

    // add lower elements
    j = NAMESPACE_SIZE/2-1;
    for (i=NAMESPACE_SIZE/2-1; i>=0; i--) {
      if (min < 0 || curr_namespace_set[j] > task_namespace_set[min]) {
        node->namespace_set[i] = curr_namespace_set[j];
        j--;
      } else if (curr_namespace_set[j] == task_namespace_set[min]) {
        node->namespace_set[i] = curr_namespace_set[j];
        j--;
        min--;
      } else {
        node->namespace_set[i] = task_namespace_set[min];
        min--;
      }
    }

    // add greater elements
    j = NAMESPACE_SIZE/2;
    for (i=NAMESPACE_SIZE/2; i<NAMESPACE_SIZE; i++) {
      if (min<0 || max>=NAMESPACE_SIZE) {
        node->namespace_set[i] = curr_namespace_set[j];
        j++;
      } else if (max >= 0){
        if (curr_namespace_set[j] == -1 || curr_namespace_set[j] > task_namespace_set[max]) {
          node->namespace_set[i] = task_namespace_set[max];
          max++;
        } else if (curr_namespace_set[j] == task_namespace_set[max]) {
          node->namespace_set[i] = curr_namespace_set[j];
          j++;
          max++;
        } else {
          node->namespace_set[i] = curr_namespace_set[j];
          j++;
        }
      }
    }

    /* Update routing table: fill empty local entries from the sender's state */
    for (i=shl(node->id, task_data->state->id); i<LEVELS_COUNT; i++) {
      for (j=0; j<LEVEL_SIZE; j++) {
        if (node->routing_table[i][j]==-1 && task_data->state->routing_table[i][j]!=-1)
          node->routing_table[i][j] = task_data->state->routing_table[i][j];
      }
    }
    break;
  default:
    THROW_IMPOSSIBLE;
  }
  task_free(task);
}
void smp_move_thread(tcb_t * tcb, dword_t cpu)
{
    cpu_mailbox_t * mailbox = get_mailbox();
    //mailbox->command = SMP_CMD_THREAD_MOVE;
    mailbox->tcb = tcb;
    //mailbox->status = MAILBOX_NULL;
    mailbox->param[0] = tcb->queue_state;

    //IPI_PRINTF("smp move thread %p from cpu %d to cpu %d\n", tcb, tcb->cpu, cpu);

    /* do not move thread if already on cpu */
    if (tcb->cpu == cpu)
        return;

    /* do not migrate to inactive cpus */
    if (!is_cpu_online(cpu))
        return;

retry_migration:
    if (tcb->cpu == get_cpu_id())
    {
        /* we have the thread - so, we can give it away */
        thread_dequeue_present(tcb);
        thread_dequeue_ready(tcb);
        thread_dequeue_wakeup(tcb);

        IPI_PRINTF("before thread put (current: %x, pdir=%x, tcb: %x)\n",
                   get_current_tcb(), get_current_pagetable(), tcb);
        mailbox->send_command(cpu, SMP_CMD_THREAD_PUT);
        IPI_PRINTF("thread_put done (current: %x, pdir=%x, cpu: %d/%d)\n",
                   get_current_tcb(), get_current_pagetable(),
                   get_cpu_id(), get_apic_cpu_id());
    }
    else
    {
        /* we don't have the thread - ask the cpu */
        dword_t status;
        status = mailbox->send_command(tcb->cpu, SMP_CMD_THREAD_GET);

        /* thread may have moved meanwhile */
        if (status != MAILBOX_OK)
            goto retry_migration;

        if (cpu == get_cpu_id())
        {
            /* the thread comes to us */
            tcb->cpu = cpu;
            thread_adapt_queue_state(tcb, mailbox->param[1]);

            /* adjust the page directory for this tcb */
            //printf("pgdir: %x\n", tcb->page_dir);
            thread_adapt_pagetable(tcb, get_cpu_id());
            //printf("pgdir: %x\n", tcb->page_dir);
        }
        else
        {
            status = mailbox->send_command(cpu, SMP_CMD_THREAD_PUT);
            if (status != MAILBOX_OK) {
                enter_kdebug("3-cpu thread migration failed");
                return;
            }
        }
    }
}
/**
 * \brief Node Function
 * Arguments:
 * - my id
 * - the id of a guy I know in the system (except for the first node)
 * - the time to sleep before I join (except for the first node)
 * - the deadline time
 */
static int node(int argc, char *argv[])
{
  double init_time = MSG_get_clock();
  msg_task_t task_received = NULL;
  int join_success = 0;
  double deadline;
  xbt_assert(argc == 3 || argc == 5, "Wrong number of arguments for this node");
  s_node_t node = {0};
  node.id = xbt_str_parse_int(argv[1], "Invalid ID: %s");
  node.known_id = -1;
  node.ready = -1;
  node.pending_tasks = xbt_fifo_new();
  get_mailbox(node.id, node.mailbox);
  XBT_DEBUG("New node with id %s (%08x)", node.mailbox, node.id);

  int i,j,d;
  for (i=0; i<LEVELS_COUNT; i++){
    d = domain(node.id, i);
    for (j=0; j<LEVEL_SIZE; j++)
      node.routing_table[i][j] = (d==j) ? node.id : -1;
  }
  for (i=0; i<NEIGHBORHOOD_SIZE; i++)
    node.neighborhood_set[i] = -1;
  for (i=0; i<NAMESPACE_SIZE; i++)
    node.namespace_set[i] = -1;

  if (argc == 3) { // first ring
    XBT_DEBUG("Hey! Let's create the system.");
    deadline = xbt_str_parse_double(argv[2], "Invalid deadline: %s");
    create(&node);
    join_success = 1;
  } else {
    node.known_id = xbt_str_parse_int(argv[2], "Invalid known ID: %s");
    double sleep_time = xbt_str_parse_double(argv[3], "Invalid sleep time: %s");
    deadline = xbt_str_parse_double(argv[4], "Invalid deadline: %s");

    // sleep before starting
    XBT_DEBUG("Let's sleep during %f", sleep_time);
    MSG_process_sleep(sleep_time);
    XBT_DEBUG("Hey! Let's join the system.");
    join_success = join(&node);
  }

  if (join_success) {
    XBT_DEBUG("Waiting...");
    while (MSG_get_clock() < init_time + deadline
        // && MSG_get_clock() < node.last_change_date + 1000
        && MSG_get_clock() < max_simulation_time) {
      if (node.comm_receive == NULL) {
        task_received = NULL;
        node.comm_receive = MSG_task_irecv(&task_received, node.mailbox);
        // FIXME: do not make MSG_task_irecv() calls from several functions
      }
      if (!MSG_comm_test(node.comm_receive)) {
        MSG_process_sleep(5);
      } else {
        // a transfer has occurred
        msg_error_t status = MSG_comm_get_status(node.comm_receive);
        if (status != MSG_OK) {
          XBT_DEBUG("Failed to receive a task. Nevermind.");
          MSG_comm_destroy(node.comm_receive);
          node.comm_receive = NULL;
        } else {
          // the task was successfully received
          MSG_comm_destroy(node.comm_receive);
          node.comm_receive = NULL;
          handle_task(&node, task_received);
        }
      }
    }
    print_node(&node);
  }
  return 1;
}
/*
 * Handle a given task
 */
static void handle_task(node_t node, msg_task_t task) {
  XBT_DEBUG("Handling task %p", task);
  char mailbox[MAILBOX_NAME_SIZE];
  int i, j, min, max, d;
  msg_task_t task_sent;
  task_data_t req_data;
  task_data_t task_data = (task_data_t) MSG_task_get_data(task);
  e_task_type_t type = task_data->type;

  // If the node is not ready, keep the task for later
  if (node->ready != 0 && !(type==TASK_JOIN_LAST_REPLY || type==TASK_JOIN_REPLY)) {
    XBT_DEBUG("Task pending %i", type);
    xbt_fifo_push(node->pending_tasks, task);
    return;
  }

  switch (type) {
  /*
   * Try to join the ring
   */
  case TASK_JOIN: {
    int next = routing_next(node, task_data->answer_id);
    XBT_DEBUG("Join request from %08x forwarding to %08x", task_data->answer_id, next);
    type = TASK_JOIN_LAST_REPLY;

    req_data = xbt_new0(s_task_data_t,1);
    req_data->answer_id = task_data->sender_id;
    req_data->steps = task_data->steps + 1;

    // if next is different from the current node, forward the join
    if (next!=node->id) {
      get_mailbox(next, mailbox);
      task_data->sender_id = node->id;
      task_data->steps++;
      task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, task_data);
      MSG_task_send_with_timeout(task_sent, mailbox, timeout);
      type = TASK_JOIN_REPLY;
    }

    // send back the current node state to the joining node
    req_data->type = type;
    req_data->sender_id = node->id;
    get_mailbox(node->id, req_data->answer_to);
    req_data->state = node_get_state(node);
    task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, req_data);
    MSG_task_send_with_timeout(task_sent, task_data->answer_to, timeout);
    break;
  }
  /*
   * Join reply from all the nodes touched by the join
   */
  case TASK_JOIN_LAST_REPLY:
    // if the last node touched replies, copy its namespace set
    // TODO: this works only if the two nodes are side by side (is it really the case?)
    j = (task_data->sender_id < node->id) ? -1 : 0;
    for (i=0; i<NAMESPACE_SIZE/2; i++) {
      node->namespace_set[i] = task_data->state->namespace_set[i-j];
      node->namespace_set[NAMESPACE_SIZE-1-i] =
          task_data->state->namespace_set[NAMESPACE_SIZE-1-i-j-1];
    }
    node->namespace_set[NAMESPACE_SIZE/2+j] = task_data->sender_id;
    node->ready += task_data->steps + 1;
    /* no break: a last reply is also handled as a plain reply */
  case TASK_JOIN_REPLY:
    XBT_DEBUG("Joining Reply");
    // if the first node touched replies, copy its neighborhood set
    if (task_data->sender_id == node->known_id) {
      node->neighborhood_set[0] = task_data->sender_id;
      for (i=1; i<NEIGHBORHOOD_SIZE; i++)
        node->neighborhood_set[i] = task_data->state->neighborhood_set[i-1];
    }
    // copy the corresponding routing table levels
    min = (node->id==task_data->answer_id) ? 0 : shl(node->id, task_data->answer_id);
    max = shl(node->id, task_data->sender_id)+1;
    for (i=min; i<max; i++) {
      d = domain(node->id, i);
      for (j=0; j<LEVEL_SIZE; j++)
        if (d!=j)
          node->routing_table[i][j] = task_data->state->routing_table[i][j];
    }
    node->ready--;

    // if the node is ready, do all the pending tasks and send updates to known nodes
    if (node->ready==0) {
      XBT_DEBUG("Node %i is ready!!!", node->id);
      while (xbt_fifo_size(node->pending_tasks))
        handle_task(node, xbt_fifo_pop(node->pending_tasks));

      for (i=0; i<NAMESPACE_SIZE; i++) {
        j = node->namespace_set[i];
        if (j!=-1) {
          XBT_DEBUG("Send update to %i", j);
          get_mailbox(j, mailbox);

          req_data = xbt_new0(s_task_data_t,1);
          req_data->answer_id = node->id;
          req_data->steps = 0;
          req_data->type = TASK_UPDATE;
          req_data->sender_id = node->id;
          get_mailbox(node->id, req_data->answer_to);
          req_data->state = node_get_state(node);
          task_sent = MSG_task_create(NULL, COMP_SIZE, COMM_SIZE, req_data);
          MSG_task_send_with_timeout(task_sent, mailbox, timeout);
        }
      }
    }
    break;
  /*
   * Received an update of state
   */
  case TASK_UPDATE:
    XBT_DEBUG("Task update %i !!!", node->id);

    /* Update namespace set */
    printf("Task update from %i !!!\n", task_data->sender_id);
    print_node_id(node);
    print_node_namespace_set(node);
    int curr_namespace_set[NAMESPACE_SIZE];
    int task_namespace_set[NAMESPACE_SIZE+1];

    // Copy the current namespace and the task state namespace with state->id in the middle
    i=0;
    for (; i<NAMESPACE_SIZE/2; i++){
      curr_namespace_set[i] = node->namespace_set[i];
      task_namespace_set[i] = task_data->state->namespace_set[i];
    }
    task_namespace_set[i] = task_data->state->id;
    for (; i<NAMESPACE_SIZE; i++){
      curr_namespace_set[i] = node->namespace_set[i];
      task_namespace_set[i+1] = task_data->state->namespace_set[i];
    }

    // get the index of values before and after node->id in task_namespace
    min = -1;
    max = -1;
    for (i=0; i<=NAMESPACE_SIZE; i++) {
      j = task_namespace_set[i];
      if (i<NAMESPACE_SIZE)
        printf("%08x %08x | ", j, curr_namespace_set[i]);
      if (j != -1 && j < node->id) min = i;
      if (j != -1 && max == -1 && j > node->id) max = i;
    }
    printf("\n");

    // add lower elements
    j = NAMESPACE_SIZE/2-1;
    for (i=NAMESPACE_SIZE/2-1; i>=0; i--) {
      printf("i:%i, j:%i, min:%i, currj:%08x, taskmin:%08x\n",
             i, j, min, curr_namespace_set[j], task_namespace_set[min]);
      if (min<0) {
        node->namespace_set[i] = curr_namespace_set[j];
        j--;
      } else if (curr_namespace_set[j] == task_namespace_set[min]) {
        node->namespace_set[i] = curr_namespace_set[j];
        j--;
        min--;
      } else if (curr_namespace_set[j] > task_namespace_set[min]) {
        node->namespace_set[i] = curr_namespace_set[j];
        j--;
      } else {
        node->namespace_set[i] = task_namespace_set[min];
        min--;
      }
    }

    // add greater elements
    j = NAMESPACE_SIZE/2;
    for (i=NAMESPACE_SIZE/2; i<NAMESPACE_SIZE; i++) {
      printf("i:%i, j:%i, max:%i, currj:%08x, taskmax:%08x\n",
             i, j, max, curr_namespace_set[j], task_namespace_set[max]);
      if (min<0 || max>=NAMESPACE_SIZE) {
        node->namespace_set[i] = curr_namespace_set[j];
        j++;
      } else if (curr_namespace_set[j] == -1) {
        node->namespace_set[i] = task_namespace_set[max];
        max++;
      } else if (curr_namespace_set[j] == task_namespace_set[max]) {
        node->namespace_set[i] = curr_namespace_set[j];
        j++;
        max++;
      } else if (curr_namespace_set[j] < task_namespace_set[max]) {
        node->namespace_set[i] = curr_namespace_set[j];
        j++;
      } else {
        node->namespace_set[i] = task_namespace_set[max];
        max++;
      }
    }
    print_node_namespace_set(node);

    /* Update routing table: fill empty local entries from the sender's state */
    for (i=shl(node->id, task_data->state->id); i<LEVELS_COUNT; i++) {
      for (j=0; j<LEVEL_SIZE; j++) {
        if (node->routing_table[i][j]==-1 && task_data->state->routing_table[i][j]!=-1)
          node->routing_table[i][j] = task_data->state->routing_table[i][j];
      }
    }
    break;
  }
}