/*
 * Fetch the next pending ICI message for curr_cpu, if any.
 *
 * The CPU's private mailbox is checked first; if it is empty, the broadcast
 * mailbox is scanned for a message this CPU has not yet handled.  On success
 * *source_mailbox is set to MAILBOX_LOCAL or MAILBOX_BCAST so the caller
 * knows which list the message came from.  Returns 0 when nothing is pending.
 *
 * Note the asymmetry: a local message is unlinked from the list right here,
 * while a broadcast message is only *marked* (via its proc_bitmap) and stays
 * linked — every CPU must see it, and the last one to finish removes it.
 */
static struct smp_msg *smp_check_for_message(int curr_cpu, int *source_mailbox)
{
	struct smp_msg *msg;

	acquire_spinlock_nocheck(&cpu_msg_spinlock[curr_cpu]);
	msg = smp_msgs[curr_cpu];
	if(msg != 0) {
		// pop the head of this CPU's private mailbox
		smp_msgs[curr_cpu] = msg->next;
		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
		// dprintf(" found msg 0x%x in cpu mailbox\n", msg);
		*source_mailbox = MAILBOX_LOCAL;
	} else {
		// try getting one from the broadcast mailbox
		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
		acquire_spinlock_nocheck(&broadcast_msg_spinlock);

		msg = smp_broadcast_msgs;
		while(msg != 0) {
			if(CHECK_BIT(msg->proc_bitmap, curr_cpu) != 0) {
				// we have handled this one already
				msg = msg->next;
				continue;
			}

			// mark it so we wont try to process this one again
			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, curr_cpu);
			*source_mailbox = MAILBOX_BCAST;
			break;
		}
		release_spinlock(&broadcast_msg_spinlock);
		// dprintf(" found msg 0x%x in broadcast mailbox\n", msg);
	}
	// may be 0 if both mailboxes had nothing for us
	return msg;
}
/*!	Sends an ICI message to every CPU except the calling one.
	Must be called with interrupts already disabled (the caller supplies its
	own CPU index in \a currentCPU; the helper used here is the
	interrupts-disabled variant of find_free_message).

	The message is pushed onto the shared broadcast mailbox with a ref count
	of sNumCPUs - 1; the sender pre-sets its own bit in proc_bitmap so it
	never processes its own broadcast.  For SMP_MSG_FLAG_SYNC messages this
	function spins until all receivers are done and then returns the message
	to the free list itself; for async messages the last receiver frees it.
*/
void
smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
	addr_t data, addr_t data2, addr_t data3, void *dataPointer, uint32 flags)
{
	// ICIs are not available before smp_init completes
	if (!sICIEnabled)
		return;

	TRACE(("smp_send_broadcast_ici_interrupts_disabled: cpu %ld mess 0x%lx, "
		"data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n",
		currentCPU, message, data, data2, data3, dataPointer, flags));

	struct smp_msg *msg;
	find_free_message_interrupts_disabled(currentCPU, &msg);

	msg->message = message;
	msg->data = data;
	msg->data2 = data2;
	msg->data3 = data3;
	msg->data_ptr = dataPointer;
	// every other CPU must acknowledge before the message is done
	msg->ref_count = sNumCPUs - 1;
	msg->flags = flags;
	// mark ourselves as "already handled" so we skip our own broadcast
	msg->proc_bitmap = SET_BIT(0, currentCPU);
	msg->done = false;

	TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: inserting msg %p "
		"into broadcast mbox\n", currentCPU, msg));

	// stick it in the appropriate cpu's mailbox
	acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
	msg->next = sBroadcastMessages;
	sBroadcastMessages = msg;
	release_spinlock(&sBroadcastMessageSpinlock);

	arch_smp_send_broadcast_ici();

	TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: sent interrupt\n",
		currentCPU));

	if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
		// wait for the other cpus to finish processing it
		// the interrupt handler will ref count it to <0
		// if the message is sync after it has removed it from the mailbox
		TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: waiting for "
			"ack\n", currentCPU));

		// NOTE(review): msg->done is spun on without an explicit memory
		// barrier; presumably the spinlocks taken inside
		// process_all_pending_ici() provide the needed ordering — verify.
		while (msg->done == false) {
			// keep servicing incoming ICIs ourselves to avoid deadlock
			// (another CPU may be sync-waiting on us at the same time)
			process_all_pending_ici(currentCPU);
			PAUSE();
		}

		TRACE(("smp_send_broadcast_ici_interrupts_disabled %ld: returning "
			"message to free list\n", currentCPU));

		// for SYNC messages, it's our responsibility to put it
		// back into the free list
		return_free_message(msg);
	}

	TRACE(("smp_send_broadcast_ici_interrupts_disabled: done\n"));
}
/*!	Sends an ICI message to a single target CPU.

	Sending to the calling CPU itself is not supported: in that case the
	message is silently discarded and the function returns.  The message is
	pushed onto the target's private mailbox with ref_count 1.  For
	SMP_MSG_FLAG_SYNC the caller spins (while still servicing its own
	pending ICIs, to avoid cross-CPU deadlock) until the target is done,
	then frees the message; otherwise the receiver frees it.

	find_free_message() disables interrupts and returns the previous state,
	which is restored before returning.
*/
void
smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2,
	addr_t data3, void* dataPointer, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_ici: target 0x%lx, mess 0x%lx, data 0x%lx, data2 0x%lx, "
		"data3 0x%lx, ptr %p, flags 0x%lx\n", targetCPU, message, data, data2,
		data3, dataPointer, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();
		if (targetCPU == currentCPU) {
			// self-ICI is not allowed; give the message back and bail
			return_free_message(msg);
			restore_interrupts(state);
			return; // nope, cant do that
		}

		// set up the message
		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = dataPointer;
		// single target, so only one acknowledgement is needed
		msg->ref_count = 1;
		msg->flags = flags;
		msg->done = false;

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&sCPUMessageSpinlock[targetCPU]);
		msg->next = sCPUMessages[targetCPU];
		sCPUMessages[targetCPU] = msg;
		release_spinlock(&sCPUMessageSpinlock[targetCPU]);

		arch_smp_send_ici(targetCPU);

		if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
			// wait for the other cpu to finish processing it
			// the interrupt handler will ref count it to <0
			// if the message is sync after it has removed it from the mailbox
			while (msg->done == false) {
				// service our own ICIs meanwhile to avoid deadlock
				process_all_pending_ici(currentCPU);
				PAUSE();
			}

			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}
}
static void return_free_message(struct smp_msg *msg) { // dprintf("return_free_message: returning msg 0x%x\n", msg); acquire_spinlock_nocheck(&free_msg_spinlock); msg->next = free_msgs; free_msgs = msg; free_msg_count++; release_spinlock(&free_msg_spinlock); }
/*!	Returns a processed ICI message to the free pool.
	Links \a msg in at the head of the free list (guarded by
	sFreeMessageSpinlock) and increments the free-message counter.
*/
static void
return_free_message(struct smp_msg* msg)
{
	TRACE(("return_free_message: returning msg %p\n", msg));

	acquire_spinlock_nocheck(&sFreeMessageSpinlock);

	// head insertion into the free list
	msg->next = sFreeMessages;
	sFreeMessages = msg;
	++sFreeMessageCount;

	release_spinlock(&sFreeMessageSpinlock);
}
/*!	Returns the next pending ICI message for \a currentCPU, or NULL.

	The CPU's private mailbox takes priority; if it is empty, the broadcast
	mailbox is scanned for a message this CPU has not yet handled.
	\a sourceMailbox is set to MAILBOX_LOCAL or MAILBOX_BCAST accordingly.

	A local message is unlinked from its list here; a broadcast message is
	only marked in its proc_bitmap and stays linked — it is removed by the
	last CPU to finish with it (see finish_message_processing()).
*/
static struct smp_msg*
check_for_message(int currentCPU, mailbox_source& sourceMailbox)
{
	// no messages can be pending before ICIs are enabled
	if (!sICIEnabled)
		return NULL;

	acquire_spinlock_nocheck(&sCPUMessageSpinlock[currentCPU]);

	struct smp_msg* msg = sCPUMessages[currentCPU];
	if (msg != NULL) {
		// pop the head of our private mailbox
		sCPUMessages[currentCPU] = msg->next;
		release_spinlock(&sCPUMessageSpinlock[currentCPU]);
		TRACE((" cpu %d: found msg %p in cpu mailbox\n", currentCPU, msg));
		sourceMailbox = MAILBOX_LOCAL;
	} else {
		// try getting one from the broadcast mailbox
		release_spinlock(&sCPUMessageSpinlock[currentCPU]);
		acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);

		msg = sBroadcastMessages;
		while (msg != NULL) {
			if (CHECK_BIT(msg->proc_bitmap, currentCPU) != 0) {
				// we have handled this one already
				msg = msg->next;
				continue;
			}

			// mark it so we wont try to process this one again
			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, currentCPU);
			sourceMailbox = MAILBOX_BCAST;
			break;
		}
		release_spinlock(&sBroadcastMessageSpinlock);

		TRACE((" cpu %d: found msg %p in broadcast mailbox\n", currentCPU,
			msg));
	}
	return msg;
}
/*!	Sends an ICI message to the set of CPUs in \a cpuMask.

	The calling CPU and any bits beyond sNumCPUs are stripped from the mask
	first; a resulting empty mask is a caller bug and panics.  The message
	travels through the broadcast mailbox: its proc_bitmap is pre-set to
	~cpuMask so every non-target CPU treats the message as already handled,
	and ref_count is the number of real targets.  For SMP_MSG_FLAG_SYNC the
	caller spins (servicing its own ICIs to avoid deadlock) until all
	targets are done, then frees the message.

	find_free_message() disables interrupts; they are restored on return.
*/
void
smp_send_multicast_ici(cpu_mask_t cpuMask, int32 message, addr_t data,
	addr_t data2, addr_t data3, void *dataPointer, uint32 flags)
{
	if (!sICIEnabled)
		return;

	int currentCPU = smp_get_current_cpu();

	// restrict the mask to the existing CPUs, excluding ourselves
	cpuMask &= ~((cpu_mask_t)1 << currentCPU)
		& (((cpu_mask_t)1 << sNumCPUs) - 1);
	if (cpuMask == 0) {
		panic("smp_send_multicast_ici(): 0 CPU mask");
		return;
	}

	// count target CPUs
	int32 targetCPUs = 0;
	for (int32 i = 0; i < sNumCPUs; i++) {
		if ((cpuMask & (cpu_mask_t)1 << i) != 0)
			targetCPUs++;
	}

	// find_free_message leaves interrupts disabled
	struct smp_msg *msg;
	int state = find_free_message(&msg);

	msg->message = message;
	msg->data = data;
	msg->data2 = data2;
	msg->data3 = data3;
	msg->data_ptr = dataPointer;
	msg->ref_count = targetCPUs;
	msg->flags = flags;
	// non-target CPUs are marked "already handled" so they skip the message
	msg->proc_bitmap = ~cpuMask;
	msg->done = false;

	// stick it in the broadcast mailbox
	acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
	msg->next = sBroadcastMessages;
	sBroadcastMessages = msg;
	release_spinlock(&sBroadcastMessageSpinlock);

	arch_smp_send_broadcast_ici();
		// TODO: Introduce a call that only bothers the target CPUs!

	if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
		// wait for the other cpus to finish processing it
		// the interrupt handler will ref count it to <0
		// if the message is sync after it has removed it from the mailbox
		while (msg->done == false) {
			process_all_pending_ici(currentCPU);
			PAUSE();
		}

		// for SYNC messages, it's our responsibility to put it
		// back into the free list
		return_free_message(msg);
	}

	restore_interrupts(state);
}
/*!	Releases one reference on \a msg after this CPU has processed it.

	Only the CPU that drops the reference count to zero performs cleanup:
	it unlinks the message from the broadcast mailbox (local-mailbox
	messages were already unlinked in check_for_message()), frees the data
	pointer if SMP_MSG_FLAG_FREE_ARG was set, and then either signals the
	sync sender via msg->done (SMP_MSG_FLAG_SYNC — the sender frees the
	message) or returns the message to the free list itself.
*/
static void
finish_message_processing(int currentCPU, struct smp_msg* msg,
	mailbox_source sourceMailbox)
{
	// only the last CPU to finish does the cleanup below
	if (atomic_add(&msg->ref_count, -1) != 1)
		return;

	// we were the last one to decrement the ref_count
	// it's our job to remove it from the list & possibly clean it up

	struct smp_msg** mbox;
	spinlock* spinlock;

	// clean up the message from one of the mailboxes
	if (sourceMailbox == MAILBOX_BCAST) {
		mbox = &sBroadcastMessages;
		spinlock = &sBroadcastMessageSpinlock;
	} else {
		mbox = &sCPUMessages[currentCPU];
		spinlock = &sCPUMessageSpinlock[currentCPU];
	}

	acquire_spinlock_nocheck(spinlock);

	TRACE(("cleaning up message %p\n", msg));

	if (sourceMailbox != MAILBOX_BCAST) {
		// local mailbox -- the message has already been removed in
		// check_for_message()
	} else if (msg == *mbox) {
		// broadcast message still at the head of the list
		*mbox = msg->next;
	} else {
		// we need to walk to find the message in the list.
		// we can't use any data found when previously walking through
		// the list, since the list may have changed. But, we are guaranteed
		// to at least have msg in it.
		struct smp_msg* last = NULL;
		struct smp_msg* msg1;

		msg1 = *mbox;
		while (msg1 != NULL && msg1 != msg) {
			last = msg1;
			msg1 = msg1->next;
		}

		// by definition, last must be something
		if (msg1 == msg && last != NULL)
			last->next = msg->next;
		else
			panic("last == NULL or msg != msg1");
	}

	release_spinlock(spinlock);

	// free a dynamically allocated argument if the sender asked us to
	if ((msg->flags & SMP_MSG_FLAG_FREE_ARG) != 0 && msg->data_ptr != NULL)
		free(msg->data_ptr);

	if ((msg->flags & SMP_MSG_FLAG_SYNC) != 0) {
		// signal the spinning sender; it owns the message from here on
		msg->done = true;
		// the caller cpu should now free the message
	} else {
		// in the !SYNC case, we get to free the message
		return_free_message(msg);
	}
}