extern "C" int smp_call_function(cpumap_t *cpus, void (*scfunc)(void *), void *param) { struct smp_call_parameter sp; sp.func = scfunc; sp.param = param; sp.cpus = cpus; mp_rendezvous_no_intrs(smp_cfunction, &sp); return 0; }
// Processor Driver
void VoodooPState::ProcessorDriver(void)
{
    if (Current != Request) {
        // Prepare values
        if (PStateControl) GlobalRequest = PState[Request];
        if (TStateControl) GlobalThrottle = Throttle >> 2;
        // Read state values (only 1 core)
        VoodooReadProc(this);
#if SUPPORT_VOODOO_KERNEL
        UInt32 NewFrequency, OldFrequency;
        bool doStepping = (!ConstantTSC && VoodooKernel);
        if (doStepping) {
            NewFrequency = VoodooFrequencyProc(this, &GlobalRequest);
            OldFrequency = VoodooFrequencyProc(this, &GlobalCurrent);
            rtc_clock_stepping(NewFrequency, OldFrequency);
        }
#endif
        // Write state values (all the cores)
        IOSimpleLockLock(SimpleLock);
        mp_rendezvous_no_intrs(VoodooWriteProc, this);
        IOSimpleLockUnlock(SimpleLock);
#if SUPPORT_VOODOO_KERNEL
        if (doStepping) {
            rtc_clock_stepped(NewFrequency, OldFrequency);
        }
#endif
    }
    // Read state values to GlobalCurrent (only 1 core)
    VoodooReadProc(this);
    // Update final values
    Current = Request;
#if 1
    Frequency = PState[Current].frequency;
    Voltage = PState[Current].voltage;
#else
    // convert fid/vid/did to frequency/voltage
    Frequency = VoodooFrequencyProc(this, &GlobalCurrent);
    Voltage = VoodooVoltageProc(this, &GlobalCurrent);
#endif
    // Update kernel frequency
    // gPEClockFrequencyInfo.bus_to_cpu_rate_num = GlobalCurrent[0].fid & 0x3F;
    gPEClockFrequencyInfo.cpu_clock_rate_hz = Frequency * Mega;
    gPEClockFrequencyInfo.cpu_frequency_hz = gPEClockFrequencyInfo.cpu_clock_rate_hz;
}
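The interesting split here is that VoodooReadProc runs on the calling core only, while VoodooWriteProc runs on every core via the rendezvous. A hedged sketch of what the write payload amounts to on Intel EIST hardware, assuming the P-state is requested through IA32_PERF_CTL (0x199) and that GlobalRequest carries fid/vid fields reachable from the callback; the field names and encoding are illustrative, not the driver's actual code:

#define MSR_IA32_PERF_CTL 0x199

/* Rendezvous payload: executed once per core with interrupts off. */
static void VoodooWriteProc(void *magic)
{
    VoodooPState *self = (VoodooPState *)magic;

    /* Request the new P-state: FID in bits 15:8, VID in bits 7:0
     * (assumed layout; real hardware encodings vary by CPU family). */
    UInt64 ctl = ((UInt64)self->GlobalRequest.fid << 8) | self->GlobalRequest.vid;
    wrmsr64(MSR_IA32_PERF_CTL, ctl);
}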
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();

    RTMPARGS Args;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = NIL_RTCPUID;
    Args.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnAllDarwinWrapper, &Args);
    return VINF_SUCCESS;
}
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();

    RTMPARGS Args;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = idCpu;
    Args.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnSpecificDarwinWrapper, &Args);
    return Args.cHits == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND;
}
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    RTMPARGS Args;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = RTMpCpuId();
    Args.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnOthersDarwinWrapper, &Args);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
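All three entry points hand the same RTMPARGS block to mp_rendezvous_no_intrs and differ only in the wrapper, which does the per-CPU filtering. Paraphrased from the IPRT Darwin sources (r0drv/darwin/mp-r0drv-darwin.cpp; exact bodies may vary between VirtualBox releases):

static void rtmpOnAllDarwinWrapper(void *pvArg)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    pArgs->pfnWorker(cpu_number(), pArgs->pvUser1, pArgs->pvUser2);
}

static void rtmpOnOthersDarwinWrapper(void *pvArg)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    RTCPUID idCpu = cpu_number();
    if (pArgs->idCpu != idCpu)      /* skip the CPU that made the call */
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
}

static void rtmpOnSpecificDarwinWrapper(void *pvArg)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    RTCPUID idCpu = cpu_number();
    if (pArgs->idCpu == idCpu) {    /* run only on the requested CPU */
        ASMAtomicIncU32(&pArgs->cHits);
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
    }
}

The cHits counter is what lets RTMpOnSpecific distinguish a successful run from an idCpu that never checked in, hence the Args.cHits == 1 test above.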
int i386_set_ldt(
    uint32_t *retval,
    uint32_t start_sel,
    uint32_t descs,    /* out */
    uint32_t num_sels)
{
    user_ldt_t new_ldt, old_ldt;
    struct real_descriptor *dp;
    unsigned int i;
    unsigned int min_selector = LDTSZ_MIN;  /* do not allow the system selectors to be changed */
    task_t task = current_task();
    unsigned int ldt_count;
    kern_return_t err;

    if (start_sel != LDT_AUTO_ALLOC
        && (start_sel != 0 || num_sels != 0)
        && (start_sel < min_selector || start_sel >= LDTSZ))
        return EINVAL;
    if (start_sel != LDT_AUTO_ALLOC
        && (uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) /* cast to uint64_t to detect wrap-around */
        return EINVAL;

    task_lock(task);

    old_ldt = task->i386_ldt;

    if (start_sel == LDT_AUTO_ALLOC) {
        if (old_ldt) {
            unsigned int null_count;
            struct real_descriptor null_ldt;

            bzero(&null_ldt, sizeof(null_ldt));

            /*
             * Look for null selectors among the already-allocated
             * entries.
             */
            null_count = 0;
            i = 0;
            while (i < old_ldt->count) {
                if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) {
                    null_count++;
                    if (null_count == num_sels)
                        break;  /* break out of while loop */
                } else {
                    null_count = 0;
                }
            }

            /*
             * If we broke out of the while loop, i points to the selector
             * after num_sels null selectors. Otherwise it points to the end
             * of the old LDT, and null_count is the number of null selectors
             * at the end.
             *
             * Either way, there are null_count null selectors just prior to
             * the i-indexed selector, and either null_count >= num_sels,
             * or we're at the end, so we can extend.
             */
            start_sel = old_ldt->start + i - null_count;
        } else {
            start_sel = LDTSZ_MIN;
        }

        if (start_sel + num_sels > LDTSZ) {
            task_unlock(task);
            return ENOMEM;
        }
    }

    if (start_sel == 0 && num_sels == 0) {
        new_ldt = NULL;
    } else {
        /*
         * Allocate new LDT
         */
        unsigned int begin_sel = start_sel;
        unsigned int end_sel = begin_sel + num_sels;

        if (old_ldt != NULL) {
            if (old_ldt->start < begin_sel)
                begin_sel = old_ldt->start;
            if (old_ldt->start + old_ldt->count > end_sel)
                end_sel = old_ldt->start + old_ldt->count;
        }

        ldt_count = end_sel - begin_sel;

        new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt)
                                     + (ldt_count * sizeof(struct real_descriptor)));
        if (new_ldt == NULL) {
            task_unlock(task);
            return ENOMEM;
        }

        new_ldt->start = begin_sel;
        new_ldt->count = ldt_count;

        /*
         * Have new LDT. If there was an old LDT, copy descriptors
         * from old to new.
         */
        if (old_ldt) {
            bcopy(&old_ldt->ldt[0],
                  &new_ldt->ldt[old_ldt->start - begin_sel],
                  old_ldt->count * sizeof(struct real_descriptor));

            /*
             * If the old and new LDTs are non-overlapping, fill the
             * center in with null selectors.
             */
            if (old_ldt->start + old_ldt->count < start_sel)
                bzero(&new_ldt->ldt[old_ldt->count],
                      (start_sel - (old_ldt->start + old_ldt->count))
                          * sizeof(struct real_descriptor));
            else if (old_ldt->start > start_sel + num_sels)
                bzero(&new_ldt->ldt[num_sels],
                      (old_ldt->start - (start_sel + num_sels))
                          * sizeof(struct real_descriptor));
        }

        /*
         * Install new descriptors.
         */
        if (descs != 0) {
            err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel],
                         num_sels * sizeof(struct real_descriptor));
            if (err != 0) {
                task_unlock(task);
                user_ldt_free(new_ldt);
                return err;
            }
        } else {
            bzero(&new_ldt->ldt[start_sel - begin_sel],
                  num_sels * sizeof(struct real_descriptor));
        }

        /*
         * Validate descriptors.
         * Only allow descriptors with user privileges.
         */
        for (i = 0, dp = (struct real_descriptor *)&new_ldt->ldt[start_sel - begin_sel];
             i < num_sels;
             i++, dp++) {
            switch (dp->access & ~ACC_A) {
            case 0:
            case ACC_P:
                /* valid empty descriptor */
                break;
            case ACC_P | ACC_PL_U | ACC_DATA:
            case ACC_P | ACC_PL_U | ACC_DATA_W:
            case ACC_P | ACC_PL_U | ACC_DATA_E:
            case ACC_P | ACC_PL_U | ACC_DATA_EW:
            case ACC_P | ACC_PL_U | ACC_CODE:
            case ACC_P | ACC_PL_U | ACC_CODE_R:
            case ACC_P | ACC_PL_U | ACC_CODE_C:
            case ACC_P | ACC_PL_U | ACC_CODE_CR:
            case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
            case ACC_P | ACC_PL_U | ACC_CALL_GATE:
                break;
            default:
                task_unlock(task);
                user_ldt_free(new_ldt);
                return EACCES;
            }
        }
    }

    task->i386_ldt = new_ldt;   /* new LDT for task */

    /*
     * Switch to new LDT. We need to do this on all CPUs, since
     * another thread in this same task may be currently running,
     * and we need to make sure the new LDT is in place
     * throughout the task before returning to the user.
     */
    mp_rendezvous_no_intrs(user_ldt_set_action, task);

    task_unlock(task);

    /* free old LDT. We can't do this until after we've
     * rendezvoused with all CPUs, in case another thread
     * in this task was in the process of context switching.
     */
    if (old_ldt)
        user_ldt_free(old_ldt);

    *retval = start_sel;

    return 0;
}
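The payload on the other side of this rendezvous is small: every CPU checks whether it is currently running a thread of the affected task and, if so, reloads its LDT register. Essentially (reconstructed from the same osfmk/i386/user_ldt.c; the body is from memory and may differ slightly between XNU versions):

/* Rendezvous payload: CPUs not running the affected task merely
 * synchronize; CPUs that are running it reload the LDT in place. */
static void
user_ldt_set_action(void *arg)
{
    task_t arg_task = (task_t)arg;

    if (arg_task == current_task())
        user_ldt_set(current_thread());
}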