/*
 * convert_pset_name_to_port:
 *
 * Produce a naked send right for the name port of a processor set,
 * or IP_NULL if the set has been deactivated.  Consumes the caller's
 * reference on `pset'.
 */
ipc_port_t
convert_pset_name_to_port(
	processor_set_t	pset)
{
	ipc_port_t	name_port = IP_NULL;

	pset_lock(pset);
	if (pset->active)
		name_port = ipc_port_make_send(pset->pset_name_self);
	pset_unlock(pset);

	pset_deallocate(pset);		/* release the donated reference */
	return name_port;
}
/*
 * processor_doaction:
 *
 * Carry out a pending reassignment or shutdown of `processor'.
 * Runs in the action thread, which first binds itself to the target
 * processor so the work happens on the processor being acted upon.
 *
 * NOTE(review): this routine performs elaborate lock juggling between
 * the processor lock (spl-protected) and pset locks (which require
 * interrupts enabled); the exact order of operations is significant.
 */
void
processor_doaction(
	processor_t	processor)
{
	thread_t this_thread;
	spl_t s;
	register processor_set_t pset;
#if MACH_HOST
	register processor_set_t new_pset;
	register thread_t thread;
	register thread_t prev_thread = THREAD_NULL;
	thread_act_t thr_act;
	boolean_t have_pset_ref = FALSE;
#endif	/* MACH_HOST */

	/*
	 * Get onto the processor to shutdown
	 */
	this_thread = current_thread();
	thread_bind(this_thread, processor);
	thread_block((void (*)(void)) 0);

	pset = processor->processor_set;
#if MACH_HOST
	/*
	 * If this is the last processor in the processor_set,
	 * stop all the threads first.
	 */
	pset_lock(pset);
	if (pset->processor_count == 1) {
		thread = (thread_t) queue_first(&pset->threads);
		prev_thread = THREAD_NULL;
		pset->ref_count++;	/* keep pset alive while unlocked below */
		have_pset_ref = TRUE;
		pset->empty = TRUE;

		/*
		 * loop through freezing the processor set assignment
		 * and reference counting the threads;
		 */
		while (!queue_end(&pset->threads, (queue_entry_t) thread)) {
			thread_reference(thread);
			pset_unlock(pset);

			/*
			 * Freeze the thread on the processor set.
			 * If it's moved, just release the reference.
			 * Get the next thread in the processor set list
			 * from the last one which was frozen.
			 */
			if( thread_stop_freeze(thread, pset) )
				prev_thread = thread;
			else
				thread_deallocate(thread);

			pset_lock(pset);
			/* resume the walk from the last thread that stayed put */
			if( prev_thread != THREAD_NULL )
				thread = (thread_t)queue_next(&prev_thread->pset_threads);
			else
				thread = (thread_t) queue_first(&pset->threads);
		}

		/*
		 * Remove the processor from the set so that when the threads
		 * are unstopped below the ones blocked in the kernel don't
		 * start running again.
		 */
		s = splsched();
		processor_lock(processor);
		pset_remove_processor(pset, processor);

		/*
		 * Prevent race with another processor being added to the set
		 * See code after Restart_pset:
		 *   while(new_pset->empty && new_pset->processor_count > 0)
		 *
		 * ... it tests for the condition where a new processor is
		 * added to the set while the last one is still being removed.
		 */
		pset->processor_count++;	/* block new processors being added */
		assert( pset->processor_count == 1 );

		/*
		 * Release the thread assignment locks, unstop the threads and
		 * release the thread references which were taken above.
		 */
		thread = (thread_t) queue_first(&pset->threads);
		while( !queue_empty( &pset->threads) && (thread != THREAD_NULL) ) {
			prev_thread = thread;
			/* advance before unfreezing: prev_thread may run once unstopped */
			if( queue_end(&pset->threads, (queue_entry_t) thread) )
				thread = THREAD_NULL;
			else
				thread = (thread_t) queue_next(&prev_thread->pset_threads);
			pset_unlock(pset);
			thread_unfreeze(prev_thread);
			thread_unstop(prev_thread);
			thread_deallocate(prev_thread);
			pset_lock(pset);
		}
		/*
		 * allow a processor to be added to the empty pset
		 */
		pset->processor_count--;
	}
	else {
		/* not last processor in set */
#endif	/* MACH_HOST */
		/*
		 * At this point, it is ok to rm the processor from the pset.
		 */
		s = splsched();
		processor_lock(processor);
		pset_remove_processor(pset, processor);
#if MACH_HOST
	}
	pset_unlock(pset);

	/*
	 * Copy the next pset pointer into a local variable and clear
	 * it because we are taking over its reference.
	 */
	new_pset = processor->processor_set_next;
	processor->processor_set_next = PROCESSOR_SET_NULL;

	if (processor->state == PROCESSOR_ASSIGN) {

Restart_pset:
		/*
		 * Nasty problem: we want to lock the target pset, but
		 * we have to enable interrupts to do that which requires
		 * dropping the processor lock. While the processor
		 * is unlocked, it could be reassigned or shutdown.
		 */
		processor_unlock(processor);
		splx(s);

		/*
		 * Lock target pset and handle remove last / assign first race.
		 * Only happens if there is more than one action thread.
		 */
		pset_lock(new_pset);
		while (new_pset->empty && new_pset->processor_count > 0) {
			/* spin unlocked until the other action thread finishes
			 * removing the last processor from new_pset */
			pset_unlock(new_pset);
			while (*(volatile boolean_t *)&new_pset->empty &&
			       *(volatile int *)&new_pset->processor_count > 0)
				/* spin */;
			pset_lock(new_pset);
		}

		/*
		 * Finally relock the processor and see if something changed.
		 * The only possibilities are assignment to a different pset
		 * and shutdown.
		 */
		s = splsched();
		processor_lock(processor);

		if (processor->state == PROCESSOR_SHUTDOWN) {
			pset_unlock(new_pset);
			goto shutdown; /* will release pset reference */
		}

		if (processor->processor_set_next != PROCESSOR_SET_NULL) {
			/*
			 * Processor was reassigned. Drop the reference
			 * we have on the wrong new_pset, and get the
			 * right one. Involves lots of lock juggling.
			 */
			processor_unlock(processor);
			splx(s);
			pset_unlock(new_pset);
			pset_deallocate(new_pset);

			s = splsched();
			processor_lock(processor);
			new_pset = processor->processor_set_next;
			processor->processor_set_next = PROCESSOR_SET_NULL;
			goto Restart_pset;
		}

		/*
		 * If the pset has been deactivated since the operation
		 * was requested, redirect to the default pset.
		 */
		if (!(new_pset->active)) {
			pset_unlock(new_pset);
			pset_deallocate(new_pset);
			new_pset = &default_pset;
			pset_lock(new_pset);
			new_pset->ref_count++;
		}

		/*
		 * Do assignment, then wakeup anyone waiting for it.
		 * Finally context switch to have it take effect.
		 */
		pset_add_processor(new_pset, processor);
		if (new_pset->empty) {
			/*
			 * Set all the threads loose (they were frozen/stopped
			 * when the set was emptied).
			 */
			thread = (thread_t) queue_first(&new_pset->threads);
			while (!queue_end(&new_pset->threads,(queue_entry_t)thread)) {
				thr_act = thread_lock_act(thread);
				thread_release(thread->top_act);
				act_unlock_thread(thr_act);
				thread = (thread_t) queue_next(&thread->pset_threads);
			}
			new_pset->empty = FALSE;
		}
		processor->processor_set_next = PROCESSOR_SET_NULL;
		processor->state = PROCESSOR_RUNNING;
		thread_wakeup((event_t)processor);
		processor_unlock(processor);
		splx(s);
		pset_unlock(new_pset);

		/*
		 * Clean up dangling references, and release our binding.
		 */
		pset_deallocate(new_pset);
		if (have_pset_ref)
			pset_deallocate(pset);
		if (prev_thread != THREAD_NULL)
			thread_deallocate(prev_thread);
		thread_bind(this_thread, PROCESSOR_NULL);
		thread_block((void (*)(void)) 0);
		return;
	}

shutdown:
#endif	/* MACH_HOST */

	/*
	 * Do shutdown, make sure we live when processor dies.
	 */
	if (processor->state != PROCESSOR_SHUTDOWN) {
		printf("state: %d\n", processor->state);
		panic("action_thread -- bad processor state");
	}
	processor_unlock(processor);

	/*
	 * Clean up dangling references, and release our binding.
	 */
#if MACH_HOST
	if (new_pset != PROCESSOR_SET_NULL)
		pset_deallocate(new_pset);
	if (have_pset_ref)
		pset_deallocate(pset);
	if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
#endif	/* MACH_HOST */

	thread_bind(this_thread, PROCESSOR_NULL);
	/* switch away; the shutdown continuation runs on another stack */
	switch_to_shutdown_context(this_thread,
				   processor_doshutdown,
				   processor);
	splx(s);
}
/*
 * processor_assign() changes the processor set that a processor is
 * assigned to.  Any previous assignment in progress is overriden.
 * Synchronizes with assignment completion if wait is TRUE.
 */
kern_return_t
processor_assign(
	processor_t		processor,
	processor_set_t		new_pset,
	boolean_t		wait)
{
	spl_t			spl;
	register processor_set_t displaced_pset;

	/*
	 * Reject null arguments.
	 * XXX Can't assign master processor.
	 */
	if (processor == PROCESSOR_NULL ||
	    new_pset == PROCESSOR_SET_NULL ||
	    processor == master_processor)
		return(KERN_FAILURE);

	/*
	 * Take a pset reference; it is donated to
	 * processor_request_action below.
	 */
	pset_reference(new_pset);

	spl = splsched();
	processor_lock(processor);
	if (processor->state == PROCESSOR_OFF_LINE ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		/*
		 * Already shutdown or being shutdown -- Can't reassign.
		 */
		processor_unlock(processor);
		splx(spl);
		pset_deallocate(new_pset);	/* give back our reference */
		return(KERN_FAILURE);
	}

	/* may return a previously-pending target pset whose ref we now own */
	displaced_pset = processor_request_action(processor, new_pset);

	/*
	 * Optionally synchronize with completion of the assignment.
	 */
	if (wait)
		while (processor->state == PROCESSOR_ASSIGN ||
		       processor->state == PROCESSOR_SHUTDOWN) {
			assert_wait((event_t)processor, TRUE);
			processor_unlock(processor);
			splx(spl);
			thread_block((void (*)(void)) 0);
			spl = splsched();
			processor_lock(processor);
		}

	processor_unlock(processor);
	splx(spl);

	if (displaced_pset != PROCESSOR_SET_NULL)
		pset_deallocate(displaced_pset);

	return(KERN_SUCCESS);
}
/*
 * host_processor_sets:
 *
 * Return an array of send rights to the name ports of all processor
 * sets on `host'.  The array is allocated here; ownership passes to
 * the caller (normally consumed by the MiG reply).
 */
kern_return_t
host_processor_sets(
	host_t				host,
	processor_set_name_array_t	*pset_list,
	natural_t			*count)
{
	unsigned int	num_psets;	/* snapshot of all_psets_count */
	processor_set_t	ps;
	processor_set_t	*pset_array;
	int		idx;
	vm_size_t	buf_size;
	vm_size_t	needed;
	vm_offset_t	buf;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	buf_size = 0;
	buf = 0;

	/*
	 * Allocate-then-recheck: the set count can change while we are
	 * unlocked allocating, so loop until the buffer is big enough
	 * while holding all_psets_lock.
	 */
	for (;;) {
		simple_lock(&all_psets_lock);
		num_psets = all_psets_count;

		needed = num_psets * sizeof(mach_port_t);
		if (needed <= buf_size)
			break;		/* buffer suffices; still locked */

		simple_unlock(&all_psets_lock);

		if (buf_size != 0)
			kfree(buf, buf_size);
		assert(needed > 0);
		buf_size = needed;

		buf = kalloc(buf_size);
		if (buf == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* Holding all_psets_lock: record each pset with a reference. */
	pset_array = (processor_set_t *) buf;
	for (idx = 0, ps = (processor_set_t) queue_first(&all_psets);
	     idx < num_psets;
	     idx++, ps = (processor_set_t) queue_next(&ps->all_psets)) {
		/* take ref for convert_pset_name_to_port */
		pset_reference(ps);
		pset_array[idx] = ps;
	}
	assert(queue_end(&all_psets, (queue_entry_t) ps));

	/* refs are held; safe to drop the list lock now */
	simple_unlock(&all_psets_lock);

	/*
	 * Always have default port.
	 */
	assert(num_psets > 0);

	/* Shrink to fit if the count dropped while we allocated. */
	if (needed < buf_size) {
		vm_offset_t	shrunk;

		shrunk = kalloc(needed);
		if (shrunk == 0) {
			for (idx = 0; idx < num_psets; idx++)
				pset_deallocate(pset_array[idx]);
			kfree(buf, buf_size);
			return KERN_RESOURCE_SHORTAGE;
		}
		bcopy((char *) buf, (char *) shrunk, needed);
		kfree(buf, buf_size);
		pset_array = (processor_set_t *) shrunk;
	}

	*pset_list = (mach_port_t *) pset_array;
	*count = num_psets;

	/* do the conversion that Mig should handle: overwrite each pset
	 * pointer in place with a send right to its name port */
	for (idx = 0; idx < num_psets; idx++)
		((mach_port_t *) pset_array)[idx] =
			(mach_port_t) convert_pset_name_to_port(pset_array[idx]);

	return KERN_SUCCESS;
}
/*
 * host_processor_sets (RT-allocator variant):
 *
 * Return an array of name-port send rights for all processor sets.
 * NOTE(review): this is a second definition of host_processor_sets in
 * this chunk (the earlier one uses simple_lock/kalloc); presumably the
 * two are selected by conditional compilation not visible here --
 * confirm against the full file.
 */
kern_return_t
host_processor_sets(
	host_t				host,
	processor_set_name_array_t	*pset_list,
	mach_msg_type_number_t		*count)
{
	unsigned int	actual;		/* this many psets */
	processor_set_t	pset;
	processor_set_t	*psets;
	int		i;
	boolean_t	rt = FALSE;	/* ### This boolean is FALSE, because there
					 * currently exists no mechanism to determine
					 * whether or not the reply port is an RT port */
	vm_size_t	size;
	vm_size_t	size_needed;
	vm_offset_t	addr;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0;
	addr = 0;

	/* allocate-then-recheck: the pset count can change while we are
	 * unlocked allocating, so retry until the buffer is large enough
	 * while all_psets_lock is held */
	for (;;) {
		mutex_lock(&all_psets_lock);
		actual = all_psets_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		mutex_unlock(&all_psets_lock);

		if (size != 0)
			KFREE(addr, size, rt);
		assert(size_needed > 0);
		size = size_needed;

		addr = KALLOC(size, rt);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the all_psets_lock */
	psets = (processor_set_t *) addr;
	for (i = 0, pset = (processor_set_t) queue_first(&all_psets);
	     i < actual;
	     i++, pset = (processor_set_t) queue_next(&pset->all_psets)) {
		/* take ref for convert_pset_name_to_port */
		pset_reference(pset);
		psets[i] = pset;
	}
	assert(queue_end(&all_psets, (queue_entry_t) pset));

	/* can unlock now that we've got the pset refs */
	mutex_unlock(&all_psets_lock);

	/*
	 * Always have default port.
	 */
	assert(actual > 0);

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		vm_offset_t newaddr;

		newaddr = KALLOC(size_needed, rt);
		if (newaddr == 0) {
			/* drop the refs taken above before failing */
			for (i = 0; i < actual; i++)
				pset_deallocate(psets[i]);
			KFREE(addr, size, rt);
			return KERN_RESOURCE_SHORTAGE;
		}

		bcopy((char *) addr, (char *) newaddr, size_needed);
		KFREE(addr, size, rt);
		psets = (processor_set_t *) newaddr;
	}

	*pset_list = (mach_port_t *) psets;
	*count = actual;

	/* do the conversion that Mig should handle: overwrite each pset
	 * pointer in place with its name-port send right */
	for (i = 0; i < actual; i++)
		((ipc_port_t *) psets)[i] = convert_pset_name_to_port(psets[i]);

	return KERN_SUCCESS;
}