/*
 *	host_processor_sets:
 *
 *	Only one processor set, the default processor set, in this case.
 *	Returns a one-element array holding a send right naming the
 *	default processor set.  Caller owns the returned kalloc'd array
 *	and the port reference.
 */
kern_return_t host_processor_sets(
	host_t host,
	processor_set_name_array_t *pset_list,
	natural_t *count)
{
	vm_offset_t addr;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Allocate memory.  Can be pageable because it won't be
	 *	touched while holding a lock.
	 */
	addr = kalloc((vm_size_t) sizeof(mach_port_t));
	if (addr == 0)
		return KERN_RESOURCE_SHORTAGE;

	/* take ref for convert_pset_name_to_port */
	pset_reference(&default_pset);
	/* do the conversion that Mig should handle */
	*((mach_port_t *) addr) = (mach_port_t) convert_pset_name_to_port(&default_pset);

	*pset_list = (mach_port_t *) addr;
	*count = 1;

	return KERN_SUCCESS;
}
/*
 *	host_processor_sets:
 *
 *	Only one processor set, the default processor set, in this case.
 *	Real-time-aware variant: allocation goes through KALLOC/KFREE with
 *	an RT flag.  Returns a one-element array holding a send right for
 *	the default processor set; caller owns the array and the reference.
 */
kern_return_t host_processor_sets(
	host_t host,
	processor_set_name_array_t *pset_list,
	mach_msg_type_number_t *count)
{
	vm_offset_t addr;
	boolean_t rt = FALSE;	/* ### This boolean is FALSE, because there
				 * currently exists no mechanism to determine
				 * whether or not the reply port is an RT port
				 */

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Allocate memory.  Can be pageable because it won't be
	 *	touched while holding a lock.
	 */
	addr = KALLOC((vm_size_t) sizeof(mach_port_t), rt);
	if (addr == 0)
		return KERN_RESOURCE_SHORTAGE;

	/* take ref for convert_pset_name_to_port */
	pset_reference(&default_pset);
	/* do the conversion that Mig should handle */
	*((ipc_port_t *) addr) = convert_pset_name_to_port(&default_pset);

	*pset_list = (mach_port_t *) addr;
	*count = 1;

	return KERN_SUCCESS;
}
/*
 *	processor_set_default:
 *
 *	Return ports for manipulating default_processor set.
 *	Takes a reference on the default pset on behalf of the caller.
 */
kern_return_t processor_set_default(
	const host_t host,
	processor_set_t *pset)
{
	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	/* hand back the default pset with a fresh reference */
	pset_reference(&default_pset);
	*pset = &default_pset;

	return KERN_SUCCESS;
}
/*
 *	host_processor_set_priv:
 *
 *	Return control port for given processor set.
 *	On success, *pset carries a new reference the caller must release.
 */
kern_return_t host_processor_set_priv(
	host_t host,
	processor_set_t pset_name,
	processor_set_t *pset)
{
	/* reject null host or null pset name */
	if (host == HOST_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	pset_reference(pset_name);
	*pset = pset_name;

	return KERN_SUCCESS;
}
/*
 *	convert_port_to_pset:
 *
 *	Convert from a port to a pset.  Doesn't consume the port ref;
 *	produces a pset ref, which may be null (if the port is invalid,
 *	dead, or not a pset kernel object).
 */
processor_set_t convert_port_to_pset(
	ipc_port_t port)
{
	processor_set_t result;

	if (!IP_VALID(port))
		return PROCESSOR_SET_NULL;

	result = PROCESSOR_SET_NULL;
	ip_lock(port);
	/* only an active port of pset kobject type yields a pset */
	if (ip_active(port) && ip_kotype(port) == IKOT_PSET) {
		result = (processor_set_t) port->ip_kobject;
		pset_reference(result);		/* ref for the caller */
	}
	ip_unlock(port);

	return result;
}
/*
 *	processor_assign() changes the processor set that a processor is
 *	assigned to.  Any previous assignment in progress is overridden.
 *	Synchronizes with assignment completion if wait is TRUE.
 *
 *	processor:	processor to reassign
 *	new_pset:	destination processor set
 *	wait:		if TRUE, block until the assignment has completed
 *
 *	Returns KERN_FAILURE for null arguments, the master processor,
 *	or a processor that is offline / shutting down.
 */
kern_return_t processor_assign(
	processor_t processor,
	processor_set_t new_pset,
	boolean_t wait)
{
	spl_t s;
	register processor_set_t old_next_pset;

	/*
	 *	Check for null arguments.
	 *	XXX Can't assign master processor.
	 */
	if (processor == PROCESSOR_NULL || new_pset == PROCESSOR_SET_NULL ||
	    processor == master_processor) {
		return(KERN_FAILURE);
	}

	/*
	 *	Get pset reference to donate to processor_request_action.
	 */
	pset_reference(new_pset);

	s = splsched();
	processor_lock(processor);
	if(processor->state == PROCESSOR_OFF_LINE ||
	   processor->state == PROCESSOR_SHUTDOWN) {
		/*
		 *	Already shutdown or being shutdown -- Can't reassign.
		 *	Drop the reference taken above before bailing out.
		 */
		processor_unlock(processor);
		splx(s);
		pset_deallocate(new_pset);
		return(KERN_FAILURE);
	}

	/* donates the new_pset ref; returns any superseded pending pset */
	old_next_pset = processor_request_action(processor, new_pset);

	/*
	 *	Synchronization with completion.
	 *	Re-take splsched + processor lock after each block,
	 *	then re-check state before deciding to wait again.
	 */
	if (wait) {
		while (processor->state == PROCESSOR_ASSIGN ||
		       processor->state == PROCESSOR_SHUTDOWN) {
			assert_wait((event_t)processor, TRUE);
			processor_unlock(processor);
			splx(s);
			thread_block((void (*)(void)) 0);
			s = splsched();
			processor_lock(processor);
		}
	}

	processor_unlock(processor);
	splx(s);

	/* release the pset whose pending assignment we overrode, if any */
	if (old_next_pset != PROCESSOR_SET_NULL)
		pset_deallocate(old_next_pset);

	return(KERN_SUCCESS);
}
/*
 *	host_processor_sets:
 *
 *	Return an array of send rights naming every processor set in the
 *	system.  Uses the classic Mach allocate/lock/recheck loop: memory
 *	is sized outside the lock and the count re-verified after
 *	re-acquiring it, since all_psets_count may change in between.
 *	Caller owns the returned kalloc'd array, the pset references,
 *	and the port rights.
 */
kern_return_t host_processor_sets(
	host_t host,
	processor_set_name_array_t *pset_list,
	natural_t *count)
{
	unsigned int actual;	/* this many psets */
	processor_set_t pset;
	processor_set_t *psets;
	int i;

	vm_size_t size;
	vm_size_t size_needed;
	vm_offset_t addr;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		simple_lock(&all_psets_lock);
		actual = all_psets_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;	/* NOTE: exits loop still holding all_psets_lock */

		/* unlock and allocate more memory */
		simple_unlock(&all_psets_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the all_psets_lock */
	psets = (processor_set_t *) addr;

	for (i = 0, pset = (processor_set_t) queue_first(&all_psets);
	     i < actual;
	     i++, pset = (processor_set_t) queue_next(&pset->all_psets)) {
		/* take ref for convert_pset_name_to_port */
		pset_reference(pset);
		psets[i] = pset;
	}
	assert(queue_end(&all_psets, (queue_entry_t) pset));

	/* can unlock now that we've got the pset refs */
	simple_unlock(&all_psets_lock);

	/*
	 *	Always have default port.
	 */
	assert(actual > 0);

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		vm_offset_t newaddr;

		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			/* undo the refs taken above before failing */
			for (i = 0; i < actual; i++)
				pset_deallocate(psets[i]);
			kfree(addr, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		bcopy((char *) addr, (char *) newaddr, size_needed);
		kfree(addr, size);
		psets = (processor_set_t *) newaddr;
	}

	*pset_list = (mach_port_t *) psets;
	*count = actual;

	/* do the conversion that Mig should handle */
	/* converts in place: each pset pointer becomes its port name */
	for (i = 0; i < actual; i++)
		((mach_port_t *) psets)[i] =
			(mach_port_t)convert_pset_name_to_port(psets[i]);

	return KERN_SUCCESS;
}
/*
 *	host_processor_sets:
 *
 *	Return an array of send rights naming every processor set in the
 *	system.  Real-time-aware variant: uses mutex_lock on all_psets_lock
 *	and KALLOC/KFREE with an RT flag.  Same allocate/lock/recheck loop
 *	as the classic version; the count is re-verified under the lock
 *	after each allocation.  Caller owns the returned array, the pset
 *	references, and the port rights.
 */
kern_return_t host_processor_sets(
	host_t host,
	processor_set_name_array_t *pset_list,
	mach_msg_type_number_t *count)
{
	unsigned int actual;	/* this many psets */
	processor_set_t pset;
	processor_set_t *psets;
	int i;
	boolean_t rt = FALSE;	/* ### This boolean is FALSE, because there
				 * currently exists no mechanism to determine
				 * whether or not the reply port is an RT port
				 */

	vm_size_t size;
	vm_size_t size_needed;
	vm_offset_t addr;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		mutex_lock(&all_psets_lock);
		actual = all_psets_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;	/* NOTE: exits loop still holding all_psets_lock */

		/* unlock and allocate more memory */
		mutex_unlock(&all_psets_lock);

		if (size != 0)
			KFREE(addr, size, rt);

		assert(size_needed > 0);
		size = size_needed;

		addr = KALLOC(size, rt);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the all_psets_lock */
	psets = (processor_set_t *) addr;

	for (i = 0, pset = (processor_set_t) queue_first(&all_psets);
	     i < actual;
	     i++, pset = (processor_set_t) queue_next(&pset->all_psets)) {
		/* take ref for convert_pset_name_to_port */
		pset_reference(pset);
		psets[i] = pset;
	}
	assert(queue_end(&all_psets, (queue_entry_t) pset));

	/* can unlock now that we've got the pset refs */
	mutex_unlock(&all_psets_lock);

	/*
	 *	Always have default port.
	 */
	assert(actual > 0);

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		vm_offset_t newaddr;

		newaddr = KALLOC(size_needed, rt);
		if (newaddr == 0) {
			/* undo the refs taken above before failing */
			for (i = 0; i < actual; i++)
				pset_deallocate(psets[i]);
			KFREE(addr, size, rt);
			return KERN_RESOURCE_SHORTAGE;
		}

		bcopy((char *) addr, (char *) newaddr, size_needed);
		KFREE(addr, size, rt);
		psets = (processor_set_t *) newaddr;
	}

	*pset_list = (mach_port_t *) psets;
	*count = actual;

	/* do the conversion that Mig should handle */
	/* converts in place: each pset pointer becomes its port */
	for (i = 0; i < actual; i++)
		((ipc_port_t *) psets)[i] = convert_pset_name_to_port(psets[i]);

	return KERN_SUCCESS;
}