/*
 * Duplicate parent state in child
 * for U**X fork.
 *
 * Copies the parent thread's machine-dependent user state (saved
 * registers, FPU state, and BSD cthread/LDT info) into the child.
 * Returns KERN_FAILURE if either thread lacks a PCB, else KERN_SUCCESS.
 */
kern_return_t
machine_thread_dup(
	thread_t	parent,
	thread_t	child
)
{
	pcb_t		parent_pcb;
	pcb_t		child_pcb;

	/* Both threads must already have a machine PCB allocated. */
	if ((child_pcb = child->machine.pcb) == NULL ||
	    (parent_pcb = parent->machine.pcb) == NULL)
		return (KERN_FAILURE);

	/*
	 * Copy over the x86_saved_state registers
	 */
	if (cpu_mode_is64bit()) {
		if (thread_is_64bit(parent))
			bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
		else
			/* 32-bit thread on a 64-bit-capable CPU: compat save area */
			bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
	} else
		bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

	/*
	 * Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 */
	fpu_dup_fxstate(parent, child);

#ifdef	MACH_BSD
	/*
	 * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
	 */
	child_pcb->cthread_self = parent_pcb->cthread_self;
	if (!thread_is_64bit(parent))
		child_pcb->cthread_desc = parent_pcb->cthread_desc;

	/*
	 * FIXME - should a user specified LDT, TSS and V86 info
	 * be duplicated as well?? - probably not.
	 */
	/* Duplicate any user LDT entry that was set; 0 means none installed. */
	if (parent_pcb->uldt_selector != 0) {
		child_pcb->uldt_selector = parent_pcb->uldt_selector;
		child_pcb->uldt_desc = parent_pcb->uldt_desc;
	}
#endif

	return (KERN_SUCCESS);
}
/*
 * thread_setsinglestep:
 *
 * Enable (on != 0) or disable the x86 trap flag (EFL_TF) in the
 * thread's saved user register state, so the thread single-steps
 * when it next runs in user mode.
 */
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
	pal_register_cache_state(thread, DIRTY);

	if (!thread_is_64bit(thread)) {
		x86_saved_state32_t *state32 = USER_REGS32(thread);

		if (on) {
			state32->efl |= EFL_TF;
			/* Ensure IRET: take the slow exit path so TF is honored */
			if (state32->cs == SYSENTER_CS)
				state32->cs = SYSENTER_TF_CS;
		} else {
			state32->efl &= ~EFL_TF;
		}
	} else {
		x86_saved_state64_t *state64 = USER_REGS64(thread);

		if (on)
			state64->isf.rflags |= EFL_TF;
		else
			state64->isf.rflags &= ~EFL_TF;
	}

	return (KERN_SUCCESS);
}
/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
	thread_t		thread,
	mach_vm_offset_t	*default_user_stack)
{
	/* Pick the address-space-appropriate default stack top. */
	*default_user_stack = thread_is_64bit(thread) ?
	    VM_USRSTACK64 : VM_USRSTACK32;

	return (KERN_SUCCESS);
}
/*
 *	Routine:	ipc_mqueue_select_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Pick the message off the queue and
 *		"post" it to thread.
 *	Conditions:
 *		mqueue locked.
 *		thread not locked.
 *		There is a message.
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large
 */
void
ipc_mqueue_select_on_thread(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	thread_t		thread)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	mach_msg_size_t rcv_size;

	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	rcv_size = ipc_kmsg_copyout_size(kmsg, thread->map);
	if (rcv_size + REQUESTED_TRAILER_SIZE(thread_is_64bit(thread), option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			/* Leave the kmsg queued; report the size needed to the receiver. */
			thread->ith_receiver_name = mqueue->imq_receiver_name;
			thread->ith_kmsg = IKM_NULL;
			thread->ith_msize = rcv_size;
			thread->ith_seqno = 0;
			thread->ith_state = mr;
			return;
		}
	}

	/* Dequeue the message and hand it to the receiving thread. */
	ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
	ipc_mqueue_release_msgcount(mqueue);
	thread->ith_seqno = mqueue->imq_seqno++;
	thread->ith_kmsg = kmsg;
	thread->ith_state = mr;

	current_task()->messages_received++;
	return;
}
/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
	if (!thread_is_64bit(thread)) {
		x86_saved_state32_t *state32 = USER_REGS32(thread);

		/* Truncation to 32 bits is explicit for ILP32 threads. */
		state32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
	} else {
		x86_saved_state64_t *state64 = USER_REGS64(thread);

		state64->isf.rip = (uint64_t)entry;
	}
}
/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
	thread_t		thread,
	mach_vm_address_t	user_stack)
{
	if (!thread_is_64bit(thread)) {
		x86_saved_state32_t *state32 = USER_REGS32(thread);

		/* Truncation to 32 bits is explicit for ILP32 threads. */
		state32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
	} else {
		x86_saved_state64_t *state64 = USER_REGS64(thread);

		state64->isf.rsp = (uint64_t)user_stack;
	}
}
/*
 * thread_set_parent:
 *
 * Patch the parent's saved user registers: pid goes in the primary
 * return register (eax/rax), 0 in the secondary (edx/rdx), and the
 * carry flag is cleared — presumably the fork() return convention.
 */
void
thread_set_parent(thread_t parent, int pid)
{
	if (!thread_is_64bit(parent)) {
		x86_saved_state32_t *regs32 = USER_REGS32(parent);

		regs32->eax = pid;
		regs32->edx = 0;
		regs32->efl &= ~EFL_CF;
	} else {
		x86_saved_state64_t *regs64 = USER_REGS64(parent);

		regs64->rax = pid;
		regs64->rdx = 0;
		regs64->isf.rflags &= ~EFL_CF;
	}
}
/*
 * thread_set_child:
 *
 * Patch the child's saved user registers: pid goes in the primary
 * return register (eax/rax), 1 in the secondary (edx/rdx), and the
 * carry flag is cleared — mirrors thread_set_parent but with edx/rdx = 1.
 */
void
thread_set_child(thread_t child, int pid)
{
	if (!thread_is_64bit(child)) {
		x86_saved_state32_t *regs32 = USER_REGS32(child);

		regs32->eax = pid;
		regs32->edx = 1;
		regs32->efl &= ~EFL_CF;
	} else {
		x86_saved_state64_t *regs64 = USER_REGS64(child);

		regs64->rax = pid;
		regs64->rdx = 1;
		regs64->isf.rflags &= ~EFL_CF;
	}
}
/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
	thread_t	thread,
	int		adjust)
{
	if (!thread_is_64bit(thread)) {
		x86_saved_state32_t *regs32 = USER_REGS32(thread);

		regs32->uesp += adjust;
		return CAST_USER_ADDR_T(regs32->uesp);
	} else {
		x86_saved_state64_t *regs64 = USER_REGS64(thread);

		regs64->isf.rsp += adjust;
		return regs64->isf.rsp;
	}
}
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t	mqueue,
	register ipc_kmsg_t	kmsg)
{
	spl_t s;

	/*
	 * While the msg queue is locked, we have control of the
	 * kmsg, so the ref in it for the port is still good.
	 *
	 * Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;
		mach_msg_size_t msize;

		/* Returns with the receiver thread locked if one was woken. */
		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}

		/*
		 * If the receiver waited with a facility not directly
		 * related to Mach messaging, then it isn't prepared to get
		 * handed the message directly.  Just set it running, and
		 * go look for another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
			thread_unlock(receiver);
			continue;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		msize = ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_msize <
				(msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for special too
		 * large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);

	current_task()->messages_sent++;
	return;
}
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t		port_mqueue,
	ipc_mqueue_t		set_mqueue,
	wait_queue_link_t	wql)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t       kmsg, next;
	kern_return_t	 kr;
	spl_t		 s;

	/* Link the port's wait queue into the set; fails if already linked, etc. */
	kr = wait_queue_link_noalloc(port_waitq, set_waitq, wql);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Lets get them together.
	 */
	s = splsched();
	imq_lock(port_mqueue);
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;
			mach_msg_size_t msize;

			/* Returns with the woken thread locked, or THREAD_NULL. */
			th = wait_queue_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED,
						FALSE);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * If the receiver waited with a facility not directly
			 * related to Mach messaging, then it isn't prepared to get
			 * handed the message directly.  Just set it running, and
			 * go look for another thread that can.
			 */
			if (th->ith_state != MACH_RCV_IN_PROGRESS) {
				thread_unlock(th);
				continue;
			}

			/*
			 * Found a receiver.  see if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list and let them go back and figure it out and
			 * just move onto the next.
			 */
			msize = ipc_kmsg_copyout_size(kmsg, th->map);
			if (th->ith_msize <
					(msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(th), th->ith_option))) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = msize;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let him go without message
					 */
					th->ith_receiver_name = port_mqueue->imq_receiver_name;
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so give it to him.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			ipc_mqueue_release_msgcount(port_mqueue);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			break;  /* go to next message */
		}
	}
 leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}