/*===========================================================================*
 *				switch_to_user				     *
 *===========================================================================*/
PUBLIC void switch_to_user(void)
{
	/* This function is called an instant before proc_ptr is
	 * to be scheduled again.
	 */
	/*
	 * if the current process is still runnable check the misc flags and let
	 * it run unless it becomes not runnable in the meantime
	 */
	if (proc_is_runnable(proc_ptr))
		goto check_misc_flags;
	/*
	 * if a process becomes not runnable while handling the misc flags, we
	 * need to pick a new one here and start from scratch. Also if the
	 * current process wasn't runnable, we pick a new one here
	 */
not_runnable_pick_new:
	if (proc_is_preempted(proc_ptr)) {
		proc_ptr->p_rts_flags &= ~RTS_PREEMPTED;
		if (proc_is_runnable(proc_ptr)) {
			/* A preempted process with quantum left goes to the
			 * head of its queue so it resumes first; one that ran
			 * out of quantum goes to the tail. */
			if (!is_zero64(proc_ptr->p_cpu_time_left))
				enqueue_head(proc_ptr);
			else
				enqueue(proc_ptr);
		}
	}
	/*
	 * if we have no process to run, set IDLE as the current process for
	 * time accounting and put the cpu in an idle state. After the next
	 * timer interrupt the execution resumes here and we can pick another
	 * process. If there is still nothing runnable we "schedule" IDLE again
	 */
	while (!(proc_ptr = pick_proc())) {
		proc_ptr = proc_addr(IDLE);
		if (priv(proc_ptr)->s_flags & BILLABLE)
			bill_ptr = proc_ptr;
		idle();
	}

	/* Install the chosen process's address space before running it. */
	switch_address_space(proc_ptr);

check_misc_flags:

	assert(proc_ptr);
	assert(proc_is_runnable(proc_ptr));
	/* Handle any deferred work flagged on the process before it returns
	 * to user mode.  Handlers may clear flags or block the process. */
	while (proc_ptr->p_misc_flags &
		(MF_KCALL_RESUME | MF_DELIVERMSG |
		 MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

		assert(proc_is_runnable(proc_ptr));
		if (proc_ptr->p_misc_flags & MF_KCALL_RESUME) {
			/* Finish a kernel call that was suspended earlier. */
			kernel_call_resume(proc_ptr);
		}
		else if (proc_ptr->p_misc_flags & MF_DELIVERMSG) {
			TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
				proc_ptr->p_name, proc_ptr->p_endpoint););
			delivermsg(proc_ptr);
		}
/*===========================================================================*
 *				schedcheck				     *
 *===========================================================================*/
PUBLIC struct proc * schedcheck(void)
{
	/* This function is called an instant before proc_ptr is
	 * to be scheduled again.
	 */
	NOREC_ENTER(schedch);

	/* Must run with interrupts off; we manipulate scheduling state. */
	vmassert(intr_disabled());

	/*
	 * if the current process is still runnable check the misc flags and let
	 * it run unless it becomes not runnable in the meantime
	 */
	if (proc_is_runnable(proc_ptr))
		goto check_misc_flags;
	/*
	 * if a process becomes not runnable while handling the misc flags, we
	 * need to pick a new one here and start from scratch. Also if the
	 * current process wasn't runnable, we pick a new one here
	 */
not_runnable_pick_new:
	if (proc_is_preempted(proc_ptr)) {
		proc_ptr->p_rts_flags &= ~RTS_PREEMPTED;
		if (proc_is_runnable(proc_ptr))
			enqueue_head(proc_ptr);
	}
	/* this enqueues the process again */
	if (proc_no_quantum(proc_ptr))
		RTS_UNSET(proc_ptr, RTS_NO_QUANTUM);
	/*
	 * if we have no process to run, set IDLE as the current process for
	 * time accounting and put the cpu in an idle state. After the next
	 * timer interrupt the execution resumes here and we can pick another
	 * process. If there is still nothing runnable we "schedule" IDLE again
	 */
	while( !(proc_ptr = pick_proc())) {
		proc_ptr = proc_addr(IDLE);
		if (priv(proc_ptr)->s_flags & BILLABLE)
			bill_ptr = proc_ptr;
		idle();
	}

check_misc_flags:

	vmassert(proc_ptr);
	vmassert(proc_is_runnable(proc_ptr));
	/* Handle any deferred work flagged on the process before running it. */
	while (proc_ptr->p_misc_flags &
		(MF_DELIVERMSG | MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

		vmassert(proc_is_runnable(proc_ptr));
		if (proc_ptr->p_misc_flags & MF_DELIVERMSG) {
			TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
				proc_ptr->p_name, proc_ptr->p_endpoint););
			/* VMSUSPEND means delivery hit an unmapped page; the
			 * process is suspended until VM resolves it. */
			if(delivermsg(proc_ptr) == VMSUSPEND) {
				TRACE(VF_SCHEDULING,
					printf("suspending %s / %d\n",
						proc_ptr->p_name,
						proc_ptr->p_endpoint););
/* Stop a process from touching its address space so VM can modify it.
 * A process that is currently runnable may be executing on another CPU,
 * so it must be synchronized via an IPI; a non-runnable one can simply
 * have the flag set locally. */
void smp_schedule_vminhibit(struct proc * p)
{
	if (!proc_is_runnable(p))
		RTS_SET(p, RTS_VMINHIBIT);
	else
		smp_schedule_sync(p, SCHED_IPI_VM_INHIBIT);
	/* Either path must leave the inhibit flag raised. */
	assert(RTS_ISSET(p, RTS_VMINHIBIT));
}
/* Bring a process to a full stop.  If it is runnable it may be running on
 * another CPU, so stopping must be synchronized with an IPI; otherwise the
 * stop flag can be set directly. */
void smp_schedule_stop_proc(struct proc * p)
{
	if (!proc_is_runnable(p))
		RTS_SET(p, RTS_PROC_STOP);
	else
		smp_schedule_sync(p, SCHED_IPI_STOP_PROC);
	/* Either path must leave the stop flag raised. */
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}
/* Print a one-line summary of the process table: how many slots are in
 * use, and of those how many are runnable vs. sleeping.  The idle process
 * and empty slots are not counted.  Always returns 1. */
int print_proc_summary(struct proc *proc)
{
	int slot;
	int alive = 0, running = 0, sleeping = 0;

	for (slot = 0; slot < PROCS; slot++) {
		struct proc *pp = &proc[slot];

		/* Skip the idle process and unused table slots. */
		if (slot - NR_TASKS == IDLE || isemptyp(pp))
			continue;

		alive++;
		if (proc_is_runnable(pp))
			running++;
		else
			sleeping++;
	}

	printf("%d processes: %d running, %d sleeping\n",
		alive, running, sleeping);
	return 1;
}
static void profile_sample(struct proc * p, void * pc) { /* This executes on every tick of the CMOS timer. */ /* Are we profiling, and profiling memory not full? */ if (!sprofiling || sprof_info.mem_used == -1) return; /* Check if enough memory available before writing sample. */ if (sprof_info.mem_used + sizeof(sprof_info) + 2*sizeof(struct sprof_sample) + 2*sizeof(struct sprof_sample) > sprof_mem_size) { sprof_info.mem_used = -1; return; } /* Runnable system process? */ if (p->p_endpoint == IDLE) sprof_info.idle_samples++; else if (p->p_endpoint == KERNEL || (priv(p)->s_flags & SYS_PROC && proc_is_runnable(p))) { if (!(p->p_misc_flags & MF_SPROF_SEEN)) { p->p_misc_flags |= MF_SPROF_SEEN; sprof_save_proc(p); } sprof_save_sample(p, pc); sprof_info.system_samples++; } else { /* User process. */ sprof_info.user_samples++; } sprof_info.total_samples++; }
/*===========================================================================*
 *				do_update				     *
 *===========================================================================*/
int do_update(struct proc * caller, message * m_ptr)
{
/* Handle sys_update(). Update a process into another by swapping their process
 * slots.
 */
  endpoint_t src_e, dst_e;
  int src_p, dst_p;
  struct proc *src_rp, *dst_rp;
  struct priv *src_privp, *dst_privp;
  struct proc orig_src_proc;
  struct proc orig_dst_proc;
  struct priv orig_src_priv;
  struct priv orig_dst_priv;
  int i;

  /* Lookup slots for source and destination process. */
  src_e = m_ptr->SYS_UPD_SRC_ENDPT;
  if(!isokendpt(src_e, &src_p)) {
      return EINVAL;
  }
  src_rp = proc_addr(src_p);
  src_privp = priv(src_rp);
  /* Only system processes may take part in a live update. */
  if(!(src_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  dst_e = m_ptr->SYS_UPD_DST_ENDPT;
  if(!isokendpt(dst_e, &dst_p)) {
      return EINVAL;
  }
  dst_rp = proc_addr(dst_p);
  dst_privp = priv(dst_rp);
  if(!(dst_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  /* Both parties must already have been taken off the run queues. */
  assert(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp));

  /* Check if processes are updatable. */
  if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {
      return EBUSY;
  }

#if DEBUG
  printf("do_update: updating %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr,
      priv(src_rp)->s_proc_nr, dst_rp->p_endpoint, dst_rp->p_name,
      dst_rp->p_nr, priv(dst_rp)->s_proc_nr);
  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n",
      get_cpulocal_var(ptproc)->p_endpoint);
#endif

  /* Let destination inherit the target mask from source. */
  for (i=0; i < NR_SYS_PROCS; i++) {
      if (get_sys_bit(priv(src_rp)->s_ipc_to, i)) {
          set_sendto_bit(dst_rp, i);
      }
  }

  /* Save existing data. */
  orig_src_proc = *src_rp;
  orig_src_priv = *(priv(src_rp));
  orig_dst_proc = *dst_rp;
  orig_dst_priv = *(priv(dst_rp));

  /* Swap slots.  After this, each struct sits in the other's table slot;
   * the adjust_* calls below repair the slot-dependent fields. */
  *src_rp = orig_dst_proc;
  *src_privp = orig_dst_priv;
  *dst_rp = orig_src_proc;
  *dst_privp = orig_src_priv;

  /* Adjust process slots. */
  adjust_proc_slot(src_rp, &orig_src_proc);
  adjust_proc_slot(dst_rp, &orig_dst_proc);

  /* Adjust privilege slots. */
  adjust_priv_slot(priv(src_rp), &orig_src_priv);
  adjust_priv_slot(priv(dst_rp), &orig_dst_priv);

  /* Swap global process slot addresses. */
  swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);

#if DEBUG
  printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr,
      priv(src_rp)->s_proc_nr, dst_rp->p_endpoint, dst_rp->p_name,
      dst_rp->p_nr, priv(dst_rp)->s_proc_nr);
  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n",
      get_cpulocal_var(ptproc)->p_endpoint);
#endif

#ifdef CONFIG_SMP
  /* Force a TLB reload for both slots on every CPU. */
  bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
  bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
#endif

  return OK;
}
	/* NOTE(review): fragment -- the enclosing function's start and end lie
	 * outside this chunk; code left untouched. */
	vmassert(proc_ptr);
	vmassert(proc_is_runnable(proc_ptr));
	/* Handle deferred work flagged on the process before running it. */
	while (proc_ptr->p_misc_flags &
		(MF_DELIVERMSG | MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

		vmassert(proc_is_runnable(proc_ptr));
		if (proc_ptr->p_misc_flags & MF_DELIVERMSG) {
			TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
				proc_ptr->p_name, proc_ptr->p_endpoint););
			/* VMSUSPEND: delivery faulted; process is suspended
			 * until VM resolves the page. */
			if(delivermsg(proc_ptr) == VMSUSPEND) {
				TRACE(VF_SCHEDULING,
					printf("suspending %s / %d\n",
						proc_ptr->p_name,
						proc_ptr->p_endpoint););
				vmassert(!proc_is_runnable(proc_ptr));
			}
		}
		else if (proc_ptr->p_misc_flags & MF_SC_DEFER) {
			/* Perform the system call that we deferred earlier. */

#if DEBUG_SCHED_CHECK
			if (proc_ptr->p_misc_flags & MF_SC_ACTIVE)
				minix_panic("MF_SC_ACTIVE and MF_SC_DEFER set",
					NO_NUM);
#endif

			arch_do_syscall(proc_ptr);

			/* If the process is stopped for signal delivery, and
			 * not blocked sending a message after the system call,
/* Sanity-check the run queues of one CPU: verify head/tail consistency,
 * that every queued process pointer is a valid, runnable slot at the right
 * priority, that no process appears twice, and that every runnable process
 * appears on some queue.  Returns 1 if all invariants hold, 0 otherwise
 * (after printing a diagnostic). */
int runqueues_ok_cpu(unsigned cpu)
{
	int q, l = 0;
	register struct proc *xp;
	struct proc **rdy_head, **rdy_tail;

	rdy_head = get_cpu_var(cpu, run_q_head);
	rdy_tail = get_cpu_var(cpu, run_q_tail);

	/* Clear the "seen on a queue" marker on every slot; the MAX_LOOP
	 * guard catches a corrupted process table. */
	for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
		xp->p_found = 0;
		if (l++ > MAX_LOOP) panic("check error");
	}

	for (q=l=0; q < NR_SCHED_QUEUES; q++) {
		/* Head and tail must agree on whether the queue is empty,
		 * and the tail must terminate the list. */
		if (rdy_head[q] && !rdy_tail[q]) {
			printf("head but no tail in %d\n", q);
			return 0;
		}
		if (!rdy_head[q] && rdy_tail[q]) {
			printf("tail but no head in %d\n", q);
			return 0;
		}
		if (rdy_tail[q] && rdy_tail[q]->p_nextready) {
			printf("tail and tail->next not null in %d\n", q);
			return 0;
		}
		for(xp = rdy_head[q]; xp; xp = xp->p_nextready) {
			const vir_bytes vxp = (vir_bytes) xp;
			vir_bytes dxp;
			/* Pointer must land inside the process table... */
			if(vxp < (vir_bytes) BEG_PROC_ADDR ||
					vxp >= (vir_bytes) END_PROC_ADDR) {
				printf("xp out of range\n");
				return 0;
			}
			/* ...and exactly on a slot boundary. */
			dxp = vxp - (vir_bytes) BEG_PROC_ADDR;
			if(dxp % sizeof(struct proc)) {
				printf("xp not a real pointer");
				return 0;
			}
			if(!proc_ptr_ok(xp)) {
				printf("xp bogus pointer");
				return 0;
			}
			if (RTS_ISSET(xp, RTS_SLOT_FREE)) {
				printf("scheduling error: dead proc q %d %d\n",
					q, xp->p_endpoint);
				return 0;
			}
			if (!proc_is_runnable(xp)) {
				printf("scheduling error: unready on runq %d proc %d\n",
					q, xp->p_nr);
				return 0;
			}
			if (xp->p_priority != q) {
				printf("scheduling error: wrong priority q %d proc %d ep %d name %s\n",
					q, xp->p_nr, xp->p_endpoint, xp->p_name);
				return 0;
			}
			if (xp->p_found) {
				printf("scheduling error: double sched q %d proc %d\n",
					q, xp->p_nr);
				return 0;
			}
			xp->p_found = 1;
			if (!xp->p_nextready && rdy_tail[q] != xp) {
				printf("sched err: last element not tail q %d proc %d\n",
					q, xp->p_nr);
				return 0;
			}
			/* Guard against a cycle in the queue. */
			if (l++ > MAX_LOOP) {
				printf("loop in schedule queue?");
				return 0;
			}
		}
	}

	/* Reverse direction: every runnable process must have been seen on
	 * some run queue above. */
	for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
		if(!proc_ptr_ok(xp)) {
			printf("xp bogus pointer in proc table\n");
			return 0;
		}
		if (isemptyp(xp))
			continue;
		if(proc_is_runnable(xp) && !xp->p_found) {
			printf("sched error: ready proc %d not on queue\n", xp->p_nr);
			return 0;
		}
	}

	/* All is ok. */
	return 1;
}
/* Snapshot the current process table and run-queue heads and copy them to a
 * monitoring process (presumably a scheduler-tracing tool; the destination
 * is identified by the globals srcAddr / pInfoPtrs / pQhPtrs / srcPtr2 --
 * TODO confirm against their definitions, which are outside this chunk).
 * One snapshot is recorded per call, up to HISTORY entries, and only while
 * recordSched is enabled. */
void OSSendPtab(void){
	int i;
	if(recordSched == 1){
		struct pi sendPi[NR_PROCS + NR_TASKS];
		/*Use the following array to recover the next ready processes before we lose the addresses*/
		struct proc nextReady[NR_PROCS + NR_TASKS];
		struct proc tmpPtab[NR_PROCS +NR_TASKS];
		struct proc queuehds[NR_SCHED_QUEUES];
		if(pos_count < HISTORY ){
			/*Get the current process table */
			sys_getproctab((struct proc *) &tmpPtab);
			sys_cpuvar((char *) &queuehds,SELF);
			/* Handle the heads of each queue; a priority of -1
			 * marks an empty queue head. */
			struct qh qh_send[NR_SCHED_QUEUES];
			for(i=0;i<NR_SCHED_QUEUES;i++){
				if(queuehds[i].p_priority!=-1){
					strcpy(qh_send[i].p_name,queuehds[i].p_name);
					qh_send[i].p_endpoint = queuehds[i].p_endpoint;
				}
				else{
					qh_send[i].p_endpoint = -1;
				}
			}
			/* Build one info record per process-table slot. */
			for(i=0;i<(NR_PROCS+NR_TASKS);i++){
				strcpy(sendPi[i].p_name,tmpPtab[i].p_name);
				sendPi[i].p_endpoint = tmpPtab[i].p_endpoint;
				/* Update the SJF bookkeeping for processes we
				 * track by name (proc_name/sjf are globals
				 * defined outside this chunk). */
				for (int l=0; l<PROCNUM; l++) {
					if (0 == strcmp(tmpPtab[i].p_name, proc_name[l])) {
						strcpy(sjf[l].p_name,tmpPtab[i].p_name);
						sjf[l].p_endpoint = tmpPtab[i].p_endpoint;
						sjf[l].ticks = tmpPtab[i].p_cycles;
						if(!proc_is_runnable(&tmpPtab[i])) {
							sjf[l].is_blocked = 1;
							// sjf[l].predBurst = INT_MAX;
							// sjf[l].ticks = INT_MAX;
						}
						else {
							sjf[l].is_blocked = 0;
						}
					}
				}
				sendPi[i].p_priority = tmpPtab[i].p_priority;
				sendPi[i].p_cpu_time_left = tmpPtab[i].p_cpu_time_left;
				sendPi[i].p_rts_flags = tmpPtab[i].p_rts_flags;
				/* The p_nextready pointer refers to kernel
				 * memory, so fetch that slot via the kernel
				 * (SYSTEM) before reading its name. */
				if(tmpPtab[i].p_nextready){
					sys_vircopy(SYSTEM,(vir_bytes) tmpPtab[i].p_nextready,
						SELF,(vir_bytes) &(nextReady[i]),sizeof(struct proc));
					strcpy(sendPi[i].p_nextready,nextReady[i].p_name);
					sendPi[i].p_nextready_endpoint = nextReady[i].p_endpoint;
				}
				else{
					strcpy(sendPi[i].p_nextready, NOPROC);
					sendPi[i].p_nextready_endpoint = -1;
				}
				/*Copy the accounting structure. Using CPU cycles instead of times,
				  because CPU speeds will vary*/
				sendPi[i].p_times.enter_queue = tmpPtab[i].p_accounting.enter_queue;
				sendPi[i].p_times.time_in_queue = tmpPtab[i].p_accounting.time_in_queue;
				sendPi[i].p_times.dequeues = tmpPtab[i].p_accounting.dequeues;
				sendPi[i].p_times.ipc_sync = tmpPtab[i].p_accounting.ipc_sync;
				sendPi[i].p_times.ipc_async = tmpPtab[i].p_accounting.ipc_async;
				sendPi[i].p_times.preempted = tmpPtab[i].p_accounting.preempted;
			}
			/* Ship the snapshot, queue heads, and the updated
			 * ready-count to the recording process. */
			sys_vircopy(SELF,(vir_bytes) &sendPi,
				srcAddr,(vir_bytes) pInfoPtrs[pos_count],sizeof(sendPi));
			sys_vircopy(SELF,(vir_bytes) &qh_send,
				srcAddr,(vir_bytes) pQhPtrs[pos_count],sizeof(qh_send));
			int piReady = pos_count;
			sys_vircopy(SELF,(vir_bytes) &piReady,
				srcAddr, (vir_bytes) srcPtr2, sizeof(piReady));
			pos_count++;
			/* Ensure the proc history buffer does not overflow:
			 * the pos_count < HISTORY guard above stops recording
			 * once the buffer is full. */
		}
	}
}
/* * Return the LWP status of a process, along with additional information in * case the process is sleeping (LSSLEEP): a wchan value and text to indicate * what the process is sleeping on, and possibly a flag field modification to * indicate that the sleep is interruptible. */ static int get_lwp_stat(int mslot, uint64_t * wcptr, char * wmptr, size_t wmsz, int32_t * flag) { struct mproc *mp; struct fproc_light *fp; struct proc *kp; const char *wmesg; uint64_t wchan; endpoint_t endpt; mp = &mproc_tab[mslot]; fp = &fproc_tab[mslot]; kp = &proc_tab[NR_TASKS + mslot]; /* * First cover all the cases that the process is not sleeping. In * those cases, we need not return additional sleep information either. */ if (mp->mp_flags & (TRACE_ZOMBIE | ZOMBIE)) return LSZOMB; if (mp->mp_flags & EXITING) return LSDEAD; if ((mp->mp_flags & TRACE_STOPPED) || RTS_ISSET(kp, RTS_P_STOP)) return LSSTOP; if (proc_is_runnable(kp)) return LSRUN; /* * The process is sleeping. In that case, we must also figure out why, * and return an appropriate wchan value and human-readable wmesg text. * * The process can be blocked on either a known sleep state in PM or * VFS, or otherwise on IPC communication with another process, or * otherwise on a kernel RTS flag. In each case, decide what to use as * wchan value and wmesg text, and whether the sleep is interruptible. * * The wchan value should be unique for the sleep reason. We use its * lower eight bits to indicate a class: * 0x00 = kernel task * 0x01 = kerel RTS block * 0x02 = PM call * 0x03 = VFS call * 0x04 = MIB call * 0xff = blocked on process * The upper bits are used for class-specific information. The actual * value does not really matter, as long as it is nonzero and there is * no overlap between the different values. */ wchan = 0; wmesg = NULL; /* * First see if the process is marked as blocked in the tables of PM or * VFS. Such a block reason is always an interruptible sleep. 
Note * that we do not use the kernel table at all in this case: each of the * three tables is consistent within itself, but not necessarily * consistent with any of the other tables, so we avoid internal * mismatches if we can. */ if (mp->mp_flags & WAITING) { wchan = 0x102; wmesg = "wait"; } else if (mp->mp_flags & SIGSUSPENDED) { wchan = 0x202; wmesg = "pause"; } else if (fp->fpl_blocked_on != FP_BLOCKED_ON_NONE) { wchan = (fp->fpl_blocked_on << 8) | 0x03; switch (fp->fpl_blocked_on) { case FP_BLOCKED_ON_PIPE: wmesg = "pipe"; break; case FP_BLOCKED_ON_FLOCK: wmesg = "flock"; break; case FP_BLOCKED_ON_POPEN: wmesg = "popen"; break; case FP_BLOCKED_ON_SELECT: wmesg = "select"; break; case FP_BLOCKED_ON_CDEV: case FP_BLOCKED_ON_SDEV: /* * Add the task (= character or socket driver) endpoint * to the wchan value, and use the driver's process * name, without parentheses, as wmesg text. */ wchan |= (uint64_t)fp->fpl_task << 16; fill_wmesg(wmptr, wmsz, fp->fpl_task, FALSE /*ipc*/); break; default: /* A newly added flag we don't yet know about? */ wmesg = "???"; break; } } if (wchan != 0) { *wcptr = wchan; if (wmesg != NULL) /* NULL means "already set" here */ strlcpy(wmptr, wmesg, wmsz); *flag |= L_SINTR; } /* * See if the process is blocked on sending or receiving. If not, then * use one of the kernel RTS flags as reason. */ endpt = P_BLOCKEDON(kp); switch (endpt) { case MIB_PROC_NR: /* This is really just aesthetics. */ wchan = 0x04; wmesg = "sysctl"; break; case NONE: /* * The process is not running, but also not blocked on IPC with * another process. This means it must be stopped on a kernel * RTS flag. 
*/ wchan = ((uint64_t)kp->p_rts_flags << 8) | 0x01; if (RTS_ISSET(kp, RTS_PROC_STOP)) wmesg = "kstop"; else if (RTS_ISSET(kp, RTS_SIGNALED) || RTS_ISSET(kp, RTS_SIGNALED)) wmesg = "ksignal"; else if (RTS_ISSET(kp, RTS_NO_PRIV)) wmesg = "knopriv"; else if (RTS_ISSET(kp, RTS_PAGEFAULT) || RTS_ISSET(kp, RTS_VMREQTARGET)) wmesg = "fault"; else if (RTS_ISSET(kp, RTS_NO_QUANTUM)) wmesg = "sched"; else wmesg = "kflag"; break; case ANY: /* * If the process is blocked receiving from ANY, mark it as * being in an interruptible sleep. This looks nicer, even * though "interruptible" is not applicable to services at all. */ *flag |= L_SINTR; break; } /* * If at this point wchan is still zero, the process is blocked sending * or receiving. Use a wchan value based on the target endpoint, and * use "(procname)" as wmesg text. */ if (wchan == 0) { *wcptr = ((uint64_t)endpt << 8) | 0xff; fill_wmesg(wmptr, wmsz, endpt, TRUE /*ipc*/); } else { *wcptr = wchan; if (wmesg != NULL) /* NULL means "already set" here */ strlcpy(wmptr, wmesg, wmsz); } return LSSLEEP; }
/*===========================================================================*
 *				do_update				     *
 *===========================================================================*/
int do_update(struct proc * caller, message * m_ptr)
{
/* Handle sys_update(). Update a process into another by swapping their process
 * slots.
 */
  endpoint_t src_e, dst_e;
  int src_p, dst_p, flags;
  struct proc *src_rp, *dst_rp;
  struct priv *src_privp, *dst_privp;
  struct proc orig_src_proc;
  struct proc orig_dst_proc;
  struct priv orig_src_priv;
  struct priv orig_dst_priv;
  int i, r;

  /* Lookup slots for source and destination process. */
  flags = m_ptr->SYS_UPD_FLAGS;
  src_e = m_ptr->SYS_UPD_SRC_ENDPT;
  if(!isokendpt(src_e, &src_p)) {
      return EINVAL;
  }
  src_rp = proc_addr(src_p);
  src_privp = priv(src_rp);
  /* Only system processes may take part in a live update. */
  if(!(src_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  dst_e = m_ptr->SYS_UPD_DST_ENDPT;
  if(!isokendpt(dst_e, &dst_p)) {
      return EINVAL;
  }
  dst_rp = proc_addr(dst_p);
  dst_privp = priv(dst_rp);
  if(!(dst_privp->s_flags & SYS_PROC)) {
      return EPERM;
  }

  /* Both parties must already have been taken off the run queues. */
  assert(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp));

  /* Check if processes are updatable. */
  if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {
      return EBUSY;
  }

#if DEBUG
  printf("do_update: updating %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr,
      priv(src_rp)->s_proc_nr, dst_rp->p_endpoint, dst_rp->p_name,
      dst_rp->p_nr, priv(dst_rp)->s_proc_nr);
  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n",
      get_cpulocal_var(ptproc)->p_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n",
      src_rp->p_endpoint, src_rp->p_rts_flags, priv(src_rp)->s_asyntab,
      priv(src_rp)->s_asynendpoint, priv(src_rp)->s_grant_table,
      priv(src_rp)->s_grant_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n",
      dst_rp->p_endpoint, dst_rp->p_rts_flags, priv(dst_rp)->s_asyntab,
      priv(dst_rp)->s_asynendpoint, priv(dst_rp)->s_grant_table,
      priv(dst_rp)->s_grant_endpoint);
#endif

  /* Let destination inherit allowed IRQ, I/O ranges, and memory ranges.
   * Each inherit step may fail (e.g. out of range slots); bail out early. */
  r = inherit_priv_irq(src_rp, dst_rp);
  if(r != OK) {
      return r;
  }
  r = inherit_priv_io(src_rp, dst_rp);
  if(r != OK) {
      return r;
  }
  r = inherit_priv_mem(src_rp, dst_rp);
  if(r != OK) {
      return r;
  }

  /* Let destination inherit the target mask from source. */
  for (i=0; i < NR_SYS_PROCS; i++) {
      if (get_sys_bit(priv(src_rp)->s_ipc_to, i)) {
          set_sendto_bit(dst_rp, i);
      }
  }

  /* Save existing data. */
  orig_src_proc = *src_rp;
  orig_src_priv = *(priv(src_rp));
  orig_dst_proc = *dst_rp;
  orig_dst_priv = *(priv(dst_rp));

  /* Adjust asyn tables. */
  adjust_asyn_table(priv(src_rp), priv(dst_rp));
  adjust_asyn_table(priv(dst_rp), priv(src_rp));

  /* Abort any pending send() on rollback. */
  if(flags & SYS_UPD_ROLLBACK) {
      abort_proc_ipc_send(src_rp);
  }

  /* Swap slots.  After this, each struct sits in the other's table slot;
   * the adjust_* calls below repair the slot-dependent fields. */
  *src_rp = orig_dst_proc;
  *src_privp = orig_dst_priv;
  *dst_rp = orig_src_proc;
  *dst_privp = orig_src_priv;

  /* Adjust process slots. */
  adjust_proc_slot(src_rp, &orig_src_proc);
  adjust_proc_slot(dst_rp, &orig_dst_proc);

  /* Adjust privilege slots. */
  adjust_priv_slot(priv(src_rp), &orig_src_priv);
  adjust_priv_slot(priv(dst_rp), &orig_dst_priv);

  /* Swap global process slot addresses. */
  swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);

  /* Swap VM request entries. */
  swap_memreq(src_rp, dst_rp);

#if DEBUG
  printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
      src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr,
      priv(src_rp)->s_proc_nr, dst_rp->p_endpoint, dst_rp->p_name,
      dst_rp->p_nr, priv(dst_rp)->s_proc_nr);
  proc_stacktrace(src_rp);
  proc_stacktrace(dst_rp);
  printf("do_update: curr ptproc %d\n",
      get_cpulocal_var(ptproc)->p_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n",
      src_rp->p_endpoint, src_rp->p_rts_flags, priv(src_rp)->s_asyntab,
      priv(src_rp)->s_asynendpoint, priv(src_rp)->s_grant_table,
      priv(src_rp)->s_grant_endpoint);
  printf("do_update: endpoint %d rts flags %x asyn tab %08x asyn endpoint %d grant tab %08x grant endpoint %d\n",
      dst_rp->p_endpoint, dst_rp->p_rts_flags, priv(dst_rp)->s_asyntab,
      priv(dst_rp)->s_asynendpoint, priv(dst_rp)->s_grant_table,
      priv(dst_rp)->s_grant_endpoint);
#endif

#ifdef CONFIG_SMP
  /* Force a TLB reload for both slots on every CPU. */
  bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
  bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
#endif

  return OK;
}