// Give up the CPU for one scheduling round. void yield(void) { //cprintf("yield...\n"); acquire(&ptable.lock); //DOC: yieldlock proc->state = RUNNABLE; // Loop thru pstat.pid to find the slot number in ptable. int slot_no; // slot idx for (slot_no = 0; slot_no < NPROC; ++slot_no) if (proc_stat.pid[slot_no] == proc->pid) break; //assert(i < NPROC); if (slot_no == NPROC) cprintf("slot_no == NPROC\n"); int pri = proc_stat.priority[slot_no]; ++proc_stat.ticks[slot_no][pri]; //cprintf("pid: %d have used %d timerticks.\n", //proc->pid, proc_stat.ticks[slot_no][pri]); // Updates priority queues here. if (proc_stat.ticks[slot_no][pri] == tick_bounds(pri) || (pri == 3 && proc_stat.ticks[slot_no][pri] % 8 == 0)) { switch (pri) { case 0: //cprintf("pri 0 to pri 1.\n"); remove_from_queue(&q0_head, &q0_tail, proc); append_to_queue(&q1_head, &q1_tail, proc); proc_stat.priority[slot_no] = 1; break; case 1: //cprintf("pri 1 to pri 2.\n"); remove_from_queue(&q1_head, &q1_tail, proc); append_to_queue(&q2_head, &q2_tail, proc); proc_stat.priority[slot_no] = 2; break; case 2: //cprintf("pri 2 to pri 3.\n"); remove_from_queue(&q2_head, &q2_tail, proc); append_to_queue(&q3_head, &q3_tail, proc); proc_stat.priority[slot_no] = 3; break; case 3: // RR //cprintf("RR here.\n"); remove_from_queue(&q3_head, &q3_tail, proc); append_to_queue(&q3_head, &q3_tail, proc); break; default: cprintf("Wrong priority.\n"); } } sched(); release(&ptable.lock); }
// Look in the process table for an UNUSED proc. // If found, change state to EMBRYO and initialize // state required to run in the kernel. // Otherwise return 0. static struct proc* allocproc(void) { //cprintf("allocproc...\n"); struct proc *p; char *sp; acquire(&ptable.lock); int slot_idx = -1; for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) { ++slot_idx; if(p->state == UNUSED) goto found; } release(&ptable.lock); return 0; found: p->state = EMBRYO; p->pid = nextpid++; proc_stat.inuse[slot_idx] = 1; proc_stat.pid[slot_idx] = p->pid; proc_stat.priority[slot_idx] = 0; int i; for (i = 0; i < 4; ++i) { proc_stat.ticks[slot_idx][i] = 0; } // Adds the new process to the priority queue. append_to_queue(&q0_head, &q0_tail, p); if (!q0_tail) cprintf("still NULL\n"); //cprintf("q0_tail: %d\n", q0_tail->pid); release(&ptable.lock); // Allocate kernel stack if possible. if((p->kstack = kalloc()) == 0){ p->state = UNUSED; return 0; } sp = p->kstack + KSTACKSIZE; // Leave room for trap frame. sp -= sizeof *p->tf; p->tf = (struct trapframe*)sp; // Set up new context to start executing at forkret, // which returns to trapret. sp -= 4; *(uint*)sp = (uint)trapret; sp -= sizeof *p->context; p->context = (struct context*)sp; memset(p->context, 0, sizeof *p->context); p->context->eip = (uint)forkret; return p; }
asmlinkage int sys_semop (int semid, struct sembuf *tsops, unsigned nsops) { int id, size, error = -EINVAL; struct semid_ds *sma; struct sembuf sops[SEMOPM], *sop; struct sem_undo *un; int undos = 0, decrease = 0, alter = 0; struct sem_queue queue; lock_kernel(); if (nsops < 1 || semid < 0) goto out; error = -E2BIG; if (nsops > SEMOPM) goto out; error = -EFAULT; if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) goto out; id = (unsigned int) semid % SEMMNI; error = -EINVAL; if ((sma = semary[id]) == IPC_UNUSED || sma == IPC_NOID) goto out; error = -EIDRM; if (sma->sem_perm.seq != (unsigned int) semid / SEMMNI) goto out; error = -EFBIG; for (sop = sops; sop < sops + nsops; sop++) { if (sop->sem_num >= sma->sem_nsems) goto out; if (sop->sem_flg & SEM_UNDO) undos++; if (sop->sem_op < 0) decrease = 1; if (sop->sem_op > 0) alter = 1; } alter |= decrease; error = -EACCES; if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) goto out; if (undos) { /* Make sure we have an undo structure * for this process and this semaphore set. */ for (un = current->semundo; un; un = un->proc_next) if (un->semid == semid) break; if (!un) { size = sizeof(struct sem_undo) + sizeof(short)*sma->sem_nsems; un = (struct sem_undo *) kmalloc(size, GFP_ATOMIC); if (!un) { error = -ENOMEM; goto out; } memset(un, 0, size); un->semadj = (short *) &un[1]; un->semid = semid; un->proc_next = current->semundo; current->semundo = un; un->id_next = sma->undo; sma->undo = un; } } else un = NULL; error = try_atomic_semop (sma, sops, nsops, un, current->pid, 0); if (error <= 0) goto update; /* We need to sleep on this operation, so we put the current * task into the pending queue and go to sleep. 
*/ queue.sma = sma; queue.sops = sops; queue.nsops = nsops; queue.undo = un; queue.pid = current->pid; queue.alter = decrease; current->semsleeping = &queue; if (alter) append_to_queue(sma ,&queue); else prepend_to_queue(sma ,&queue); for (;;) { queue.status = -EINTR; queue.sleeper = NULL; interruptible_sleep_on(&queue.sleeper); /* * If queue.status == 1 we where woken up and * have to retry else we simply return. * If an interrupt occurred we have to clean up the * queue * */ if (queue.status == 1) { error = try_atomic_semop (sma, sops, nsops, un, current->pid,0); if (error <= 0) break; } else { error = queue.status;; if (queue.prev) /* got Interrupt */ break; /* Everything done by update_queue */ current->semsleeping = NULL; goto out; } } current->semsleeping = NULL; remove_from_queue(sma,&queue); update: if (alter) update_queue (sma); out: unlock_kernel(); return error; }
/*
 * semtimedop(2): perform an array of semaphore operations atomically,
 * optionally bounded by a timeout. Validates the user sembuf array and
 * timespec, checks permissions, attaches a SEM_UNDO record when asked,
 * and tries the operation; if it cannot complete immediately the task
 * is queued on the set and sleeps (interruptibly, with the semaphore
 * unlocked) until retried, interrupted, timed out, or the set is
 * removed. Returns 0 or a negative errno.
 */
asmlinkage long sys_semtimedop (int semid, struct sembuf *tsops,
			unsigned nsops, const struct timespec *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	/* Small op arrays use the on-stack buffer; larger ones kmalloc. */
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		/* Copy and range-check the user timespec, then convert. */
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	sma = sem_lock(semid);
	error=-EINVAL;
	if(sma==NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock_free;
	/* Scan the ops: validate indices and classify the request. */
	error = -EFBIG;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= sma->sem_nsems)
			goto out_unlock_free;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;
	if (undos) {
		/* Make sure we have an undo structure
		 * for this process and this semaphore set.
		 * Stale entries (semid == -1) are reaped along the way.
		 */
		un=current->semundo;
		while(un != NULL) {
			if(un->semid==semid)
				break;
			if(un->semid==-1)
				un=freeundos(sma,un);
			else
				un=un->proc_next;
		}
		if (!un) {
			error = alloc_undo(sma,&un,semid,alter);
			if(error)
				goto out_free;
		}
	} else
		un = NULL;

	error = try_atomic_semop (sma, sops, nsops, un, current->tgid, 0);
	if (error <= 0)
		goto update;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->tgid;
	queue.alter = decrease;
	queue.id = semid;
	/* Altering ops queue at the tail; pure waits go to the head. */
	if (alter)
		append_to_queue(sma ,&queue);
	else
		prepend_to_queue(sma ,&queue);
	current->semsleeping = &queue;

	for (;;) {
		struct sem_array* tmp;
		queue.status = -EINTR;
		queue.sleeper = current;
		current->state = TASK_INTERRUPTIBLE;
		/* Drop the semaphore lock before sleeping. */
		sem_unlock(semid);
		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();
		/* Re-lock; a NULL return means the set was removed while
		 * we slept (we must no longer be on its queue). */
		tmp = sem_lock(semid);
		if(tmp==NULL) {
			if(queue.prev != NULL)
				BUG();
			current->semsleeping = NULL;
			error = -EIDRM;
			goto out_free;
		}
		/*
		 * If queue.status == 1 we where woken up and
		 * have to retry else we simply return.
		 * If an interrupt occurred we have to clean up the
		 * queue
		 *
		 */
		if (queue.status == 1)
		{
			error = try_atomic_semop (sma, sops, nsops, un,
						  current->tgid, 0);
			if (error <= 0)
				break;
		} else {
			error = queue.status;
			/* A timer expiry reports -EAGAIN, not -EINTR. */
			if (error == -EINTR && timeout && jiffies_left == 0)
				error = -EAGAIN;
			if (queue.prev) /* got Interrupt */
				break;
			/* Everything done by update_queue */
			current->semsleeping = NULL;
			goto out_unlock_free;
		}
	}
	current->semsleeping = NULL;
	remove_from_queue(sma,&queue);
update:
	/* Our change may have enabled other waiters; wake them. */
	if (alter)
		update_queue (sma);
out_unlock_free:
	sem_unlock(semid);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}