// this has a parallell in exe$allocate_pool int exe$alononpagvar (int reqsize, int *alosize_p, void **pool_p) { // round up to nearest 16 should be moved here, and the statics fixed // pool spinlock etc int sts=SS$_NORMAL; int ipl = vmslock(&SPIN_POOL, IPL$_POOL); sts=exe$allocate(reqsize , &exe$gl_nonpaged, 0 , alosize_p, pool_p); vmsunlock(&SPIN_POOL, ipl); // unlock pool if (sts==SS$_NORMAL) return sts; struct _npool_data * pooldata = exe$gs_npp_npool; struct _lsthds * lsthd = pooldata->npool$ar_lsthds; void * array = &lsthd->lsthds$q_listheads; exe$reclaim_pool_aggressive(exe$gs_npp_npool); ipl = vmslock(&SPIN_POOL, IPL$_POOL); sts=exe$allocate(reqsize , &exe$gl_nonpaged, 0 , alosize_p, pool_p); vmsunlock(&SPIN_POOL, ipl); if (sts==SS$_NORMAL) return sts; sts=exe$extendpool(exe$gs_npp_npool); if (sts==SS$_NORMAL) { int ipl = vmslock(&SPIN_POOL, IPL$_POOL); sts=exe$allocate(reqsize , &exe$gl_nonpaged, 0 , alosize_p, pool_p); vmsunlock(&SPIN_POOL, ipl); } if (sts==SS$_NORMAL) return sts; sts=exe$flushlists(exe$gs_npp_npool, reqsize); if (sts==SS$_NORMAL) { int ipl = vmslock(&SPIN_POOL, IPL$_POOL); sts=exe$allocate(reqsize , &exe$gl_nonpaged, 0 , alosize_p, pool_p); vmsunlock(&SPIN_POOL, ipl); } return sts; }
// Dequeue (release) a lock: unlink the LKB from its queues, recompute
// the resource's grant modes from the remaining locks, grant any
// now-compatible queued locks, free the LKB, and free the RSB too when
// no lock queues reference it any longer.
// NOTE(review): declared int but contains no return statement - the
// result must not be used by callers until a status return is added.
int lck$deqlock(struct _lkb *lck, int flags, unsigned int lkid)
{
  vmslock(&SPIN_SCS,IPL$_SCS);
  struct _rsb * res = lck->lkb$l_rsb;
  int newmode;
  /* unlink from the owner queue and from the state (grant/wait) queue */
  remque(&lck->lkb$l_ownqfl,0);
  remque(&lck->lkb$l_sqfl,0);
  // check if no locks on resource, remove the resource then
  /* recompute the highest remaining granted mode and propagate it to
     all three resource mode fields */
  newmode=find_highest(lck,res);
  res->rsb$b_fgmode=newmode;
  res->rsb$b_ggmode=newmode;
  res->rsb$b_cgmode=newmode;
  /* attempt to grant converting/waiting locks now that modes dropped */
  grant_queued(res,0,1,1);
  if (lck->lkb$b_state) {
    /* intentionally empty - placeholder for state-specific cleanup */
  }
  kfree(lck);
  /* NOTE(review): slot is overwritten with lkid+1 - presumably a
     free-list link for lock-id recycling; confirm against insert_lck */
  lockidtbl[lkid] = lkid + 1;
  /* free the resource block once no queue references it */
  if (aqempty(&res->rsb$l_grqfl) && aqempty(&res->rsb$l_cvtqfl) && aqempty(&res->rsb$l_wtqfl) && aqempty(&res->rsb$l_rrsfl) && aqempty(&res->rsb$l_srsfl)) {
    remque(res, 0);
    kfree(res);
  }
  /* unlock drops to IPL$_ASTDEL (file convention after SCS lock) */
  vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
}
/** Release a mutex held by the current process: decrement the process
 *  mutex count (restoring saved base priority when it reaches zero),
 *  decrement the mutex owner count, and when the last owner leaves
 *  while the write bit was set, wake every process in the misc wait
 *  queue that is waiting on this mutex. */
void sch$unlock(struct _mtx * m)
{
  struct _pcb * p = ctl$gl_pcb;
  /** spinlock sched */
  int ipl = vmslock(&SPIN_SCHED, IPL$_SCHED);
  /** decrement mtxcnt */
  p->pcb$w_mtxcnt--;
  /** if process does not own anymore, restore pris and change pri */
  /** TODO check if all saved are restore, and TODO check realtime */
  if (p->pcb$w_mtxcnt==0) {
    p->pcb$b_prib = p->pcb$b_pribsav;
    sch$change_cur_priority(p, p->pcb$b_pri);
  }
  /** decrement owncnt */
  m->mtx$w_owncnt--;
  /* owncnt is an unsigned word: 65535 is the wrapped value of -1,
     i.e. the count just went below zero - no owners remain */
  if (m->mtx$w_owncnt==65535 /* not yet: -1 */ ) {
    /** if no other owners */
    /** bbcci, test and clear write bit */
    if (test_and_clear_bit(16, m)) {
      /** if a writer had it */
      /* NOTE(review): pointer stored in int - truncates on 64-bit;
         waiters match on this value via pcb$l_efwm */
      int rsn = m; // check 32 bit
      // pasted from ravail
      struct _wqh * wq=sch$gq_mwait;
      struct _pcb * p=wq->wqh$l_wqfl;
      struct _pcb * next;
      /** scan misc wait queue */
      for (; p!=wq; p=next) {
        /* capture the forward link first: sch$chse may unlink p */
        next = p->pcb$l_sqfl;
        /** search for those waiting for the now unlocked mutex */
        if (p->pcb$l_efwm==rsn) {
          /** when found */
          /** decrease wqueue count */
          wq->wqh$l_wqcnt--;
          /** schedule that process */
          sch$chse(p,PRI$_RESAVL);
        }
      }
    }
  }
 out:
  /** release spinlock */
  vmsunlock(&SPIN_SCHED, ipl);
}
/** Acquire a mutex for write on behalf of the current process.
 *  Sets the mutex write bit (bit 16); if it was already set, or the
 *  mutex has other owners, the process stalls in mutexwait and the
 *  whole sequence is retried from the top.  Acquiring the process's
 *  first mutex saves its priorities and boosts a non-realtime process
 *  to priority 15. */
void sch$lockw(struct _mtx * m)
{
  struct _pcb * p = ctl$gl_pcb;
 again:
  {}
  /** spinlock sched */
  int ipl = vmslock(&SPIN_SCHED, IPL$_SCHED);
  /** test and set mutex write flag */
  if (test_and_set_bit(16, m)) {
    /** if set, no further readers or writers allowed, stall process */
    /* NOTE(review): assumes mutexwait releases SPIN_SCHED before
       blocking - confirm, otherwise the retry re-locks a held lock */
    mutexwait(p, m);
    goto again;
  } else {
    /** if no other owners */
    /* owncnt is an unsigned word; 65535 is the wrapped -1 "free" value */
    if (m->mtx$w_owncnt==65535 /* not yet: -1 */ ) {
      /** own it is done by incrementing owncnt and mtxcnt */
      m->mtx$w_owncnt++;
      p->pcb$w_mtxcnt++;
      /** if it is the first to be locked */
      if (p->pcb$w_mtxcnt==1) {
        /** then record current priorities in pcb */
        /** TODO check if saved right with regard to realtime */
        p->pcb$b_prisav = p->pcb$b_pri;
        p->pcb$b_pribsav = p->pcb$b_prib;
        /** if not realtime process, boost priority */
        if (16>=p->pcb$b_pri) {
          p->pcb$b_prib = 15;
          sch$change_cur_priority(p, 15);
        }
      }
    } else {
      /** if other owners, stall */
      mutexwait(p, m);
      goto again;
    }
  }
  /** release spinlock */
  ipl = IPL$_ASTDEL;
  vmsunlock(&SPIN_SCHED, ipl);
}
void sch_std$ravail(int rsn) // check. still 32 bit prob { int retval=test_and_clear_bit(rsn,&sch$gl_resmask); if (retval==0) return; int savipl=vmslock(&SPIN_SCHED,IPL$_MAILBOX); struct _wqh * wq=sch$gq_mwait; struct _pcb * p=wq->wqh$l_wqfl; for (; p!=wq; p=p->pcb$l_sqfl) { if (p->pcb$l_efwm==rsn) { wq->wqh$l_wqcnt--; sch$chse(p,PRI$_RESAVL); } } vmsunlock(&SPIN_SCHED,savipl); }
// Create-process system service (VMS $CREPRC analogue).
// Builds a new PCB from scratch: copies privilege/UIC/priority context
// from the creating process, fills the process quota block (PQB) with
// the image and I/O descriptors, allocates pids, then performs enough
// Linux task_struct initialization (fork leftovers) to make the new
// process schedulable, sets up its page tables / shell regions, starts
// a kernel thread for it, and makes it computable via sch$chse.
// The many bare "// set ..." comments mark still-unimplemented VMS
// steps; the #if 0 regions are retained reference material.
// Returns SS$_NORMAL.
asmlinkage int exe$creprc(unsigned int *pidadr, void *image, void *input, void *output, void *error, struct _generic_64 *prvadr, unsigned int *quota, void*prcnam, unsigned int baspri, unsigned int uic, unsigned short int mbxunt, unsigned int stsflg,...)
{
  unsigned long stack_here;
  struct _pcb * p, * cur;
  int retval;
  /* image/input/output/error arrive as VMS string descriptors */
  struct dsc$descriptor * imd = image, * ind = input, * oud = output, * erd = error;
  unsigned long clone_flags=CLONE_VFORK;
  //check pidadr
  ctl$gl_creprc_flags = stsflg;
  // check for PRC$M_NOUAF sometime
  if (stsflg&PRC$M_DETACH) {
  }
  if (uic) {
  }
  //setipl(IPL$_ASTDEL);//postpone this?
  cur=ctl$gl_pcb;
  /* hold both scheduler and memory-management locks for the whole
     creation sequence */
  vmslock(&SPIN_SCHED, IPL$_SCHED);
  vmslock(&SPIN_MMG, IPL$_MMG);
  p = alloc_task_struct();
  //bzero(p,sizeof(struct _pcb));//not wise?
  memset(p,0,sizeof(struct _pcb));
  // check more
  // compensate for no struct clone/copy
  p->sigmask_lock = SPIN_LOCK_UNLOCKED;
  p->alloc_lock = SPIN_LOCK_UNLOCKED;
  qhead_init(&p->pcb$l_astqfl);
  // and enable ast del to all modes
  p->pcb$b_type = DYN$C_PCB;
  p->pcb$b_asten=15;
  p->phd$b_astlvl=4;
  p->pr_astlvl=4;
  p->psl=0;
  p->pslindex=0;
  qhead_init(&p->pcb$l_lockqfl);
  // set capabilities
  p->pcb$l_permanent_capability = sch$gl_default_process_cap;
  p->pcb$l_capability = p->pcb$l_permanent_capability;
  // set affinity
  // set default fileprot
  // set arb
  // set mbx stuff
  // from setprn:
  if (prcnam) {
    struct dsc$descriptor *s=prcnam;
    /* NOTE(review): strncpy bounded by the descriptor length, not the
       destination field - no NUL guarantee; confirm field size */
    strncpy(p->pcb$t_lname,s->dsc$a_pointer,s->dsc$w_length);
  }
  // set priv
  p->pcb$l_priv=ctl$gl_pcb->pcb$l_priv;
  // set pris
  /* VMS priority is inverted relative to the internal tables */
  p->pcb$b_prib=31-baspri;
  p->pcb$b_pri=31-baspri-6;
  // if (p->pcb$b_pri<16) p->pcb$b_pri=16;
  p->pcb$w_quant=-QUANTUM;
  // set uic
  p->pcb$l_uic=ctl$gl_pcb->pcb$l_uic;
  // set vms pid
  // check process name
  // do something with pqb
  p->pcb$l_pqb=kmalloc(sizeof(struct _pqb),GFP_KERNEL);
  memset(p->pcb$l_pqb,0,sizeof(struct _pqb));
  struct _pqb * pqb = p->pcb$l_pqb;
  pqb->pqb$q_prvmsk = ctl$gq_procpriv;
  /* copy the image and I/O descriptor strings into the quota block */
  if (imd)
    memcpy(pqb->pqb$t_image,imd->dsc$a_pointer,imd->dsc$w_length);
  if (ind)
    memcpy(pqb->pqb$t_input,ind->dsc$a_pointer,ind->dsc$w_length);
  if (oud)
    memcpy(pqb->pqb$t_output,oud->dsc$a_pointer,oud->dsc$w_length);
  if (erd)
    memcpy(pqb->pqb$t_error,erd->dsc$a_pointer,erd->dsc$w_length);
  if (oud) // temp measure
    memcpy(p->pcb$t_terminal,oud->dsc$a_pointer,oud->dsc$w_length);
  // translate some logicals
  // copy security clearance
  // copy msg
  // copy flags
  // set jib
  // do quotas
  // process itmlst
  // set pcb$l_pqb
#if 0
  setipl(IPL$_MMG);
  vmslock(&SPIN_SCHED,-1);
  // find vacant slot in pcb vector
  // and store it
#endif
  // make ipid and epid
  p->pcb$l_pid=alloc_ipid();
  {
    /* register the new PCB in the pcb vector, indexed by ipid */
    unsigned long *vec=sch$gl_pcbvec;
    vec[p->pcb$l_pid&0xffff]=p;
  }
  p->pcb$l_epid=exe$ipid_to_epid(p->pcb$l_pid);
  // should invoke sch$chse, put this at bottom?
  // setipl(0) and return
  // now lots of things from fork
  retval = -EAGAIN;
  /*
   * Check if we are over our maximum process limit, but be sure to
   * exclude root. This is needed to make it possible for login and
   * friends to set the per-user process limit to something lower
   * than the amount of processes root is running. -- Rik
   */
#if 0
  if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur && !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
    goto bad_fork_free;
  atomic_inc(&p->user->__count);
  atomic_inc(&p->user->processes);
#endif
  /*
   * Counter increases are protected by
   * the kernel lock so nr_threads can't
   * increase under us (but it may decrease).
   */
  get_exec_domain(p->exec_domain);
  if (p->binfmt && p->binfmt->module)
    __MOD_INC_USE_COUNT(p->binfmt->module);
  p->did_exec = 0;
  p->swappable = 0;
  p->state = TASK_UNINTERRUPTIBLE;
  //copy_flags(clone_flags, p); // not here?
  /* NOTE(review): second alloc_ipid call - the ipid allocated above
     (and stored in the pcb vector) appears to be leaked; confirm */
  p->pcb$l_pid = alloc_ipid();
  p->run_list.next = NULL;
  p->run_list.prev = NULL;
  p->p_cptr = NULL;
  init_waitqueue_head(&p->wait_chldexit);
  p->vfork_done = NULL;
  spin_lock_init(&p->alloc_lock);
  p->sigpending = 0;
  init_sigpending(&p->pending);
  p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
  p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
  init_timer(&p->real_timer);
  p->real_timer.data = (unsigned long) p;
  p->leader = 0; /* session leadership doesn't inherit */
  p->tty_old_pgrp = 0;
  p->times.tms_utime = p->times.tms_stime = 0;
  p->times.tms_cutime = p->times.tms_cstime = 0;
  p->lock_depth = -1; /* -1 = no lock */
  p->start_time = jiffies;
  INIT_LIST_HEAD(&p->local_pages);
  /* share files/fs/signal state with the creator until the copies
     below replace them */
  p->files = current->files;
  p->fs = current->fs;
  p->sig = current->sig;
  /* copy all the process information */
  /* NOTE(review): the cleanup labels below are empty and fall through,
     so copy_* failures are effectively ignored */
  if (copy_files(clone_flags, p))
    goto bad_fork_cleanup;
  if (copy_fs(clone_flags, p))
    goto bad_fork_cleanup_files;
  if (copy_sighand(clone_flags, p))
    goto bad_fork_cleanup_fs;
 bad_fork_cleanup:
 bad_fork_cleanup_files:
 bad_fork_cleanup_fs:
  // now a hole
  // now more from fork
  /* ok, now we should be set up.. */
  p->swappable = 1;
  p->exit_signal = 0;
  p->pdeath_signal = 0;
  /*
   * "share" dynamic priority between parent and child, thus the
   * total amount of dynamic priorities in the system doesnt change,
   * more scheduling fairness. This is only important in the first
   * timeslice, on the long run the scheduling behaviour is unchanged.
   */
  /*
   * Ok, add it to the run-queues and make it
   * visible to the rest of the system.
   *
   * Let it rip!
   */
  retval = p->pcb$l_epid;
  INIT_LIST_HEAD(&p->thread_group);
  /* Need tasklist lock for parent etc handling! */
  write_lock_irq(&tasklist_lock);
  /* CLONE_PARENT and CLONE_THREAD re-use the old parent */
  /* NOTE(review): the first two assignments are immediately
     overwritten by the next two - current is used as parent */
  p->p_opptr = current->p_opptr;
  p->p_pptr = current->p_pptr;
  p->p_opptr = current /*->p_opptr*/;
  p->p_pptr = current /*->p_pptr*/;
  SET_LINKS(p);
  nr_threads++;
  write_unlock_irq(&tasklist_lock);
  // printk("fork befwak\n");
  //wake_up_process(p); /* do this last */
  // wake_up_process2(p,PRI$_TICOM); /* do this last */
  //goto fork_out;//??
  // now something from exec
  // wait, better do execve itself
  memcpy(p->rlim, current->rlim, sizeof(p->rlim));
  qhead_init(&p->pcb$l_sqfl);
  /* give the new process its own mm */
  struct mm_struct * mm = mm_alloc();
  p->mm = mm;
  p->active_mm = mm;
  p->user = INIT_USER;
  spin_lock(&mmlist_lock);
#if 0
  list_add(&mm->mmlist, &p->p_pptr->mm->mmlist);
#endif
  mmlist_nr++;
  spin_unlock(&mmlist_lock);
  // Now we are getting into the area that is really the swappers
  // To be moved to shell.c and swp$shelinit later
  p->pcb$l_phd=kmalloc(sizeof(struct _phd),GFP_KERNEL);
  init_phd(p->pcb$l_phd);
  /* clone P1 page-table context from the creator */
  init_fork_p1pp(p,p->pcb$l_phd,ctl$gl_pcb,ctl$gl_pcb->pcb$l_phd);
#ifdef __x86_64__
  shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x1000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x2000,0x7fffe000);
#else
  shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
  shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
#endif
  int exe$procstrt(struct _pcb * p);
  /* NOTE(review): regs points at a pointer argument, not real
     registers - only valid because start_thread below is disabled */
  struct pt_regs * regs = &pidadr;
  //printk("newthread %x\n",p),
  retval = new_thread(0, clone_flags, 0, 0, p, 0);
  int eip=0,esp=0;
  // start_thread(regs,eip,esp);
  /* make the new process computable */
  sch$chse(p, PRI$_TICOM);
  vmsunlock(&SPIN_MMG,-1);
  vmsunlock(&SPIN_SCHED,0);
  return SS$_NORMAL;
#if 0
  return sys_execve(((struct dsc$descriptor *)image)->dsc$a_pointer,0,0);
  return SS$_NORMAL;
#endif
#if 0
  {
    char * filename=((struct dsc$descriptor *)image)->dsc$a_pointer;
    char ** argv=0;
    char ** envp=0;
    struct pt_regs * regs=0;
    struct linux_binprm bprm;
    struct file *file;
    int retval;
    int i;
    file = open_exec(filename);
    retval = PTR_ERR(file);
    if (IS_ERR(file))
      return retval;
    bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
    memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
    bprm.file = file;
    bprm.filename = filename;
    bprm.sh_bang = 0;
    bprm.loader = 0;
    bprm.exec = 0;
    if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
      allow_write_access(file);
      fput(file);
      //printk("here 7 %x\n",bprm.argc);
      return bprm.argc;
    }
    if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
      allow_write_access(file);
      fput(file);
      //printk("here 6\n");
      return bprm.envc;
    }
    retval = prepare_binprm(&bprm);
    //printk("here 4\n");
    if (retval < 0)
      goto out;
    retval = copy_strings_kernel(1, &bprm.filename, &bprm);
    //printk("here 3\n");
    if (retval < 0)
      goto out;
    bprm.exec = bprm.p;
    retval = copy_strings(bprm.envc, envp, &bprm);
    //printk("here 2\n");
    if (retval < 0)
      goto out;
    retval = copy_strings(bprm.argc, argv, &bprm);
    //printk("here 1\n");
    if (retval < 0)
      goto out;
    retval = search_binary_handler(&bprm,regs);
    if (retval >= 0)
      /* execve success */
      return retval;
 out:
    /* Something went wrong, return the inode and free the argument pages*/
    allow_write_access(bprm.file);
    if (bprm.file)
      fput(bprm.file);
    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
      struct page * page = bprm.page[i];
      if (page)
        __free_page(page);
    }
    return retval;
  }
#endif
 fork_out:
  return retval;
 bad_fork_free:
  free_task_struct(p);
  goto fork_out;
}
// Enqueue-lock system service (VMS $ENQ analogue).
// Two paths under the SCS spinlock:
//  - new lock: validate the resource name, build LKB and RSB, attach
//    to an optional parent lock, and either create the resource (grant
//    immediately) or queue/grant against an existing resource found in
//    the hash table;
//  - convert (LCK$M_CONVERT): re-evaluate an already-granted lock at a
//    new mode, granting immediately when the queues permit, otherwise
//    placing it on the conversion queue.
// Returns an SS$ status; completion is also posted via the LKSB.
asmlinkage int exe$enq(unsigned int efn, unsigned int lkmode, struct _lksb *lksb, unsigned int flags, void *resnam, unsigned int parid, void (*astadr)(), unsigned long astprm, void (*blkastadr)(), unsigned int acmode, unsigned int rsdm_id)
{
  int convert;
  int retval=SS$_NORMAL;
  int sts;
  // some tests. one only for now, should be more.
  if (lkmode>LCK$K_EXMODE)
    return SS$_BADPARAM;
  vmslock(&SPIN_SCS,IPL$_SCS); // check. probably too early
  convert=flags&LCK$M_CONVERT;
  if (!convert) {
    /* new lock */
    struct _rsb * res = 0;
    struct _rsb * old;
    struct _lkb * lck = 0, *par = 0;
    struct dsc$descriptor * resnamdsc;
    int sserror=0;
    resnamdsc=resnam;
    /* resource name must be non-empty and fit in an RSB */
    if (resnamdsc->dsc$w_length==0 || resnamdsc->dsc$w_length>RSB$K_MAXLEN) {
      sserror=SS$_IVBUFLEN;
      goto error;
    }
    if (flags&LCK$M_EXPEDITE)
      if (lkmode!=LCK$K_NLMODE) {
        sserror=SS$_UNSUPPORTED;
        goto error;
      }
    /* NOTE(review): this second check makes ALL modes except NL
       unsupported, with or without EXPEDITE - intentional stub? */
    if (lkmode!=LCK$K_NLMODE) {
      sserror=SS$_UNSUPPORTED;
      goto error;
    }
    /* build the resource block and lock block */
    res=kmalloc(sizeof(struct _rsb),GFP_KERNEL);
    memset(res,0,sizeof(struct _rsb));
    lck=kmalloc(sizeof(struct _lkb),GFP_KERNEL);
    memset(lck,0,sizeof(struct _lkb));
    lck->lkb$b_efn=efn;
    lck->lkb$l_flags=flags;
    lck->lkb$b_rqmode=lkmode;
    lck->lkb$l_cplastadr=astadr;
    lck->lkb$l_blkastadr=blkastadr;
    lck->lkb$l_astprm=astprm;
    lck->lkb$l_pid=current->pcb$l_pid;
    lck->lkb$l_lksb=lksb;
    qhead_init(&lck->lkb$l_sqfl);
    qhead_init(&lck->lkb$l_ownqfl);
    strncpy(res->rsb$t_resnam,resnamdsc->dsc$a_pointer,resnamdsc->dsc$w_length);
    res->rsb$b_rsnlen=resnamdsc->dsc$w_length;
    setipl(IPL$_SCS); // do scs spinlock
    //setipl(IPL$_ASTDEL);
    if (flags&LCK$M_SYSTEM) {
      /* priv checks */
    } else {
    }
    if (parid==0) {
      //list_add(&res->lr_childof, &ns->ns_root_list);
      //this is added to lck$gl_rrsfl down below, I think
    } else {
      //check valid lock
      // check lock access mode
      /* look up the parent lock; only the owning process may use it */
      par=lockidtbl[parid];
      if (current->pcb$l_pid != par->lkb$l_pid) {
        vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
        return SS$_IVLOCKID;
      }
      //check if parent granted, if not return SS$_PARNOTGRANT;
      /* NOTE(review): the != || != condition is always true (a state
         cannot equal both), so only the CONVERT-flag test gates the
         PARNOTGRANT return - probably meant && */
      if (par->lkb$b_state!=LKB$K_CONVERT || par->lkb$b_state!=LKB$K_GRANTED)
        if ((par->lkb$l_flags & LCK$M_CONVERT) == 0) {
          vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
          return SS$_PARNOTGRANT;
        }
      par->lkb$w_refcnt++;
      res->rsb$l_parent = par->lkb$l_rsb; // should not be here?
      //check if uic-specific resource
      //check if system-wide
      //charge lock against quota
      //list_add(&res->lr_childof, &parent->lr_children);
      //res->rsb$l_rtrsb=enq_find_oldest_parent(r,p->lkb$l_rsb);
      lck->lkb$l_parent=par;
    }
    /* does the named resource already exist? */
    old=find_reshashtbl(resnamdsc);
    if (!old) {
      /* brand-new resource */
      lck$gl_rsbcnt++;
      lck$gl_lckcnt++;
      if (flags & LCK$M_SYNCSTS)
        retval=SS$_SYNCH;
      qhead_init(&res->rsb$l_grqfl);
      qhead_init(&res->rsb$l_cvtqfl);
      qhead_init(&res->rsb$l_wtqfl);
      //insque(&lck->lkb$l_sqfl,res->rsb$l_grqfl);
      lck->lkb$l_rsb=res;
      insert_reshashtbl(res);
      if (parid==0) {
        /* root resource: link into the root-resource list */
        insque(&res->rsb$l_rrsfl,lck$gl_rrsfl);
        qhead_init(&res->rsb$l_srsfl);
        res->rsb$b_depth=0;
        res->rsb$l_rtrsb=res;
        exe$clref(lck->lkb$b_efn);
        insque(&lck->lkb$l_ownqfl,&current->pcb$l_lockqfl);
        //?if (q->flags & LKB$M_DCPLAST)
        lksb->lksb$l_lkid=insert_lck(lck);
        lksb->lksb$w_status=SS$_NORMAL;
        sts = lck$grant_lock(lck ,res ,-1,lkmode,flags,efn,res->rsb$b_ggmode);
        goto end;
      } else {
        // it has a parid non-zero
        /* sub-resource: inherit cluster id / root, bump depth */
        res->rsb$l_csid=par->lkb$l_rsb->rsb$l_csid;
        par->lkb$l_rsb->rsb$w_refcnt++;
        res->rsb$b_depth=par->lkb$l_rsb->rsb$b_depth+1;
        //check maxdepth
        if (res->rsb$b_depth>10) { // pick a number ?
          retval=SS$_EXDEPTH;
          goto error;
        }
        res->rsb$l_rtrsb=par->lkb$l_rsb->rsb$l_rtrsb;
        insque(&res->rsb$l_srsfl,&par->lkb$l_rsb->rsb$l_srsfl);
        if (par->lkb$l_csid) {
          //remote
          lck$snd_granted(lck);
        } else {
          sts = lck$grant_lock(lck,res,-1,lkmode,flags,efn,res->rsb$b_ggmode);
        }
      }
    } else {
      /* old, found in resource hash table */
      /* something else? */
      int granted = 0;
      if (flags & LCK$M_SYNCSTS)
        retval=SS$_SYNCH;
      /* discard the freshly built RSB, use the existing one */
      kfree(res);
      res=old;
      lck->lkb$l_rsb=res;
      //after, also check whether something in cvtqfl or wtqfl -> insque wtqfl
      /* requested mode compatible with the group-granted mode? */
      if (0!=test_bit(res->rsb$b_ggmode,&lck$ar_compat_tbl[lck->lkb$b_rqmode])) {
        if (aqempty(res->rsb$l_wtqfl)) {
          granted=1;
          //sts = lck$grant_lock(lck ,res ,-1,lkmode,flags,efn);
        } else {
          /* compatible but waiters exist: must queue behind them */
          if (flags&LCK$M_NOQUEUE) {
            res->rsb$w_lckcnt--;
            kfree(lck);
            vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
            return SS$_NOTQUEUED;
          } else {
            lck->lkb$b_state=LKB$K_WAITING;
            insque(&lck->lkb$l_sqfl,res->rsb$l_wtqfl);
            lksb->lksb$w_status=0;
            lck->lkb$l_status|=LKB$M_ASYNC;
            maybe_blkast(res,lck);
          }
        }
      } else {
        // if not compatible
        if (flags&LCK$M_NOQUEUE) {
          res->rsb$w_lckcnt--;
          kfree(lck);
          vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
          return SS$_NOTQUEUED;
        } else {
          lck->lkb$b_state=LKB$K_WAITING;
          insque(&lck->lkb$l_sqfl,res->rsb$l_wtqfl);
          lksb->lksb$w_status=0;
          lck->lkb$l_status|=LKB$M_ASYNC;
          maybe_blkast(res,lck);
          // insque(&lck->lkb$l_ownqfl,&current->pcb$l_lockqfl);
        }
      }
      lksb->lksb$l_lkid=insert_lck(lck);
      lksb->lksb$w_status=SS$_NORMAL;
      if ((granted & 1)==1) {
        if (0/*par->lkb$l_csid*/) {
          //remote
          lck$snd_granted(lck);
        } else {
          sts = lck$grant_lock(lck, res, -1,lkmode,flags,efn,res->rsb$b_ggmode);
        }
      }
    }
 end:
    /* raise ipl */
    vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
    return retval;
 error:
    /* ipl back */
    kfree(res);
    kfree(lck);
    vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
    return sserror;
  } else {
    // convert
    /* convert */
    int granted = 0, newmodes = 0;
    struct _lkb * lck;
    struct _rsb * res;
    void * dummy;
    int newmode;
    /* look up the lock being converted via its lock id */
    lck=lockidtbl[lksb->lksb$l_lkid];
    res=lck->lkb$l_rsb;
    if (lck->lkb$b_state!=LKB$K_GRANTED) {
      vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
      return SS$_CVTUNGRANT;
    }
    /* refresh the LKB with the new request's parameters */
    lck->lkb$b_efn=efn;
    lck->lkb$l_flags=flags;
    lck->lkb$b_rqmode=lkmode;
    lck->lkb$l_cplastadr=astadr;
    lck->lkb$l_blkastadr=blkastadr;
    lck->lkb$l_astprm=astprm;
    lck->lkb$l_lksb=lksb;
    remque(&lck->lkb$l_sqfl,&lck->lkb$l_sqfl);// ?
    //remque(&res->rsb$l_grqfl,dummy); // superfluous
    if (aqempty(res->rsb$l_cvtqfl) && aqempty(res->rsb$l_grqfl)) {
      /* nothing competing: grant the conversion immediately */
      sts = lck$grant_lock(lck ,res,lck->lkb$b_grmode,lkmode,flags,efn,-1);
      vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
      return SS$_NORMAL;
    } else {
      // convert, something in cvtqfl or grqfl
      if (res->rsb$b_cgmode!=lck->lkb$b_grmode) {
        newmode=res->rsb$b_ggmode;
      } else {
        newmode=find_highest(lck,res);
        newmodes= 0;
      }
      if (test_bit(lkmode,&lck$ar_compat_tbl[newmode])) {
        //sts = lck$grant_lock(lck,res,lck->lkb$b_grmode,lkmode,flags,efn);
        granted = 1;
      }
    }
    if (granted) {
      if (newmodes) {
        res->rsb$b_fgmode=newmode;
        res->rsb$b_ggmode=newmode;
        res->rsb$b_cgmode=newmode;
      }
      sts = lck$grant_lock(lck,res,lck->lkb$b_grmode,lkmode /*newmode*/,flags,efn,res->rsb$b_ggmode);
      grant_queued(res,newmode,1,1);
    } else {
      /* cannot grant now: move to the conversion queue */
      int wasempty=aqempty(&res->rsb$l_cvtqfl);
      lck->lkb$b_rqmode=lkmode;
      insque(&lck->lkb$l_sqfl,res->rsb$l_cvtqfl);
      lck->lkb$b_state=LKB$K_CONVERT;
      lksb->lksb$w_status=0;
      lck->lkb$l_status|=LKB$M_ASYNC;
      maybe_blkast(res,lck);
      if (wasempty)
        res->rsb$b_cgmode=newmode;
      sts=SS$_NORMAL;
    }
    vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
    return sts;
  }
  /* unreachable: both branches above return */
  vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
}
asmlinkage void sch$sched(int from_sch$resched) { int cpuid = smp_processor_id(); struct _cpu * cpu=smp$gl_cpu_data[cpuid]; struct _pcb *next = 0, *curpcb; int curpri, affinity; unsigned char tmppri; unsigned long qhead = 0; int after, before; curpcb=cpu->cpu$l_curpcb; curpri=cpu->cpu$b_cur_pri; // if (!countme--) { countme=500; printk("."); } if (from_sch$resched == 1) goto skip_lock; #if 0 // NOT YET??? nope,not an interrupt. pushpsl+setipl/vmslock instead? if (intr_blocked(IPL$_SCHED)) return; regtrap(REG_INTR,IPL$_SCHED); #endif int ipl = getipl(); if (ipl != 8 || SPIN_SCHED.spl$l_spinlock == 0) panic("schsch\n"); #if 0 // temp workaround // must avoid nesting, since I do not know how to get out of it setipl(IPL$_SCHED); vmslock(&SPIN_SCHED,-1); #endif /** clear cpu_priority for current pri bit - TODO: where did this come from? */ sch$al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask ); /** skip if ... TODO: from where? */ if (sch$al_cpu_priority[curpri]) goto skip_lock; /** clear active_priority for current pri bit - TODO: where did this come from? */ sch$gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri))); //if (spl(IPL$_SCHED)) return; // old=spl(IPL$_SCHED); /** now 4 linux leftovers */ spin_lock_prefetch(&runqueue_lock); if (!curpcb->active_mm) BUG(); release_kernel_lock(curpcb, cpuid); spin_lock_irq(&runqueue_lock); skip_lock: /** reset cpu affinity TODO: from where? */ affinity=0; struct _pcb * aff_next = 0; /** find highest pri comqueue */ tmppri=ffs(sch$gl_comqs); #ifdef DEBUG_SCHED if (mydebug5) printk("ffs %x %x\n",tmppri,sch$gl_comqs); #endif if (!tmppri) { /** if none found, idle */ #if 0 // spot for more vms sched goto sch$idle; #endif go_idle: /** set bit in idle_cpus */ sch$gl_idle_cpus=sch$gl_idle_cpus | (cpu->cpu$l_cpuid_mask); /** store null pcb and -1 pri: MISSING check why */ /** necessary idle_task line from linux */ next=idle_task(cpuid); goto skip_cap; } else {
// Reschedule the current CPU: take the scheduler lock, clear the
// current priority's bits in the cpu/active priority tables, move the
// current (non-idle) process back onto the tail of its COM queue, then
// fall into sch$sched(1) to pick the next process.
// NOTE(review): brace structure below is intentionally unbalanced
// across the #if 0 regions - the disabled "if (curpcb->state==
// TASK_RUNNING) {" pairs with the disabled "}" before sch$sched.
asmlinkage void sch$resched(void)
{
  int cpuid = smp_processor_id();
  struct _cpu * cpu=smp$gl_cpu_data[cpuid];
  struct _pcb * curpcb;
  unsigned long curpri;
  unsigned long qhead;
  int before,after;
  // lock sched db, soon
  //if (spl(IPL$_SCHED)) return;
  // old=spl(IPL$_SCHED);
  // svpctx, do not think we need to do this here
#ifdef __x86_64__
  if (intr_blocked(IPL$_RESCHED))
    return;
  regtrap(REG_INTR,IPL$_RESCHED);
#endif
  /** spinlock sched and set ipl */
  setipl(IPL$_SCHED);
  vmslock(&SPIN_SCHED,-1);
  spin_lock_irq(&runqueue_lock); /* eventually change to sched? */
  /** get current pcb and priority */
  curpcb=cpu->cpu$l_curpcb;
  release_kernel_lock(curpcb, cpuid);
  curpri=cpu->cpu$b_cur_pri;
  /** clear bit in cpu_priority table */
  sch$al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );
  /** if no process with this pri on any cpu, clear bit in active_priority table */
  if (!sch$al_cpu_priority[curpri])
    sch$gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));
  /** now some if's remaining from linux - TODO: check if still needed */
  if (curpcb == idle_task(curpcb->pcb$l_cpu_id))
    goto out;
  if (curpcb->state==TASK_INTERRUPTIBLE)
    if (signal_pending(curpcb)) {
      curpcb->state = TASK_RUNNING;
      curpcb->pcb$w_state = SCH$C_CUR;
    }
#if 0
  if (curpcb->state!=TASK_RUNNING) {
    curpcb->pcb$w_state=SCH$C_LEF; // use here temporarily
  }
#endif
#if 0
  if (curpcb->state==TASK_RUNNING) {
#endif
#ifdef DEBUG_SCHED
  before=numproc();
  // printcom();
  //if (curpcb==0xa018c000 && qhead==0xa018c000)
  //  panic("aieeeeeh\n");
  mycheckaddr(0);
  //if (curpcb==qhead) panic(" a panic\n");
#endif
  /** set pri bit in comqs */
  sch$gl_comqs=sch$gl_comqs | (1 << curpri);
  // curpcb->state=TASK_INTERRUPTIBLE;
  /* soon SCH$C_COM ? */
  /** set state of cur pcb to COM */
  curpcb->pcb$w_state=SCH$C_COM;
  /** insert pcb at tail of comqueue */
#ifdef __i386__
  qhead=*(unsigned long *)&sch$aq_comt[curpri];
#else
  qhead=*(unsigned long *)&sch$aq_comh[curpri][1];
#endif
  if (!task_on_comqueue(curpcb)) {
    if (curpcb==qhead)
      panic(" a panic\n");
    insque(curpcb,qhead);
  } else {
    panic("something\n");
  }
#ifdef DEBUG_SCHED
  mycheckaddr(42);
#endif
  /** linux leftover */
  nr_running++;
#ifdef DEBUG_SCHED
  after=numproc();
  if(after-before!=1) {
    //printk("entry qhead %x %x\n",curpcb,qhead);
    printcom();
    panic("insq2 %x %x\n",before,after);
  }
#endif
 out:
  /** clear idle_cpus to signal all idle cpus to try to reschedule */
  sch$gl_idle_cpus=0;
#if 0
  }
#endif
  /** go some intro sch$sched */
  sch$sched(1);
}