int f11b_io_done(struct _irp * i)
{
    void (*func)(unsigned long);

    // move user_status into irp$l_media
    if (i->irp$l_iosb==0) // not set, or something wipes the irp
    {
        i->irp$l_iost1=SS$_NORMAL; // temporary value, must be set
    }
    else
    {
        i->irp$l_iost1=((struct _iosb *)i->irp$l_iosb)->iosb$w_status;
    }

    // decr vcb transaction count
    // clear namestring
    // copy local fib back into the complex buffer packet
    // set irp$l_bcnt to abd$c_attrib
    // call check-dismount

    setipl(IPL$_ASTDEL);
    ioc$bufpost(i);
    sch$postef(i->irp$l_pid,0,i->irp$b_efn);
    func=i->irp$l_wind; // really acb$l_kast
    func(i);
    setipl(0);
}
unsigned MTID ()
{
    register short inuse, oldipl;
    register unsigned newtid;
    register TCB *tcp;

    inuse = TRUE;                       /* preset inuse to get things started */

    if ((struct _mt_def *) NIL == _MT_)
        _MT_ = (struct _mt_def *) XBIOS (X_MTDEFS);

    oldipl = setipl (7);                /* DISABLE INTERRUPTS */

    while (inuse) {

        newtid = _MT_->mtp->IDct++;     /* pick the next ID to try */
        inuse = FALSE;                  /* say it's not in use */
        tcp = _MT_->mtp->TCBs;          /* start search at beginning */

        while (tcp) {                   /* search the TCB table */

            if (tcp->flags & MTF_OCC)           /* TCB occupied ? */
                if (tcp->tid == newtid) {       /* tid in use ? */
                    inuse = TRUE;               /* set to search again */
                    break;
                }

            tcp = tcp->fwd;
        }
    }

    setipl (oldipl);                    /* RESTORE INTERRUPTS */
    return (newtid);                    /* return the new task ID */
}
signed int mmg$ininewpfn(struct _pcb * p, struct _phd * phd, void * va, struct _mypte * pte)
{
    int ipl=getipl();
    setipl(8); // check
    signed long pfn=mmg$allocpfn();
    setipl(ipl);
    struct _pfn * page;

    if (pfn&0x80000000)
        return pfn;

    if ((((int)va)&WSL$M_PAGTYP)>=WSL$C_GLOBAL)
    {
        phd=mmg$gl_sysphd;
        pte=&((struct _mypte *)mmg$gq_gpt_base)[pte->pte$v_gptx];
        // not implemented yet
    }

    if ((((unsigned long)va)&0x80000000) == 0)
    {
        mmg$incptref(p->pcb$l_phd,pte);
    }

    // wrong
    page=&((struct _pfn *)pfn$al_head[PFN$C_FREPAGLST])[pfn];

    // also set page type
    mem_map[pfn].pfn$v_pagtyp=((unsigned long)va)&PFN$M_PAGTYP;
    // mem_map[pfn].virtual=__va(pfn*PAGE_SIZE); // not necessary
    //mem_map[pfn].count.counter=1;
    mem_map[pfn].pfn$l_pt_pfn=0;
#if 0
#ifdef __i386__
    // check. debug
    mem_map[pfn].pfn$l_pt_pfn=va;
#endif
#endif
    mem_map[pfn].pfn$q_pte_index=0;
    mem_map[pfn].pfn$q_pte_index=pte; // hope it's the right one?
    page=&mem_map[pfn];
    //set_page_count(page, 1);
    mem_map[pfn].pfn$l_refcnt=1; // aah bug

    mmg$makewsle(p,p->pcb$l_phd,va,pte,pfn);

    return pfn;
}
void mutexwait(struct _pcb * p, struct _mtx * m)
{
    /** store mutex address in efwm */
    p->pcb$l_efwm = m; // check. 32 bit problem
    /** new pcb state MWAIT */
    p->pcb$w_state = SCH$C_MWAIT;
    /** insert into MWAIT scheduling queue */
    insque(p,sch$aq_wqhdr[SCH$C_MWAIT].wqh$l_wqfl);
    int ipl=getipl();
    /** put on wait */
    sch$waitl(p, &sch$aq_wqhdr[SCH$C_MWAIT]);
    setipl(ipl);
}
static void ubd_intr2(int irq, void *dev, struct pt_regs *unused)
{
    struct _irp * i;
    struct _ucb * u;
    void (*func)();

    if (intr_blocked(20))
        return;
    regtrap(REG_INTR,20);

    setipl(20); /* have to do this until we get things more in order */

    i=globali;
    u=globalu;

    func=u->ucb$l_fpc;
    func(i,u);

    myrei();
}
asmlinkage int cmod$astexit()
{
    struct _pcb * p = ctl$gl_pcb;

    setipl(IPL$_ASTDEL);

    // clear a pcb$l_astact bit
    test_and_clear_bit(p->psl_prv_mod, &p->pcb$b_astact); // check

    sch$newlvl(p);

#ifdef __i386__
    __asm__ __volatile__(
        "addl $0x40, %esp\n\t" // check. rewind stack
    );
#endif
#ifdef __x86_64__
    __asm__ __volatile__(
        "addq $0x88, %rsp\n\t" // check. rewind stack
    );
#endif
}
static long tsunami_probe_read(volatile unsigned long *vaddr)
{
    long dont_care, probe_result;
    int cpu = smp_processor_id();
    int s = swpipl(6);  /* Block everything but machine checks. */

    TSUNAMI_mcheck_taken[cpu] = 0;
    TSUNAMI_mcheck_expected[cpu] = 1;
    dont_care = *vaddr;
    draina();
    TSUNAMI_mcheck_expected[cpu] = 0;
    probe_result = !TSUNAMI_mcheck_taken[cpu];
    TSUNAMI_mcheck_taken[cpu] = 0;
    setipl(s);

    printk("dont_care == 0x%lx\n", dont_care);

    return probe_result;
}
asmlinkage int exe$creprc(unsigned int *pidadr, void *image, void *input, void *output, void *error,
                          struct _generic_64 *prvadr, unsigned int *quota, void *prcnam,
                          unsigned int baspri, unsigned int uic, unsigned short int mbxunt,
                          unsigned int stsflg,...)
{
    unsigned long stack_here;
    struct _pcb * p, * cur;
    int retval;
    struct dsc$descriptor * imd = image, * ind = input, * oud = output, * erd = error;
    unsigned long clone_flags=CLONE_VFORK;

    //check pidadr
    ctl$gl_creprc_flags = stsflg;
    // check for PRC$M_NOUAF sometime
    if (stsflg&PRC$M_DETACH) { }
    if (uic) { }
    //setipl(IPL$_ASTDEL);//postpone this?

    cur=ctl$gl_pcb;
    vmslock(&SPIN_SCHED, IPL$_SCHED);
    vmslock(&SPIN_MMG, IPL$_MMG);

    p = alloc_task_struct();
    //bzero(p,sizeof(struct _pcb));//not wise?
    memset(p,0,sizeof(struct _pcb));
    // check more

    // compensate for no struct clone/copy
    p->sigmask_lock = SPIN_LOCK_UNLOCKED;
    p->alloc_lock = SPIN_LOCK_UNLOCKED;

    qhead_init(&p->pcb$l_astqfl);
    // and enable ast del to all modes
    p->pcb$b_type = DYN$C_PCB;
    p->pcb$b_asten=15;
    p->phd$b_astlvl=4;
    p->pr_astlvl=4;
    p->psl=0;
    p->pslindex=0;

    qhead_init(&p->pcb$l_lockqfl);

    // set capabilities
    p->pcb$l_permanent_capability = sch$gl_default_process_cap;
    p->pcb$l_capability = p->pcb$l_permanent_capability;

    // set affinity
    // set default fileprot
    // set arb
    // set mbx stuff

    // from setprn:
    if (prcnam) {
        struct dsc$descriptor *s=prcnam;
        strncpy(p->pcb$t_lname,s->dsc$a_pointer,s->dsc$w_length);
    }

    // set priv
    p->pcb$l_priv=ctl$gl_pcb->pcb$l_priv;

    // set pris
    p->pcb$b_prib=31-baspri;
    p->pcb$b_pri=31-baspri-6;
    // if (p->pcb$b_pri<16) p->pcb$b_pri=16;
    p->pcb$w_quant=-QUANTUM;

    // set uic
    p->pcb$l_uic=ctl$gl_pcb->pcb$l_uic;

    // set vms pid
    // check process name

    // do something with pqb
    p->pcb$l_pqb=kmalloc(sizeof(struct _pqb),GFP_KERNEL);
    memset(p->pcb$l_pqb,0,sizeof(struct _pqb));
    struct _pqb * pqb = p->pcb$l_pqb;
    pqb->pqb$q_prvmsk = ctl$gq_procpriv;
    if (imd)
        memcpy(pqb->pqb$t_image,imd->dsc$a_pointer,imd->dsc$w_length);
    if (ind)
        memcpy(pqb->pqb$t_input,ind->dsc$a_pointer,ind->dsc$w_length);
    if (oud)
        memcpy(pqb->pqb$t_output,oud->dsc$a_pointer,oud->dsc$w_length);
    if (erd)
        memcpy(pqb->pqb$t_error,erd->dsc$a_pointer,erd->dsc$w_length);
    if (oud) // temp measure
        memcpy(p->pcb$t_terminal,oud->dsc$a_pointer,oud->dsc$w_length);

    // translate some logicals
    // copy security clearance
    // copy msg
    // copy flags
    // set jib
    // do quotas
    // process itmlst
    // set pcb$l_pqb

#if 0
    setipl(IPL$_MMG);
    vmslock(&SPIN_SCHED,-1);
    // find vacant slot in pcb vector
    // and store it
#endif

    // make ipid and epid
    p->pcb$l_pid=alloc_ipid();
    {
        unsigned long *vec=sch$gl_pcbvec;
        vec[p->pcb$l_pid&0xffff]=p;
    }
    p->pcb$l_epid=exe$ipid_to_epid(p->pcb$l_pid);

    // should invoke sch$chse, put this at bottom?
    // setipl(0) and return

    // now lots of things from fork
    retval = -EAGAIN;
    /*
     * Check if we are over our maximum process limit, but be sure to
     * exclude root. This is needed to make it possible for login and
     * friends to set the per-user process limit to something lower
     * than the amount of processes root is running. -- Rik
     */
#if 0
    if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur
        && !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
        goto bad_fork_free;

    atomic_inc(&p->user->__count);
    atomic_inc(&p->user->processes);
#endif

    /*
     * Counter increases are protected by
     * the kernel lock so nr_threads can't
     * increase under us (but it may decrease).
     */
    get_exec_domain(p->exec_domain);

    if (p->binfmt && p->binfmt->module)
        __MOD_INC_USE_COUNT(p->binfmt->module);

    p->did_exec = 0;
    p->swappable = 0;
    p->state = TASK_UNINTERRUPTIBLE;

    //copy_flags(clone_flags, p); // not here?
    p->pcb$l_pid = alloc_ipid();

    p->run_list.next = NULL;
    p->run_list.prev = NULL;

    p->p_cptr = NULL;
    init_waitqueue_head(&p->wait_chldexit);
    p->vfork_done = NULL;
    spin_lock_init(&p->alloc_lock);

    p->sigpending = 0;
    init_sigpending(&p->pending);

    p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
    p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
    init_timer(&p->real_timer);
    p->real_timer.data = (unsigned long) p;

    p->leader = 0;              /* session leadership doesn't inherit */
    p->tty_old_pgrp = 0;
    p->times.tms_utime = p->times.tms_stime = 0;
    p->times.tms_cutime = p->times.tms_cstime = 0;
    p->lock_depth = -1;         /* -1 = no lock */
    p->start_time = jiffies;

    INIT_LIST_HEAD(&p->local_pages);

    p->files = current->files;
    p->fs = current->fs;
    p->sig = current->sig;

    /* copy all the process information */
    if (copy_files(clone_flags, p))
        goto bad_fork_cleanup;
    if (copy_fs(clone_flags, p))
        goto bad_fork_cleanup_files;
    if (copy_sighand(clone_flags, p))
        goto bad_fork_cleanup_fs;

bad_fork_cleanup:
bad_fork_cleanup_files:
bad_fork_cleanup_fs:

    // now a hole

    // now more from fork
    /* ok, now we should be set up.. */
    p->swappable = 1;
    p->exit_signal = 0;
    p->pdeath_signal = 0;

    /*
     * "share" dynamic priority between parent and child, thus the
     * total amount of dynamic priorities in the system doesnt change,
     * more scheduling fairness. This is only important in the first
     * timeslice, on the long run the scheduling behaviour is unchanged.
     */

    /*
     * Ok, add it to the run-queues and make it
     * visible to the rest of the system.
     *
     * Let it rip!
     */
    retval = p->pcb$l_epid;
    INIT_LIST_HEAD(&p->thread_group);

    /* Need tasklist lock for parent etc handling! */
    write_lock_irq(&tasklist_lock);

    /* CLONE_PARENT and CLONE_THREAD re-use the old parent */
    p->p_opptr = current->p_opptr;
    p->p_pptr = current->p_pptr;
    p->p_opptr = current /*->p_opptr*/;
    p->p_pptr = current /*->p_pptr*/;

    SET_LINKS(p);
    nr_threads++;
    write_unlock_irq(&tasklist_lock);

    // printk("fork befwak\n");
    //wake_up_process(p);                 /* do this last */
    // wake_up_process2(p,PRI$_TICOM);    /* do this last */
    //goto fork_out;//??

    // now something from exec
    // wait, better do execve itself

    memcpy(p->rlim, current->rlim, sizeof(p->rlim));

    qhead_init(&p->pcb$l_sqfl);

    struct mm_struct * mm = mm_alloc();
    p->mm = mm;
    p->active_mm = mm;
    p->user = INIT_USER;

    spin_lock(&mmlist_lock);
#if 0
    list_add(&mm->mmlist, &p->p_pptr->mm->mmlist);
#endif
    mmlist_nr++;
    spin_unlock(&mmlist_lock);

    // Now we are getting into the area that is really the swappers
    // To be moved to shell.c and swp$shelinit later
    p->pcb$l_phd=kmalloc(sizeof(struct _phd),GFP_KERNEL);
    init_phd(p->pcb$l_phd);
    init_fork_p1pp(p,p->pcb$l_phd,ctl$gl_pcb,ctl$gl_pcb->pcb$l_phd);
#ifdef __x86_64__
    shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x1000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x2000,0x7fffe000);
#else
    shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
    shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
#endif

    int exe$procstrt(struct _pcb * p);
    struct pt_regs * regs = &pidadr;

    //printk("newthread %x\n",p),
    retval = new_thread(0, clone_flags, 0, 0, p, 0);

    int eip=0,esp=0;
    // start_thread(regs,eip,esp);

    sch$chse(p, PRI$_TICOM);

    vmsunlock(&SPIN_MMG,-1);
    vmsunlock(&SPIN_SCHED,0);

    return SS$_NORMAL;

#if 0
    return sys_execve(((struct dsc$descriptor *)image)->dsc$a_pointer,0,0);
    return SS$_NORMAL;
#endif

#if 0
    {
        char * filename=((struct dsc$descriptor *)image)->dsc$a_pointer;
        char ** argv=0;
        char ** envp=0;
        struct pt_regs * regs=0;
        struct linux_binprm bprm;
        struct file *file;
        int retval;
        int i;

        file = open_exec(filename);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
            return retval;

        bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
        memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));

        bprm.file = file;
        bprm.filename = filename;
        bprm.sh_bang = 0;
        bprm.loader = 0;
        bprm.exec = 0;
        if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
            allow_write_access(file);
            fput(file);
            //printk("here 7 %x\n",bprm.argc);
            return bprm.argc;
        }
        if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
            allow_write_access(file);
            fput(file);
            //printk("here 6\n");
            return bprm.envc;
        }

        retval = prepare_binprm(&bprm);
        //printk("here 4\n");
        if (retval < 0)
            goto out;

        retval = copy_strings_kernel(1, &bprm.filename, &bprm);
        //printk("here 3\n");
        if (retval < 0)
            goto out;

        bprm.exec = bprm.p;
        retval = copy_strings(bprm.envc, envp, &bprm);
        //printk("here 2\n");
        if (retval < 0)
            goto out;

        retval = copy_strings(bprm.argc, argv, &bprm);
        //printk("here 1\n");
        if (retval < 0)
            goto out;

        retval = search_binary_handler(&bprm,regs);
        if (retval >= 0)
            /* execve success */
            return retval;

out:
        /* Something went wrong, return the inode and free the argument pages*/
        allow_write_access(bprm.file);
        if (bprm.file)
            fput(bprm.file);

        for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
            struct page * page = bprm.page[i];
            if (page)
                __free_page(page);
        }

        return retval;
    }
#endif

fork_out:
    return retval;

bad_fork_free:
    free_task_struct(p);
    goto fork_out;
}
asmlinkage int exe$enq(unsigned int efn, unsigned int lkmode, struct _lksb *lksb, unsigned int flags,
                       void *resnam, unsigned int parid, void (*astadr)(), unsigned long astprm,
                       void (*blkastadr)(), unsigned int acmode, unsigned int rsdm_id)
{
    int convert;
    int retval=SS$_NORMAL;
    int sts;

    // some tests. one only for now, should be more.
    if (lkmode>LCK$K_EXMODE)
        return SS$_BADPARAM;

    vmslock(&SPIN_SCS,IPL$_SCS); // check. probably too early

    convert=flags&LCK$M_CONVERT;

    if (!convert) {
        /* new lock */
        struct _rsb * res = 0;
        struct _rsb * old;
        struct _lkb * lck = 0, *par = 0;
        struct dsc$descriptor * resnamdsc;
        int sserror=0;

        resnamdsc=resnam;

        if (resnamdsc->dsc$w_length==0 || resnamdsc->dsc$w_length>RSB$K_MAXLEN) {
            sserror=SS$_IVBUFLEN;
            goto error;
        }

        if (flags&LCK$M_EXPEDITE)
            if (lkmode!=LCK$K_NLMODE) {
                sserror=SS$_UNSUPPORTED;
                goto error;
            }

        if (lkmode!=LCK$K_NLMODE) {
            sserror=SS$_UNSUPPORTED;
            goto error;
        }

        res=kmalloc(sizeof(struct _rsb),GFP_KERNEL);
        memset(res,0,sizeof(struct _rsb));
        lck=kmalloc(sizeof(struct _lkb),GFP_KERNEL);
        memset(lck,0,sizeof(struct _lkb));

        lck->lkb$b_efn=efn;
        lck->lkb$l_flags=flags;
        lck->lkb$b_rqmode=lkmode;
        lck->lkb$l_cplastadr=astadr;
        lck->lkb$l_blkastadr=blkastadr;
        lck->lkb$l_astprm=astprm;
        lck->lkb$l_pid=current->pcb$l_pid;
        lck->lkb$l_lksb=lksb;
        qhead_init(&lck->lkb$l_sqfl);
        qhead_init(&lck->lkb$l_ownqfl);

        strncpy(res->rsb$t_resnam,resnamdsc->dsc$a_pointer,resnamdsc->dsc$w_length);
        res->rsb$b_rsnlen=resnamdsc->dsc$w_length;

        setipl(IPL$_SCS); // do scs spinlock
        //setipl(IPL$_ASTDEL);

        if (flags&LCK$M_SYSTEM) { /* priv checks */ } else { }

        if (parid==0) {
            //list_add(&res->lr_childof, &ns->ns_root_list);
            //this is added to lck$gl_rrsfl down below, I think
        } else {
            //check valid lock
            // check lock access mode
            par=lockidtbl[parid];
            if (current->pcb$l_pid != par->lkb$l_pid) {
                vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
                return SS$_IVLOCKID;
            }
            //check if parent granted, if not return SS$_PARNOTGRANT;
            if (par->lkb$b_state!=LKB$K_CONVERT || par->lkb$b_state!=LKB$K_GRANTED)
                if ((par->lkb$l_flags & LCK$M_CONVERT) == 0) {
                    vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
                    return SS$_PARNOTGRANT;
                }
            par->lkb$w_refcnt++;
            res->rsb$l_parent = par->lkb$l_rsb; // should not be here?
            //check if uic-specific resource
            //check if system-wide
            //charge lock against quota
            //list_add(&res->lr_childof, &parent->lr_children);
            //res->rsb$l_rtrsb=enq_find_oldest_parent(r,p->lkb$l_rsb);
            lck->lkb$l_parent=par;
        }

        old=find_reshashtbl(resnamdsc);

        if (!old) {
            lck$gl_rsbcnt++;
            lck$gl_lckcnt++;
            if (flags & LCK$M_SYNCSTS)
                retval=SS$_SYNCH;
            qhead_init(&res->rsb$l_grqfl);
            qhead_init(&res->rsb$l_cvtqfl);
            qhead_init(&res->rsb$l_wtqfl);
            //insque(&lck->lkb$l_sqfl,res->rsb$l_grqfl);
            lck->lkb$l_rsb=res;
            insert_reshashtbl(res);

            if (parid==0) {
                insque(&res->rsb$l_rrsfl,lck$gl_rrsfl);
                qhead_init(&res->rsb$l_srsfl);
                res->rsb$b_depth=0;
                res->rsb$l_rtrsb=res;
                exe$clref(lck->lkb$b_efn);
                insque(&lck->lkb$l_ownqfl,&current->pcb$l_lockqfl);
                //?if (q->flags & LKB$M_DCPLAST)
                lksb->lksb$l_lkid=insert_lck(lck);
                lksb->lksb$w_status=SS$_NORMAL;
                sts = lck$grant_lock(lck ,res ,-1,lkmode,flags,efn,res->rsb$b_ggmode);
                goto end;
            } else {
                // it has a parid non-zero
                res->rsb$l_csid=par->lkb$l_rsb->rsb$l_csid;
                par->lkb$l_rsb->rsb$w_refcnt++;
                res->rsb$b_depth=par->lkb$l_rsb->rsb$b_depth+1;
                //check maxdepth
                if (res->rsb$b_depth>10) { // pick a number ?
                    retval=SS$_EXDEPTH;
                    goto error;
                }
                res->rsb$l_rtrsb=par->lkb$l_rsb->rsb$l_rtrsb;
                insque(&res->rsb$l_srsfl,&par->lkb$l_rsb->rsb$l_srsfl);
                if (par->lkb$l_csid) {
                    //remote
                    lck$snd_granted(lck);
                } else {
                    sts = lck$grant_lock(lck,res,-1,lkmode,flags,efn,res->rsb$b_ggmode);
                }
            }
        } else {
            /* old, found in resource hash table */
            /* something else? */
            int granted = 0;
            if (flags & LCK$M_SYNCSTS)
                retval=SS$_SYNCH;
            kfree(res);
            res=old;
            lck->lkb$l_rsb=res;
            //after, also check whether something in cvtqfl or wtqfl -> insque wtqfl
            if (0!=test_bit(res->rsb$b_ggmode,&lck$ar_compat_tbl[lck->lkb$b_rqmode])) {
                if (aqempty(res->rsb$l_wtqfl)) {
                    granted=1;
                    //sts = lck$grant_lock(lck ,res ,-1,lkmode,flags,efn);
                } else {
                    if (flags&LCK$M_NOQUEUE) {
                        res->rsb$w_lckcnt--;
                        kfree(lck);
                        vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
                        return SS$_NOTQUEUED;
                    } else {
                        lck->lkb$b_state=LKB$K_WAITING;
                        insque(&lck->lkb$l_sqfl,res->rsb$l_wtqfl);
                        lksb->lksb$w_status=0;
                        lck->lkb$l_status|=LKB$M_ASYNC;
                        maybe_blkast(res,lck);
                    }
                }
            } else {
                // if not compatible
                if (flags&LCK$M_NOQUEUE) {
                    res->rsb$w_lckcnt--;
                    kfree(lck);
                    vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
                    return SS$_NOTQUEUED;
                } else {
                    lck->lkb$b_state=LKB$K_WAITING;
                    insque(&lck->lkb$l_sqfl,res->rsb$l_wtqfl);
                    lksb->lksb$w_status=0;
                    lck->lkb$l_status|=LKB$M_ASYNC;
                    maybe_blkast(res,lck);
                    // insque(&lck->lkb$l_ownqfl,&current->pcb$l_lockqfl);
                }
            }
            lksb->lksb$l_lkid=insert_lck(lck);
            lksb->lksb$w_status=SS$_NORMAL;
            if ((granted & 1)==1) {
                if (0/*par->lkb$l_csid*/) {
                    //remote
                    lck$snd_granted(lck);
                } else {
                    sts = lck$grant_lock(lck, res, -1,lkmode,flags,efn,res->rsb$b_ggmode);
                }
            }
        }

end:
        /* raise ipl */
        vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
        return retval;

error:
        /* ipl back */
        kfree(res);
        kfree(lck);
        vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
        return sserror;
    } else {
        // convert
        /* convert */
        int granted = 0, newmodes = 0;
        struct _lkb * lck;
        struct _rsb * res;
        void * dummy;
        int newmode;

        lck=lockidtbl[lksb->lksb$l_lkid];
        res=lck->lkb$l_rsb;

        if (lck->lkb$b_state!=LKB$K_GRANTED) {
            vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
            return SS$_CVTUNGRANT;
        }

        lck->lkb$b_efn=efn;
        lck->lkb$l_flags=flags;
        lck->lkb$b_rqmode=lkmode;
        lck->lkb$l_cplastadr=astadr;
        lck->lkb$l_blkastadr=blkastadr;
        lck->lkb$l_astprm=astprm;
        lck->lkb$l_lksb=lksb;

        remque(&lck->lkb$l_sqfl,&lck->lkb$l_sqfl); // ?
        //remque(&res->rsb$l_grqfl,dummy); // superfluous

        if (aqempty(res->rsb$l_cvtqfl) && aqempty(res->rsb$l_grqfl)) {
            sts = lck$grant_lock(lck ,res,lck->lkb$b_grmode,lkmode,flags,efn,-1);
            vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
            return SS$_NORMAL;
        } else {
            // convert, something in cvtqfl or grqfl
            if (res->rsb$b_cgmode!=lck->lkb$b_grmode) {
                newmode=res->rsb$b_ggmode;
            } else {
                newmode=find_highest(lck,res);
                newmodes= 0;
            }
            if (test_bit(lkmode,&lck$ar_compat_tbl[newmode])) {
                //sts = lck$grant_lock(lck,res,lck->lkb$b_grmode,lkmode,flags,efn);
                granted = 1;
            }
        }

        if (granted) {
            if (newmodes) {
                res->rsb$b_fgmode=newmode;
                res->rsb$b_ggmode=newmode;
                res->rsb$b_cgmode=newmode;
            }
            sts = lck$grant_lock(lck,res,lck->lkb$b_grmode,lkmode /*newmode*/,flags,efn,res->rsb$b_ggmode);
            grant_queued(res,newmode,1,1);
        } else {
            int wasempty=aqempty(&res->rsb$l_cvtqfl);
            lck->lkb$b_rqmode=lkmode;
            insque(&lck->lkb$l_sqfl,res->rsb$l_cvtqfl);
            lck->lkb$b_state=LKB$K_CONVERT;
            lksb->lksb$w_status=0;
            lck->lkb$l_status|=LKB$M_ASYNC;
            maybe_blkast(res,lck);
            if (wasempty)
                res->rsb$b_cgmode=newmode;
            sts=SS$_NORMAL;
        }

        vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
        return sts;
    }

    vmsunlock(&SPIN_SCS,IPL$_ASTDEL);
}
asmlinkage void sch$sched(int from_sch$resched)
{
    int cpuid = smp_processor_id();
    struct _cpu * cpu=smp$gl_cpu_data[cpuid];
    struct _pcb *next = 0, *curpcb;
    int curpri, affinity;
    unsigned char tmppri;
    unsigned long qhead = 0;
    int after, before;

    curpcb=cpu->cpu$l_curpcb;
    curpri=cpu->cpu$b_cur_pri;

    // if (!countme--) { countme=500; printk("."); }

    if (from_sch$resched == 1)
        goto skip_lock;

#if 0
    // NOT YET??? nope,not an interrupt. pushpsl+setipl/vmslock instead?
    if (intr_blocked(IPL$_SCHED))
        return;
    regtrap(REG_INTR,IPL$_SCHED);
#endif

    int ipl = getipl();
    if (ipl != 8 || SPIN_SCHED.spl$l_spinlock == 0)
        panic("schsch\n");

#if 0
    // temp workaround
    // must avoid nesting, since I do not know how to get out of it
    setipl(IPL$_SCHED);
    vmslock(&SPIN_SCHED,-1);
#endif

    /** clear cpu_priority for current pri bit - TODO: where did this come from? */
    sch$al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );

    /** skip if ... TODO: from where? */
    if (sch$al_cpu_priority[curpri])
        goto skip_lock;

    /** clear active_priority for current pri bit - TODO: where did this come from? */
    sch$gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));

    //if (spl(IPL$_SCHED)) return;
    // old=spl(IPL$_SCHED);

    /** now 4 linux leftovers */
    spin_lock_prefetch(&runqueue_lock);
    if (!curpcb->active_mm) BUG();
    release_kernel_lock(curpcb, cpuid);
    spin_lock_irq(&runqueue_lock);

skip_lock:

    /** reset cpu affinity TODO: from where? */
    affinity=0;
    struct _pcb * aff_next = 0;

    /** find highest pri comqueue */
    tmppri=ffs(sch$gl_comqs);
#ifdef DEBUG_SCHED
    if (mydebug5)
        printk("ffs %x %x\n",tmppri,sch$gl_comqs);
#endif

    if (!tmppri) {
        /** if none found, idle */
#if 0
        // spot for more vms sched
        goto sch$idle;
#endif
go_idle:
        /** set bit in idle_cpus */
        sch$gl_idle_cpus=sch$gl_idle_cpus | (cpu->cpu$l_cpuid_mask);
        /** store null pcb and -1 pri: MISSING check why */
        /** necessary idle_task line from linux */
        next=idle_task(cpuid);
        goto skip_cap;
    } else {
asmlinkage void sch$resched(void)
{
    int cpuid = smp_processor_id();
    struct _cpu * cpu=smp$gl_cpu_data[cpuid];
    struct _pcb * curpcb;
    unsigned long curpri;
    unsigned long qhead;
    int before,after;

    // lock sched db, soon
    //if (spl(IPL$_SCHED)) return;
    // old=spl(IPL$_SCHED);

    // svpctx, do not think we need to do this here

#ifdef __x86_64__
    if (intr_blocked(IPL$_RESCHED))
        return;
    regtrap(REG_INTR,IPL$_RESCHED);
#endif

    /** spinlock sched and set ipl */
    setipl(IPL$_SCHED);
    vmslock(&SPIN_SCHED,-1);
    spin_lock_irq(&runqueue_lock); /* eventually change to sched? */

    /** get current pcb and priority */
    curpcb=cpu->cpu$l_curpcb;
    release_kernel_lock(curpcb, cpuid);
    curpri=cpu->cpu$b_cur_pri;

    /** clear bit in cpu_priority table */
    sch$al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );

    /** if no process with this pri on any cpu, clear bit in active_priority table */
    if (!sch$al_cpu_priority[curpri])
        sch$gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));

    /** now some if's remaining from linux - TODO: check if still needed */
    if (curpcb == idle_task(curpcb->pcb$l_cpu_id))
        goto out;

    if (curpcb->state==TASK_INTERRUPTIBLE)
        if (signal_pending(curpcb)) {
            curpcb->state = TASK_RUNNING;
            curpcb->pcb$w_state = SCH$C_CUR;
        }

#if 0
    if (curpcb->state!=TASK_RUNNING) {
        curpcb->pcb$w_state=SCH$C_LEF; // use here temporarily
    }
#endif

#if 0
    if (curpcb->state==TASK_RUNNING) {
#endif

#ifdef DEBUG_SCHED
        before=numproc();
        // printcom();
        //if (curpcb==0xa018c000 && qhead==0xa018c000)
        //   panic("aieeeeeh\n");
        mycheckaddr(0);
        //if (curpcb==qhead) panic(" a panic\n");
#endif

        /** set pri bit in comqs */
        sch$gl_comqs=sch$gl_comqs | (1 << curpri);

        // curpcb->state=TASK_INTERRUPTIBLE; /* soon SCH$C_COM ? */
        /** set state of cur pcb to COM */
        curpcb->pcb$w_state=SCH$C_COM;

        /** insert pcb at tail of comqueue */
#ifdef __i386__
        qhead=*(unsigned long *)&sch$aq_comt[curpri];
#else
        qhead=*(unsigned long *)&sch$aq_comh[curpri][1];
#endif
        if (!task_on_comqueue(curpcb)) {
            if (curpcb==qhead) panic(" a panic\n");
            insque(curpcb,qhead);
        } else {
            panic("something\n");
        }
#ifdef DEBUG_SCHED
        mycheckaddr(42);
#endif

        /** linux leftover */
        nr_running++;

#ifdef DEBUG_SCHED
        after=numproc();
        if(after-before!=1) {
            //printk("entry qhead %x %x\n",curpcb,qhead);
            printcom();
            panic("insq2 %x %x\n",before,after);
        }
#endif

out:
        /** clear idle_cpus to signal all idle cpus to try to reschedule */
        sch$gl_idle_cpus=0;
#if 0
    }
#endif

    /** go some intro sch$sched */
    sch$sched(1);
}
short delpnts ()
{
    register struct instpnt *pp;
    register char *pp1, *pp2;
    register short np, pt1, i, pif, cf;
    struct idfnhdr *fp;
    struct instdef *vp;
    unsigned *fpu;
    short pt2, nmv, oldi;

    vp = &vbufs[curvce];                /* voice buffer pointer */
    fp = &vp->idhfnc[curfunc];          /* function pointer */
    pif = 0x00FF & fp->idfpif;          /* number of points in function */
    np = pif - subj;                    /* number of points to delete */
    pt1 = (0x00FF & fp->idfpt1) + subj; /* first point to delete */

#if DEBUGIT
    if (debugsw && debugdf) {
        printf ("delpnts(): curfunc = %d curvce = %d\n", curfunc, curvce);
        printf ("delpnts(): idfpt1=%d, pif=%d, np=%d, pt1=%d, vp=$%lX, fp=$%lX\n",
            (0x00FF & fp->idfpt1), pif, np, pt1, vp, fp);
    }
#endif

    if (np <= 0)                        /* have to delete at least 1 point */
        return (FAILURE);

    if (subj >= pif)                    /* make sure point number is valid */
        return (FAILURE);

    if ((pif - np) < 0)                 /* make sure we have enough points */
        return (FAILURE);

    if ((subj + np) >= (pif + 1))       /* check the span */
        return (FAILURE);

    pt2 = pt1 + np;                     /* move from point */
    nmv = NIPNTS - pt2;                 /* move count */

#if DEBUGIT
    if (debugsw && debugdf) {
        printf ("delpnts(): pt2=%d, nmv=%d\n", pt2, nmv);
        printf (" fnc pif\n");
        for (cf = 0; cf < NFINST; cf++)
            printf (" %3d %3d%s\n", cf, vp->idhfnc[cf].idfpif,
                (cf == curfunc) ? " <-- curfunc" : "");
        printf ("\n");
    }
#endif
/*
*/
    oldi = setipl (FPU_DI);             /* +++++ disable FPU interrupts +++++ */

    fpu = io_fpu + FPU_OFNC + (curvce << 8);    /* get fpu base */

    for (i = 0; i < NFINST; i++) {      /* stop all functions for this voice */
        fp = &vp->idhfnc[i];            /* point at the function */
        *(fpu + (fnoff[i] << 4) + FPU_TCTL) =
            (fp->idftmd = (fp->idftmd & ~3) | 1);
    }

    fp = &vp->idhfnc[curfunc];          /* point at the function */

    if (subj) {                         /* deleting trailing points */

        /* move points down */

        pp1 = &vp->idhpnt[pt1];
        pp2 = &vp->idhpnt[pt2];

        for (i = nmv * PT_SIZE; i > 0; i--)
            *pp1++ = *pp2++;

        /* adjust total points remaining */

        vp->idhplft += np;

        /* adjust number of points in this function */

        vp->idhfnc[curfunc].idfpif -= np;

        /* adjust starting points in other functions */

        for (cf = curfunc + 1; cf < NFINST; cf++)
            vp->idhfnc[cf].idfpt1 -= np;

        setipl (oldi);                  /* +++++ restore interrupts +++++ */

        edfunc (curfunc);               /* set new current point */
        subj -= 1;
/*
*/
    } else {                            /* deleting all points */

        /* reset first point in function */

        pp = &vp->idhpnt[fp->idfpt1];

        pp->iptim = FPU_MINT;
        pp->ipval = finival[curfunc];
        pp->ipvmlt = 0;
        pp->ipvsrc = SM_NONE;
        pp->ipact = AC_NULL;
        pp->ippar1 = 0;
        pp->ippar2 = 0;
        pp->ippar3 = 0;

        /* adjust functions */

        if (np > 1) {                   /* if deleting more points than 1 ... */

            --nmv;                      /* one less point to move */
            ++pt1;                      /* start one slot up */

            /* move points down */

            pp1 = &vp->idhpnt[pt1];
            pp2 = &vp->idhpnt[pt2];

            for (i = nmv * PT_SIZE; i > 0; i--)
                *pp1++ = *pp2++;

            /* adjust total points remaining */

            vp->idhplft += (np - 1);

            /* adjust number of points in this function */

            vp->idhfnc[curfunc].idfpif -= (np - 1);

            /* adjust starting points in other functions */

            for (cf = curfunc + 1; cf < NFINST; cf++)
                vp->idhfnc[cf].idfpt1 -= (np - 1);
        }

        setipl (oldi);                  /* restore interrupts */

        edfunc (curfunc);               /* make point 0 current */
        subj = 0;
    }
/*
*/
#if DEBUGIT
    if (debugsw && debugdf) {
        printf ("delpnts(): plft = %3d pif = %3d subj = %3d\n",
            vp->idhplft, vp->idhfnc[curfunc].idfpif, subj);
        printf (" fnc pif\n");
        for (cf = 0; cf < NFINST; cf++)
            printf (" %3d %3d%s\n", cf, vp->idhfnc[cf].idfpif,
                (cf == curfunc) ? " <-- curfunc" : "");
        printf ("\n");
    }
#endif

    pntsel ();
    pntsv = 0;
    showpt (1);
    modinst ();

    return (SUCCESS);
}