/* Record, in __min_ipl, the IPL that was active while this device
   interrupt was serviced (variant with the older pt_regs-style handler
   signature), then dispatch to the generic Cabriolet handler. */
static void
pc164_device_interrupt(unsigned long v, struct pt_regs *r)
{
	__min_ipl = getipl();
	cabriolet_device_interrupt(v, r);
	__min_ipl = 0;
}
/* Same wrapper for the handler signature without a pt_regs argument. */
static void
pc164_device_interrupt(unsigned long v)
{
	__min_ipl = getipl();
	cabriolet_device_interrupt(v);
	__min_ipl = 0;
}
/* Same pattern around the SRM console interrupt dispatcher. */
static void
pc164_srm_device_interrupt(unsigned long v)
{
	__min_ipl = getipl();
	srm_device_interrupt(v);
	__min_ipl = 0;
}
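The three wrappers above share one pattern: stash the IPL that was active while a device interrupt is serviced in __min_ipl, then clear it when the dispatcher returns. Below is a minimal sketch of that pattern factored into a single helper, assuming only the __min_ipl and getipl() symbols already visible above; the helper itself is illustrative and not part of the source tree.

static void
ipl_tracking_dispatch(void (*handler)(unsigned long), unsigned long v)
{
	/* remember the IPL at interrupt entry */
	__min_ipl = getipl();
	/* run the real device-interrupt dispatcher */
	handler(v);
	/* nothing is being serviced any more */
	__min_ipl = 0;
}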
Example #4
void mutexwait(struct _pcb * p, struct _mtx * m)
{
    /** store mutex address in efwm */
    p->pcb$l_efwm = (unsigned long) m; // check: pcb$l_efwm is only a longword, so the address may be truncated (the "32 bit problem")
    /** new pcb state MWAIT */
    p->pcb$w_state = SCH$C_MWAIT;
    /** insert into MWAIT scheduling queue */
    insque(p,sch$aq_wqhdr[SCH$C_MWAIT].wqh$l_wqfl);
    /** remember the current IPL so it can be restored after the wait */
    int ipl=getipl();
    /** put on wait */
    sch$waitl(p, &sch$aq_wqhdr[SCH$C_MWAIT]);
    setipl(ipl);
}
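A hypothetical caller of mutexwait(), not taken from the source tree: the acquiring process tests the mutex and, while it is owned by someone else, parks itself on the MWAIT queue. mtx_busy() is an assumed stand-in for whatever owner-count test the real acquire path performs.

static void mutex_acquire_sketch(struct _pcb *p, struct _mtx *m)
{
    /* mtx_busy() is hypothetical - replace with the real ownership test */
    while (mtx_busy(m)) {
        /* mutex already owned: wait on the MWAIT queue until the owner
           releases it and the scheduler makes this process computable */
        mutexwait(p, m);
    }
    /* ... take ownership of m here ... */
}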
Example #5
signed int mmg$ininewpfn(struct _pcb * p, struct _phd * phd, void * va, struct _mypte * pte)
{
    int ipl=getipl();
    setipl(8); // check: raise IPL while a page is taken off the free list
    signed long pfn=mmg$allocpfn();
    setipl(ipl);
    struct _pfn * page;
    if (pfn&0x80000000) return pfn; // bit 31 set: mmg$allocpfn() failed, propagate the error status
    if ((((int)va)&WSL$M_PAGTYP)>=WSL$C_GLOBAL)
    {
        phd=mmg$gl_sysphd;
        pte=&((struct _mypte *)mmg$gq_gpt_base)[pte->pte$v_gptx];
        // not implemented yet
    }
    if ((((unsigned long)va)&0x80000000) == 0)
    {
        mmg$incptref(p->pcb$l_phd,pte);
    }
    // wrong page=&((struct _pfn *)pfn$al_head[PFN$C_FREPAGLST])[pfn];
    // also set page type
    mem_map[pfn].pfn$v_pagtyp=((unsigned long)va)&PFN$M_PAGTYP;
    //  mem_map[pfn].virtual=__va(pfn*PAGE_SIZE); // not necessary
    //mem_map[pfn].count.counter=1;
    mem_map[pfn].pfn$l_pt_pfn=0;
#if 0
#ifdef __i386__
    // check. debug
    mem_map[pfn].pfn$l_pt_pfn=va;
#endif
#endif
    mem_map[pfn].pfn$q_pte_index=pte; // hope it's the right one?

    page=&mem_map[pfn];
    //set_page_count(page, 1);
    mem_map[pfn].pfn$l_refcnt=1; // aah bug

    mmg$makewsle(p,p->pcb$l_phd,va,pte,pfn);
    return pfn;
}
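A hypothetical caller sketch for mmg$ininewpfn(), not from the source tree: since the routine propagates any error from mmg$allocpfn() with bit 31 set, a fault path has to test that bit before using the return value as a mem_map index. The name fault_in_page_sketch is illustrative only.

static signed int fault_in_page_sketch(struct _pcb *p, struct _phd *phd,
                                       void *va, struct _mypte *pte)
{
    signed int pfn = mmg$ininewpfn(p, phd, va, pte);
    if (pfn & 0x80000000)
        return pfn; /* error status - pass it back unchanged */
    /* mem_map[pfn] is initialized and a working-set list entry has been
       made by mmg$makewsle(); the caller would now finish mapping va */
    return pfn;
}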
Example #6
asmlinkage void sch$sched(int from_sch$resched) {
  int cpuid = smp_processor_id();
  struct _cpu * cpu=smp$gl_cpu_data[cpuid]; 
  struct _pcb *next = 0, *curpcb;
  int curpri, affinity;
  unsigned char tmppri;
  unsigned long qhead = 0;
  int after, before;

  curpcb=cpu->cpu$l_curpcb;
  curpri=cpu->cpu$b_cur_pri;

  //  if (!countme--) { countme=500; printk("."); }

  if (from_sch$resched == 1)
    goto skip_lock;

#if 0

  // NOT YET??? nope, not an interrupt. pushpsl+setipl/vmslock instead?

  if (intr_blocked(IPL$_SCHED))
    return;

  regtrap(REG_INTR,IPL$_SCHED);
#endif

  /** sanity check: sch$sched must be entered at IPL 8 (IPL$_SCHED) with the SCHED spinlock taken */
  int ipl = getipl();
  if (ipl != 8 || SPIN_SCHED.spl$l_spinlock == 0)
    panic("schsch\n");

#if 0
  // temp workaround
  // must avoid nesting, since I do not know how to get out of it
  setipl(IPL$_SCHED);
  vmslock(&SPIN_SCHED,-1);
#endif

  /** clear cpu_priority for current pri bit - TODO: where did this come from? */

  sch$al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );

  /** skip if ... TODO: from where? */

  if (sch$al_cpu_priority[curpri]) 
    goto skip_lock;

  /** clear active_priority for current pri bit - TODO: where did this come from? */

  sch$gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));

  //if (spl(IPL$_SCHED)) return;
  //  old=spl(IPL$_SCHED);

  /** now 4 linux leftovers */

  spin_lock_prefetch(&runqueue_lock);

  if (!curpcb->active_mm) BUG();

  release_kernel_lock(curpcb, cpuid);

  spin_lock_irq(&runqueue_lock);

 skip_lock:

  /** reset cpu affinity TODO: from where? */

  affinity=0;
  struct _pcb * aff_next = 0;

  /** find highest pri comqueue */

  tmppri=ffs(sch$gl_comqs);
#ifdef DEBUG_SCHED
  if (mydebug5)
    printk("ffs %x %x\n",tmppri,sch$gl_comqs);
#endif

  if (!tmppri) {
    /** if none found, idle */
#if 0
    // spot for more vms sched
    goto sch$idle;
#endif
  go_idle:
    /** set bit in idle_cpus */
    sch$gl_idle_cpus=sch$gl_idle_cpus | (cpu->cpu$l_cpuid_mask);
    /** store null pcb and -1 pri: MISSING check why */
    /** necessary idle_task line from linux */
    next=idle_task(cpuid);
    goto skip_cap;
  } else {