/*ARGSUSED*/ void trap(struct frame *fp, int type, u_int code, u_int v) { extern char fubail[], subail[]; struct lwp *l; struct proc *p; struct pcb *pcb; void *onfault; ksiginfo_t ksi; int s; int rv; u_quad_t sticks; curcpu()->ci_data.cpu_ntrap++; l = curlwp; p = l->l_proc; pcb = lwp_getpcb(l); KSI_INIT_TRAP(&ksi); ksi.ksi_trap = type & ~T_USER; if (USERMODE(fp->f_sr)) { type |= T_USER; sticks = p->p_sticks; l->l_md.md_regs = fp->f_regs; LWP_CACHE_CREDS(l, p); } else sticks = 0; switch (type) { default: dopanic: printf("trap type %d, code = 0x%x, v = 0x%x\n", type, code, v); printf("%s program counter = 0x%x\n", (type & T_USER) ? "user" : "kernel", fp->f_pc); /* * Let the kernel debugger see the trap frame that * caused us to panic. This is a convenience so * one can see registers at the point of failure. */ s = splhigh(); #ifdef KGDB /* If connected, step or cont returns 1 */ if (kgdb_trap(type, (db_regs_t *)fp)) goto kgdb_cont; #endif #ifdef DDB (void)kdb_trap(type, (db_regs_t *)fp); #endif #ifdef KGDB kgdb_cont: #endif splx(s); if (panicstr) { printf("trap during panic!\n"); #ifdef DEBUG /* XXX should be a machine-dependent hook */ printf("(press a key)\n"); (void)cngetc(); #endif } regdump((struct trapframe *)fp, 128); type &= ~T_USER; if ((u_int)type < trap_types) panic(trap_type[type]); panic("trap"); case T_BUSERR: /* Kernel bus error */ onfault = pcb->pcb_onfault; if (onfault == NULL) goto dopanic; rv = EFAULT; /* * If we have arranged to catch this fault in any of the * copy to/from user space routines, set PC to return to * indicated location and set flag informing buserror code * that it may need to clean up stack frame. */ copyfault: fp->f_stackadj = exframesize[fp->f_format]; fp->f_format = fp->f_vector = 0; fp->f_pc = (int)onfault; fp->f_regs[D0] = rv; return; case T_BUSERR|T_USER: /* Bus error */ case T_ADDRERR|T_USER: /* Address error */ ksi.ksi_addr = (void *)v; ksi.ksi_signo = SIGBUS; ksi.ksi_code = (type == (T_BUSERR|T_USER)) ? BUS_OBJERR : BUS_ADRERR; break; case T_ILLINST|T_USER: /* Illegal instruction fault */ case T_PRIVINST|T_USER: /* Privileged instruction fault */ ksi.ksi_addr = (void *)(int)fp->f_format; /* XXX was ILL_PRIVIN_FAULT */ ksi.ksi_signo = SIGILL; ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ? ILL_PRVOPC : ILL_ILLOPC; break; /* * divde by zero, CHK/TRAPV inst */ case T_ZERODIV|T_USER: /* Divide by zero trap */ ksi.ksi_code = FPE_FLTDIV; case T_CHKINST|T_USER: /* CHK instruction trap */ case T_TRAPVINST|T_USER: /* TRAPV instruction trap */ ksi.ksi_addr = (void *)(int)fp->f_format; ksi.ksi_signo = SIGFPE; break; /* * User coprocessor violation */ case T_COPERR|T_USER: /* XXX What is a proper response here? */ ksi.ksi_signo = SIGFPE; ksi.ksi_code = FPE_FLTINV; break; /* * 6888x exceptions */ case T_FPERR|T_USER: /* * We decode the 68881 status register which locore * stashed in code for us. */ ksi.ksi_signo = SIGFPE; ksi.ksi_code = fpsr2siginfocode(code); break; /* * FPU faults in supervisor mode. */ case T_ILLINST: /* fnop generates this, apparently. */ case T_FPEMULI: case T_FPEMULD: { extern label_t *nofault; if (nofault) /* If we're probing. */ longjmp(nofault); if (type == T_ILLINST) printf("Kernel Illegal Instruction trap.\n"); else printf("Kernel FPU trap.\n"); goto dopanic; } /* * Unimplemented FPU instructions/datatypes. */ case T_FPEMULI|T_USER: case T_FPEMULD|T_USER: #ifdef FPU_EMULATE if (fpu_emulate(fp, &pcb->pcb_fpregs, &ksi) == 0) ; /* XXX - Deal with tracing? 
(fp->f_sr & PSL_T) */ #else uprintf("pid %d killed: no floating point support.\n", p->p_pid); ksi.ksi_signo = SIGILL; ksi.ksi_code = ILL_ILLOPC; #endif break; case T_COPERR: /* Kernel coprocessor violation */ case T_FMTERR: /* Kernel format error */ case T_FMTERR|T_USER: /* User format error */ /* * The user has most likely trashed the RTE or FP state info * in the stack frame of a signal handler. */ printf("pid %d: kernel %s exception\n", p->p_pid, type==T_COPERR ? "coprocessor" : "format"); type |= T_USER; mutex_enter(p->p_lock); SIGACTION(p, SIGILL).sa_handler = SIG_DFL; sigdelset(&p->p_sigctx.ps_sigignore, SIGILL); sigdelset(&p->p_sigctx.ps_sigcatch, SIGILL); sigdelset(&l->l_sigmask, SIGILL); mutex_exit(p->p_lock); ksi.ksi_signo = SIGILL; ksi.ksi_addr = (void *)(int)fp->f_format; /* XXX was ILL_RESAD_FAULT */ ksi.ksi_code = (type == T_COPERR) ? ILL_COPROC : ILL_ILLOPC; break; /* * XXX: Trace traps are a nightmare. * * HP-UX uses trap #1 for breakpoints, * NetBSD/m68k uses trap #2, * SUN 3.x uses trap #15, * DDB and KGDB uses trap #15 (for kernel breakpoints; * handled elsewhere). * * NetBSD and HP-UX traps both get mapped by locore.s into T_TRACE. * SUN 3.x traps get passed through as T_TRAP15 and are not really * supported yet. * * XXX: We should never get kernel-mode T_TRAP15 because * XXX: locore.s now gives it special treatment. */ case T_TRAP15: /* SUN trace trap */ #ifdef DEBUG printf("unexpected kernel trace trap, type = %d\n", type); printf("program counter = 0x%x\n", fp->f_pc); #endif fp->f_sr &= ~PSL_T; ksi.ksi_signo = SIGTRAP; break; case T_TRACE|T_USER: /* user trace trap */ #ifdef COMPAT_SUNOS /* * SunOS uses Trap #2 for a "CPU cache flush". * Just flush the on-chip caches and return. */ if (p->p_emul == &emul_sunos) { ICIA(); DCIU(); return; } #endif /* FALLTHROUGH */ case T_TRACE: /* tracing a trap instruction */ case T_TRAP15|T_USER: /* SUN user trace trap */ fp->f_sr &= ~PSL_T; ksi.ksi_signo = SIGTRAP; break; case T_ASTFLT: /* System async trap, cannot happen */ goto dopanic; case T_ASTFLT|T_USER: /* User async trap. */ astpending = 0; /* * We check for software interrupts first. This is because * they are at a higher level than ASTs, and on a VAX would * interrupt the AST. We assume that if we are processing * an AST that we must be at IPL0 so we don't bother to * check. Note that we ensure that we are at least at SIR * IPL while processing the SIR. */ spl1(); /* fall into... */ case T_SSIR: /* Software interrupt */ case T_SSIR|T_USER: /* * If this was not an AST trap, we are all done. */ if (type != (T_ASTFLT|T_USER)) { curcpu()->ci_data.cpu_ntrap--; return; } spl0(); if (l->l_pflag & LP_OWEUPC) { l->l_pflag &= ~LP_OWEUPC; ADDUPROF(l); } if (curcpu()->ci_want_resched) preempt(); goto out; case T_MMUFLT: /* Kernel mode page fault */ /* * If we were doing profiling ticks or other user mode * stuff from interrupt code, Just Say No. */ onfault = pcb->pcb_onfault; if (onfault == fubail || onfault == subail) { rv = EFAULT; goto copyfault; } /* fall into... */ case T_MMUFLT|T_USER: /* page fault */ { vaddr_t va; struct vmspace *vm = p->p_vmspace; struct vm_map *map; vm_prot_t ftype; extern struct vm_map *kernel_map; onfault = pcb->pcb_onfault; #ifdef DEBUG if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid)) printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n", p->p_pid, code, v, fp->f_pc, fp->f_sr); #endif /* * It is only a kernel address space fault iff: * 1. (type & T_USER) == 0 and * 2. pcb_onfault not set or * 3. 
pcb_onfault set but supervisor data fault * The last can occur during an exec() copyin where the * argument space is lazy-allocated. */ if (type == T_MMUFLT && (onfault == NULL || KDFAULT(code))) map = kernel_map; else { map = vm ? &vm->vm_map : kernel_map; } if (WRFAULT(code)) ftype = VM_PROT_WRITE; else ftype = VM_PROT_READ; va = trunc_page((vaddr_t)v); #ifdef DEBUG if (map == kernel_map && va == 0) { printf("trap: bad kernel access at %x\n", v); goto dopanic; } #endif pcb->pcb_onfault = NULL; rv = uvm_fault(map, va, ftype); pcb->pcb_onfault = onfault; #ifdef DEBUG if (rv && MDB_ISPID(p->p_pid)) printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n", map, va, ftype, rv); #endif /* * If this was a stack access, we keep track of the maximum * accessed stack size. Also, if vm_fault gets a protection * failure, it is due to accessing the stack region outside * the current limit and we need to reflect that as an access * error. */ if (rv == 0) { if (map != kernel_map && (void *)va >= vm->vm_maxsaddr) uvm_grow(p, va); if (type == T_MMUFLT) { if (ucas_ras_check(&fp->F_t)) { return; } #if defined(M68040) if (mmutype == MMU_68040) (void)writeback(fp, 1); #endif return; } goto out; } if (rv == EACCES) { ksi.ksi_code = SEGV_ACCERR; rv = EFAULT; } else ksi.ksi_code = SEGV_MAPERR; if (type == T_MMUFLT) { if (onfault) goto copyfault; printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n", map, va, ftype, rv); printf(" type %x, code [mmu,,ssw]: %x\n", type, code); goto dopanic; } ksi.ksi_addr = (void *)v; if (rv == ENOMEM) { printf("UVM: pid %d (%s), uid %d killed: out of swap\n", p->p_pid, p->p_comm, l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); ksi.ksi_signo = SIGKILL; } else { ksi.ksi_signo = SIGSEGV; } break; } } if (ksi.ksi_signo) trapsignal(l, &ksi); if ((type & T_USER) == 0) return; out: userret(l, fp, sticks, v, 1); }
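/*
 * The other half of the "copyfault" protocol above, as a minimal sketch:
 * a copy routine arms pcb_onfault with a recovery address before touching
 * user memory and clears it afterwards.  If the access faults, trap()
 * redirects the saved PC to that address and places EFAULT in D0.  The
 * function name and the use of a GNU C label address here are illustrative
 * assumptions; the real fubail/subail/onfault stubs are written in assembly.
 */
int
copyin_sketch(const void *uaddr, void *kaddr, size_t len)
{
	struct pcb *pcb = lwp_getpcb(curlwp);

	pcb->pcb_onfault = &&fault;	/* where trap() will resume us */
	memcpy(kaddr, uaddr, len);	/* may fault on a bad user address */
	pcb->pcb_onfault = NULL;
	return 0;

fault:
	pcb->pcb_onfault = NULL;
	return EFAULT;			/* matches the value trap() put in D0 */
}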
/* * Attach a found zs. * * Match slave number to zs unit number, so that misconfiguration will * not set up the keyboard as ttya, etc. */ static void zs_hpc_attach(device_t parent, device_t self, void *aux) { struct zsc_softc *zsc = device_private(self); struct hpc_attach_args *haa = aux; struct zsc_attach_args zsc_args; struct zs_chanstate *cs; struct zs_channel *ch; int zs_unit, channel, err, s; const char *promconsdev; promconsdev = ARCBIOS->GetEnvironmentVariable("ConsoleOut"); zsc->zsc_dev = self; zsc->zsc_bustag = haa->ha_st; if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, haa->ha_devoff, 0x10, &zsc->zsc_base)) != 0) { aprint_error(": unable to map 85c30 registers, error = %d\n", err); return; } zs_unit = device_unit(self); aprint_normal("\n"); /* * Initialize software state for each channel. * * Done in reverse order of channels since the first serial port * is actually attached to the *second* channel, and vice versa. * Doing it this way should force a 'zstty*' to attach zstty0 to * channel 1 and zstty1 to channel 0. They couldn't have wired * it up in a more sensible fashion, could they? */ for (channel = 1; channel >= 0; channel--) { zsc_args.channel = channel; ch = &zsc->zsc_cs_store[channel]; cs = zsc->zsc_cs[channel] = (struct zs_chanstate *)ch; zs_lock_init(cs); cs->cs_reg_csr = NULL; cs->cs_reg_data = NULL; cs->cs_channel = channel; cs->cs_private = NULL; cs->cs_ops = &zsops_null; cs->cs_brg_clk = PCLK / 16; if (bus_space_subregion(zsc->zsc_bustag, zsc->zsc_base, zs_chan_offset[channel], sizeof(struct zschan), &ch->cs_regs) != 0) { aprint_error_dev(self, "cannot map regs\n"); return; } ch->cs_bustag = zsc->zsc_bustag; memcpy(cs->cs_creg, zs_init_reg, 16); memcpy(cs->cs_preg, zs_init_reg, 16); zsc_args.hwflags = 0; zsc_args.consdev = NULL; if (zs_consunit == -1 && zs_conschan == -1) { /* * If this channel is being used by the PROM console, * pass the generic zs driver a 'no reset' flag so the * channel gets left in the appropriate state after * attach. * * Note: the channel mappings are swapped. */ if (promconsdev != NULL && strlen(promconsdev) == 9 && strncmp(promconsdev, "serial", 6) == 0 && (promconsdev[7] == '0' || promconsdev[7] == '1')) { if (promconsdev[7] == '1' && channel == 0) zsc_args.hwflags |= ZS_HWFLAG_NORESET; else if (promconsdev[7] == '0' && channel == 1) zsc_args.hwflags |= ZS_HWFLAG_NORESET; } } /* If console, don't stomp speed, let zstty know */ if (zs_unit == zs_consunit && channel == zs_conschan) { zsc_args.consdev = &zs_cn; zsc_args.hwflags = ZS_HWFLAG_CONSOLE; cs->cs_defspeed = zs_get_speed(cs); } else cs->cs_defspeed = zs_defspeed; cs->cs_defcflag = zs_def_cflag; /* Make these correspond to cs_defcflag (-crtscts) */ cs->cs_rr0_dcd = ZSRR0_DCD; cs->cs_rr0_cts = 0; cs->cs_wr5_dtr = ZSWR5_DTR | ZSWR5_RTS; cs->cs_wr5_rts = 0; /* * Clear the master interrupt enable. * The INTENA is common to both channels, * so just do it on the A channel. */ if (channel == 0) { zs_write_reg(cs, 9, 0); } /* * Look for a child driver for this channel. * The child attach will setup the hardware. */ if (!config_found(self, (void *)&zsc_args, zs_print)) { /* No sub-driver. Just reset it. */ uint8_t reset = (channel == 0) ? 
ZSWR9_A_RESET : ZSWR9_B_RESET; s = splhigh(); zs_write_reg(cs, 9, reset); splx(s); } } zsc->sc_si = softint_establish(SOFTINT_SERIAL, zssoft, zsc); cpu_intr_establish(haa->ha_irq, IPL_TTY, zshard, NULL); evcnt_attach_dynamic(&zsc->zsc_intrcnt, EVCNT_TYPE_INTR, NULL, device_xname(self), "intr"); /* * Set the master interrupt enable and interrupt vector. * (common to both channels, do it on A) */ cs = zsc->zsc_cs[0]; s = splhigh(); /* interrupt vector */ zs_write_reg(cs, 2, zs_init_reg[2]); /* master interrupt control (enable) */ zs_write_reg(cs, 9, zs_init_reg[9]); splx(s); }
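/*
 * Small helper restating the ConsoleOut test above.  The "serial(N)" string
 * shape is an assumption inferred from the strlen() == 9 / character-7 checks;
 * the return value uses the swapped mapping noted in the comment (PROM port 0
 * lives on SCC channel 1 and vice versa).
 */
static int
promcons_channel(const char *promconsdev)
{
	if (promconsdev != NULL && strlen(promconsdev) == 9 &&
	    strncmp(promconsdev, "serial", 6) == 0 &&
	    (promconsdev[7] == '0' || promconsdev[7] == '1'))
		return promconsdev[7] == '0' ? 1 : 0;	/* swapped channels */
	return -1;	/* not the PROM serial console */
}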
void gpio_init(uint32_t pin, uint32_t mode){ int plx = splhigh(); int io = pin >> 5; Pio *addr = _gpio_addr(pin); pin &= 0x1F; _unprotect(addr); // (1:input schmitt) (1:input deglitch) (2:pull up/dn) (1:push/pull,open-drain) (2:AF) (2:mode) switch( mode & 3 ){ case GPIO_INPUT: _enable_pio(io); // fall through case GPIO_ANALOG: addr->PIO_PER = 1<<pin; addr->PIO_ODR = 1<<pin; break; case GPIO_OUTPUT: _enable_pio(io); addr->PIO_PER = 1<<pin; addr->PIO_OER = 1<<pin; addr->PIO_OWER = 1<<pin; // permit writes via odsr break; default: addr->PIO_PDR = 1<<pin; addr->PIO_ODR = 1<<pin; switch( mode & 0xF ){ case GPIO_AF_A: addr->PIO_ABCDSR[0] &= ~(1<<pin); addr->PIO_ABCDSR[1] &= ~(1<<pin); break; case GPIO_AF_B: addr->PIO_ABCDSR[0] |= 1<<pin; addr->PIO_ABCDSR[1] &= ~(1<<pin); break; case GPIO_AF_C: addr->PIO_ABCDSR[0] &= ~(1<<pin); addr->PIO_ABCDSR[1] |= 1<<pin; break; case GPIO_AF_D: addr->PIO_ABCDSR[0] |= 1<<pin; addr->PIO_ABCDSR[1] |= 1<<pin; break; } break; } // push-pull or open-drain if( mode & GPIO_OPEN_DRAIN ) addr->PIO_MDER |= 1<<pin; else addr->PIO_MDDR |= 1<<pin; // pull up, pull down? if( mode & GPIO_PULL_UP ) addr->PIO_PUER |= 1<<pin; else addr->PIO_PUDR |= 1<<pin; if( mode & GPIO_PULL_DN ) addr->PIO_PPDER |= 1<<pin; else addr->PIO_PPDDR |= 1<<pin; // input filters if( mode & GPIO_SCHMITT ) addr->PIO_SCHMITT |= 1<<pin; else addr->PIO_SCHMITT &= ~(1<<pin); // ... _protect(addr); splx(plx); }
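/*
 * Usage sketch for gpio_init().  Only flag names the function itself tests
 * are used (GPIO_OUTPUT, GPIO_INPUT, GPIO_AF_A, GPIO_OPEN_DRAIN, GPIO_PULL_UP,
 * GPIO_SCHMITT); their encodings and the concrete pin numbers below are
 * assumptions.  "pin >> 5" and "pin &= 0x1F" imply a flat numbering of
 * 32 pins per PIO controller.
 */
void
gpio_example(void)
{
	/* pin 17 on controller 0 as a push-pull output */
	gpio_init(17, GPIO_OUTPUT);

	/* pin 2 on controller 1 (32 + 2) as an input with pull-up and schmitt trigger */
	gpio_init(34, GPIO_INPUT | GPIO_PULL_UP | GPIO_SCHMITT);

	/* hand pin 9 to peripheral function A, open-drain */
	gpio_init(9, GPIO_AF_A | GPIO_OPEN_DRAIN);
}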
static u_int cpi_get_timecount(struct timecounter *tc) { int s; uint msw, msw2, lsw; uint8_t reg; bus_space_tag_t bst; bus_space_handle_t bsh; bst = ((struct cpi_softc *)tc->tc_priv)->sc_bst; bsh = ((struct cpi_softc *)tc->tc_priv)->sc_bsh; /* * We run CIO counters 1 and 2 in an internally coupled mode, * where the output of counter 1 (LSW) clocks counter 2 (MSW). * The counters are buffered, and the buffers have to be * locked before we can read out a consistent counter * value. Reading the LSB releases the buffer lock. * * Unfortunately, there is no such mechanism between MSW and * LSW of the coupled counter. To ensure a consistent * read-out, we read the MSW, then the LSW, then re-read the * MSW and compare with the old value. If we find that the MSW * has just been incremented, we re-read the LSW. This avoids * a race that could leave us with a new (just wrapped) LSW * and an old MSW value. * * For simplicity, we roll the procedure into a loop - the * rollover case is rare. */ do { #define delay(a) /* Guard HW timer access */ s = splhigh(); /* Lock counter 2 latch in preparation for read-out */ bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCSR2); delay(1); reg = bus_space_read_1(bst, bsh, CIO_CTRL); bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCSR2); bus_space_write_1(bst, bsh, CIO_CTRL, CTCSR_MASK(reg | CTCS_RCC)); /* Read out counter 2 MSB,then LSB (releasing the latch) */ bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCCR2_MSB); delay(1); msw = bus_space_read_1(bst, bsh, CIO_CTRL) << 8; bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCCR2_LSB); delay(1); msw |= bus_space_read_1(bst, bsh, CIO_CTRL); /* Lock counter 1 latch in preparation for read-out */ bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCSR1); delay(1); reg = bus_space_read_1(bst, bsh, CIO_CTRL); bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCSR1); bus_space_write_1(bst, bsh, CIO_CTRL, CTCSR_MASK(reg |CTCS_RCC)); /* Read out counter 1 MSB,then LSB (releasing the latch) */ bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCCR1_MSB); delay(1); lsw = bus_space_read_1(bst, bsh, CIO_CTRL) << 8; bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCCR1_LSB); delay(1); lsw |= bus_space_read_1(bst, bsh, CIO_CTRL); /* Lock counter 2 latch in preparation for read-out */ bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCSR2); delay(1); reg = bus_space_read_1(bst, bsh, CIO_CTRL); bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCSR2); bus_space_write_1(bst, bsh, CIO_CTRL, CTCSR_MASK(reg | CTCS_RCC)); /* Read out counter 2 MSB,then LSB (releasing the latch) */ bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCCR2_MSB); delay(1); msw2 = bus_space_read_1(bst, bsh, CIO_CTRL) << 8; bus_space_write_1(bst, bsh, CIO_CTRL, Z8536_CTCCR2_LSB); delay(1); msw2 |= bus_space_read_1(bst, bsh, CIO_CTRL); splx(s); } while (msw2 != msw); /* timecounter expects an upward counter */ return ~0u - ((msw << 16) | lsw); }
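/*
 * The MSW/LSW re-read above is a general pattern for sampling a wide counter
 * through two narrow reads that cannot be latched together.  A stripped-down
 * sketch of just that pattern (read16_msw()/read16_lsw() are hypothetical
 * stand-ins for the Z8536 register accesses; the real routine also inverts
 * the result because the CIO counts down while timecounters count up):
 */
static unsigned int
read_split_counter(void)
{
	unsigned int msw, msw2, lsw;

	do {
		msw = read16_msw();	/* first MSW sample */
		lsw = read16_lsw();	/* the LSW may wrap right here... */
		msw2 = read16_msw();	/* ...so confirm the MSW is unchanged */
	} while (msw2 != msw);

	return (msw << 16) | lsw;
}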
/* * kdb_trap - field a TRACE or BPT trap */ int kdb_trap(int type, struct trapframe *tf) { db_regs_t dbregs; int s; #if NFB > 0 fb_unblank(); #endif switch (type) { case T_BREAKPOINT: /* breakpoint */ case -1: /* keyboard interrupt */ break; default: if (!db_onpanic && db_recover==0) return (0); printf("kernel: %s trap\n", trap_type[type & 0xff]); if (db_recover != 0) { db_error("Faulted in DDB; continuing...\n"); /*NOTREACHED*/ } } #ifdef MULTIPROCESSOR if (!db_suspend_others()) { ddb_suspend(tf); return 1; } #endif /* Initialise local dbregs storage from trap frame */ dbregs.db_tf = *tf; dbregs.db_fr = *(struct frame *)tf->tf_out[6]; /* Setup current CPU & reg pointers */ ddb_cpuinfo = curcpu(); curcpu()->ci_ddb_regs = ddb_regp = &dbregs; /* Should switch to kdb`s own stack here. */ s = splhigh(); db_active++; cnpollc(true); db_trap(type, 0/*code*/); cnpollc(false); db_active--; splx(s); /* Update trap frame from local dbregs storage */ *(struct frame *)tf->tf_out[6] = dbregs.db_fr; *tf = dbregs.db_tf; curcpu()->ci_ddb_regs = ddb_regp = 0; ddb_cpuinfo = NULL; #ifdef MULTIPROCESSOR db_resume_others(); #endif return (1); }
static int ct_start_selection(struct ct_softc *ct, struct slccb *cb) { struct scsi_low_softc *slp = &ct->sc_sclow; struct ct_bus_access_handle *chp = &ct->sc_ch; struct targ_info *ti = slp->sl_Tnexus; struct lun_info *li = slp->sl_Lnexus; int s, satok; u_int8_t cmd; ct->sc_tmaxcnt = cb->ccb_tcmax * 1000 * 1000; ct->sc_atten = 0; satok = 0; if (scsi_low_is_disconnect_ok(cb) != 0) { if (ct->sc_chiprev >= CT_WD33C93_A) satok = 1; else if (cthw_cmdlevel[slp->sl_scp.scp_cmd[0]] != 0) satok = 1; } if (satok != 0 && scsi_low_is_msgout_continue(ti, SCSI_LOW_MSG_IDENTIFY) == 0) { cmd = WD3S_SELECT_ATN_TFR; ct->sc_satgo = CT_SAT_GOING; } else { cmd = WD3S_SELECT_ATN; ct->sc_satgo = 0; } if ((ct_stat_read_1(chp) & (STR_BSY | STR_INT | STR_CIP)) != 0) return SCSI_LOW_START_FAIL; if ((ct->sc_satgo & CT_SAT_GOING) != 0) { (void) scsi_low_msgout(slp, ti, SCSI_LOW_MSGOUT_INIT); scsi_low_cmd(slp, ti); ct_cr_write_1(chp, wd3s_oid, slp->sl_scp.scp_cmdlen); ct_write_cmds(chp, slp->sl_scp.scp_cmd, slp->sl_scp.scp_cmdlen); } else { /* anyway attention assert */ SCSI_LOW_ASSERT_ATN(slp); } ct_target_nexus_establish(ct, li->li_lun, slp->sl_scp.scp_direction); s = splhigh(); if ((ct_stat_read_1(chp) & (STR_BSY | STR_INT | STR_CIP)) == 0) { /* XXX: * Reload a lun again here. */ ct_cr_write_1(chp, wd3s_lun, li->li_lun); ct_cr_write_1(chp, wd3s_cmd, cmd); if ((ct_stat_read_1(chp) & STR_LCI) == 0) { splx(s); SCSI_LOW_SETUP_PHASE(ti, PH_SELSTART); return SCSI_LOW_START_OK; } } splx(s); return SCSI_LOW_START_FAIL; }
/* * Create a new thread based on an existing one. * The new thread has name NAME, and starts executing in function FUNC. * DATA1 and DATA2 are passed to FUNC. */ int thread_fork(const char *name, void *data1, unsigned long data2, void (*func)(void *, unsigned long), struct thread **ret) { struct thread *newguy; int s, result; /* Allocate a thread */ newguy = thread_create(name); if (newguy==NULL) { return ENOMEM; } /* Allocate a stack */ newguy->t_stack = kmalloc(STACK_SIZE); if (newguy->t_stack==NULL) { kfree(newguy->t_name); kfree(newguy); return ENOMEM; } /* stick a magic number on the bottom end of the stack */ newguy->t_stack[0] = 0xae; newguy->t_stack[1] = 0x11; newguy->t_stack[2] = 0xda; newguy->t_stack[3] = 0x33; /* Inherit the current directory */ if (curthread->t_cwd != NULL) { VOP_INCREF(curthread->t_cwd); newguy->t_cwd = curthread->t_cwd; } /* Set up the pcb (this arranges for func to be called) */ md_initpcb(&newguy->t_pcb, newguy->t_stack, data1, data2, func); /* Interrupts off for atomicity */ s = splhigh(); /* * Make sure our data structures have enough space, so we won't * run out later at an inconvenient time. */ result = array_preallocate(sleepers, numthreads+1); if (result) { goto fail; } result = array_preallocate(zombies, numthreads+1); if (result) { goto fail; } /* Do the same for the scheduler. */ result = scheduler_preallocate(numthreads+1); if (result) { goto fail; } /* Make the new thread runnable */ result = make_runnable(newguy); if (result != 0) { goto fail; } /* * Increment the thread counter. This must be done atomically * with the preallocate calls; otherwise the count can be * temporarily too low, which would obviate its reason for * existence. */ numthreads++; /* Done with stuff that needs to be atomic */ splx(s); /* * Return new thread structure if it's wanted. Note that * using the thread structure from the parent thread should be * done only with caution, because in general the child thread * might exit at any time. */ if (ret != NULL) { *ret = newguy; } return 0; fail: splx(s); if (newguy->t_cwd != NULL) { VOP_DECREF(newguy->t_cwd); } kfree(newguy->t_stack); kfree(newguy->t_name); kfree(newguy); return result; }
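/*
 * Typical use, as a sketch matching the thread_fork() signature above.
 * hello() and spawn_workers() are made-up names; the explicit thread_exit()
 * assumes the usual convention for forked thread functions.
 */
static void
hello(void *data1, unsigned long data2)
{
	(void)data1;
	kprintf("hello from worker %lu\n", data2);
	thread_exit();
}

static int
spawn_workers(int n)
{
	int i, result;

	for (i = 0; i < n; i++) {
		result = thread_fork("worker", NULL, (unsigned long)i,
				     hello, NULL);
		if (result) {
			return result;	/* e.g. ENOMEM */
		}
	}
	return 0;
}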
/* * void cpu_reboot(int howto, char *bootstr) * * Reboots the system * * Deal with any syncing, unmounting, dumping and shutdown hooks, * then reset the CPU. */ void cpu_reboot(int howto, char *bootstr) { /* * If we are still cold then hit the air brakes * and crash to earth fast */ if (cold) { doshutdownhooks(); printf("The operating system has halted.\n"); printf("Please press any key to reboot.\n\n"); cngetc(); printf("rebooting...\n"); goto reset; } /* Disable console buffering */ /* * If RB_NOSYNC was not specified sync the discs. * Note: Unless cold is set to 1 here, syslogd will die during the * unmount. It looks like syslogd is getting woken up only to find * that it cannot page part of the binary in as the filesystem has * been unmounted. */ if (!(howto & RB_NOSYNC)) bootsync(); /* Say NO to interrupts */ splhigh(); /* Do a dump if requested. */ if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) dumpsys(); /* Run any shutdown hooks */ doshutdownhooks(); /* Make sure IRQ's are disabled */ IRQdisable; if (howto & RB_HALT) { printf("The operating system has halted.\n"); printf("Please press any key to reboot.\n\n"); cngetc(); } printf("rebooting...\n\r"); reset: /* * Make really really sure that all interrupts are disabled, * and poke the Internal Bus and Peripheral Bus reset lines. */ (void) disable_interrupts(I32_bit|F32_bit); *(volatile uint32_t *)(IQ80321_80321_VBASE + VERDE_ATU_BASE + ATU_PCSR) = PCSR_RIB | PCSR_RPB; /* ...and if that didn't work, just croak. */ printf("RESET FAILED!\n"); for (;;); }
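/*
 * The dump test above, "(howto & (RB_DUMP | RB_HALT)) == RB_DUMP", reads a
 * little cryptically: it means "a dump was requested AND we are not halting".
 * A quick standalone table of the four combinations; the RB_* values are the
 * conventional <sys/reboot.h> ones and are shown only for illustration.
 */
#include <stdio.h>

#define RB_HALT	0x008
#define RB_DUMP	0x100

int
main(void)
{
	int cases[] = { 0, RB_DUMP, RB_HALT, RB_DUMP | RB_HALT };

	for (int i = 0; i < 4; i++) {
		int howto = cases[i];
		printf("howto=0x%03x -> dump=%d\n", howto,
		    (howto & (RB_DUMP | RB_HALT)) == RB_DUMP);
	}
	return 0;
}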
int
kdp_intr_disbl(void)
{
	return splhigh();
}
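/*
 * kdp_intr_disbl() returns the previous spl value, so callers are expected to
 * restore it when done.  A minimal sketch, restoring with splx() directly
 * (whether a dedicated re-enable wrapper exists is not shown here):
 */
void
kdp_poll_sketch(void)
{
	int s = kdp_intr_disbl();	/* mask interrupts, remember old level */
	/* ... poll the debugger transport ... */
	splx(s);			/* restore the previous level */
}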
void
omgpio_intr_level(unsigned int gpio, unsigned int level)
{
	u_int32_t fe, re, l0, l1, bit;
	struct omgpio_softc *sc = omgpio_cd.cd_devs[GPIO_PIN_TO_INST(gpio)];
	int s;

	s = splhigh();

	fe = READ4(sc, sc->sc_regs.fallingdetect);
	re = READ4(sc, sc->sc_regs.risingdetect);
	l0 = READ4(sc, sc->sc_regs.leveldetect0);
	l1 = READ4(sc, sc->sc_regs.leveldetect1);

	bit = 1 << GPIO_PIN_TO_OFFSET(gpio);

	switch (level) {
	case IST_NONE:
		fe &= ~bit;
		re &= ~bit;
		l0 &= ~bit;
		l1 &= ~bit;
		break;
	case IST_EDGE_FALLING:
		fe |= bit;
		re &= ~bit;
		l0 &= ~bit;
		l1 &= ~bit;
		break;
	case IST_EDGE_RISING:
		fe &= ~bit;
		re |= bit;
		l0 &= ~bit;
		l1 &= ~bit;
		break;
	case IST_PULSE:
		/* XXX */
		/* FALLTHRU */
	case IST_EDGE_BOTH:
		fe |= bit;
		re |= bit;
		l0 &= ~bit;
		l1 &= ~bit;
		break;
	case IST_LEVEL_LOW:
		fe &= ~bit;
		re &= ~bit;
		l0 |= bit;
		l1 &= ~bit;
		break;
	case IST_LEVEL_HIGH:
		fe &= ~bit;
		re &= ~bit;
		l0 &= ~bit;
		l1 |= bit;
		break;
	default:
		panic("omgpio_intr_level: bad level: %d", level);
		break;
	}

	WRITE4(sc, sc->sc_regs.fallingdetect, fe);
	WRITE4(sc, sc->sc_regs.risingdetect, re);
	WRITE4(sc, sc->sc_regs.leveldetect0, l0);
	WRITE4(sc, sc->sc_regs.leveldetect1, l1);

	splx(s);
}
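/*
 * Usage sketch: arm a pin for falling-edge interrupts and later mask it
 * again.  The IST_* constants are the ones handled by the switch above;
 * the pin value is illustrative (a global pin index, as implied by
 * GPIO_PIN_TO_INST()/GPIO_PIN_TO_OFFSET()).
 */
void
omgpio_intr_example(unsigned int pin)
{
	omgpio_intr_level(pin, IST_EDGE_FALLING);	/* detect falling edges */
	/* ... the pin's interrupt handler runs on each edge ... */
	omgpio_intr_level(pin, IST_NONE);		/* mask it again */
}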
int waittest(int nargs, char **args) { int i, spl, status, err; pid_t kid; pid_t kids2[NTHREADS]; int kids2_head = 0, kids2_tail = 0; (void)nargs; (void)args; init_sem(); kprintf("Starting wait test...\n"); /* * This first set should (hopefully) still be running when * wait is called (helped by the splhigh). */ kprintf("\n"); kprintf("Set 1 (wait should generally succeed)\n"); kprintf("-------------------------------------\n"); spl = splhigh(); for (i = 0; i < NTHREADS; i++) { err = thread_fork("wait test thread", waitfirstthread, NULL, i, &kid); if (err) { panic("waittest: thread_fork failed (%d)\n", err); } kprintf("Spawned pid %d\n", kid); kids2[kids2_tail] = kid; kids2_tail = (kids2_tail+1) % NTHREADS; } splx(spl); for (i = 0; i < NTHREADS; i++) { kid = kids2[kids2_head]; kids2_head = (kids2_head+1) % NTHREADS; kprintf("Waiting on pid %d...\n", kid); err = pid_join(kid, &status, 0); if (err) { kprintf("Pid %d waitpid error %d!\n", kid, err); } else { kprintf("Pid %d exit status: %d\n", kid, status); } } /* * This second set has to V their semaphore before the exit, * so when wait is called, they will have already exited, but * their parent is still alive. */ kprintf("\n"); kprintf("Set 2 (wait should always succeed)\n"); kprintf("----------------------------------\n"); for (i = 0; i < NTHREADS; i++) { err = thread_fork("wait test thread", exitfirstthread, NULL, i, &kid); if (err) { panic("waittest: thread_fork failed (%d)\n", err); } kprintf("Spawned pid %d\n", kid); kids2[kids2_tail] = kid; kids2_tail = (kids2_tail+1) % NTHREADS; if (err) { panic("waittest: q_addtail failed (%d)\n", err); } } for (i = 0; i < NTHREADS; i++) { kid = kids2[kids2_head]; kids2_head = (kids2_head+1) % NTHREADS; kprintf("Waiting for pid %d to V()...\n", kid); P(exitsems[i]); kprintf("Appears that pid %d P()'d\n", kid); kprintf("Waiting on pid %d...\n", kid); err = pid_join(kid, &status, 0); if (err) { kprintf("Pid %d waitpid error %d!\n", kid, err); } else { kprintf("Pid %d exit status: %d\n", kid, status); } } /* * This third set has to V their semaphore before the exit, so * when wait is called, they will have already exited, and * since we've gone through and disowned them all, their exit * statuses should have been disposed of already and our waits * should all fail. */ kprintf("\n"); kprintf("Set 3 (wait should never succeed)\n"); kprintf("---------------------------------\n"); for (i = 0; i < NTHREADS; i++) { err = thread_fork("wait test thread", exitfirstthread, NULL, i, &kid); if (err) { panic("waittest: thread_fork failed (%d)\n", err); } kprintf("Spawned pid %d\n", kid); pid_detach(kid); kids2[kids2_tail] = kid; kids2_tail = (kids2_tail+1) % NTHREADS; } for (i = 0; i < NTHREADS; i++) { kid = kids2[kids2_head]; kids2_head = (kids2_head+1) % NTHREADS; kprintf("Waiting for pid %d to V()...\n", kid); P(exitsems[i]); kprintf("Appears that pid %d P()'d\n", kid); kprintf("Waiting on pid %d...\n", kid); err = pid_join(kid, &status, 0); if (err) { kprintf("Pid %d waitpid error %d!\n", kid, err); } else { kprintf("Pid %d exit status: %d\n", kid, status); } } kprintf("\nWait test done.\n"); return 0; }
/* * Gets called when softcall queue is not moving forward. We choose * a CPU and poke except the ones which are already poked. */ static int softcall_choose_cpu() { cpu_t *cplist = CPU; cpu_t *cp; int intr_load = INT_MAX; int cpuid = -1; cpuset_t poke; int s; ASSERT(getpil() >= DISP_LEVEL); ASSERT(ncpus > 1); ASSERT(MUTEX_HELD(&softcall_lock)); CPUSET_ZERO(poke); /* * The hint is to start from current CPU. */ cp = cplist; do { /* * Don't select this CPU if : * - in cpuset already * - CPU is not accepting interrupts * - CPU is being offlined */ if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) || (cp->cpu_flags & CPU_ENABLE) == 0 || (cp == cpu_inmotion)) continue; #if defined(__x86) /* * Don't select this CPU if a hypervisor indicates it * isn't currently scheduled onto a physical cpu. We are * looking for a cpu that can respond quickly and the time * to get the virtual cpu scheduled and switched to running * state is likely to be relatively lengthy. */ if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU) continue; #endif /* __x86 */ /* if CPU is not busy */ if (cp->cpu_intrload == 0) { cpuid = cp->cpu_id; break; } if (cp->cpu_intrload < intr_load) { cpuid = cp->cpu_id; intr_load = cp->cpu_intrload; } else if (cp->cpu_intrload == intr_load) { /* * We want to poke CPUs having similar * load because we don't know which CPU is * can acknowledge level1 interrupt. The * list of such CPUs should not be large. */ if (cpuid != -1) { /* * Put the last CPU chosen because * it also has same interrupt load. */ CPUSET_ADD(poke, cpuid); cpuid = -1; } CPUSET_ADD(poke, cp->cpu_id); } } while ((cp = cp->cpu_next_onln) != cplist); /* if we found a CPU which suits best to poke */ if (cpuid != -1) { CPUSET_ZERO(poke); CPUSET_ADD(poke, cpuid); } if (CPUSET_ISNULL(poke)) { mutex_exit(&softcall_lock); return (0); } /* * We first set the bit in cpuset and then poke. */ CPUSET_XOR(*softcall_cpuset, poke); mutex_exit(&softcall_lock); /* * If softcall() was called at low pil then we may * get preempted before we raise PIL. It should be okay * because we are just going to poke CPUs now or at most * another thread may start choosing CPUs in this routine. */ s = splhigh(); siron_poke_cpu(poke); splx(s); return (1); }
/* * Implement device not available (DNA) exception * * If we were the last lwp to use the FPU, we can simply return. * Otherwise, we save the previous state, if necessary, and restore * our last saved state. */ void fpudna(struct cpu_info *ci) { uint16_t cw; uint32_t mxcsr; struct lwp *l, *fl; struct pcb *pcb; int s; if (ci->ci_fpsaving) { /* Recursive trap. */ x86_enable_intr(); return; } /* Lock out IPIs and disable preemption. */ s = splhigh(); x86_enable_intr(); /* Save state on current CPU. */ l = ci->ci_curlwp; pcb = lwp_getpcb(l); fl = ci->ci_fpcurlwp; if (fl != NULL) { /* * It seems we can get here on Xen even if we didn't * switch lwp. In this case do nothing */ if (fl == l) { KASSERT(pcb->pcb_fpcpu == ci); clts(); splx(s); return; } KASSERT(fl != l); fpusave_cpu(true); KASSERT(ci->ci_fpcurlwp == NULL); } /* Save our state if on a remote CPU. */ if (pcb->pcb_fpcpu != NULL) { /* Explicitly disable preemption before dropping spl. */ KPREEMPT_DISABLE(l); splx(s); fpusave_lwp(l, true); KASSERT(pcb->pcb_fpcpu == NULL); s = splhigh(); KPREEMPT_ENABLE(l); } /* * Restore state on this CPU, or initialize. Ensure that * the entire update is atomic with respect to FPU-sync IPIs. */ clts(); ci->ci_fpcurlwp = l; pcb->pcb_fpcpu = ci; if ((l->l_md.md_flags & MDL_USEDFPU) == 0) { fninit(); cw = pcb->pcb_savefpu.fp_fxsave.fx_fcw; fldcw(&cw); mxcsr = pcb->pcb_savefpu.fp_fxsave.fx_mxcsr; x86_ldmxcsr(&mxcsr); l->l_md.md_flags |= MDL_USEDFPU; } else { /* * AMD FPU's do not restore FIP, FDP, and FOP on fxrstor, * leaking other process's execution history. Clear them * manually. */ static const double zero = 0.0; int status; /* * Clear the ES bit in the x87 status word if it is currently * set, in order to avoid causing a fault in the upcoming load. */ fnstsw(&status); if (status & 0x80) fnclex(); /* * Load the dummy variable into the x87 stack. This mangles * the x87 stack, but we don't care since we're about to call * fxrstor() anyway. */ fldummy(&zero); fxrstor(&pcb->pcb_savefpu); } KASSERT(ci == curcpu()); splx(s); }
/*---------------------------------------------------------------------------*/ int cc2420_init(void) { uint16_t reg; { int s = splhigh(); cc2420_arch_init(); /* Initalize ports and SPI. */ CC2420_DISABLE_FIFOP_INT(); CC2420_FIFOP_INT_INIT(); splx(s); } /* Turn on voltage regulator and reset. */ SET_VREG_ACTIVE(); clock_delay(250); SET_RESET_ACTIVE(); clock_delay(127); SET_RESET_INACTIVE(); clock_delay(125); /* Turn on the crystal oscillator. */ strobe(CC2420_SXOSCON); /* Turn on/off automatic packet acknowledgment and address decoding. */ reg = getreg(CC2420_MDMCTRL0); #if CC2420_CONF_AUTOACK reg |= AUTOACK | ADR_DECODE; #else reg &= ~(AUTOACK | ADR_DECODE); #endif /* CC2420_CONF_AUTOACK */ setreg(CC2420_MDMCTRL0, reg); /* Set transmission turnaround time to the lower setting (8 symbols = 0.128 ms) instead of the default (12 symbols = 0.192 ms). */ /* reg = getreg(CC2420_TXCTRL); reg &= ~(1 << 13); setreg(CC2420_TXCTRL, reg);*/ /* Change default values as recomended in the data sheet, */ /* correlation threshold = 20, RX bandpass filter = 1.3uA. */ setreg(CC2420_MDMCTRL1, CORR_THR(20)); reg = getreg(CC2420_RXCTRL1); reg |= RXBPF_LOCUR; setreg(CC2420_RXCTRL1, reg); /* Set the FIFOP threshold to maximum. */ setreg(CC2420_IOCFG0, FIFOP_THR(127)); /* Turn off "Security enable" (page 32). */ reg = getreg(CC2420_SECCTRL0); reg &= ~RXFIFO_PROTECTION; setreg(CC2420_SECCTRL0, reg); cc2420_set_pan_addr(0xffff, 0x0000, NULL); cc2420_set_channel(26); flushrx(); process_start(&cc2420_process, NULL); return 1; }
static int hpcapm_hook(void *ctx, int type, long id, void *msg) { struct apmhpc_softc *sc; int s; int charge; int message; sc = ctx; if (type != CONFIG_HOOK_PMEVENT) return 1; if (CONFIG_HOOK_VALUEP(msg)) message = (int)msg; else message = *(int *)msg; s = splhigh(); switch (id) { case CONFIG_HOOK_PMEVENT_STANDBYREQ: if (sc->power_state != APM_SYS_STANDBY) { sc->events |= (1 << APM_USER_STANDBY_REQ); } else { sc->events |= (1 << APM_NORMAL_RESUME); } break; case CONFIG_HOOK_PMEVENT_SUSPENDREQ: if (sc->power_state != APM_SYS_SUSPEND) { DPRINTF(("hpcapm: suspend request\n")); sc->events |= (1 << APM_USER_SUSPEND_REQ); } else { sc->events |= (1 << APM_NORMAL_RESUME); } break; case CONFIG_HOOK_PMEVENT_BATTERY: switch (message) { case CONFIG_HOOK_BATT_CRITICAL: DPRINTF(("hpcapm: battery state critical\n")); charge = sc->battery_flags & APM_BATT_FLAG_CHARGING; sc->battery_flags = APM_BATT_FLAG_CRITICAL; sc->battery_flags |= charge; sc->battery_life = 0; break; case CONFIG_HOOK_BATT_LOW: DPRINTF(("hpcapm: battery state low\n")); charge = sc->battery_flags & APM_BATT_FLAG_CHARGING; sc->battery_flags = APM_BATT_FLAG_LOW; sc->battery_flags |= charge; break; case CONFIG_HOOK_BATT_HIGH: DPRINTF(("hpcapm: battery state high\n")); charge = sc->battery_flags & APM_BATT_FLAG_CHARGING; sc->battery_flags = APM_BATT_FLAG_HIGH; sc->battery_flags |= charge; break; case CONFIG_HOOK_BATT_10P: DPRINTF(("hpcapm: battery life 10%%\n")); sc->battery_life = 10; break; case CONFIG_HOOK_BATT_20P: DPRINTF(("hpcapm: battery life 20%%\n")); sc->battery_life = 20; break; case CONFIG_HOOK_BATT_30P: DPRINTF(("hpcapm: battery life 30%%\n")); sc->battery_life = 30; break; case CONFIG_HOOK_BATT_40P: DPRINTF(("hpcapm: battery life 40%%\n")); sc->battery_life = 40; break; case CONFIG_HOOK_BATT_50P: DPRINTF(("hpcapm: battery life 50%%\n")); sc->battery_life = 50; break; case CONFIG_HOOK_BATT_60P: DPRINTF(("hpcapm: battery life 60%%\n")); sc->battery_life = 60; break; case CONFIG_HOOK_BATT_70P: DPRINTF(("hpcapm: battery life 70%%\n")); sc->battery_life = 70; break; case CONFIG_HOOK_BATT_80P: DPRINTF(("hpcapm: battery life 80%%\n")); sc->battery_life = 80; break; case CONFIG_HOOK_BATT_90P: DPRINTF(("hpcapm: battery life 90%%\n")); sc->battery_life = 90; break; case CONFIG_HOOK_BATT_100P: DPRINTF(("hpcapm: battery life 100%%\n")); sc->battery_life = 100; break; case CONFIG_HOOK_BATT_UNKNOWN: DPRINTF(("hpcapm: battery state unknown\n")); sc->battery_flags = APM_BATT_FLAG_UNKNOWN; sc->battery_life = APM_BATT_LIFE_UNKNOWN; break; case CONFIG_HOOK_BATT_NO_SYSTEM_BATTERY: DPRINTF(("hpcapm: battery state no system battery?\n")); sc->battery_flags = APM_BATT_FLAG_NO_SYSTEM_BATTERY; sc->battery_life = APM_BATT_LIFE_UNKNOWN; break; } break; case CONFIG_HOOK_PMEVENT_AC: switch (message) { case CONFIG_HOOK_AC_OFF: DPRINTF(("hpcapm: ac not connected\n")); sc->battery_flags &= ~APM_BATT_FLAG_CHARGING; sc->ac_state = APM_AC_OFF; break; case CONFIG_HOOK_AC_ON_CHARGE: DPRINTF(("hpcapm: charging\n")); sc->battery_flags |= APM_BATT_FLAG_CHARGING; sc->ac_state = APM_AC_ON; break; case CONFIG_HOOK_AC_ON_NOCHARGE: DPRINTF(("hpcapm: ac connected\n")); sc->battery_flags &= ~APM_BATT_FLAG_CHARGING; sc->ac_state = APM_AC_ON; break; case CONFIG_HOOK_AC_UNKNOWN: sc->ac_state = APM_AC_UNKNOWN; break; } break; } splx(s); return (0); }
/*---------------------------------------------------------------------------*/ int main(int argc, char **argv) { /* * Initalize hardware. */ msp430_cpu_init(); clock_init(); leds_init(); leds_on(LEDS_RED); clock_wait(2); uart1_init(115200); /* Must come before first printf */ #if WITH_UIP slip_arch_init(115200); #endif /* WITH_UIP */ clock_wait(1); leds_on(LEDS_GREEN); //ds2411_init(); /* XXX hack: Fix it so that the 802.15.4 MAC address is compatible with an Ethernet MAC address - byte 0 (byte 2 in the DS ID) cannot be odd. */ //ds2411_id[2] &= 0xfe; leds_on(LEDS_BLUE); //xmem_init(); leds_off(LEDS_RED); rtimer_init(); /* * Hardware initialization done! */ node_id = NODE_ID; /* Restore node id if such has been stored in external mem */ //node_id_restore(); /* for setting "hardcoded" IEEE 802.15.4 MAC addresses */ #ifdef IEEE_802154_MAC_ADDRESS { uint8_t ieee[] = IEEE_802154_MAC_ADDRESS; //memcpy(ds2411_id, ieee, sizeof(uip_lladdr.addr)); //ds2411_id[7] = node_id & 0xff; } #endif //random_init(ds2411_id[0] + node_id); leds_off(LEDS_BLUE); /* * Initialize Contiki and our processes. */ process_init(); process_start(&etimer_process, NULL); ctimer_init(); init_platform(); set_rime_addr(); cc2520_init(); { uint8_t longaddr[8]; uint16_t shortaddr; shortaddr = (linkaddr_node_addr.u8[0] << 8) + linkaddr_node_addr.u8[1]; memset(longaddr, 0, sizeof(longaddr)); linkaddr_copy((linkaddr_t *)&longaddr, &linkaddr_node_addr); printf("MAC %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", longaddr[0], longaddr[1], longaddr[2], longaddr[3], longaddr[4], longaddr[5], longaddr[6], longaddr[7]); cc2520_set_pan_addr(IEEE802154_PANID, shortaddr, longaddr); } cc2520_set_channel(RF_CHANNEL); printf(CONTIKI_VERSION_STRING " started. "); if(node_id > 0) { printf("Node id is set to %u.\n", node_id); } else { printf("Node id is not set.\n"); } #if WITH_UIP6 /* memcpy(&uip_lladdr.addr, ds2411_id, sizeof(uip_lladdr.addr)); */ memcpy(&uip_lladdr.addr, linkaddr_node_addr.u8, UIP_LLADDR_LEN > LINKADDR_SIZE ? LINKADDR_SIZE : UIP_LLADDR_LEN); /* Setup nullmac-like MAC for 802.15.4 */ /* sicslowpan_init(sicslowmac_init(&cc2520_driver)); */ /* printf(" %s channel %u\n", sicslowmac_driver.name, RF_CHANNEL); */ /* Setup X-MAC for 802.15.4 */ queuebuf_init(); NETSTACK_RDC.init(); NETSTACK_MAC.init(); NETSTACK_NETWORK.init(); printf("%s %s, channel check rate %lu Hz, radio channel %u\n", NETSTACK_MAC.name, NETSTACK_RDC.name, CLOCK_SECOND / (NETSTACK_RDC.channel_check_interval() == 0 ? 1: NETSTACK_RDC.channel_check_interval()), RF_CHANNEL); process_start(&tcpip_process, NULL); printf("Tentative link-local IPv6 address "); { uip_ds6_addr_t *lladdr; int i; lladdr = uip_ds6_get_link_local(-1); for(i = 0; i < 7; ++i) { printf("%02x%02x:", lladdr->ipaddr.u8[i * 2], lladdr->ipaddr.u8[i * 2 + 1]); } printf("%02x%02x\n", lladdr->ipaddr.u8[14], lladdr->ipaddr.u8[15]); } if(!UIP_CONF_IPV6_RPL) { uip_ipaddr_t ipaddr; int i; uip_ip6addr(&ipaddr, 0xaaaa, 0, 0, 0, 0, 0, 0, 0); uip_ds6_set_addr_iid(&ipaddr, &uip_lladdr); uip_ds6_addr_add(&ipaddr, 0, ADDR_TENTATIVE); printf("Tentative global IPv6 address "); for(i = 0; i < 7; ++i) { printf("%02x%02x:", ipaddr.u8[i * 2], ipaddr.u8[i * 2 + 1]); } printf("%02x%02x\n", ipaddr.u8[7 * 2], ipaddr.u8[7 * 2 + 1]); } #else /* WITH_UIP6 */ NETSTACK_RDC.init(); NETSTACK_MAC.init(); NETSTACK_NETWORK.init(); printf("%s %s, channel check rate %lu Hz, radio channel %u\n", NETSTACK_MAC.name, NETSTACK_RDC.name, CLOCK_SECOND / (NETSTACK_RDC.channel_check_interval() == 0? 
1: NETSTACK_RDC.channel_check_interval()), RF_CHANNEL); #endif /* WITH_UIP6 */ #if !WITH_UIP && !WITH_UIP6 uart1_set_input(serial_line_input_byte); serial_line_init(); #endif leds_off(LEDS_GREEN); #if TIMESYNCH_CONF_ENABLED timesynch_init(); timesynch_set_authority_level((linkaddr_node_addr.u8[0] << 4) + 16); #endif /* TIMESYNCH_CONF_ENABLED */ #if WITH_UIP process_start(&tcpip_process, NULL); process_start(&uip_fw_process, NULL); /* Start IP output */ process_start(&slip_process, NULL); slip_set_input_callback(set_gateway); { uip_ipaddr_t hostaddr, netmask; uip_init(); uip_ipaddr(&hostaddr, 172,16, linkaddr_node_addr.u8[0],linkaddr_node_addr.u8[1]); uip_ipaddr(&netmask, 255,255,0,0); uip_ipaddr_copy(&meshif.ipaddr, &hostaddr); uip_sethostaddr(&hostaddr); uip_setnetmask(&netmask); uip_over_mesh_set_net(&hostaddr, &netmask); /* uip_fw_register(&slipif);*/ uip_over_mesh_set_gateway_netif(&slipif); uip_fw_default(&meshif); uip_over_mesh_init(UIP_OVER_MESH_CHANNEL); printf("uIP started with IP address %d.%d.%d.%d\n", uip_ipaddr_to_quad(&hostaddr)); } #endif /* WITH_UIP */ energest_init(); ENERGEST_ON(ENERGEST_TYPE_CPU); watchdog_start(); /* Stop the watchdog */ watchdog_stop(); #if !PROCESS_CONF_NO_PROCESS_NAMES print_processes(autostart_processes); #else /* !PROCESS_CONF_NO_PROCESS_NAMES */ putchar('\n'); /* include putchar() */ #endif /* !PROCESS_CONF_NO_PROCESS_NAMES */ autostart_start(autostart_processes); /* * This is the scheduler loop. */ while(1) { int r; do { /* Reset watchdog. */ watchdog_periodic(); r = process_run(); } while(r > 0); /* * Idle processing. */ int s = splhigh(); /* Disable interrupts. */ /* uart1_active is for avoiding LPM3 when still sending or receiving */ if(process_nevents() != 0 || uart1_active()) { splx(s); /* Re-enable interrupts. */ } else { static unsigned long irq_energest = 0; /* Re-enable interrupts and go to sleep atomically. */ ENERGEST_OFF(ENERGEST_TYPE_CPU); ENERGEST_ON(ENERGEST_TYPE_LPM); /* We only want to measure the processing done in IRQs when we are asleep, so we discard the processing time done when we were awake. */ energest_type_set(ENERGEST_TYPE_IRQ, irq_energest); watchdog_stop(); _BIS_SR(GIE | SCG0 | SCG1 | CPUOFF); /* LPM3 sleep. This statement will block until the CPU is woken up by an interrupt that sets the wake up flag. */ /* We get the current processing time for interrupts that was done during the LPM and store it for next time around. */ dint(); irq_energest = energest_type_time(ENERGEST_TYPE_IRQ); eint(); watchdog_start(); ENERGEST_OFF(ENERGEST_TYPE_LPM); ENERGEST_ON(ENERGEST_TYPE_CPU); } } }
void waxattach(device_t parent, device_t self, void *aux) { struct confargs *ca = aux; struct wax_softc *sc = device_private(self); struct gsc_attach_args ga; struct cpu_info *ci = &cpus[0]; bus_space_handle_t ioh; int s; ca->ca_irq = hppa_intr_allocate_bit(&ci->ci_ir, ca->ca_irq); if (ca->ca_irq == HPPACF_IRQ_UNDEF) { aprint_error(": can't allocate interrupt\n"); return; } sc->sc_dv = self; wax_attached = 1; aprint_normal("\n"); /* * Map the WAX interrupt registers. */ if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct wax_regs), 0, &ioh)) { aprint_error(": can't map interrupt registers\n"); return; } sc->sc_regs = (struct wax_regs *)ca->ca_hpa; /* interrupts guts */ s = splhigh(); sc->sc_regs->wax_iar = ci->ci_hpa | (31 - ca->ca_irq); sc->sc_regs->wax_icr = 0; sc->sc_regs->wax_imr = ~0U; (void)sc->sc_regs->wax_irr; sc->sc_regs->wax_imr = 0; splx(s); /* Establish the interrupt register. */ hppa_interrupt_register_establish(ci, &sc->sc_ir); sc->sc_ir.ir_name = device_xname(self); sc->sc_ir.ir_mask = &sc->sc_regs->wax_imr; sc->sc_ir.ir_req = &sc->sc_regs->wax_irr; /* Attach the GSC bus. */ ga.ga_ca = *ca; /* clone from us */ if (strcmp(device_xname(parent), "mainbus0") == 0) { ga.ga_dp.dp_bc[0] = ga.ga_dp.dp_bc[1]; ga.ga_dp.dp_bc[1] = ga.ga_dp.dp_bc[2]; ga.ga_dp.dp_bc[2] = ga.ga_dp.dp_bc[3]; ga.ga_dp.dp_bc[3] = ga.ga_dp.dp_bc[4]; ga.ga_dp.dp_bc[4] = ga.ga_dp.dp_bc[5]; ga.ga_dp.dp_bc[5] = ga.ga_dp.dp_mod; ga.ga_dp.dp_mod = 0; } ga.ga_name = "gsc"; ga.ga_ir = &sc->sc_ir; ga.ga_fix_args = wax_fix_args; ga.ga_fix_args_cookie = sc; ga.ga_scsi_target = 7; /* XXX */ config_found(self, &ga, gscprint); }
static int pci_mem_find(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t type, bus_addr_t *basep, bus_size_t *sizep, int *flagsp) { pcireg_t address, mask, address1 = 0, mask1 = 0xffffffff; u_int64_t waddress, wmask; int s, is64bit, isrom; is64bit = (PCI_MAPREG_MEM_TYPE(type) == PCI_MAPREG_MEM_TYPE_64BIT); isrom = (reg == PCI_MAPREG_ROM); if ((!isrom) && (reg < PCI_MAPREG_START || #if 0 /* * Can't do this check; some devices have mapping registers * way out in left field. */ reg >= PCI_MAPREG_END || #endif (reg & 3))) panic("pci_mem_find: bad request"); if (is64bit && (reg + 4) >= PCI_MAPREG_END) panic("pci_mem_find: bad 64-bit request"); /* * Section 6.2.5.1, `Address Maps', tells us that: * * 1) The builtin software should have already mapped the device in a * reasonable way. * * 2) A device which wants 2^n bytes of memory will hardwire the bottom * n bits of the address to 0. As recommended, we write all 1s and see * what we get back. */ s = splhigh(); address = pci_conf_read(pc, tag, reg); pci_conf_write(pc, tag, reg, 0xffffffff); mask = pci_conf_read(pc, tag, reg); pci_conf_write(pc, tag, reg, address); if (is64bit) { address1 = pci_conf_read(pc, tag, reg + 4); pci_conf_write(pc, tag, reg + 4, 0xffffffff); mask1 = pci_conf_read(pc, tag, reg + 4); pci_conf_write(pc, tag, reg + 4, address1); } splx(s); if (!isrom) { /* * roms should have an enable bit instead of a memory * type decoder bit. For normal BARs, make sure that * the address decoder type matches what we asked for. */ if (PCI_MAPREG_TYPE(address) != PCI_MAPREG_TYPE_MEM) { printf("pci_mem_find: expected type mem, found i/o\n"); return (1); } /* XXX Allow 64bit bars for 32bit requests.*/ if (PCI_MAPREG_MEM_TYPE(address) != PCI_MAPREG_MEM_TYPE(type) && PCI_MAPREG_MEM_TYPE(address) != PCI_MAPREG_MEM_TYPE_64BIT) { printf("pci_mem_find: " "expected mem type %08x, found %08x\n", PCI_MAPREG_MEM_TYPE(type), PCI_MAPREG_MEM_TYPE(address)); return (1); } } waddress = (u_int64_t)address1 << 32UL | address; wmask = (u_int64_t)mask1 << 32UL | mask; if ((is64bit && PCI_MAPREG_MEM64_SIZE(wmask) == 0) || (!is64bit && PCI_MAPREG_MEM_SIZE(mask) == 0)) { aprint_debug("pci_mem_find: void region\n"); return (1); } switch (PCI_MAPREG_MEM_TYPE(address)) { case PCI_MAPREG_MEM_TYPE_32BIT: case PCI_MAPREG_MEM_TYPE_32BIT_1M: break; case PCI_MAPREG_MEM_TYPE_64BIT: /* * Handle the case of a 64-bit memory register on a * platform with 32-bit addressing. Make sure that * the address assigned and the device's memory size * fit in 32 bits. We implicitly assume that if * bus_addr_t is 64-bit, then so is bus_size_t. */ if (sizeof(u_int64_t) > sizeof(bus_addr_t) && (address1 != 0 || mask1 != 0xffffffff)) { printf("pci_mem_find: 64-bit memory map which is " "inaccessible on a 32-bit platform\n"); return (1); } break; default: printf("pci_mem_find: reserved mapping register type\n"); return (1); } if (sizeof(u_int64_t) > sizeof(bus_addr_t)) { if (basep != 0) *basep = PCI_MAPREG_MEM_ADDR(address); if (sizep != 0) *sizep = PCI_MAPREG_MEM_SIZE(mask); } else { if (basep != 0) *basep = PCI_MAPREG_MEM64_ADDR(waddress); if (sizep != 0) *sizep = PCI_MAPREG_MEM64_SIZE(wmask); } if (flagsp != 0) *flagsp = (isrom || PCI_MAPREG_MEM_PREFETCHABLE(address)) ? BUS_SPACE_MAP_PREFETCHABLE : 0; return (0); }
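/*
 * Standalone illustration of the probe above: after writing all 1s to a
 * memory BAR, the value read back has the size-determining low address bits
 * hardwired to zero.  Clearing the four low type/prefetch bits and taking
 * the two's complement yields the region size, which is essentially what
 * the PCI_MAPREG_MEM_SIZE() macro used above computes.  The 0xFFFF0000 mask
 * is a made-up example corresponding to a 64 KB BAR.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t mask = 0xFFFF0000u;			/* read back after writing ~0 */
	uint32_t size = ~(mask & ~(uint32_t)0xF) + 1;	/* two's complement of the mask */

	printf("BAR size = 0x%x bytes\n", (unsigned)size);	/* prints 0x10000 */
	return 0;
}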
static int init_zs_linemon( register queue_t *q, register queue_t *my_q ) { register struct zscom *zs; register struct savedzsops *szs; register parsestream_t *parsestream = (parsestream_t *)(void *)my_q->q_ptr; /* * we expect the zsaline pointer in the q_data pointer * from there on we insert our on EXTERNAL/STATUS ISR routine * into the interrupt path, before the standard handler */ zs = ((struct zsaline *)(void *)q->q_ptr)->za_common; if (!zs) { /* * well - not found on startup - just say no (shouldn't happen though) */ return 0; } else { unsigned long s; /* * we do a direct replacement, in case others fiddle also * if somebody else grabs our hook and we disconnect * we are in DEEP trouble - panic is likely to be next, sorry */ szs = (struct savedzsops *)(void *)kmem_alloc(sizeof(struct savedzsops)); if (szs == (struct savedzsops *)0) { parseprintf(DD_INSTALL, ("init_zs_linemon: CD monitor NOT installed - no memory\n")); return 0; } else { parsestream->parse_data = (void *)szs; s = splhigh(); parsestream->parse_dqueue = q; /* remember driver */ szs->zsops = *zs->zs_ops; szs->zsops.zsop_xsint = zs_xsisr; /* place our bastard */ szs->oldzsops = zs->zs_ops; emergencyzs = zs->zs_ops; zsopinit(zs, &szs->zsops); /* hook it up */ (void) splx(s); parseprintf(DD_INSTALL, ("init_zs_linemon: CD monitor installed\n")); return 1; } } }
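/*
 * A sketch of the matching teardown (not shown above): restore the saved
 * zsops under splhigh() and release the wrapper.  The function name is
 * hypothetical; it assumes the same parsestream/savedzsops layout used by
 * init_zs_linemon() and that nobody else has re-hooked zs_ops in between,
 * which is exactly the "DEEP trouble" case the comment above warns about.
 */
static void
close_zs_linemon_sketch(queue_t *q, queue_t *my_q)
{
	register struct zscom *zs;
	register parsestream_t *parsestream = (parsestream_t *)(void *)my_q->q_ptr;
	register struct savedzsops *szs = (struct savedzsops *)parsestream->parse_data;
	unsigned long s;

	zs = ((struct zsaline *)(void *)q->q_ptr)->za_common;
	if (zs == NULL || szs == NULL)
		return;

	s = splhigh();
	zsopinit(zs, szs->oldzsops);		/* put the original ops back */
	parsestream->parse_dqueue = NULL;
	(void) splx(s);

	kmem_free(szs, sizeof(struct savedzsops));
	parsestream->parse_data = NULL;
}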
int pci_mapreg_submap(struct pci_attach_args *pa, int reg, pcireg_t type, int busflags, bus_size_t maxsize, bus_size_t offset, bus_space_tag_t *tagp, bus_space_handle_t *handlep, bus_addr_t *basep, bus_size_t *sizep) { bus_space_tag_t tag; bus_space_handle_t handle; bus_addr_t base; bus_size_t size; int flags; if (PCI_MAPREG_TYPE(type) == PCI_MAPREG_TYPE_IO) { if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) return (1); if (pci_io_find(pa->pa_pc, pa->pa_tag, reg, type, &base, &size, &flags)) return (1); tag = pa->pa_iot; } else { if ((pa->pa_flags & PCI_FLAGS_MEM_ENABLED) == 0) return (1); if (pci_mem_find(pa->pa_pc, pa->pa_tag, reg, type, &base, &size, &flags)) return (1); tag = pa->pa_memt; } if (reg == PCI_MAPREG_ROM) { pcireg_t mask; int s; /* we have to enable the ROM address decoder... */ s = splhigh(); mask = pci_conf_read(pa->pa_pc, pa->pa_tag, reg); mask |= PCI_MAPREG_ROM_ENABLE; pci_conf_write(pa->pa_pc, pa->pa_tag, reg, mask); splx(s); } /* If we're called with maxsize/offset of 0, behave like * pci_mapreg_map. */ maxsize = (maxsize && offset) ? maxsize : size; base += offset; if ((maxsize < size && offset + maxsize <= size) || offset != 0) return (1); if (bus_space_map(tag, base, maxsize, busflags | flags, &handle)) return (1); if (tagp != 0) *tagp = tag; if (handlep != 0) *handlep = handle; if (basep != 0) *basep = base; if (sizep != 0) *sizep = maxsize; return (0); }
/*--------------------------------------------------------------------------*/ int main(int argc, char **argv) { /* * Initalize hardware. */ msp430_cpu_init(); clock_init(); leds_init(); leds_on(LEDS_RED); uart1_init(BAUD2UBR(115200)); /* Must come before first printf */ #if WITH_UIP slip_arch_init(BAUD2UBR(115200)); #endif /* WITH_UIP */ leds_on(LEDS_GREEN); /* xmem_init(); */ rtimer_init(); lcd_init(); PRINTF(CONTIKI_VERSION_STRING "\n"); /* * Hardware initialization done! */ leds_on(LEDS_RED); /* Restore node id if such has been stored in external mem */ // node_id_restore(); #ifdef NODEID node_id = NODEID; #ifdef BURN_NODEID flash_setup(); flash_clear(0x1800); flash_write(0x1800, node_id); flash_done(); #endif /* BURN_NODEID */ #endif /* NODE_ID */ if(node_id == 0) { node_id = *((unsigned short *)0x1800); } memset(node_mac, 0, sizeof(node_mac)); node_mac[6] = node_id >> 8; node_mac[7] = node_id & 0xff; /* for setting "hardcoded" IEEE 802.15.4 MAC addresses */ #ifdef MAC_1 { uint8_t ieee[] = { MAC_1, MAC_2, MAC_3, MAC_4, MAC_5, MAC_6, MAC_7, MAC_8 }; memcpy(node_mac, ieee, sizeof(uip_lladdr.addr)); } #endif /* * Initialize Contiki and our processes. */ process_init(); process_start(&etimer_process, NULL); ctimer_init(); set_rime_addr(); cc2420_init(); { uint8_t longaddr[8]; uint16_t shortaddr; shortaddr = (rimeaddr_node_addr.u8[0] << 8) + rimeaddr_node_addr.u8[1]; memset(longaddr, 0, sizeof(longaddr)); rimeaddr_copy((rimeaddr_t *)&longaddr, &rimeaddr_node_addr); printf("MAC %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", longaddr[0], longaddr[1], longaddr[2], longaddr[3], longaddr[4], longaddr[5], longaddr[6], longaddr[7]); cc2420_set_pan_addr(IEEE802154_PANID, shortaddr, longaddr); } cc2420_set_channel(RF_CHANNEL); leds_off(LEDS_ALL); if(node_id > 0) { PRINTF("Node id %u.\n", node_id); } else { PRINTF("Node id not set.\n"); } #if WITH_UIP6 memcpy(&uip_lladdr.addr, node_mac, sizeof(uip_lladdr.addr)); /* Setup nullmac-like MAC for 802.15.4 */ queuebuf_init(); NETSTACK_RDC.init(); NETSTACK_MAC.init(); NETSTACK_NETWORK.init(); printf("%s %lu %u\n", NETSTACK_RDC.name, CLOCK_SECOND / (NETSTACK_RDC.channel_check_interval() == 0 ? 1: NETSTACK_RDC.channel_check_interval()), RF_CHANNEL); process_start(&tcpip_process, NULL); printf("IPv6 "); { uip_ds6_addr_t *lladdr; int i; lladdr = uip_ds6_get_link_local(-1); for(i = 0; i < 7; ++i) { printf("%02x%02x:", lladdr->ipaddr.u8[i * 2], lladdr->ipaddr.u8[i * 2 + 1]); } printf("%02x%02x\n", lladdr->ipaddr.u8[14], lladdr->ipaddr.u8[15]); } if(!UIP_CONF_IPV6_RPL) { uip_ipaddr_t ipaddr; int i; uip_ip6addr(&ipaddr, 0xaaaa, 0, 0, 0, 0, 0, 0, 0); uip_ds6_set_addr_iid(&ipaddr, &uip_lladdr); uip_ds6_addr_add(&ipaddr, 0, ADDR_TENTATIVE); printf("Tentative global IPv6 address "); for(i = 0; i < 7; ++i) { printf("%02x%02x:", ipaddr.u8[i * 2], ipaddr.u8[i * 2 + 1]); } printf("%02x%02x\n", ipaddr.u8[7 * 2], ipaddr.u8[7 * 2 + 1]); } #else /* WITH_UIP6 */ NETSTACK_RDC.init(); NETSTACK_MAC.init(); NETSTACK_NETWORK.init(); printf("%s %lu %u\n", NETSTACK_RDC.name, CLOCK_SECOND / (NETSTACK_RDC.channel_check_interval() == 0? 
1: NETSTACK_RDC.channel_check_interval()), RF_CHANNEL); #endif /* WITH_UIP6 */ #if !WITH_UIP6 uart1_set_input(serial_line_input_byte); serial_line_init(); #endif #if TIMESYNCH_CONF_ENABLED timesynch_init(); timesynch_set_authority_level(rimeaddr_node_addr.u8[0]); #endif /* TIMESYNCH_CONF_ENABLED */ /* process_start(&sensors_process, NULL); SENSORS_ACTIVATE(button_sensor);*/ energest_init(); ENERGEST_ON(ENERGEST_TYPE_CPU); print_processes(autostart_processes); autostart_start(autostart_processes); duty_cycle_scroller_start(CLOCK_SECOND * 2); /* * This is the scheduler loop. */ watchdog_start(); watchdog_stop(); /* Stop the wdt... */ while(1) { int r; do { /* Reset watchdog. */ watchdog_periodic(); r = process_run(); } while(r > 0); /* * Idle processing. */ int s = splhigh(); /* Disable interrupts. */ /* uart1_active is for avoiding LPM3 when still sending or receiving */ if(process_nevents() != 0 || uart1_active()) { splx(s); /* Re-enable interrupts. */ } else { static unsigned long irq_energest = 0; /* Re-enable interrupts and go to sleep atomically. */ ENERGEST_OFF(ENERGEST_TYPE_CPU); ENERGEST_ON(ENERGEST_TYPE_LPM); /* We only want to measure the processing done in IRQs when we are asleep, so we discard the processing time done when we were awake. */ energest_type_set(ENERGEST_TYPE_IRQ, irq_energest); watchdog_stop(); _BIS_SR(GIE | SCG0 | SCG1 | CPUOFF); /* LPM3 sleep. This statement will block until the CPU is woken up by an interrupt that sets the wake up flag. */ /* We get the current processing time for interrupts that was done during the LPM and store it for next time around. */ dint(); irq_energest = energest_type_time(ENERGEST_TYPE_IRQ); eint(); watchdog_start(); ENERGEST_OFF(ENERGEST_TYPE_LPM); ENERGEST_ON(ENERGEST_TYPE_CPU); } } }
void panic(const char *fmt, ...) { va_list ap; /* * When we reach panic, the system is usually fairly screwed up. * It's not entirely uncommon for anything else we try to do * here to trigger more panics. * * This variable makes sure that if we try to do something here, * and it causes another panic, *that* panic doesn't try again; * trying again almost inevitably causes infinite recursion. * * This is not excessively paranoid - these things DO happen! */ static volatile int evil; if (evil==0) { evil = 1; /* * Not only do we not want to be interrupted while * panicking, but we also want the console to be * printing in polling mode so as not to do context * switches. So turn interrupts off. */ splhigh(); } if (evil==1) { evil = 2; thread_panic(); } if (evil==2) { evil = 3; kprintf("panic: "); va_start(ap, fmt); __vprintf(console_send, NULL, fmt, ap); va_end(ap); } if (evil==3) { evil = 4; vfs_sync(); } if (evil==4) { evil = 5; md_panic(); } /* * Last resort, just in case. */ for (;;); }
int ccdioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) { int unit = ccdunit(dev); int i, j, lookedup = 0, error = 0; int part, pmask, s; struct ccd_softc *cs; struct ccd_ioctl *ccio = (struct ccd_ioctl *)data; struct ccddevice ccd; char **cpp; struct vnode **vpp; vaddr_t min, max; if (unit >= numccd) return (ENXIO); cs = &ccd_softc[unit]; if (cmd != CCDIOCSET && !(cs->sc_flags & CCDF_INITED)) return (ENXIO); /* access control */ switch (cmd) { case CCDIOCSET: case CCDIOCCLR: case DIOCWDINFO: case DIOCSDINFO: case DIOCWLABEL: if ((flag & FWRITE) == 0) return (EBADF); } bzero(&ccd, sizeof(ccd)); switch (cmd) { case CCDIOCSET: if (cs->sc_flags & CCDF_INITED) return (EBUSY); if (ccio->ccio_ndisks == 0 || ccio->ccio_ndisks > INT_MAX || ccio->ccio_ileave < 0) return (EINVAL); if ((error = ccdlock(cs)) != 0) return (error); /* Fill in some important bits. */ ccd.ccd_unit = unit; ccd.ccd_interleave = ccio->ccio_ileave; ccd.ccd_flags = ccio->ccio_flags & CCDF_USERMASK; /* XXX the new code is unstable still */ ccd.ccd_flags |= CCDF_OLD; /* * Interleaving which is not a multiple of the click size * must use the old I/O code (by design) */ if (ccio->ccio_ileave % (PAGE_SIZE / DEV_BSIZE) != 0) ccd.ccd_flags |= CCDF_OLD; /* * Allocate space for and copy in the array of * componet pathnames and device numbers. */ cpp = malloc(ccio->ccio_ndisks * sizeof(char *), M_DEVBUF, M_WAITOK); vpp = malloc(ccio->ccio_ndisks * sizeof(struct vnode *), M_DEVBUF, M_WAITOK); error = copyin((caddr_t)ccio->ccio_disks, (caddr_t)cpp, ccio->ccio_ndisks * sizeof(char **)); if (error) { free(vpp, M_DEVBUF); free(cpp, M_DEVBUF); ccdunlock(cs); return (error); } for (i = 0; i < ccio->ccio_ndisks; ++i) { CCD_DPRINTF(CCDB_INIT, ("ccdioctl: component %d: %p, lookedup = %d\n", i, cpp[i], lookedup)); if ((error = ccdlookup(cpp[i], p, &vpp[i])) != 0) { for (j = 0; j < lookedup; ++j) (void)vn_close(vpp[j], FREAD|FWRITE, p->p_ucred, p); free(vpp, M_DEVBUF); free(cpp, M_DEVBUF); ccdunlock(cs); return (error); } ++lookedup; } ccd.ccd_cpp = cpp; ccd.ccd_vpp = vpp; ccd.ccd_ndev = ccio->ccio_ndisks; /* * Initialize the ccd. Fills in the softc for us. */ if ((error = ccdinit(&ccd, cpp, p)) != 0) { for (j = 0; j < lookedup; ++j) (void)vn_close(vpp[j], FREAD|FWRITE, p->p_ucred, p); bzero(&ccd_softc[unit], sizeof(struct ccd_softc)); free(vpp, M_DEVBUF); free(cpp, M_DEVBUF); ccdunlock(cs); return (error); } /* * The ccd has been successfully initialized, so * we can place it into the array. Don't try to * read the disklabel until the disk has been attached, * because space for the disklabel is allocated * in disk_attach(); */ bcopy(&ccd, &ccddevs[unit], sizeof(ccd)); ccio->ccio_unit = unit; ccio->ccio_size = cs->sc_size; /* * If we use the optimized protocol we need some kvm space * for the component buffers. Allocate it here. * * XXX I'd like to have a more dynamic way of acquiring kvm * XXX space, but that is problematic as we are not allowed * XXX to lock the kernel_map in interrupt context. It is * XXX doable via a freelist implementation though. */ if (!ccdmap && !(ccd.ccd_flags & CCDF_OLD)) { min = vm_map_min(kernel_map); ccdmap = uvm_km_suballoc(kernel_map, &min, &max, CCD_CLUSTERS * MAXBSIZE, VM_MAP_INTRSAFE, FALSE, NULL); } /* Attach the disk. */ cs->sc_dkdev.dk_name = cs->sc_xname; disk_attach(&cs->sc_dkdev); /* Try and read the disklabel. 
*/ ccdgetdisklabel(dev, cs, cs->sc_dkdev.dk_label, cs->sc_dkdev.dk_cpulabel, 0); ccdunlock(cs); break; case CCDIOCCLR: if ((error = ccdlock(cs)) != 0) return (error); /* * Don't unconfigure if any other partitions are open * or if both the character and block flavors of this * partition are open. */ part = DISKPART(dev); pmask = (1 << part); if ((cs->sc_dkdev.dk_openmask & ~pmask) || ((cs->sc_dkdev.dk_bopenmask & pmask) && (cs->sc_dkdev.dk_copenmask & pmask))) { ccdunlock(cs); return (EBUSY); } /* * Free ccd_softc information and clear entry. */ /* Close the components and free their pathnames. */ for (i = 0; i < cs->sc_nccdisks; ++i) { /* * XXX: this close could potentially fail and * cause Bad Things. Maybe we need to force * the close to happen? */ #ifdef DIAGNOSTIC CCD_DCALL(CCDB_VNODE, vprint("CCDIOCCLR: vnode info", cs->sc_cinfo[i].ci_vp)); #endif (void)vn_close(cs->sc_cinfo[i].ci_vp, FREAD|FWRITE, p->p_ucred, p); free(cs->sc_cinfo[i].ci_path, M_DEVBUF); } /* Free interleave index. */ for (i = 0; cs->sc_itable[i].ii_ndisk; ++i) free(cs->sc_itable[i].ii_index, M_DEVBUF); /* Free component info and interleave table. */ free(cs->sc_cinfo, M_DEVBUF); free(cs->sc_itable, M_DEVBUF); cs->sc_flags &= ~CCDF_INITED; /* * Free ccddevice information and clear entry. */ free(ccddevs[unit].ccd_cpp, M_DEVBUF); free(ccddevs[unit].ccd_vpp, M_DEVBUF); bcopy(&ccd, &ccddevs[unit], sizeof(ccd)); /* Detach the disk. */ disk_detach(&cs->sc_dkdev); /* This must be atomic. */ s = splhigh(); ccdunlock(cs); bzero(cs, sizeof(struct ccd_softc)); splx(s); break; case DIOCGPDINFO: { struct cpu_disklabel osdep; if ((error = ccdlock(cs)) != 0) return (error); ccdgetdisklabel(dev, cs, (struct disklabel *)data, &osdep, 1); ccdunlock(cs); break; } case DIOCGDINFO: *(struct disklabel *)data = *(cs->sc_dkdev.dk_label); break; case DIOCGPART: ((struct partinfo *)data)->disklab = cs->sc_dkdev.dk_label; ((struct partinfo *)data)->part = &cs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)]; break; case DIOCWDINFO: case DIOCSDINFO: if ((error = ccdlock(cs)) != 0) return (error); cs->sc_flags |= CCDF_LABELLING; error = setdisklabel(cs->sc_dkdev.dk_label, (struct disklabel *)data, 0, cs->sc_dkdev.dk_cpulabel); if (error == 0) { if (cmd == DIOCWDINFO) error = writedisklabel(CCDLABELDEV(dev), ccdstrategy, cs->sc_dkdev.dk_label, cs->sc_dkdev.dk_cpulabel); } cs->sc_flags &= ~CCDF_LABELLING; ccdunlock(cs); if (error) return (error); break; case DIOCWLABEL: if (*(int *)data != 0) cs->sc_flags |= CCDF_WLABEL; else cs->sc_flags &= ~CCDF_WLABEL; break; default: return (ENOTTY); } return (0); }
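/*
 * [illustrative sketch] The CCDIOCSET path above follows a common driver
 * pattern: acquire the components one at a time and, on the first failure,
 * unwind everything acquired so far before returning. A minimal userspace
 * sketch of that unwind pattern, with file descriptors standing in for the
 * vnodes (the names and the open flags here are assumptions, not the ccd
 * driver's API):
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int
open_components(char *const paths[], int n, int *fds)
{
    int i, j, fd;

    for (i = 0; i < n; i++) {
        fd = open(paths[i], O_RDONLY);
        if (fd == -1) {
            /* Unwind the components opened so far. */
            for (j = 0; j < i; j++)
                close(fds[j]);
            return -1;
        }
        fds[i] = fd;
    }
    return 0;
}

int
main(int argc, char *argv[])
{
    int i, *fds;

    if (argc < 2) {
        fprintf(stderr, "usage: %s path...\n", argv[0]);
        return 1;
    }
    fds = malloc((argc - 1) * sizeof(*fds));
    if (fds == NULL)
        return 1;
    if (open_components(argv + 1, argc - 1, fds) != 0) {
        perror("open");
        free(fds);
        return 1;
    }
    printf("all %d components opened\n", argc - 1);
    for (i = 0; i < argc - 1; i++)
        close(fds[i]);
    free(fds);
    return 0;
}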
/* * Gets called when the softcall queue is not moving forward. We choose * a CPU to poke, excluding the ones that have already been poked. */ static int softcall_choose_cpu() { cpu_t *cplist = CPU; cpu_t *cp; int intr_load = INT_MAX; int cpuid = -1; cpuset_t poke; int s; ASSERT(getpil() >= DISP_LEVEL); ASSERT(ncpus > 1); ASSERT(MUTEX_HELD(&softcall_lock)); CPUSET_ZERO(poke); /* * The hint is to start from the current CPU. */ cp = cplist; do { if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) || (cp->cpu_flags & CPU_ENABLE) == 0) continue; /* if CPU is not busy */ if (cp->cpu_intrload == 0) { cpuid = cp->cpu_id; break; } if (cp->cpu_intrload < intr_load) { cpuid = cp->cpu_id; intr_load = cp->cpu_intrload; } else if (cp->cpu_intrload == intr_load) { /* * We want to poke CPUs with similar * load because we don't know which CPU can * acknowledge a level-1 interrupt. The * list of such CPUs should not be large. */ if (cpuid != -1) { /* * Also add the CPU chosen earlier, * since it has the same interrupt load. */ CPUSET_ADD(poke, cpuid); cpuid = -1; } CPUSET_ADD(poke, cp->cpu_id); } } while ((cp = cp->cpu_next_onln) != cplist); /* if we found a single CPU which is the best one to poke */ if (cpuid != -1) { CPUSET_ZERO(poke); CPUSET_ADD(poke, cpuid); } if (CPUSET_ISNULL(poke)) { mutex_exit(&softcall_lock); return (0); } /* * We first set the bit in the cpuset and then poke. */ CPUSET_XOR(*softcall_cpuset, poke); mutex_exit(&softcall_lock); /* * If softcall() was called at low pil then we may * get preempted before we raise PIL. It should be okay * because we are just going to poke CPUs now or at most * another thread may start choosing CPUs in this routine. */ s = splhigh(); siron_poke_cpu(poke); splx(s); return (1); }
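/*
 * [illustrative sketch] The selection policy above can be hard to follow
 * inside the cpuset macros, so here it is in plain userspace C: walk the
 * candidates, take the first idle one outright, otherwise remember the lowest
 * interrupt load seen and collect every candidate tied at that load. Array
 * indices stand in for CPU ids and a bitmask stands in for the cpuset; this is
 * a cleaned-up variant of the policy, not the kernel code itself.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define NCPU 8

static uint32_t
choose_targets(const int load[], uint32_t already_poked, int n)
{
    uint32_t poke = 0;
    int best = INT_MAX;
    int chosen = -1;
    int i;

    for (i = 0; i < n; i++) {
        if (already_poked & (1u << i))  /* skip CPUs already poked */
            continue;
        if (load[i] == 0)               /* idle CPU: take it and stop */
            return (1u << i);
        if (load[i] < best) {           /* new best: restart the tie set */
            best = load[i];
            chosen = i;
            poke = 0;
        } else if (load[i] == best) {   /* tie: collect all of them */
            if (chosen != -1) {
                poke |= 1u << chosen;
                chosen = -1;
            }
            poke |= 1u << i;
        }
    }
    if (chosen != -1)                   /* a unique best CPU wins outright */
        return (1u << chosen);
    return poke;
}

int
main(void)
{
    int load[NCPU] = { 5, 3, 7, 3, 9, 4, 3, 8 };
    uint32_t mask;

    /* CPU 0 is already poked; CPUs 1, 3 and 6 tie at the lowest load. */
    mask = choose_targets(load, 0x01, NCPU);
    printf("poke mask: 0x%02x\n", (unsigned)mask);      /* prints 0x4a */
    return 0;
}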
static void apix_intr_thread_epilog(struct cpu *cpu, uint_t oldpil) { struct machcpu *mcpu = &cpu->cpu_m; kthread_t *t, *it = cpu->cpu_thread; uint_t pil, basespl; hrtime_t intrtime; hrtime_t now = tsc_read(); pil = it->t_pil; cpu->cpu_stats.sys.intr[pil - 1]++; ASSERT(cpu->cpu_intr_actv & (1 << pil)); cpu->cpu_intr_actv &= ~(1 << pil); ASSERT(it->t_intr_start != 0); intrtime = now - it->t_intr_start; mcpu->intrstat[pil][0] += intrtime; cpu->cpu_intracct[cpu->cpu_mstate] += intrtime; /* * If there is still an interrupted thread underneath this one * then the interrupt was never blocked and the return is * fairly simple. Otherwise it isn't. */ if ((t = it->t_intr) == NULL) { /* * The interrupted thread is no longer pinned underneath * the interrupt thread. This means the interrupt must * have blocked, and the interrupted thread has been * unpinned, and has probably been running around the * system for a while. * * Since there is no longer a thread under this one, put * this interrupt thread back on the CPU's free list and * resume the idle thread which will dispatch the next * thread to run. */ cpu->cpu_stats.sys.intrblk++; /* * Put thread back on the interrupt thread list. * This was an interrupt thread, so set CPU's base SPL. */ set_base_spl(); basespl = cpu->cpu_base_spl; mcpu->mcpu_pri = basespl; (*setlvlx)(basespl, 0); it->t_state = TS_FREE; /* * Return interrupt thread to pool */ it->t_link = cpu->cpu_intr_thread; cpu->cpu_intr_thread = it; (void) splhigh(); sti(); swtch(); /*NOTREACHED*/ panic("dosoftint_epilog: swtch returned"); } /* * Return interrupt thread to the pool */ it->t_link = cpu->cpu_intr_thread; cpu->cpu_intr_thread = it; it->t_state = TS_FREE; cpu->cpu_thread = t; if (t->t_flag & T_INTR_THREAD) t->t_intr_start = now; basespl = cpu->cpu_base_spl; mcpu->mcpu_pri = MAX(oldpil, basespl); (*setlvlx)(mcpu->mcpu_pri, 0); }
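/*
 * [illustrative sketch] The "return interrupt thread to the pool" steps above
 * are just a LIFO free list threaded through t_link, so putting a thread back
 * and taking the next one are both O(1) and need no allocation. A minimal
 * userspace sketch of that list (the structures are invented stand-ins, not
 * the real kthread_t/cpu_t):
 */
#include <stddef.h>
#include <stdio.h>

struct demo_thread {
    int id;
    struct demo_thread *link;           /* free-list linkage, cf. t_link */
};

struct demo_cpu {
    struct demo_thread *intr_thread;    /* head of the per-CPU free list */
};

static void
pool_put(struct demo_cpu *cpu, struct demo_thread *t)
{
    t->link = cpu->intr_thread;
    cpu->intr_thread = t;
}

static struct demo_thread *
pool_get(struct demo_cpu *cpu)
{
    struct demo_thread *t = cpu->intr_thread;

    if (t != NULL)
        cpu->intr_thread = t->link;
    return t;
}

int
main(void)
{
    struct demo_thread a = { 1, NULL }, b = { 2, NULL };
    struct demo_cpu cpu = { NULL };

    pool_put(&cpu, &a);
    pool_put(&cpu, &b);
    printf("first out: %d\n", pool_get(&cpu)->id);      /* 2: LIFO order */
    printf("next out:  %d\n", pool_get(&cpu)->id);      /* 1 */
    return 0;
}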
void cpu_reboot(int howto, char *bootstr) { static int waittime = -1; /* Take a snapshot before clobbering any registers. */ savectx(curpcb); /* If "always halt" was specified as a boot flag, obey. */ if (boothowto & RB_HALT) howto |= RB_HALT; boothowto = howto; /* If system is cold, just halt. */ if (cold) { boothowto |= RB_HALT; goto haltsys; } if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) { waittime = 0; /* * Synchronize the disks.... */ vfs_shutdown(); /* * If we've been adjusting the clock, the todr * will be out of synch; adjust it now. */ resettodr(); } /* Disable interrupts. */ splhigh(); if (boothowto & RB_DUMP) dumpsys(); haltsys: /* Run any shutdown hooks. */ doshutdownhooks(); pmf_system_shutdown(boothowto); #if 0 if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN) if (board && board->ab_poweroff) board->ab_poweroff(); #endif /* * Firmware may autoboot (depending on settings), and we cannot pass * flags to it (at least I haven't figured out how to yet), so * we "pseudo-halt" now. */ if (boothowto & RB_HALT) { printf("\n"); printf("The operating system has halted.\n"); printf("Please press any key to reboot.\n\n"); cnpollc(1); /* For proper keyboard command handling */ cngetc(); cnpollc(0); } printf("resetting board...\n\n"); mips_icache_sync_all(); mips_dcache_wbinv_all(); ingenic_reset(); __asm volatile("jr %0" :: "r"(MIPS_RESET_EXC_VEC)); printf("Oops, back from reset\n\nSpinning..."); for (;;) /* spin forever */ ; /* XXX */ /*NOTREACHED*/ }
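/*
 * [illustrative sketch] The flag juggling at the top of cpu_reboot() is easy
 * to get wrong, so here is the same decision logic in a standalone userspace
 * sketch: an "always halt" boot flag forces a halt whatever was requested, a
 * cold system halts without syncing or dumping, and RB_NOSYNC suppresses the
 * sync step. The flag values and the printed actions are placeholders, not
 * the real machine-dependent code.
 */
#include <stdio.h>

#define RB_HALT   0x01
#define RB_NOSYNC 0x02
#define RB_DUMP   0x04

static int boothowto = RB_HALT;         /* pretend "always halt" was set at boot */

static void
demo_reboot(int howto, int cold)
{
    if (boothowto & RB_HALT)            /* the boot-time flag wins */
        howto |= RB_HALT;
    boothowto = howto;

    if (cold) {                         /* nothing worth syncing yet */
        boothowto |= RB_HALT;
        goto haltsys;
    }
    if ((boothowto & RB_NOSYNC) == 0)
        puts("syncing disks and resetting the time-of-day clock");
    if (boothowto & RB_DUMP)
        puts("dumping core");
haltsys:
    puts((boothowto & RB_HALT) ? "halting" : "resetting board");
}

int
main(void)
{
    demo_reboot(RB_DUMP, 0);            /* halts anyway: the boot flag forces it */
    return 0;
}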
mcount() { register char *selfpc; /* r11 => r5 */ register unsigned short *frompcindex; /* r10 => r4 */ register struct tostruct *top; /* r9 => r3 */ register struct tostruct *prevtop; /* r8 => r2 */ register long toindex; /* r7 => r1 */ static int s; #ifdef lint selfpc = (char *)0; frompcindex = 0; #else not lint /* * find the return address for mcount, * and the return address for mcount's caller. */ asm(" .text"); /* make sure we're in text space */ asm(" movl (sp), r11"); /* selfpc = ... (jsb frame) */ asm(" movl 16(fp), r10"); /* frompcindex = (calls frame) */ #endif not lint /* * check that we are profiling */ if (profiling) { goto out; } /* * insure that we cannot be recursively invoked. * this requires that splhigh() and splx() below * do NOT call mcount! */ s = splhigh(); /* * check that frompcindex is a reasonable pc value. * for example: signal catchers get called from the stack, * not from text space. too bad. */ frompcindex = (unsigned short *)((long)frompcindex - (long)s_lowpc); if ((unsigned long)frompcindex > s_textsize) { goto done; } frompcindex = &froms[((long)frompcindex) / (HASHFRACTION * sizeof(*froms))]; toindex = *frompcindex; if (toindex == 0) { /* * first time traversing this arc */ toindex = ++tos[0].link; if (toindex >= tolimit) { goto overflow; } *frompcindex = toindex; top = &tos[toindex]; top->selfpc = selfpc; top->count = 1; top->link = 0; goto done; } top = &tos[toindex]; if (top->selfpc == selfpc) { /* * arc at front of chain; usual case. */ top->count++; goto done; } /* * have to go looking down chain for it. * top points to what we are looking at, * prevtop points to previous top. * we know it is not at the head of the chain. */ for (; /* goto done */; ) { if (top->link == 0) { /* * top is end of the chain and none of the chain * had top->selfpc == selfpc. * so we allocate a new tostruct * and link it to the head of the chain. */ toindex = ++tos[0].link; if (toindex >= tolimit) { goto overflow; } top = &tos[toindex]; top->selfpc = selfpc; top->count = 1; top->link = *frompcindex; *frompcindex = toindex; goto done; } /* * otherwise, check the next arc on the chain. */ prevtop = top; top = &tos[top->link]; if (top->selfpc == selfpc) { /* * there it is. * increment its count * move it to the head of the chain. */ top->count++; toindex = prevtop->link; prevtop->link = top->link; top->link = *frompcindex; *frompcindex = toindex; goto done; } } done: splx(s); /* and fall through */ out: asm(" rsb"); overflow: profiling = 3; printf("mcount: tos overflow\n"); goto out; }
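/*
 * [illustrative sketch] The heart of mcount() is the froms[]/tos[] structure:
 * the caller's address hashes into froms[], each froms[] slot heads a chain of
 * tostruct arcs (one per distinct callee seen from that call site), a hit
 * bumps the arc count and moves the arc to the front of its chain, and
 * tos[0].link doubles as the allocation cursor. A small userspace sketch with
 * tiny tables and integer "addresses"; the sizes and the hash are simplified
 * assumptions, not the real profiler parameters.
 */
#include <stdio.h>

#define NFROMS  16
#define NTOS    32

struct tostruct {
    unsigned long selfpc;       /* callee address */
    long count;                 /* number of times this arc was traversed */
    unsigned short link;        /* next arc out of the same call site */
};

static unsigned short froms[NFROMS];
static struct tostruct tos[NTOS];       /* tos[0].link is the allocation cursor */

static void
record_arc(unsigned long frompc, unsigned long selfpc)
{
    unsigned short *fromp = &froms[frompc % NFROMS];
    unsigned short ndx, prev;

    for (prev = 0, ndx = *fromp; ndx != 0; prev = ndx, ndx = tos[ndx].link) {
        if (tos[ndx].selfpc == selfpc) {
            tos[ndx].count++;
            if (prev != 0) {            /* move the arc to the chain head */
                tos[prev].link = tos[ndx].link;
                tos[ndx].link = *fromp;
                *fromp = ndx;
            }
            return;
        }
    }
    if (tos[0].link + 1 >= NTOS) {      /* arc table overflow: give up */
        fprintf(stderr, "record_arc: tos overflow\n");
        return;
    }
    ndx = ++tos[0].link;                /* allocate a fresh arc */
    tos[ndx].selfpc = selfpc;
    tos[ndx].count = 1;
    tos[ndx].link = *fromp;
    *fromp = ndx;
}

int
main(void)
{
    unsigned short i;

    record_arc(0x400100, 0x400800);
    record_arc(0x400100, 0x400800);
    record_arc(0x400100, 0x400900);
    for (i = 1; i <= tos[0].link; i++)
        printf("callee 0x%lx: %ld call(s)\n", tos[i].selfpc, tos[i].count);
    return 0;
}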
/* * General trap (exception) handling function for mips. * This is called by the assembly-language exception handler once * the trapframe has been set up. */ void mips_trap(struct trapframe *tf) { uint32_t code; bool isutlb, iskern; int spl; /* The trap frame is supposed to be 37 registers long. */ KASSERT(sizeof(struct trapframe)==(37*4)); /* * Extract the exception code info from the register fields. */ code = (tf->tf_cause & CCA_CODE) >> CCA_CODESHIFT; isutlb = (tf->tf_cause & CCA_UTLB) != 0; iskern = (tf->tf_status & CST_KUp) == 0; KASSERT(code < NTRAPCODES); /* Make sure we haven't run off our stack */ if (curthread != NULL && curthread->t_stack != NULL) { KASSERT((vaddr_t)tf > (vaddr_t)curthread->t_stack); KASSERT((vaddr_t)tf < (vaddr_t)(curthread->t_stack + STACK_SIZE)); } /* Interrupt? Call the interrupt handler and return. */ if (code == EX_IRQ) { int old_in; bool doadjust; old_in = curthread->t_in_interrupt; curthread->t_in_interrupt = 1; /* * The processor has turned interrupts off; if the * currently recorded interrupt state is interrupts on * (spl of 0), adjust the recorded state to match, and * restore after processing the interrupt. * * How can we get an interrupt if the recorded state * is interrupts off? Well, as things currently stand * when the CPU finishes idling it flips interrupts on * and off to allow things to happen, but leaves * curspl high while doing so. * * While we're here, assert that the interrupt * handling code hasn't leaked a spinlock or an * splhigh(). */ if (curthread->t_curspl == 0) { KASSERT(curthread->t_curspl == 0); KASSERT(curthread->t_iplhigh_count == 0); curthread->t_curspl = IPL_HIGH; curthread->t_iplhigh_count++; doadjust = true; } else { doadjust = false; } mainbus_interrupt(tf); if (doadjust) { KASSERT(curthread->t_curspl == IPL_HIGH); KASSERT(curthread->t_iplhigh_count == 1); curthread->t_iplhigh_count--; curthread->t_curspl = 0; } curthread->t_in_interrupt = old_in; goto done2; } /* * The processor turned interrupts off when it took the trap. * * While we're in the kernel, and not actually handling an * interrupt, restore the interrupt state to where it was in * the previous context, which may be low (interrupts on). * * Do this by forcing splhigh(), which may do a redundant * cpu_irqoff() but forces the stored MI interrupt state into * sync, then restoring the previous state. */ spl = splhigh(); splx(spl); /* Syscall? Call the syscall handler and return. */ if (code == EX_SYS) { /* Interrupts should have been on while in user mode. */ KASSERT(curthread->t_curspl == 0); KASSERT(curthread->t_iplhigh_count == 0); DEBUG(DB_SYSCALL, "syscall: #%d, args %x %x %x %x\n", tf->tf_v0, tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3); syscall(tf); goto done; } /* * Ok, it wasn't any of the really easy cases. * Call vm_fault on the TLB exceptions. * Panic on the bus error exceptions. */ switch (code) { case EX_MOD: if (vm_fault(VM_FAULT_READONLY, tf->tf_vaddr)==0) { goto done; } break; case EX_TLBL: if (vm_fault(VM_FAULT_READ, tf->tf_vaddr)==0) { goto done; } break; case EX_TLBS: if (vm_fault(VM_FAULT_WRITE, tf->tf_vaddr)==0) { goto done; } break; case EX_IBE: case EX_DBE: /* * This means you loaded invalid TLB entries, or * touched invalid parts of the direct-mapped * segments. These are serious kernel errors, so * panic. * * The MIPS won't even tell you what invalid address * caused the bus error. 
*/ panic("Bus error exception, PC=0x%x\n", tf->tf_epc); break; } /* * If we get to this point, it's a fatal fault - either it's * one of the other exceptions, like illegal instruction, or * it was a page fault we couldn't handle. */ if (!iskern) { /* * Fatal fault in user mode. * Kill the current user process. */ kill_curthread(tf->tf_epc, code, tf->tf_vaddr); goto done; } /* * Fatal fault in kernel mode. * * If pcb_badfaultfunc is set, we do not panic; badfaultfunc is * set by copyin/copyout and related functions to signify that * the addresses they're accessing are userlevel-supplied and * not trustable. What we actually want to do is resume * execution at the function pointed to by badfaultfunc. That's * going to be "copyfail" (see copyinout.c), which longjmps * back to copyin/copyout or wherever and returns EFAULT. * * Note that we do not just *call* this function, because that * won't necessarily do anything. We want the control flow * that is currently executing in copyin (or whichever), and * is stopped while we process the exception, to *teleport* to * copyfail. * * This is accomplished by changing tf->tf_epc and returning * from the exception handler. */ if (curthread != NULL && curthread->t_machdep.tm_badfaultfunc != NULL) { tf->tf_epc = (vaddr_t) curthread->t_machdep.tm_badfaultfunc; goto done; } /* * Really fatal kernel-mode fault. */ kprintf("panic: Fatal exception %u (%s) in kernel mode\n", code, trapcodenames[code]); kprintf("panic: EPC 0x%x, exception vaddr 0x%x\n", tf->tf_epc, tf->tf_vaddr); panic("I can't handle this... I think I'll just die now...\n"); done: /* * Turn interrupts off on the processor, without affecting the * stored interrupt state. */ cpu_irqoff(); done2: /* * The boot thread can get here (e.g. on interrupt return) but * since it doesn't go to userlevel, it can't be returning to * userlevel, so there's no need to set cputhreads[] and * cpustacks[]. Just return. */ if (curthread->t_stack == NULL) { return; } if(!iskern){ pid_executeflags(curthread->t_pid); } cputhreads[curcpu->c_number] = (vaddr_t)curthread; cpustacks[curcpu->c_number] = (vaddr_t)curthread->t_stack + STACK_SIZE; /* * This assertion will fail if either * (1) curthread->t_stack is corrupted, or * (2) the trap frame is somehow on the wrong kernel stack. * * If cpustacks[] is corrupted, the next trap back to the * kernel will (most likely) hang the system, so it's better * to find out now. */ KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf)); }
/* * Create a new thread based on an existing one. * The new thread has name NAME, and starts executing in function FUNC. * DATA1 and DATA2 are passed to FUNC. */ int thread_fork(const char *name, void *data1, unsigned long data2, void (*func)(void *, unsigned long), struct thread **ret) { struct thread *newguy; int s, result; /* Allocate a thread */ newguy = thread_create(name); if (newguy==NULL) { return ENOMEM; } /* Allocate a stack */ newguy->t_stack = kmalloc(STACK_SIZE); if (newguy->t_stack==NULL) { kfree(newguy->t_name); kfree(newguy); return ENOMEM; } /* stick a magic number on the bottom end of the stack */ newguy->t_stack[0] = 0xae; newguy->t_stack[1] = 0x11; newguy->t_stack[2] = 0xda; newguy->t_stack[3] = 0x33; /* Inherit the current directory */ if (curthread->t_cwd != NULL) { VOP_INCREF(curthread->t_cwd); newguy->t_cwd = curthread->t_cwd; } #if OPT_A2 result = conSetup(newguy); if(result) goto exit; // pid pid_t pid = newguy->pid; struct process* child = p_table[pid]; assert(child != NULL); if (call_from_fork){ int i; for (i = 3 ; i < MAX_FILE ; ++i) { if (curthread->ft[i] != NULL) { newguy->ft[i] = (struct filetable*)copy_ft(curthread->ft[i]); if (newguy->ft[i] == NULL) return ENOMEM; } } child->ppid = curthread->pid; call_from_fork = 0; } assert(call_from_fork == 0); #endif /* Set up the pcb (this arranges for func to be called) */ md_initpcb(&newguy->t_pcb, newguy->t_stack, data1, data2, func); /* Interrupts off for atomicity */ s = splhigh(); /* * Make sure our data structures have enough space, so we won't * run out later at an inconvenient time. */ result = array_preallocate(sleepers, numthreads+1); if (result) { goto fail; } result = array_preallocate(zombies, numthreads+1); if (result) { goto fail; } /* Do the same for the scheduler. */ result = scheduler_preallocate(numthreads+1); if (result) { goto fail; } /* Make the new thread runnable */ result = make_runnable(newguy); if (result != 0) { goto fail; } //kprintf("gonna run fork func\n"); /* * Increment the thread counter. This must be done atomically * with the preallocate calls; otherwise the count can be * temporarily too low, which would obviate its reason for * existence. */ numthreads++; /* Done with stuff that needs to be atomic */ splx(s); /* * Return new thread structure if it's wanted. Note that * using the thread structure from the parent thread should be * done only with caution, because in general the child thread * might exit at any time. */ if (ret != NULL) { *ret = newguy; //kprintf("new thread returns\n"); } // kprintf("comfirm the new pid is %d\n",(*ret)->pid); return 0; fail: splx(s); exit: if (newguy->t_cwd != NULL) { VOP_DECREF(newguy->t_cwd); } kfree(newguy->t_stack); kfree(newguy->t_name); kfree(newguy); return result; }
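/*
 * [illustrative sketch] The array_preallocate/scheduler_preallocate calls
 * above implement a "reserve, then commit" discipline: every allocation that
 * can fail happens before interrupts are disabled, so the steps inside the
 * critical section cannot run out of memory. A minimal userspace sketch of
 * that discipline with a growable array; the vec type and its operations are
 * invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct vec {
    int *items;
    size_t num;
    size_t max;
};

/* May fail; call it only before entering the critical section. */
static int
vec_reserve(struct vec *v, size_t want)
{
    int *p;

    if (v->max >= want)
        return 0;
    p = realloc(v->items, want * sizeof(*p));
    if (p == NULL)
        return -1;
    v->items = p;
    v->max = want;
    return 0;
}

/* Cannot fail, provided vec_reserve() was called first. */
static void
vec_add(struct vec *v, int item)
{
    v->items[v->num++] = item;
}

int
main(void)
{
    struct vec sleepers = { NULL, 0, 0 };

    if (vec_reserve(&sleepers, sleepers.num + 1) != 0) {
        fprintf(stderr, "out of memory before the critical section\n");
        return 1;
    }
    /* --- interrupts would be disabled here (splhigh) --- */
    vec_add(&sleepers, 42);
    /* --- and restored here (splx) --- */
    printf("committed %zu item(s)\n", sleepers.num);
    free(sleepers.items);
    return 0;
}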
static int gusisa_probe(device_t dev) { device_t child; struct resource *res, *res2; int base, rid, rid2, s, flags; unsigned char val; base = isa_get_port(dev); flags = device_get_flags(dev); rid = 1; res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, base + 0x100, base + 0x107, 8, RF_ACTIVE); if (res == NULL) return ENXIO; res2 = NULL; /* * Check for the presence of some GUS card. Reset the card, * then see if we can access the memory on it. */ port_wr(res, 3, 0x4c); port_wr(res, 5, 0); DELAY(30 * 1000); port_wr(res, 3, 0x4c); port_wr(res, 5, 1); DELAY(30 * 1000); s = splhigh(); /* Write to DRAM. */ port_wr(res, 3, 0x43); /* Register select */ port_wr(res, 4, 0); /* Low addr */ port_wr(res, 5, 0); /* Med addr */ port_wr(res, 3, 0x44); /* Register select */ port_wr(res, 4, 0); /* High addr */ port_wr(res, 7, 0x55); /* DRAM */ /* Read from DRAM. */ port_wr(res, 3, 0x43); /* Register select */ port_wr(res, 4, 0); /* Low addr */ port_wr(res, 5, 0); /* Med addr */ port_wr(res, 3, 0x44); /* Register select */ port_wr(res, 4, 0); /* High addr */ val = port_rd(res, 7); /* DRAM */ splx(s); if (val != 0x55) goto fail; rid2 = 0; res2 = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid2, base, base, 1, RF_ACTIVE); if (res2 == NULL) goto fail; s = splhigh(); port_wr(res2, 0x0f, 0x20); val = port_rd(res2, 0x0f); splx(s); if (val == 0xff || (val & 0x06) == 0) val = 0; else { val = port_rd(res2, 0x506); /* XXX Out of range. */ if (val == 0xff) val = 0; } bus_release_resource(dev, SYS_RES_IOPORT, rid2, res2); bus_release_resource(dev, SYS_RES_IOPORT, rid, res); if (val >= 10) { struct sndcard_func *func; /* Looks like a GUS MAX. Set the rest of the resources. */ bus_set_resource(dev, SYS_RES_IOPORT, 2, base + 0x10c, 8); if (flags & DV_F_DUAL_DMA) bus_set_resource(dev, SYS_RES_DRQ, 1, flags & DV_F_DRQ_MASK, 1); /* We can support the CS4231 and MIDI devices. */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) return ENOMEM; func->func = SCF_MIDI; child = device_add_child(dev, "midi", -1); device_set_ivars(child, func); func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) printf("xxx: gus pcm not attached, out of memory\n"); else { func->func = SCF_PCM; child = device_add_child(dev, "pcm", -1); device_set_ivars(child, func); } device_set_desc(dev, "Gravis UltraSound MAX"); return 0; } else { /* * TODO: Support even older GUS cards. MIDI should work on * all models. */ return ENXIO; } fail: bus_release_resource(dev, SYS_RES_IOPORT, rid, res); return ENXIO; }
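/*
 * [illustrative sketch] The probe above boils down to a classic idiom: write a
 * known pattern into memory that should exist on the card, read it back, and
 * treat a mismatch (or the 0xff of a floating bus) as "no card here". The toy
 * model below simulates that idiom; it is not the real GUS register layout or
 * the bus_space/resource API.
 */
#include <stdint.h>
#include <stdio.h>

struct sim_card {
    uint8_t dram;       /* one byte of on-card memory */
    int present;        /* is a card actually in this slot? */
};

static void
port_wr(struct sim_card *c, uint8_t val)
{
    if (c->present)
        c->dram = val;  /* writes to an empty slot just vanish */
}

static uint8_t
port_rd(const struct sim_card *c)
{
    return c->present ? c->dram : 0xff;  /* a floating bus reads back 0xff */
}

static int
probe(struct sim_card *c)
{
    port_wr(c, 0x55);               /* write the test pattern */
    return port_rd(c) == 0x55;      /* the card is there iff it reads back */
}

int
main(void)
{
    struct sim_card populated = { 0, 1 }, empty = { 0, 0 };

    printf("slot A: %s\n", probe(&populated) ? "found" : "not found");
    printf("slot B: %s\n", probe(&empty) ? "found" : "not found");
    return 0;
}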