int
pirq_irq(int pin)
{
	assert((pin > 0) && (((unsigned) pin) <= nitems(pirqs)));

	return (pirqs[pin - 1].reg & PIRQ_IRQ);
}
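/*
 * For reference: nitems() as used throughout these excerpts is the usual
 * "number of array elements" macro.  A minimal sketch of the typical
 * definition (FreeBSD and OpenBSD ship an equivalent in <sys/param.h>);
 * note it is only valid for true arrays, not pointers, so pirq_irq() above
 * checks pin against the entry count of pirqs[], not its size in bytes:
 */
#ifndef nitems
#define	nitems(x)	(sizeof((x)) / sizeof((x)[0]))
#endif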
static int bcm_bsc_attach(device_t dev) { struct bcm_bsc_softc *sc; unsigned long start; device_t gpio; int i, rid; sc = device_get_softc(dev); sc->sc_dev = dev; rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); /* Check the unit we are attaching by its base address. */ start = rman_get_start(sc->sc_mem_res); for (i = 0; i < nitems(bcm_bsc_pins); i++) { if (bcm_bsc_pins[i].start == start) break; } if (i == nitems(bcm_bsc_pins)) { device_printf(dev, "only bsc0 and bsc1 are supported\n"); return (ENXIO); } /* * Configure the GPIO pins to ALT0 function to enable BSC control * over the pins. */ gpio = devclass_get_device(devclass_find("gpio"), 0); if (!gpio) { device_printf(dev, "cannot find gpio0\n"); return (ENXIO); } bcm_gpio_set_alternate(gpio, bcm_bsc_pins[i].sda, BCM_GPIO_ALT0); bcm_gpio_set_alternate(gpio, bcm_bsc_pins[i].scl, BCM_GPIO_ALT0); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } /* Hook up our interrupt handler. */ if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_bsc_intr, sc, &sc->sc_intrhand)) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot setup the interrupt handler\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, "bcm_bsc", NULL, MTX_DEF); bcm_bsc_sysctl_init(sc); /* Enable the BSC controller. Flush the FIFO. */ BCM_BSC_LOCK(sc); BCM_BSC_WRITE(sc, BCM_BSC_CTRL, BCM_BSC_CTRL_I2CEN); bcm_bsc_reset(sc); BCM_BSC_UNLOCK(sc); device_add_child(dev, "iicbus", -1); return (bus_generic_attach(dev)); }
static int rk30_gpio_attach(device_t dev) { struct rk30_gpio_softc *sc = device_get_softc(dev); int i, rid; phandle_t gpio; unsigned long start; if (rk30_gpio_sc) return (ENXIO); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "rk30 gpio", "gpio", MTX_DEF); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); goto fail; } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); /* Check the unit we are attaching by our base address. */ sc->sc_bank = -1; start = rman_get_start(sc->sc_mem_res); for (i = 0; i < nitems(rk30_gpio_base_addr); i++) { if (rk30_gpio_base_addr[i] == start) { sc->sc_bank = i; break; } } if (sc->sc_bank == -1) { device_printf(dev, "unsupported device unit (only GPIO0..3 are supported)\n"); goto fail; } rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); goto fail; } /* Find our node. */ gpio = ofw_bus_get_node(sc->sc_dev); if (!OF_hasprop(gpio, "gpio-controller")) /* Node is not a GPIO controller. */ goto fail; /* Initialize the software controlled pins. */ for (i = 0; i < RK30_GPIO_PINS; i++) { snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME, "pin %d", i); sc->sc_gpio_pins[i].gp_pin = i; sc->sc_gpio_pins[i].gp_caps = RK30_GPIO_DEFAULT_CAPS; sc->sc_gpio_pins[i].gp_flags = rk30_gpio_get_function(sc, i); } sc->sc_gpio_npins = i; rk30_gpio_sc = sc; rk30_gpio_init(); sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) goto fail; return (0); fail: if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); mtx_destroy(&sc->sc_mtx); return (ENXIO); }
static int
xlp_rsa_init(struct xlp_rsa_softc *sc, int node)
{
	struct xlp_rsa_command *cmd = NULL;
	uint32_t fbvc, dstvc, endsel, regval;
	struct nlm_fmn_msg m;
	int err, ret, i;
	uint64_t base;

	/* Register interrupt handler for the RSA/ECC CMS messages */
	if (register_msgring_handler(sc->rsaecc_vc_start,
	    sc->rsaecc_vc_end, nlm_xlprsaecc_msgring_handler, sc) != 0) {
		err = -1;
		printf("Couldn't register rsa/ecc msgring handler\n");
		goto errout;
	}
	fbvc = nlm_cpuid() * 4 + XLPGE_FB_VC;

	/* Do the CMS credit initialization */
	/* Currently it is configured by default to 50 when kernel comes up */
#if BYTE_ORDER == LITTLE_ENDIAN
	for (i = 0; i < nitems(nlm_rsa_ucode_data); i++)
		nlm_rsa_ucode_data[i] = htobe64(nlm_rsa_ucode_data[i]);
#endif
	for (dstvc = sc->rsaecc_vc_start; dstvc <= sc->rsaecc_vc_end; dstvc++) {
		cmd = malloc(sizeof(struct xlp_rsa_command), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		KASSERT(cmd != NULL, ("%s:cmd is NULL\n", __func__));
		cmd->rsasrc = contigmalloc(sizeof(nlm_rsa_ucode_data),
		    M_DEVBUF,
		    (M_WAITOK | M_ZERO),
		    0UL /* low address */, -1UL /* high address */,
		    XLP_L2L3_CACHELINE_SIZE /* alignment */,
		    0UL /* boundary */);
		KASSERT(cmd->rsasrc != NULL,
		    ("%s:cmd->rsasrc is NULL\n", __func__));
		memcpy(cmd->rsasrc, nlm_rsa_ucode_data,
		    sizeof(nlm_rsa_ucode_data));
		m.msg[0] = nlm_crypto_form_rsa_ecc_fmn_entry0(1, 0x70, 0,
		    vtophys(cmd->rsasrc));
		m.msg[1] = nlm_crypto_form_rsa_ecc_fmn_entry1(0, 1, fbvc,
		    vtophys(cmd->rsasrc));
		/* Software scratch pad */
		m.msg[2] = (uintptr_t)cmd;
		m.msg[3] = 0;

		ret = nlm_fmn_msgsend(dstvc, 3, FMN_SWCODE_RSA, &m);
		if (ret != 0) {
			err = -1;
			printf("%s: msgsnd failed (%x)\n", __func__, ret);
			goto errout;
		}
	}
	/* Configure so that all VCs send request to all RSA pipes */
	base = nlm_get_rsa_regbase(node);
	if (nlm_is_xlp3xx()) {
		endsel = 1;
		regval = 0xFFFF;
	} else {
		endsel = 3;
		regval = 0x07FFFFFF;
	}
	for (i = 0; i < endsel; i++)
		nlm_write_rsa_reg(base, RSA_ENG_SEL_0 + i, regval);

	return (0);

errout:
	xlp_free_cmd_params(cmd);
	return (err);
}
#include <sys/param.h>
#include <sys/disklabel.h>
#include <sys/reboot.h>

#include <dev/cons.h>

#include <machine/iomod.h>

#include "dev_hppa.h"

extern int debug;

const char cdevs[][4] = {
	"ite", "", "", "", "", "", "", "",
	"", "", "", "", ""
};
const int ncdevs = nitems(cdevs);

const struct pdc_devs {
	char	name[3];
	int	dev_type;
} pdc_devs[] = {
	{ "dk",  0 },
	{ "ct",  1 },
	{ "lf",  2 },
	{ "",   -1 },
	{ "rd", -1 },
	{ "sw", -1 },
	{ "fl", -1 },
};

/* pass dev_t to the open routines */
int
rtw_cardbus_match(struct device *parent, void *match, void *aux)
{
	return (cardbus_matchbyid((struct cardbus_attach_args *)aux,
	    rtw_cardbus_devices, nitems(rtw_cardbus_devices)));
}
static void p_rtable_sysctl(int fibnum, int af) { size_t needed; int mib[7]; char *buf, *next, *lim; struct rt_msghdr *rtm; struct sockaddr *sa; int fam = AF_UNSPEC, ifindex = 0, size; int need_table_close = false; struct ifaddrs *ifap, *ifa; struct sockaddr_dl *sdl; /* * Retrieve interface list at first * since we need #ifindex -> if_xname match */ if (getifaddrs(&ifap) != 0) err(EX_OSERR, "getifaddrs"); for (ifa = ifap; ifa; ifa = ifa->ifa_next) { if (ifa->ifa_addr->sa_family != AF_LINK) continue; sdl = (struct sockaddr_dl *)ifa->ifa_addr; ifindex = sdl->sdl_index; if (ifindex >= ifmap_size) { size = roundup(ifindex + 1, 32) * sizeof(struct ifmap_entry); if ((ifmap = realloc(ifmap, size)) == NULL) errx(2, "realloc(%d) failed", size); memset(&ifmap[ifmap_size], 0, size - ifmap_size * sizeof(struct ifmap_entry)); ifmap_size = roundup(ifindex + 1, 32); } if (*ifmap[ifindex].ifname != '\0') continue; strlcpy(ifmap[ifindex].ifname, ifa->ifa_name, IFNAMSIZ); } freeifaddrs(ifap); mib[0] = CTL_NET; mib[1] = PF_ROUTE; mib[2] = 0; mib[3] = af; mib[4] = NET_RT_DUMP; mib[5] = 0; mib[6] = fibnum; if (sysctl(mib, nitems(mib), NULL, &needed, NULL, 0) < 0) err(EX_OSERR, "sysctl: net.route.0.%d.dump.%d estimate", af, fibnum); if ((buf = malloc(needed)) == NULL) errx(2, "malloc(%lu)", (unsigned long)needed); if (sysctl(mib, nitems(mib), buf, &needed, NULL, 0) < 0) err(1, "sysctl: net.route.0.%d.dump.%d", af, fibnum); lim = buf + needed; xo_open_container("route-table"); xo_open_list("rt-family"); for (next = buf; next < lim; next += rtm->rtm_msglen) { rtm = (struct rt_msghdr *)next; if (rtm->rtm_version != RTM_VERSION) continue; /* * Peek inside header to determine AF */ sa = (struct sockaddr *)(rtm + 1); /* Only print family first time. */ if (fam != sa->sa_family) { if (need_table_close) { xo_close_list("rt-entry"); xo_close_instance("rt-family"); } need_table_close = true; fam = sa->sa_family; wid_dst = WID_DST_DEFAULT(fam); wid_gw = WID_GW_DEFAULT(fam); wid_flags = 6; wid_pksent = 8; wid_mtu = 6; wid_if = WID_IF_DEFAULT(fam); wid_expire = 6; xo_open_instance("rt-family"); pr_family(fam); xo_open_list("rt-entry"); pr_rthdr(fam); } p_rtentry_sysctl("rt-entry", rtm); } if (need_table_close) { xo_close_list("rt-entry"); xo_close_instance("rt-family"); } xo_close_list("rt-family"); xo_close_container("route-table"); free(buf); }
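/*
 * Illustration only (not part of the netstat source above): p_rtable_sysctl()
 * uses the standard two-pass sysctl(3) idiom -- call once with a NULL buffer
 * to learn the required size, then allocate and fetch.  A hedged, minimal
 * sketch of that idiom for the same NET_RT_DUMP MIB; the function name is
 * hypothetical and error reporting is left to the caller:
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <net/route.h>
#include <stdlib.h>

static char *
example_fetch_rtdump(int af, int fibnum, size_t *lenp)
{
	int mib[7] = { CTL_NET, PF_ROUTE, 0, af, NET_RT_DUMP, 0, fibnum };
	size_t needed;
	char *buf;

	/* Pass 1: ask the kernel for a size estimate only. */
	if (sysctl(mib, nitems(mib), NULL, &needed, NULL, 0) < 0)
		return (NULL);
	if ((buf = malloc(needed)) == NULL)
		return (NULL);
	/* Pass 2: fetch the routing messages into the buffer. */
	if (sysctl(mib, nitems(mib), buf, &needed, NULL, 0) < 0) {
		free(buf);
		return (NULL);
	}
	*lenp = needed;
	return (buf);
}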
static struct ddp_buffer * alloc_ddp_buffer(struct tom_data *td, vm_page_t *pages, int npages, int offset, int len) { int i, hcf, seglen, idx, ppod, nppods; struct ddp_buffer *db; /* * The DDP page size is unrelated to the VM page size. We combine * contiguous physical pages into larger segments to get the best DDP * page size possible. This is the largest of the four sizes in * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in * the page list. */ hcf = 0; for (i = 0; i < npages; i++) { seglen = PAGE_SIZE; while (i < npages - 1 && pages[i]->phys_addr + PAGE_SIZE == pages[i + 1]->phys_addr) { seglen += PAGE_SIZE; i++; } hcf = calculate_hcf(hcf, seglen); if (hcf < t4_ddp_pgsz[1]) { idx = 0; goto have_pgsz; /* give up, short circuit */ } } if (hcf % t4_ddp_pgsz[0] != 0) { /* hmmm. This could only happen when PAGE_SIZE < 4K */ KASSERT(PAGE_SIZE < 4096, ("%s: PAGE_SIZE %d, hcf %d", __func__, PAGE_SIZE, hcf)); CTR3(KTR_CXGBE, "%s: PAGE_SIZE %d, hcf %d", __func__, PAGE_SIZE, hcf); return (NULL); } for (idx = nitems(t4_ddp_pgsz) - 1; idx > 0; idx--) { if (hcf % t4_ddp_pgsz[idx] == 0) break; } have_pgsz: MPASS(idx <= M_PPOD_PGSZ); db = malloc(sizeof(*db), M_CXGBE, M_NOWAIT); if (db == NULL) { CTR1(KTR_CXGBE, "%s: malloc failed.", __func__); return (NULL); } nppods = pages_to_nppods(npages, t4_ddp_pgsz[idx]); if (alloc_ppods(td, nppods, &db->ppod_addr) != 0) { free(db, M_CXGBE); CTR4(KTR_CXGBE, "%s: no pods, nppods %d, resid %d, pgsz %d", __func__, nppods, len, t4_ddp_pgsz[idx]); return (NULL); } ppod = (db->ppod_addr - td->ppod_start) / PPOD_SIZE; db->tag = V_PPOD_PGSZ(idx) | V_PPOD_TAG(ppod); db->nppods = nppods; db->npages = npages; db->pages = pages; db->offset = offset; db->len = len; CTR6(KTR_CXGBE, "New DDP buffer. " "ddp_pgsz %d, ppod 0x%x, npages %d, nppods %d, offset %d, len %d", t4_ddp_pgsz[idx], ppod, db->npages, db->nppods, db->offset, db->len); return (db); }
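/*
 * The DDP page-size selection above folds each physically contiguous segment
 * length into a running highest common factor via calculate_hcf().  A hedged
 * sketch of what such a helper can look like (plain Euclidean GCD; the actual
 * cxgbe helper may differ in style).  The first loop iteration passes hcf == 0
 * and relies on gcd(0, n) == n, which this sketch preserves:
 */
static inline int
example_calculate_hcf(int a, int b)
{
	int tmp;

	while (b != 0) {
		tmp = a % b;
		a = b;
		b = tmp;
	}
	return (a);
}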
static int write_page_pods(struct adapter *sc, struct toepcb *toep, struct ddp_buffer *db) { struct wrqe *wr; struct ulp_mem_io *ulpmc; struct ulptx_idata *ulpsc; struct pagepod *ppod; int i, j, k, n, chunk, len, ddp_pgsz, idx; u_int ppod_addr; uint32_t cmd; cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); if (is_t4(sc)) cmd |= htobe32(F_ULP_MEMIO_ORDER); else cmd |= htobe32(F_T5_ULP_MEMIO_IMM); ddp_pgsz = t4_ddp_pgsz[G_PPOD_PGSZ(db->tag)]; ppod_addr = db->ppod_addr; for (i = 0; i < db->nppods; ppod_addr += chunk) { /* How many page pods are we writing in this cycle */ n = min(db->nppods - i, NUM_ULP_TX_SC_IMM_PPODS); chunk = PPOD_SZ(n); len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); wr = alloc_wrqe(len, toep->ctrlq); if (wr == NULL) return (ENOMEM); /* ok to just bail out */ ulpmc = wrtod(wr); INIT_ULPTX_WR(ulpmc, len, 0, 0); ulpmc->cmd = cmd; ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); ulpsc = (struct ulptx_idata *)(ulpmc + 1); ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); ulpsc->len = htobe32(chunk); ppod = (struct pagepod *)(ulpsc + 1); for (j = 0; j < n; i++, j++, ppod++) { ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | V_PPOD_TID(toep->tid) | db->tag); ppod->len_offset = htobe64(V_PPOD_LEN(db->len) | V_PPOD_OFST(db->offset)); ppod->rsvd = 0; idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); for (k = 0; k < nitems(ppod->addr); k++) { if (idx < db->npages) { ppod->addr[k] = htobe64(db->pages[idx]->phys_addr); idx += ddp_pgsz / PAGE_SIZE; } else ppod->addr[k] = 0; #if 0 CTR5(KTR_CXGBE, "%s: tid %d ppod[%d]->addr[%d] = %p", __func__, toep->tid, i, k, htobe64(ppod->addr[k])); #endif } } t4_wrq_tx(sc, wr); } return (0); }
static int
command_efi_show(int argc, char *argv[])
{
	/*
	 * efi-show [-a]
	 *	print all the env
	 * efi-show -u UUID
	 *	print all the env vars tagged with UUID
	 * efi-show -v var
	 *	search all the env vars and print the ones matching var
	 * efi-show -u UUID -v var
	 * efi-show UUID var
	 *	print all the env vars that match UUID and var
	 */
	/* NB: We assume EFI_GUID is the same as uuid_t */
	int		aflag = 0, gflag = 0, lflag = 0, vflag = 0;
	int		ch, rv;
	unsigned	i;
	EFI_STATUS	status;
	EFI_GUID	varguid = { 0,0,0,{0,0,0,0,0,0,0,0} };
	EFI_GUID	matchguid = { 0,0,0,{0,0,0,0,0,0,0,0} };
	uint32_t	uuid_status;
	CHAR16		*varname;
	CHAR16		*newnm;
	CHAR16		varnamearg[128];
	UINTN		varalloc;
	UINTN		varsz;

	while ((ch = getopt(argc, argv, "ag:lv:")) != -1) {
		switch (ch) {
		case 'a':
			aflag = 1;
			break;
		case 'g':
			gflag = 1;
			uuid_from_string(optarg, (uuid_t *)&matchguid,
			    &uuid_status);
			if (uuid_status != uuid_s_ok) {
				printf("uuid %s could not be parsed\n", optarg);
				return (CMD_ERROR);
			}
			break;
		case 'l':
			lflag = 1;
			break;
		case 'v':
			vflag = 1;
			if (strlen(optarg) >= nitems(varnamearg)) {
				printf("Variable %s is longer than %zd characters\n",
				    optarg, nitems(varnamearg));
				return (CMD_ERROR);
			}
			for (i = 0; i < strlen(optarg); i++)
				varnamearg[i] = optarg[i];
			varnamearg[i] = 0;
			break;
		default:
			printf("Invalid argument %c\n", ch);
			return (CMD_ERROR);
		}
	}

	if (aflag && (gflag || vflag)) {
		printf("-a isn't compatible with -v or -u\n");
		return (CMD_ERROR);
	}

	if (aflag && optind < argc) {
		printf("-a doesn't take any args\n");
		return (CMD_ERROR);
	}

	if (optind == argc)
		aflag = 1;

	argc -= optind;
	argv += optind;

	pager_open();
	if (vflag && gflag) {
		rv = efi_print_var(varnamearg, &matchguid, lflag);
		pager_close();
		return (rv);
	}

	if (argc == 2) {
		optarg = argv[0];
		if (strlen(optarg) >= nitems(varnamearg)) {
			printf("Variable %s is longer than %zd characters\n",
			    optarg, nitems(varnamearg));
			pager_close();
			return (CMD_ERROR);
		}
		for (i = 0; i < strlen(optarg); i++)
			varnamearg[i] = optarg[i];
		varnamearg[i] = 0;
		optarg = argv[1];
		uuid_from_string(optarg, (uuid_t *)&matchguid, &uuid_status);
		if (uuid_status != uuid_s_ok) {
			printf("uuid %s could not be parsed\n", optarg);
			pager_close();
			return (CMD_ERROR);
		}
		rv = efi_print_var(varnamearg, &matchguid, lflag);
		pager_close();
		return (rv);
	}

	if (argc > 0) {
		printf("Too many args %d\n", argc);
		pager_close();
		return (CMD_ERROR);
	}

	/*
	 * Initiate the search -- note the standard takes pains
	 * to specify that the initial call must be a pointer to a NULL
	 * character.
	 */
	varalloc = 1024;
	varname = malloc(varalloc);
	if (varname == NULL) {
		printf("Can't allocate memory to get variables\n");
		pager_close();
		return (CMD_ERROR);
	}
	varname[0] = 0;
	while (1) {
		varsz = varalloc;
		status = RS->GetNextVariableName(&varsz, varname, &varguid);
		if (status == EFI_BUFFER_TOO_SMALL) {
			varalloc = varsz;
			newnm = malloc(varalloc);
			if (newnm == NULL) {
				printf("Can't allocate memory to get variables\n");
				free(varname);
				pager_close();
				return (CMD_ERROR);
			}
			memcpy(newnm, varname, varsz);
			free(varname);
			varname = newnm;
			continue; /* Try again with bigger buffer */
		}
		if (status != EFI_SUCCESS)
			break;
		if (aflag) {
			if (efi_print_var(varname, &varguid, lflag) != CMD_OK)
				break;
			continue;
		}
		if (vflag) {
			if (wcscmp(varnamearg, varname) == 0) {
				if (efi_print_var(varname, &varguid, lflag) != CMD_OK)
					break;
				continue;
			}
		}
		if (gflag) {
			if (memcmp(&varguid, &matchguid, sizeof(varguid)) == 0) {
				if (efi_print_var(varname, &varguid, lflag) != CMD_OK)
					break;
				continue;
			}
		}
	}
	free(varname);
	pager_close();
	return (CMD_OK);
}
/* * This is now called from local media FS's to operate against their * own vnodes if they fail to implement VOP_GETPAGES. */ int vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count, int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg) { vm_object_t object; struct bufobj *bo; struct buf *bp; off_t foff; #ifdef INVARIANTS off_t blkno0; #endif int bsize, pagesperblock, *freecnt; int error, before, after, rbehind, rahead, poff, i; int bytecount, secmask; KASSERT(vp->v_type != VCHR && vp->v_type != VBLK, ("%s does not support devices", __func__)); if (vp->v_iflag & VI_DOOMED) return (VM_PAGER_BAD); object = vp->v_object; foff = IDX_TO_OFF(m[0]->pindex); bsize = vp->v_mount->mnt_stat.f_iosize; pagesperblock = bsize / PAGE_SIZE; KASSERT(foff < object->un_pager.vnp.vnp_size, ("%s: page %p offset beyond vp %p size", __func__, m[0], vp)); KASSERT(count <= sizeof(bp->b_pages), ("%s: requested %d pages", __func__, count)); /* * The last page has valid blocks. Invalid part can only * exist at the end of file, and the page is made fully valid * by zeroing in vm_pager_get_pages(). */ if (m[count - 1]->valid != 0 && --count == 0) { if (iodone != NULL) iodone(arg, m, 1, 0); return (VM_PAGER_OK); } /* * Synchronous and asynchronous paging operations use different * free pbuf counters. This is done to avoid asynchronous requests * to consume all pbufs. * Allocate the pbuf at the very beginning of the function, so that * if we are low on certain kind of pbufs don't even proceed to BMAP, * but sleep. */ freecnt = iodone != NULL ? &vnode_async_pbuf_freecnt : &vnode_pbuf_freecnt; bp = getpbuf(freecnt); /* * Get the underlying device blocks for the file with VOP_BMAP(). * If the file system doesn't support VOP_BMAP, use old way of * getting pages via VOP_READ. */ error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before); if (error == EOPNOTSUPP) { relpbuf(bp, freecnt); VM_OBJECT_WLOCK(object); for (i = 0; i < count; i++) { PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodepgsin); error = vnode_pager_input_old(object, m[i]); if (error) break; } VM_OBJECT_WUNLOCK(object); return (error); } else if (error != 0) { relpbuf(bp, freecnt); return (VM_PAGER_ERROR); } /* * If the file system supports BMAP, but blocksize is smaller * than a page size, then use special small filesystem code. */ if (pagesperblock == 0) { relpbuf(bp, freecnt); for (i = 0; i < count; i++) { PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodepgsin); error = vnode_pager_input_smlfs(object, m[i]); if (error) break; } return (error); } /* * A sparse file can be encountered only for a single page request, * which may not be preceded by call to vm_pager_haspage(). */ if (bp->b_blkno == -1) { KASSERT(count == 1, ("%s: array[%d] request to a sparse file %p", __func__, count, vp)); relpbuf(bp, freecnt); pmap_zero_page(m[0]); KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty", __func__, m[0])); VM_OBJECT_WLOCK(object); m[0]->valid = VM_PAGE_BITS_ALL; VM_OBJECT_WUNLOCK(object); return (VM_PAGER_OK); } #ifdef INVARIANTS blkno0 = bp->b_blkno; #endif bp->b_blkno += (foff % bsize) / DEV_BSIZE; /* Recalculate blocks available after/before to pages. 
*/ poff = (foff % bsize) / PAGE_SIZE; before *= pagesperblock; before += poff; after *= pagesperblock; after += pagesperblock - (poff + 1); if (m[0]->pindex + after >= object->size) after = object->size - 1 - m[0]->pindex; KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d", __func__, count, after + 1)); after -= count - 1; /* Trim requested rbehind/rahead to possible values. */ rbehind = a_rbehind ? *a_rbehind : 0; rahead = a_rahead ? *a_rahead : 0; rbehind = min(rbehind, before); rbehind = min(rbehind, m[0]->pindex); rahead = min(rahead, after); rahead = min(rahead, object->size - m[count - 1]->pindex); /* * Check that total amount of pages fit into buf. Trim rbehind and * rahead evenly if not. */ if (rbehind + rahead + count > nitems(bp->b_pages)) { int trim, sum; trim = rbehind + rahead + count - nitems(bp->b_pages) + 1; sum = rbehind + rahead; if (rbehind == before) { /* Roundup rbehind trim to block size. */ rbehind -= roundup(trim * rbehind / sum, pagesperblock); if (rbehind < 0) rbehind = 0; } else rbehind -= trim * rbehind / sum; rahead -= trim * rahead / sum; } KASSERT(rbehind + rahead + count <= nitems(bp->b_pages), ("%s: behind %d ahead %d count %d", __func__, rbehind, rahead, count)); /* * Fill in the bp->b_pages[] array with requested and optional * read behind or read ahead pages. Read behind pages are looked * up in a backward direction, down to a first cached page. Same * for read ahead pages, but there is no need to shift the array * in case of encountering a cached page. */ i = bp->b_npages = 0; if (rbehind) { vm_pindex_t startpindex, tpindex; vm_page_t p; VM_OBJECT_WLOCK(object); startpindex = m[0]->pindex - rbehind; if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL && p->pindex >= startpindex) startpindex = p->pindex + 1; /* tpindex is unsigned; beware of numeric underflow. */ for (tpindex = m[0]->pindex - 1; tpindex >= startpindex && tpindex < m[0]->pindex; tpindex--, i++) { p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL); if (p == NULL) { /* Shift the array. */ for (int j = 0; j < i; j++) bp->b_pages[j] = bp->b_pages[j + tpindex + 1 - startpindex]; break; } bp->b_pages[tpindex - startpindex] = p; } bp->b_pgbefore = i; bp->b_npages += i; bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE; } else bp->b_pgbefore = 0; /* Requested pages. */ for (int j = 0; j < count; j++, i++) bp->b_pages[i] = m[j]; bp->b_npages += count; if (rahead) { vm_pindex_t endpindex, tpindex; vm_page_t p; if (!VM_OBJECT_WOWNED(object)) VM_OBJECT_WLOCK(object); endpindex = m[count - 1]->pindex + rahead + 1; if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL && p->pindex < endpindex) endpindex = p->pindex; if (endpindex > object->size) endpindex = object->size; for (tpindex = m[count - 1]->pindex + 1; tpindex < endpindex; i++, tpindex++) { p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL); if (p == NULL) break; bp->b_pages[i] = p; } bp->b_pgafter = i - bp->b_npages; bp->b_npages = i; } else bp->b_pgafter = 0; if (VM_OBJECT_WOWNED(object)) VM_OBJECT_WUNLOCK(object); /* Report back actual behind/ahead read. 
*/ if (a_rbehind) *a_rbehind = bp->b_pgbefore; if (a_rahead) *a_rahead = bp->b_pgafter; #ifdef INVARIANTS KASSERT(bp->b_npages <= nitems(bp->b_pages), ("%s: buf %p overflowed", __func__, bp)); for (int j = 1, prev = 1; j < bp->b_npages; j++) { if (bp->b_pages[j] == bogus_page) continue; KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex == j - prev, ("%s: pages array not consecutive, bp %p", __func__, bp)); prev = j; } #endif /* * Recalculate first offset and bytecount with regards to read behind. * Truncate bytecount to vnode real size and round up physical size * for real devices. */ foff = IDX_TO_OFF(bp->b_pages[0]->pindex); bytecount = bp->b_npages << PAGE_SHIFT; if ((foff + bytecount) > object->un_pager.vnp.vnp_size) bytecount = object->un_pager.vnp.vnp_size - foff; secmask = bo->bo_bsize - 1; KASSERT(secmask < PAGE_SIZE && secmask > 0, ("%s: sector size %d too large", __func__, secmask + 1)); bytecount = (bytecount + secmask) & ~secmask; /* * And map the pages to be read into the kva, if the filesystem * requires mapped buffers. */ if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 && unmapped_buf_allowed) { bp->b_data = unmapped_buf; bp->b_offset = 0; } else { bp->b_data = bp->b_kvabase; pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages); } /* Build a minimal buffer header. */ bp->b_iocmd = BIO_READ; KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred")); KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred")); bp->b_rcred = crhold(curthread->td_ucred); bp->b_wcred = crhold(curthread->td_ucred); pbgetbo(bo, bp); bp->b_vp = vp; bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount; bp->b_iooffset = dbtob(bp->b_blkno); KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) == (blkno0 - bp->b_blkno) * DEV_BSIZE + IDX_TO_OFF(m[0]->pindex) % bsize, ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju " "blkno0 %ju b_blkno %ju", bsize, (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex, (uintmax_t)blkno0, (uintmax_t)bp->b_blkno)); atomic_add_long(&runningbufspace, bp->b_runningbufspace); PCPU_INC(cnt.v_vnodein); PCPU_ADD(cnt.v_vnodepgsin, bp->b_npages); if (iodone != NULL) { /* async */ bp->b_pgiodone = iodone; bp->b_caller1 = arg; bp->b_iodone = vnode_pager_generic_getpages_done_async; bp->b_flags |= B_ASYNC; BUF_KERNPROC(bp); bstrategy(bp); return (VM_PAGER_OK); } else { bp->b_iodone = bdone; bstrategy(bp); bwait(bp, PVM, "vnread"); error = vnode_pager_generic_getpages_done(bp); for (i = 0; i < bp->b_npages; i++) bp->b_pages[i] = NULL; bp->b_vp = NULL; pbrelbo(bp); relpbuf(bp, &vnode_pbuf_freecnt); return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK); } }
#include <lib/libsa/ufs.h>
#include <lib/libsa/cd9660.h>

#include <dev/cons.h>

const char version[] = "0.9";
int debug = 0;

struct fs_ops file_system[] = {
	{ ufs_open, ufs_close, ufs_read, ufs_write, ufs_seek,
	  ufs_stat, ufs_readdir },
	{ cd9660_open, cd9660_close, cd9660_read, cd9660_write, cd9660_seek,
	  cd9660_stat, cd9660_readdir },
	{ lif_open, lif_close, lif_read, lif_write, lif_seek,
	  lif_stat, lif_readdir },
};
int nfsys = nitems(file_system);

struct devsw devsw[] = {
	{ "dk", iodcstrategy, dkopen, dkclose, noioctl },
	{ "ct", iodcstrategy, ctopen, ctclose, noioctl },
	{ "lf", iodcstrategy, lfopen, lfclose, noioctl }
};
int ndevs = nitems(devsw);

struct consdev constab[] = {
	{ ite_probe, ite_init, ite_getc, ite_putc },
	{ NULL }
};
struct consdev *cn_tab;
uint8_t
pirq_read(int pin)
{
	assert((pin > 0) && (((unsigned) pin) <= nitems(pirqs)));

	return (pirqs[pin - 1].reg);
}
static void pirq_dsdt(void) { char *irq_prs, *old; int irq, pin; irq_prs = NULL; for (irq = 0; ((unsigned) irq) < nitems(irq_counts); irq++) { if (!IRQ_PERMITTED(irq)) continue; if (irq_prs == NULL) asprintf(&irq_prs, "%d", irq); else { old = irq_prs; asprintf(&irq_prs, "%s,%d", old, irq); free(old); } } /* * A helper method to validate a link register's value. This * duplicates pirq_valid_irq(). */ dsdt_line(""); dsdt_line("Method (PIRV, 1, NotSerialized)"); dsdt_line("{"); dsdt_line(" If (And (Arg0, 0x%02X))", PIRQ_DIS); dsdt_line(" {"); dsdt_line(" Return (0x00)"); dsdt_line(" }"); dsdt_line(" And (Arg0, 0x%02X, Local0)", PIRQ_IRQ); dsdt_line(" If (LLess (Local0, 0x03))"); dsdt_line(" {"); dsdt_line(" Return (0x00)"); dsdt_line(" }"); dsdt_line(" If (LEqual (Local0, 0x08))"); dsdt_line(" {"); dsdt_line(" Return (0x00)"); dsdt_line(" }"); dsdt_line(" If (LEqual (Local0, 0x0D))"); dsdt_line(" {"); dsdt_line(" Return (0x00)"); dsdt_line(" }"); dsdt_line(" Return (0x01)"); dsdt_line("}"); for (pin = 0; ((unsigned) pin) < nitems(pirqs); pin++) { dsdt_line(""); dsdt_line("Device (LNK%c)", 'A' + pin); dsdt_line("{"); dsdt_line(" Name (_HID, EisaId (\"PNP0C0F\"))"); dsdt_line(" Name (_UID, 0x%02X)", pin + 1); dsdt_line(" Method (_STA, 0, NotSerialized)"); dsdt_line(" {"); dsdt_line(" If (PIRV (PIR%c))", 'A' + pin); dsdt_line(" {"); dsdt_line(" Return (0x0B)"); dsdt_line(" }"); dsdt_line(" Else"); dsdt_line(" {"); dsdt_line(" Return (0x09)"); dsdt_line(" }"); dsdt_line(" }"); dsdt_line(" Name (_PRS, ResourceTemplate ()"); dsdt_line(" {"); dsdt_line(" IRQ (Level, ActiveLow, Shared, )"); dsdt_line(" {%s}", irq_prs); dsdt_line(" })"); dsdt_line(" Name (CB%02X, ResourceTemplate ()", pin + 1); dsdt_line(" {"); dsdt_line(" IRQ (Level, ActiveLow, Shared, )"); dsdt_line(" {}"); dsdt_line(" })"); dsdt_line(" CreateWordField (CB%02X, 0x01, CIR%c)", pin + 1, 'A' + pin); dsdt_line(" Method (_CRS, 0, NotSerialized)"); dsdt_line(" {"); dsdt_line(" And (PIR%c, 0x%02X, Local0)", 'A' + pin, PIRQ_DIS | PIRQ_IRQ); dsdt_line(" If (PIRV (Local0))"); dsdt_line(" {"); dsdt_line(" ShiftLeft (0x01, Local0, CIR%c)", 'A' + pin); dsdt_line(" }"); dsdt_line(" Else"); dsdt_line(" {"); dsdt_line(" Store (0x00, CIR%c)", 'A' + pin); dsdt_line(" }"); dsdt_line(" Return (CB%02X)", pin + 1); dsdt_line(" }"); dsdt_line(" Method (_DIS, 0, NotSerialized)"); dsdt_line(" {"); dsdt_line(" Store (0x80, PIR%c)", 'A' + pin); dsdt_line(" }"); dsdt_line(" Method (_SRS, 1, NotSerialized)"); dsdt_line(" {"); dsdt_line(" CreateWordField (Arg0, 0x01, SIR%c)", 'A' + pin); dsdt_line(" FindSetRightBit (SIR%c, Local0)", 'A' + pin); dsdt_line(" Store (Decrement (Local0), PIR%c)", 'A' + pin); dsdt_line(" }"); dsdt_line("}"); } free(irq_prs); }
int
main(int argc, char *argv[])
{
	char *domainname, *inkey, *inmap, *outbuf;
	int outbuflen, key, notrans, rval;
	int c, r;
	u_int i;

	domainname = NULL;
	notrans = key = 0;

	while ((c = getopt(argc, argv, "xd:kt")) != -1)
		switch (c) {
		case 'x':
			for (i = 0; i < nitems(ypaliases); i++)
				printf("Use \"%s\" for \"%s\"\n",
				    ypaliases[i].alias, ypaliases[i].name);
			exit(0);
		case 'd':
			domainname = optarg;
			break;
		case 't':
			notrans = 1;
			break;
		case 'k':
			key = 1;
			break;
		default:
			usage();
		}

	if (argc - optind < 2)
		usage();

	if (domainname == NULL)
		yp_get_default_domain(&domainname);

	inmap = argv[argc-1];
	if (notrans == 0) {
		for (i = 0; i < nitems(ypaliases); i++)
			if (strcmp(inmap, ypaliases[i].alias) == 0)
				inmap = ypaliases[i].name;
	}

	rval = 0;
	for (; optind < argc - 1; optind++) {
		inkey = argv[optind];

		r = yp_match(domainname, inmap, inkey, strlen(inkey),
		    &outbuf, &outbuflen);
		switch (r) {
		case 0:
			if (key)
				printf("%s: ", inkey);
			printf("%*.*s\n", outbuflen, outbuflen, outbuf);
			break;
		case YPERR_YPBIND:
			errx(1, "not running ypbind");
		default:
			/* Warn rather than exit so the remaining keys are still tried. */
			warnx("can't match key %s in map %s. reason: %s",
			    inkey, inmap, yperr_string(r));
			rval = 1;
			break;
		}
	}
	exit(rval);
}
char *
osdep_get_name(int fd, char *tty)
{
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PGRP, 0 };
	struct stat sb;
	size_t len;
	struct kinfo_proc *buf, *newbuf, *p, *bestp;
	u_int i;
	char *name;

	buf = NULL;

	if (stat(tty, &sb) == -1)
		return (NULL);
	if ((mib[3] = tcgetpgrp(fd)) == -1)
		return (NULL);

retry:
	if (sysctl(mib, nitems(mib), NULL, &len, NULL, 0) == -1)
		return (NULL);
	len = (len * 5) / 4;

	if ((newbuf = realloc(buf, len)) == NULL) {
		free(buf);
		return (NULL);
	}
	buf = newbuf;

	if (sysctl(mib, nitems(mib), buf, &len, NULL, 0) == -1) {
		if (errno == ENOMEM)
			goto retry;
		free(buf);
		return (NULL);
	}

	bestp = NULL;
	for (i = 0; i < len / sizeof (struct kinfo_proc); i++) {
		if (buf[i].ki_tdev != sb.st_rdev)
			continue;
		p = &buf[i];
		if (bestp == NULL) {
			bestp = p;
			continue;
		}

		if (is_runnable(p) && !is_runnable(bestp))
			bestp = p;
		else if (!is_runnable(p) && is_runnable(bestp))
			continue;

		if (!is_stopped(p) && is_stopped(bestp))
			bestp = p;
		else if (is_stopped(p) && !is_stopped(bestp))
			continue;

		if (p->ki_estcpu > bestp->ki_estcpu)
			bestp = p;
		else if (p->ki_estcpu < bestp->ki_estcpu)
			continue;

		if (p->ki_slptime < bestp->ki_slptime)
			bestp = p;
		else if (p->ki_slptime > bestp->ki_slptime)
			continue;

		/* Compare the candidate against the current best, not against itself. */
		if (strcmp(p->ki_comm, bestp->ki_comm) < 0)
			bestp = p;
		else if (strcmp(p->ki_comm, bestp->ki_comm) > 0)
			continue;

		if (p->ki_pid > bestp->ki_pid)
			bestp = p;
	}

	name = NULL;
	if (bestp != NULL)
		name = strdup(bestp->ki_comm);

	free(buf);
	return (name);
}
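/*
 * is_runnable() and is_stopped() above are small predicates over the
 * kinfo_proc state.  A hedged sketch of plausible definitions (the real tmux
 * helpers may differ slightly); they assume the SRUN/SIDL/SSTOP/SZOMB state
 * constants from <sys/proc.h> and struct kinfo_proc from <sys/user.h>:
 */
#define	example_is_runnable(p)	\
	((p)->ki_stat == SRUN || (p)->ki_stat == SIDL)
#define	example_is_stopped(p)	\
	((p)->ki_stat == SSTOP || (p)->ki_stat == SZOMB)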
static int
mtk_gic_attach(device_t dev)
{
	struct mtk_gic_softc *sc;
	intptr_t xref = gic_xref(dev);
	int i;

	sc = device_get_softc(dev);

	if (bus_alloc_resources(dev, mtk_gic_spec, sc->gic_res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->gic_dev = dev;

	/* Initialize mutex */
	mtx_init(&sc->mutex, "PIC lock", "", MTX_SPIN);

	/* Set the number of interrupts */
	sc->nirqs = nitems(sc->gic_irqs);

	/* Mask all interrupts */
	WRITE4(sc, MTK_INTDIS, 0xFFFFFFFF);

	/* All interrupts are of type level */
	WRITE4(sc, MTK_INTTRIG, 0x00000000);

	/* All interrupts are of positive polarity */
	WRITE4(sc, MTK_INTPOL, 0xFFFFFFFF);

	/* Route all interrupts to pin 0 on VPE 0. */
	for (i = 0; i < 32; i++) {
		WRITE4(sc, MTK_MAPPIN(i), MTK_PIN_BITS(0));
		WRITE4(sc, MTK_MAPVPE(i, 0), MTK_VPE_BITS(0));
	}

	/* Register the interrupts */
	if (mtk_gic_register_isrcs(sc) != 0) {
		device_printf(dev, "could not register GIC ISRCs\n");
		goto cleanup;
	}

	/*
	 * Now that everything is initialized, it is the right time to
	 * register the interrupt controller with the interrupt framework.
	 */
	if (intr_pic_register(dev, xref) == NULL) {
		device_printf(dev, "could not register PIC\n");
		goto cleanup;
	}

	cpu_establish_hardintr("gic", mtk_gic_intr, NULL, sc, 0,
	    INTR_TYPE_CLK, NULL);

	return (0);

cleanup:
	bus_release_resources(dev, mtk_gic_spec, sc->gic_res);
	return (ENXIO);
}
#include <sys/param.h>
#include <sys/libkern.h>

u_char const bcd2bin_data[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 0, 0, 0, 0, 0, 0,
	10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
	20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 0, 0, 0, 0, 0,
	30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 0, 0, 0, 0, 0,
	40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 0, 0, 0, 0, 0, 0,
	50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 0, 0, 0, 0, 0, 0,
	60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, 0, 0, 0,
	70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 0, 0, 0, 0, 0, 0,
	80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 0, 0, 0, 0, 0, 0,
	90, 91, 92, 93, 94, 95, 96, 97, 98, 99
};
CTASSERT(nitems(bcd2bin_data) == LIBKERN_LEN_BCD2BIN);

u_char const bin2bcd_data[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99
};
CTASSERT(nitems(bin2bcd_data) == LIBKERN_LEN_BIN2BCD);
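/*
 * Illustration only (not part of the source above): the two tables are
 * indexed directly, so the BCD byte 0x59 maps to decimal 59 and back.  A
 * hedged sketch of how lookup wrappers over these tables might look; the
 * kernel's own accessors live in <sys/libkern.h> and may be named and
 * bounds-guarded differently:
 */
static inline u_char
example_bcd2bin(u_char bcd)
{
	/* Valid BCD input only: both nibbles must be in the range 0-9. */
	return (bcd2bin_data[bcd]);
}

static inline u_char
example_bin2bcd(u_char bin)
{
	/* Valid input is 0-99. */
	return (bin2bcd_data[bin]);
}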
int
eap_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, eap_devices,
	    nitems(eap_devices)));
}
/* * Returns ANSI code to set particular attributes (colour, bold and so on) * given a current state. The output buffer must be able to hold at least 57 * bytes. */ static void grid_string_cells_code(const struct grid_cell *lastgc, const struct grid_cell *gc, char *buf, size_t len, int escape_c0) { int oldc[64], newc[64], s[128]; size_t noldc, nnewc, n, i; u_int attr = gc->attr; u_int lastattr = lastgc->attr; char tmp[64]; struct { u_int mask; u_int code; } attrs[] = { { GRID_ATTR_BRIGHT, 1 }, { GRID_ATTR_DIM, 2 }, { GRID_ATTR_ITALICS, 3 }, { GRID_ATTR_UNDERSCORE, 4 }, { GRID_ATTR_BLINK, 5 }, { GRID_ATTR_REVERSE, 7 }, { GRID_ATTR_HIDDEN, 8 }, { GRID_ATTR_STRIKETHROUGH, 9 } }; n = 0; /* If any attribute is removed, begin with 0. */ for (i = 0; i < nitems(attrs); i++) { if (!(attr & attrs[i].mask) && (lastattr & attrs[i].mask)) { s[n++] = 0; lastattr &= GRID_ATTR_CHARSET; break; } } /* For each attribute that is newly set, add its code. */ for (i = 0; i < nitems(attrs); i++) { if ((attr & attrs[i].mask) && !(lastattr & attrs[i].mask)) s[n++] = attrs[i].code; } /* If the foreground colour changed, append its parameters. */ nnewc = grid_string_cells_fg(gc, newc); noldc = grid_string_cells_fg(lastgc, oldc); if (nnewc != noldc || memcmp(newc, oldc, nnewc * sizeof newc[0]) != 0) { for (i = 0; i < nnewc; i++) s[n++] = newc[i]; } /* If the background colour changed, append its parameters. */ nnewc = grid_string_cells_bg(gc, newc); noldc = grid_string_cells_bg(lastgc, oldc); if (nnewc != noldc || memcmp(newc, oldc, nnewc * sizeof newc[0]) != 0) { for (i = 0; i < nnewc; i++) s[n++] = newc[i]; } /* If there are any parameters, append an SGR code. */ *buf = '\0'; if (n > 0) { if (escape_c0) strlcat(buf, "\\033[", len); else strlcat(buf, "\033[", len); for (i = 0; i < n; i++) { if (i + 1 < n) xsnprintf(tmp, sizeof tmp, "%d;", s[i]); else xsnprintf(tmp, sizeof tmp, "%d", s[i]); strlcat(buf, tmp, len); } strlcat(buf, "m", len); } /* Append shift in/shift out if needed. */ if ((attr & GRID_ATTR_CHARSET) && !(lastattr & GRID_ATTR_CHARSET)) { if (escape_c0) strlcat(buf, "\\016", len); /* SO */ else strlcat(buf, "\016", len); /* SO */ } if (!(attr & GRID_ATTR_CHARSET) && (lastattr & GRID_ATTR_CHARSET)) { if (escape_c0) strlcat(buf, "\\017", len); /* SI */ else strlcat(buf, "\017", len); /* SI */ } }
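/*
 * Worked example (illustrative, not part of the tmux source): going from a
 * cell with no attributes to one with GRID_ATTR_BRIGHT and
 * GRID_ATTR_UNDERSCORE set emits the single SGR sequence "\033[1;4m".
 * Dropping any attribute forces a leading reset, so bright-only to
 * underscore-only is emitted as "\033[0;4m".  With escape_c0 set, the initial
 * ESC is written as the literal four characters "\033" so the result can be
 * embedded in configuration text rather than sent to a terminal.
 */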
SYSCTL_BOOL(_security_stack_protect, OID_AUTO, permit_nonrandom_cookies, CTLFLAG_RDTUN, &permit_nonrandom_cookies, 0, "Allow stack guard to be used without real random cookies"); void __stack_chk_fail(void) { panic("stack overflow detected; backtrace may be corrupted"); } static void __stack_chk_init(void *dummy __unused) { size_t i; long guard[nitems(__stack_chk_guard)]; if (is_random_seeded()) { arc4rand(guard, sizeof(guard), 0); for (i = 0; i < nitems(guard); i++) __stack_chk_guard[i] = guard[i]; return; } if (permit_nonrandom_cookies) { printf("%s: WARNING: Initializing stack protection with " "non-random cookies!\n", __func__); printf("%s: WARNING: This severely limits the benefit of " "-fstack-protector!\n", __func__); /*
int
athn_cardbus_match(struct device *parent, void *match, void *aux)
{
	return (cardbus_matchbyid(aux, athn_cardbus_devices,
	    nitems(athn_cardbus_devices)));
}
odyssey_alloc_screen, odyssey_free_screen, odyssey_show_screen, NULL, /* load_font */ NULL, /* scrollback */ NULL, /* getchar */ odyssey_burner, NULL /* pollc */ }; const struct wsscreen_descr *odyssey_scrlist[] = { &odyssey_stdscreen }; struct wsscreen_list odyssey_screenlist = { nitems(odyssey_scrlist), odyssey_scrlist }; const struct cfattach odyssey_ca = { sizeof(struct odyssey_softc), odyssey_match, odyssey_attach, }; struct cfdriver odyssey_cd = { NULL, "odyssey", DV_DULL, }; int odyssey_match(struct device *parent, void *match, void *aux) { struct xbow_attach_args *xaa = aux;
/* Translate a key code into an output key sequence. */ void input_key(struct window_pane *wp, int key, struct mouse_event *m) { const struct input_key_ent *ike; u_int i; size_t dlen; char *out; u_char ch; log_debug("writing key 0x%x (%s)", key, key_string_lookup_key(key)); /* If this is a mouse key, pass off to mouse function. */ if (KEYC_IS_MOUSE(key)) { if (m != NULL && m->wp != -1 && (u_int)m->wp == wp->id) input_key_mouse(wp, m); return; } /* * If this is a normal 7-bit key, just send it, with a leading escape * if necessary. */ if (key != KEYC_NONE && (key & ~KEYC_ESCAPE) < 0x100) { if (key & KEYC_ESCAPE) bufferevent_write(wp->event, "\033", 1); ch = key & ~KEYC_ESCAPE; bufferevent_write(wp->event, &ch, 1); return; } /* * Then try to look this up as an xterm key, if the flag to output them * is set. */ if (options_get_number(&wp->window->options, "xterm-keys")) { if ((out = xterm_keys_lookup(key)) != NULL) { bufferevent_write(wp->event, out, strlen(out)); free(out); return; } } /* Otherwise look the key up in the table. */ for (i = 0; i < nitems(input_keys); i++) { ike = &input_keys[i]; if ((ike->flags & INPUTKEY_KEYPAD) && !(wp->screen->mode & MODE_KKEYPAD)) continue; if ((ike->flags & INPUTKEY_CURSOR) && !(wp->screen->mode & MODE_KCURSOR)) continue; if ((key & KEYC_ESCAPE) && (ike->key | KEYC_ESCAPE) == key) break; if (ike->key == key) break; } if (i == nitems(input_keys)) { log_debug("key 0x%x missing", key); return; } dlen = strlen(ike->data); log_debug("found key 0x%x: \"%s\"", key, ike->data); /* Prefix a \033 for escape. */ if (key & KEYC_ESCAPE) bufferevent_write(wp->event, "\033", 1); bufferevent_write(wp->event, ike->data, dlen); }
bdev_tape_init(NST,st), /* 5: SCSI tape */ bdev_disk_init(NCD,cd), /* 6: SCSI CD-ROM */ bdev_disk_init(NRD,rd), /* 7: ramdisk */ bdev_disk_init(NVND,vnd), /* 8: vnode disk driver */ bdev_notdef(), /* 9: was: concatenated disk driver */ bdev_notdef(), /* 10 */ bdev_notdef(), /* 11 */ bdev_notdef(), /* 12 */ bdev_lkm_dummy(), /* 13 */ bdev_lkm_dummy(), /* 14 */ bdev_lkm_dummy(), /* 15 */ bdev_lkm_dummy(), /* 16 */ bdev_lkm_dummy(), /* 17 */ bdev_lkm_dummy(), /* 18 */ }; int nblkdev = nitems(bdevsw); struct cdevsw cdevsw[] = { cdev_cn_init(1,cn), /* 0: virtual console */ cdev_ctty_init(1,ctty), /* 1: controlling terminal */ cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */ cdev_notdef(), /* 3 was /dev/drum */ cdev_tty_init(NPTY,pts), /* 4: pseudo-tty slave */ cdev_ptc_init(NPTY,ptc), /* 5: pseudo-tty master */ cdev_log_init(1,log), /* 6: /dev/klog */ cdev_notdef(), /* 7 */ cdev_disk_init(NSD,sd), /* 8: SCSI disk */ cdev_disk_init(NCD,cd), /* 9: SCSI CD-ROM */ cdev_lcd_init(NLCD,lcd), /* 10: /dev/lcd */ cdev_notdef(), /* 11 */
.pr_output = rip6_output, .pr_ctloutput = rip6_ctloutput, .pr_usrreqs = &rip6_usrreqs }, }; extern int in6_inithead(void **, int); #ifdef VIMAGE extern int in6_detachhead(void **, int); #endif struct domain inet6domain = { .dom_family = AF_INET6, .dom_name = "internet6", .dom_protosw = (struct protosw *)inet6sw, .dom_protoswNPROTOSW = (struct protosw *)&inet6sw[nitems(inet6sw)], #ifdef RADIX_MPATH .dom_rtattach = rn6_mpath_inithead, #else .dom_rtattach = in6_inithead, #endif #ifdef VIMAGE .dom_rtdetach = in6_detachhead, #endif .dom_ifattach = in6_domifattach, .dom_ifdetach = in6_domifdetach, .dom_ifmtu = in6_domifmtu }; VNET_DOMAIN_SET(inet6);
/* reset and initialize the device */ static int atkbd_init(int unit, keyboard_t **kbdp, void *arg, int flags) { keyboard_t *kbd; atkbd_state_t *state; keymap_t *keymap; accentmap_t *accmap; fkeytab_t *fkeymap; int fkeymap_size; int delay[2]; int *data = (int *)arg; /* data[0]: controller, data[1]: irq */ int error, needfree; #ifdef EVDEV_SUPPORT struct evdev_dev *evdev; char phys_loc[8]; #endif /* XXX */ if (unit == ATKBD_DEFAULT) { *kbdp = kbd = &default_kbd; if (KBD_IS_INITIALIZED(kbd) && KBD_IS_CONFIGURED(kbd)) return 0; state = &default_kbd_state; keymap = &default_keymap; accmap = &default_accentmap; fkeymap = default_fkeytab; fkeymap_size = nitems(default_fkeytab); needfree = 0; } else if (*kbdp == NULL) { *kbdp = kbd = malloc(sizeof(*kbd), M_DEVBUF, M_NOWAIT | M_ZERO); state = malloc(sizeof(*state), M_DEVBUF, M_NOWAIT | M_ZERO); /* NB: these will always be initialized 'cuz !KBD_IS_PROBED */ keymap = malloc(sizeof(key_map), M_DEVBUF, M_NOWAIT); accmap = malloc(sizeof(accent_map), M_DEVBUF, M_NOWAIT); fkeymap = malloc(sizeof(fkey_tab), M_DEVBUF, M_NOWAIT); fkeymap_size = sizeof(fkey_tab)/sizeof(fkey_tab[0]); needfree = 1; if ((kbd == NULL) || (state == NULL) || (keymap == NULL) || (accmap == NULL) || (fkeymap == NULL)) { error = ENOMEM; goto bad; } } else if (KBD_IS_INITIALIZED(*kbdp) && KBD_IS_CONFIGURED(*kbdp)) { return 0; } else { kbd = *kbdp; state = (atkbd_state_t *)kbd->kb_data; bzero(state, sizeof(*state)); keymap = kbd->kb_keymap; accmap = kbd->kb_accentmap; fkeymap = kbd->kb_fkeytab; fkeymap_size = kbd->kb_fkeytab_size; needfree = 0; } if (!KBD_IS_PROBED(kbd)) { state->kbdc = atkbdc_open(data[0]); if (state->kbdc == NULL) { error = ENXIO; goto bad; } kbd_init_struct(kbd, ATKBD_DRIVER_NAME, KB_OTHER, unit, flags, 0, 0); bcopy(&key_map, keymap, sizeof(key_map)); bcopy(&accent_map, accmap, sizeof(accent_map)); bcopy(fkey_tab, fkeymap, imin(fkeymap_size * sizeof(fkeymap[0]), sizeof(fkey_tab))); kbd_set_maps(kbd, keymap, accmap, fkeymap, fkeymap_size); kbd->kb_data = (void *)state; if (probe_keyboard(state->kbdc, flags)) { /* shouldn't happen */ if (flags & KB_CONF_FAIL_IF_NO_KBD) { error = ENXIO; goto bad; } } else { KBD_FOUND_DEVICE(kbd); } atkbd_clear_state(kbd); state->ks_mode = K_XLATE; /* * FIXME: set the initial value for lock keys in ks_state * according to the BIOS data? 
*/ KBD_PROBE_DONE(kbd); } if (!KBD_IS_INITIALIZED(kbd) && !(flags & KB_CONF_PROBE_ONLY)) { kbd->kb_config = flags & ~KB_CONF_PROBE_ONLY; if (KBD_HAS_DEVICE(kbd) && init_keyboard(state->kbdc, &kbd->kb_type, kbd->kb_config) && (kbd->kb_config & KB_CONF_FAIL_IF_NO_KBD)) { kbd_unregister(kbd); error = ENXIO; goto bad; } atkbd_ioctl(kbd, KDSETLED, (caddr_t)&state->ks_state); set_typematic(kbd); delay[0] = kbd->kb_delay1; delay[1] = kbd->kb_delay2; atkbd_ioctl(kbd, KDSETREPEAT, (caddr_t)delay); #ifdef EVDEV_SUPPORT /* register as evdev provider on first init */ if (state->ks_evdev == NULL) { snprintf(phys_loc, sizeof(phys_loc), "atkbd%d", unit); evdev = evdev_alloc(); evdev_set_name(evdev, "AT keyboard"); evdev_set_phys(evdev, phys_loc); evdev_set_id(evdev, BUS_I8042, PS2_KEYBOARD_VENDOR, PS2_KEYBOARD_PRODUCT, 0); evdev_set_methods(evdev, kbd, &atkbd_evdev_methods); evdev_support_event(evdev, EV_SYN); evdev_support_event(evdev, EV_KEY); evdev_support_event(evdev, EV_LED); evdev_support_event(evdev, EV_REP); evdev_support_all_known_keys(evdev); evdev_support_led(evdev, LED_NUML); evdev_support_led(evdev, LED_CAPSL); evdev_support_led(evdev, LED_SCROLLL); if (evdev_register_mtx(evdev, &Giant)) evdev_free(evdev); else state->ks_evdev = evdev; state->ks_evdev_state = 0; } #endif KBD_INIT_DONE(kbd); } if (!KBD_IS_CONFIGURED(kbd)) { if (kbd_register(kbd) < 0) { error = ENXIO; goto bad; } KBD_CONFIG_DONE(kbd); } return 0; bad: if (needfree) { if (state != NULL) free(state, M_DEVBUF); if (keymap != NULL) free(keymap, M_DEVBUF); if (accmap != NULL) free(accmap, M_DEVBUF); if (fkeymap != NULL) free(fkeymap, M_DEVBUF); if (kbd != NULL) { free(kbd, M_DEVBUF); *kbdp = NULL; /* insure ref doesn't leak to caller */ } } return error; }
pid_t lka(void) { pid_t pid; struct passwd *pw; struct event ev_sigint; struct event ev_sigterm; struct event ev_sigchld; struct peer peers[] = { { PROC_PARENT, imsg_dispatch }, { PROC_MFA, imsg_dispatch }, { PROC_QUEUE, imsg_dispatch }, { PROC_SMTP, imsg_dispatch }, { PROC_MTA, imsg_dispatch }, { PROC_CONTROL, imsg_dispatch } }; switch (pid = fork()) { case -1: fatal("lka: cannot fork"); case 0: break; default: return (pid); } purge_config(PURGE_EVERYTHING); pw = env->sc_pw; smtpd_process = PROC_LKA; setproctitle("%s", env->sc_title[smtpd_process]); if (setgroups(1, &pw->pw_gid) || setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) || setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid)) fatal("lka: cannot drop privileges"); imsg_callback = lka_imsg; event_init(); SPLAY_INIT(&env->lka_sessions); signal_set(&ev_sigint, SIGINT, lka_sig_handler, NULL); signal_set(&ev_sigterm, SIGTERM, lka_sig_handler, NULL); signal_set(&ev_sigchld, SIGCHLD, lka_sig_handler, NULL); signal_add(&ev_sigint, NULL); signal_add(&ev_sigterm, NULL); signal_add(&ev_sigchld, NULL); signal(SIGPIPE, SIG_IGN); signal(SIGHUP, SIG_IGN); /* * lka opens all kinds of files and sockets, so bump the limit to max. * XXX: need to analyse the exact hard limit. */ fdlimit(1.0); config_pipes(peers, nitems(peers)); config_peers(peers, nitems(peers)); /* ignore them until we get our config */ event_del(&env->sc_ievs[PROC_MTA]->ev); event_del(&env->sc_ievs[PROC_MFA]->ev); event_del(&env->sc_ievs[PROC_SMTP]->ev); if (event_dispatch() < 0) fatal("event_dispatch"); lka_shutdown(); return (0); }
void pccard_check_cis_quirks(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); int wiped = 0; int i, j; struct pccard_function *pf, *pf_next, *pf_last; struct pccard_config_entry *cfe, *cfe_next; struct pccard_cis_quirk *q; pf = NULL; pf_last = NULL; for (i = 0; i < nitems(pccard_cis_quirks); i++) { q = &pccard_cis_quirks[i]; if (!pccard_cis_quirk_match(sc, q)) continue; if (!wiped) { if (bootverbose) { device_printf(dev, "using CIS quirks for "); for (j = 0; j < 4; j++) { if (sc->card.cis1_info[j] == NULL) break; if (j) printf(", "); printf("%s", sc->card.cis1_info[j]); } printf("\n"); } for (pf = STAILQ_FIRST(&sc->card.pf_head); pf != NULL; pf = pf_next) { for (cfe = STAILQ_FIRST(&pf->cfe_head); cfe != NULL; cfe = cfe_next) { cfe_next = STAILQ_NEXT(cfe, cfe_list); free(cfe, M_DEVBUF); } pf_next = STAILQ_NEXT(pf, pf_list); free(pf, M_DEVBUF); } STAILQ_INIT(&sc->card.pf_head); wiped = 1; } if (pf_last == q->pf) { cfe = malloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT); if (cfe == NULL) { device_printf(dev, "no memory for quirk (1)\n"); continue; } *cfe = *q->cfe; STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list); } else { pf = malloc(sizeof(*pf), M_DEVBUF, M_NOWAIT); if (pf == NULL) { device_printf(dev, "no memory for pccard function\n"); continue; } *pf = *q->pf; STAILQ_INIT(&pf->cfe_head); cfe = malloc(sizeof(*cfe), M_DEVBUF, M_NOWAIT); if (cfe == NULL) { free(pf, M_DEVBUF); device_printf(dev, "no memory for quirk (2)\n"); continue; } *cfe = *q->cfe; STAILQ_INSERT_TAIL(&pf->cfe_head, cfe, cfe_list); STAILQ_INSERT_TAIL(&sc->card.pf_head, pf, pf_list); pf_last = q->pf; } } }
/* Convert a key code into string format, with prefix if necessary. */ const char * key_string_lookup_key(key_code key) { static char out[24]; char tmp[8]; u_int i; struct utf8_data ud; size_t off; *out = '\0'; /* Handle no key. */ if (key == KEYC_NONE) return ("None"); /* Handle special keys. */ if (key == KEYC_UNKNOWN) return ("Unknown"); if (key == KEYC_MOUSE) return ("Mouse"); /* * Special case: display C-@ as C-Space. Could do this below in * the (key >= 0 && key <= 32), but this way we let it be found * in key_string_table, for the unlikely chance that we might * change its name. */ if ((key & KEYC_MASK_KEY) == 0) key = ' ' | KEYC_CTRL | (key & KEYC_MASK_MOD); /* Fill in the modifiers. */ if (key & KEYC_CTRL) strlcat(out, "C-", sizeof out); if (key & KEYC_ESCAPE) strlcat(out, "M-", sizeof out); if (key & KEYC_SHIFT) strlcat(out, "S-", sizeof out); key &= KEYC_MASK_KEY; /* Try the key against the string table. */ for (i = 0; i < nitems(key_string_table); i++) { if (key == key_string_table[i].key) break; } if (i != nitems(key_string_table)) { strlcat(out, key_string_table[i].string, sizeof out); return (out); } /* Is this a UTF-8 key? */ if (key > 127 && key < KEYC_BASE) { if (utf8_split(key, &ud) == UTF8_DONE) { off = strlen(out); memcpy(out + off, ud.data, ud.size); out[off + ud.size] = '\0'; return (out); } } /* Invalid keys are errors. */ if (key == 127 || key > 255) { snprintf(out, sizeof out, "Invalid#%llx", key); return (out); } /* Check for standard or control key. */ if (key <= 32) { if (key == 0 || key > 26) xsnprintf(tmp, sizeof tmp, "C-%c", (int)(64 + key)); else xsnprintf(tmp, sizeof tmp, "C-%c", (int)(96 + key)); } else if (key >= 32 && key <= 126) { tmp[0] = key; tmp[1] = '\0'; } else if (key >= 128) xsnprintf(tmp, sizeof tmp, "\\%llo", key); strlcat(out, tmp, sizeof out); return (out); }