/*
 * This function is expected to be called in a critical section since it
 * changes the per-cpu pci config space va-to-pa mappings.
 */
static vm_offset_t
zbpci_config_space_va(int bus, int slot, int func, int reg, int bytes)
{
	int cpu;
	vm_offset_t va_page;
	vm_paddr_t pa, pa_page;

	if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX &&
	    reg <= PCI_REGMAX && (bytes == 1 || bytes == 2 || bytes == 4) &&
	    ((reg & (bytes - 1)) == 0)) {
		cpu = PCPU_GET(cpuid);
		va_page = zbpci_config_space[cpu].vaddr;
		pa = CFG_PADDR_BASE | (bus << 16) | (slot << 11) |
		    (func << 8) | reg;
#if _BYTE_ORDER == _BIG_ENDIAN
		pa = pa ^ (4 - bytes);
#endif
		pa_page = rounddown2(pa, PAGE_SIZE);
		if (zbpci_config_space[cpu].paddr != pa_page) {
			pmap_kremove(va_page);
			pmap_kenter_attr(va_page, pa_page, PTE_C_UNCACHED);
			zbpci_config_space[cpu].paddr = pa_page;
		}
		return (va_page + (pa - pa_page));
	} else {
		return (0);
	}
}
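/*
 * An illustrative sketch (not part of the driver) of the page split used
 * above, assuming rounddown2(x, y) is ((x) & ~((y) - 1)) as in FreeBSD's
 * <sys/param.h>: the per-cpu window is remapped to the page base, and the
 * access lands at the in-page offset.  All values below are made up.
 */
#include <assert.h>
#include <stdint.h>

#define	DEMO_PAGE_SIZE		4096	/* signed, so the mask sign-extends */
#define	demo_rounddown2(x, y)	((x) & ~((y) - 1))

static void
demo_cfg_window_split(void)
{
	/* A made-up config-space address: bus 3, slot 5, func 0, reg 0x10. */
	uint64_t pa = 0xfe0000000ull | (3 << 16) | (5 << 11) | 0x10;
	uint64_t pa_page = demo_rounddown2(pa, DEMO_PAGE_SIZE);

	assert(pa_page == 0xfe0032000ull);	/* page base to map */
	assert(pa - pa_page == 0x810);		/* in-page offset returned */
}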
static ssize_t
get_phys_buffer(vm_offset_t dest, const size_t len, void **buf)
{
	int i = 0;
	const size_t segsize = 2 * 1024 * 1024;

	for (i = 0; i < nkexec_segments; i++) {
		if (dest >= (vm_offset_t)loaded_segments[i].mem &&
		    dest < (vm_offset_t)loaded_segments[i].mem +
		    loaded_segments[i].memsz)
			goto out;
	}

	loaded_segments[nkexec_segments].buf = host_getmem(segsize);
	loaded_segments[nkexec_segments].bufsz = segsize;
	loaded_segments[nkexec_segments].mem =
	    (void *)rounddown2(dest, segsize);
	loaded_segments[nkexec_segments].memsz = segsize;

	i = nkexec_segments;
	nkexec_segments++;

out:
	*buf = loaded_segments[i].buf +
	    (dest - (vm_offset_t)loaded_segments[i].mem);
	return (min(len, loaded_segments[i].bufsz -
	    (dest - (vm_offset_t)loaded_segments[i].mem)));
}
static void
cheri_capability_set_user_sigcode(struct chericap *cp, struct sysentvec *se)
{
	uintptr_t base;
	int szsigcode = *se->sv_szsigcode;

	/* XXX: true for mips64 and mips64-cheriabi... */
	base = (uintptr_t)se->sv_psstrings - szsigcode;
	base = rounddown2(base, sizeof(struct chericap));

	cheri_capability_set(cp, CHERI_CAP_USER_CODE_PERMS,
	    CHERI_CAP_USER_CODE_OTYPE, (void *)base, szsigcode, 0);
}
register_t *
cloudabi64_copyout_strings(struct image_params *imgp)
{
	struct image_args *args;
	uintptr_t begin;
	size_t len;

	/* Copy out program arguments. */
	args = imgp->args;
	len = args->begin_envv - args->begin_argv;
	begin = rounddown2(imgp->sysent->sv_usrstack - len,
	    sizeof(register_t));
	copyout(args->begin_argv, (void *)begin, len);
	return ((register_t *)begin);
}
/*
 * Seek to an entry in a directory.
 * Only values returned by rst_telldir should be passed to rst_seekdir.
 * This routine handles many directories in a single file.
 * It takes the base of the directory in the file, plus
 * the desired seek offset into it.
 */
static void
rst_seekdir(RST_DIR *dirp, long loc, long base)
{

	if (loc == rst_telldir(dirp))
		return;
	loc -= base;
	if (loc < 0)
		fprintf(stderr, "bad seek pointer to rst_seekdir %ld\n", loc);
	(void) lseek(dirp->dd_fd, base + rounddown2(loc, DIRBLKSIZ), SEEK_SET);
	dirp->dd_loc = loc & (DIRBLKSIZ - 1);
	if (dirp->dd_loc != 0)
		dirp->dd_size = read(dirp->dd_fd, dirp->dd_buf, DIRBLKSIZ);
}
static void
read_pdu_limits(struct adapter *sc, uint32_t *max_tx_pdu_len,
    uint32_t *max_rx_pdu_len)
{
	uint32_t tx_len, rx_len, r, v;

	rx_len = t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE);
	tx_len = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);

	r = t4_read_reg(sc, A_TP_PARA_REG2);
	rx_len = min(rx_len, G_MAXRXDATA(r));
	tx_len = min(tx_len, G_MAXRXDATA(r));

	r = t4_read_reg(sc, A_TP_PARA_REG7);
	v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
	rx_len = min(rx_len, v);
	tx_len = min(tx_len, v);

	/* Remove after FW_FLOWC_MNEM_TXDATAPLEN_MAX fix in firmware. */
	tx_len = min(tx_len, 3 * 4096);

	*max_tx_pdu_len = rounddown2(tx_len, 512);
	*max_rx_pdu_len = rounddown2(rx_len, 512);
}
static int
geli_dev_strategy(void *devdata, int rw, daddr_t blk, size_t size, char *buf,
    size_t *rsize)
{
	struct geli_devdesc *gdesc;
	off_t alnend, alnstart, reqend, reqstart;
	size_t alnsize;
	char *iobuf;
	int rc;

	/* We only handle reading; no write support. */
	if ((rw & F_MASK) != F_READ)
		return (EOPNOTSUPP);

	gdesc = (struct geli_devdesc *)devdata;

	/*
	 * We can only decrypt full geli blocks.  The blk arg is expressed in
	 * units of DEV_BSIZE blocks, while size is in bytes.  Convert
	 * everything to bytes, and calculate the geli-blocksize-aligned start
	 * and end points.
	 *
	 * Note: md_sectorsize must be cast to a signed type for the
	 * roundup2()/rounddown2() macros to work correctly (otherwise the
	 * masks get zero-extended to 64 bits and mask off the high order
	 * 32 bits of the requested start/end).
	 */
	reqstart = blk * DEV_BSIZE;
	reqend = reqstart + size;
	alnstart = rounddown2(reqstart, (int)gdesc->gdev->md.md_sectorsize);
	alnend = roundup2(reqend, (int)gdesc->gdev->md.md_sectorsize);
	alnsize = alnend - alnstart;

	/*
	 * If alignment requires us to read more than the size of the provided
	 * buffer, allocate a temporary buffer.
	 */
	if (alnsize <= size)
		iobuf = buf;
	else if ((iobuf = malloc(alnsize)) == NULL)
		return (ENOMEM);

	/*
	 * Read the encrypted data using the host provider, then decrypt it.
	 */
	rc = gdesc->hdesc->dd.d_dev->dv_strategy(gdesc->hdesc, rw,
	    alnstart / DEV_BSIZE, alnsize, iobuf, NULL);
	if (rc != 0)
		goto out;
	rc = geli_read(gdesc->gdev, alnstart, iobuf, alnsize);
	if (rc != 0)
		goto out;

	/*
	 * If we had to use a temporary buffer, copy the requested part of the
	 * data to the caller's buffer.
	 */
	if (iobuf != buf)
		memcpy(buf, iobuf + (reqstart - alnstart), size);

	if (rsize != NULL)
		*rsize = size;
out:
	if (iobuf != buf)
		free(iobuf);

	return (rc);
}
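/*
 * A hedged sketch of why the (int) cast above matters, assuming
 * rounddown2(x, y) is ((x) & ~((y) - 1)) as in FreeBSD's <sys/param.h>.
 * With an unsigned 32-bit alignment, ~((y) - 1) stays 32 bits wide and is
 * zero-extended when ANDed with the 64-bit offset, wiping its top half;
 * casting to a signed type sign-extends the mask instead.  The offsets are
 * invented for the demonstration.
 */
#include <assert.h>
#include <stdint.h>

#define	demo_rounddown2(x, y)	((x) & ~((y) - 1))

static void
demo_sectorsize_cast(void)
{
	int64_t reqstart = 0x123456789abcll;	/* offset wider than 32 bits */
	uint32_t sectorsize = 4096;

	/* Unsigned mask: the high 32 bits of reqstart are lost. */
	assert(demo_rounddown2(reqstart, sectorsize) == 0x56789000ll);

	/* Signed cast: the mask sign-extends and the offset survives. */
	assert(demo_rounddown2(reqstart, (int)sectorsize) == 0x123456789000ll);
}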
void *
initarm(struct arm_boot_params *abp)
{
#define	next_chunk2(a,b)	(((a) + (b)) &~ ((b)-1))
#define	next_page(a)		next_chunk2(a,PAGE_SIZE)
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	int loop, i;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
	uint32_t memsize;

	/* kernel text starts where we were loaded at boot */
#define	KERNEL_TEXT_OFF		(abp->abp_physaddr - PHYSADDR)
#define	KERNEL_TEXT_BASE	(KERNBASE + KERNEL_TEXT_OFF)
#define	KERNEL_TEXT_PHYS	(PHYSADDR + KERNEL_TEXT_OFF)

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;
	set_cpufuncs();		/* NB: sets cputype */
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	init_static_kenv(NULL, 0);

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * We allocate memory downwards from where we were loaded
	 * by RedBoot; first the L1 page table, then NUM_KERNEL_PTS
	 * entries in the L2 page table.  Past that we re-align the
	 * allocation boundary so later data structures (stacks, etc)
	 * can be mapped with different attributes (write-back vs
	 * write-through).  Note this leaves a gap for expansion
	 * (or might be repurposed).
	 */
	freemempos = abp->abp_physaddr;

	/* macros to simplify initial memory allocation */
#define	alloc_pages(var, np) do {					\
	freemempos -= (np * PAGE_SIZE);					\
	(var) = freemempos;						\
	/* NB: this works because locore maps PA=VA */			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));			\
} while (0)
#define	valloc_pages(var, np) do {					\
	alloc_pages((var).pv_pa, (np));					\
	(var).pv_va = (var).pv_pa + (KERNVIRTADDR - abp->abp_physaddr);	\
} while (0)

	/* force L1 page table alignment */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	/* allocate contiguous L1 page table */
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	/* now allocate L2 page tables; they are linked to L1 below */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va =
			    kernel_pt_table[loop].pv_pa +
			    (KERNVIRTADDR - abp->abp_physaddr);
		}
	}
	freemem_pt = freemempos;	/* base of allocated pt's */

	/*
	 * Re-align allocation boundary so we can map the area
	 * write-back instead of write-through for the stacks and
	 * related structures allocated below.
	 */
	freemempos = PHYSADDR + 0x100000;

	/*
	 * Allocate a page for the system page mapped to V0x00000000.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, kstack_pages);
	alloc_pages(minidataclean.pv_pa, 1);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now construct the L1 page table.  First map the L2
	 * page tables into the L1 so we can replace L1 mappings
	 * later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 page tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, rounddown2(ARM_VECTORS_HIGH, 0x00100000),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 1]);
	pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 2]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
	pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, PHYSADDR + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_PHYS,
	    next_chunk2(((uint32_t)lastaddr) - KERNEL_TEXT_BASE, L1_S_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	freemem_after = next_page((int)lastaddr);
	afterkern = round_page(next_chunk2((vm_offset_t)lastaddr, L1_S_SIZE));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}
	pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, afterkern, minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	if (cpu_is_ixp43x())
		arm_devmap_bootstrap(l1pagetable, ixp435_devmap);
	else
		arm_devmap_bootstrap(l1pagetable, ixp425_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache.  This will have happened in cpu_setttb()
	 * but since we are bootstrapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync.  A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	cpu_setup();

	/* ready to setup the console (XXX move earlier if possible) */
	cninit();

	/*
	 * Fetch the RAM size from the MCU registers.  The
	 * expansion bus was mapped above so we can now read 'em.
	 */
	if (cpu_is_ixp43x())
		memsize = ixp435_ddram_size();
	else
		memsize = ixp425_sdram_size();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
	vm_max_kernel_address = 0xe0000000;
	pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Add the physical ram we have available.
	 *
	 * Exclude the kernel, and all the things we allocated which
	 * immediately follow the kernel, from the VM allocation pool but not
	 * from crash dumps.  virtual_avail is a global variable which tracks
	 * the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_hardware_region(PHYSADDR, memsize);
	arm_physmem_exclude_region(freemem_pt, abp->abp_physaddr -
	    freemem_pt, EXFLAG_NOALLOC);
	arm_physmem_exclude_region(freemempos, abp->abp_physaddr -
	    0x100000 - freemempos, EXFLAG_NOALLOC);
	arm_physmem_exclude_region(abp->abp_physaddr, virtual_avail -
	    KERNVIRTADDR, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
#undef next_page
#undef next_chunk2
}
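/*
 * An illustrative note, not from the source: next_chunk2() above is not the
 * same as roundup2() from <sys/param.h>.  roundup2() leaves an already
 * aligned value unchanged, while next_chunk2() always advances to the *next*
 * boundary, so it reserves at least one full chunk past its argument.  The
 * demo values below are invented.
 */
#include <assert.h>
#include <stdint.h>

#define	demo_next_chunk2(a, b)	(((a) + (b)) & ~((b) - 1))
#define	demo_roundup2(a, b)	(((a) + ((b) - 1)) & ~((b) - 1))

static void
demo_chunk_vs_roundup(void)
{
	uint32_t aligned = 0x00200000, unaligned = 0x00200004;

	/* Already aligned: roundup2 keeps it, next_chunk2 skips ahead. */
	assert(demo_roundup2(aligned, 0x100000) == 0x00200000);
	assert(demo_next_chunk2(aligned, 0x100000) == 0x00300000);

	/* Unaligned: both land on the next 1 MiB boundary. */
	assert(demo_roundup2(unaligned, 0x100000) == 0x00300000);
	assert(demo_next_chunk2(unaligned, 0x100000) == 0x00300000);
}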
/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error,
    uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	bus_size_t off;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until we do, the "owned" variable does
	 * XXX the trick.
	 */
	if (error) {
		goto done;
	}

	off = 0;
	pg = pc->page_start;
	pg->physaddr = rounddown2(segs->ds_addr, USB_PAGE_SIZE);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
#ifdef USB_DEBUG
	if (nseg > 1) {
		int x;

		for (x = 0; x != nseg - 1; x++) {
			if (((segs[x].ds_addr + segs[x].ds_len) &
			    (USB_PAGE_SIZE - 1)) ==
			    ((segs[x + 1].ds_addr & (USB_PAGE_SIZE - 1))))
				continue;
			/*
			 * This check verifies there is no page offset
			 * hole between any of the segments.  See the
			 * BUS_DMA_KEEP_PG_OFFSET flag.
			 */
			DPRINTFN(0, "Page offset was not preserved\n");
			error = 1;
			goto done;
		}
	}
#endif
	while (pc->ismultiseg) {
		off += USB_PAGE_SIZE;
		if (off >= (segs->ds_len + rem)) {
			/* page crossing */
			nseg--;
			segs++;
			off = 0;
			rem = 0;
			if (nseg == 0)
				break;
		}
		pg++;
		pg->physaddr = rounddown2(segs->ds_addr + off, USB_PAGE_SIZE);
	}
done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}
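/*
 * A standalone sketch of the invariant tested under USB_DEBUG above,
 * assuming 4 KiB USB pages: with BUS_DMA_KEEP_PG_OFFSET semantics each
 * segment must begin at the page offset where the previous one ended, so
 * walking the buffer one page at a time never skips an offset.  The
 * demo_seg type and the addresses below are made up for the sketch.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define	DEMO_USB_PAGE_SIZE	4096u

struct demo_seg {
	uint64_t ds_addr;
	uint64_t ds_len;
};

static int
demo_pg_offset_preserved(const struct demo_seg *segs, size_t nseg)
{
	size_t x;

	for (x = 0; x + 1 < nseg; x++) {
		if (((segs[x].ds_addr + segs[x].ds_len) &
		    (DEMO_USB_PAGE_SIZE - 1)) !=
		    (segs[x + 1].ds_addr & (DEMO_USB_PAGE_SIZE - 1)))
			return (0);	/* page offset hole */
	}
	return (1);
}

static void
demo_pg_offset_example(void)
{
	/* First segment ends page-aligned; second starts at offset 0. */
	struct demo_seg ok[2] = { { 0x10100, 0xf00 }, { 0x20000, 0x1000 } };
	/* Second segment starts at offset 0x80: an offset hole. */
	struct demo_seg bad[2] = { { 0x10100, 0xf00 }, { 0x20080, 0x1000 } };

	assert(demo_pg_offset_preserved(ok, 2));
	assert(!demo_pg_offset_preserved(bad, 2));
}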
/*
 * This is called for every object loaded (kernel, module, dtb file, etc).
 * The expected return value is the next address at or after the given addr
 * which is appropriate for loading the given object described by type and
 * data.  On each call the addr is the next address following the previously
 * loaded object.
 *
 * The first call is for loading the kernel, and the addr argument will be
 * zero, and we search for a big block of ram to load the kernel and modules.
 *
 * On subsequent calls the addr will be non-zero, and we just round it up so
 * that each object begins on a page boundary.
 */
uint64_t
uboot_loadaddr(u_int type, void *data, uint64_t addr)
{
	struct sys_info *si;
	uint64_t sblock, eblock, subldr, eubldr;
	uint64_t biggest_block, this_block;
	uint64_t biggest_size, this_size;
	int i;
	char *envstr;

	if (addr == 0) {
		/*
		 * If the loader_kernaddr environment variable is set, blindly
		 * honor it.  It had better be right.  We force interpretation
		 * of the value in base-16 regardless of any leading 0x
		 * prefix, because that's the U-Boot convention.
		 */
		envstr = ub_env_get("loader_kernaddr");
		if (envstr != NULL)
			return (strtoul(envstr, NULL, 16));

		/*
		 * Find addr/size of largest DRAM block.  Carve our own
		 * address range out of the block, because loading the kernel
		 * over the top of ourselves is a poor memory-conservation
		 * strategy.  Avoid memory at beginning of the first block of
		 * physical ram, since u-boot likes to pass args and data
		 * there.  Assume that u-boot has moved itself to the very top
		 * of ram and optimistically assume that we won't run into it
		 * up there.
		 */
		if ((si = ub_get_sys_info()) == NULL)
			panic("could not retrieve system info");

		biggest_block = 0;
		biggest_size = 0;
		subldr = rounddown2((uintptr_t)_start, KERN_ALIGN);
		eubldr = roundup2((uint64_t)uboot_heap_end, KERN_ALIGN);
		for (i = 0; i < si->mr_no; i++) {
			if (si->mr[i].flags != MR_ATTR_DRAM)
				continue;
			sblock = roundup2((uint64_t)si->mr[i].start,
			    KERN_ALIGN);
			eblock = rounddown2((uint64_t)si->mr[i].start +
			    si->mr[i].size, KERN_ALIGN);
			if (biggest_size == 0)
				sblock += KERN_MINADDR;
			if (subldr >= sblock && subldr < eblock) {
				if (subldr - sblock > eblock - eubldr) {
					this_block = sblock;
					this_size = subldr - sblock;
				} else {
					this_block = eubldr;
					this_size = eblock - eubldr;
				}
			} else if (subldr < sblock && eubldr < eblock) {
				/* Loader is below or engulfs the sblock */
				this_block = (eubldr < sblock) ? sblock :
				    eubldr;
				this_size = eblock - this_block;
			} else {
				this_block = 0;
				this_size = 0;
			}
			if (biggest_size < this_size) {
				biggest_block = this_block;
				biggest_size = this_size;
			}
		}
		if (biggest_size == 0)
			panic("Not enough DRAM to load kernel");
#if 0
		printf("Loading kernel into region 0x%08jx-0x%08jx (%ju MiB)\n",
		    (uintmax_t)biggest_block,
		    (uintmax_t)biggest_block + biggest_size - 1,
		    (uintmax_t)biggest_size / 1024 / 1024);
#endif
		return (biggest_block);
	}
	return (roundup2(addr, PAGE_SIZE));
}
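/*
 * A minimal sketch of the boundary trimming above, assuming the FreeBSD
 * <sys/param.h> semantics of roundup2()/rounddown2() and a hypothetical
 * 2 MiB KERN_ALIGN: usable block bounds are shrunk *inward*, so the start
 * rounds up and the end rounds down, never enlarging the region.  The
 * DRAM addresses are invented.
 */
#include <assert.h>
#include <stdint.h>

#define	DEMO_KERN_ALIGN		(2 * 1024 * 1024)
#define	demo_roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))
#define	demo_rounddown2(x, y)	((x) & ~((y) - 1))

static void
demo_trim_dram_block(void)
{
	uint64_t start = 0x80000400, size = 0x10000000;
	uint64_t sblock = demo_roundup2(start, (uint64_t)DEMO_KERN_ALIGN);
	uint64_t eblock = demo_rounddown2(start + size,
	    (uint64_t)DEMO_KERN_ALIGN);

	assert(sblock == 0x80200000);	/* start rounded up into the block */
	assert(eblock == 0x90000000);	/* end rounded down into the block */
	assert(sblock >= start && eblock <= start + size);
}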
static u_int
adb_mouse_receive_packet(device_t dev, u_char status, u_char command,
    u_char reg, int len, u_char *data)
{
	struct adb_mouse_softc *sc;
	int i = 0;
	int xdelta, ydelta;
	int buttons, tmp_buttons;

	sc = device_get_softc(dev);

	if (command != ADB_COMMAND_TALK || reg != 0 || len < 2)
		return (0);

	ydelta = data[0] & 0x7f;
	xdelta = data[1] & 0x7f;

	buttons = 0;
	buttons |= !(data[0] & 0x80);
	buttons |= !(data[1] & 0x80) << 1;

	if (sc->flags & AMS_EXTENDED) {
		for (i = 2; i < len && i < 5; i++) {
			xdelta |= (data[i] & 0x07) << (3*i + 1);
			ydelta |= (data[i] & 0x70) << (3*i - 3);

			buttons |= !(data[i] & 0x08) << (2*i - 2);
			buttons |= !(data[i] & 0x80) << (2*i - 1);
		}
	} else {
		len = 2;	/* Ignore extra data */
	}

	/* Do sign extension as necessary */
	if (xdelta & (0x40 << 3*(len - 2)))
		xdelta |= 0xffffffc0 << 3*(len - 2);
	if (ydelta & (0x40 << 3*(len - 2)))
		ydelta |= 0xffffffc0 << 3*(len - 2);

	if ((sc->flags & AMS_TOUCHPAD) && (sc->sc_tapping == 1)) {
		tmp_buttons = buttons;
		if (buttons == 0x12) {
			/*
			 * Map a double tap on button 3.  Keep the button
			 * state for the next sequence.  A double tap sequence
			 * is followed by a single tap sequence.
			 */
			tmp_buttons = 0x3;
			sc->button_buf = tmp_buttons;
		} else if (buttons == 0x2) {
			/*
			 * Map a single tap on button 2.  But only if it is
			 * not a successor from a double tap.
			 */
			if (sc->button_buf != 0x3)
				tmp_buttons = 0x2;
			else
				tmp_buttons = 0;
			sc->button_buf = 0;
		}
		buttons = tmp_buttons;
	}

	/*
	 * Some mice report high-numbered buttons on the wrong button number,
	 * so set the highest-numbered real button as pressed if there are
	 * mysterious high-numbered ones set.
	 *
	 * Don't do this for touchpads, because touchpads also trigger
	 * high button events when they are touched.
	 */
	if (rounddown2(buttons, 1 << sc->hw.buttons) &&
	    !(sc->flags & AMS_TOUCHPAD)) {
		buttons |= 1 << (sc->hw.buttons - 1);
	}
	buttons &= (1 << sc->hw.buttons) - 1;

	mtx_lock(&sc->sc_mtx);

	/*
	 * Add in our new deltas, and take into account
	 * Apple's opposite meaning for Y axis motion.
	 */
	sc->xdelta += xdelta;
	sc->ydelta -= ydelta;

	sc->buttons = buttons;

	mtx_unlock(&sc->sc_mtx);

	cv_broadcast(&sc->sc_cv);
	selwakeuppri(&sc->rsel, PZERO);

	return (0);
}
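/*
 * An illustrative sketch (not from the driver) of the rounddown2() test
 * above: with a power-of-two bound, rounddown2(buttons, 1 << n) clears the
 * low n bits, so a nonzero result means some button numbered n or higher
 * is set.  The button masks below are invented.
 */
#include <assert.h>

#define	demo_rounddown2(x, y)	((x) & ~((y) - 1))

static void
demo_high_button_test(void)
{
	int hw_buttons = 3;	/* hypothetical 3-button mouse */

	/* Buttons 1 and 3 only: nothing at or above bit 3. */
	assert(demo_rounddown2(0x5, 1 << hw_buttons) == 0);

	/* Button 5 (bit 4) set: flagged as a mysterious high button. */
	assert(demo_rounddown2(0x10, 1 << hw_buttons) != 0);
}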
static int
init_msix_table(struct vmctx *ctx, struct passthru_softc *sc, uint64_t base)
{
	int b, s, f;
	int error, idx;
	size_t len, remaining;
	uint32_t table_size, table_offset;
	uint32_t pba_size, pba_offset;
	vm_paddr_t start;
	struct pci_devinst *pi = sc->psc_pi;

	assert(pci_msix_table_bar(pi) >= 0 && pci_msix_pba_bar(pi) >= 0);

	b = sc->psc_sel.pc_bus;
	s = sc->psc_sel.pc_dev;
	f = sc->psc_sel.pc_func;

	/*
	 * If the MSI-X table BAR maps memory intended for
	 * other uses, it is at least assured that the table
	 * either resides in its own page within the region,
	 * or it resides in a page shared with only the PBA.
	 */
	table_offset = rounddown2(pi->pi_msix.table_offset, 4096);

	table_size = pi->pi_msix.table_offset - table_offset;
	table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
	table_size = roundup2(table_size, 4096);

	if (pi->pi_msix.pba_bar == pi->pi_msix.table_bar) {
		pba_offset = pi->pi_msix.pba_offset;
		pba_size = pi->pi_msix.pba_size;
		if (pba_offset >= table_offset + table_size ||
		    table_offset >= pba_offset + pba_size) {
			/*
			 * The PBA can reside in the same BAR as the MSI-X
			 * tables as long as it does not overlap with any
			 * naturally aligned page occupied by the tables.
			 */
		} else {
			/* Need to also emulate the PBA, not supported yet */
			printf("Unsupported MSI-X configuration: %d/%d/%d\n",
			    b, s, f);
			return (-1);
		}
	}

	idx = pi->pi_msix.table_bar;
	start = pi->pi_bar[idx].addr;
	remaining = pi->pi_bar[idx].size;

	/* Map everything before the MSI-X table */
	if (table_offset > 0) {
		len = table_offset;
		error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
		if (error)
			return (error);

		base += len;
		start += len;
		remaining -= len;
	}

	/* Skip the MSI-X table */
	base += table_size;
	start += table_size;
	remaining -= table_size;

	/* Map everything beyond the end of the MSI-X table */
	if (remaining > 0) {
		len = remaining;
		error = vm_map_pptdev_mmio(ctx, b, s, f, start, len, base);
		if (error)
			return (error);
	}

	return (0);
}
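/*
 * A small worked example (values invented) of the page bracketing above:
 * rounddown2() pulls the table start back to its page boundary, and
 * roundup2() pushes the size out so [table_offset, table_offset+table_size)
 * covers every 4 KiB page the MSI-X table touches.  DEMO_MSIX_ENTRY_SIZE
 * stands in for MSIX_TABLE_ENTRY_SIZE (16 bytes per vector).
 */
#include <assert.h>
#include <stdint.h>

#define	DEMO_MSIX_ENTRY_SIZE	16u
#define	demo_rounddown2(x, y)	((x) & ~((y) - 1))
#define	demo_roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

static void
demo_msix_page_bracket(void)
{
	uint32_t raw_offset = 0x3080;	/* table starts mid-page */
	uint32_t count = 32;		/* 32 vectors */
	uint32_t table_offset, table_size;

	table_offset = demo_rounddown2(raw_offset, 4096);
	table_size = raw_offset - table_offset;
	table_size += count * DEMO_MSIX_ENTRY_SIZE;
	table_size = demo_roundup2(table_size, 4096);

	assert(table_offset == 0x3000);	/* page containing the table start */
	assert(table_size == 0x1000);	/* 0x80 + 0x200 fits in one page */
}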