/**
 * @brief This function downloads firmware image to the card.
 *
 * Helper handshake: the card posts the number of bytes it wants next in
 * two base-address registers; the host copies that much of the image into
 * an (optionally DMA-aligned) bounce buffer and pushes it with sdio_writesb,
 * repeating until the card asks for zero bytes.
 *
 * @param priv A pointer to bt_private structure
 * @return BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
sd_download_firmware_w_helper(bt_private * priv)
{
    struct sdio_mmc_card *card = (struct sdio_mmc_card *) priv->bt_dev.card;
    const struct firmware *fw_firmware = NULL;
    u8 *firmware = NULL;        /* raw image bytes (const cast away) */
    int firmwarelen;
    u8 base0;                   /* low byte of length requested by card */
    u8 base1;                   /* high byte of length requested by card */
    int ret = BT_STATUS_SUCCESS;
    int offset;                 /* bytes of the image sent so far */
    void *tmpfwbuf = NULL;      /* unaligned allocation backing fwbuf */
    int tmpfwbufsz;
    u8 *fwbuf;                  /* (aligned) bounce buffer for writes */
    u16 len;                    /* length requested by the card */
    int txlen = 0;              /* payload bytes in the current round */
    int tx_blocks = 0;
    int i = 0;                  /* consecutive CRC-error retry counter */
    int tries = 0;
#ifdef FW_DOWNLOAD_SPEED
    u32 tv1, tv2;               /* microsecond timestamps for speed stats */
#endif
    char *cur_fw_name = NULL;

    ENTER();

    if (fw_name == NULL)
        /* Check revision ID */
        switch (priv->adapter->chip_rev) {
        case SD8787_W0:
        case SD8787_W1:
            cur_fw_name = SD8787_W1_FW_NAME;
            break;
        case SD8787_A0:
        case SD8787_A1:
            cur_fw_name = SD8787_AX_FW_NAME;
            break;
        default:
            cur_fw_name = DEFAULT_FW_NAME;
            break;
        }
    else
        cur_fw_name = fw_name;  /* explicit name overrides the chip table */

    if ((ret = request_firmware(&fw_firmware, cur_fw_name,
                                priv->hotplug_device)) < 0) {
        PRINTM(FATAL, "request_firmware() failed, error code = %#x\n", ret);
        goto done;
    }

    if (fw_firmware) {
        firmware = (u8 *) fw_firmware->data;
        firmwarelen = fw_firmware->size;
    } else {
        PRINTM(MSG, "No firmware image found! Terminating download\n");
        ret = BT_STATUS_FAILURE;
        goto done;
    }

    PRINTM(INFO, "Downloading FW image (%d bytes)\n", firmwarelen);

#ifdef FW_DOWNLOAD_SPEED
    tv1 = get_utimeofday();
#endif

    /* Over-allocate so the buffer can be rounded up to the DMA alignment. */
#ifdef PXA3XX_DMA_ALIGN
    tmpfwbufsz = ALIGN_SZ(BT_UPLD_SIZE, PXA3XX_DMA_ALIGNMENT);
#else /* PXA3XX_DMA_ALIGN */
    tmpfwbufsz = BT_UPLD_SIZE;
#endif
    tmpfwbuf = kmalloc(tmpfwbufsz, GFP_KERNEL);
    if (!tmpfwbuf) {
        PRINTM(ERROR,
               "Unable to allocate buffer for firmware. Terminating download\n");
        ret = BT_STATUS_FAILURE;
        goto done;
    }
    memset(tmpfwbuf, 0, tmpfwbufsz);
#ifdef PXA3XX_DMA_ALIGN
    /* Ensure 8-byte aligned firmware buffer */
    fwbuf = (u8 *) ALIGN_ADDR(tmpfwbuf, PXA3XX_DMA_ALIGNMENT);
#else /* PXA3XX_DMA_ALIGN */
    fwbuf = (u8 *) tmpfwbuf;
#endif

    /* Perform firmware data transfer */
    offset = 0;
    do {
        /* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY bits */
        ret = sd_poll_card_status(priv, CARD_IO_READY | DN_LD_CARD_RDY);
        if (ret < 0) {
            PRINTM(FATAL,
                   "FW download with helper poll status timeout @ %d\n",
                   offset);
            goto done;
        }
        /* More data? */
        if (offset >= firmwarelen)
            break;

        /* Poll until the card posts a non-zero requested length. */
        for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
            base0 = sdio_readb(card->func, SQ_READ_BASE_ADDRESS_A0_REG, &ret);
            if (ret) {
                PRINTM(WARN, "Dev BASE0 register read failed:"
                       " base0=0x%04X(%d). Terminating download\n",
                       base0, base0);
                ret = BT_STATUS_FAILURE;
                goto done;
            }
            base1 = sdio_readb(card->func, SQ_READ_BASE_ADDRESS_A1_REG, &ret);
            if (ret) {
                PRINTM(WARN, "Dev BASE1 register read failed:"
                       " base1=0x%04X(%d). Terminating download\n",
                       base1, base1);
                ret = BT_STATUS_FAILURE;
                goto done;
            }
            len = (((u16) base1) << 8) | base0;
            if (len != 0)
                break;
            udelay(10);
        }

        /* len == 0: the card is done asking for data. */
        if (len == 0)
            break;
        else if (len > BT_UPLD_SIZE) {
            PRINTM(FATAL, "FW download failure @ %d, invalid length %d\n",
                   offset, len);
            ret = BT_STATUS_FAILURE;
            goto done;
        }

        txlen = len;

        /* Bit 0 of the posted length flags a CRC error on the last chunk. */
        if (len & BIT(0)) {
            i++;
            if (i > MAX_WRITE_IOMEM_RETRY) {
                PRINTM(FATAL,
                       "FW download failure @ %d, over max retry count\n",
                       offset);
                ret = BT_STATUS_FAILURE;
                goto done;
            }
            PRINTM(ERROR, "FW CRC error indicated by the helper:"
                   " len = 0x%04X, txlen = %d\n", len, txlen);
            len &= ~BIT(0);
            /* Setting this to 0 to resend from same offset */
            /* NOTE(review): fwbuf and tx_blocks still hold the previous
             * chunk here, so the sdio_writesb below retransmits it as-is;
             * txlen = 0 keeps offset from advancing. */
            txlen = 0;
        } else {
            i = 0;

            /* Set blocksize to transfer - checking for last block */
            if (firmwarelen - offset < txlen)
                txlen = firmwarelen - offset;
            PRINTM(INFO, ".");

            tx_blocks =
                (txlen + SD_BLOCK_SIZE_FW_DL - 1) / SD_BLOCK_SIZE_FW_DL;

            /* Copy payload to buffer */
            memcpy(fwbuf, &firmware[offset], txlen);
        }

        /* Send data */
        ret = sdio_writesb(card->func, priv->bt_dev.ioport, fwbuf,
                           tx_blocks * SD_BLOCK_SIZE_FW_DL);
        if (ret < 0) {
            /* NOTE(review): on write failure the CFG register is poked but
             * the loop still advances offset below -- confirm whether a
             * retry (continue) was intended instead. */
            PRINTM(ERROR, "FW download, write iomem (%d) failed @ %d\n",
                   i, offset);
            sdio_writeb(card->func, 0x04, CONFIGURATION_REG, &ret);
            if (ret)
                PRINTM(ERROR, "write ioreg failed (CFG)\n");
        }

        offset += txlen;
    } while (TRUE);

    PRINTM(INFO, "\nFW download over, size %d bytes\n", offset);

    ret = BT_STATUS_SUCCESS;
  done:
#ifdef FW_DOWNLOAD_SPEED
    tv2 = get_utimeofday();
    PRINTM(INFO, "FW: %ld.%03ld.%03ld ", tv1 / 1000000,
           (tv1 % 1000000) / 1000, tv1 % 1000);
    PRINTM(INFO, " -> %ld.%03ld.%03ld ", tv2 / 1000000,
           (tv2 % 1000000) / 1000, tv2 % 1000);
    tv2 -= tv1;
    PRINTM(INFO, " == %ld.%03ld.%03ld\n", tv2 / 1000000,
           (tv2 % 1000000) / 1000, tv2 % 1000);
#endif
    if (tmpfwbuf)
        kfree(tmpfwbuf);
    if (fw_firmware)
        release_firmware(fw_firmware);

    LEAVE();
    return ret;
}
/**
 * @brief This function allocates buffer for the members of adapter
 *        structure like command buffer and BSSID list.
 *
 * NOTE(review): on a mid-function failure the earlier allocations
 * (scan table, command buffer, mp_regs_buf) are not freed here --
 * presumably the caller's teardown path releases them; confirm.
 *
 * @param pmadapter A pointer to mlan_adapter structure
 *
 * @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
static mlan_status
wlan_allocate_adapter(pmlan_adapter pmadapter)
{
    int i;
    mlan_status ret = MLAN_STATUS_SUCCESS;
#ifdef STA_SUPPORT
    t_u32 buf_size;
    BSSDescriptor_t *ptemp_scan_table = MNULL;
#endif

    ENTER();

#ifdef STA_SUPPORT
    /* Allocate buffer to store the BSSID list */
    buf_size = sizeof(BSSDescriptor_t) * MRVDRV_MAX_BSSID_LIST;
    ret = pmadapter->callbacks.moal_malloc(pmadapter->pmoal_handle, buf_size,
                                           MLAN_MEM_DEF,
                                           (t_u8 **) & ptemp_scan_table);
    if (ret != MLAN_STATUS_SUCCESS || !ptemp_scan_table) {
        PRINTM(MERROR, "Failed to allocate scan table\n");
        LEAVE();
        return MLAN_STATUS_FAILURE;
    }
    pmadapter->pscan_table = ptemp_scan_table;
#endif

    /* Initialize cmd_free_q */
    util_init_list_head((t_void *) pmadapter->pmoal_handle,
                        &pmadapter->cmd_free_q, MTRUE,
                        pmadapter->callbacks.moal_init_lock);
    /* Initialize cmd_pending_q */
    util_init_list_head((t_void *) pmadapter->pmoal_handle,
                        &pmadapter->cmd_pending_q, MTRUE,
                        pmadapter->callbacks.moal_init_lock);
    /* Initialize scan_pending_q */
    util_init_list_head((t_void *) pmadapter->pmoal_handle,
                        &pmadapter->scan_pending_q, MTRUE,
                        pmadapter->callbacks.moal_init_lock);

    /* Allocate command buffer */
    ret = wlan_alloc_cmd_buffer(pmadapter);
    if (ret != MLAN_STATUS_SUCCESS) {
        PRINTM(MERROR, "Failed to allocate command buffer\n");
        LEAVE();
        return MLAN_STATUS_FAILURE;
    }

    /* One priority list per BSS */
    for (i = 0; i < MLAN_MAX_BSS_NUM; ++i) {
        util_init_list_head((t_void *) pmadapter->pmoal_handle,
                            &pmadapter->bssprio_tbl[i].bssprio_head,
                            MTRUE, pmadapter->callbacks.moal_init_lock);
        pmadapter->bssprio_tbl[i].bssprio_cur = MNULL;
    }

    /* DMA-capable scratch for the multi-port register block; over-allocated
     * so mp_regs can be rounded up to HEADER_ALIGNMENT below. */
    ret = pmadapter->callbacks.moal_malloc(pmadapter->pmoal_handle,
                                           MAX_MP_REGS + HEADER_ALIGNMENT,
                                           MLAN_MEM_DEF | MLAN_MEM_DMA,
                                           (t_u8 **) & pmadapter->mp_regs_buf);
    if (ret != MLAN_STATUS_SUCCESS || !pmadapter->mp_regs_buf) {
        PRINTM(MERROR, "Failed to allocate mp_regs_buf\n");
        LEAVE();
        return MLAN_STATUS_FAILURE;
    }
    pmadapter->mp_regs =
        (t_u8 *) ALIGN_ADDR(pmadapter->mp_regs_buf, HEADER_ALIGNMENT);

#if defined(SDIO_MULTI_PORT_TX_AGGR) || defined(SDIO_MULTI_PORT_RX_AGGR)
    ret = wlan_alloc_sdio_mpa_buffers(pmadapter,
                                      SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
                                      SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
    if (ret != MLAN_STATUS_SUCCESS) {
        PRINTM(MERROR, "Failed to allocate sdio mp-a buffers\n");
        LEAVE();
        return MLAN_STATUS_FAILURE;
    }
#endif

    /* Sleep confirm command buffer.  FIX: this allocation was previously
     * unchecked, unlike every other allocation above, so a failure was
     * only discovered later when the NULL buffer was used. */
    pmadapter->psleep_cfm =
        wlan_alloc_mlan_buffer(pmadapter, sizeof(opt_sleep_confirm_buffer),
                               0, MTRUE);
    if (!pmadapter->psleep_cfm) {
        PRINTM(MERROR, "Failed to allocate sleep confirm buffer\n");
        LEAVE();
        return MLAN_STATUS_FAILURE;
    }

    LEAVE();
    return MLAN_STATUS_SUCCESS;
}
/*
 * Allocate a descriptor ring, initializing as much as possible.
 *
 * Builds a circular ring of `entries` 32-byte-aligned DDMA descriptors for
 * the channel identified by chanid, pre-filling each descriptor with a
 * template derived from the channel's source/destination device tables.
 * Returns the (virtual) address of the ring base, or 0 on allocation
 * failure.  NOTE(review): addresses are carried in u32 -- this code
 * assumes a 32-bit platform.
 */
u32
au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
	int i;
	u32 desc_base, srcid, destid;
	u32 cmd0, cmd1, src1, dest1;
	u32 src0, dest0;
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);	/* chanid doubles as a chan_tab_t ** */
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	/*
	 * The descriptors must be 32-byte aligned. There is a
	 * possibility the allocation will give us such an address,
	 * and if we try that first we are likely to not waste larger
	 * slabs of memory.
	 */
	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
				 GFP_KERNEL|GFP_DMA);
	if (desc_base == 0)
		return 0;

	if (desc_base & 0x1f) {
		/*
		 * Lost....do it again, allocate extra, and round
		 * the address base.
		 */
		kfree((const void *)desc_base);
		i = entries * sizeof(au1x_ddma_desc_t);
		i += (sizeof(au1x_ddma_desc_t) - 1);
		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
		if (desc_base == 0)
			return 0;

		/* remember the raw pointer so it can be kfree'd later */
		ctp->cdb_membase = desc_base;
		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
	} else
		ctp->cdb_membase = desc_base;

	dp = (au1x_ddma_desc_t *)desc_base;

	/* Keep track of the base descriptor. */
	ctp->chan_desc_base = dp;

	/* Initialize the rings with as much information as we know. */
	srcid = stp->dev_id;
	destid = dtp->dev_id;

	cmd0 = cmd1 = src1 = dest1 = 0;
	src0 = dest0 = 0;

	cmd0 |= DSCR_CMD0_SID(srcid);
	cmd0 |= DSCR_CMD0_DID(destid);
	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);

	/* Is it mem to mem transfer? */
	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
		cmd0 |= DSCR_CMD0_MEM;

	/* Source transfer width, from the device table (bits). */
	switch (stp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
		break;
	}

	/* Destination transfer width. */
	switch (dtp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
		break;
	}

	/*
	 * If the device is marked as an in/out FIFO, ensure it is
	 * set non-coherent.
	 */
	if (stp->dev_flags & DEV_FLAGS_IN)
		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
	if (dtp->dev_flags & DEV_FLAGS_OUT)
		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */

	/*
	 * Set up source1. For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (stp->dev_tsize) {
	case 1:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
		break;
	case 2:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
		break;
	case 4:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
		break;
	}

	/* If source input is FIFO, set static address. */
	if (stp->dev_flags & DEV_FLAGS_IN) {
		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
		else
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
	}

	if (stp->dev_physaddr)
		src0 = stp->dev_physaddr;

	/*
	 * Set up dest1. For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (dtp->dev_tsize) {
	case 1:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
		break;
	case 2:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
		break;
	case 4:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
		break;
	}

	/* If destination output is FIFO, set static address. */
	if (dtp->dev_flags & DEV_FLAGS_OUT) {
		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
		else
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
	}

	if (dtp->dev_physaddr)
		dest0 = dtp->dev_physaddr;

#if 0
	printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
			  "source1:%x dest0:%x dest1:%x\n",
	       dtp->dev_id, stp->dev_id, cmd0, cmd1, src0, src1,
	       dest0, dest1);
#endif

	/* Stamp the template into every descriptor and chain each to the
	 * next via its physical address. */
	for (i = 0; i < entries; i++) {
		dp->dscr_cmd0 = cmd0;
		dp->dscr_cmd1 = cmd1;
		dp->dscr_source0 = src0;
		dp->dscr_source1 = src1;
		dp->dscr_dest0 = dest0;
		dp->dscr_dest1 = dest1;
		dp->dscr_stat = 0;
		dp->sw_context = 0;
		dp->sw_status = 0;
		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
		dp++;
	}

	/* Make last descrptor point to the first. */
	dp--;
	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	return (u32)ctp->chan_desc_base;
}
/* ARGSUSED */
/*
 * sys_madvise: give the kernel advice about the expected access pattern
 * of a region of the caller's address space.
 */
int
sys_madvise(struct proc *p, void *v, register_t *retval)
{
	struct sys_madvise_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */ *uap = v;
	vaddr_t start;
	vsize_t len, pageoff;
	int behav;

	start = (vaddr_t)SCARG(uap, addr);
	len = (vsize_t)SCARG(uap, len);
	behav = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size
	 * accordingly
	 */
	ALIGN_ADDR(start, len, pageoff);
	if (start > SIZE_MAX - len)
		return (EINVAL);	/* disallow wrap-around. */

	switch (behav) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		return (uvm_map_advice(&p->p_vmspace->vm_map, start,
		    start + len, behav));

	case MADV_WILLNEED:
		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		/*
		 * XXX IMPLEMENT ME.
		 * Should invent a "weak" mode for uvm_fault()
		 * which would only do the PGO_LOCKED pgo_get().
		 */
		return (0);

	case MADV_DONTNEED:
		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */
		return (uvm_map_clean(&p->p_vmspace->vm_map, start,
		    start + len, PGO_DEACTIVATE));

	case MADV_FREE:
		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */
		return (uvm_map_clean(&p->p_vmspace->vm_map, start,
		    start + len, PGO_FREE));

	case MADV_SPACEAVAIL:
		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */
		return (EINVAL);

	default:
		return (EINVAL);
	}
}
/*
 * sys_munmap: remove a mapping from the caller's address space.
 * The entire page-rounded range must be currently mapped.
 */
int
sys_munmap(struct proc *p, void *v, register_t *retval)
{
	struct sys_munmap_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	vaddr_t start;
	vsize_t len, pageoff;
	vm_map_t map;
	vaddr_t min_addr = VM_MIN_ADDRESS;
	struct vm_map_entry *dead_entries;

	/* fetch and page-round the user-supplied range */
	start = (vaddr_t)SCARG(uap, addr);
	len = (vsize_t)SCARG(uap, len);
	ALIGN_ADDR(start, len, pageoff);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (start > SIZE_MAX - len)
		return (EINVAL);
	if (VM_MAXUSER_ADDRESS > 0 && start + len > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (min_addr > 0 && start < min_addr)
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/* lock map so we can checkprot */
	vm_map_lock(map);

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */
	if (!uvm_map_checkprot(map, start, start + len, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}

	uvm_unmap_remove(map, start, start + len, &dead_entries, p);
	vm_map_unlock(map);	/* and unlock */

	/* drop references to the removed entries outside the map lock */
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);

	return (0);
}
/*
 * sys_msync: synchronize a mapped region with its backing object.
 *
 * FIX: the MS_SYNC/MS_ASYNC translation at the bottom was a dead
 * conditional -- both branches set PGO_SYNCIO (the else carried an
 * "XXXCDC: force sync for now!" note).  Collapsed to an unconditional
 * OR; behavior is unchanged.
 */
int
sys_msync(struct proc *p, void *v, register_t *retval)
{
	struct sys_msync_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_map_t map;
	int rv, flags, uvmflags;

	/*
	 * extract syscall args from the uap
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags: no unknown bits, at least one operation,
	 * and MS_ASYNC/MS_SYNC are mutually exclusive */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return (EINVAL);
	/* MS_INVALIDATE alone implies a synchronous clean */
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary, and adjust the size
	 * accordingly
	 */
	ALIGN_ADDR(addr, size, pageoff);
	if (addr > SIZE_MAX - size)
		return (EINVAL);	/* disallow wrap-around. */

	/*
	 * get map
	 */
	map = &p->p_vmspace->vm_map;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == TRUE) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */
	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	/* XXXCDC: force synchronous I/O for now, even for MS_ASYNC! */
	uvmflags |= PGO_SYNCIO;

	return (uvm_map_clean(map, addr, addr + size, uvmflags));
}
/*
 * sys_mmap: the mmap(2) system call.  Validates flags/protection,
 * resolves the file (or anonymous) backing object, computes the maximum
 * protection allowed by the descriptor, and hands off to uvm_mmap().
 * The common 'out' exit releases the file reference taken by FREF.
 */
int
sys_mmap(struct proc *p, void *v, register_t *retval)
{
	struct sys_mmap_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = NULL;
	struct vnode *vp;
	caddr_t handle;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */
	addr = (vaddr_t) SCARG(uap, addr);
	size = (vsize_t) SCARG(uap, len);
	prot = SCARG(uap, prot);
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if ((prot & VM_PROT_ALL) != prot)
		return (EINVAL);
	if ((flags & MAP_FLAGMASK) != flags)
		return (EINVAL);
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);
	/* userland may not request MAP_DENYWRITE directly */
	if (flags & MAP_DENYWRITE)
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */
	ALIGN_ADDR(pos, size, pageoff);

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* adjust address by the same amount as we did the offset */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);	/* not page aligned */

		if (addr > SIZE_MAX - size)
			return (EINVAL);	/* no wrapping! */
		if (VM_MAXUSER_ADDRESS > 0 &&
		    (addr + size) > VM_MAXUSER_ADDRESS)
			return (EINVAL);
		if (vm_min_address > 0 && addr < vm_min_address)
			return (EINVAL);
	} else {
		/*
		 * not fixed: make sure we skip over the largest possible heap.
		 * we will refine our guess later (e.g. to account for VAC, etc)
		 */
		if (addr == 0)
			addr = uvm_map_hint(p, prot);
		else if (!(flags & MAP_TRYFIXED) &&
		    addr < (vaddr_t)p->p_vmspace->vm_daddr)
			addr = uvm_map_hint(p, prot);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fdp, fd)) == NULL)
			return (EBADF);

		FREF(fp);	/* reference dropped at 'out' */

		if (fp->f_type != DTYPE_VNODE) {
			error = ENODEV;		/* only mmap vnodes! */
			goto out;
		}
		vp = (struct vnode *)fp->f_data;	/* convert to vnode */

		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			error = ENODEV; /* only REG/CHR/BLK support mmap */
			goto out;
		}

		if (vp->v_type == VREG && (pos + size) < pos) {
			error = EINVAL;		/* no offset wrapping */
			goto out;
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			flags |= MAP_ANON;
			FRELE(fp);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			    "%s (pid %d comm %s)\n",
			    vp->v_type == VCHR ? "MAP_SHARED" : "MAP_PRIVATE",
			    p->p_pid, p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

#ifdef ANOUBIS
		/* Force DENYWRITE mappings if file->denywrite is set. */
		if (fp->denywrite)
			flags |= MAP_DENYWRITE;
#endif

		/*
		 * now check protection
		 */

		/*
		 * Don't allow the file to be mapped into executable memory if
		 * the underlying file system is marked as 'noexec'.
		 */
		if (prot & PROT_EXEC && vp->v_mount->mnt_flag & MNT_NOEXEC) {
			error = EACCES;
			goto out;
		}

		/* maxprot starts with execute; read/write added below as
		 * the descriptor and file flags permit */
		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			error = EACCES;
			goto out;
		}

		/* PROT_EXEC only makes sense if the descriptor is readable. */
		if (!(fp->f_flag & FREAD) && prot & PROT_EXEC) {
			error = EACCES;
			goto out;
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable, append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, p->p_ucred, p)))
					goto out;
				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					error = EPERM;
					goto out;
				}
			} else if (prot & PROT_WRITE) {
				error = EACCES;
				goto out;
			}
		} else {
			/* MAP_PRIVATE mappings can always write to */
			maxprot |= VM_PROT_WRITE;
		}

#ifdef MAC
		error = mac_vnode_check_mmap(p->p_ucred, vp, prot, flags);
		if (error)
			goto out;
#endif

		vfs_mark_atime(vp, p->p_ucred);

		/*
		 * set handle to vnode
		 */
		handle = (caddr_t)vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1) {
			error = EINVAL;
			goto out;
		}

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	/* charge writable private/anonymous memory against RLIMIT_DATA */
	if ((flags & MAP_ANON) != 0 ||
	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
		if (size >
		    (p->p_rlimit[RLIMIT_DATA].rlim_cur -
		    ptoa(p->p_vmspace->vm_dused))) {
			error = ENOMEM;
			goto out;
		}
	}

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */
	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur, p);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

out:
	/* common exit: drop the file reference taken above, if any */
	if (fp)
		FRELE(fp);
	return (error);
}