Example #1
unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	long map_shared = (flags & MAP_SHARED);
	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;

#ifdef CONFIG_HUGETLB_PAGE
	if (REGION_NUMBER(addr) == RGN_HPAGE)
		addr = 0;
#endif
	if (!addr)
		addr = mm->free_area_cache;

	if (map_shared && (TASK_SIZE > 0xfffffffful))
		/*
		 * For 64-bit tasks, align shared segments to 1MB to avoid potential
		 * performance penalty due to virtual aliasing (see ASDM).  For 32-bit
		 * tasks, we prefer to avoid exhausting the address space too quickly by
		 * limiting alignment to a single page.
		 */
		align_mask = SHMLBA - 1;

  full_search:
	start_addr = addr = (addr + align_mask) & ~align_mask;

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				/* Start a new search --- just in case we missed some holes.  */
				addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/* Remember the address where we stopped this search:  */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = (vma->vm_end + align_mask) & ~align_mask;
	}
}
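The initial candidate and every post-collision retry in the loop above use the same rounding idiom, (addr + align_mask) & ~align_mask, which rounds addr up to the next multiple of align_mask + 1. This only works because PAGE_SIZE and SHMLBA are powers of two, so the mask has all low bits set. A minimal standalone sketch of the idiom (align_up is a hypothetical helper; the constants stand in for a 4KB PAGE_SIZE and a 1MB SHMLBA):

#include <stdio.h>

/* Round addr up to the next multiple of (mask + 1); mask must be 2^k - 1. */
static unsigned long align_up(unsigned long addr, unsigned long mask)
{
	return (addr + mask) & ~mask;
}

int main(void)
{
	unsigned long page_mask = 4096UL - 1;	/* PAGE_SIZE - 1 */
	unsigned long shm_mask = 1048576UL - 1;	/* SHMLBA - 1 */

	printf("%#lx\n", align_up(0x12345, page_mask));	/* prints 0x13000 */
	printf("%#lx\n", align_up(0x12345, shm_mask));	/* prints 0x100000 */
	return 0;
}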
Example #2
int ia64_map_check_rgn(unsigned long addr, unsigned long len,
		unsigned long flags)
{
	unsigned long roff;

	/*
	 * Don't permit mappings into unmapped space, the virtual page table
	 * of a region, or across a region boundary.  Note: RGN_MAP_LIMIT is
	 * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0.
	 */
	roff = REGION_OFFSET(addr);
	if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len)))
		return -EINVAL;
	return 0;
}
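The bounds test is written as roff > (RGN_MAP_LIMIT - len) rather than the more natural roff + len > RGN_MAP_LIMIT because the addition could wrap around for a large roff and slip past the check; the subtraction form cannot wrap once len <= RGN_MAP_LIMIT is known. A standalone sketch of the pattern (fits_below is a hypothetical helper):

#include <stdio.h>
#include <limits.h>

/* Overflow-safe range check: does [off, off + len) fit below limit?
 * Testing off <= limit - len avoids computing off + len, which could
 * wrap around; len <= limit is checked first so limit - len is valid. */
static int fits_below(unsigned long off, unsigned long len, unsigned long limit)
{
	if (len > limit)
		return 0;
	return off <= limit - len;
}

int main(void)
{
	unsigned long limit = 1UL << 30;

	printf("%d\n", fits_below(0, limit, limit));		/* 1: exact fit */
	printf("%d\n", fits_below(1, limit, limit));		/* 0: one byte past */
	printf("%d\n", fits_below(ULONG_MAX - 8, 16, limit));	/* 0: would wrap */
	return 0;
}

With the naive addition, ULONG_MAX - 8 + 16 wraps around to 7, which is below the limit, so the unsafe form would wrongly accept the last case.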
Example #3
static inline unsigned long
do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
{
	unsigned long roff;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			return -EBADF;

		if (!file->f_op || !file->f_op->mmap) {
			addr = -ENODEV;
			goto out;
		}
	}

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len || len > TASK_SIZE) {
		addr = -EINVAL;
		goto out;
	}

	/*
	 * Don't permit mappings into unmapped space, the virtual page table of a region,
	 * or across a region boundary.  Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
	 * (for some integer n <= 61) and len > 0.
	 */
	roff = REGION_OFFSET(addr);
	if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
		addr = -EINVAL;
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

out:	if (file)
		fput(file);
	return addr;
}
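For context, here is a sketch of how entry points along the lines of the historical ia64 wrappers delegate to do_mmap2(): mmap2 already receives its offset in pages, while mmap takes a byte offset that must be page-aligned. This is a simplified sketch, not the exact code of any particular kernel version:

asmlinkage unsigned long
sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
{
	/* mmap2 passes its offset in pages already. */
	addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
	if (!IS_ERR((void *) addr))
		force_successful_syscall_return();
	return addr;
}

asmlinkage unsigned long
sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
{
	/* mmap takes a byte offset, which must be page-aligned. */
	if (offset_in_page(off) != 0)
		return -EINVAL;

	addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
	if (!IS_ERR((void *) addr))
		force_successful_syscall_return();
	return addr;
}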
Example #4
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	int nasid, cnode, j;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_kernel *sn_flush_device_kernel;
	struct sn_flush_device_common *common;

	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base = (unsigned long)
		ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
			sizeof(struct pic));

	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
			IRQF_SHARED, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}
	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);

	/* 
	 * Update the Bridge with the "kernel" pagesize 
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_kernel) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_kernel++) {
				common = sn_flush_device_kernel->common;
				if (common->sfdl_slot == -1)
					continue;
				if ((common->sfdl_persistent_segment ==
				     soft->pbi_buscommon.bs_persist_segment) &&
				     (common->sfdl_persistent_busnum ==
				     soft->pbi_buscommon.bs_persist_busnum))
					common->sfdl_pcibus_info = soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);

	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	return soft;
}
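One fragility worth noting above: the ATE table size is computed as an unchecked multiplication, soft->pbi_int_ate_size * sizeof(u64). kcalloc() expresses the same zeroed allocation but returns NULL instead of silently wrapping if the multiplication overflows; a drop-in sketch of the equivalent call:

	/* Same zeroed allocation, with built-in overflow checking on
	 * the count * size multiplication. */
	soft->pbi_int_ate_resource.ate =
	    kcalloc(soft->pbi_int_ate_size, sizeof(u64), GFP_KERNEL);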
Example #5
/* Translate virtual address to physical address.  */
unsigned long
xencomm_vtop(unsigned long vaddr)
{
	struct page *page;
	struct vm_area_struct *vma;

	if (vaddr == 0)
		return 0UL;

	if (REGION_NUMBER(vaddr) == 5) {	/* region 5: page-table-mapped kernel area */
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep;

		/* On ia64, TASK_SIZE refers to current.  It is not initialized
		   during boot.
		   Furthermore the kernel is relocatable and __pa() doesn't
		   work on kernel image addresses, so use the recorded
		   kernel_virtual_offset instead.  */
		if (vaddr >= KERNEL_START
		    && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
			return vaddr - kernel_virtual_offset;

		/* In kernel area -- virtually mapped.  */
		pgd = pgd_offset_k(vaddr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return ~0UL;

		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud) || pud_bad(*pud))
			return ~0UL;

		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return ~0UL;

		ptep = pte_offset_kernel(pmd, vaddr);
		if (!ptep)
			return ~0UL;

		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
	}

	if (vaddr > TASK_SIZE) {
		/* percpu variables: not covered by __pa(), so translate
		   with the tpa instruction and return that directly */
		if (REGION_NUMBER(vaddr) == 7 &&
		    REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
			return ia64_tpa(vaddr);

		/* kernel address */
		return __pa(vaddr);
	}

	vma = find_extend_vma(current->mm, vaddr);
	if (!vma)
		return ~0UL;

	/* We assume the page is modified.  */
	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
	if (!page)
		return ~0UL;

	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}
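A sketch of how a caller might consume the result, treating ~0UL as the failure marker used throughout the function (xencomm_copy_example and its surrounding logic are hypothetical):

/* Hypothetical caller: translate a buffer address before handing it
 * to the hypervisor, failing if no physical page backs it. */
static int xencomm_copy_example(void *buf)
{
	unsigned long paddr = xencomm_vtop((unsigned long)buf);

	if (paddr == ~0UL)
		return -EINVAL;		/* no translation for this address */

	/* ... pass paddr to the hypercall machinery ... */
	return 0;
}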